"""
core_url
Copyright (C) 2020 New Entity Operations Inc.
ALL RIGHTS RESERVED
core_url holds retrieval methods for URL and other network-based operations.
You can also modify the system language to implement various URL and other
network-based requests.
"""
## Standard Imports:
import urllib.request
from urllib.parse import urlparse
import urllib.error
from socket import (
gaierror,
)
## Custom Imports:
from core_middlelayer import (
    DIRDATA,
    SLUG_DEFAULT_NETWORK, SLUG_BOOKMARKS_ONLY, SLUG_OUTPUT_URL,
    OUTPUT_FLUSH, OUTPUT_SLUG,
    YOUR_SET_LOCATION, YOUR_USER_AGENT,
    PROXY_GATE_KEY, PROXY_GATE_URL, PROXY_GATE_USER,
    PROXY_GATE_PORT, PROXY_GATE_PROTOCOL, PROXY_GATE_SUFFIX
)
## Create the SLUG_OUTPUT connector
SLUG_OUTPUT = SLUG_BOOKMARKS_ONLY+SLUG_OUTPUT_URL
## URL Collector
url_list = []
## URL modifying routines
class URLsanctuary:
    sanctuary_object_list = []
    ## Placeholder record shapes: instantiating one simply echoes the instance
    class Id:
        def __init__(self):
            print(self)
    class URL:
        def __init__(self):
            print(self)
    class Options:
        def __init__(self):
            print(self)
class ReachObject:
def __init__(self, id, url, options, linker):
self.id = id
self.url = url
self.options = options
self.linker = linker
    def __str__(self):
        return 'ReachObject(' \
            '{self.id}, {self.url}, {self.options}, ' \
            '{self.linker})'.format(self=self)
    def __repr__(self):
        return 'ReachObject(' \
            '{a}{self.id}, {b}{self.url}, {c}{self.options}, ' \
            '{d}{self.linker})'.format(self=self,
            a="ID: ",
            b="URL: ",
            c="Options: ",
            d="Linker: "
        )
class KeyConstructor:
def __init__(self, id, url, options, linker):
self.id = id
self.url = url
self.options = options
self.linker = linker
    def __str__(self):
        return 'KeyConstructor(' \
            '{self.id}, {self.url}, {self.options}, ' \
            '{self.linker})'.format(self=self)
    def __repr__(self):
        return 'KeyConstructor(' \
            '{self.id}, {self.url}, {self.options}, ' \
            '{self.linker})'.format(self=self)
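## Illustrative sketch: building a ReachObject and tracking it on the
## sanctuary list. The helper name and the example values are assumptions
## for demonstration, not records the module ships with.
def register_reach_object(id, url, options, linker):
    """
    Build a ReachObject and append it to URLsanctuary.sanctuary_object_list.
    Hypothetical usage:
        register_reach_object('0', 'https://example.com', 'GET', 'root')
    """
    record = ReachObject(id, url, options, linker)
    URLsanctuary.sanctuary_object_list.append(record)
    return record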
#*************************************#
# START: STEP 9 - NETWORK ROUTINES #
#*************************************#
print(
"---------- START: STEP 9 - NETWORK ROUTINES ----------"
)
# Print out the header to orient the reader
ConstructedClass = KeyConstructor(
'id', 'url', 'options', 'linker'
)
print(ConstructedClass)
print(
"---------- STOP: STEP 9 - NETWORK ROUTINES ----------\n"
)
#**********************************#
# STOP: STEP 9 - NETWORK ROUTINES #
#**********************************#
## Backend Network Request functions
class BackendFacilitator:
"""
This is the 'working_url' logic
"""
    def check_bookmark_bin_fresh():
        """
        Wipe the Bookmark temp storage basket
        """
        with open(SLUG_OUTPUT, 'w') as file:
            file.write("")
        print("----- Wiped Records: Starting Resync -----")
    def check_bookmark_bin():
        """
        Count the records currently held in the basket
        """
        total = 0
        with open(SLUG_OUTPUT, 'r') as file:
            for line in file:
                if line.strip():
                    total += 1
        print('You have: {} Records in this Entity'.format(total))
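## Convenience sketch (an assumption about intended use): wipe the basket,
## then immediately report the fresh count, using the two routines above.
def resync_bookmark_bin():
    BackendFacilitator.check_bookmark_bin_fresh()
    BackendFacilitator.check_bookmark_bin()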
## Link list that can be appended to the Flush_Bucket for termination or storage
links = []
## Temp bucket that can be written away from
Flush_Bucket = []
class URLGrab:
    def generate_URL():
        for i in links:
            zX = i[1]
            # Add each value to the flush bucket
            Flush_Bucket.append(zX)
        # update_entity() is expected to be supplied elsewhere in the core_* runtime
        update_entity()
    def update_bookmark_bin():
        with open(SLUG_OUTPUT, 'w') as file:
            for i in Flush_Bucket:
                file.write('%s\n' % i)
    def entity():
        update_entity()
    def flush():
        with open(DIRDATA+OUTPUT_FLUSH, 'w') as file:
            for i in Flush_Bucket:
                file.write('%s\n' % i)
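## Usage sketch (the helper name and the pair shape are assumptions): stage
## (label, url) pairs the way generate_URL()'s i[1] lookup implies, then
## persist the buffer with the two writers above.
def stage_and_flush(pairs):
    for label, url in pairs:
        Flush_Bucket.append(url)
    URLGrab.update_bookmark_bin()
    URLGrab.flush()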
## Route outbound requests through the configured PROXY_GATE credentials
proxy_handler = urllib.request.ProxyHandler({
PROXY_GATE_PROTOCOL: PROXY_GATE_SUFFIX+\
PROXY_GATE_USER+":"+\
PROXY_GATE_KEY+"@"+\
PROXY_GATE_URL+":"+\
PROXY_GATE_PORT
})
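## The handler above assembles to PROTOCOL -> SUFFIX + USER:KEY@HOST:PORT,
## e.g. {'http': 'http://user:secret@proxy.example.com:8080'} (hypothetical
## values). A minimal inspection sketch, assuming every PROXY_GATE_* value
## is a string:
def describe_proxy_gate():
    """Return the proxy mapping this module routes requests through."""
    return {
        PROXY_GATE_PROTOCOL: PROXY_GATE_SUFFIX
        + PROXY_GATE_USER + ":"
        + PROXY_GATE_KEY + "@"
        + PROXY_GATE_URL + ":"
        + PROXY_GATE_PORT
    }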
## For core_FRONTEND
WEB_BIN = []
bin_backward = []
bin_forward = []
class Move:
    def left():
        # Step back to the most recently rendered document, if any
        try:
            SLUG_URL_BACK = bin_backward[-1]
        except IndexError:
            return None
        bin_forward.clear()
        bin_forward.append(SLUG_URL_BACK)
        return(SLUG_URL_BACK)
    def right():
        # Step forward to the stored document, if any. A better approach
        # would be to search for the last known occurrence of this address
        # and then insert from there
        try:
            return(bin_forward[0])
        except IndexError:
            return None
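## Navigation sketch: bin_backward collects each rendered document, so
## Move.left() re-exposes the latest one and Move.right() replays whatever
## is parked in bin_forward. The payload below is purely hypothetical.
def demo_history_navigation():
    bin_backward.append(b'<html>hypothetical document</html>')
    previous = Move.left()
    replayed = Move.right()
    return previous, replayed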
class get_WEB_RESOURCE_FROM_CLI:
    def GO():
        address = input(
            "What website would you like to load? "
        )
        return(address)
class get_WEB_RESOURCE:
def GO(object):
address = object
return(address)
class get_DEFAULT_RESOURCE:
def GO():
address = SLUG_DEFAULT_NETWORK
return(address)
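## Dispatch sketch (an assumption about intended use): pick an address
## source the same way url_slug.GO() branches below, without fetching.
def resolve_address(supplied=None):
    if supplied is None or supplied == SLUG_DEFAULT_NETWORK:
        return get_DEFAULT_RESOURCE.GO()
    return get_WEB_RESOURCE.GO(supplied)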
## A Slug_Bucket stores the URL pre-format for storage in the Flush_Bucket
Slug_Bucket = []
reached = []
class url_slug:
def GO(object=SLUG_DEFAULT_NETWORK):
if object == SLUG_DEFAULT_NETWORK:
user_agent = YOUR_USER_AGENT
WEB_DOC = urllib.request.build_opener(proxy_handler)
WEB_DOC.addheaders = [
('User-agent', user_agent),
('location', YOUR_SET_LOCATION)
]
urllib.request.install_opener(WEB_DOC)
ADDRESS_FEED = get_DEFAULT_RESOURCE.GO()
ADDRESS = ADDRESS_FEED
            try:
                with urllib.request.urlopen(ADDRESS) as code_to_render:
                    SOURCED_CONTENT = code_to_render.read()
                    print(SOURCED_CONTENT)
                    WEB_DOC_CURED = SOURCED_CONTENT
                    Slug_Bucket.append(WEB_DOC_CURED)
                    bin_backward.append(WEB_DOC_CURED)
                    bin_forward.clear()
                    bin_forward.append(WEB_DOC_CURED)
                reached.append(ADDRESS)
            except (gaierror, urllib.error.URLError):
#****************************************************#
# START: INSTANCE 3 - core_url #
#****************************************************#
print(
"---------- START: INSTANCE 3 - core_url ----------\n"
)
reached.append('REQUEST ERROR')
SOURCED_CONTENT = 'REQUEST ERROR'
print(SOURCED_CONTENT)
print(
"---------- STOP: INSTANCE 3 - core_url ----------\n"
)
WEB_DOC_CURED = SOURCED_CONTENT
Slug_Bucket.append(WEB_DOC_CURED)
bin_backward.append(WEB_DOC_CURED)
bin_forward.clear()
bin_forward.append(WEB_DOC_CURED)
else:
user_agent = YOUR_USER_AGENT
WEB_DOC = urllib.request.build_opener(proxy_handler)
WEB_DOC.addheaders = [
('User-agent', user_agent),
('location', YOUR_SET_LOCATION)
]
urllib.request.install_opener(WEB_DOC)
ADDRESS_FEED = get_WEB_RESOURCE.GO(object)
ADDRESS = ADDRESS_FEED
            try:
                #values = {'location': 'New York'}
                #user_agent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)'
                #headers = {'User-Agent': user_agent}
                #data = urllib.parse.urlencode(values)
                #EXPANDED_REQUEST = urllib.request.Request(ADDRESS, data, headers)
                with urllib.request.urlopen(ADDRESS) as code_to_render:
                    SOURCED_CONTENT = code_to_render.read()
                    print(SOURCED_CONTENT)
                    WEB_DOC_CURED = SOURCED_CONTENT
                    Slug_Bucket.append(WEB_DOC_CURED)
                    bin_backward.append(WEB_DOC_CURED)
                    bin_forward.clear()
                    bin_forward.append(WEB_DOC_CURED)
                reached.append(ADDRESS)
            except (gaierror, urllib.error.URLError):
#****************************************************#
# START: INSTANCE 3 - core_url #
#****************************************************#
print(
"---------- START: INSTANCE 3 - core_url ----------\n"
)
reached.append('REQUEST ERROR')
SOURCED_CONTENT = 'REQUEST ERROR'
print(SOURCED_CONTENT)
print(
"---------- STOP: INSTANCE 3 - core_url ----------\n"
)
#****************************************************#
# STOP: INSTANCE 3 - core_url #
#****************************************************#
WEB_DOC_CURED = SOURCED_CONTENT
Slug_Bucket.append(WEB_DOC_CURED)
bin_forward.clear()
bin_forward.append(WEB_DOC_CURED)
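## Thin wrapper sketch (the name `fetch` and its return convention are
## assumptions): run url_slug.GO() against an address and hand back the
## most recently cured document, if any.
def fetch(address=SLUG_DEFAULT_NETWORK):
    url_slug.GO(object=address)
    return Slug_Bucket[-1] if Slug_Bucket else None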
class URL_SYSTEM:
def GO():
INSERT_TO_WEBFRAME = url_slug.GO()
WEB_BIN.append(INSERT_TO_WEBFRAME)
#*************************************************#
# START: SUBSTEP: 9a - FORK NETWORK PROCESS #
#*************************************************#
print(
"---------- START: SUBSTEP: 9a - FORK NETWORK PROCESS ----------"
)
print(WEB_BIN)
# URL_SYSTEM.GO(object='https://ryanmckenna.com')
print(
"---------- STOP: SUBSTEP: 9a - FORK NETWORK PROCESS ----------\n"
)
#*************************************************#
# STOP: SUBSTEP: 9a - FORK NETWORK PROCESS #
#*************************************************#
def GO():
url_slug.GO()
def update_slug():
    with open(DIRDATA+OUTPUT_SLUG, 'w') as file:
        for i in Slug_Bucket:
            file.write('%s\n' % repr(i))
def slug():
update_slug()