# -*- coding: utf-8 -*-
"""
COPYRIGHT (C) 2020-2023 NEW ENTITY OPERATIONS INC. ALL RIGHTS RESERVED
INSTANCE: origin
MODIFIED: 2023/05/02
OVERVIEW:

 core_interface is a call-logic API for system-dependent interactions and
  system responses

 Define short scripts and other 'system resources' or potential interactions
  that the system can provide. Use the API to interface with those interactions.

 These could be audio, visual, or even bus resources. There is unlimited
  scope available, and any language can be extended into the interface.

 Each navigation element should be implemented in a 1->N component waterfall.
 To gain system-specific add-on functionality, use the interface to import
 core_navigator and access the API

 USAGE

 >>> import core_interface
 >>> core_interface.core_navigator
 ... * components available here * ...

SETUP (optional):

Run the setup.py function in CONFIG
Default: Off
Reasoning: After the initial setup, CONFIG.setup shouldn't be run again

This program holds the DIL (Default Interface Logic)

The DIL is a standardized way to interface with system resources

"""
__version__ = "0.0.5"
__author__ = "Ryan McKenna"
__copyright__ = "Copyright (C) 2020-2023 New Entity Operations Inc."
__credits__ = [
 "Ryan McKenna",
 "New Entity Operations Inc.", "New Entity Operations, LLC"]
__email__ = "Operator@NewEntityOperations.com"
__license__ = "New Entity License"
__maintainer__ = "Ryan McKenna"
__status__ = "Production"

## MODE-> facilities
from MODE.facilities import (author_list,
 BUCKET_EQ, BUCKET_EQA, BUCKET_EQL,
 BUCKET_EQO, BUCKET_EQOL, BUCKET_EQR, BUCKET_ext_DO,
 BUCKET_PHASE, BUCKET_PLE, BUCKET_TEMP_NODE,
 byteorder, chdir, check_output, copyfileobj, CReal,
 ctermid, Cure, date, datetime, environ, exclude_list,
 getcwd, getgid, getfilesystemencoding,
 getlogin, getuid, glob, GZIP_OPEN, include_list, kill,
 LAST_KING_SLUG, LAST_QUEEN_SLUG,
 listdir, mkdtemp, name, NamedTemporaryFile, oFo, OPENTARFILE,
 OUTPUT_STRUCTURE, path, Path, PATH_INSTANCE, Popen, QEKB,
 recursive_exclude_list, rename, remove, rmtree,
 SIGTERM, sleep, stat, strftime, system, system_member, TGD, walk, ZipFile)

## copyfile (used by Clone) and make_archive (used by ArchiveHandler) are not
## in the MODE.facilities import above, so pull them from the standard library
from shutil import copyfile, make_archive

# core_middlelayer
from core_middlelayer import (
 ABOUT_ACCESS, ABOUT_ASSET, ABOUT_DS, ABOUT_ENTITY, ABOUT_IDENTITY, ABOUT_RING,
 ABOUT_SUPPORTING, ACTIVEES, ALLOWED_LANG_BASH, ALLOWED_LANG_PHP,
 ALLOWED_LANG_PYTHON, ALLOWED_LANG_RUBY, ARCHIVETYPE, ARCHIVE_ENTITY_GZ,
 ARCHIVE_BYTE_VALUE, asset_folder, CHANGELOG, CORE_DIR, DIRACCESS, DIRBACKUP,
 ## DIRCONFIG, DIRLICENSES, and DIRLOCATION are referenced below and are
 ## assumed to be exported by core_middlelayer alongside the other DIR* slugs
 DIRCONFIG, DIRDATA, DIRDOCS, DIRDOCUMENTATION, DIRENTITIES, DIRFIGMENT,
 DIRLICENSES, DIRLOCATION,
 DIRFIGMENT_STAGING, DIRFIGMENT_QUICK_FORMAT, DIRIDENTITY, DIROPENPACKAGER,
 ENCODING_SET, ENTITY_LEDGER, ENTITY_LOCATION, FIGMENT_LEDGER,
 FILE_TEXT_FIGMENT, FILE_TEXT_SERVER, LICENSE, LOCATIONSOURCE,
 malware_guard_clam_log, malware_guard_event_log, malware_guard_inotify_log,
 malware_guard_path, malware_guard_runner_slug,
 MEMBER_ACTIVE_HOLDER, MY_SYSTEM_HOME, NAMEARCHIVE, OPENPACKAGERINDEX, PACKAGES,
 PATH_FULL_CORE, PHASE_ENTITYSCRIPT_ORE, PHASE_ENTITYSCRIPT_RING, PHASE_FIGMENT,
 PHASE_LOG, PHASE_SERVER,
 proxy_cache_slug, proxy_log_slug, proxy_path,
 proxy_path_runner_slug, README, RINGENTITY_LOC,
 SH_SERVERUP, SLUG_ENTITY_LEDGER, SLUGARCHIVE,
 STRUCTURE, temp_directory, temp_NODE_KEY, temp_PATH,
 USAGE, USR_SYSTEM_LOCATION, VCNKEY_ALLOWED, VCNKEY_STRUCTURE,
 ARCHIVEINI, ARCHIVEINIBACKUP, BROWSEMEHINI, BROWSEMEHINIBACKUP,
 CONFIGINI, CONFIGINIBACKUP, COREINI, COREINIBACKUP,
 DESIGN_BASICSINI, DESIGN_BASICSINIBACKUP, EVENTLOLLIINI,
 EVENTLOLLIINIBACKUP, GATEKEEPERINI, GATEKEEPERINIBACKUP, INTERFACEINI,
 INTERFACEINIBACKUP, LOCKERLINKSINI, LOCKERLINKSINIBACKUP, NETWORKINI,
 NETWORKINIBACKUP, OPENPACKAGERINI, OPENPACKAGERINIBACKUP,
 REMINDMEINI, REMINDMEINIBACKUP, RINGINI, RINGINIBACKUP, SPECIALINI,
 SPECIALINIBACKUP, TESTINGINI, TESTINGINIBACKUP, VCNINI, VCNINIBACKUP,
 XYZOBJECTSINI, XYZOBJECTSINIBACKUP)

## Give yourself a set of operational extensions in the core_navigator script
## Methods in _navigator should aid in system querying operations
## core_navigator
import core_navigator

## Always localize the ENTITY_LEDGER_FILE
ENTITY_LEDGER_FILE = PATH_INSTANCE+RINGENTITY_LOC+SLUG_ENTITY_LEDGER
ENTITY_ASSET_LOCATION = asset_folder+"ENTITY/"
## Standard Operations on Startup: Setup Methodology Start->End
setup_text = 'TRINE three part system-> Setup: '
setup_stop = 'Done'
setup_encoding_runtime = ENCODING_SET
setup_encoding_shell = ENCODING_SET

################################################################################
## Time Units
################################################################################
## TimeStamp
class TimeStamp:
 """
 Provide TimeStamp related instance of any specified sub-type
 """
 def date_stamp_day():
  """
  Interface to the system date_stamp_day routine to determine the system DAY
  """
  return(str(date.today()))

 def time_stamp_time():
  """
  Interface to the system date_stamp_time routine to determine the system time
  NOW
  """
  return(str(datetime.now().time()))

## TimeSystem
class TimeSystem:
 """
 Display the time from your System in local units X and other various units
 X1-Xn. X1 is the default 'global' time and requires an internet connection
 X2-Xn are defined by the 'operator'
 """
 def time_now():
  return(strftime('%H:%M:%S %p'))
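
## USAGE (illustrative, assuming this module is imported as core_interface):
##  >>> core_interface.TimeStamp.date_stamp_day()
##  ... * the system DAY, e.g. '2023-05-02' * ...
##  >>> core_interface.TimeSystem.time_now()
##  ... * the local time as HH:MM:SS * ...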

################################################################################
## Utilities
################################################################################
class Clone:
 """
 Clone instances from a designated location to another valid and allowed area
 """
 def ini_archive():
  """
  Backup the archive.ini configuration file
  Instance-> archive.ini
  """
  copyfile(PATH_INSTANCE+DIRCONFIG+ARCHIVEINI,
   PATH_INSTANCE+DIRBACKUP+DIRCONFIG+ARCHIVEINIBACKUP)
 def ini_browsemeh():
  """
  Backup the browsemeh.ini configuration file
  Instance-> browsemeh.ini
  """
  copyfile(PATH_INSTANCE+DIRCONFIG+BROWSEMEHINI,
   PATH_INSTANCE+DIRBACKUP+DIRCONFIG+BROWSEMEHINIBACKUP)
 def ini_config():
  """
  Backup the config.ini configuration file
  Instance-> config.ini
  """
  copyfile(PATH_INSTANCE+DIRCONFIG+CONFIGINI,
   PATH_INSTANCE+DIRBACKUP+DIRCONFIG+CONFIGINIBACKUP)
 def ini_core():
  """
  Backup the core.ini configuration file
  Instance-> core.ini
  """
  copyfile(PATH_INSTANCE+DIRCONFIG+COREINI,
   PATH_INSTANCE+DIRBACKUP+DIRCONFIG+COREINIBACKUP)
 def ini_design_basics():
  """
  Backup the design_basics.ini configuration file
  Instance-> design_basics.ini
  """
  copyfile(PATH_INSTANCE+DIRCONFIG+DESIGN_BASICSINI,
   PATH_INSTANCE+DIRBACKUP+DIRCONFIG+DESIGN_BASICSINIBACKUP)
 def ini_eventlolli():
  """
  Backup the eventlolli.ini configuration file
  Instance-> eventlolli.ini
  """
  copyfile(PATH_INSTANCE+DIRCONFIG+EVENTLOLLIINI,
   PATH_INSTANCE+DIRBACKUP+DIRCONFIG+EVENTLOLLIINIBACKUP)
 def ini_gatekeeper():
  """
  Backup the gatekeeper.ini configuration file
  Instance-> gatekeeper.ini
  """
  copyfile(PATH_INSTANCE+DIRCONFIG+GATEKEEPERINI,
   PATH_INSTANCE+DIRBACKUP+DIRCONFIG+GATEKEEPERINIBACKUP)
 def ini_interface():
  """
  Backup the interface.ini configuration file
  Instance-> interface.ini
  """
  copyfile(PATH_INSTANCE+DIRCONFIG+INTERFACEINI,
   PATH_INSTANCE+DIRBACKUP+DIRCONFIG+INTERFACEINIBACKUP)
 def ini_lockerlinks():
  """
  Backup the lockerlinks.ini configuration file
  Instance-> lockerlinks.ini
  """
  copyfile(PATH_INSTANCE+DIRCONFIG+LOCKERLINKSINI,
   PATH_INSTANCE+DIRBACKUP+DIRCONFIG+LOCKERLINKSINIBACKUP)
 def ini_network():
  """
  Backup the network.ini configuration file
  Instance-> network.ini
  """
  copyfile(PATH_INSTANCE+DIRCONFIG+NETWORKINI,
   PATH_INSTANCE+DIRBACKUP+DIRCONFIG+NETWORKINIBACKUP)
 def ini_openpackager():
  """
  Backup the openpackager.ini configuration file
  Instance-> openpackager.ini
  """
  copyfile(PATH_INSTANCE+DIRCONFIG+OPENPACKAGERINI,
   PATH_INSTANCE+DIRBACKUP+DIRCONFIG+OPENPACKAGERINIBACKUP)
 def ini_remindme():
  """
  Backup the remindme.ini configuration file
  Instance-> remindme.ini
  """
  copyfile(PATH_INSTANCE+DIRCONFIG+REMINDMEINI,
   PATH_INSTANCE+DIRBACKUP+DIRCONFIG+REMINDMEINIBACKUP)
 def ini_ring():
  """
  Backup the ring.ini configuration file
  Instance-> ring.ini
  """
  copyfile(PATH_INSTANCE+DIRCONFIG+RINGINI,
   PATH_INSTANCE+DIRBACKUP+DIRCONFIG+RINGINIBACKUP)
 def ini_special():
  """
  Backup the special.ini configuration file
  Instance-> special.ini
  """
  copyfile(PATH_INSTANCE+DIRCONFIG+SPECIALINI,
   PATH_INSTANCE+DIRBACKUP+DIRCONFIG+SPECIALINIBACKUP)
 def ini_testing():
  """
  Backup the testing.ini configuration file
  Instance-> testing.ini
  """
  copyfile(PATH_INSTANCE+DIRCONFIG+TESTINGINI,
   PATH_INSTANCE+DIRBACKUP+DIRCONFIG+TESTINGINIBACKUP)
 def ini_VCN():
  """
  Backup the VCN.ini configuration file
  Instance-> VCN.ini
  """
  copyfile(PATH_INSTANCE+DIRCONFIG+VCNINI,
   PATH_INSTANCE+DIRBACKUP+DIRCONFIG+VCNINIBACKUP)
 def ini_XYZObjects():
  """
  Backup the XYZObjects.ini configuration file
  Instance-> XYZObjects.ini
  """
  copyfile(PATH_INSTANCE+DIRCONFIG+XYZOBJECTSINI,
   PATH_INSTANCE+DIRBACKUP+DIRCONFIG+XYZOBJECTSINIBACKUP)

 def Z_Completed_Task():
  print("CLONE-> generic configuration backup: complete")

class Encoding:
 def file_system():
  return(ENCODING_SET)

class RuntimeNode:
 """
 RuntimeNode behavior is for initializing setup tasks
 """
 def assemble_structure():
  print(setup_text+"part 2")
  try:
   with open(PATH_INSTANCE+STRUCTURE) as f:
    for line in f:
     print(line)
   f.close()
  ## If the file doesn't exist, generate it
  except FileNotFoundError:
   Structure.assemble()

  print(setup_text+"part 3")
  print(setup_stop)

 def establish():
  print(setup_text+"part 1")
  chdir(CORE_DIR)
  print('Establishing the runtime on NODE: '+str(getcwd()))

 def set_environments():
  print("RUNTIME-> encoding is "+str(setup_encoding_runtime))
  print("SHELL-> encoding is "+str(setup_encoding_shell))
  print(setup_text+"parts 1, 2, and 3...")

class Structure:
 """
 Structure is a formatting object for the program structure that allows you
 to run a pre-built routine to set an OUTPUT_STRUCTURE, as well as NOT_PATH
 steps that will exclude paths according to the format.

 The format is '-not -path' "$PATH" followed by a \
 You can build the structure in core_operations and make the interface
 available through that routine.
 In summary, this will generate a program tree and make it available to
  the instance

 STRUCTURE.es can be read and verified upon startup elsewhere to ensure that
 the file structures aren't changing from one startup period to the next

 The file should be in the [DATADIR]

 In this case, the default: /NOVASTORE/PROGRAMS/trine/DATA/
 """
 ## provide an output structure
 OUTPUT_STRUCTURE = PATH_INSTANCE+DIRDATA+OUTPUT_STRUCTURE
 ## provide any paths to exclude from the structure. Follow the convention.
 NOT_PATH = \
  '-not -path "'+PATH_INSTANCE+'__pycache__'+'"' \
  ' -not -path "'+PATH_INSTANCE+'trine-env/*'+'"' \
  ' -not -path "'+PATH_INSTANCE+'CONFIG/*'+'"' \
  ' -not -path "'+PATH_INSTANCE+'DATA/LOGS/*'+'"' \
  ' -not -path "'+PATH_INSTANCE+'DOCUMENTATION/*'+'"' \
  ' -not -path "'+PATH_INSTANCE+'FILTER/\|_\|'+ \
   'FILTER_MACHINES/MACHINE_DATA/vpn/*'+'"' \
  ' -not -path "'+PATH_INSTANCE+'FILTER/\|_\|'+ \
   'FILTER_MACHINES/MACHINES/RECOVERY/*'+'"' \
  ' -not -path "'+PATH_INSTANCE+'FILTER/\|_\|'+ \
   'FILTER_MACHINES/MACHINES/STARTING/*'+'"' \
  ' -not -path "'+PATH_INSTANCE+'FILTER/\|_\|'+ \
   'FILTER_MACHINES/MACHINES/EXPLORER_RIG/RIG_VM/Virtual_Hard_Disks/*'+'"' \
  ' -not -path "'+PATH_INSTANCE+'FILTER/\|_\|'+ \
   'FILTER_MACHINES/MACHINES/EXPLORER_RIG/RIG_VM/Virtual_Machines/*'+'"' \
  ' -not -path "'+PATH_INSTANCE+'FILTER/\|_\|'+ \
   'FILTER_MACHINES/MACHINES/EXPLORER_RIG/universe/*'+'"' \
  ' -not -path "'+PATH_INSTANCE+'IDENTITY/PHOTO/background/*'+'"' \
  ' -not -path "'+PATH_INSTANCE+'IDENTITY/PHOTO/media/*'+'"' \
  ' -not -path "'+PATH_INSTANCE+'LEARNING/RRR_RECOVERY/*'+'"' \
  ' -not -path "'+PATH_INSTANCE+'LEARNING/ZZZ_DEPRECATED/*'+'"' \
  ' -not -path "'+PATH_INSTANCE+'LEARNING/TOPICS/*'+'"' \
  ' -not -path "'+PATH_INSTANCE+'OPENPACKAGER/corehost/*'+'"' \
  ' -not -path "'+PATH_INSTANCE+'OPENPACKAGER/jupyter/*'+'"' \
  ' -not -path "'+PATH_INSTANCE+'RING/BACKUP/*'+'"' \
  ' -not -path "'+PATH_INSTANCE+'RING/DOCUMENTS/*'+'"' \
  ' -not -path "'+PATH_INSTANCE+'RING/ENTITY/FIGMENTS/*'+'"' \
  ' -not -path "'+PATH_INSTANCE+'RING/PROGRAMS/AIREP/*'+'"' \
  ' -not -path "'+PATH_INSTANCE+'RING/PROGRAMS/COLLECTIONS/*'+'"' \
  ' -not -path "'+PATH_INSTANCE+'RING/PROGRAMS/LINUX/*'+'"' \
  ' -not -path "'+PATH_INSTANCE+'RING/PROGRAMS/WINE/*'+'"'
 ## provide any types to include in the structure
 TYPE = '-type d'

 def assemble():
  # Generate this without personal data
  # This function can be run anywhere from a bash alias
  structure=' find '+PATH_INSTANCE+' '+Structure.TYPE+' '+\
   Structure.NOT_PATH+' | sort > '+Structure.OUTPUT_STRUCTURE
  ## execute the structure
  check_output(structure, shell=True)
  ## verify the structure to std output
  print("STRUCTURE ROUTINE-> was: "+str(structure))

################################################################################
## System Programming Language interfaces
################################################################################
## allowed C.ORE languages
class ALLOWED_LANGUAGES():
 """
 Provide any relevant programming language information here
 """
 def allowed_lang_BASH():
  """
  Allowed Programming Language: Bash
  """
  print(ALLOWED_LANG_BASH)

 def allowed_lang_PHP():
  """
  Allowed Programming Language: PHP
  """
  print(ALLOWED_LANG_PHP)

 def allowed_lang_PYTHON():
  """
  Allowed Programming Language: Python
  """
  print(ALLOWED_LANG_PYTHON)

 def allowed_lang_RUBY():
  """
  Allowed Programming Language: Ruby
  """
  print(ALLOWED_LANG_RUBY)

class get_GENERIC_INTERFACES:
 """
 Provide generic interfaces to the application. These are standardized
 scripts that are meant to be uniform descriptive files
 V2TEMPEST WATERFALL: *category*_*NAME_OF_FUNCTION*
 """
 ## ABOUT_ACCESS
 def information_ABOUT_ACCESS():
  """
  ABOUT_ACCESS.ds
  """
  print(PATH_INSTANCE+DIRDOCS+ABOUT_ACCESS+": ")
  try:
   with open(PATH_INSTANCE+DIRDOCS+ABOUT_ACCESS, oFo.read) as f:
    for line in f:
     print(line)
   f.close()
  except FileNotFoundError:
   print("Unable to locate "+ABOUT_ACCESS)

 ## ABOUT_ASSET
 def information_ABOUT_ASSET():
  """
  ABOUT_ASSET.ds
  """
  print(PATH_INSTANCE+DIRDOCS+ABOUT_ASSET+": ")
  try:
   with open(PATH_INSTANCE+DIRDOCS+ABOUT_ASSET, oFo.read) as f:
    for line in f:
     print(line)
   f.close()
  except FileNotFoundError:
   print("Unable to locate "+ABOUT_ASSET)

 ## ABOUT_DS
 def information_ABOUT_DS():
  """
  ABOUT_DS.ds
  """
  print(PATH_INSTANCE+DIRDOCS+ABOUT_DS+": ")
  try:
   with open(PATH_INSTANCE+DIRDOCS+ABOUT_DS, oFo.read) as f:
    for line in f:
     print(line)
   f.close()
  except FileNotFoundError:
   print("Unable to locate "+ABOUT_DS)

 ## ABOUT_ENTITY
 def information_ABOUT_ENTITY():
  """
  ABOUT_ENTITY.ds
  """
  print(PATH_INSTANCE+DIRDOCS+ABOUT_ENTITY+": ")
  try:
   with open(PATH_INSTANCE+DIRDOCS+ABOUT_ENTITY, oFo.read) as f:
    for line in f:
     print(line)
   f.close()
  except FileNotFoundError:
   print("Unable to locate "+ABOUT_ENTITY)

 ## ABOUT_IDENTITY
 def information_ABOUT_IDENTITY():
  """
  ABOUT_IDENTITY.ds
  """
  print(PATH_INSTANCE+DIRDOCS+ABOUT_IDENTITY+": ")
  try:
   with open(PATH_INSTANCE+DIRDOCS+ABOUT_IDENTITY, oFo.read) as f:
    for line in f:
     print(line)
   f.close()
  except FileNotFoundError:
   print("Unable to locate "+ABOUT_IDENTITY)

 ## ABOUT_RING
 def information_ABOUT_RING():
  """
  ABOUT_RING.ds
  """
  print(PATH_INSTANCE+DIRDOCS+ABOUT_RING+": ")
  try:
   with open(PATH_INSTANCE+DIRDOCS+ABOUT_RING, oFo.read) as f:
    for line in f:
     print(line)
   f.close()
  except FileNotFoundError:
   print("Unable to locate "+ABOUT_RING)

 ## ABOUT_SUPPORTING
 def information_ABOUT_SUPPORTING():
  """
  SUPPORTING.ds
  """
  print(PATH_INSTANCE+DIRDOCS+ABOUT_SUPPORTING+": ")
  try:
   with open(PATH_INSTANCE+DIRDOCS+ABOUT_SUPPORTING, oFo.read) as f:
    for line in f:
     print(line)
   f.close()
  except FileNotFoundError:
   print("Unable to locate "+ABOUT_SUPPORTING)

 ## CHANGELOG
 def information_CHANGELOG():
  """
  Read and Display the changelog.ds
  """
  print(PATH_INSTANCE+DIRDOCS+CHANGELOG+": ")
  try:
   with open(PATH_INSTANCE+DIRDOCS+CHANGELOG, oFo.read) as f:
    for line in f:
     print(line)
   f.close()
  except FileNotFoundError:
   print("Unable to locate "+CHANGELOG)

 ## CORE_PACKAGES
 def information_CORE_PACKAGES():
  """
  Get all additionally installed env-packages
  """
  print(PATH_INSTANCE+DIRDATA+PACKAGES+": ")
  try:
   with open(PATH_INSTANCE+DIRDATA+PACKAGES, oFo.read) as f:
    for line in f:
     print(line)
   f.close()
  except FileNotFoundError:
   print("Unable to locate "+PACKAGES)

 # README
 def information_README():
  """
  Read and Display the README.ds
  """
  print(PATH_INSTANCE+DIRDOCS+README+": ")
  try:
   with open(PATH_INSTANCE+DIRDOCS+README, oFo.read) as f:
    for line in f:
     print(line)
   f.close()
  except FileNotFoundError:
   print("Unable to locate "+README)

 ## USAGE
 def information_USAGE():
  """
  USAGE.ds
  """
  print(PATH_INSTANCE+DIRDOCS+USAGE+": ")
  try:
   with open(PATH_INSTANCE+DIRDOCS+USAGE, oFo.read) as f:
    for line in f:
     print(line)
   f.close()
  except FileNotFoundError:
   print("Unable to locate "+USAGE)

 ## INSTANCE_PERMISSIONS
 def licenses_INSTANCE_PERMISSIONS():
  """
  Return the LICENSE of the full program
  """
  print(PATH_INSTANCE+DIRLICENSES+LICENSE+": ")
  try:
   with open(PATH_INSTANCE+DIRLICENSES+LICENSE, oFo.read) as f:
    for line in f:
     print(line)
   f.close()
  except FileNotFoundError:
   print("Unable to locate "+LICENSE)

################################################################################
## Complex Operators
################################################################################
class MergeValues:
 """
 Provide an interface to merge phase instances from LOCAL->GLOBAL and similar
 """
 def and_backup_server_phase():
  with open(
   PATH_INSTANCE+DIRDATA+PHASE_SERVER, oFo.read_binary) as file_holder:
   with GZIP_OPEN(
    PATH_INSTANCE+"BACKUP/"+\
     TimeStamp.time_stamp_time()+"-"+\
     TimeStamp.date_stamp_day()+"-"+\
     PHASE_SERVER+".gz",
     oFo.write_binary) as file_merge_destination:
    copyfileobj(file_holder, file_merge_destination)

 def and_do_not_backup_server_phase():
  pass
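
## USAGE (illustrative): snapshot the server phase into BACKUP/ as a
## time-stamped .gz, assuming DIRDATA+PHASE_SERVER already exists
##  >>> core_interface.MergeValues.and_backup_server_phase()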

class PullValues:
 """
 Provide an interface that can offer generic instances for pull-ready data
 formats
 """
 ## PARSE LIST
 def ENTITYSCRIPT_PARSE_LIST(agent):
  # FILTER the query
  PARSE_ENTITIES.parse_all_entities(agent=agent)

 ## ACTIVE
 def ENTITYSCRIPT_QUALIFIERS(agent):
  # FILTER the query
  FILTER_ENTITYSCRIPT_QUALIFIERS.pull_all_qualifiers(agent=agent)

 ## RING
 def ENTITYSCRIPT_QUALIFIERS_RING(agent):
  # FILTER the query
  FILTER_ENTITYSCRIPT_QUALIFIERS_RING.pull_all_qualifiers(agent=agent)

 ## ORE
 def ENTITYSCRIPT_QUALIFIERS_ORE(agent):
  # Remove the ESO with a slice
  TGD = agent[3:]
  FILTER_ENTITYSCRIPT_QUALIFIERS_ORE.pull_all_qualifiers(agent=TGD)

 ## OVERVIEW
 def ENTITYSCRIPT_OVERVIEW(agent):
  TGD = agent
  FILTER_ENTITYSCRIPT_QUALIFIERS_OVERVIEW.pull_all_qualifiers(agent=TGD)
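
## USAGE (illustrative): route a TGD-style query by its 'True Key' prefix; an
## ORE lookup slices the leading 'ESO' off the agent before filtering
##  >>> core_interface.PullValues.ENTITYSCRIPT_QUALIFIERS_ORE("ESO"+"<slug>")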

class ProvideValues:
 """
 Provide an interface that can offer generic instances from the machine
 """
 routine_x = ""

class ReseedValues:
 """
 Provide an interface to reseed the values of various phases in the instance
 Note* - All reseed values are reestablishment routines, where the instance
 is torn down and then rebuilt with default values
 """
 def entity_ledger():
  """
  Reseed the entity_ledger
  """
  blank = ""
  datascript_id = "datascript_id = ['"
  title = "title = ['"
  package_type = "package_type = ['"
  package_id = "package_id = ['"
  DLIST = "DLIST = ['"

  #entity_summary_file.append(title+', '+DLIST+', '+datascript_id+',
  # '+package_type', '+package_id+'\n')
  AcceptedFileTypes = ['**/*.entity']
  file_input_mechanism = []
  entity_summary_list = []

  ## Always look for entities in the designated asset folder: Crawl by pattern
  ## provided: Defaults to **/*.entity but can also be extended to others
  for file_accepted in AcceptedFileTypes:
   file_input_mechanism.extend(glob(ENTITY_ASSET_LOCATION+file_accepted,
    recursive=True))

  ## Sort the mechanism
  file_input_mechanism.sort()

  for EntityFile in file_input_mechanism:
   entity_summary_list.append(EntityFile)

  ## Test: Passing...
  print(entity_summary_list)

  ## Entity Snapshot
  entity_summary_list_clean = []
  for file in entity_summary_list:
   with open(file, oFo.read) as f:
    local_list = []
    for line in f:
     if datascript_id in line:
      line_needed = line
      cleaned_line = line_needed.replace(datascript_id,blank)
      formatted_line = cleaned_line.replace("']",blank)
      field_result = formatted_line
      local_list.append(str(field_result))
     elif DLIST in line:
      line_needed = line
      cleaned_line = line_needed.replace(DLIST,blank)
      formatted_line = cleaned_line.replace("']",blank)
      field_result = formatted_line
      local_list.append(str(field_result))
     elif title in line:
      line_needed = line
      cleaned_line = line_needed.replace(title,blank)
      formatted_line = cleaned_line.replace("']",blank)
      field_result = formatted_line
      local_list.append(str(field_result))
     elif package_type in line:
      line_needed = line
      cleaned_line = line_needed.replace(package_type,blank)
      formatted_line = cleaned_line.replace("']",blank)
      field_result = formatted_line
      local_list.append(str(field_result))
     elif package_id in line:
      line_needed = line
      cleaned_line = line_needed.replace(package_id,blank)
      formatted_line = cleaned_line.replace("']",blank)
      field_result = formatted_line
      local_list.append(str(field_result))
     else:
      pass

    ## Sort the local_list
    local_list.sort()
    entity_summary_list_clean.append(local_list)
   f.close()

   #print(entity_summary_list_clean)
   #for i in entity_summary_list_clean:
   # print(i)

  ## Sort the entity_summary_list_clean
  entity_summary_list_clean.sort()

  ## Always write to the local list
  with open(ENTITY_LEDGER_FILE, oFo.write) as f:
   for i in entity_summary_list_clean:
    f.write(str(i)+'\n')
  f.close()

 def figment():
  """
  Reseed the figment phase
  """
  try:
   with open(PATH_INSTANCE+DIRDATA+PHASE_FIGMENT, oFo.write_text) as f:
    f.write(FILE_TEXT_FIGMENT+Cure.terminate_line)
   f.close()
  except FileNotFoundError:
   print("The file "+str(PHASE_FIGMENT)+" does not exist.")

 def server():
  """
  Reseed the server phase
  """
  try:
   with open(PATH_INSTANCE+DIRDATA+PHASE_SERVER, oFo.write_text) as f:
    f.write(FILE_TEXT_SERVER+Cure.terminate_line)
   f.close()
  except FileNotFoundError:
   print("The file "+str(PHASE_SERVER)+" does not exist.")

################################################################################
## Constructs
################################################################################
class EntityScript:
 """
 Provide an abstraction for working with EntityScript related activities
 """
 def instance_read_from_active(agent):
  """
  Provide a generic active-entity reader with no filter
  """
  with open(PATH_INSTANCE+DIRDATA+ACTIVEES, oFo.read) as f:
   ## Provide values to the EQA bucket
   #BUCKET_EQA.append("-" * 79 +"\n")
   BUCKET_EQA.append("SIMPLE: PARSE - DATASCRIPT FILE: "+BUCKET_EQ[0]+"\n")
   #BUCKET_EQA.append("-" * 79 +"\n")
   for row in f:
    #print(row)
    BUCKET_EQA.append(row+"\n")
    #BUCKET_EQA.append("-" * 79 +"\n")
   ## only hook in the values in debug mode
   z = str(BUCKET_EQ[:])
   print("RAN WITH SEARCH SLUG: "+z)
  f.close()

 def instance_read_from_entities(agent):
  """
  Provide a generic entity-ledger reader with no filter
  """
  with open(ENTITY_LOCATION+SLUG_ENTITY_LEDGER) as f:
   ## Provide values to the PLE bucket
   #BUCKET_PLE.append("-" * 79 +"\n")
   BUCKET_PLE.append("SIMPLE: PARSE - ENTITIES FILE: "+BUCKET_EQ[0]+"\n")
   #BUCKET_PLE.append("-" * 79 +"\n")
   for row in f:
    #print(row)
    BUCKET_PLE.append(row+"\n")
    #BUCKET_PLE.append("-" * 79 +"\n")
   ## only hook in the values in debug mode
   z = str(BUCKET_EQ[:])
   print("RAN WITH SEARCH SLUG: "+z)
  f.close()

 def instance_read_from_ring(agent):
  """
  Provide generic data from an EntityScript instance
  """
  ## read the ring instance, and store the variable
  R = open(PATH_INSTANCE+DIRDATA+PHASE_ENTITYSCRIPT_RING, oFo.read)
  ENTITY = R.readlines()[1]
  R.close()
  print("Entity Loaded from-> RING: "+str(ENTITY))
  with open(PATH_INSTANCE+DIRDATA+ACTIVEES, oFo.read) as f:
   #BUCKET_EQR.append("-" * 79 +"\n")
   BUCKET_EQR.append("SIMPLE: PARSE from RING position-> "+str(ENTITY)+": "+\
    BUCKET_EQ[0]+"\n")
   #BUCKET_EQR.append("-" * 79 +"\n")
   for row in f:
    #print(row)
    if agent in row:
     BUCKET_EQR.append(row+"\n")
     #BUCKET_EQR.append("-" * 79 +"\n")
     #print(a)
    else:
     pass
   z = str(BUCKET_EQ[:])
   print("RAN WITH SEARCH SLUG: "+z)
  f.close()

 #### not debugged yet
 def instance_read_from_ore(agent):
  """
  Provide generic data-matching lookup from an ORE Ledger instance
  """
  with open(PATH_INSTANCE+DIRDATA+PHASE_ENTITYSCRIPT_ORE, oFo.read) as f:
   #BUCKET_EQOL.append("-" * 79 +"\n")
   BUCKET_EQOL.append("SIMPLE: PARSE - ORE, MAIN BRANCH FILE: "+\
    QEKB[0]+"\n")
   #BUCKET_EQOL.append("-" * 79 +"\n")
   for row in f:
    print(row)
    if agent in row:
     BUCKET_EQOL.append(row+"\n")
     #BUCKET_EQOL.append("-" * 79 +"\n")
     #print(a)
    else:
     pass
   z = str(BUCKET_EQOL[:])
   print("RAN WITH SEARCH SLUG: "+z)
  f.close()

 def instance_return_ore():
  """
  return a populated LIST_PARSED_TRUE_SEARCH container with ORE values
  """
  with open(PATH_INSTANCE+DIRDATA+PHASE_ENTITYSCRIPT_ORE, oFo.read) as f:
   print("ORE-> available:")
   for row in f:
    LIST_PARSED_TRUE_SEARCH.append(row)
  f.close()

 def instance_return_active_event(event=None):
  """
  return a populated LIST_PARSED container with ORE values, while taking
  an event
  """
  with open(PATH_INSTANCE+DIRDATA+ACTIVEES, oFo.read) as f:
   print("ORE-> parsed list available:")
   for row in f:
    LIST_PARSED.append(row)
  f.close()
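
## USAGE (illustrative): seed the EQ bucket with a search slug, then read the
## active EntityScript phase into BUCKET_EQA
##  >>> core_interface.FILTER_ENTITYSCRIPT_QUALIFIERS.pull_all_qualifiers("slug")
##  >>> core_interface.EntityScript.instance_read_from_active("slug")
##  RAN WITH SEARCH SLUG: ['slug']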

###############################################################################
## FILTER_*
###############################################################################
class FILTER_ENTITYSCRIPT_QUALIFIERS:
 def pull_all_qualifiers(agent):
  print("LOOKUP STARTED WITH: "+agent)
  if agent == '':
   print("Nothing was done because no input was received.")
   ## On a null-agent, eliminate the relevant buckets
   BUCKET_EQ.clear()
   BUCKET_EQA.clear()
   BUCKET_EQO.clear()
   BUCKET_EQOL.clear()
   BUCKET_EQR.clear()
  else:
   ## Provide a query-ready agent
   ## On non-empty FILTER_* query, append agent to EQ-> generic
   BUCKET_EQ.append(agent)

class FILTER_ENTITYSCRIPT_QUALIFIERS_RING:
 def pull_all_qualifiers(agent):
  print("LOOKUP STARTED WITH: "+agent)
  if agent == '':
   print("Nothing was done because no input was received.")
   ## On a null-agent, eliminate the relevant buckets
   BUCKET_EQ.clear()
   BUCKET_EQA.clear()
   BUCKET_EQO.clear()
   BUCKET_EQOL.clear()
   BUCKET_EQR.clear()
  else:
   ## Provide a query-ready agent
   ## On non-empty FILTER_* query, append agent to EQ-> generic
   BUCKET_EQ.append(agent)

## TESTED_BELOW LINE-> no
class FILTER_ENTITYSCRIPT_QUALIFIERS_ORE:
 def pull_all_qualifiers(agent):
  print("LOOKUP STARTED WITH: "+agent)
  if agent == '':
   print("Nothing was done because no input was received.")
   BUCKET_EQO.clear()
   BUCKET_EQOL.clear()
  else:
   BUCKET_EQO.append(agent)

class FILTER_ENTITYSCRIPT_QUALIFIERS_OVERVIEW:
 def pull_all_qualifiers(agent):
  print("LOOKUP STARTED WITH: "+agent)
  if agent == '':
   print("Nothing was done because no input was received.")
   BUCKET_EQO.clear()
   BUCKET_EQOL.clear()
  else:
   ## Here, the key-lookup is 'ESO'.
   ## All EntityScript is preceded by the 'True Key': in this case, ES
   ## So ES is the key-lookup prefix 'True Key', and ESO is the key-lookup
   ## with O being the 'True Key' modifier
   ## Check the agent itself here; assigning to the imported TGD name first
   ## would shadow it as a local before it could be read
   if agent.startswith('ESO'):
    ## This will hold the stateful title so you can continue to do searches
    ## QUALIFIER_LOOKUP_SEED = QEKB[0]
    FILTER_ENTITYSCRIPT_QUALIFIERS.pull_all_qualifiers(agent)
   elif agent.startswith('ESR'):
    FILTER_ENTITYSCRIPT_QUALIFIERS_RING.pull_all_qualifiers(agent)
   else:
    TGD = agent[3:]
    LIST_TRUE_SEARCH.append(agent)
    BUCKET_EQQ.clear()
    BUCKET_EQQ.append(TGD)

###############################################################################
## PARSE_*
###############################################################################
class PARSE_ENTITIES:
 """
 Provide an entity interface to access and load various .entity values
 """
 def parse_all_entities(agent=""):
  print("Parsing Entities List...LOOKUP STARTED WITH: "+agent)
  if agent == '':
   print("Nothing was done because no input was received.")
   ## On a null-agent, eliminate the relevant bucket
   BUCKET_EQ.clear()
  else:
   ## Provide a query-ready agent
   ## On non-empty FILTER_* query, append agent to EQ-> generic
   BUCKET_EQ.append(agent)

 def parse_entity_instance(instance):
  """
  Load one individual entity
  """
  ## Allow for one instance lookup at a time, unless the state is extended
  ## into multiple simultaneous lookup states
  AcceptedFileTypes = ["**/*"+str(instance)]
  file_input_mechanism = []
  entity_summary_list = []

  try:
   ## Look for the specific instance provided by the glob
   for file_accepted in AcceptedFileTypes:
    file_input_mechanism.extend(glob(ENTITY_ASSET_LOCATION+file_accepted,
     recursive=True))

   ## If the file_input_mechanism is populated, build the entity_summary_list
   for EntityFile in file_input_mechanism:
    entity_summary_list.append(EntityFile)

   ## Try to open the specific entity, and if it opens, write the data to the
   ## active phase
   try:
    with open(entity_summary_list[0]) as f:
     with open(PATH_INSTANCE+DIRDATA+ACTIVEES, oFo.write) as AE:
      for row in f:
       AE.write(row)
       ## provide a segmenting formatter
       #AE.write("-" * int(DEFAULT_CHAR_OUTPUT)+"\n")
     AE.close()
    f.close()
   ## If no instance is located with the passed parameters, nullify the ACTIVEES
   except FileNotFoundError:
    with open(PATH_INSTANCE+DIRDATA+ACTIVEES, oFo.write) as AE:
     AE.write("The file wasn't found.")
    AE.close()

  ## When the IndexError hits, it means the lookup came back as a failed
  ## state, or it wasn't found in the glob
  except IndexError:
   with open(PATH_INSTANCE+DIRDATA+ACTIVEES, oFo.write) as AE:
    AE.write("Couldn't locate the Datascript file.")
   AE.close()
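
## USAGE (illustrative): load a single .entity asset into the active phase;
## the instance name below is hypothetical
##  >>> core_interface.PARSE_ENTITIES.parse_entity_instance("sample.entity")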

## Always localize the ENTITY_LEDGER_FILE
ENTITY_LEDGER_FILE = PATH_INSTANCE+RINGENTITY_LOC+SLUG_ENTITY_LEDGER
ENTITY_ASSET_LOCATION = asset_folder+"ENTITY/"

################################################################################
## extras
################################################################################
class HTMLReadableFile:
 """
 Generate an HTML summary of the environment and defined features
 """
 def summarize():
  SLUG_INDEX = DIROPENPACKAGER+OPENPACKAGERINDEX
  return(SLUG_INDEX)

################################################################################
## generics
################################################################################
## activate_commander
class activate_commander:
 """
 Activate whatever program you currently have in 'ENTITY_ACTIVE'
 This program now takes command line inputs
 """
 pass

## activate_note
class activate_note:
 """
 Use the program that's set up to record 'Basic Document Logic' (BDL)
 'core_add.py' is the default
 """
 pass

## activate_space
class activate_space:
 """
 Various types of X,Y sized (in pixels) spacers
 """
 def expanded():
  """
  Move a space at point SeperatorP, to the maximum allowed length range
  """
  pass

 def dots():
  """
  Make a line of 'dots' at point SeperatorP, of the default size dots_Size
  """
  pass

 def solid_line():
  """
  Make a 'solid_line' at point SeperatorP, of the default size
  'solid_line_Size'
  """
  pass

 def set_length():
  """
  Global separator size logic in pixels.
  """
  pass

 def block_standard():
  """
  Default positioning for the SeperatorP globally available function
  """
  pass

## AudioDevice
class AudioDevice:
 """
 Available audio devices, such as speakers, mics, headphones, or soundcards
 """
 def audio_available(INTERFACE):
  """
  Provide an available audio interface
  """
  pass

 def audio_qualities(ATTRIBUTES, CONFIGURATION, STATUS, QUALITY):
  """
  Provide the audio qualities
  """
  pass

## audio_interface
class audio_interface:
 """
 What audio interface is in use?
 If any is defined, what are the current sound output variables?
 The percentage should be out of 100, from 0 being none, to 100 being
 the allowed maximum.
 """
 def audio_interface_in_use():
  """
  none or *, where * is the defined audio interface in use
  """
  pass

 def audio_volume_percentage():
  """
  What is the volume percentage out of 100?
  Recommended to start at 25% and work around from there.
  """
  pass

 def audio_current_channels():
  """
  What programmatic channels are currently receiving
  'audio output' from 'audio_interface_in_use()'?
  """
  pass

## authenticated: YYYYYYY
class authenticated:
 """
 What 'operator' is authenticated?
 What is their 'status' on the system?
 Do login information tasks based off of dynamic inputs
 """
 def what_member():
  """
  Display brief details about what member is executing the current command
  """
  print("Syste-> Operator Details")
  getgid()
  getuid()
  print(getlogin())

 def member_state(self, logged_in, unauthenticated, Admin):
  """
  'logged_in' will equal 0 for no, and 1 for yes
  'unauthenticated' returns 1 when no privs are detected
  'Admin' returns 1 when system privs are detected
  """
  def __init__(self, logged_in, unauthenticated):
   self.logged_in = logged_in
   self.unauthenticated = unauthenticated
   self.Admin = Admin

  def member_quit():
   """
   Allow the logged in operator to quit the application
   If the Admin is being used, fall back to the system operator
    If logging is desired, 'log_exit' == 1
   """
   pass

 def read_active_member_data():
  """
  Check if a member was logged in and currently using the system
  """
  with open(PATH_INSTANCE+DIRDATA+MEMBER_ACTIVE_HOLDER,
   oFo.read) as ACTIVE_MEMBER:
   for row in ACTIVE_MEMBER:
    z = row
    return(z)
  ## Close the reader
  ACTIVE_MEMBER.close()
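
## USAGE (illustrative): show which operator is executing the current command
##  >>> core_interface.authenticated.what_member()
##  System-> Operator Details
##  ... * gid, uid, and login name * ...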

## backup
class backup:
 """
 Backup your system or individual parts of C.ORE
 Toggle each feature on to 1, and off to 0
 """
 ## Define a preferred backup tool
 backup_format = 'rsync'
 def adult():
  """
  Default adult-entertainment routines
  """
  ADULT_ONE = 'ADULT_ONE'
  ADULT_ONE

 def clone():
  """
  Clone your system to a new system-location, using logic L1
  """
  to_S_NELINUXBOX = 'to_S_NELINUXBOX'
  to_S_NELINUXBOX

 def main():
  """
  Define your main system-level backup functions
  """
  NOVA = 'NOVA'
  NOVA
  FROM_X_TO_X1 = 'FROM_X_TO_X1'
  FROM_X_TO_X1
  FROM_X_TO_X2 = 'FROM_X_TO_X2'
  FROM_X_TO_X2

 def segment():
  """
  Define your main tree-level backup functions
  tree-level backups are specific nodes in the main system only
  """
  backup_etc = 'backup_etc'
  backup_etc
  backup_home = 'backup_home'
  backup_home
  backup_sbin = 'backup_sbin'
  backup_sbin
  backup_var = 'backup_var'
  backup_var
  backup_usr = 'backup_usr'
  backup_usr

 def special():
  """
  Define any behaviors to make new systems or perform new cases
  """
  MULTIPLY_NOVA = 'MULTIPLY_NOVA'
  MULTIPLY_NOVA

## Binary
class Binary:
 """
 Binary Interface container: Extend as needed to facilitate binary
 operations
 """
 class writer:
  pass

## Connected
class ConnectedNetwork:
 """
 Are you connected to a Network Interface of any sort?
 If yes, on what connection?
 What is the state of the web connection?
 Are you using a direct connection, or a Proxy?
 """
 def connected():
  """
  Define the Web Interface:
  For each Interface->
  1 if you're connected
  0 if not
  """
  pass

 def disconnect():
  """
  If YourInterface.connected == 1
   disconnect() == enabled
  else:
   fail
  """
  pass

 def iweb_interface():
  """
  What is the interface you're connected to the inter-web on?
  """
  pass

## cpu_cycles
class cpu_cycles():
 """
 Display the amount of cycles being performed by your cpu
 Types: NOW, THEN, PREDICTED
 """
 def current_ghz_scheduled():
  """
  GHZ scheduled on the processor
  """
  pass

 def cpu_total():
  """
  CPU cores in use
  """
  pass

 def cpu_by_core(core):
  """
  CPU GHZ scheduled by core
  """
  print(core)

 def cpu_max_ghz():
  """
  CPU max ghz available
  """
  pass

 def cpu_temp(temp):
  """
  CPU temp
  """
  pass

## DiskCapacityChecker
class DiskCapacityChecker:
 """
 Display the space read-out of disk X in unit Y, and other disk details
 """
 def inspect_disk():
  """
  Provide basic details of the disk
  """
  print('Inspecting '+name+' one moment')
  sleep(1)
  print('It looks like the byte order on your machine is')
  print(byteorder)
  sleep(1)
  print('Current Encoding: '+getfilesystemencoding()+' .'+' .'+' .')
  ## Disk check here...

## DiskCrypt
class DiskCrypt:
 """
 Encrypt a disk at root point X with Hashing construct Y
 """
 disk_point = "NONE SPECIFIED"
 construct = 'sha512'

## DiskState
class DiskState:
 """
 Show the status of the disk
 """
 def disk_evaluated(DISK):
  """
  Evaluate a valid disk/device
  """
  pass

 def disk_buffer_now(DISK, BUFFER):
  """
  Evaluate the buffer of the disk now
  """
  pass

 def disk_read_now(DISK, READS):
  """
  Evaluate the amount of reads happening to the specified disk now
  """
  pass

 def disk_write_now(DISK, WRITES):
  """
  Evaluate the amount of writes happening to the specified disk now
  """
  pass

## DisplayBar
class DisplayBar:
 """
 'DisplayBar' receives embedded dynamic objects and methods in X, Y form
 """
 def bar_height_y():
  """
  'bar_height_y' is the Y->DisplayBar value in pixels
  """
  pass

 def bar_height_x():
  """
  'bar_height_x' is the X->DisplayBar value in pixels
  """
  pass

 def bar_color():
  """
  'bar_color' is the RGB meta-value of the 'DisplayBar'
  """
  pass

 def bar_position():
  """
  'DisplayBar' relative position from position 0,0 pixels
  0x,0y represents the top-left corner
  """
  pass

 def bar_locked():
  """
  'DisplayBar' logic determining if the bar can be moved
  """
  pass

 def bar_immutable():
  """
  'DisplayBar' logic determining if the bar can receive
  additional entries, or if it's effectively immutable
  """
  pass

## DisplayDevice
class DisplayDevice:
 """
 Available display devices, such as monitors, or extra visual screens
 """
 def display_available(DISPLAY):
  """
  Provide an available display
  """
  pass

 def display_qualities(ATTRIBUTES, CONFIGURATION, DISPLAY, RESOLUTION, STATUS):
  """
  Provide the display qualities
  """
  pass

## DisplayStaticImage
class DisplayStaticImage:
 """
 Display a static_image file-type from point X that is a file format Y
 """
 def play(static_image):
  """
  Play the static_image from point X of allowed type Y
  """
  print(static_image)

 def destroy(static_image):
  """
  Exit the static_image from point X, and destroy the association
  """
  print(static_image)

 def crop(static_image):
  """
  Crop the static_image from current_size, CS, to new_size, NS
  In pixels, (X, Y)
  """
  print(static_image)

## DisplayVideo
class DisplayVideo:
 """
 Play a video file from point X that is of file format allowed Y
 """
 def play(video):
  """
  Play the video
  """
  print(video)

 def stop(video):
  """
  Stop the video and destroy the link to point X
  """
  pass

 def trim(video):
  """
  Trim the video from point P1, to point P2, and save P1-P2 as
  file-format 'NewVideo'
  """
  pass

## gpu_cycles
class gpu_cycles():
 """
 Display the amount of gpu cycles being performed and vram being utilized by
 your gpu. Also, give an overview of the GPU
 Types: NOW, THEN, PREDICTED
 """
 def current_vram_scheduled():
  """
  virtual ram scheduled on the processor
  """
  pass

 def gpu_fan_speed(revolutions_per_period):
  """
  gpu fan speed
  """
  pass

 def gpu_max_temp(temp):
  """
  gpu max temp
  """
  pass

 def gpu_temp(temp):
  """
  gpu temp
  """
  print(temp)

 def vram_capactity(vram_state):
  """
  vram capacity
  """
  pass

 def vram_last(vram):
  """
  last vram state
  """
  pass

## GrabScreenPicture
class GrabScreenPicture:
 """
 Grab a photo-image of position X,Y from 0,0 default (upper left corner)
 of size X,Y, and clarity Z and save in point S
 and image_format_construct IF
 """
 def image_type():
  """
  Either full_screen, or section
  """
  pass

 def image_size():
  """
  If full_screen = 0, then set size X,Y in pixels
  """
  X = 0
  Y = 0

 def image_clarity():
  """
  Determine the clarity of your image capture in DPI
  """
  DPI = 200

 def image_save_point():
  """
  Set the image capture location
  """
  pass

 def image_format_construct():
  """
  What type of image will this be saved as?
  This should be set by the default 'allowed_image_formats' list
  """
  pass

## GrabScreenVideo
class GrabScreenVideo:
 """
 Grab video-image of position X,Y from 0,0, of size X,Y, and clarity Z and
 save at point S and audio construct A with video_format_construct VF
 """
 def video_type():
  """
  Either full_screen, or section
  """
  pass

 def video_size():
  """
  If full_screen = 0, then set size X,Y
  """
  X = 0
  Y = 0

 def video_clarity():
  """
  Determine the clarity of your video in DPI
  """
  DPI = 200

 def video_save_point():
  """
  Set the video-grab location
  """
  pass

 def video_audio_construct():
  """
  set the audio output construct to use
  """
  pass

 def video_format_construct():
  """
  What type of video will this be saved as?
  """
  pass

## io_bucket
class io_bucket:
 """
 The 'io_bucket' holds the current member and defaults to 'system_member'
 """
 member_bucket = [system_member]

class LogMaker:
 """
 Create standardized Logs. Inputs include date/time
 'LogMaker' can be expanded to include any preset routines or program schedules
 """
 date = TimeStamp.date_stamp_day()
 time = TimeStamp.time_stamp_time()


## LockedVault
class LockedVault:
 """
 Optional module containing your system dependent keys (GPG by default)
 """
 def open_vault():
  """
  Enter your vault key
  """
  pass

 def add_to_vault():
  """
  Add a local or global key to your vault
  """
  is_global = 0
  is_local = 0

 def add_to_vault_asset():
  """
  Specify an asset to store within the vault, using key choice X
  """
  pass

## MachinePackages
class MachinePackages:
 """
 Provide a MachinePackages interface to the core
 Display the packages installed in C.ORE, as summarized by PIP
 """
 def generate_package_list():
  """
  use core_config to write each module that is turned on to the core_packages
  silo
  """
  pass

 def generate_program_package_list():
  """
  use pip to generate an updated package list and display it to the operator
  """
  pass

 def generate_system_packages():
  """
  Use the system to generate a custom package list and update it
  to the core_packages.es instance
  """
  pass

 def inspect_system_packages():
  """
  Provide the PACKAGES information, which can be linked to any system-wide
  package overview
  """
  with open(PATH_INSTANCE+DIRDATA+PACKAGES) as f:
   for line in f:
    print(line)
  f.close()

## MachineTemperature
class MachineTemperature():
 """
 Display various machine-based temperatures
 """
 def gpu_temp():
  """
  Get the current GPU temp in Fahrenheit
  """
  pass

 def cpu_temp():
  """
  Get the current CPU temp in Fahrenheit
  """
  pass

 def additional_temps():
  """
  Define the 'TemperatureInterface' input and set
  default temperature unit standard (F, or C)
  """
  pass

## NetworkTraffic
class NetworkTraffic:
 """
 Define NetTraffic I/O logic
 Inspect your network traffic after x seconds, for behavior Bx
 """
 def net_traffic_down():
  """
  Current inbound data on the interface in MB
  """
  pass

 def net_traffic_up():
  """
  Current outbound data on the interface in MB
  """
  pass

 def define_interface():
  """
  Establish a new 'VirtualNetworkInterface'
  """
  pass

## NodeTreePath
class NodeTreePath:
 """
 Display a tree of a folder from path X
 """
 def starting_folder():
  """
  Define the root of the tree
  """
  pass

 def ignore_folder_types():
  """
  Define explicitly ignored sub-tree nodes from starting_folder()
  """
  pass

## PlayAudio
class PlayAudio:
 """
 Play audio file from point X that is a file of allowed format Y
 """
 def play(audio):
  """
  Play the audio
  """
  print(audio)

 def stop(audio):
  """
  Stop the audio and destroy the link to point x
  """

 def trim(audio):
  """
  Trim the audio from point P1, to point P2, and save P1-P2 as
  file-format 'NewAudio'
  """
  pass

## Power
class Power:
 """
 Power attributes
 """
 def power_used_now(POWER_UNITS):
  """
  Display the total power units being utilized now by your machine
  """
  pass

 def power_free_last(POWER_UNITS):
  """
  Display the total power units available as of the last reported power
  cycle
  """
  pass

 def power_max(POWER_UNITS):
  """
  Display the power unit maximum for your machine
  """
  pass

## quick_finder
class quick_finder:
 """
 Activate a new work space, with the default workspace being 'space_A'
 Set the available 'workspace_pool'
 """
 workspaces = 1
 max_workspaces = 1

## RunPackage
class RunPackage:
 """
 Runs an allowed package X, from location Y, in 'ENTITY_ACTIVE'
 """
 def run(package):
  print(package)
  ## logic here...

## ram_state
class ram_state():
 """
 Display the status of your ram function
 """
 def current_ram_available():
  """
  display the amount of ram you have available as of the last reported cycle
  """
  pass

 def ram_generation():
  """
  display the generation of the ram
  """

 def ram_max(unit_value):
  """
  max amount of ram available in the specified unit value
  """
  pass

## RoboticDevice
class RoboticDevice:
 """
 Available robotic devices, such as chatbots, machines, or other autonomous
 systems
 """
 def robot_available(ROBOTIC_INTERFACE):
  """
  Provide an available robotic interface
  """
  pass

 def robotic_qualities(ATTRIBUTES, CONFIGURATION, STATUS, SUBSYSTEM):
  """
  Provide the robotic qualities
  """
  pass

## SERVERALPHA: server state
class SERVERALPHA:
 """
 SERVERALPHA is the base server that you get in Cognitive ORE.
 You're able to start it up in various ways and have it function as an I/O
 server for you while you use other instances to send or retrieve the hooked
 data.

 It has one common channel and allows for process siloing and multiple
 instances in a threaded environment.
 """
 SERVERSELF_PID_LIST = []
 SERVERSELF = Popen([PATH_FULL_CORE+SH_SERVERUP])
 SERVERSELF_PID_LIST.append(SERVERSELF.pid)

 class SERVER:
  def MAKE(SERVERSELF):
   ## Load the server
   print(SERVERALPHA.SERVERSELF_PID_LIST[0])
   print("Doing: Subprocessing for core_server.py...")

  def shutdown_routine(SERVERSELF):
   #def get_PROCESS_BY_NAME(process_name):
   #    return int(check_output(["pidof","-s",process_name]))
   kill(SERVERALPHA.SERVERSELF_PID_LIST[0], SIGTERM)
   SERVERALPHA.SERVERSELF_PID_LIST.clear()
   SERVERALPHA.SERVERSELF_PID_LIST.append(SERVERSELF.pid)

 ## Create a second subprocess channel
 def DECLUTTER_REQUESTER():
  SERVERALPHA.SERVER.shutdown_routine(SERVERALPHA.SERVERSELF)

 ## Create a second subprocess channel
 def NEW_MAKE_REQUESTER():
  SERVERALPHA.DECLUTTER_REQUESTER()
  SERVERALPHA.SERVER.MAKE(SERVERALPHA.SERVERSELF)
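
## USAGE (illustrative): SERVERALPHA launches SH_SERVERUP as a subprocess when
## the class body is evaluated; tear it down or restart it via the requesters
##  >>> core_interface.SERVERALPHA.DECLUTTER_REQUESTER()
##  >>> core_interface.SERVERALPHA.NEW_MAKE_REQUESTER()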

## Storage
class Storage:
 """
 Define storage-related objects
 """
 def evaluate_storage_object(OBJECT):
  """
  evaluate a storage object
  """
  pass

 def inodes_available(OBJECT):
  """
  inodes available on the current storage device
  """
  pass

 def storage_capacity_available(AVAILABLE_STORAGE_UNITS, OBJECT):
  """
  The amount of storage units available in a given unit value
  """
  pass

 def storage_capacity_used(AVAILABLE_STORAGE_UNITS, OBJECT):
  """
  The amount of storage units used in a given unit value
  """
  pass

 def storage_devices_discovered(INTERFACE):
  """
  The total storage objects discovered on a provided interface
  """
  pass

## utility_functions
class utility_functions:
 """
 Commands to interact with stateful or preset configurations
 """
 def program_killer():
  """
  Kill the program by passing 'program_killer(program_to_kill)'
  ...
  """
  pass

## VirtualizeSystem
class VirtualizeSystem:
 """
 virtualize a system from root-point X, in folder safe-root-point X1
 'VirtualizeSystem' gives you methods to operate the machine
 The machine operates in safe-root-point methodology
 This methodology doesn't require a windowing system to function

 This is done by defining and then binding the main C.ORE directory
 to specified virtual directories for each task and routine

 VE (Virtual Enhancements) NAT I/O Device Member ($MEMBER) Overview:
 Method 1 setup-file contents:
 virtualize: ~/.$ROOT_PATH > to /home/$MEMBER/.CORE/RING/IPDVS/
 as: READ-ONLY
 with-member: system_member
 credentials: git
 """
 def initialize():
  """
  Load the 'VirtualSystem' from root-point X at time Y
  """
  pass

 def new_system_root():
  """
  The location_name of the new safe-root-point system
  """
  location_name = None

## WordDictionary
class WordDictionary:
 """
 Display all known words and definitions present in X language
 X is English by default
 """
 def inspect_words():
  """
  UTF-8 representation of the word, in language X
  """
  pass

###############################################################################
## Added Interfaces
###############################################################################
class ENTITY_COUNTER:
 """
 Provide an interface to ENTITY-type instances
 """
 def ENTITIES():
  x = Ring.ENTITY.tally_entities()
  return(x)
 def PHASE_SERVER():
  x = Ring.ENTITY.tally_phase_server()
  return(x)
 def PHASE_FIGMENT():
  x = Ring.ENTITY.tally_phase_figments()
  return(x)
 def SHOW_SIZE():
  x = Ring.ENTITY.size()
  return(x)

################################################################################
## Temporary Instances
################################################################################
class Temporary_Instance:
 """
 A temporary instance is a location that's meant to be stored for a short period
 of time in a non-permanent way. These locations can be modded for a variety
 of purposes, such as archiving, temporary message queues, and more.
 """
 def BUILT_TEMP_LOCATION(INSTANCE, KEY):
  """
  Provide a temp instance build runner
  The phase takes an instance, and a key. The key modifies the name
   of the temp instance to tag it so it can be uniquely globbed later
  """
  dir(INSTANCE)
  print("BUILT-> Temporary Node: "+str(INSTANCE))
  print("Temporary Node-> Name: "+str(INSTANCE.name)+str(KEY))
  BUCKET_TEMP_NODE.append(INSTANCE)

 def BUILD_TEMP_INDEX():
  """
  Build an index of available temporary instances by key definition

  Make a runner that takes every NODE in the temp location
  that starts with the NODE key, and add it to the temp NODE_BUCKET
  for file in /tmp/*
  """
  pass

 def destroy_temp_instance(NODE):
  """
  Destroy an active, yet temporary instance
  core_operations.BUCKET_TEMP_NODE.append(
   "/tmp/tmpa15uyk_i_XHNHSOPH")
  """
  if NODE in BUCKET_TEMP_NODE:
   print("TEMP NODE LOCATED-> Set for destruction...")
   remove(NODE)
   print("Temporary node: "+str(NODE)+" was destroyed...")
  else:
   print("TEMP NODE: "+str(NODE)+" was not located in the TEMP BUCKET")

################################################################################
## Archiving
################################################################################
## Archive Handling: gzip, tar, zip
class ArchiveHandler:
 """
 Control various types of archives
 """
 def temporary_instance(single=0, TEMP=1):
  """
  Build and operate 'on the fly' archive extractions
  TEMP=1 is the default, which will only extract the archives to a
  designated directory in /tmp. You can overload the function,
  and set temp to 0, and it will store the archive in DATA/EXTRACTIONS/

  To post a single temp message queue:
  core_operations.ArchiveHandler.temporary_instance(single=1, TEMP=1)

  Temp instances can be initialized and destroyed on the fly, as needed
  """
  ## Provide a generic temporary instance that takes either a single
  ## message (single=1) or a file location to stage (single=0)
  ## If a single instance is presented, handle it with a temporary file
  ## *note, single instances are not directories or complex nodes.
  ## Those are always single=0
  ## Rebuild the BUCKET_ext_DO
  BUCKET_ext_DO.clear()
  if single==1:
   NAMED_TEMP_FILE = NamedTemporaryFile(delete=False)
   WRITE_TO_TEMP_INSTANCE = input("Message Que, Temp: ").encode()
   ## Add the temp_NODE_KEY
   NAMED_TEMP_FILE.write(WRITE_TO_TEMP_INSTANCE)
   ## rename the file with the node key
   rename(str(NAMED_TEMP_FILE.name), str(NAMED_TEMP_FILE.name)+temp_NODE_KEY)
   ## Always destroy temp instances
   Temporary_Instance.BUILT_TEMP_LOCATION(
    INSTANCE=NAMED_TEMP_FILE, KEY=temp_NODE_KEY)
  else:
   ## Always get the file location
   FILE_TO_OPEN = input("What file would you like to open? ")
   open_temp_FILE_path = Path(FILE_TO_OPEN)
   ## Build an extension verification-slug
   BUCKET_ext_DO.append(open_temp_FILE_path.name)
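
 ## USAGE (illustrative): stage an archive for extraction, then unpack it based
 ## on the extension slug recorded in BUCKET_ext_DO
 ##  >>> core_interface.ArchiveHandler.temporary_instance(single=0, TEMP=1)
 ##  What file would you like to open? ... * path to a .gz archive * ...
 ##  >>> core_interface.ArchiveHandler.extract_node_gz()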

 def backup_entities():
  archive_name_ENTITY = path.expanduser(path.join(
   PATH_INSTANCE+SLUGARCHIVE, NAMEARCHIVE))
  archive_root_ENTITY = path.expanduser(path.join(
   LOCATIONSOURCE, DIRLOCATION))
  make_archive(archive_name_ENTITY, ARCHIVETYPE, archive_root_ENTITY)
  print("ARCHIVE-> .entity created: "+str(archive_root_ENTITY)+\
   str(archive_name_ENTITY)+str(ARCHIVETYPE))

 def extract_node_gz():
  ## .gz
  if BUCKET_ext_DO[0].lower().endswith('.gz'):
   """
   Unpack a gzip file into a temp directory
   """
   print("Extraction-> .gz: Starting file extraction...")
   sleep(1)
   try:
    with GZIP_OPEN(FILE_TO_OPEN, oFo.read_binary) as TEMP_FILE:
     contents_gzip = TEMP_FILE.read()
     RETURN_TO_DIRECTORY = getcwd()
     print("Pinning return directory to: "+str(RETURN_TO_DIRECTORY))
     ## If the temp flag is set, only extract to the /tmp/* archive
     if TEMP==1:
      print(TEMP_FILE)
      ## Direct write-setter
      Curred_file_name = temp_FILE.name+str(temp_DIR_NAME)
      ## BUILT_TEMP_LOCATION is defined on Temporary_Instance above
      Temporary_Instance.BUILT_TEMP_LOCATION(INSTANCE=TEMP_FILE,
       KEY=temp_NODE_KEY)
      ## Populate the BUCKET_TEMP_NODE to allow for dynamic termination
      BUCKET_TEMP_NODE.append(Curred_file_name)
     ## If the temp flag is overloaded, extract to DIRDATA
     else:
      print(TEMP_FILE)
      print("Navigating to the temporary directory: "+str(temp_PATH))
      chdir(temp_PATH)
      ## If the storage is persistent, set the file name back to
      ##  BUCKET_ext_DO[0]
      ## Extracting to regular file
      Curred_file_name = BUCKET_ext_DO[0][:-3]
      Temporary_Instance.BUILT_TEMP_LOCATION(INSTANCE=NAMED_TEMP_FILE,
       KEY="")

     with open(Curred_file_name, oFo.write_binary) as NEW_FILE:
      ## Write the bytes
      NEW_FILE.write(contents_gzip)
     NEW_FILE.close()
     ## For automatic deleting of the temp directory
     # shutil.rmtree('temp1', ignore_errors=True)
     print("Done...")

    ## Close the temp file automatically
    TEMP_FILE.close()

   except FileNotFoundError as e:
    #if "ReadError" in str(e):
    # print("File Extraction Error-> The .gz destination couldn't be read...")
    #else:
    print("File Extraction Error-> No .gz destination was found...")
  else:
   print("Provide a valid .gz location to extract")

 def extract_tar_node():
  ## .tar
  if BUCKET_ext_DO[0].lower().endswith('.tar'):
   """
   Unpack a tar file into a temp directory
   """
   print("Extraction-> .tar: Starting file extraction...")
   sleep(1)
   try:
    TEMP_FILE = OPENTARFILE(FILE_TO_OPEN, oFo.read)
    RETURN_TO_DIRECTORY = getcwd()
    print("Pinning return directory to: "+str(RETURN_TO_DIRECTORY))
    ## If the temp flag is set, only extract to the /tmp/* archive
    if TEMP==1:
     ## Direct write-setter
     Curred_file_name = TEMP_FILE.name+str(temp_DIR_NAME)
     ## Populate the BUCKET_TEMP_NODE to allow for dynamic termination
     BUCKET_TEMP_NODE.append(Curred_file_name)
     ## Register the temp instance against the node key
     BUILT_TEMP_INSTANCE(INSTANCE=TEMP_FILE,
      KEY=temp_NODE_KEY)
    else:
     print("Navigating to the temporary directory: "+str(temp_PATH))
     chdir(temp_PATH)
     ## If the storage is persistent, set the file name back to BUCKET_ext_DO[0]
     ## Extracting to regular file
     Curred_file_name = BUCKET_ext_DO[0][:-4]
     BUILT_TEMP_INSTANCE(INSTANCE=NAMED_TEMP_FILE,
      KEY="")

    ## We're already in the right location, so do a raw unzip
    TEMP_FILE.extractall()
    ## For a nested/custom unpack
    TEMP_FILE.extractall(Curred_file_name)
    chdir(RETURN_TO_DIRECTORY)
    ## For automatic deleting of the temp directory
    # shutil.rmtree('temp1', ignore_errors=True)
    print("Done...")

    ## Close the temp file automatically
    TEMP_FILE.close()

   except FileNotFoundError as e:
    #if "ReadError" in str(e):
    # print("File Extraction Error-> The .tar destination couldn't be read...")
    #else:
    print("File Extraction Error-> No .tar destination was found...")
  else:
   print("Provide a valid .tar location to extract")

 def extract_zip_node():
  ## .zip
  if BUCKET_ext_DO[0].lower().endswith('.zip'):
   """
   Unpack a .zip file into a temp directory
   """
   print("Extraction-> .zip: Starting file extraction...")
   sleep(1)
   print("Unpacking a zip archive into a temp directory.")
   with ZipFile(FILE_TO_OPEN, oFo.read) as TEMP_FILE:
    print("Instance Unpacked: "+str(TEMP_FILE.filename))
    chdir(temp_PATH)
    TEMP_FILE.extractall(temp_DIR_NAME)
    sleep(1)
    ## For automatic deleting of the temp directory
    #shutil.rmtree('temp1', ignore_errors=True)
    print("Done...")

   ## Close the temp file automatically
   TEMP_FILE.close()

  else:
   print("Provide a valid .zip location to extract")
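 ## Dispatch sketch for the three extract_* methods above (illustrative only;
 ##  assumes the bucket was populated by the temp-instance builder first):
 # suffix = BUCKET_ext_DO[0].lower()
 # if suffix.endswith('.gz'): extract_node_gz()
 # elif suffix.endswith('.tar'): extract_tar_node()
 # elif suffix.endswith('.zip'): extract_zip_node()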

###############################################
# Dugout - OOP to System Interface Commands
###############################################
class dugout:
 """
 dugout modules allow you to extend your system and have it function with
 extended os-level environment extensions. These are either bash or shell
 scripts. The environment scripts are almost 5,000 lines long, so there are a
 lot of them! There are two default constructs available: NODE and file.
 NODEs can be files, but files do not default to nodes.
 """
 def dugout(NODE):
  """
  dugout_directory is the root path of your dugout directory. This has been
  refactored and now dugout exists in the virtual operating-system file:
  'NOVASTORE'
  """
  node = "/NOVASTORE/MAINTAINME/dugout/"
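  ## Path-composition sketch (illustrative; joins the dugout root defined above
  ##  with one of the file entries defined below):
  # "/NOVASTORE/MAINTAINME/dugout/" + "bash_display.dugout"
  # -> "/NOVASTORE/MAINTAINME/dugout/bash_display.dugout"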

 def ALL_static_variables(file):
  """
  ALL_static_variables.dugout is a place to define all the NODE-level endpoints
  that are available within your environment
  """
  file = 'ALL_static_variables.dugout'

 def archive(NODE):
  """
  archive/ allows you to build archive rules for use with programs such as
  rsync or scp
  """
  node = "archive/"

 def bash_display(file):
  """
  bash_display.dugout allows you to setup bash display output and environment
  parameters for generic bash-environments
  """
  file = "bash_display.dugout"

 def bash_display_root(file):
  """
  bash_display_root.dugout allows you to setup bash display output and
  environment parameters for root-level instances
  """
  file = "bash_display_root.dugout"

 def bash_display_standard(file):
  """
  bash_display_standard.dugout allows you to setup bash display output and
  environment parameters for standard-level instances
  """
  file = "bash_display_standard.dugout"

 def connection_mainframe(file):
  """
  connection_mainframe is a controller to do setup and perform backups from
  critical systems over the network. This includes git access and ssh instance
  hooking
  """
  file = 'connection_mainframe.dugout'

 def core_manifest(file):
  """
  core_manifest allows you to create a setup and run environment for C.ORE
  """
  file = 'core_manifest.dugout'

 def cron(file):
  """
  cron allows you to create a setup and runtime-environment for linked
  CRON scripts. You can abstract root-level CRON systems here
  """
  file = 'cron.dugout'

 def disk_id(file):
  """
  disk_id allows you to define setup DISK_ID_SLUG locations by storage-related
  access definitions
  """
  file = 'disk_id.dugout'

 def disk_ops_id(file):
  """
  disk_ops_id allows you to create and set up disk-storage related tasks, such
  as backups and branch management with certain rules/runtime methods predefined
  """
  file = 'disk_ops_id.dugout'

 def editor(file):
  """
  editor allows you to create a standardized editor for your system, or branch
  sub-system editors according to a defined/enforced format
  """
  file = 'editor.dugout'

 def highlight_keyword(file):
  """
  highlight_keyword.dugout allows you to link in linters, or highlighting
  sub-systems to perform on the fly operations on text-strings that are active
  within the instance.
  """
  file = "highlight_keyword.dugout"

 def linker(NODE):
  """
  A linker is a middle-layer call script that can be used to abstract away
  the direct call location of a related runtime-task. This can be helpful in
  masking program behavior.
  """
  node = "linker/"

 def msdos_ops(file):
  """
  msdos_ops allows you to abstract MSDOS commands into your default terminal
  output program and use that emulator as if it were running MSDOS
  """
  file = 'msdos_ops.dugout'

 def navigation_methods(file):
  """
  Shortcode hooks that allow you to communicate or access complex-locations
  with a shorter interface command
  """
  file = "navigation_methods.dugout"

 def network_ops_id(file):
  """
  network_ops_id allows you to create baseline network or inter-web functions
  using a variety of network interface types such as ssh
  """
  file = 'network_ops_id.dugout'

 def operator_id(file):
  """
  operator_id allows you to setup a variety of default members and groups that
  can then be applied as Access Control System defaults for various activity
  """
  file = 'operator_id.dugout'
  operator = 'DEFAULT'

 def pretty_things(file):
  """
  pretty_things allows you to enhance the standard output of various types of
  return objects that occur while operating your system. This can help enhance
  the visual layout of those processes.
  """
  file = 'pretty_things.dugout'

 def program_enhance_bash(file):
  """
  program_enhance_bash is used to enhance the customization-level of default
  bash installations and give the operator a much deeper set of interface
  activity options.
  """
  file = 'program_enhance_bash.dugout'

 def program_enhance(file):
  """
  program_enhance is used to export various standardized environment variables
  into the environment that enhance system-level programs. Some examples are
  adding functionality to gems in Ruby, or offering extended Jupyter notebook
  modules at runtime
  """
  file = 'program_enhance.dugout'

 def program_env_wm(file):
  """
  program_env_wm sets traditional proxy routines that can be made available to
  the operator with short-commands to abstract away complex behavior
  """
  file = 'program_env_wm.dugout'

 def program_special_env(file):
  """
  program_special_env sets system level proxy-routines and enforces them for
  specified program types
  """
  file = 'program_special_env.dugout'

 def run_program(file):
  """
  run_program allows you to establish run-time program runners. These runners
  can be accessed directly, but they're meant to be invoked by the
  system_blueprints modules.
  """
  file = 'run_program.dugout'

 def assignProxy():
  """
  Not defined for security reasons
  """
  pass

 def clrProxy():
  """
  Not defined for security reasons
  """
  pass

 def vtwoProxy():
  """
  Not defined for security reasons
  """
  pass

 def shortcut_id(file):
  """
  shortcut_id allows you to setup export variables as shortcode that are meant
  to be run by the operator according to a comfortable convention of their
  choice. You can create summarized or aggregated tasks according to a short
  code list
  """
  file = 'shortcut_id.dugout'

 def system_blueprints(file):
  """
  Create system blueprints that can be used to define linked member/group
  logic into specific runtime events
  """
  file = "system_blueprints.dugout"

 def system_paths(file):
  """
  Define system paths and make them available to the environment
  """
  file = "system_paths.dugout"

 def version_id(file):
  """
  version_id allows you to create and maintain a unified_id for your system
  """
  file = 'version_id.dugout'

 def vpn_id(file):
  """
  vpn_id allows you to setup and maintain VPN or Access point nodes. This is
  recommended for any government or corporate computer
  """
  file = 'vpn_id.dugout'

###############################################################################
## AIR: ACCESS, IDENTITY, RING
###############################################################################
## Access
class Access:
 """
 Virtual Content Network definition files: Set MASTERLIST and STRUCTURE
 """
 ## Localize the Access path by overriding the path variable
 path = DIRACCESS
 ## MASTERLIST
 def MASTERLIST():
  """
  'MASTERLIST' contains the immutable definition of which system member is able
  to modify the IPDVC
  """
  vcnkey = VCNKEY_ALLOWED
  print("VCNKEY-> Able to modify: "+str(vcnkey))
  return(vcnkey)
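  ## Usage sketch (assumes Access is reachable from a core_interface import;
  ##  the printed value is elided here):
  # >>> Access.MASTERLIST()
  # VCNKEY-> Able to modify: ...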

 def STRUCTURE():
  """
  'STRUCTURE' contains the immutable definition of what folder structures are
  allowed in the IDENTITY section
  """
  vcnkey = VCNKEY_STRUCTURE
  print("VCNKEY-> Able to modify: "+str(vcnkey))
  ## Build associated term-states
  termid = ctermid()
  cwd = getcwd()
  ## Overview: Fake output by default
  print("You're currently in the "+termid+" terminal and in "+cwd)
  print('Would you like to proceed?')
  print('Welp, nothing to do, goodbye...')

## Identity
class Identity:
 """
 Local-only files. Identity-dependent or defined file types only
 """
 ## Localize the Identity path by overriding the path variable
 path = DIRIDENTITY
 def PHOTO(entity):
  """
  All 'PHOTO' files will adhere to the photo.entity/ds definitions
  """
  entity = 'photo.ds'

 def background(sub_entity):
  """
  'background' files are images of a predetermined type
  These images may be set to the system default image
  """
  sub_entity = 'background.ds'

 def badges(sub_entity):
  """
  'badges' are files that represent or signify specific baseline access rules
  """
  sub_entity = 'badges.ds'

 def media(sub_entity):
  """
  'media' are files that you're sharing with others over the network or on
  a local system
  """
  sub_entity = 'media.ds'

 def operator(sub_entity):
  """
  'operator' images are those that are displayed while
  the system member navigates public systems
  """
  sub_entity = 'operator.ds'

 def TAGLINE(entity):
  """
  Define TAGLINE properties (Public when on a network)
  TAGLINE_OPERATOR.entity/ds relationship files
  should be altered through the defined way
  and are immutable besides through the standard editing function
  """
  entity = 'TAGLINE_OPERATOR.ds'

 def OPERATOR_RULES(entity):
  """
  Define a 'secret level' definition and baseline operator rules for the
  system OPERATOR_RULES.entity/ds relationship files. These should not be
  altered during the runtime of the machine and are immutable by default
  """
  entity = 'OPERATOR_RULES.ds'

## Ring
class Ring:
 """
 Activated entity-instances: entity-specific operations that can be used inside
 of the IPDVC
 """
 class CONTAINER:
  """
  Provide a generic runtime container
  """
   def instance_contained():
   """
   You can add a single-purpose sub-system here
   """
   pass

 class ENTITY:
  """
  Entity files: These comprise commonly-linked data-types using .ds/.entity
  relationships, and alias related masks.

  These also comprise data you've built or collected on your member machine.
  Built or "collected" data can be generated by you and your activity, or by
  someone within the community and shared to you through an .entity

   Some .ds files are able to be displayed publicly; others contain data that
  would require a license to repost outside of your own machine.

  All generic .entity/.ds relationships are defined here
  """
  def COLLECTIONS(sub_entity):
   """
   Any open-source program that can be used as a standalone library or as a
   functional linked-library within your program. A collection may be something
   that you pulled from a git repo
   """
   entity_sub = 'COLLECTIONS.ds'

  def DOCUMENTS():
   """
    Any .ds-compliant raw format that includes allowed instances
   """
   entity_sub = 'DOCUMENTS.ds'

  def LINUX(sub_entity):
   """
   Any Linux-based open-source program that can either be compiled into, or
   exist within a container that links to the instance
   """
   entity_sub = 'LINUX.ds'

  def tally_entities():
   """
    Read the number of Entities in the ENTITY_LEDGER.ds file
   """
   i_local = -CReal.yes
   with open(DIRENTITIES+ENTITY_LEDGER, oFo.read) as local:
    for line in local:
      i_local += CReal.yes
   local.close()
   return(str(i_local))

  def tally_phase_figments():
   """
   Read the amount of Figments in the FIGMENT_LEDGER.ds file
   """
   phase_lines = -CReal.yes
   with open(DIRFIGMENT+FIGMENT_LEDGER, oFo.read) as c:
    for line in c:
      phase_lines += 1
   c.close()
   return(str(phase_lines))

  def tally_phase_server():
   """
    Read the number of 'streams' in the PHASE_SERVER.es instance
   """
   ## Start at -1 and weight out the header count
   phase_lines = -CReal.yes
   with open(PATH_INSTANCE+DIRDATA+PHASE_SERVER, oFo.read_text) as c:
    for line in c:
     phase_lines += CReal.yes
   c.close()
   return(str(phase_lines))
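   ## Usage sketch for the tally_* readers above (the returned count is a
   ##  string; the value shown is illustrative):
   # >>> Ring.ENTITY.tally_entities()
   # '42'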

  def size():
   """
   Read the size of your 'ENTITY_ARCHIVE.tar.gz'
   """
   ## shift to kilobytes >> 10
   ## shift to megabytes >> 20
   ## shift to gigabytes >> 30
   try:
    return(Path(PATH_INSTANCE+DIRBACKUP+\
     ARCHIVE_ENTITY_GZ).stat().st_size >> 20, ARCHIVE_BYTE_VALUE)
   except FileNotFoundError:
    return("No .gz archive")
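    ## Worked example for the shift above (hypothetical archive size in bytes):
    # 5_242_880 >> 10  ->  5120   (kibibytes)
    # 5_242_880 >> 20  ->  5      (mebibytes)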

   def PROGRAMS(entity):
    """
    All programs that run on the machine will exist here.
    Programs under development do not belong here

    You can define what types of programs can run,
    along with various extensions and programming languages
    """
    entity = 'PROGRAMS.ds'

  def WINE(sub_entity):
   """
   Any external open-source or closed-source program that runs through a
   Windows environment extension setter, such as WINE.
   WINE programs can be sandboxed if provided to the WINE.ds manifest.
   """
   entity_sub = 'WINE.ds'

#############################################
# CORE SETTINGS
#############################################
## CORE
class CORE:
 """
 CORE (Cognitive Operations Resource Enclave) is an extension interface
 for storing overloaded system values that helps an operator customize a system
 through a masked-set of user-generated values. C.ORE should be thought of only
 as an abstraction layer - and if you wish to customize an interface, it should
 be done through an overloaded CORE abstraction.

 None of the pass routines are enabled by default, but the operator can easily
 add the correct definition into the module to enable the feature-set
 """
 def ACCESS():
  """
  Access various internal content-types within CORE
  """
  pass

 def CONFIG():
  """
  Provide system configurations
  """
  pass

 def DATA():
  """
  Store system data, and provide data-dependent module extension behavior
  """
  pass

 def DOCUMENTATION():
  """
  Provide for user-access-ready documentation
  """
  path_documentation = DIRDOCUMENTATION

  ## Base language features
  about_entity_ds = 'ABOUT_ENTITY.ds'
  about_entity_entity = 'ABOUT_ENTITY.entity'
  changelog = 'changelog'

  ## Allowed Add-on languages
  PHP = 'PHP/'
  PYTHON = 'PYTHON/'
  RUBY = 'RUBY/'

  SLUG_ENTITY_DS = path_documentation+about_entity_ds
  SLUG_ENTITY_ENTITY = path_documentation+about_entity_entity
  SLUG_CHANGELOG = path_documentation+changelog

  SLUG_DOCUMENTATION_PHP = path_documentation+PHP
  SLUG_DOCUMENTATION_PYTHON = path_documentation+PYTHON
  SLUG_DOCUMENTATION_RUBY = path_documentation+RUBY
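  ## Resulting slugs, assuming (for illustration only) that DIRDOCUMENTATION
  ##  is 'DOCUMENTATION/':
  # SLUG_ENTITY_DS            -> 'DOCUMENTATION/ABOUT_ENTITY.ds'
  # SLUG_DOCUMENTATION_PYTHON -> 'DOCUMENTATION/PYTHON/'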

 class FILTER:
  """
  FILTER automatically takes a setup file and creates a dispensable retrieval
  environment that adheres to specific filtered attributes
  """
  def file_tree():
   """
   Define the dispensable environment tree for a filter
   """
   MACHINES = '|_|FILTER_MACHINES'
   COMMUNICATION_RULES = '|_|FILTER_COMMUNICATION_RULES'
   ADULT_CONTENT_DECIDER = '|_|FILTER_ADULT_CONTENT_DECIDER'

  def ADULT_CONTENT_DECIDER():
   """
   Define an Adult Content Policy (ACP) according to the operator. This
   stores the definitions of a custom ACP in the FILTER_ADULT_CONTENT.RING
   file. An ACP can be custom generated, or downloaded via the community
   definition database (CDD). You may also use an AI-generated network routine
   to completely filter out adult content if you're within a protected
   facility or corporate network
   """
   pass

  def COMMUNICATION_RULES():
   """
   Define rules for interacting with the inter-webs and other P2P networks
   Stores the definitions in the FILTER_COMMUNICATION_RULES.RING file.
   CRs can be custom generated, or downloaded via the community definition
   database (CDD). You may also use AI generated network routines to completely
   control your communication protocols. This is recommended for government
   and corporate scenarios.
   """
   pass

  def MACHINES():
   """
   Define the functionality of your dispensable machine. A dispensable machine
   will generate on each instance creation, and will completely terminate on
   each shutdown protocol. The machines themselves are never the same twice,
   and data derived from the machine should be stored according to the IPDVC
   data-storage protocol.

   FROM_STOCK_0: Anything filtered into a FALSE category. All this content will
    be filtered to read-only
   FROM_STOCK_1: Anything filtered into a TRUE category. Executable files will
    need super-user privs to execute
   MACHINE_DATA: Anything you're able to use within the dispensable machine
    i.e: vpn credentials, cloned environment variables, or mirrored operator
    credentials, such as git
   MACHINES: Public machine folder formats. These are made available to both
    Public and Archived format that can be sent to the "TO_SFTP". TO_SFTP stores
    all files you're sending over a network to a SFTP catch-all system
   MULTI_PIPELINE: Any multi-paradigm database logic or storage slugs that are
    meant to be mirrored into a perma-storage scheme elsewhere in the instance
   """
   FROM_STOCK_0 = 'FROM_STOCK_0'
   FROM_STOCK_1 = 'FROM_STOCK_1'
   MACHINE_DATA = 'MACHINE_DATA'
   MACHINES = 'MACHINES'
   TO_SFTP = 'TO_SFTP'
   MULTI_PIPELINE = 'MULTI_PIPELINE'
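   ## Layout sketch of the dispensable-machine folders named above
   ##  (labels only, per the docstring; no absolute paths are implied):
   # FROM_STOCK_0/  FROM_STOCK_1/  MACHINE_DATA/
   # MACHINES/      TO_SFTP/       MULTI_PIPELINE/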

  def EXPLORER_RIG():
   """
   A public virtual explorer rig to navigate the network with.
   This is done with a default dispensable container and this should default
   to running through a proxy or vpn in corporate or government scenarios
   """
   pass

  def attributes():
   """
   Default EXPLORER_RIG functional folder system. You can add attributes to the
   rig to make it more robust - Be careful what is added.
   """
   pass

  def block():
   """
   Define network restrictions, allowed-lists, or disallowed-lists here
   These lists are stored in the NETWORK_ACESS.RING folder
   """
   pass

  def block_builder():
   """
   Define logic blocks to impact your EXPLORER_RIG. Logic blocks can enhance
   block-dependent behavior, such as enabling graphics sub-systems like OpenGL.
   """
   pass

  def database():
   """
   Display all local database types here. Higher function DBs should be
   stored in the MULTI_PIPELINE folder according to a ruleset. Database is meant
   primarily to store single-purpose non-threaded data-types (sqlite3, etc.)
   """
   pass

  def database_builder():
   """
   Basic DB field builder for various tasks
   """
   pass

  def interface_assets():
   """
   If you're sharing a public index-slug (Your own HTML world page)
   store the file types here.
   """
   pass

  def css():
   """
   CSS file types for your public index-slug
   """
   pass

  def image():
   """
   Various image formats that can be shared on your index-slug
   The default is NewEntity.io compliant image-sharing protocols
   """
   pass

  def javascript():
   """
   JavaScript files that you're using that aren't embedded into your main
   index-slug page. The default is NewEntity.io compliant interactive-response
   protocols
   """
   pass

  def skin():
   """
   A 'skin' is a preset that is created by the user or by the community and
   shared online to enhance the visual properties of your rig
   """
   pass

  def RIG_VM():
   """
   Virtual Machine formats can be run through here as an added security
   layer
   """
   pass

  def Virtual_Hard_Disks():
   """
   Any .vhd formats or similar would go here
   """
   pass

  def Virtual_Machines():
   """
   Any Virtual Machine mappers or setup/init logic goes here
   """
   pass

  def standard_library():
   """
   Any automation functions needed for your EXPLORER_RIG that operate through
   a default never-changing standard library
   """
   pass

  def universe():
   """
   All objects discovered in your EXPLORER_RIG
   """
   pass

  def RECOVERY():
   """
   Any crash logs or incomplete formats should be sent here as non-executable
   read-only instances
   """
   pass

  def STARTING():
   """
   A zipped, rar, tar, gzip, bzip, or other preset unpackable archive format
   of the EXPLORER_RIG. This can be thought of as a preset explorer-rig
   """
   pass

  def HARDWARE():
   """
   Text version of your hardware specifications. These are abstracted away by
   the rig and never provided to the network for any reason.
   """
   command = 'lspci > HARDWARE/hardware.ds'
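   ## Execution sketch (assumption: the command string above is meant for the
   ##  os-level system() runner imported by this module):
   # system(command)  # shells out and writes the lspci listing to hardware.ds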

  def IDENTITY():
   """
   Identity houses operator specific information, such as profile photos,
   network addresses, and identifying information. There are various settings
   available to make you more or less anonymous over the network.
   """
   pass

  def LEARNING():
   """
   A storage container to build a learning archive of information you've
   discovered over the network
   """
   pass

  def LICENSES(license):
   """
   Any relevant use-based licenses
   """
   licenses_path = 'LICENSES/'
   license = 'LICENSE.ds'
   SLUG_LICENSE = licenses_path+license

  def OPENPACKAGER():
   """
   Packages that can be used in a community-vetted or discoverable context
   """
   pass

  def env(environment):
   """
   A custom environment option
   """
   environment = 'trine-env'

  def cache(cache_file):
   """
   You can either turn the cache on, or turn it off. You can either write to it,
   or not
   """
   cache_file = '__pycache__'
   on = 1

## Operations Resource Enclave
class ORE:
 """
 ORE Main Overloaded Interface
 """
 def about_entity_ds(about_overview):
  """
  Information about your data
  """
  about_overview =  'ABOUT_ENTITY.ds'

 def about_entity_entity(about_overview):
  """
  Information about your entities
  """
  about_overview = 'ABOUT_ENTITY.entity'

 def readme(about_overview):
  """
  General information
  """
  about_overview = 'README.ds'

 def ComprisedPackages():
  """
  What packages are in use on your machine?
  """
  used_packages = 'all_packages.ds'

 def ContributorsAndAuthors():
  """
  A system-embedded authors list
  """
  path_authors = 'LICENSES/'
  authors = 'AUTHORS.ds'
  SLUG_AUTHORS = path_authors+authors
  author_list.append('Ryan McKenna')

 def Updates():
  """
  A default updates information silo
  """
  updates = 'changelog'

 def MainNodes():
  """
  Minimum viable node list
  """
  add = 'core_add.py'
  config = 'core_config.py'
  count = 'core_count.py'
  creator = 'core_creator.py'
  gather = 'core_gather.py'
  hash = 'core_hash.py'
  interface = 'core_interface.py'
  modify = 'core_modify.py'
  navigator = 'core_navigator.py'
  operations = 'core_operations.py'
  main = 'CORE.py'
  seek = 'core_seeker.py'
  settings = 'core_settings.py'
  view = 'core_view.py'

 def EntityScriptDataFiles():
  """
  Base EntityScript data instances
  """
  core = 'core.es'
  structure = 'STRUCTURE.es'

 def SupportFiles():
  """
  Various generic support files for initializing your rig/entity instance
  """
  path_support_files = 'OPENPACKAGER/'
  rip = 'z_rip-jaw-alpha.py'
  mindex = 'z_see_mindex.py'

 def setup(file):
  """
  Generic setup builder script
  """
  file = 'setup.py'

 def configure(file):
  """
  Generic config.ini file
  """
  file = 'config.ini'

 def manifest(file):
  """
  C.ORE MANIFEST: Version 1.1 - 05/25/2023
  """
  file = 'MANIFEST.in'

 def include(types: list):
  """
  include block for the manifest
  """
  ## Any types here
  # for i in types...
  include_list.clear()
  include_list.append('changelog')
  include_list.append('*.gitignore')
  include_list.append('*.ini')
  include_list.append('*.py')
  include_list.append('*.RING')
  include_list.append('*.travis')

 def exclude(types: list):
  """
  exclude block for the manifest
  """
  ## Any types here
  # for i in types:...
  exclude_list.clear()
  exclude_list.append('*.ds')
  exclude_list.append('*.entity')
  exclude_list.append('*.es')
  exclude_list.append('*.pyc')
  exclude_list.append('*.vcn')

 def recursive_exclude(types: list):
  """
  recursive_exclude block for the manifest 'recursive-exclude'
  """
  ## Any types here
  # for i in types:...
  recursive_exclude_list.clear()
  recursive_exclude_list.append('CONFIG')
  recursive_exclude_list.append('FILTER')
  recursive_exclude_list.append('LEARNING')
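  ## Rendering sketch: a MANIFEST.in built from the three lists above would
  ##  carry directives of this shape (the '*' pattern is an assumption here):
  # include changelog *.gitignore *.ini *.py *.RING *.travis
  # exclude *.ds *.entity *.es *.pyc *.vcn
  # recursive-exclude CONFIG *
  # recursive-exclude FILTER *
  # recursive-exclude LEARNING *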

################################################################################
## Cleanup routines
################################################################################
## Setup the argument parser
def Maintenance():
 """
 Maintenance allows you to define runners for various system maintenance tasks.

 These tasks are provided in two initial groups:
 1.) Proxy: A default interface to access various parts of the network within
  the program. Direct navigation is discouraged in favor of a proxy

 2.) Security: A default security scheme that consists of both malware detection
  through a third party program of your choice, and a default subsection

 Maintenance also grants access to "MAINTAIN ME". Maintain Me is written in
 bash and provides a huge amount of underlying system scripting extensions.

 Maintain Me is not included by default.

 All tasks here work with silos that accumulate data during machine use. Some
 of this data may be destroyed, and other parts may have to be saved for various
 reasons.

 You can provide custom maintenance routines for each type of data collection
 silo. You may also extend each routine to include wiping, saving, reseeding,
 or just reconfiguring the instance according to a desired format.

 This also allows you to skip certain processes at import by passing global
 parser options that can be extended to enable additional use-cases.
 """
 ## Pass the instance if it's a direct import
 if __name__ == "__main__":
  pass
 else:
  ## Waterfall any linked-log dirs
  proxy_log_dir = proxy_path
  security_log_dir = malware_guard_path

  print("Proxy Log Directory: "+str(proxy_log_dir))
  print("Security Log Directory: "+str(security_log_dir))

  ## Establish your runner paths
  RUNNER_PROXY = MY_SYSTEM_HOME+proxy_path_runner_slug
  RUNNER_SECURITY = MY_SYSTEM_HOME+malware_guard_runner_slug

  print("PROXY-> RUNNER: "+str(RUNNER_PROXY))
  print("SECURITY-> RUNNER: "+str(RUNNER_SECURITY))

  ## maintainme_files_proxy
  maintainme_files_proxy = [
   proxy_log_dir+proxy_log_slug, proxy_log_dir+proxy_cache_slug]
  ## maintainme_files_security
  maintainme_files_security = [
   USR_SYSTEM_LOCATION+malware_guard_clam_log,
   USR_SYSTEM_LOCATION+malware_guard_event_log,
   USR_SYSTEM_LOCATION+malware_guard_inotify_log]

  ## Custom teardown/setup functions follow the reseed_*TEARDOWNTYPE* naming
  def reseed_proxy():
   """
    You may want to reseed your proxy storage silo, as it will become
    indefinitely large and you may not need to preserve the entire instance.
   """
   try:
    for x in maintainme_files_proxy:
     remove(x)
     print("success")
   except PermissionError:
    print("ALERT:  proxy files -> " \
     "You need elevated permissions to proceed, dropping to shell-script")
   try:
    system(RUNNER_PROXY)
    print("STATUS: Complete")
   except FileNotFoundError:
    print("STATUS: No action taken")

  def reseed_security():
   """
   You may want to reseed your security storage silo as it will become
    indefinitely large with continued system use.
   """
   try:
    for x in maintainme_files_security:
     remove(x)
    print("success")
   except PermissionError:
    print("ALERT:  maldetect files -> " \
     "You need elevated permissions to proceed, dropping to shell-script")
   try:
    system(RUNNER_SECURITY)
    print("STATUS: Complete")
   except FileNotFoundError:
    print("STATUS: No action taken")

  def reseed_nova():
   """
   reseed_*ITEM* is the setup function for whatever you name your
   default backup routine. This links together the teardown list
   """
   reseed_proxy()
   reseed_security()

  def maintainmesummary():
   ## Default runners
   print("------ ARGS -------")
   print("--reseed=a was provided to the argument parser")
   print("------ DONE -------")
   ## Summary Section
   chdir(proxy_path)
   print(getcwd())
   print(listdir())
   print(path.exists(proxy_path+proxy_log_slug))
   print(environ.get('USER'))
   print(stat(proxy_path))
   for dirpath, dirnames, filenames in walk(proxy_path):
    print('Current Path: ', dirpath)
    print('Directories: ', dirnames)
    print('Files: ', filenames)
    print('---')

   for dirpath, dirnames, filenames in walk(malware_guard_path):
    print('Current Path: ', dirpath)
    print('Directories: ', dirnames)
    print('Files: ', filenames)
    print('---')
    last_mod_time = stat(proxy_log_slug).st_mtime

    ## print the relevant results
    print(datetime.fromtimestamp(last_mod_time))

  maintainmesummary()
  ## Generic runner
  reseed_nova()


