id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
171,652 | import importlib
import os
import sys
import warnings
import pywintypes
import win32api
import win32con
import win32service
import winerror
error = RuntimeError
def LocateSpecificServiceExe(serviceName):
    """Return the ImagePath (the .exe command line) registered for a service.

    Args:
        serviceName: name of an installed service.

    Returns:
        The ImagePath registry value for the service.

    Raises:
        win32api.error: if the service key does not exist (e.g.
            ERROR_FILE_NOT_FOUND when the service is not installed).
    """
    # Only RegQueryValueEx is performed, so request read access rather than
    # KEY_ALL_ACCESS - the latter needlessly fails for non-elevated callers.
    hkey = win32api.RegOpenKey(
        win32con.HKEY_LOCAL_MACHINE,
        "SYSTEM\\CurrentControlSet\\Services\\%s" % (serviceName),
        0,
        win32con.KEY_READ,
    )
    try:
        # [0] is the value; [1] (the registry type) is discarded.
        return win32api.RegQueryValueEx(hkey, "ImagePath")[0]
    finally:
        hkey.Close()
def InstallService(
    pythonClassString,
    serviceName,
    displayName,
    startType=None,
    errorControl=None,
    bRunInteractive=0,
    serviceDeps=None,
    userName=None,
    password=None,
    exeName=None,
    perfMonIni=None,
    perfMonDll=None,
    exeArgs=None,
    description=None,
    delayedstart=None,
):
    """Install a Python class as a Windows service.

    Args:
        pythonClassString: "module.ClassName" string identifying the service class.
        serviceName: the service's internal name.
        displayName: the service's display name.
        startType: SCM start type; defaults to SERVICE_DEMAND_START.
        errorControl: SCM error control; defaults to SERVICE_ERROR_NORMAL.
        bRunInteractive: if true, allow the service to interact with the desktop.
        serviceDeps: optional list of service names this service depends on.
        userName/password: account the service runs under (None = LocalSystem).
        exeName: host exe; defaults to the located pythonservice.exe.
        perfMonIni/perfMonDll: optional perfmon counter registration info.
        exeArgs: extra args appended to the service command line.
        description: optional service description (ignored on platforms
            without ChangeServiceConfig2).
        delayedstart: optional bool to set delayed auto-start (Vista+).

    Raises:
        win32service.error: on SCM failures (e.g. ERROR_SERVICE_EXISTS).
    """
    # Handle the default arguments.
    if startType is None:
        startType = win32service.SERVICE_DEMAND_START
    serviceType = win32service.SERVICE_WIN32_OWN_PROCESS
    if bRunInteractive:
        serviceType = serviceType | win32service.SERVICE_INTERACTIVE_PROCESS
    if errorControl is None:
        errorControl = win32service.SERVICE_ERROR_NORMAL

    exeName = '"%s"' % LocatePythonServiceExe(exeName)
    commandLine = _GetCommandLine(exeName, exeArgs)
    hscm = win32service.OpenSCManager(None, None, win32service.SC_MANAGER_ALL_ACCESS)
    try:
        hs = win32service.CreateService(
            hscm,
            serviceName,
            displayName,
            win32service.SERVICE_ALL_ACCESS,  # desired access
            serviceType,  # service type
            startType,
            errorControl,  # error control type
            commandLine,
            None,
            0,
            serviceDeps,
            userName,
            password,
        )
        # Close the service handle even if the optional config-2 calls below
        # fail - mirrors the try/finally structure of ChangeServiceConfig().
        try:
            if description is not None:
                try:
                    win32service.ChangeServiceConfig2(
                        hs, win32service.SERVICE_CONFIG_DESCRIPTION, description
                    )
                except NotImplementedError:
                    pass  ## ChangeServiceConfig2 and description do not exist on NT
            if delayedstart is not None:
                try:
                    win32service.ChangeServiceConfig2(
                        hs,
                        win32service.SERVICE_CONFIG_DELAYED_AUTO_START_INFO,
                        delayedstart,
                    )
                except (win32service.error, NotImplementedError):
                    ## delayed start only exists on Vista and later - warn only
                    ## when trying to set delayed to True
                    if delayedstart:
                        warnings.warn("Delayed Start not available on this system")
        finally:
            win32service.CloseServiceHandle(hs)
    finally:
        win32service.CloseServiceHandle(hscm)
    InstallPythonClassString(pythonClassString, serviceName)
    # If I have performance monitor info to install, do that.
    if perfMonIni is not None:
        InstallPerfmonForService(serviceName, perfMonIni, perfMonDll)
def ChangeServiceConfig(
    pythonClassString,
    serviceName,
    startType=None,
    errorControl=None,
    bRunInteractive=0,
    serviceDeps=None,
    userName=None,
    password=None,
    exeName=None,
    displayName=None,
    perfMonIni=None,
    perfMonDll=None,
    exeArgs=None,
    description=None,
    delayedstart=None,
):
    """Reconfigure an already-installed Python service.

    Mirrors InstallService(), but updates an existing service via the SCM's
    ChangeServiceConfig.  startType/errorControl left as None translate to
    SERVICE_NO_CHANGE.  Raises win32service.error on SCM failures.
    """
    # Before doing anything, remove any perfmon counters.
    try:
        import perfmon

        perfmon.UnloadPerfCounterTextStrings("python.exe " + serviceName)
    except (ImportError, win32api.error):
        # perfmon support is optional - this is best-effort only.
        pass
    # The EXE location may have changed
    exeName = '"%s"' % LocatePythonServiceExe(exeName)
    # Handle the default arguments.
    if startType is None:
        startType = win32service.SERVICE_NO_CHANGE
    if errorControl is None:
        errorControl = win32service.SERVICE_NO_CHANGE
    hscm = win32service.OpenSCManager(None, None, win32service.SC_MANAGER_ALL_ACCESS)
    serviceType = win32service.SERVICE_WIN32_OWN_PROCESS
    if bRunInteractive:
        serviceType = serviceType | win32service.SERVICE_INTERACTIVE_PROCESS
    commandLine = _GetCommandLine(exeName, exeArgs)
    try:
        # SmartOpenService is defined elsewhere in this module.
        hs = SmartOpenService(hscm, serviceName, win32service.SERVICE_ALL_ACCESS)
        try:
            win32service.ChangeServiceConfig(
                hs,
                serviceType,  # service type
                startType,
                errorControl,  # error control type
                commandLine,
                None,
                0,
                serviceDeps,
                userName,
                password,
                displayName,
            )
            if description is not None:
                try:
                    win32service.ChangeServiceConfig2(
                        hs, win32service.SERVICE_CONFIG_DESCRIPTION, description
                    )
                except NotImplementedError:
                    pass  ## ChangeServiceConfig2 and description do not exist on NT
            if delayedstart is not None:
                try:
                    win32service.ChangeServiceConfig2(
                        hs,
                        win32service.SERVICE_CONFIG_DELAYED_AUTO_START_INFO,
                        delayedstart,
                    )
                except (win32service.error, NotImplementedError):
                    ## Delayed start only exists on Vista and later. On NT, raises
                    ## NotImplementedError since ChangeServiceConfig2 doesn't exist.
                    ## On Win2k and XP, fails with ERROR_INVALID_LEVEL.
                    ## Warn only if trying to set delayed to True.
                    if delayedstart:
                        warnings.warn("Delayed Start not available on this system")
        finally:
            win32service.CloseServiceHandle(hs)
    finally:
        win32service.CloseServiceHandle(hscm)
    InstallPythonClassString(pythonClassString, serviceName)
    # If I have performance monitor info to install, do that.
    if perfMonIni is not None:
        InstallPerfmonForService(serviceName, perfMonIni, perfMonDll)
def RemoveService(serviceName):
    """Delete the named service, its perfmon counters and its event-log source.

    Perfmon counter removal and event-log source removal are best-effort;
    only SCM errors from DeleteService propagate to the caller.
    """
    try:
        import perfmon

        perfmon.UnloadPerfCounterTextStrings("python.exe " + serviceName)
    except (ImportError, win32api.error):
        # perfmon support is optional - ignore if unavailable.
        pass

    hscm = win32service.OpenSCManager(None, None, win32service.SC_MANAGER_ALL_ACCESS)
    try:
        hs = SmartOpenService(hscm, serviceName, win32service.SERVICE_ALL_ACCESS)
        win32service.DeleteService(hs)
        win32service.CloseServiceHandle(hs)
    finally:
        win32service.CloseServiceHandle(hscm)

    # Also remove the event-log message source, if it was registered.
    import win32evtlogutil

    try:
        win32evtlogutil.RemoveSourceFromRegistry(serviceName)
    except win32api.error:
        pass
def WaitForServiceStatus(serviceName, status, waitSecs, machine=None):
    """Waits for the service to return the specified status.  You
    should have already requested the service to enter that state.

    Polls every 250ms for up to waitSecs seconds.  Raises pywintypes.error
    with ERROR_SERVICE_REQUEST_TIMEOUT if the state is not reached in time.
    """
    # 4 polls per second -> waitSecs * 4 attempts in total.
    for i in range(waitSecs * 4):
        # QueryServiceStatus is defined elsewhere in this module; index [1]
        # is presumably the current-state field of the status tuple - verify.
        now_status = QueryServiceStatus(serviceName, machine)[1]
        if now_status == status:
            break
        win32api.Sleep(250)
    else:
        # Loop exhausted without reaching the requested state - report a
        # timeout in the same shape the underlying API would raise.
        raise pywintypes.error(
            winerror.ERROR_SERVICE_REQUEST_TIMEOUT,
            "QueryServiceStatus",
            win32api.FormatMessage(winerror.ERROR_SERVICE_REQUEST_TIMEOUT)[:-2],
        )
def StopServiceWithDeps(serviceName, machine=None, waitSecs=30):
    """Stop a service, first recursively stopping its dependent services.

    Each dependent service (and finally the target service) is stopped with
    a timeout of waitSecs seconds.  __FindSvcDeps and __StopServiceWithTimeout
    are module-private helpers defined elsewhere in this module.
    """
    hscm = win32service.OpenSCManager(machine, None, win32service.SC_MANAGER_ALL_ACCESS)
    try:
        deps = __FindSvcDeps(serviceName)
        for dep in deps:
            hs = win32service.OpenService(hscm, dep, win32service.SERVICE_ALL_ACCESS)
            try:
                __StopServiceWithTimeout(hs, waitSecs)
            finally:
                win32service.CloseServiceHandle(hs)
        # Now my service!
        hs = win32service.OpenService(
            hscm, serviceName, win32service.SERVICE_ALL_ACCESS
        )
        try:
            __StopServiceWithTimeout(hs, waitSecs)
        finally:
            win32service.CloseServiceHandle(hs)
    finally:
        win32service.CloseServiceHandle(hscm)
def StopService(serviceName, machine=None):
    """Ask the SCM to stop the named service (does not wait for it to stop)."""
    return ControlService(serviceName, win32service.SERVICE_CONTROL_STOP, machine)
def StartService(serviceName, args=None, machine=None):
    """Start the named service, optionally passing args to its ServiceMain.

    Raises win32service.error on failure (e.g. ERROR_SERVICE_ALREADY_RUNNING).
    """
    hscm = win32service.OpenSCManager(machine, None, win32service.SC_MANAGER_ALL_ACCESS)
    try:
        hs = SmartOpenService(hscm, serviceName, win32service.SERVICE_ALL_ACCESS)
        try:
            win32service.StartService(hs, args)
        finally:
            win32service.CloseServiceHandle(hs)
    finally:
        win32service.CloseServiceHandle(hscm)
def RestartService(serviceName, args=None, waitSeconds=30, machine=None):
    "Stop the service, and then start it again (with some tolerance for allowing it to stop.)"
    try:
        StopService(serviceName, machine)
    except pywintypes.error as exc:
        # Allow only "service not running" error
        if exc.winerror != winerror.ERROR_SERVICE_NOT_ACTIVE:
            raise
    # Give it a few goes, as the service may take time to stop.
    # Retry once a second for up to waitSeconds seconds.
    for i in range(waitSeconds):
        try:
            StartService(serviceName, args, machine)
            break
        except pywintypes.error as exc:
            # "already running" here means the old instance hasn't finished
            # stopping yet - wait and retry; anything else is a real error.
            if exc.winerror != winerror.ERROR_SERVICE_ALREADY_RUNNING:
                raise
            win32api.Sleep(1000)
    else:
        print("Gave up waiting for the old service to stop!")
def DebugService(cls, argv=[]):
    """Run a service class in "debug" mode, in-process.

    Re-implements what pythonservice.exe does when it sees a "-debug" param.
    Currently only used by "frozen" (ie, py2exe) programs (but later may
    end up being used for all services should we ever remove
    pythonservice.exe).

    NOTE: argv is a mutable default, but it is never mutated here, so the
    usual shared-default pitfall does not apply.
    """
    import servicemanager

    # g_debugService and _DebugCtrlHandler are module-level names defined
    # elsewhere in this module; the handler presumably forwards Ctrl+C to
    # g_debugService as a "stop" request.
    global g_debugService

    print("Debugging service %s - press Ctrl+C to stop." % (cls._svc_name_,))
    servicemanager.Debugging(True)
    servicemanager.PrepareToHostSingle(cls)
    g_debugService = cls(argv)
    # Setup a ctrl+c handler to simulate a "stop"
    win32api.SetConsoleCtrlHandler(_DebugCtrlHandler, True)
    try:
        g_debugService.SvcRun()
    finally:
        # Always unhook the handler and reset debug state, even if SvcRun raises.
        win32api.SetConsoleCtrlHandler(_DebugCtrlHandler, False)
        servicemanager.Debugging(False)
        g_debugService = None
def GetServiceClassString(cls, argv=None):
    """Return the "module.ClassName" string used to register a service class.

    When the class lives in the script being executed (module "__main__"),
    the script's full path (without extension) is used as the module part so
    the service host can locate it later.
    """
    if argv is None:
        argv = sys.argv
    import pickle

    # pickle.whichmodule finds the module the class is importable from.
    modName = pickle.whichmodule(cls, cls.__name__)
    if modName == "__main__":
        try:
            fname = win32api.GetFullPathName(argv[0])
            path = os.path.split(fname)[0]
            # Eaaaahhhh - sometimes this will be a short filename, which causes
            # problems with 1.5.1 and the silly filename case rule.
            filelist = win32api.FindFiles(fname)
            # win32api.FindFiles will not detect files in a zip or exe. If list is empty,
            # skip the test and hope the file really exists.
            if len(filelist) != 0:
                # Get the long name (index 8 of the WIN32_FIND_DATA tuple).
                fname = os.path.join(path, filelist[0][8])
        except win32api.error:
            raise error(
                "Could not resolve the path name '%s' to a full path" % (argv[0])
            )
        modName = os.path.splitext(fname)[0]
    return modName + "." + cls.__name__
def usage():
    """Print command-line help for the service utility, then exit(1)."""
    try:
        fname = os.path.split(sys.argv[0])[1]
    except Exception:
        # Was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception - any failure just
        # falls back to the raw argv[0].
        fname = sys.argv[0]
    print(
        "Usage: '%s [options] install|update|remove|start [...]|stop|restart [...]|debug [...]'"
        % fname
    )
    print("Options for 'install' and 'update' commands only:")
    print(" --username domain\\username : The Username the service is to run under")
    print(" --password password : The password for the username")
    print(
        " --startup [manual|auto|disabled|delayed] : How the service starts, default = manual"
    )
    print(" --interactive : Allow the service to interact with the desktop.")
    print(
        " --perfmonini file: .ini file to use for registering performance monitor data"
    )
    print(" --perfmondll file: .dll file to use when querying the service for")
    print(" performance data, default = perfmondata.dll")
    print("Options for 'start' and 'stop' commands only:")
    print(" --wait seconds: Wait for the service to actually start or stop.")
    print(" If you specify --wait with the 'stop' option, the service")
    print(" and all dependent services will be stopped, each waiting")
    print(" the specified period.")
    sys.exit(1)
The provided code snippet includes necessary dependencies for implementing the `HandleCommandLine` function. Write a Python function `def HandleCommandLine( cls, serviceClassString=None, argv=None, customInstallOptions="", customOptionHandler=None, )` to solve the following problem:
Utility function allowing services to process the command line. Allows standard commands such as 'start', 'stop', 'debug', 'install' etc. Install supports 'standard' command line options prefixed with '--', such as --username, --password, etc. In addition, the function allows custom command line options to be handled by the calling function.
Here is the function:
def HandleCommandLine(
    cls,
    serviceClassString=None,
    argv=None,
    customInstallOptions="",
    customOptionHandler=None,
):
    """Utility function allowing services to process the command line.

    Allows standard commands such as 'start', 'stop', 'debug', 'install' etc.
    Install supports 'standard' command line options prefixed with '--', such as
    --username, --password, etc. In addition,
    the function allows custom command line options to be handled by the calling function.

    Returns 0 on success, otherwise a win32 error code (or -1).
    """
    err = 0
    if argv is None:
        argv = sys.argv
    if len(argv) <= 1:
        usage()  # usage() exits the process.
    serviceName = cls._svc_name_
    serviceDisplayName = cls._svc_display_name_
    if serviceClassString is None:
        serviceClassString = GetServiceClassString(cls)

    # Pull apart the command line
    import getopt

    try:
        opts, args = getopt.getopt(
            argv[1:],
            customInstallOptions,
            [
                "password=",
                "username=",
                "startup=",
                "perfmonini=",
                "perfmondll=",
                "interactive",
                "wait=",
            ],
        )
    except getopt.error as details:
        print(details)
        usage()

    # Defaults for all standard options.
    userName = None
    password = None
    perfMonIni = perfMonDll = None
    startup = None
    delayedstart = None
    interactive = None
    waitSecs = 0
    for opt, val in opts:
        if opt == "--username":
            userName = val
        elif opt == "--password":
            password = val
        elif opt == "--perfmonini":
            perfMonIni = val
        elif opt == "--perfmondll":
            perfMonDll = val
        elif opt == "--interactive":
            interactive = 1
        elif opt == "--startup":
            map = {
                "manual": win32service.SERVICE_DEMAND_START,
                "auto": win32service.SERVICE_AUTO_START,
                "delayed": win32service.SERVICE_AUTO_START,  ## ChangeServiceConfig2 called later
                "disabled": win32service.SERVICE_DISABLED,
            }
            try:
                startup = map[val.lower()]
            except KeyError:
                # NOTE: invalid values are reported but processing continues
                # with startup left as None (i.e. the default).
                print("'%s' is not a valid startup option" % val)
            # "delayed" installs as auto-start plus a delayed-start flag set
            # via ChangeServiceConfig2 later; "auto" explicitly clears it.
            if val.lower() == "delayed":
                delayedstart = True
            elif val.lower() == "auto":
                delayedstart = False
            ## else no change
        elif opt == "--wait":
            try:
                waitSecs = int(val)
            except ValueError:
                print("--wait must specify an integer number of seconds.")
                usage()

    # The first positional argument is the command.
    arg = args[0]
    knownArg = 0
    # First we process all arguments which pass additional args on
    if arg == "start":
        knownArg = 1
        print("Starting service %s" % (serviceName))
        try:
            StartService(serviceName, args[1:])
            if waitSecs:
                WaitForServiceStatus(
                    serviceName, win32service.SERVICE_RUNNING, waitSecs
                )
        except win32service.error as exc:
            print("Error starting service: %s" % exc.strerror)
            err = exc.winerror

    elif arg == "restart":
        knownArg = 1
        print("Restarting service %s" % (serviceName))
        RestartService(serviceName, args[1:])
        if waitSecs:
            WaitForServiceStatus(serviceName, win32service.SERVICE_RUNNING, waitSecs)

    elif arg == "debug":
        knownArg = 1
        if not hasattr(sys, "frozen"):
            # non-frozen services use pythonservice.exe which handles a
            # -debug option
            svcArgs = " ".join(args[1:])
            try:
                exeName = LocateSpecificServiceExe(serviceName)
            except win32api.error as exc:
                if exc.winerror == winerror.ERROR_FILE_NOT_FOUND:
                    print("The service does not appear to be installed.")
                    print("Please install the service before debugging it.")
                    sys.exit(1)
                raise
            try:
                os.system("%s -debug %s %s" % (exeName, serviceName, svcArgs))
            # ^C is used to kill the debug service. Sometimes Python also gets
            # interrupted - ignore it...
            except KeyboardInterrupt:
                pass
        else:
            # py2exe services don't use pythonservice - so we simulate
            # debugging here.
            DebugService(cls, args)

    if not knownArg and len(args) != 1:
        usage()  # the rest of the cmds don't take addn args

    # NOTE: deliberately "if", not "elif": the install path below may set
    # arg = "update" to fall through when the service already exists.
    if arg == "install":
        knownArg = 1
        try:
            serviceDeps = cls._svc_deps_
        except AttributeError:
            serviceDeps = None
        try:
            exeName = cls._exe_name_
        except AttributeError:
            exeName = None  # Default to PythonService.exe
        try:
            exeArgs = cls._exe_args_
        except AttributeError:
            exeArgs = None
        try:
            description = cls._svc_description_
        except AttributeError:
            description = None
        print("Installing service %s" % (serviceName,))
        # Note that we install the service before calling the custom option
        # handler, so if the custom handler fails, we have an installed service (from NT's POV)
        # but is unlikely to work, as the Python code controlling it failed.  Therefore
        # we remove the service if the first bit works, but the second doesnt!
        try:
            InstallService(
                serviceClassString,
                serviceName,
                serviceDisplayName,
                serviceDeps=serviceDeps,
                startType=startup,
                bRunInteractive=interactive,
                userName=userName,
                password=password,
                exeName=exeName,
                perfMonIni=perfMonIni,
                perfMonDll=perfMonDll,
                exeArgs=exeArgs,
                description=description,
                delayedstart=delayedstart,
            )
            if customOptionHandler:
                customOptionHandler(*(opts,))
            print("Service installed")
        except win32service.error as exc:
            if exc.winerror == winerror.ERROR_SERVICE_EXISTS:
                arg = "update"  # Fall through to the "update" param!
            else:
                print(
                    "Error installing service: %s (%d)" % (exc.strerror, exc.winerror)
                )
                err = exc.winerror
        except ValueError as msg:  # Can be raised by custom option handler.
            print("Error installing service: %s" % str(msg))
            err = -1
            # xxx - maybe I should remove after _any_ failed install - however,
            # xxx - it may be useful to help debug to leave the service as it failed.
            # xxx - We really _must_ remove as per the comments above...
            # As we failed here, remove the service, so the next installation
            # attempt works.
            try:
                RemoveService(serviceName)
            except win32api.error:
                print("Warning - could not remove the partially installed service.")

    if arg == "update":
        knownArg = 1
        try:
            serviceDeps = cls._svc_deps_
        except AttributeError:
            serviceDeps = None
        try:
            exeName = cls._exe_name_
        except AttributeError:
            exeName = None  # Default to PythonService.exe
        try:
            exeArgs = cls._exe_args_
        except AttributeError:
            exeArgs = None
        try:
            description = cls._svc_description_
        except AttributeError:
            description = None
        print("Changing service configuration")
        try:
            ChangeServiceConfig(
                serviceClassString,
                serviceName,
                serviceDeps=serviceDeps,
                startType=startup,
                bRunInteractive=interactive,
                userName=userName,
                password=password,
                exeName=exeName,
                displayName=serviceDisplayName,
                perfMonIni=perfMonIni,
                perfMonDll=perfMonDll,
                exeArgs=exeArgs,
                description=description,
                delayedstart=delayedstart,
            )
            if customOptionHandler:
                customOptionHandler(*(opts,))
            print("Service updated")
        except win32service.error as exc:
            print(
                "Error changing service configuration: %s (%d)"
                % (exc.strerror, exc.winerror)
            )
            err = exc.winerror

    elif arg == "remove":
        knownArg = 1
        print("Removing service %s" % (serviceName))
        try:
            RemoveService(serviceName)
            print("Service removed")
        except win32service.error as exc:
            print("Error removing service: %s (%d)" % (exc.strerror, exc.winerror))
            err = exc.winerror

    elif arg == "stop":
        knownArg = 1
        print("Stopping service %s" % (serviceName))
        try:
            if waitSecs:
                StopServiceWithDeps(serviceName, waitSecs=waitSecs)
            else:
                StopService(serviceName)
        except win32service.error as exc:
            print("Error stopping service: %s (%d)" % (exc.strerror, exc.winerror))
            err = exc.winerror

    if not knownArg:
        err = -1
        print("Unknown command - '%s'" % arg)
        usage()
    return err
171,653 | import warnings
import os
import sys
import regutil
import win32api
import win32con
def CheckRegisteredExe(exename):
    """Verify that the registry's App Paths entry for exename points at a real file.

    Prints a diagnostic if the value is missing or the file does not exist.
    """
    try:
        os.stat(
            win32api.RegQueryValue(
                regutil.GetRootKey(), regutil.GetAppPathsKey() + "\\" + exename
            )
        )
    # except SystemError:
    except (os.error, win32api.error):
        # Either the registry value is missing (win32api.error) or the file
        # it names does not exist (os.error).
        print("Registration of %s - Not registered correctly" % exename)
def CheckPythonPaths(verbose):
    """Validate the registered PythonPath registry entries.

    Args:
        verbose: if truthy, prints every path checked, not just problems.
    """
    if verbose:
        print("Python Paths:")
    # Check the core path
    if verbose:
        print("\tCore Path:", end=" ")
    try:
        appPath = win32api.RegQueryValue(
            regutil.GetRootKey(), regutil.BuildDefaultPythonKey() + "\\PythonPath"
        )
    except win32api.error as exc:
        print("** does not exist - ", exc.strerror)
        # BUGFIX: previously fell through here and used the unbound appPath
        # (NameError); without the core key the RegOpenKey below would fail
        # too, so there is nothing further to check.
        return
    problem = CheckPathString(appPath)
    if problem:
        print(problem)
    else:
        if verbose:
            print(appPath)

    # Now check each application-specific sub-key of PythonPath.
    key = win32api.RegOpenKey(
        regutil.GetRootKey(),
        regutil.BuildDefaultPythonKey() + "\\PythonPath",
        0,
        win32con.KEY_READ,
    )
    try:
        keyNo = 0
        while 1:
            try:
                appName = win32api.RegEnumKey(key, keyNo)
                appPath = win32api.RegQueryValue(key, appName)
                if verbose:
                    print("\t" + appName + ":", end=" ")
                if appPath:
                    problem = CheckPathString(appPath)
                    if problem:
                        print(problem)
                    else:
                        if verbose:
                            print(appPath)
                else:
                    if verbose:
                        print("(empty)")
                keyNo = keyNo + 1
            except win32api.error:
                # No more subkeys to enumerate.
                break
    finally:
        win32api.RegCloseKey(key)
def CheckHelpFiles(verbose):
    """Verify that every registered Python help file actually exists on disk.

    Silently returns if the Help registry key itself does not exist.
    """
    if verbose:
        print("Help Files:")
    try:
        key = win32api.RegOpenKey(
            regutil.GetRootKey(),
            regutil.BuildDefaultPythonKey() + "\\Help",
            0,
            win32con.KEY_READ,
        )
    except win32api.error as exc:
        import winerror

        # A missing Help key simply means no help files are registered.
        if exc.winerror != winerror.ERROR_FILE_NOT_FOUND:
            raise
        return

    try:
        keyNo = 0
        while 1:
            try:
                helpDesc = win32api.RegEnumKey(key, keyNo)
                helpFile = win32api.RegQueryValue(key, helpDesc)
                if verbose:
                    print("\t" + helpDesc + ":", end=" ")
                # query the os section.
                try:
                    os.stat(helpFile)
                    if verbose:
                        print(helpFile)
                except os.error:
                    print("** Help file %s does not exist" % helpFile)
                keyNo = keyNo + 1
            except win32api.error as exc:
                import winerror

                # ERROR_NO_MORE_ITEMS is the normal end of enumeration.
                if exc.winerror != winerror.ERROR_NO_MORE_ITEMS:
                    raise
                break
    finally:
        win32api.RegCloseKey(key)
def CheckRegisteredModules(verbose):
    """Warn if the deprecated 'Modules' registry key exists.

    Args:
        verbose: accepted for signature consistency with the other checks;
            the warning is always printed when the key exists.
    """
    # Check out all registered modules.
    k = regutil.BuildDefaultPythonKey() + "\\Modules"
    try:
        # The handle is only used as an existence probe; the PyHKEY object
        # closes itself when it goes out of scope.
        keyhandle = win32api.RegOpenKey(regutil.GetRootKey(), k)
        # BUGFIX: corrected typo "deprectated" in the warning message.
        print("WARNING: 'Modules' registry entry is deprecated and evil!")
    except win32api.error as exc:
        import winerror

        # Key absent is the good case; re-raise anything unexpected.
        if exc.winerror != winerror.ERROR_FILE_NOT_FOUND:
            raise
        return
def CheckRegistry(verbose=0):
    """Run all registry sanity checks for the Python installation.

    Args:
        verbose: if truthy, each check prints everything it examines.
    """
    # check the registered modules
    # NOTE: os.environ lookups are case-insensitive on Windows, so the
    # lowercase key still matches a PYTHONPATH environment variable.
    if verbose and "pythonpath" in os.environ:
        print("Warning - PythonPath in environment - please check it!")
    # Check out all paths on sys.path

    CheckPythonPaths(verbose)
    CheckHelpFiles(verbose)
    CheckRegisteredModules(verbose)
    CheckRegisteredExe("Python.exe")
171,654 | import time
import win32pdh
def find_pdh_counter_localized_name(english_name, machine_name=None):
    """Translate an English PDH counter name to the locale-specific name.

    On first use, builds counter_english_map (a module-level dict cache
    defined outside this snippet - presumably initialized to {}) from the
    registry's English ("009") counter table, mapping lowercased English
    names to counter indices.

    Raises KeyError if english_name is not a known counter.
    """
    if not counter_english_map:
        import win32api
        import win32con

        # "Counter 009" is the English counter strings table: a flat list of
        # alternating (index, name) strings.
        counter_reg_value = win32api.RegQueryValueEx(
            win32con.HKEY_PERFORMANCE_DATA, "Counter 009"
        )
        counter_list = counter_reg_value[0]
        for i in range(0, len(counter_list) - 1, 2):
            try:
                counter_id = int(counter_list[i])
            except ValueError:
                # Skip malformed index entries.
                continue
            counter_english_map[counter_list[i + 1].lower()] = counter_id
    return win32pdh.LookupPerfNameByIndex(
        machine_name, counter_english_map[english_name.lower()]
    )
def GetPerformanceAttributes(
    object, counter, instance=None, inum=-1, format=win32pdh.PDH_FMT_LONG, machine=None
):
    """Take a single sample of one PDH counter and return its formatted value.

    Args:
        object/counter/instance/inum/machine: parts of the PDH counter path.
        format: a win32pdh.PDH_FMT_* constant for the returned value type.
    """
    # NOTE: Many counters require 2 samples to give accurate results,
    # including "% Processor Time" (as by definition, at any instant, a
    # thread's CPU usage is either 0 or 100).  To read counters like this,
    # you should copy this function, but keep the counter open, and call
    # CollectQueryData() each time you need to know.
    # See http://support.microsoft.com/default.aspx?scid=kb;EN-US;q262938
    # and http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp
    # My older explanation for this was that the "AddCounter" process forced
    # the CPU to 100%, but the above makes more sense :)
    path = win32pdh.MakeCounterPath((machine, object, instance, None, inum, counter))
    hq = win32pdh.OpenQuery()
    try:
        hc = win32pdh.AddCounter(hq, path)
        try:
            win32pdh.CollectQueryData(hq)
            type, val = win32pdh.GetFormattedCounterValue(hc, format)
            return val
        finally:
            win32pdh.RemoveCounter(hc)
    finally:
        win32pdh.CloseQuery(hq)
The provided code snippet includes necessary dependencies for implementing the `FindPerformanceAttributesByName` function. Write a Python function `def FindPerformanceAttributesByName( instanceName, object=None, counter=None, format=win32pdh.PDH_FMT_LONG, machine=None, bRefresh=0, )` to solve the following problem:
Find performance attributes by (case insensitive) instance name. Given a process name, return a list with the requested attributes. Most useful for returning a tuple of PIDs given a process name.
Here is the function:
def FindPerformanceAttributesByName(
    instanceName,
    object=None,
    counter=None,
    format=win32pdh.PDH_FMT_LONG,
    machine=None,
    bRefresh=0,
):
    """Find performance attributes by (case insensitive) instance name.

    Given a process name, return a list with the requested attributes.
    Most useful for returning a tuple of PIDs given a process name.
    """
    if object is None:
        object = find_pdh_counter_localized_name("Process", machine)
    if counter is None:
        counter = find_pdh_counter_localized_name("ID Process", machine)
    if bRefresh:  # PDH docs say this is how you do a refresh.
        win32pdh.EnumObjects(None, machine, 0, 1)
    instanceName = instanceName.lower()
    items, instances = win32pdh.EnumObjectItems(None, None, object, -1)
    # Track multiple instances: map each name to its highest zero-based
    # duplicate index (occurrences - 1).
    instance_dict = {}
    for instance in instances:
        instance_dict[instance] = instance_dict.get(instance, -1) + 1
    ret = []
    for instance, max_instances in instance_dict.items():
        # The name test is loop-invariant with respect to inum, so it is
        # hoisted out of the inner loop (previously re-evaluated per inum).
        if instance.lower() != instanceName:
            continue
        for inum in range(max_instances + 1):
            ret.append(
                GetPerformanceAttributes(
                    object, counter, instance, inum, format, machine
                )
            )
    return ret
171,655 | import time
import win32pdh
def find_pdh_counter_localized_name(english_name, machine_name=None):
    """Translate an English PDH counter name to the locale-specific name.

    Lazily populates counter_english_map (a module-level cache defined
    outside this snippet) from the English ("009") counter strings table.
    Raises KeyError for unknown counter names.
    """
    if not counter_english_map:
        import win32api
        import win32con

        # The value is a flat list of alternating (index, name) strings.
        counter_reg_value = win32api.RegQueryValueEx(
            win32con.HKEY_PERFORMANCE_DATA, "Counter 009"
        )
        counter_list = counter_reg_value[0]
        for i in range(0, len(counter_list) - 1, 2):
            try:
                counter_id = int(counter_list[i])
            except ValueError:
                # Skip malformed index entries.
                continue
            counter_english_map[counter_list[i + 1].lower()] = counter_id
    return win32pdh.LookupPerfNameByIndex(
        machine_name, counter_english_map[english_name.lower()]
    )
def ShowAllProcesses():
    """Print a table of all processes with their PID and first few counters."""
    object = find_pdh_counter_localized_name("Process")
    items, instances = win32pdh.EnumObjectItems(
        None, None, object, win32pdh.PERF_DETAIL_WIZARD
    )
    # Need to track multiple instances of the same name.
    instance_dict = {}
    for instance in instances:
        try:
            instance_dict[instance] = instance_dict[instance] + 1
        except KeyError:
            instance_dict[instance] = 0
    # Bit of a hack to get useful info.
    items = [find_pdh_counter_localized_name("ID Process")] + items[:5]
    print("Process Name", ",".join(items))
    for instance, max_instances in instance_dict.items():
        for inum in range(max_instances + 1):
            hq = win32pdh.OpenQuery()
            hcs = []
            for item in items:
                path = win32pdh.MakeCounterPath(
                    (None, object, instance, None, inum, item)
                )
                hcs.append(win32pdh.AddCounter(hq, path))
            win32pdh.CollectQueryData(hq)
            # as per http://support.microsoft.com/default.aspx?scid=kb;EN-US;q262938, some "%" based
            # counters need two collections
            time.sleep(0.01)
            win32pdh.CollectQueryData(hq)
            print("%-15s\t" % (instance[:15]), end=" ")
            for hc in hcs:
                type, val = win32pdh.GetFormattedCounterValue(hc, win32pdh.PDH_FMT_LONG)
                print("%5d" % (val), end=" ")
                win32pdh.RemoveCounter(hc)
            print()
            win32pdh.CloseQuery(hq)
171,656 | import time
import win32pdh
def BrowseCallBackDemo(counters):
    """Demo callback for win32pdh.BrowseCounters: print each selected counter.

    Relies on GetPerformanceAttributes, defined elsewhere in this module.
    Returns 0 as required by the browse-callback protocol.
    """
    ## BrowseCounters can now return multiple counter paths
    for counter in counters:
        (
            machine,
            object,
            instance,
            parentInstance,
            index,
            counterName,
        ) = win32pdh.ParseCounterPath(counter)
        result = GetPerformanceAttributes(
            object, counterName, instance, index, win32pdh.PDH_FMT_DOUBLE, machine
        )
        print("Value of '%s' is" % counter, result)
        print(
            "Added '%s' on object '%s' (machine %s), instance %s(%d)-parent of %s"
            % (counterName, object, machine, instance, index, parentInstance)
        )
    return 0
def browse(
    callback=BrowseCallBackDemo,
    title="Python Browser",
    level=win32pdh.PERF_DETAIL_WIZARD,
):
    """Show the PDH counter-browser dialog, invoking callback with selections."""
    win32pdh.BrowseCounters(None, 0, callback, level, title, ReturnMultiple=True)
171,657 |
def HRESULT_FROM_WIN32(scode):
    """Map a Win32 error code into the HRESULT space (severity=error, FACILITY_WIN32)."""
    # -2147024896 is 0x80070000 as a signed 32-bit value.
    return (scode & 0xFFFF) | -2147024896
171,658 |
def SUCCEEDED(Status):
    """Return True when an HRESULT/SCODE value indicates success (non-negative)."""
    return not (Status < 0)
171,659 |
def FAILED(Status):
    """Return True when an HRESULT/SCODE value indicates failure (negative)."""
    return not (Status >= 0)
171,660 |
def SCODE_CODE(sc):
    """Extract the 16-bit error-code field (low word) from an SCODE."""
    return sc & 0xFFFF
171,661 |
def HRESULT_FACILITY(hr):
    """Extract the 13-bit facility field (bits 16-28) from an HRESULT."""
    return (hr >> 16) & 0x1FFF
171,662 |
def SCODE_FACILITY(sc):
    """Extract the 13-bit facility field (bits 16-28) from an SCODE."""
    return (sc >> 16) & 0x1FFF
171,663 |
def HRESULT_SEVERITY(hr):
    """Extract the severity bit (bit 31; 1 = error) from an HRESULT."""
    return (hr >> 31) & 0x1
171,664 |
def SCODE_SEVERITY(sc):
    """Extract the severity bit (bit 31; 1 = error) from an SCODE."""
    return (sc >> 31) & 0x1
# Bit OR'ed into an NTSTATUS value to mark it as mapped into HRESULT space.
FACILITY_NT_BIT = 0x10000000  # == 268435456

def HRESULT_FROM_NT(x):
    """Map an NTSTATUS value x into an HRESULT by setting the NT facility bit."""
    return FACILITY_NT_BIT | x
171,666 |
def GetScode(hr):
    """Return the SCODE for an HRESULT; on Win32 these are identical, so this
    is a pass-through kept for source compatibility with the C macro."""
    return hr
171,667 |
def ResultFromScode(sc):
    """Return the HRESULT for an SCODE; on Win32 these are identical, so this
    is a pass-through kept for source compatibility with the C macro."""
    return sc
171,668 | import pywintypes
from ntsecuritycon import FILE_READ_DATA, FILE_WRITE_DATA
def CTL_CODE(DeviceType, Function, Method, Access):
    """Build a Windows device I/O control code from its four fields.

    Layout (matching the winioctl.h CTL_CODE macro):
    bits 31-16 device type, 15-14 access, 13-2 function, 1-0 method.
    """
    code = DeviceType << 16
    code |= Access << 14
    code |= Function << 2
    return code | Method
171,669 | import pywintypes
from ntsecuritycon import FILE_READ_DATA, FILE_WRITE_DATA
def DEVICE_TYPE_FROM_CTL_CODE(ctrlCode):
    """Extract the 16-bit device-type field (bits 31-16) from an IOCTL code."""
    return (ctrlCode >> 16) & 0xFFFF
171,670 | import glob
import optparse
import os
import struct
import sys
from win32api import BeginUpdateResource, EndUpdateResource, UpdateResource
def VS_VERSION_INFO(maj, min, sub, build, sdata, vdata, debug=0, is_dll=1):
def stamp(pathname, options):
    """Write a VS_VERSION_INFO resource into the binary at pathname.

    Args:
        pathname: path of the exe/dll/pyd to stamp.
        options: parsed optparse options (version, internal_name,
            original_filename, comments, company, description, copyright,
            trademarks, product, dll, debug, verbose).

    Raises:
        ValueError: if options.version is not of the form a.b.c.d.
    """
    # For some reason, the API functions report success if the file is open
    # but doesnt work!  Try and open the file for writing, just to see if it is
    # likely the stamp will work!
    try:
        # "with" guarantees the probe handle is closed even on odd failures.
        with open(pathname, "a+b"):
            pass
    except IOError as why:
        print("WARNING: File %s could not be opened - %s" % (pathname, why))

    ver = options.version
    try:
        bits = [int(i) for i in ver.split(".")]
        vmaj, vmin, vsub, vbuild = bits
    except (IndexError, TypeError, ValueError):
        raise ValueError("--version must be a.b.c.d (all integers) - got %r" % ver)

    # Default internal/original names to the file's base name.
    ifn = options.internal_name
    if not ifn:
        ifn = os.path.basename(pathname)
    ofn = options.original_filename
    if ofn is None:
        ofn = os.path.basename(pathname)

    sdata = {
        "Comments": options.comments,
        "CompanyName": options.company,
        "FileDescription": options.description,
        "FileVersion": ver,
        "InternalName": ifn,
        "LegalCopyright": options.copyright,
        "LegalTrademarks": options.trademarks,
        "OriginalFilename": ofn,
        "ProductName": options.product,
        "ProductVersion": ver,
    }
    vdata = {
        # US English (0x409), Windows-1252 codepage.
        "Translation": struct.pack("hh", 0x409, 1252),
    }

    # Infer DLL-ness / debug-ness from the filename when not given explicitly.
    is_dll = options.dll
    if is_dll is None:
        is_dll = os.path.splitext(pathname)[1].lower() in ".dll .pyd".split()
    is_debug = options.debug
    if is_debug is None:
        is_debug = os.path.splitext(pathname)[0].lower().endswith("_d")

    # convert None to blank strings
    for k, v in list(sdata.items()):
        if v is None:
            sdata[k] = ""

    vs = VS_VERSION_INFO(vmaj, vmin, vsub, vbuild, sdata, vdata, is_debug, is_dll)
    h = BeginUpdateResource(pathname, 0)
    UpdateResource(h, 16, 1, vs)  # 16 == RT_VERSION
    EndUpdateResource(h, 0)

    if options.verbose:
        print("Stamped:", pathname)
171,671 |
def SEC_SUCCESS(Status):
    """Return True when an SSPI SECURITY_STATUS value indicates success."""
    return not (Status < 0)
171,672 |
def GET_ALG_CLASS(x):
    """Extract the algorithm-class bits (bits 15-13) from an ALG_ID."""
    return x & 0xE000  # 0xE000 == 7 << 13
171,673 |
def GET_ALG_TYPE(x):
    """Extract the algorithm-type bits (bits 12-9) from an ALG_ID."""
    return x & 0x1E00  # 0x1E00 == 15 << 9
171,674 |
def GET_ALG_SID(x):
    """Extract the algorithm sub-identifier (low 9 bits) from an ALG_ID."""
    return x & 0x1FF  # 0x1FF == 511
# Return code used by the wincrypt APIs for a successful call.
CRYPT_SUCCEED = 1

def RCRYPT_SUCCEEDED(rt):
    """Return True when rt is the CRYPT_SUCCEED return code."""
    return rt == CRYPT_SUCCEED
# Return code used by the wincrypt APIs for a failed call.
CRYPT_FAILED = 0

def RCRYPT_FAILED(rt):
    """Return True when rt is the CRYPT_FAILED return code."""
    return rt == CRYPT_FAILED
# Low word of a combined encoding-type value holds the certificate encoding.
CERT_ENCODING_TYPE_MASK = 0x0000FFFF

def GET_CERT_ENCODING_TYPE(X):
    """Extract the certificate-encoding-type bits (low word) from X."""
    return CERT_ENCODING_TYPE_MASK & X
# High word of a combined encoding-type value holds the message encoding.
# -65536 is the 32-bit mask 0xFFFF0000 expressed as a signed value (== ~0xFFFF).
CMSG_ENCODING_TYPE_MASK = -65536

def GET_CMSG_ENCODING_TYPE(X):
    """Extract the message-encoding-type bits (high word) from X."""
    return CMSG_ENCODING_TYPE_MASK & X
171,679 |
def INDEXTOOVERLAYMASK(i):
    """Shift an image-list overlay index into the state-mask position (bits 11-8)."""
    return i * 256  # equivalent to i << 8
171,680 |
def INDEXTOSTATEIMAGEMASK(i):
    """Shift a state-image index into the state-mask position (bits 15-12)."""
    return i * 4096  # equivalent to i << 12
171,681 | import importlib.machinery
import importlib.util
import os
import sys
def __import_pywin32_system_module__(modname, globs):
    """Load the pywin32 "system" DLL `modname` (e.g. pywintypes, pythoncom),
    ensuring the process only ever holds one copy of it.

    After the DLL module is loaded, sys.modules[modname] is re-pointed at
    the caller's module object and `globs` (the caller's globals) is
    updated with the DLL module's namespace.  Raises ImportError when the
    DLL cannot be located.
    """
    # This has been through a number of iterations. The problem: how to
    # locate pywintypesXX.dll when it may be in a number of places, and how
    # to avoid ever loading it twice. This problem is compounded by the
    # fact that the "right" way to do this requires win32api, but this
    # itself requires pywintypesXX.
    # And the killer problem is that someone may have done 'import win32api'
    # before this code is called. In that case Windows will have already
    # loaded pywintypesXX as part of loading win32api - but by the time
    # we get here, we may locate a different one. This appears to work, but
    # then starts raising bizarre TypeErrors complaining that something
    # is not a pywintypes type when it clearly is!
    # So in what we hope is the last major iteration of this, we now
    # rely on a _win32sysloader module, implemented in C but not relying
    # on pywintypesXX.dll. It then can check if the DLL we are looking for
    # lib is already loaded.
    # See if this is a debug build.
    suffix = "_d" if "_d.pyd" in importlib.machinery.EXTENSION_SUFFIXES else ""
    filename = "%s%d%d%s.dll" % (
        modname,
        sys.version_info[0],
        sys.version_info[1],
        suffix,
    )
    if hasattr(sys, "frozen"):
        # If we are running from a frozen program (py2exe, McMillan, freeze)
        # then we try and load the DLL from our sys.path
        # XXX - This path may also benefit from _win32sysloader? However,
        # MarkH has never seen the DLL load problem with py2exe programs...
        for look in sys.path:
            # If the sys.path entry is a (presumably) .zip file, use the
            # directory
            if os.path.isfile(look):
                look = os.path.dirname(look)
            found = os.path.join(look, filename)
            if os.path.isfile(found):
                break
        else:
            raise ImportError(
                "Module '%s' isn't in frozen sys.path %s" % (modname, sys.path)
            )
    else:
        # First see if it already in our process - if so, we must use that.
        import _win32sysloader

        found = _win32sysloader.GetModuleFilename(filename)
        if found is None:
            # We ask Windows to load it next. This is in an attempt to
            # get the exact same module loaded should pywintypes be imported
            # first (which is how we are here) or if, eg, win32api was imported
            # first thereby implicitly loading the DLL.
            # Sadly though, it doesn't quite work - if pywintypesxx.dll
            # is in system32 *and* the executable's directory, on XP SP2, an
            # import of win32api will cause Windows to load pywintypes
            # from system32, where LoadLibrary for that name will
            # load the one in the exe's dir.
            # That shouldn't really matter though, so long as we only ever
            # get one loaded.
            found = _win32sysloader.LoadModule(filename)
        if found is None:
            # Windows can't find it - which although isn't relevant here,
            # means that we *must* be the first win32 import, as an attempt
            # to import win32api etc would fail when Windows attempts to
            # locate the DLL.
            # This is most likely to happen for "non-admin" installs, where
            # we can't put the files anywhere else on the global path.
            # If there is a version in our Python directory, use that
            if os.path.isfile(os.path.join(sys.prefix, filename)):
                found = os.path.join(sys.prefix, filename)
        if found is None:
            # Not in the Python directory? Maybe we were installed via
            # easy_install...
            if os.path.isfile(os.path.join(os.path.dirname(__file__), filename)):
                found = os.path.join(os.path.dirname(__file__), filename)
        # There are 2 site-packages directories - one "global" and one "user".
        # We could be in either, or both (but with different versions!). Factors include
        # virtualenvs, post-install script being run or not, `setup.py install` flags, etc.
        # In a worst-case, it means, say 'python -c "import win32api"'
        # will not work but 'python -c "import pywintypes, win32api"' will,
        # but it's better than nothing.
        # We use the same logic as pywin32_bootstrap to find potential location for the dll
        # Simply import pywin32_system32 and look in the paths in pywin32_system32.__path__
        if found is None:
            import pywin32_system32

            for path in pywin32_system32.__path__:
                maybe = os.path.join(path, filename)
                if os.path.isfile(maybe):
                    found = maybe
                    break
        if found is None:
            # give up in disgust.
            raise ImportError("No system module '%s' (%s)" % (modname, filename))
    # After importing the module, sys.modules is updated to the DLL we just
    # loaded - which isn't what we want. So we update sys.modules to refer to
    # this module, and update our globals from it.
    old_mod = sys.modules[modname]
    # Load the DLL.
    loader = importlib.machinery.ExtensionFileLoader(modname, found)
    spec = importlib.machinery.ModuleSpec(name=modname, loader=loader, origin=found)
    mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod)
    # Check the sys.modules[] behaviour we describe above is true...
    assert sys.modules[modname] is mod
    # as above - re-reset to the *old* module object then update globs.
    sys.modules[modname] = old_mod
    globs.update(mod.__dict__)
171,682 | import os
import shutil
import sys
import winreg
import win32api
def _docopy(src, dest):
    """Copy `src` into directory `dest`, returning 1 on success, 0 if the
    source could not be found (a message is printed in that case).

    If `src` does not exist as given, it is also looked for next to the
    running script (sys.argv[0]) before giving up.
    """
    orig_src = src
    if not os.path.isfile(src):
        src = os.path.join(os.path.split(sys.argv[0])[0], src)
        # The fallback must be checked too - without this second test the
        # script-relative copy was unreachable and the function always
        # bailed out here.
        if not os.path.isfile(src):
            print(
                "Can not find %s or %s to copy"
                % (os.path.abspath(orig_src), os.path.abspath(src))
            )
            return 0
    try:
        shutil.copy(src, dest)
        print("Copied %s -> %s" % (src, dest))
        return 1
    except Exception:
        print("Error copying '%s' -> '%s'" % (src, dest))
        # sys.exc_info is a function - the original printed the unbound
        # method object instead of the exception.
        print(str(sys.exc_info()[1]))
        usage_and_die(3)
def _doregister(mod_name, dll_name):
    # Register `dll_name` as the debug build of module `mod_name` by
    # writing a "Debug" sub-key (default value = the DLL path) under the
    # module's PythonCore registry entry.  Dies via usage_and_die(4) when
    # the module is not registered at all.
    assert os.path.isfile(dll_name), "Shouldn't get here if the file doesn't exist!"
    try:
        key = winreg.OpenKey(
            winreg.HKEY_LOCAL_MACHINE,
            "Software\\Python\\PythonCore\\%s\\Modules\\%s" % (sys.winver, mod_name),
        )
    except winreg.error:
        try:
            # NOTE(review): this retry opens exactly the same key as the
            # attempt above - presumably one of the two was meant to use a
            # different hive (e.g. HKEY_CURRENT_USER); confirm against the
            # original script.
            key = winreg.OpenKey(
                winreg.HKEY_LOCAL_MACHINE,
                "Software\\Python\\PythonCore\\%s\\Modules\\%s"
                % (sys.winver, mod_name),
            )
        except winreg.error:
            print(
                "Could not find the existing '%s' module registered in the registry"
                % (mod_name,)
            )
            usage_and_die(4)
    # Create the debug key.
    sub_key = winreg.CreateKey(key, "Debug")
    winreg.SetValue(sub_key, None, winreg.REG_SZ, dll_name)
    print("Registered '%s' in the registry" % (dll_name,))
# Module-level driver: locate the DLL hosting the interpreter and copy its
# debug ("_d") sibling alongside it.
# Was `path, fname = path, fname = os.path.split(...)` - a redundant chained
# assignment that unpacked the same tuple twice.
path, fname = os.path.split(win32api.GetModuleFileName(sys.dllhandle))
base, ext = os.path.splitext(fname)
_docopy(base + "_d" + ext, path)
def _domodule(mod_name, release_mod_filename):
    """Copy the debug ("_d") build of a module next to its release build
    and register it under the module's Debug registry key."""
    # Trailing "| null |" table residue stripped from the final call.
    path, fname = os.path.split(release_mod_filename)
    base, ext = os.path.splitext(fname)
    new_fname = base + "_d" + ext
    if _docopy(new_fname, path):
        _doregister(mod_name, os.path.abspath(os.path.join(path, new_fname)))
171,683 | import sys
import win32api
import win32con
import win32pdhutil
def killProcName(procname):
    """Kill the single running process named `procname`.

    Returns "" on success, otherwise a human-readable error string (no
    matching process, or more than one match).  The current process is
    never a candidate.
    """
    # Change suggested by Dan Knierim, who found that this performed a
    # "refresh", allowing us to kill processes created since this was run
    # for the first time.
    try:
        win32pdhutil.GetPerformanceAttributes("Process", "ID Process", procname)
    except Exception:
        # Best-effort refresh only; was a bare `except:` which also
        # swallowed KeyboardInterrupt/SystemExit.
        pass
    pids = win32pdhutil.FindPerformanceAttributesByName(procname)
    # If _my_ pid in there, remove it!
    try:
        pids.remove(win32api.GetCurrentProcessId())
    except ValueError:
        pass
    if len(pids) == 0:
        result = "Can't find %s" % procname
    elif len(pids) > 1:
        result = "Found too many %s's - pids=`%s`" % (procname, pids)
    else:
        handle = win32api.OpenProcess(win32con.PROCESS_TERMINATE, 0, pids[0])
        win32api.TerminateProcess(handle, 0)
        win32api.CloseHandle(handle)
        result = ""
    return result
171,684 | import sys
import win32ras
class ConnectionError(Exception):
    """Raised when a RAS connection cannot be established."""
The provided code snippet includes necessary dependencies for implementing the `Connect` function. Write a Python function `def Connect(rasEntryName, numRetries=5)` to solve the following problem:
Make a connection to the specified RAS entry. Returns a tuple of (bool, handle) on success. - bool is 1 if a new connection was established, or 0 if a connection already existed. - handle is a RAS HANDLE that can be passed to Disconnect() to end the connection. Raises a ConnectionError if the connection could not be established.
Here is the function:
def Connect(rasEntryName, numRetries=5):
    """Make a connection to the specified RAS entry.
    Returns a tuple of (bool, handle) on success.
    - bool is 1 if a new connection was established, or 0 if a connection already existed.
    - handle is a RAS HANDLE that can be passed to Disconnect() to end the connection.
    Raises a ConnectionError if the connection could not be established.
    """
    import time  # for the retry delay below

    assert numRetries > 0
    # Reuse an existing connection if one is already up for this entry.
    for info in win32ras.EnumConnections():
        if info[1].lower() == rasEntryName.lower():
            print("Already connected to", rasEntryName)
            return 0, info[0]
    dial_params, have_pw = win32ras.GetEntryDialParams(None, rasEntryName)
    if not have_pw:
        print("Error: The password is not saved for this connection")
        print(
            "Please connect manually selecting the 'save password' option and try again"
        )
        sys.exit(1)
    print("Connecting to", rasEntryName, "...")
    retryCount = numRetries
    while retryCount > 0:
        rasHandle, errCode = win32ras.Dial(None, None, dial_params, None)
        if win32ras.IsHandleValid(rasHandle):
            break
        print("Retrying...")
        # Was win32api.Sleep(5000), but win32api is not imported in this
        # module - that raised NameError on the retry path.
        time.sleep(5)
        retryCount = retryCount - 1
    if errCode:
        raise ConnectionError(errCode, win32ras.GetErrorString(errCode))
    return 1, rasHandle
171,685 | import sys
import win32ras
class ConnectionError(Exception):
    """Error raised for RAS connect/disconnect failures."""
def Disconnect(handle):
    """Hang up a RAS connection.

    `handle` is either a RAS connection handle or a phonebook entry name;
    a name is resolved against the active connections first.  Raises
    ConnectionError if a named entry is not currently connected.
    """
    if isinstance(handle, str):  # have they passed a connection name?
        for info in win32ras.EnumConnections():
            if info[1].lower() == handle.lower():
                handle = info[0]
                break
        else:
            raise ConnectionError(0, "Not connected to entry '%s'" % handle)
    win32ras.HangUp(handle)
171,686 | import sys
import win32ras
usage = """rasutil.py - Utilities for using RAS
Usage:
rasutil [-r retryCount] [-c rasname] [-d rasname]
-r retryCount - Number of times to retry the RAS connection
-c rasname - Connect to the phonebook entry specified by rasname
-d rasname - Disconnect from the phonebook entry specified by rasname
"""
def Usage(why):
print(why)
print(usage)
sys.exit(1) | null |
171,687 | import sys
def LocatePath(fileName, searchPaths):
    """Like LocateFileName, but returns a directory only."""
    import os

    located = LocateFileName(fileName, searchPaths)
    return os.path.abspath(os.path.dirname(located))
The provided code snippet includes necessary dependencies for implementing the `LocateOptionalPath` function. Write a Python function `def LocateOptionalPath(fileName, searchPaths)` to solve the following problem:
Like LocatePath, but returns None if the user cancels.
Here is the function:
def LocateOptionalPath(fileName, searchPaths):
    """Like LocatePath, but returns None if the user cancels."""
    # Trailing docstring-column residue stripped from the final line.
    try:
        return LocatePath(fileName, searchPaths)
    except KeyboardInterrupt:
        return None
171,688 | import sys
def LocateFileName(fileNamesString, searchPaths):
    """Locate a file name, anywhere on the search path.
    If the file can not be located, prompt the user to find it for us
    (using a common OpenFile dialog)
    Raises KeyboardInterrupt if the user cancels.
    """
    import os
    import regutil

    # `fileNamesString` may name several acceptable files, ";"-separated.
    fileNames = fileNamesString.split(";")
    for path in searchPaths:
        for fileName in fileNames:
            try:
                retPath = os.path.join(path, fileName)
                # os.stat succeeds => the entry exists (dirs included).
                os.stat(retPath)
                break
            except os.error:
                retPath = None
        if retPath:
            break
    else:
        # Nothing found on the search path - fall back to asking the user.
        fileName = fileNames[0]
        try:
            import win32con
            import win32ui
        except ImportError:
            # NOTE(review): `error` is assumed to be the module-level
            # exception class defined elsewhere in this file.
            raise error(
                "Need to locate the file %s, but the win32ui module is not available\nPlease run the program again, passing as a parameter the path to this file."
                % fileName
            )
        # Display a common dialog to locate the file.
        flags = win32con.OFN_FILEMUSTEXIST
        ext = os.path.splitext(fileName)[1]
        filter = "Files of requested type (*%s)|*%s||" % (ext, ext)
        dlg = win32ui.CreateFileDialog(1, None, fileName, flags, filter, None)
        dlg.SetOFNTitle("Locate " + fileName)
        if dlg.DoModal() != win32con.IDOK:
            raise KeyboardInterrupt("User cancelled the process")
        retPath = dlg.GetPathName()
    return os.path.abspath(retPath)
The provided code snippet includes necessary dependencies for implementing the `LocateOptionalFileName` function. Write a Python function `def LocateOptionalFileName(fileName, searchPaths=None)` to solve the following problem:
Like LocateFileName, but returns None if the user cancels.
Here is the function:
def LocateOptionalFileName(fileName, searchPaths=None):
    """Like LocateFileName, but returns None if the user cancels."""
    # Trailing docstring-column residue stripped from the final line.
    try:
        return LocateFileName(fileName, searchPaths)
    except KeyboardInterrupt:
        return None
class error(Exception):
    """Generic error raised by the registry-setup helpers."""
    # Leading "171,689 |" table-row residue stripped from the class line.
import sys
def FindPackagePath(packageName, knownFileName, searchPaths):
    """Find a package.
    Given a ni style package name, check the package is registered.
    First place looked is the registry for an existing entry. Then
    the searchPaths are searched.
    """
    import os
    import regutil

    registered = regutil.GetRegisteredNamedPath(packageName)
    if registered and IsPackageDir(registered, packageName, knownFileName):
        # The currently registered one is good.
        return registered, None
    # Fall back to scanning the supplied search paths.
    for candidate in searchPaths:
        if IsPackageDir(candidate, packageName, knownFileName):
            resolved = os.path.abspath(candidate)
            return resolved, resolved
    raise error("The package %s can not be located" % packageName)
The provided code snippet includes necessary dependencies for implementing the `FindRegisterPackage` function. Write a Python function `def FindRegisterPackage(packageName, knownFile, searchPaths, registryAppName=None)` to solve the following problem:
Find and Register a package. Assumes the core registry setup correctly. In addition, if the location located by the package is already in the **core** path, then an entry is registered, but no path. (no other paths are checked, as the application whose path was used may later be uninstalled. This should not happen with the core)
Here is the function:
def FindRegisterPackage(packageName, knownFile, searchPaths, registryAppName=None):
    """Find and Register a package.
    Assumes the core registry setup correctly.
    In addition, if the location located by the package is already
    in the **core** path, then an entry is registered, but no path.
    (no other paths are checked, as the application whose path was used
    may later be uninstalled. This should not happen with the core)

    Returns the located path on success, None (after printing diagnostics)
    on failure.
    """
    import regutil

    if not packageName:
        raise error("A package name must be supplied")
    corePaths = regutil.GetRegisteredNamedPath(None).split(";")
    if not searchPaths:
        searchPaths = corePaths
    registryAppName = registryAppName or packageName
    try:
        pathLook, pathAdd = FindPackagePath(packageName, knownFile, searchPaths)
        if pathAdd is not None:
            # Already on the core path: register the name with an empty path.
            if pathAdd in corePaths:
                pathAdd = ""
            regutil.RegisterNamedPath(registryAppName, pathAdd)
        return pathLook
    except error as details:
        print(
            "*** The %s package could not be registered - %s" % (packageName, details)
        )
        print(
            "*** Please ensure you have passed the correct paths on the command line."
        )
        print(
            "*** - For packages, you should pass a path to the packages parent directory,"
        )
        print("*** - and not the package directory itself...")
class error(Exception):
    """Generic error raised by the application-registration helpers."""
    # Leading "171,690 |" table-row residue stripped from the class line.
import sys
def FindAppPath(appName, knownFileName, searchPaths):
    """Find an application.
    First place looked is the registry for an existing entry. Then
    the searchPaths are searched.
    """
    import os
    import regutil

    # An existing registration whose first path still holds the known file
    # is considered good - signalled to the caller by returning None.
    registered = regutil.GetRegisteredNamedPath(appName)
    if registered:
        candidate = registered.split(";")[0]
        if FileExists(os.path.join(candidate, knownFileName)):
            return None
    # Otherwise probe each supplied directory in order.
    for candidate in searchPaths:
        if FileExists(os.path.join(candidate, knownFileName)):
            return os.path.abspath(candidate)
    raise error(
        "The file %s can not be located for application %s" % (knownFileName, appName)
    )
The provided code snippet includes necessary dependencies for implementing the `FindRegisterApp` function. Write a Python function `def FindRegisterApp(appName, knownFiles, searchPaths)` to solve the following problem:
Find and Register a package. Assumes the core registry setup correctly.
Here is the function:
def FindRegisterApp(appName, knownFiles, searchPaths):
    """Find and Register a package.
    Assumes the core registry setup correctly.

    `knownFiles` may be a single file name or a sequence of them; the
    located directories are registered as a ";"-joined named path.
    """
    import regutil

    if isinstance(knownFiles, str):  # was type(knownFiles) == type("")
        knownFiles = [knownFiles]
    paths = []
    try:
        for knownFile in knownFiles:
            pathLook = FindAppPath(appName, knownFile, searchPaths)
            if pathLook:
                paths.append(pathLook)
    except error as details:
        print("*** ", details)
        return
    regutil.RegisterNamedPath(appName, ";".join(paths))
171,691 | import sys
def LocateFileName(fileNamesString, searchPaths):
    """Locate a file name, anywhere on the search path.
    If the file can not be located, prompt the user to find it for us
    (using a common OpenFile dialog)
    Raises KeyboardInterrupt if the user cancels.
    """
    import os
    import regutil

    candidates = fileNamesString.split(";")
    found = None
    for directory in searchPaths:
        for candidate in candidates:
            probe = os.path.join(directory, candidate)
            try:
                os.stat(probe)
                found = probe
                break
            except os.error:
                found = None
        if found:
            break
    if not found:
        # Nothing on the search path - ask the user via a common dialog.
        wanted = candidates[0]
        try:
            import win32con
            import win32ui
        except ImportError:
            raise error(
                "Need to locate the file %s, but the win32ui module is not available\nPlease run the program again, passing as a parameter the path to this file."
                % wanted
            )
        flags = win32con.OFN_FILEMUSTEXIST
        ext = os.path.splitext(wanted)[1]
        filter = "Files of requested type (*%s)|*%s||" % (ext, ext)
        dlg = win32ui.CreateFileDialog(1, None, wanted, flags, filter, None)
        dlg.SetOFNTitle("Locate " + wanted)
        if dlg.DoModal() != win32con.IDOK:
            raise KeyboardInterrupt("User cancelled the process")
        found = dlg.GetPathName()
    return os.path.abspath(found)
def LocatePythonCore(searchPaths):
    """Locate and validate the core Python directories. Returns a list
    of paths that should be used as the core (ie, un-named) portion of
    the Python path.
    """
    # NOTE(review): actually returns a 2-tuple (installPath, [libPath,
    # corePath]) - the docstring above describes only the second element.
    import os
    import regutil

    # Start from the currently-registered core path, else the cwd.
    currentPath = regutil.GetRegisteredNamedPath(None)
    if currentPath:
        presearchPaths = currentPath.split(";")
    else:
        presearchPaths = [os.path.abspath(".")]
    # The standard library directory is identified by containing os.py.
    libPath = None
    for path in presearchPaths:
        if FileExists(os.path.join(path, "os.py")):
            libPath = path
            break
    if libPath is None and searchPaths is not None:
        libPath = LocatePath("os.py", searchPaths)
    if libPath is None:
        raise error("The core Python library could not be located.")
    # The binary-extensions directory is identified by unicodedata*.pyd
    # (suffixed "_d" for debug builds - see IsDebug()).
    corePath = None
    suffix = IsDebug()
    for path in presearchPaths:
        if FileExists(os.path.join(path, "unicodedata%s.pyd" % suffix)):
            corePath = path
            break
    if corePath is None and searchPaths is not None:
        corePath = LocatePath("unicodedata%s.pyd" % suffix, searchPaths)
    if corePath is None:
        raise error("The core Python path could not be located.")
    # The install root is assumed to be the library directory's parent.
    installPath = os.path.abspath(os.path.join(libPath, ".."))
    return installPath, [libPath, corePath]
The provided code snippet includes necessary dependencies for implementing the `SetupCore` function. Write a Python function `def SetupCore(searchPaths)` to solve the following problem:
Setup the core Python information in the registry. This function makes no assumptions about the current state of sys.path. After this function has completed, you should have access to the standard Python library, and the standard Win32 extensions
Here is the function:
def SetupCore(searchPaths):
    """Setup the core Python information in the registry.
    This function makes no assumptions about the current state of sys.path.
    After this function has completed, you should have access to the standard
    Python library, and the standard Win32 extensions
    """
    import sys

    # Make the search paths importable before locating anything.
    for path in searchPaths:
        sys.path.append(path)
    import os
    import regutil
    import win32api
    import win32con

    installPath, corePaths = LocatePythonCore(searchPaths)
    # Register the core Pythonpath.
    print(corePaths)
    regutil.RegisterNamedPath(None, ";".join(corePaths))
    # Register the install path.
    hKey = win32api.RegCreateKey(regutil.GetRootKey(), regutil.BuildDefaultPythonKey())
    try:
        # Core Paths.
        win32api.RegSetValue(hKey, "InstallPath", win32con.REG_SZ, installPath)
    finally:
        win32api.RegCloseKey(hKey)
    # Register the win32 core paths.
    win32paths = (
        os.path.abspath(os.path.split(win32api.__file__)[0])
        + ";"
        + os.path.abspath(
            os.path.split(LocateFileName("win32con.py;win32con.pyc", sys.path))[0]
        )
    )
    # Python has builtin support for finding a "DLLs" directory, but
    # not a PCBuild. Having it in the core paths means it is ignored when
    # an EXE not in the Python dir is hosting us - so we add it as a named
    # value
    check = os.path.join(sys.prefix, "PCBuild")
    if "64 bit" in sys.version:
        check = os.path.join(check, "amd64")
    if os.path.isdir(check):
        regutil.RegisterNamedPath("PCBuild", check)
171,692 | import sys
def IsDebug():
    """Return "_d" if we're running a debug version.
    This is to be used within DLL names when locating them.
    """
    import importlib.machinery

    if "_d.pyd" in importlib.machinery.EXTENSION_SUFFIXES:
        return "_d"
    return ""
def QuotedFileName(fname):
    """Given a filename, return a quoted version if necessary"""
    # Dropped an unused function-local `import regutil`, and replaced the
    # try/str.index idiom with a plain membership test.
    if " " in fname:  # Other chars forcing quote?
        return '"%s"' % fname
    # No space in name.
    return fname
def FindRegisterPythonExe(exeAlias, searchPaths, actualFileNames=None):
    """Find and Register a Python exe (not necessarily *the* python.exe)
    Assumes the core registry setup correctly.
    """
    import regutil

    # NOTE(review): `ok` presumably means "already correctly registered" -
    # registration is only performed when it is falsy; confirm against
    # FindPythonExe's contract.
    fname, ok = FindPythonExe(exeAlias, actualFileNames, searchPaths)
    if not ok:
        regutil.RegisterPythonExe(fname, exeAlias)
    return fname
def FindRegisterHelpFile(helpFile, searchPaths, helpDesc=None):
    """Locate `helpFile` on the search paths and register it; on failure
    print the error and return without registering."""
    import regutil

    try:
        location = FindHelpPath(helpFile, helpDesc, searchPaths)
    except error as details:
        print("*** ", details)
        return
    regutil.RegisterHelpFile(helpFile, location, helpDesc)
The provided code snippet includes necessary dependencies for implementing the `RegisterShellInfo` function. Write a Python function `def RegisterShellInfo(searchPaths)` to solve the following problem:
Registers key parts of the Python installation with the Windows Shell. Assumes a valid, minimal Python installation exists (ie, SetupCore() has been previously successfully run)
Here is the function:
def RegisterShellInfo(searchPaths):
    """Registers key parts of the Python installation with the Windows Shell.
    Assumes a valid, minimal Python installation exists
    (ie, SetupCore() has been previously successfully run)
    """
    import regutil
    import win32con

    suffix = IsDebug()
    # Set up a pointer to the .exe's
    exePath = FindRegisterPythonExe("Python%s.exe" % suffix, searchPaths)
    regutil.SetRegistryDefaultValue(".py", "Python.File", win32con.HKEY_CLASSES_ROOT)
    regutil.RegisterShellCommand("Open", QuotedFileName(exePath) + ' "%1" %*', "&Run")
    regutil.SetRegistryDefaultValue(
        "Python.File\\DefaultIcon", "%s,0" % exePath, win32con.HKEY_CLASSES_ROOT
    )
    FindRegisterHelpFile("Python.hlp", searchPaths, "Main Python Documentation")
    FindRegisterHelpFile("ActivePython.chm", searchPaths, "Main Python Documentation")
    # We consider the win32 core, as it contains all the win32 api type
    # stuff we need.
171,693 | import fnmatch
import getopt
import os
import string
import sys
import win32api
import win32con
import win32file
import wincerapi
class InvalidUsage(Exception):
    """Raised when a command is invoked with bad arguments."""
def isdir(name, local=1):
    """Non-zero if `name` is a directory (locally when `local`, otherwise
    on the CE device); 0 when the attributes cannot be read."""
    try:
        return GetFileAttributes(name, local) & win32con.FILE_ATTRIBUTE_DIRECTORY
    except win32api.error:
        return 0
def CopyFileToCe(src_name, dest_name, progress=None):
    """Copy local file `src_name` to `dest_name` on the CE device.

    `progress`, if given, is called with the running byte count after each
    2KB chunk.  Returns the total number of bytes copied.
    """
    sh = win32file.CreateFile(
        src_name, win32con.GENERIC_READ, 0, None, win32con.OPEN_EXISTING, 0, None
    )
    bytes = 0
    try:
        dh = wincerapi.CeCreateFile(
            dest_name, win32con.GENERIC_WRITE, 0, None, win32con.OPEN_ALWAYS, 0, None
        )
        try:
            while 1:
                hr, data = win32file.ReadFile(sh, 2048)
                if not data:
                    break
                wincerapi.CeWriteFile(dh, data)
                bytes = bytes + len(data)
                if progress is not None:
                    progress(bytes)
        finally:
            # Close the device handle even when the copy loop raises.
            # (Originally the inner finally was `pass` and this Close sat
            # outside it, leaking the handle on failure.)
            dh.Close()
    finally:
        sh.Close()
    return bytes
def BuildFileList(spec, local, recurse, filter, filter_args, recursed_path=""):
files = []
if isdir(spec, local):
path = spec
raw_spec = "*"
else:
path, raw_spec = os.path.split(spec)
if recurse:
# Need full scan, to get sub-direcetories.
infos = FindFiles(os.path.join(path, "*"), local)
else:
infos = FindFiles(os.path.join(path, raw_spec), local)
for info in infos:
src_name = str(info[8])
full_src_name = os.path.join(path, src_name)
if local: # Can't do this for CE!
full_src_name = win32api.GetFullPathName(full_src_name)
if isdir(full_src_name, local):
if recurse and src_name not in [".", ".."]:
new_spec = os.path.join(full_src_name, raw_spec)
files = files + BuildFileList(
new_spec,
local,
1,
filter,
filter_args,
os.path.join(recursed_path, src_name),
)
if fnmatch.fnmatch(src_name, raw_spec):
rel_name = os.path.join(recursed_path, src_name)
filter_data = filter(full_src_name, rel_name, info, local, filter_args)
if filter_data is not None:
files.append((full_src_name, info, filter_data))
return files
def _copyfilter(full_name, rel_name, info, local, bMaintainDir):
    """Copy-filter: skip directories; keep the relative name when the
    directory layout is preserved, else just the base name."""
    if isdir(full_name, local):
        return None
    return rel_name if bMaintainDir else os.path.split(rel_name)[1]
import pywin.dialogs.status
import win32ui
class FileCopyProgressDialog(pywin.dialogs.status.CStatusProgressDialog):
    # Progress dialog shown while copying files to/from the CE device.
    def CopyProgress(self, bytes):
        # Progress callback: `bytes` is the running byte count of the copy;
        # the gauge is scaled to KB to match the range set by the caller.
        self.Set(bytes / 1024)
The provided code snippet includes necessary dependencies for implementing the `copy` function. Write a Python function `def copy(args)` to solve the following problem:
copy src [src ...], dest Copy files to/from the CE device
Here is the function:
def copy(args):
    """copy src [src ...], dest
    Copy files to/from the CE device

    Options: -r recurse into sub-directories, -v verbose output.
    """
    bRecurse = bVerbose = 0
    bMaintainDir = 1
    try:
        opts, args = getopt.getopt(args, "rv")
    except getopt.error as details:
        raise InvalidUsage(details)
    for o, v in opts:
        if o == "-r":
            # Was `bRecuse = 1` - a typo that made -r a silent no-op.
            bRecurse = 1
        elif o == "-v":
            bVerbose = 1
    if len(args) < 2:
        raise InvalidUsage("Must specify a source and destination")
    src = args[:-1]
    dest = args[-1]
    # See if WCE: leading anywhere indicates a direction.
    # (str.startswith replaces string.find(...) == 0 - the `string` module
    # functions were removed in Python 3.)
    if src[0].startswith("WCE:"):
        bToDevice = 0
    elif dest.startswith("WCE:"):
        bToDevice = 1
    else:
        # Assume copy to device.
        bToDevice = 1
    if not isdir(dest, not bToDevice):
        # Was missing the `% dest` operand - it printed the literal format.
        print("%s does not indicate a directory" % dest)
    files = []  # List of FQ (from_name, to_name)
    num_files = 0
    num_bytes = 0
    dialog = FileCopyProgressDialog("Copying files")
    dialog.CreateWindow(win32ui.GetMainFrame())
    if bToDevice:
        for spec in src:
            new = BuildFileList(spec, 1, bRecurse, _copyfilter, bMaintainDir)
            if not new:
                print("Warning: '%s' did not match any files" % (spec))
            files = files + new
        for full_src, src_info, dest_info in files:
            dest_name = os.path.join(dest, dest_info)
            size = src_info[5]
            print("Size=", size)
            if bVerbose:
                print(full_src, "->", dest_name, "- ", end=" ")
            dialog.SetText(dest_name)
            dialog.Set(0, size / 1024)
            bytes = CopyFileToCe(full_src, dest_name, dialog.CopyProgress)
            num_bytes = num_bytes + bytes
            if bVerbose:
                print(bytes, "bytes")
            num_files = num_files + 1
    dialog.Close()
    print("%d files copied (%d bytes)" % (num_files, num_bytes))
171,694 | import fnmatch
import getopt
import os
import string
import sys
import win32api
import win32con
import win32file
import wincerapi
class InvalidUsage(Exception):
    """Signals a badly-formed command line for a CE sub-command."""
def BuildFileList(spec, local, recurse, filter, filter_args, recursed_path=""):
files = []
if isdir(spec, local):
path = spec
raw_spec = "*"
else:
path, raw_spec = os.path.split(spec)
if recurse:
# Need full scan, to get sub-direcetories.
infos = FindFiles(os.path.join(path, "*"), local)
else:
infos = FindFiles(os.path.join(path, raw_spec), local)
for info in infos:
src_name = str(info[8])
full_src_name = os.path.join(path, src_name)
if local: # Can't do this for CE!
full_src_name = win32api.GetFullPathName(full_src_name)
if isdir(full_src_name, local):
if recurse and src_name not in [".", ".."]:
new_spec = os.path.join(full_src_name, raw_spec)
files = files + BuildFileList(
new_spec,
local,
1,
filter,
filter_args,
os.path.join(recursed_path, src_name),
)
if fnmatch.fnmatch(src_name, raw_spec):
rel_name = os.path.join(recursed_path, src_name)
filter_data = filter(full_src_name, rel_name, info, local, filter_args)
if filter_data is not None:
files.append((full_src_name, info, filter_data))
return files
import pywin.dialogs.status
import win32ui
def _dirfilter(*args):
    """Directory-listing filter: keep every entry, keyed by the relative
    name (the second positional argument)."""
    rel_name = args[1]
    return rel_name
The provided code snippet includes necessary dependencies for implementing the `dir` function. Write a Python function `def dir(args)` to solve the following problem:
dir directory_name ... Perform a directory listing on the remote device
Here is the function:
def dir(args):
    """dir directory_name ...
    Perform a directory listing on the remote device

    Option: -r lists sub-directories recursively.
    (Note: intentionally shadows the builtin `dir` - it is a sub-command
    name dispatched by name.)
    """
    bRecurse = 0
    try:
        opts, args = getopt.getopt(args, "r")
    except getopt.error as details:
        raise InvalidUsage(details)
    for o, v in opts:
        if o == "-r":
            bRecurse = 1
    for arg in args:
        print("Directory of WCE:%s" % arg)
        files = BuildFileList(arg, 0, bRecurse, _dirfilter, None)
        total_size = 0
        for full_name, info, rel_name in files:
            date_str = info[3].Format("%d-%b-%Y %H:%M")
            attr_string = "     "
            if info[0] & win32con.FILE_ATTRIBUTE_DIRECTORY:
                attr_string = "<DIR>"
            print("%s  %s %10d %s" % (date_str, attr_string, info[5], rel_name))
            total_size = total_size + info[5]
        print(" " * 14 + "%3d files, %10d bytes" % (len(files), total_size))
171,695 | import fnmatch
import getopt
import os
import string
import sys
import win32api
import win32con
import win32file
import wincerapi
import pywin.dialogs.status
import win32ui
The provided code snippet includes necessary dependencies for implementing the `run` function. Write a Python function `def run(args)` to solve the following problem:
run program [args] Starts the specified program on the remote device.
Here is the function:
def run(args):
    """run program [args]
    Starts the specified program on the remote device.
    """
    prog_args = []
    for arg in args:
        # Quote any argument containing a space.
        if " " in arg:
            prog_args.append('"' + arg + '"')
        else:
            prog_args.append(arg)
    # Was string.join(prog_args, " ") - the `string` module functions were
    # removed in Python 3.
    prog_args = " ".join(prog_args)
    wincerapi.CeCreateProcess(prog_args, "", None, None, 0, 0, None, "", None)
171,696 | import fnmatch
import getopt
import os
import string
import sys
import win32api
import win32con
import win32file
import wincerapi
def print_error(api_exc, msg):
    """Print `msg` followed by the message and HRESULT of a pywin32
    api error exception (args = (hr, funcname, errmsg))."""
    # Unpacking the exception instance directly worked in py2 only; in
    # Python 3 the tuple lives in .args.
    hr, fn, errmsg = api_exc.args
    print("%s - %s(%d)" % (msg, errmsg, hr))
import pywin.dialogs.status
import win32ui
The provided code snippet includes necessary dependencies for implementing the `delete` function. Write a Python function `def delete(args)` to solve the following problem:
delete file, ... Delete one or more remote files
Here is the function:
def delete(args):
    """delete file, ...
    Delete one or more remote files
    """
    # Trailing docstring-column residue stripped from the final line.
    for arg in args:
        try:
            wincerapi.CeDeleteFile(arg)
            print("Deleted: %s" % arg)
        except win32api.error as details:
            print_error(details, "Error deleting '%s'" % arg)
171,697 | import fnmatch
import getopt
import os
import string
import sys
import win32api
import win32con
import win32file
import wincerapi
import pywin.dialogs.status
import win32ui
def DumpCommands():
    """Command
    Print a table of all sub-commands defined in this module, using the
    first docstring line of each module-level function as its description.
    """
    print("%-10s - %s" % ("Command", "Description"))
    print("%-10s - %s" % ("-------", "-----------"))
    for name, item in list(globals().items()):
        if type(item) == type(DumpCommands):
            doc = getattr(item, "__doc__", "")
            if doc:
                # Was string.split(doc, "\n") - removed in Python 3.
                lines = doc.split("\n")
                print("%-10s - %s" % (name, lines[0]))
                for line in lines[1:]:
                    if line:
                        print(" " * 8, line)
171,698 | import os
import time
import win32api
import win32evtlog
def BackupClearLog(logType):
    """Back up the named event log (e.g. "System", "Application") to a
    uniquely-named .evt file in the temp directory, then clear the log.
    Empty logs are left untouched."""
    datePrefix = time.strftime("%Y%m%d", time.localtime(time.time()))
    fileExists = 1
    retry = 0
    # Probe for a temp file name that does not already exist.
    while fileExists:
        if retry == 0:
            index = ""
        else:
            index = "-%d" % retry
        try:
            fname = os.path.join(
                win32api.GetTempPath(),
                "%s%s-%s" % (datePrefix, index, logType) + ".evt",
            )
            os.stat(fname)
        except os.error:
            fileExists = 0
        retry = retry + 1
    # OK - have unique file name.
    try:
        hlog = win32evtlog.OpenEventLog(None, logType)
    except win32api.error as details:
        # Was `except win32evtlogutil.error` - win32evtlogutil is never
        # imported here (NameError); all pywin32 modules raise the shared
        # pywintypes.error, exposed as win32api.error.
        print("Could not open the event log", details)
        return
    try:
        if win32evtlog.GetNumberOfEventLogRecords(hlog) == 0:
            print("No records in event log %s - not backed up" % logType)
            return
        win32evtlog.ClearEventLog(hlog, fname)
        print("Backed up %s log to %s" % (logType, fname))
    finally:
        win32evtlog.CloseEventLog(hlog)
171,699 | import string
import time
import traceback
import pythoncom
import win32com.client
import win32com.client.gencache
import win32con
constants = win32com.client.constants
def VssLog(project, linePrefix="", noLabels=5, maxItems=150):
    """Return a newline-joined changelog string for a SourceSafe project.

    project    -- an open VSS project COM object (must expose GetVersions).
    linePrefix -- text prepended to every emitted line.
    noLabels   -- stop after this many labelled versions have been seen.
    maxItems   -- hard cap on the number of history entries examined.
    """
    lines = []
    num = 0
    labelNum = 0
    # Walk the version history, recursing into sub-projects.
    for i in project.GetVersions(constants.VSSFLAG_RECURSYES):
        num = num + 1
        if num > maxItems:
            break
        commentDesc = itemDesc = ""
        # Skip the noisy "Added xxx" entries.
        if i.Action[:5] == "Added":
            continue
        if len(i.Label):
            labelNum = labelNum + 1
            itemDesc = i.Action
        else:
            itemDesc = i.VSSItem.Name
            if str(itemDesc[-4:]) == ".dsp":
                continue
        if i.Comment:
            commentDesc = "\n%s\t%s" % (linePrefix, i.Comment)
        lines.append(
            "%s%s\t%s%s"
            % (
                linePrefix,
                time.asctime(time.localtime(int(i.Date))),
                itemDesc,
                commentDesc,
            )
        )
        if labelNum > noLabels:
            break
    # BUG FIX: `string.join(lines, "\n")` was removed in Python 3.
    return "\n".join(lines)
171,700 | import os
import string
import sys
import bulkstamp
import vssutil
import win32api
def BrandProject(
    vssProjectName,
    descFile,
    stampPath,
    filesToSubstitute,
    buildDesc=None,
    auto=0,
    bRebrand=0,
):
    # Label a VSS project with a new build number and stamp released binaries.
    # vssProjectName -- The name of the VSS project to brand.
    # descFile -- A test file containing descriptions of the files in the release.
    # stampPath -- The full path to where the files referenced in descFile can be found.
    # filesToSubstitute -- iterable of (infile, outfile) pairs whose VSS labels
    #                      are substituted after stamping.
    # buildDesc -- optional description for the new VSS build label.
    # auto -- if true, pick the next build number without prompting.
    # bRebrand -- if true, re-use the existing build number.
    # Returns 1 on success, None if the user cancelled.
    path = win32api.GetFullPathName(stampPath)
    # NOTE(review): `path` is computed but never used below — bulkstamp.scan is
    # handed the raw `stampPath`; confirm whether `path` was meant instead.
    build = vssutil.MakeNewBuildNo(vssProjectName, buildDesc, auto, bRebrand)
    if build is None:
        print("Cancelled")
        return
    bulkstamp.scan(build, stampPath, descFile)
    for infile, outfile in filesToSubstitute:
        # NOTE(review): SubstituteVSSInFile is not defined by the visible
        # imports (only `import vssutil`) — verify it resolves at runtime.
        SubstituteVSSInFile(vssProjectName, infile, outfile)
    return 1
171,701 | import os
import string
import sys
import bulkstamp
import vssutil
import win32api
def usage(msg):
    """Print *msg* followed by the command-line help text, then exit(1)."""
    prog = os.path.basename(sys.argv[0])
    print(msg)
    print(
        """\
%s Usage:
%s [options] vssProject descFile stampPath
Automatically brand a VSS project with an automatically incremented
build number, and stamp DLL/EXE files with the build number.
Checks that no files are checked out in the project, and finds the last
build number, and suggests the next number.
Options:
-a - Auto increment the build number, and brand (otherwise prompt
for the build number after looking for the previous)
-r - Restamp the files with the existing build number.
-d - A description for the VSS Label.
-f infile=outfile - Substitute special VSS labels in the specified text
file with the text extracted from VSS.
"""
        % (prog, prog)
    )
    sys.exit(1)
171,702 | from collections import namedtuple
from tornado import web, gen
from jupyter_server.transutils import _i18n
from jupyter_server.utils import (
ensure_async
)
from jupyter_server.base.handlers import path_regex, FilesRedirectHandler
from jupyter_server.extension.handler import (
ExtensionHandlerMixin,
ExtensionHandlerJinjaMixin
)
from jupyter_server.base.handlers import JupyterHandler
from nbclassic import nbclassic_path
# NOTE(review): this looks like a typeshed-style stub for collections.namedtuple
# pasted into a runtime module; `Union`, `unicode`, `Iterable`, `Type`, `Tuple`
# and `Any` are not imported here, and `unicode` does not exist on Python 3 —
# confirm whether this block belongs in the file at all.  It also shadows the
# `namedtuple` imported from collections above.
def namedtuple(
    typename: Union[str, unicode],
    field_names: Union[str, unicode, Iterable[Union[str, unicode]]],
    verbose: bool = ...,
    rename: bool = ...,
) -> Type[Tuple[Any, ...]]: ...
# NOTE(review): `trans` is not defined by the visible imports — presumably a
# gettext translator object; verify against the module that provides it.
_i18n = trans.gettext
def get_exporter(name, config=get_config()):  # noqa
    """Given an exporter name or import path, return a class ready to be instantiated

    Raises ExporterNameError if the exporter is not found, or
    ExporterDisabledError if it is not enabled in the configuration.
    """
    # NOTE(review): `config=get_config()` is evaluated once at import time, so
    # every call without an explicit config shares that snapshot (hence the
    # `# noqa`) — confirm this is intentional.
    if name == "ipynb":
        # Historical alias: the "ipynb" exporter was renamed "notebook".
        name = "notebook"
    try:
        # First, look the name up among registered entry points in the
        # "nbconvert.exporters" group (exact match, then lower-cased).
        exporters = entry_points(group="nbconvert.exporters")
        exporter = [e for e in exporters if e.name == name or e.name == name.lower()][0].load()
        if getattr(exporter(config=config), "enabled", True):
            return exporter
        else:
            raise ExporterDisabledError('Exporter "%s" disabled in configuration' % (name))
    except IndexError:
        # No entry point of that name — fall through to dotted-path handling.
        pass
    if "." in name:
        # Treat the name as a fully-qualified import path ("pkg.mod.Class").
        try:
            exporter = import_item(name)
            if getattr(exporter(config=config), "enabled", True):
                return exporter
            else:
                raise ExporterDisabledError('Exporter "%s" disabled in configuration' % (name))
        except ImportError:
            # Log and fall through to the "unknown exporter" error below.
            log = get_logger()
            log.error("Error importing %s" % name, exc_info=True)
    msg = 'Unknown exporter "{}", did you mean one of: {}?'.format(
        name, ", ".join(get_export_names())
    )
    raise ExporterNameError(msg)
def get_export_names(config=get_config()):  # noqa
    """Return a list of the currently supported export targets

    Exporters can be found in external packages by registering
    them as an nbconvert.exporters entrypoint.
    """
    # NOTE(review): like get_exporter, the default config is captured once at
    # import time (see the `# noqa`).
    exporters = sorted(e.name for e in entry_points(group="nbconvert.exporters"))
    if os.environ.get("NBCONVERT_DISABLE_CONFIG_EXPORTERS"):
        # Escape hatch: skip instantiating each exporter (which loads config)
        # and report every registered name as-is.
        get_logger().info(
            "Config exporter loading disabled, no additional exporters will be automatically included."
        )
        return exporters
    enabled_exporters = []
    for exporter_name in exporters:
        try:
            e = get_exporter(exporter_name)(config=config)
            if e.enabled:
                enabled_exporters.append(exporter_name)
        except (ExporterDisabledError, ValueError):
            # Disabled or mis-configured exporters are silently omitted.
            pass
    return enabled_exporters
def get_frontend_exporters():
    """Return sorted ExporterInfo(name, display) tuples for the notebook UI's
    "Download as" menu.

    Exporters that explicitly define ``export_from_notebook`` are taken from
    nbconvert; a hard-coded default set is merged in to cope with nbconvert
    releases older than 5.5.
    """
    from nbconvert.exporters.base import get_export_names, get_exporter

    # name=exporter_name, display=export_from_notebook+extension
    ExporterInfo = namedtuple('ExporterInfo', ['name', 'display'])

    default_exporters = [
        ExporterInfo(name='html', display='HTML (.html)'),
        ExporterInfo(name='latex', display='LaTeX (.tex)'),
        ExporterInfo(name='markdown', display='Markdown (.md)'),
        ExporterInfo(name='notebook', display='Notebook (.ipynb)'),
        ExporterInfo(name='pdf', display='PDF via LaTeX (.pdf)'),
        ExporterInfo(name='rst', display='reST (.rst)'),
        ExporterInfo(name='script', display='Script (.txt)'),
        ExporterInfo(name='slides', display='Reveal.js slides (.slides.html)')
    ]

    frontend_exporters = []
    for name in get_export_names():
        exporter_class = get_exporter(name)
        exporter_instance = exporter_class()
        ux_name = getattr(exporter_instance, 'export_from_notebook', None)
        super_uxname = getattr(super(exporter_class, exporter_instance),
                               'export_from_notebook', None)

        # Ensure export_from_notebook is explicitly defined & not inherited
        if ux_name is not None and ux_name != super_uxname:
            display = _i18n('{} ({})'.format(ux_name,
                                             exporter_instance.file_extension))
            frontend_exporters.append(ExporterInfo(name, display))

    # Ensure default_exporters are in frontend_exporters if not already
    # This protects against nbconvert versions lower than 5.5
    names = set(exporter.name.lower() for exporter in frontend_exporters)
    for exporter in default_exporters:
        if exporter.name not in names:
            frontend_exporters.append(exporter)

    # Protect against nbconvert 5.5.0
    python_exporter = ExporterInfo(name='python', display='python (.py)')
    if python_exporter in frontend_exporters:
        frontend_exporters.remove(python_exporter)

    # Protect against nbconvert 5.4.x
    template_exporter = ExporterInfo(name='custom', display='custom (.txt)')
    if template_exporter in frontend_exporters:
        frontend_exporters.remove(template_exporter)

    return sorted(frontend_exporters)
171,705 | import sys
The provided code snippet includes necessary dependencies for implementing the `shim_notebook` function. Write a Python function `def shim_notebook()` to solve the following problem:
Define in sys.modules the needed notebook packages that should be fulfilled by their corresponding and backwards-compatible jupyter-server packages. TODO Can we lazy-load these modules? Note: We could use a custom module loader to achieve similar functionality. The logic for conditional loading seems to be more complicated than simply listing by hand the needed subpackages, but could avoid latency on server start. https://docs.python.org/3/library/importlib.html#importlib.abc.Loader These are the notebook packages we need to shim: auth base bundler <- no, already available in nbclassic edit <- no, already available in nbclassic files gateway i18n <- no, already available in nbclassic kernelspecs nbconvert notebook <- no, already available in nbclassic prometheus services static <- no, already available in nbclassic templates <- no, already available in nbclassic terminal <- no, already available in nbclassic tests <- no, already available in nbclassic tree <- no, already available in nbclassic view __init__.py <- no, already available in nbclassic __main__.py <- no, already available in nbclassic _sysinfo.py <- no, already available in nbclassic _tz.py _version.py <- no, already available in nbclassic config_manager.py <- no, already available in nbclassic extensions.py <- no, already available in nbclassic jstest.py <- no, already available in nbclassic log.py nbextensions.py <- no, already available in nbclassic notebookapp.py <- no, already available in nbclassic serverextensions.py <- no, already available in nbclassic traittypes.py <- no, already available in nbclassic transutils.py <- no, already available in nbclassic utils.py
Here is the function:
def shim_notebook():
    """Define in sys.modules the needed notebook packages that should be fulfilled by
    their corresponding and backwards-compatible jupyter-server packages.

    TODO Can we lazy-load these modules?

    Note: We could use a custom module loader to achieve similar functionality. The
    logic for conditional loading seems to be more complicated than simply
    listing by hand the needed subpackages, but could avoid latency on server start.
    https://docs.python.org/3/library/importlib.html#importlib.abc.Loader

    These are the notebook packages we need to shim:
    auth
    base
    bundler <- no, already available in nbclassic
    edit <- no, already available in nbclassic
    files
    gateway
    i18n <- no, already available in nbclassic
    kernelspecs
    nbconvert
    notebook <- no, already available in nbclassic
    prometheus
    services
    static <- no, already available in nbclassic
    templates <- no, already available in nbclassic
    terminal <- no, already available in nbclassic
    tests <- no, already available in nbclassic
    tree <- no, already available in nbclassic
    view
    __init__.py <- no, already available in nbclassic
    __main__.py <- no, already available in nbclassic
    _sysinfo.py <- no, already available in nbclassic
    _tz.py
    _version.py <- no, already available in nbclassic
    config_manager.py <- no, already available in nbclassic
    extensions.py <- no, already available in nbclassic
    jstest.py <- no, already available in nbclassic
    log.py
    nbextensions.py <- no, already available in nbclassic
    notebookapp.py <- no, already available in nbclassic
    serverextensions.py <- no, already available in nbclassic
    traittypes.py <- no, already available in nbclassic
    transutils.py <- no, already available in nbclassic
    utils.py
    """
    # Alias each required jupyter_server subpackage under its legacy
    # "notebook.*" name so that old-style imports continue to resolve.
    from jupyter_server import auth
    sys.modules["notebook.auth"] = auth
    from jupyter_server import base
    sys.modules["notebook.base"] = base
    from jupyter_server import files
    sys.modules["notebook.files"] = files
    from jupyter_server import gateway
    sys.modules["notebook.gateway"] = gateway
    from jupyter_server import kernelspecs
    sys.modules["notebook.kernelspecs"] = kernelspecs
    from jupyter_server import nbconvert
    sys.modules["notebook.nbconvert"] = nbconvert
    from jupyter_server import prometheus
    sys.modules["notebook.prometheus"] = prometheus
    from jupyter_server import services
    sys.modules["notebook.services"] = services
    from jupyter_server import view
    sys.modules["notebook.view"] = view
    from jupyter_server import _tz
    sys.modules["notebook._tz"] = _tz
    from jupyter_server import log
    sys.modules["notebook.log"] = log
    from jupyter_server import utils
    sys.modules["notebook.utils"] = utils
    from jupyter_server.base import handlers
    # Legacy alias: notebook's IPythonHandler became jupyter_server's
    # JupyterHandler; patch it onto the shimmed base.handlers module.
    base.handlers.IPythonHandler = handlers.JupyterHandler
    # NOTE(review): this sys.modules key contains an attribute path, which the
    # import machinery never looks up directly — presumably informational only;
    # confirm it is still needed.
    sys.modules["notebook.base.handlers.IPythonHandler"] = base.handlers.JupyterHandler
171,708 | import os
import platform
import pprint
import sys
import subprocess
from ipython_genutils import py3compat, encoding
import nbclassic
from nbclassic import _version
def pkg_info(pkg_path):
    """Return dict describing the context of this package

    Parameters
    ----------
    pkg_path : str
       path containing __init__.py for package

    Returns
    -------
    context : dict
       with named parameters of interest
    """
    # Commit provenance recorded at build/install time: (source, hash).
    # NOTE(review): pkg_commit_hash is not among the visible imports — confirm
    # where it is defined.
    src, hsh = pkg_commit_hash(pkg_path)
    return dict(
        # NOTE(review): this stores the `_version` *module* object, not a
        # version string — confirm whether `_version.__version__` was intended.
        notebook_version=_version,
        notebook_path=pkg_path,
        commit_source=src,
        commit_hash=hsh,
        sys_version=sys.version,
        sys_executable=sys.executable,
        sys_platform=sys.platform,
        platform=platform.platform(),
        os_name=os.name,
        default_encoding=encoding.DEFAULT_ENCODING,
    )
The provided code snippet includes necessary dependencies for implementing the `get_sys_info` function. Write a Python function `def get_sys_info()` to solve the following problem:
Return useful information about the system as a dict.
Here is the function:
def get_sys_info():
    """Return useful information about the system as a dict."""
    # Resolve the real on-disk directory of the nbclassic package and
    # describe it.
    pkg_dir = os.path.dirname(os.path.abspath(nbclassic.__file__))
    return pkg_info(os.path.realpath(pkg_dir))
171,709 | import os
import io
import tarfile
import nbformat
The provided code snippet includes necessary dependencies for implementing the `_jupyter_bundlerextension_paths` function. Write a Python function `def _jupyter_bundlerextension_paths()` to solve the following problem:
Metadata for notebook bundlerextension
Here is the function:
def _jupyter_bundlerextension_paths():
"""Metadata for notebook bundlerextension"""
return [{
# unique bundler name
"name": "tarball_bundler",
# module containing bundle function
"module_name": "nbclassic.bundler.tarball_bundler",
# human-readable menu item label
"label" : "Notebook Tarball (tar.gz)",
# group under 'deploy' or 'download' menu
"group" : "download",
}] | Metadata for notebook bundlerextension |
171,710 | import os
import io
import tarfile
import nbformat
The provided code snippet includes necessary dependencies for implementing the `bundle` function. Write a Python function `def bundle(handler, model)` to solve the following problem:
Create a compressed tarball containing the notebook document. Parameters ---------- handler : tornado.web.RequestHandler Handler that serviced the bundle request model : dict Notebook model from the configured ContentManager
Here is the function:
def bundle(handler, model):
    """Send the notebook document back as a gzipped tarball download.

    Parameters
    ----------
    handler : tornado.web.RequestHandler
        Handler that serviced the bundle request
    model : dict
        Notebook model from the configured ContentManager
    """
    source_name = model['name']
    payload = nbformat.writes(model['content']).encode('utf-8')
    stem = os.path.splitext(source_name)[0]
    tar_filename = '{}.tar.gz'.format(stem)

    member = tarfile.TarInfo(source_name)
    member.size = len(payload)

    with io.BytesIO() as tar_buffer:
        with tarfile.open(tar_filename, "w:gz", fileobj=tar_buffer) as tar:
            tar.addfile(member, io.BytesIO(payload))
        handler.set_attachment_header(tar_filename)
        handler.set_header('Content-Type', 'application/gzip')
        # Return the buffer value as the response
        handler.finish(tar_buffer.getvalue())
171,712 | import os
import io
import zipfile
import nbclassic.bundler.tools as tools
The provided code snippet includes necessary dependencies for implementing the `_jupyter_bundlerextension_paths` function. Write a Python function `def _jupyter_bundlerextension_paths()` to solve the following problem:
Metadata for notebook bundlerextension
Here is the function:
def _jupyter_bundlerextension_paths():
"""Metadata for notebook bundlerextension"""
return [{
'name': 'notebook_zip_download',
'label': 'IPython Notebook bundle (.zip)',
'module_name': 'nbclassic.bundler.zip_bundler',
'group': 'download'
}] | Metadata for notebook bundlerextension |
171,713 | import os
import io
import zipfile
import nbclassic.bundler.tools as tools
The provided code snippet includes necessary dependencies for implementing the `bundle` function. Write a Python function `def bundle(handler, model)` to solve the following problem:
Create a zip file containing the original notebook and files referenced from it. Retain the referenced files in paths relative to the nbclassic. Return the zip as a file download. Assumes the notebook and other files are all on local disk. Parameters ---------- handler : tornado.web.RequestHandler Handler that serviced the bundle request model : dict Notebook model from the configured ContentManager
Here is the function:
def bundle(handler, model):
    """Create a zip file containing the original notebook and files referenced
    from it. Retain the referenced files in paths relative to the notebook.
    Return the zip as a file download.

    Assumes the notebook and other files are all on local disk.

    Parameters
    ----------
    handler : tornado.web.RequestHandler
        Handler that serviced the bundle request
    model : dict
        Notebook model from the configured ContentManager
    """
    abs_nb_path = os.path.join(
        handler.settings['contents_manager'].root_dir, model['path']
    )
    notebook_filename = model['name']
    notebook_name = os.path.splitext(notebook_filename)[0]

    # Headers
    zip_filename = os.path.splitext(notebook_name)[0] + '.zip'
    handler.set_attachment_header(zip_filename)
    handler.set_header('Content-Type', 'application/zip')

    # Get associated files (search up to 4 levels of references)
    ref_filenames = tools.get_file_references(abs_nb_path, 4)

    # Prepare the zip file.
    # FIX: use a context manager so the ZipFile is always closed (and its
    # central directory flushed) even if writing a referenced file raises;
    # the original leaked the open handle on error.
    zip_buffer = io.BytesIO()
    with zipfile.ZipFile(zip_buffer, mode='w',
                         compression=zipfile.ZIP_DEFLATED) as zipf:
        zipf.write(abs_nb_path, notebook_filename)
        notebook_dir = os.path.dirname(abs_nb_path)
        for nb_relative_filename in ref_filenames:
            # Build absolute path to file on disk
            abs_fn = os.path.join(notebook_dir, nb_relative_filename)
            # Store file under path relative to notebook
            zipf.write(abs_fn, nb_relative_filename)

    # Return the buffer value as the response
    handler.finish(zip_buffer.getvalue())
171,714 | import asyncio
import inspect
import concurrent.futures
from nbclassic import nbclassic_path
from traitlets.utils.importstring import import_item
from tornado import web, gen
from jupyter_server.utils import url2path
from jupyter_server.base.handlers import JupyterHandler
from jupyter_server.services.config import ConfigManager
from . import tools
The provided code snippet includes necessary dependencies for implementing the `maybe_future` function. Write a Python function `def maybe_future(obj)` to solve the following problem:
Like tornado's deprecated gen.maybe_future but more compatible with asyncio for recent versions of tornado
Here is the function:
def maybe_future(obj):
    """Wrap *obj* so it can be awaited, mirroring tornado's deprecated
    ``gen.maybe_future`` while staying friendly to modern asyncio.

    - awaitables are scheduled via ``asyncio.ensure_future``
    - ``concurrent.futures.Future`` instances are bridged via ``wrap_future``
    - any other value is returned inside an already-resolved asyncio Future
    """
    if inspect.isawaitable(obj):
        return asyncio.ensure_future(obj)
    if isinstance(obj, concurrent.futures.Future):
        return asyncio.wrap_future(obj)
    # Plain value: hand back a future that is already done.
    wrapper = asyncio.Future()
    wrapper.set_result(obj)
    return wrapper
171,715 | import sys
import os
from ..extensions import BaseExtensionApp, _get_config_dir, GREEN_ENABLED, RED_DISABLED
from .._version import __version__
from nbclassic.config_manager import BaseJSONConfigManager
from jupyter_core.paths import jupyter_config_path
from traitlets.utils.importstring import import_item
from traitlets import Bool
def _set_bundler_state_python(state, module, user, sys_prefix, logger=None):
    """Enable or disable every bundler advertised by a Python package.

    Returns a list of booleans, one per bundler, telling whether the
    requested state was achieved.

    Parameters
    ----------
    state : Bool
        Whether the extensions should be enabled
    module : str
        Importable Python module exposing the
        magic-named `_jupyter_bundlerextension_paths` function
    user : bool
        Whether to enable in the user's nbconfig directory.
    sys_prefix : bool
        Enable/disable in the sys.prefix, i.e. environment
    logger : Jupyter logger [optional]
        Logger instance to use
    """
    m, bundlers = _get_bundler_metadata(module)
    results = []
    for bundler in bundlers:
        results.append(
            _set_bundler_state(
                name=bundler["name"],
                label=bundler["label"],
                module_name=bundler["module_name"],
                group=bundler["group"],
                state=state,
                user=user,
                sys_prefix=sys_prefix,
                logger=logger,
            )
        )
    return results
The provided code snippet includes necessary dependencies for implementing the `enable_bundler_python` function. Write a Python function `def enable_bundler_python(module, user=True, sys_prefix=False, logger=None)` to solve the following problem:
Enables bundlers defined in a Python package. Returns whether each bundle defined in the packaged was enabled or not. Parameters ---------- module : str Importable Python module exposing the magic-named `_jupyter_bundlerextension_paths` function user : bool [default: True] Whether to enable in the user's nbconfig directory. sys_prefix : bool [default: False] Whether to enable in the sys.prefix, i.e. environment. Will override `user` logger : Jupyter logger [optional] Logger instance to use
Here is the function:
def enable_bundler_python(module, user=True, sys_prefix=False, logger=None):
    """Enable every bundlerextension declared by a Python package.

    Thin wrapper over the shared state setter with ``state=True``; returns,
    per declared bundler, whether it was enabled.

    Parameters
    ----------
    module : str
        Importable Python module exposing the magic-named
        `_jupyter_bundlerextension_paths` function
    user : bool [default: True]
        Whether to enable in the user's nbconfig directory.
    sys_prefix : bool [default: False]
        Whether to enable in the sys.prefix, i.e. environment.
        Overrides `user`.
    logger : Jupyter logger [optional]
        Logger instance to use
    """
    return _set_bundler_state_python(
        True, module, user, sys_prefix, logger=logger
    )
171,716 | import sys
import os
from ..extensions import BaseExtensionApp, _get_config_dir, GREEN_ENABLED, RED_DISABLED
from .._version import __version__
from nbclassic.config_manager import BaseJSONConfigManager
from jupyter_core.paths import jupyter_config_path
from traitlets.utils.importstring import import_item
from traitlets import Bool
def _set_bundler_state_python(state, module, user, sys_prefix, logger=None):
    """Enable or disable the bundlers a Python package declares.

    Returns a list with one entry per declared bundler: whether the
    requested state was achieved for it.

    Parameters
    ----------
    state : Bool
        Whether the extensions should be enabled
    module : str
        Importable Python module exposing the
        magic-named `_jupyter_bundlerextension_paths` function
    user : bool
        Whether to enable in the user's nbconfig directory.
    sys_prefix : bool
        Enable/disable in the sys.prefix, i.e. environment
    logger : Jupyter logger [optional]
        Logger instance to use
    """
    m, bundlers = _get_bundler_metadata(module)

    def _apply(bundler):
        # Push the desired state for a single declared bundler.
        return _set_bundler_state(
            name=bundler["name"],
            label=bundler["label"],
            module_name=bundler["module_name"],
            group=bundler["group"],
            state=state,
            user=user,
            sys_prefix=sys_prefix,
            logger=logger,
        )

    return [_apply(bundler) for bundler in bundlers]
The provided code snippet includes necessary dependencies for implementing the `disable_bundler_python` function. Write a Python function `def disable_bundler_python(module, user=True, sys_prefix=False, logger=None)` to solve the following problem:
Disables bundlers defined in a Python package. Returns whether each bundle defined in the packaged was enabled or not. Parameters ---------- module : str Importable Python module exposing the magic-named `_jupyter_bundlerextension_paths` function user : bool [default: True] Whether to enable in the user's nbconfig directory. sys_prefix : bool [default: False] Whether to enable in the sys.prefix, i.e. environment. Will override `user` logger : Jupyter logger [optional] Logger instance to use
Here is the function:
def disable_bundler_python(module, user=True, sys_prefix=False, logger=None):
    """Disable every bundlerextension declared by a Python package.

    Thin wrapper over the shared state setter with ``state=False``; returns,
    per declared bundler, whether the state change succeeded.

    Parameters
    ----------
    module : str
        Importable Python module exposing the magic-named
        `_jupyter_bundlerextension_paths` function
    user : bool [default: True]
        Whether to act on the user's nbconfig directory.
    sys_prefix : bool [default: False]
        Whether to act on the sys.prefix, i.e. environment.
        Overrides `user`.
    logger : Jupyter logger [optional]
        Logger instance to use
    """
    return _set_bundler_state_python(
        False, module, user, sys_prefix, logger=logger
    )
171,717 | import os
import shutil
import sys
import tarfile
import zipfile
from os.path import basename, join as pjoin, normpath
from urllib.parse import urlparse
from urllib.request import urlretrieve
from jupyter_core.paths import (
jupyter_data_dir, jupyter_config_path, jupyter_path,
SYSTEM_JUPYTER_PATH, ENV_JUPYTER_PATH,
)
from jupyter_core.utils import ensure_dir_exists
from ipython_genutils.py3compat import string_types, cast_unicode_py2
from ipython_genutils.tempdir import TemporaryDirectory
from ._version import __version__
from .config_manager import BaseJSONConfigManager
from traitlets.utils.importstring import import_item
from .extensions import (
BaseExtensionApp, _get_config_dir, GREEN_ENABLED, RED_DISABLED, GREEN_OK, RED_X,
ArgumentConflict, _base_aliases, _base_flags,
)
from traitlets import Bool, Unicode
def _get_nbextension_dir(user=False, sys_prefix=False, prefix=None, nbextensions_dir=None):
    """Resolve the nbextensions directory selected by the arguments.

    At most one location selector may be supplied.

    Parameters
    ----------
    user : bool [default: False]
        Get the user's .jupyter/nbextensions directory
    sys_prefix : bool [default: False]
        Get sys.prefix, i.e. ~/.envs/my-env/share/jupyter/nbextensions
    prefix : str [optional]
        Get custom prefix
    nbextensions_dir : str [optional]
        Get what you put in
    """
    chosen = [
        f'{name}={value!r}'
        for name, value in (
            ('user', user),
            ('prefix', prefix),
            ('nbextensions_dir', nbextensions_dir),
            ('sys_prefix', sys_prefix),
        )
        if value
    ]
    if len(chosen) > 1:
        raise ArgumentConflict(
            f"cannot specify more than one of user, sys_prefix, prefix, or nbextensions_dir, "
            f"but got: {', '.join(chosen)}"
        )
    # Selector precedence mirrors the argument order above; with none set,
    # fall back to the system-wide jupyter path.
    if user:
        return pjoin(jupyter_data_dir(), 'nbextensions')
    if sys_prefix:
        return pjoin(ENV_JUPYTER_PATH[0], 'nbextensions')
    if prefix:
        return pjoin(prefix, 'share', 'jupyter', 'nbextensions')
    if nbextensions_dir:
        return nbextensions_dir
    return pjoin(SYSTEM_JUPYTER_PATH[0], 'nbextensions')
The provided code snippet includes necessary dependencies for implementing the `check_nbextension` function. Write a Python function `def check_nbextension(files, user=False, prefix=None, nbextensions_dir=None, sys_prefix=False)` to solve the following problem:
Check whether nbextension files have been installed Returns True if all files are found, False if any are missing. Parameters ---------- files : list(paths) a list of relative paths within nbextensions. user : bool [default: False] Whether to check the user's .jupyter/nbextensions directory. Otherwise check a system-wide install (e.g. /usr/local/share/jupyter/nbextensions). prefix : str [optional] Specify install prefix, if it should differ from default (e.g. /usr/local). Will check prefix/share/jupyter/nbextensions nbextensions_dir : str [optional] Specify absolute path of nbextensions directory explicitly. sys_prefix : bool [default: False] Install into the sys.prefix, i.e. environment
Here is the function:
def check_nbextension(files, user=False, prefix=None, nbextensions_dir=None, sys_prefix=False):
    """Check whether nbextension files have been installed

    Returns True if all files are found, False if any are missing.

    Parameters
    ----------
    files : list(paths)
        a list of relative paths within nbextensions.
    user : bool [default: False]
        Whether to check the user's .jupyter/nbextensions directory.
        Otherwise check a system-wide install (e.g. /usr/local/share/jupyter/nbextensions).
    prefix : str [optional]
        Specify install prefix, if it should differ from default (e.g. /usr/local).
        Will check prefix/share/jupyter/nbextensions
    nbextensions_dir : str [optional]
        Specify absolute path of nbextensions directory explicitly.
    sys_prefix : bool [default: False]
        Install into the sys.prefix, i.e. environment
    """
    nbext = _get_nbextension_dir(
        user=user, sys_prefix=sys_prefix, prefix=prefix,
        nbextensions_dir=nbextensions_dir,
    )
    # With no nbextensions directory at all, nothing can be installed.
    if not os.path.exists(nbext):
        return False
    if isinstance(files, string_types):
        # Normalise a single path to a one-element list.
        files = [files]
    for relative_path in files:
        if not os.path.exists(pjoin(nbext, relative_path)):
            return False
    return True
171,718 | import os
import shutil
import sys
import tarfile
import zipfile
from os.path import basename, join as pjoin, normpath
from urllib.parse import urlparse
from urllib.request import urlretrieve
from jupyter_core.paths import (
jupyter_data_dir, jupyter_config_path, jupyter_path,
SYSTEM_JUPYTER_PATH, ENV_JUPYTER_PATH,
)
from jupyter_core.utils import ensure_dir_exists
from ipython_genutils.py3compat import string_types, cast_unicode_py2
from ipython_genutils.tempdir import TemporaryDirectory
from ._version import __version__
from .config_manager import BaseJSONConfigManager
from traitlets.utils.importstring import import_item
def install_nbextension(path, overwrite=False, symlink=False,
                        user=False, prefix=None, nbextensions_dir=None,
                        destination=None, verbose=DEPRECATED_ARGUMENT,
                        logger=None, sys_prefix=False
                        ):
    """Install a Javascript extension for the notebook

    Stages files and/or directories into the nbextensions directory.
    By default, this compares modification time, and only stages files that need updating.
    If `overwrite` is specified, matching files are purged before proceeding.

    Parameters
    ----------
    path : path to file, directory, zip or tarball archive, or URL to install
        By default, the file will be installed with its base name, so '/path/to/foo'
        will install to 'nbextensions/foo'. See the destination argument below to change this.
        Archives (zip or tarballs) will be extracted into the nbextensions directory.
    overwrite : bool [default: False]
        If True, always install the files, regardless of what may already be installed.
    symlink : bool [default: False]
        If True, create a symlink in nbextensions, rather than copying files.
        Not allowed with URLs or archives. Windows support for symlinks requires
        Vista or above, Python 3, and a permission bit which only admin users
        have by default, so don't rely on it.
    user : bool [default: False]
        Whether to install to the user's nbextensions directory.
        Otherwise do a system-wide install (e.g. /usr/local/share/jupyter/nbextensions).
    prefix : str [optional]
        Specify install prefix, if it should differ from default (e.g. /usr/local).
        Will install to ``<prefix>/share/jupyter/nbextensions``
    nbextensions_dir : str [optional]
        Specify absolute path of nbextensions directory explicitly.
    destination : str [optional]
        name the nbextension is installed to. For example, if destination is 'foo', then
        the source file will be installed to 'nbextensions/foo', regardless of the source name.
        This cannot be specified if an archive is given as the source.
    verbose : DEPRECATED
        Ignored; passing any value other than the sentinel emits a DeprecationWarning.
    logger : Jupyter logger [optional]
        Logger instance to use
    sys_prefix : bool [default: False]
        Install into the sys.prefix, i.e. environment

    Returns
    -------
    full_dest : str or None
        Path the extension was installed to, or None when an archive was
        extracted (an archive may produce multiple destinations).
    """
    if verbose != DEPRECATED_ARGUMENT:
        import warnings
        warnings.warn("`install_nbextension`'s `verbose` parameter is deprecated, it will have no effects and will be removed in Notebook 5.0", DeprecationWarning)

    # the actual path to which we eventually installed
    full_dest = None

    nbext = _get_nbextension_dir(user=user, sys_prefix=sys_prefix, prefix=prefix, nbextensions_dir=nbextensions_dir)
    # make sure nbextensions dir exists
    ensure_dir_exists(nbext)

    # forcing symlink parameter to False if os.symlink does not exist (e.g., on Windows machines running python 2)
    if not hasattr(os, 'symlink'):
        symlink = False

    if isinstance(path, (list, tuple)):
        raise TypeError("path must be a string pointing to a single extension to install; call this function multiple times to install multiple extensions")

    path = cast_unicode_py2(path)

    if path.startswith(('https://', 'http://')):
        # Remote source: download into a temp dir, then recurse on the local copy.
        if symlink:
            raise ValueError("Cannot symlink from URLs")
        # Given a URL, download it
        with TemporaryDirectory() as td:
            filename = urlparse(path).path.split('/')[-1]
            local_path = os.path.join(td, filename)
            if logger:
                logger.info(f"Downloading: {path} -> {local_path}")
            urlretrieve(path, local_path)
            # now install from the local copy
            full_dest = install_nbextension(local_path, overwrite=overwrite, symlink=symlink,
                nbextensions_dir=nbext, destination=destination, logger=logger)
    elif path.endswith('.zip') or _safe_is_tarfile(path):
        # Archive source: extract in place; member names decide the destinations.
        if symlink:
            raise ValueError("Cannot symlink from archives")
        if destination:
            raise ValueError("Cannot give destination for archives")
        if logger:
            logger.info(f"Extracting: {path} -> {nbext}")

        if path.endswith('.zip'):
            archive = zipfile.ZipFile(path)
        elif _safe_is_tarfile(path):
            archive = tarfile.open(path)
        # NOTE(review): extractall trusts archive member names; a crafted
        # archive could write outside nbext (path traversal) — confirm the
        # archive source is trusted before installing.
        archive.extractall(nbext)
        archive.close()
        # TODO: what to do here
        full_dest = None
    else:
        # Local file or directory source.
        if not destination:
            destination = basename(normpath(path))
        destination = cast_unicode_py2(destination)
        full_dest = normpath(pjoin(nbext, destination))
        if overwrite and os.path.lexists(full_dest):
            if logger:
                logger.info(f"Removing: {full_dest}")
            # rmtree only for real directories; links are removed, not followed.
            if os.path.isdir(full_dest) and not os.path.islink(full_dest):
                shutil.rmtree(full_dest)
            else:
                os.remove(full_dest)

        if symlink:
            path = os.path.abspath(path)
            if not os.path.exists(full_dest):
                if logger:
                    logger.info(f"Symlinking: {full_dest} -> {path}")
                os.symlink(path, full_dest)
        elif os.path.isdir(path):
            # Directory source: walk it and stage each file, creating
            # destination subdirectories as needed.
            path = pjoin(os.path.abspath(path), '') # end in path separator
            for parent, dirs, files in os.walk(path):
                dest_dir = pjoin(full_dest, parent[len(path):])
                if not os.path.exists(dest_dir):
                    if logger:
                        logger.info(f"Making directory: {dest_dir}")
                    os.makedirs(dest_dir)
                for file_name in files:
                    src = pjoin(parent, file_name)
                    dest_file = pjoin(dest_dir, file_name)
                    _maybe_copy(src, dest_file, logger=logger)
        else:
            # Single-file source; _maybe_copy presumably skips up-to-date
            # files per the mtime comparison described above — see its body.
            src = path
            _maybe_copy(src, full_dest, logger=logger)

    return full_dest
def validate_nbextension_python(spec, full_dest, logger=None):
    """Assess the health of an installed nbextension

    Returns a list of warnings.

    Parameters
    ----------
    spec : dict
        A single entry of _jupyter_nbextension_paths():
            [{
                'section': 'notebook',
                'src': 'mockextension',
                'dest': '_mockdestination',
                'require': '_mockdestination/index'
            }]
    full_dest : str
        The on-disk location of the installed nbextension: this should end
        with `nbextensions/<dest>`
    logger : Jupyter logger [optional]
        Logger instance to use
    """
    infos = []
    problems = []

    # The spec must target one of the known nbconfig sections.
    section = spec.get("section", None)
    section_ok = section in NBCONFIG_SECTIONS
    status = GREEN_OK if section_ok else RED_X
    (infos if section_ok else problems).append(f" {status} section: {section}")

    # If a `require` entry exists, the referenced .js file must be on disk.
    require = spec.get("require", None)
    if require is not None:
        require_path = os.path.join(
            full_dest[0:-len(spec["dest"])],
            f"{require}.js")
        require_ok = os.path.exists(require_path)
        status = GREEN_OK if require_ok else RED_X
        (infos if require_ok else problems).append(f" {status} require: {require_path}")

    if logger:
        if problems:
            logger.warning("- Validating: problems found:")
            for msg in problems:
                logger.warning(msg)
            for msg in infos:
                logger.info(msg)
            logger.warning(f"Full spec: {spec}")
        else:
            logger.info(f"- Validating: {GREEN_OK}")
    return problems
from .extensions import (
BaseExtensionApp, _get_config_dir, GREEN_ENABLED, RED_DISABLED, GREEN_OK, RED_X,
ArgumentConflict, _base_aliases, _base_flags,
)
from traitlets import Bool, Unicode
def _get_nbextension_metadata(module):
    """Get the list of nbextension paths associated with a Python module.

    Returns a tuple of (the module, [{
        'section': 'notebook',
        'src': 'mockextension',
        'dest': '_mockdestination',
        'require': '_mockdestination/index'
    }])

    Parameters
    ----------
    module : str
        Importable Python module exposing the
        magic-named `_jupyter_nbextension_paths` function
    """
    mod = import_item(module)
    # A valid nbextension package must expose the magic hook function.
    if not hasattr(mod, '_jupyter_nbextension_paths'):
        raise KeyError(
            f'The Python module {module} is not a valid nbextension, '
            f'it is missing the `_jupyter_nbextension_paths()` method.'
        )
    return mod, mod._jupyter_nbextension_paths()
The provided code snippet includes necessary dependencies for implementing the `install_nbextension_python` function. Write a Python function `def install_nbextension_python(module, overwrite=False, symlink=False, user=False, sys_prefix=False, prefix=None, nbextensions_dir=None, logger=None)` to solve the following problem:
Install an nbextension bundled in a Python package. Returns a list of installed/updated directories. See install_nbextension for parameter information.
Here is the function:
def install_nbextension_python(module, overwrite=False, symlink=False,
        user=False, sys_prefix=False, prefix=None, nbextensions_dir=None, logger=None):
    """Install an nbextension bundled in a Python package.

    Returns a list of installed/updated directories.

    See install_nbextension for parameter information."""
    mod, specs = _get_nbextension_metadata(module)
    # Sources are declared relative to the package's directory.
    package_dir = os.path.split(mod.__file__)[0]
    installed = []
    for spec in specs:
        src = os.path.join(package_dir, spec['src'])
        dest = spec['dest']
        if logger:
            logger.info(f"Installing {src} -> {dest}")
        full_dest = install_nbextension(
            src, overwrite=overwrite, symlink=symlink,
            user=user, sys_prefix=sys_prefix, prefix=prefix,
            nbextensions_dir=nbextensions_dir,
            destination=dest, logger=logger,
        )
        # Report (but do not fail on) problems with the installed spec.
        validate_nbextension_python(spec, full_dest, logger)
        installed.append(full_dest)
    return installed
import os
import shutil
import sys
import tarfile
import zipfile
from os.path import basename, join as pjoin, normpath
from urllib.parse import urlparse
from urllib.request import urlretrieve
from jupyter_core.paths import (
jupyter_data_dir, jupyter_config_path, jupyter_path,
SYSTEM_JUPYTER_PATH, ENV_JUPYTER_PATH,
)
from jupyter_core.utils import ensure_dir_exists
from ipython_genutils.py3compat import string_types, cast_unicode_py2
from ipython_genutils.tempdir import TemporaryDirectory
from ._version import __version__
from .config_manager import BaseJSONConfigManager
from traitlets.utils.importstring import import_item
from .extensions import (
BaseExtensionApp, _get_config_dir, GREEN_ENABLED, RED_DISABLED, GREEN_OK, RED_X,
ArgumentConflict, _base_aliases, _base_flags,
)
from traitlets import Bool, Unicode
def jupyter_path(*subdirs: str) -> List[str]:
    """Return a list of directories to search for data files

    JUPYTER_PATH environment variable has highest priority.

    If the JUPYTER_PREFER_ENV_PATH environment variable is set, the environment-level
    directories will have priority over user-level directories.

    If the Python site.ENABLE_USER_SITE variable is True, we also add the
    appropriate Python user site subdirectory to the user-level directories.

    If ``*subdirs`` are given, that subdirectory will be added to each element.

    Examples:

    >>> jupyter_path()
    ['~/.local/jupyter', '/usr/local/share/jupyter']
    >>> jupyter_path('kernels')
    ['~/.local/jupyter/kernels', '/usr/local/share/jupyter/kernels']
    """
    search: List[str] = []

    # Highest priority is the explicit environment variable.
    explicit = os.environ.get("JUPYTER_PATH")
    if explicit:
        search.extend(entry.rstrip(os.sep) for entry in explicit.split(os.pathsep))

    # Next come environment- and user-level dirs, ordered by preference flag.
    user_dirs = [jupyter_data_dir()]
    if site.ENABLE_USER_SITE:
        # Check if site.getuserbase() exists to be compatible with virtualenv,
        # which often does not have this method.
        userbase: Optional[str]
        userbase = site.getuserbase() if hasattr(site, "getuserbase") else site.USER_BASE
        if userbase:
            candidate = os.path.join(userbase, "share", "jupyter")
            if candidate not in user_dirs:
                user_dirs.append(candidate)

    env_dirs = [p for p in ENV_JUPYTER_PATH if p not in SYSTEM_JUPYTER_PATH]
    if prefer_environment_over_user():
        search.extend(env_dirs)
        search.extend(user_dirs)
    else:
        search.extend(user_dirs)
        search.extend(env_dirs)

    # Finally, the system-wide locations.
    search.extend(SYSTEM_JUPYTER_PATH)

    # Append the requested subdirectory to every entry.
    if subdirs:
        search = [pjoin(p, *subdirs) for p in search]
    return search
The provided code snippet includes necessary dependencies for implementing the `_find_uninstall_nbextension` function. Write a Python function `def _find_uninstall_nbextension(filename, logger=None)` to solve the following problem:
Remove nbextension files from the first location they are found. Returns True if files were removed, False otherwise.
Here is the function:
def _find_uninstall_nbextension(filename, logger=None):
    """Remove nbextension files from the first location they are found.

    Returns True if files were removed, False otherwise.
    """
    filename = cast_unicode_py2(filename)
    for search_dir in jupyter_path('nbextensions'):
        path = pjoin(search_dir, filename)
        if not os.path.lexists(path):
            continue
        if logger:
            logger.info(f"Removing: {path}")
        # Remove symlinks and plain files with os.remove; only real
        # directories get rmtree (equivalent to the original branch,
        # rewritten via De Morgan).
        if os.path.islink(path) or not os.path.isdir(path):
            os.remove(path)
        else:
            shutil.rmtree(path)
        return True
    return False
import os
import shutil
import sys
import tarfile
import zipfile
from os.path import basename, join as pjoin, normpath
from urllib.parse import urlparse
from urllib.request import urlretrieve
from jupyter_core.paths import (
jupyter_data_dir, jupyter_config_path, jupyter_path,
SYSTEM_JUPYTER_PATH, ENV_JUPYTER_PATH,
)
from jupyter_core.utils import ensure_dir_exists
from ipython_genutils.py3compat import string_types, cast_unicode_py2
from ipython_genutils.tempdir import TemporaryDirectory
from ._version import __version__
from .config_manager import BaseJSONConfigManager
from traitlets.utils.importstring import import_item
from .extensions import (
BaseExtensionApp, _get_config_dir, GREEN_ENABLED, RED_DISABLED, GREEN_OK, RED_X,
ArgumentConflict, _base_aliases, _base_flags,
)
from traitlets import Bool, Unicode
def jupyter_config_path() -> List[str]:
    """Return the search path for Jupyter config files as a list.

    If the JUPYTER_PREFER_ENV_PATH environment variable is set, the
    environment-level directories will have priority over user-level
    directories.

    If the Python site.ENABLE_USER_SITE variable is True, we also add the
    appropriate Python user site subdirectory to the user-level directories.
    """
    if os.environ.get("JUPYTER_NO_CONFIG"):
        # jupyter_config_dir makes a blank config when JUPYTER_NO_CONFIG is set.
        return [jupyter_config_dir()]

    search: List[str] = []

    # Highest priority is the explicit environment variable.
    explicit = os.environ.get("JUPYTER_CONFIG_PATH")
    if explicit:
        search.extend(entry.rstrip(os.sep) for entry in explicit.split(os.pathsep))

    # Next come environment- and user-level dirs, ordered by preference flag.
    user_dirs = [jupyter_config_dir()]
    if site.ENABLE_USER_SITE:
        userbase: Optional[str]
        # Check if site.getuserbase() exists to be compatible with virtualenv,
        # which often does not have this method.
        userbase = site.getuserbase() if hasattr(site, "getuserbase") else site.USER_BASE
        if userbase:
            candidate = os.path.join(userbase, "etc", "jupyter")
            if candidate not in user_dirs:
                user_dirs.append(candidate)

    env_dirs = [p for p in ENV_CONFIG_PATH if p not in SYSTEM_CONFIG_PATH]
    if prefer_environment_over_user():
        search.extend(env_dirs)
        search.extend(user_dirs)
    else:
        search.extend(user_dirs)
        search.extend(env_dirs)

    # Finally, the system-wide locations.
    search.extend(SYSTEM_CONFIG_PATH)
    return search
class BaseJSONConfigManager(LoggingConfigurable):
    """General JSON config manager

    Deals with persisting/storing config in a json file with optionally
    default values in a {section_name}.d directory.
    """

    # Directory holding the {section_name}.json files / {section_name}.d dirs.
    config_dir = Unicode('.')
    # When True, json files in {section_name}.d are merged in as defaults.
    read_directory = Bool(True)

    def ensure_config_dir_exists(self):
        """Will try to create the config_dir directory.

        An already-existing directory is fine; any other OSError propagates.
        """
        try:
            os.makedirs(self.config_dir, 0o755)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

    def file_name(self, section_name):
        """Returns the json filename for the section_name: {config_dir}/{section_name}.json"""
        return os.path.join(self.config_dir, section_name+'.json')

    def directory(self, section_name):
        """Returns the directory name for the section name: {config_dir}/{section_name}.d"""
        return os.path.join(self.config_dir, section_name+'.d')

    def get(self, section_name, include_root=True):
        """Retrieve the config data for the specified section.

        Returns the data as a dictionary, or an empty dictionary if the file
        doesn't exist.

        When include_root is False, it will not read the root .json file,
        effectively returning the default values.
        """
        paths = [self.file_name(section_name)] if include_root else []
        if self.read_directory:
            pattern = os.path.join(self.directory(section_name), '*.json')
            # These json files should be processed first so that the
            # {section_name}.json takes precedence.
            # The idea behind this is that installing a Python package may
            # put a json file somewhere in the .d directory, while the
            # .json file is probably a user configuration.
            paths = sorted(glob.glob(pattern)) + paths
        self.log.debug('Paths used for configuration of %s: \n\t%s', section_name, '\n\t'.join(paths))
        data = {}
        for path in paths:
            if os.path.isfile(path):
                with open(path, encoding='utf-8') as f:
                    # Later files (deep-)merge over earlier ones.
                    recursive_update(data, json.load(f))
        return data

    def set(self, section_name, data):
        """Store the given config data.
        """
        filename = self.file_name(section_name)
        self.ensure_config_dir_exists()

        if self.read_directory:
            # we will modify data in place, so make a copy
            data = copy.deepcopy(data)
            defaults = self.get(section_name, include_root=False)
            # Only persist values that differ from the .d defaults.
            remove_defaults(data, defaults)

        # Generate the JSON up front, since it could raise an exception,
        # in order to avoid writing half-finished corrupted data to disk.
        json_content = json.dumps(data, indent=2)

        # Idiomatic context-manager form of the original open()/with pair.
        with open(filename, 'w', encoding='utf-8') as f:
            f.write(json_content)

    def update(self, section_name, new_data):
        """Modify the config section by recursively updating it with new_data.

        Returns the modified config data as a dictionary.
        """
        data = self.get(section_name)
        recursive_update(data, new_data)
        self.set(section_name, data)
        return data
The provided code snippet includes necessary dependencies for implementing the `_find_disable_nbextension` function. Write a Python function `def _find_disable_nbextension(section, require, logger=None)` to solve the following problem:
Disable an nbextension from the first config location where it is enabled. Returns True if it changed any config, False otherwise.
Here is the function:
def _find_disable_nbextension(section, require, logger=None):
    """Disable an nbextension from the first config location where it is enabled.

    Returns True if it changed any config, False otherwise.
    """
    for config_dir in jupyter_config_path():
        manager = BaseJSONConfigManager(
            config_dir=os.path.join(config_dir, 'nbconfig'))
        load_extensions = manager.get(section).get('load_extensions', {})
        if load_extensions.get(require, None):
            if logger:
                logger.info("Disabling %s extension in %s", require, config_dir)
            # Writing None clears the enabled flag for this extension.
            manager.update(section, {'load_extensions': {require: None}})
            return True
    return False
import warnings
The provided code snippet includes necessary dependencies for implementing the `parameterized` function. Write a Python function `def parameterized(*params)` to solve the following problem:
Decorator to create parameterized rules. Parameterized rule methods must be named starting with 'p_' and contain 'xxx', and their docstrings may contain 'xxx' and 'yyy'. These will be replaced by the given parameter tuples. For example, ``p_xxx_rule()`` with docstring 'xxx_rule : yyy' when decorated with ``@parameterized(('id', 'ID'))`` produces ``p_id_rule()`` with the docstring 'id_rule : ID'. Using multiple tuples produces multiple rules.
Here is the function:
def parameterized(*params):
    """ Decorator to create parameterized rules.

    Parameterized rule methods must be named starting with 'p_' and contain
    'xxx', and their docstrings may contain 'xxx' and 'yyy'. These will be
    replaced by the given parameter tuples. For example, ``p_xxx_rule()`` with
    docstring 'xxx_rule : yyy' when decorated with
    ``@parameterized(('id', 'ID'))`` produces ``p_id_rule()`` with the docstring
    'id_rule : ID'. Using multiple tuples produces multiple rules.
    """
    def attach_params(rule_func):
        # Stash the parameter tuples on the function; the `template` class
        # decorator expands them later.
        rule_func._params = params
        return rule_func
    return attach_params
import warnings
def _create_param_rules(cls, func):
""" Create ply.yacc rules based on a parameterized rule function
Generates new methods (one per each pair of parameters) based on the
template rule function `func`, and attaches them to `cls`. The rule
function's parameters must be accessible via its `_params` attribute.
"""
for xxx, yyy in func._params:
# Use the template method's body for each new method
def param_rule(self, p):
func(self, p)
# Substitute in the params for the grammar rule and function name
param_rule.__doc__ = func.__doc__.replace('xxx', xxx).replace('yyy', yyy)
param_rule.__name__ = func.__name__.replace('xxx', xxx)
# Attach the new method to the class
setattr(cls, param_rule.__name__, param_rule)
The provided code snippet includes necessary dependencies for implementing the `template` function. Write a Python function `def template(cls)` to solve the following problem:
Class decorator to generate rules from parameterized rule templates. See `parameterized` for more information on parameterized rules.
Here is the function:
def template(cls):
    """ Class decorator to generate rules from parameterized rule templates.

    See `parameterized` for more information on parameterized rules.
    """
    issued_nodoc_warning = False
    for attr_name in dir(cls):
        # Only 'p_' methods carrying a _params attribute are templates.
        if not attr_name.startswith('p_'):
            continue
        method = getattr(cls, attr_name)
        if not hasattr(method, '_params'):
            continue
        # Remove the template method itself; its expansions replace it.
        delattr(cls, attr_name)
        # Only expand when a docstring is present: running under -OO strips
        # docstrings, which would make the generated rules useless.
        # See: https://github.com/eliben/pycparser/pull/198/ and
        #      https://github.com/eliben/pycparser/issues/197
        # for discussion.
        if method.__doc__ is not None:
            _create_param_rules(cls, method)
        elif not issued_nodoc_warning:
            warnings.warn(
                'parsing methods must have __doc__ for pycparser to work properly',
                RuntimeWarning,
                stacklevel=2)
            issued_nodoc_warning = True
    return cls
from . import c_ast
def _extract_nested_case(case_node, stmts_list):
    """ Recursively extract consecutive Case statements that are made nested
    by the parser and add them to the stmts_list.
    """
    # The parser leaves a fall-through case as the sole first statement of
    # the preceding case; pop it out and promote it to a sibling in
    # stmts_list, then keep unwrapping in case it hides more nested cases.
    if isinstance(case_node.stmts[0], (c_ast.Case, c_ast.Default)):
        stmts_list.append(case_node.stmts.pop())
        _extract_nested_case(stmts_list[-1], stmts_list)
The provided code snippet includes necessary dependencies for implementing the `fix_switch_cases` function. Write a Python function `def fix_switch_cases(switch_node)` to solve the following problem:
The 'case' statements in a 'switch' come out of parsing with one child node, so subsequent statements are just tucked to the parent Compound. Additionally, consecutive (fall-through) case statements come out messy. This is a peculiarity of the C grammar. The following: switch (myvar) { case 10: k = 10; p = k + 1; return 10; case 20: case 30: return 20; default: break; } Creates this tree (pseudo-dump): Switch ID: myvar Compound: Case 10: k = 10 p = k + 1 return 10 Case 20: Case 30: return 20 Default: break The goal of this transform is to fix this mess, turning it into the following: Switch ID: myvar Compound: Case 10: k = 10 p = k + 1 return 10 Case 20: Case 30: return 20 Default: break A fixed AST node is returned. The argument may be modified.
Here is the function:
def fix_switch_cases(switch_node):
    """ The 'case' statements in a 'switch' come out of parsing with one
    child node, so subsequent statements are just tucked to the parent
    Compound. Additionally, consecutive (fall-through) case statements
    come out messy. This is a peculiarity of the C grammar. The following:

        switch (myvar) {
            case 10:
                k = 10;
                p = k + 1;
                return 10;
            case 20:
            case 30:
                return 20;
            default:
                break;
        }

    Creates this tree (pseudo-dump)::

        Switch
            ID: myvar
            Compound:
                Case 10:
                    k = 10
                p = k + 1
                return 10
                Case 20:
                    Case 30:
                        return 20
                Default:
                    break

    The goal of this transform is to fix this mess, turning it into the
    following::

        Switch
            ID: myvar
            Compound:
                Case 10:
                    k = 10
                    p = k + 1
                    return 10
                Case 20:
                Case 30:
                    return 20
                Default:
                    break

    A fixed AST node is returned. The argument may be modified.
    """
    assert isinstance(switch_node, c_ast.Switch)
    # A non-Compound body (e.g. a single statement) needs no fixing.
    if not isinstance(switch_node.stmt, c_ast.Compound):
        return switch_node

    # The new Compound child for the Switch, which will collect children in the
    # correct order
    new_compound = c_ast.Compound([], switch_node.stmt.coord)

    # The last Case/Default node
    last_case = None

    # Goes over the children of the Compound below the Switch, adding them
    # either directly below new_compound or below the last Case as appropriate
    # (for `switch(cond) {}`, block_items would have been None)
    for child in (switch_node.stmt.block_items or []):
        if isinstance(child, (c_ast.Case, c_ast.Default)):
            # If it's a Case/Default:
            # 1. Add it to the Compound and mark as "last case"
            # 2. If its immediate child is also a Case or Default, promote it
            #    to a sibling.
            new_compound.block_items.append(child)
            _extract_nested_case(child, new_compound.block_items)
            # _extract_nested_case may have appended promoted siblings, so the
            # most recent case is the last list entry, not necessarily `child`.
            last_case = new_compound.block_items[-1]
        else:
            # Other statements are added as children to the last case, if it
            # exists.
            if last_case is None:
                new_compound.block_items.append(child)
            else:
                last_case.stmts.append(child)

    switch_node.stmt = new_compound
    return switch_node
from . import c_ast
def _fix_atomic_specifiers_once(decl):
    """ Performs one 'fix' round of atomic specifiers.

    Returns (modified_decl, found) where found is True iff a fix was made.
    """
    # Walk down the .type chain looking for a Typename node that carries the
    # '_Atomic' qualifier, tracking its parent and grandparent as we go.
    parent = decl
    grandparent = None
    node = decl.type
    while node is not None:
        if isinstance(node, c_ast.Typename) and '_Atomic' in node.quals:
            break
        try:
            grandparent = parent
            parent = node
            node = node.type
        except AttributeError:
            # If we've reached a node without a `type` field, it means we won't
            # find what we're looking for at this point; give up the search
            # and return the original decl unmodified.
            return decl, False

    # The node above the matched Typename must be a TypeDecl; splice the
    # spurious Typename->TypeDecl pair out of the chain...
    assert isinstance(parent, c_ast.TypeDecl)
    grandparent.type = node.type
    # ...and push the '_Atomic' qualifier down onto the contained type.
    if '_Atomic' not in node.type.quals:
        node.type.quals.append('_Atomic')
    return decl, True
The provided code snippet includes necessary dependencies for implementing the `fix_atomic_specifiers` function. Write a Python function `def fix_atomic_specifiers(decl)` to solve the following problem:
Atomic specifiers like _Atomic(type) are unusually structured, conferring a qualifier upon the contained type. This function fixes a decl with atomic specifiers to have a sane AST structure, by removing spurious Typename->TypeDecl pairs and attaching the _Atomic qualifier in the right place.
Here is the function:
def fix_atomic_specifiers(decl):
    """ Atomic specifiers like _Atomic(type) are unusually structured,
    conferring a qualifier upon the contained type.

    This function fixes a decl with atomic specifiers to have a sane AST
    structure, by removing spurious Typename->TypeDecl pairs and attaching
    the _Atomic qualifier in the right place.
    """
    # There can be multiple levels of _Atomic in a decl; fix them until a
    # fixed point is reached.
    while True:
        decl, found = _fix_atomic_specifiers_once(decl)
        if not found:
            break

    # Make sure to add an _Atomic qual on the topmost decl if needed. Also
    # restore the declname on the innermost TypeDecl (it gets placed in the
    # wrong place during construction).
    typ = decl
    while not isinstance(typ, c_ast.TypeDecl):
        try:
            typ = typ.type
        except AttributeError:
            # No TypeDecl anywhere in the chain; nothing left to fix up.
            return decl
    if '_Atomic' in typ.quals and '_Atomic' not in decl.quals:
        decl.quals.append('_Atomic')
    if typ.declname is None:
        typ.declname = decl.name
    return decl
import re
import sys
import types
import copy
import os
import inspect
def _funcs_to_names(funclist, namelist):
result = []
for f, name in zip(funclist, namelist):
if f and f[0]:
result.append((name, f[1]))
else:
result.append(f)
return result | null |
import re
import sys
import types
import copy
import os
import inspect
def _names_to_funcs(namelist, fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]], n[1]))
else:
result.append(n)
return result | null |
import re
import sys
import types
import copy
import os
import inspect
def _statetoken(s, names):
nonstate = 1
parts = s.split('_')
for i, part in enumerate(parts[1:], 1):
if part not in names and part != 'ANY':
break
if i > 1:
states = tuple(parts[1:i])
else:
states = ('INITIAL',)
if 'ANY' in states:
states = tuple(names)
tokenname = '_'.join(parts[i:])
return (states, tokenname) | null |
import re
import sys
import types
import copy
import os
import inspect
class PlyLogger(object):
    """Minimal logger that writes printf-style formatted messages to a
    file-like object, mimicking the subset of the logging API ply uses.
    """

    def __init__(self, f):
        self.f = f

    def _emit(self, prefix, msg, args):
        # Lazy %-formatting, matching the logging module's convention.
        self.f.write(prefix + (msg % args) + '\n')

    def critical(self, msg, *args, **kwargs):
        self._emit('', msg, args)

    def warning(self, msg, *args, **kwargs):
        self._emit('WARNING: ', msg, args)

    def error(self, msg, *args, **kwargs):
        self._emit('ERROR: ', msg, args)

    # info and debug are aliases for critical (always written).
    info = critical
    debug = critical
class Lexer:
    def __init__(self):
        """Create an empty lexer.

        All tables start empty; they are populated either by lex() building
        the rules from a module, or by readtab() loading a generated table
        file.
        """
        self.lexre = None             # Master regular expression. This is a list of
                                      # tuples (re, findex) where re is a compiled
                                      # regular expression and findex is a list
                                      # mapping regex group numbers to rules
        self.lexretext = None         # Current regular expression strings
        self.lexstatere = {}          # Dictionary mapping lexer states to master regexs
        self.lexstateretext = {}      # Dictionary mapping lexer states to regex strings
        self.lexstaterenames = {}     # Dictionary mapping lexer states to symbol names
        self.lexstate = 'INITIAL'     # Current lexer state
        self.lexstatestack = []       # Stack of lexer states
        self.lexstateinfo = None      # State information
        self.lexstateignore = {}      # Dictionary of ignored characters for each state
        self.lexstateerrorf = {}      # Dictionary of error functions for each state
        self.lexstateeoff = {}        # Dictionary of eof functions for each state
        self.lexreflags = 0           # Optional re compile flags
        self.lexdata = None           # Actual input data (as a string)
        self.lexpos = 0               # Current position in input text
        self.lexlen = 0               # Length of the input text
        self.lexerrorf = None         # Error rule (if any)
        self.lexeoff = None           # EOF rule (if any)
        self.lextokens = None         # List of valid tokens
        self.lexignore = ''           # Ignored characters
        self.lexliterals = ''         # Literal characters that can be passed through
        self.lexmodule = None         # Module
        self.lineno = 1               # Current line number
        self.lexoptimize = False      # Optimized mode
    def clone(self, object=None):
        """Return a shallow copy of this lexer.

        If *object* is supplied, the clone is attached to it: every rule
        function and error function stored in the state tables is rebound to
        the same-named attribute on *object*, so bound methods follow the new
        owner instead of the original module/instance.
        """
        c = copy.copy(self)

        # If the object parameter has been supplied, it means we are attaching the
        # lexer to a new object.  In this case, we have to rebind all methods in
        # the lexstatere and lexstateerrorf tables.
        if object:
            newtab = {}
            for key, ritem in self.lexstatere.items():
                newre = []
                for cre, findex in ritem:
                    newfindex = []
                    for f in findex:
                        # Entries without a function (e.g. simple string
                        # rules) are carried over unchanged.
                        if not f or not f[0]:
                            newfindex.append(f)
                            continue
                        # Rebind by name onto the new owner object.
                        newfindex.append((getattr(object, f[0].__name__), f[1]))
                    newre.append((cre, newfindex))
                newtab[key] = newre
            c.lexstatere = newtab
            c.lexstateerrorf = {}
            for key, ef in self.lexstateerrorf.items():
                c.lexstateerrorf[key] = getattr(object, ef.__name__)
            c.lexmodule = object
        return c
# ------------------------------------------------------------
# writetab() - Write lexer information to a table file
# ------------------------------------------------------------
def writetab(self, lextab, outputdir=''):
    """Write the lexer's tables out as an importable Python module.

    lextab is the (possibly dotted) module name; only its last component
    is used as the file name inside *outputdir*.  Raises IOError when
    lextab is already an imported module object.
    """
    if isinstance(lextab, types.ModuleType):
        raise IOError("Won't overwrite existing lextab module")
    basetabmodule = lextab.split('.')[-1]
    filename = os.path.join(outputdir, basetabmodule) + '.py'
    with open(filename, 'w') as tf:
        tf.write('# %s.py. This file automatically created by PLY (version %s). Don\'t edit!\n' % (basetabmodule, __version__))
        tf.write('_tabversion = %s\n' % repr(__tabversion__))
        tf.write('_lextokens = set(%s)\n' % repr(tuple(self.lextokens)))
        tf.write('_lexreflags = %s\n' % repr(self.lexreflags))
        tf.write('_lexliterals = %s\n' % repr(self.lexliterals))
        tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo))
        # Rewrite the lexstatere table, replacing function objects with function names
        tabre = {}
        for statename, lre in self.lexstatere.items():
            titem = []
            for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]):
                titem.append((retext, _funcs_to_names(func, renames)))
            tabre[statename] = titem
        tf.write('_lexstatere = %s\n' % repr(tabre))
        tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore))
        # Error/EOF handlers are stored by name only; readtab() resolves
        # them again through the caller-supplied fdict.
        taberr = {}
        for statename, ef in self.lexstateerrorf.items():
            taberr[statename] = ef.__name__ if ef else None
        tf.write('_lexstateerrorf = %s\n' % repr(taberr))
        tabeof = {}
        for statename, ef in self.lexstateeoff.items():
            tabeof[statename] = ef.__name__ if ef else None
        tf.write('_lexstateeoff = %s\n' % repr(tabeof))
# ------------------------------------------------------------
# readtab() - Read lexer information from a tab file
# ------------------------------------------------------------
def readtab(self, tabfile, fdict):
    """Initialize the lexer from a previously generated lextab module.

    tabfile may be either an already-imported module object or the
    (possibly dotted) name of the module to import.  fdict maps rule
    function names back to the callables defined by the caller.

    Raises ImportError if the table was written by an incompatible PLY
    version.
    """
    if isinstance(tabfile, types.ModuleType):
        lextab = tabfile
    else:
        exec('import %s' % tabfile)
        lextab = sys.modules[tabfile]
    if getattr(lextab, '_tabversion', '0.0') != __tabversion__:
        raise ImportError('Inconsistent PLY version')
    self.lextokens = lextab._lextokens
    self.lexreflags = lextab._lexreflags
    self.lexliterals = lextab._lexliterals
    self.lextokens_all = self.lextokens | set(self.lexliterals)
    self.lexstateinfo = lextab._lexstateinfo
    self.lexstateignore = lextab._lexstateignore
    self.lexstatere = {}
    self.lexstateretext = {}
    for statename, lre in lextab._lexstatere.items():
        titem = []
        txtitem = []
        for pat, func_name in lre:
            titem.append((re.compile(pat, lextab._lexreflags), _names_to_funcs(func_name, fdict)))
            # Bug fix: txtitem was previously never populated, leaving
            # lexstateretext empty after a table load, unlike the build
            # path in lex() which records the pattern text.
            txtitem.append(pat)
        self.lexstatere[statename] = titem
        self.lexstateretext[statename] = txtitem
    self.lexstateerrorf = {}
    for statename, ef in lextab._lexstateerrorf.items():
        self.lexstateerrorf[statename] = fdict[ef]
    self.lexstateeoff = {}
    for statename, ef in lextab._lexstateeoff.items():
        self.lexstateeoff[statename] = fdict[ef]
    self.begin('INITIAL')
# ------------------------------------------------------------
# input() - Push a new string into the lexer
# ------------------------------------------------------------
def input(self, s):
    """Feed a new input string to the lexer and reset the scan position."""
    # Peek at the first character to verify that s is string-like before
    # accepting it as the new input buffer.
    first = s[:1]
    if not isinstance(first, StringTypes):
        raise ValueError('Expected a string')
    self.lexdata = s
    self.lexpos = 0
    self.lexlen = len(s)
# ------------------------------------------------------------
# begin() - Changes the lexing state
# ------------------------------------------------------------
def begin(self, state):
    """Switch the lexer into *state*, activating that state's tables."""
    if state not in self.lexstatere:
        raise ValueError('Undefined state')
    # Install the master regexes and the per-state helper tables.
    self.lexre = self.lexstatere[state]
    self.lexretext = self.lexstateretext[state]
    self.lexignore = self.lexstateignore.get(state, '')
    self.lexerrorf = self.lexstateerrorf.get(state)
    self.lexeoff = self.lexstateeoff.get(state)
    self.lexstate = state
# ------------------------------------------------------------
# push_state() - Changes the lexing state and saves old on stack
# ------------------------------------------------------------
def push_state(self, state):
    """Enter *state*, remembering the current state on a stack."""
    previous = self.lexstate
    self.lexstatestack.append(previous)
    self.begin(state)
# ------------------------------------------------------------
# pop_state() - Restores the previous state
# ------------------------------------------------------------
def pop_state(self):
    """Return to the state saved by the most recent push_state()."""
    previous = self.lexstatestack.pop()
    self.begin(previous)
# ------------------------------------------------------------
# current_state() - Returns the current lexing state
# ------------------------------------------------------------
def current_state(self):
    """Return the name of the state the lexer is currently in."""
    return self.lexstate
# ------------------------------------------------------------
# skip() - Skip ahead n characters
# ------------------------------------------------------------
def skip(self, n):
    """Advance the scan position by n characters without tokenizing them."""
    self.lexpos = self.lexpos + n
# ------------------------------------------------------------
# opttoken() - Return the next token from the Lexer
#
# Note: This function has been carefully implemented to be as fast
# as possible. Don't make changes unless you really know what
# you are doing
# ------------------------------------------------------------
def token(self):
    """Return the next LexToken from the current input, or None at EOF.

    This is the lexer's hot path.  It has been carefully implemented to
    be as fast as possible -- don't make changes unless you really know
    what you are doing.
    """
    # Make local copies of frequently referenced attributes
    lexpos = self.lexpos
    lexlen = self.lexlen
    lexignore = self.lexignore
    lexdata = self.lexdata
    while lexpos < lexlen:
        # This code provides some short-circuit code for whitespace, tabs, and other ignored characters
        if lexdata[lexpos] in lexignore:
            lexpos += 1
            continue
        # Look for a regular expression match
        for lexre, lexindexfunc in self.lexre:
            m = lexre.match(lexdata, lexpos)
            if not m:
                continue
            # Create a token for return
            tok = LexToken()
            tok.value = m.group()
            tok.lineno = self.lineno
            tok.lexpos = lexpos
            # m.lastindex identifies which named group (i.e. which rule)
            # matched; the table maps it to (function, token type).
            i = m.lastindex
            func, tok.type = lexindexfunc[i]
            if not func:
                # If no token type was set, it's an ignored token
                if tok.type:
                    self.lexpos = m.end()
                    return tok
                else:
                    lexpos = m.end()
                    break
            lexpos = m.end()
            # If token is processed by a function, call it
            tok.lexer = self      # Set additional attributes useful in token rules
            self.lexmatch = m
            self.lexpos = lexpos
            newtok = func(tok)
            # Every function must return a token, if nothing, we just move to next token
            if not newtok:
                lexpos = self.lexpos          # This is here in case user has updated lexpos.
                lexignore = self.lexignore    # This is here in case there was a state change
                break
            # Verify type of the token.  If not in the token map, raise an error
            if not self.lexoptimize:
                if newtok.type not in self.lextokens_all:
                    raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
                        func.__code__.co_filename, func.__code__.co_firstlineno,
                        func.__name__, newtok.type), lexdata[lexpos:])
            return newtok
        else:
            # for-else: no master regex matched at this position.
            # No match, see if in literals
            if lexdata[lexpos] in self.lexliterals:
                tok = LexToken()
                tok.value = lexdata[lexpos]
                tok.lineno = self.lineno
                tok.type = tok.value
                tok.lexpos = lexpos
                self.lexpos = lexpos + 1
                return tok
            # No match. Call t_error() if defined.
            if self.lexerrorf:
                tok = LexToken()
                tok.value = self.lexdata[lexpos:]
                tok.lineno = self.lineno
                tok.type = 'error'
                tok.lexer = self
                tok.lexpos = lexpos
                self.lexpos = lexpos
                newtok = self.lexerrorf(tok)
                if lexpos == self.lexpos:
                    # Error method didn't change text position at all. This is an error.
                    raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
                lexpos = self.lexpos
                if not newtok:
                    continue
                return newtok
            self.lexpos = lexpos
            raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:])
    # End of input: give the user's t_eof rule (if any) a chance to
    # switch buffers or synthesize a final token.
    if self.lexeoff:
        tok = LexToken()
        tok.type = 'eof'
        tok.value = ''
        tok.lineno = self.lineno
        tok.lexpos = lexpos
        tok.lexer = self
        self.lexpos = lexpos
        newtok = self.lexeoff(tok)
        return newtok
    self.lexpos = lexpos + 1
    if self.lexdata is None:
        raise RuntimeError('No input string given with input()')
    return None
# Iterator interface
def __iter__(self):
    """Lexers are their own iterator; tokens come from next()/__next__()."""
    return self
def next(self):
    """Return the next token, raising StopIteration at end of input."""
    tok = self.token()
    if tok is None:
        raise StopIteration
    return tok

# Same method serves the Python 3 iterator protocol.
__next__ = next
def _get_regex(func):
return getattr(func, 'regex', func.__doc__)
def get_caller_module_dict(levels):
    """Return a dictionary combining the globals and locals of the frame
    *levels* levels up the call stack (locals win on key conflicts)."""
    frame = sys._getframe(levels)
    namespace = frame.f_globals.copy()
    if frame.f_globals != frame.f_locals:
        namespace.update(frame.f_locals)
    return namespace
def _form_master_re(relist, reflags, ldict, toknames):
    """Combine the individual rule regexes in *relist* into master regexes.

    Returns a 3-tuple: a list of (compiled regex, group-index ->
    (function, token-name) table) pairs, the list of pattern strings,
    and the list of group-name tables.  If the combined pattern fails to
    compile (e.g. too many named groups for the re module), the rule
    list is split in half and each half is formed recursively, so more
    than one master regex may be returned.
    """
    if not relist:
        return []
    regex = '|'.join(relist)
    try:
        lexre = re.compile(regex, reflags)
        # Build the index to function map for the matching engine
        lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1)
        lexindexnames = lexindexfunc[:]
        for f, i in lexre.groupindex.items():
            handle = ldict.get(f, None)
            if type(handle) in (types.FunctionType, types.MethodType):
                lexindexfunc[i] = (handle, toknames[f])
                lexindexnames[i] = f
            elif handle is not None:
                lexindexnames[i] = f
                if f.find('ignore_') > 0:
                    # t_ignore_* string rules produce no token at all.
                    lexindexfunc[i] = (None, None)
                else:
                    lexindexfunc[i] = (None, toknames[f])
        return [(lexre, lexindexfunc)], [regex], [lexindexnames]
    except Exception:
        # Divide and conquer: compile each half separately.
        m = int(len(relist)/2)
        if m == 0:
            m = 1
        llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames)
        rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames)
        return (llist+rlist), (lre+rre), (lnames+rnames)
class LexerReflect(object):
    """Collects and validates a lexer specification (tokens, literals,
    states, and t_* rules) gathered from a module/class dictionary.

    get_all() extracts everything; validate_all() checks it and returns
    True if any error was logged.
    """

    def __init__(self, ldict, log=None, reflags=0):
        self.ldict = ldict
        self.error_func = None
        self.tokens = []
        self.reflags = reflags
        self.stateinfo = {'INITIAL': 'inclusive'}
        self.modules = set()
        self.error = False
        self.log = PlyLogger(sys.stderr) if log is None else log

    # Get all of the basic information
    def get_all(self):
        self.get_tokens()
        self.get_literals()
        self.get_states()
        self.get_rules()

    # Validate all of the information
    def validate_all(self):
        self.validate_tokens()
        self.validate_literals()
        self.validate_rules()
        return self.error

    # Get the tokens map
    def get_tokens(self):
        tokens = self.ldict.get('tokens', None)
        if not tokens:
            self.log.error('No token list is defined')
            self.error = True
            return
        if not isinstance(tokens, (list, tuple)):
            self.log.error('tokens must be a list or tuple')
            self.error = True
            return
        # NOTE(review): this branch looks unreachable -- an empty tokens
        # sequence is already caught by the first `if not tokens` above.
        if not tokens:
            self.log.error('tokens is empty')
            self.error = True
            return
        self.tokens = tokens

    # Validate the tokens
    def validate_tokens(self):
        terminals = {}
        for n in self.tokens:
            if not _is_identifier.match(n):
                self.log.error("Bad token name '%s'", n)
                self.error = True
            if n in terminals:
                # Duplicate token names only warn; they do not fail the build.
                self.log.warning("Token '%s' multiply defined", n)
            terminals[n] = 1

    # Get the literals specifier
    def get_literals(self):
        self.literals = self.ldict.get('literals', '')
        if not self.literals:
            self.literals = ''

    # Validate literals
    def validate_literals(self):
        try:
            for c in self.literals:
                if not isinstance(c, StringTypes) or len(c) > 1:
                    self.log.error('Invalid literal %s. Must be a single character', repr(c))
                    self.error = True
        except TypeError:
            self.log.error('Invalid literals specification. literals must be a sequence of characters')
            self.error = True

    def get_states(self):
        self.states = self.ldict.get('states', None)
        # Build statemap
        if self.states:
            if not isinstance(self.states, (tuple, list)):
                self.log.error('states must be defined as a tuple or list')
                self.error = True
            else:
                for s in self.states:
                    if not isinstance(s, tuple) or len(s) != 2:
                        self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')", repr(s))
                        self.error = True
                        continue
                    name, statetype = s
                    if not isinstance(name, StringTypes):
                        self.log.error('State name %s must be a string', repr(name))
                        self.error = True
                        continue
                    if not (statetype == 'inclusive' or statetype == 'exclusive'):
                        self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name)
                        self.error = True
                        continue
                    if name in self.stateinfo:
                        self.log.error("State '%s' already defined", name)
                        self.error = True
                        continue
                    self.stateinfo[name] = statetype

    # Get all of the symbols with a t_ prefix and sort them into various
    # categories (functions, strings, error functions, and ignore characters)
    def get_rules(self):
        tsymbols = [f for f in self.ldict if f[:2] == 't_']
        # Now build up a list of functions and a list of strings
        self.toknames = {}        # Mapping of symbols to token names
        self.funcsym = {}         # Symbols defined as functions
        self.strsym = {}          # Symbols defined as strings
        self.ignore = {}          # Ignore strings by state
        self.errorf = {}          # Error functions by state
        self.eoff = {}            # EOF functions by state
        for s in self.stateinfo:
            self.funcsym[s] = []
            self.strsym[s] = []
        if len(tsymbols) == 0:
            self.log.error('No rules of the form t_rulename are defined')
            self.error = True
            return
        for f in tsymbols:
            t = self.ldict[f]
            states, tokname = _statetoken(f, self.stateinfo)
            self.toknames[f] = tokname
            if hasattr(t, '__call__'):
                if tokname == 'error':
                    for s in states:
                        self.errorf[s] = t
                elif tokname == 'eof':
                    for s in states:
                        self.eoff[s] = t
                elif tokname == 'ignore':
                    line = t.__code__.co_firstlineno
                    file = t.__code__.co_filename
                    self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__)
                    self.error = True
                else:
                    for s in states:
                        self.funcsym[s].append((f, t))
            elif isinstance(t, StringTypes):
                if tokname == 'ignore':
                    for s in states:
                        self.ignore[s] = t
                    if '\\' in t:
                        self.log.warning("%s contains a literal backslash '\\'", f)
                elif tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", f)
                    self.error = True
                else:
                    for s in states:
                        self.strsym[s].append((f, t))
            else:
                self.log.error('%s not defined as a function or string', f)
                self.error = True
        # Sort the functions by line number
        for f in self.funcsym.values():
            f.sort(key=lambda x: x[1].__code__.co_firstlineno)
        # Sort the strings by regular expression length
        for s in self.strsym.values():
            s.sort(key=lambda x: len(x[1]), reverse=True)

    # Validate all of the t_rules collected
    def validate_rules(self):
        for state in self.stateinfo:
            # Validate all rules defined by functions
            for fname, f in self.funcsym[state]:
                line = f.__code__.co_firstlineno
                file = f.__code__.co_filename
                module = inspect.getmodule(f)
                self.modules.add(module)
                tokname = self.toknames[fname]
                # Bound methods carry an extra 'self' argument.
                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1
                nargs = f.__code__.co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
                    self.error = True
                    continue
                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
                    self.error = True
                    continue
                if not _get_regex(f):
                    self.log.error("%s:%d: No regular expression defined for rule '%s'", file, line, f.__name__)
                    self.error = True
                    continue
                try:
                    c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), self.reflags)
                    if c.match(''):
                        self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__)
                        self.error = True
                except re.error as e:
                    self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file, line, f.__name__, e)
                    if '#' in _get_regex(f):
                        self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'", file, line, f.__name__)
                    self.error = True
            # Validate all rules defined by strings
            for name, r in self.strsym[state]:
                tokname = self.toknames[name]
                if tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", name)
                    self.error = True
                    continue
                if tokname not in self.tokens and tokname.find('ignore_') < 0:
                    self.log.error("Rule '%s' defined for an unspecified token %s", name, tokname)
                    self.error = True
                    continue
                try:
                    c = re.compile('(?P<%s>%s)' % (name, r), self.reflags)
                    if (c.match('')):
                        self.log.error("Regular expression for rule '%s' matches empty string", name)
                        self.error = True
                except re.error as e:
                    self.log.error("Invalid regular expression for rule '%s'. %s", name, e)
                    if '#' in r:
                        self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'", name)
                    self.error = True
            if not self.funcsym[state] and not self.strsym[state]:
                self.log.error("No rules defined for state '%s'", state)
                self.error = True
            # Validate the error function
            efunc = self.errorf.get(state, None)
            if efunc:
                f = efunc
                line = f.__code__.co_firstlineno
                file = f.__code__.co_filename
                module = inspect.getmodule(f)
                self.modules.add(module)
                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1
                nargs = f.__code__.co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
                    self.error = True
                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
                    self.error = True
        for module in self.modules:
            self.validate_module(module)

    # -----------------------------------------------------------------------------
    # validate_module()
    #
    # This checks to see if there are duplicated t_rulename() functions or strings
    # in the parser input file.  This is done using a simple regular expression
    # match on each line in the source code of the given module.
    # -----------------------------------------------------------------------------
    def validate_module(self, module):
        try:
            lines, linen = inspect.getsourcelines(module)
        except IOError:
            # Source not available (e.g. interactive session) -- nothing to check.
            return
        fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
        sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
        counthash = {}
        linen += 1
        for line in lines:
            m = fre.match(line)
            if not m:
                m = sre.match(line)
            if m:
                name = m.group(1)
                prev = counthash.get(name)
                if not prev:
                    counthash[name] = linen
                else:
                    filename = inspect.getsourcefile(module)
                    self.log.error('%s:%d: Rule %s redefined. Previously defined on line %d', filename, linen, name, prev)
                    self.error = True
            linen += 1
def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab',
        reflags=int(re.VERBOSE), nowarn=False, outputdir=None, debuglog=None, errorlog=None):
    """Build and return a Lexer from the t_* rules found in *module* or
    *object* (or in the caller's namespace if neither is given).

    In optimized mode a previously written lextab module is loaded when
    possible; otherwise the master regular expressions are rebuilt and,
    when lextab is set, written back out.  Also rebinds the module-level
    token()/input() shortcuts and the global `lexer`.
    """
    if lextab is None:
        lextab = 'lextab'
    global lexer
    ldict = None
    stateinfo = {'INITIAL': 'inclusive'}
    lexobj = Lexer()
    lexobj.lexoptimize = optimize
    global token, input
    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)
    if debug:
        if debuglog is None:
            debuglog = PlyLogger(sys.stderr)
    # Get the module dictionary used for the lexer
    if object:
        module = object
    # Get the module dictionary used for the parser
    if module:
        _items = [(k, getattr(module, k)) for k in dir(module)]
        ldict = dict(_items)
        # If no __file__ attribute is available, try to obtain it from the __module__ instead
        if '__file__' not in ldict:
            ldict['__file__'] = sys.modules[ldict['__module__']].__file__
    else:
        ldict = get_caller_module_dict(2)
    # Determine if the module is package of a package or not.
    # If so, fix the tabmodule setting so that tables load correctly
    pkg = ldict.get('__package__')
    if pkg and isinstance(lextab, str):
        if '.' not in lextab:
            lextab = pkg + '.' + lextab
    # Collect parser information from the dictionary
    linfo = LexerReflect(ldict, log=errorlog, reflags=reflags)
    linfo.get_all()
    if not optimize:
        if linfo.validate_all():
            raise SyntaxError("Can't build lexer")
    # Fast path: load precomputed tables; fall through to a full rebuild
    # if the lextab module is missing or version-incompatible.
    if optimize and lextab:
        try:
            lexobj.readtab(lextab, ldict)
            token = lexobj.token
            input = lexobj.input
            lexer = lexobj
            return lexobj
        except ImportError:
            pass
    # Dump some basic debugging information
    if debug:
        debuglog.info('lex: tokens = %r', linfo.tokens)
        debuglog.info('lex: literals = %r', linfo.literals)
        debuglog.info('lex: states = %r', linfo.stateinfo)
    # Build a dictionary of valid token names
    lexobj.lextokens = set()
    for n in linfo.tokens:
        lexobj.lextokens.add(n)
    # Get literals specification
    if isinstance(linfo.literals, (list, tuple)):
        lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
    else:
        lexobj.lexliterals = linfo.literals
    lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals)
    # Get the stateinfo dictionary
    stateinfo = linfo.stateinfo
    regexs = {}
    # Build the master regular expressions
    for state in stateinfo:
        regex_list = []
        # Add rules defined by functions first
        for fname, f in linfo.funcsym[state]:
            line = f.__code__.co_firstlineno
            file = f.__code__.co_filename
            regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f)))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state)
        # Now add all of the simple rules
        for name, r in linfo.strsym[state]:
            regex_list.append('(?P<%s>%s)' % (name, r))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state)
        regexs[state] = regex_list
    # Build the master regular expressions
    if debug:
        debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====')
    for state in regexs:
        lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames)
        lexobj.lexstatere[state] = lexre
        lexobj.lexstateretext[state] = re_text
        lexobj.lexstaterenames[state] = re_names
        if debug:
            for i, text in enumerate(re_text):
                debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text)
    # For inclusive states, we need to add the regular expressions from the INITIAL state
    for state, stype in stateinfo.items():
        if state != 'INITIAL' and stype == 'inclusive':
            lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
            lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
            lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
    lexobj.lexstateinfo = stateinfo
    lexobj.lexre = lexobj.lexstatere['INITIAL']
    lexobj.lexretext = lexobj.lexstateretext['INITIAL']
    lexobj.lexreflags = reflags
    # Set up ignore variables
    lexobj.lexstateignore = linfo.ignore
    lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '')
    # Set up error functions
    lexobj.lexstateerrorf = linfo.errorf
    lexobj.lexerrorf = linfo.errorf.get('INITIAL', None)
    if not lexobj.lexerrorf:
        errorlog.warning('No t_error rule is defined')
    # Set up eof functions
    lexobj.lexstateeoff = linfo.eoff
    lexobj.lexeoff = linfo.eoff.get('INITIAL', None)
    # Check state information for ignore and error rules
    for s, stype in stateinfo.items():
        if stype == 'exclusive':
            if s not in linfo.errorf:
                errorlog.warning("No error rule is defined for exclusive state '%s'", s)
            if s not in linfo.ignore and lexobj.lexignore:
                errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
        elif stype == 'inclusive':
            # Inclusive states inherit INITIAL's error/ignore handling.
            if s not in linfo.errorf:
                linfo.errorf[s] = linfo.errorf.get('INITIAL', None)
            if s not in linfo.ignore:
                linfo.ignore[s] = linfo.ignore.get('INITIAL', '')
    # Create global versions of the token() and input() functions
    token = lexobj.token
    input = lexobj.input
    lexer = lexobj
    # If in optimize mode, we write the lextab
    if lextab and optimize:
        if outputdir is None:
            # If no output directory is set, the location of the output files
            # is determined according to the following rules:
            #     - If lextab specifies a package, files go into that package directory
            #     - Otherwise, files go in the same directory as the specifying module
            if isinstance(lextab, types.ModuleType):
                srcfile = lextab.__file__
            else:
                if '.' not in lextab:
                    srcfile = ldict['__file__']
                else:
                    parts = lextab.split('.')
                    pkgname = '.'.join(parts[:-1])
                    exec('import %s' % pkgname)
                    srcfile = getattr(sys.modules[pkgname], '__file__', '')
            outputdir = os.path.dirname(srcfile)
        try:
            lexobj.writetab(lextab, outputdir)
        except IOError as e:
            errorlog.warning("Couldn't write lextab module %r. %s" % (lextab, e))
    return lexobj
171,734 | import re
import sys
import types
import copy
import os
import inspect
def runmain(lexer=None, data=None):
    """Simple command-line driver, mainly for testing: tokenize *data*
    (or the file named in sys.argv[1], or stdin) and print each token."""
    if not data:
        try:
            filename = sys.argv[1]
            f = open(filename)
            data = f.read()
            f.close()
        except IndexError:
            # No filename on the command line -- read from standard input.
            sys.stdout.write('Reading from standard input (type EOF to end):\n')
            data = sys.stdin.read()
    # Fall back to the module-level input()/token() bound by the most
    # recent lex() call when no explicit lexer is supplied.
    if lexer:
        _input = lexer.input
    else:
        _input = input
    _input(data)
    if lexer:
        _token = lexer.token
    else:
        _token = token
    while True:
        tok = _token()
        if not tok:
            break
        sys.stdout.write('(%s,%r,%d,%d)\n' % (tok.type, tok.value, tok.lineno, tok.lexpos))
171,735 | import re
import sys
import types
import copy
import os
import inspect
def _get_regex(func):
return getattr(func, 'regex', func.__doc__)
def TOKEN(r):
    """Decorator attaching regex *r* to a token rule function.

    r may be a pattern string, or another rule function whose regex is
    reused.
    """
    def set_regex(f):
        # A callable argument means "reuse that rule's regex".
        f.regex = _get_regex(r) if hasattr(r, '__call__') else r
        return f
    return set_regex
171,736 | import os.path
import shutil
def get_source_range(lines, tag):
    """Locate the region delimited by '#--! <tag>-start' / '#--! <tag>-end'
    markers and return (start, end) indices: start is the line just after
    the start marker, end is the index of the end-marker line."""
    numbered = enumerate(lines)
    begin_marker = '#--! %s-start' % tag
    finish_marker = '#--! %s-end' % tag
    # Both loops consume the same iterator, so the end marker is only
    # searched for after the start marker has been found.
    for start_index, text in numbered:
        if text.strip().startswith(begin_marker):
            break
    for end_index, text in numbered:
        if text.strip().endswith(finish_marker):
            break
    return (start_index + 1, end_index)
171,737 | import os.path
import shutil
def filter_section(lines, tag):
    """Return *lines* with every region delimited by '#--! <tag>' marker
    lines removed (the marker lines themselves are dropped too)."""
    marker = '#--! %s' % tag
    keep = True
    result = []
    for text in lines:
        if text.strip().startswith(marker):
            # Marker lines toggle inclusion on/off and are never emitted.
            keep = not keep
        elif keep:
            result.append(text)
    return result
171,738 | import sys
import re
import copy
import time
import os.path
The provided code snippet includes necessary dependencies for implementing the `t_CPP_WS` function. Write a Python function `def t_CPP_WS(t)` to solve the following problem:
r'\s+'
Here is the function:
def t_CPP_WS(t):
    r'\s+'
    # NOTE: the docstring above IS the token regex (PLY convention);
    # it must not be replaced with documentation.
    # Keep line numbering accurate across multi-line whitespace runs.
    t.lexer.lineno += t.value.count("\n")
    return t
171,739 | import sys
import re
import copy
import time
import os.path
The provided code snippet includes necessary dependencies for implementing the `CPP_INTEGER` function. Write a Python function `def CPP_INTEGER(t)` to solve the following problem:
r'(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU][lL]|[lL][uU]|[uU]|[lL])?)
Here is the function:
def CPP_INTEGER(t):
    r'(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU][lL]|[lL][uU]|[uU]|[lL])?)'
    # Docstring is the token regex: hex (0x/0X) or decimal integer with
    # optional u/l suffixes in either order.
    return t
171,740 | import sys
import re
import copy
import time
import os.path
The provided code snippet includes necessary dependencies for implementing the `t_CPP_STRING` function. Write a Python function `def t_CPP_STRING(t)` to solve the following problem:
r'\"([^\\\n]|(\\(.|\n)))*?\"
Here is the function:
def t_CPP_STRING(t):
    r'\"([^\\\n]|(\\(.|\n)))*?\"'
    # Docstring is the token regex: double-quoted string, allowing
    # escaped characters including escaped newlines.
    # Escaped newlines inside the literal still advance the line count.
    t.lexer.lineno += t.value.count("\n")
    return t
171,741 | import sys
import re
import copy
import time
import os.path
The provided code snippet includes necessary dependencies for implementing the `t_CPP_CHAR` function. Write a Python function `def t_CPP_CHAR(t)` to solve the following problem:
r'(L)?\'([^\\\n]|(\\(.|\n)))*?\
Here is the function:
def t_CPP_CHAR(t):
    r'(L)?\'([^\\\n]|(\\(.|\n)))*?\''
    # Docstring is the token regex: (optionally wide, L-prefixed)
    # character literal with escapes.
    t.lexer.lineno += t.value.count("\n")
    return t
171,742 | import sys
import re
import copy
import time
import os.path
The provided code snippet includes necessary dependencies for implementing the `t_CPP_COMMENT1` function. Write a Python function `def t_CPP_COMMENT1(t)` to solve the following problem:
r'(/\*(.|\n)*?\*/)'
Here is the function:
def t_CPP_COMMENT1(t):
    r'(/\*(.|\n)*?\*/)'
    # Docstring is the token regex: non-greedy C-style /* ... */ comment.
    ncr = t.value.count("\n")
    t.lexer.lineno += ncr
    # replace with one space or a number of '\n' so spacing and line
    # numbering survive comment removal
    t.type = 'CPP_WS'; t.value = '\n' * ncr if ncr else ' '
    return t
171,743 | import sys
import re
import copy
import time
import os.path
The provided code snippet includes necessary dependencies for implementing the `t_CPP_COMMENT2` function. Write a Python function `def t_CPP_COMMENT2(t)` to solve the following problem:
r'(//.*?(\n|$))'
Here is the function:
def t_CPP_COMMENT2(t):
    r'(//.*?(\n|$))'
    # Docstring is the token regex: C++ line comment up to newline or EOF.
    # replace with '/n' so line numbering is preserved
    t.type = 'CPP_WS'; t.value = '\n'
    return t
171,744 | import sys
import re
import copy
import time
import os.path
def t_error(t):
    """Fallback rule: pass the offending character through as a literal
    token whose type equals the character itself, then resume scanning."""
    ch = t.value[0]
    t.type = ch
    t.value = ch
    t.lexer.skip(1)
    return t
171,745 | import sys
import re
import copy
import time
import os.path
# ANSI C trigraphs: '??' followed by one of these characters stands for
# the single replacement character on the right.
_trigraph_rep = {
    '=': '#',
    '/': '\\',
    "'": '^',
    '(': '[',
    ')': ']',
    '!': '|',
    '<': '{',
    '>': '}',
    '-': '~',
}
_trigraph_pat = re.compile(r'''\?\?[=/\'\(\)\!<>\-]''')

def trigraph(input):
    """Replace every ANSI C trigraph sequence in *input* with its
    single-character equivalent."""
    def _substitute(match):
        # The last character of the '??x' match selects the replacement.
        return _trigraph_rep[match.group()[-1]]
    return _trigraph_pat.sub(_substitute, input)
171,746 | import re
import types
import sys
import os.path
import inspect
import base64
import warnings
# Truncate repr() output beyond this many characters in debug messages.
resultlimit = 40

def format_result(r):
    """Format a parser result for debug logging: type name, object id and
    a single-line, possibly truncated repr."""
    shown = repr(r)
    if '\n' in shown:
        # Re-escape embedded newlines so log output stays on one line.
        shown = repr(shown)
    if len(shown) > resultlimit:
        shown = shown[:resultlimit] + ' ...'
    return '<%s @ 0x%x> (%s)' % (type(r).__name__, id(r), shown)
171,747 | import re
import types
import sys
import os.path
import inspect
import base64
import warnings
def format_stack_entry(r):
    """Format a parser stack entry for debug logging: short reprs are
    shown verbatim, long ones as a type/id placeholder."""
    shown = repr(r)
    if '\n' in shown:
        shown = repr(shown)
    if len(shown) < 16:
        return shown
    return '<%s @ 0x%x>' % (type(r).__name__, id(r))
171,748 | import re
import types
import sys
import os.path
import inspect
import base64
import warnings
# Published by call_errorfunc() so that legacy global-style p_error()
# handlers can still reach the active parser's token() method.
_token = None

_warnmsg = '''PLY: Don't use global functions errok(), token(), and restart() in p_error().
Instead, invoke the methods on the associated parser instance:
def p_error(p):
...
# Use parser.errok(), parser.token(), parser.restart()
...
parser = yacc.yacc()
'''

def token():
    """Deprecated module-level token(): warn, then forward to the active
    parser's token() method (published in the _token global)."""
    warnings.warn(_warnmsg)
    return _token()
171,749 | import re
import types
import sys
import os.path
import inspect
import base64
import warnings
# Published by call_errorfunc() so that legacy global-style p_error()
# handlers can still call errok()/token()/restart() as module functions.
_errok = None
_token = None
_restart = None

def errok():
    """Deprecated module-level errok(): warn, then forward to the active
    parser's errok() method.

    Bug fix: this def (and restart below) had no body in the source,
    which is a syntax error; bodies restored to match the token()
    forwarder pattern used elsewhere in this module.
    """
    warnings.warn(_warnmsg)
    return _errok()

def restart():
    """Deprecated module-level restart(): warn, then forward to the
    active parser's restart() method."""
    warnings.warn(_warnmsg)
    return _restart()

def call_errorfunc(errorfunc, token, parser):
    """Invoke the user's p_error() handler.

    Temporarily publishes the parser's errok/token/restart bound methods
    as module globals so legacy handlers that call the global functions
    still work, then deletes them again.  Returns whatever the handler
    returned.
    """
    global _errok, _token, _restart
    _errok = parser.errok
    _token = parser.token
    _restart = parser.restart
    r = errorfunc(token)
    try:
        del _errok, _token, _restart
    except NameError:
        pass
    return r
171,750 | import re
import types
import sys
import os.path
import inspect
import base64
import warnings
def rightmost_terminal(symbols, terminals):
    """Return the rightmost symbol in *symbols* that is a terminal
    (i.e. appears in *terminals*), or None if there is none."""
    for sym in reversed(symbols):
        if sym in terminals:
            return sym
    return None
171,751 | import re
import types
import sys
import os.path
import inspect
import base64
import warnings
def traverse(x, N, stack, F, X, R, FP):
    """One step of the DeRemer/Pennello digraph traversal used by
    digraph(): a Tarjan-style SCC walk that propagates the FP sets of
    all nodes reachable from x (via relation R) into F[x]."""
    stack.append(x)
    d = len(stack)
    N[x] = d
    F[x] = FP(x)             # F(X) <- F'(x)
    rel = R(x)               # Get y's related to x
    for y in rel:
        if N[y] == 0:
            traverse(y, N, stack, F, X, R, FP)
        N[x] = min(N[x], N[y])
        # Merge F(y) into F(x), preserving order and uniqueness.
        for a in F.get(y, []):
            if a not in F[x]:
                F[x].append(a)
    if N[x] == d:
        # x is the root of a strongly connected component: every member
        # of the SCC receives the same F set and is marked finished.
        N[stack[-1]] = MAXINT
        F[stack[-1]] = F[x]
        element = stack.pop()
        while element != x:
            N[stack[-1]] = MAXINT
            F[stack[-1]] = F[x]
            element = stack.pop()
def digraph(X, R, FP):
    """Compute, for every x in X, the union of FP(y) over all y reachable
    from x via relation R (the DeRemer & Pennello digraph algorithm used
    for LALR lookahead computation).  Returns the mapping F."""
    N = {}
    for x in X:
        N[x] = 0
    stack = []
    F = {}
    for x in X:
        if N[x] == 0:
            traverse(x, N, stack, F, X, R, FP)
    return F
171,752 | import re
import types
import sys
import os.path
import inspect
import base64
import warnings
def parse_grammar(doc, file, line):
    """Parse a grammar specification string (a p_* rule docstring) into a
    list of (file, line, production-name, symbol-list) tuples.

    Lines beginning with '|' continue the most recent production.
    Raises SyntaxError for malformed rules; *file* and *line* are used
    only for error reporting and the recorded positions.
    """
    grammar = []
    lastp = None
    dline = line
    for raw in doc.splitlines():
        dline += 1
        parts = raw.split()
        if not parts:
            continue
        try:
            if parts[0] == '|':
                # This is a continuation of a previous rule
                if not lastp:
                    raise SyntaxError("%s:%d: Misplaced '|'" % (file, dline))
                prodname = lastp
                syms = parts[1:]
            else:
                prodname = parts[0]
                lastp = prodname
                syms = parts[2:]
                assign = parts[1]
                if assign != ':' and assign != '::=':
                    raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file, dline))
            grammar.append((file, dline, prodname, syms))
        except SyntaxError:
            raise
        except Exception:
            # e.g. IndexError on a one-token line -- report it uniformly.
            raise SyntaxError('%s:%d: Syntax error in rule %r' % (file, dline, raw.strip()))
    return grammar
171,753 | import re
import types
import sys
import os.path
import inspect
import base64
import warnings
__version__ = '3.10'
yaccdebug = True             # Default: emit debugging info / parser.out
debug_file = 'parser.out'    # Default name of the debugging description file
tab_module = 'parsetab'      # Default name of the generated parser table module
# str/basestring compatibility shim for Python 2 vs 3.
if sys.version_info[0] < 3:
    string_types = basestring
else:
    string_types = str
class PlyLogger(object):
    """Minimal logger that writes %-formatted messages to a file-like
    object.  debug/info/critical share one implementation; warning and
    error prepend a severity prefix."""

    def __init__(self, f):
        self.f = f

    def debug(self, msg, *args, **kwargs):
        self.f.write((msg % args) + '\n')

    def warning(self, msg, *args, **kwargs):
        self.f.write('WARNING: ' + (msg % args) + '\n')

    def error(self, msg, *args, **kwargs):
        self.f.write('ERROR: ' + (msg % args) + '\n')

    # Aliases: these levels carry no prefix.
    info = debug
    critical = debug
class NullLogger(object):
    """Logger that swallows everything: any attribute access or call
    returns the logger itself, so arbitrary chained use is a no-op."""
    def __getattribute__(self, name):
        return self

    def __call__(self, *args, **kwargs):
        return self
class YaccError(Exception):
    """Exception raised for errors encountered while building the parser."""
    pass
class LRParser:
def __init__(self, lrtab, errorf):
    # lrtab: generated LR table object providing productions and the
    # action/goto tables; errorf: the user's p_error handler (or None).
    self.productions = lrtab.lr_productions
    self.action = lrtab.lr_action
    self.goto = lrtab.lr_goto
    self.errorfunc = errorf
    self.set_defaulted_states()
    self.errorok = True
def errok(self):
    # Called by user error handlers to signal successful error recovery.
    self.errorok = True
def restart(self):
    """Reset the parser stacks to the initial parse configuration."""
    del self.statestack[:]
    del self.symstack[:]
    # Seed the stacks with the end-marker symbol and state 0, exactly as
    # at the start of a parse.
    sym = YaccSymbol()
    sym.type = '$end'
    self.symstack.append(sym)
    self.statestack.append(0)
# Defaulted state support.
# This method identifies parser states where there is only one possible reduction action.
# For such states, the parser can make a choose to make a rule reduction without consuming
# the next look-ahead token. This delayed invocation of the tokenizer can be useful in
# certain kinds of advanced parsing situations where the lexer and parser interact with
# each other or change states (i.e., manipulation of scope, lexer states, etc.).
#
# See: https://www.gnu.org/software/bison/manual/html_node/Default-Reductions.html#Default-Reductions
def set_defaulted_states(self):
self.defaulted_states = {}
for state, actions in self.action.items():
rules = list(actions.values())
if len(rules) == 1 and rules[0] < 0:
self.defaulted_states[state] = rules[0]
    def disable_defaulted_states(self):
        """Turn off the defaulted-state optimization (see set_defaulted_states)."""
        self.defaulted_states = {}
    def parse(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
        """Parse input and return the value of the start production.

        Dispatches to one of three engine variants: the instrumented
        parsedebug() when debugging, parseopt() when position tracking is
        requested, and the fully optimized parseopt_notrack() otherwise.
        A truthy integer `debug` is replaced by a PlyLogger on stderr; a
        logger-like object passed as `debug` is used as-is.
        """
        # NOTE(review): this reads a module-level `yaccdevel` flag that is
        # not defined in the visible portion of this module -- confirm it
        # is set elsewhere, otherwise this raises NameError.
        if debug or yaccdevel:
            if isinstance(debug, int):
                debug = PlyLogger(sys.stderr)
            return self.parsedebug(input, lexer, debug, tracking, tokenfunc)
        elif tracking:
            return self.parseopt(input, lexer, debug, tracking, tokenfunc)
        else:
            return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parsedebug().
#
# This is the debugging enabled version of parse(). All changes made to the
# parsing engine should be made here. Optimized versions of this function
# are automatically created by the ply/ygen.py script. This script cuts out
# sections enclosed in markers such as this:
#
# #--! DEBUG
# statements
# #--! DEBUG
#
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    def parsedebug(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
        """Debug-instrumented LALR parsing engine.

        This is the master copy of the engine: parseopt() and
        parseopt_notrack() are machine-generated from this method by
        ply/ygen.py, which strips the sections bracketed by #--! DEBUG and
        #--! TRACKING markers.  Behavioral changes belong here.

        input     -- string to feed to the lexer, or None
        lexer     -- lexer object; defaults to the module-level lex lexer
        debug     -- PlyLogger-like object used for trace output
        tracking  -- if true, propagate line/position info onto productions
        tokenfunc -- alternative token-supplying callable
        """
        #--! parsedebug-start
        lookahead = None                         # Current lookahead symbol
        lookaheadstack = []                      # Stack of lookahead symbols
        actions = self.action                    # Local reference to action table (to avoid lookup on self.)
        goto = self.goto                         # Local reference to goto table (to avoid lookup on self.)
        prod = self.productions                  # Local reference to production list (to avoid lookup on self.)
        defaulted_states = self.defaulted_states # Local reference to defaulted states
        pslice = YaccProduction(None)            # Production object passed to grammar rules
        errorcount = 0                           # Used during error recovery
        #--! DEBUG
        debug.info('PLY: PARSE DEBUG START')
        #--! DEBUG
        # If no lexer was given, we will try to use the lex module
        if not lexer:
            from . import lex
            lexer = lex.lexer
        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self
        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)
        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc
        # Set the parser() token method (sometimes used in error recovery)
        self.token = get_token
        # Set up the state and symbol stacks
        statestack = []                # Stack of parsing states
        self.statestack = statestack
        symstack = []                  # Stack of grammar symbols
        self.symstack = symstack
        pslice.stack = symstack        # Put in the production
        errtoken = None                # Err token
        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while True:
            # Get the next symbol on the input. If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer
            #--! DEBUG
            debug.debug('')
            debug.debug('State : %s', state)
            #--! DEBUG
            if state not in defaulted_states:
                if not lookahead:
                    if not lookaheadstack:
                        lookahead = get_token()     # Get the next token
                    else:
                        lookahead = lookaheadstack.pop()
                    if not lookahead:
                        lookahead = YaccSymbol()
                        lookahead.type = '$end'
                # Check the action table
                ltype = lookahead.type
                t = actions[state].get(ltype)
            else:
                t = defaulted_states[state]
                #--! DEBUG
                debug.debug('Defaulted state %s: Reduce using %d', state, -t)
                #--! DEBUG
            #--! DEBUG
            debug.debug('Stack : %s',
                        ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
            #--! DEBUG
            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t
                    #--! DEBUG
                    debug.debug('Action : Shift and goto state %s', t)
                    #--! DEBUG
                    symstack.append(lookahead)
                    lookahead = None
                    # Decrease error count on successful shift
                    if errorcount:
                        errorcount -= 1
                    continue
                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen = p.len
                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname       # Production name
                    sym.value = None
                    #--! DEBUG
                    if plen:
                        debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str,
                                   '['+','.join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+']',
                                   goto[statestack[-1-plen]][pname])
                    else:
                        debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, [],
                                   goto[statestack[-1]][pname])
                    #--! DEBUG
                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym
                        #--! TRACKING
                        if tracking:
                            t1 = targ[1]
                            sym.lineno = t1.lineno
                            sym.lexpos = t1.lexpos
                            t1 = targ[-1]
                            sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
                            sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
                        #--! TRACKING
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization. Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            self.state = state
                            p.callable(pslice)
                            del statestack[-plen:]
                            #--! DEBUG
                            debug.info('Result : %s', format_result(pslice[0]))
                            #--! DEBUG
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)    # Save the current lookahead token
                            symstack.extend(targ[1:-1])         # Put the production slice back on the stack
                            statestack.pop()                    # Pop back one state (before the reduce)
                            state = statestack[-1]
                            sym.type = 'error'
                            sym.value = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                    else:
                        #--! TRACKING
                        if tracking:
                            sym.lineno = lexer.lineno
                            sym.lexpos = lexer.lexpos
                        #--! TRACKING
                        targ = [sym]
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization. Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            self.state = state
                            p.callable(pslice)
                            #--! DEBUG
                            debug.info('Result : %s', format_result(pslice[0]))
                            #--! DEBUG
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)    # Save the current lookahead token
                            statestack.pop()                    # Pop back one state (before the reduce)
                            state = statestack[-1]
                            sym.type = 'error'
                            sym.value = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                if t == 0:
                    n = symstack[-1]
                    result = getattr(n, 'value', None)
                    #--! DEBUG
                    debug.info('Done : Returning %s', format_result(result))
                    debug.info('PLY: PARSE DEBUG END')
                    #--! DEBUG
                    return result
            if t is None:
                #--! DEBUG
                debug.error('Error : %s',
                            ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
                #--! DEBUG
                # We have some kind of parsing error here. To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call
                # the user defined p_error() function if this is the
                # first syntax error. This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = False
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None               # End of file!
                    if self.errorfunc:
                        if errtoken and not hasattr(errtoken, 'lexer'):
                            errtoken.lexer = lexer
                        self.state = state
                        tok = call_errorfunc(self.errorfunc, errtoken, self)
                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own. The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken, 'lineno'):
                                lineno = lookahead.lineno
                            else:
                                lineno = 0
                            if lineno:
                                sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
                            else:
                                sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
                        else:
                            sys.stderr.write('yacc: Parse error in input. EOF\n')
                        return
                else:
                    errorcount = error_count
                # case 1: the statestack only has 1 entry on it. If we're in this state, the
                # entire parse has been rolled back and we're completely hosed. The token is
                # discarded and we just keep going.
                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue
                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token
                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return
                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        #--! TRACKING
                        if tracking:
                            sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
                            sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
                        #--! TRACKING
                        lookahead = None
                        continue
                    # Create the error symbol for the first time and make it the new lookahead symbol
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead, 'lineno'):
                        t.lineno = t.endlineno = lookahead.lineno
                    if hasattr(lookahead, 'lexpos'):
                        t.lexpos = t.endlexpos = lookahead.lexpos
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    sym = symstack.pop()
                    #--! TRACKING
                    if tracking:
                        lookahead.lineno = sym.lineno
                        lookahead.lexpos = sym.lexpos
                    #--! TRACKING
                    statestack.pop()
                    state = statestack[-1]
                continue
            # Call an error function here
            raise RuntimeError('yacc: internal parser error!!!\n')
        #--! parsedebug-end
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt().
#
# Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY!
# This code is automatically generated by the ply/ygen.py script. Make
# changes to the parsedebug() method instead.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    def parseopt(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
        """Optimized parsing engine with position tracking enabled.

        Machine-generated from parsedebug() by ply/ygen.py with the
        #--! DEBUG sections removed.  DO NOT edit directly; change
        parsedebug() instead.
        """
        #--! parseopt-start
        lookahead = None                         # Current lookahead symbol
        lookaheadstack = []                      # Stack of lookahead symbols
        actions = self.action                    # Local reference to action table (to avoid lookup on self.)
        goto = self.goto                         # Local reference to goto table (to avoid lookup on self.)
        prod = self.productions                  # Local reference to production list (to avoid lookup on self.)
        defaulted_states = self.defaulted_states # Local reference to defaulted states
        pslice = YaccProduction(None)            # Production object passed to grammar rules
        errorcount = 0                           # Used during error recovery
        # If no lexer was given, we will try to use the lex module
        if not lexer:
            from . import lex
            lexer = lex.lexer
        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self
        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)
        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc
        # Set the parser() token method (sometimes used in error recovery)
        self.token = get_token
        # Set up the state and symbol stacks
        statestack = []                # Stack of parsing states
        self.statestack = statestack
        symstack = []                  # Stack of grammar symbols
        self.symstack = symstack
        pslice.stack = symstack        # Put in the production
        errtoken = None                # Err token
        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while True:
            # Get the next symbol on the input. If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer
            if state not in defaulted_states:
                if not lookahead:
                    if not lookaheadstack:
                        lookahead = get_token()     # Get the next token
                    else:
                        lookahead = lookaheadstack.pop()
                    if not lookahead:
                        lookahead = YaccSymbol()
                        lookahead.type = '$end'
                # Check the action table
                ltype = lookahead.type
                t = actions[state].get(ltype)
            else:
                t = defaulted_states[state]
            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t
                    symstack.append(lookahead)
                    lookahead = None
                    # Decrease error count on successful shift
                    if errorcount:
                        errorcount -= 1
                    continue
                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen = p.len
                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname       # Production name
                    sym.value = None
                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym
                        #--! TRACKING
                        if tracking:
                            t1 = targ[1]
                            sym.lineno = t1.lineno
                            sym.lexpos = t1.lexpos
                            t1 = targ[-1]
                            sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
                            sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
                        #--! TRACKING
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization. Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            self.state = state
                            p.callable(pslice)
                            del statestack[-plen:]
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)    # Save the current lookahead token
                            symstack.extend(targ[1:-1])         # Put the production slice back on the stack
                            statestack.pop()                    # Pop back one state (before the reduce)
                            state = statestack[-1]
                            sym.type = 'error'
                            sym.value = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                    else:
                        #--! TRACKING
                        if tracking:
                            sym.lineno = lexer.lineno
                            sym.lexpos = lexer.lexpos
                        #--! TRACKING
                        targ = [sym]
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization. Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            self.state = state
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)    # Save the current lookahead token
                            statestack.pop()                    # Pop back one state (before the reduce)
                            state = statestack[-1]
                            sym.type = 'error'
                            sym.value = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                if t == 0:
                    n = symstack[-1]
                    result = getattr(n, 'value', None)
                    return result
            if t is None:
                # We have some kind of parsing error here. To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call
                # the user defined p_error() function if this is the
                # first syntax error. This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = False
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None               # End of file!
                    if self.errorfunc:
                        if errtoken and not hasattr(errtoken, 'lexer'):
                            errtoken.lexer = lexer
                        self.state = state
                        tok = call_errorfunc(self.errorfunc, errtoken, self)
                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own. The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken, 'lineno'):
                                lineno = lookahead.lineno
                            else:
                                lineno = 0
                            if lineno:
                                sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
                            else:
                                sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
                        else:
                            sys.stderr.write('yacc: Parse error in input. EOF\n')
                        return
                else:
                    errorcount = error_count
                # case 1: the statestack only has 1 entry on it. If we're in this state, the
                # entire parse has been rolled back and we're completely hosed. The token is
                # discarded and we just keep going.
                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue
                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token
                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return
                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        #--! TRACKING
                        if tracking:
                            sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
                            sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
                        #--! TRACKING
                        lookahead = None
                        continue
                    # Create the error symbol for the first time and make it the new lookahead symbol
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead, 'lineno'):
                        t.lineno = t.endlineno = lookahead.lineno
                    if hasattr(lookahead, 'lexpos'):
                        t.lexpos = t.endlexpos = lookahead.lexpos
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    sym = symstack.pop()
                    #--! TRACKING
                    if tracking:
                        lookahead.lineno = sym.lineno
                        lookahead.lexpos = sym.lexpos
                    #--! TRACKING
                    statestack.pop()
                    state = statestack[-1]
                continue
            # Call an error function here
            raise RuntimeError('yacc: internal parser error!!!\n')
        #--! parseopt-end
#--! parseopt-end
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt_notrack().
#
# Optimized version of parseopt() with line number tracking removed.
# DO NOT EDIT THIS CODE DIRECTLY. This code is automatically generated
# by the ply/ygen.py script. Make changes to the parsedebug() method instead.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    def parseopt_notrack(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
        """Fully optimized parsing engine (no debug, no position tracking).

        Machine-generated from parsedebug() by ply/ygen.py with both the
        #--! DEBUG and #--! TRACKING sections removed.  DO NOT edit
        directly; change parsedebug() instead.
        """
        #--! parseopt-notrack-start
        lookahead = None                         # Current lookahead symbol
        lookaheadstack = []                      # Stack of lookahead symbols
        actions = self.action                    # Local reference to action table (to avoid lookup on self.)
        goto = self.goto                         # Local reference to goto table (to avoid lookup on self.)
        prod = self.productions                  # Local reference to production list (to avoid lookup on self.)
        defaulted_states = self.defaulted_states # Local reference to defaulted states
        pslice = YaccProduction(None)            # Production object passed to grammar rules
        errorcount = 0                           # Used during error recovery
        # If no lexer was given, we will try to use the lex module
        if not lexer:
            from . import lex
            lexer = lex.lexer
        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self
        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)
        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc
        # Set the parser() token method (sometimes used in error recovery)
        self.token = get_token
        # Set up the state and symbol stacks
        statestack = []                # Stack of parsing states
        self.statestack = statestack
        symstack = []                  # Stack of grammar symbols
        self.symstack = symstack
        pslice.stack = symstack        # Put in the production
        errtoken = None                # Err token
        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while True:
            # Get the next symbol on the input. If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer
            if state not in defaulted_states:
                if not lookahead:
                    if not lookaheadstack:
                        lookahead = get_token()     # Get the next token
                    else:
                        lookahead = lookaheadstack.pop()
                    if not lookahead:
                        lookahead = YaccSymbol()
                        lookahead.type = '$end'
                # Check the action table
                ltype = lookahead.type
                t = actions[state].get(ltype)
            else:
                t = defaulted_states[state]
            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t
                    symstack.append(lookahead)
                    lookahead = None
                    # Decrease error count on successful shift
                    if errorcount:
                        errorcount -= 1
                    continue
                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen = p.len
                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname       # Production name
                    sym.value = None
                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization. Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            self.state = state
                            p.callable(pslice)
                            del statestack[-plen:]
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)    # Save the current lookahead token
                            symstack.extend(targ[1:-1])         # Put the production slice back on the stack
                            statestack.pop()                    # Pop back one state (before the reduce)
                            state = statestack[-1]
                            sym.type = 'error'
                            sym.value = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                    else:
                        targ = [sym]
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization. Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            self.state = state
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)    # Save the current lookahead token
                            statestack.pop()                    # Pop back one state (before the reduce)
                            state = statestack[-1]
                            sym.type = 'error'
                            sym.value = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                if t == 0:
                    n = symstack[-1]
                    result = getattr(n, 'value', None)
                    return result
            if t is None:
                # We have some kind of parsing error here. To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call
                # the user defined p_error() function if this is the
                # first syntax error. This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = False
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None               # End of file!
                    if self.errorfunc:
                        if errtoken and not hasattr(errtoken, 'lexer'):
                            errtoken.lexer = lexer
                        self.state = state
                        tok = call_errorfunc(self.errorfunc, errtoken, self)
                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own. The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken, 'lineno'):
                                lineno = lookahead.lineno
                            else:
                                lineno = 0
                            if lineno:
                                sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
                            else:
                                sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
                        else:
                            sys.stderr.write('yacc: Parse error in input. EOF\n')
                        return
                else:
                    errorcount = error_count
                # case 1: the statestack only has 1 entry on it. If we're in this state, the
                # entire parse has been rolled back and we're completely hosed. The token is
                # discarded and we just keep going.
                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue
                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token
                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return
                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        lookahead = None
                        continue
                    # Create the error symbol for the first time and make it the new lookahead symbol
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead, 'lineno'):
                        t.lineno = t.endlineno = lookahead.lineno
                    if hasattr(lookahead, 'lexpos'):
                        t.lexpos = t.endlexpos = lookahead.lexpos
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    sym = symstack.pop()
                    statestack.pop()
                    state = statestack[-1]
                continue
            # Call an error function here
            raise RuntimeError('yacc: internal parser error!!!\n')
        #--! parseopt-notrack-end
#--! parseopt-notrack-end
class GrammarError(YaccError):
    """Raised for problems detected in the grammar specification."""
    pass
class Grammar(object):
    def __init__(self, terminals):
        """Create an empty grammar over the given terminal symbol names.

        The reserved 'error' terminal is always added.  Productions,
        FIRST/FOLLOW sets and precedence tables start out empty and are
        filled in by add_production(), set_precedence() and set_start().
        """
        self.Productions = [None]  # A list of all of the productions. The first
                                   # entry is always reserved for the purpose of
                                   # building an augmented grammar
        self.Prodnames = {}        # A dictionary mapping the names of nonterminals to a list of all
                                   # productions of that nonterminal.
        self.Prodmap = {}          # A dictionary that is only used to detect duplicate
                                   # productions.
        self.Terminals = {}        # A dictionary mapping the names of terminal symbols to a
                                   # list of the rules where they are used.
        for term in terminals:
            self.Terminals[term] = []
        self.Terminals['error'] = []
        self.Nonterminals = {}     # A dictionary mapping names of nonterminals to a list
                                   # of rule numbers where they are used.
        self.First = {}            # A dictionary of precomputed FIRST(x) symbols
        self.Follow = {}           # A dictionary of precomputed FOLLOW(x) symbols
        self.Precedence = {}       # Precedence rules for each terminal. Contains tuples of the
                                   # form ('right',level) or ('nonassoc', level) or ('left',level)
        self.UsedPrecedence = set() # Precedence rules that were actually used by the grammar.
                                    # This is only used to provide error checking and to generate
                                    # a warning about unused precedence rules.
        self.Start = None          # Starting symbol for the grammar
    def __len__(self):
        """Number of productions, including the reserved augmented slot 0."""
        return len(self.Productions)
    def __getitem__(self, index):
        """Return the Production with the given production number."""
        return self.Productions[index]
# -----------------------------------------------------------------------------
# set_precedence()
#
# Sets the precedence for a given terminal. assoc is the associativity such as
# 'left','right', or 'nonassoc'. level is a numeric level.
#
# -----------------------------------------------------------------------------
def set_precedence(self, term, assoc, level):
assert self.Productions == [None], 'Must call set_precedence() before add_production()'
if term in self.Precedence:
raise GrammarError('Precedence already specified for terminal %r' % term)
if assoc not in ['left', 'right', 'nonassoc']:
raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
self.Precedence[term] = (assoc, level)
# -----------------------------------------------------------------------------
# add_production()
#
# Given an action function, this function assembles a production rule and
# computes its precedence level.
#
# The production rule is supplied as a list of symbols. For example,
# a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
# symbols ['expr','PLUS','term'].
#
# Precedence is determined by the precedence of the right-most non-terminal
# or the precedence of a terminal specified by %prec.
#
# A variety of error checks are performed to make sure production symbols
# are valid and that %prec is used correctly.
# -----------------------------------------------------------------------------
def add_production(self, prodname, syms, func=None, file='', line=0):
if prodname in self.Terminals:
raise GrammarError('%s:%d: Illegal rule name %r. Already defined as a token' % (file, line, prodname))
if prodname == 'error':
raise GrammarError('%s:%d: Illegal rule name %r. error is a reserved word' % (file, line, prodname))
if not _is_identifier.match(prodname):
raise GrammarError('%s:%d: Illegal rule name %r' % (file, line, prodname))
# Look for literal tokens
for n, s in enumerate(syms):
if s[0] in "'\"":
try:
c = eval(s)
if (len(c) > 1):
raise GrammarError('%s:%d: Literal token %s in rule %r may only be a single character' %
(file, line, s, prodname))
if c not in self.Terminals:
self.Terminals[c] = []
syms[n] = c
continue
except SyntaxError:
pass
if not _is_identifier.match(s) and s != '%prec':
raise GrammarError('%s:%d: Illegal name %r in rule %r' % (file, line, s, prodname))
# Determine the precedence level
if '%prec' in syms:
if syms[-1] == '%prec':
raise GrammarError('%s:%d: Syntax error. Nothing follows %%prec' % (file, line))
if syms[-2] != '%prec':
raise GrammarError('%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule' %
(file, line))
precname = syms[-1]
prodprec = self.Precedence.get(precname)
if not prodprec:
raise GrammarError('%s:%d: Nothing known about the precedence of %r' % (file, line, precname))
else:
self.UsedPrecedence.add(precname)
del syms[-2:] # Drop %prec from the rule
else:
# If no %prec, precedence is determined by the rightmost terminal symbol
precname = rightmost_terminal(syms, self.Terminals)
prodprec = self.Precedence.get(precname, ('right', 0))
# See if the rule is already in the rulemap
map = '%s -> %s' % (prodname, syms)
if map in self.Prodmap:
m = self.Prodmap[map]
raise GrammarError('%s:%d: Duplicate rule %s. ' % (file, line, m) +
'Previous definition at %s:%d' % (m.file, m.line))
# From this point on, everything is valid. Create a new Production instance
pnumber = len(self.Productions)
if prodname not in self.Nonterminals:
self.Nonterminals[prodname] = []
# Add the production number to Terminals and Nonterminals
for t in syms:
if t in self.Terminals:
self.Terminals[t].append(pnumber)
else:
if t not in self.Nonterminals:
self.Nonterminals[t] = []
self.Nonterminals[t].append(pnumber)
# Create a production and add it to the list of productions
p = Production(pnumber, prodname, syms, prodprec, func, file, line)
self.Productions.append(p)
self.Prodmap[map] = p
# Add to the global productions list
try:
self.Prodnames[prodname].append(p)
except KeyError:
self.Prodnames[prodname] = [p]
# -----------------------------------------------------------------------------
# set_start()
#
# Sets the starting symbol and creates the augmented grammar. Production
# rule 0 is S' -> start where start is the start symbol.
# -----------------------------------------------------------------------------
    def set_start(self, start=None):
        """Set the start symbol and install augmented rule 0: S' -> start.

        Defaults to the left-hand side of the first production added.
        Raises GrammarError if *start* is not a known nonterminal.
        """
        if not start:
            start = self.Productions[1].name
        if start not in self.Nonterminals:
            raise GrammarError('start symbol %s undefined' % start)
        self.Productions[0] = Production(0, "S'", [start])
        self.Nonterminals[start].append(0)
        self.Start = start
# -----------------------------------------------------------------------------
# find_unreachable()
#
# Find all of the nonterminal symbols that can't be reached from the starting
# symbol. Returns a list of nonterminals that can't be reached.
# -----------------------------------------------------------------------------
def find_unreachable(self):
# Mark all symbols that are reachable from a symbol s
def mark_reachable_from(s):
if s in reachable:
return
reachable.add(s)
for p in self.Prodnames.get(s, []):
for r in p.prod:
mark_reachable_from(r)
reachable = set()
mark_reachable_from(self.Productions[0].prod[0])
return [s for s in self.Nonterminals if s not in reachable]
# -----------------------------------------------------------------------------
# infinite_cycles()
#
# This function looks at the various parsing rules and tries to detect
# infinite recursion cycles (grammar rules where there is no possible way
# to derive a string of only terminals).
# -----------------------------------------------------------------------------
def infinite_cycles(self):
terminates = {}
# Terminals:
for t in self.Terminals:
terminates[t] = True
terminates['$end'] = True
# Nonterminals:
# Initialize to false:
for n in self.Nonterminals:
terminates[n] = False
# Then propagate termination until no change:
while True:
some_change = False
for (n, pl) in self.Prodnames.items():
# Nonterminal n terminates iff any of its productions terminates.
for p in pl:
# Production p terminates iff all of its rhs symbols terminate.
for s in p.prod:
if not terminates[s]:
# The symbol s does not terminate,
# so production p does not terminate.
p_terminates = False
break
else:
# didn't break from the loop,
# so every symbol s terminates
# so production p terminates.
p_terminates = True
if p_terminates:
# symbol n terminates!
if not terminates[n]:
terminates[n] = True
some_change = True
# Don't need to consider any more productions for this n.
break
if not some_change:
break
infinite = []
for (s, term) in terminates.items():
if not term:
if s not in self.Prodnames and s not in self.Terminals and s != 'error':
# s is used-but-not-defined, and we've already warned of that,
# so it would be overkill to say that it's also non-terminating.
pass
else:
infinite.append(s)
return infinite
# -----------------------------------------------------------------------------
# undefined_symbols()
#
# Find all symbols that were used in the grammar, but not defined as tokens or
# grammar rules.  Returns a list of tuples (sym, prod) where sym is the symbol
# and prod is the production where the symbol was used.
# -----------------------------------------------------------------------------
def undefined_symbols(self):
result = []
for p in self.Productions:
if not p:
continue
for s in p.prod:
if s not in self.Prodnames and s not in self.Terminals and s != 'error':
result.append((s, p))
return result
# -----------------------------------------------------------------------------
# unused_terminals()
#
# Find all terminals that were defined, but not used by the grammar. Returns
# a list of all symbols.
# -----------------------------------------------------------------------------
def unused_terminals(self):
unused_tok = []
for s, v in self.Terminals.items():
if s != 'error' and not v:
unused_tok.append(s)
return unused_tok
# ------------------------------------------------------------------------------
# unused_rules()
#
# Find all grammar rules that were defined, but not used (maybe not reachable)
# Returns a list of productions.
# ------------------------------------------------------------------------------
def unused_rules(self):
unused_prod = []
for s, v in self.Nonterminals.items():
if not v:
p = self.Prodnames[s][0]
unused_prod.append(p)
return unused_prod
# -----------------------------------------------------------------------------
# unused_precedence()
#
# Returns a list of tuples (term,precedence) corresponding to precedence
# rules that were never used by the grammar. term is the name of the terminal
# on which precedence was applied and precedence is a string such as 'left' or
# 'right' corresponding to the type of precedence.
# -----------------------------------------------------------------------------
def unused_precedence(self):
unused = []
for termname in self.Precedence:
if not (termname in self.Terminals or termname in self.UsedPrecedence):
unused.append((termname, self.Precedence[termname][0]))
return unused
# -------------------------------------------------------------------------
# _first()
#
# Compute the value of FIRST1(beta) where beta is a tuple of symbols.
#
# During execution of compute_first1, the result may be incomplete.
# Afterward (e.g., when called from compute_follow()), it will be complete.
# -------------------------------------------------------------------------
def _first(self, beta):
# We are computing First(x1,x2,x3,...,xn)
result = []
for x in beta:
x_produces_empty = False
# Add all the non-<empty> symbols of First[x] to the result.
for f in self.First[x]:
if f == '<empty>':
x_produces_empty = True
else:
if f not in result:
result.append(f)
if x_produces_empty:
# We have to consider the next x in beta,
# i.e. stay in the loop.
pass
else:
# We don't have to consider any further symbols in beta.
break
else:
# There was no 'break' from the loop,
# so x_produces_empty was true for all x in beta,
# so beta produces empty as well.
result.append('<empty>')
return result
# -------------------------------------------------------------------------
# compute_first()
#
# Compute the value of FIRST1(X) for all symbols
# -------------------------------------------------------------------------
def compute_first(self):
if self.First:
return self.First
# Terminals:
for t in self.Terminals:
self.First[t] = [t]
self.First['$end'] = ['$end']
# Nonterminals:
# Initialize to the empty set:
for n in self.Nonterminals:
self.First[n] = []
# Then propagate symbols until no change:
while True:
some_change = False
for n in self.Nonterminals:
for p in self.Prodnames[n]:
for f in self._first(p.prod):
if f not in self.First[n]:
self.First[n].append(f)
some_change = True
if not some_change:
break
return self.First
# ---------------------------------------------------------------------
# compute_follow()
#
# Computes all of the follow sets for every non-terminal symbol. The
# follow set is the set of all symbols that might follow a given
# non-terminal. See the Dragon book, 2nd Ed. p. 189.
# ---------------------------------------------------------------------
def compute_follow(self, start=None):
# If already computed, return the result
if self.Follow:
return self.Follow
# If first sets not computed yet, do that first.
if not self.First:
self.compute_first()
# Add '$end' to the follow list of the start symbol
for k in self.Nonterminals:
self.Follow[k] = []
if not start:
start = self.Productions[1].name
self.Follow[start] = ['$end']
while True:
didadd = False
for p in self.Productions[1:]:
# Here is the production set
for i, B in enumerate(p.prod):
if B in self.Nonterminals:
# Okay. We got a non-terminal in a production
fst = self._first(p.prod[i+1:])
hasempty = False
for f in fst:
if f != '<empty>' and f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = True
if f == '<empty>':
hasempty = True
if hasempty or i == (len(p.prod)-1):
# Add elements of follow(a) to follow(b)
for f in self.Follow[p.name]:
if f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = True
if not didadd:
break
return self.Follow
# -----------------------------------------------------------------------------
# build_lritems()
#
# This function walks the list of productions and builds a complete set of the
# LR items. The LR items are stored in two ways: First, they are uniquely
# numbered and placed in the list _lritems. Second, a linked list of LR items
# is built for each production. For example:
#
# E -> E PLUS E
#
# Creates the list
#
# [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
# -----------------------------------------------------------------------------
    def build_lritems(self):
        """Precompute every LR item for each production.
        For a production p this fills p.lr_items (LRItem objects with
        the dot at positions 0..len(p)) and chains them together via
        the lr_next attribute, starting at p itself and ending in None.
        Each item also gets lr_after (productions of the symbol right
        after the dot) and lr_before (symbol right before the dot)."""
        for p in self.Productions:
            lastlri = p
            i = 0
            lr_items = []
            while True:
                # A dot position past the end yields no item; None
                # terminates the lr_next chain.
                if i > len(p):
                    lri = None
                else:
                    lri = LRItem(p, i)
                    # Precompute the list of productions immediately following
                    try:
                        lri.lr_after = self.Prodnames[lri.prod[i+1]]
                    except (IndexError, KeyError):
                        lri.lr_after = []
                    try:
                        lri.lr_before = lri.prod[i-1]
                    except IndexError:
                        lri.lr_before = None
                lastlri.lr_next = lri
                if not lri:
                    break
                lr_items.append(lri)
                lastlri = lri
                i += 1
            p.lr_items = lr_items
class VersionError(YaccError):
    """Raised when cached parser tables were generated by an
    incompatible version of this module (see __tabversion__)."""
    pass
class LRTable(object):
    """Container for LR parsing tables (action/goto/productions) that
    can be populated from a generated table module or a pickle file."""

    def __init__(self):
        self.lr_action = None       # action table: state -> {token: action}
        self.lr_goto = None         # goto table:   state -> {nonterminal: state}
        self.lr_productions = None  # list of MiniProduction objects
        self.lr_method = None       # table-generation method ('LALR' or 'SLR')

    def read_table(self, module):
        """Load tables from a parsetab module (or its importable name)
        and return the stored grammar signature.
        Raises VersionError when the table file was written by a
        different version of this module."""
        if isinstance(module, types.ModuleType):
            parsetab = module
        else:
            # Import by (possibly dotted) name without resorting to
            # exec(); sys.modules holds the leaf module afterwards.
            __import__(module)
            parsetab = sys.modules[module]
        if parsetab._tabversion != __tabversion__:
            raise VersionError('yacc table file version is out of date')
        self.lr_action = parsetab._lr_action
        self.lr_goto = parsetab._lr_goto
        self.lr_productions = []
        for p in parsetab._lr_productions:
            self.lr_productions.append(MiniProduction(*p))
        self.lr_method = parsetab._lr_method
        return parsetab._lr_signature

    def read_pickle(self, filename):
        """Load tables from a pickle produced by pickle_table() and
        return the stored grammar signature.
        Raises ImportError when the file does not exist and
        VersionError on a version mismatch."""
        try:
            import cPickle as pickle
        except ImportError:
            import pickle
        if not os.path.exists(filename):
            raise ImportError
        # 'with' guarantees the file is closed even if a load (or the
        # version check) raises.
        with open(filename, 'rb') as in_f:
            tabversion = pickle.load(in_f)
            if tabversion != __tabversion__:
                raise VersionError('yacc table file version is out of date')
            self.lr_method = pickle.load(in_f)
            signature = pickle.load(in_f)
            self.lr_action = pickle.load(in_f)
            self.lr_goto = pickle.load(in_f)
            productions = pickle.load(in_f)
        self.lr_productions = []
        for p in productions:
            self.lr_productions.append(MiniProduction(*p))
        return signature

    # Bind all production function names to callable objects in pdict
    def bind_callables(self, pdict):
        for p in self.lr_productions:
            p.bind(pdict)
class LRGeneratedTable(LRTable):
    """Generates the SLR or LALR(1) parsing tables for a Grammar.
    The resulting tables are stored in the lr_action / lr_goto /
    lr_productions attributes inherited from LRTable; detected
    conflicts are collected in sr_conflicts and rr_conflicts."""
    def __init__(self, grammar, method='LALR', log=None):
        if method not in ['SLR', 'LALR']:
            raise LALRError('Unsupported method %s' % method)
        self.grammar = grammar
        self.lr_method = method
        # Set up the logger
        if not log:
            log = NullLogger()
        self.log = log
        # Internal attributes
        self.lr_action = {}                        # Action table
        self.lr_goto = {}                          # Goto table
        self.lr_productions = grammar.Productions  # Copy of grammar Production array
        self.lr_goto_cache = {}                    # Cache of computed gotos
        self.lr0_cidhash = {}                      # Cache of closures
        self._add_count = 0                        # Internal counter used to detect cycles
        # Diagnostic information filled in by the table generator
        self.sr_conflict = 0
        self.rr_conflict = 0
        self.conflicts = []                        # List of conflicts
        self.sr_conflicts = []
        self.rr_conflicts = []
        # Build the tables
        self.grammar.build_lritems()
        self.grammar.compute_first()
        self.grammar.compute_follow()
        self.lr_parse_table()
    # Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
    def lr0_closure(self, I):
        """Return the closure of item set I: I plus every item
        B -> . gamma reachable through a nonterminal after a dot."""
        self._add_count += 1
        # Add everything in I to J
        J = I[:]
        didadd = True
        while didadd:
            didadd = False
            for j in J:
                for x in j.lr_after:
                    # lr0_added stamps productions already pulled into this
                    # particular closure, so each one is added at most once.
                    if getattr(x, 'lr0_added', 0) == self._add_count:
                        continue
                    # Add B --> .G to J
                    J.append(x.lr_next)
                    x.lr0_added = self._add_count
                    didadd = True
        return J
    # Compute the LR(0) goto function goto(I,X) where I is a set
    # of LR(0) items and X is a grammar symbol. This function is written
    # in a way that guarantees uniqueness of the generated goto sets
    # (i.e. the same goto set will never be returned as two different Python
    # objects). With uniqueness, we can later do fast set comparisons using
    # id(obj) instead of element-wise comparison.
    def lr0_goto(self, I, x):
        # First we look for a previously cached entry
        g = self.lr_goto_cache.get((id(I), x))
        if g:
            return g
        # Now we generate the goto set in a way that guarantees uniqueness
        # of the result
        s = self.lr_goto_cache.get(x)
        if not s:
            s = {}
            self.lr_goto_cache[x] = s
        # Walk a trie keyed by the items that shift over x; the '$end'
        # leaf caches the closure shared by identical goto sets.
        gs = []
        for p in I:
            n = p.lr_next
            if n and n.lr_before == x:
                s1 = s.get(id(n))
                if not s1:
                    s1 = {}
                    s[id(n)] = s1
                gs.append(n)
                s = s1
        g = s.get('$end')
        if not g:
            if gs:
                g = self.lr0_closure(gs)
                s['$end'] = g
            else:
                s['$end'] = gs
        self.lr_goto_cache[(id(I), x)] = g
        return g
    # Compute the LR(0) sets of item function
    def lr0_items(self):
        """Build C, the canonical collection of LR(0) item sets, and
        number each set in self.lr0_cidhash (keyed by id of the set)."""
        C = [self.lr0_closure([self.grammar.Productions[0].lr_next])]
        i = 0
        for I in C:
            self.lr0_cidhash[id(I)] = i
            i += 1
        # Loop over the items in C and each grammar symbols
        i = 0
        while i < len(C):
            I = C[i]
            i += 1
            # Collect all of the symbols that could possibly be in the goto(I,X) sets
            asyms = {}
            for ii in I:
                for s in ii.usyms:
                    asyms[s] = None
            for x in asyms:
                g = self.lr0_goto(I, x)
                if not g or id(g) in self.lr0_cidhash:
                    continue
                self.lr0_cidhash[id(g)] = len(C)
                C.append(g)
        return C
    # -----------------------------------------------------------------------------
    #                       ==== LALR(1) Parsing ====
    #
    # LALR(1) parsing is almost exactly the same as SLR except that instead of
    # relying upon Follow() sets when performing reductions, a more selective
    # lookahead set that incorporates the state of the LR(0) machine is utilized.
    # Thus, we mainly just have to focus on calculating the lookahead sets.
    #
    # The method used here is due to DeRemer and Pennelo (1982).
    #
    # DeRemer, F. L., and T. J. Pennelo: "Efficient Computation of LALR(1)
    # Lookahead Sets", ACM Transactions on Programming Languages and Systems,
    # Vol. 4, No. 4, Oct. 1982, pp. 615-649
    #
    # Further details can also be found in:
    #
    #  J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
    #      McGraw-Hill Book Company, (1985).
    #
    # -----------------------------------------------------------------------------
    # -----------------------------------------------------------------------------
    # compute_nullable_nonterminals()
    #
    # Creates a dictionary containing all of the non-terminals that might produce
    # an empty production.
    # -----------------------------------------------------------------------------
    def compute_nullable_nonterminals(self):
        nullable = set()
        num_nullable = 0
        while True:
            for p in self.grammar.Productions[1:]:
                if p.len == 0:
                    nullable.add(p.name)
                    continue
                for t in p.prod:
                    if t not in nullable:
                        break
                else:
                    # Every rhs symbol is nullable, so the lhs is too.
                    nullable.add(p.name)
            if len(nullable) == num_nullable:
                break
            num_nullable = len(nullable)
        return nullable
    # -----------------------------------------------------------------------------
    # find_nonterminal_trans(C)
    #
    # Given a set of LR(0) items, this function finds all of the non-terminal
    # transitions.    These are transitions in which a dot appears immediately before
    # a non-terminal.   Returns a list of tuples of the form (state,N) where state
    # is the state number and N is the nonterminal symbol.
    #
    # The input C is the set of LR(0) items.
    # -----------------------------------------------------------------------------
    def find_nonterminal_transitions(self, C):
        trans = []
        for stateno, state in enumerate(C):
            for p in state:
                if p.lr_index < p.len - 1:
                    t = (stateno, p.prod[p.lr_index+1])
                    if t[1] in self.grammar.Nonterminals:
                        if t not in trans:
                            trans.append(t)
        return trans
    # -----------------------------------------------------------------------------
    # dr_relation()
    #
    # Computes the DR(p,A) relationships for non-terminal transitions.  The input
    # is a tuple (state,N) where state is a number and N is a nonterminal symbol.
    #
    # Returns a list of terminals.
    # -----------------------------------------------------------------------------
    def dr_relation(self, C, trans, nullable):
        # NOTE(review): dr_set appears unused here; kept for
        # byte-compatibility with the existing code.
        dr_set = {}
        state, N = trans
        terms = []
        g = self.lr0_goto(C[state], N)
        for p in g:
            if p.lr_index < p.len - 1:
                a = p.prod[p.lr_index+1]
                if a in self.grammar.Terminals:
                    if a not in terms:
                        terms.append(a)
        # This extra bit is to handle the start state
        if state == 0 and N == self.grammar.Productions[0].prod[0]:
            terms.append('$end')
        return terms
    # -----------------------------------------------------------------------------
    # reads_relation()
    #
    # Computes the READS() relation (p,A) READS (t,C).
    # -----------------------------------------------------------------------------
    def reads_relation(self, C, trans, empty):
        # Look for empty transitions
        rel = []
        state, N = trans
        g = self.lr0_goto(C[state], N)
        j = self.lr0_cidhash.get(id(g), -1)
        for p in g:
            if p.lr_index < p.len - 1:
                a = p.prod[p.lr_index + 1]
                if a in empty:
                    rel.append((j, a))
        return rel
    # -----------------------------------------------------------------------------
    # compute_lookback_includes()
    #
    # Determines the lookback and includes relations
    #
    # LOOKBACK:
    #
    # This relation is determined by running the LR(0) state machine forward.
    # For example, starting with a production "N : . A B C", we run it forward
    # to obtain "N : A B C ."   We then build a relationship between this final
    # state and the starting state.   These relationships are stored in a dictionary
    # lookdict.
    #
    # INCLUDES:
    #
    # Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
    #
    # This relation is used to determine non-terminal transitions that occur
    # inside of other non-terminal transition states.   (p,A) INCLUDES (p', B)
    # if the following holds:
    #
    #       B -> LAT, where T -> epsilon and p' -L-> p
    #
    # L is essentially a prefix (which may be empty), T is a suffix that must be
    # able to derive an empty string.  State p' must lead to state p with the string L.
    #
    # -----------------------------------------------------------------------------
    def compute_lookback_includes(self, C, trans, nullable):
        lookdict = {}          # Dictionary of lookback relations
        includedict = {}       # Dictionary of include relations
        # Make a dictionary of non-terminal transitions
        dtrans = {}
        for t in trans:
            dtrans[t] = 1
        # Loop over all transitions and compute lookbacks and includes
        for state, N in trans:
            lookb = []
            includes = []
            for p in C[state]:
                if p.name != N:
                    continue
                # Okay, we have a name match.  We now follow the production all the way
                # through the state machine until we get the . on the right hand side
                lr_index = p.lr_index
                j = state
                while lr_index < p.len - 1:
                    lr_index = lr_index + 1
                    t = p.prod[lr_index]
                    # Check to see if this symbol and state are a non-terminal transition
                    if (j, t) in dtrans:
                        # Yes.  Okay, there is some chance that this is an includes relation
                        # the only way to know for certain is whether the rest of the
                        # production derives empty
                        li = lr_index + 1
                        while li < p.len:
                            if p.prod[li] in self.grammar.Terminals:
                                break      # No forget it
                            if p.prod[li] not in nullable:
                                break
                            li = li + 1
                        else:
                            # Appears to be a relation between (j,t) and (state,N)
                            includes.append((j, t))
                    g = self.lr0_goto(C[j], t)               # Go to next set
                    j = self.lr0_cidhash.get(id(g), -1)      # Go to next state
                # When we get here, j is the final state, now we have to locate the production
                for r in C[j]:
                    if r.name != p.name:
                        continue
                    if r.len != p.len:
                        continue
                    i = 0
                    # This loop is comparing a production ". A B C" with "A B C ."
                    while i < r.lr_index:
                        if r.prod[i] != p.prod[i+1]:
                            break
                        i = i + 1
                    else:
                        lookb.append((j, r))
            for i in includes:
                if i not in includedict:
                    includedict[i] = []
                includedict[i].append((state, N))
            lookdict[(state, N)] = lookb
        return lookdict, includedict
    # -----------------------------------------------------------------------------
    # compute_read_sets()
    #
    # Given a set of LR(0) items, this function computes the read sets.
    #
    # Inputs:  C        =  Set of LR(0) items
    #          ntrans   = Set of nonterminal transitions
    #          nullable = Set of empty transitions
    #
    # Returns a set containing the read sets
    # -----------------------------------------------------------------------------
    def compute_read_sets(self, C, ntrans, nullable):
        FP = lambda x: self.dr_relation(C, x, nullable)
        R = lambda x: self.reads_relation(C, x, nullable)
        F = digraph(ntrans, R, FP)
        return F
    # -----------------------------------------------------------------------------
    # compute_follow_sets()
    #
    # Given a set of LR(0) items, a set of non-terminal transitions, a readset,
    # and an include set, this function computes the follow sets
    #
    # Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
    #
    # Inputs:
    #            ntrans     = Set of nonterminal transitions
    #            readsets   = Readset (previously computed)
    #            inclsets   = Include sets (previously computed)
    #
    # Returns a set containing the follow sets
    # -----------------------------------------------------------------------------
    def compute_follow_sets(self, ntrans, readsets, inclsets):
        FP = lambda x: readsets[x]
        R = lambda x: inclsets.get(x, [])
        F = digraph(ntrans, R, FP)
        return F
    # -----------------------------------------------------------------------------
    # add_lookaheads()
    #
    # Attaches the lookahead symbols to grammar rules.
    #
    # Inputs:    lookbacks         -  Set of lookback relations
    #            followset         -  Computed follow set
    #
    # This function directly attaches the lookaheads to productions contained
    # in the lookbacks set
    # -----------------------------------------------------------------------------
    def add_lookaheads(self, lookbacks, followset):
        for trans, lb in lookbacks.items():
            # Loop over productions in lookback
            for state, p in lb:
                if state not in p.lookaheads:
                    p.lookaheads[state] = []
                f = followset.get(trans, [])
                for a in f:
                    if a not in p.lookaheads[state]:
                        p.lookaheads[state].append(a)
    # -----------------------------------------------------------------------------
    # add_lalr_lookaheads()
    #
    # This function does all of the work of adding lookahead information for use
    # with LALR parsing
    # -----------------------------------------------------------------------------
    def add_lalr_lookaheads(self, C):
        # Determine all of the nullable nonterminals
        nullable = self.compute_nullable_nonterminals()
        # Find all non-terminal transitions
        trans = self.find_nonterminal_transitions(C)
        # Compute read sets
        readsets = self.compute_read_sets(C, trans, nullable)
        # Compute lookback/includes relations
        lookd, included = self.compute_lookback_includes(C, trans, nullable)
        # Compute LALR FOLLOW sets
        followsets = self.compute_follow_sets(trans, readsets, included)
        # Add all of the lookaheads
        self.add_lookaheads(lookd, followsets)
    # -----------------------------------------------------------------------------
    # lr_parse_table()
    #
    # This function constructs the parse tables for SLR or LALR
    # -----------------------------------------------------------------------------
    def lr_parse_table(self):
        """Construct the action and goto tables, resolving conflicts
        via precedence declarations and recording any that remain."""
        Productions = self.grammar.Productions
        Precedence = self.grammar.Precedence
        goto = self.lr_goto         # Goto array
        action = self.lr_action     # Action array
        log = self.log              # Logger for output
        actionp = {}                # Action production array (temporary)
        log.info('Parsing method: %s', self.lr_method)
        # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
        # This determines the number of states
        C = self.lr0_items()
        if self.lr_method == 'LALR':
            self.add_lalr_lookaheads(C)
        # Build the parser table, state by state
        st = 0
        for I in C:
            # Loop over each production in I
            actlist = []              # List of actions
            st_action = {}
            st_actionp = {}
            st_goto = {}
            log.info('')
            log.info('state %d', st)
            log.info('')
            for p in I:
                log.info('    (%d) %s', p.number, p)
            log.info('')
            for p in I:
                if p.len == p.lr_index + 1:
                    if p.name == "S'":
                        # Start symbol. Accept!
                        st_action['$end'] = 0
                        st_actionp['$end'] = p
                    else:
                        # We are at the end of a production. Reduce!
                        if self.lr_method == 'LALR':
                            laheads = p.lookaheads[st]
                        else:
                            laheads = self.grammar.Follow[p.name]
                        for a in laheads:
                            actlist.append((a, p, 'reduce using rule %d (%s)' % (p.number, p)))
                            r = st_action.get(a)
                            if r is not None:
                                # Whoa. Have a shift/reduce or reduce/reduce conflict
                                if r > 0:
                                    # Need to decide on shift or reduce here
                                    # By default we favor shifting. Need to add
                                    # some precedence rules here.
                                    # Shift precedence comes from the token
                                    sprec, slevel = Precedence.get(a, ('right', 0))
                                    # Reduce precedence comes from rule being reduced (p)
                                    rprec, rlevel = Productions[p.number].prec
                                    if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
                                        # We really need to reduce here.
                                        st_action[a] = -p.number
                                        st_actionp[a] = p
                                        if not slevel and not rlevel:
                                            log.info('  ! shift/reduce conflict for %s resolved as reduce', a)
                                            self.sr_conflicts.append((st, a, 'reduce'))
                                        Productions[p.number].reduced += 1
                                    elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                        st_action[a] = None
                                    else:
                                        # Hmmm. Guess we'll keep the shift
                                        if not rlevel:
                                            log.info('  ! shift/reduce conflict for %s resolved as shift', a)
                                            self.sr_conflicts.append((st, a, 'shift'))
                                elif r < 0:
                                    # Reduce/reduce conflict. In this case, we favor the rule
                                    # that was defined first in the grammar file
                                    oldp = Productions[-r]
                                    pp = Productions[p.number]
                                    if oldp.line > pp.line:
                                        st_action[a] = -p.number
                                        st_actionp[a] = p
                                        chosenp, rejectp = pp, oldp
                                        Productions[p.number].reduced += 1
                                        Productions[oldp.number].reduced -= 1
                                    else:
                                        chosenp, rejectp = oldp, pp
                                    self.rr_conflicts.append((st, chosenp, rejectp))
                                    log.info('  ! reduce/reduce conflict for %s resolved using rule %d (%s)',
                                             a, st_actionp[a].number, st_actionp[a])
                                else:
                                    raise LALRError('Unknown conflict in state %d' % st)
                            else:
                                st_action[a] = -p.number
                                st_actionp[a] = p
                                Productions[p.number].reduced += 1
                else:
                    i = p.lr_index
                    a = p.prod[i+1]       # Get symbol right after the "."
                    if a in self.grammar.Terminals:
                        g = self.lr0_goto(I, a)
                        j = self.lr0_cidhash.get(id(g), -1)
                        if j >= 0:
                            # We are in a shift state
                            actlist.append((a, p, 'shift and go to state %d' % j))
                            r = st_action.get(a)
                            if r is not None:
                                # Whoa have a shift/reduce or shift/shift conflict
                                if r > 0:
                                    if r != j:
                                        raise LALRError('Shift/shift conflict in state %d' % st)
                                elif r < 0:
                                    # Do a precedence check.
                                    #   -  if precedence of reduce rule is higher, we reduce.
                                    #   -  if precedence of reduce is same and left assoc, we reduce.
                                    #   -  otherwise we shift
                                    # Shift precedence comes from the token
                                    sprec, slevel = Precedence.get(a, ('right', 0))
                                    # Reduce precedence comes from the rule that could have been reduced
                                    rprec, rlevel = Productions[st_actionp[a].number].prec
                                    if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
                                        # We decide to shift here... highest precedence to shift
                                        Productions[st_actionp[a].number].reduced -= 1
                                        st_action[a] = j
                                        st_actionp[a] = p
                                        if not rlevel:
                                            log.info('  ! shift/reduce conflict for %s resolved as shift', a)
                                            self.sr_conflicts.append((st, a, 'shift'))
                                    elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                        st_action[a] = None
                                    else:
                                        # Hmmm. Guess we'll keep the reduce
                                        if not slevel and not rlevel:
                                            log.info('  ! shift/reduce conflict for %s resolved as reduce', a)
                                            self.sr_conflicts.append((st, a, 'reduce'))
                                else:
                                    raise LALRError('Unknown conflict in state %d' % st)
                            else:
                                st_action[a] = j
                                st_actionp[a] = p
            # Print the actions associated with each terminal
            _actprint = {}
            for a, p, m in actlist:
                if a in st_action:
                    if p is st_actionp[a]:
                        log.info('    %-15s %s', a, m)
                        _actprint[(a, m)] = 1
            log.info('')
            # Print the actions that were not used. (debugging)
            not_used = 0
            for a, p, m in actlist:
                if a in st_action:
                    if p is not st_actionp[a]:
                        if not (a, m) in _actprint:
                            log.debug('  ! %-15s [ %s ]', a, m)
                            not_used = 1
                            _actprint[(a, m)] = 1
            if not_used:
                log.debug('')
            # Construct the goto table for this state
            nkeys = {}
            for ii in I:
                for s in ii.usyms:
                    if s in self.grammar.Nonterminals:
                        nkeys[s] = None
            for n in nkeys:
                g = self.lr0_goto(I, n)
                j = self.lr0_cidhash.get(id(g), -1)
                if j >= 0:
                    st_goto[n] = j
                    log.info('    %-30s shift and go to state %d', n, j)
            action[st] = st_action
            actionp[st] = st_actionp
            goto[st] = st_goto
            st += 1
    # -----------------------------------------------------------------------------
    # write()
    #
    # This function writes the LR parsing tables to a file
    # -----------------------------------------------------------------------------
    def write_table(self, tabmodule, outputdir='', signature=''):
        """Write the tables as an importable Python module (parsetab).
        Raises IOError rather than overwriting an existing module object."""
        if isinstance(tabmodule, types.ModuleType):
            raise IOError("Won't overwrite existing tabmodule")
        basemodulename = tabmodule.split('.')[-1]
        filename = os.path.join(outputdir, basemodulename) + '.py'
        try:
            f = open(filename, 'w')
            f.write('''
# %s
# This file is automatically generated. Do not edit.
_tabversion = %r
_lr_method = %r
_lr_signature = %r
    ''' % (os.path.basename(filename), __tabversion__, self.lr_method, signature))
            # Change smaller to 0 to go back to original tables
            smaller = 1
            # Factor out names to try and make smaller
            if smaller:
                items = {}
                for s, nd in self.lr_action.items():
                    for name, v in nd.items():
                        i = items.get(name)
                        if not i:
                            i = ([], [])
                            items[name] = i
                        i[0].append(s)
                        i[1].append(v)
                f.write('\n_lr_action_items = {')
                for k, v in items.items():
                    f.write('%r:([' % k)
                    for i in v[0]:
                        f.write('%r,' % i)
                    f.write('],[')
                    for i in v[1]:
                        f.write('%r,' % i)
                    f.write(']),')
                f.write('}\n')
                f.write('''
_lr_action = {}
for _k, _v in _lr_action_items.items():
   for _x,_y in zip(_v[0],_v[1]):
      if not _x in _lr_action: _lr_action[_x] = {}
      _lr_action[_x][_k] = _y
del _lr_action_items
''')
            else:
                f.write('\n_lr_action = { ')
                for k, v in self.lr_action.items():
                    f.write('(%r,%r):%r,' % (k[0], k[1], v))
                f.write('}\n')
            if smaller:
                # Factor out names to try and make smaller
                items = {}
                for s, nd in self.lr_goto.items():
                    for name, v in nd.items():
                        i = items.get(name)
                        if not i:
                            i = ([], [])
                            items[name] = i
                        i[0].append(s)
                        i[1].append(v)
                f.write('\n_lr_goto_items = {')
                for k, v in items.items():
                    f.write('%r:([' % k)
                    for i in v[0]:
                        f.write('%r,' % i)
                    f.write('],[')
                    for i in v[1]:
                        f.write('%r,' % i)
                    f.write(']),')
                f.write('}\n')
                f.write('''
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
   for _x, _y in zip(_v[0], _v[1]):
      if not _x in _lr_goto: _lr_goto[_x] = {}
      _lr_goto[_x][_k] = _y
del _lr_goto_items
''')
            else:
                f.write('\n_lr_goto = { ')
                for k, v in self.lr_goto.items():
                    f.write('(%r,%r):%r,' % (k[0], k[1], v))
                f.write('}\n')
            # Write production table
            f.write('_lr_productions = [\n')
            for p in self.lr_productions:
                if p.func:
                    f.write('  (%r,%r,%d,%r,%r,%d),\n' % (p.str, p.name, p.len,
                                                          p.func, os.path.basename(p.file), p.line))
                else:
                    f.write('  (%r,%r,%d,None,None,None),\n' % (str(p), p.name, p.len))
            f.write(']\n')
            f.close()
        except IOError as e:
            # NOTE(review): the handler only re-raises; presumably kept
            # as a hook for error reporting — confirm before removing.
            raise
    # -----------------------------------------------------------------------------
    # pickle_table()
    #
    # This function pickles the LR parsing tables to a supplied file object
    # -----------------------------------------------------------------------------
    def pickle_table(self, filename, signature=''):
        """Pickle the tables (version, method, signature, action, goto,
        productions) to *filename* for fast reloading by read_pickle()."""
        try:
            import cPickle as pickle
        except ImportError:
            import pickle
        with open(filename, 'wb') as outf:
            pickle.dump(__tabversion__, outf, pickle_protocol)
            pickle.dump(self.lr_method, outf, pickle_protocol)
            pickle.dump(signature, outf, pickle_protocol)
            pickle.dump(self.lr_action, outf, pickle_protocol)
            pickle.dump(self.lr_goto, outf, pickle_protocol)
            outp = []
            for p in self.lr_productions:
                if p.func:
                    outp.append((p.str, p.name, p.len, p.func, os.path.basename(p.file), p.line))
                else:
                    outp.append((str(p), p.name, p.len, None, None, None))
            pickle.dump(outp, outf, pickle_protocol)
def get_caller_module_dict(levels):
    """Return a dict merging the globals and locals of the stack frame
    *levels* frames above this call (locals win on key clashes)."""
    frame = sys._getframe(levels)
    namespace = dict(frame.f_globals)
    if frame.f_globals != frame.f_locals:
        namespace.update(frame.f_locals)
    return namespace
class ParserReflect(object):
def __init__(self, pdict, log=None):
self.pdict = pdict
self.start = None
self.error_func = None
self.tokens = None
self.modules = set()
self.grammar = []
self.error = False
if log is None:
self.log = PlyLogger(sys.stderr)
else:
self.log = log
# Get all of the basic information
def get_all(self):
self.get_start()
self.get_error_func()
self.get_tokens()
self.get_precedence()
self.get_pfunctions()
# Validate all of the information
def validate_all(self):
self.validate_start()
self.validate_error_func()
self.validate_tokens()
self.validate_precedence()
self.validate_pfunctions()
self.validate_modules()
return self.error
# Compute a signature over the grammar
def signature(self):
parts = []
try:
if self.start:
parts.append(self.start)
if self.prec:
parts.append(''.join([''.join(p) for p in self.prec]))
if self.tokens:
parts.append(' '.join(self.tokens))
for f in self.pfuncs:
if f[3]:
parts.append(f[3])
except (TypeError, ValueError):
pass
return ''.join(parts)
# -----------------------------------------------------------------------------
# validate_modules()
#
# This method checks to see if there are duplicated p_rulename() functions
# in the parser module file. Without this function, it is really easy for
# users to make mistakes by cutting and pasting code fragments (and it's a real
# bugger to try and figure out why the resulting parser doesn't work). Therefore,
# we just do a little regular expression pattern matching of def statements
# to try and detect duplicates.
# -----------------------------------------------------------------------------
def validate_modules(self):
# Match def p_funcname(
fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
for module in self.modules:
try:
lines, linen = inspect.getsourcelines(module)
except IOError:
continue
counthash = {}
for linen, line in enumerate(lines):
linen += 1
m = fre.match(line)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
filename = inspect.getsourcefile(module)
self.log.warning('%s:%d: Function %s redefined. Previously defined on line %d',
filename, linen, name, prev)
# Get the start symbol
def get_start(self):
self.start = self.pdict.get('start')
# Validate the start symbol
def validate_start(self):
    """Ensure the declared start symbol, when present, is a string."""
    if self.start is None:
        return
    if not isinstance(self.start, string_types):
        self.log.error("'start' must be a string")
# Look for error handler
def get_error_func(self):
    """Record the user-supplied p_error() handler (None when absent)."""
    self.error_func = self.pdict.get('p_error', None)
# Validate the error function
def validate_error_func(self):
    """Check that p_error(), if defined, is a function/method of one argument."""
    if not self.error_func:
        return
    if isinstance(self.error_func, types.FunctionType):
        implicit_args = 0
    elif isinstance(self.error_func, types.MethodType):
        # Bound methods carry an implicit self.
        implicit_args = 1
    else:
        self.log.error("'p_error' defined, but is not a function or method")
        self.error = True
        return

    code = self.error_func.__code__
    self.modules.add(inspect.getmodule(self.error_func))
    if code.co_argcount - implicit_args != 1:
        self.log.error('%s:%d: p_error() requires 1 argument',
                       code.co_filename, code.co_firstlineno)
        self.error = True
# Get the tokens map
def get_tokens(self):
    """Fetch and validate the 'tokens' declaration from the grammar dict.

    Sets self.tokens on success; logs an error and sets self.error
    otherwise.
    """
    tokens = self.pdict.get('tokens')
    if not tokens:
        # Covers both a missing declaration and an empty list/tuple; the
        # old separate "tokens is empty" branch after the isinstance check
        # was unreachable because an empty sequence is falsy.
        self.log.error('No token list is defined')
        self.error = True
        return

    if not isinstance(tokens, (list, tuple)):
        self.log.error('tokens must be a list or tuple')
        self.error = True
        return

    self.tokens = tokens
# Validate the tokens
def validate_tokens(self):
    """Reject the reserved token name 'error' and warn on duplicates."""
    if 'error' in self.tokens:
        self.log.error("Illegal token name 'error'. Is a reserved word")
        self.error = True
        return

    seen = set()
    for tok in self.tokens:
        if tok in seen:
            self.log.warning('Token %r multiply defined', tok)
        seen.add(tok)
# Get the precedence map (if any)
def get_precedence(self):
    """Pull the raw 'precedence' declaration from the grammar dictionary."""
    self.prec = self.pdict.get('precedence', None)
# Validate and parse the precedence map
def validate_precedence(self):
    """Parse the 'precedence' declaration into (term, assoc, level) triples.

    Populates self.preclist; logs an error and sets self.error on any
    malformed entry.
    """
    entries = []
    if self.prec:
        if not isinstance(self.prec, (list, tuple)):
            self.log.error('precedence must be a list or tuple')
            self.error = True
            return
        # Levels are 1-based: later entries bind more tightly.
        for level, spec in enumerate(self.prec, 1):
            if not isinstance(spec, (list, tuple)):
                self.log.error('Bad precedence table')
                self.error = True
                return
            if len(spec) < 2:
                self.log.error('Malformed precedence entry %s. Must be (assoc, term, ..., term)', spec)
                self.error = True
                return
            assoc = spec[0]
            if not isinstance(assoc, string_types):
                self.log.error('precedence associativity must be a string')
                self.error = True
                return
            for term in spec[1:]:
                if not isinstance(term, string_types):
                    self.log.error('precedence items must be strings')
                    self.error = True
                    return
                entries.append((term, assoc, level))
    self.preclist = entries
# Get all p_functions from the grammar
def get_pfunctions(self):
    """Collect every p_* rule function, sorted stably by definition site."""
    found = []
    for name, obj in self.pdict.items():
        if not name.startswith('p_') or name == 'p_error':
            continue
        if isinstance(obj, (types.FunctionType, types.MethodType)):
            line = getattr(obj, 'co_firstlineno', obj.__code__.co_firstlineno)
            found.append((line, inspect.getmodule(obj), name, obj.__doc__))

    # Line numbers alone may collide across modules, so fold the stringified
    # module (and name/doc) into the key to get a total, deterministic order.
    found.sort(key=lambda entry: (entry[0], str(entry[1]), entry[2], entry[3]))
    self.pfuncs = found
# Validate all of the p_functions
def validate_pfunctions(self):
    """Validate every collected p_* rule function and build the raw grammar.

    Populates self.grammar with (funcname, parsed_rule) pairs and
    self.modules with every module that defines a rule.  Sets self.error
    on any hard failure.
    """
    grammar = []
    # Check for non-empty symbols
    if len(self.pfuncs) == 0:
        self.log.error('no rules of the form p_rulename are defined')
        self.error = True
        return

    for line, module, name, doc in self.pfuncs:
        file = inspect.getsourcefile(module)
        func = self.pdict[name]
        # Bound methods carry an implicit self, so they need one extra argument.
        if isinstance(func, types.MethodType):
            reqargs = 2
        else:
            reqargs = 1
        if func.__code__.co_argcount > reqargs:
            self.log.error('%s:%d: Rule %r has too many arguments', file, line, func.__name__)
            self.error = True
        elif func.__code__.co_argcount < reqargs:
            self.log.error('%s:%d: Rule %r requires an argument', file, line, func.__name__)
            self.error = True
        elif not func.__doc__:
            # A rule with no docstring has no grammar text; skip it with a warning.
            self.log.warning('%s:%d: No documentation string specified in function %r (ignored)',
                             file, line, func.__name__)
        else:
            try:
                # The docstring holds the grammar productions; parse them out.
                parsed_g = parse_grammar(doc, file, line)
                for g in parsed_g:
                    grammar.append((name, g))
            except SyntaxError as e:
                self.log.error(str(e))
                self.error = True

            # Looks like a valid grammar rule
            # Mark the file in which defined.
            self.modules.add(module)

    # Secondary validation step that looks for p_ definitions that are not functions
    # or functions that look like they might be grammar rules.
    for n, v in self.pdict.items():
        if n.startswith('p_') and isinstance(v, (types.FunctionType, types.MethodType)):
            continue
        if n.startswith('t_'):
            continue
        if n.startswith('p_') and n != 'p_error':
            self.log.warning('%r not defined as a function', n)
        # Heuristic: a one-argument function (or two-argument method) whose
        # docstring looks like "name : ..." was probably meant to be a rule.
        if ((isinstance(v, types.FunctionType) and v.__code__.co_argcount == 1) or
            (isinstance(v, types.MethodType) and v.__func__.__code__.co_argcount == 2)):
            if v.__doc__:
                try:
                    doc = v.__doc__.split(' ')
                    if doc[1] == ':':
                        self.log.warning('%s:%d: Possible grammar rule %r defined without p_ prefix',
                                         v.__code__.co_filename, v.__code__.co_firstlineno, n)
                except IndexError:
                    pass

    self.grammar = grammar
def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
         check_recursion=True, optimize=False, write_tables=True, debugfile=debug_file,
         outputdir=None, debuglog=None, errorlog=None, picklefile=None):
    """Build an LR parser from the grammar rules found in *module* (or the
    caller's namespace).

    The grammar is reflected via ParserReflect, validated, and either loaded
    from previously generated tables (when the grammar signature matches) or
    regenerated with LRGeneratedTable.  Returns an LRParser; also binds the
    module-level ``parse`` shortcut to the new parser's parse() method.
    Raises YaccError when the grammar is invalid.
    """
    if tabmodule is None:
        tabmodule = tab_module

    # Reference to the parsing method of the last built parser
    global parse

    # If pickling is enabled, table files are not created
    if picklefile:
        write_tables = 0

    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)

    # Get the module dictionary used for the parser
    if module:
        _items = [(k, getattr(module, k)) for k in dir(module)]
        pdict = dict(_items)
        # If no __file__ attribute is available, try to obtain it from the __module__ instead
        if '__file__' not in pdict:
            pdict['__file__'] = sys.modules[pdict['__module__']].__file__
    else:
        # No module given: reflect over the caller's frame (2 levels up).
        pdict = get_caller_module_dict(2)

    if outputdir is None:
        # If no output directory is set, the location of the output files
        # is determined according to the following rules:
        #     - If tabmodule specifies a package, files go into that package directory
        #     - Otherwise, files go in the same directory as the specifying module
        if isinstance(tabmodule, types.ModuleType):
            srcfile = tabmodule.__file__
        else:
            if '.' not in tabmodule:
                srcfile = pdict['__file__']
            else:
                parts = tabmodule.split('.')
                pkgname = '.'.join(parts[:-1])
                exec('import %s' % pkgname)
                srcfile = getattr(sys.modules[pkgname], '__file__', '')
        outputdir = os.path.dirname(srcfile)

    # Determine if the module is package of a package or not.
    # If so, fix the tabmodule setting so that tables load correctly
    pkg = pdict.get('__package__')
    if pkg and isinstance(tabmodule, str):
        if '.' not in tabmodule:
            tabmodule = pkg + '.' + tabmodule

    # Set start symbol if it's specified directly using an argument
    if start is not None:
        pdict['start'] = start

    # Collect parser information from the dictionary
    pinfo = ParserReflect(pdict, log=errorlog)
    pinfo.get_all()

    if pinfo.error:
        raise YaccError('Unable to build parser')

    # Check signature against table files (if any)
    signature = pinfo.signature()

    # Read the tables: if they exist and their stored signature matches the
    # current grammar (or optimize mode skips the check), reuse them.
    try:
        lr = LRTable()
        if picklefile:
            read_signature = lr.read_pickle(picklefile)
        else:
            read_signature = lr.read_table(tabmodule)
        if optimize or (read_signature == signature):
            try:
                lr.bind_callables(pinfo.pdict)
                parser = LRParser(lr, pinfo.error_func)
                parse = parser.parse
                return parser
            except Exception as e:
                errorlog.warning('There was a problem loading the table file: %r', e)
    except VersionError as e:
        errorlog.warning(str(e))
    except ImportError:
        # No pre-generated tables; fall through to regeneration below.
        pass

    if debuglog is None:
        if debug:
            try:
                debuglog = PlyLogger(open(os.path.join(outputdir, debugfile), 'w'))
            except IOError as e:
                errorlog.warning("Couldn't open %r. %s" % (debugfile, e))
                debuglog = NullLogger()
        else:
            debuglog = NullLogger()

    debuglog.info('Created by PLY version %s (http://www.dabeaz.com/ply)', __version__)

    errors = False

    # Validate the parser information
    if pinfo.validate_all():
        raise YaccError('Unable to build parser')

    if not pinfo.error_func:
        errorlog.warning('no p_error() function is defined')

    # Create a grammar object
    grammar = Grammar(pinfo.tokens)

    # Set precedence level for terminals
    for term, assoc, level in pinfo.preclist:
        try:
            grammar.set_precedence(term, assoc, level)
        except GrammarError as e:
            errorlog.warning('%s', e)

    # Add productions to the grammar
    for funcname, gram in pinfo.grammar:
        file, line, prodname, syms = gram
        try:
            grammar.add_production(prodname, syms, funcname, file, line)
        except GrammarError as e:
            errorlog.error('%s', e)
            errors = True

    # Set the grammar start symbols
    try:
        if start is None:
            grammar.set_start(pinfo.start)
        else:
            grammar.set_start(start)
    except GrammarError as e:
        errorlog.error(str(e))
        errors = True

    if errors:
        raise YaccError('Unable to build parser')

    # Verify the grammar structure
    undefined_symbols = grammar.undefined_symbols()
    for sym, prod in undefined_symbols:
        errorlog.error('%s:%d: Symbol %r used, but not defined as a token or a rule', prod.file, prod.line, sym)
        errors = True

    unused_terminals = grammar.unused_terminals()
    if unused_terminals:
        debuglog.info('')
        debuglog.info('Unused terminals:')
        debuglog.info('')
        for term in unused_terminals:
            errorlog.warning('Token %r defined, but not used', term)
            debuglog.info('    %s', term)

    # Print out all productions to the debug log
    if debug:
        debuglog.info('')
        debuglog.info('Grammar')
        debuglog.info('')
        for n, p in enumerate(grammar.Productions):
            debuglog.info('Rule %-5d %s', n, p)

    # Find unused non-terminals
    unused_rules = grammar.unused_rules()
    for prod in unused_rules:
        errorlog.warning('%s:%d: Rule %r defined, but not used', prod.file, prod.line, prod.name)

    if len(unused_terminals) == 1:
        errorlog.warning('There is 1 unused token')
    if len(unused_terminals) > 1:
        errorlog.warning('There are %d unused tokens', len(unused_terminals))

    if len(unused_rules) == 1:
        errorlog.warning('There is 1 unused rule')
    if len(unused_rules) > 1:
        errorlog.warning('There are %d unused rules', len(unused_rules))

    if debug:
        debuglog.info('')
        debuglog.info('Terminals, with rules where they appear')
        debuglog.info('')
        terms = list(grammar.Terminals)
        terms.sort()
        for term in terms:
            debuglog.info('%-20s : %s', term, ' '.join([str(s) for s in grammar.Terminals[term]]))

        debuglog.info('')
        debuglog.info('Nonterminals, with rules where they appear')
        debuglog.info('')
        nonterms = list(grammar.Nonterminals)
        nonterms.sort()
        for nonterm in nonterms:
            debuglog.info('%-20s : %s', nonterm, ' '.join([str(s) for s in grammar.Nonterminals[nonterm]]))
        debuglog.info('')

    if check_recursion:
        unreachable = grammar.find_unreachable()
        for u in unreachable:
            errorlog.warning('Symbol %r is unreachable', u)

        infinite = grammar.infinite_cycles()
        for inf in infinite:
            errorlog.error('Infinite recursion detected for symbol %r', inf)
            errors = True

    unused_prec = grammar.unused_precedence()
    for term, assoc in unused_prec:
        errorlog.error('Precedence rule %r defined for unknown symbol %r', assoc, term)
        errors = True

    if errors:
        raise YaccError('Unable to build parser')

    # Run the LRGeneratedTable on the grammar
    if debug:
        errorlog.debug('Generating %s tables', method)

    lr = LRGeneratedTable(grammar, method, debuglog)

    if debug:
        num_sr = len(lr.sr_conflicts)

        # Report shift/reduce and reduce/reduce conflicts
        if num_sr == 1:
            errorlog.warning('1 shift/reduce conflict')
        elif num_sr > 1:
            errorlog.warning('%d shift/reduce conflicts', num_sr)

        num_rr = len(lr.rr_conflicts)
        if num_rr == 1:
            errorlog.warning('1 reduce/reduce conflict')
        elif num_rr > 1:
            errorlog.warning('%d reduce/reduce conflicts', num_rr)

    # Write out conflicts to the output file
    if debug and (lr.sr_conflicts or lr.rr_conflicts):
        debuglog.warning('')
        debuglog.warning('Conflicts:')
        debuglog.warning('')

        for state, tok, resolution in lr.sr_conflicts:
            debuglog.warning('shift/reduce conflict for %s in state %d resolved as %s', tok, state, resolution)

        # Deduplicate reduce/reduce reports by (state, rule, rejected) identity.
        already_reported = set()
        for state, rule, rejected in lr.rr_conflicts:
            if (state, id(rule), id(rejected)) in already_reported:
                continue

            debuglog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
            debuglog.warning('rejected rule (%s) in state %d', rejected, state)
            errorlog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
            errorlog.warning('rejected rule (%s) in state %d', rejected, state)
            already_reported.add((state, id(rule), id(rejected)))

        warned_never = []
        for state, rule, rejected in lr.rr_conflicts:
            if not rejected.reduced and (rejected not in warned_never):
                debuglog.warning('Rule (%s) is never reduced', rejected)
                errorlog.warning('Rule (%s) is never reduced', rejected)
                warned_never.append(rejected)

    # Write the table file if requested
    if write_tables:
        try:
            lr.write_table(tabmodule, outputdir, signature)
        except IOError as e:
            errorlog.warning("Couldn't create %r. %s" % (tabmodule, e))

    # Write a pickled version of the tables
    if picklefile:
        try:
            lr.pickle_table(picklefile, signature)
        except IOError as e:
            errorlog.warning("Couldn't create %r. %s" % (picklefile, e))

    # Build the parser
    lr.bind_callables(pinfo.pdict)
    parser = LRParser(lr, pinfo.error_func)

    parse = parser.parse
    return parser
171,754 |
The provided code snippet includes necessary dependencies for implementing the `t_COMMENT` function. Write a Python function `def t_COMMENT(t)` to solve the following problem:
r'/\*(.|\n)*?\*/'
Here is the function:
def t_COMMENT(t):
    r'/\*(.|\n)*?\*/'
    # The docstring above is the PLY token regex and must not change.
    # Advance the line counter by the number of newlines the comment spans.
    newline_count = t.value.count('\n')
    t.lexer.lineno += newline_count
    return t
171,755 |
The provided code snippet includes necessary dependencies for implementing the `t_CPPCOMMENT` function. Write a Python function `def t_CPPCOMMENT(t)` to solve the following problem:
r'//.*\n'
Here is the function:
def t_CPPCOMMENT(t):
    r'//.*\n'
    # The docstring above is the PLY token regex and must not change.
    # A C++-style comment always consumes exactly one trailing newline.
    t.lexer.lineno = t.lexer.lineno + 1
    return t
171,756 | import sys
The provided code snippet includes necessary dependencies for implementing the `_repr` function. Write a Python function `def _repr(obj)` to solve the following problem:
Get the representation of an object, with dedicated pprint-like format for lists.
Here is the function:
def _repr(obj):
"""
Get the representation of an object, with dedicated pprint-like format for lists.
"""
if isinstance(obj, list):
return '[' + (',\n '.join((_repr(e).replace('\n', '\n ') for e in obj))) + '\n]'
else:
return repr(obj) | Get the representation of an object, with dedicated pprint-like format for lists. |
171,757 | from dataclasses import dataclass
from typing import Any, Optional
from .exceptions import InvalidHash
from .low_level import Type
Any = object()
Optional: _SpecialForm = ...
The provided code snippet includes necessary dependencies for implementing the `_check_types` function. Write a Python function `def _check_types(**kw: Any) -> Optional[str]` to solve the following problem:
Check each ``name: (value, types)`` in *kw*. Returns a human-readable string of all violations or ``None``.
Here is the function:
def _check_types(**kw: Any) -> Optional[str]:
"""
Check each ``name: (value, types)`` in *kw*.
Returns a human-readable string of all violations or `None``.
"""
errors = []
for name, (value, types) in kw.items():
if not isinstance(value, types):
if isinstance(types, tuple):
types = ", or ".join(t.__name__ for t in types)
else:
types = types.__name__
errors.append(
"'{name}' must be a {type} (got {actual})".format(
name=name, type=types, actual=type(value).__name__
)
)
if errors != []:
return ", ".join(errors) + "."
return None | Check each ``name: (value, types)`` in *kw*. Returns a human-readable string of all violations or `None``. |
171,758 | from dataclasses import dataclass
from typing import Any, Optional
from .exceptions import InvalidHash
from .low_level import Type
def _decoded_str_len(l: int) -> int:
"""
Compute how long an encoded string of length *l* becomes.
"""
rem = l % 4
if rem == 3:
last_group_len = 2
elif rem == 2:
last_group_len = 1
else:
last_group_len = 0
return l // 4 * 3 + last_group_len
# NOTE(review): upstream argon2-cffi defines this as a @dataclass; the
# decorator does not appear here -- confirm against the original source.
class Parameters:
    """
    Argon2 hash parameters.

    See :doc:`parameters` on how to pick them.

    :ivar Type type: Hash type.
    :ivar int version: Argon2 version.
    :ivar int salt_len: Length of the salt in bytes.
    :ivar int hash_len: Length of the hash in bytes.
    :ivar int time_cost: Time cost in iterations.
    :ivar int memory_cost: Memory cost in kibibytes.
    :ivar int parallelism: Number of parallel threads.

    .. versionadded:: 18.2.0
    """

    type: Type
    version: int
    salt_len: int
    hash_len: int
    time_cost: int
    memory_cost: int
    parallelism: int

    # __slots__ keeps instances small; this list must stay in sync with the
    # annotated fields above.
    __slots__ = [
        "type",
        "version",
        "salt_len",
        "hash_len",
        "time_cost",
        "memory_cost",
        "parallelism",
    ]
# Map the variant identifier embedded in an encoded hash to its Type member.
_NAME_TO_TYPE = {"argon2id": Type.ID, "argon2i": Type.I, "argon2d": Type.D}
# Every encoded hash must carry exactly these parameter keys (kept sorted so
# it can be compared against sorted(kvs.keys()) in extract_parameters).
_REQUIRED_KEYS = sorted(("v", "m", "t", "p"))
# Subclasses ValueError so existing `except ValueError` handlers still catch it.
class InvalidHash(ValueError):
    """
    Raised if the hash is invalid before passing it to Argon2.

    .. versionadded:: 18.2.0
    """
The provided code snippet includes necessary dependencies for implementing the `extract_parameters` function. Write a Python function `def extract_parameters(hash: str) -> Parameters` to solve the following problem:
Extract parameters from an encoded *hash*. :param str hash: An encoded Argon2 hash string. :rtype: Parameters .. versionadded:: 18.2.0
Here is the function:
def extract_parameters(hash: str) -> Parameters:
    """
    Extract parameters from an encoded *hash*.

    :param str hash: An encoded Argon2 hash string.

    :rtype: Parameters

    :raises InvalidHash: If *hash* is not a well-formed encoded Argon2 hash.

    .. versionadded:: 18.2.0
    """
    fields = hash.split("$")

    # Backwards compatibility for Argon v1.2 hashes, which lack the version
    # field: splice in the implied "v=18".
    if len(fields) == 5:
        fields.insert(2, "v=18")

    if len(fields) != 6 or fields[0] != "":
        raise InvalidHash

    try:
        type = _NAME_TO_TYPE[fields[1]]
        kvs = {}
        # fields[2] is "v=NN"; fields[3] is "m=..,t=..,p=..".
        for pair in [fields[2]] + fields[3].split(","):
            key, _, raw = pair.partition("=")
            kvs[key] = int(raw)
    except Exception:
        raise InvalidHash

    if sorted(kvs) != _REQUIRED_KEYS:
        raise InvalidHash

    return Parameters(
        type=type,
        salt_len=_decoded_str_len(len(fields[4])),
        hash_len=_decoded_str_len(len(fields[5])),
        version=kvs["v"],
        time_cost=kvs["t"],
        memory_cost=kvs["m"],
        parallelism=kvs["p"],
    )
171,759 | import os
from typing import Union
from ._typing import Literal
from ._utils import Parameters, _check_types, extract_parameters
from .exceptions import InvalidHash
from .low_level import Type, hash_secret, verify_secret
from .profiles import RFC_9106_LOW_MEMORY
Union: _SpecialForm = ...
The provided code snippet includes necessary dependencies for implementing the `_ensure_bytes` function. Write a Python function `def _ensure_bytes(s: Union[bytes, str], encoding: str) -> bytes` to solve the following problem:
Ensure *s* is a bytes string. Encode using *encoding* if it isn't.
Here is the function:
def _ensure_bytes(s: Union[bytes, str], encoding: str) -> bytes:
"""
Ensure *s* is a bytes string. Encode using *encoding* if it isn't.
"""
if isinstance(s, bytes):
return s
return s.encode(encoding) | Ensure *s* is a bytes string. Encode using *encoding* if it isn't. |
171,760 | import os
from typing import Optional
from ._password_hasher import (
DEFAULT_HASH_LENGTH,
DEFAULT_MEMORY_COST,
DEFAULT_PARALLELISM,
DEFAULT_RANDOM_SALT_LENGTH,
DEFAULT_TIME_COST,
)
from ._typing import Literal
from .low_level import Type, hash_secret, hash_secret_raw, verify_secret
Optional: _SpecialForm = ...
DEFAULT_RANDOM_SALT_LENGTH = RFC_9106_LOW_MEMORY.salt_len
DEFAULT_HASH_LENGTH = RFC_9106_LOW_MEMORY.hash_len
DEFAULT_TIME_COST = RFC_9106_LOW_MEMORY.time_cost
DEFAULT_MEMORY_COST = RFC_9106_LOW_MEMORY.memory_cost
DEFAULT_PARALLELISM = RFC_9106_LOW_MEMORY.parallelism
class Type(Enum):
    """
    Enum of Argon2 variants.

    Please see :doc:`parameters` on how to pick one.
    """

    # Member values come straight from the constants the C library exports.
    D = lib.Argon2_d
    r"""
    Argon2\ **d** is faster and uses data-depending memory access, which makes
    it less suitable for hashing secrets and more suitable for cryptocurrencies
    and applications with no threats from side-channel timing attacks.
    """

    I = lib.Argon2_i
    r"""
    Argon2\ **i** uses data-independent memory access. Argon2i is slower as
    it makes more passes over the memory to protect from tradeoff attacks.
    """

    ID = lib.Argon2_id
    r"""
    Argon2\ **id** is a hybrid of Argon2i and Argon2d, using a combination of
    data-depending and data-independent memory accesses, which gives some of
    Argon2i's resistance to side-channel cache timing attacks and much of
    Argon2d's resistance to GPU cracking attacks.

    That makes it the preferred type for password hashing and password-based
    key derivation.

    .. versionadded:: 16.3.0
    """
def hash_secret(
    secret: bytes,
    salt: bytes,
    time_cost: int,
    memory_cost: int,
    parallelism: int,
    hash_len: int,
    type: Type,
    version: int = ARGON2_VERSION,
) -> bytes:
    """
    Hash *secret* and return an **encoded** hash.

    An encoded hash can be directly passed into :func:`verify_secret` as it
    contains all parameters and the salt.

    :param bytes secret: Secret to hash.
    :param bytes salt: A salt_.  Should be random and different for each
        secret.
    :param Type type: Which Argon2 variant to use.
    :param int version: Which Argon2 version to use.

    For an explanation of the Argon2 parameters see :class:`PasswordHasher`.

    :rtype: bytes

    :raises argon2.exceptions.HashingError: If hashing fails.

    .. versionadded:: 16.0.0

    .. _salt: https://en.wikipedia.org/wiki/Salt_(cryptography)
    .. _kibibytes: https://en.wikipedia.org/wiki/Binary_prefix#kibi
    """
    # Ask the C library how long the encoded form will be.  The extra byte is
    # presumably for the trailing NUL terminator -- confirm against the
    # argon2_encodedlen documentation.
    size = (
        lib.argon2_encodedlen(
            time_cost,
            memory_cost,
            parallelism,
            len(salt),
            hash_len,
            type.value,
        )
        + 1
    )

    buf = ffi.new("char[]", size)

    rv = lib.argon2_hash(
        time_cost,
        memory_cost,
        parallelism,
        ffi.new("uint8_t[]", secret),
        len(secret),
        ffi.new("uint8_t[]", salt),
        len(salt),
        ffi.NULL,  # raw-hash output slot unused here (cf. hash_secret_raw)
        hash_len,
        buf,
        size,
        type.value,
        version,
    )
    if rv != lib.ARGON2_OK:
        # Translate the C error code into a readable exception message.
        raise HashingError(error_to_str(rv))

    return ffi.string(buf)
The provided code snippet includes necessary dependencies for implementing the `hash_password` function. Write a Python function `def hash_password( password: bytes, salt: Optional[bytes] = None, time_cost: int = DEFAULT_TIME_COST, memory_cost: int = DEFAULT_MEMORY_COST, parallelism: int = DEFAULT_PARALLELISM, hash_len: int = DEFAULT_HASH_LENGTH, type: Type = Type.I, ) -> bytes` to solve the following problem:
Legacy alias for :func:`hash_secret` with default parameters. .. deprecated:: 16.0.0 Use :class:`argon2.PasswordHasher` for passwords.
Here is the function:
def hash_password(
    password: bytes,
    salt: Optional[bytes] = None,
    time_cost: int = DEFAULT_TIME_COST,
    memory_cost: int = DEFAULT_MEMORY_COST,
    parallelism: int = DEFAULT_PARALLELISM,
    hash_len: int = DEFAULT_HASH_LENGTH,
    type: Type = Type.I,
) -> bytes:
    """
    Legacy alias for :func:`hash_secret` with default parameters.

    .. deprecated:: 16.0.0
        Use :class:`argon2.PasswordHasher` for passwords.
    """
    if salt is None:
        # Historical behaviour: generate a fresh random salt per call.
        salt = os.urandom(DEFAULT_RANDOM_SALT_LENGTH)
    return hash_secret(
        secret=password,
        salt=salt,
        time_cost=time_cost,
        memory_cost=memory_cost,
        parallelism=parallelism,
        hash_len=hash_len,
        type=type,
    )
171,761 | import os
from typing import Optional
from ._password_hasher import (
DEFAULT_HASH_LENGTH,
DEFAULT_MEMORY_COST,
DEFAULT_PARALLELISM,
DEFAULT_RANDOM_SALT_LENGTH,
DEFAULT_TIME_COST,
)
from ._typing import Literal
from .low_level import Type, hash_secret, hash_secret_raw, verify_secret
Optional: _SpecialForm = ...
DEFAULT_RANDOM_SALT_LENGTH = RFC_9106_LOW_MEMORY.salt_len
DEFAULT_HASH_LENGTH = RFC_9106_LOW_MEMORY.hash_len
DEFAULT_TIME_COST = RFC_9106_LOW_MEMORY.time_cost
DEFAULT_MEMORY_COST = RFC_9106_LOW_MEMORY.memory_cost
DEFAULT_PARALLELISM = RFC_9106_LOW_MEMORY.parallelism
class Type(Enum):
    """
    Enum of Argon2 variants.

    Please see :doc:`parameters` on how to pick one.
    """

    # Member values come straight from the constants the C library exports.
    D = lib.Argon2_d
    r"""
    Argon2\ **d** is faster and uses data-depending memory access, which makes
    it less suitable for hashing secrets and more suitable for cryptocurrencies
    and applications with no threats from side-channel timing attacks.
    """

    I = lib.Argon2_i
    r"""
    Argon2\ **i** uses data-independent memory access. Argon2i is slower as
    it makes more passes over the memory to protect from tradeoff attacks.
    """

    ID = lib.Argon2_id
    r"""
    Argon2\ **id** is a hybrid of Argon2i and Argon2d, using a combination of
    data-depending and data-independent memory accesses, which gives some of
    Argon2i's resistance to side-channel cache timing attacks and much of
    Argon2d's resistance to GPU cracking attacks.

    That makes it the preferred type for password hashing and password-based
    key derivation.

    .. versionadded:: 16.3.0
    """
def hash_secret_raw(
    secret: bytes,
    salt: bytes,
    time_cost: int,
    memory_cost: int,
    parallelism: int,
    hash_len: int,
    type: Type,
    version: int = ARGON2_VERSION,
) -> bytes:
    """
    Hash *password* and return a **raw** hash.

    This function takes the same parameters as :func:`hash_secret`.

    .. versionadded:: 16.0.0
    """
    buf = ffi.new("uint8_t[]", hash_len)

    rv = lib.argon2_hash(
        time_cost,
        memory_cost,
        parallelism,
        ffi.new("uint8_t[]", secret),
        len(secret),
        ffi.new("uint8_t[]", salt),
        len(salt),
        buf,  # raw-hash output buffer of exactly hash_len bytes
        hash_len,
        ffi.NULL,  # encoded-output slot unused here (cf. hash_secret)
        0,
        type.value,
        version,
    )
    if rv != lib.ARGON2_OK:
        # Translate the C error code into a readable exception message.
        raise HashingError(error_to_str(rv))

    return bytes(ffi.buffer(buf, hash_len))
The provided code snippet includes necessary dependencies for implementing the `hash_password_raw` function. Write a Python function `def hash_password_raw( password: bytes, salt: Optional[bytes] = None, time_cost: int = DEFAULT_TIME_COST, memory_cost: int = DEFAULT_MEMORY_COST, parallelism: int = DEFAULT_PARALLELISM, hash_len: int = DEFAULT_HASH_LENGTH, type: Type = Type.I, ) -> bytes` to solve the following problem:
Legacy alias for :func:`hash_secret_raw` with default parameters. .. deprecated:: 16.0.0 Use :class:`argon2.PasswordHasher` for passwords.
Here is the function:
def hash_password_raw(
    password: bytes,
    salt: Optional[bytes] = None,
    time_cost: int = DEFAULT_TIME_COST,
    memory_cost: int = DEFAULT_MEMORY_COST,
    parallelism: int = DEFAULT_PARALLELISM,
    hash_len: int = DEFAULT_HASH_LENGTH,
    type: Type = Type.I,
) -> bytes:
    """
    Legacy alias for :func:`hash_secret_raw` with default parameters.

    .. deprecated:: 16.0.0
        Use :class:`argon2.PasswordHasher` for passwords.
    """
    if salt is None:
        # Historical behaviour: generate a fresh random salt per call.
        salt = os.urandom(DEFAULT_RANDOM_SALT_LENGTH)
    return hash_secret_raw(
        secret=password,
        salt=salt,
        time_cost=time_cost,
        memory_cost=memory_cost,
        parallelism=parallelism,
        hash_len=hash_len,
        type=type,
    )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.