repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
FNNDSC/pfdicom
pfdicom/pfdicom.py
pfdicom.env_check
def env_check(self, *args, **kwargs):
    """
    Common entry point for sanity checks on the runtime environment
    (input / output directories, etc).

    :return: dict with
        'status'    : False when a required setting is missing
        'str_error' : human-readable description of the problem
    """
    str_error = ''
    # An output directory is mandatory for any downstream processing.
    b_status = len(self.str_outputDir) > 0
    if not b_status:
        str_error = 'output directory not specified.'
        self.dp.qprint(str_error, comms = 'error')
        error.warn(self, 'outputDirFail', drawBox = True)
    return {
        'status':       b_status,
        'str_error':    str_error
    }
[ "def", "env_check", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "b_status", "=", "True", "str_error", "=", "''", "if", "not", "len", "(", "self", ".", "str_outputDir", ")", ":", "b_status", "=", "False", "str_error", "=", "'outp...
This method provides a common entry for any checks on the environment (input / output dirs, etc)
[ "This", "method", "provides", "a", "common", "entry", "for", "any", "checks", "on", "the", "environment", "(", "input", "/", "output", "dirs", "etc", ")" ]
train
https://github.com/FNNDSC/pfdicom/blob/91a0426c514a3496cb2e0576481055a47afee8d8/pfdicom/pfdicom.py#L158-L173
FNNDSC/pfdicom
pfdicom/pfdicom.py
pfdicom.tagsInString_process
def tagsInString_process(self, d_DICOM, astr, *args, **kwargs):
    """
    Substitute '%'-tagged DICOM tag names in a string template with the
    actual tag values.

    For example, an output filename template

        %PatientAge-%PatientID-output.txt

    is expanded to

        006Y-4412364-ouptut.txt

    A tag may additionally carry a function, written as an underscore
    prefixed/suffixed token before the tag name.  For example

        %PatientAge-%_md5|4_PatientID-output.txt

    md5-hashes the PatientID and keeps the first 4 characters:

        006Y-7f38-output.txt

    Supported functions: md5|<chars>, strmsk|<mask>, nospc|<char>.

    :param d_DICOM: dict holding 'l_tagRaw' (known tag names) and
                    'd_dicomSimple' (tag -> simple value)
    :param astr: the template string to expand
    :return: dict with 'status', 'b_tagsFound', 'str_result'
    """
    b_tagsFound = False
    str_replace = ''    # the looked-up / processed tag value
    l_tags = []         # template split on '%'
    l_tagsToSub = []    # actual tag names present in the template
    l_funcTag = []      # function/tag split helper
    l_args = []         # the function's '|'-separated arguments
    func = ''           # the function to apply
    tag = ''            # the tag in the func/tag combo
    chars = ''          # number of result chars to keep (md5)

    if '%' in astr:
        l_tags = astr.split('%')[1:]
        # Keep only real tags that occur somewhere in the template...
        l_tagsToSub = [i for i in d_DICOM['l_tagRaw']
                       if any(i in b for b in l_tags)]
        # ...and order them as they appear in the template.
        l_tagsToSubSort = sorted(
            l_tagsToSub,
            key = lambda x: [i for i, s in enumerate(l_tags) if x in s][0]
        )

        for tag, func in zip(l_tagsToSubSort, l_tags):
            b_tagsFound = True
            str_replace = d_DICOM['d_dicomSimple'][tag]

            if 'md5' in func:
                # hash the value, optionally truncating to <chars> chars
                str_replace = hashlib.md5(
                    str_replace.encode('utf-8')).hexdigest()
                l_funcTag = func.split('_')[1:]
                func = l_funcTag[0]
                l_args = func.split('|')
                if len(l_args) > 1:
                    chars = l_args[1]
                    str_replace = str_replace[0:int(chars)]
                astr = astr.replace('_%s_' % func, '')

            if 'strmsk' in func:
                # overlay a mask; '*' positions keep the original char
                l_funcTag = func.split('_')[1:]
                func = l_funcTag[0]
                str_msk = func.split('|')[1]
                str_replace = ''.join(
                    i if j == '*' else j
                    for i, j in zip(str_replace, str_msk)
                )
                astr = astr.replace('_%s_' % func, '')

            if 'nospc' in func:
                # squash non-alphanumeric runs, joining with str_char
                l_funcTag = func.split('_')[1:]
                func = l_funcTag[0]
                l_args = func.split('|')
                str_char = ''
                if len(l_args) > 1:
                    str_char = l_args[1]
                str_replace = re.sub(r'\W+', ' ', str_replace)
                str_replace = str_char.join(str_replace.split())
                astr = astr.replace('_%s_' % func, '')

            astr = astr.replace('%' + tag, str_replace)

    return {
        'status':       True,
        'b_tagsFound':  b_tagsFound,
        'str_result':   astr
    }
[ "def", "tagsInString_process", "(", "self", ",", "d_DICOM", ",", "astr", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "b_tagsFound", "=", "False", "str_replace", "=", "''", "# The lookup/processed tag value", "l_tags", "=", "[", "]", "# The input strin...
This method substitutes DICOM tags that are '%'-tagged in a string template with the actual tag lookup. For example, an output filename that is specified as the following string: %PatientAge-%PatientID-output.txt will be parsed to 006Y-4412364-ouptut.txt It is also possible to apply certain permutations/functions to a tag. For example, a function is identified by an underscore prefixed and suffixed string as part of the DICOM tag. If found, this function is applied to the tag value. For example, %PatientAge-%_md5|4_PatientID-output.txt will apply an md5 hash to the PatientID and use the first 4 characters: 006Y-7f38-output.txt
[ "This", "method", "substitutes", "DICOM", "tags", "that", "are", "%", "-", "tagged", "in", "a", "string", "template", "with", "the", "actual", "tag", "lookup", "." ]
train
https://github.com/FNNDSC/pfdicom/blob/91a0426c514a3496cb2e0576481055a47afee8d8/pfdicom/pfdicom.py#L175-L263
FNNDSC/pfdicom
pfdicom/pfdicom.py
pfdicom.DICOMfile_read
def DICOMfile_read(self, *args, **kwargs):
    """
    Read a DICOM file and perform some initial parsing of tags.

    NB! For thread safety, class member variables should not be
    assigned since other threads might override/change these
    variables in mid-flight!

    kwargs:
        file        : path of the DICOM file to read
        l_tagsToUse : optional explicit list of tags to parse

    Positional (legacy) calling: args[0] is a one-element list
    holding the file path.

    :return: dict with 'status', 'inputPath', 'inputFilename',
             'outputFileStem', 'd_DICOM', 'l_tagsToUse'
    """
    b_status = False
    l_tags = []
    l_tagsToUse = []
    d_tagsInString = {}
    str_file = ""
    d_DICOM = {
        'dcm':              None,
        'd_dcm':            {},
        'strRaw':           '',
        'l_tagRaw':         [],
        'd_json':           {},
        'd_dicom':          {},
        'd_dicomSimple':    {}
    }

    for k, v in kwargs.items():
        if k == 'file':         str_file = v
        if k == 'l_tagsToUse':  l_tags = v

    if len(args):
        l_file = args[0]
        str_file = l_file[0]

    str_localFile = os.path.basename(str_file)
    str_path = os.path.dirname(str_file)

    try:
        d_DICOM['dcm'] = dicom.read_file(str_file)
        b_status = True
    except Exception:
        # Narrowed from a bare 'except' so KeyboardInterrupt/SystemExit
        # still propagate.
        self.dp.qprint('In directory: %s' % os.getcwd(), comms = 'error')
        self.dp.qprint('Failed to read %s' % str_file, comms = 'error')
        # Fix: the original fell through here with d_DICOM['dcm'] still
        # None and crashed on dict(None) below; return early instead.
        return {
            'status':           False,
            'inputPath':        str_path,
            'inputFilename':    str_localFile,
            'outputFileStem':   '',
            'd_DICOM':          d_DICOM,
            'l_tagsToUse':      l_tagsToUse
        }

    d_DICOM['d_dcm']    = dict(d_DICOM['dcm'])
    d_DICOM['strRaw']   = str(d_DICOM['dcm'])
    d_DICOM['l_tagRaw'] = d_DICOM['dcm'].dir()

    # Fix: work on a copy so neither the caller's l_tagsToUse list nor
    # d_DICOM['l_tagRaw'] is mutated by the PixelData removal below
    # (the original shared the same list object in both cases).
    l_tagsToUse = list(l_tags) if len(l_tags) else list(d_DICOM['l_tagRaw'])
    if 'PixelData' in l_tagsToUse:
        l_tagsToUse.remove('PixelData')

    for key in l_tagsToUse:
        d_DICOM['d_dicom'][key] = d_DICOM['dcm'].data_element(key)
        try:
            d_DICOM['d_dicomSimple'][key] = getattr(d_DICOM['dcm'], key)
        except Exception:
            d_DICOM['d_dicomSimple'][key] = "no attribute"
        d_DICOM['d_json'][key] = str(d_DICOM['d_dicomSimple'][key])

    # Expand any %-tags in the configured output file stem.
    d_tagsInString = self.tagsInString_process(d_DICOM, self.str_outputFileStem)
    str_outputFile = d_tagsInString['str_result']

    return {
        'status':           b_status,
        'inputPath':        str_path,
        'inputFilename':    str_localFile,
        'outputFileStem':   str_outputFile,
        'd_DICOM':          d_DICOM,
        'l_tagsToUse':      l_tagsToUse
    }
[ "def", "DICOMfile_read", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "b_status", "=", "False", "l_tags", "=", "[", "]", "l_tagsToUse", "=", "[", "]", "d_tagsInString", "=", "{", "}", "str_file", "=", "\"\"", "d_DICOM", "=", "{",...
Read a DICOM file and perform some initial parsing of tags. NB! For thread safety, class member variables should not be assigned since other threads might override/change these variables in mid- flight!
[ "Read", "a", "DICOM", "file", "and", "perform", "some", "initial", "parsing", "of", "tags", "." ]
train
https://github.com/FNNDSC/pfdicom/blob/91a0426c514a3496cb2e0576481055a47afee8d8/pfdicom/pfdicom.py#L266-L348
FNNDSC/pfdicom
pfdicom/pfdicom.py
pfdicom.filelist_prune
def filelist_prune(self, at_data, *args, **kwargs):
    """
    Given a (path, filelist) tuple, possibly prune the file list by
    the configured extension.

    :param at_data: tuple of (directory path, list of filenames)
    :return: dict with 'status' and the sorted 'l_file' list
             (l_file is None when nothing survives the prune)
    """
    str_path, al_file = at_data

    # Keep only files matching the configured extension, if any is set.
    if len(self.str_extension):
        al_file = [f for f in al_file if self.str_extension in f]

    if len(al_file):
        al_file.sort()
        return {
            'status':   True,
            'l_file':   al_file
        }

    self.dp.qprint(
        "No valid files to analyze found in path %s!" % str_path,
        comms = 'error', level = 3)
    return {
        'status':   False,
        'l_file':   None
    }
[ "def", "filelist_prune", "(", "self", ",", "at_data", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "b_status", "=", "True", "l_file", "=", "[", "]", "str_path", "=", "at_data", "[", "0", "]", "al_file", "=", "at_data", "[", "1", "]", "if", ...
Given a list of files, possibly prune list by extension.
[ "Given", "a", "list", "of", "files", "possibly", "prune", "list", "by", "extension", "." ]
train
https://github.com/FNNDSC/pfdicom/blob/91a0426c514a3496cb2e0576481055a47afee8d8/pfdicom/pfdicom.py#L350-L375
FNNDSC/pfdicom
pfdicom/pfdicom.py
pfdicom.ret_dump
def ret_dump(self, d_ret, **kwargs):
    """
    JSON print results to console (or caller).

    kwargs:
        JSONprint : truthy (default) to print, falsy to suppress
    """
    b_print = bool(kwargs.get('JSONprint', True))
    if b_print:
        print(json.dumps(d_ret, indent = 4, sort_keys = True))
[ "def", "ret_dump", "(", "self", ",", "d_ret", ",", "*", "*", "kwargs", ")", ":", "b_print", "=", "True", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "if", "k", "==", "'JSONprint'", ":", "b_print", "=", "bool", "(", "v", "...
JSON print results to console (or caller)
[ "JSON", "print", "results", "to", "console", "(", "or", "caller", ")" ]
train
https://github.com/FNNDSC/pfdicom/blob/91a0426c514a3496cb2e0576481055a47afee8d8/pfdicom/pfdicom.py#L377-L391
FNNDSC/pfdicom
pfdicom/pfdicom.py
pfdicom.run
def run(self, *args, **kwargs):
    """
    The run method is merely a thin shim down to the embedded
    pftree run method.

    kwargs:
        timerStart : truthy to start the elapsed-time timer

    :return: dict with 'status', 'd_env', 'd_pftreeRun',
             'd_inputAnalysis' and 'runTime'
    """
    b_status = True
    d_pftreeRun = {}
    d_inputAnalysis = {}
    d_env = self.env_check()

    self.dp.qprint(
        "\tStarting pfdicom run... (please be patient while running)",
        level = 1
    )

    if bool(kwargs.get('timerStart', False)):
        other.tic()

    if d_env['status']:
        d_pftreeRun = self.pf_tree.run(timerStart = False)
    else:
        b_status = False

    str_startDir = os.getcwd()
    os.chdir(self.str_inputDir)
    # Optionally prune the input tree by extension before returning.
    if b_status and len(self.str_extension):
        d_inputAnalysis = self.pf_tree.tree_process(
            inputReadCallback       = None,
            analysisCallback        = self.filelist_prune,
            outputWriteCallback     = None,
            applyResultsTo          = 'inputTree',
            applyKey                = 'l_file',
            persistAnalysisResults  = True
        )
    os.chdir(str_startDir)

    d_ret = {
        'status':           b_status and d_pftreeRun['status'],
        'd_env':            d_env,
        'd_pftreeRun':      d_pftreeRun,
        'd_inputAnalysis':  d_inputAnalysis,
        'runTime':          other.toc()
    }

    if self.b_json:
        self.ret_dump(d_ret, **kwargs)

    self.dp.qprint('\tReturning from pfdicom run...', level = 1)
    return d_ret
[ "def", "run", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "b_status", "=", "True", "d_pftreeRun", "=", "{", "}", "d_inputAnalysis", "=", "{", "}", "d_env", "=", "self", ".", "env_check", "(", ")", "b_timerStart", "=", "False", ...
The run method is merely a thin shim down to the embedded pftree run method.
[ "The", "run", "method", "is", "merely", "a", "thin", "shim", "down", "to", "the", "embedded", "pftree", "run", "method", "." ]
train
https://github.com/FNNDSC/pfdicom/blob/91a0426c514a3496cb2e0576481055a47afee8d8/pfdicom/pfdicom.py#L393-L447
GustavePate/distarkcli
distarkcli/transport/majordomoclient.py
MajorDomoClient.reconnect_to_broker
def reconnect_to_broker(self):
    """Connect or reconnect to broker"""
    # Tear down any existing socket before opening a fresh one.
    if self.client:
        self.poller.unregister(self.client)
        self.client.close()
    sock = self.ctx.socket(zmq.DEALER)
    sock.linger = 0
    sock.connect(self.broker)
    self.client = sock
    self.poller.register(self.client, zmq.POLLIN)
    if self.verbose:
        logging.info("I: connecting to broker at %s...", self.broker)
[ "def", "reconnect_to_broker", "(", "self", ")", ":", "#print \"CONNECT !\"", "if", "self", ".", "client", ":", "self", ".", "poller", ".", "unregister", "(", "self", ".", "client", ")", "self", ".", "client", ".", "close", "(", ")", "self", ".", "client"...
Connect or reconnect to broker
[ "Connect", "or", "reconnect", "to", "broker" ]
train
https://github.com/GustavePate/distarkcli/blob/44b0e637e94ebb2687a1b7e2f6c5d0658d775238/distarkcli/transport/majordomoclient.py#L43-L54
GustavePate/distarkcli
distarkcli/transport/majordomoclient.py
MajorDomoClient.send
def send(self, service, request):
    """Send request to broker

    Wraps the payload in the MDP client protocol frames:
        Frame 0: empty (REQ emulation)
        Frame 1: "MDPCxy" (six bytes, MDP/Client x.y)
        Frame 2: Service name (printable string)
    """
    frames = request if isinstance(request, list) else [request]
    frames = ['', MDP.C_CLIENT, service] + frames
    if self.verbose:
        logging.warn("I: send request to '%s' service: ", service)
        dump(frames)
    self.client.send_multipart(frames)
[ "def", "send", "(", "self", ",", "service", ",", "request", ")", ":", "if", "not", "isinstance", "(", "request", ",", "list", ")", ":", "request", "=", "[", "request", "]", "# Prefix request with protocol frames", "# Frame 0: empty (REQ emulation)", "# Frame 1: \"...
Send request to broker
[ "Send", "request", "to", "broker" ]
train
https://github.com/GustavePate/distarkcli/blob/44b0e637e94ebb2687a1b7e2f6c5d0658d775238/distarkcli/transport/majordomoclient.py#L56-L71
GustavePate/distarkcli
distarkcli/transport/majordomoclient.py
MajorDomoClient.recv
def recv(self):
    """Returns the reply message or None if there was no reply."""
    try:
        items = self.poller.poll(self.timeout)
    except KeyboardInterrupt:
        return  # interrupted

    # No reply within the timeout: give up.
    if not items:
        logging.warn("W: permanent error, abandoning request")
        return

    msg = self.client.recv_multipart()
    self.close()
    if self.verbose:
        logging.info("I: received reply:")
        dump(msg)

    # Don't try to handle errors, just assert noisily
    assert len(msg) >= 4

    header = msg.pop(0)     # empty delimiter frame
    header = msg.pop(0)     # protocol header
    assert MDP.C_CLIENT == header
    header = msg.pop(0)     # service name; TODO: exploit this
    return msg
[ "def", "recv", "(", "self", ")", ":", "try", ":", "items", "=", "self", ".", "poller", ".", "poll", "(", "self", ".", "timeout", ")", "except", "KeyboardInterrupt", ":", "return", "# interrupted", "if", "items", ":", "# if we got a reply, process it", "msg",...
Returns the reply message or None if there was no reply.
[ "Returns", "the", "reply", "message", "or", "None", "if", "there", "was", "no", "reply", "." ]
train
https://github.com/GustavePate/distarkcli/blob/44b0e637e94ebb2687a1b7e2f6c5d0658d775238/distarkcli/transport/majordomoclient.py#L73-L102
duniter/duniter-python-api
examples/send_certification.py
get_identity_document
async def get_identity_document(client: Client, current_block: dict, pubkey: str) -> Identity:
    """
    Get the identity document of the pubkey

    :param client: Client to connect to the api
    :param current_block: Current block data
    :param pubkey: UID/Public key

    :rtype: Identity
    """
    # Request the wot/lookup/pubkey path
    lookup_data = await client(bma.wot.lookup, pubkey)

    uid = None
    timestamp = BlockUID.empty()
    signature = None

    # Scan the lookup results for entries matching our pubkey and
    # capture the self-certification data (last matching uid wins).
    for result in lookup_data['results']:
        if result["pubkey"] != pubkey:
            continue
        for uid_data in result['uids']:
            timestamp = BlockUID.from_str(uid_data["meta"]["timestamp"])
            uid = uid_data["uid"]
            signature = uid_data["self"]

    # Build the self-certification document.
    return Identity(
        version=10,
        currency=current_block['currency'],
        pubkey=pubkey,
        uid=uid,
        ts=timestamp,
        signature=signature
    )
[ "async", "def", "get_identity_document", "(", "client", ":", "Client", ",", "current_block", ":", "dict", ",", "pubkey", ":", "str", ")", "->", "Identity", ":", "# Here we request for the path wot/lookup/pubkey", "lookup_data", "=", "await", "client", "(", "bma", ...
Get the identity document of the pubkey :param client: Client to connect to the api :param current_block: Current block data :param pubkey: UID/Public key :rtype: Identity
[ "Get", "the", "identity", "document", "of", "the", "pubkey" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/examples/send_certification.py#L20-L56
duniter/duniter-python-api
examples/send_certification.py
get_certification_document
def get_certification_document(current_block: dict, self_cert_document: Identity, from_pubkey: str) -> Certification:
    """
    Create and return a Certification document

    :param current_block: Current block data
    :param self_cert_document: Identity document
    :param from_pubkey: Pubkey of the certifier

    :rtype: Certification
    """
    # The blockstamp anchors the certification at the current block.
    blockstamp = BlockUID(current_block['number'], current_block['hash'])
    return Certification(
        version=10,
        currency=current_block['currency'],
        pubkey_from=from_pubkey,
        identity=self_cert_document,
        timestamp=blockstamp,
        signature=""
    )
[ "def", "get_certification_document", "(", "current_block", ":", "dict", ",", "self_cert_document", ":", "Identity", ",", "from_pubkey", ":", "str", ")", "->", "Certification", ":", "# construct Certification Document", "return", "Certification", "(", "version", "=", "...
Create and return a Certification document :param current_block: Current block data :param self_cert_document: Identity document :param from_pubkey: Pubkey of the certifier :rtype: Certification
[ "Create", "and", "return", "a", "Certification", "document" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/examples/send_certification.py#L59-L72
duniter/duniter-python-api
examples/send_certification.py
main
async def main():
    """
    Main code
    """
    # Create Client from endpoint string in Duniter format
    client = Client(BMAS_ENDPOINT)

    # Get the node summary infos to test the connection
    response = await client(bma.node.summary)
    print(response)

    # prompt hidden user entries for the credentials
    salt = getpass.getpass("Enter your passphrase (salt): ")
    password = getpass.getpass("Enter your password: ")

    # create key from credentials
    key = SigningKey.from_credentials(salt, password)
    pubkey_from = key.pubkey

    # prompt entry
    pubkey_to = input("Enter certified pubkey: ")

    # capture current block to get version and currency and blockstamp
    current_block = await client(bma.blockchain.current)

    # create our Identity document to sign the Certification document
    identity = await get_identity_document(client, current_block, pubkey_to)

    # build and sign the Certification document
    certification = get_certification_document(current_block, identity, pubkey_from)
    certification.sign([key])

    # Here we request for the path wot/certify
    response = await client(bma.wot.certify, certification.signed_raw())

    if response.status == 200:
        print(await response.text())
    else:
        print("Error while publishing certification: {0}".format(await response.text()))

    # Close client aiohttp session
    await client.close()
[ "async", "def", "main", "(", ")", ":", "# Create Client from endpoint string in Duniter format", "client", "=", "Client", "(", "BMAS_ENDPOINT", ")", "# Get the node summary infos to test the connection", "response", "=", "await", "client", "(", "bma", ".", "node", ".", ...
Main code
[ "Main", "code" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/examples/send_certification.py#L75-L120
duniter/duniter-python-api
duniterpy/documents/block.py
Block.sign
def sign(self, keys):
    """
    Sign the current document.
    Warning : current signatures will be replaced with the new ones.

    Only the first key in ``keys`` is used.
    """
    key = keys[0]
    # NOTE(review): raw()[-2:] signs only the LAST TWO characters of the
    # raw document — presumably intentional upstream, but worth confirming.
    payload = self.raw()[-2:]
    sig_b64 = base64.b64encode(key.signature(bytes(payload, 'ascii')))
    self.signatures = [sig_b64.decode("ascii")]
[ "def", "sign", "(", "self", ",", "keys", ")", ":", "key", "=", "keys", "[", "0", "]", "signed", "=", "self", ".", "raw", "(", ")", "[", "-", "2", ":", "]", "signing", "=", "base64", ".", "b64encode", "(", "key", ".", "signature", "(", "bytes", ...
Sign the current document. Warning : current signatures will be replaced with the new ones.
[ "Sign", "the", "current", "document", ".", "Warning", ":", "current", "signatures", "will", "be", "replaced", "with", "the", "new", "ones", "." ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/block.py#L492-L500
henzk/ape
ape/feaquencer/graph.py
_dfs_cycle_detect
def _dfs_cycle_detect(graph, node, path, visited_nodes): """ search graph for cycle using DFS continuing from node path contains the list of visited nodes currently on the stack visited_nodes is the set of already visited nodes :param graph: :param node: :param path: :param visited_nodes: :return: """ visited_nodes.add(node) for target in graph[node]: if target in path: # cycle found => return current path return path + [target] else: return _dfs_cycle_detect(graph, target, path + [target], visited_nodes) return None
python
def _dfs_cycle_detect(graph, node, path, visited_nodes): """ search graph for cycle using DFS continuing from node path contains the list of visited nodes currently on the stack visited_nodes is the set of already visited nodes :param graph: :param node: :param path: :param visited_nodes: :return: """ visited_nodes.add(node) for target in graph[node]: if target in path: # cycle found => return current path return path + [target] else: return _dfs_cycle_detect(graph, target, path + [target], visited_nodes) return None
[ "def", "_dfs_cycle_detect", "(", "graph", ",", "node", ",", "path", ",", "visited_nodes", ")", ":", "visited_nodes", ".", "add", "(", "node", ")", "for", "target", "in", "graph", "[", "node", "]", ":", "if", "target", "in", "path", ":", "# cycle found =>...
search graph for cycle using DFS continuing from node path contains the list of visited nodes currently on the stack visited_nodes is the set of already visited nodes :param graph: :param node: :param path: :param visited_nodes: :return:
[ "search", "graph", "for", "cycle", "using", "DFS", "continuing", "from", "node", "path", "contains", "the", "list", "of", "visited", "nodes", "currently", "on", "the", "stack", "visited_nodes", "is", "the", "set", "of", "already", "visited", "nodes", ":", "p...
train
https://github.com/henzk/ape/blob/a1b7ea5e5b25c42beffeaaa5c32d94ad82634819/ape/feaquencer/graph.py#L25-L43
henzk/ape
ape/feaquencer/graph.py
detect_cycle
def detect_cycle(graph): """ search the given directed graph for cycles returns None if the given graph is cycle free otherwise it returns a path through the graph that contains a cycle :param graph: :return: """ visited_nodes = set() for node in list(graph): if node not in visited_nodes: cycle = _dfs_cycle_detect(graph, node, [node], visited_nodes) if cycle: return cycle return None
python
def detect_cycle(graph): """ search the given directed graph for cycles returns None if the given graph is cycle free otherwise it returns a path through the graph that contains a cycle :param graph: :return: """ visited_nodes = set() for node in list(graph): if node not in visited_nodes: cycle = _dfs_cycle_detect(graph, node, [node], visited_nodes) if cycle: return cycle return None
[ "def", "detect_cycle", "(", "graph", ")", ":", "visited_nodes", "=", "set", "(", ")", "for", "node", "in", "list", "(", "graph", ")", ":", "if", "node", "not", "in", "visited_nodes", ":", "cycle", "=", "_dfs_cycle_detect", "(", "graph", ",", "node", ",...
search the given directed graph for cycles returns None if the given graph is cycle free otherwise it returns a path through the graph that contains a cycle :param graph: :return:
[ "search", "the", "given", "directed", "graph", "for", "cycles" ]
train
https://github.com/henzk/ape/blob/a1b7ea5e5b25c42beffeaaa5c32d94ad82634819/ape/feaquencer/graph.py#L46-L63
henzk/ape
ape/feaquencer/graph.py
topsort
def topsort(graph): """ For the given graph, returns a list of nodes in topological order In py3 the behaviour of this function differs from py2, the resulting order will change with every execution in py3 while in py2 the order stays the same :param graph: :return: """ count = defaultdict(int) for feature, node in graph.items(): for target in node: count[target] += 1 # convert for list is necessary for py3 as in py3 the filter # function creates a filter object, in py2 it returns a list free_nodes = list(filter(lambda x: count[x] == 0, graph)) result = [] while free_nodes: node = free_nodes.pop() result.append(node) for target in graph[node]: count[target] -= 1 if count[target] == 0: free_nodes.append(target) return result
python
def topsort(graph): """ For the given graph, returns a list of nodes in topological order In py3 the behaviour of this function differs from py2, the resulting order will change with every execution in py3 while in py2 the order stays the same :param graph: :return: """ count = defaultdict(int) for feature, node in graph.items(): for target in node: count[target] += 1 # convert for list is necessary for py3 as in py3 the filter # function creates a filter object, in py2 it returns a list free_nodes = list(filter(lambda x: count[x] == 0, graph)) result = [] while free_nodes: node = free_nodes.pop() result.append(node) for target in graph[node]: count[target] -= 1 if count[target] == 0: free_nodes.append(target) return result
[ "def", "topsort", "(", "graph", ")", ":", "count", "=", "defaultdict", "(", "int", ")", "for", "feature", ",", "node", "in", "graph", ".", "items", "(", ")", ":", "for", "target", "in", "node", ":", "count", "[", "target", "]", "+=", "1", "# conver...
For the given graph, returns a list of nodes in topological order In py3 the behaviour of this function differs from py2, the resulting order will change with every execution in py3 while in py2 the order stays the same :param graph: :return:
[ "For", "the", "given", "graph", "returns", "a", "list", "of", "nodes", "in", "topological", "order", "In", "py3", "the", "behaviour", "of", "this", "function", "differs", "from", "py2", "the", "resulting", "order", "will", "change", "with", "every", "executi...
train
https://github.com/henzk/ape/blob/a1b7ea5e5b25c42beffeaaa5c32d94ad82634819/ape/feaquencer/graph.py#L66-L91
coded-by-hand/mass
mass/monitor.py
main
def main(): cmd = sys.argv cmd.pop(0) """ parse arguments and make go """ parser = argparse.ArgumentParser() parser.add_argument( '-s', '--src', help='source folder to watch', default='.', dest='src', metavar='folder' ) parser.add_argument( '-d', '--dest', help='source folder to watch', default=None, dest='dest', metavar='folder' ) args = parser.parse_args() print 'Initializing...' config.source_dir = os.path.abspath(args.src) if args.dest != None: config.dest_dir = os.path.abspath(args.dest) init_sources(config.source_dir) if cmd: c = cmd[0] commands = globals() if c in commands: commands[c]()
python
def main(): cmd = sys.argv cmd.pop(0) """ parse arguments and make go """ parser = argparse.ArgumentParser() parser.add_argument( '-s', '--src', help='source folder to watch', default='.', dest='src', metavar='folder' ) parser.add_argument( '-d', '--dest', help='source folder to watch', default=None, dest='dest', metavar='folder' ) args = parser.parse_args() print 'Initializing...' config.source_dir = os.path.abspath(args.src) if args.dest != None: config.dest_dir = os.path.abspath(args.dest) init_sources(config.source_dir) if cmd: c = cmd[0] commands = globals() if c in commands: commands[c]()
[ "def", "main", "(", ")", ":", "cmd", "=", "sys", ".", "argv", "cmd", ".", "pop", "(", "0", ")", "parser", "=", "argparse", ".", "ArgumentParser", "(", ")", "parser", ".", "add_argument", "(", "'-s'", ",", "'--src'", ",", "help", "=", "'source folder ...
parse arguments and make go
[ "parse", "arguments", "and", "make", "go" ]
train
https://github.com/coded-by-hand/mass/blob/59005479efed3cd8598a8f0c66791a4482071899/mass/monitor.py#L16-L49
coded-by-hand/mass
mass/monitor.py
init_sources
def init_sources(path): """ initializes array of groups and their associated js files """ for f in dir_list(path): if(os.path.splitext(f)[1][1:] == config.source_ext): print "Source file discovered: %s" % (f) script = Script(f) if (script.filename not in config.sources.keys()): config.sources[script.path] = script parse.parse_dependencies(script,script)
python
def init_sources(path): """ initializes array of groups and their associated js files """ for f in dir_list(path): if(os.path.splitext(f)[1][1:] == config.source_ext): print "Source file discovered: %s" % (f) script = Script(f) if (script.filename not in config.sources.keys()): config.sources[script.path] = script parse.parse_dependencies(script,script)
[ "def", "init_sources", "(", "path", ")", ":", "for", "f", "in", "dir_list", "(", "path", ")", ":", "if", "(", "os", ".", "path", ".", "splitext", "(", "f", ")", "[", "1", "]", "[", "1", ":", "]", "==", "config", ".", "source_ext", ")", ":", "...
initializes array of groups and their associated js files
[ "initializes", "array", "of", "groups", "and", "their", "associated", "js", "files" ]
train
https://github.com/coded-by-hand/mass/blob/59005479efed3cd8598a8f0c66791a4482071899/mass/monitor.py#L72-L82
coded-by-hand/mass
mass/monitor.py
start_scanner
def start_scanner(path): """ watch for file events in the supplied path """ try: observer = Observer() observer.start() stream = Stream(file_modified, path, file_events=True) observer.schedule(stream) print "Watching for changes. Press Ctrl-C to stop." while 1: pass except (KeyboardInterrupt, OSError, IOError): observer.unschedule(stream) observer.stop()
python
def start_scanner(path): """ watch for file events in the supplied path """ try: observer = Observer() observer.start() stream = Stream(file_modified, path, file_events=True) observer.schedule(stream) print "Watching for changes. Press Ctrl-C to stop." while 1: pass except (KeyboardInterrupt, OSError, IOError): observer.unschedule(stream) observer.stop()
[ "def", "start_scanner", "(", "path", ")", ":", "try", ":", "observer", "=", "Observer", "(", ")", "observer", ".", "start", "(", ")", "stream", "=", "Stream", "(", "file_modified", ",", "path", ",", "file_events", "=", "True", ")", "observer", ".", "sc...
watch for file events in the supplied path
[ "watch", "for", "file", "events", "in", "the", "supplied", "path" ]
train
https://github.com/coded-by-hand/mass/blob/59005479efed3cd8598a8f0c66791a4482071899/mass/monitor.py#L84-L98
coded-by-hand/mass
mass/monitor.py
file_modified
def file_modified(event): """ react to file events """ if re.match(config.file_regex,event.name) or (event.name in config.sources.keys()): print "Change detected to: %s" % (event.name) config.stack = [] script = config.sources[event.name] if script.extension == config.source_ext: parse.parse_file(script) else: parse.parse_parents(script)
python
def file_modified(event): """ react to file events """ if re.match(config.file_regex,event.name) or (event.name in config.sources.keys()): print "Change detected to: %s" % (event.name) config.stack = [] script = config.sources[event.name] if script.extension == config.source_ext: parse.parse_file(script) else: parse.parse_parents(script)
[ "def", "file_modified", "(", "event", ")", ":", "if", "re", ".", "match", "(", "config", ".", "file_regex", ",", "event", ".", "name", ")", "or", "(", "event", ".", "name", "in", "config", ".", "sources", ".", "keys", "(", ")", ")", ":", "print", ...
react to file events
[ "react", "to", "file", "events" ]
train
https://github.com/coded-by-hand/mass/blob/59005479efed3cd8598a8f0c66791a4482071899/mass/monitor.py#L100-L111
MacHu-GWU/pymongo_mate-project
pymongo_mate/pkg/pandas_mate/csv_io.py
iter_tuple_from_csv
def iter_tuple_from_csv(path, iterator=False, chunksize=None, skiprows=None, nrows=None, **kwargs): """A high performance, low memory usage csv file row iterator function. :param path: csv file path. :param iterator: :param chunksize: :param skiprows: :param nrows: :yield tuple: **中文文档** 对dataframe进行tuple风格的高性能行遍历。 对用pandas从csv文件读取的dataframe进行逐行遍历时, iterrows和itertuple 都不是性能最高的方法。这是因为iterrows要生成Series对象, 而itertuple 也要对index进行访问。所以本方法是使用内建zip方法对所有的column进行打包 解压, 所以性能上是最佳的。 """ kwargs["iterator"] = iterator kwargs["chunksize"] = chunksize kwargs["skiprows"] = skiprows kwargs["nrows"] = nrows if iterator is True: for df in pd.read_csv(path, **kwargs): for tp in itertuple(df): yield tp else: df = pd.read_csv(path, **kwargs) for tp in itertuple(df): yield tp
python
def iter_tuple_from_csv(path, iterator=False, chunksize=None, skiprows=None, nrows=None, **kwargs): """A high performance, low memory usage csv file row iterator function. :param path: csv file path. :param iterator: :param chunksize: :param skiprows: :param nrows: :yield tuple: **中文文档** 对dataframe进行tuple风格的高性能行遍历。 对用pandas从csv文件读取的dataframe进行逐行遍历时, iterrows和itertuple 都不是性能最高的方法。这是因为iterrows要生成Series对象, 而itertuple 也要对index进行访问。所以本方法是使用内建zip方法对所有的column进行打包 解压, 所以性能上是最佳的。 """ kwargs["iterator"] = iterator kwargs["chunksize"] = chunksize kwargs["skiprows"] = skiprows kwargs["nrows"] = nrows if iterator is True: for df in pd.read_csv(path, **kwargs): for tp in itertuple(df): yield tp else: df = pd.read_csv(path, **kwargs) for tp in itertuple(df): yield tp
[ "def", "iter_tuple_from_csv", "(", "path", ",", "iterator", "=", "False", ",", "chunksize", "=", "None", ",", "skiprows", "=", "None", ",", "nrows", "=", "None", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "\"iterator\"", "]", "=", "iterator", "k...
A high performance, low memory usage csv file row iterator function. :param path: csv file path. :param iterator: :param chunksize: :param skiprows: :param nrows: :yield tuple: **中文文档** 对dataframe进行tuple风格的高性能行遍历。 对用pandas从csv文件读取的dataframe进行逐行遍历时, iterrows和itertuple 都不是性能最高的方法。这是因为iterrows要生成Series对象, 而itertuple 也要对index进行访问。所以本方法是使用内建zip方法对所有的column进行打包 解压, 所以性能上是最佳的。
[ "A", "high", "performance", "low", "memory", "usage", "csv", "file", "row", "iterator", "function", "." ]
train
https://github.com/MacHu-GWU/pymongo_mate-project/blob/be53170c2db54cb705b9e548d32ef26c773ff7f3/pymongo_mate/pkg/pandas_mate/csv_io.py#L16-L53
MacHu-GWU/pymongo_mate-project
pymongo_mate/pkg/pandas_mate/csv_io.py
index_row_dict_from_csv
def index_row_dict_from_csv(path, index_col=None, iterator=False, chunksize=None, skiprows=None, nrows=None, use_ordered_dict=True, **kwargs): """Read the csv into a dictionary. The key is it's index, the value is the dictionary form of the row. :param path: csv file path. :param index_col: None or str, the column that used as index. :param iterator: :param chunksize: :param skiprows: :param nrows: :param use_ordered_dict: :returns: {index_1: row1, index2: row2, ...} **中文文档** 读取csv, 选择一值完全不重复, 可作为index的列作为index, 生成一个字典 数据结构, 使得可以通过index直接访问row。 """ _kwargs = dict(list(kwargs.items())) _kwargs["iterator"] = None _kwargs["chunksize"] = None _kwargs["skiprows"] = 0 _kwargs["nrows"] = 1 df = pd.read_csv(path, index_col=index_col, **_kwargs) columns = df.columns if index_col is None: raise Exception("please give index_col!") if use_ordered_dict: table = OrderedDict() else: table = dict() kwargs["iterator"] = iterator kwargs["chunksize"] = chunksize kwargs["skiprows"] = skiprows kwargs["nrows"] = nrows if iterator is True: for df in pd.read_csv(path, index_col=index_col, **kwargs): for ind, tp in zip(df.index, itertuple(df)): table[ind] = dict(zip(columns, tp)) else: df = pd.read_csv(path, index_col=index_col, **kwargs) for ind, tp in zip(df.index, itertuple(df)): table[ind] = dict(zip(columns, tp)) return table
python
def index_row_dict_from_csv(path, index_col=None, iterator=False, chunksize=None, skiprows=None, nrows=None, use_ordered_dict=True, **kwargs): """Read the csv into a dictionary. The key is it's index, the value is the dictionary form of the row. :param path: csv file path. :param index_col: None or str, the column that used as index. :param iterator: :param chunksize: :param skiprows: :param nrows: :param use_ordered_dict: :returns: {index_1: row1, index2: row2, ...} **中文文档** 读取csv, 选择一值完全不重复, 可作为index的列作为index, 生成一个字典 数据结构, 使得可以通过index直接访问row。 """ _kwargs = dict(list(kwargs.items())) _kwargs["iterator"] = None _kwargs["chunksize"] = None _kwargs["skiprows"] = 0 _kwargs["nrows"] = 1 df = pd.read_csv(path, index_col=index_col, **_kwargs) columns = df.columns if index_col is None: raise Exception("please give index_col!") if use_ordered_dict: table = OrderedDict() else: table = dict() kwargs["iterator"] = iterator kwargs["chunksize"] = chunksize kwargs["skiprows"] = skiprows kwargs["nrows"] = nrows if iterator is True: for df in pd.read_csv(path, index_col=index_col, **kwargs): for ind, tp in zip(df.index, itertuple(df)): table[ind] = dict(zip(columns, tp)) else: df = pd.read_csv(path, index_col=index_col, **kwargs) for ind, tp in zip(df.index, itertuple(df)): table[ind] = dict(zip(columns, tp)) return table
[ "def", "index_row_dict_from_csv", "(", "path", ",", "index_col", "=", "None", ",", "iterator", "=", "False", ",", "chunksize", "=", "None", ",", "skiprows", "=", "None", ",", "nrows", "=", "None", ",", "use_ordered_dict", "=", "True", ",", "*", "*", "kwa...
Read the csv into a dictionary. The key is it's index, the value is the dictionary form of the row. :param path: csv file path. :param index_col: None or str, the column that used as index. :param iterator: :param chunksize: :param skiprows: :param nrows: :param use_ordered_dict: :returns: {index_1: row1, index2: row2, ...} **中文文档** 读取csv, 选择一值完全不重复, 可作为index的列作为index, 生成一个字典 数据结构, 使得可以通过index直接访问row。
[ "Read", "the", "csv", "into", "a", "dictionary", ".", "The", "key", "is", "it", "s", "index", "the", "value", "is", "the", "dictionary", "form", "of", "the", "row", "." ]
train
https://github.com/MacHu-GWU/pymongo_mate-project/blob/be53170c2db54cb705b9e548d32ef26c773ff7f3/pymongo_mate/pkg/pandas_mate/csv_io.py#L56-L113
oblalex/verboselib
verboselib/_compatibility.py
native_path
def native_path(path): # pragma: no cover """ Always return a native path, that is unicode on Python 3 and bytestring on Python 2. Taken `from Django <http://bit.ly/1r3gogZ>`_. """ if PY2 and not isinstance(path, bytes): return path.encode(fs_encoding) return path
python
def native_path(path): # pragma: no cover """ Always return a native path, that is unicode on Python 3 and bytestring on Python 2. Taken `from Django <http://bit.ly/1r3gogZ>`_. """ if PY2 and not isinstance(path, bytes): return path.encode(fs_encoding) return path
[ "def", "native_path", "(", "path", ")", ":", "# pragma: no cover", "if", "PY2", "and", "not", "isinstance", "(", "path", ",", "bytes", ")", ":", "return", "path", ".", "encode", "(", "fs_encoding", ")", "return", "path" ]
Always return a native path, that is unicode on Python 3 and bytestring on Python 2. Taken `from Django <http://bit.ly/1r3gogZ>`_.
[ "Always", "return", "a", "native", "path", "that", "is", "unicode", "on", "Python", "3", "and", "bytestring", "on", "Python", "2", "." ]
train
https://github.com/oblalex/verboselib/blob/3c108bef060b091e1f7c08861ab07672c87ddcff/verboselib/_compatibility.py#L18-L27
MacHu-GWU/pymongo_mate-project
pymongo_mate/crud/select.py
select_field
def select_field(col, field_or_fields, filters=None): """Select single or multiple fields. :params field_or_fields: str or list of str :returns headers: headers :return data: list of row **中文文档** - 在选择单列时, 返回的是 str, list. - 在选择多列时, 返回的是 str list, list of list. 返回单列或多列的数据。 """ fields = _preprocess_field_or_fields(field_or_fields) if filters is None: filters = dict() wanted = {field: True for field in fields} if len(fields) == 1: header = fields[0] data = [doc.get(header) for doc in col.find(filters, wanted)] return header, data else: headers = list(fields) data = [[doc.get(header) for header in headers] for doc in col.find(filters, wanted)] return headers, data
python
def select_field(col, field_or_fields, filters=None): """Select single or multiple fields. :params field_or_fields: str or list of str :returns headers: headers :return data: list of row **中文文档** - 在选择单列时, 返回的是 str, list. - 在选择多列时, 返回的是 str list, list of list. 返回单列或多列的数据。 """ fields = _preprocess_field_or_fields(field_or_fields) if filters is None: filters = dict() wanted = {field: True for field in fields} if len(fields) == 1: header = fields[0] data = [doc.get(header) for doc in col.find(filters, wanted)] return header, data else: headers = list(fields) data = [[doc.get(header) for header in headers] for doc in col.find(filters, wanted)] return headers, data
[ "def", "select_field", "(", "col", ",", "field_or_fields", ",", "filters", "=", "None", ")", ":", "fields", "=", "_preprocess_field_or_fields", "(", "field_or_fields", ")", "if", "filters", "is", "None", ":", "filters", "=", "dict", "(", ")", "wanted", "=", ...
Select single or multiple fields. :params field_or_fields: str or list of str :returns headers: headers :return data: list of row **中文文档** - 在选择单列时, 返回的是 str, list. - 在选择多列时, 返回的是 str list, list of list. 返回单列或多列的数据。
[ "Select", "single", "or", "multiple", "fields", "." ]
train
https://github.com/MacHu-GWU/pymongo_mate-project/blob/be53170c2db54cb705b9e548d32ef26c773ff7f3/pymongo_mate/crud/select.py#L36-L65
MacHu-GWU/pymongo_mate-project
pymongo_mate/crud/select.py
select_distinct_field
def select_distinct_field(col, field_or_fields, filters=None): """Select distinct value or combination of values of single or multiple fields. :params fields: str or list of str. :return data: list of list. **中文文档** 选择多列中出现过的所有可能的排列组合。 """ fields = _preprocess_field_or_fields(field_or_fields) if filters is None: filters = dict() if len(fields) == 1: key = fields[0] data = list(col.find(filters).distinct(key)) return data else: pipeline = [ { "$match": filters }, { "$group": { "_id": {key: "$" + key for key in fields}, }, }, ] data = list() for doc in col.aggregate(pipeline): # doc = {"_id": {"a": 0, "b": 0}} ... data.append([doc["_id"][key] for key in fields]) return data
python
def select_distinct_field(col, field_or_fields, filters=None): """Select distinct value or combination of values of single or multiple fields. :params fields: str or list of str. :return data: list of list. **中文文档** 选择多列中出现过的所有可能的排列组合。 """ fields = _preprocess_field_or_fields(field_or_fields) if filters is None: filters = dict() if len(fields) == 1: key = fields[0] data = list(col.find(filters).distinct(key)) return data else: pipeline = [ { "$match": filters }, { "$group": { "_id": {key: "$" + key for key in fields}, }, }, ] data = list() for doc in col.aggregate(pipeline): # doc = {"_id": {"a": 0, "b": 0}} ... data.append([doc["_id"][key] for key in fields]) return data
[ "def", "select_distinct_field", "(", "col", ",", "field_or_fields", ",", "filters", "=", "None", ")", ":", "fields", "=", "_preprocess_field_or_fields", "(", "field_or_fields", ")", "if", "filters", "is", "None", ":", "filters", "=", "dict", "(", ")", "if", ...
Select distinct value or combination of values of single or multiple fields. :params fields: str or list of str. :return data: list of list. **中文文档** 选择多列中出现过的所有可能的排列组合。
[ "Select", "distinct", "value", "or", "combination", "of", "values", "of", "single", "or", "multiple", "fields", "." ]
train
https://github.com/MacHu-GWU/pymongo_mate-project/blob/be53170c2db54cb705b9e548d32ef26c773ff7f3/pymongo_mate/crud/select.py#L68-L103
MacHu-GWU/pymongo_mate-project
pymongo_mate/crud/select.py
random_sample
def random_sample(col, n=5, filters=None): """Randomly select n document from query result set. If no query specified, then from entire collection. **中文文档** 从collection中随机选择 ``n`` 个样本。 """ pipeline = list() if filters is not None: pipeline.append({"$match": filters}) pipeline.append({"$sample": {"size": n}}) return list(col.aggregate(pipeline))
python
def random_sample(col, n=5, filters=None): """Randomly select n document from query result set. If no query specified, then from entire collection. **中文文档** 从collection中随机选择 ``n`` 个样本。 """ pipeline = list() if filters is not None: pipeline.append({"$match": filters}) pipeline.append({"$sample": {"size": n}}) return list(col.aggregate(pipeline))
[ "def", "random_sample", "(", "col", ",", "n", "=", "5", ",", "filters", "=", "None", ")", ":", "pipeline", "=", "list", "(", ")", "if", "filters", "is", "not", "None", ":", "pipeline", ".", "append", "(", "{", "\"$match\"", ":", "filters", "}", ")"...
Randomly select n document from query result set. If no query specified, then from entire collection. **中文文档** 从collection中随机选择 ``n`` 个样本。
[ "Randomly", "select", "n", "document", "from", "query", "result", "set", ".", "If", "no", "query", "specified", "then", "from", "entire", "collection", "." ]
train
https://github.com/MacHu-GWU/pymongo_mate-project/blob/be53170c2db54cb705b9e548d32ef26c773ff7f3/pymongo_mate/crud/select.py#L106-L118
NerdWalletOSS/savage
src/savage/__init__.py
_before_flush_handler
def _before_flush_handler(session, _flush_context, _instances): """Update version ID for all dirty, modified rows""" dialect = get_dialect(session) for row in session.dirty: if isinstance(row, SavageModelMixin) and is_modified(row, dialect): # Update row version_id row.update_version_id()
python
def _before_flush_handler(session, _flush_context, _instances): """Update version ID for all dirty, modified rows""" dialect = get_dialect(session) for row in session.dirty: if isinstance(row, SavageModelMixin) and is_modified(row, dialect): # Update row version_id row.update_version_id()
[ "def", "_before_flush_handler", "(", "session", ",", "_flush_context", ",", "_instances", ")", ":", "dialect", "=", "get_dialect", "(", "session", ")", "for", "row", "in", "session", ".", "dirty", ":", "if", "isinstance", "(", "row", ",", "SavageModelMixin", ...
Update version ID for all dirty, modified rows
[ "Update", "version", "ID", "for", "all", "dirty", "modified", "rows" ]
train
https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/__init__.py#L46-L52
NerdWalletOSS/savage
src/savage/__init__.py
_after_flush_handler
def _after_flush_handler(session, _flush_context): """Archive all new/updated/deleted data""" dialect = get_dialect(session) handlers = [ (_versioned_delete, session.deleted), (_versioned_insert, session.new), (_versioned_update, session.dirty), ] for handler, rows in handlers: # TODO: Bulk archive insert statements for row in rows: if not isinstance(row, SavageModelMixin): continue if not hasattr(row, 'ArchiveTable'): raise LogTableCreationError('Need to register Savage tables!!') user_id = getattr(row, '_updated_by', None) handler(row, session, user_id, dialect)
python
def _after_flush_handler(session, _flush_context): """Archive all new/updated/deleted data""" dialect = get_dialect(session) handlers = [ (_versioned_delete, session.deleted), (_versioned_insert, session.new), (_versioned_update, session.dirty), ] for handler, rows in handlers: # TODO: Bulk archive insert statements for row in rows: if not isinstance(row, SavageModelMixin): continue if not hasattr(row, 'ArchiveTable'): raise LogTableCreationError('Need to register Savage tables!!') user_id = getattr(row, '_updated_by', None) handler(row, session, user_id, dialect)
[ "def", "_after_flush_handler", "(", "session", ",", "_flush_context", ")", ":", "dialect", "=", "get_dialect", "(", "session", ")", "handlers", "=", "[", "(", "_versioned_delete", ",", "session", ".", "deleted", ")", ",", "(", "_versioned_insert", ",", "sessio...
Archive all new/updated/deleted data
[ "Archive", "all", "new", "/", "updated", "/", "deleted", "data" ]
train
https://github.com/NerdWalletOSS/savage/blob/54f64ac1c912528710365107952967d31d56e60d/src/savage/__init__.py#L55-L71
scivision/gridaurora
gridaurora/opticalmod.py
opticalModel
def opticalModel(sim, ver: xarray.DataArray, obsAlt_km: float, zenithang: float): """ ver: Nalt x Nwavelength """ assert isinstance(ver, xarray.DataArray) # %% get system optical transmission T optT = getSystemT(ver.wavelength_nm, sim.bg3fn, sim.windowfn, sim.qefn, obsAlt_km, zenithang) # %% first multiply VER by T, THEN sum overall wavelengths if sim.opticalfilter == 'bg3': VERgray = (ver*optT['sys'].values[None, :]).sum('wavelength_nm') elif sim.opticalfilter == 'none': VERgray = (ver*optT['sysNObg3'].values[None, :]).sum('wavelength_nm') else: logging.warning(f'unknown OpticalFilter type: {sim.opticalfilter}' ' falling back to using no filter at all') VERgray = (ver*optT['sysNObg3'].values[None, :]).sum('wavelength_nm') return VERgray
python
def opticalModel(sim, ver: xarray.DataArray, obsAlt_km: float, zenithang: float): """ ver: Nalt x Nwavelength """ assert isinstance(ver, xarray.DataArray) # %% get system optical transmission T optT = getSystemT(ver.wavelength_nm, sim.bg3fn, sim.windowfn, sim.qefn, obsAlt_km, zenithang) # %% first multiply VER by T, THEN sum overall wavelengths if sim.opticalfilter == 'bg3': VERgray = (ver*optT['sys'].values[None, :]).sum('wavelength_nm') elif sim.opticalfilter == 'none': VERgray = (ver*optT['sysNObg3'].values[None, :]).sum('wavelength_nm') else: logging.warning(f'unknown OpticalFilter type: {sim.opticalfilter}' ' falling back to using no filter at all') VERgray = (ver*optT['sysNObg3'].values[None, :]).sum('wavelength_nm') return VERgray
[ "def", "opticalModel", "(", "sim", ",", "ver", ":", "xarray", ".", "DataArray", ",", "obsAlt_km", ":", "float", ",", "zenithang", ":", "float", ")", ":", "assert", "isinstance", "(", "ver", ",", "xarray", ".", "DataArray", ")", "# %% get system optical trans...
ver: Nalt x Nwavelength
[ "ver", ":", "Nalt", "x", "Nwavelength" ]
train
https://github.com/scivision/gridaurora/blob/c3957b93c2201afff62bd104e0acead52c0d9e90/gridaurora/opticalmod.py#L7-L25
dave-shawley/glinda
glinda/content.py
register_text_type
def register_text_type(content_type, default_encoding, dumper, loader): """ Register handling for a text-based content type. :param str content_type: content type to register the hooks for :param str default_encoding: encoding to use if none is present in the request :param dumper: called to decode a string into a dictionary. Calling convention: ``dumper(obj_dict).encode(encoding) -> bytes`` :param loader: called to encode a dictionary to a string. Calling convention: ``loader(obj_bytes.decode(encoding)) -> dict`` The decoding of a text content body takes into account decoding the binary request body into a string before calling the underlying dump/load routines. """ content_type = headers.parse_content_type(content_type) content_type.parameters.clear() key = str(content_type) _content_types[key] = content_type handler = _content_handlers.setdefault(key, _ContentHandler(key)) handler.dict_to_string = dumper handler.string_to_dict = loader handler.default_encoding = default_encoding or handler.default_encoding
python
def register_text_type(content_type, default_encoding, dumper, loader): """ Register handling for a text-based content type. :param str content_type: content type to register the hooks for :param str default_encoding: encoding to use if none is present in the request :param dumper: called to decode a string into a dictionary. Calling convention: ``dumper(obj_dict).encode(encoding) -> bytes`` :param loader: called to encode a dictionary to a string. Calling convention: ``loader(obj_bytes.decode(encoding)) -> dict`` The decoding of a text content body takes into account decoding the binary request body into a string before calling the underlying dump/load routines. """ content_type = headers.parse_content_type(content_type) content_type.parameters.clear() key = str(content_type) _content_types[key] = content_type handler = _content_handlers.setdefault(key, _ContentHandler(key)) handler.dict_to_string = dumper handler.string_to_dict = loader handler.default_encoding = default_encoding or handler.default_encoding
[ "def", "register_text_type", "(", "content_type", ",", "default_encoding", ",", "dumper", ",", "loader", ")", ":", "content_type", "=", "headers", ".", "parse_content_type", "(", "content_type", ")", "content_type", ".", "parameters", ".", "clear", "(", ")", "ke...
Register handling for a text-based content type. :param str content_type: content type to register the hooks for :param str default_encoding: encoding to use if none is present in the request :param dumper: called to decode a string into a dictionary. Calling convention: ``dumper(obj_dict).encode(encoding) -> bytes`` :param loader: called to encode a dictionary to a string. Calling convention: ``loader(obj_bytes.decode(encoding)) -> dict`` The decoding of a text content body takes into account decoding the binary request body into a string before calling the underlying dump/load routines.
[ "Register", "handling", "for", "a", "text", "-", "based", "content", "type", "." ]
train
https://github.com/dave-shawley/glinda/blob/6dec43549d5b1767467174aa3d7fa2425bc25f66/glinda/content.py#L74-L99
dave-shawley/glinda
glinda/content.py
register_binary_type
def register_binary_type(content_type, dumper, loader): """ Register handling for a binary content type. :param str content_type: content type to register the hooks for :param dumper: called to decode bytes into a dictionary. Calling convention: ``dumper(obj_dict) -> bytes``. :param loader: called to encode a dictionary into a byte string. Calling convention: ``loader(obj_bytes) -> dict`` """ content_type = headers.parse_content_type(content_type) content_type.parameters.clear() key = str(content_type) _content_types[key] = content_type handler = _content_handlers.setdefault(key, _ContentHandler(key)) handler.dict_to_bytes = dumper handler.bytes_to_dict = loader
python
def register_binary_type(content_type, dumper, loader): """ Register handling for a binary content type. :param str content_type: content type to register the hooks for :param dumper: called to decode bytes into a dictionary. Calling convention: ``dumper(obj_dict) -> bytes``. :param loader: called to encode a dictionary into a byte string. Calling convention: ``loader(obj_bytes) -> dict`` """ content_type = headers.parse_content_type(content_type) content_type.parameters.clear() key = str(content_type) _content_types[key] = content_type handler = _content_handlers.setdefault(key, _ContentHandler(key)) handler.dict_to_bytes = dumper handler.bytes_to_dict = loader
[ "def", "register_binary_type", "(", "content_type", ",", "dumper", ",", "loader", ")", ":", "content_type", "=", "headers", ".", "parse_content_type", "(", "content_type", ")", "content_type", ".", "parameters", ".", "clear", "(", ")", "key", "=", "str", "(", ...
Register handling for a binary content type. :param str content_type: content type to register the hooks for :param dumper: called to decode bytes into a dictionary. Calling convention: ``dumper(obj_dict) -> bytes``. :param loader: called to encode a dictionary into a byte string. Calling convention: ``loader(obj_bytes) -> dict``
[ "Register", "handling", "for", "a", "binary", "content", "type", "." ]
train
https://github.com/dave-shawley/glinda/blob/6dec43549d5b1767467174aa3d7fa2425bc25f66/glinda/content.py#L102-L120
dave-shawley/glinda
glinda/content.py
_ContentHandler.unpack_bytes
def unpack_bytes(self, obj_bytes, encoding=None): """Unpack a byte stream into a dictionary.""" assert self.bytes_to_dict or self.string_to_dict encoding = encoding or self.default_encoding LOGGER.debug('%r decoding %d bytes with encoding of %s', self, len(obj_bytes), encoding) if self.bytes_to_dict: return escape.recursive_unicode(self.bytes_to_dict(obj_bytes)) return self.string_to_dict(obj_bytes.decode(encoding))
python
def unpack_bytes(self, obj_bytes, encoding=None): """Unpack a byte stream into a dictionary.""" assert self.bytes_to_dict or self.string_to_dict encoding = encoding or self.default_encoding LOGGER.debug('%r decoding %d bytes with encoding of %s', self, len(obj_bytes), encoding) if self.bytes_to_dict: return escape.recursive_unicode(self.bytes_to_dict(obj_bytes)) return self.string_to_dict(obj_bytes.decode(encoding))
[ "def", "unpack_bytes", "(", "self", ",", "obj_bytes", ",", "encoding", "=", "None", ")", ":", "assert", "self", ".", "bytes_to_dict", "or", "self", ".", "string_to_dict", "encoding", "=", "encoding", "or", "self", ".", "default_encoding", "LOGGER", ".", "deb...
Unpack a byte stream into a dictionary.
[ "Unpack", "a", "byte", "stream", "into", "a", "dictionary", "." ]
train
https://github.com/dave-shawley/glinda/blob/6dec43549d5b1767467174aa3d7fa2425bc25f66/glinda/content.py#L33-L41
dave-shawley/glinda
glinda/content.py
_ContentHandler.pack_bytes
def pack_bytes(self, obj_dict, encoding=None): """Pack a dictionary into a byte stream.""" assert self.dict_to_bytes or self.dict_to_string encoding = encoding or self.default_encoding or 'utf-8' LOGGER.debug('%r encoding dict with encoding %s', self, encoding) if self.dict_to_bytes: return None, self.dict_to_bytes(obj_dict) try: return encoding, self.dict_to_string(obj_dict).encode(encoding) except LookupError as error: raise web.HTTPError( 406, 'failed to encode result %r', error, reason='target charset {0} not found'.format(encoding)) except UnicodeEncodeError as error: LOGGER.warning('failed to encode text as %s - %s, trying utf-8', encoding, str(error)) return 'utf-8', self.dict_to_string(obj_dict).encode('utf-8')
python
def pack_bytes(self, obj_dict, encoding=None): """Pack a dictionary into a byte stream.""" assert self.dict_to_bytes or self.dict_to_string encoding = encoding or self.default_encoding or 'utf-8' LOGGER.debug('%r encoding dict with encoding %s', self, encoding) if self.dict_to_bytes: return None, self.dict_to_bytes(obj_dict) try: return encoding, self.dict_to_string(obj_dict).encode(encoding) except LookupError as error: raise web.HTTPError( 406, 'failed to encode result %r', error, reason='target charset {0} not found'.format(encoding)) except UnicodeEncodeError as error: LOGGER.warning('failed to encode text as %s - %s, trying utf-8', encoding, str(error)) return 'utf-8', self.dict_to_string(obj_dict).encode('utf-8')
[ "def", "pack_bytes", "(", "self", ",", "obj_dict", ",", "encoding", "=", "None", ")", ":", "assert", "self", ".", "dict_to_bytes", "or", "self", ".", "dict_to_string", "encoding", "=", "encoding", "or", "self", ".", "default_encoding", "or", "'utf-8'", "LOGG...
Pack a dictionary into a byte stream.
[ "Pack", "a", "dictionary", "into", "a", "byte", "stream", "." ]
train
https://github.com/dave-shawley/glinda/blob/6dec43549d5b1767467174aa3d7fa2425bc25f66/glinda/content.py#L43-L59
dave-shawley/glinda
glinda/content.py
HandlerMixin.get_request_body
def get_request_body(self): """ Decodes the request body and returns it. :return: the decoded request body as a :class:`dict` instance. :raises: :class:`tornado.web.HTTPError` if the body cannot be decoded (415) or if decoding fails (400) """ if self._request_body is None: content_type_str = self.request.headers.get( 'Content-Type', 'application/octet-stream') LOGGER.debug('decoding request body of type %s', content_type_str) content_type = headers.parse_content_type(content_type_str) try: selected, requested = algorithms.select_content_type( [content_type], _content_types.values()) except errors.NoMatch: raise web.HTTPError( 415, 'cannot decoded content type %s', content_type_str, reason='Unexpected content type') handler = _content_handlers[str(selected)] try: self._request_body = handler.unpack_bytes( self.request.body, encoding=content_type.parameters.get('charset'), ) except ValueError as error: raise web.HTTPError( 400, 'failed to decode content body - %r', error, reason='Content body decode failure') return self._request_body
python
def get_request_body(self): """ Decodes the request body and returns it. :return: the decoded request body as a :class:`dict` instance. :raises: :class:`tornado.web.HTTPError` if the body cannot be decoded (415) or if decoding fails (400) """ if self._request_body is None: content_type_str = self.request.headers.get( 'Content-Type', 'application/octet-stream') LOGGER.debug('decoding request body of type %s', content_type_str) content_type = headers.parse_content_type(content_type_str) try: selected, requested = algorithms.select_content_type( [content_type], _content_types.values()) except errors.NoMatch: raise web.HTTPError( 415, 'cannot decoded content type %s', content_type_str, reason='Unexpected content type') handler = _content_handlers[str(selected)] try: self._request_body = handler.unpack_bytes( self.request.body, encoding=content_type.parameters.get('charset'), ) except ValueError as error: raise web.HTTPError( 400, 'failed to decode content body - %r', error, reason='Content body decode failure') return self._request_body
[ "def", "get_request_body", "(", "self", ")", ":", "if", "self", ".", "_request_body", "is", "None", ":", "content_type_str", "=", "self", ".", "request", ".", "headers", ".", "get", "(", "'Content-Type'", ",", "'application/octet-stream'", ")", "LOGGER", ".", ...
Decodes the request body and returns it. :return: the decoded request body as a :class:`dict` instance. :raises: :class:`tornado.web.HTTPError` if the body cannot be decoded (415) or if decoding fails (400)
[ "Decodes", "the", "request", "body", "and", "returns", "it", "." ]
train
https://github.com/dave-shawley/glinda/blob/6dec43549d5b1767467174aa3d7fa2425bc25f66/glinda/content.py#L144-L175
dave-shawley/glinda
glinda/content.py
HandlerMixin.send_response
def send_response(self, response_dict): """ Encode a response according to the request. :param dict response_dict: the response to send :raises: :class:`tornado.web.HTTPError` if no acceptable content type exists This method will encode `response_dict` using the most appropriate encoder based on the :mailheader:`Accept` request header and the available encoders. The result is written to the client by calling ``self.write`` after setting the response content type using ``self.set_header``. """ accept = headers.parse_http_accept_header( self.request.headers.get('Accept', '*/*')) try: selected, _ = algorithms.select_content_type( accept, _content_types.values()) except errors.NoMatch: raise web.HTTPError(406, 'no acceptable content type for %s in %r', accept, _content_types.values(), reason='Content Type Not Acceptable') LOGGER.debug('selected %s as outgoing content type', selected) handler = _content_handlers[str(selected)] accept = self.request.headers.get('Accept-Charset', '*') charsets = headers.parse_accept_charset(accept) charset = charsets[0] if charsets[0] != '*' else None LOGGER.debug('encoding response body using %r with encoding %s', handler, charset) encoding, response_bytes = handler.pack_bytes(response_dict, encoding=charset) if encoding: # don't overwrite the value in _content_types copied = datastructures.ContentType(selected.content_type, selected.content_subtype, selected.parameters) copied.parameters['charset'] = encoding selected = copied self.set_header('Content-Type', str(selected)) self.write(response_bytes)
python
def send_response(self, response_dict): """ Encode a response according to the request. :param dict response_dict: the response to send :raises: :class:`tornado.web.HTTPError` if no acceptable content type exists This method will encode `response_dict` using the most appropriate encoder based on the :mailheader:`Accept` request header and the available encoders. The result is written to the client by calling ``self.write`` after setting the response content type using ``self.set_header``. """ accept = headers.parse_http_accept_header( self.request.headers.get('Accept', '*/*')) try: selected, _ = algorithms.select_content_type( accept, _content_types.values()) except errors.NoMatch: raise web.HTTPError(406, 'no acceptable content type for %s in %r', accept, _content_types.values(), reason='Content Type Not Acceptable') LOGGER.debug('selected %s as outgoing content type', selected) handler = _content_handlers[str(selected)] accept = self.request.headers.get('Accept-Charset', '*') charsets = headers.parse_accept_charset(accept) charset = charsets[0] if charsets[0] != '*' else None LOGGER.debug('encoding response body using %r with encoding %s', handler, charset) encoding, response_bytes = handler.pack_bytes(response_dict, encoding=charset) if encoding: # don't overwrite the value in _content_types copied = datastructures.ContentType(selected.content_type, selected.content_subtype, selected.parameters) copied.parameters['charset'] = encoding selected = copied self.set_header('Content-Type', str(selected)) self.write(response_bytes)
[ "def", "send_response", "(", "self", ",", "response_dict", ")", ":", "accept", "=", "headers", ".", "parse_http_accept_header", "(", "self", ".", "request", ".", "headers", ".", "get", "(", "'Accept'", ",", "'*/*'", ")", ")", "try", ":", "selected", ",", ...
Encode a response according to the request. :param dict response_dict: the response to send :raises: :class:`tornado.web.HTTPError` if no acceptable content type exists This method will encode `response_dict` using the most appropriate encoder based on the :mailheader:`Accept` request header and the available encoders. The result is written to the client by calling ``self.write`` after setting the response content type using ``self.set_header``.
[ "Encode", "a", "response", "according", "to", "the", "request", "." ]
train
https://github.com/dave-shawley/glinda/blob/6dec43549d5b1767467174aa3d7fa2425bc25f66/glinda/content.py#L177-L222
beasley-weather/weewx-orm
weewx_orm/__init__.py
WeewxDB.archive_query_interval
def archive_query_interval(self, _from, to): ''' :param _from: Start of interval (int) (inclusive) :param to: End of interval (int) (exclusive) :raises: IOError ''' with self.session as session: table = self.tables.archive try: results = session.query(table)\ .filter(table.dateTime >= _from)\ .filter(table.dateTime < to)\ .all() return [self.archive_schema.dump(entry).data for entry in results] except SQLAlchemyError as exc: session.rollback() print_exc() raise IOError(exc)
python
def archive_query_interval(self, _from, to): ''' :param _from: Start of interval (int) (inclusive) :param to: End of interval (int) (exclusive) :raises: IOError ''' with self.session as session: table = self.tables.archive try: results = session.query(table)\ .filter(table.dateTime >= _from)\ .filter(table.dateTime < to)\ .all() return [self.archive_schema.dump(entry).data for entry in results] except SQLAlchemyError as exc: session.rollback() print_exc() raise IOError(exc)
[ "def", "archive_query_interval", "(", "self", ",", "_from", ",", "to", ")", ":", "with", "self", ".", "session", "as", "session", ":", "table", "=", "self", ".", "tables", ".", "archive", "try", ":", "results", "=", "session", ".", "query", "(", "table...
:param _from: Start of interval (int) (inclusive) :param to: End of interval (int) (exclusive) :raises: IOError
[ ":", "param", "_from", ":", "Start", "of", "interval", "(", "int", ")", "(", "inclusive", ")", ":", "param", "to", ":", "End", "of", "interval", "(", "int", ")", "(", "exclusive", ")", ":", "raises", ":", "IOError" ]
train
https://github.com/beasley-weather/weewx-orm/blob/37eb8adaf1f051f47367d69f0360e49feb7262fb/weewx_orm/__init__.py#L37-L56
beasley-weather/weewx-orm
weewx_orm/__init__.py
WeewxDB.archive_insert_data
def archive_insert_data(self, data_dump): ''' :param data: Archive table data :type data: list[archive] :raises: IOError ''' with self.session as session: try: data = [self.tables.archive(**entry) for entry in data_dump] session.add_all(data) session.commit() except SQLAlchemyError as exc: session.rollback() print_exc() raise IOError(exc)
python
def archive_insert_data(self, data_dump): ''' :param data: Archive table data :type data: list[archive] :raises: IOError ''' with self.session as session: try: data = [self.tables.archive(**entry) for entry in data_dump] session.add_all(data) session.commit() except SQLAlchemyError as exc: session.rollback() print_exc() raise IOError(exc)
[ "def", "archive_insert_data", "(", "self", ",", "data_dump", ")", ":", "with", "self", ".", "session", "as", "session", ":", "try", ":", "data", "=", "[", "self", ".", "tables", ".", "archive", "(", "*", "*", "entry", ")", "for", "entry", "in", "data...
:param data: Archive table data :type data: list[archive] :raises: IOError
[ ":", "param", "data", ":", "Archive", "table", "data", ":", "type", "data", ":", "list", "[", "archive", "]", ":", "raises", ":", "IOError" ]
train
https://github.com/beasley-weather/weewx-orm/blob/37eb8adaf1f051f47367d69f0360e49feb7262fb/weewx_orm/__init__.py#L58-L73
calve/prof
prof/config.py
read_config
def read_config(): """ Read a config file from ``$HOME/.profrc`` We expect a file of the following form [DEFAULT] Baseurl = https://your-prof-instance Login = username """ filename = path.join(path.expanduser('~'), '.profrc') config = configparser.ConfigParser() config.read(filename) if 'baseurl' not in config['DEFAULT']: print("""FATAL : No baseurl found in {0} Open {0} and add the following lines [DEFAULT] Baseurl = https://your-prof-instance""".format(filename)) sys.exit() try: requests.get(config['DEFAULT']['BASEURL']) except: print("{0} does not seems to be reachable. Verify the baseurl set at {1} matches ``https://your-prof-instance``".format(config['DEFAULT']['BASEURL'], filename)) sys.exit() return config
python
def read_config(): """ Read a config file from ``$HOME/.profrc`` We expect a file of the following form [DEFAULT] Baseurl = https://your-prof-instance Login = username """ filename = path.join(path.expanduser('~'), '.profrc') config = configparser.ConfigParser() config.read(filename) if 'baseurl' not in config['DEFAULT']: print("""FATAL : No baseurl found in {0} Open {0} and add the following lines [DEFAULT] Baseurl = https://your-prof-instance""".format(filename)) sys.exit() try: requests.get(config['DEFAULT']['BASEURL']) except: print("{0} does not seems to be reachable. Verify the baseurl set at {1} matches ``https://your-prof-instance``".format(config['DEFAULT']['BASEURL'], filename)) sys.exit() return config
[ "def", "read_config", "(", ")", ":", "filename", "=", "path", ".", "join", "(", "path", ".", "expanduser", "(", "'~'", ")", ",", "'.profrc'", ")", "config", "=", "configparser", ".", "ConfigParser", "(", ")", "config", ".", "read", "(", "filename", ")"...
Read a config file from ``$HOME/.profrc`` We expect a file of the following form [DEFAULT] Baseurl = https://your-prof-instance Login = username
[ "Read", "a", "config", "file", "from", "$HOME", "/", ".", "profrc", "We", "expect", "a", "file", "of", "the", "following", "form" ]
train
https://github.com/calve/prof/blob/c6e034f45ab60908dea661e8271bc44758aeedcf/prof/config.py#L7-L31
calve/prof
prof/config.py
set_sessid
def set_sessid(sessid): """ Save this current sessid in ``$HOME/.profrc`` """ filename = path.join(path.expanduser('~'), '.profrc') config = configparser.ConfigParser() config.read(filename) config.set('DEFAULT', 'Session', sessid) with open(filename, 'w') as configfile: print("write a new sessid") config.write(configfile)
python
def set_sessid(sessid): """ Save this current sessid in ``$HOME/.profrc`` """ filename = path.join(path.expanduser('~'), '.profrc') config = configparser.ConfigParser() config.read(filename) config.set('DEFAULT', 'Session', sessid) with open(filename, 'w') as configfile: print("write a new sessid") config.write(configfile)
[ "def", "set_sessid", "(", "sessid", ")", ":", "filename", "=", "path", ".", "join", "(", "path", ".", "expanduser", "(", "'~'", ")", ",", "'.profrc'", ")", "config", "=", "configparser", ".", "ConfigParser", "(", ")", "config", ".", "read", "(", "filen...
Save this current sessid in ``$HOME/.profrc``
[ "Save", "this", "current", "sessid", "in", "$HOME", "/", ".", "profrc" ]
train
https://github.com/calve/prof/blob/c6e034f45ab60908dea661e8271bc44758aeedcf/prof/config.py#L34-L44
simodalla/pygmount
pygmount/core/samba.py
run_command
def run_command(command): """ Utility function for run command with subprocess. Return a tuple, with return code and if python >= 2.7 command's output or None if python <= 2.6 """ try: check_ouput = getattr( subprocess, 'check_output', subprocess.check_call) result = check_ouput(command, stderr=subprocess.STDOUT, shell=True) if check_ouput.__name__ == 'check_output': return 0, result else: return result, None except subprocess.CalledProcessError as e: return e.returncode, getattr(e, 'output', None)
python
def run_command(command): """ Utility function for run command with subprocess. Return a tuple, with return code and if python >= 2.7 command's output or None if python <= 2.6 """ try: check_ouput = getattr( subprocess, 'check_output', subprocess.check_call) result = check_ouput(command, stderr=subprocess.STDOUT, shell=True) if check_ouput.__name__ == 'check_output': return 0, result else: return result, None except subprocess.CalledProcessError as e: return e.returncode, getattr(e, 'output', None)
[ "def", "run_command", "(", "command", ")", ":", "try", ":", "check_ouput", "=", "getattr", "(", "subprocess", ",", "'check_output'", ",", "subprocess", ".", "check_call", ")", "result", "=", "check_ouput", "(", "command", ",", "stderr", "=", "subprocess", "....
Utility function for run command with subprocess. Return a tuple, with return code and if python >= 2.7 command's output or None if python <= 2.6
[ "Utility", "function", "for", "run", "command", "with", "subprocess", ".", "Return", "a", "tuple", "with", "return", "code", "and", "if", "python", ">", "=", "2", ".", "7", "command", "s", "output", "or", "None", "if", "python", "<", "=", "2", ".", "...
train
https://github.com/simodalla/pygmount/blob/8027cfa2ed5fa8e9207d72b6013ecec7fcf2e5f5/pygmount/core/samba.py#L28-L42
jonathanlloyd/envpy
envpy/parser.py
parse_env
def parse_env(config_schema, env): """Parse the values from a given environment against a given config schema Args: config_schema: A dict which maps the variable name to a Schema object that describes the requested value. env: A dict which represents the value of each variable in the environment. """ try: return { key: item_schema.parse(key, env.get(key)) for key, item_schema in config_schema.items() } except KeyError as error: raise MissingConfigError( "Required config not set: {}".format(error.args[0]) )
python
def parse_env(config_schema, env): """Parse the values from a given environment against a given config schema Args: config_schema: A dict which maps the variable name to a Schema object that describes the requested value. env: A dict which represents the value of each variable in the environment. """ try: return { key: item_schema.parse(key, env.get(key)) for key, item_schema in config_schema.items() } except KeyError as error: raise MissingConfigError( "Required config not set: {}".format(error.args[0]) )
[ "def", "parse_env", "(", "config_schema", ",", "env", ")", ":", "try", ":", "return", "{", "key", ":", "item_schema", ".", "parse", "(", "key", ",", "env", ".", "get", "(", "key", ")", ")", "for", "key", ",", "item_schema", "in", "config_schema", "."...
Parse the values from a given environment against a given config schema Args: config_schema: A dict which maps the variable name to a Schema object that describes the requested value. env: A dict which represents the value of each variable in the environment.
[ "Parse", "the", "values", "from", "a", "given", "environment", "against", "a", "given", "config", "schema" ]
train
https://github.com/jonathanlloyd/envpy/blob/b3fa1cd0defc95ba76a36810653f9c7fe4f51ccc/envpy/parser.py#L94-L111
jonathanlloyd/envpy
envpy/parser.py
Schema.parse
def parse(self, key, value): """Parse the environment value for a given key against the schema. Args: key: The name of the environment variable. value: The value to be parsed. """ if value is not None: try: return self._parser(value) except Exception: raise ParsingError("Error parsing {}".format(key)) elif self._default is not SENTINAL: return self._default else: raise KeyError(key)
python
def parse(self, key, value): """Parse the environment value for a given key against the schema. Args: key: The name of the environment variable. value: The value to be parsed. """ if value is not None: try: return self._parser(value) except Exception: raise ParsingError("Error parsing {}".format(key)) elif self._default is not SENTINAL: return self._default else: raise KeyError(key)
[ "def", "parse", "(", "self", ",", "key", ",", "value", ")", ":", "if", "value", "is", "not", "None", ":", "try", ":", "return", "self", ".", "_parser", "(", "value", ")", "except", "Exception", ":", "raise", "ParsingError", "(", "\"Error parsing {}\"", ...
Parse the environment value for a given key against the schema. Args: key: The name of the environment variable. value: The value to be parsed.
[ "Parse", "the", "environment", "value", "for", "a", "given", "key", "against", "the", "schema", "." ]
train
https://github.com/jonathanlloyd/envpy/blob/b3fa1cd0defc95ba76a36810653f9c7fe4f51ccc/envpy/parser.py#L76-L91
dependencies-io/cli
dependencies_cli/project_template/{{cookiecutter.name}}/src/utils.py
write_json_to_temp_file
def write_json_to_temp_file(data): """Writes JSON data to a temporary file and returns the path to it""" fp = tempfile.NamedTemporaryFile(delete=False) fp.write(json.dumps(data).encode('utf-8')) fp.close() return fp.name
python
def write_json_to_temp_file(data): """Writes JSON data to a temporary file and returns the path to it""" fp = tempfile.NamedTemporaryFile(delete=False) fp.write(json.dumps(data).encode('utf-8')) fp.close() return fp.name
[ "def", "write_json_to_temp_file", "(", "data", ")", ":", "fp", "=", "tempfile", ".", "NamedTemporaryFile", "(", "delete", "=", "False", ")", "fp", ".", "write", "(", "json", ".", "dumps", "(", "data", ")", ".", "encode", "(", "'utf-8'", ")", ")", "fp",...
Writes JSON data to a temporary file and returns the path to it
[ "Writes", "JSON", "data", "to", "a", "temporary", "file", "and", "returns", "the", "path", "to", "it" ]
train
https://github.com/dependencies-io/cli/blob/d8ae97343c48a61d6614d3e8af6a981b4cfb1bcb/dependencies_cli/project_template/{{cookiecutter.name}}/src/utils.py#L7-L12
dependencies-io/cli
dependencies_cli/project_template/{{cookiecutter.name}}/src/utils.py
mock_lockfile_update
def mock_lockfile_update(path): """ This is a mock update. In place of this, you might simply shell out to a command like `yarn upgrade`. """ updated_lockfile_contents = { 'package1': '1.2.0' } with open(path, 'w+') as f: f.write(json.dumps(updated_lockfile_contents, indent=4)) return updated_lockfile_contents
python
def mock_lockfile_update(path): """ This is a mock update. In place of this, you might simply shell out to a command like `yarn upgrade`. """ updated_lockfile_contents = { 'package1': '1.2.0' } with open(path, 'w+') as f: f.write(json.dumps(updated_lockfile_contents, indent=4)) return updated_lockfile_contents
[ "def", "mock_lockfile_update", "(", "path", ")", ":", "updated_lockfile_contents", "=", "{", "'package1'", ":", "'1.2.0'", "}", "with", "open", "(", "path", ",", "'w+'", ")", "as", "f", ":", "f", ".", "write", "(", "json", ".", "dumps", "(", "updated_loc...
This is a mock update. In place of this, you might simply shell out to a command like `yarn upgrade`.
[ "This", "is", "a", "mock", "update", ".", "In", "place", "of", "this", "you", "might", "simply", "shell", "out", "to", "a", "command", "like", "yarn", "upgrade", "." ]
train
https://github.com/dependencies-io/cli/blob/d8ae97343c48a61d6614d3e8af6a981b4cfb1bcb/dependencies_cli/project_template/{{cookiecutter.name}}/src/utils.py#L27-L37
dependencies-io/cli
dependencies_cli/project_template/{{cookiecutter.name}}/src/utils.py
print_settings_example
def print_settings_example(): """ You can use settings to get additional information from the user via their dependencies.io configuration file. Settings will be automatically injected as env variables with the "SETTING_" prefix. All settings will be passed as strings. More complex types will be json encoded. You should always provide defaults, if possible. """ SETTING_EXAMPLE_LIST = json.loads(os.getenv('SETTING_EXAMPLE_LIST', '[]')) SETTING_EXAMPLE_STRING = os.getenv('SETTING_EXAMPLE_STRING', 'default') print('List setting values: {}'.format(SETTING_EXAMPLE_LIST)) print('String setting value: {}'.format(SETTING_EXAMPLE_STRING))
python
def print_settings_example(): """ You can use settings to get additional information from the user via their dependencies.io configuration file. Settings will be automatically injected as env variables with the "SETTING_" prefix. All settings will be passed as strings. More complex types will be json encoded. You should always provide defaults, if possible. """ SETTING_EXAMPLE_LIST = json.loads(os.getenv('SETTING_EXAMPLE_LIST', '[]')) SETTING_EXAMPLE_STRING = os.getenv('SETTING_EXAMPLE_STRING', 'default') print('List setting values: {}'.format(SETTING_EXAMPLE_LIST)) print('String setting value: {}'.format(SETTING_EXAMPLE_STRING))
[ "def", "print_settings_example", "(", ")", ":", "SETTING_EXAMPLE_LIST", "=", "json", ".", "loads", "(", "os", ".", "getenv", "(", "'SETTING_EXAMPLE_LIST'", ",", "'[]'", ")", ")", "SETTING_EXAMPLE_STRING", "=", "os", ".", "getenv", "(", "'SETTING_EXAMPLE_STRING'", ...
You can use settings to get additional information from the user via their dependencies.io configuration file. Settings will be automatically injected as env variables with the "SETTING_" prefix. All settings will be passed as strings. More complex types will be json encoded. You should always provide defaults, if possible.
[ "You", "can", "use", "settings", "to", "get", "additional", "information", "from", "the", "user", "via", "their", "dependencies", ".", "io", "configuration", "file", ".", "Settings", "will", "be", "automatically", "injected", "as", "env", "variables", "with", ...
train
https://github.com/dependencies-io/cli/blob/d8ae97343c48a61d6614d3e8af6a981b4cfb1bcb/dependencies_cli/project_template/{{cookiecutter.name}}/src/utils.py#L44-L57
Kunstmord/datalib
src/example.py
otsu
def otsu(fpath): """ Returns value of otsu threshold for an image """ img = imread(fpath, as_grey=True) thresh = skimage.filter.threshold_otsu(img) return thresh
python
def otsu(fpath): """ Returns value of otsu threshold for an image """ img = imread(fpath, as_grey=True) thresh = skimage.filter.threshold_otsu(img) return thresh
[ "def", "otsu", "(", "fpath", ")", ":", "img", "=", "imread", "(", "fpath", ",", "as_grey", "=", "True", ")", "thresh", "=", "skimage", ".", "filter", ".", "threshold_otsu", "(", "img", ")", "return", "thresh" ]
Returns value of otsu threshold for an image
[ "Returns", "value", "of", "otsu", "threshold", "for", "an", "image" ]
train
https://github.com/Kunstmord/datalib/blob/9d7db3e7c3a5feeeb5d19eb0dbee858bd2b50886/src/example.py#L14-L21
Kunstmord/datalib
src/example.py
move_to
def move_to(name): """ Path to image folders """ datapath = path.join(path.dirname(path.realpath(__file__)), path.pardir) datapath = path.join(datapath, '../gzoo_data', 'images', name) print path.normpath(datapath) return path.normpath(datapath)
python
def move_to(name): """ Path to image folders """ datapath = path.join(path.dirname(path.realpath(__file__)), path.pardir) datapath = path.join(datapath, '../gzoo_data', 'images', name) print path.normpath(datapath) return path.normpath(datapath)
[ "def", "move_to", "(", "name", ")", ":", "datapath", "=", "path", ".", "join", "(", "path", ".", "dirname", "(", "path", ".", "realpath", "(", "__file__", ")", ")", ",", "path", ".", "pardir", ")", "datapath", "=", "path", ".", "join", "(", "datapa...
Path to image folders
[ "Path", "to", "image", "folders" ]
train
https://github.com/Kunstmord/datalib/blob/9d7db3e7c3a5feeeb5d19eb0dbee858bd2b50886/src/example.py#L31-L38
Kunstmord/datalib
src/example.py
labels
def labels(): """ Path to labels file """ datapath = path.join(path.dirname(path.realpath(__file__)), path.pardir) datapath = path.join(datapath, '../gzoo_data', 'train_solution.csv') return path.normpath(datapath)
python
def labels(): """ Path to labels file """ datapath = path.join(path.dirname(path.realpath(__file__)), path.pardir) datapath = path.join(datapath, '../gzoo_data', 'train_solution.csv') return path.normpath(datapath)
[ "def", "labels", "(", ")", ":", "datapath", "=", "path", ".", "join", "(", "path", ".", "dirname", "(", "path", ".", "realpath", "(", "__file__", ")", ")", ",", "path", ".", "pardir", ")", "datapath", "=", "path", ".", "join", "(", "datapath", ",",...
Path to labels file
[ "Path", "to", "labels", "file" ]
train
https://github.com/Kunstmord/datalib/blob/9d7db3e7c3a5feeeb5d19eb0dbee858bd2b50886/src/example.py#L41-L47
brookemosby/titanic
TitanicAttempt/TitanicAttempt.py
Feature_Engineering
def Feature_Engineering(DataFrame,train): """ Extracts important features and writes them in usable form Deletes features of little importance :param DataFrame: This is the file name of a csv file we wish to convert into a usable DataFrame. :param train: This is training set corresponding to our csv file. Should be of type pandas.DataFrame :returns: Returns csv file, after having been modified as a pandas.DataFrame type """ DataFrame= pd.read_csv(DataFrame) titles=DataFrame['Name'].apply(lambda x: x.split(',')[1].split(' ')[1]) title_mapping = {"the":5, "Mr.": 1, "Miss.": 2, "Mrs.": 3, "Master.": 4, "Dr.": 5, "Rev.": 6, "Major.": 7, "Col.": 7, "Mlle.": 2, "Mme.": 3, "Don.": 9, "Lady.": 10, "Countess.": 10, "Jonkheer.": 10, "Sir.": 9, "Capt.": 7, "Ms.": 2, "Dona.": 10} for k,v in title_mapping.items(): titles[titles == k] = v DataFrame["Title"] = titles DataFrame['NameLen']=DataFrame['Name'].apply(lambda x: len(x)) DataFrame['FamSize']=DataFrame['SibSp']+DataFrame['Parch'] DataFrame['Has_Cabin'] = DataFrame["Cabin"].apply(lambda x: 0 if type(x) == float else 1) cabins=DataFrame['Cabin'].apply(lambda x: str(x)[0]) cabin_mapping={'A':3,'B':5,'C':5,'D':4,'E':4,'F':3,'G':2,'T':1,'n':10} for k,v in cabin_mapping.items(): cabins[cabins==k]=v DataFrame['Cabin']=cabins del DataFrame['Parch'] del DataFrame['SibSp'] del DataFrame['PassengerId'] pclass = pd.get_dummies( DataFrame.Pclass , prefix='Pclass' ) sex = pd.get_dummies(DataFrame.Sex) embarked = pd.get_dummies(DataFrame.Embarked, prefix='Embarked') DataFrame=pd.concat([DataFrame,pclass,sex,embarked],axis=1) del DataFrame['Pclass'] del DataFrame['Name'] del DataFrame['Ticket'] del DataFrame['Sex'] del DataFrame['Embarked'] DataFrame['Fare'].fillna(train['Fare'].median(), inplace = True) # Mapping Fare DataFrame.loc[ DataFrame['Fare'] <= 7.91, 'Fare'] = 0 DataFrame.loc[(DataFrame['Fare'] > 7.91) & (DataFrame['Fare'] <= 14.454), 'Fare'] = 1 DataFrame.loc[(DataFrame['Fare'] > 14.454) & (DataFrame['Fare'] <= 31), 'Fare'] = 2 
DataFrame.loc[ DataFrame['Fare'] > 31, 'Fare'] = 3 DataFrame['Fare'] = DataFrame['Fare'].astype(int) DataFrame['Age'].fillna(train['Age'].median(), inplace = True) return DataFrame
python
def Feature_Engineering(DataFrame,train): """ Extracts important features and writes them in usable form Deletes features of little importance :param DataFrame: This is the file name of a csv file we wish to convert into a usable DataFrame. :param train: This is training set corresponding to our csv file. Should be of type pandas.DataFrame :returns: Returns csv file, after having been modified as a pandas.DataFrame type """ DataFrame= pd.read_csv(DataFrame) titles=DataFrame['Name'].apply(lambda x: x.split(',')[1].split(' ')[1]) title_mapping = {"the":5, "Mr.": 1, "Miss.": 2, "Mrs.": 3, "Master.": 4, "Dr.": 5, "Rev.": 6, "Major.": 7, "Col.": 7, "Mlle.": 2, "Mme.": 3, "Don.": 9, "Lady.": 10, "Countess.": 10, "Jonkheer.": 10, "Sir.": 9, "Capt.": 7, "Ms.": 2, "Dona.": 10} for k,v in title_mapping.items(): titles[titles == k] = v DataFrame["Title"] = titles DataFrame['NameLen']=DataFrame['Name'].apply(lambda x: len(x)) DataFrame['FamSize']=DataFrame['SibSp']+DataFrame['Parch'] DataFrame['Has_Cabin'] = DataFrame["Cabin"].apply(lambda x: 0 if type(x) == float else 1) cabins=DataFrame['Cabin'].apply(lambda x: str(x)[0]) cabin_mapping={'A':3,'B':5,'C':5,'D':4,'E':4,'F':3,'G':2,'T':1,'n':10} for k,v in cabin_mapping.items(): cabins[cabins==k]=v DataFrame['Cabin']=cabins del DataFrame['Parch'] del DataFrame['SibSp'] del DataFrame['PassengerId'] pclass = pd.get_dummies( DataFrame.Pclass , prefix='Pclass' ) sex = pd.get_dummies(DataFrame.Sex) embarked = pd.get_dummies(DataFrame.Embarked, prefix='Embarked') DataFrame=pd.concat([DataFrame,pclass,sex,embarked],axis=1) del DataFrame['Pclass'] del DataFrame['Name'] del DataFrame['Ticket'] del DataFrame['Sex'] del DataFrame['Embarked'] DataFrame['Fare'].fillna(train['Fare'].median(), inplace = True) # Mapping Fare DataFrame.loc[ DataFrame['Fare'] <= 7.91, 'Fare'] = 0 DataFrame.loc[(DataFrame['Fare'] > 7.91) & (DataFrame['Fare'] <= 14.454), 'Fare'] = 1 DataFrame.loc[(DataFrame['Fare'] > 14.454) & (DataFrame['Fare'] <= 31), 'Fare'] = 2 
DataFrame.loc[ DataFrame['Fare'] > 31, 'Fare'] = 3 DataFrame['Fare'] = DataFrame['Fare'].astype(int) DataFrame['Age'].fillna(train['Age'].median(), inplace = True) return DataFrame
[ "def", "Feature_Engineering", "(", "DataFrame", ",", "train", ")", ":", "DataFrame", "=", "pd", ".", "read_csv", "(", "DataFrame", ")", "titles", "=", "DataFrame", "[", "'Name'", "]", ".", "apply", "(", "lambda", "x", ":", "x", ".", "split", "(", "','"...
Extracts important features and writes them in usable form Deletes features of little importance :param DataFrame: This is the file name of a csv file we wish to convert into a usable DataFrame. :param train: This is training set corresponding to our csv file. Should be of type pandas.DataFrame :returns: Returns csv file, after having been modified as a pandas.DataFrame type
[ "Extracts", "important", "features", "and", "writes", "them", "in", "usable", "form", "Deletes", "features", "of", "little", "importance", ":", "param", "DataFrame", ":", "This", "is", "the", "file", "name", "of", "a", "csv", "file", "we", "wish", "to", "c...
train
https://github.com/brookemosby/titanic/blob/e0eb3537a83c7b9d0b7a01db5f23785ffc6f8f70/TitanicAttempt/TitanicAttempt.py#L4-L53
brookemosby/titanic
TitanicAttempt/TitanicAttempt.py
Create_Random_Forest
def Create_Random_Forest(train): """ Fits Random Forest to training set. :param train: This is the file name of a csv file we wish to have fitted to a Random Forest, does not need to have features already extracted. :returns: Returns sklearn.ensemble.Random_Forest_Classifier fitted to training set. """ trainDF=pd.read_csv(train) train=Feature_Engineering(train,trainDF) RF = RFC(min_samples_split=10, n_estimators= 700, criterion= 'gini', max_depth=None) RF.fit(train.iloc[:, 1:], train.iloc[:, 0]) return RF
python
def Create_Random_Forest(train): """ Fits Random Forest to training set. :param train: This is the file name of a csv file we wish to have fitted to a Random Forest, does not need to have features already extracted. :returns: Returns sklearn.ensemble.Random_Forest_Classifier fitted to training set. """ trainDF=pd.read_csv(train) train=Feature_Engineering(train,trainDF) RF = RFC(min_samples_split=10, n_estimators= 700, criterion= 'gini', max_depth=None) RF.fit(train.iloc[:, 1:], train.iloc[:, 0]) return RF
[ "def", "Create_Random_Forest", "(", "train", ")", ":", "trainDF", "=", "pd", ".", "read_csv", "(", "train", ")", "train", "=", "Feature_Engineering", "(", "train", ",", "trainDF", ")", "RF", "=", "RFC", "(", "min_samples_split", "=", "10", ",", "n_estimato...
Fits Random Forest to training set. :param train: This is the file name of a csv file we wish to have fitted to a Random Forest, does not need to have features already extracted. :returns: Returns sklearn.ensemble.Random_Forest_Classifier fitted to training set.
[ "Fits", "Random", "Forest", "to", "training", "set", ".", ":", "param", "train", ":", "This", "is", "the", "file", "name", "of", "a", "csv", "file", "we", "wish", "to", "have", "fitted", "to", "a", "Random", "Forest", "does", "not", "need", "to", "ha...
train
https://github.com/brookemosby/titanic/blob/e0eb3537a83c7b9d0b7a01db5f23785ffc6f8f70/TitanicAttempt/TitanicAttempt.py#L56-L67
brookemosby/titanic
TitanicAttempt/TitanicAttempt.py
Produce_Predictions
def Produce_Predictions(FileName,train,test): """ Produces predictions for testing set, based off of training set. :param FileName: This is the csv file name we wish to have our predictions exported to. :param train: This is the file name of a csv file that will be the training set. :param test: This is the file name of the testing set that predictions will be made for. :returns: Returns nothing, creates csv file containing predictions for testing set. """ TestFileName=test TrainFileName=train trainDF=pd.read_csv(train) train=Feature_Engineering(train,trainDF) test=Feature_Engineering(test,trainDF) MLA=Create_Random_Forest(TrainFileName) predictions = MLA.predict(test) predictions = pd.DataFrame(predictions, columns=['Survived']) test = pd.read_csv(TestFileName) predictions = pd.concat((test.iloc[:, 0], predictions), axis = 1) predictions.to_csv(FileName, sep=",", index = False)
python
def Produce_Predictions(FileName,train,test): """ Produces predictions for testing set, based off of training set. :param FileName: This is the csv file name we wish to have our predictions exported to. :param train: This is the file name of a csv file that will be the training set. :param test: This is the file name of the testing set that predictions will be made for. :returns: Returns nothing, creates csv file containing predictions for testing set. """ TestFileName=test TrainFileName=train trainDF=pd.read_csv(train) train=Feature_Engineering(train,trainDF) test=Feature_Engineering(test,trainDF) MLA=Create_Random_Forest(TrainFileName) predictions = MLA.predict(test) predictions = pd.DataFrame(predictions, columns=['Survived']) test = pd.read_csv(TestFileName) predictions = pd.concat((test.iloc[:, 0], predictions), axis = 1) predictions.to_csv(FileName, sep=",", index = False)
[ "def", "Produce_Predictions", "(", "FileName", ",", "train", ",", "test", ")", ":", "TestFileName", "=", "test", "TrainFileName", "=", "train", "trainDF", "=", "pd", ".", "read_csv", "(", "train", ")", "train", "=", "Feature_Engineering", "(", "train", ",", ...
Produces predictions for testing set, based off of training set. :param FileName: This is the csv file name we wish to have our predictions exported to. :param train: This is the file name of a csv file that will be the training set. :param test: This is the file name of the testing set that predictions will be made for. :returns: Returns nothing, creates csv file containing predictions for testing set.
[ "Produces", "predictions", "for", "testing", "set", "based", "off", "of", "training", "set", ".", ":", "param", "FileName", ":", "This", "is", "the", "csv", "file", "name", "we", "wish", "to", "have", "our", "predictions", "exported", "to", ".", ":", "pa...
train
https://github.com/brookemosby/titanic/blob/e0eb3537a83c7b9d0b7a01db5f23785ffc6f8f70/TitanicAttempt/TitanicAttempt.py#L70-L89
cohorte/cohorte-herald
python/herald/transports/http/servlet.py
_make_json_result
def _make_json_result(code, message="", results=None): """ An utility method to prepare a JSON result string, usable by the SignalReceiver :param code: A HTTP Code :param message: An associated message """ return code, json.dumps({'code': code, 'message': message, 'results': results})
python
def _make_json_result(code, message="", results=None): """ An utility method to prepare a JSON result string, usable by the SignalReceiver :param code: A HTTP Code :param message: An associated message """ return code, json.dumps({'code': code, 'message': message, 'results': results})
[ "def", "_make_json_result", "(", "code", ",", "message", "=", "\"\"", ",", "results", "=", "None", ")", ":", "return", "code", ",", "json", ".", "dumps", "(", "{", "'code'", ":", "code", ",", "'message'", ":", "message", ",", "'results'", ":", "results...
An utility method to prepare a JSON result string, usable by the SignalReceiver :param code: A HTTP Code :param message: An associated message
[ "An", "utility", "method", "to", "prepare", "a", "JSON", "result", "string", "usable", "by", "the", "SignalReceiver" ]
train
https://github.com/cohorte/cohorte-herald/blob/bb3445d0031c8b3abad71e6219cc559b49faa3ee/python/herald/transports/http/servlet.py#L65-L75
mozilla/socorrolib
socorrolib/lib/context_tools.py
temp_file_context
def temp_file_context(raw_dump_path, logger=None): """this contextmanager implements conditionally deleting a pathname at the end of a context if the pathname indicates that it is a temp file by having the word 'TEMPORARY' embedded in it.""" try: yield raw_dump_path finally: if 'TEMPORARY' in raw_dump_path: try: os.unlink(raw_dump_path) except OSError: if logger is None: logger = FakeLogger() logger.warning( 'unable to delete %s. manual deletion is required.', raw_dump_path, exc_info=True )
python
def temp_file_context(raw_dump_path, logger=None): """this contextmanager implements conditionally deleting a pathname at the end of a context if the pathname indicates that it is a temp file by having the word 'TEMPORARY' embedded in it.""" try: yield raw_dump_path finally: if 'TEMPORARY' in raw_dump_path: try: os.unlink(raw_dump_path) except OSError: if logger is None: logger = FakeLogger() logger.warning( 'unable to delete %s. manual deletion is required.', raw_dump_path, exc_info=True )
[ "def", "temp_file_context", "(", "raw_dump_path", ",", "logger", "=", "None", ")", ":", "try", ":", "yield", "raw_dump_path", "finally", ":", "if", "'TEMPORARY'", "in", "raw_dump_path", ":", "try", ":", "os", ".", "unlink", "(", "raw_dump_path", ")", "except...
this contextmanager implements conditionally deleting a pathname at the end of a context if the pathname indicates that it is a temp file by having the word 'TEMPORARY' embedded in it.
[ "this", "contextmanager", "implements", "conditionally", "deleting", "a", "pathname", "at", "the", "end", "of", "a", "context", "if", "the", "pathname", "indicates", "that", "it", "is", "a", "temp", "file", "by", "having", "the", "word", "TEMPORARY", "embedded...
train
https://github.com/mozilla/socorrolib/blob/4ec08c6a4ee2c8a69150268afdd324f5f22b90c8/socorrolib/lib/context_tools.py#L13-L30
duniter/duniter-python-api
duniterpy/api/bma/blockchain.py
memberships
async def memberships(client: Client, search: str) -> dict: """ GET list of Membership documents for UID/Public key :param client: Client to connect to the api :param search: UID/Public key :return: """ return await client.get(MODULE + '/memberships/%s' % search, schema=MEMBERSHIPS_SCHEMA)
python
async def memberships(client: Client, search: str) -> dict: """ GET list of Membership documents for UID/Public key :param client: Client to connect to the api :param search: UID/Public key :return: """ return await client.get(MODULE + '/memberships/%s' % search, schema=MEMBERSHIPS_SCHEMA)
[ "async", "def", "memberships", "(", "client", ":", "Client", ",", "search", ":", "str", ")", "->", "dict", ":", "return", "await", "client", ".", "get", "(", "MODULE", "+", "'/memberships/%s'", "%", "search", ",", "schema", "=", "MEMBERSHIPS_SCHEMA", ")" ]
GET list of Membership documents for UID/Public key :param client: Client to connect to the api :param search: UID/Public key :return:
[ "GET", "list", "of", "Membership", "documents", "for", "UID", "/", "Public", "key" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/bma/blockchain.py#L363-L371
duniter/duniter-python-api
duniterpy/api/bma/blockchain.py
membership
async def membership(client: Client, membership_signed_raw: str) -> ClientResponse: """ POST a Membership document :param client: Client to connect to the api :param membership_signed_raw: Membership signed raw document :return: """ return await client.post(MODULE + '/membership', {'membership': membership_signed_raw}, rtype=RESPONSE_AIOHTTP)
python
async def membership(client: Client, membership_signed_raw: str) -> ClientResponse: """ POST a Membership document :param client: Client to connect to the api :param membership_signed_raw: Membership signed raw document :return: """ return await client.post(MODULE + '/membership', {'membership': membership_signed_raw}, rtype=RESPONSE_AIOHTTP)
[ "async", "def", "membership", "(", "client", ":", "Client", ",", "membership_signed_raw", ":", "str", ")", "->", "ClientResponse", ":", "return", "await", "client", ".", "post", "(", "MODULE", "+", "'/membership'", ",", "{", "'membership'", ":", "membership_si...
POST a Membership document :param client: Client to connect to the api :param membership_signed_raw: Membership signed raw document :return:
[ "POST", "a", "Membership", "document" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/bma/blockchain.py#L374-L382
duniter/duniter-python-api
duniterpy/api/bma/blockchain.py
block
async def block(client: Client, number: int = 0, block_raw: str = None, signature: str = None) -> Union[dict, ClientResponse]: """ GET/POST a block from/to the blockchain :param client: Client to connect to the api :param number: Block number to get :param block_raw: Block document to post :param signature: Signature of the block document issuer :return: """ # POST block if block_raw is not None and signature is not None: return await client.post(MODULE + '/block', {'block': block_raw, 'signature': signature}, rtype=RESPONSE_AIOHTTP) # GET block return await client.get(MODULE + '/block/%d' % number, schema=BLOCK_SCHEMA)
python
async def block(client: Client, number: int = 0, block_raw: str = None, signature: str = None) -> Union[dict, ClientResponse]: """ GET/POST a block from/to the blockchain :param client: Client to connect to the api :param number: Block number to get :param block_raw: Block document to post :param signature: Signature of the block document issuer :return: """ # POST block if block_raw is not None and signature is not None: return await client.post(MODULE + '/block', {'block': block_raw, 'signature': signature}, rtype=RESPONSE_AIOHTTP) # GET block return await client.get(MODULE + '/block/%d' % number, schema=BLOCK_SCHEMA)
[ "async", "def", "block", "(", "client", ":", "Client", ",", "number", ":", "int", "=", "0", ",", "block_raw", ":", "str", "=", "None", ",", "signature", ":", "str", "=", "None", ")", "->", "Union", "[", "dict", ",", "ClientResponse", "]", ":", "# P...
GET/POST a block from/to the blockchain :param client: Client to connect to the api :param number: Block number to get :param block_raw: Block document to post :param signature: Signature of the block document issuer :return:
[ "GET", "/", "POST", "a", "block", "from", "/", "to", "the", "blockchain" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/bma/blockchain.py#L395-L411
duniter/duniter-python-api
duniterpy/api/bma/blockchain.py
blocks
async def blocks(client: Client, count: int, start: int) -> list: """ GET list of blocks from the blockchain :param client: Client to connect to the api :param count: Number of blocks :param start: First block number :return: """ assert type(count) is int assert type(start) is int return await client.get(MODULE + '/blocks/%d/%d' % (count, start), schema=BLOCKS_SCHEMA)
python
async def blocks(client: Client, count: int, start: int) -> list: """ GET list of blocks from the blockchain :param client: Client to connect to the api :param count: Number of blocks :param start: First block number :return: """ assert type(count) is int assert type(start) is int return await client.get(MODULE + '/blocks/%d/%d' % (count, start), schema=BLOCKS_SCHEMA)
[ "async", "def", "blocks", "(", "client", ":", "Client", ",", "count", ":", "int", ",", "start", ":", "int", ")", "->", "list", ":", "assert", "type", "(", "count", ")", "is", "int", "assert", "type", "(", "start", ")", "is", "int", "return", "await...
GET list of blocks from the blockchain :param client: Client to connect to the api :param count: Number of blocks :param start: First block number :return:
[ "GET", "list", "of", "blocks", "from", "the", "blockchain" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/bma/blockchain.py#L414-L426
duniter/duniter-python-api
duniterpy/api/bma/blockchain.py
hardship
async def hardship(client: Client, pubkey: str) -> dict: """ GET hardship level for given member's public key for writing next block :param client: Client to connect to the api :param pubkey: Public key of the member :return: """ return await client.get(MODULE + '/hardship/%s' % pubkey, schema=HARDSHIP_SCHEMA)
python
async def hardship(client: Client, pubkey: str) -> dict: """ GET hardship level for given member's public key for writing next block :param client: Client to connect to the api :param pubkey: Public key of the member :return: """ return await client.get(MODULE + '/hardship/%s' % pubkey, schema=HARDSHIP_SCHEMA)
[ "async", "def", "hardship", "(", "client", ":", "Client", ",", "pubkey", ":", "str", ")", "->", "dict", ":", "return", "await", "client", ".", "get", "(", "MODULE", "+", "'/hardship/%s'", "%", "pubkey", ",", "schema", "=", "HARDSHIP_SCHEMA", ")" ]
GET hardship level for given member's public key for writing next block :param client: Client to connect to the api :param pubkey: Public key of the member :return:
[ "GET", "hardship", "level", "for", "given", "member", "s", "public", "key", "for", "writing", "next", "block" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/bma/blockchain.py#L429-L437
duniter/duniter-python-api
duniterpy/documents/block_uid.py
block_uid
def block_uid(value: Union[str, BlockUID, None]) -> BlockUID: """ Convert value to BlockUID instance :param value: Value to convert :return: """ if isinstance(value, BlockUID): return value elif isinstance(value, str): return BlockUID.from_str(value) elif value is None: return BlockUID.empty() else: raise TypeError("Cannot convert {0} to BlockUID".format(type(value)))
python
def block_uid(value: Union[str, BlockUID, None]) -> BlockUID: """ Convert value to BlockUID instance :param value: Value to convert :return: """ if isinstance(value, BlockUID): return value elif isinstance(value, str): return BlockUID.from_str(value) elif value is None: return BlockUID.empty() else: raise TypeError("Cannot convert {0} to BlockUID".format(type(value)))
[ "def", "block_uid", "(", "value", ":", "Union", "[", "str", ",", "BlockUID", ",", "None", "]", ")", "->", "BlockUID", ":", "if", "isinstance", "(", "value", ",", "BlockUID", ")", ":", "return", "value", "elif", "isinstance", "(", "value", ",", "str", ...
Convert value to BlockUID instance :param value: Value to convert :return:
[ "Convert", "value", "to", "BlockUID", "instance" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/block_uid.py#L84-L98
duniter/duniter-python-api
duniterpy/documents/block_uid.py
BlockUID.from_str
def from_str(cls: Type[BlockUIDType], blockid: str) -> BlockUIDType: """ :param blockid: The block id """ data = BlockUID.re_block_uid.match(blockid) if data is None: raise MalformedDocumentError("BlockUID") try: number = int(data.group(1)) except AttributeError: raise MalformedDocumentError("BlockUID") try: sha_hash = data.group(2) except AttributeError: raise MalformedDocumentError("BlockHash") return cls(number, sha_hash)
python
def from_str(cls: Type[BlockUIDType], blockid: str) -> BlockUIDType: """ :param blockid: The block id """ data = BlockUID.re_block_uid.match(blockid) if data is None: raise MalformedDocumentError("BlockUID") try: number = int(data.group(1)) except AttributeError: raise MalformedDocumentError("BlockUID") try: sha_hash = data.group(2) except AttributeError: raise MalformedDocumentError("BlockHash") return cls(number, sha_hash)
[ "def", "from_str", "(", "cls", ":", "Type", "[", "BlockUIDType", "]", ",", "blockid", ":", "str", ")", "->", "BlockUIDType", ":", "data", "=", "BlockUID", ".", "re_block_uid", ".", "match", "(", "blockid", ")", "if", "data", "is", "None", ":", "raise",...
:param blockid: The block id
[ ":", "param", "blockid", ":", "The", "block", "id" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/block_uid.py#L30-L47
cohorte/cohorte-herald
python/herald/transports/http/discovery_multicast.py
make_heartbeat
def make_heartbeat(port, path, peer_uid, node_uid, app_id): """ Prepares the heart beat UDP packet Format : Little endian * Kind of beat (1 byte) * Herald HTTP server port (2 bytes) * Herald HTTP servlet path length (2 bytes) * Herald HTTP servlet path (variable, UTF-8) * Peer UID length (2 bytes) * Peer UID (variable, UTF-8) * Node UID length (2 bytes) * Node UID (variable, UTF-8) * Application ID length (2 bytes) * Application ID (variable, UTF-8) :param port: The port to access the Herald HTTP server :param path: The path to the Herald HTTP servlet :param peer_uid: The UID of the peer :param node_uid: The UID of the node :param app_id: Application ID :return: The heart beat packet content (byte array) """ # Type and port... packet = struct.pack("<BBH", PACKET_FORMAT_VERSION, PACKET_TYPE_HEARTBEAT, port) for string in (path, peer_uid, node_uid, app_id): # Strings... string_bytes = to_bytes(string) packet += struct.pack("<H", len(string_bytes)) packet += string_bytes return packet
python
def make_heartbeat(port, path, peer_uid, node_uid, app_id): """ Prepares the heart beat UDP packet Format : Little endian * Kind of beat (1 byte) * Herald HTTP server port (2 bytes) * Herald HTTP servlet path length (2 bytes) * Herald HTTP servlet path (variable, UTF-8) * Peer UID length (2 bytes) * Peer UID (variable, UTF-8) * Node UID length (2 bytes) * Node UID (variable, UTF-8) * Application ID length (2 bytes) * Application ID (variable, UTF-8) :param port: The port to access the Herald HTTP server :param path: The path to the Herald HTTP servlet :param peer_uid: The UID of the peer :param node_uid: The UID of the node :param app_id: Application ID :return: The heart beat packet content (byte array) """ # Type and port... packet = struct.pack("<BBH", PACKET_FORMAT_VERSION, PACKET_TYPE_HEARTBEAT, port) for string in (path, peer_uid, node_uid, app_id): # Strings... string_bytes = to_bytes(string) packet += struct.pack("<H", len(string_bytes)) packet += string_bytes return packet
[ "def", "make_heartbeat", "(", "port", ",", "path", ",", "peer_uid", ",", "node_uid", ",", "app_id", ")", ":", "# Type and port...", "packet", "=", "struct", ".", "pack", "(", "\"<BBH\"", ",", "PACKET_FORMAT_VERSION", ",", "PACKET_TYPE_HEARTBEAT", ",", "port", ...
Prepares the heart beat UDP packet Format : Little endian * Kind of beat (1 byte) * Herald HTTP server port (2 bytes) * Herald HTTP servlet path length (2 bytes) * Herald HTTP servlet path (variable, UTF-8) * Peer UID length (2 bytes) * Peer UID (variable, UTF-8) * Node UID length (2 bytes) * Node UID (variable, UTF-8) * Application ID length (2 bytes) * Application ID (variable, UTF-8) :param port: The port to access the Herald HTTP server :param path: The path to the Herald HTTP servlet :param peer_uid: The UID of the peer :param node_uid: The UID of the node :param app_id: Application ID :return: The heart beat packet content (byte array)
[ "Prepares", "the", "heart", "beat", "UDP", "packet" ]
train
https://github.com/cohorte/cohorte-herald/blob/bb3445d0031c8b3abad71e6219cc559b49faa3ee/python/herald/transports/http/discovery_multicast.py#L283-L314
cohorte/cohorte-herald
python/herald/transports/http/discovery_multicast.py
make_lastbeat
def make_lastbeat(peer_uid, app_id): """ Prepares the last beat UDP packet (when the peer is going away) Format : Little endian * Kind of beat (1 byte) * Peer UID length (2 bytes) * Peer UID (variable, UTF-8) * Application ID length (2 bytes) * Application ID (variable, UTF-8) :param peer_uid: Peer UID :param app_id: Application ID :return: The last beat packet content (byte array) """ packet = struct.pack("<BB", PACKET_FORMAT_VERSION, PACKET_TYPE_LASTBEAT) for string in (peer_uid, app_id): string_bytes = to_bytes(string) packet += struct.pack("<H", len(string_bytes)) packet += string_bytes return packet
python
def make_lastbeat(peer_uid, app_id): """ Prepares the last beat UDP packet (when the peer is going away) Format : Little endian * Kind of beat (1 byte) * Peer UID length (2 bytes) * Peer UID (variable, UTF-8) * Application ID length (2 bytes) * Application ID (variable, UTF-8) :param peer_uid: Peer UID :param app_id: Application ID :return: The last beat packet content (byte array) """ packet = struct.pack("<BB", PACKET_FORMAT_VERSION, PACKET_TYPE_LASTBEAT) for string in (peer_uid, app_id): string_bytes = to_bytes(string) packet += struct.pack("<H", len(string_bytes)) packet += string_bytes return packet
[ "def", "make_lastbeat", "(", "peer_uid", ",", "app_id", ")", ":", "packet", "=", "struct", ".", "pack", "(", "\"<BB\"", ",", "PACKET_FORMAT_VERSION", ",", "PACKET_TYPE_LASTBEAT", ")", "for", "string", "in", "(", "peer_uid", ",", "app_id", ")", ":", "string_b...
Prepares the last beat UDP packet (when the peer is going away) Format : Little endian * Kind of beat (1 byte) * Peer UID length (2 bytes) * Peer UID (variable, UTF-8) * Application ID length (2 bytes) * Application ID (variable, UTF-8) :param peer_uid: Peer UID :param app_id: Application ID :return: The last beat packet content (byte array)
[ "Prepares", "the", "last", "beat", "UDP", "packet", "(", "when", "the", "peer", "is", "going", "away", ")" ]
train
https://github.com/cohorte/cohorte-herald/blob/bb3445d0031c8b3abad71e6219cc559b49faa3ee/python/herald/transports/http/discovery_multicast.py#L317-L338
cohorte/cohorte-herald
python/herald/transports/http/discovery_multicast.py
MulticastReceiver.start
def start(self): """ Starts listening to the socket :return: True if the socket has been created """ # Create the multicast socket (update the group) self._socket, self._group = create_multicast_socket(self._group, self._port) # Start the listening thread self._stop_event.clear() self._thread = threading.Thread( target=self.__read, name="MulticastReceiver-{0}".format(self._port)) self._thread.start()
python
def start(self): """ Starts listening to the socket :return: True if the socket has been created """ # Create the multicast socket (update the group) self._socket, self._group = create_multicast_socket(self._group, self._port) # Start the listening thread self._stop_event.clear() self._thread = threading.Thread( target=self.__read, name="MulticastReceiver-{0}".format(self._port)) self._thread.start()
[ "def", "start", "(", "self", ")", ":", "# Create the multicast socket (update the group)", "self", ".", "_socket", ",", "self", ".", "_group", "=", "create_multicast_socket", "(", "self", ".", "_group", ",", "self", ".", "_port", ")", "# Start the listening thread",...
Starts listening to the socket :return: True if the socket has been created
[ "Starts", "listening", "to", "the", "socket" ]
train
https://github.com/cohorte/cohorte-herald/blob/bb3445d0031c8b3abad71e6219cc559b49faa3ee/python/herald/transports/http/discovery_multicast.py#L370-L385
cohorte/cohorte-herald
python/herald/transports/http/discovery_multicast.py
MulticastReceiver.stop
def stop(self): """ Stops listening to the socket """ # Stop the loop self._stop_event.set() # Join the thread self._thread.join() self._thread = None # Close the socket close_multicast_socket(self._socket, self._group)
python
def stop(self): """ Stops listening to the socket """ # Stop the loop self._stop_event.set() # Join the thread self._thread.join() self._thread = None # Close the socket close_multicast_socket(self._socket, self._group)
[ "def", "stop", "(", "self", ")", ":", "# Stop the loop", "self", ".", "_stop_event", ".", "set", "(", ")", "# Join the thread", "self", ".", "_thread", ".", "join", "(", ")", "self", ".", "_thread", "=", "None", "# Close the socket", "close_multicast_socket", ...
Stops listening to the socket
[ "Stops", "listening", "to", "the", "socket" ]
train
https://github.com/cohorte/cohorte-herald/blob/bb3445d0031c8b3abad71e6219cc559b49faa3ee/python/herald/transports/http/discovery_multicast.py#L387-L399
cohorte/cohorte-herald
python/herald/transports/http/discovery_multicast.py
MulticastReceiver._handle_heartbeat
def _handle_heartbeat(self, sender, data): """ Handles a raw heart beat :param sender: Sender (address, port) tuple :param data: Raw packet data """ # Format of packet parsed, data = self._unpack("<B", data) format = parsed[0] if format == PACKET_FORMAT_VERSION: # Kind of beat parsed, data = self._unpack("<B", data) kind = parsed[0] if kind == PACKET_TYPE_HEARTBEAT: # Extract content parsed, data = self._unpack("<H", data) port = parsed[0] path, data = self._unpack_string(data) uid, data = self._unpack_string(data) node_uid, data = self._unpack_string(data) try: app_id, data = self._unpack_string(data) except struct.error: # Compatibility with previous version app_id = herald.DEFAULT_APPLICATION_ID elif kind == PACKET_TYPE_LASTBEAT: # Peer is going away uid, data = self._unpack_string(data) app_id, data = self._unpack_string(data) port = -1 path = None node_uid = None else: _logger.warning("Unknown kind of packet: %d", kind) return try: self._callback(kind, uid, node_uid, app_id, sender[0], port, path) except Exception as ex: _logger.exception("Error handling heart beat: %s", ex)
python
def _handle_heartbeat(self, sender, data): """ Handles a raw heart beat :param sender: Sender (address, port) tuple :param data: Raw packet data """ # Format of packet parsed, data = self._unpack("<B", data) format = parsed[0] if format == PACKET_FORMAT_VERSION: # Kind of beat parsed, data = self._unpack("<B", data) kind = parsed[0] if kind == PACKET_TYPE_HEARTBEAT: # Extract content parsed, data = self._unpack("<H", data) port = parsed[0] path, data = self._unpack_string(data) uid, data = self._unpack_string(data) node_uid, data = self._unpack_string(data) try: app_id, data = self._unpack_string(data) except struct.error: # Compatibility with previous version app_id = herald.DEFAULT_APPLICATION_ID elif kind == PACKET_TYPE_LASTBEAT: # Peer is going away uid, data = self._unpack_string(data) app_id, data = self._unpack_string(data) port = -1 path = None node_uid = None else: _logger.warning("Unknown kind of packet: %d", kind) return try: self._callback(kind, uid, node_uid, app_id, sender[0], port, path) except Exception as ex: _logger.exception("Error handling heart beat: %s", ex)
[ "def", "_handle_heartbeat", "(", "self", ",", "sender", ",", "data", ")", ":", "# Format of packet", "parsed", ",", "data", "=", "self", ".", "_unpack", "(", "\"<B\"", ",", "data", ")", "format", "=", "parsed", "[", "0", "]", "if", "format", "==", "PAC...
Handles a raw heart beat :param sender: Sender (address, port) tuple :param data: Raw packet data
[ "Handles", "a", "raw", "heart", "beat" ]
train
https://github.com/cohorte/cohorte-herald/blob/bb3445d0031c8b3abad71e6219cc559b49faa3ee/python/herald/transports/http/discovery_multicast.py#L401-L443
cohorte/cohorte-herald
python/herald/transports/http/discovery_multicast.py
MulticastReceiver._unpack
def _unpack(self, fmt, data): """ Calls struct.unpack(). Returns a tuple containing the result tuple and the subset of data containing the unread content. :param fmt: The format of data :param data: Data to unpack :return: A tuple (result tuple, unread_data) """ size = struct.calcsize(fmt) read, unread = data[:size], data[size:] return struct.unpack(fmt, read), unread
python
def _unpack(self, fmt, data): """ Calls struct.unpack(). Returns a tuple containing the result tuple and the subset of data containing the unread content. :param fmt: The format of data :param data: Data to unpack :return: A tuple (result tuple, unread_data) """ size = struct.calcsize(fmt) read, unread = data[:size], data[size:] return struct.unpack(fmt, read), unread
[ "def", "_unpack", "(", "self", ",", "fmt", ",", "data", ")", ":", "size", "=", "struct", ".", "calcsize", "(", "fmt", ")", "read", ",", "unread", "=", "data", "[", ":", "size", "]", ",", "data", "[", "size", ":", "]", "return", "struct", ".", "...
Calls struct.unpack(). Returns a tuple containing the result tuple and the subset of data containing the unread content. :param fmt: The format of data :param data: Data to unpack :return: A tuple (result tuple, unread_data)
[ "Calls", "struct", ".", "unpack", "()", "." ]
train
https://github.com/cohorte/cohorte-herald/blob/bb3445d0031c8b3abad71e6219cc559b49faa3ee/python/herald/transports/http/discovery_multicast.py#L449-L462
cohorte/cohorte-herald
python/herald/transports/http/discovery_multicast.py
MulticastReceiver._unpack_string
def _unpack_string(self, data): """ Unpacks the next string from the given data :param data: A datagram, starting at a string size :return: A (string, unread_data) tuple """ # Get the size of the string result, data = self._unpack("<H", data) size = result[0] # Read it string_bytes = data[:size] # Convert it return to_unicode(string_bytes), data[size:]
python
def _unpack_string(self, data): """ Unpacks the next string from the given data :param data: A datagram, starting at a string size :return: A (string, unread_data) tuple """ # Get the size of the string result, data = self._unpack("<H", data) size = result[0] # Read it string_bytes = data[:size] # Convert it return to_unicode(string_bytes), data[size:]
[ "def", "_unpack_string", "(", "self", ",", "data", ")", ":", "# Get the size of the string", "result", ",", "data", "=", "self", ".", "_unpack", "(", "\"<H\"", ",", "data", ")", "size", "=", "result", "[", "0", "]", "# Read it", "string_bytes", "=", "data"...
Unpacks the next string from the given data :param data: A datagram, starting at a string size :return: A (string, unread_data) tuple
[ "Unpacks", "the", "next", "string", "from", "the", "given", "data" ]
train
https://github.com/cohorte/cohorte-herald/blob/bb3445d0031c8b3abad71e6219cc559b49faa3ee/python/herald/transports/http/discovery_multicast.py#L464-L479
cohorte/cohorte-herald
python/herald/transports/http/discovery_multicast.py
MulticastReceiver.__read
def __read(self): """ Reads packets from the socket """ # Set the socket as non-blocking self._socket.setblocking(0) while not self._stop_event.is_set(): # Watch for content ready = select.select([self._socket], [], [], 1) if ready[0]: # Socket is ready data, sender = self._socket.recvfrom(1024) try: self._handle_heartbeat(sender, data) except Exception as ex: _logger.exception("Error handling the heart beat: %s", ex)
python
def __read(self): """ Reads packets from the socket """ # Set the socket as non-blocking self._socket.setblocking(0) while not self._stop_event.is_set(): # Watch for content ready = select.select([self._socket], [], [], 1) if ready[0]: # Socket is ready data, sender = self._socket.recvfrom(1024) try: self._handle_heartbeat(sender, data) except Exception as ex: _logger.exception("Error handling the heart beat: %s", ex)
[ "def", "__read", "(", "self", ")", ":", "# Set the socket as non-blocking", "self", ".", "_socket", ".", "setblocking", "(", "0", ")", "while", "not", "self", ".", "_stop_event", ".", "is_set", "(", ")", ":", "# Watch for content", "ready", "=", "select", "....
Reads packets from the socket
[ "Reads", "packets", "from", "the", "socket" ]
train
https://github.com/cohorte/cohorte-herald/blob/bb3445d0031c8b3abad71e6219cc559b49faa3ee/python/herald/transports/http/discovery_multicast.py#L481-L497
hangyan/shaw
shaw/log.py
get_log_config
def get_log_config(component, handlers, level='DEBUG', path='/var/log/vfine/'): """Return a log config for django project.""" config = { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'standard': { 'format': '%(asctime)s [%(levelname)s][%(threadName)s]' + '[%(name)s.%(funcName)s():%(lineno)d] %(message)s' }, 'color': { '()': 'shaw.log.SplitColoredFormatter', 'format': "%(asctime)s " + "%(log_color)s%(bold)s[%(levelname)s]%(reset)s" + "[%(threadName)s][%(name)s.%(funcName)s():%(lineno)d] " + "%(blue)s%(message)s" } }, 'handlers': { 'debug': { 'level': 'DEBUG', 'class': 'logging.handlers.RotatingFileHandler', 'filename': path + component + '.debug.log', 'maxBytes': 1024 * 1024 * 1024, 'backupCount': 5, 'formatter': 'standard', }, 'color': { 'level': 'DEBUG', 'class': 'logging.handlers.RotatingFileHandler', 'filename': path + component + '.color.log', 'maxBytes': 1024 * 1024 * 1024, 'backupCount': 5, 'formatter': 'color', }, 'info': { 'level': 'INFO', 'class': 'logging.handlers.RotatingFileHandler', 'filename': path + component + '.info.log', 'maxBytes': 1024 * 1024 * 1024, 'backupCount': 5, 'formatter': 'standard', }, 'error': { 'level': 'ERROR', 'class': 'logging.handlers.RotatingFileHandler', 'filename': path + component + '.error.log', 'maxBytes': 1024 * 1024 * 100, 'backupCount': 5, 'formatter': 'standard', }, 'console': { 'level': level, 'class': 'logging.StreamHandler', 'formatter': 'standard' }, }, 'loggers': { 'django': { 'handlers': handlers, 'level': 'INFO', 'propagate': False }, 'django.request': { 'handlers': handlers, 'level': 'INFO', 'propagate': False, }, '': { 'handlers': handlers, 'level': level, 'propagate': False }, } } return config
python
def get_log_config(component, handlers, level='DEBUG', path='/var/log/vfine/'): """Return a log config for django project.""" config = { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'standard': { 'format': '%(asctime)s [%(levelname)s][%(threadName)s]' + '[%(name)s.%(funcName)s():%(lineno)d] %(message)s' }, 'color': { '()': 'shaw.log.SplitColoredFormatter', 'format': "%(asctime)s " + "%(log_color)s%(bold)s[%(levelname)s]%(reset)s" + "[%(threadName)s][%(name)s.%(funcName)s():%(lineno)d] " + "%(blue)s%(message)s" } }, 'handlers': { 'debug': { 'level': 'DEBUG', 'class': 'logging.handlers.RotatingFileHandler', 'filename': path + component + '.debug.log', 'maxBytes': 1024 * 1024 * 1024, 'backupCount': 5, 'formatter': 'standard', }, 'color': { 'level': 'DEBUG', 'class': 'logging.handlers.RotatingFileHandler', 'filename': path + component + '.color.log', 'maxBytes': 1024 * 1024 * 1024, 'backupCount': 5, 'formatter': 'color', }, 'info': { 'level': 'INFO', 'class': 'logging.handlers.RotatingFileHandler', 'filename': path + component + '.info.log', 'maxBytes': 1024 * 1024 * 1024, 'backupCount': 5, 'formatter': 'standard', }, 'error': { 'level': 'ERROR', 'class': 'logging.handlers.RotatingFileHandler', 'filename': path + component + '.error.log', 'maxBytes': 1024 * 1024 * 100, 'backupCount': 5, 'formatter': 'standard', }, 'console': { 'level': level, 'class': 'logging.StreamHandler', 'formatter': 'standard' }, }, 'loggers': { 'django': { 'handlers': handlers, 'level': 'INFO', 'propagate': False }, 'django.request': { 'handlers': handlers, 'level': 'INFO', 'propagate': False, }, '': { 'handlers': handlers, 'level': level, 'propagate': False }, } } return config
[ "def", "get_log_config", "(", "component", ",", "handlers", ",", "level", "=", "'DEBUG'", ",", "path", "=", "'/var/log/vfine/'", ")", ":", "config", "=", "{", "'version'", ":", "1", ",", "'disable_existing_loggers'", ":", "False", ",", "'formatters'", ":", "...
Return a log config for django project.
[ "Return", "a", "log", "config", "for", "django", "project", "." ]
train
https://github.com/hangyan/shaw/blob/63d01d35e225ba4edb9c61edaf351e1bc0e8fd15/shaw/log.py#L129-L204
hangyan/shaw
shaw/log.py
MultiProcessTimedRotatingFileHandler.doRollover
def doRollover(self): """ do a rollover; in this case, a date/time stamp is appended to the filename when the rollover happens. However, you want the file to be named for the start of the interval, not the current time. If there is a backup count, then we have to get a list of matching filenames, sort them and remove the one with the oldest suffix. """ # if self.stream: # self.stream.close() # get the time that this sequence started at and make it a TimeTuple t = self.rolloverAt - self.interval if self.utc: timeTuple = time.gmtime(t) else: timeTuple = time.localtime(t) dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple) # if os.path.exists(dfn): # os.remove(dfn) lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 0, 0, 0) fcntl.fcntl(self.stream, fcntl.F_SETLKW, lockdata) if not os.path.exists(dfn) and os.path.exists(self.baseFilename): os.rename(self.baseFilename, dfn) with open(self.baseFilename, 'a'): pass if self.backupCount > 0: # find the oldest log file and delete it # s = glob.glob(self.baseFilename + ".20*") # if len(s) > self.backupCount: # s.sort() # os.remove(s[0]) for s in self.getFilesToDelete(): os.remove(s) # print "%s -> %s" % (self.baseFilename, dfn) if self.stream: self.stream.close() self.mode = 'a' self.stream = self._open() currentTime = int(time.time()) newRolloverAt = self.computeRollover(currentTime) while newRolloverAt <= currentTime: newRolloverAt = newRolloverAt + self.interval # If DST changes and midnight or weekly rollover, adjust for this. if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc: dstNow = time.localtime(currentTime)[-1] dstAtRollover = time.localtime(newRolloverAt)[-1] if dstNow != dstAtRollover: if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour newRolloverAt = newRolloverAt - 3600 else: # DST bows out before next rollover, so we need to add an hour newRolloverAt = newRolloverAt + 3600 self.rolloverAt = newRolloverAt
python
def doRollover(self): """ do a rollover; in this case, a date/time stamp is appended to the filename when the rollover happens. However, you want the file to be named for the start of the interval, not the current time. If there is a backup count, then we have to get a list of matching filenames, sort them and remove the one with the oldest suffix. """ # if self.stream: # self.stream.close() # get the time that this sequence started at and make it a TimeTuple t = self.rolloverAt - self.interval if self.utc: timeTuple = time.gmtime(t) else: timeTuple = time.localtime(t) dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple) # if os.path.exists(dfn): # os.remove(dfn) lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 0, 0, 0) fcntl.fcntl(self.stream, fcntl.F_SETLKW, lockdata) if not os.path.exists(dfn) and os.path.exists(self.baseFilename): os.rename(self.baseFilename, dfn) with open(self.baseFilename, 'a'): pass if self.backupCount > 0: # find the oldest log file and delete it # s = glob.glob(self.baseFilename + ".20*") # if len(s) > self.backupCount: # s.sort() # os.remove(s[0]) for s in self.getFilesToDelete(): os.remove(s) # print "%s -> %s" % (self.baseFilename, dfn) if self.stream: self.stream.close() self.mode = 'a' self.stream = self._open() currentTime = int(time.time()) newRolloverAt = self.computeRollover(currentTime) while newRolloverAt <= currentTime: newRolloverAt = newRolloverAt + self.interval # If DST changes and midnight or weekly rollover, adjust for this. if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc: dstNow = time.localtime(currentTime)[-1] dstAtRollover = time.localtime(newRolloverAt)[-1] if dstNow != dstAtRollover: if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour newRolloverAt = newRolloverAt - 3600 else: # DST bows out before next rollover, so we need to add an hour newRolloverAt = newRolloverAt + 3600 self.rolloverAt = newRolloverAt
[ "def", "doRollover", "(", "self", ")", ":", "# if self.stream:", "# self.stream.close()", "# get the time that this sequence started at and make it a TimeTuple", "t", "=", "self", ".", "rolloverAt", "-", "self", ".", "interval", "if", "self", ".", "utc", ":", "timeTu...
do a rollover; in this case, a date/time stamp is appended to the filename when the rollover happens. However, you want the file to be named for the start of the interval, not the current time. If there is a backup count, then we have to get a list of matching filenames, sort them and remove the one with the oldest suffix.
[ "do", "a", "rollover", ";", "in", "this", "case", "a", "date", "/", "time", "stamp", "is", "appended", "to", "the", "filename", "when", "the", "rollover", "happens", ".", "However", "you", "want", "the", "file", "to", "be", "named", "for", "the", "star...
train
https://github.com/hangyan/shaw/blob/63d01d35e225ba4edb9c61edaf351e1bc0e8fd15/shaw/log.py#L24-L75
hangyan/shaw
shaw/log.py
SplitColoredFormatter.format
def format(self, record): """Format a message from a record object.""" record = ColoredRecord(record) record.log_color = self.color(self.log_colors, record.levelname) # Set secondary log colors if self.secondary_log_colors: for name, log_colors in self.secondary_log_colors.items(): color = self.color(log_colors, record.levelname) setattr(record, name + '_log_color', color) # Format the message if sys.version_info > (2, 7): message = super(ColoredFormatter, self).format(record) else: message = logging.Formatter.format(self, record) # Add a reset code to the end of the message # (if it wasn't explicitly added in format str) if self.reset and not message.endswith(escape_codes['reset']): message += escape_codes['reset'] if '|' in message: desc, data = message.split("|", 1) desc = desc + escape_codes['reset'] data = escape_codes['green'] + data message = desc + '|' + data return message
python
def format(self, record): """Format a message from a record object.""" record = ColoredRecord(record) record.log_color = self.color(self.log_colors, record.levelname) # Set secondary log colors if self.secondary_log_colors: for name, log_colors in self.secondary_log_colors.items(): color = self.color(log_colors, record.levelname) setattr(record, name + '_log_color', color) # Format the message if sys.version_info > (2, 7): message = super(ColoredFormatter, self).format(record) else: message = logging.Formatter.format(self, record) # Add a reset code to the end of the message # (if it wasn't explicitly added in format str) if self.reset and not message.endswith(escape_codes['reset']): message += escape_codes['reset'] if '|' in message: desc, data = message.split("|", 1) desc = desc + escape_codes['reset'] data = escape_codes['green'] + data message = desc + '|' + data return message
[ "def", "format", "(", "self", ",", "record", ")", ":", "record", "=", "ColoredRecord", "(", "record", ")", "record", ".", "log_color", "=", "self", ".", "color", "(", "self", ".", "log_colors", ",", "record", ".", "levelname", ")", "# Set secondary log col...
Format a message from a record object.
[ "Format", "a", "message", "from", "a", "record", "object", "." ]
train
https://github.com/hangyan/shaw/blob/63d01d35e225ba4edb9c61edaf351e1bc0e8fd15/shaw/log.py#L79-L107
kaka19ace/kkconst
kkconst/__init__.py
get_console_logger
def get_console_logger(): """ just for kkconst demos """ global __console_logger if __console_logger: return __console_logger logger = logging.getLogger("kkconst") logger.setLevel(logging.DEBUG) ch = logging.StreamHandler(sys.stdout) ch.setLevel(logging.DEBUG) formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') ch.setFormatter(formatter) logger.addHandler(ch) __console_logger = logger return logger
python
def get_console_logger(): """ just for kkconst demos """ global __console_logger if __console_logger: return __console_logger logger = logging.getLogger("kkconst") logger.setLevel(logging.DEBUG) ch = logging.StreamHandler(sys.stdout) ch.setLevel(logging.DEBUG) formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') ch.setFormatter(formatter) logger.addHandler(ch) __console_logger = logger return logger
[ "def", "get_console_logger", "(", ")", ":", "global", "__console_logger", "if", "__console_logger", ":", "return", "__console_logger", "logger", "=", "logging", ".", "getLogger", "(", "\"kkconst\"", ")", "logger", ".", "setLevel", "(", "logging", ".", "DEBUG", "...
just for kkconst demos
[ "just", "for", "kkconst", "demos" ]
train
https://github.com/kaka19ace/kkconst/blob/67a969a969edef4ea8b2752eae44223e878103c6/kkconst/__init__.py#L27-L44
salesking/salesking_python_sdk
salesking/utils/loaders.py
_load_referenced_schemes_from_list
def _load_referenced_schemes_from_list(the_list, val, a_scheme, a_property): """ takes the referenced files and loads them returns the updated schema :param the_list: :param val: :param a_scheme: :param a_property: :return: """ scheme = copy.copy(a_scheme) new_list = [] if isinstance(the_list, list): for an_item in the_list: if ((not isinstance(an_item, basestring)) and (u'$ref' in an_item.keys())): sub_scheme_name = generate_schema_name_from_uri(an_item['$ref']) content = load_schema(sub_scheme_name) new_list.append(content) else: # somewhere the array is not an array - payment_reminder sub_scheme_name = generate_schema_name_from_uri(the_list['$ref']) new_list = load_schema(sub_scheme_name) scheme['properties'][a_property]['items'] = new_list return scheme
python
def _load_referenced_schemes_from_list(the_list, val, a_scheme, a_property): """ takes the referenced files and loads them returns the updated schema :param the_list: :param val: :param a_scheme: :param a_property: :return: """ scheme = copy.copy(a_scheme) new_list = [] if isinstance(the_list, list): for an_item in the_list: if ((not isinstance(an_item, basestring)) and (u'$ref' in an_item.keys())): sub_scheme_name = generate_schema_name_from_uri(an_item['$ref']) content = load_schema(sub_scheme_name) new_list.append(content) else: # somewhere the array is not an array - payment_reminder sub_scheme_name = generate_schema_name_from_uri(the_list['$ref']) new_list = load_schema(sub_scheme_name) scheme['properties'][a_property]['items'] = new_list return scheme
[ "def", "_load_referenced_schemes_from_list", "(", "the_list", ",", "val", ",", "a_scheme", ",", "a_property", ")", ":", "scheme", "=", "copy", ".", "copy", "(", "a_scheme", ")", "new_list", "=", "[", "]", "if", "isinstance", "(", "the_list", ",", "list", "...
takes the referenced files and loads them returns the updated schema :param the_list: :param val: :param a_scheme: :param a_property: :return:
[ "takes", "the", "referenced", "files", "and", "loads", "them", "returns", "the", "updated", "schema", ":", "param", "the_list", ":", ":", "param", "val", ":", ":", "param", "a_scheme", ":", ":", "param", "a_property", ":", ":", "return", ":" ]
train
https://github.com/salesking/salesking_python_sdk/blob/0d5a95c5ee4e16a85562ceaf67bb11b55e47ee4c/salesking/utils/loaders.py#L79-L103
salesking/salesking_python_sdk
salesking/utils/loaders.py
_load_referenced_schema_from_properties
def _load_referenced_schema_from_properties(val, a_scheme, a_property): """ :return: updated scheme """ scheme = copy.copy(a_scheme) if _value_properties_are_referenced(val): ref_schema_uri = val['properties']['$ref'] sub_schema = load_ref_schema(ref_schema_uri) ## dereference the sub schema sub_schema_copy_level_0 = copy.deepcopy(sub_schema) # nesting level 1 # @TODO: NEEDS REFACTOR for prop_0 in sub_schema_copy_level_0['properties']: val_0 = sub_schema_copy_level_0['properties'][prop_0] # arrays may contain the nesting is_type_array_0 = (val_0['type'] == 'array') is_type_object_0 = (val_0['type'] == 'object') if ((is_type_array_0 or is_type_object_0) and (_value_properties_are_referenced(val_0))): # found a nested thingy sub_schema_copy_level_1 = _load_referenced_schema_from_properties(val_0, sub_schema_copy_level_0, prop_0) ### # one more loop level ### for prop_1 in sub_schema_copy_level_1['properties']: val_1 = sub_schema_copy_level_1['properties'][prop_1] is_type_array_1 = (val_1['type'] == 'array') is_type_object_1 = (val_1['type'] == 'object') if ((is_type_array_1 or is_type_object_1) and (_value_properties_are_referenced(val_1))): ### need to figure out a better way for loading # the referenced stuff # found a nested thingy # sub_schema_copy_level_2 = _load_referenced_schema_from_properties(val_1, sub_schema_copy_level_1, prop_1) raise SalesKingException("too much nesting in the schemes") if _value_is_required(val_0): # remove required sub_schema_copy_level_0['properties'][prop_0]['required'] = False # hack to bypass text format valitation to string if _value_is_type_text(val_0): log.debug("patched text to string") sub_schema_copy_level_0['properties'][prop_0]['type'] = u"string" # outer scheme scheme['properties'][a_property]['properties'] = sub_schema_copy_level_0['properties'] return scheme
python
def _load_referenced_schema_from_properties(val, a_scheme, a_property): """ :return: updated scheme """ scheme = copy.copy(a_scheme) if _value_properties_are_referenced(val): ref_schema_uri = val['properties']['$ref'] sub_schema = load_ref_schema(ref_schema_uri) ## dereference the sub schema sub_schema_copy_level_0 = copy.deepcopy(sub_schema) # nesting level 1 # @TODO: NEEDS REFACTOR for prop_0 in sub_schema_copy_level_0['properties']: val_0 = sub_schema_copy_level_0['properties'][prop_0] # arrays may contain the nesting is_type_array_0 = (val_0['type'] == 'array') is_type_object_0 = (val_0['type'] == 'object') if ((is_type_array_0 or is_type_object_0) and (_value_properties_are_referenced(val_0))): # found a nested thingy sub_schema_copy_level_1 = _load_referenced_schema_from_properties(val_0, sub_schema_copy_level_0, prop_0) ### # one more loop level ### for prop_1 in sub_schema_copy_level_1['properties']: val_1 = sub_schema_copy_level_1['properties'][prop_1] is_type_array_1 = (val_1['type'] == 'array') is_type_object_1 = (val_1['type'] == 'object') if ((is_type_array_1 or is_type_object_1) and (_value_properties_are_referenced(val_1))): ### need to figure out a better way for loading # the referenced stuff # found a nested thingy # sub_schema_copy_level_2 = _load_referenced_schema_from_properties(val_1, sub_schema_copy_level_1, prop_1) raise SalesKingException("too much nesting in the schemes") if _value_is_required(val_0): # remove required sub_schema_copy_level_0['properties'][prop_0]['required'] = False # hack to bypass text format valitation to string if _value_is_type_text(val_0): log.debug("patched text to string") sub_schema_copy_level_0['properties'][prop_0]['type'] = u"string" # outer scheme scheme['properties'][a_property]['properties'] = sub_schema_copy_level_0['properties'] return scheme
[ "def", "_load_referenced_schema_from_properties", "(", "val", ",", "a_scheme", ",", "a_property", ")", ":", "scheme", "=", "copy", ".", "copy", "(", "a_scheme", ")", "if", "_value_properties_are_referenced", "(", "val", ")", ":", "ref_schema_uri", "=", "val", "[...
:return: updated scheme
[ ":", "return", ":", "updated", "scheme" ]
train
https://github.com/salesking/salesking_python_sdk/blob/0d5a95c5ee4e16a85562ceaf67bb11b55e47ee4c/salesking/utils/loaders.py#L106-L156
salesking/salesking_python_sdk
salesking/utils/loaders.py
import_schema_to_json
def import_schema_to_json(name, store_it=False): """ loads the given schema name from the local filesystem and puts it into a store if it is not in there yet :param name: :param store_it: if set to True, stores the contents :return: """ schema_file = u"%s.json" % name file_path = os.path.join(SCHEMA_ROOT, schema_file) log.debug(u"trying to load %s " % file_path) schema = None try: schema_file = open(file_path, "r").read() except IOError, e: log.error(u"file not found %s" % e) msg = "Could not find schema file. %s" % file_path raise SalesKingException("SCHEMA_NOT_FOUND", msg) schema = json.loads(schema_file) if schema is None: msg = "loading failed foo %s" % name raise SalesKingException("SCHEMA_NOT_FOUND", msg) return schema
python
def import_schema_to_json(name, store_it=False): """ loads the given schema name from the local filesystem and puts it into a store if it is not in there yet :param name: :param store_it: if set to True, stores the contents :return: """ schema_file = u"%s.json" % name file_path = os.path.join(SCHEMA_ROOT, schema_file) log.debug(u"trying to load %s " % file_path) schema = None try: schema_file = open(file_path, "r").read() except IOError, e: log.error(u"file not found %s" % e) msg = "Could not find schema file. %s" % file_path raise SalesKingException("SCHEMA_NOT_FOUND", msg) schema = json.loads(schema_file) if schema is None: msg = "loading failed foo %s" % name raise SalesKingException("SCHEMA_NOT_FOUND", msg) return schema
[ "def", "import_schema_to_json", "(", "name", ",", "store_it", "=", "False", ")", ":", "schema_file", "=", "u\"%s.json\"", "%", "name", "file_path", "=", "os", ".", "path", ".", "join", "(", "SCHEMA_ROOT", ",", "schema_file", ")", "log", ".", "debug", "(", ...
loads the given schema name from the local filesystem and puts it into a store if it is not in there yet :param name: :param store_it: if set to True, stores the contents :return:
[ "loads", "the", "given", "schema", "name", "from", "the", "local", "filesystem", "and", "puts", "it", "into", "a", "store", "if", "it", "is", "not", "in", "there", "yet", ":", "param", "name", ":", ":", "param", "store_it", ":", "if", "set", "to", "T...
train
https://github.com/salesking/salesking_python_sdk/blob/0d5a95c5ee4e16a85562ceaf67bb11b55e47ee4c/salesking/utils/loaders.py#L159-L185
salesking/salesking_python_sdk
salesking/utils/loaders.py
load_schema
def load_schema(name): """ loads the schema by name :param name name of the model """ schema = import_schema_to_json(name) #salesking specific swap #//set link relation as key name to make it easier to call these for item in schema['links']: #//set link relation as key name to make it easier to call these # foreach($schema->links as $key => $link) # { # $schema->links[$link->rel] = $link; # unset($schema->links[$key]); # } # this here seems not to work as expected # something is wrong href_value = item['href'] rel_value = item['rel'] schema[rel_value] = href_value del item ## sk use nesting of schema ## dynamically loading for prop in schema['properties']: value = schema['properties'][prop] # arrays may contain the nesting is_type_array = (value['type'] == 'array') is_type_object = (value['type'] == 'object') if ((is_type_array or is_type_object) and (_value_properties_are_referenced(value))): schema = _load_referenced_schema_from_properties(value, schema, prop) if is_type_array and _value_is_default_any(value) and _value_has_items_key(value): schema = _load_referenced_schemes_from_list(value['items'], value, schema, prop) if _value_is_required(value): # remove required schema['properties'][prop]['required'] = False # hack to bypass text format valitation to string if _value_is_type_text(value): log.debug("patched text to string") schema['properties'][prop]['type'] = u"string" #ignore the readonly properties auto validation #if 'readonly' in value.keys() and value['readonly'] == True: # log.debug("patched required validation to none required") # schema['properties'][property]['readonly'] = False # sk works on title and not name schema['name'] = schema['title'] ## go one level deeper as we now have some replacements # put it to storage when done # if not JsonSchemaStore.is_stored(name) and (schema is not None): # JsonSchemaStore.copy_to_store(name, schema) return schema
python
def load_schema(name): """ loads the schema by name :param name name of the model """ schema = import_schema_to_json(name) #salesking specific swap #//set link relation as key name to make it easier to call these for item in schema['links']: #//set link relation as key name to make it easier to call these # foreach($schema->links as $key => $link) # { # $schema->links[$link->rel] = $link; # unset($schema->links[$key]); # } # this here seems not to work as expected # something is wrong href_value = item['href'] rel_value = item['rel'] schema[rel_value] = href_value del item ## sk use nesting of schema ## dynamically loading for prop in schema['properties']: value = schema['properties'][prop] # arrays may contain the nesting is_type_array = (value['type'] == 'array') is_type_object = (value['type'] == 'object') if ((is_type_array or is_type_object) and (_value_properties_are_referenced(value))): schema = _load_referenced_schema_from_properties(value, schema, prop) if is_type_array and _value_is_default_any(value) and _value_has_items_key(value): schema = _load_referenced_schemes_from_list(value['items'], value, schema, prop) if _value_is_required(value): # remove required schema['properties'][prop]['required'] = False # hack to bypass text format valitation to string if _value_is_type_text(value): log.debug("patched text to string") schema['properties'][prop]['type'] = u"string" #ignore the readonly properties auto validation #if 'readonly' in value.keys() and value['readonly'] == True: # log.debug("patched required validation to none required") # schema['properties'][property]['readonly'] = False # sk works on title and not name schema['name'] = schema['title'] ## go one level deeper as we now have some replacements # put it to storage when done # if not JsonSchemaStore.is_stored(name) and (schema is not None): # JsonSchemaStore.copy_to_store(name, schema) return schema
[ "def", "load_schema", "(", "name", ")", ":", "schema", "=", "import_schema_to_json", "(", "name", ")", "#salesking specific swap", "#//set link relation as key name to make it easier to call these", "for", "item", "in", "schema", "[", "'links'", "]", ":", "#//set link rel...
loads the schema by name :param name name of the model
[ "loads", "the", "schema", "by", "name", ":", "param", "name", "name", "of", "the", "model" ]
train
https://github.com/salesking/salesking_python_sdk/blob/0d5a95c5ee4e16a85562ceaf67bb11b55e47ee4c/salesking/utils/loaders.py#L200-L259
coded-by-hand/mass
env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/commands/zip.py
ZipCommand.paths
def paths(self): """All the entries of sys.path, possibly restricted by --path""" if not self.select_paths: return sys.path result = [] match_any = set() for path in sys.path: path = os.path.normcase(os.path.abspath(path)) for match in self.select_paths: match = os.path.normcase(os.path.abspath(match)) if '*' in match: if re.search(fnmatch.translate(match+'*'), path): result.append(path) match_any.add(match) break else: if path.startswith(match): result.append(path) match_any.add(match) break else: logger.debug("Skipping path %s because it doesn't match %s" % (path, ', '.join(self.select_paths))) for match in self.select_paths: if match not in match_any and '*' not in match: result.append(match) logger.debug("Adding path %s because it doesn't match anything already on sys.path" % match) return result
python
def paths(self): """All the entries of sys.path, possibly restricted by --path""" if not self.select_paths: return sys.path result = [] match_any = set() for path in sys.path: path = os.path.normcase(os.path.abspath(path)) for match in self.select_paths: match = os.path.normcase(os.path.abspath(match)) if '*' in match: if re.search(fnmatch.translate(match+'*'), path): result.append(path) match_any.add(match) break else: if path.startswith(match): result.append(path) match_any.add(match) break else: logger.debug("Skipping path %s because it doesn't match %s" % (path, ', '.join(self.select_paths))) for match in self.select_paths: if match not in match_any and '*' not in match: result.append(match) logger.debug("Adding path %s because it doesn't match anything already on sys.path" % match) return result
[ "def", "paths", "(", "self", ")", ":", "if", "not", "self", ".", "select_paths", ":", "return", "sys", ".", "path", "result", "=", "[", "]", "match_any", "=", "set", "(", ")", "for", "path", "in", "sys", ".", "path", ":", "path", "=", "os", ".", ...
All the entries of sys.path, possibly restricted by --path
[ "All", "the", "entries", "of", "sys", ".", "path", "possibly", "restricted", "by", "--", "path" ]
train
https://github.com/coded-by-hand/mass/blob/59005479efed3cd8598a8f0c66791a4482071899/env/lib/python2.7/site-packages/pip-1.0.2-py2.7.egg/pip/commands/zip.py#L58-L86
eReuse/utils
ereuse_utils/usb_flash_drive.py
plugged_usbs
def plugged_usbs(multiple=True) -> map or dict: """ Gets the plugged-in USB Flash drives (pen-drives). If multiple is true, it returns a map, and a dict otherwise. If multiple is false, this method will raise a :class:`.NoUSBFound` if no USB is found. """ class FindPenDrives(object): # From https://github.com/pyusb/pyusb/blob/master/docs/tutorial.rst def __init__(self, class_): self._class = class_ def __call__(self, device): # first, let's check the device if device.bDeviceClass == self._class: return True # ok, transverse all devices to find an # interface that matches our class for cfg in device: # find_descriptor: what's it? intf = usb.util.find_descriptor(cfg, bInterfaceClass=self._class) if intf is not None: try: product = intf.device.product.lower() except ValueError as e: if 'langid' in str(e): raise OSError('Cannot get "langid". Do you have permissions?') else: raise e if 'crw' not in product and 'reader' not in product: return True return False def get_pendrive(pen: usb.Device) -> dict: manufacturer = pen.manufacturer.strip() or str(pen.idVendor) model = pen.product.strip() or str(pen.idProduct) serial_number = pen.serial_number.strip() hid = Naming.hid(manufacturer, serial_number, model) return { '_id': hid, # Make live easier to DeviceHubClient by using _id 'hid': hid, '@type': 'USBFlashDrive', 'serialNumber': serial_number, 'model': model, 'manufacturer': manufacturer, 'vendorId': pen.idVendor, 'productId': pen.idProduct } result = usb.core.find(find_all=multiple, custom_match=FindPenDrives(CLASS_MASS_STORAGE)) if multiple: return map(get_pendrive, result) else: if not result: raise NoUSBFound() return get_pendrive(result)
python
def plugged_usbs(multiple=True) -> map or dict: """ Gets the plugged-in USB Flash drives (pen-drives). If multiple is true, it returns a map, and a dict otherwise. If multiple is false, this method will raise a :class:`.NoUSBFound` if no USB is found. """ class FindPenDrives(object): # From https://github.com/pyusb/pyusb/blob/master/docs/tutorial.rst def __init__(self, class_): self._class = class_ def __call__(self, device): # first, let's check the device if device.bDeviceClass == self._class: return True # ok, transverse all devices to find an # interface that matches our class for cfg in device: # find_descriptor: what's it? intf = usb.util.find_descriptor(cfg, bInterfaceClass=self._class) if intf is not None: try: product = intf.device.product.lower() except ValueError as e: if 'langid' in str(e): raise OSError('Cannot get "langid". Do you have permissions?') else: raise e if 'crw' not in product and 'reader' not in product: return True return False def get_pendrive(pen: usb.Device) -> dict: manufacturer = pen.manufacturer.strip() or str(pen.idVendor) model = pen.product.strip() or str(pen.idProduct) serial_number = pen.serial_number.strip() hid = Naming.hid(manufacturer, serial_number, model) return { '_id': hid, # Make live easier to DeviceHubClient by using _id 'hid': hid, '@type': 'USBFlashDrive', 'serialNumber': serial_number, 'model': model, 'manufacturer': manufacturer, 'vendorId': pen.idVendor, 'productId': pen.idProduct } result = usb.core.find(find_all=multiple, custom_match=FindPenDrives(CLASS_MASS_STORAGE)) if multiple: return map(get_pendrive, result) else: if not result: raise NoUSBFound() return get_pendrive(result)
[ "def", "plugged_usbs", "(", "multiple", "=", "True", ")", "->", "map", "or", "dict", ":", "class", "FindPenDrives", "(", "object", ")", ":", "# From https://github.com/pyusb/pyusb/blob/master/docs/tutorial.rst", "def", "__init__", "(", "self", ",", "class_", ")", ...
Gets the plugged-in USB Flash drives (pen-drives). If multiple is true, it returns a map, and a dict otherwise. If multiple is false, this method will raise a :class:`.NoUSBFound` if no USB is found.
[ "Gets", "the", "plugged", "-", "in", "USB", "Flash", "drives", "(", "pen", "-", "drives", ")", "." ]
train
https://github.com/eReuse/utils/blob/989062e85095ea4e1204523fe0e298cf1046a01c/ereuse_utils/usb_flash_drive.py#L8-L65
duniter/duniter-python-api
duniterpy/documents/transaction.py
reduce_base
def reduce_base(amount: int, base: int) -> tuple: """ Compute the reduced base of the given parameters :param amount: the amount value :param base: current base value :return: tuple containing computed (amount, base) """ if amount == 0: return 0, 0 next_amount = amount next_base = base next_amount_is_integer = True while next_amount_is_integer: amount = next_amount base = next_base if next_amount % 10 == 0: next_amount = int(next_amount / 10) next_base += 1 else: next_amount_is_integer = False return int(amount), int(base)
python
def reduce_base(amount: int, base: int) -> tuple: """ Compute the reduced base of the given parameters :param amount: the amount value :param base: current base value :return: tuple containing computed (amount, base) """ if amount == 0: return 0, 0 next_amount = amount next_base = base next_amount_is_integer = True while next_amount_is_integer: amount = next_amount base = next_base if next_amount % 10 == 0: next_amount = int(next_amount / 10) next_base += 1 else: next_amount_is_integer = False return int(amount), int(base)
[ "def", "reduce_base", "(", "amount", ":", "int", ",", "base", ":", "int", ")", "->", "tuple", ":", "if", "amount", "==", "0", ":", "return", "0", ",", "0", "next_amount", "=", "amount", "next_base", "=", "base", "next_amount_is_integer", "=", "True", "...
Compute the reduced base of the given parameters :param amount: the amount value :param base: current base value :return: tuple containing computed (amount, base)
[ "Compute", "the", "reduced", "base", "of", "the", "given", "parameters" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/transaction.py#L13-L37
duniter/duniter-python-api
duniterpy/documents/transaction.py
InputSource.from_inline
def from_inline(cls: Type[InputSourceType], tx_version: int, inline: str) -> InputSourceType: """ Return Transaction instance from inline string format :param tx_version: Version number of the document :param inline: Inline string format :return: """ if tx_version == 2: data = InputSource.re_inline.match(inline) if data is None: raise MalformedDocumentError("Inline input") source_offset = 0 amount = 0 base = 0 else: data = InputSource.re_inline_v3.match(inline) if data is None: raise MalformedDocumentError("Inline input") source_offset = 2 amount = int(data.group(1)) base = int(data.group(2)) if data.group(1 + source_offset): source = data.group(1 + source_offset) origin_id = data.group(2 + source_offset) index = int(data.group(3 + source_offset)) else: source = data.group(4 + source_offset) origin_id = data.group(5 + source_offset) index = int(data.group(6 + source_offset)) return cls(amount, base, source, origin_id, index)
python
def from_inline(cls: Type[InputSourceType], tx_version: int, inline: str) -> InputSourceType: """ Return Transaction instance from inline string format :param tx_version: Version number of the document :param inline: Inline string format :return: """ if tx_version == 2: data = InputSource.re_inline.match(inline) if data is None: raise MalformedDocumentError("Inline input") source_offset = 0 amount = 0 base = 0 else: data = InputSource.re_inline_v3.match(inline) if data is None: raise MalformedDocumentError("Inline input") source_offset = 2 amount = int(data.group(1)) base = int(data.group(2)) if data.group(1 + source_offset): source = data.group(1 + source_offset) origin_id = data.group(2 + source_offset) index = int(data.group(3 + source_offset)) else: source = data.group(4 + source_offset) origin_id = data.group(5 + source_offset) index = int(data.group(6 + source_offset)) return cls(amount, base, source, origin_id, index)
[ "def", "from_inline", "(", "cls", ":", "Type", "[", "InputSourceType", "]", ",", "tx_version", ":", "int", ",", "inline", ":", "str", ")", "->", "InputSourceType", ":", "if", "tx_version", "==", "2", ":", "data", "=", "InputSource", ".", "re_inline", "."...
Return Transaction instance from inline string format :param tx_version: Version number of the document :param inline: Inline string format :return:
[ "Return", "Transaction", "instance", "from", "inline", "string", "format" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/transaction.py#L97-L128
duniter/duniter-python-api
duniterpy/documents/transaction.py
InputSource.inline
def inline(self, tx_version: int) -> str: """ Return an inline string format of the document :param tx_version: Version number of the document :return: """ if tx_version == 2: return "{0}:{1}:{2}".format(self.source, self.origin_id, self.index) else: return "{0}:{1}:{2}:{3}:{4}".format(self.amount, self.base, self.source, self.origin_id, self.index)
python
def inline(self, tx_version: int) -> str: """ Return an inline string format of the document :param tx_version: Version number of the document :return: """ if tx_version == 2: return "{0}:{1}:{2}".format(self.source, self.origin_id, self.index) else: return "{0}:{1}:{2}:{3}:{4}".format(self.amount, self.base, self.source, self.origin_id, self.index)
[ "def", "inline", "(", "self", ",", "tx_version", ":", "int", ")", "->", "str", ":", "if", "tx_version", "==", "2", ":", "return", "\"{0}:{1}:{2}\"", ".", "format", "(", "self", ".", "source", ",", "self", ".", "origin_id", ",", "self", ".", "index", ...
Return an inline string format of the document :param tx_version: Version number of the document :return:
[ "Return", "an", "inline", "string", "format", "of", "the", "document" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/transaction.py#L130-L146
duniter/duniter-python-api
duniterpy/documents/transaction.py
OutputSource.from_inline
def from_inline(cls: Type[OutputSourceType], inline: str) -> OutputSourceType: """ Return OutputSource instance from inline string format :param inline: Inline string format :return: """ data = OutputSource.re_inline.match(inline) if data is None: raise MalformedDocumentError("Inline output") amount = int(data.group(1)) base = int(data.group(2)) condition_text = data.group(3) return cls(amount, base, condition_text)
python
def from_inline(cls: Type[OutputSourceType], inline: str) -> OutputSourceType: """ Return OutputSource instance from inline string format :param inline: Inline string format :return: """ data = OutputSource.re_inline.match(inline) if data is None: raise MalformedDocumentError("Inline output") amount = int(data.group(1)) base = int(data.group(2)) condition_text = data.group(3) return cls(amount, base, condition_text)
[ "def", "from_inline", "(", "cls", ":", "Type", "[", "OutputSourceType", "]", ",", "inline", ":", "str", ")", "->", "OutputSourceType", ":", "data", "=", "OutputSource", ".", "re_inline", ".", "match", "(", "inline", ")", "if", "data", "is", "None", ":", ...
Return OutputSource instance from inline string format :param inline: Inline string format :return:
[ "Return", "OutputSource", "instance", "from", "inline", "string", "format" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/transaction.py#L185-L199
duniter/duniter-python-api
duniterpy/documents/transaction.py
OutputSource.inline
def inline(self) -> str: """ Return an inline string format of the document :return: """ return "{0}:{1}:{2}".format(self.amount, self.base, pypeg2.compose(self.condition, output.Condition))
python
def inline(self) -> str: """ Return an inline string format of the document :return: """ return "{0}:{1}:{2}".format(self.amount, self.base, pypeg2.compose(self.condition, output.Condition))
[ "def", "inline", "(", "self", ")", "->", "str", ":", "return", "\"{0}:{1}:{2}\"", ".", "format", "(", "self", ".", "amount", ",", "self", ".", "base", ",", "pypeg2", ".", "compose", "(", "self", ".", "condition", ",", "output", ".", "Condition", ")", ...
Return an inline string format of the document :return:
[ "Return", "an", "inline", "string", "format", "of", "the", "document" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/transaction.py#L201-L208
duniter/duniter-python-api
duniterpy/documents/transaction.py
OutputSource.condition_from_text
def condition_from_text(text) -> Condition: """ Return a Condition instance with PEG grammar from text :param text: PEG parsable string :return: """ try: condition = pypeg2.parse(text, output.Condition) except SyntaxError: # Invalid conditions are possible, see https://github.com/duniter/duniter/issues/1156 # In such a case, they are store as empty PEG grammar object and considered unlockable condition = Condition(text) return condition
python
def condition_from_text(text) -> Condition: """ Return a Condition instance with PEG grammar from text :param text: PEG parsable string :return: """ try: condition = pypeg2.parse(text, output.Condition) except SyntaxError: # Invalid conditions are possible, see https://github.com/duniter/duniter/issues/1156 # In such a case, they are store as empty PEG grammar object and considered unlockable condition = Condition(text) return condition
[ "def", "condition_from_text", "(", "text", ")", "->", "Condition", ":", "try", ":", "condition", "=", "pypeg2", ".", "parse", "(", "text", ",", "output", ".", "Condition", ")", "except", "SyntaxError", ":", "# Invalid conditions are possible, see https://github.com/...
Return a Condition instance with PEG grammar from text :param text: PEG parsable string :return:
[ "Return", "a", "Condition", "instance", "with", "PEG", "grammar", "from", "text" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/transaction.py#L211-L224
duniter/duniter-python-api
duniterpy/documents/transaction.py
SIGParameter.from_parameter
def from_parameter(cls: Type[SIGParameterType], parameter: str) -> Optional[SIGParameterType]: """ Return a SIGParameter instance from an index parameter :param parameter: Index parameter :return: """ sig = SIGParameter.re_sig.match(parameter) if sig: return cls(int(sig.group(1))) return None
python
def from_parameter(cls: Type[SIGParameterType], parameter: str) -> Optional[SIGParameterType]: """ Return a SIGParameter instance from an index parameter :param parameter: Index parameter :return: """ sig = SIGParameter.re_sig.match(parameter) if sig: return cls(int(sig.group(1))) return None
[ "def", "from_parameter", "(", "cls", ":", "Type", "[", "SIGParameterType", "]", ",", "parameter", ":", "str", ")", "->", "Optional", "[", "SIGParameterType", "]", ":", "sig", "=", "SIGParameter", ".", "re_sig", ".", "match", "(", "parameter", ")", "if", ...
Return a SIGParameter instance from an index parameter :param parameter: Index parameter :return:
[ "Return", "a", "SIGParameter", "instance", "from", "an", "index", "parameter" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/transaction.py#L246-L258
duniter/duniter-python-api
duniterpy/documents/transaction.py
XHXParameter.from_parameter
def from_parameter(cls: Type[XHXParameterType], parameter: str) -> Optional[XHXParameterType]: """ Return a XHXParameter instance from an index parameter :param parameter: Index parameter :return: """ xhx = XHXParameter.re_xhx.match(parameter) if xhx: return cls(int(xhx.group(1))) return None
python
def from_parameter(cls: Type[XHXParameterType], parameter: str) -> Optional[XHXParameterType]: """ Return a XHXParameter instance from an index parameter :param parameter: Index parameter :return: """ xhx = XHXParameter.re_xhx.match(parameter) if xhx: return cls(int(xhx.group(1))) return None
[ "def", "from_parameter", "(", "cls", ":", "Type", "[", "XHXParameterType", "]", ",", "parameter", ":", "str", ")", "->", "Optional", "[", "XHXParameterType", "]", ":", "xhx", "=", "XHXParameter", ".", "re_xhx", ".", "match", "(", "parameter", ")", "if", ...
Return a XHXParameter instance from an index parameter :param parameter: Index parameter :return:
[ "Return", "a", "XHXParameter", "instance", "from", "an", "index", "parameter" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/transaction.py#L288-L300
duniter/duniter-python-api
duniterpy/documents/transaction.py
UnlockParameter.from_parameter
def from_parameter(cls: Type[UnlockParameterType], parameter: str) -> Optional[Union[SIGParameter, XHXParameter]]: """ Return UnlockParameter instance from parameter string :param parameter: Parameter string :return: """ sig_param = SIGParameter.from_parameter(parameter) if sig_param: return sig_param else: xhx_param = XHXParameter.from_parameter(parameter) if xhx_param: return xhx_param return None
python
def from_parameter(cls: Type[UnlockParameterType], parameter: str) -> Optional[Union[SIGParameter, XHXParameter]]: """ Return UnlockParameter instance from parameter string :param parameter: Parameter string :return: """ sig_param = SIGParameter.from_parameter(parameter) if sig_param: return sig_param else: xhx_param = XHXParameter.from_parameter(parameter) if xhx_param: return xhx_param return None
[ "def", "from_parameter", "(", "cls", ":", "Type", "[", "UnlockParameterType", "]", ",", "parameter", ":", "str", ")", "->", "Optional", "[", "Union", "[", "SIGParameter", ",", "XHXParameter", "]", "]", ":", "sig_param", "=", "SIGParameter", ".", "from_parame...
Return UnlockParameter instance from parameter string :param parameter: Parameter string :return:
[ "Return", "UnlockParameter", "instance", "from", "parameter", "string" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/transaction.py#L321-L337
duniter/duniter-python-api
duniterpy/documents/transaction.py
Unlock.from_inline
def from_inline(cls: Type[UnlockType], inline: str) -> UnlockType: """ Return an Unlock instance from inline string format :param inline: Inline string format :return: """ data = Unlock.re_inline.match(inline) if data is None: raise MalformedDocumentError("Inline input") index = int(data.group(1)) parameters_str = data.group(2).split(' ') parameters = [] for p in parameters_str: param = UnlockParameter.from_parameter(p) if param: parameters.append(param) return cls(index, parameters)
python
def from_inline(cls: Type[UnlockType], inline: str) -> UnlockType: """ Return an Unlock instance from inline string format :param inline: Inline string format :return: """ data = Unlock.re_inline.match(inline) if data is None: raise MalformedDocumentError("Inline input") index = int(data.group(1)) parameters_str = data.group(2).split(' ') parameters = [] for p in parameters_str: param = UnlockParameter.from_parameter(p) if param: parameters.append(param) return cls(index, parameters)
[ "def", "from_inline", "(", "cls", ":", "Type", "[", "UnlockType", "]", ",", "inline", ":", "str", ")", "->", "UnlockType", ":", "data", "=", "Unlock", ".", "re_inline", ".", "match", "(", "inline", ")", "if", "data", "is", "None", ":", "raise", "Malf...
Return an Unlock instance from inline string format :param inline: Inline string format :return:
[ "Return", "an", "Unlock", "instance", "from", "inline", "string", "format" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/transaction.py#L364-L382
duniter/duniter-python-api
duniterpy/documents/transaction.py
Unlock.inline
def inline(self) -> str: """ Return inline string format of the instance :return: """ return "{0}:{1}".format(self.index, ' '.join([str(p) for p in self.parameters]))
python
def inline(self) -> str: """ Return inline string format of the instance :return: """ return "{0}:{1}".format(self.index, ' '.join([str(p) for p in self.parameters]))
[ "def", "inline", "(", "self", ")", "->", "str", ":", "return", "\"{0}:{1}\"", ".", "format", "(", "self", ".", "index", ",", "' '", ".", "join", "(", "[", "str", "(", "p", ")", "for", "p", "in", "self", ".", "parameters", "]", ")", ")" ]
Return inline string format of the instance :return:
[ "Return", "inline", "string", "format", "of", "the", "instance" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/transaction.py#L384-L390
duniter/duniter-python-api
duniterpy/documents/transaction.py
Transaction.from_bma_history
def from_bma_history(cls: Type[TransactionType], currency: str, tx_data: Dict) -> TransactionType: """ Get the transaction instance from json :param currency: the currency of the tx :param tx_data: json data of the transaction :return: """ tx_data = tx_data.copy() tx_data["currency"] = currency for data_list in ('issuers', 'outputs', 'inputs', 'unlocks', 'signatures'): tx_data['multiline_{0}'.format(data_list)] = '\n'.join(tx_data[data_list]) if tx_data["version"] >= 3: signed_raw = """Version: {version} Type: Transaction Currency: {currency} Blockstamp: {blockstamp} Locktime: {locktime} Issuers: {multiline_issuers} Inputs: {multiline_inputs} Unlocks: {multiline_unlocks} Outputs: {multiline_outputs} Comment: {comment} {multiline_signatures} """.format(**tx_data) else: signed_raw = """Version: {version} Type: Transaction Currency: {currency} Locktime: {locktime} Issuers: {multiline_issuers} Inputs: {multiline_inputs} Unlocks: {multiline_unlocks} Outputs: {multiline_outputs} Comment: {comment} {multiline_signatures} """.format(**tx_data) return cls.from_signed_raw(signed_raw)
python
def from_bma_history(cls: Type[TransactionType], currency: str, tx_data: Dict) -> TransactionType: """ Get the transaction instance from json :param currency: the currency of the tx :param tx_data: json data of the transaction :return: """ tx_data = tx_data.copy() tx_data["currency"] = currency for data_list in ('issuers', 'outputs', 'inputs', 'unlocks', 'signatures'): tx_data['multiline_{0}'.format(data_list)] = '\n'.join(tx_data[data_list]) if tx_data["version"] >= 3: signed_raw = """Version: {version} Type: Transaction Currency: {currency} Blockstamp: {blockstamp} Locktime: {locktime} Issuers: {multiline_issuers} Inputs: {multiline_inputs} Unlocks: {multiline_unlocks} Outputs: {multiline_outputs} Comment: {comment} {multiline_signatures} """.format(**tx_data) else: signed_raw = """Version: {version} Type: Transaction Currency: {currency} Locktime: {locktime} Issuers: {multiline_issuers} Inputs: {multiline_inputs} Unlocks: {multiline_unlocks} Outputs: {multiline_outputs} Comment: {comment} {multiline_signatures} """.format(**tx_data) return cls.from_signed_raw(signed_raw)
[ "def", "from_bma_history", "(", "cls", ":", "Type", "[", "TransactionType", "]", ",", "currency", ":", "str", ",", "tx_data", ":", "Dict", ")", "->", "TransactionType", ":", "tx_data", "=", "tx_data", ".", "copy", "(", ")", "tx_data", "[", "\"currency\"", ...
Get the transaction instance from json :param currency: the currency of the tx :param tx_data: json data of the transaction :return:
[ "Get", "the", "transaction", "instance", "from", "json" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/transaction.py#L488-L534
duniter/duniter-python-api
duniterpy/documents/transaction.py
Transaction.from_compact
def from_compact(cls: Type[TransactionType], currency: str, compact: str) -> TransactionType: """ Return Transaction instance from compact string format :param currency: Name of the currency :param compact: Compact format string :return: """ lines = compact.splitlines(True) n = 0 header_data = Transaction.re_header.match(lines[n]) if header_data is None: raise MalformedDocumentError("Compact TX header") version = int(header_data.group(1)) issuers_num = int(header_data.group(2)) inputs_num = int(header_data.group(3)) unlocks_num = int(header_data.group(4)) outputs_num = int(header_data.group(5)) has_comment = int(header_data.group(6)) locktime = int(header_data.group(7)) n += 1 blockstamp = None # type: Optional[BlockUID] if version >= 3: blockstamp = BlockUID.from_str(Transaction.parse_field("CompactBlockstamp", lines[n])) n += 1 issuers = [] inputs = [] unlocks = [] outputs = [] signatures = [] for i in range(0, issuers_num): issuer = Transaction.parse_field("Pubkey", lines[n]) issuers.append(issuer) n += 1 for i in range(0, inputs_num): input_source = InputSource.from_inline(version, lines[n]) inputs.append(input_source) n += 1 for i in range(0, unlocks_num): unlock = Unlock.from_inline(lines[n]) unlocks.append(unlock) n += 1 for i in range(0, outputs_num): output_source = OutputSource.from_inline(lines[n]) outputs.append(output_source) n += 1 comment = "" if has_comment == 1: data = Transaction.re_compact_comment.match(lines[n]) if data: comment = data.group(1) n += 1 else: raise MalformedDocumentError("Compact TX Comment") while n < len(lines): data = Transaction.re_signature.match(lines[n]) if data: signatures.append(data.group(1)) n += 1 else: raise MalformedDocumentError("Compact TX Signatures") return cls(version, currency, blockstamp, locktime, issuers, inputs, unlocks, outputs, comment, signatures)
python
def from_compact(cls: Type[TransactionType], currency: str, compact: str) -> TransactionType: """ Return Transaction instance from compact string format :param currency: Name of the currency :param compact: Compact format string :return: """ lines = compact.splitlines(True) n = 0 header_data = Transaction.re_header.match(lines[n]) if header_data is None: raise MalformedDocumentError("Compact TX header") version = int(header_data.group(1)) issuers_num = int(header_data.group(2)) inputs_num = int(header_data.group(3)) unlocks_num = int(header_data.group(4)) outputs_num = int(header_data.group(5)) has_comment = int(header_data.group(6)) locktime = int(header_data.group(7)) n += 1 blockstamp = None # type: Optional[BlockUID] if version >= 3: blockstamp = BlockUID.from_str(Transaction.parse_field("CompactBlockstamp", lines[n])) n += 1 issuers = [] inputs = [] unlocks = [] outputs = [] signatures = [] for i in range(0, issuers_num): issuer = Transaction.parse_field("Pubkey", lines[n]) issuers.append(issuer) n += 1 for i in range(0, inputs_num): input_source = InputSource.from_inline(version, lines[n]) inputs.append(input_source) n += 1 for i in range(0, unlocks_num): unlock = Unlock.from_inline(lines[n]) unlocks.append(unlock) n += 1 for i in range(0, outputs_num): output_source = OutputSource.from_inline(lines[n]) outputs.append(output_source) n += 1 comment = "" if has_comment == 1: data = Transaction.re_compact_comment.match(lines[n]) if data: comment = data.group(1) n += 1 else: raise MalformedDocumentError("Compact TX Comment") while n < len(lines): data = Transaction.re_signature.match(lines[n]) if data: signatures.append(data.group(1)) n += 1 else: raise MalformedDocumentError("Compact TX Signatures") return cls(version, currency, blockstamp, locktime, issuers, inputs, unlocks, outputs, comment, signatures)
[ "def", "from_compact", "(", "cls", ":", "Type", "[", "TransactionType", "]", ",", "currency", ":", "str", ",", "compact", ":", "str", ")", "->", "TransactionType", ":", "lines", "=", "compact", ".", "splitlines", "(", "True", ")", "n", "=", "0", "heade...
Return Transaction instance from compact string format :param currency: Name of the currency :param compact: Compact format string :return:
[ "Return", "Transaction", "instance", "from", "compact", "string", "format" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/transaction.py#L537-L607
duniter/duniter-python-api
duniterpy/documents/transaction.py
Transaction.from_signed_raw
def from_signed_raw(cls: Type[TransactionType], raw: str) -> TransactionType: """ Return a Transaction instance from a raw string format :param raw: Raw string format :return: """ lines = raw.splitlines(True) n = 0 version = int(Transaction.parse_field("Version", lines[n])) n += 1 Transaction.parse_field("Type", lines[n]) n += 1 currency = Transaction.parse_field("Currency", lines[n]) n += 1 blockstamp = None # type: Optional[BlockUID] if version >= 3: blockstamp = BlockUID.from_str(Transaction.parse_field("Blockstamp", lines[n])) n += 1 locktime = Transaction.parse_field("Locktime", lines[n]) n += 1 issuers = [] inputs = [] unlocks = [] outputs = [] signatures = [] if Transaction.re_issuers.match(lines[n]): n += 1 while Transaction.re_inputs.match(lines[n]) is None: issuer = Transaction.parse_field("Pubkey", lines[n]) issuers.append(issuer) n += 1 if Transaction.re_inputs.match(lines[n]): n += 1 while Transaction.re_unlocks.match(lines[n]) is None: input_source = InputSource.from_inline(version, lines[n]) inputs.append(input_source) n += 1 if Transaction.re_unlocks.match(lines[n]): n += 1 while Transaction.re_outputs.match(lines[n]) is None: unlock = Unlock.from_inline(lines[n]) unlocks.append(unlock) n += 1 if Transaction.re_outputs.match(lines[n]) is not None: n += 1 while not Transaction.re_comment.match(lines[n]): _output = OutputSource.from_inline(lines[n]) outputs.append(_output) n += 1 comment = Transaction.parse_field("Comment", lines[n]) n += 1 if Transaction.re_signature.match(lines[n]) is not None: while n < len(lines): sign = Transaction.parse_field("Signature", lines[n]) signatures.append(sign) n += 1 return cls(version, currency, blockstamp, locktime, issuers, inputs, unlocks, outputs, comment, signatures)
python
def from_signed_raw(cls: Type[TransactionType], raw: str) -> TransactionType: """ Return a Transaction instance from a raw string format :param raw: Raw string format :return: """ lines = raw.splitlines(True) n = 0 version = int(Transaction.parse_field("Version", lines[n])) n += 1 Transaction.parse_field("Type", lines[n]) n += 1 currency = Transaction.parse_field("Currency", lines[n]) n += 1 blockstamp = None # type: Optional[BlockUID] if version >= 3: blockstamp = BlockUID.from_str(Transaction.parse_field("Blockstamp", lines[n])) n += 1 locktime = Transaction.parse_field("Locktime", lines[n]) n += 1 issuers = [] inputs = [] unlocks = [] outputs = [] signatures = [] if Transaction.re_issuers.match(lines[n]): n += 1 while Transaction.re_inputs.match(lines[n]) is None: issuer = Transaction.parse_field("Pubkey", lines[n]) issuers.append(issuer) n += 1 if Transaction.re_inputs.match(lines[n]): n += 1 while Transaction.re_unlocks.match(lines[n]) is None: input_source = InputSource.from_inline(version, lines[n]) inputs.append(input_source) n += 1 if Transaction.re_unlocks.match(lines[n]): n += 1 while Transaction.re_outputs.match(lines[n]) is None: unlock = Unlock.from_inline(lines[n]) unlocks.append(unlock) n += 1 if Transaction.re_outputs.match(lines[n]) is not None: n += 1 while not Transaction.re_comment.match(lines[n]): _output = OutputSource.from_inline(lines[n]) outputs.append(_output) n += 1 comment = Transaction.parse_field("Comment", lines[n]) n += 1 if Transaction.re_signature.match(lines[n]) is not None: while n < len(lines): sign = Transaction.parse_field("Signature", lines[n]) signatures.append(sign) n += 1 return cls(version, currency, blockstamp, locktime, issuers, inputs, unlocks, outputs, comment, signatures)
[ "def", "from_signed_raw", "(", "cls", ":", "Type", "[", "TransactionType", "]", ",", "raw", ":", "str", ")", "->", "TransactionType", ":", "lines", "=", "raw", ".", "splitlines", "(", "True", ")", "n", "=", "0", "version", "=", "int", "(", "Transaction...
Return a Transaction instance from a raw string format :param raw: Raw string format :return:
[ "Return", "a", "Transaction", "instance", "from", "a", "raw", "string", "format" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/transaction.py#L610-L682
duniter/duniter-python-api
duniterpy/documents/transaction.py
Transaction.raw
def raw(self) -> str: """ Return raw string format from the instance :return: """ doc = """Version: {0} Type: Transaction Currency: {1} """.format(self.version, self.currency) if self.version >= 3: doc += "Blockstamp: {0}\n".format(self.blockstamp) doc += "Locktime: {0}\n".format(self.locktime) doc += "Issuers:\n" for p in self.issuers: doc += "{0}\n".format(p) doc += "Inputs:\n" for i in self.inputs: doc += "{0}\n".format(i.inline(self.version)) doc += "Unlocks:\n" for u in self.unlocks: doc += "{0}\n".format(u.inline()) doc += "Outputs:\n" for o in self.outputs: doc += "{0}\n".format(o.inline()) doc += "Comment: " doc += "{0}\n".format(self.comment) return doc
python
def raw(self) -> str: """ Return raw string format from the instance :return: """ doc = """Version: {0} Type: Transaction Currency: {1} """.format(self.version, self.currency) if self.version >= 3: doc += "Blockstamp: {0}\n".format(self.blockstamp) doc += "Locktime: {0}\n".format(self.locktime) doc += "Issuers:\n" for p in self.issuers: doc += "{0}\n".format(p) doc += "Inputs:\n" for i in self.inputs: doc += "{0}\n".format(i.inline(self.version)) doc += "Unlocks:\n" for u in self.unlocks: doc += "{0}\n".format(u.inline()) doc += "Outputs:\n" for o in self.outputs: doc += "{0}\n".format(o.inline()) doc += "Comment: " doc += "{0}\n".format(self.comment) return doc
[ "def", "raw", "(", "self", ")", "->", "str", ":", "doc", "=", "\"\"\"Version: {0}\nType: Transaction\nCurrency: {1}\n\"\"\"", ".", "format", "(", "self", ".", "version", ",", "self", ".", "currency", ")", "if", "self", ".", "version", ">=", "3", ":", "doc", ...
Return raw string format from the instance :return:
[ "Return", "raw", "string", "format", "from", "the", "instance" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/transaction.py#L684-L720
duniter/duniter-python-api
duniterpy/documents/transaction.py
Transaction.compact
def compact(self) -> str: """ Return a transaction in its compact format from the instance :return: """ """TX:VERSION:NB_ISSUERS:NB_INPUTS:NB_UNLOCKS:NB_OUTPUTS:HAS_COMMENT:LOCKTIME PUBLIC_KEY:INDEX ... INDEX:SOURCE:FINGERPRINT:AMOUNT ... PUBLIC_KEY:AMOUNT ... COMMENT """ doc = "TX:{0}:{1}:{2}:{3}:{4}:{5}:{6}\n".format(self.version, len(self.issuers), len(self.inputs), len(self.unlocks), len(self.outputs), '1' if self.comment != "" else '0', self.locktime) if self.version >= 3: doc += "{0}\n".format(self.blockstamp) for pubkey in self.issuers: doc += "{0}\n".format(pubkey) for i in self.inputs: doc += "{0}\n".format(i.inline(self.version)) for u in self.unlocks: doc += "{0}\n".format(u.inline()) for o in self.outputs: doc += "{0}\n".format(o.inline()) if self.comment != "": doc += "{0}\n".format(self.comment) for s in self.signatures: doc += "{0}\n".format(s) return doc
python
def compact(self) -> str: """ Return a transaction in its compact format from the instance :return: """ """TX:VERSION:NB_ISSUERS:NB_INPUTS:NB_UNLOCKS:NB_OUTPUTS:HAS_COMMENT:LOCKTIME PUBLIC_KEY:INDEX ... INDEX:SOURCE:FINGERPRINT:AMOUNT ... PUBLIC_KEY:AMOUNT ... COMMENT """ doc = "TX:{0}:{1}:{2}:{3}:{4}:{5}:{6}\n".format(self.version, len(self.issuers), len(self.inputs), len(self.unlocks), len(self.outputs), '1' if self.comment != "" else '0', self.locktime) if self.version >= 3: doc += "{0}\n".format(self.blockstamp) for pubkey in self.issuers: doc += "{0}\n".format(pubkey) for i in self.inputs: doc += "{0}\n".format(i.inline(self.version)) for u in self.unlocks: doc += "{0}\n".format(u.inline()) for o in self.outputs: doc += "{0}\n".format(o.inline()) if self.comment != "": doc += "{0}\n".format(self.comment) for s in self.signatures: doc += "{0}\n".format(s) return doc
[ "def", "compact", "(", "self", ")", "->", "str", ":", "\"\"\"TX:VERSION:NB_ISSUERS:NB_INPUTS:NB_UNLOCKS:NB_OUTPUTS:HAS_COMMENT:LOCKTIME\nPUBLIC_KEY:INDEX\n...\nINDEX:SOURCE:FINGERPRINT:AMOUNT\n...\nPUBLIC_KEY:AMOUNT\n...\nCOMMENT\n\"\"\"", "doc", "=", "\"TX:{0}:{1}:{2}:{3}:{4}:{5}:{6}\\n\"", ...
Return a transaction in its compact format from the instance :return:
[ "Return", "a", "transaction", "in", "its", "compact", "format", "from", "the", "instance" ]
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/transaction.py#L722-L760
duniter/duniter-python-api
duniterpy/documents/transaction.py
SimpleTransaction.is_simple
def is_simple(tx: Transaction) -> bool: """ Filter a transaction and checks if it is a basic one A simple transaction is a tx which has only one issuer and two outputs maximum. The unlocks must be done with simple "SIG" functions, and the outputs must be simple SIG conditions. :param tx: the transaction to check :return: True if a simple transaction """ simple = True if len(tx.issuers) != 1: simple = False for unlock in tx.unlocks: if len(unlock.parameters) != 1: simple = False elif type(unlock.parameters[0]) is not SIGParameter: simple = False for o in tx.outputs: # if right condition is not None... if getattr(o.condition, 'right', None): simple = False # if left is not SIG... elif type(o.condition.left) is not output.SIG: simple = False return simple
python
def is_simple(tx: Transaction) -> bool: """ Filter a transaction and checks if it is a basic one A simple transaction is a tx which has only one issuer and two outputs maximum. The unlocks must be done with simple "SIG" functions, and the outputs must be simple SIG conditions. :param tx: the transaction to check :return: True if a simple transaction """ simple = True if len(tx.issuers) != 1: simple = False for unlock in tx.unlocks: if len(unlock.parameters) != 1: simple = False elif type(unlock.parameters[0]) is not SIGParameter: simple = False for o in tx.outputs: # if right condition is not None... if getattr(o.condition, 'right', None): simple = False # if left is not SIG... elif type(o.condition.left) is not output.SIG: simple = False return simple
[ "def", "is_simple", "(", "tx", ":", "Transaction", ")", "->", "bool", ":", "simple", "=", "True", "if", "len", "(", "tx", ".", "issuers", ")", "!=", "1", ":", "simple", "=", "False", "for", "unlock", "in", "tx", ".", "unlocks", ":", "if", "len", ...
Filter a transaction and checks if it is a basic one A simple transaction is a tx which has only one issuer and two outputs maximum. The unlocks must be done with simple "SIG" functions, and the outputs must be simple SIG conditions. :param tx: the transaction to check :return: True if a simple transaction
[ "Filter", "a", "transaction", "and", "checks", "if", "it", "is", "a", "basic", "one", "A", "simple", "transaction", "is", "a", "tx", "which", "has", "only", "one", "issuer", "and", "two", "outputs", "maximum", ".", "The", "unlocks", "must", "be", "done",...
train
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/documents/transaction.py#L790-L818
adammhaile/gitdata
gitdata/exectools.py
Exec.retry
def retry(self, retries, task_f, check_f=bool, wait_f=None): """ Try a function up to n times. Raise an exception if it does not pass in time :param retries int: The number of times to retry :param task_f func: The function to be run and observed :param func()bool check_f: a function to check if task_f is complete :param func()bool wait_f: a function to run between checks """ for attempt in range(retries): ret = task_f() if check_f(ret): return ret if attempt < retries - 1 and wait_f is not None: wait_f(attempt) raise RetryException("Giving up after {} failed attempt(s)".format(retries))
python
def retry(self, retries, task_f, check_f=bool, wait_f=None): """ Try a function up to n times. Raise an exception if it does not pass in time :param retries int: The number of times to retry :param task_f func: The function to be run and observed :param func()bool check_f: a function to check if task_f is complete :param func()bool wait_f: a function to run between checks """ for attempt in range(retries): ret = task_f() if check_f(ret): return ret if attempt < retries - 1 and wait_f is not None: wait_f(attempt) raise RetryException("Giving up after {} failed attempt(s)".format(retries))
[ "def", "retry", "(", "self", ",", "retries", ",", "task_f", ",", "check_f", "=", "bool", ",", "wait_f", "=", "None", ")", ":", "for", "attempt", "in", "range", "(", "retries", ")", ":", "ret", "=", "task_f", "(", ")", "if", "check_f", "(", "ret", ...
Try a function up to n times. Raise an exception if it does not pass in time :param retries int: The number of times to retry :param task_f func: The function to be run and observed :param func()bool check_f: a function to check if task_f is complete :param func()bool wait_f: a function to run between checks
[ "Try", "a", "function", "up", "to", "n", "times", ".", "Raise", "an", "exception", "if", "it", "does", "not", "pass", "in", "time" ]
train
https://github.com/adammhaile/gitdata/blob/93112899737d63855655d438e3027192abd76a37/gitdata/exectools.py#L33-L49
adammhaile/gitdata
gitdata/exectools.py
Exec.check_assert
def check_assert(self, cmd, retries=1, pollrate=60, on_retry=None): """ Run a command, logging (using gather) and raise an exception if the return code of the command indicates failure. Try the command multiple times if requested. :param cmd <string|list>: A shell command :param retries int: The number of times to try before declaring failure :param pollrate int: how long to sleep between tries :param on_retry <string|list>: A shell command to run before retrying a failure :return: (stdout,stderr) if exit code is zero """ for try_num in range(0, retries): if try_num > 0: self.logger.debug( "assert: Failed {} times. Retrying in {} seconds: {}". format(try_num, pollrate, cmd)) time.sleep(pollrate) if on_retry is not None: self.gather(on_retry) # no real use for the result though result, stdout, stderr = self.gather(cmd) if result == SUCCESS: break self.logger.debug("assert: Final result = {} in {} tries.".format(result, try_num)) assertion.success( result, "Error running [{}] {}.\n{}". format(pushd.Dir.getcwd(), cmd, stderr)) return stdout, stderr
python
def check_assert(self, cmd, retries=1, pollrate=60, on_retry=None): """ Run a command, logging (using gather) and raise an exception if the return code of the command indicates failure. Try the command multiple times if requested. :param cmd <string|list>: A shell command :param retries int: The number of times to try before declaring failure :param pollrate int: how long to sleep between tries :param on_retry <string|list>: A shell command to run before retrying a failure :return: (stdout,stderr) if exit code is zero """ for try_num in range(0, retries): if try_num > 0: self.logger.debug( "assert: Failed {} times. Retrying in {} seconds: {}". format(try_num, pollrate, cmd)) time.sleep(pollrate) if on_retry is not None: self.gather(on_retry) # no real use for the result though result, stdout, stderr = self.gather(cmd) if result == SUCCESS: break self.logger.debug("assert: Final result = {} in {} tries.".format(result, try_num)) assertion.success( result, "Error running [{}] {}.\n{}". format(pushd.Dir.getcwd(), cmd, stderr)) return stdout, stderr
[ "def", "check_assert", "(", "self", ",", "cmd", ",", "retries", "=", "1", ",", "pollrate", "=", "60", ",", "on_retry", "=", "None", ")", ":", "for", "try_num", "in", "range", "(", "0", ",", "retries", ")", ":", "if", "try_num", ">", "0", ":", "se...
Run a command, logging (using gather) and raise an exception if the return code of the command indicates failure. Try the command multiple times if requested. :param cmd <string|list>: A shell command :param retries int: The number of times to try before declaring failure :param pollrate int: how long to sleep between tries :param on_retry <string|list>: A shell command to run before retrying a failure :return: (stdout,stderr) if exit code is zero
[ "Run", "a", "command", "logging", "(", "using", "gather", ")", "and", "raise", "an", "exception", "if", "the", "return", "code", "of", "the", "command", "indicates", "failure", ".", "Try", "the", "command", "multiple", "times", "if", "requested", "." ]
train
https://github.com/adammhaile/gitdata/blob/93112899737d63855655d438e3027192abd76a37/gitdata/exectools.py#L51-L84
adammhaile/gitdata
gitdata/exectools.py
Exec.gather
def gather(self, cmd): """ Runs a command and returns rc,stdout,stderr as a tuple. If called while the `Dir` context manager is in effect, guarantees that the process is executed in that directory, even if it is no longer the current directory of the process (i.e. it is thread-safe). :param cmd: The command and arguments to execute :return: (rc,stdout,stderr) """ if not isinstance(cmd, list): cmd_list = shlex.split(cmd) else: cmd_list = cmd cwd = pushd.Dir.getcwd() cmd_info = '[cwd={}]: {}'.format(cwd, cmd_list) self.logger.debug("Executing:gather {}".format(cmd_info)) proc = subprocess.Popen( cmd_list, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = proc.communicate() rc = proc.returncode self.logger.debug( "Process {}: exited with: {}\nstdout>>{}<<\nstderr>>{}<<\n". format(cmd_info, rc, out, err)) return rc, out, err
python
def gather(self, cmd): """ Runs a command and returns rc,stdout,stderr as a tuple. If called while the `Dir` context manager is in effect, guarantees that the process is executed in that directory, even if it is no longer the current directory of the process (i.e. it is thread-safe). :param cmd: The command and arguments to execute :return: (rc,stdout,stderr) """ if not isinstance(cmd, list): cmd_list = shlex.split(cmd) else: cmd_list = cmd cwd = pushd.Dir.getcwd() cmd_info = '[cwd={}]: {}'.format(cwd, cmd_list) self.logger.debug("Executing:gather {}".format(cmd_info)) proc = subprocess.Popen( cmd_list, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = proc.communicate() rc = proc.returncode self.logger.debug( "Process {}: exited with: {}\nstdout>>{}<<\nstderr>>{}<<\n". format(cmd_info, rc, out, err)) return rc, out, err
[ "def", "gather", "(", "self", ",", "cmd", ")", ":", "if", "not", "isinstance", "(", "cmd", ",", "list", ")", ":", "cmd_list", "=", "shlex", ".", "split", "(", "cmd", ")", "else", ":", "cmd_list", "=", "cmd", "cwd", "=", "pushd", ".", "Dir", ".", ...
Runs a command and returns rc,stdout,stderr as a tuple. If called while the `Dir` context manager is in effect, guarantees that the process is executed in that directory, even if it is no longer the current directory of the process (i.e. it is thread-safe). :param cmd: The command and arguments to execute :return: (rc,stdout,stderr)
[ "Runs", "a", "command", "and", "returns", "rc", "stdout", "stderr", "as", "a", "tuple", "." ]
train
https://github.com/adammhaile/gitdata/blob/93112899737d63855655d438e3027192abd76a37/gitdata/exectools.py#L86-L115
myles/django-jsonfeed
jsonfeed/core.py
JSONFeed.json_serial
def json_serial(obj): """ Custom JSON serializer for objects not serializable by default. """ if isinstance(obj, (datetime.datetime, datetime.date)): return obj.isoformat() raise TypeError('Type {} not serializable.'.format(type(obj)))
python
def json_serial(obj): """ Custom JSON serializer for objects not serializable by default. """ if isinstance(obj, (datetime.datetime, datetime.date)): return obj.isoformat() raise TypeError('Type {} not serializable.'.format(type(obj)))
[ "def", "json_serial", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "(", "datetime", ".", "datetime", ",", "datetime", ".", "date", ")", ")", ":", "return", "obj", ".", "isoformat", "(", ")", "raise", "TypeError", "(", "'Type {} not serializ...
Custom JSON serializer for objects not serializable by default.
[ "Custom", "JSON", "serializer", "for", "objects", "not", "serializable", "by", "default", "." ]
train
https://github.com/myles/django-jsonfeed/blob/6ae77d525c412e7a2ec2b3b5158e81d2728218bd/jsonfeed/core.py#L14-L22
etcher-be/epab
epab/cmd/_next_version.py
next_version
def next_version(ctx: click.Context, _, value): """ Prints next version (according to Gitversion run) then exits """ if not value or ctx.resilient_parsing: return config.QUIET.default = True CTX.repo = epab.utils.Repo() print(epab.utils.get_next_version()) sys.exit(0)
python
def next_version(ctx: click.Context, _, value): """ Prints next version (according to Gitversion run) then exits """ if not value or ctx.resilient_parsing: return config.QUIET.default = True CTX.repo = epab.utils.Repo() print(epab.utils.get_next_version()) sys.exit(0)
[ "def", "next_version", "(", "ctx", ":", "click", ".", "Context", ",", "_", ",", "value", ")", ":", "if", "not", "value", "or", "ctx", ".", "resilient_parsing", ":", "return", "config", ".", "QUIET", ".", "default", "=", "True", "CTX", ".", "repo", "=...
Prints next version (according to Gitversion run) then exits
[ "Prints", "next", "version", "(", "according", "to", "Gitversion", "run", ")", "then", "exits" ]
train
https://github.com/etcher-be/epab/blob/024cde74d058281aa66e6e4b7b71dccbe803b1c1/epab/cmd/_next_version.py#L14-L27