Dataset columns (with value-length statistics):

Column              Type            Min    Max
nwo                 stringlengths   5      106
sha                 stringlengths   40     40
path                stringlengths   4      174
language            stringclasses   1 value
identifier          stringlengths   1      140
parameters          stringlengths   0      87.7k
argument_list       stringclasses   1 value
return_statement    stringlengths   0      426k
docstring           stringlengths   0      64.3k
docstring_summary   stringlengths   0      26.3k
docstring_tokens    list
function            stringlengths   18     4.83M
function_tokens     list
url                 stringlengths   83     304
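Each record below lists its field values in the order given above. For orientation, here is a hypothetical row rendered as a Python dict (all values invented for illustration, not taken from the dataset):

# A hypothetical example row matching the schema above (values invented).
row = {
    "nwo": "owner/repo",                          # repository, 5-106 chars
    "sha": "0" * 40,                              # commit hash, always 40 chars
    "path": "pkg/module.py",                      # file path inside the repo
    "language": "python",                         # single class: always "python"
    "identifier": "Class.method",                 # function or method name
    "parameters": "(self, arg)",                  # parameter list as a string
    "argument_list": "",                          # single class (empty)
    "return_statement": "return value",           # may be empty
    "docstring": "Do the thing. Extra detail.",
    "docstring_summary": "Do the thing.",
    "docstring_tokens": ["Do", "the", "thing", "."],
    "function": "def method(self, arg):\n    return value",
    "function_tokens": ["def", "method", "(", "self", ",", "arg", ")", ":"],
    "url": "https://github.com/owner/repo/blob/0000000000000000000000000000000000000000/pkg/module.py#L1-L2",
}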
conan-io/conan
28ec09f6cbf1d7e27ec27393fd7bbc74891e74a8
conan/tools/apple/xcodedeps.py
python
XcodeDeps._conf_xconfig_file
(self, dep_name, vars_xconfig_name)
return content_multi
content for conan_poco_x86_release.xcconfig, containing the activation
content for conan_poco_x86_release.xcconfig, containing the activation
[ "content", "for", "conan_poco_x86_release", ".", "xcconfig", "containing", "the", "activation" ]
def _conf_xconfig_file(self, dep_name, vars_xconfig_name):
    """
    content for conan_poco_x86_release.xcconfig, containing the activation
    """
    # TODO: when it's more clear what to do with the sdk, add the condition for it and also
    #  we are not taking into account the version for the sdk because we probably
    #  want to model also the sdk version decoupled of the compiler version
    #  for example XCode 13 is now using sdk=macosx11.3
    #  related to: https://github.com/conan-io/conan/issues/9608
    template = Template(self._conf_xconfig)
    content_multi = template.render(name=dep_name, vars_filename=vars_xconfig_name)
    return content_multi
[ "def", "_conf_xconfig_file", "(", "self", ",", "dep_name", ",", "vars_xconfig_name", ")", ":", "# TODO: when it's more clear what to do with the sdk, add the condition for it and also", "# we are not taking into account the version for the sdk because we probably", "# want to model also th...
https://github.com/conan-io/conan/blob/28ec09f6cbf1d7e27ec27393fd7bbc74891e74a8/conan/tools/apple/xcodedeps.py#L127-L138
phantomcyber/playbooks
9e850ecc44cb98c5dde53784744213a1ed5799bd
excessive_account_lockouts_enrichment_and_response.py
python
endpoint_rights_mod
(action=None, success=None, container=None, results=None, handle=None, filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs)
return
[]
def endpoint_rights_mod(action=None, success=None, container=None, results=None, handle=None,
                        filtered_artifacts=None, filtered_results=None, custom_function=None, **kwargs):
    phantom.debug('endpoint_rights_mod() called')

    template = """| search index=wineventlog (EventCode=4718 OR EventCode=4717) dest={0} | rename user as \"Account Modified\" | table _time, dest, \"Account Modified\", Access_Right, signature"""

    # parameter list for template variable replacement
    parameters = [
        "artifact:*.cef.destinationAddress",
    ]

    phantom.format(container=container, template=template, parameters=parameters, name="endpoint_rights_mod")

    query_endpoint_rights(container=container)

    return
[ "def", "endpoint_rights_mod", "(", "action", "=", "None", ",", "success", "=", "None", ",", "container", "=", "None", ",", "results", "=", "None", ",", "handle", "=", "None", ",", "filtered_artifacts", "=", "None", ",", "filtered_results", "=", "None", ","...
https://github.com/phantomcyber/playbooks/blob/9e850ecc44cb98c5dde53784744213a1ed5799bd/excessive_account_lockouts_enrichment_and_response.py#L285-L299
edfungus/Crouton
ada98b3930192938a48909072b45cb84b945f875
clients/python_clients/venv/lib/python2.7/site-packages/pip/_vendor/distlib/locators.py
python
DirectoryLocator.get_distribution_names
(self)
return result
Return all the distribution names known to this locator.
Return all the distribution names known to this locator.
[ "Return", "all", "the", "distribution", "names", "known", "to", "this", "locator", "." ]
def get_distribution_names(self):
    """
    Return all the distribution names known to this locator.
    """
    result = set()
    for root, dirs, files in os.walk(self.base_dir):
        for fn in files:
            if self.should_include(fn, root):
                fn = os.path.join(root, fn)
                url = urlunparse(('file', '',
                                  pathname2url(os.path.abspath(fn)),
                                  '', '', ''))
                info = self.convert_url_to_download_info(url, None)
                if info:
                    result.add(info['name'])
        if not self.recursive:
            break
    return result
[ "def", "get_distribution_names", "(", "self", ")", ":", "result", "=", "set", "(", ")", "for", "root", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "self", ".", "base_dir", ")", ":", "for", "fn", "in", "files", ":", "if", "self", ".", ...
https://github.com/edfungus/Crouton/blob/ada98b3930192938a48909072b45cb84b945f875/clients/python_clients/venv/lib/python2.7/site-packages/pip/_vendor/distlib/locators.py#L843-L860
noio/peas
4c2503388644cc5ca58c501e6c773d53a883538b
peas/methods/evolution.py
python
SimplePopulation._birth
(self)
return self.population
Creates a population if there is none, returns current population otherwise.
Creates a population if there is none, returns current population otherwise.
[ "Creates", "a", "population", "if", "there", "is", "none", "returns", "current", "population", "otherwise", "." ]
def _birth(self):
    """ Creates a population if there is none, returns current population otherwise. """
    while len(self.population) < self.popsize:
        individual = self.geno_factory()
        self.population.append(individual)
    return self.population
[ "def", "_birth", "(", "self", ")", ":", "while", "len", "(", "self", ".", "population", ")", "<", "self", ".", "popsize", ":", "individual", "=", "self", ".", "geno_factory", "(", ")", "self", ".", "population", ".", "append", "(", "individual", ")", ...
https://github.com/noio/peas/blob/4c2503388644cc5ca58c501e6c773d53a883538b/peas/methods/evolution.py#L116-L124
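The `_birth` helper is a simple top-up loop; the same invariant can be checked in isolation (the genotype factory below is a stand-in, not from the repo):

# Illustrative re-sketch of the top-up loop, outside the class (assumed names).
population = []
popsize = 5
geno_factory = lambda: {"genes": []}   # hypothetical genotype constructor
while len(population) < popsize:
    population.append(geno_factory())
assert len(population) == popsize      # re-running the loop now adds nothing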
dimagi/commcare-hq
d67ff1d3b4c51fa050c19e60c3253a79d3452a39
corehq/ex-submodules/casexml/apps/stock/consumption.py
python
span_days
(start, end)
return span.days + span.seconds / 86400.
[]
def span_days(start, end):
    span = end - start
    return span.days + span.seconds / 86400.
[ "def", "span_days", "(", "start", ",", "end", ")", ":", "span", "=", "end", "-", "start", "return", "span", ".", "days", "+", "span", ".", "seconds", "/", "86400." ]
https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/ex-submodules/casexml/apps/stock/consumption.py#L135-L137
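A quick sanity check of `span_days` with the stdlib (note the float literal `86400.` keeps the division fractional even under Python 2):

from datetime import datetime

# 36 hours -> 1.5 days; the timedelta splits this into days=1, seconds=43200.
print(span_days(datetime(2021, 1, 1), datetime(2021, 1, 2, 12)))  # 1.5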
openhatch/oh-mainline
ce29352a034e1223141dcc2f317030bbc3359a51
vendor/packages/twisted/twisted/application/internet.py
python
StreamServerEndpointService.startService
(self)
Start listening on the endpoint, unless L{privilegedStartService} got around to it already.
Start listening on the endpoint, unless L{privilegedStartService} got around to it already.
[ "Start", "listening", "on", "the", "endpoint", "unless", "L", "{", "privilegedStartService", "}", "got", "around", "to", "it", "already", "." ]
def startService(self):
    """
    Start listening on the endpoint, unless L{privilegedStartService} got
    around to it already.
    """
    service.Service.startService(self)
    if self._waitingForPort is None:
        self.privilegedStartService()
[ "def", "startService", "(", "self", ")", ":", "service", ".", "Service", ".", "startService", "(", "self", ")", "if", "self", ".", "_waitingForPort", "is", "None", ":", "self", ".", "privilegedStartService", "(", ")" ]
https://github.com/openhatch/oh-mainline/blob/ce29352a034e1223141dcc2f317030bbc3359a51/vendor/packages/twisted/twisted/application/internet.py#L369-L376
flopp/GpxTrackPoster
d7ff0ba61f6396938efd075d841a7c4a226a6f5d
gpxtrackposter/track_loader.py
python
TrackLoader.load_strava_tracks
(self, strava_config: str)
return self._filter_and_merge_tracks(tracks)
[]
def load_strava_tracks(self, strava_config: str) -> typing.List[Track]:
    tracks = []
    tracks_names = []
    if self.cache_dir:
        self.strava_cache_file = os.path.join(self.cache_dir, strava_config)
        if os.path.isfile(self.strava_cache_file):
            with open(self.strava_cache_file, encoding="utf8") as f:
                strava_cache_data = json.load(f)
                tracks = [self._strava_cache_to_track(i) for i in strava_cache_data]
                tracks_names = [track.file_names[0] for track in tracks]

    with open(strava_config, encoding="utf8") as f:
        strava_data = json.load(f)
    filter_type = strava_data.pop("activity_type", None)
    client = Client()
    response = client.refresh_access_token(**strava_data)
    client.access_token = response["access_token"]
    filter_dict = {"before": datetime.datetime.utcnow()}
    if tracks:
        max_time = max(track.start_time() for track in tracks)
        filter_dict = {"after": max_time - datetime.timedelta(days=2)}
    for activity in client.get_activities(**filter_dict):
        # tricky to pass the timezone
        if str(activity.id) in tracks_names:
            continue
        if filter_type and activity.type not in (
            [filter_type] if isinstance(filter_type, str) else filter_type
        ):  # pylint: disable=superfluous-parens
            continue
        t = Track()
        t.load_strava(activity)
        tracks.append(t)
    self._store_strava_tracks_to_cache(tracks)
    return self._filter_and_merge_tracks(tracks)
[ "def", "load_strava_tracks", "(", "self", ",", "strava_config", ":", "str", ")", "->", "typing", ".", "List", "[", "Track", "]", ":", "tracks", "=", "[", "]", "tracks_names", "=", "[", "]", "if", "self", ".", "cache_dir", ":", "self", ".", "strava_cach...
https://github.com/flopp/GpxTrackPoster/blob/d7ff0ba61f6396938efd075d841a7c4a226a6f5d/gpxtrackposter/track_loader.py#L122-L155
zhixinwang/frustum-convnet
5b1508d3f2140c3c0dd6dd17b5606b532b7a5ec8
kitti/kitti_util.py
python
inverse_rigid_trans
(Tr)
return inv_Tr
Inverse a rigid body transform matrix (3x4 as [R|t]) [R'|-R't; 0|1]
Inverse a rigid body transform matrix (3x4 as [R|t]) [R'|-R't; 0|1]
[ "Inverse", "a", "rigid", "body", "transform", "matrix", "(", "3x4", "as", "[", "R|t", "]", ")", "[", "R", "|", "-", "R", "t", ";", "0|1", "]" ]
def inverse_rigid_trans(Tr):
    ''' Inverse a rigid body transform matrix (3x4 as [R|t])
        [R'|-R't; 0|1]
    '''
    inv_Tr = np.zeros_like(Tr)  # 3x4
    inv_Tr[0:3, 0:3] = np.transpose(Tr[0:3, 0:3])
    inv_Tr[0:3, 3] = np.dot(-np.transpose(Tr[0:3, 0:3]), Tr[0:3, 3])
    return inv_Tr
[ "def", "inverse_rigid_trans", "(", "Tr", ")", ":", "inv_Tr", "=", "np", ".", "zeros_like", "(", "Tr", ")", "# 3x4", "inv_Tr", "[", "0", ":", "3", ",", "0", ":", "3", "]", "=", "np", ".", "transpose", "(", "Tr", "[", "0", ":", "3", ",", "0", "...
https://github.com/zhixinwang/frustum-convnet/blob/5b1508d3f2140c3c0dd6dd17b5606b532b7a5ec8/kitti/kitti_util.py#L275-L282
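The formula can be verified numerically: composing `[R'|-R't]` with the original `[R|t]` must recover the identity rotation and zero translation (sketch assumes `numpy` imported as `np`, matching the function body):

import numpy as np

theta = 0.3
R = np.array([[np.cos(theta), -np.sin(theta), 0.0],
              [np.sin(theta),  np.cos(theta), 0.0],
              [0.0,            0.0,           1.0]])
t = np.array([1.0, 2.0, 3.0])
Tr = np.hstack([R, t[:, None]])                    # 3x4 [R|t]

inv_Tr = inverse_rigid_trans(Tr)
print(np.allclose(inv_Tr[:, :3] @ R, np.eye(3)))           # True: R'R = I
print(np.allclose(inv_Tr[:, :3] @ t + inv_Tr[:, 3], 0.0))  # True: R't - R't = 0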
kovidgoyal/calibre
2b41671370f2a9eb1109b9ae901ccf915f1bd0c8
src/calibre/utils/fonts/utils.py
python
get_font_characteristics
(raw, raw_is_table=False, return_all=False)
return weight, is_italic, is_bold, is_regular, fs_type, panose, width, is_oblique, is_wws, version
Return (weight, is_italic, is_bold, is_regular, fs_type, panose, width, is_oblique, is_wws). These values are taken from the OS/2 table of the font. See http://www.microsoft.com/typography/otspec/os2.htm for details
Return (weight, is_italic, is_bold, is_regular, fs_type, panose, width, is_oblique, is_wws). These values are taken from the OS/2 table of the font. See http://www.microsoft.com/typography/otspec/os2.htm for details
[ "Return", "(", "weight", "is_italic", "is_bold", "is_regular", "fs_type", "panose", "width", "is_oblique", "is_wws", ")", ".", "These", "values", "are", "taken", "from", "the", "OS", "/", "2", "table", "of", "the", "font", ".", "See", "http", ":", "//", ...
def get_font_characteristics(raw, raw_is_table=False, return_all=False):
    '''
    Return (weight, is_italic, is_bold, is_regular, fs_type, panose, width,
    is_oblique, is_wws). These values are taken from the OS/2 table of the
    font. See http://www.microsoft.com/typography/otspec/os2.htm for details
    '''
    if raw_is_table:
        os2_table = raw
    else:
        os2_table = get_table(raw, 'os/2')[0]
        if os2_table is None:
            raise UnsupportedFont('Not a supported font, has no OS/2 table')
    common_fields = b'>Hh3H11h'
    (version, char_width, weight, width, fs_type, subscript_x_size,
     subscript_y_size, subscript_x_offset, subscript_y_offset,
     superscript_x_size, superscript_y_size, superscript_x_offset,
     superscript_y_offset, strikeout_size, strikeout_position,
     family_class) = struct.unpack_from(common_fields, os2_table)
    offset = struct.calcsize(common_fields)
    panose = struct.unpack_from(b'>10B', os2_table, offset)
    offset += 10
    (range1, range2, range3, range4) = struct.unpack_from(b'>4L', os2_table, offset)
    offset += struct.calcsize(b'>4L')
    vendor_id = os2_table[offset:offset+4]
    vendor_id
    offset += 4
    selection, = struct.unpack_from(b'>H', os2_table, offset)
    is_italic = (selection & (1 << 0)) != 0
    is_bold = (selection & (1 << 5)) != 0
    is_regular = (selection & (1 << 6)) != 0
    is_wws = (selection & (1 << 8)) != 0
    is_oblique = (selection & (1 << 9)) != 0
    if return_all:
        return (version, char_width, weight, width, fs_type, subscript_x_size,
                subscript_y_size, subscript_x_offset, subscript_y_offset,
                superscript_x_size, superscript_y_size, superscript_x_offset,
                superscript_y_offset, strikeout_size, strikeout_position,
                family_class, panose, selection, is_italic, is_bold, is_regular)
    return weight, is_italic, is_bold, is_regular, fs_type, panose, width, is_oblique, is_wws, version
[ "def", "get_font_characteristics", "(", "raw", ",", "raw_is_table", "=", "False", ",", "return_all", "=", "False", ")", ":", "if", "raw_is_table", ":", "os2_table", "=", "raw", "else", ":", "os2_table", "=", "get_table", "(", "raw", ",", "'os/2'", ")", "["...
https://github.com/kovidgoyal/calibre/blob/2b41671370f2a9eb1109b9ae901ccf915f1bd0c8/src/calibre/utils/fonts/utils.py#L50-L92
googledatalab/pydatalab
1c86e26a0d24e3bc8097895ddeab4d0607be4c40
solutionbox/structured_data/mltoolbox/_structured_data/_package.py
python
local_train
(train_dataset, eval_dataset, analysis_dir, output_dir, features, model_type, max_steps, num_epochs, train_batch_size, eval_batch_size, min_eval_frequency, top_n, layer_sizes, learning_rate, epsilon)
[]
def local_train(train_dataset, eval_dataset, analysis_dir, output_dir, features,
                model_type, max_steps, num_epochs, train_batch_size,
                eval_batch_size, min_eval_frequency, top_n, layer_sizes,
                learning_rate, epsilon):
    if len(train_dataset.input_files) != 1 or len(eval_dataset.input_files) != 1:
        raise ValueError('CsvDataSets must be built with a file pattern, not list '
                         'of files.')

    if file_io.file_exists(output_dir):
        raise ValueError('output_dir already exist. Use a new output path.')

    if eval_dataset.size < eval_batch_size:
        raise ValueError('Eval batch size must be smaller than the eval data size.')

    if isinstance(features, dict):
        # Make a features file.
        if not file_io.file_exists(output_dir):
            file_io.recursive_create_dir(output_dir)
        features_file = os.path.join(output_dir, 'features_file.json')
        file_io.write_string_to_file(
            features_file,
            json.dumps(features))
    else:
        features_file = features

    def _get_abs_path(input_path):
        cur_path = os.getcwd()
        full_path = os.path.abspath(os.path.join(cur_path, input_path))
        # put path in quotes as it could contain spaces.
        return "'" + full_path + "'"

    args = ['cd %s &&' % os.path.abspath(os.path.dirname(__file__)),
            'python -m trainer.task',
            '--train-data-paths=%s' % _get_abs_path(train_dataset.input_files[0]),
            '--eval-data-paths=%s' % _get_abs_path(eval_dataset.input_files[0]),
            '--job-dir=%s' % _get_abs_path(output_dir),
            '--preprocess-output-dir=%s' % _get_abs_path(analysis_dir),
            '--transforms-file=%s' % _get_abs_path(features_file),
            '--model-type=%s' % model_type,
            '--max-steps=%s' % str(max_steps),
            '--train-batch-size=%s' % str(train_batch_size),
            '--eval-batch-size=%s' % str(eval_batch_size),
            '--min-eval-frequency=%s' % str(min_eval_frequency),
            '--learning-rate=%s' % str(learning_rate),
            '--epsilon=%s' % str(epsilon)]
    if num_epochs:
        args.append('--num-epochs=%s' % str(num_epochs))
    if top_n:
        args.append('--top-n=%s' % str(top_n))
    if layer_sizes:
        for i in range(len(layer_sizes)):
            args.append('--layer-size%s=%s' % (i + 1, str(layer_sizes[i])))

    monitor_process = None
    try:
        p = subprocess.Popen(' '.join(args),
                             shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
        pids_to_kill = [p.pid]

        # script -> name = datalab_structured_data._package
        script = 'import %s; %s._wait_and_kill(%s, %s)' % (
            __name__, __name__, str(os.getpid()), str(pids_to_kill))
        monitor_process = subprocess.Popen(['python', '-c', script])

        while p.poll() is None:
            line = p.stdout.readline()
            if not six.PY2:
                line = line.decode()
            if (line.startswith('INFO:tensorflow:global')
                    or line.startswith('INFO:tensorflow:loss')
                    or line.startswith('INFO:tensorflow:Saving dict')):
                sys.stdout.write(line)
    finally:
        if monitor_process:
            monitor_process.kill()
            monitor_process.wait()
[ "def", "local_train", "(", "train_dataset", ",", "eval_dataset", ",", "analysis_dir", ",", "output_dir", ",", "features", ",", "model_type", ",", "max_steps", ",", "num_epochs", ",", "train_batch_size", ",", "eval_batch_size", ",", "min_eval_frequency", ",", "top_n"...
https://github.com/googledatalab/pydatalab/blob/1c86e26a0d24e3bc8097895ddeab4d0607be4c40/solutionbox/structured_data/mltoolbox/_structured_data/_package.py#L363-L453
DSE-MSU/DeepRobust
2bcde200a5969dae32cddece66206a52c87c43e8
deeprobust/graph/defense/r_gcn.py
python
GaussianConvolution.reset_parameters
(self)
[]
def reset_parameters(self):
    # TODO
    torch.nn.init.xavier_uniform_(self.weight_miu)
    torch.nn.init.xavier_uniform_(self.weight_sigma)
[ "def", "reset_parameters", "(", "self", ")", ":", "# TODO", "torch", ".", "nn", ".", "init", ".", "xavier_uniform_", "(", "self", ".", "weight_miu", ")", "torch", ".", "nn", ".", "init", ".", "xavier_uniform_", "(", "self", ".", "weight_sigma", ")" ]
https://github.com/DSE-MSU/DeepRobust/blob/2bcde200a5969dae32cddece66206a52c87c43e8/deeprobust/graph/defense/r_gcn.py#L89-L92
hyperledger/sawtooth-core
704cd5837c21f53642c06ffc97ba7978a77940b0
cli/sawtooth_cli/network_command/compare.py
python
do_compare_chains
(args)
Calculates and outputs comparison between all nodes on the network.
Calculates and outputs comparison between all nodes on the network.
[ "Calculates", "and", "outputs", "comparison", "between", "all", "nodes", "on", "the", "network", "." ]
def do_compare_chains(args):
    """Calculates and outputs comparison between all nodes on the network."""
    urls = split_comma_append_args(args.urls)
    users = split_comma_append_args(args.users)
    clients = make_rest_apis(urls, users)

    broken = []

    chains, errors = get_chain_generators(clients, args.limit)
    broken.extend(errors)
    for node in errors:
        print("Error connecting to node %d: %s" % (node, urls[node]))
    if not chains:
        print("No nodes reporting")
        return

    tails, errors = get_tails(chains)
    broken.extend(errors)
    for node in errors:
        del chains[node]
    for node in errors:
        print("Failed to reach common height with node %d: %s" % (
            node, urls[node]))
    if not chains:
        print("Failed to get common height")
        return

    graph, errors = build_fork_graph(chains, tails)
    broken.extend(errors)
    for node in errors:
        print("Failed to reach common ancestor with node %d: %s" % (
            node, urls[node]))
    if not graph:
        print("Failed to build fork graph")
        return

    # Transform tails and errors into the format expected by the print
    # functions. Because errors can occur while building the graph, we need to
    # remove the tails for those clients.
    broken.sort()
    node_id_map = get_node_id_map(broken, len(clients))
    tails = list(map(
        lambda item: item[1],
        filter(
            lambda item: item[0] not in broken,
            sorted(tails.items()))))

    if args.table:
        print_table(graph, tails, node_id_map)
    elif args.tree:
        print_tree(graph, tails, node_id_map)
    else:
        print_summary(graph, tails, node_id_map)
[ "def", "do_compare_chains", "(", "args", ")", ":", "urls", "=", "split_comma_append_args", "(", "args", ".", "urls", ")", "users", "=", "split_comma_append_args", "(", "args", ".", "users", ")", "clients", "=", "make_rest_apis", "(", "urls", ",", "users", ")...
https://github.com/hyperledger/sawtooth-core/blob/704cd5837c21f53642c06ffc97ba7978a77940b0/cli/sawtooth_cli/network_command/compare.py#L85-L139
kozec/sc-controller
ce92c773b8b26f6404882e9209aff212c4053170
scc/device_monitor.py
python
DeviceMonitor.get_hidraw
(self, syspath)
return None
For given syspath, returns name of assotiated hidraw device. Returns None if there is no such thing.
For given syspath, returns name of assotiated hidraw device. Returns None if there is no such thing.
[ "For", "given", "syspath", "returns", "name", "of", "assotiated", "hidraw", "device", ".", "Returns", "None", "if", "there", "is", "no", "such", "thing", "." ]
def get_hidraw(self, syspath):
    """
    For given syspath, returns name of assotiated hidraw device.
    Returns None if there is no such thing.
    """
    node = self._dev_for_hci(syspath)
    if node is None:
        return None
    hidrawsubdir = os.path.join(node, "hidraw")
    for fname in os.listdir(hidrawsubdir):
        if fname.startswith("hidraw"):
            return fname
    return None
[ "def", "get_hidraw", "(", "self", ",", "syspath", ")", ":", "node", "=", "self", ".", "_dev_for_hci", "(", "syspath", ")", "if", "node", "is", "None", ":", "return", "None", "hidrawsubdir", "=", "os", ".", "path", ".", "join", "(", "node", ",", "\"hi...
https://github.com/kozec/sc-controller/blob/ce92c773b8b26f6404882e9209aff212c4053170/scc/device_monitor.py#L204-L216
linuxscout/pyarabic
010bddadb7c9b5c6bd24cc02d4aeddde0c4a10c4
pyarabic/kalima.py
python
Kalima.__le__
(self, other)
Compare two vocalized words, the two words are equal if they have the same letters and marks The two words are greater or equals if have the same letters and similar full or partial vocalization
Compare two vocalized words, the two words are equal if they have the same letters and marks The two words are greater or equals if have the same letters and similar full or partial vocalization
[ "Compare", "two", "vocalized", "words", "the", "two", "words", "are", "equal", "if", "they", "have", "the", "same", "letters", "and", "marks", "The", "two", "words", "are", "greater", "or", "equals", "if", "have", "the", "same", "letters", "and", "similar"...
def __le__(self, other):
    """ Compare two vocalized words, the two words are equal
    if they have the same letters and marks
    The two words are greater or equals if have the same letters
    and similar full or partial vocalization
    """
[ "def", "__le__", "(", "self", ",", "other", ")", ":" ]
https://github.com/linuxscout/pyarabic/blob/010bddadb7c9b5c6bd24cc02d4aeddde0c4a10c4/pyarabic/kalima.py#L100-L106
holzschu/Carnets
44effb10ddfc6aa5c8b0687582a724ba82c6b547
Library/lib/python3.7/pdb.py
python
Pdb.do_whatis
(self, arg)
whatis arg Print the type of the argument.
whatis arg Print the type of the argument.
[ "whatis", "arg", "Print", "the", "type", "of", "the", "argument", "." ]
def do_whatis(self, arg):
    """whatis arg
    Print the type of the argument.
    """
    try:
        value = self._getval(arg)
    except:
        # _getval() already printed the error
        return
    code = None
    # Is it a function?
    try:
        code = value.__code__
    except Exception:
        pass
    if code:
        self.message('Function %s' % code.co_name)
        return
    # Is it an instance method?
    try:
        code = value.__func__.__code__
    except Exception:
        pass
    if code:
        self.message('Method %s' % code.co_name)
        return
    # Is it a class?
    if value.__class__ is type:
        self.message('Class %s.%s' % (value.__module__, value.__qualname__))
        return
    # None of the above...
    self.message(type(value))
[ "def", "do_whatis", "(", "self", ",", "arg", ")", ":", "try", ":", "value", "=", "self", ".", "_getval", "(", "arg", ")", "except", ":", "# _getval() already printed the error", "return", "code", "=", "None", "# Is it a function?", "try", ":", "code", "=", ...
https://github.com/holzschu/Carnets/blob/44effb10ddfc6aa5c8b0687582a724ba82c6b547/Library/lib/python3.7/pdb.py#L1297-L1328
CTF-MissFeng/bayonet
360ffeb87025a85b08b4fa56f951936d38f2c109
web/route/user/html.py
python
html_user_add
()
return render_template('user/user-add.html', title=TITLE)
新增用户
新增用户
[ "新增用户" ]
def html_user_add():
    '''新增用户'''
    return render_template('user/user-add.html', title=TITLE)
[ "def", "html_user_add", "(", ")", ":", "return", "render_template", "(", "'user/user-add.html'", ",", "title", "=", "TITLE", ")" ]
https://github.com/CTF-MissFeng/bayonet/blob/360ffeb87025a85b08b4fa56f951936d38f2c109/web/route/user/html.py#L63-L65
pymedusa/Medusa
1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38
medusa/server/api/v2/providers.py
python
ProvidersHandler.post
(self, identifier, path_param=None)
return self._bad_request('Could not locate provider by id')
Add a new provider or run an operation on a specific provider type. Without the identifier an arary of provider objects is expected to save the provider order list. With a subType param, operations can be executed, like the newznab/getCategories. Which will return a list of available newznab categories. :param identifier: Provider subType. For example: torznab, newznab, torrentrss.
Add a new provider or run an operation on a specific provider type.
[ "Add", "a", "new", "provider", "or", "run", "an", "operation", "on", "a", "specific", "provider", "type", "." ]
def post(self, identifier, path_param=None):
    """
    Add a new provider or run an operation on a specific provider type.

    Without the identifier an arary of provider objects is expected to save the provider order list.
    With a subType param, operations can be executed, like the newznab/getCategories.
    Which will return a list of available newznab categories.

    :param identifier: Provider subType. For example: torznab, newznab, torrentrss.
    """
    if not identifier:
        data = json_decode(self.request.body)
        sorted_providers = data.get('providers')
        if sorted_providers is None:
            return self._bad_request('You should provide an array of providers')
        self._save_provider_order(sorted_providers)
        return self._created(data={'providers': providers})

    if identifier:
        data = json_decode(self.request.body)

        if identifier in ('newznab', 'torznab', 'torrentrss', 'prowlarr'):
            if not path_param:
                # No path_param passed. Asume we're trying to add a provider.
                if identifier == 'newznab':
                    return self._add_newznab_provider(data)
                if identifier == 'torrentrss':
                    return self._add_torrentrss_provider(data)
                if identifier == 'torznab':
                    return self._add_torznab_provider(data)
                if identifier == 'prowlarr':
                    return self._add_prowlarr_provider(data)

            if path_param == 'operation':
                if identifier == 'prowlarr':
                    if data.get('type') == 'TEST':
                        # Test prowlarr connectivity
                        prowlarr = ProwlarrManager(data.get('url'), data.get('apikey'))
                        if prowlarr.test_connectivity():
                            return self._ok('Connection successfull')
                        else:
                            return self._not_found('Çould not connect to prowlarr')
                    if data.get('type') == 'GETINDEXERS':
                        prowlarr = ProwlarrManager(data.get('url'), data.get('apikey'))
                        indexers = prowlarr.get_indexers()
                        if indexers:
                            return self._ok(indexers)
                        return self._internal_server_error()

                if identifier in ('newznab', 'torznab'):
                    if data.get('type') == 'GETCATEGORIES':
                        return self._get_categories(identifier, data)

        if identifier == 'internal':
            if path_param == 'operation':
                if data.get('type') == 'TESTPROVIDER':
                    return self._test_provider(data)

    return self._bad_request('Could not locate provider by id')
[ "def", "post", "(", "self", ",", "identifier", ",", "path_param", "=", "None", ")", ":", "if", "not", "identifier", ":", "data", "=", "json_decode", "(", "self", ".", "request", ".", "body", ")", "sorted_providers", "=", "data", ".", "get", "(", "'prov...
https://github.com/pymedusa/Medusa/blob/1405fbb6eb8ef4d20fcca24c32ddca52b11f0f38/medusa/server/api/v2/providers.py#L132-L189
CvvT/dumpDex
92ab3b7e996194a06bf1dd5538a4954e8a5ee9c1
python/idaapi.py
python
debug_event_t.bpt_ea
(self, *args)
return _idaapi.debug_event_t_bpt_ea(self, *args)
bpt_ea(self) -> ea_t
bpt_ea(self) -> ea_t
[ "bpt_ea", "(", "self", ")", "-", ">", "ea_t" ]
def bpt_ea(self, *args):
    """
    bpt_ea(self) -> ea_t
    """
    return _idaapi.debug_event_t_bpt_ea(self, *args)
[ "def", "bpt_ea", "(", "self", ",", "*", "args", ")", ":", "return", "_idaapi", ".", "debug_event_t_bpt_ea", "(", "self", ",", "*", "args", ")" ]
https://github.com/CvvT/dumpDex/blob/92ab3b7e996194a06bf1dd5538a4954e8a5ee9c1/python/idaapi.py#L3166-L3170
facebookresearch/votenet
2f6d6d36ff98d96901182e935afe48ccee82d566
models/loss_helper.py
python
compute_vote_loss
(end_points)
return vote_loss
Compute vote loss: Match predicted votes to GT votes. Args: end_points: dict (read-only) Returns: vote_loss: scalar Tensor Overall idea: If the seed point belongs to an object (votes_label_mask == 1), then we require it to vote for the object center. Each seed point may vote for multiple translations v1,v2,v3 A seed point may also be in the boxes of multiple objects: o1,o2,o3 with corresponding GT votes c1,c2,c3 Then the loss for this seed point is: min(d(v_i,c_j)) for i=1,2,3 and j=1,2,3
Compute vote loss: Match predicted votes to GT votes.
[ "Compute", "vote", "loss", ":", "Match", "predicted", "votes", "to", "GT", "votes", "." ]
def compute_vote_loss(end_points):
    """ Compute vote loss: Match predicted votes to GT votes.

    Args:
        end_points: dict (read-only)

    Returns:
        vote_loss: scalar Tensor

    Overall idea:
        If the seed point belongs to an object (votes_label_mask == 1),
        then we require it to vote for the object center.

        Each seed point may vote for multiple translations v1,v2,v3
        A seed point may also be in the boxes of multiple objects:
        o1,o2,o3 with corresponding GT votes c1,c2,c3

        Then the loss for this seed point is:
            min(d(v_i,c_j)) for i=1,2,3 and j=1,2,3
    """

    # Load ground truth votes and assign them to seed points
    batch_size = end_points['seed_xyz'].shape[0]
    num_seed = end_points['seed_xyz'].shape[1]  # B,num_seed,3
    vote_xyz = end_points['vote_xyz']  # B,num_seed*vote_factor,3
    seed_inds = end_points['seed_inds'].long()  # B,num_seed in [0,num_points-1]

    # Get groundtruth votes for the seed points
    # vote_label_mask: Use gather to select B,num_seed from B,num_point
    #   non-object point has no GT vote mask = 0, object point has mask = 1
    # vote_label: Use gather to select B,num_seed,9 from B,num_point,9
    #   with inds in shape B,num_seed,9 and 9 = GT_VOTE_FACTOR * 3
    seed_gt_votes_mask = torch.gather(end_points['vote_label_mask'], 1, seed_inds)
    seed_inds_expand = seed_inds.view(batch_size, num_seed, 1).repeat(1, 1, 3 * GT_VOTE_FACTOR)
    seed_gt_votes = torch.gather(end_points['vote_label'], 1, seed_inds_expand)
    seed_gt_votes += end_points['seed_xyz'].repeat(1, 1, 3)

    # Compute the min of min of distance
    vote_xyz_reshape = vote_xyz.view(batch_size * num_seed, -1, 3)  # from B,num_seed*vote_factor,3 to B*num_seed,vote_factor,3
    seed_gt_votes_reshape = seed_gt_votes.view(batch_size * num_seed, GT_VOTE_FACTOR, 3)  # from B,num_seed,3*GT_VOTE_FACTOR to B*num_seed,GT_VOTE_FACTOR,3
    # A predicted vote to no where is not penalized as long as there is a good vote near the GT vote.
    dist1, _, dist2, _ = nn_distance(vote_xyz_reshape, seed_gt_votes_reshape, l1=True)
    votes_dist, _ = torch.min(dist2, dim=1)  # (B*num_seed,vote_factor) to (B*num_seed,)
    votes_dist = votes_dist.view(batch_size, num_seed)
    vote_loss = torch.sum(votes_dist * seed_gt_votes_mask.float()) / (torch.sum(seed_gt_votes_mask.float()) + 1e-6)
    return vote_loss
[ "def", "compute_vote_loss", "(", "end_points", ")", ":", "# Load ground truth votes and assign them to seed points", "batch_size", "=", "end_points", "[", "'seed_xyz'", "]", ".", "shape", "[", "0", "]", "num_seed", "=", "end_points", "[", "'seed_xyz'", "]", ".", "sh...
https://github.com/facebookresearch/votenet/blob/2f6d6d36ff98d96901182e935afe48ccee82d566/models/loss_helper.py#L21-L66
apple/ccs-calendarserver
13c706b985fb728b9aab42dc0fef85aae21921c3
txweb2/dav/resource.py
python
DAVPrincipalResource.expandedGroupMembers
(self)
return succeed(())
This implementation returns a Deferred which fires with C{()}, which is appropriate for non-group principals. Subclasses should override this method to provide expanded member URLs for this resource if appropriate. @see: L{IDAVPrincipalResource.expandedGroupMembers}
This implementation returns a Deferred which fires with C{()}, which is appropriate for non-group principals. Subclasses should override this method to provide expanded member URLs for this resource if appropriate.
[ "This", "implementation", "returns", "a", "Deferred", "which", "fires", "with", "C", "{", "()", "}", "which", "is", "appropriate", "for", "non", "-", "group", "principals", ".", "Subclasses", "should", "override", "this", "method", "to", "provide", "expanded",...
def expandedGroupMembers(self):
    """
    This implementation returns a Deferred which fires with C{()},
    which is appropriate for non-group principals.  Subclasses
    should override this method to provide expanded member URLs
    for this resource if appropriate.

    @see: L{IDAVPrincipalResource.expandedGroupMembers}
    """
    return succeed(())
[ "def", "expandedGroupMembers", "(", "self", ")", ":", "return", "succeed", "(", "(", ")", ")" ]
https://github.com/apple/ccs-calendarserver/blob/13c706b985fb728b9aab42dc0fef85aae21921c3/txweb2/dav/resource.py#L2481-L2490
dimagi/commcare-hq
d67ff1d3b4c51fa050c19e60c3253a79d3452a39
corehq/ex-submodules/couchexport/export.py
python
FormattedRow.formatted_id
(self)
return self.separator.join(map(str, self.id))
[]
def formatted_id(self):
    if isinstance(self.id, str):
        return self.id
    return self.separator.join(map(str, self.id))
[ "def", "formatted_id", "(", "self", ")", ":", "if", "isinstance", "(", "self", ".", "id", ",", "str", ")", ":", "return", "self", ".", "id", "return", "self", ".", "separator", ".", "join", "(", "map", "(", "str", ",", "self", ".", "id", ")", ")"...
https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/ex-submodules/couchexport/export.py#L283-L286
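The logic in isolation (a free-function re-sketch; `separator` is an attribute of `FormattedRow`, and the default used here is assumed, not from the source):

def formatted_id(id_, separator="."):
    # String ids pass through; composite ids are joined with the separator.
    if isinstance(id_, str):
        return id_
    return separator.join(map(str, id_))

print(formatted_id("abc123"))      # 'abc123'
print(formatted_id((1, "x", 2)))   # '1.x.2'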
IBM/EvolveGCN
90869062bbc98d56935e3d92e1d9b1b4c25be593
logger.py
python
Logger.eval_predicitions
(self, predictions, true_classes, num_classes)
return error, conf_mat_per_class
[]
def eval_predicitions(self, predictions, true_classes, num_classes):
    predicted_classes = predictions.argmax(dim=1)
    failures = (predicted_classes != true_classes).sum(dtype=torch.float)
    error = failures / predictions.size(0)

    conf_mat_per_class = utils.Namespace({})
    conf_mat_per_class.true_positives = {}
    conf_mat_per_class.false_negatives = {}
    conf_mat_per_class.false_positives = {}

    for cl in range(num_classes):
        cl_indices = true_classes == cl
        pos = predicted_classes == cl

        hits = (predicted_classes[cl_indices] == true_classes[cl_indices])

        tp = hits.sum()
        fn = hits.size(0) - tp
        fp = pos.sum() - tp

        conf_mat_per_class.true_positives[cl] = tp
        conf_mat_per_class.false_negatives[cl] = fn
        conf_mat_per_class.false_positives[cl] = fp
    return error, conf_mat_per_class
[ "def", "eval_predicitions", "(", "self", ",", "predictions", ",", "true_classes", ",", "num_classes", ")", ":", "predicted_classes", "=", "predictions", ".", "argmax", "(", "dim", "=", "1", ")", "failures", "=", "(", "predicted_classes", "!=", "true_classes", ...
https://github.com/IBM/EvolveGCN/blob/90869062bbc98d56935e3d92e1d9b1b4c25be593/logger.py#L254-L277
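The per-class counting can be exercised on a tiny tensor (a re-sketch of the inner loop without the `Logger`/`utils.Namespace` wrapper; assumes `torch` is installed):

import torch

predictions = torch.tensor([[0.1, 0.9], [0.8, 0.2], [0.2, 0.8]])  # 3 samples, 2 classes
true_classes = torch.tensor([1, 1, 1])
predicted = predictions.argmax(dim=1)                             # -> [1, 0, 1]
error = (predicted != true_classes).sum(dtype=torch.float) / predictions.size(0)
print(float(error))                                               # 0.333...

for cl in range(2):
    cl_indices = true_classes == cl
    hits = predicted[cl_indices] == true_classes[cl_indices]
    tp = hits.sum()
    fn = hits.size(0) - tp
    fp = (predicted == cl).sum() - tp
    print(cl, int(tp), int(fn), int(fp))  # class 0: 0,0,1   class 1: 2,1,0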
IntelPython/sdc
1ebf55c00ef38dfbd401a70b3945e352a5a38b87
sdc/hiframes/pd_series_type.py
python
SeriesType.ndim
(self)
return self.data.ndim
[]
def ndim(self):
    return self.data.ndim
[ "def", "ndim", "(", "self", ")", ":", "return", "self", ".", "data", ".", "ndim" ]
https://github.com/IntelPython/sdc/blob/1ebf55c00ef38dfbd401a70b3945e352a5a38b87/sdc/hiframes/pd_series_type.py#L78-L79
lazylibrarian/LazyLibrarian
ae3c14e9db9328ce81765e094ab2a14ed7155624
cherrypy/wsgiserver/wsgiserver2.py
python
HTTPConnection.communicate
(self)
Read each request and respond appropriately.
Read each request and respond appropriately.
[ "Read", "each", "request", "and", "respond", "appropriately", "." ]
def communicate(self):
    """Read each request and respond appropriately."""
    request_seen = False
    try:
        while True:
            # (re)set req to None so that if something goes wrong in
            # the RequestHandlerClass constructor, the error doesn't
            # get written to the previous request.
            req = None
            req = self.RequestHandlerClass(self.server, self)

            # This order of operations should guarantee correct pipelining.
            req.parse_request()
            if self.server.stats['Enabled']:
                self.requests_seen += 1
            if not req.ready:
                # Something went wrong in the parsing (and the server has
                # probably already made a simple_response). Return and
                # let the conn close.
                return

            request_seen = True
            req.respond()
            if req.close_connection:
                return
    except socket.error:
        e = sys.exc_info()[1]
        errnum = e.args[0]
        # sadly SSL sockets return a different (longer) time out string
        if (
            errnum == 'timed out' or
            errnum == 'The read operation timed out'
        ):
            # Don't error if we're between requests; only error
            # if 1) no request has been started at all, or 2) we're
            # in the middle of a request.
            # See https://bitbucket.org/cherrypy/cherrypy/issue/853
            if (not request_seen) or (req and req.started_request):
                # Don't bother writing the 408 if the response
                # has already started being written.
                if req and not req.sent_headers:
                    try:
                        req.simple_response("408 Request Timeout")
                    except FatalSSLAlert:
                        # Close the connection.
                        return
        elif errnum not in socket_errors_to_ignore:
            self.server.error_log("socket.error %s" % repr(errnum),
                                  level=logging.WARNING, traceback=True)
            if req and not req.sent_headers:
                try:
                    req.simple_response("500 Internal Server Error")
                except FatalSSLAlert:
                    # Close the connection.
                    return
        return
    except (KeyboardInterrupt, SystemExit):
        raise
    except FatalSSLAlert:
        # Close the connection.
        return
    except NoSSLError:
        if req and not req.sent_headers:
            # Unwrap our wfile
            self.wfile = CP_fileobject(
                self.socket._sock, "wb", self.wbufsize)
            req.simple_response(
                "400 Bad Request",
                "The client sent a plain HTTP request, but "
                "this server only speaks HTTPS on this port.")
            self.linger = True
    except Exception:
        e = sys.exc_info()[1]
        self.server.error_log(repr(e), level=logging.ERROR, traceback=True)
        if req and not req.sent_headers:
            try:
                req.simple_response("500 Internal Server Error")
            except FatalSSLAlert:
                # Close the connection.
                return
[ "def", "communicate", "(", "self", ")", ":", "request_seen", "=", "False", "try", ":", "while", "True", ":", "# (re)set req to None so that if something goes wrong in", "# the RequestHandlerClass constructor, the error doesn't", "# get written to the previous request.", "req", "=...
https://github.com/lazylibrarian/LazyLibrarian/blob/ae3c14e9db9328ce81765e094ab2a14ed7155624/cherrypy/wsgiserver/wsgiserver2.py#L1331-L1410
perfectblue/ctf-writeups
38c81bbce254885da6b485dd1dd76798120bc98c
2019/ctfzone-quals-2019/ocb2/ocb/aes.py
python
AES._aes_invRound
(self, state, roundKey)
return state
Applies the 4 operations of the inverse round in sequence
Applies the 4 operations of the inverse round in sequence
[ "Applies", "the", "4", "operations", "of", "the", "inverse", "round", "in", "sequence" ]
def _aes_invRound(self, state, roundKey):
    """ Applies the 4 operations of the inverse round in sequence """
    state = self._shiftRows(state, True)
    state = self._subBytes(state, True)
    state = self._addRoundKey(state, roundKey)
    state = self._mixColumns(state, True)
    return state
[ "def", "_aes_invRound", "(", "self", ",", "state", ",", "roundKey", ")", ":", "state", "=", "self", ".", "_shiftRows", "(", "state", ",", "True", ")", "state", "=", "self", ".", "_subBytes", "(", "state", ",", "True", ")", "state", "=", "self", ".", ...
https://github.com/perfectblue/ctf-writeups/blob/38c81bbce254885da6b485dd1dd76798120bc98c/2019/ctfzone-quals-2019/ocb2/ocb/aes.py#L327-L333
trailofbits/manticore
b050fdf0939f6c63f503cdf87ec0ab159dd41159
manticore/platforms/evm.py
python
EVMWorld.transaction
(self, address, price=0, data="", caller=None, value=0, gas=2300)
Initiates a CALL transaction on current state. Do a world.run() after this to explore all _possible_ outputs
Initiates a CALL transaction on current state. Do a world.run() after this to explore all _possible_ outputs
[ "Initiates", "a", "CALL", "transaction", "on", "current", "state", ".", "Do", "a", "world", ".", "run", "()", "after", "this", "to", "explore", "all", "_possible_", "outputs" ]
def transaction(self, address, price=0, data="", caller=None, value=0, gas=2300):
    """Initiates a CALL transaction on current state.
    Do a world.run() after this to explore all _possible_ outputs
    """
    self.start_transaction(
        "CALL", address, price=price, data=data, caller=caller, value=value, gas=gas
    )
[ "def", "transaction", "(", "self", ",", "address", ",", "price", "=", "0", ",", "data", "=", "\"\"", ",", "caller", "=", "None", ",", "value", "=", "0", ",", "gas", "=", "2300", ")", ":", "self", ".", "start_transaction", "(", "\"CALL\"", ",", "add...
https://github.com/trailofbits/manticore/blob/b050fdf0939f6c63f503cdf87ec0ab159dd41159/manticore/platforms/evm.py#L3205-L3211
IntelLabs/coach
dea46ae0d22b0a0cd30b9fc138a4a2642e1b9d9d
rl_coach/architectures/mxnet_components/utils.py
python
get_mxnet_activation_name
(activation_name: str)
return activation_functions[activation_name]
Convert coach activation name to mxnet specific activation name :param activation_name: name of the activation inc coach :return: name of the activation in mxnet
Convert coach activation name to mxnet specific activation name :param activation_name: name of the activation inc coach :return: name of the activation in mxnet
[ "Convert", "coach", "activation", "name", "to", "mxnet", "specific", "activation", "name", ":", "param", "activation_name", ":", "name", "of", "the", "activation", "inc", "coach", ":", "return", ":", "name", "of", "the", "activation", "in", "mxnet" ]
def get_mxnet_activation_name(activation_name: str):
    """
    Convert coach activation name to mxnet specific activation name
    :param activation_name: name of the activation inc coach
    :return: name of the activation in mxnet
    """
    activation_functions = {
        'relu': 'relu',
        'tanh': 'tanh',
        'sigmoid': 'sigmoid',
        # FIXME Add other activations
        # 'elu': tf.nn.elu,
        'selu': 'softrelu',
        # 'leaky_relu': tf.nn.leaky_relu,
        'none': None
    }
    assert activation_name in activation_functions, \
        "Activation function must be one of the following {}. instead it was: {}".format(
            activation_functions.keys(), activation_name)
    return activation_functions[activation_name]
[ "def", "get_mxnet_activation_name", "(", "activation_name", ":", "str", ")", ":", "activation_functions", "=", "{", "'relu'", ":", "'relu'", ",", "'tanh'", ":", "'tanh'", ",", "'sigmoid'", ":", "'sigmoid'", ",", "# FIXME Add other activations", "# 'elu': tf.nn.elu,", ...
https://github.com/IntelLabs/coach/blob/dea46ae0d22b0a0cd30b9fc138a4a2642e1b9d9d/rl_coach/architectures/mxnet_components/utils.py#L290-L309
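Usage is a straight table lookup; the only non-identity mapping in the table is `'selu'`, which Coach maps to MXNet's `'softrelu'`:

print(get_mxnet_activation_name('relu'))     # 'relu'
print(get_mxnet_activation_name('selu'))     # 'softrelu'
print(get_mxnet_activation_name('none'))     # None
# Any name outside the table trips the assert, e.g. 'leaky_relu'.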
colour-science/colour
38782ac059e8ddd91939f3432bf06811c16667f0
colour/plotting/notation.py
python
plot_multi_munsell_value_functions
(functions, **kwargs)
return plot_multi_functions( functions, samples=np.linspace(0, 100, 1000), **settings)
Plots given *Munsell* value functions. Parameters ---------- functions : str or object or array_like *Munsell* value functions to plot. ``functions`` elements can be of any type or form supported by the :func:`colour.plotting.filter_passthrough` definition. Other Parameters ---------------- \\**kwargs : dict, optional {:func:`colour.plotting.artist`, :func:`colour.plotting.plot_multi_functions`, :func:`colour.plotting.render`}, Please refer to the documentation of the previously listed definitions. Returns ------- tuple Current figure and axes. Examples -------- >>> plot_multi_munsell_value_functions(['ASTM D1535', 'McCamy 1987']) ... # doctest: +ELLIPSIS (<Figure size ... with 1 Axes>, <...AxesSubplot...>) .. image:: ../_static/Plotting_Plot_Multi_Munsell_Value_Functions.png :align: center :alt: plot_multi_munsell_value_functions
Plots given *Munsell* value functions.
[ "Plots", "given", "*", "Munsell", "*", "value", "functions", "." ]
def plot_multi_munsell_value_functions(functions, **kwargs):
    """
    Plots given *Munsell* value functions.

    Parameters
    ----------
    functions : str or object or array_like
        *Munsell* value functions to plot. ``functions`` elements can be of
        any type or form supported by the
        :func:`colour.plotting.filter_passthrough` definition.

    Other Parameters
    ----------------
    \\**kwargs : dict, optional
        {:func:`colour.plotting.artist`,
        :func:`colour.plotting.plot_multi_functions`,
        :func:`colour.plotting.render`},
        Please refer to the documentation of the previously listed definitions.

    Returns
    -------
    tuple
        Current figure and axes.

    Examples
    --------
    >>> plot_multi_munsell_value_functions(['ASTM D1535', 'McCamy 1987'])
    ... # doctest: +ELLIPSIS
    (<Figure size ... with 1 Axes>, <...AxesSubplot...>)

    .. image:: ../_static/Plotting_Plot_Multi_Munsell_Value_Functions.png
        :align: center
        :alt: plot_multi_munsell_value_functions
    """

    functions = filter_passthrough(MUNSELL_VALUE_METHODS, functions)

    settings = {
        'bounding_box': (0, 100, 0, 10),
        'legend': True,
        'title': '{0} - Munsell Functions'.format(', '.join(functions)),
        'x_label': 'Luminance Y',
        'y_label': 'Munsell Value V',
    }
    settings.update(kwargs)

    return plot_multi_functions(
        functions, samples=np.linspace(0, 100, 1000), **settings)
[ "def", "plot_multi_munsell_value_functions", "(", "functions", ",", "*", "*", "kwargs", ")", ":", "functions", "=", "filter_passthrough", "(", "MUNSELL_VALUE_METHODS", ",", "functions", ")", "settings", "=", "{", "'bounding_box'", ":", "(", "0", ",", "100", ",",...
https://github.com/colour-science/colour/blob/38782ac059e8ddd91939f3432bf06811c16667f0/colour/plotting/notation.py#L76-L123
ghoseb/planet.clojure
a2e10e9e6bbf6bc544fad40beed8d0da7ab9518d
planet/vendor/compat_logging/handlers.py
python
SMTPHandler.getSubject
(self, record)
return self.subject
Determine the subject for the email. If you want to specify a subject line which is record-dependent, override this method.
Determine the subject for the email.
[ "Determine", "the", "subject", "for", "the", "email", "." ]
def getSubject(self, record):
    """
    Determine the subject for the email.

    If you want to specify a subject line which is record-dependent,
    override this method.
    """
    return self.subject
[ "def", "getSubject", "(", "self", ",", "record", ")", ":", "return", "self", ".", "subject" ]
https://github.com/ghoseb/planet.clojure/blob/a2e10e9e6bbf6bc544fad40beed8d0da7ab9518d/planet/vendor/compat_logging/handlers.py#L440-L447
openstack/barbican
a9d2b133c8dc3307974f119f9a2b23a4ba82e8ce
barbican/api/controllers/transportkeys.py
python
TransportKeyController.on_get
(self, external_project_id)
return transport_key
[]
def on_get(self, external_project_id):
    LOG.debug("== Getting transport key for %s", external_project_id)
    transport_key = self.repo.get(entity_id=self.transport_key_id)
    if not transport_key:
        _transport_key_not_found()

    pecan.override_template('json', 'application/json')
    return transport_key
[ "def", "on_get", "(", "self", ",", "external_project_id", ")", ":", "LOG", ".", "debug", "(", "\"== Getting transport key for %s\"", ",", "external_project_id", ")", "transport_key", "=", "self", ".", "repo", ".", "get", "(", "entity_id", "=", "self", ".", "tr...
https://github.com/openstack/barbican/blob/a9d2b133c8dc3307974f119f9a2b23a4ba82e8ce/barbican/api/controllers/transportkeys.py#L56-L63
twilio/twilio-python
6e1e811ea57a1edfadd5161ace87397c563f6915
twilio/rest/notify/v1/service/binding.py
python
BindingContext.fetch
(self)
return BindingInstance( self._version, payload, service_sid=self._solution['service_sid'], sid=self._solution['sid'], )
Fetch the BindingInstance :returns: The fetched BindingInstance :rtype: twilio.rest.notify.v1.service.binding.BindingInstance
Fetch the BindingInstance
[ "Fetch", "the", "BindingInstance" ]
def fetch(self):
    """
    Fetch the BindingInstance

    :returns: The fetched BindingInstance
    :rtype: twilio.rest.notify.v1.service.binding.BindingInstance
    """
    payload = self._version.fetch(method='GET', uri=self._uri, )

    return BindingInstance(
        self._version,
        payload,
        service_sid=self._solution['service_sid'],
        sid=self._solution['sid'],
    )
[ "def", "fetch", "(", "self", ")", ":", "payload", "=", "self", ".", "_version", ".", "fetch", "(", "method", "=", "'GET'", ",", "uri", "=", "self", ".", "_uri", ",", ")", "return", "BindingInstance", "(", "self", ".", "_version", ",", "payload", ",",...
https://github.com/twilio/twilio-python/blob/6e1e811ea57a1edfadd5161ace87397c563f6915/twilio/rest/notify/v1/service/binding.py#L276-L290
larryhastings/gilectomy
4315ec3f1d6d4f813cc82ce27a24e7f784dbfc1a
Lib/_pydecimal.py
python
Context.logical_or
(self, a, b)
return a.logical_or(b, context=self)
Applies the logical operation 'or' between each operand's digits. The operands must be both logical numbers. >>> ExtendedContext.logical_or(Decimal('0'), Decimal('0')) Decimal('0') >>> ExtendedContext.logical_or(Decimal('0'), Decimal('1')) Decimal('1') >>> ExtendedContext.logical_or(Decimal('1'), Decimal('0')) Decimal('1') >>> ExtendedContext.logical_or(Decimal('1'), Decimal('1')) Decimal('1') >>> ExtendedContext.logical_or(Decimal('1100'), Decimal('1010')) Decimal('1110') >>> ExtendedContext.logical_or(Decimal('1110'), Decimal('10')) Decimal('1110') >>> ExtendedContext.logical_or(110, 1101) Decimal('1111') >>> ExtendedContext.logical_or(Decimal(110), 1101) Decimal('1111') >>> ExtendedContext.logical_or(110, Decimal(1101)) Decimal('1111')
Applies the logical operation 'or' between each operand's digits.
[ "Applies", "the", "logical", "operation", "or", "between", "each", "operand", "s", "digits", "." ]
def logical_or(self, a, b):
    """Applies the logical operation 'or' between each operand's digits.

    The operands must be both logical numbers.

    >>> ExtendedContext.logical_or(Decimal('0'), Decimal('0'))
    Decimal('0')
    >>> ExtendedContext.logical_or(Decimal('0'), Decimal('1'))
    Decimal('1')
    >>> ExtendedContext.logical_or(Decimal('1'), Decimal('0'))
    Decimal('1')
    >>> ExtendedContext.logical_or(Decimal('1'), Decimal('1'))
    Decimal('1')
    >>> ExtendedContext.logical_or(Decimal('1100'), Decimal('1010'))
    Decimal('1110')
    >>> ExtendedContext.logical_or(Decimal('1110'), Decimal('10'))
    Decimal('1110')
    >>> ExtendedContext.logical_or(110, 1101)
    Decimal('1111')
    >>> ExtendedContext.logical_or(Decimal(110), 1101)
    Decimal('1111')
    >>> ExtendedContext.logical_or(110, Decimal(1101))
    Decimal('1111')
    """
    a = _convert_other(a, raiseit=True)
    return a.logical_or(b, context=self)
[ "def", "logical_or", "(", "self", ",", "a", ",", "b", ")", ":", "a", "=", "_convert_other", "(", "a", ",", "raiseit", "=", "True", ")", "return", "a", ".", "logical_or", "(", "b", ",", "context", "=", "self", ")" ]
https://github.com/larryhastings/gilectomy/blob/4315ec3f1d6d4f813cc82ce27a24e7f784dbfc1a/Lib/_pydecimal.py#L4822-L4847
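This mirrors the stdlib `decimal` API, so the doctests above can be reproduced directly:

from decimal import Decimal, ExtendedContext

# Digit-wise 'or' on "logical numbers" (decimals whose digits are all 0 or 1).
print(ExtendedContext.logical_or(Decimal('1100'), Decimal('1010')))  # Decimal('1110')
print(ExtendedContext.logical_or(110, 1101))                         # Decimal('1111')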
yt-project/yt
dc7b24f9b266703db4c843e329c6c8644d47b824
yt/frontends/enzo/io.py
python
IOHandlerPacked2D._read_fluid_selection
(self, chunks, selector, fields, size)
return rv
[]
def _read_fluid_selection(self, chunks, selector, fields, size):
    rv = {}
    # Now we have to do something unpleasant
    chunks = list(chunks)
    if isinstance(selector, GridSelector):
        if not (len(chunks) == len(chunks[0].objs) == 1):
            raise RuntimeError
        g = chunks[0].objs[0]
        f = h5py.File(g.filename, mode="r")
        gds = f.get("/Grid%08i" % g.id)
        for ftype, fname in fields:
            rv[(ftype, fname)] = np.atleast_3d(gds.get(fname)[()].transpose())
        f.close()
        return rv
    if size is None:
        size = sum(g.count(selector) for chunk in chunks for g in chunk.objs)
    for field in fields:
        ftype, fname = field
        fsize = size
        rv[field] = np.empty(fsize, dtype="float64")
    ng = sum(len(c.objs) for c in chunks)
    mylog.debug(
        "Reading %s cells of %s fields in %s grids",
        size,
        [f2 for f1, f2 in fields],
        ng,
    )
    ind = 0
    for chunk in chunks:
        f = None
        for g in chunk.objs:
            if f is None:
                # print("Opening (count) %s" % g.filename)
                f = h5py.File(g.filename, mode="r")
            gds = f.get("/Grid%08i" % g.id)
            if gds is None:
                gds = f
            for field in fields:
                ftype, fname = field
                ds = np.atleast_3d(gds.get(fname)[()].transpose())
                nd = g.select(selector, ds, rv[field], ind)  # caches
            ind += nd
        f.close()
    return rv
[ "def", "_read_fluid_selection", "(", "self", ",", "chunks", ",", "selector", ",", "fields", ",", "size", ")", ":", "rv", "=", "{", "}", "# Now we have to do something unpleasant", "chunks", "=", "list", "(", "chunks", ")", "if", "isinstance", "(", "selector", ...
https://github.com/yt-project/yt/blob/dc7b24f9b266703db4c843e329c6c8644d47b824/yt/frontends/enzo/io.py#L317-L360
petercorke/robotics-toolbox-python
51aa8bbb3663a7c815f9880d538d61e7c85bc470
roboticstoolbox/backends/VPython/common_functions.py
python
close_localhost_session
(canvas)
Terminate the local host session through JavaScript :param canvas: The scene to append the JS to the caption :type canvas: class:`roboticstoolbox.backends.VPython.graphics_canvas.GraphicsCanvas3D`, class:`roboticstoolbox.backends.VPython.graphics_canvas.GraphicsCanvas2D`
Terminate the local host session through JavaScript
[ "Terminate", "the", "local", "host", "session", "through", "JavaScript" ]
def close_localhost_session(canvas):  # pragma nocover
    """
    Terminate the local host session through JavaScript

    :param canvas: The scene to append the JS to the caption
    :type canvas: class:`roboticstoolbox.backends.VPython.graphics_canvas.GraphicsCanvas3D`,
        class:`roboticstoolbox.backends.VPython.graphics_canvas.GraphicsCanvas2D`
    """
    canvas.scene.append_to_caption('''
        <script type="text/javascript">
            close();
        </script>
    ''')
[ "def", "close_localhost_session", "(", "canvas", ")", ":", "# pragma nocover", "canvas", ".", "scene", ".", "append_to_caption", "(", "'''\n <script type=\"text/javascript\">\n close();\n </script>\n '''", ")" ]
https://github.com/petercorke/robotics-toolbox-python/blob/51aa8bbb3663a7c815f9880d538d61e7c85bc470/roboticstoolbox/backends/VPython/common_functions.py#L128-L142
openstack/manila
142990edc027e14839d5deaf4954dd6fc88de15e
manila/scheduler/filters/json.py
python
JsonFilter._equals
(self, args)
return self._op_compare(args, operator.eq)
First term is == all the other terms.
First term is == all the other terms.
[ "First", "term", "is", "==", "all", "the", "other", "terms", "." ]
def _equals(self, args):
    """First term is == all the other terms."""
    return self._op_compare(args, operator.eq)
[ "def", "_equals", "(", "self", ",", "args", ")", ":", "return", "self", ".", "_op_compare", "(", "args", ",", "operator", ".", "eq", ")" ]
https://github.com/openstack/manila/blob/142990edc027e14839d5deaf4954dd6fc88de15e/manila/scheduler/filters/json.py#L42-L44
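`_op_compare` itself is not shown in this record; a plausible reading of "first term is == all the other terms" is a fold like the following (illustrative only, not the manila implementation):

import operator

def op_compare(args, op):
    # Hypothetical stand-in for JsonFilter._op_compare: compare args[0]
    # against every remaining term with the given operator.
    return all(op(args[0], term) for term in args[1:])

print(op_compare([3, 3, 3], operator.eq))  # True
print(op_compare([3, 3, 4], operator.eq))  # False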
Tautulli/Tautulli
2410eb33805aaac4bd1c5dad0f71e4f15afaf742
lib/cheroot/server.py
python
HTTPServer.bind_addr
(self, value)
Set the interface on which to listen for connections.
Set the interface on which to listen for connections.
[ "Set", "the", "interface", "on", "which", "to", "listen", "for", "connections", "." ]
def bind_addr(self, value):
    """Set the interface on which to listen for connections."""
    if isinstance(value, tuple) and value[0] in ('', None):
        # Despite the socket module docs, using '' does not
        # allow AI_PASSIVE to work. Passing None instead
        # returns '0.0.0.0' like we want. In other words:
        #     host    AI_PASSIVE  result
        #      ''         Y       192.168.x.y
        #      ''         N       192.168.x.y
        #     None        Y       0.0.0.0
        #     None        N       127.0.0.1
        # But since you can get the same effect with an explicit
        # '0.0.0.0', we deny both the empty string and None as values.
        raise ValueError(
            "Host values of '' or None are not allowed. "
            "Use '0.0.0.0' (IPv4) or '::' (IPv6) instead "
            'to listen on all active interfaces.',
        )
    self._bind_addr = value
[ "def", "bind_addr", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "tuple", ")", "and", "value", "[", "0", "]", "in", "(", "''", ",", "None", ")", ":", "# Despite the socket module docs, using '' does not", "# allow AI_PASSIVE to w...
https://github.com/Tautulli/Tautulli/blob/2410eb33805aaac4bd1c5dad0f71e4f15afaf742/lib/cheroot/server.py#L1713-L1731
kmpm/nodemcu-uploader
6178f40fff2deadd56b5bc474f9b4475ef444b37
nodemcu_uploader/uploader.py
python
Uploader.close
(self)
restores the nodemcu to default baudrate and then closes the port
restores the nodemcu to default baudrate and then closes the port
[ "restores", "the", "nodemcu", "to", "default", "baudrate", "and", "then", "closes", "the", "port" ]
def close(self):
    """restores the nodemcu to default baudrate and then closes the port"""
    try:
        if self.baud != self.start_baud:
            self.__set_baudrate(self.start_baud)
            self._port.flush()
            self.__clear_buffers()
    except serial.serialutil.SerialException:
        pass
    log.debug('closing port')
    self._port.close()
[ "def", "close", "(", "self", ")", ":", "try", ":", "if", "self", ".", "baud", "!=", "self", ".", "start_baud", ":", "self", ".", "__set_baudrate", "(", "self", ".", "start_baud", ")", "self", ".", "_port", ".", "flush", "(", ")", "self", ".", "__cl...
https://github.com/kmpm/nodemcu-uploader/blob/6178f40fff2deadd56b5bc474f9b4475ef444b37/nodemcu_uploader/uploader.py#L182-L192
KalleHallden/AutoTimer
2d954216700c4930baa154e28dbddc34609af7ce
env/lib/python2.7/site-packages/pip/_internal/req/constructors.py
python
install_req_from_req_string
( req_string, # type: str comes_from=None, # type: Optional[InstallRequirement] isolated=False, # type: bool wheel_cache=None, # type: Optional[WheelCache] use_pep517=None # type: Optional[bool] )
return InstallRequirement( req, comes_from, isolated=isolated, wheel_cache=wheel_cache, use_pep517=use_pep517 )
[]
def install_req_from_req_string(
    req_string,  # type: str
    comes_from=None,  # type: Optional[InstallRequirement]
    isolated=False,  # type: bool
    wheel_cache=None,  # type: Optional[WheelCache]
    use_pep517=None  # type: Optional[bool]
):
    # type: (...) -> InstallRequirement
    try:
        req = Requirement(req_string)
    except InvalidRequirement:
        raise InstallationError("Invalid requirement: '%s'" % req_string)

    domains_not_allowed = [
        PyPI.file_storage_domain,
        TestPyPI.file_storage_domain,
    ]
    if (req.url and comes_from and comes_from.link and
            comes_from.link.netloc in domains_not_allowed):
        # Explicitly disallow pypi packages that depend on external urls
        raise InstallationError(
            "Packages installed from PyPI cannot depend on packages "
            "which are not also hosted on PyPI.\n"
            "%s depends on %s " % (comes_from.name, req)
        )

    return InstallRequirement(
        req, comes_from, isolated=isolated, wheel_cache=wheel_cache,
        use_pep517=use_pep517
    )
[ "def", "install_req_from_req_string", "(", "req_string", ",", "# type: str", "comes_from", "=", "None", ",", "# type: Optional[InstallRequirement]", "isolated", "=", "False", ",", "# type: bool", "wheel_cache", "=", "None", ",", "# type: Optional[WheelCache]", "use_pep517",...
https://github.com/KalleHallden/AutoTimer/blob/2d954216700c4930baa154e28dbddc34609af7ce/env/lib/python2.7/site-packages/pip/_internal/req/constructors.py#L320-L349
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_flaskbb/lib/python2.7/site-packages/sqlalchemy/ext/associationproxy.py
python
AssociationProxy._inflate
(self, proxy)
[]
def _inflate(self, proxy): creator = self.creator and self.creator or self.target_class if self.getset_factory: getter, setter = self.getset_factory(self.collection_class, self) else: getter, setter = self._default_getset(self.collection_class) proxy.creator = creator proxy.getter = getter proxy.setter = setter
[ "def", "_inflate", "(", "self", ",", "proxy", ")", ":", "creator", "=", "self", ".", "creator", "and", "self", ".", "creator", "or", "self", ".", "target_class", "if", "self", ".", "getset_factory", ":", "getter", ",", "setter", "=", "self", ".", "gets...
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_flaskbb/lib/python2.7/site-packages/sqlalchemy/ext/associationproxy.py#L373-L383
jina-ai/jina
c77a492fcd5adba0fc3de5347bea83dd4e7d8087
docarray/memmap/__init__.py
python
DocumentArrayMemmap._load_header_body
(self, mode: str = 'a')
[]
def _load_header_body(self, mode: str = 'a'): if hasattr(self, '_header'): self._header.close() if hasattr(self, '_body'): self._body.close() open(self._header_path, mode).close() open(self._body_path, mode).close() self._header = open(self._header_path, 'r+b') self._body = open(self._body_path, 'r+b') tmp = np.frombuffer( self._header.read(), dtype=[ ('', (np.str_, self._key_length)), # key_length x 4 bytes ('', np.int64), # 8 bytes ('', np.int64), # 8 bytes ('', np.int64), # 8 bytes ], ) self._header_entry_size = 24 + 4 * self._key_length self._last_header_entry = len(tmp) self._header_map = OrderedDict() for idx, r in enumerate(tmp): if not np.array_equal((r[1], r[2], r[3]), _HEADER_NONE_ENTRY): self._header_map[r[0]] = (idx, r[1], r[2], r[3]) self._header_keys = list(self._header_map.keys()) self._body_fileno = self._body.fileno() self._start = 0 if self._header_map: self._start = tmp[-1][1] + tmp[-1][3] self._body.seek(self._start) self._last_mmap = None
[ "def", "_load_header_body", "(", "self", ",", "mode", ":", "str", "=", "'a'", ")", ":", "if", "hasattr", "(", "self", ",", "'_header'", ")", ":", "self", ".", "_header", ".", "close", "(", ")", "if", "hasattr", "(", "self", ",", "'_body'", ")", ":"...
https://github.com/jina-ai/jina/blob/c77a492fcd5adba0fc3de5347bea83dd4e7d8087/docarray/memmap/__init__.py#L113-L149
cronyo/cronyo
cd5abab0871b68bf31b18aac934303928130a441
cronyo/vendor/requests/cookies.py
python
cookiejar_from_dict
(cookie_dict, cookiejar=None, overwrite=True)
return cookiejar
Returns a CookieJar from a key/value dictionary. :param cookie_dict: Dict of key/values to insert into CookieJar. :param cookiejar: (optional) A cookiejar to add the cookies to. :param overwrite: (optional) If False, will not replace cookies already in the jar with new ones. :rtype: CookieJar
Returns a CookieJar from a key/value dictionary.
[ "Returns", "a", "CookieJar", "from", "a", "key", "/", "value", "dictionary", "." ]
def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True): """Returns a CookieJar from a key/value dictionary. :param cookie_dict: Dict of key/values to insert into CookieJar. :param cookiejar: (optional) A cookiejar to add the cookies to. :param overwrite: (optional) If False, will not replace cookies already in the jar with new ones. :rtype: CookieJar """ if cookiejar is None: cookiejar = RequestsCookieJar() if cookie_dict is not None: names_from_jar = [cookie.name for cookie in cookiejar] for name in cookie_dict: if overwrite or (name not in names_from_jar): cookiejar.set_cookie(create_cookie(name, cookie_dict[name])) return cookiejar
[ "def", "cookiejar_from_dict", "(", "cookie_dict", ",", "cookiejar", "=", "None", ",", "overwrite", "=", "True", ")", ":", "if", "cookiejar", "is", "None", ":", "cookiejar", "=", "RequestsCookieJar", "(", ")", "if", "cookie_dict", "is", "not", "None", ":", ...
https://github.com/cronyo/cronyo/blob/cd5abab0871b68bf31b18aac934303928130a441/cronyo/vendor/requests/cookies.py#L508-L526
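A minimal usage sketch for the record above, assuming the `requests` package is installed; `cookiejar_from_dict` is exposed from `requests.cookies` with exactly this signature.

```python
import requests.cookies as rc

# Build a jar from a plain dict, then merge a second dict without overwriting.
jar = rc.cookiejar_from_dict({"session": "abc123"})
jar = rc.cookiejar_from_dict({"session": "IGNORED", "theme": "dark"},
                             cookiejar=jar, overwrite=False)
print({c.name: c.value for c in jar})  # {'session': 'abc123', 'theme': 'dark'}
```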
ehmatthes/pcc
f555082df0f8268b8f269e59a99da8ed5013f749
chapter_12/game_functions.py
python
update_bullets
(bullets)
Update position of bullets, and get rid of old bullets.
Update position of bullets, and get rid of old bullets.
[ "Update", "position", "of", "bullets", "and", "get", "rid", "of", "old", "bullets", "." ]
def update_bullets(bullets): """Update position of bullets, and get rid of old bullets.""" # Update bullet positions. bullets.update() # Get rid of bullets that have disappeared. for bullet in bullets.copy(): if bullet.rect.bottom <= 0: bullets.remove(bullet)
[ "def", "update_bullets", "(", "bullets", ")", ":", "# Update bullet positions.", "bullets", ".", "update", "(", ")", "# Get rid of bullets that have disappeared.", "for", "bullet", "in", "bullets", ".", "copy", "(", ")", ":", "if", "bullet", ".", "rect", ".", "b...
https://github.com/ehmatthes/pcc/blob/f555082df0f8268b8f269e59a99da8ed5013f749/chapter_12/game_functions.py#L53-L61
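A minimal sketch exercising the function above, assuming pygame is installed and `update_bullets` is in scope; `Bullet` here is a hypothetical stand-in sprite, not the book's class.

```python
import pygame

class Bullet(pygame.sprite.Sprite):
    """Hypothetical minimal bullet: moves up 5 px per update()."""
    def __init__(self, y):
        super().__init__()
        self.rect = pygame.Rect(100, y, 3, 15)

    def update(self):
        self.rect.y -= 5

bullets = pygame.sprite.Group(Bullet(-20), Bullet(200))
update_bullets(bullets)  # the off-screen bullet's bottom is now <= 0
print(len(bullets))      # 1
```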
phonopy/phonopy
816586d0ba8177482ecf40e52f20cbdee2260d51
phonopy/structure/tetrahedron_method.py
python
get_all_tetrahedra_relative_grid_address
()
return relative_grid_address
Return relative grid addresses dataset. This exists only for the test.
Return relative grid addresses dataset.
[ "Return", "relative", "grid", "addresses", "dataset", "." ]
def get_all_tetrahedra_relative_grid_address(): """Return relative grid addresses dataset. This exists only for the test. """ relative_grid_address = np.zeros((4, 24, 4, 3), dtype="int_") phonoc.all_tetrahedra_relative_grid_address(relative_grid_address) return relative_grid_address
[ "def", "get_all_tetrahedra_relative_grid_address", "(", ")", ":", "relative_grid_address", "=", "np", ".", "zeros", "(", "(", "4", ",", "24", ",", "4", ",", "3", ")", ",", "dtype", "=", "\"int_\"", ")", "phonoc", ".", "all_tetrahedra_relative_grid_address", "(...
https://github.com/phonopy/phonopy/blob/816586d0ba8177482ecf40e52f20cbdee2260d51/phonopy/structure/tetrahedron_method.py#L80-L89
pyparallel/pyparallel
11e8c6072d48c8f13641925d17b147bf36ee0ba3
Lib/site-packages/numpy-1.10.0.dev0_046311a-py3.3-win-amd64.egg/numpy/polynomial/_polybase.py
python
ABCPolyBase.integ
(self, m=1, k=[], lbnd=None)
return self.__class__(coef, self.domain, self.window)
Integrate. Return a series instance that is the definite integral of the current series. Parameters ---------- m : non-negative int The number of integrations to perform. k : array_like Integration constants. The first constant is applied to the first integration, the second to the second, and so on. The list of values must be less than or equal to `m` in length and any missing values are set to zero. lbnd : Scalar The lower bound of the definite integral. Returns ------- new_series : series A new series representing the integral. The domain is the same as the domain of the integrated series.
Integrate.
[ "Integrate", "." ]
def integ(self, m=1, k=[], lbnd=None): """Integrate. Return a series instance that is the definite integral of the current series. Parameters ---------- m : non-negative int The number of integrations to perform. k : array_like Integration constants. The first constant is applied to the first integration, the second to the second, and so on. The list of values must be less than or equal to `m` in length and any missing values are set to zero. lbnd : Scalar The lower bound of the definite integral. Returns ------- new_series : series A new series representing the integral. The domain is the same as the domain of the integrated series. """ off, scl = self.mapparms() if lbnd is None: lbnd = 0 else: lbnd = off + scl*lbnd coef = self._int(self.coef, m, k, lbnd, 1./scl) return self.__class__(coef, self.domain, self.window)
[ "def", "integ", "(", "self", ",", "m", "=", "1", ",", "k", "=", "[", "]", ",", "lbnd", "=", "None", ")", ":", "off", ",", "scl", "=", "self", ".", "mapparms", "(", ")", "if", "lbnd", "is", "None", ":", "lbnd", "=", "0", "else", ":", "lbnd",...
https://github.com/pyparallel/pyparallel/blob/11e8c6072d48c8f13641925d17b147bf36ee0ba3/Lib/site-packages/numpy-1.10.0.dev0_046311a-py3.3-win-amd64.egg/numpy/polynomial/_polybase.py#L622-L653
googleads/google-ads-python
2a1d6062221f6aad1992a6bcca0e7e4a93d2db86
google/ads/googleads/v8/services/services/change_status_service/transports/grpc.py
python
ChangeStatusServiceGrpcTransport.get_change_status
( self, )
return self._stubs["get_change_status"]
r"""Return a callable for the get change status method over gRPC. Returns the requested change status in full detail. List of thrown errors: `AuthenticationError <>`__ `AuthorizationError <>`__ `HeaderError <>`__ `InternalError <>`__ `QuotaError <>`__ `RequestError <>`__ Returns: Callable[[~.GetChangeStatusRequest], ~.ChangeStatus]: A function that, when called, will call the underlying RPC on the server.
r"""Return a callable for the get change status method over gRPC.
[ "r", "Return", "a", "callable", "for", "the", "get", "change", "status", "method", "over", "gRPC", "." ]
def get_change_status( self, ) -> Callable[ [change_status_service.GetChangeStatusRequest], change_status.ChangeStatus, ]: r"""Return a callable for the get change status method over gRPC. Returns the requested change status in full detail. List of thrown errors: `AuthenticationError <>`__ `AuthorizationError <>`__ `HeaderError <>`__ `InternalError <>`__ `QuotaError <>`__ `RequestError <>`__ Returns: Callable[[~.GetChangeStatusRequest], ~.ChangeStatus]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_change_status" not in self._stubs: self._stubs["get_change_status"] = self.grpc_channel.unary_unary( "/google.ads.googleads.v8.services.ChangeStatusService/GetChangeStatus", request_serializer=change_status_service.GetChangeStatusRequest.serialize, response_deserializer=change_status.ChangeStatus.deserialize, ) return self._stubs["get_change_status"]
[ "def", "get_change_status", "(", "self", ",", ")", "->", "Callable", "[", "[", "change_status_service", ".", "GetChangeStatusRequest", "]", ",", "change_status", ".", "ChangeStatus", ",", "]", ":", "# Generate a \"stub function\" on-the-fly which will actually make", "# t...
https://github.com/googleads/google-ads-python/blob/2a1d6062221f6aad1992a6bcca0e7e4a93d2db86/google/ads/googleads/v8/services/services/change_status_service/transports/grpc.py#L213-L243
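The transport above lazily creates and caches one stub per RPC; a toy stand-in of that caching pattern, with no grpc dependency (all names here are hypothetical):

```python
class StubCache:
    """Hypothetical stand-in: build each RPC callable once, on first use."""
    def __init__(self, channel):
        self._channel = channel
        self._stubs = {}

    def get_change_status(self):
        if "get_change_status" not in self._stubs:
            # The real transport calls self.grpc_channel.unary_unary(path, ...)
            self._stubs["get_change_status"] = lambda req: "status(%s)" % req
        return self._stubs["get_change_status"]

cache = StubCache(channel=None)
rpc = cache.get_change_status()
print(rpc("customers/123"))               # status(customers/123)
print(rpc is cache.get_change_status())   # True -- cached, not rebuilt
```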
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_hxb2/lib/python3.5/site-packages/PIL/FliImagePlugin.py
python
FliImageFile.n_frames
(self)
return self.__framecount
[]
def n_frames(self): return self.__framecount
[ "def", "n_frames", "(", "self", ")", ":", "return", "self", ".", "__framecount" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/PIL/FliImagePlugin.py#L113-L114
caktux/pytrader
b45b216dab3db78d6028d85e9a6f80419c22cea0
exchanges/kraken.py
python
PollClient.request_lag
(self)
Request server time to calculate lag
Request server time to calculate lag
[ "Request", "server", "time", "to", "calculate", "lag" ]
def request_lag(self): """Request server time to calculate lag""" def lag_thread(): json_time = http_request("%s://%s/0/public/Time" % ( self.proto, HTTP_HOST )) if not self._terminating: try: answer = json.loads(json_time) if not answer["error"]: lag = time.time() - answer['result']['unixtime'] result = { 'lag': lag * 1000, 'lag_text': "%0.3f s" % lag } translated = { "op": "result", "result": result, "id": "order_lag" } self.signal_recv(self, (json.dumps(translated))) except Exception as exc: self.debug("### exception in lag_thread:", exc) start_thread(lag_thread, "http request lag")
[ "def", "request_lag", "(", "self", ")", ":", "def", "lag_thread", "(", ")", ":", "json_time", "=", "http_request", "(", "\"%s://%s/0/public/Time\"", "%", "(", "self", ".", "proto", ",", "HTTP_HOST", ")", ")", "if", "not", "self", ".", "_terminating", ":", ...
https://github.com/caktux/pytrader/blob/b45b216dab3db78d6028d85e9a6f80419c22cea0/exchanges/kraken.py#L205-L230
zedshaw/lamson
8a8ad546ea746b129fa5f069bf9278f87d01473a
lamson/mail.py
python
MailRequest.all_parts
(self)
return self.base.parts
Returns all multipart mime parts. This could be an empty list.
Returns all multipart mime parts. This could be an empty list.
[ "Returns", "all", "multipart", "mime", "parts", ".", "This", "could", "be", "an", "empty", "list", "." ]
def all_parts(self): """Returns all multipart mime parts. This could be an empty list.""" return self.base.parts
[ "def", "all_parts", "(", "self", ")", ":", "return", "self", ".", "base", ".", "parts" ]
https://github.com/zedshaw/lamson/blob/8a8ad546ea746b129fa5f069bf9278f87d01473a/lamson/mail.py#L81-L83
CastagnaIT/plugin.video.netflix
5cf5fa436eb9956576c0f62aa31a4c7d6c5b8a4a
packages/h2/utilities.py
python
_secure_headers
(headers, hdr_validation_flags)
Certain headers are at risk of being attacked during the header compression phase, and so need to be kept out of header compression contexts. This function automatically transforms certain specific headers into HPACK never-indexed fields to ensure they don't get added to header compression contexts. This function currently implements two rules: - 'authorization' and 'proxy-authorization' fields are automatically made never-indexed. - Any 'cookie' header field shorter than 20 bytes long is made never-indexed. These fields are the most at-risk. These rules are inspired by Firefox and nghttp2.
Certain headers are at risk of being attacked during the header compression phase, and so need to be kept out of header compression contexts. This function automatically transforms certain specific headers into HPACK never-indexed fields to ensure they don't get added to header compression contexts.
[ "Certain", "headers", "are", "at", "risk", "of", "being", "attacked", "during", "the", "header", "compression", "phase", "and", "so", "need", "to", "be", "kept", "out", "of", "header", "compression", "contexts", ".", "This", "function", "automatically", "trans...
def _secure_headers(headers, hdr_validation_flags): """ Certain headers are at risk of being attacked during the header compression phase, and so need to be kept out of header compression contexts. This function automatically transforms certain specific headers into HPACK never-indexed fields to ensure they don't get added to header compression contexts. This function currently implements two rules: - 'authorization' and 'proxy-authorization' fields are automatically made never-indexed. - Any 'cookie' header field shorter than 20 bytes long is made never-indexed. These fields are the most at-risk. These rules are inspired by Firefox and nghttp2. """ for header in headers: if header[0] in _SECURE_HEADERS: yield NeverIndexedHeaderTuple(*header) elif header[0] in (b'cookie', u'cookie') and len(header[1]) < 20: yield NeverIndexedHeaderTuple(*header) else: yield header
[ "def", "_secure_headers", "(", "headers", ",", "hdr_validation_flags", ")", ":", "for", "header", "in", "headers", ":", "if", "header", "[", "0", "]", "in", "_SECURE_HEADERS", ":", "yield", "NeverIndexedHeaderTuple", "(", "*", "header", ")", "elif", "header", ...
https://github.com/CastagnaIT/plugin.video.netflix/blob/5cf5fa436eb9956576c0f62aa31a4c7d6c5b8a4a/packages/h2/utilities.py#L66-L90
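A self-contained sketch of the same two rules, with a namedtuple standing in for hpack's never-indexed header tuple (the real generator yields `NeverIndexedHeaderTuple` instances):

```python
from collections import namedtuple

# Stand-in for hpack's NeverIndexedHeaderTuple (an assumption of this sketch).
NeverIndexed = namedtuple("NeverIndexed", "name value")
SECURE = {b"authorization", b"proxy-authorization"}

def secure_headers(headers):
    for name, value in headers:
        if name in SECURE or (name == b"cookie" and len(value) < 20):
            yield NeverIndexed(name, value)  # keep out of the HPACK dynamic table
        else:
            yield (name, value)

hdrs = [(b"authorization", b"Bearer x"), (b"cookie", b"sid=1"),
        (b"cookie", b"a" * 40), (b"accept", b"*/*")]
for h in secure_headers(hdrs):
    print(type(h).__name__, h[0].decode())
# NeverIndexed authorization / NeverIndexed cookie / tuple cookie / tuple accept
```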
Ultimaker/Cura
a1622c77ea7259ecb956acd6de07b7d34b7ac52b
plugins/CuraEngineBackend/CuraEngineBackend.py
python
CuraEngineBackend._onBackendConnected
(self)
Called when the back-end connects to the front-end.
Called when the back-end connects to the front-end.
[ "Called", "when", "the", "back", "-", "end", "connects", "to", "the", "front", "-", "end", "." ]
def _onBackendConnected(self) -> None: """Called when the back-end connects to the front-end.""" if self._restart: self._restart = False self._onChanged()
[ "def", "_onBackendConnected", "(", "self", ")", "->", "None", ":", "if", "self", ".", "_restart", ":", "self", ".", "_restart", "=", "False", "self", ".", "_onChanged", "(", ")" ]
https://github.com/Ultimaker/Cura/blob/a1622c77ea7259ecb956acd6de07b7d34b7ac52b/plugins/CuraEngineBackend/CuraEngineBackend.py#L880-L885
khanhnamle1994/natural-language-processing
01d450d5ac002b0156ef4cf93a07cb508c1bcdc5
assignment1/.env/lib/python2.7/site-packages/pip/_vendor/lockfile/__init__.py
python
MkdirFileLock
(*args, **kwds)
return _fl_helper(mkdirlockfile.MkdirLockFile, "lockfile.mkdirlockfile", *args, **kwds)
Factory function provided for backwards compatibility. Do not use in new code. Instead, import MkdirLockFile from the lockfile.mkdirlockfile module.
Factory function provided for backwards compatibility.
[ "Factory", "function", "provided", "for", "backwards", "compatibility", "." ]
def MkdirFileLock(*args, **kwds): """Factory function provided for backwards compatibility. Do not use in new code. Instead, import MkdirLockFile from the lockfile.mkdirlockfile module. """ from . import mkdirlockfile return _fl_helper(mkdirlockfile.MkdirLockFile, "lockfile.mkdirlockfile", *args, **kwds)
[ "def", "MkdirFileLock", "(", "*", "args", ",", "*", "*", "kwds", ")", ":", "from", ".", "import", "mkdirlockfile", "return", "_fl_helper", "(", "mkdirlockfile", ".", "MkdirLockFile", ",", "\"lockfile.mkdirlockfile\"", ",", "*", "args", ",", "*", "*", "kwds",...
https://github.com/khanhnamle1994/natural-language-processing/blob/01d450d5ac002b0156ef4cf93a07cb508c1bcdc5/assignment1/.env/lib/python2.7/site-packages/pip/_vendor/lockfile/__init__.py#L293-L301
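A minimal usage sketch, assuming the `lockfile` package is installed; `MkdirLockFile` relies on `mkdir()` being atomic, and its `LockBase` parent supports the context-manager protocol.

```python
from lockfile.mkdirlockfile import MkdirLockFile

lock = MkdirLockFile("/tmp/demo-resource")  # hypothetical path to protect
with lock:
    print(lock.is_locked())  # True -- the .lock directory exists
print(lock.is_locked())      # False -- released on exiting the block
```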
google/capirca
679e3885e3a5e5e129dc2dfab204ec44d63b26a4
capirca/lib/policy.py
python
ParsePolicy
(data, definitions=None, optimize=True, base_dir='', shade_check=False, filename='')
Parse the policy in 'data', optionally provide a naming object. Parse a blob of policy text into a policy object. Args: data: a string blob of policy data to parse. definitions: optional naming library definitions object. optimize: bool - whether to summarize networks and services. base_dir: base path string to look for acls or include files. shade_check: bool - whether to raise an exception when a term is shaded. filename: string - filename used by the policy. Returns: policy object or False (if parse error).
Parse the policy in 'data', optionally provide a naming object.
[ "Parse", "the", "policy", "in", "data", "optionally", "provide", "a", "naming", "object", "." ]
def ParsePolicy(data, definitions=None, optimize=True, base_dir='', shade_check=False, filename=''): """Parse the policy in 'data', optionally provide a naming object. Parse a blob of policy text into a policy object. Args: data: a string blob of policy data to parse. definitions: optional naming library definitions object. optimize: bool - whether to summarize networks and services. base_dir: base path string to look for acls or include files. shade_check: bool - whether to raise an exception when a term is shaded. filename: string - filename used by the policy. Returns: policy object or False (if parse error). """ try: if definitions: globals()['DEFINITIONS'] = definitions else: globals()['DEFINITIONS'] = naming.Naming(DEFAULT_DEFINITIONS) globals()['_OPTIMIZE'] = optimize globals()['_SHADE_CHECK'] = shade_check lexer = lex.lex() preprocessed_data = '\n'.join(_Preprocess(data, base_dir=base_dir)) global parser policy = parser.parse(preprocessed_data, lexer=lexer) policy.filename = filename return policy except IndexError: return False
[ "def", "ParsePolicy", "(", "data", ",", "definitions", "=", "None", ",", "optimize", "=", "True", ",", "base_dir", "=", "''", ",", "shade_check", "=", "False", ",", "filename", "=", "''", ")", ":", "try", ":", "if", "definitions", ":", "globals", "(", ...
https://github.com/google/capirca/blob/679e3885e3a5e5e129dc2dfab204ec44d63b26a4/capirca/lib/policy.py#L2573-L2607
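Typical usage, sketched from the signature above; the paths are hypothetical and assume a capirca naming-definitions directory.

```python
from capirca.lib import naming, policy

definitions = naming.Naming('./def')        # hypothetical definitions dir
with open('./policies/sample.pol') as f:    # hypothetical policy file
    pol = policy.ParsePolicy(f.read(), definitions, optimize=True,
                             filename='sample.pol')
if pol:
    print(pol.filename)  # 'sample.pol' -- set by ParsePolicy on success
```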
jliljebl/flowblade
995313a509b80e99eb1ad550d945bdda5995093b
flowblade-trunk/Flowblade/propertyedit.py
python
ColorProperty.get_value_rgba
(self)
return (float(raw_r)/255.0, float(raw_g)/255.0, float(raw_b)/255.0, 1.0)
[]
def get_value_rgba(self): raw_r, raw_g, raw_b = utils.hex_to_rgb(self.value) return (float(raw_r)/255.0, float(raw_g)/255.0, float(raw_b)/255.0, 1.0)
[ "def", "get_value_rgba", "(", "self", ")", ":", "raw_r", ",", "raw_g", ",", "raw_b", "=", "utils", ".", "hex_to_rgb", "(", "self", ".", "value", ")", "return", "(", "float", "(", "raw_r", ")", "/", "255.0", ",", "float", "(", "raw_g", ")", "/", "25...
https://github.com/jliljebl/flowblade/blob/995313a509b80e99eb1ad550d945bdda5995093b/flowblade-trunk/Flowblade/propertyedit.py#L807-L809
microsoft/nni
31f11f51249660930824e888af0d4e022823285c
nni/compression/pytorch/utils/shape_dependency.py
python
GroupDependency._get_parent_convs
(self, node)
return parent_layers
Find the nearest parent conv layers for the target node. Parameters --------- node : torch._C.Node target node. Returns ------- parent_layers : list nearest parent conv layers for the target node. Because group dependency only exists between conv layers, we only look for parent conv layers.
Find the nearest parent conv layers for the target node. Parameters --------- node : torch._C.Node target node. Returns ------- parent_layers : list nearest parent conv layers for the target node. Because group dependency only exists between conv layers, we only look for parent conv layers.
[ "Find", "the", "nearest", "father", "conv", "layers", "for", "the", "target", "node", ".", "Parameters", "---------", "node", ":", "torch", ".", "_C", ".", "Node", "target", "node", ".", "Returns", "-------", "parent_layers", ":", "list", "nearest", "father"...
def _get_parent_convs(self, node): """ Find the nearest parent conv layers for the target node. Parameters --------- node : torch._C.Node target node. Returns ------- parent_layers : list nearest parent conv layers for the target node. Because group dependency only exists between conv layers, we only look for parent conv layers. """ parent_layers = [] # the input node is a Conv node predeessors = self.graph.find_predecessors(node.unique_name) predeessors = [self.graph.name_to_node[x] for x in predeessors] queue = predeessors while queue: curnode = queue.pop(0) if curnode.op_type == 'Conv2d' or curnode.op_type == 'ConvTranspose2d': # find the first conv encountered parent_layers.append(curnode.name) continue parents = self.graph.find_predecessors(curnode.unique_name) parents = [self.graph.name_to_node[name] for name in parents] for parent in parents: queue.append(parent) return parent_layers
[ "def", "_get_parent_convs", "(", "self", ",", "node", ")", ":", "parent_layers", "=", "[", "]", "# the input node is a Conv node", "predeessors", "=", "self", ".", "graph", ".", "find_predecessors", "(", "node", ".", "unique_name", ")", "predeessors", "=", "[", ...
https://github.com/microsoft/nni/blob/31f11f51249660930824e888af0d4e022823285c/nni/compression/pytorch/utils/shape_dependency.py#L349-L378
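A toy version of the BFS above on plain dicts, with the graph object replaced by a predecessor map (all names hypothetical): each branch of the upstream walk stops at the first conv it meets.

```python
preds = {"relu2": ["conv2"], "conv2": ["add1"], "add1": ["conv1", "pool1"],
         "pool1": ["conv0"], "conv1": [], "conv0": []}
op_type = {"conv0": "Conv2d", "conv1": "Conv2d", "conv2": "Conv2d",
           "add1": "Add", "pool1": "MaxPool2d", "relu2": "ReLU"}

def parent_convs(node):
    parents, queue = [], list(preds[node])
    while queue:
        cur = queue.pop(0)
        if op_type[cur] == "Conv2d":
            parents.append(cur)   # first conv on this branch; stop walking here
            continue
        queue.extend(preds[cur])
    return parents

print(parent_convs("relu2"))  # ['conv2']
print(parent_convs("conv2"))  # ['conv1', 'conv0'] -- both sides of the add
```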
timonwong/OmniMarkupPreviewer
21921ac7a99d2b5924a2219b33679a5b53621392
OmniMarkupLib/Renderers/libs/python3/creoleparser/core.py
python
Parser.preprocess
(self,text)
return text
This should generally be called before fragmentize(). :parameter text: text to be processed.
This should generally be called before fragmentize().
[ "This", "should", "generally", "be", "called", "before", "fragmentize", "()", "." ]
def preprocess(self,text): """This should generally be called before fragmentize(). :parameter text: text to be processed. """ text = text.replace("\r\n", "\n") text = text.replace("\r", "\n") return text
[ "def", "preprocess", "(", "self", ",", "text", ")", ":", "text", "=", "text", ".", "replace", "(", "\"\\r\\n\"", ",", "\"\\n\"", ")", "text", "=", "text", ".", "replace", "(", "\"\\r\"", ",", "\"\\n\"", ")", "return", "text" ]
https://github.com/timonwong/OmniMarkupPreviewer/blob/21921ac7a99d2b5924a2219b33679a5b53621392/OmniMarkupLib/Renderers/libs/python3/creoleparser/core.py#L129-L138
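The two `replace` calls normalize all three newline conventions; the order matters, since handling `\r` first would turn `\r\n` into two newlines. For example:

```python
text = "unix\nwindows\r\nold mac\rend"
text = text.replace("\r\n", "\n").replace("\r", "\n")
print(text.splitlines())  # ['unix', 'windows', 'old mac', 'end']
```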
pypa/pipenv
b21baade71a86ab3ee1429f71fbc14d4f95fb75d
pipenv/patched/notpip/_vendor/pyparsing.py
python
Empty.__init__
(self)
[]
def __init__(self): super(Empty, self).__init__() self.name = "Empty" self.mayReturnEmpty = True self.mayIndexError = False
[ "def", "__init__", "(", "self", ")", ":", "super", "(", "Empty", ",", "self", ")", ".", "__init__", "(", ")", "self", ".", "name", "=", "\"Empty\"", "self", ".", "mayReturnEmpty", "=", "True", "self", ".", "mayIndexError", "=", "False" ]
https://github.com/pypa/pipenv/blob/b21baade71a86ab3ee1429f71fbc14d4f95fb75d/pipenv/patched/notpip/_vendor/pyparsing.py#L2835-L2839
wucng/TensorExpand
4ea58f64f5c5082b278229b799c9f679536510b7
TensorExpand/Object detection/Data_interface/Labelme/labelme_draw_json.py
python
main
()
[]
def main(): parser = argparse.ArgumentParser() parser.add_argument('json_file') args = parser.parse_args() json_file = args.json_file data = json.load(open(json_file)) # load the json file img = utils.img_b64_to_array(data['imageData']) # decode the original image data lbl, lbl_names = utils.labelme_shapes_to_label(img.shape, data[ 'shapes']) # parse the fields in 'shapes': extract each object's mask and label; lbl stores the masks, lbl_names stores the corresponding labels # lbl pixel values are 0, 1, 2, where 0 is the background, 1 the first object and 2 the second object # use this approach to extract each object's mask mask=[] mask.append((lbl==1).astype(np.uint8)) # extract the object whose pixel value is 1 (the first object); the mask is made of 0s and 1s (0 = background, 1 = object) # lbl_names ['background','cat_1','cat_2'] captions = ['%d: %s' % (l, name) for l, name in enumerate(lbl_names)] lbl_viz = utils.draw_label(lbl, img, captions) plt.subplot(121) plt.imshow(img) plt.subplot(122) plt.imshow(lbl_viz) plt.show()
[ "def", "main", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", ")", "parser", ".", "add_argument", "(", "'json_file'", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "json_file", "=", "args", ".", "json_file", "data", "=",...
https://github.com/wucng/TensorExpand/blob/4ea58f64f5c5082b278229b799c9f679536510b7/TensorExpand/Object detection/Data_interface/Labelme/labelme_draw_json.py#L11-L34
cltk/cltk
1a8c2f5ef72389e2579dfce1fa5af8e59ebc9ec1
src/cltk/utils/file_operations.py
python
open_pickle
(path: str)
Open a pickle and return loaded pickle object. :type path: str :param path: File path to pickle file to be opened. :rtype: object
Open a pickle and return loaded pickle object. :type path: str :param path: File path to pickle file to be opened. :rtype: object
[ "Open", "a", "pickle", "and", "return", "loaded", "pickle", "object", ".", ":", "type", "path", ":", "str", ":", "param", ":", "path", ":", "File", "path", "to", "pickle", "file", "to", "be", "opened", ".", ":", "rtype", ":", "object" ]
def open_pickle(path: str) -> Any: """Open a pickle and return loaded pickle object. :type path: str :param path: File path to pickle file to be opened. :rtype: object """ try: with open(path, "rb") as opened_pickle: try: return pickle.load(opened_pickle) except Exception as pickle_error: logger.error(pickle_error) raise except FileNotFoundError as fnf_error: logger.error(fnf_error) raise except IOError as io_err: logger.error(io_err) raise except EOFError as eof_error: logger.error(eof_error) raise except pickle.UnpicklingError as unp_error: logger.error(unp_error) raise
[ "def", "open_pickle", "(", "path", ":", "str", ")", "->", "Any", ":", "try", ":", "with", "open", "(", "path", ",", "\"rb\"", ")", "as", "opened_pickle", ":", "try", ":", "return", "pickle", ".", "load", "(", "opened_pickle", ")", "except", "Exception"...
https://github.com/cltk/cltk/blob/1a8c2f5ef72389e2579dfce1fa5af8e59ebc9ec1/src/cltk/utils/file_operations.py#L38-L62
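A round-trip sketch of the happy path; the original function only adds logging around each failure mode before re-raising.

```python
import os
import pickle
import tempfile

path = os.path.join(tempfile.gettempdir(), "demo.pickle")  # hypothetical file
with open(path, "wb") as f:
    pickle.dump({"answer": 42}, f)

with open(path, "rb") as f:  # mirrors open_pickle's successful branch
    print(pickle.load(f))    # {'answer': 42}
```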
TropComplique/FaceBoxes-tensorflow
0dde35eda1cb3dab6586b94c583029162ec37aa5
src/input_pipeline/pipeline.py
python
Pipeline.__init__
(self, filenames, batch_size, image_size, repeat=False, shuffle=False, augmentation=False)
Note: when evaluating set batch_size to 1. Arguments: filenames: a list of strings, paths to tfrecords files. batch_size: an integer. image_size: a list with two integers [width, height] or None, images of this size will be in a batch. If value is None then images will not be resized. In this case batch size must be 1. repeat: a boolean, whether repeat indefinitely. shuffle: whether to shuffle the dataset. augmentation: whether to do data augmentation.
Note: when evaluating set batch_size to 1.
[ "Note", ":", "when", "evaluating", "set", "batch_size", "to", "1", "." ]
def __init__(self, filenames, batch_size, image_size, repeat=False, shuffle=False, augmentation=False): """ Note: when evaluating set batch_size to 1. Arguments: filenames: a list of strings, paths to tfrecords files. batch_size: an integer. image_size: a list with two integers [width, height] or None, images of this size will be in a batch. If value is None then images will not be resized. In this case batch size must be 1. repeat: a boolean, whether repeat indefinitely. shuffle: whether to shuffle the dataset. augmentation: whether to do data augmentation. """ if image_size is not None: self.image_width, self.image_height = image_size self.resize = True else: assert batch_size == 1 self.image_width, self.image_height = None, None self.resize = False self.augmentation = augmentation self.batch_size = batch_size def get_num_samples(filename): return sum(1 for _ in tf.python_io.tf_record_iterator(filename)) num_examples = 0 for filename in filenames: num_examples_in_file = get_num_samples(filename) assert num_examples_in_file > 0 num_examples += num_examples_in_file self.num_examples = num_examples assert self.num_examples > 0 dataset = tf.data.Dataset.from_tensor_slices(filenames) num_shards = len(filenames) if shuffle: dataset = dataset.shuffle(buffer_size=num_shards) dataset = dataset.flat_map(tf.data.TFRecordDataset) dataset = dataset.prefetch(buffer_size=batch_size) if shuffle: dataset = dataset.shuffle(buffer_size=SHUFFLE_BUFFER_SIZE) dataset = dataset.repeat(None if repeat else 1) dataset = dataset.map(self._parse_and_preprocess, num_parallel_calls=NUM_THREADS) # we need batches of fixed size padded_shapes = ([self.image_height, self.image_width, 3], [None, 4], [], []) dataset = dataset.apply( tf.contrib.data.padded_batch_and_drop_remainder(batch_size, padded_shapes) ) dataset = dataset.prefetch(buffer_size=1) self.iterator = dataset.make_one_shot_iterator()
[ "def", "__init__", "(", "self", ",", "filenames", ",", "batch_size", ",", "image_size", ",", "repeat", "=", "False", ",", "shuffle", "=", "False", ",", "augmentation", "=", "False", ")", ":", "if", "image_size", "is", "not", "None", ":", "self", ".", "...
https://github.com/TropComplique/FaceBoxes-tensorflow/blob/0dde35eda1cb3dab6586b94c583029162ec37aa5/src/input_pipeline/pipeline.py#L12-L71
MillionIntegrals/vel
f3ce7da64362ad207f40f2c0d58d9300a25df3e8
vel/rl/vecenv/shared_mem.py
python
SharedMemVecEnvWrapper.instantiate_single
(self, seed=0, preset='default')
return env
Create a new Env instance - single
Create a new Env instance - single
[ "Create", "a", "new", "Env", "instance", "-", "single" ]
def instantiate_single(self, seed=0, preset='default'): """ Create a new Env instance - single """ env = self.env.instantiate(seed=seed, serial_id=0, preset=preset) if self.frame_history is not None: env = FrameStack(env, self.frame_history) return env
[ "def", "instantiate_single", "(", "self", ",", "seed", "=", "0", ",", "preset", "=", "'default'", ")", ":", "env", "=", "self", ".", "env", ".", "instantiate", "(", "seed", "=", "seed", ",", "serial_id", "=", "0", ",", "preset", "=", "preset", ")", ...
https://github.com/MillionIntegrals/vel/blob/f3ce7da64362ad207f40f2c0d58d9300a25df3e8/vel/rl/vecenv/shared_mem.py#L27-L34
AppScale/gts
46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9
AppServer/lib/django-1.4/django/contrib/gis/admin/options.py
python
GeoModelAdmin.media
(self)
return media
Injects OpenLayers JavaScript into the admin.
Injects OpenLayers JavaScript into the admin.
[ "Injects", "OpenLayers", "JavaScript", "into", "the", "admin", "." ]
def media(self): "Injects OpenLayers JavaScript into the admin." media = super(GeoModelAdmin, self).media media.add_js([self.openlayers_url]) media.add_js(self.extra_js) return media
[ "def", "media", "(", "self", ")", ":", "media", "=", "super", "(", "GeoModelAdmin", ",", "self", ")", ".", "media", "media", ".", "add_js", "(", "[", "self", ".", "openlayers_url", "]", ")", "media", ".", "add_js", "(", "self", ".", "extra_js", ")", ...
https://github.com/AppScale/gts/blob/46f909cf5dc5ba81faf9d81dc9af598dcf8a82a9/AppServer/lib/django-1.4/django/contrib/gis/admin/options.py#L43-L48
devitocodes/devito
6abd441e3f5f091775ad332be6b95e017b8cbd16
devito/types/grid.py
python
AbstractSubDomain.intersection
(self, other)
Return the intersection of two subdomains as a new subdomain.
Return the intersection of two subdomains as a new subdomain.
[ "Return", "the", "intersection", "of", "two", "subdomains", "as", "a", "new", "subdomain", "." ]
def intersection(self, other): """ Return the intersection of two subdomains as a new subdomain. """ raise NotImplementedError
[ "def", "intersection", "(", "self", ",", "other", ")", ":", "raise", "NotImplementedError" ]
https://github.com/devitocodes/devito/blob/6abd441e3f5f091775ad332be6b95e017b8cbd16/devito/types/grid.py#L417-L421
pyload/pyload
4410827ca7711f1a3cf91a0b11e967b81bbbcaa2
src/pyload/core/managers/file_manager.py
python
FileManager.update_file_info
(self, data, pid)
updates file info (name, size, status, url)
updates file info (name, size, status, url)
[ "updates", "file", "info", "(", "name", "size", "status", "url", ")" ]
def update_file_info(self, data, pid): """ updates file info (name, size, status, url) """ self.pyload.db.update_link_info(data) e = UpdateEvent( "pack", pid, "collector" if not self.get_package(pid).queue else "queue" ) self.pyload.event_manager.add_event(e)
[ "def", "update_file_info", "(", "self", ",", "data", ",", "pid", ")", ":", "self", ".", "pyload", ".", "db", ".", "update_link_info", "(", "data", ")", "e", "=", "UpdateEvent", "(", "\"pack\"", ",", "pid", ",", "\"collector\"", "if", "not", "self", "."...
https://github.com/pyload/pyload/blob/4410827ca7711f1a3cf91a0b11e967b81bbbcaa2/src/pyload/core/managers/file_manager.py#L583-L591
DataDog/integrations-core
934674b29d94b70ccc008f76ea172d0cdae05e1e
kubelet/datadog_checks/kubelet/prometheus.py
python
CadvisorPrometheusScraperMixin._get_container_label
(labels, l_name)
Iterate over all labels to find the label whose name equals l_name :param labels: list of labels :param l_name: str :return: str or None
Iterate over all labels to find the label whose name equals l_name :param labels: list of labels :param l_name: str :return: str or None
[ "Iter", "on", "all", "labels", "to", "find", "the", "label", ".", "name", "equal", "to", "the", "l_name", ":", "param", "labels", ":", "list", "of", "labels", ":", "param", "l_name", ":", "str", ":", "return", ":", "str", "or", "None" ]
def _get_container_label(labels, l_name): """ Iterate over all labels to find the label whose name equals l_name :param labels: list of labels :param l_name: str :return: str or None """ if l_name in labels: return labels[l_name]
[ "def", "_get_container_label", "(", "labels", ",", "l_name", ")", ":", "if", "l_name", "in", "labels", ":", "return", "labels", "[", "l_name", "]" ]
https://github.com/DataDog/integrations-core/blob/934674b29d94b70ccc008f76ea172d0cdae05e1e/kubelet/datadog_checks/kubelet/prometheus.py#L150-L158
pytorch/translate
564d011b10b4cef4e110c092c2912277ed64c713
pytorch_translate/research/deliberation_networks/deliberation_networks.py
python
TransformerDecoderPhase2.upgrade_state_dict_named
(self, state_dict, name)
return state_dict
Upgrade a (possibly old) state dict for new versions of fairseq.
Upgrade a (possibly old) state dict for new versions of fairseq.
[ "Upgrade", "a", "(", "possibly", "old", ")", "state", "dict", "for", "new", "versions", "of", "fairseq", "." ]
def upgrade_state_dict_named(self, state_dict, name): """Upgrade a (possibly old) state dict for new versions of fairseq.""" if isinstance(self.embed_positions, SinusoidalPositionalEmbedding): weights_key = "{}.embed_positions.weights".format(name) if weights_key in state_dict: del state_dict[weights_key] state_dict[ "{}.embed_positions._float_tensor".format(name) ] = torch.FloatTensor(1) for i in range(len(self.layers)): # update layer norms layer_norm_map = { "0": "self_attn_layer_norm", "1": "encoder_attn_layer_norm", "2": "final_layer_norm", } for old, new in layer_norm_map.items(): for m in ("weight", "bias"): k = "{}.layers.{}.layer_norms.{}.{}".format(name, i, old, m) if k in state_dict: state_dict[ "{}.layers.{}.{}.{}".format(name, i, new, m) ] = state_dict[k] del state_dict[k] version_key = "{}.version".format(name) if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2: # earlier checkpoints did not normalize after the stack of layers self.layer_norm = None self.normalize = False state_dict[version_key] = torch.Tensor([1]) return state_dict
[ "def", "upgrade_state_dict_named", "(", "self", ",", "state_dict", ",", "name", ")", ":", "if", "isinstance", "(", "self", ".", "embed_positions", ",", "SinusoidalPositionalEmbedding", ")", ":", "weights_key", "=", "\"{}.embed_positions.weights\"", ".", "format", "(...
https://github.com/pytorch/translate/blob/564d011b10b4cef4e110c092c2912277ed64c713/pytorch_translate/research/deliberation_networks/deliberation_networks.py#L443-L476
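The layer-norm migration above is plain key renaming on the state dict; a toy illustration on an ordinary dict (the `dec.` prefix and values are hypothetical):

```python
layer_norm_map = {"0": "self_attn_layer_norm",
                  "1": "encoder_attn_layer_norm",
                  "2": "final_layer_norm"}
state = {"dec.layers.0.layer_norms.0.weight": 1.0,
         "dec.layers.0.layer_norms.2.bias": 0.0}

for old, new in layer_norm_map.items():
    for m in ("weight", "bias"):
        k = "dec.layers.0.layer_norms.{}.{}".format(old, m)
        if k in state:
            state["dec.layers.0.{}.{}".format(new, m)] = state.pop(k)

print(sorted(state))
# ['dec.layers.0.final_layer_norm.bias', 'dec.layers.0.self_attn_layer_norm.weight']
```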
sfzhang15/ATSS
79dfb28bd18c931dd75a3ca2c63d32f5e4b1626a
atss_core/structures/bounding_box.py
python
BoxList.resize
(self, size, *args, **kwargs)
return bbox.convert(self.mode)
Returns a resized copy of this bounding box :param size: The requested size in pixels, as a 2-tuple: (width, height).
Returns a resized copy of this bounding box
[ "Returns", "a", "resized", "copy", "of", "this", "bounding", "box" ]
def resize(self, size, *args, **kwargs): """ Returns a resized copy of this bounding box :param size: The requested size in pixels, as a 2-tuple: (width, height). """ ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size)) if ratios[0] == ratios[1]: ratio = ratios[0] scaled_box = self.bbox * ratio bbox = BoxList(scaled_box, size, mode=self.mode) # bbox._copy_extra_fields(self) for k, v in self.extra_fields.items(): if not isinstance(v, torch.Tensor): v = v.resize(size, *args, **kwargs) bbox.add_field(k, v) return bbox ratio_width, ratio_height = ratios xmin, ymin, xmax, ymax = self._split_into_xyxy() scaled_xmin = xmin * ratio_width scaled_xmax = xmax * ratio_width scaled_ymin = ymin * ratio_height scaled_ymax = ymax * ratio_height scaled_box = torch.cat( (scaled_xmin, scaled_ymin, scaled_xmax, scaled_ymax), dim=-1 ) bbox = BoxList(scaled_box, size, mode="xyxy") # bbox._copy_extra_fields(self) for k, v in self.extra_fields.items(): if not isinstance(v, torch.Tensor): v = v.resize(size, *args, **kwargs) bbox.add_field(k, v) return bbox.convert(self.mode)
[ "def", "resize", "(", "self", ",", "size", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "ratios", "=", "tuple", "(", "float", "(", "s", ")", "/", "float", "(", "s_orig", ")", "for", "s", ",", "s_orig", "in", "zip", "(", "size", ",", "...
https://github.com/sfzhang15/ATSS/blob/79dfb28bd18c931dd75a3ca2c63d32f5e4b1626a/atss_core/structures/bounding_box.py#L91-L127
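The non-uniform branch scales x and y coordinates by separate ratios; the same arithmetic with plain numbers (sizes are (width, height) and purely illustrative):

```python
size_orig, size_new = (640, 480), (320, 480)   # halve width, keep height
ratio_w = float(size_new[0]) / size_orig[0]    # 0.5
ratio_h = float(size_new[1]) / size_orig[1]    # 1.0

xmin, ymin, xmax, ymax = 100, 50, 200, 150
print((xmin * ratio_w, ymin * ratio_h, xmax * ratio_w, ymax * ratio_h))
# (50.0, 50.0, 100.0, 150.0) -- only the x coordinates shrink
```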
ecstatic-nobel/Analyst-Arsenal
7f38854e2383e1b564f67fb9a197a045cb167230
commons.py
python
download_site
(args, day, protocol, domain, ext_csv, url, resp)
[]
def download_site(args, day, protocol, domain, ext_csv, url, resp): """ """ directory = "{}{}".format(args.cap_dir, day) domain_dir = "{}/{}".format(directory, domain) root_url = "{}//{}/".format(protocol, domain) try: if not os.path.exists(domain_dir): os.makedirs(domain_dir) if not os.path.exists(domain_dir): tqdm.tqdm.write(colored("{}: {} is temporarily unavailable.".format( message_header("directory"), domain_dir ), "red", attrs=["underline"])) tqdm.tqdm.write(colored("{}: Waiting 15s for {} to become available...".format( message_header("directory"), domain_dir), "red", attrs=["underline"] )) time.sleep(15) if not os.path.exists(domain_dir): return "continue" wget_command = format_wget(args, directory, uagent, ext_csv, root_url) proc = subprocess.Popen(wget_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) _, err = proc.communicate() if "301 Moved Permanently" in err or "302 Found" in err or "307 Temporary Redirect" in err: message_failed(args, "Redirects exceeded", root_url) os.rmdir(domain_dir) return "continue" message_complete(url) remove_empty(domain_dir, args) return "break" except Exception as err: message_failed(args, err, url) remove_empty(domain_dir, args) return "continue"
[ "def", "download_site", "(", "args", ",", "day", ",", "protocol", ",", "domain", ",", "ext_csv", ",", "url", ",", "resp", ")", ":", "directory", "=", "\"{}{}\"", ".", "format", "(", "args", ".", "cap_dir", ",", "day", ")", "domain_dir", "=", "\"{}/{}\"...
https://github.com/ecstatic-nobel/Analyst-Arsenal/blob/7f38854e2383e1b564f67fb9a197a045cb167230/commons.py#L229-L268
Mariewelt/OpenChem
e612d5cd471079c64e61ceda946c3dc7cf095bd8
openchem/criterion/multitask_loss.py
python
MultitaskLoss.forward
(self, input, target)
return (loss.sum(dim=0) / n_samples).mean()
[]
def forward(self, input, target): assert target.size()[1] == self.n_tasks assert input.size()[1] == self.n_tasks x = torch.zeros(target.size()).cuda() y = torch.ones(target.size()).cuda() mask = torch.where(target == self.ignore_index, x, y) loss = F.binary_cross_entropy(input, mask * target, weight=self.weight) loss = loss * mask n_samples = mask.sum(dim=0) return (loss.sum(dim=0) / n_samples).mean()
[ "def", "forward", "(", "self", ",", "input", ",", "target", ")", ":", "assert", "target", ".", "size", "(", ")", "[", "1", "]", "==", "self", ".", "n_tasks", "assert", "input", ".", "size", "(", ")", "[", "1", "]", "==", "self", ".", "n_tasks", ...
https://github.com/Mariewelt/OpenChem/blob/e612d5cd471079c64e61ceda946c3dc7cf095bd8/openchem/criterion/multitask_loss.py#L40-L49
fossasia/x-mario-center
fe67afe28d995dcf4e2498e305825a4859566172
softwarecenter/ui/gtk3/models/appstore2.py
python
AppListStore.set_from_matches
(self, matches)
set the content of the liststore based on a list of xapian.MSetItems
set the content of the liststore based on a list of xapian.MSetItems
[ "set", "the", "content", "of", "the", "liststore", "based", "on", "a", "list", "of", "xapian", ".", "MSetItems" ]
def set_from_matches(self, matches): """ set the content of the liststore based on a list of xapian.MSetItems """ LOG.debug("set_from_matches len(matches)='%s'" % len(matches)) self.current_matches = matches n_matches = len(matches) if n_matches == 0: return extent = min(self.LOAD_INITIAL, n_matches) with ExecutionTime("store.append_initial"): for doc in [m.document for m in matches][:extent]: doc.available = doc.installed = doc.purchasable = None self.append((doc,)) if n_matches == extent: return with ExecutionTime("store.append_placeholders"): for i in range(n_matches - extent): self.append() self.emit('appcount-changed', len(matches)) self.buffer_icons()
[ "def", "set_from_matches", "(", "self", ",", "matches", ")", ":", "LOG", ".", "debug", "(", "\"set_from_matches len(matches)='%s'\"", "%", "len", "(", "matches", ")", ")", "self", ".", "current_matches", "=", "matches", "n_matches", "=", "len", "(", "matches",...
https://github.com/fossasia/x-mario-center/blob/fe67afe28d995dcf4e2498e305825a4859566172/softwarecenter/ui/gtk3/models/appstore2.py#L431-L456
stefanhoelzl/vue.py
f4256454256ddfe54a8be6dea493d3fc915ef1a2
examples/element_ui/app.py
python
App.clicked
(self, item)
[]
def clicked(self, item): print(item) self.notify.info( {"title": "Navigation", "message": item.get("title", "NO TITLE")} )
[ "def", "clicked", "(", "self", ",", "item", ")", ":", "print", "(", "item", ")", "self", ".", "notify", ".", "info", "(", "{", "\"title\"", ":", "\"Navigation\"", ",", "\"message\"", ":", "item", ".", "get", "(", "\"title\"", ",", "\"NO TITLE\"", ")", ...
https://github.com/stefanhoelzl/vue.py/blob/f4256454256ddfe54a8be6dea493d3fc915ef1a2/examples/element_ui/app.py#L39-L43
allenai/savn
0ce930091e9213b52950143bb43195045ea33437
datasets/environment.py
python
Environment.reachable_points
(self)
return self._reachable_points
Use the JSON file to get the reachable points.
Use the JSON file to get the reachable points.
[ "Use", "the", "JSON", "file", "to", "get", "the", "reachable", "points", "." ]
def reachable_points(self): """ Use the JSON file to get the reachable points. """ if self._reachable_points is not None: return self._reachable_points points_path = os.path.join(self.offline_data_dir, self.scene_name, "grid.json") if not os.path.exists(points_path): raise IOError("Path {0} does not exist".format(points_path)) self._reachable_points = json.load(open(points_path)) return self._reachable_points
[ "def", "reachable_points", "(", "self", ")", ":", "if", "self", ".", "_reachable_points", "is", "not", "None", ":", "return", "self", ".", "_reachable_points", "points_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "offline_data_dir", ",", "...
https://github.com/allenai/savn/blob/0ce930091e9213b52950143bb43195045ea33437/datasets/environment.py#L128-L137
clinton-hall/nzbToMedia
27669389216902d1085660167e7bda0bd8527ecf
libs/common/urllib3/packages/backports/makefile.py
python
backport_makefile
(self, mode="r", buffering=None, encoding=None, errors=None, newline=None)
return text
Backport of ``socket.makefile`` from Python 3.5.
Backport of ``socket.makefile`` from Python 3.5.
[ "Backport", "of", "socket", ".", "makefile", "from", "Python", "3", ".", "5", "." ]
def backport_makefile(self, mode="r", buffering=None, encoding=None, errors=None, newline=None): """ Backport of ``socket.makefile`` from Python 3.5. """ if not set(mode) <= {"r", "w", "b"}: raise ValueError( "invalid mode %r (only r, w, b allowed)" % (mode,) ) writing = "w" in mode reading = "r" in mode or not writing assert reading or writing binary = "b" in mode rawmode = "" if reading: rawmode += "r" if writing: rawmode += "w" raw = SocketIO(self, rawmode) self._makefile_refs += 1 if buffering is None: buffering = -1 if buffering < 0: buffering = io.DEFAULT_BUFFER_SIZE if buffering == 0: if not binary: raise ValueError("unbuffered streams must be binary") return raw if reading and writing: buffer = io.BufferedRWPair(raw, raw, buffering) elif reading: buffer = io.BufferedReader(raw, buffering) else: assert writing buffer = io.BufferedWriter(raw, buffering) if binary: return buffer text = io.TextIOWrapper(buffer, encoding, errors, newline) text.mode = mode return text
[ "def", "backport_makefile", "(", "self", ",", "mode", "=", "\"r\"", ",", "buffering", "=", "None", ",", "encoding", "=", "None", ",", "errors", "=", "None", ",", "newline", "=", "None", ")", ":", "if", "not", "set", "(", "mode", ")", "<=", "{", "\"...
https://github.com/clinton-hall/nzbToMedia/blob/27669389216902d1085660167e7bda0bd8527ecf/libs/common/urllib3/packages/backports/makefile.py#L14-L53
mdiazcl/fuzzbunch-debian
2b76c2249ade83a389ae3badb12a1bd09901fd2c
windows/Resources/Python/Core/Lib/DocXMLRPCServer.py
python
XMLRPCDocGenerator.generate_html_documentation
(self)
return documenter.page(self.server_title, documentation)
generate_html_documentation() => html documentation for the server Generates HTML documentation for the server using introspection for installed functions and instances that do not implement the _dispatch method. Alternatively, instances can choose to implement the _get_method_argstring(method_name) method to provide the argument string used in the documentation and the _methodHelp(method_name) method to provide the help text used in the documentation.
generate_html_documentation() => html documentation for the server Generates HTML documentation for the server using introspection for installed functions and instances that do not implement the _dispatch method. Alternatively, instances can choose to implement the _get_method_argstring(method_name) method to provide the argument string used in the documentation and the _methodHelp(method_name) method to provide the help text used in the documentation.
[ "generate_html_documentation", "()", "=", ">", "html", "documentation", "for", "the", "server", "Generates", "HTML", "documentation", "for", "the", "server", "using", "introspection", "for", "installed", "functions", "and", "instances", "that", "do", "not", "impleme...
def generate_html_documentation(self): """generate_html_documentation() => html documentation for the server Generates HTML documentation for the server using introspection for installed functions and instances that do not implement the _dispatch method. Alternatively, instances can choose to implement the _get_method_argstring(method_name) method to provide the argument string used in the documentation and the _methodHelp(method_name) method to provide the help text used in the documentation.""" methods = {} for method_name in self.system_listMethods(): if method_name in self.funcs: method = self.funcs[method_name] elif self.instance is not None: method_info = [ None, None] if hasattr(self.instance, '_get_method_argstring'): method_info[0] = self.instance._get_method_argstring(method_name) if hasattr(self.instance, '_methodHelp'): method_info[1] = self.instance._methodHelp(method_name) method_info = tuple(method_info) if method_info != (None, None): method = method_info elif not hasattr(self.instance, '_dispatch'): try: method = resolve_dotted_attribute(self.instance, method_name) except AttributeError: method = method_info else: method = method_info methods[method_name] = method documenter = ServerHTMLDoc() documentation = documenter.docserver(self.server_name, self.server_documentation, methods) return documenter.page(self.server_title, documentation)
[ "def", "generate_html_documentation", "(", "self", ")", ":", "methods", "=", "{", "}", "for", "method_name", "in", "self", ".", "system_listMethods", "(", ")", ":", "if", "method_name", "in", "self", ".", "funcs", ":", "method", "=", "self", ".", "funcs", ...
https://github.com/mdiazcl/fuzzbunch-debian/blob/2b76c2249ade83a389ae3badb12a1bd09901fd2c/windows/Resources/Python/Core/Lib/DocXMLRPCServer.py#L130-L166
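A minimal sketch of wiring this generator up via the stdlib server that mixes it in; note that in Python 3 the module lives at `xmlrpc.server` rather than `DocXMLRPCServer`.

```python
from xmlrpc.server import DocXMLRPCServer  # Python 3 location of this module

server = DocXMLRPCServer(("localhost", 8000), logRequests=False)
server.set_server_title("Demo RPC")
server.register_function(pow)
# A GET on http://localhost:8000/ now returns the page produced by
# generate_html_documentation(); server.serve_forever() would start serving it.
```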
Dman95/SASM
7e3ae6da1c219a68e26d38939338567e5c27151a
Windows/MinGW64/opt/lib/python2.7/codecs.py
python
StreamReader.read
(self, size=-1, chars=-1, firstline=False)
return result
Decodes data from the stream self.stream and returns the resulting object. chars indicates the number of characters to read from the stream. read() will never return more than chars characters, but it might return less, if there are not enough characters available. size indicates the approximate maximum number of bytes to read from the stream for decoding purposes. The decoder can modify this setting as appropriate. The default value -1 indicates to read and decode as much as possible. size is intended to prevent having to decode huge files in one step. If firstline is true, and a UnicodeDecodeError happens after the first line terminator in the input only the first line will be returned, the rest of the input will be kept until the next call to read(). The method should use a greedy read strategy meaning that it should read as much data as is allowed within the definition of the encoding and the given size, e.g. if optional encoding endings or state markers are available on the stream, these should be read too.
Decodes data from the stream self.stream and returns the resulting object.
[ "Decodes", "data", "from", "the", "stream", "self", ".", "stream", "and", "returns", "the", "resulting", "object", "." ]
def read(self, size=-1, chars=-1, firstline=False): """ Decodes data from the stream self.stream and returns the resulting object. chars indicates the number of characters to read from the stream. read() will never return more than chars characters, but it might return less, if there are not enough characters available. size indicates the approximate maximum number of bytes to read from the stream for decoding purposes. The decoder can modify this setting as appropriate. The default value -1 indicates to read and decode as much as possible. size is intended to prevent having to decode huge files in one step. If firstline is true, and a UnicodeDecodeError happens after the first line terminator in the input only the first line will be returned, the rest of the input will be kept until the next call to read(). The method should use a greedy read strategy meaning that it should read as much data as is allowed within the definition of the encoding and the given size, e.g. if optional encoding endings or state markers are available on the stream, these should be read too. """ # If we have lines cached, first merge them back into characters if self.linebuffer: self.charbuffer = "".join(self.linebuffer) self.linebuffer = None # read until we get the required number of characters (if available) while True: # can the request be satisfied from the character buffer? if chars < 0: if size < 0: if self.charbuffer: break elif len(self.charbuffer) >= size: break else: if len(self.charbuffer) >= chars: break # we need more data if size < 0: newdata = self.stream.read() else: newdata = self.stream.read(size) # decode bytes (those remaining from the last call included) data = self.bytebuffer + newdata try: newchars, decodedbytes = self.decode(data, self.errors) except UnicodeDecodeError, exc: if firstline: newchars, decodedbytes = self.decode(data[:exc.start], self.errors) lines = newchars.splitlines(True) if len(lines)<=1: raise else: raise # keep undecoded bytes until the next call self.bytebuffer = data[decodedbytes:] # put new characters in the character buffer self.charbuffer += newchars # there was no data available if not newdata: break if chars < 0: # Return everything we've got result = self.charbuffer self.charbuffer = "" else: # Return the first chars characters result = self.charbuffer[:chars] self.charbuffer = self.charbuffer[chars:] return result
[ "def", "read", "(", "self", ",", "size", "=", "-", "1", ",", "chars", "=", "-", "1", ",", "firstline", "=", "False", ")", ":", "# If we have lines cached, first merge them back into characters", "if", "self", ".", "linebuffer", ":", "self", ".", "charbuffer", ...
https://github.com/Dman95/SASM/blob/7e3ae6da1c219a68e26d38939338567e5c27151a/Windows/MinGW64/opt/lib/python2.7/codecs.py#L424-L501
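The `chars` argument is what distinguishes this from a raw byte read: it counts decoded characters, not bytes. A Python 3 sketch (the record itself is the Python 2 copy of `codecs.py`):

```python
import codecs
import io

stream = io.BytesIO("héllo wörld".encode("utf-8"))  # 13 bytes, 11 characters
reader = codecs.getreader("utf-8")(stream)
print(reader.read(chars=2))  # 'hé' -- two characters, even though 'é' is 2 bytes
print(reader.read())         # 'llo wörld' -- the remainder of the stream
```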
Yelp/clusterman
54beef89c01a2681aafd1fbb93b6ad5f6252d6cf
clusterman/interfaces/resource_group.py
python
ResourceGroup.fulfilled_capacity
(self)
The actual weighted capacity for this ResourceGroup
The actual weighted capacity for this ResourceGroup
[ "The", "actual", "weighted", "capacity", "for", "this", "ResourceGroup" ]
def fulfilled_capacity(self) -> float: # pragma: no cover """ The actual weighted capacity for this ResourceGroup """ pass
[ "def", "fulfilled_capacity", "(", "self", ")", "->", "float", ":", "# pragma: no cover", "pass" ]
https://github.com/Yelp/clusterman/blob/54beef89c01a2681aafd1fbb93b6ad5f6252d6cf/clusterman/interfaces/resource_group.py#L125-L127
demisto/content
5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07
Packs/Base/Scripts/CommonServerPython/CommonServerPython.py
python
is_ip_valid
(s, accept_v6_ips=False)
Checks if the given string represents a valid IP address. By default, will only return 'True' for IPv4 addresses. :type s: ``str`` :param s: The string to be checked (required) :type accept_v6_ips: ``bool`` :param accept_v6_ips: A boolean determining whether the function should accept IPv6 addresses :return: True if the given string represents a valid IP address, False otherwise :rtype: ``bool``
Checks if the given string represents a valid IP address. By default, will only return 'True' for IPv4 addresses.
[ "Checks", "if", "the", "given", "string", "represents", "a", "valid", "IP", "address", ".", "By", "default", "will", "only", "return", "True", "for", "IPv4", "addresses", "." ]
def is_ip_valid(s, accept_v6_ips=False): """ Checks if the given string represents a valid IP address. By default, will only return 'True' for IPv4 addresses. :type s: ``str`` :param s: The string to be checked (required) :type accept_v6_ips: ``bool`` :param accept_v6_ips: A boolean determining whether the function should accept IPv6 addresses :return: True if the given string represents a valid IP address, False otherwise :rtype: ``bool`` """ a = s.split('.') if accept_v6_ips and is_ipv6_valid(s): return True elif len(a) != 4: return False else: for x in a: if not x.isdigit(): return False i = int(x) if i < 0 or i > 255: return False return True
[ "def", "is_ip_valid", "(", "s", ",", "accept_v6_ips", "=", "False", ")", ":", "a", "=", "s", ".", "split", "(", "'.'", ")", "if", "accept_v6_ips", "and", "is_ipv6_valid", "(", "s", ")", ":", "return", "True", "elif", "len", "(", "a", ")", "!=", "4"...
https://github.com/demisto/content/blob/5c664a65b992ac8ca90ac3f11b1b2cdf11ee9b07/Packs/Base/Scripts/CommonServerPython/CommonServerPython.py#L2605-L2631
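A few illustrative calls matching the is_ip_valid docstring above; the import assumes a Demisto/XSOAR script context where CommonServerPython is importable, and the addresses are arbitrary samples.

from CommonServerPython import is_ip_valid

assert is_ip_valid("192.168.0.1")                      # well-formed IPv4
assert not is_ip_valid("300.1.1.1")                    # octet out of range
assert not is_ip_valid("2001:db8::1")                  # IPv6 rejected by default
assert is_ip_valid("2001:db8::1", accept_v6_ips=True)  # IPv6 accepted on request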
Jenyay/outwiker
50530cf7b3f71480bb075b2829bc0669773b835b
src/outwiker/gui/controls/ultimatelistctrl.py
python
UltimateListHeaderData.SetItem
(self, item)
Sets information about the header/footer item. :param `item`: an instance of :class:`UltimateListHeaderData`.
Sets information about the header/footer item.
[ "Sets", "information", "about", "the", "header", "/", "footer", "item", "." ]
def SetItem(self, item): """ Sets information about the header/footer item. :param `item`: an instance of :class:`UltimateListHeaderData`. """ self._mask = item._mask if self._mask & ULC_MASK_TEXT: self._text = item._text if self._mask & ULC_MASK_TOOLTIP: self._tooltip = item._tooltip if self._mask & ULC_MASK_FOOTER_TEXT: self._footerText = item._footerText if self._mask & ULC_MASK_IMAGE: self._image = item._image[:] if self._mask & ULC_MASK_FOOTER_IMAGE: self._footerImage = item._footerImage[:] if self._mask & ULC_MASK_FORMAT: self._format = item._format if self._mask & ULC_MASK_FOOTER_FORMAT: self._footerFormat = item._footerFormat if self._mask & ULC_MASK_WIDTH: self.SetWidth(item._width) if self._mask & ULC_MASK_FONT: self._font = item._font if self._mask & ULC_MASK_FOOTER_FONT: self._footerFont = item._footerFont if self._mask & ULC_MASK_FOOTER_KIND: self._footerKind = item._footerKind self._footerChecked = item._footerChecked if self._mask & ULC_MASK_KIND: self._kind = item._kind self._checked = item._checked if self._mask & ULC_MASK_CHECK: self._kind = item._kind self._checked = item._checked if self._mask & ULC_MASK_FOOTER_CHECK: self._footerKind = item._footerKind self._footerChecked = item._footerChecked if self._mask & ULC_MASK_STATE: self.SetState(item._state) if self._mask & ULC_MASK_SHOWN: self._isColumnShown = item._isColumnShown if self._mask & ULC_MASK_RENDERER: self._customRenderer = item._customRenderer
[ "def", "SetItem", "(", "self", ",", "item", ")", ":", "self", ".", "_mask", "=", "item", ".", "_mask", "if", "self", ".", "_mask", "&", "ULC_MASK_TEXT", ":", "self", ".", "_text", "=", "item", ".", "_text", "if", "self", ".", "_mask", "&", "ULC_MAS...
https://github.com/Jenyay/outwiker/blob/50530cf7b3f71480bb075b2829bc0669773b835b/src/outwiker/gui/controls/ultimatelistctrl.py#L3286-L3348
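SetItem above copies only the fields whose bits are set in _mask. The same gate pattern in miniature, with invented flag names and a two-field class:

MASK_TEXT = 0x1
MASK_IMAGE = 0x2

class Item:
    def __init__(self, mask=0, text="", image=None):
        self._mask, self._text, self._image = mask, text, image

    def set_item(self, other):
        self._mask = other._mask
        if self._mask & MASK_TEXT:
            self._text = other._text
        if self._mask & MASK_IMAGE:
            self._image = other._image

a = Item(text="old", image="logo.png")
a.set_item(Item(mask=MASK_TEXT, text="header"))
assert a._text == "header" and a._image == "logo.png"  # image bit unset, so untouched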
quay/quay
b7d325ed42827db9eda2d9f341cb5a6cdfd155a6
buildman/manager/executor.py
python
LogPipe.run
(self)
Run the thread, logging everything.
Run the thread, logging everything.
[ "Run", "the", "thread", "logging", "everything", "." ]
def run(self): """ Run the thread, logging everything. """ for line in iter(self.pipe_reader.readline, ""): logging.log(self.level, line.strip("\n")) self.pipe_reader.close()
[ "def", "run", "(", "self", ")", ":", "for", "line", "in", "iter", "(", "self", ".", "pipe_reader", ".", "readline", ",", "\"\"", ")", ":", "logging", ".", "log", "(", "self", ".", "level", ",", "line", ".", "strip", "(", "\"\\n\"", ")", ")", "sel...
https://github.com/quay/quay/blob/b7d325ed42827db9eda2d9f341cb5a6cdfd155a6/buildman/manager/executor.py#L696-L703
oracle/oci-python-sdk
3c1604e4e212008fb6718e2f68cdb5ef71fd5793
src/oci/data_catalog/data_catalog_client.py
python
DataCatalogClient.create_data_asset_tag
(self, catalog_id, data_asset_key, create_data_asset_tag_details, **kwargs)
Creates a new data asset tag. :param str catalog_id: (required) Unique catalog identifier. :param str data_asset_key: (required) Unique data asset key. :param oci.data_catalog.models.CreateTagDetails create_data_asset_tag_details: (required) The information used to create the data asset tag. :param str opc_request_id: (optional) The client request ID for tracing. :param str opc_retry_token: (optional) A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again. Retry tokens expire after 24 hours, but can be invalidated before then due to conflicting operations. For example, if a resource has been deleted and purged from the system, then a retry of the original creation request might be rejected. :param obj retry_strategy: (optional) A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level. This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__. To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`. :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_catalog.models.DataAssetTag` :rtype: :class:`~oci.response.Response` :example: Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/datacatalog/create_data_asset_tag.py.html>`__ to see an example of how to use create_data_asset_tag API.
Creates a new data asset tag.
[ "Creates", "a", "new", "data", "asset", "tag", "." ]
def create_data_asset_tag(self, catalog_id, data_asset_key, create_data_asset_tag_details, **kwargs): """ Creates a new data asset tag. :param str catalog_id: (required) Unique catalog identifier. :param str data_asset_key: (required) Unique data asset key. :param oci.data_catalog.models.CreateTagDetails create_data_asset_tag_details: (required) The information used to create the data asset tag. :param str opc_request_id: (optional) The client request ID for tracing. :param str opc_retry_token: (optional) A token that uniquely identifies a request so it can be retried in case of a timeout or server error without risk of executing that same action again. Retry tokens expire after 24 hours, but can be invalidated before then due to conflicting operations. For example, if a resource has been deleted and purged from the system, then a retry of the original creation request might be rejected. :param obj retry_strategy: (optional) A retry strategy to apply to this specific operation/call. This will override any retry strategy set at the client-level. This should be one of the strategies available in the :py:mod:`~oci.retry` module. This operation will not retry by default, users can also use the convenient :py:data:`~oci.retry.DEFAULT_RETRY_STRATEGY` provided by the SDK to enable retries for it. The specifics of the default retry strategy are described `here <https://docs.oracle.com/en-us/iaas/tools/python/latest/sdk_behaviors/retries.html>`__. To have this operation explicitly not perform any retries, pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`. :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.data_catalog.models.DataAssetTag` :rtype: :class:`~oci.response.Response` :example: Click `here <https://docs.cloud.oracle.com/en-us/iaas/tools/python-sdk-examples/latest/datacatalog/create_data_asset_tag.py.html>`__ to see an example of how to use create_data_asset_tag API. """ resource_path = "/catalogs/{catalogId}/dataAssets/{dataAssetKey}/tags" method = "POST" # Don't accept unknown kwargs expected_kwargs = [ "retry_strategy", "opc_request_id", "opc_retry_token" ] extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs] if extra_kwargs: raise ValueError( "create_data_asset_tag got unknown kwargs: {!r}".format(extra_kwargs)) path_params = { "catalogId": catalog_id, "dataAssetKey": data_asset_key } path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing} for (k, v) in six.iteritems(path_params): if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0): raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k)) header_params = { "accept": "application/json", "content-type": "application/json", "opc-request-id": kwargs.get("opc_request_id", missing), "opc-retry-token": kwargs.get("opc_retry_token", missing) } header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None} retry_strategy = self.base_client.get_preferred_retry_strategy( operation_retry_strategy=kwargs.get('retry_strategy'), client_retry_strategy=self.retry_strategy ) if retry_strategy: if not isinstance(retry_strategy, retry.NoneRetryStrategy): self.base_client.add_opc_retry_token_if_needed(header_params) self.base_client.add_opc_client_retries_header(header_params) retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback) return retry_strategy.make_retrying_call( self.base_client.call_api, resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=create_data_asset_tag_details, response_type="DataAssetTag") else: return self.base_client.call_api( resource_path=resource_path, method=method, path_params=path_params, header_params=header_params, body=create_data_asset_tag_details, response_type="DataAssetTag")
[ "def", "create_data_asset_tag", "(", "self", ",", "catalog_id", ",", "data_asset_key", ",", "create_data_asset_tag_details", ",", "*", "*", "kwargs", ")", ":", "resource_path", "=", "\"/catalogs/{catalogId}/dataAssets/{dataAssetKey}/tags\"", "method", "=", "\"POST\"", "# ...
https://github.com/oracle/oci-python-sdk/blob/3c1604e4e212008fb6718e2f68cdb5ef71fd5793/src/oci/data_catalog/data_catalog_client.py#L1343-L1439
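A hedged call sketch for create_data_asset_tag; the OCIDs, keys, and CreateTagDetails fields are placeholders rather than values from the record, and the tag model's exact required fields should be checked against the SDK docs.

import oci

config = oci.config.from_file()  # reads ~/.oci/config
client = oci.data_catalog.DataCatalogClient(config)

details = oci.data_catalog.models.CreateTagDetails(name="pii")
response = client.create_data_asset_tag(
    catalog_id="ocid1.datacatalog.oc1..exampleuniqueID",
    data_asset_key="example-data-asset-key",
    create_data_asset_tag_details=details,
)
print(response.data)  # a DataAssetTag model on success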
shiyanhui/FileHeader
f347cc134021fb0b710694b71c57742476f5fd2b
jinja2/ext.py
python
Extension.bind
(self, environment)
return rv
Create a copy of this extension bound to another environment.
Create a copy of this extension bound to another environment.
[ "Create", "a", "copy", "of", "this", "extension", "bound", "to", "another", "environment", "." ]
def bind(self, environment): """Create a copy of this extension bound to another environment.""" rv = object.__new__(self.__class__) rv.__dict__.update(self.__dict__) rv.environment = environment return rv
[ "def", "bind", "(", "self", ",", "environment", ")", ":", "rv", "=", "object", ".", "__new__", "(", "self", ".", "__class__", ")", "rv", ".", "__dict__", ".", "update", "(", "self", ".", "__dict__", ")", "rv", ".", "environment", "=", "environment", ...
https://github.com/shiyanhui/FileHeader/blob/f347cc134021fb0b710694b71c57742476f5fd2b/jinja2/ext.py#L73-L78
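bind() above is a prototype-style clone: object.__new__ skips __init__, __dict__.update shares the existing state, and a single attribute is then rebound. The same pattern standalone, with invented names:

class Plugin:
    def __init__(self, environment):
        self.environment = environment
        self.tags = {"cache", "trans"}       # stand-in for expensive setup

    def bind(self, environment):
        rv = object.__new__(self.__class__)  # allocate without running __init__
        rv.__dict__.update(self.__dict__)    # share state with the original
        rv.environment = environment         # rebind only the environment
        return rv

a = Plugin("dev")
b = a.bind("prod")
assert b.tags is a.tags and b.environment == "prod"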
tensorflow/tensor2tensor
2a33b152d7835af66a6d20afe7961751047e28dd
tensor2tensor/models/basic.py
python
basic_fc_small
()
return hparams
Small fully connected model.
Small fully connected model.
[ "Small", "fully", "connected", "model", "." ]
def basic_fc_small(): """Small fully connected model.""" hparams = common_hparams.basic_params1() hparams.learning_rate = 0.1 hparams.batch_size = 128 hparams.hidden_size = 256 hparams.num_hidden_layers = 2 hparams.initializer = "uniform_unit_scaling" hparams.initializer_gain = 1.0 hparams.weight_decay = 0.0 hparams.dropout = 0.0 return hparams
[ "def", "basic_fc_small", "(", ")", ":", "hparams", "=", "common_hparams", ".", "basic_params1", "(", ")", "hparams", ".", "learning_rate", "=", "0.1", "hparams", ".", "batch_size", "=", "128", "hparams", ".", "hidden_size", "=", "256", "hparams", ".", "num_h...
https://github.com/tensorflow/tensor2tensor/blob/2a33b152d7835af66a6d20afe7961751047e28dd/tensor2tensor/models/basic.py#L47-L58
YosefLab/scvi-tools
f0a3ba6e11053069fd1857d2381083e5492fa8b8
scvi/model/base/_log_likelihood.py
python
compute_elbo
(vae, data_loader, feed_labels=True, **kwargs)
return elbo / n_samples
Computes the ELBO. The ELBO is the reconstruction error + the KL divergences between the variational distributions and the priors. It differs from the marginal log likelihood. Specifically, it is a lower bound on the marginal log likelihood plus a term that is constant with respect to the variational distribution. It still gives good insights into the modeling of the data, and is fast to compute.
Computes the ELBO.
[ "Computes", "the", "ELBO", "." ]
def compute_elbo(vae, data_loader, feed_labels=True, **kwargs): """ Computes the ELBO. The ELBO is the reconstruction error + the KL divergences between the variational distributions and the priors. It differs from the marginal log likelihood. Specifically, it is a lower bound on the marginal log likelihood plus a term that is constant with respect to the variational distribution. It still gives good insights into the modeling of the data, and is fast to compute. """ # Iterate once over the data and compute the elbo elbo = 0 for tensors in data_loader: _, _, scvi_loss = vae(tensors, **kwargs) recon_loss = scvi_loss.reconstruction_loss kl_local = scvi_loss.kl_local elbo += torch.sum(recon_loss + kl_local).item() kl_global = scvi_loss.kl_global n_samples = len(data_loader.indices) elbo += kl_global return elbo / n_samples
[ "def", "compute_elbo", "(", "vae", ",", "data_loader", ",", "feed_labels", "=", "True", ",", "*", "*", "kwargs", ")", ":", "# Iterate once over the data and compute the elbo", "elbo", "=", "0", "for", "tensors", "in", "data_loader", ":", "_", ",", "_", ",", ...
https://github.com/YosefLab/scvi-tools/blob/f0a3ba6e11053069fd1857d2381083e5492fa8b8/scvi/model/base/_log_likelihood.py#L5-L28
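In symbols, the bound that compute_elbo above averages over observations x, with variational posterior q(z | x) and prior p(z), is

\log p(x) \;\ge\; \mathbb{E}_{q(z \mid x)}\big[\log p(x \mid z)\big] \;-\; \mathrm{KL}\big(q(z \mid x) \,\|\, p(z)\big)

The loop accumulates the per-batch reconstruction and local KL terms, while the dataset-level global KL is added once before dividing by n_samples.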
Azure/azure-devops-cli-extension
11334cd55806bef0b99c3bee5a438eed71e44037
azure-devops/azext_devops/dev/pipelines/pipeline_folders.py
python
pipeline_folder_update
(path, new_path=None, new_description=None, organization=None, project=None, detect=None)
return client.update_folder(path=path, folder=folder_to_update, project=project)
Update a folder name or description. :param path: Full path of the folder. :type path: str :param new_path: New full path of the folder. :type new_path: str :param new_description: New description of the folder. :type new_description: str :param project: Name or ID of the team project. :type project: str :param detect: Automatically detect organization and project. Default is "on". :type detect: str
Update a folder name or description. :param path: Full path of the folder. :type path: str :param new_path: New full path of the folder. :type new_path: str :param new_description: New description of the folder. :type new_description: str :param project: Name or ID of the team project. :type project: str :param detect: Automatically detect organization and project. Default is "on". :type detect: str
[ "Update", "a", "folder", "name", "or", "description", ".", ":", "param", "path", ":", "Full", "path", "of", "the", "folder", ".", ":", "type", "path", ":", "str", ":", "param", "new_path", ":", "New", "full", "path", "of", "the", "folder", ".", ":", ...
def pipeline_folder_update(path, new_path=None, new_description=None, organization=None, project=None, detect=None): """ Update a folder name or description. :param path: Full path of the folder. :type path: str :param new_path: New full path of the folder. :type new_path: str :param new_description: New description of the folder. :type new_description: str :param project: Name or ID of the team project. :type project: str :param detect: Automatically detect organization and project. Default is "on". :type detect: str """ if not new_path and not new_description: raise CLIError('Either --new-path or --new-description should be specified.') organization, project = resolve_instance_and_project( detect=detect, organization=organization, project=project) client = get_build_client(organization) folders = client.get_folders(path=path, project=project, query_order='folderAscending') folder_to_update = None # find matching folder if present for folder in folders: if folder.path.strip('\\') == path.strip('\\'): folder_to_update = folder break if not folder_to_update: raise CLIError('Cannot find folder with path {}. Update operation failed.'.format(path)) if new_description: folder_to_update.description = new_description if new_path: folder_to_update.path = new_path return client.update_folder(path=path, folder=folder_to_update, project=project)
[ "def", "pipeline_folder_update", "(", "path", ",", "new_path", "=", "None", ",", "new_description", "=", "None", ",", "organization", "=", "None", ",", "project", "=", "None", ",", "detect", "=", "None", ")", ":", "if", "not", "new_path", "and", "not", "...
https://github.com/Azure/azure-devops-cli-extension/blob/11334cd55806bef0b99c3bee5a438eed71e44037/azure-devops/azext_devops/dev/pipelines/pipeline_folders.py#L73-L105
Quantika14/guasap-whatsapp-foresincs-tool
68291f781a0483ac0f0fe6c90c7e4395a6d58cea
hashdeep.py
python
extract_mm
(pop_wait)
return md5_cloned, md5_original
Clone media data check
Clone media data check
[ "Clone", "media", "data", "check" ]
def extract_mm(pop_wait): md5_original=list() md5_cloned=list() files=list() on_rute=False on_file=False subdirectoris=list() modules.functions.create_dir_media() directory = check_directory() pull_media(directory) # --- /helpers --- # write("""\ # %%%% HASH_CHECK # %%%% size,sha256,filename # ## # ## Clone media hash check # ##""") mensaje_deb = Label(pop_wait, text="Creando hash y comparando...") mensaje_deb.place(x=20,y=80) pop_wait.update() mensaje_deb = Label(pop_wait, text="(Este proceso varia su duracion en base a los archivos multimedia)") mensaje_deb.place(x=20,y=80) pop_wait.update() PATH = 'Whatsapp_Extracted_Media/' """ Clone media data check """ for path, dirs, files in os.walk(PATH): for fpath in [osp.join(path, f) for f in files]: md5 = filehash(fpath) name = osp.relpath(fpath, PATH) # print "---------------------" # print "MD5 [>] "+str(md5) # print "Path [>] "+str(name) md5_original.append((name,md5)) print "Finish hash cloned..." # print '\n' # write("""\ # %%%% HASH_CHECK # %%%% size,sha256,filename # ## # ## Original media hash check # ##""") ls=get_subdirectoris(directory) ls=ls.replace("\r", "").split("\n") for l in ls: if "/" in l and on_rute: if on_file: files.append(path.replace(":","")) subdirectoris.append(files) on_rute=False on_file=False files=[] else: on_rute=False on_file=False files=[] path="" if "/" in l: path = l on_rute=True elif "." in l and on_rute: file_l = l on_file=True files.append(file_l) else: continue for directory in subdirectoris: path=directory[len(directory)-1] for i in range(len(directory)-1): name, md5 = get_mdinfo(path, directory[i]) md5_cloned.append((name,md5)) # print "---------------------" # print "MD5 [>] "+str(md5) # print "Path [>] "+str(name) print "Finish hash origin..." print "Comparing..." for i in range(len(md5_cloned)): for has in md5_original: if has[1] == md5_cloned[i][1]: print "-----------------------" print "------- Cloned ------" print "MD5 [>] "+str(md5_cloned[i][1]) print "Path [>] "+str(md5_cloned[i][0]) print "--------Original-------" print "MD5 [>] "+str(has[1]) print "Path [>] "+str(has[0]) # time.sleep(1) return md5_cloned, md5_original
[ "def", "extract_mm", "(", "pop_wait", ")", ":", "md5_original", "=", "list", "(", ")", "md5_cloned", "=", "list", "(", ")", "files", "=", "list", "(", ")", "on_rute", "=", "False", "on_file", "=", "False", "subdirectoris", "=", "list", "(", ")", "modul...
https://github.com/Quantika14/guasap-whatsapp-foresincs-tool/blob/68291f781a0483ac0f0fe6c90c7e4395a6d58cea/hashdeep.py#L63-L157
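Stripped of the adb pulls and Tkinter updates, the comparison extract_mm performs reduces to hashing two directory trees and intersecting the digests. A compact sketch of just that step; the second path is illustrative:

import hashlib
import os

def hash_tree(root):
    """Map relative path -> MD5 hex digest for every file under root."""
    digests = {}
    for path, _dirs, files in os.walk(root):
        for name in files:
            fpath = os.path.join(path, name)
            with open(fpath, "rb") as fh:
                digests[os.path.relpath(fpath, root)] = hashlib.md5(fh.read()).hexdigest()
    return digests

cloned = hash_tree("Whatsapp_Extracted_Media/")
original = hash_tree("Original_Media/")  # illustrative path
by_md5 = {md5: name for name, md5 in original.items()}
for name, md5 in cloned.items():
    if md5 in by_md5:
        print("Cloned %s matches original %s (%s)" % (name, by_md5[md5], md5))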
vericast/spylon-kernel
2d0ddf2aca1b91738f938b72a500c20293e3156c
versioneer.py
python
do_vcs_install
(manifest_in, versionfile_source, ipy)
Git-specific installation logic for Versioneer. For Git, this means creating/changing .gitattributes to mark _version.py for export-subst keyword substitution.
Git-specific installation logic for Versioneer.
[ "Git", "-", "specific", "installation", "logic", "for", "Versioneer", "." ]
def do_vcs_install(manifest_in, versionfile_source, ipy): """Git-specific installation logic for Versioneer. For Git, this means creating/changing .gitattributes to mark _version.py for export-subst keyword substitution. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] files = [manifest_in, versionfile_source] if ipy: files.append(ipy) try: me = __file__ if me.endswith(".pyc") or me.endswith(".pyo"): me = os.path.splitext(me)[0] + ".py" versioneer_file = os.path.relpath(me) except NameError: versioneer_file = "versioneer.py" files.append(versioneer_file) present = False try: f = open(".gitattributes", "r") for line in f.readlines(): if line.strip().startswith(versionfile_source): if "export-subst" in line.strip().split()[1:]: present = True f.close() except EnvironmentError: pass if not present: f = open(".gitattributes", "a+") f.write("%s export-subst\n" % versionfile_source) f.close() files.append(".gitattributes") run_command(GITS, ["add", "--"] + files)
[ "def", "do_vcs_install", "(", "manifest_in", ",", "versionfile_source", ",", "ipy", ")", ":", "GITS", "=", "[", "\"git\"", "]", "if", "sys", ".", "platform", "==", "\"win32\"", ":", "GITS", "=", "[", "\"git.cmd\"", ",", "\"git.exe\"", "]", "files", "=", ...
https://github.com/vericast/spylon-kernel/blob/2d0ddf2aca1b91738f938b72a500c20293e3156c/versioneer.py#L1117-L1152
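Reduced to its essence, do_vcs_install above makes sure .gitattributes marks the version file for export-subst (so `git archive` expands keywords in it) and stages the result. A sketch under an illustrative path:

import subprocess

versionfile = "src/mypkg/_version.py"  # illustrative, not from the record
with open(".gitattributes", "a+") as f:
    f.seek(0)
    present = any(line.split()[:2] == [versionfile, "export-subst"]
                  for line in f if line.strip())
    if not present:
        f.write("%s export-subst\n" % versionfile)
subprocess.check_call(["git", "add", "--", ".gitattributes", versionfile])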
dimagi/commcare-hq
d67ff1d3b4c51fa050c19e60c3253a79d3452a39
corehq/apps/accounting/subscription_changes.py
python
_get_active_immediate_broadcasts
(domain, survey_only=False)
return result
[]
def _get_active_immediate_broadcasts(domain, survey_only=False): result = list(ImmediateBroadcast.objects.filter(domain=domain.name, deleted=False, schedule__active=True)) if survey_only: result = [broadcast for broadcast in result if broadcast.schedule.memoized_uses_sms_survey] return result
[ "def", "_get_active_immediate_broadcasts", "(", "domain", ",", "survey_only", "=", "False", ")", ":", "result", "=", "list", "(", "ImmediateBroadcast", ".", "objects", ".", "filter", "(", "domain", "=", "domain", ".", "name", ",", "deleted", "=", "False", ",...
https://github.com/dimagi/commcare-hq/blob/d67ff1d3b4c51fa050c19e60c3253a79d3452a39/corehq/apps/accounting/subscription_changes.py#L90-L95
Pylons/pyramid
0b24ac16cc04746b25cf460f1497c157f6d3d6f4
src/pyramid/interfaces.py
python
IAuthenticationPolicy.unauthenticated_userid
(request)
Return the *unauthenticated* userid. This method performs the same duty as ``authenticated_userid`` but is permitted to return the userid based only on data present in the request; it needn't (and shouldn't) check any persistent store to ensure that the user record related to the request userid exists. This method is intended primarily as a helper to assist the ``authenticated_userid`` method in pulling credentials out of the request data, abstracting away the specific headers, query strings, etc. that are used to authenticate the request.
Return the *unauthenticated* userid. This method performs the same duty as ``authenticated_userid`` but is permitted to return the userid based only on data present in the request; it needn't (and shouldn't) check any persistent store to ensure that the user record related to the request userid exists.
[ "Return", "the", "*", "unauthenticated", "*", "userid", ".", "This", "method", "performs", "the", "same", "duty", "as", "authenticated_userid", "but", "is", "permitted", "to", "return", "the", "userid", "based", "only", "on", "data", "present", "in", "the", ...
def unauthenticated_userid(request): """Return the *unauthenticated* userid. This method performs the same duty as ``authenticated_userid`` but is permitted to return the userid based only on data present in the request; it needn't (and shouldn't) check any persistent store to ensure that the user record related to the request userid exists. This method is intended primarily as a helper to assist the ``authenticated_userid`` method in pulling credentials out of the request data, abstracting away the specific headers, query strings, etc. that are used to authenticate the request. """
[ "def", "unauthenticated_userid", "(", "request", ")", ":" ]
https://github.com/Pylons/pyramid/blob/0b24ac16cc04746b25cf460f1497c157f6d3d6f4/src/pyramid/interfaces.py#L560-L573
rhinstaller/anaconda
63edc8680f1b05cbfe11bef28703acba808c5174
pyanaconda/ui/gui/spokes/installation_source.py
python
SourceSpoke._reset_repo_store
(self)
Reset the list of repos. Populate the list with all the addon repos from payload.addons. If the list has no element, clear the repo entry fields.
Reset the list of repos.
[ "Reset", "the", "list", "of", "repos", "." ]
def _reset_repo_store(self): """ Reset the list of repos. Populate the list with all the addon repos from payload.addons. If the list has no element, clear the repo entry fields. """ log.debug("Clearing checks in source spoke") # Remove the repo checks for checks in self._repo_checks.values(): self.remove_check(checks.name_check) self.remove_check(checks.url_check) self.remove_check(checks.proxy_check) self._repo_checks = {} with self._repo_store_lock: self._repo_store.clear() for repo in self.data.repo.dataList(): log.debug("Setting up repo: %s", repo.name) ks_repo = self.data.RepoData.create_copy(repo) # Track the original name, user may change .name ks_repo.orig_name = repo.name # Add addon repository id for identification ks_repo.repo_id = next(self._repo_counter) self._repo_store.append([self.payload.is_repo_enabled(repo.name), ks_repo.name, ks_repo]) if len(self._repo_store) > 0: self._repo_selection.select_path(0) else: self._clear_repo_info() self._repo_entry_box.set_sensitive(False)
[ "def", "_reset_repo_store", "(", "self", ")", ":", "log", ".", "debug", "(", "\"Clearing checks in source spoke\"", ")", "# Remove the repo checks", "for", "checks", "in", "self", ".", "_repo_checks", ".", "values", "(", ")", ":", "self", ".", "remove_check", "(...
https://github.com/rhinstaller/anaconda/blob/63edc8680f1b05cbfe11bef28703acba808c5174/pyanaconda/ui/gui/spokes/installation_source.py#L1474-L1508
circus-tent/circus
4b8aaca9470e3fd1ab115b14c8b1d1bf9b5b3c5e
circus/consumer.py
python
CircusConsumer.iter_messages
(self)
Yields tuples of (topic, message)
Yields tuples of (topic, message)
[ "Yields", "tuples", "of", "(", "topic", "message", ")" ]
def iter_messages(self): """ Yields tuples of (topic, message) """ with self: while True: try: events = dict(self.poller.poll(self.timeout * 1000)) except zmq.ZMQError as e: if e.errno == errno.EINTR: continue raise if len(events) == 0: continue topic, message = self.pubsub_socket.recv_multipart() yield topic, message
[ "def", "iter_messages", "(", "self", ")", ":", "with", "self", ":", "while", "True", ":", "try", ":", "events", "=", "dict", "(", "self", ".", "poller", ".", "poll", "(", "self", ".", "timeout", "*", "1000", ")", ")", "except", "zmq", ".", "ZMQErro...
https://github.com/circus-tent/circus/blob/4b8aaca9470e3fd1ab115b14c8b1d1bf9b5b3c5e/circus/consumer.py#L38-L53
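A hedged consumption sketch for iter_messages; the topic prefix and endpoint passed to CircusConsumer are assumptions about its constructor, not taken from the record:

from circus.consumer import CircusConsumer

consumer = CircusConsumer(["watcher."], endpoint="tcp://127.0.0.1:5556")
for topic, message in consumer.iter_messages():
    print(topic, message)  # e.g. a watcher event and its payload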
joe42/CloudFusion
c4b94124e74a81e0634578c7754d62160081f7a1
cloudfusion/util/lru_cache.py
python
LRUCache.delete
(self, key)
Remove current entry associated with key from the LRU queue.
Remove current entry associated with key from the LRU queue.
[ "Remove", "current", "entry", "associated", "with", "key", "from", "the", "LRU", "queue", "." ]
def delete(self, key): """Remove current entry associated with key from the LRU queue.""" if key in self.entries: entry = self.entries[key] self.entries[CACHESIZE] -= self._get_size_of_entry(entry) self._unlink(key) del self.entries[key]
[ "def", "delete", "(", "self", ",", "key", ")", ":", "if", "key", "in", "self", ".", "entries", ":", "entry", "=", "self", ".", "entries", "[", "key", "]", "self", ".", "entries", "[", "CACHESIZE", "]", "-=", "self", ".", "_get_size_of_entry", "(", ...
https://github.com/joe42/CloudFusion/blob/c4b94124e74a81e0634578c7754d62160081f7a1/cloudfusion/util/lru_cache.py#L173-L179
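A minimal OrderedDict-based sketch of the same idea; it is not the CloudFusion implementation (which also maintains an aggregate CACHESIZE counter), but shows how delete unlinks an entry from the recency queue:

from collections import OrderedDict

class TinyLRU:
    def __init__(self, maxsize=128):
        self.maxsize = maxsize
        self.entries = OrderedDict()

    def put(self, key, value):
        self.entries[key] = value
        self.entries.move_to_end(key)         # mark as most recently used
        if len(self.entries) > self.maxsize:
            self.entries.popitem(last=False)  # evict least recently used

    def delete(self, key):
        self.entries.pop(key, None)           # unlink if present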
zhl2008/awd-platform
0416b31abea29743387b10b3914581fbe8e7da5e
web_hxb2/lib/python3.5/site-packages/uwsgidecorators.py
python
spoolforever
(f=None, pass_arguments=False)
return spool_decorate(f, pass_arguments, _spoolforever)
[]
def spoolforever(f=None, pass_arguments=False): return spool_decorate(f, pass_arguments, _spoolforever)
[ "def", "spoolforever", "(", "f", "=", "None", ",", "pass_arguments", "=", "False", ")", ":", "return", "spool_decorate", "(", "f", ",", "pass_arguments", ",", "_spoolforever", ")" ]
https://github.com/zhl2008/awd-platform/blob/0416b31abea29743387b10b3914581fbe8e7da5e/web_hxb2/lib/python3.5/site-packages/uwsgidecorators.py#L159-L160
pwnieexpress/pwn_plug_sources
1a23324f5dc2c3de20f9c810269b6a29b2758cad
src/metagoofil/pdfminer/layout.py
python
LTItem.voverlap
(self, obj)
[]
def voverlap(self, obj): assert isinstance(obj, LTItem) if self.is_voverlap(obj): return min(abs(self.y0-obj.y1), abs(self.y1-obj.y0)) else: return 0
[ "def", "voverlap", "(", "self", ",", "obj", ")", ":", "assert", "isinstance", "(", "obj", ",", "LTItem", ")", "if", "self", ".", "is_voverlap", "(", "obj", ")", ":", "return", "min", "(", "abs", "(", "self", ".", "y0", "-", "obj", ".", "y1", ")",...
https://github.com/pwnieexpress/pwn_plug_sources/blob/1a23324f5dc2c3de20f9c810269b6a29b2758cad/src/metagoofil/pdfminer/layout.py#L87-L92
perrygeo/simanneal
951e7d89a8b7f19aeb05b64e7cc8b844a734af89
examples/watershed/shapefile.py
python
Writer.field
(self, name, fieldType="C", size="50", decimal=0)
Adds a dbf field descriptor to the shapefile.
Adds a dbf field descriptor to the shapefile.
[ "Adds", "a", "dbf", "field", "descriptor", "to", "the", "shapefile", "." ]
def field(self, name, fieldType="C", size="50", decimal=0): """Adds a dbf field descriptor to the shapefile.""" self.fields.append((name, fieldType, size, decimal))
[ "def", "field", "(", "self", ",", "name", ",", "fieldType", "=", "\"C\"", ",", "size", "=", "\"50\"", ",", "decimal", "=", "0", ")", ":", "self", ".", "fields", ".", "append", "(", "(", "name", ",", "fieldType", ",", "size", ",", "decimal", ")", ...
https://github.com/perrygeo/simanneal/blob/951e7d89a8b7f19aeb05b64e7cc8b844a734af89/examples/watershed/shapefile.py#L789-L791
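Typical use of field(): declare every dbf column before writing records. A hedged sketch assuming the bundled module is importable as shapefile and follows the old pyshp Writer API:

import shapefile  # the copy bundled under examples/watershed

w = shapefile.Writer(shapefile.POINT)
w.field("NAME", "C", "40")     # 40-character text column
w.field("AREA", "N", "18", 5)  # 18-wide numeric column, 5 decimal places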
openshift/openshift-tools
1188778e728a6e4781acf728123e5b356380fe6f
openshift/installer/vendored/openshift-ansible-3.9.14-1/roles/lib_vendored_deps/library/oc_route.py
python
Utils.openshift_installed
()
return rpmquery.count() > 0
check if openshift is installed
check if openshift is installed
[ "check", "if", "openshift", "is", "installed" ]
def openshift_installed(): ''' check if openshift is installed ''' import rpm transaction_set = rpm.TransactionSet() rpmquery = transaction_set.dbMatch("name", "atomic-openshift") return rpmquery.count() > 0
[ "def", "openshift_installed", "(", ")", ":", "import", "rpm", "transaction_set", "=", "rpm", ".", "TransactionSet", "(", ")", "rpmquery", "=", "transaction_set", ".", "dbMatch", "(", "\"name\"", ",", "\"atomic-openshift\"", ")", "return", "rpmquery", ".", "count...
https://github.com/openshift/openshift-tools/blob/1188778e728a6e4781acf728123e5b356380fe6f/openshift/installer/vendored/openshift-ansible-3.9.14-1/roles/lib_vendored_deps/library/oc_route.py#L1372-L1379
SoCo/SoCo
e83fef84d2645d05265dbd574598518655a9c125
soco/events_base.py
python
get_listen_ip
(ip_address)
Find the listen ip address.
Find the listen ip address.
[ "Find", "the", "listen", "ip", "address", "." ]
def get_listen_ip(ip_address): """Find the listen ip address.""" if config.EVENT_LISTENER_IP: return config.EVENT_LISTENER_IP sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) try: sock.connect((ip_address, config.EVENT_LISTENER_PORT)) return sock.getsockname()[0] except socket.error: return None finally: sock.close()
[ "def", "get_listen_ip", "(", "ip_address", ")", ":", "if", "config", ".", "EVENT_LISTENER_IP", ":", "return", "config", ".", "EVENT_LISTENER_IP", "sock", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_DGRAM", ")", "try...
https://github.com/SoCo/SoCo/blob/e83fef84d2645d05265dbd574598518655a9c125/soco/events_base.py#L773-L784
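The trick above is portable: connect() on a UDP socket transmits nothing, but forces the OS to pick the outgoing interface, whose address getsockname() then reports. Standalone, with an illustrative port in place of the configured listener port:

import socket

def local_ip_for(target_ip, port=1400):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        sock.connect((target_ip, port))  # no packet is sent for UDP
        return sock.getsockname()[0]     # the local address the OS chose
    except socket.error:
        return None
    finally:
        sock.close()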
TarrySingh/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials
5bb97d7e3ffd913abddb4cfa7d78a1b4c868890e
tensorflow_dl_models/research/slim/deployment/model_deploy.py
python
optimize_clones
(clones, optimizer, regularization_losses=None, **kwargs)
return total_loss, grads_and_vars
Compute clone losses and gradients for the given list of `Clones`. Note: The regularization_losses are added to the first clone losses. Args: clones: List of `Clones` created by `create_clones()`. optimizer: An `Optimizer` object. regularization_losses: Optional list of regularization losses. If None it will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to exclude them. **kwargs: Optional list of keyword arguments to pass to `compute_gradients`. Returns: A tuple (total_loss, grads_and_vars). - total_loss: A Tensor containing the average of the clone losses including the regularization loss. - grads_and_vars: A List of tuples (gradient, variable) containing the sum of the gradients for each variable.
Compute clone losses and gradients for the given list of `Clones`.
[ "Compute", "clone", "losses", "and", "gradients", "for", "the", "given", "list", "of", "Clones", "." ]
def optimize_clones(clones, optimizer, regularization_losses=None, **kwargs): """Compute clone losses and gradients for the given list of `Clones`. Note: The regularization_losses are added to the first clone losses. Args: clones: List of `Clones` created by `create_clones()`. optimizer: An `Optimizer` object. regularization_losses: Optional list of regularization losses. If None it will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to exclude them. **kwargs: Optional list of keyword arguments to pass to `compute_gradients`. Returns: A tuple (total_loss, grads_and_vars). - total_loss: A Tensor containing the average of the clone losses including the regularization loss. - grads_and_vars: A List of tuples (gradient, variable) containing the sum of the gradients for each variable. """ grads_and_vars = [] clones_losses = [] num_clones = len(clones) if regularization_losses is None: regularization_losses = tf.get_collection( tf.GraphKeys.REGULARIZATION_LOSSES) for clone in clones: with tf.name_scope(clone.scope): clone_loss, clone_grad = _optimize_clone( optimizer, clone, num_clones, regularization_losses, **kwargs) if clone_loss is not None: clones_losses.append(clone_loss) grads_and_vars.append(clone_grad) # Only use regularization_losses for the first clone regularization_losses = None # Compute the total_loss summing all the clones_losses. total_loss = tf.add_n(clones_losses, name='total_loss') # Sum the gradients across clones. grads_and_vars = _sum_clones_gradients(grads_and_vars) return total_loss, grads_and_vars
[ "def", "optimize_clones", "(", "clones", ",", "optimizer", ",", "regularization_losses", "=", "None", ",", "*", "*", "kwargs", ")", ":", "grads_and_vars", "=", "[", "]", "clones_losses", "=", "[", "]", "num_clones", "=", "len", "(", "clones", ")", "if", ...
https://github.com/TarrySingh/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials/blob/5bb97d7e3ffd913abddb4cfa7d78a1b4c868890e/tensorflow_dl_models/research/slim/deployment/model_deploy.py#L265-L307
seantis/seantis-questionnaire
698c77b3d707635f50bcd86e7f1c94e94061b0f5
questionnaire/models.py
python
QuestionSet.questions
(self)
return self.__qcache
[]
def questions(self): if not hasattr(self, "__qcache"): def numeric_number(val): matches = re.findall(r'^\d+', val) return int(matches[0]) if matches else 0 self.__qcache = sorted( Question.objects.filter(questionset=self.id), key=lambda q: (numeric_number(q.number), q.number) ) return self.__qcache
[ "def", "questions", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "\"__qcache\"", ")", ":", "def", "numeric_number", "(", "val", ")", ":", "matches", "=", "re", ".", "findall", "(", "r'^\\d+'", ",", "val", ")", "return", "int", "("...
https://github.com/seantis/seantis-questionnaire/blob/698c77b3d707635f50bcd86e7f1c94e94061b0f5/questionnaire/models.py#L174-L183
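The sort key above orders question numbers humanly: first by any leading integer, then by the full string, so "10a" sorts after "2b". Isolated and exercised with invented values:

import re

def sort_key(number):
    matches = re.findall(r'^\d+', number)
    return (int(matches[0]) if matches else 0, number)

assert sorted(["10a", "2", "2b", "1"], key=sort_key) == ["1", "2", "2b", "10a"]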
linxid/Machine_Learning_Study_Path
558e82d13237114bbb8152483977806fc0c222af
Machine Learning In Action/Chapter4-NaiveBayes/venv/Lib/_collections_abc.py
python
Callable.__call__
(self, *args, **kwds)
return False
[]
def __call__(self, *args, **kwds): return False
[ "def", "__call__", "(", "self", ",", "*", "args", ",", "*", "*", "kwds", ")", ":", "return", "False" ]
https://github.com/linxid/Machine_Learning_Study_Path/blob/558e82d13237114bbb8152483977806fc0c222af/Machine Learning In Action/Chapter4-NaiveBayes/venv/Lib/_collections_abc.py#L403-L404
mongodb/motor
055f5e05abf1f15e64ae43fb8c680d2706f3c419
motor/metaprogramming.py
python
AsyncRead.__init__
(self, attr_name=None, doc=None)
A descriptor that wraps a PyMongo read method like find_one() that returns a Future.
A descriptor that wraps a PyMongo read method like find_one() that returns a Future.
[ "A", "descriptor", "that", "wraps", "a", "PyMongo", "read", "method", "like", "find_one", "()", "that", "returns", "a", "Future", "." ]
def __init__(self, attr_name=None, doc=None): """A descriptor that wraps a PyMongo read method like find_one() that returns a Future. """ Async.__init__(self, attr_name=attr_name, doc=doc)
[ "def", "__init__", "(", "self", ",", "attr_name", "=", "None", ",", "doc", "=", "None", ")", ":", "Async", ".", "__init__", "(", "self", ",", "attr_name", "=", "attr_name", ",", "doc", "=", "doc", ")" ]
https://github.com/mongodb/motor/blob/055f5e05abf1f15e64ae43fb8c680d2706f3c419/motor/metaprogramming.py#L172-L176
huggingface/transformers
623b4f7c63f60cce917677ee704d6c93ee960b4b
src/transformers/utils/dummy_pt_objects.py
python
PerceiverPreTrainedModel.from_pretrained
(cls, *args, **kwargs)
[]
def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"])
[ "def", "from_pretrained", "(", "cls", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "requires_backends", "(", "cls", ",", "[", "\"torch\"", "]", ")" ]
https://github.com/huggingface/transformers/blob/623b4f7c63f60cce917677ee704d6c93ee960b4b/src/transformers/utils/dummy_pt_objects.py#L3866-L3867