repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
madedotcom/atomicpuppy
atomicpuppy/atomicpuppy.py
https://github.com/madedotcom/atomicpuppy/blob/3c262c26e74bcfc25199984b425a5dccac0911ac/atomicpuppy/atomicpuppy.py#L74-L87
def location(self): """Return a string uniquely identifying the event. This string can be used to find the event in the event store UI (cf. id attribute, which is the UUID that at time of writing doesn't let you easily find the event). """ if self._location is None: self._location = "{}/{}-{}".format( self.stream, self.type, self.sequence, ) return self._location
[ "def", "location", "(", "self", ")", ":", "if", "self", ".", "_location", "is", "None", ":", "self", ".", "_location", "=", "\"{}/{}-{}\"", ".", "format", "(", "self", ".", "stream", ",", "self", ".", "type", ",", "self", ".", "sequence", ",", ")", ...
Return a string uniquely identifying the event. This string can be used to find the event in the event store UI (cf. id attribute, which is the UUID that at time of writing doesn't let you easily find the event).
[ "Return", "a", "string", "uniquely", "identifying", "the", "event", "." ]
python
train
rasbt/biopandas
biopandas/pdb/pandas_pdb.py
https://github.com/rasbt/biopandas/blob/615a7cf272692c12bbcfd9d1f217eab440120235/biopandas/pdb/pandas_pdb.py#L341-L373
def _construct_df(pdb_lines): """Construct DataFrames from list of PDB lines.""" valids = tuple(pdb_records.keys()) line_lists = {r: [] for r in valids} line_lists['OTHERS'] = [] for line_num, line in enumerate(pdb_lines): if line.strip(): if line.startswith(valids): record = line[:6].rstrip() line_ele = ['' for _ in range(len( pdb_records[record]) + 1)] for idx, ele in enumerate(pdb_records[record]): line_ele[idx] = (line[ele['line'][0]:ele['line'][1]] .strip()) line_ele[-1] = line_num line_lists[record].append(line_ele) else: line_lists['OTHERS'].append([line[:6].rstrip(), line[6:-1].rstrip(), line_num]) dfs = {} for r in line_lists.items(): df = pd.DataFrame(r[1], columns=[c['id'] for c in pdb_records[r[0]]] + ['line_idx']) for c in pdb_records[r[0]]: try: df[c['id']] = df[c['id']].astype(c['type']) except ValueError: # expect ValueError if float/int columns are empty strings df[c['id']] = pd.Series(np.nan, index=df.index) dfs[r[0]] = df return dfs
[ "def", "_construct_df", "(", "pdb_lines", ")", ":", "valids", "=", "tuple", "(", "pdb_records", ".", "keys", "(", ")", ")", "line_lists", "=", "{", "r", ":", "[", "]", "for", "r", "in", "valids", "}", "line_lists", "[", "'OTHERS'", "]", "=", "[", "...
Construct DataFrames from list of PDB lines.
[ "Construct", "DataFrames", "from", "list", "of", "PDB", "lines", "." ]
python
train
wishtack/pysynthetic
synthetic/decorators.py
https://github.com/wishtack/pysynthetic/blob/f37a4a2f1e0313b8c544f60d37c93726bc806ec6/synthetic/decorators.py#L68-L112
def synthesize_member(member_name, default = None, contract = None, read_only = False, getter_name = None, setter_name = None, private_member_name = None): """ When applied to a class, this decorator adds getter/setter methods to it and overrides the constructor in order to set\ the default value of the member. By default, the getter will be named ``member_name``. (Ex.: ``member_name = 'member' => instance.member()``) By default, the setter will be named ``member_name`` with 'set\_' prepended it to it. (Ex.: ``member_name = 'member' => instance.set_member(...)``) By default, the private attribute containing the member's value will be named ``member_name`` with '_' prepended to it. Naming convention can be overridden with a custom one using :meth:`naming_convention <naming_convention>` decorator. :param member_name: Name of the member to synthesize. :type member_name: str :param default: Member's default value. :type default: * :param contract: Type constraint. See `PyContracts <http://andreacensi.github.com/contracts/>`_ :type contract: * :param read_only: If set to ``True``, the setter will not be added to the class. :type read_only: bool :param getter_name: Custom getter name. This can be useful when the member is a boolean. (Ex.: ``is_alive``) :type getter_name: str|None :param setter_name: Custom setter name. :type setter_name: str|None :param private_member_name: Custom name for the private attribute that contains the member's value. :type private_member_name: str|None :raises: :class:`DuplicateMemberNameError` when two synthetic members have the same name. """ accessorDelegate = AccessorDelegate(namingConvention = NamingConventionUnderscore(), getterName = getter_name, setterName = setter_name) return SyntheticDecoratorFactory().syntheticMemberDecorator(memberName = member_name, defaultValue = default, contract = contract, readOnly = read_only, privateMemberName = private_member_name, memberDelegate = accessorDelegate)
[ "def", "synthesize_member", "(", "member_name", ",", "default", "=", "None", ",", "contract", "=", "None", ",", "read_only", "=", "False", ",", "getter_name", "=", "None", ",", "setter_name", "=", "None", ",", "private_member_name", "=", "None", ")", ":", ...
When applied to a class, this decorator adds getter/setter methods to it and overrides the constructor in order to set\ the default value of the member. By default, the getter will be named ``member_name``. (Ex.: ``member_name = 'member' => instance.member()``) By default, the setter will be named ``member_name`` with 'set\_' prepended it to it. (Ex.: ``member_name = 'member' => instance.set_member(...)``) By default, the private attribute containing the member's value will be named ``member_name`` with '_' prepended to it. Naming convention can be overridden with a custom one using :meth:`naming_convention <naming_convention>` decorator. :param member_name: Name of the member to synthesize. :type member_name: str :param default: Member's default value. :type default: * :param contract: Type constraint. See `PyContracts <http://andreacensi.github.com/contracts/>`_ :type contract: * :param read_only: If set to ``True``, the setter will not be added to the class. :type read_only: bool :param getter_name: Custom getter name. This can be useful when the member is a boolean. (Ex.: ``is_alive``) :type getter_name: str|None :param setter_name: Custom setter name. :type setter_name: str|None :param private_member_name: Custom name for the private attribute that contains the member's value. :type private_member_name: str|None :raises: :class:`DuplicateMemberNameError` when two synthetic members have the same name.
[ "When", "applied", "to", "a", "class", "this", "decorator", "adds", "getter", "/", "setter", "methods", "to", "it", "and", "overrides", "the", "constructor", "in", "order", "to", "set", "\\", "the", "default", "value", "of", "the", "member", ".", "By", "...
python
train
intel-analytics/BigDL
pyspark/bigdl/keras/converter.py
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/keras/converter.py#L54-L63
def load_weights_from_json_hdf5(def_json, weights_hdf5, by_name=False): """ The file path can be stored in a local file system, HDFS, S3, or any Hadoop-supported file system. """ bmodel = DefinitionLoader.from_json_path(def_json) def_value = BCommon.text_from_path(def_json) kmodel = model_from_json(def_value) WeightLoader.load_weights_from_hdf5(bmodel, kmodel, weights_hdf5, by_name) return bmodel
[ "def", "load_weights_from_json_hdf5", "(", "def_json", ",", "weights_hdf5", ",", "by_name", "=", "False", ")", ":", "bmodel", "=", "DefinitionLoader", ".", "from_json_path", "(", "def_json", ")", "def_value", "=", "BCommon", ".", "text_from_path", "(", "def_json",...
The file path can be stored in a local file system, HDFS, S3, or any Hadoop-supported file system.
[ "The", "file", "path", "can", "be", "stored", "in", "a", "local", "file", "system", "HDFS", "S3", "or", "any", "Hadoop", "-", "supported", "file", "system", "." ]
python
test
decryptus/httpdis
httpdis/httpdis.py
https://github.com/decryptus/httpdis/blob/5d198cdc5558f416634602689b3df2c8aeb34984/httpdis/httpdis.py#L419-L440
def send_response(self, code, message=None, size='-'): """ Send the response header and log the response code. Also send two standard headers with the server software version and the current date. """ # pylint: disable-msg=W0221 if self._to_log or LOG.isEnabledFor(logging.DEBUG): self.log_request(code, size) if message is None: if code in self.responses: message = self.responses[code][0] else: message = '' if self.request_version != 'HTTP/0.9': self.wfile.write("%s %d %s\r\n" % ("HTTP/1.1", code, message)) self.send_header('Server', self.version_string()) self.send_header('Date', self.date_time_string())
[ "def", "send_response", "(", "self", ",", "code", ",", "message", "=", "None", ",", "size", "=", "'-'", ")", ":", "# pylint: disable-msg=W0221", "if", "self", ".", "_to_log", "or", "LOG", ".", "isEnabledFor", "(", "logging", ".", "DEBUG", ")", ":", "self...
Send the response header and log the response code. Also send two standard headers with the server software version and the current date.
[ "Send", "the", "response", "header", "and", "log", "the", "response", "code", "." ]
python
train
koalalorenzo/python-digitalocean
digitalocean/FloatingIP.py
https://github.com/koalalorenzo/python-digitalocean/blob/d0221b57856fb1e131cafecf99d826f7b07a947c/digitalocean/FloatingIP.py#L61-L80
def reserve(self, *args, **kwargs): """ Creates a FloatingIP in a region without assigning it to a specific Droplet. Note: Every argument and parameter given to this method will be assigned to the object. Args: region_slug: str - region's slug (e.g. 'nyc3') """ data = self.get_data('floating_ips/', type=POST, params={'region': self.region_slug}) if data: self.ip = data['floating_ip']['ip'] self.region = data['floating_ip']['region'] return self
[ "def", "reserve", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "data", "=", "self", ".", "get_data", "(", "'floating_ips/'", ",", "type", "=", "POST", ",", "params", "=", "{", "'region'", ":", "self", ".", "region_slug", "}", "...
Creates a FloatingIP in a region without assigning it to a specific Droplet. Note: Every argument and parameter given to this method will be assigned to the object. Args: region_slug: str - region's slug (e.g. 'nyc3')
[ "Creates", "a", "FloatingIP", "in", "a", "region", "without", "assigning", "it", "to", "a", "specific", "Droplet", "." ]
python
valid
bububa/pyTOP
pyTOP/packages/requests/sessions.py
https://github.com/bububa/pyTOP/blob/1e48009bcfe886be392628244b370e6374e1f2b2/pyTOP/packages/requests/sessions.py#L240-L248
def put(self, url, data=None, **kwargs): """Sends a PUT request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`. :param **kwargs: Optional arguments that ``request`` takes. """ return self.request('put', url, data=data, **kwargs)
[ "def", "put", "(", "self", ",", "url", ",", "data", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "request", "(", "'put'", ",", "url", ",", "data", "=", "data", ",", "*", "*", "kwargs", ")" ]
Sends a PUT request. Returns :class:`Response` object. :param url: URL for the new :class:`Request` object. :param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`. :param **kwargs: Optional arguments that ``request`` takes.
[ "Sends", "a", "PUT", "request", ".", "Returns", ":", "class", ":", "Response", "object", "." ]
python
train
bennyrowland/suspect
suspect/io/lcmodel.py
https://github.com/bennyrowland/suspect/blob/c09ab0a5013c5a199218214cdd791659243d7e41/suspect/io/lcmodel.py#L42-L150
def write_all_files(filename, data, wref_data=None, params=None): """ Creates an LCModel control file for processing the supplied MRSData, and optional water reference data, updating the default parameters with any values supplied through params. :param filename: the location where the control file should be saved. :param data: MRSData to be processed. :param wref_data: Optional MRSData containing water reference. :param params: Optional dictionary containing non-default parameter values. :return: """ # we assume that the data has one spectral dimension, any others must be # spatial if len(data.shape) == 1: shape = (1, 1, 1) elif len(data.shape) == 2: shape = (data.shape[0], 1, 1) elif len(data.shape) == 3: shape = (data.shape[0], data.shape[1], 1) elif len(data.shape) == 4: shape = data.shape[0:3] elif len(data.shape) > 4: raise ValueError("LCModel cannot handle data with more than 4 dimensions") # We need to save a bunch of files for LCModel to process: a raw file for # the data, possibly a raw file for the wref and a control file for each # slice. In addition, in the absence of information in the params file # about where to save the output (.ps, .csv, .table etc.) that should also # be saved in the same folder as the input data for LCModel. 
folder, file_root = os.path.split(filename) # make sure that the folder exists before trying to save things to it if not os.path.isdir(folder): os.makedirs(folder) file_root, ext = os.path.splitext(file_root) base_params = { "FILBAS": "/home/spectre/.lcmodel/basis-sets/provencher/press_te30_3t_gsh_v3.basis", "ICOLST": 1, "ICOLEN": shape[0], "NDCOLS": shape[0], "IROWST": 1, "IROWEN": shape[1], "NDROWS": shape[1], "NDSLIC": shape[2], "DOWS": "T" if wref_data is not None else "F", "DOECC": "T" if wref_data is not None else "F", "FILRAW": os.path.join(folder, file_root + ".RAW"), "FILPS": os.path.join(folder, file_root + ".PS") } if wref_data is not None: base_params["FILH2O"] = os.path.join(folder, file_root + ".H2O") # add the user supplied parameters to the list if params is not None: base_params.update(params) # make a few modifications based on user edits if "FILTAB" in base_params: base_params["LTABLE"] = 7 base_params["FILTAB"] = "'{}'".format(base_params["FILTAB"]) elif "LTABLE" in base_params: base_params["LTABLE"] = 7 base_params["FILTAB"] = "'{}'".format(os.path.join(folder, file_root + ".TABLE")) if "FILCSV" in base_params: base_params["LCSV"] = 11 base_params["FILCSV"] = "'{}'".format(base_params["FILCSV"]) elif "LCSV" in base_params: base_params["LCSV"] = 11 base_params["FILCSV"] = "'{}'".format(os.path.join(folder, file_root + ".CSV")) if "FILCOO" in base_params: base_params["LCOORD"] = 9 base_params["FILCOO"] = "'{}'".format(base_params["FILCOO"]) elif "LCOORD" in base_params: base_params["LCOORD"] = 9 base_params["FILCOO"] = "'{}'".format(os.path.join(folder, file_root + ".COORD")) if "FILCOR" in base_params: base_params["LCORAW"] = 10 base_params["FILCOR"] = "'{}'".format(base_params["FILCOR"]) elif "LCORAW" in base_params: base_params["LCORAW"] = 10 base_params["FILCOR"] = "'{}'".format(os.path.join(folder, file_root + ".CORAW")) save_raw(base_params["FILRAW"], data) if wref_data is not None: save_raw(base_params["FILH2O"], wref_data) # have to add 
single quotes to the various paths base_params["FILRAW"] = "'{}'".format(base_params["FILRAW"]) base_params["FILBAS"] = "'{}'".format(base_params["FILBAS"]) base_params["FILPS"] = "'{}'".format(base_params["FILPS"]) if wref_data is not None: base_params["FILH2O"] = "'{}'".format(base_params["FILH2O"]) for slice_index in range(shape[2]): control_filename = "{0}_sl{1}.CONTROL".format(file_root, slice_index) control_filepath = os.path.join(folder, control_filename) with open(control_filepath, 'wt') as fout: fout.write(" $LCMODL\n") fout.write(" OWNER = ''\n") fout.write(" KEY = 123456789\n") fout.write(" DELTAT = {}\n".format(data.dt)) fout.write(" HZPPPM = {}\n".format(data.f0)) fout.write(" NUNFIL = {}\n".format(data.np)) for key, value in base_params.items(): fout.write(" {0} = {1}\n".format(key, value)) fout.write(" $END\n")
[ "def", "write_all_files", "(", "filename", ",", "data", ",", "wref_data", "=", "None", ",", "params", "=", "None", ")", ":", "# we assume that the data has one spectral dimension, any others must be", "# spatial", "if", "len", "(", "data", ".", "shape", ")", "==", ...
Creates an LCModel control file for processing the supplied MRSData, and optional water reference data, updating the default parameters with any values supplied through params. :param filename: the location where the control file should be saved. :param data: MRSData to be processed. :param wref_data: Optional MRSData containing water reference. :param params: Optional dictionary containing non-default parameter values. :return:
[ "Creates", "an", "LCModel", "control", "file", "for", "processing", "the", "supplied", "MRSData", "and", "optional", "water", "reference", "data", "updating", "the", "default", "parameters", "with", "any", "values", "supplied", "through", "params", "." ]
python
train
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/image_batches.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/eval_lib/image_batches.py#L291-L297
def count_generated_adv_examples(self): """Returns total number of all generated adversarial examples.""" result = {} for v in itervalues(self.data): s_id = v['submission_id'] result[s_id] = result.get(s_id, 0) + len(v['images']) return result
[ "def", "count_generated_adv_examples", "(", "self", ")", ":", "result", "=", "{", "}", "for", "v", "in", "itervalues", "(", "self", ".", "data", ")", ":", "s_id", "=", "v", "[", "'submission_id'", "]", "result", "[", "s_id", "]", "=", "result", ".", ...
Returns total number of all generated adversarial examples.
[ "Returns", "total", "number", "of", "all", "generated", "adversarial", "examples", "." ]
python
train
prompt-toolkit/pymux
pymux/arrangement.py
https://github.com/prompt-toolkit/pymux/blob/3f66e62b9de4b2251c7f9afad6c516dc5a30ec67/pymux/arrangement.py#L618-L622
def get_window_by_index(self, index): " Return the Window with this index or None if not found. " for w in self.windows: if w.index == index: return w
[ "def", "get_window_by_index", "(", "self", ",", "index", ")", ":", "for", "w", "in", "self", ".", "windows", ":", "if", "w", ".", "index", "==", "index", ":", "return", "w" ]
Return the Window with this index or None if not found.
[ "Return", "the", "Window", "with", "this", "index", "or", "None", "if", "not", "found", "." ]
python
train
portfors-lab/sparkle
sparkle/run/calibration_runner.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/run/calibration_runner.py#L92-L101
def set_stim_by_index(self, index): """Sets the stimulus to be generated to the one referenced by index :param index: index number of stimulus to set from this class's internal list of stimuli :type index: int """ # remove any current components self.stimulus.clearComponents() # add one to index because of tone curve self.stimulus.insertComponent(self.stim_components[index])
[ "def", "set_stim_by_index", "(", "self", ",", "index", ")", ":", "# remove any current components", "self", ".", "stimulus", ".", "clearComponents", "(", ")", "# add one to index because of tone curve", "self", ".", "stimulus", ".", "insertComponent", "(", "self", "."...
Sets the stimulus to be generated to the one referenced by index :param index: index number of stimulus to set from this class's internal list of stimuli :type index: int
[ "Sets", "the", "stimulus", "to", "be", "generated", "to", "the", "one", "referenced", "by", "index" ]
python
train
ANTsX/ANTsPy
ants/contrib/sampling/transforms.py
https://github.com/ANTsX/ANTsPy/blob/638020af2cdfc5ff4bdb9809ffe67aa505727a3b/ants/contrib/sampling/transforms.py#L242-L273
def transform(self, X, y=None): """ Locally blur an image by applying a gradient anisotropic diffusion filter. Arguments --------- X : ANTsImage image to transform y : ANTsImage (optional) another image to transform. Example ------- >>> import ants >>> blur = ants.contrib.LocallyBlurIntensity(1,5) >>> img2d = ants.image_read(ants.get_data('r16')) >>> img2d_b = blur.transform(img2d) >>> ants.plot(img2d) >>> ants.plot(img2d_b) >>> img3d = ants.image_read(ants.get_data('mni')) >>> img3d_b = blur.transform(img3d) >>> ants.plot(img3d) >>> ants.plot(img3d_b) """ #if X.pixeltype != 'float': # raise ValueError('image.pixeltype must be float ... use TypeCast transform or clone to float') insuffix = X._libsuffix cast_fn = utils.get_lib_fn('locallyBlurAntsImage%s' % (insuffix)) casted_ptr = cast_fn(X.pointer, self.iters, self.conductance) return iio.ANTsImage(pixeltype=X.pixeltype, dimension=X.dimension, components=X.components, pointer=casted_ptr)
[ "def", "transform", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "#if X.pixeltype != 'float':", "# raise ValueError('image.pixeltype must be float ... use TypeCast transform or clone to float')", "insuffix", "=", "X", ".", "_libsuffix", "cast_fn", "=", "utils"...
Locally blur an image by applying a gradient anisotropic diffusion filter. Arguments --------- X : ANTsImage image to transform y : ANTsImage (optional) another image to transform. Example ------- >>> import ants >>> blur = ants.contrib.LocallyBlurIntensity(1,5) >>> img2d = ants.image_read(ants.get_data('r16')) >>> img2d_b = blur.transform(img2d) >>> ants.plot(img2d) >>> ants.plot(img2d_b) >>> img3d = ants.image_read(ants.get_data('mni')) >>> img3d_b = blur.transform(img3d) >>> ants.plot(img3d) >>> ants.plot(img3d_b)
[ "Locally", "blur", "an", "image", "by", "applying", "a", "gradient", "anisotropic", "diffusion", "filter", "." ]
python
train
Nic30/hwt
hwt/synthesizer/rtlLevel/netlist.py
https://github.com/Nic30/hwt/blob/8cbb399e326da3b22c233b98188a9d08dec057e6/hwt/synthesizer/rtlLevel/netlist.py#L175-L216
def markVisibilityOfSignals(ctx, ctxName, signals, interfaceSignals): """ * check if all signals are driven by something * mark signals with hidden = False if they are connecting statements or if they are external interface """ for sig in signals: driver_cnt = len(sig.drivers) has_comb_driver = False if driver_cnt > 1: sig.hidden = False for d in sig.drivers: if not isinstance(d, Operator): sig.hidden = False is_comb_driver = False if isinstance(d, PortItem): is_comb_driver = True elif not d._now_is_event_dependent: for a in walk_assignments(d, sig): if not a.indexes\ and not a._is_completly_event_dependent: is_comb_driver = True break if has_comb_driver and is_comb_driver: raise MultipleDriversErr( "%s: Signal %r has multiple combinational drivers" % (ctx.getDebugScopeName(), sig)) has_comb_driver |= is_comb_driver elif driver_cnt == 1: if not isinstance(sig.drivers[0], Operator): sig.hidden = False else: sig.hidden = False if sig not in interfaceSignals: if not sig.defVal._isFullVld(): raise NoDriverErr( sig, "Signal without any driver or valid value in ", ctxName) sig._const = True
[ "def", "markVisibilityOfSignals", "(", "ctx", ",", "ctxName", ",", "signals", ",", "interfaceSignals", ")", ":", "for", "sig", "in", "signals", ":", "driver_cnt", "=", "len", "(", "sig", ".", "drivers", ")", "has_comb_driver", "=", "False", "if", "driver_cnt...
* check if all signals are driven by something * mark signals with hidden = False if they are connecting statements or if they are external interface
[ "*", "check", "if", "all", "signals", "are", "driven", "by", "something", "*", "mark", "signals", "with", "hidden", "=", "False", "if", "they", "are", "connecting", "statements", "or", "if", "they", "are", "external", "interface" ]
python
test
jazzband/django-axes
axes/middleware.py
https://github.com/jazzband/django-axes/blob/3e215a174030e43e7ab8c2a79c395eb0eeddc667/axes/middleware.py#L38-L49
def update_request(self, request: HttpRequest): """ Update given Django ``HttpRequest`` with necessary attributes before passing it on the ``get_response`` for further Django middleware and view processing. """ request.axes_attempt_time = now() request.axes_ip_address = get_client_ip_address(request) request.axes_user_agent = get_client_user_agent(request) request.axes_path_info = get_client_path_info(request) request.axes_http_accept = get_client_http_accept(request)
[ "def", "update_request", "(", "self", ",", "request", ":", "HttpRequest", ")", ":", "request", ".", "axes_attempt_time", "=", "now", "(", ")", "request", ".", "axes_ip_address", "=", "get_client_ip_address", "(", "request", ")", "request", ".", "axes_user_agent"...
Update given Django ``HttpRequest`` with necessary attributes before passing it on the ``get_response`` for further Django middleware and view processing.
[ "Update", "given", "Django", "HttpRequest", "with", "necessary", "attributes", "before", "passing", "it", "on", "the", "get_response", "for", "further", "Django", "middleware", "and", "view", "processing", "." ]
python
train
gwastro/pycbc
pycbc/inference/geweke.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/inference/geweke.py#L22-L83
def geweke(x, seg_length, seg_stride, end_idx, ref_start, ref_end=None, seg_start=0): """ Calculates Geweke conervergence statistic for a chain of data. This function will advance along the chain and calculate the statistic for each step. Parameters ---------- x : numpy.array A one-dimensional array of data. seg_length : int Number of samples to use for each Geweke calculation. seg_stride : int Number of samples to advance before next Geweke calculation. end_idx : int Index of last start. ref_start : int Index of beginning of end reference segment. ref_end : int Index of end of end reference segment. Default is None which will go to the end of the data array. seg_start : int What index to start computing the statistic. Default is 0 which will go to the beginning of the data array. Returns ------- starts : numpy.array The start index of the first segment in the chain. ends : numpy.array The end index of the first segment in the chain. stats : numpy.array The Geweke convergence diagnostic statistic for the segment. """ # lists to hold statistic and end index stats = [] ends = [] # get the beginning of all segments starts = numpy.arange(seg_start, end_idx, seg_stride) # get second segment of data at the end to compare x_end = x[ref_start:ref_end] # loop over all segments for start in starts: # find the end of the first segment x_start_end = int(start + seg_length) # get first segment x_start = x[start:x_start_end] # compute statistic stats.append((x_start.mean() - x_end.mean()) / numpy.sqrt( x_start.var() + x_end.var())) # store end of first segment ends.append(x_start_end) return numpy.array(starts), numpy.array(ends), numpy.array(stats)
[ "def", "geweke", "(", "x", ",", "seg_length", ",", "seg_stride", ",", "end_idx", ",", "ref_start", ",", "ref_end", "=", "None", ",", "seg_start", "=", "0", ")", ":", "# lists to hold statistic and end index", "stats", "=", "[", "]", "ends", "=", "[", "]", ...
Calculates Geweke conervergence statistic for a chain of data. This function will advance along the chain and calculate the statistic for each step. Parameters ---------- x : numpy.array A one-dimensional array of data. seg_length : int Number of samples to use for each Geweke calculation. seg_stride : int Number of samples to advance before next Geweke calculation. end_idx : int Index of last start. ref_start : int Index of beginning of end reference segment. ref_end : int Index of end of end reference segment. Default is None which will go to the end of the data array. seg_start : int What index to start computing the statistic. Default is 0 which will go to the beginning of the data array. Returns ------- starts : numpy.array The start index of the first segment in the chain. ends : numpy.array The end index of the first segment in the chain. stats : numpy.array The Geweke convergence diagnostic statistic for the segment.
[ "Calculates", "Geweke", "conervergence", "statistic", "for", "a", "chain", "of", "data", ".", "This", "function", "will", "advance", "along", "the", "chain", "and", "calculate", "the", "statistic", "for", "each", "step", "." ]
python
train
abilian/abilian-core
abilian/core/models/attachment.py
https://github.com/abilian/abilian-core/blob/0a71275bf108c3d51e13ca9e093c0249235351e3/abilian/core/models/attachment.py#L61-L66
def for_entity(obj, check_support_attachments=False): """Return attachments on an entity.""" if check_support_attachments and not supports_attachments(obj): return [] return getattr(obj, ATTRIBUTE)
[ "def", "for_entity", "(", "obj", ",", "check_support_attachments", "=", "False", ")", ":", "if", "check_support_attachments", "and", "not", "supports_attachments", "(", "obj", ")", ":", "return", "[", "]", "return", "getattr", "(", "obj", ",", "ATTRIBUTE", ")"...
Return attachments on an entity.
[ "Return", "attachments", "on", "an", "entity", "." ]
python
train
cokelaer/reports
reports/report.py
https://github.com/cokelaer/reports/blob/7703b1e27d440c3193ee6cc90bfecd78cc98b737/reports/report.py#L230-L240
def get_time_now(self): """Returns a time stamp""" import datetime import getpass username = getpass.getuser() # this is not working on some systems: os.environ["USERNAME"] timenow = str(datetime.datetime.now()) timenow = timenow.split('.')[0] msg = '<div class="date">Created on ' + timenow msg += " by " + username +'</div>' return msg
[ "def", "get_time_now", "(", "self", ")", ":", "import", "datetime", "import", "getpass", "username", "=", "getpass", ".", "getuser", "(", ")", "# this is not working on some systems: os.environ[\"USERNAME\"]", "timenow", "=", "str", "(", "datetime", ".", "datetime", ...
Returns a time stamp
[ "Returns", "a", "time", "stamp" ]
python
train
jjgomera/iapws
iapws/iapws97.py
https://github.com/jjgomera/iapws/blob/1e5812aab38212fb8a63736f61cdcfa427d223b1/iapws/iapws97.py#L423-L473
def _PSat_s(s): """Define the saturated line, P=f(s) for region 3 Parameters ---------- s : float Specific entropy, [kJ/kgK] Returns ------- P : float Pressure, [MPa] Notes ------ Raise :class:`NotImplementedError` if input isn't in limit: * s'(623.15K) ≤ s ≤ s''(623.15K) References ---------- IAPWS, Revised Supplementary Release on Backward Equations for the Functions T(p,h), v(p,h) and T(p,s), v(p,s) for Region 3 of the IAPWS Industrial Formulation 1997 for the Thermodynamic Properties of Water and Steam, http://www.iapws.org/relguide/Supp-Tv%28ph,ps%293-2014.pdf, Eq 11 Examples -------- >>> _PSat_s(3.8) 16.87755057 >>> _PSat_s(5.2) 16.68968482 """ # Check input parameters smin_Ps3 = _Region1(623.15, Ps_623)["s"] smax_Ps3 = _Region2(623.15, Ps_623)["s"] if s < smin_Ps3 or s > smax_Ps3: raise NotImplementedError("Incoming out of bound") sigma = s/5.2 I = [0, 1, 1, 4, 12, 12, 16, 24, 28, 32] J = [0, 1, 32, 7, 4, 14, 36, 10, 0, 18] n = [0.639767553612785, -0.129727445396014e2, -0.224595125848403e16, 0.177466741801846e7, 0.717079349571538e10, -0.378829107169011e18, -0.955586736431328e35, 0.187269814676188e24, 0.119254746466473e12, 0.110649277244882e37] suma = 0 for i, j, ni in zip(I, J, n): suma += ni * (sigma-1.03)**i * (sigma-0.699)**j return 22*suma
[ "def", "_PSat_s", "(", "s", ")", ":", "# Check input parameters", "smin_Ps3", "=", "_Region1", "(", "623.15", ",", "Ps_623", ")", "[", "\"s\"", "]", "smax_Ps3", "=", "_Region2", "(", "623.15", ",", "Ps_623", ")", "[", "\"s\"", "]", "if", "s", "<", "smi...
Define the saturated line, P=f(s) for region 3 Parameters ---------- s : float Specific entropy, [kJ/kgK] Returns ------- P : float Pressure, [MPa] Notes ------ Raise :class:`NotImplementedError` if input isn't in limit: * s'(623.15K) ≤ s ≤ s''(623.15K) References ---------- IAPWS, Revised Supplementary Release on Backward Equations for the Functions T(p,h), v(p,h) and T(p,s), v(p,s) for Region 3 of the IAPWS Industrial Formulation 1997 for the Thermodynamic Properties of Water and Steam, http://www.iapws.org/relguide/Supp-Tv%28ph,ps%293-2014.pdf, Eq 11 Examples -------- >>> _PSat_s(3.8) 16.87755057 >>> _PSat_s(5.2) 16.68968482
[ "Define", "the", "saturated", "line", "P", "=", "f", "(", "s", ")", "for", "region", "3" ]
python
train
pypa/pipenv
pipenv/vendor/distlib/database.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/database.py#L1330-L1339
def make_dist(name, version, **kwargs): """ A convenience method for making a dist given just a name and version. """ summary = kwargs.pop('summary', 'Placeholder for summary') md = Metadata(**kwargs) md.name = name md.version = version md.summary = summary or 'Placeholder for summary' return Distribution(md)
[ "def", "make_dist", "(", "name", ",", "version", ",", "*", "*", "kwargs", ")", ":", "summary", "=", "kwargs", ".", "pop", "(", "'summary'", ",", "'Placeholder for summary'", ")", "md", "=", "Metadata", "(", "*", "*", "kwargs", ")", "md", ".", "name", ...
A convenience method for making a dist given just a name and version.
[ "A", "convenience", "method", "for", "making", "a", "dist", "given", "just", "a", "name", "and", "version", "." ]
python
train
delph-in/pydelphin
delphin/itsdb.py
https://github.com/delph-in/pydelphin/blob/7bd2cd63ab7cf74803e1d6547b9ebc014b382abd/delphin/itsdb.py#L1858-L1883
def apply_rows(applicators, rows): """ Yield rows after applying the applicator functions to them. Applicators are simple unary functions that return a value, and that value is stored in the yielded row. E.g. `row[col] = applicator(row[col])`. These are useful to, e.g., cast strings to numeric datatypes, to convert formats stored in a cell, extract features for machine learning, and so on. Args: applicators: a tuple of (cols, applicator) where the applicator will be applied to each col in cols rows: an iterable of rows for applicators to be called on Yields: Rows with specified column values replaced with the results of the applicators .. deprecated:: v0.7.0 """ for row in rows: for (cols, function) in applicators: for col in (cols or []): value = row.get(col, '') row[col] = function(row, value) yield row
[ "def", "apply_rows", "(", "applicators", ",", "rows", ")", ":", "for", "row", "in", "rows", ":", "for", "(", "cols", ",", "function", ")", "in", "applicators", ":", "for", "col", "in", "(", "cols", "or", "[", "]", ")", ":", "value", "=", "row", "...
Yield rows after applying the applicator functions to them. Applicators are simple unary functions that return a value, and that value is stored in the yielded row. E.g. `row[col] = applicator(row[col])`. These are useful to, e.g., cast strings to numeric datatypes, to convert formats stored in a cell, extract features for machine learning, and so on. Args: applicators: a tuple of (cols, applicator) where the applicator will be applied to each col in cols rows: an iterable of rows for applicators to be called on Yields: Rows with specified column values replaced with the results of the applicators .. deprecated:: v0.7.0
[ "Yield", "rows", "after", "applying", "the", "applicator", "functions", "to", "them", "." ]
python
train
deep-compute/basescript
basescript/log.py
https://github.com/deep-compute/basescript/blob/f7233963c5291530fcb2444a7f45b556e6407b90/basescript/log.py#L166-L175
def error(self, event=None, *args, **kw): """ Process event and call :meth:`logging.Logger.error` with the result. """ if not self._logger.isEnabledFor(logging.ERROR): return kw = self._add_base_info(kw) kw['level'] = "error" return self._proxy_to_logger('error', event, *args, **kw)
[ "def", "error", "(", "self", ",", "event", "=", "None", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "if", "not", "self", ".", "_logger", ".", "isEnabledFor", "(", "logging", ".", "ERROR", ")", ":", "return", "kw", "=", "self", ".", "_add_bas...
Process event and call :meth:`logging.Logger.error` with the result.
[ "Process", "event", "and", "call", ":", "meth", ":", "logging", ".", "Logger", ".", "error", "with", "the", "result", "." ]
python
train
watson-developer-cloud/python-sdk
ibm_watson/natural_language_understanding_v1.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/natural_language_understanding_v1.py#L633-L638
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'name') and self.name is not None: _dict['name'] = self.name return _dict
[ "def", "_to_dict", "(", "self", ")", ":", "_dict", "=", "{", "}", "if", "hasattr", "(", "self", ",", "'name'", ")", "and", "self", ".", "name", "is", "not", "None", ":", "_dict", "[", "'name'", "]", "=", "self", ".", "name", "return", "_dict" ]
Return a json dictionary representing this model.
[ "Return", "a", "json", "dictionary", "representing", "this", "model", "." ]
python
train
RedFantom/ttkwidgets
ttkwidgets/color/functions.py
https://github.com/RedFantom/ttkwidgets/blob/02150322060f867b6e59a175522ef84b09168019/ttkwidgets/color/functions.py#L79-L89
def hexa_to_rgb(color): """Convert hexadecimal color to RGB.""" r = int(color[1:3], 16) g = int(color[3:5], 16) b = int(color[5:7], 16) if len(color) == 7: return r, g, b elif len(color) == 9: return r, g, b, int(color[7:9], 16) else: raise ValueError("Invalid hexadecimal notation.")
[ "def", "hexa_to_rgb", "(", "color", ")", ":", "r", "=", "int", "(", "color", "[", "1", ":", "3", "]", ",", "16", ")", "g", "=", "int", "(", "color", "[", "3", ":", "5", "]", ",", "16", ")", "b", "=", "int", "(", "color", "[", "5", ":", ...
Convert hexadecimal color to RGB.
[ "Convert", "hexadecimal", "color", "to", "RGB", "." ]
python
train
cs50/python-cs50
src/cs50/sql.py
https://github.com/cs50/python-cs50/blob/f987e9036bcf1bf60adf50a2827cc2cd5b9fd08a/src/cs50/sql.py#L68-L87
def _parse(self, e): """Parses an exception, returns its message.""" # MySQL matches = re.search(r"^\(_mysql_exceptions\.OperationalError\) \(\d+, \"(.+)\"\)$", str(e)) if matches: return matches.group(1) # PostgreSQL matches = re.search(r"^\(psycopg2\.OperationalError\) (.+)$", str(e)) if matches: return matches.group(1) # SQLite matches = re.search(r"^\(sqlite3\.OperationalError\) (.+)$", str(e)) if matches: return matches.group(1) # Default return str(e)
[ "def", "_parse", "(", "self", ",", "e", ")", ":", "# MySQL", "matches", "=", "re", ".", "search", "(", "r\"^\\(_mysql_exceptions\\.OperationalError\\) \\(\\d+, \\\"(.+)\\\"\\)$\"", ",", "str", "(", "e", ")", ")", "if", "matches", ":", "return", "matches", ".", ...
Parses an exception, returns its message.
[ "Parses", "an", "exception", "returns", "its", "message", "." ]
python
train
thombashi/DateTimeRange
datetimerange/__init__.py
https://github.com/thombashi/DateTimeRange/blob/542a3b69ec256d28cc5d5469fd68207c1b509c9c/datetimerange/__init__.py#L442-L473
def set_end_datetime(self, value, timezone=None): """ Set the end time of the time range. :param datetime.datetime/str value: |param_end_datetime| :raises ValueError: If the value is invalid as a |datetime| value. :Sample Code: .. code:: python from datetimerange import DateTimeRange time_range = DateTimeRange() print(time_range) time_range.set_end_datetime("2015-03-22T10:10:00+0900") print(time_range) :Output: .. parsed-literal:: NaT - NaT NaT - 2015-03-22T10:10:00+0900 """ if value is None: self.__end_datetime = None return try: self.__end_datetime = typepy.type.DateTime( value, strict_level=typepy.StrictLevel.MIN, timezone=timezone ).convert() except typepy.TypeConversionError as e: raise ValueError(e)
[ "def", "set_end_datetime", "(", "self", ",", "value", ",", "timezone", "=", "None", ")", ":", "if", "value", "is", "None", ":", "self", ".", "__end_datetime", "=", "None", "return", "try", ":", "self", ".", "__end_datetime", "=", "typepy", ".", "type", ...
Set the end time of the time range. :param datetime.datetime/str value: |param_end_datetime| :raises ValueError: If the value is invalid as a |datetime| value. :Sample Code: .. code:: python from datetimerange import DateTimeRange time_range = DateTimeRange() print(time_range) time_range.set_end_datetime("2015-03-22T10:10:00+0900") print(time_range) :Output: .. parsed-literal:: NaT - NaT NaT - 2015-03-22T10:10:00+0900
[ "Set", "the", "end", "time", "of", "the", "time", "range", "." ]
python
train
Esri/ArcREST
src/arcresthelper/common.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcresthelper/common.py#L459-L488
def find_replace_string(obj, find, replace): """Performs a string.replace() on the input object. Args: obj (object): The object to find/replace. It will be cast to ``str``. find (str): The string to search for. replace (str): The string to replace with. Returns: str: The replaced string. """ try: strobj = str(obj) newStr = string.replace(strobj, find, replace) if newStr == strobj: return obj else: return newStr except: line, filename, synerror = trace() raise ArcRestHelperError({ "function": "find_replace_string", "line": line, "filename": filename, "synerror": synerror, } ) finally: pass
[ "def", "find_replace_string", "(", "obj", ",", "find", ",", "replace", ")", ":", "try", ":", "strobj", "=", "str", "(", "obj", ")", "newStr", "=", "string", ".", "replace", "(", "strobj", ",", "find", ",", "replace", ")", "if", "newStr", "==", "strob...
Performs a string.replace() on the input object. Args: obj (object): The object to find/replace. It will be cast to ``str``. find (str): The string to search for. replace (str): The string to replace with. Returns: str: The replaced string.
[ "Performs", "a", "string", ".", "replace", "()", "on", "the", "input", "object", "." ]
python
train
konstantint/matplotlib-venn
matplotlib_venn/_venn2.py
https://github.com/konstantint/matplotlib-venn/blob/c26796c9925bdac512edf48387452fbd1848c791/matplotlib_venn/_venn2.py#L104-L114
def compute_venn2_colors(set_colors): ''' Given two base colors, computes combinations of colors corresponding to all regions of the venn diagram. returns a list of 3 elements, providing colors for regions (10, 01, 11). >>> compute_venn2_colors(('r', 'g')) (array([ 1., 0., 0.]), array([ 0. , 0.5, 0. ]), array([ 0.7 , 0.35, 0. ])) ''' ccv = ColorConverter() base_colors = [np.array(ccv.to_rgb(c)) for c in set_colors] return (base_colors[0], base_colors[1], mix_colors(base_colors[0], base_colors[1]))
[ "def", "compute_venn2_colors", "(", "set_colors", ")", ":", "ccv", "=", "ColorConverter", "(", ")", "base_colors", "=", "[", "np", ".", "array", "(", "ccv", ".", "to_rgb", "(", "c", ")", ")", "for", "c", "in", "set_colors", "]", "return", "(", "base_co...
Given two base colors, computes combinations of colors corresponding to all regions of the venn diagram. returns a list of 3 elements, providing colors for regions (10, 01, 11). >>> compute_venn2_colors(('r', 'g')) (array([ 1., 0., 0.]), array([ 0. , 0.5, 0. ]), array([ 0.7 , 0.35, 0. ]))
[ "Given", "two", "base", "colors", "computes", "combinations", "of", "colors", "corresponding", "to", "all", "regions", "of", "the", "venn", "diagram", ".", "returns", "a", "list", "of", "3", "elements", "providing", "colors", "for", "regions", "(", "10", "01...
python
train
springload/draftjs_exporter
draftjs_exporter/command.py
https://github.com/springload/draftjs_exporter/blob/1e391a46f162740f90511cde1ef615858e8de5cb/draftjs_exporter/command.py#L29-L36
def start_stop(name, start, stop, data=''): """ Builds a pair of start/stop commands with the same data. """ return [ Command('start_%s' % name, start, data), Command('stop_%s' % name, stop, data), ]
[ "def", "start_stop", "(", "name", ",", "start", ",", "stop", ",", "data", "=", "''", ")", ":", "return", "[", "Command", "(", "'start_%s'", "%", "name", ",", "start", ",", "data", ")", ",", "Command", "(", "'stop_%s'", "%", "name", ",", "stop", ","...
Builds a pair of start/stop commands with the same data.
[ "Builds", "a", "pair", "of", "start", "/", "stop", "commands", "with", "the", "same", "data", "." ]
python
train
gwastro/pycbc
pycbc/tmpltbank/lambda_mapping.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/tmpltbank/lambda_mapping.py#L138-L155
def ethinca_order_from_string(order): """ Returns the integer giving twice the post-Newtonian order used by the ethinca calculation. Currently valid only for TaylorF2 metric Parameters ---------- order : string Returns ------- int """ if order in get_ethinca_orders().keys(): return get_ethinca_orders()[order] else: raise ValueError("Order "+str(order)+" is not valid for ethinca" "calculation! Valid orders: "+ str(get_ethinca_orders().keys()))
[ "def", "ethinca_order_from_string", "(", "order", ")", ":", "if", "order", "in", "get_ethinca_orders", "(", ")", ".", "keys", "(", ")", ":", "return", "get_ethinca_orders", "(", ")", "[", "order", "]", "else", ":", "raise", "ValueError", "(", "\"Order \"", ...
Returns the integer giving twice the post-Newtonian order used by the ethinca calculation. Currently valid only for TaylorF2 metric Parameters ---------- order : string Returns ------- int
[ "Returns", "the", "integer", "giving", "twice", "the", "post", "-", "Newtonian", "order", "used", "by", "the", "ethinca", "calculation", ".", "Currently", "valid", "only", "for", "TaylorF2", "metric" ]
python
train
bkg/greenwich
greenwich/raster.py
https://github.com/bkg/greenwich/blob/57ec644dadfe43ce0ecf2cfd32a2de71e0c8c141/greenwich/raster.py#L45-L61
def geom_to_array(geom, size, affine): """Converts an OGR polygon to a 2D NumPy array. Arguments: geom -- OGR Geometry size -- array size in pixels as a tuple of (width, height) affine -- AffineTransform """ driver = ImageDriver('MEM') rast = driver.raster(driver.ShortName, size) rast.affine = affine rast.sref = geom.GetSpatialReference() with MemoryLayer.from_records([(1, geom)]) as ml: status = gdal.RasterizeLayer(rast.ds, (1,), ml.layer, burn_values=(1,)) arr = rast.array() rast.close() return arr
[ "def", "geom_to_array", "(", "geom", ",", "size", ",", "affine", ")", ":", "driver", "=", "ImageDriver", "(", "'MEM'", ")", "rast", "=", "driver", ".", "raster", "(", "driver", ".", "ShortName", ",", "size", ")", "rast", ".", "affine", "=", "affine", ...
Converts an OGR polygon to a 2D NumPy array. Arguments: geom -- OGR Geometry size -- array size in pixels as a tuple of (width, height) affine -- AffineTransform
[ "Converts", "an", "OGR", "polygon", "to", "a", "2D", "NumPy", "array", "." ]
python
test
iotile/coretools
iotilecore/iotile/core/utilities/formatting.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/utilities/formatting.py#L20-L27
def indent_list(inlist, level): """Join a list of strings, one per line with 'level' spaces before each one""" indent = ' '*level joinstr = '\n' + indent retval = joinstr.join(inlist) return indent + retval
[ "def", "indent_list", "(", "inlist", ",", "level", ")", ":", "indent", "=", "' '", "*", "level", "joinstr", "=", "'\\n'", "+", "indent", "retval", "=", "joinstr", ".", "join", "(", "inlist", ")", "return", "indent", "+", "retval" ]
Join a list of strings, one per line with 'level' spaces before each one
[ "Join", "a", "list", "of", "strings", "one", "per", "line", "with", "level", "spaces", "before", "each", "one" ]
python
train
QunarOPS/qg.core
qg/core/importutils.py
https://github.com/QunarOPS/qg.core/blob/d5d7e36ea140cfe73e1b1850e8c96960b02a1ed3/qg/core/importutils.py#L24-L33
def import_class(import_str): """Returns a class from a string including module and class.""" mod_str, _sep, class_str = import_str.rpartition('.') try: __import__(mod_str) return getattr(sys.modules[mod_str], class_str) except (ValueError, AttributeError): raise ImportError('Class %s cannot be found (%s)' % (class_str, traceback.format_exception(*sys.exc_info())))
[ "def", "import_class", "(", "import_str", ")", ":", "mod_str", ",", "_sep", ",", "class_str", "=", "import_str", ".", "rpartition", "(", "'.'", ")", "try", ":", "__import__", "(", "mod_str", ")", "return", "getattr", "(", "sys", ".", "modules", "[", "mod...
Returns a class from a string including module and class.
[ "Returns", "a", "class", "from", "a", "string", "including", "module", "and", "class", "." ]
python
train
python-cmd2/cmd2
cmd2/cmd2.py
https://github.com/python-cmd2/cmd2/blob/b22c0bd891ed08c8b09df56df9d91f48166a5e2a/cmd2/cmd2.py#L3505-L3508
def do_eos(self, _: argparse.Namespace) -> None: """Handle cleanup when a script has finished executing""" if self._script_dir: self._script_dir.pop()
[ "def", "do_eos", "(", "self", ",", "_", ":", "argparse", ".", "Namespace", ")", "->", "None", ":", "if", "self", ".", "_script_dir", ":", "self", ".", "_script_dir", ".", "pop", "(", ")" ]
Handle cleanup when a script has finished executing
[ "Handle", "cleanup", "when", "a", "script", "has", "finished", "executing" ]
python
train
Guake/guake
guake/prefs.py
https://github.com/Guake/guake/blob/4153ef38f9044cbed6494075fce80acd5809df2b/guake/prefs.py#L1073-L1313
def load_configs(self): """Load configurations for all widgets in General, Scrolling and Appearance tabs from dconf. """ self._load_default_shell_settings() # restore tabs startup value = self.settings.general.get_boolean('restore-tabs-startup') self.get_widget('restore-tabs-startup').set_active(value) # restore tabs notify value = self.settings.general.get_boolean('restore-tabs-notify') self.get_widget('restore-tabs-notify').set_active(value) # save tabs when changed value = self.settings.general.get_boolean('save-tabs-when-changed') self.get_widget('save-tabs-when-changed').set_active(value) # login shell value = self.settings.general.get_boolean('use-login-shell') self.get_widget('use_login_shell').set_active(value) # tray icon value = self.settings.general.get_boolean('use-trayicon') self.get_widget('use_trayicon').set_active(value) # popup value = self.settings.general.get_boolean('use-popup') self.get_widget('use_popup').set_active(value) # workspace-specific tab sets value = self.settings.general.get_boolean('workspace-specific-tab-sets') self.get_widget('workspace-specific-tab-sets').set_active(value) # prompt on quit value = self.settings.general.get_boolean('prompt-on-quit') self.get_widget('prompt_on_quit').set_active(value) # prompt on close_tab value = self.settings.general.get_int('prompt-on-close-tab') self.get_widget('prompt_on_close_tab').set_active(value) self.get_widget('prompt_on_quit').set_sensitive(value != 2) # gtk theme theme value = self.settings.general.get_string('gtk-theme-name') combo = self.get_widget('gtk_theme_name') for i in combo.get_model(): if i[0] == value: combo.set_active_iter(i.iter) break # prefer gtk theme theme value = self.settings.general.get_boolean('gtk-prefer-dark-theme') self.get_widget('gtk_prefer_dark_theme').set_active(value) # ontop value = self.settings.general.get_boolean('window-ontop') self.get_widget('window_ontop').set_active(value) # tab ontop value = self.settings.general.get_boolean('tab-ontop') 
self.get_widget('tab_ontop').set_active(value) # refocus value = self.settings.general.get_boolean('window-refocus') self.get_widget('window_refocus').set_active(value) # losefocus value = self.settings.general.get_boolean('window-losefocus') self.get_widget('window_losefocus').set_active(value) # use VTE titles value = self.settings.general.get_boolean('use-vte-titles') self.get_widget('use_vte_titles').set_active(value) # set window title value = self.settings.general.get_boolean('set-window-title') self.get_widget('set_window_title').set_active(value) # abbreviate tab names self.get_widget('abbreviate_tab_names').set_sensitive(value) value = self.settings.general.get_boolean('abbreviate-tab-names') self.get_widget('abbreviate_tab_names').set_active(value) # max tab name length value = self.settings.general.get_int('max-tab-name-length') self.get_widget('max_tab_name_length').set_value(value) self.update_vte_subwidgets_states() value = self.settings.general.get_int('window-height') self.get_widget('window_height').set_value(value) value = self.settings.general.get_int('window-width') self.get_widget('window_width').set_value(value) # window displacements value = self.settings.general.get_int('window-vertical-displacement') self.get_widget('window_vertical_displacement').set_value(value) value = self.settings.general.get_int('window-horizontal-displacement') self.get_widget('window_horizontal_displacement').set_value(value) value = self.settings.general.get_int('window-halignment') which_button = { ALIGN_RIGHT: 'radiobutton_align_right', ALIGN_LEFT: 'radiobutton_align_left', ALIGN_CENTER: 'radiobutton_align_center' } self.get_widget(which_button[value]).set_active(True) self.get_widget("window_horizontal_displacement").set_sensitive(value != ALIGN_CENTER) value = self.settings.general.get_boolean('open-tab-cwd') self.get_widget('open_tab_cwd').set_active(value) # tab bar value = self.settings.general.get_boolean('window-tabbar') 
self.get_widget('window_tabbar').set_active(value) # start fullscreen value = self.settings.general.get_boolean('start-fullscreen') self.get_widget('start_fullscreen').set_active(value) # start at GNOME login value = self.settings.general.get_boolean('start-at-login') self.get_widget('start_at_login').set_active(value) # use audible bell value = self.settings.general.get_boolean('use-audible-bell') self.get_widget('use_audible_bell').set_active(value) self._load_screen_settings() value = self.settings.general.get_boolean('quick-open-enable') self.get_widget('quick_open_enable').set_active(value) self.get_widget('quick_open_command_line').set_sensitive(value) self.get_widget('quick_open_in_current_terminal').set_sensitive(value) text = Gtk.TextBuffer() text = self.get_widget('quick_open_supported_patterns').get_buffer() for title, matcher, _useless in QUICK_OPEN_MATCHERS: text.insert_at_cursor("%s: %s\n" % (title, matcher)) self.get_widget('quick_open_supported_patterns').set_buffer(text) value = self.settings.general.get_string('quick-open-command-line') if value is None: value = "subl %(file_path)s:%(line_number)s" self.get_widget('quick_open_command_line').set_text(value) value = self.settings.general.get_boolean('quick-open-in-current-terminal') self.get_widget('quick_open_in_current_terminal').set_active(value) value = self.settings.general.get_string('startup-script') if value: self.get_widget('startup_script').set_text(value) # use display where the mouse is currently value = self.settings.general.get_boolean('mouse-display') self.get_widget('mouse_display').set_active(value) # scrollbar value = self.settings.general.get_boolean('use-scrollbar') self.get_widget('use_scrollbar').set_active(value) # history size value = self.settings.general.get_int('history-size') self.get_widget('history_size').set_value(value) # infinite history value = self.settings.general.get_boolean('infinite-history') self.get_widget('infinite_history').set_active(value) # scroll output 
value = self.settings.general.get_boolean('scroll-output') self.get_widget('scroll_output').set_active(value) # scroll keystroke value = self.settings.general.get_boolean('scroll-keystroke') self.get_widget('scroll_keystroke').set_active(value) # default font value = self.settings.general.get_boolean('use-default-font') self.get_widget('use_default_font').set_active(value) self.get_widget('font_style').set_sensitive(not value) # font value = self.settings.styleFont.get_string('style') if value: self.get_widget('font_style').set_font_name(value) # allow bold font value = self.settings.styleFont.get_boolean('allow-bold') self.get_widget('allow_bold').set_active(value) # use bold is bright value = self.settings.styleFont.get_boolean('bold-is-bright') self.get_widget('bold_is_bright').set_active(value) # palette self.fill_palette_names() value = self.settings.styleFont.get_string('palette-name') self.set_palette_name(value) value = self.settings.styleFont.get_string('palette') self.set_palette_colors(value) self.update_demo_palette(value) # cursor shape value = self.settings.style.get_int('cursor-shape') self.set_cursor_shape(value) # cursor blink value = self.settings.style.get_int('cursor-blink-mode') self.set_cursor_blink_mode(value) value = self.settings.styleBackground.get_int('transparency') self.get_widget('background_transparency').set_value(MAX_TRANSPARENCY - value) value = self.settings.general.get_int('window-valignment') self.get_widget('top_align').set_active(value) # it's a separated method, to be reused. 
self.reload_erase_combos() # custom command context-menu configuration file custom_command_file = self.settings.general.get_string('custom-command-file') if custom_command_file: custom_command_file_name = os.path.expanduser(custom_command_file) else: custom_command_file_name = None custom_cmd_filter = Gtk.FileFilter() custom_cmd_filter.set_name(_("JSON files")) custom_cmd_filter.add_pattern("*.json") self.get_widget('custom_command_file_chooser').add_filter(custom_cmd_filter) all_files_filter = Gtk.FileFilter() all_files_filter.set_name(_("All files")) all_files_filter.add_pattern("*") self.get_widget('custom_command_file_chooser').add_filter(all_files_filter) if custom_command_file_name: self.get_widget('custom_command_file_chooser').set_filename(custom_command_file_name) # hooks self._load_hooks_settings()
[ "def", "load_configs", "(", "self", ")", ":", "self", ".", "_load_default_shell_settings", "(", ")", "# restore tabs startup", "value", "=", "self", ".", "settings", ".", "general", ".", "get_boolean", "(", "'restore-tabs-startup'", ")", "self", ".", "get_widget",...
Load configurations for all widgets in General, Scrolling and Appearance tabs from dconf.
[ "Load", "configurations", "for", "all", "widgets", "in", "General", "Scrolling", "and", "Appearance", "tabs", "from", "dconf", "." ]
python
train
fastai/fastai
fastai/text/models/transformer.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/text/models/transformer.py#L255-L260
def tfmer_lm_split(model:nn.Module) -> List[nn.Module]: "Split a RNN `model` in groups for differential learning rates." encoder = model[0] n = len(encoder.layers)//3 groups = [list(encoder.layers[:n]), list(encoder.layers[n:2*n]), list(encoder.layers[2*n:])] return groups + [[encoder.encoder, model[1]]]
[ "def", "tfmer_lm_split", "(", "model", ":", "nn", ".", "Module", ")", "->", "List", "[", "nn", ".", "Module", "]", ":", "encoder", "=", "model", "[", "0", "]", "n", "=", "len", "(", "encoder", ".", "layers", ")", "//", "3", "groups", "=", "[", ...
Split a RNN `model` in groups for differential learning rates.
[ "Split", "a", "RNN", "model", "in", "groups", "for", "differential", "learning", "rates", "." ]
python
train
ergo/ziggurat_foundations
ziggurat_foundations/models/services/resource_tree.py
https://github.com/ergo/ziggurat_foundations/blob/9eeec894d08e8d7defa60ddc04b63f69cd4cbeba/ziggurat_foundations/models/services/resource_tree.py#L36-L46
def delete_branch(self, resource_id=None, db_session=None, *args, **kwargs): """ This deletes whole branch with children starting from resource_id :param resource_id: :param db_session: :return: """ return self.service.delete_branch( resource_id=resource_id, db_session=db_session, *args, **kwargs )
[ "def", "delete_branch", "(", "self", ",", "resource_id", "=", "None", ",", "db_session", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "service", ".", "delete_branch", "(", "resource_id", "=", "resource_id", ","...
This deletes whole branch with children starting from resource_id :param resource_id: :param db_session: :return:
[ "This", "deletes", "whole", "branch", "with", "children", "starting", "from", "resource_id" ]
python
train
respondcreate/django-versatileimagefield
versatileimagefield/registry.py
https://github.com/respondcreate/django-versatileimagefield/blob/d41e279c39cccffafbe876c67596184704ae8877/versatileimagefield/registry.py#L145-L157
def unregister_sizer(self, attr_name): """ Unregister the SizedImage subclass currently assigned to `attr_name`. If a SizedImage subclass isn't already registered to `attr_name` NotRegistered will raise. """ if attr_name not in self._sizedimage_registry: raise NotRegistered( 'No SizedImage subclass is registered to %s' % attr_name ) else: del self._sizedimage_registry[attr_name]
[ "def", "unregister_sizer", "(", "self", ",", "attr_name", ")", ":", "if", "attr_name", "not", "in", "self", ".", "_sizedimage_registry", ":", "raise", "NotRegistered", "(", "'No SizedImage subclass is registered to %s'", "%", "attr_name", ")", "else", ":", "del", ...
Unregister the SizedImage subclass currently assigned to `attr_name`. If a SizedImage subclass isn't already registered to `attr_name` NotRegistered will raise.
[ "Unregister", "the", "SizedImage", "subclass", "currently", "assigned", "to", "attr_name", "." ]
python
test
sirfoga/pyhal
hal/hashes/md5.py
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/hashes/md5.py#L19-L34
def file_to_md5(filename, block_size=8192): """Calculate the md5 hash of a file. Memory-friendly solution, it reads the file piece by piece. See stackoverflow.com/questions/1131220/ :param filename: filename to convert :param block_size: size of block :return: MD5 hash of file content """ md5 = hashlib.md5() with open(filename, 'rb') as f: while True: data = f.read(block_size) if not data: break md5.update(data) return md5.hexdigest()
[ "def", "file_to_md5", "(", "filename", ",", "block_size", "=", "8192", ")", ":", "md5", "=", "hashlib", ".", "md5", "(", ")", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "f", ":", "while", "True", ":", "data", "=", "f", ".", "read", ...
Calculate the md5 hash of a file. Memory-friendly solution, it reads the file piece by piece. See stackoverflow.com/questions/1131220/ :param filename: filename to convert :param block_size: size of block :return: MD5 hash of file content
[ "Calculate", "the", "md5", "hash", "of", "a", "file", ".", "Memory", "-", "friendly", "solution", "it", "reads", "the", "file", "piece", "by", "piece", ".", "See", "stackoverflow", ".", "com", "/", "questions", "/", "1131220", "/" ]
python
train
synw/dataswim
dataswim/charts/chartjs.py
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/charts/chartjs.py#L19-L52
def _get_chartjs_chart(self, xcol, ycol, chart_type, label=None, opts={}, style={}, options={}, **kwargs): """ Get Chartjs html """ try: xdata = list(self.df[xcol]) except Exception as e: self.err(e, self._get_chartjs_chart, "Can not get data for x field ", ycol) return if label is None: label = "Data" try: if type(ycol) != list: ydata = [dict(name=label, data=list(self.df[ycol]))] else: ydata = [] for col in ycol: y = {} y["name"] = col y["data"] = list(self.df[col]) ydata.append(y) except Exception as e: self.err(e, self._get_chartjs_chart, "Can not get data for y field ", xcol) return try: slug = str(uuid.uuid4()) html = chart.get(slug, xdata, ydata, label, opts, style, chart_type, **kwargs) return html except Exception as e: self.err(e, self._get_chartjs_chart, "Can not get chart")
[ "def", "_get_chartjs_chart", "(", "self", ",", "xcol", ",", "ycol", ",", "chart_type", ",", "label", "=", "None", ",", "opts", "=", "{", "}", ",", "style", "=", "{", "}", ",", "options", "=", "{", "}", ",", "*", "*", "kwargs", ")", ":", "try", ...
Get Chartjs html
[ "Get", "Chartjs", "html" ]
python
train
python-gitlab/python-gitlab
gitlab/v4/objects.py
https://github.com/python-gitlab/python-gitlab/blob/16de1b03fde3dbbe8f851614dd1d8c09de102fe5/gitlab/v4/objects.py#L978-L989
def ldap_sync(self, **kwargs): """Sync LDAP groups. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabCreateError: If the server cannot perform the request """ path = '/groups/%s/ldap_sync' % self.get_id() self.manager.gitlab.http_post(path, **kwargs)
[ "def", "ldap_sync", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "'/groups/%s/ldap_sync'", "%", "self", ".", "get_id", "(", ")", "self", ".", "manager", ".", "gitlab", ".", "http_post", "(", "path", ",", "*", "*", "kwargs", ")" ]
Sync LDAP groups. Args: **kwargs: Extra options to send to the server (e.g. sudo) Raises: GitlabAuthenticationError: If authentication is not correct GitlabCreateError: If the server cannot perform the request
[ "Sync", "LDAP", "groups", "." ]
python
train
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Environment.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Environment.py#L2241-L2263
def FindSourceFiles(self, node='.'): """ returns a list of all source files. """ node = self.arg2nodes(node, self.fs.Entry)[0] sources = [] def build_source(ss): for s in ss: if isinstance(s, SCons.Node.FS.Dir): build_source(s.all_children()) elif s.has_builder(): build_source(s.sources) elif isinstance(s.disambiguate(), SCons.Node.FS.File): sources.append(s) build_source(node.all_children()) def final_source(node): while (node != node.srcnode()): node = node.srcnode() return node sources = list(map( final_source, sources )); # remove duplicates return list(set(sources))
[ "def", "FindSourceFiles", "(", "self", ",", "node", "=", "'.'", ")", ":", "node", "=", "self", ".", "arg2nodes", "(", "node", ",", "self", ".", "fs", ".", "Entry", ")", "[", "0", "]", "sources", "=", "[", "]", "def", "build_source", "(", "ss", ")...
returns a list of all source files.
[ "returns", "a", "list", "of", "all", "source", "files", "." ]
python
train
senaite/senaite.core
bika/lims/browser/workflow/__init__.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/workflow/__init__.py#L75-L94
def get_action(self): """Returns the action to be taken from the request. Returns None if no action is found """ action = self.request.get("workflow_action_id", None) action = self.request.get("workflow_action", action) if not action: return None # A condition in the form causes Plone to sometimes send two actions # This usually happens when the previous action was not managed properly # and the request was not able to complete, so the previous form value # is kept, together with the new one. if type(action) in (list, tuple): actions = list(set(action)) if len(actions) > 0: logger.warn("Multiple actions in request: {}. Fallback to '{}'" .format(repr(actions), actions[-1])) action = actions[-1] return action
[ "def", "get_action", "(", "self", ")", ":", "action", "=", "self", ".", "request", ".", "get", "(", "\"workflow_action_id\"", ",", "None", ")", "action", "=", "self", ".", "request", ".", "get", "(", "\"workflow_action\"", ",", "action", ")", "if", "not"...
Returns the action to be taken from the request. Returns None if no action is found
[ "Returns", "the", "action", "to", "be", "taken", "from", "the", "request", ".", "Returns", "None", "if", "no", "action", "is", "found" ]
python
train
opendatateam/udata
udata/core/site/rdf.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/site/rdf.py#L15-L68
def build_catalog(site, datasets, format=None): '''Build the DCAT catalog for this site''' site_url = url_for('site.home_redirect', _external=True) catalog_url = url_for('site.rdf_catalog', _external=True) graph = Graph(namespace_manager=namespace_manager) catalog = graph.resource(URIRef(catalog_url)) catalog.set(RDF.type, DCAT.Catalog) catalog.set(DCT.title, Literal(site.title)) catalog.set(DCT.language, Literal(current_app.config['DEFAULT_LANGUAGE'])) catalog.set(FOAF.homepage, URIRef(site_url)) publisher = graph.resource(BNode()) publisher.set(RDF.type, FOAF.Organization) publisher.set(FOAF.name, Literal(current_app.config['SITE_AUTHOR'])) catalog.set(DCT.publisher, publisher) for dataset in datasets: catalog.add(DCAT.dataset, dataset_to_rdf(dataset, graph)) if isinstance(datasets, Paginable): if not format: raise ValueError('Pagination requires format') catalog.add(RDF.type, HYDRA.Collection) catalog.set(HYDRA.totalItems, Literal(datasets.total)) kwargs = { 'format': format, 'page_size': datasets.page_size, '_external': True, } first_url = url_for('site.rdf_catalog_format', page=1, **kwargs) page_url = url_for('site.rdf_catalog_format', page=datasets.page, **kwargs) last_url = url_for('site.rdf_catalog_format', page=datasets.pages, **kwargs) pagination = graph.resource(URIRef(page_url)) pagination.set(RDF.type, HYDRA.PartialCollectionView) pagination.set(HYDRA.first, URIRef(first_url)) pagination.set(HYDRA.last, URIRef(last_url)) if datasets.has_next: next_url = url_for('site.rdf_catalog_format', page=datasets.page + 1, **kwargs) pagination.set(HYDRA.next, URIRef(next_url)) if datasets.has_prev: prev_url = url_for('site.rdf_catalog_format', page=datasets.page - 1, **kwargs) pagination.set(HYDRA.previous, URIRef(prev_url)) catalog.set(HYDRA.view, pagination) return catalog
[ "def", "build_catalog", "(", "site", ",", "datasets", ",", "format", "=", "None", ")", ":", "site_url", "=", "url_for", "(", "'site.home_redirect'", ",", "_external", "=", "True", ")", "catalog_url", "=", "url_for", "(", "'site.rdf_catalog'", ",", "_external",...
Build the DCAT catalog for this site
[ "Build", "the", "DCAT", "catalog", "for", "this", "site" ]
python
train
svinota/mdns
mdns/zeroconf.py
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L907-L913
def add_answer_at_time(self, record, now): """Adds an answer if if does not expire by a certain time""" if record is not None: if now == 0 or not record.is_expired(now): self.answers.append((record, now)) if record.rrsig is not None: self.answers.append((record.rrsig, now))
[ "def", "add_answer_at_time", "(", "self", ",", "record", ",", "now", ")", ":", "if", "record", "is", "not", "None", ":", "if", "now", "==", "0", "or", "not", "record", ".", "is_expired", "(", "now", ")", ":", "self", ".", "answers", ".", "append", ...
Adds an answer if if does not expire by a certain time
[ "Adds", "an", "answer", "if", "if", "does", "not", "expire", "by", "a", "certain", "time" ]
python
train
manns/pyspread
pyspread/src/actions/_grid_actions.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/actions/_grid_actions.py#L529-L555
def _save_pys(self, filepath): """Saves file as pys file and returns True if save success Parameters ---------- filepath: String \tTarget file path for xls file """ try: with Bz2AOpen(filepath, "wb", main_window=self.main_window) as outfile: interface = Pys(self.grid.code_array, outfile) interface.from_code_array() except (IOError, ValueError), err: try: post_command_event(self.main_window, self.StatusBarMsg, text=err) return except TypeError: # The main window does not exist any more pass return not outfile.aborted
[ "def", "_save_pys", "(", "self", ",", "filepath", ")", ":", "try", ":", "with", "Bz2AOpen", "(", "filepath", ",", "\"wb\"", ",", "main_window", "=", "self", ".", "main_window", ")", "as", "outfile", ":", "interface", "=", "Pys", "(", "self", ".", "grid...
Saves file as pys file and returns True if save success Parameters ---------- filepath: String \tTarget file path for xls file
[ "Saves", "file", "as", "pys", "file", "and", "returns", "True", "if", "save", "success" ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/work_item_tracking_process/work_item_tracking_process_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/work_item_tracking_process/work_item_tracking_process_client.py#L434-L457
def remove_group(self, process_id, wit_ref_name, page_id, section_id, group_id): """RemoveGroup. [Preview API] Removes a group from the work item form. :param str process_id: The ID of the process :param str wit_ref_name: The reference name of the work item type :param str page_id: The ID of the page the group is in :param str section_id: The ID of the section to the group is in :param str group_id: The ID of the group """ route_values = {} if process_id is not None: route_values['processId'] = self._serialize.url('process_id', process_id, 'str') if wit_ref_name is not None: route_values['witRefName'] = self._serialize.url('wit_ref_name', wit_ref_name, 'str') if page_id is not None: route_values['pageId'] = self._serialize.url('page_id', page_id, 'str') if section_id is not None: route_values['sectionId'] = self._serialize.url('section_id', section_id, 'str') if group_id is not None: route_values['groupId'] = self._serialize.url('group_id', group_id, 'str') self._send(http_method='DELETE', location_id='766e44e1-36a8-41d7-9050-c343ff02f7a5', version='5.0-preview.1', route_values=route_values)
[ "def", "remove_group", "(", "self", ",", "process_id", ",", "wit_ref_name", ",", "page_id", ",", "section_id", ",", "group_id", ")", ":", "route_values", "=", "{", "}", "if", "process_id", "is", "not", "None", ":", "route_values", "[", "'processId'", "]", ...
RemoveGroup. [Preview API] Removes a group from the work item form. :param str process_id: The ID of the process :param str wit_ref_name: The reference name of the work item type :param str page_id: The ID of the page the group is in :param str section_id: The ID of the section to the group is in :param str group_id: The ID of the group
[ "RemoveGroup", ".", "[", "Preview", "API", "]", "Removes", "a", "group", "from", "the", "work", "item", "form", ".", ":", "param", "str", "process_id", ":", "The", "ID", "of", "the", "process", ":", "param", "str", "wit_ref_name", ":", "The", "reference"...
python
train
JukeboxPipeline/jukebox-core
src/jukeboxcore/gui/widgets/releasewin.py
https://github.com/JukeboxPipeline/jukebox-core/blob/bac2280ca49940355270e4b69400ce9976ab2e6f/src/jukeboxcore/gui/widgets/releasewin.py#L117-L130
def set_release_actions(self, actions): """Set the widget that gives users options about the release, e.g. importing references :param actions: Release actions that define the sanity checks and cleanup actions :type actions: :class:`jukeboxcore.release.ReleaseActions` :returns: None :rtype: None :raises: None """ self.release_actions = actions self.option_widget = self.release_actions.option_widget() if self.option_widget: self.option_vbox.addWidget(self.option_widget) self.option_gb.setVisible(True)
[ "def", "set_release_actions", "(", "self", ",", "actions", ")", ":", "self", ".", "release_actions", "=", "actions", "self", ".", "option_widget", "=", "self", ".", "release_actions", ".", "option_widget", "(", ")", "if", "self", ".", "option_widget", ":", "...
Set the widget that gives users options about the release, e.g. importing references :param actions: Release actions that define the sanity checks and cleanup actions :type actions: :class:`jukeboxcore.release.ReleaseActions` :returns: None :rtype: None :raises: None
[ "Set", "the", "widget", "that", "gives", "users", "options", "about", "the", "release", "e", ".", "g", ".", "importing", "references" ]
python
train
Microsoft/knack
knack/cli.py
https://github.com/Microsoft/knack/blob/5f1a480a33f103e2688c46eef59fb2d9eaf2baad/knack/cli.py#L139-L150
def unregister_event(self, event_name, handler): """ Unregister a callable that will be called when event is raised. :param event_name: The name of the event (see knack.events for in-built events) :type event_name: str :param handler: The callback that was used to register the event :type handler: function """ try: self._event_handlers[event_name].remove(handler) except ValueError: pass
[ "def", "unregister_event", "(", "self", ",", "event_name", ",", "handler", ")", ":", "try", ":", "self", ".", "_event_handlers", "[", "event_name", "]", ".", "remove", "(", "handler", ")", "except", "ValueError", ":", "pass" ]
Unregister a callable that will be called when event is raised. :param event_name: The name of the event (see knack.events for in-built events) :type event_name: str :param handler: The callback that was used to register the event :type handler: function
[ "Unregister", "a", "callable", "that", "will", "be", "called", "when", "event", "is", "raised", "." ]
python
train
androguard/androguard
androguard/core/bytecodes/apk.py
https://github.com/androguard/androguard/blob/984c0d981be2950cf0451e484f7b0d4d53bc4911/androguard/core/bytecodes/apk.py#L710-L727
def _get_crc32(self, filename): """ Calculates and compares the CRC32 and returns the raw buffer. The CRC32 is added to `files_crc32` dictionary, if not present. :param filename: filename inside the zipfile :rtype: bytes """ buffer = self.zip.read(filename) if filename not in self.files_crc32: self.files_crc32[filename] = crc32(buffer) if self.files_crc32[filename] != self.zip.getinfo(filename).CRC: log.error("File '{}' has different CRC32 after unpacking! " "Declared: {:08x}, Calculated: {:08x}".format(filename, self.zip.getinfo(filename).CRC, self.files_crc32[filename])) return buffer
[ "def", "_get_crc32", "(", "self", ",", "filename", ")", ":", "buffer", "=", "self", ".", "zip", ".", "read", "(", "filename", ")", "if", "filename", "not", "in", "self", ".", "files_crc32", ":", "self", ".", "files_crc32", "[", "filename", "]", "=", ...
Calculates and compares the CRC32 and returns the raw buffer. The CRC32 is added to `files_crc32` dictionary, if not present. :param filename: filename inside the zipfile :rtype: bytes
[ "Calculates", "and", "compares", "the", "CRC32", "and", "returns", "the", "raw", "buffer", "." ]
python
train
jobovy/galpy
galpy/potential/Potential.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/potential/Potential.py#L2872-L2900
def turn_physical_off(Pot): """ NAME: turn_physical_off PURPOSE: turn off automatic returning of outputs in physical units INPUT: (none) OUTPUT: (none) HISTORY: 2016-01-30 - Written - Bovy (UofT) """ if isinstance(Pot,list): for pot in Pot: turn_physical_off(pot) else: Pot.turn_physical_off() return None
[ "def", "turn_physical_off", "(", "Pot", ")", ":", "if", "isinstance", "(", "Pot", ",", "list", ")", ":", "for", "pot", "in", "Pot", ":", "turn_physical_off", "(", "pot", ")", "else", ":", "Pot", ".", "turn_physical_off", "(", ")", "return", "None" ]
NAME: turn_physical_off PURPOSE: turn off automatic returning of outputs in physical units INPUT: (none) OUTPUT: (none) HISTORY: 2016-01-30 - Written - Bovy (UofT)
[ "NAME", ":", "turn_physical_off" ]
python
train
python-diamond/Diamond
src/collectors/processresources/processresources.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/processresources/processresources.py#L99-L125
def process_config(self): super(ProcessResourcesCollector, self).process_config() """ prepare self.processes, which is a descriptor dictionary in pg_name: { exe: [regex], name: [regex], cmdline: [regex], selfmon: [boolean], procs: [psutil.Process], count_workers: [boolean] } """ self.processes = {} self.processes_info = {} for pg_name, cfg in self.config['process'].items(): pg_cfg = {} for key in ('exe', 'name', 'cmdline'): pg_cfg[key] = cfg.get(key, []) if not isinstance(pg_cfg[key], list): pg_cfg[key] = [pg_cfg[key]] pg_cfg[key] = [re.compile(e) for e in pg_cfg[key]] pg_cfg['selfmon'] = cfg.get('selfmon', '').lower() == 'true' pg_cfg['count_workers'] = cfg.get( 'count_workers', '').lower() == 'true' self.processes[pg_name] = pg_cfg self.processes_info[pg_name] = {}
[ "def", "process_config", "(", "self", ")", ":", "super", "(", "ProcessResourcesCollector", ",", "self", ")", ".", "process_config", "(", ")", "self", ".", "processes", "=", "{", "}", "self", ".", "processes_info", "=", "{", "}", "for", "pg_name", ",", "c...
prepare self.processes, which is a descriptor dictionary in pg_name: { exe: [regex], name: [regex], cmdline: [regex], selfmon: [boolean], procs: [psutil.Process], count_workers: [boolean] }
[ "prepare", "self", ".", "processes", "which", "is", "a", "descriptor", "dictionary", "in", "pg_name", ":", "{", "exe", ":", "[", "regex", "]", "name", ":", "[", "regex", "]", "cmdline", ":", "[", "regex", "]", "selfmon", ":", "[", "boolean", "]", "pr...
python
train
CalebBell/fluids
fluids/nrlmsise00/nrlmsise_00.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/nrlmsise00/nrlmsise_00.py#L165-L184
def ccor2(alt, r, h1, zh, h2): ''' /* CHEMISTRY/DISSOCIATION CORRECTION FOR MSIS MODELS * ALT - altitude * R - target ratio * H1 - transition scale length * ZH - altitude of 1/2 R * H2 - transition scale length #2 ? */ ''' e1 = (alt - zh) / h1; e2 = (alt - zh) / h2; if ((e1 > 70.0) or (e2 > 70)): # pragma: no cover return 1.0 # exp(0) if ((e1 < -70) and (e2 < -70)): # pragma: no cover return exp(r) ex1 = exp(e1); ex2 = exp(e2); ccor2v = r / (1.0 + 0.5 * (ex1 + ex2)); return exp(ccor2v);
[ "def", "ccor2", "(", "alt", ",", "r", ",", "h1", ",", "zh", ",", "h2", ")", ":", "e1", "=", "(", "alt", "-", "zh", ")", "/", "h1", "e2", "=", "(", "alt", "-", "zh", ")", "/", "h2", "if", "(", "(", "e1", ">", "70.0", ")", "or", "(", "e...
/* CHEMISTRY/DISSOCIATION CORRECTION FOR MSIS MODELS * ALT - altitude * R - target ratio * H1 - transition scale length * ZH - altitude of 1/2 R * H2 - transition scale length #2 ? */
[ "/", "*", "CHEMISTRY", "/", "DISSOCIATION", "CORRECTION", "FOR", "MSIS", "MODELS", "*", "ALT", "-", "altitude", "*", "R", "-", "target", "ratio", "*", "H1", "-", "transition", "scale", "length", "*", "ZH", "-", "altitude", "of", "1", "/", "2", "R", "...
python
train
earlye/nephele
nephele/AwsRoot.py
https://github.com/earlye/nephele/blob/a7dadc68f4124671457f09119419978c4d22013e/nephele/AwsRoot.py#L114-L124
def do_stack_resource(self, args): """Use specified stack resource. stack_resource -h for detailed help.""" parser = CommandArgumentParser() parser.add_argument('-s','--stack-name',dest='stack-name',help='name of the stack resource'); parser.add_argument('-i','--logical-id',dest='logical-id',help='logical id of the child resource'); args = vars(parser.parse_args(args)) stackName = args['stack-name'] logicalId = args['logical-id'] self.stackResource(stackName,logicalId)
[ "def", "do_stack_resource", "(", "self", ",", "args", ")", ":", "parser", "=", "CommandArgumentParser", "(", ")", "parser", ".", "add_argument", "(", "'-s'", ",", "'--stack-name'", ",", "dest", "=", "'stack-name'", ",", "help", "=", "'name of the stack resource'...
Use specified stack resource. stack_resource -h for detailed help.
[ "Use", "specified", "stack", "resource", ".", "stack_resource", "-", "h", "for", "detailed", "help", "." ]
python
train
openstack/networking-cisco
networking_cisco/plugins/cisco/cfg_agent/device_drivers/iosxe/iosxe_routing_driver.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/cfg_agent/device_drivers/iosxe/iosxe_routing_driver.py#L530-L561
def caller_name(self, skip=2): """ Get a name of a caller in the format module.class.method `skip` specifies how many levels of stack to skip while getting caller name. skip=1 means "who calls me", skip=2 "who calls my caller" etc. An empty string is returned if skipped levels exceed stack height """ stack = inspect.stack() start = 0 + skip if len(stack) < start + 1: return '' parentframe = stack[start][0] name = [] module = inspect.getmodule(parentframe) # `modname` can be None when frame is executed directly in console # TODO(asr1kteam): consider using __main__ if module: name.append(module.__name__) # detect classname if 'self' in parentframe.f_locals: # I don't know any way to detect call from the object method # XXX: there seems to be no way to detect static method call, # it will be just a function call name.append(parentframe.f_locals['self'].__class__.__name__) codename = parentframe.f_code.co_name if codename != '<module>': # top level usually name.append(codename) # function or a method del parentframe return ".".join(name)
[ "def", "caller_name", "(", "self", ",", "skip", "=", "2", ")", ":", "stack", "=", "inspect", ".", "stack", "(", ")", "start", "=", "0", "+", "skip", "if", "len", "(", "stack", ")", "<", "start", "+", "1", ":", "return", "''", "parentframe", "=", ...
Get a name of a caller in the format module.class.method `skip` specifies how many levels of stack to skip while getting caller name. skip=1 means "who calls me", skip=2 "who calls my caller" etc. An empty string is returned if skipped levels exceed stack height
[ "Get", "a", "name", "of", "a", "caller", "in", "the", "format", "module", ".", "class", ".", "method" ]
python
train
tensorflow/datasets
tensorflow_datasets/text/cnn_dailymail.py
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/text/cnn_dailymail.py#L111-L126
def _find_files(dl_paths, publisher, url_dict): """Find files corresponding to urls.""" if publisher == 'cnn': top_dir = os.path.join(dl_paths['cnn_stories'], 'cnn', 'stories') elif publisher == 'dm': top_dir = os.path.join(dl_paths['dm_stories'], 'dailymail', 'stories') else: logging.fatal('Unsupported publisher: %s', publisher) files = tf.io.gfile.listdir(top_dir) ret_files = [] for p in files: basename = os.path.basename(p) if basename[0:basename.find('.story')] in url_dict: ret_files.append(os.path.join(top_dir, p)) return ret_files
[ "def", "_find_files", "(", "dl_paths", ",", "publisher", ",", "url_dict", ")", ":", "if", "publisher", "==", "'cnn'", ":", "top_dir", "=", "os", ".", "path", ".", "join", "(", "dl_paths", "[", "'cnn_stories'", "]", ",", "'cnn'", ",", "'stories'", ")", ...
Find files corresponding to urls.
[ "Find", "files", "corresponding", "to", "urls", "." ]
python
train
ntoll/uflash
uflash.py
https://github.com/ntoll/uflash/blob/867468d386da0aa20212b69a152ce8bfc0972366/uflash.py#L379-L452
def main(argv=None): """ Entry point for the command line tool 'uflash'. Will print help text if the optional first argument is "help". Otherwise it will ensure the optional first argument ends in ".py" (the source Python script). An optional second argument is used to reference the path to the micro:bit device. Any more arguments are ignored. Exceptions are caught and printed for the user. """ if not argv: argv = sys.argv[1:] parser = argparse.ArgumentParser(description=_HELP_TEXT) parser.add_argument('source', nargs='?', default=None) parser.add_argument('target', nargs='*', default=None) parser.add_argument('-r', '--runtime', default=None, help="Use the referenced MicroPython runtime.") parser.add_argument('-e', '--extract', action='store_true', help=("Extract python source from a hex file" " instead of creating the hex file."), ) parser.add_argument('-w', '--watch', action='store_true', help='Watch the source file for changes.') parser.add_argument('-m', '--minify', action='store_true', help='Minify the source') parser.add_argument('--version', action='version', version='%(prog)s ' + get_version()) args = parser.parse_args(argv) if args.extract: try: extract(args.source, args.target) except Exception as ex: error_message = "Error extracting {source}: {error!s}" print(error_message.format(source=args.source, error=ex), file=sys.stderr) sys.exit(1) elif args.watch: try: watch_file(args.source, flash, path_to_python=args.source, paths_to_microbits=args.target, path_to_runtime=args.runtime) except Exception as ex: error_message = "Error watching {source}: {error!s}" print(error_message.format(source=args.source, error=ex), file=sys.stderr) sys.exit(1) else: try: flash(path_to_python=args.source, paths_to_microbits=args.target, path_to_runtime=args.runtime, minify=args.minify) except Exception as ex: error_message = ( "Error flashing {source} to {target}{runtime}: {error!s}" ) source = args.source target = args.target if args.target else "microbit" if args.runtime: 
runtime = "with runtime {runtime}".format(runtime=args.runtime) else: runtime = "" print(error_message.format(source=source, target=target, runtime=runtime, error=ex), file=sys.stderr) sys.exit(1)
[ "def", "main", "(", "argv", "=", "None", ")", ":", "if", "not", "argv", ":", "argv", "=", "sys", ".", "argv", "[", "1", ":", "]", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "_HELP_TEXT", ")", "parser", ".", "add_argume...
Entry point for the command line tool 'uflash'. Will print help text if the optional first argument is "help". Otherwise it will ensure the optional first argument ends in ".py" (the source Python script). An optional second argument is used to reference the path to the micro:bit device. Any more arguments are ignored. Exceptions are caught and printed for the user.
[ "Entry", "point", "for", "the", "command", "line", "tool", "uflash", "." ]
python
train
blockstack/blockstack-core
blockstack/blockstackd.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L1757-L1842
def rpc_put_zonefiles( self, zonefile_datas, **con_info ): """ Replicate one or more zonefiles, given as serialized strings. Only stores zone files whose zone file hashes were announced on the blockchain (i.e. not subdomain zone files) Returns {'status': True, 'saved': [0|1]'} on success ('saved' is a vector of success/failure) Returns {'error': ...} on error Takes at most 5 zonefiles """ conf = get_blockstack_opts() if not is_atlas_enabled(conf): return {'error': 'No data', 'http_status': 400} if 'zonefiles' not in conf: return {'error': 'No zonefiles directory (likely a configuration error)', 'http_status': 400} if type(zonefile_datas) != list: return {'error': 'Invalid data', 'http_status': 400} if len(zonefile_datas) > 5: return {'error': 'Too many zonefiles', 'http_status': 400} for zfd in zonefile_datas: if not check_string(zfd, max_length=((4 * RPC_MAX_ZONEFILE_LEN) / 3) + 3, pattern=OP_BASE64_EMPTY_PATTERN): return {'error': 'Invalid zone file payload (exceeds {} bytes and/or not base64-encoded)'.format(RPC_MAX_ZONEFILE_LEN)} zonefile_dir = conf.get("zonefiles", None) saved = [] for zonefile_data in zonefile_datas: # decode try: zonefile_data = base64.b64decode( zonefile_data ) except: log.debug("Invalid base64 zonefile") saved.append(0) continue if len(zonefile_data) > RPC_MAX_ZONEFILE_LEN: log.debug("Zonefile too long") saved.append(0) continue # is this zone file already discovered? 
zonefile_hash = get_zonefile_data_hash(str(zonefile_data)) zfinfos = atlasdb_get_zonefiles_by_hash(zonefile_hash, path=conf['atlasdb_path']) if not zfinfos: # nope log.debug("Unknown zonefile hash {}".format(zonefile_hash)) saved.append(0) continue # keep this zone file rc = store_atlas_zonefile_data( str(zonefile_data), zonefile_dir ) if not rc: log.error("Failed to store zonefile {}".format(zonefile_hash)) saved.append(0) continue # mark this zone file as present, so we don't ask anyone else for it was_present = atlasdb_set_zonefile_present(zonefile_hash, True, path=conf['atlasdb_path']) if was_present: # we already got this zone file # only process it if it's outside our recovery range recovery_start, recovery_end = get_recovery_range(self.working_dir) current_block = virtualchain_hooks.get_last_block(self.working_dir) if recovery_start is not None and recovery_end is not None and recovery_end < current_block: # no need to process log.debug("Already have zonefile {}".format(zonefile_hash)) saved.append(1) continue if self.subdomain_index: # got new zonefile # let the subdomain indexer know, along with giving it the minimum block height min_block_height = min([zfi['block_height'] for zfi in zfinfos]) log.debug("Enqueue {} from {} for subdomain processing".format(zonefile_hash, min_block_height)) self.subdomain_index.enqueue_zonefile(zonefile_hash, min_block_height) log.debug("Stored new zonefile {}".format(zonefile_hash)) saved.append(1) log.debug("Saved {} zonefile(s)".format(sum(saved))) log.debug("Reply: {}".format({'saved': saved})) return self.success_response( {'saved': saved} )
[ "def", "rpc_put_zonefiles", "(", "self", ",", "zonefile_datas", ",", "*", "*", "con_info", ")", ":", "conf", "=", "get_blockstack_opts", "(", ")", "if", "not", "is_atlas_enabled", "(", "conf", ")", ":", "return", "{", "'error'", ":", "'No data'", ",", "'ht...
Replicate one or more zonefiles, given as serialized strings. Only stores zone files whose zone file hashes were announced on the blockchain (i.e. not subdomain zone files) Returns {'status': True, 'saved': [0|1]'} on success ('saved' is a vector of success/failure) Returns {'error': ...} on error Takes at most 5 zonefiles
[ "Replicate", "one", "or", "more", "zonefiles", "given", "as", "serialized", "strings", ".", "Only", "stores", "zone", "files", "whose", "zone", "file", "hashes", "were", "announced", "on", "the", "blockchain", "(", "i", ".", "e", ".", "not", "subdomain", "...
python
train
Azure/azure-sdk-for-python
azure-cognitiveservices-search-imagesearch/azure/cognitiveservices/search/imagesearch/operations/images_operations.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-cognitiveservices-search-imagesearch/azure/cognitiveservices/search/imagesearch/operations/images_operations.py#L499-L897
def details( self, query, accept_language=None, content_type=None, user_agent=None, client_id=None, client_ip=None, location=None, crop_bottom=None, crop_left=None, crop_right=None, crop_top=None, crop_type=None, country_code=None, id=None, image_url=None, insights_token=None, modules=None, market=None, safe_search=None, set_lang=None, custom_headers=None, raw=False, **operation_config): """The Image Detail Search API lets you search on Bing and get back insights about an image, such as webpages that include the image. This section provides technical details about the query parameters and headers that you use to request insights of images and the JSON response objects that contain them. For examples that show how to make requests, see [Searching the Web for Images](https://docs.microsoft.com/azure/cognitive-services/bing-image-search/search-the-web). :param query: The user's search query term. The term cannot be empty. The term may contain [Bing Advanced Operators](http://msdn.microsoft.com/library/ff795620.aspx). For example, to limit images to a specific domain, use the [site:](http://msdn.microsoft.com/library/ff795613.aspx) operator. To help improve relevance of an insights query (see [insightsToken](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#insightstoken)), you should always include the user's query term. Use this parameter only with the Image Search API.Do not specify this parameter when calling the Trending Images API. :type query: str :param accept_language: A comma-delimited list of one or more languages to use for user interface strings. The list is in decreasing order of preference. For additional information, including expected format, see [RFC2616](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html). This header and the [setLang](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#setlang) query parameter are mutually exclusive; do not specify both. 
If you set this header, you must also specify the [cc](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#cc) query parameter. To determine the market to return results for, Bing uses the first supported language it finds from the list and combines it with the cc parameter value. If the list does not include a supported language, Bing finds the closest language and market that supports the request or it uses an aggregated or default market for the results. To determine the market that Bing used, see the BingAPIs-Market header. Use this header and the cc query parameter only if you specify multiple languages. Otherwise, use the [mkt](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#mkt) and [setLang](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#setlang) query parameters. A user interface string is a string that's used as a label in a user interface. There are few user interface strings in the JSON response objects. Any links to Bing.com properties in the response objects apply the specified language. :type accept_language: str :param content_type: Optional request header. If you set the [modules](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#modulesrequested) query parameter to RecognizedEntities, you may specify the binary of an image in the body of a POST request. If you specify the image in the body of a POST request, you must specify this header and set its value to multipart/form-data. The maximum image size is 1 MB. :type content_type: str :param user_agent: The user agent originating the request. Bing uses the user agent to provide mobile users with an optimized experience. Although optional, you are encouraged to always specify this header. The user-agent should be the same string that any commonly used browser sends. 
For information about user agents, see [RFC 2616](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html). The following are examples of user-agent strings. Windows Phone: Mozilla/5.0 (compatible; MSIE 10.0; Windows Phone 8.0; Trident/6.0; IEMobile/10.0; ARM; Touch; NOKIA; Lumia 822). Android: Mozilla / 5.0 (Linux; U; Android 2.3.5; en - us; SCH - I500 Build / GINGERBREAD) AppleWebKit / 533.1 (KHTML; like Gecko) Version / 4.0 Mobile Safari / 533.1. iPhone: Mozilla / 5.0 (iPhone; CPU iPhone OS 6_1 like Mac OS X) AppleWebKit / 536.26 (KHTML; like Gecko) Mobile / 10B142 iPhone4; 1 BingWeb / 3.03.1428.20120423. PC: Mozilla / 5.0 (Windows NT 6.3; WOW64; Trident / 7.0; Touch; rv:11.0) like Gecko. iPad: Mozilla / 5.0 (iPad; CPU OS 7_0 like Mac OS X) AppleWebKit / 537.51.1 (KHTML, like Gecko) Version / 7.0 Mobile / 11A465 Safari / 9537.53 :type user_agent: str :param client_id: Bing uses this header to provide users with consistent behavior across Bing API calls. Bing often flights new features and improvements, and it uses the client ID as a key for assigning traffic on different flights. If you do not use the same client ID for a user across multiple requests, then Bing may assign the user to multiple conflicting flights. Being assigned to multiple conflicting flights can lead to an inconsistent user experience. For example, if the second request has a different flight assignment than the first, the experience may be unexpected. Also, Bing can use the client ID to tailor web results to that client ID’s search history, providing a richer experience for the user. Bing also uses this header to help improve result rankings by analyzing the activity generated by a client ID. The relevance improvements help with better quality of results delivered by Bing APIs and in turn enables higher click-through rates for the API consumer. IMPORTANT: Although optional, you should consider this header required. 
Persisting the client ID across multiple requests for the same end user and device combination enables 1) the API consumer to receive a consistent user experience, and 2) higher click-through rates via better quality of results from the Bing APIs. Each user that uses your application on the device must have a unique, Bing generated client ID. If you do not include this header in the request, Bing generates an ID and returns it in the X-MSEdge-ClientID response header. The only time that you should NOT include this header in a request is the first time the user uses your app on that device. Use the client ID for each Bing API request that your app makes for this user on the device. Persist the client ID. To persist the ID in a browser app, use a persistent HTTP cookie to ensure the ID is used across all sessions. Do not use a session cookie. For other apps such as mobile apps, use the device's persistent storage to persist the ID. The next time the user uses your app on that device, get the client ID that you persisted. Bing responses may or may not include this header. If the response includes this header, capture the client ID and use it for all subsequent Bing requests for the user on that device. If you include the X-MSEdge-ClientID, you must not include cookies in the request. :type client_id: str :param client_ip: The IPv4 or IPv6 address of the client device. The IP address is used to discover the user's location. Bing uses the location information to determine safe search behavior. Although optional, you are encouraged to always specify this header and the X-Search-Location header. Do not obfuscate the address (for example, by changing the last octet to 0). Obfuscating the address results in the location not being anywhere near the device's actual location, which may result in Bing serving erroneous results. :type client_ip: str :param location: A semicolon-delimited list of key/value pairs that describe the client's geographical location. 
Bing uses the location information to determine safe search behavior and to return relevant local content. Specify the key/value pair as <key>:<value>. The following are the keys that you use to specify the user's location. lat (required): The latitude of the client's location, in degrees. The latitude must be greater than or equal to -90.0 and less than or equal to +90.0. Negative values indicate southern latitudes and positive values indicate northern latitudes. long (required): The longitude of the client's location, in degrees. The longitude must be greater than or equal to -180.0 and less than or equal to +180.0. Negative values indicate western longitudes and positive values indicate eastern longitudes. re (required): The radius, in meters, which specifies the horizontal accuracy of the coordinates. Pass the value returned by the device's location service. Typical values might be 22m for GPS/Wi-Fi, 380m for cell tower triangulation, and 18,000m for reverse IP lookup. ts (optional): The UTC UNIX timestamp of when the client was at the location. (The UNIX timestamp is the number of seconds since January 1, 1970.) head (optional): The client's relative heading or direction of travel. Specify the direction of travel as degrees from 0 through 360, counting clockwise relative to true north. Specify this key only if the sp key is nonzero. sp (optional): The horizontal velocity (speed), in meters per second, that the client device is traveling. alt (optional): The altitude of the client device, in meters. are (optional): The radius, in meters, that specifies the vertical accuracy of the coordinates. Specify this key only if you specify the alt key. Although many of the keys are optional, the more information that you provide, the more accurate the location results are. Although optional, you are encouraged to always specify the user's geographical location. 
Providing the location is especially important if the client's IP address does not accurately reflect the user's physical location (for example, if the client uses VPN). For optimal results, you should include this header and the X-MSEdge-ClientIP header, but at a minimum, you should include this header. :type location: str :param crop_bottom: The bottom coordinate of the region to crop. The coordinate is a fractional value of the original image's height and is measured from the top, left corner of the image. Specify the coordinate as a value from 0.0 through 1.0. Use this parameter only with the Insights API. Do not specify this parameter when calling the Images, Trending Images, or Web Search APIs. :type crop_bottom: float :param crop_left: The left coordinate of the region to crop. The coordinate is a fractional value of the original image's height and is measured from the top, left corner of the image. Specify the coordinate as a value from 0.0 through 1.0. Use this parameter only with the Insights API. Do not specify this parameter when calling the Images, Trending Images, or Web Search APIs. :type crop_left: float :param crop_right: The right coordinate of the region to crop. The coordinate is a fractional value of the original image's height and is measured from the top, left corner of the image. Specify the coordinate as a value from 0.0 through 1.0. Use this parameter only with the Insights API. Do not specify this parameter when calling the Images, Trending Images, or Web Search APIs. :type crop_right: float :param crop_top: The top coordinate of the region to crop. The coordinate is a fractional value of the original image's height and is measured from the top, left corner of the image. Specify the coordinate as a value from 0.0 through 1.0. Use this parameter only with the Insights API. Do not specify this parameter when calling the Images, Trending Images, or Web Search APIs. 
:type crop_top: float :param crop_type: The crop type to use when cropping the image based on the coordinates specified in the cal, cat, car, and cab parameters. The following are the possible values. 0: Rectangular (default). Use this parameter only with the Insights API. Do not specify this parameter when calling the Images, Trending Images, or Web Search APIs. Possible values include: 'Rectangular' :type crop_type: str or ~azure.cognitiveservices.search.imagesearch.models.ImageCropType :param country_code: A 2-character country code of the country where the results come from. For a list of possible values, see [Market Codes](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#market-codes). If you set this parameter, you must also specify the [Accept-Language](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#acceptlanguage) header. Bing uses the first supported language it finds from the languages list, and combine that language with the country code that you specify to determine the market to return results for. If the languages list does not include a supported language, Bing finds the closest language and market that supports the request, or it may use an aggregated or default market for the results instead of a specified one. You should use this query parameter and the Accept-Language query parameter only if you specify multiple languages; otherwise, you should use the mkt and setLang query parameters. This parameter and the [mkt](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#mkt) query parameter are mutually exclusive—do not specify both. :type country_code: str :param id: An ID that uniquely identifies an image. Use this parameter to ensure that the specified image is the first image in the list of images that Bing returns. 
The [Image](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#image) object's imageId field contains the ID that you set this parameter to. :type id: str :param image_url: The URL of an image that you want to get insights of. Use this parameter as an alternative to using the insightsToken parameter to specify the image. You may also specify the image by placing the binary of the image in the body of a POST request. If you use the binary option, see the [Content-Type](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#contenttype) header. The maximum supported image size is 1 MB. Use this parameter only with the Insights API. Do not specify this parameter when calling the Images, Trending Images, or Web Search APIs. :type image_url: str :param insights_token: An image token. The [Image](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#image) object's [imageInsightsToken](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#image-imageinsightstoken) contains the token. Specify this parameter to get additional information about an image, such as a caption or shopping source. For a list of the additional information about an image that you can get, see the [modules](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#modulesrequested) query parameter. Use this parameter only with the Insights API. Do not specify this parameter when calling the Images, Trending Images, or Web Search APIs. :type insights_token: str :param modules: A comma-delimited list of insights to request. The following are the possible case-insensitive values. All: Return all insights, if available, except RecognizedEntities. BRQ: Best representative query. The query term that best describes the image. Caption: A caption that provides information about the image. 
If the caption contains entities, the response may include links to images of those entities. Collections: A list of related images. Recipes: A list of recipes for cooking the food shown in the images. PagesIncluding: A list of webpages that include the image. RecognizedEntities: A list of entities (people) that were recognized in the image. NOTE: You may not specify this module with any other module. If you specify it with other modules, the response doesn't include recognized entities. RelatedSearches: A list of related searches made by others. ShoppingSources: A list of merchants where you can buy related offerings. SimilarImages: A list of images that are visually similar to the original image. SimilarProducts: A list of images that contain a product that is similar to a product found in the original image. Tags: Provides characteristics of the type of content found in the image. For example, if the image is of a person, the tags might indicate the person's gender and type of clothes they're wearing. If you specify a module and there is no data for the module, the response object doesn't include the related field. For example, if you specify Caption and it does not exist, the response doesn't include the imageCaption field. To include related searches, the request must include the original query string. Although the original query string is not required for similar images or products, you should always include it because it can help improve relevance and the results. Use this parameter only with the Insights API. Do not specify this parameter when calling the Images, Trending Images, or Web Search APIs. :type modules: list[str or ~azure.cognitiveservices.search.imagesearch.models.ImageInsightModule] :param market: The market where the results come from. Typically, mkt is the country where the user is making the request from. However, it could be a different country if the user is not located in a country where Bing delivers results. 
The market must be in the form <language code>-<country code>. For example, en-US. The string is case insensitive. For a list of possible market values, see [Market Codes](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#market-codes). NOTE: If known, you are encouraged to always specify the market. Specifying the market helps Bing route the request and return an appropriate and optimal response. If you specify a market that is not listed in [Market Codes](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#market-codes), Bing uses a best fit market code based on an internal mapping that is subject to change. This parameter and the [cc](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#cc) query parameter are mutually exclusive—do not specify both. :type market: str :param safe_search: Filter images for adult content. The following are the possible filter values. Off: May return images with adult content. If the request is through the Image Search API, the response includes thumbnail images that are clear (non-fuzzy). However, if the request is through the Web Search API, the response includes thumbnail images that are pixelated (fuzzy). Moderate: If the request is through the Image Search API, the response doesn't include images with adult content. If the request is through the Web Search API, the response may include images with adult content (the thumbnail images are pixelated (fuzzy)). Strict: Do not return images with adult content. The default is Moderate. If the request comes from a market that Bing's adult policy requires that safeSearch is set to Strict, Bing ignores the safeSearch value and uses Strict. If you use the site: query operator, there is the chance that the response may contain adult content regardless of what the safeSearch query parameter is set to. 
Use site: only if you are aware of the content on the site and your scenario supports the possibility of adult content. Possible values include: 'Off', 'Moderate', 'Strict' :type safe_search: str or ~azure.cognitiveservices.search.imagesearch.models.SafeSearch :param set_lang: The language to use for user interface strings. Specify the language using the ISO 639-1 2-letter language code. For example, the language code for English is EN. The default is EN (English). Although optional, you should always specify the language. Typically, you set setLang to the same language specified by mkt unless the user wants the user interface strings displayed in a different language. This parameter and the [Accept-Language](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#acceptlanguage) header are mutually exclusive; do not specify both. A user interface string is a string that's used as a label in a user interface. There are few user interface strings in the JSON response objects. Also, any links to Bing.com properties in the response objects apply the specified language. :type set_lang: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: ImageInsights or ClientRawResponse if raw=true :rtype: ~azure.cognitiveservices.search.imagesearch.models.ImageInsights or ~msrest.pipeline.ClientRawResponse :raises: :class:`ErrorResponseException<azure.cognitiveservices.search.imagesearch.models.ErrorResponseException>` """ # Construct URL url = self.details.metadata['url'] # Construct parameters query_parameters = {} if crop_bottom is not None: query_parameters['cab'] = self._serialize.query("crop_bottom", crop_bottom, 'float') if crop_left is not None: query_parameters['cal'] = self._serialize.query("crop_left", crop_left, 'float') if crop_right is not None: query_parameters['car'] = self._serialize.query("crop_right", crop_right, 'float') if crop_top is not None: query_parameters['cat'] = self._serialize.query("crop_top", crop_top, 'float') if crop_type is not None: query_parameters['ct'] = self._serialize.query("crop_type", crop_type, 'str') if country_code is not None: query_parameters['cc'] = self._serialize.query("country_code", country_code, 'str') if id is not None: query_parameters['id'] = self._serialize.query("id", id, 'str') if image_url is not None: query_parameters['imgUrl'] = self._serialize.query("image_url", image_url, 'str') if insights_token is not None: query_parameters['insightsToken'] = self._serialize.query("insights_token", insights_token, 'str') if modules is not None: query_parameters['modules'] = self._serialize.query("modules", modules, '[str]', div=',') if market is not None: query_parameters['mkt'] = self._serialize.query("market", market, 'str') query_parameters['q'] = self._serialize.query("query", query, 'str') if safe_search is not None: query_parameters['safeSearch'] = self._serialize.query("safe_search", safe_search, 'str') if set_lang is not None: query_parameters['setLang'] = self._serialize.query("set_lang", set_lang, 'str') # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if custom_headers: 
header_parameters.update(custom_headers) header_parameters['X-BingApis-SDK'] = self._serialize.header("self.x_bing_apis_sdk", self.x_bing_apis_sdk, 'str') if accept_language is not None: header_parameters['Accept-Language'] = self._serialize.header("accept_language", accept_language, 'str') if content_type is not None: header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') if user_agent is not None: header_parameters['User-Agent'] = self._serialize.header("user_agent", user_agent, 'str') if client_id is not None: header_parameters['X-MSEdge-ClientID'] = self._serialize.header("client_id", client_id, 'str') if client_ip is not None: header_parameters['X-MSEdge-ClientIP'] = self._serialize.header("client_ip", client_ip, 'str') if location is not None: header_parameters['X-Search-Location'] = self._serialize.header("location", location, 'str') # Construct and send request request = self._client.get(url, query_parameters) response = self._client.send(request, header_parameters, stream=False, **operation_config) if response.status_code not in [200]: raise models.ErrorResponseException(self._deserialize, response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('ImageInsights', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized
[ "def", "details", "(", "self", ",", "query", ",", "accept_language", "=", "None", ",", "content_type", "=", "None", ",", "user_agent", "=", "None", ",", "client_id", "=", "None", ",", "client_ip", "=", "None", ",", "location", "=", "None", ",", "crop_bot...
The Image Detail Search API lets you search on Bing and get back insights about an image, such as webpages that include the image. This section provides technical details about the query parameters and headers that you use to request insights of images and the JSON response objects that contain them. For examples that show how to make requests, see [Searching the Web for Images](https://docs.microsoft.com/azure/cognitive-services/bing-image-search/search-the-web). :param query: The user's search query term. The term cannot be empty. The term may contain [Bing Advanced Operators](http://msdn.microsoft.com/library/ff795620.aspx). For example, to limit images to a specific domain, use the [site:](http://msdn.microsoft.com/library/ff795613.aspx) operator. To help improve relevance of an insights query (see [insightsToken](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#insightstoken)), you should always include the user's query term. Use this parameter only with the Image Search API.Do not specify this parameter when calling the Trending Images API. :type query: str :param accept_language: A comma-delimited list of one or more languages to use for user interface strings. The list is in decreasing order of preference. For additional information, including expected format, see [RFC2616](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html). This header and the [setLang](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#setlang) query parameter are mutually exclusive; do not specify both. If you set this header, you must also specify the [cc](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#cc) query parameter. To determine the market to return results for, Bing uses the first supported language it finds from the list and combines it with the cc parameter value. 
If the list does not include a supported language, Bing finds the closest language and market that supports the request or it uses an aggregated or default market for the results. To determine the market that Bing used, see the BingAPIs-Market header. Use this header and the cc query parameter only if you specify multiple languages. Otherwise, use the [mkt](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#mkt) and [setLang](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#setlang) query parameters. A user interface string is a string that's used as a label in a user interface. There are few user interface strings in the JSON response objects. Any links to Bing.com properties in the response objects apply the specified language. :type accept_language: str :param content_type: Optional request header. If you set the [modules](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#modulesrequested) query parameter to RecognizedEntities, you may specify the binary of an image in the body of a POST request. If you specify the image in the body of a POST request, you must specify this header and set its value to multipart/form-data. The maximum image size is 1 MB. :type content_type: str :param user_agent: The user agent originating the request. Bing uses the user agent to provide mobile users with an optimized experience. Although optional, you are encouraged to always specify this header. The user-agent should be the same string that any commonly used browser sends. For information about user agents, see [RFC 2616](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html). The following are examples of user-agent strings. Windows Phone: Mozilla/5.0 (compatible; MSIE 10.0; Windows Phone 8.0; Trident/6.0; IEMobile/10.0; ARM; Touch; NOKIA; Lumia 822). 
Android: Mozilla / 5.0 (Linux; U; Android 2.3.5; en - us; SCH - I500 Build / GINGERBREAD) AppleWebKit / 533.1 (KHTML; like Gecko) Version / 4.0 Mobile Safari / 533.1. iPhone: Mozilla / 5.0 (iPhone; CPU iPhone OS 6_1 like Mac OS X) AppleWebKit / 536.26 (KHTML; like Gecko) Mobile / 10B142 iPhone4; 1 BingWeb / 3.03.1428.20120423. PC: Mozilla / 5.0 (Windows NT 6.3; WOW64; Trident / 7.0; Touch; rv:11.0) like Gecko. iPad: Mozilla / 5.0 (iPad; CPU OS 7_0 like Mac OS X) AppleWebKit / 537.51.1 (KHTML, like Gecko) Version / 7.0 Mobile / 11A465 Safari / 9537.53 :type user_agent: str :param client_id: Bing uses this header to provide users with consistent behavior across Bing API calls. Bing often flights new features and improvements, and it uses the client ID as a key for assigning traffic on different flights. If you do not use the same client ID for a user across multiple requests, then Bing may assign the user to multiple conflicting flights. Being assigned to multiple conflicting flights can lead to an inconsistent user experience. For example, if the second request has a different flight assignment than the first, the experience may be unexpected. Also, Bing can use the client ID to tailor web results to that client ID’s search history, providing a richer experience for the user. Bing also uses this header to help improve result rankings by analyzing the activity generated by a client ID. The relevance improvements help with better quality of results delivered by Bing APIs and in turn enables higher click-through rates for the API consumer. IMPORTANT: Although optional, you should consider this header required. Persisting the client ID across multiple requests for the same end user and device combination enables 1) the API consumer to receive a consistent user experience, and 2) higher click-through rates via better quality of results from the Bing APIs. Each user that uses your application on the device must have a unique, Bing generated client ID. 
If you do not include this header in the request, Bing generates an ID and returns it in the X-MSEdge-ClientID response header. The only time that you should NOT include this header in a request is the first time the user uses your app on that device. Use the client ID for each Bing API request that your app makes for this user on the device. Persist the client ID. To persist the ID in a browser app, use a persistent HTTP cookie to ensure the ID is used across all sessions. Do not use a session cookie. For other apps such as mobile apps, use the device's persistent storage to persist the ID. The next time the user uses your app on that device, get the client ID that you persisted. Bing responses may or may not include this header. If the response includes this header, capture the client ID and use it for all subsequent Bing requests for the user on that device. If you include the X-MSEdge-ClientID, you must not include cookies in the request. :type client_id: str :param client_ip: The IPv4 or IPv6 address of the client device. The IP address is used to discover the user's location. Bing uses the location information to determine safe search behavior. Although optional, you are encouraged to always specify this header and the X-Search-Location header. Do not obfuscate the address (for example, by changing the last octet to 0). Obfuscating the address results in the location not being anywhere near the device's actual location, which may result in Bing serving erroneous results. :type client_ip: str :param location: A semicolon-delimited list of key/value pairs that describe the client's geographical location. Bing uses the location information to determine safe search behavior and to return relevant local content. Specify the key/value pair as <key>:<value>. The following are the keys that you use to specify the user's location. lat (required): The latitude of the client's location, in degrees. 
The latitude must be greater than or equal to -90.0 and less than or equal to +90.0. Negative values indicate southern latitudes and positive values indicate northern latitudes. long (required): The longitude of the client's location, in degrees. The longitude must be greater than or equal to -180.0 and less than or equal to +180.0. Negative values indicate western longitudes and positive values indicate eastern longitudes. re (required): The radius, in meters, which specifies the horizontal accuracy of the coordinates. Pass the value returned by the device's location service. Typical values might be 22m for GPS/Wi-Fi, 380m for cell tower triangulation, and 18,000m for reverse IP lookup. ts (optional): The UTC UNIX timestamp of when the client was at the location. (The UNIX timestamp is the number of seconds since January 1, 1970.) head (optional): The client's relative heading or direction of travel. Specify the direction of travel as degrees from 0 through 360, counting clockwise relative to true north. Specify this key only if the sp key is nonzero. sp (optional): The horizontal velocity (speed), in meters per second, that the client device is traveling. alt (optional): The altitude of the client device, in meters. are (optional): The radius, in meters, that specifies the vertical accuracy of the coordinates. Specify this key only if you specify the alt key. Although many of the keys are optional, the more information that you provide, the more accurate the location results are. Although optional, you are encouraged to always specify the user's geographical location. Providing the location is especially important if the client's IP address does not accurately reflect the user's physical location (for example, if the client uses VPN). For optimal results, you should include this header and the X-MSEdge-ClientIP header, but at a minimum, you should include this header. :type location: str :param crop_bottom: The bottom coordinate of the region to crop. 
The coordinate is a fractional value of the original image's height and is measured from the top, left corner of the image. Specify the coordinate as a value from 0.0 through 1.0. Use this parameter only with the Insights API. Do not specify this parameter when calling the Images, Trending Images, or Web Search APIs. :type crop_bottom: float :param crop_left: The left coordinate of the region to crop. The coordinate is a fractional value of the original image's height and is measured from the top, left corner of the image. Specify the coordinate as a value from 0.0 through 1.0. Use this parameter only with the Insights API. Do not specify this parameter when calling the Images, Trending Images, or Web Search APIs. :type crop_left: float :param crop_right: The right coordinate of the region to crop. The coordinate is a fractional value of the original image's height and is measured from the top, left corner of the image. Specify the coordinate as a value from 0.0 through 1.0. Use this parameter only with the Insights API. Do not specify this parameter when calling the Images, Trending Images, or Web Search APIs. :type crop_right: float :param crop_top: The top coordinate of the region to crop. The coordinate is a fractional value of the original image's height and is measured from the top, left corner of the image. Specify the coordinate as a value from 0.0 through 1.0. Use this parameter only with the Insights API. Do not specify this parameter when calling the Images, Trending Images, or Web Search APIs. :type crop_top: float :param crop_type: The crop type to use when cropping the image based on the coordinates specified in the cal, cat, car, and cab parameters. The following are the possible values. 0: Rectangular (default). Use this parameter only with the Insights API. Do not specify this parameter when calling the Images, Trending Images, or Web Search APIs. 
Possible values include: 'Rectangular' :type crop_type: str or ~azure.cognitiveservices.search.imagesearch.models.ImageCropType :param country_code: A 2-character country code of the country where the results come from. For a list of possible values, see [Market Codes](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#market-codes). If you set this parameter, you must also specify the [Accept-Language](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#acceptlanguage) header. Bing uses the first supported language it finds from the languages list, and combine that language with the country code that you specify to determine the market to return results for. If the languages list does not include a supported language, Bing finds the closest language and market that supports the request, or it may use an aggregated or default market for the results instead of a specified one. You should use this query parameter and the Accept-Language query parameter only if you specify multiple languages; otherwise, you should use the mkt and setLang query parameters. This parameter and the [mkt](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#mkt) query parameter are mutually exclusive—do not specify both. :type country_code: str :param id: An ID that uniquely identifies an image. Use this parameter to ensure that the specified image is the first image in the list of images that Bing returns. The [Image](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#image) object's imageId field contains the ID that you set this parameter to. :type id: str :param image_url: The URL of an image that you want to get insights of. Use this parameter as an alternative to using the insightsToken parameter to specify the image. You may also specify the image by placing the binary of the image in the body of a POST request. 
If you use the binary option, see the [Content-Type](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#contenttype) header. The maximum supported image size is 1 MB. Use this parameter only with the Insights API. Do not specify this parameter when calling the Images, Trending Images, or Web Search APIs. :type image_url: str :param insights_token: An image token. The [Image](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#image) object's [imageInsightsToken](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#image-imageinsightstoken) contains the token. Specify this parameter to get additional information about an image, such as a caption or shopping source. For a list of the additional information about an image that you can get, see the [modules](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#modulesrequested) query parameter. Use this parameter only with the Insights API. Do not specify this parameter when calling the Images, Trending Images, or Web Search APIs. :type insights_token: str :param modules: A comma-delimited list of insights to request. The following are the possible case-insensitive values. All: Return all insights, if available, except RecognizedEntities. BRQ: Best representative query. The query term that best describes the image. Caption: A caption that provides information about the image. If the caption contains entities, the response may include links to images of those entities. Collections: A list of related images. Recipes: A list of recipes for cooking the food shown in the images. PagesIncluding: A list of webpages that include the image. RecognizedEntities: A list of entities (people) that were recognized in the image. NOTE: You may not specify this module with any other module. If you specify it with other modules, the response doesn't include recognized entities. 
RelatedSearches: A list of related searches made by others. ShoppingSources: A list of merchants where you can buy related offerings. SimilarImages: A list of images that are visually similar to the original image. SimilarProducts: A list of images that contain a product that is similar to a product found in the original image. Tags: Provides characteristics of the type of content found in the image. For example, if the image is of a person, the tags might indicate the person's gender and type of clothes they're wearing. If you specify a module and there is no data for the module, the response object doesn't include the related field. For example, if you specify Caption and it does not exist, the response doesn't include the imageCaption field. To include related searches, the request must include the original query string. Although the original query string is not required for similar images or products, you should always include it because it can help improve relevance and the results. Use this parameter only with the Insights API. Do not specify this parameter when calling the Images, Trending Images, or Web Search APIs. :type modules: list[str or ~azure.cognitiveservices.search.imagesearch.models.ImageInsightModule] :param market: The market where the results come from. Typically, mkt is the country where the user is making the request from. However, it could be a different country if the user is not located in a country where Bing delivers results. The market must be in the form <language code>-<country code>. For example, en-US. The string is case insensitive. For a list of possible market values, see [Market Codes](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#market-codes). NOTE: If known, you are encouraged to always specify the market. Specifying the market helps Bing route the request and return an appropriate and optimal response. 
If you specify a market that is not listed in [Market Codes](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#market-codes), Bing uses a best fit market code based on an internal mapping that is subject to change. This parameter and the [cc](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#cc) query parameter are mutually exclusive—do not specify both. :type market: str :param safe_search: Filter images for adult content. The following are the possible filter values. Off: May return images with adult content. If the request is through the Image Search API, the response includes thumbnail images that are clear (non-fuzzy). However, if the request is through the Web Search API, the response includes thumbnail images that are pixelated (fuzzy). Moderate: If the request is through the Image Search API, the response doesn't include images with adult content. If the request is through the Web Search API, the response may include images with adult content (the thumbnail images are pixelated (fuzzy)). Strict: Do not return images with adult content. The default is Moderate. If the request comes from a market that Bing's adult policy requires that safeSearch is set to Strict, Bing ignores the safeSearch value and uses Strict. If you use the site: query operator, there is the chance that the response may contain adult content regardless of what the safeSearch query parameter is set to. Use site: only if you are aware of the content on the site and your scenario supports the possibility of adult content. Possible values include: 'Off', 'Moderate', 'Strict' :type safe_search: str or ~azure.cognitiveservices.search.imagesearch.models.SafeSearch :param set_lang: The language to use for user interface strings. Specify the language using the ISO 639-1 2-letter language code. For example, the language code for English is EN. The default is EN (English). 
Although optional, you should always specify the language. Typically, you set setLang to the same language specified by mkt unless the user wants the user interface strings displayed in a different language. This parameter and the [Accept-Language](https://docs.microsoft.com/en-us/rest/api/cognitiveservices/bing-images-api-v7-reference#acceptlanguage) header are mutually exclusive; do not specify both. A user interface string is a string that's used as a label in a user interface. There are few user interface strings in the JSON response objects. Also, any links to Bing.com properties in the response objects apply the specified language. :type set_lang: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: ImageInsights or ClientRawResponse if raw=true :rtype: ~azure.cognitiveservices.search.imagesearch.models.ImageInsights or ~msrest.pipeline.ClientRawResponse :raises: :class:`ErrorResponseException<azure.cognitiveservices.search.imagesearch.models.ErrorResponseException>`
[ "The", "Image", "Detail", "Search", "API", "lets", "you", "search", "on", "Bing", "and", "get", "back", "insights", "about", "an", "image", "such", "as", "webpages", "that", "include", "the", "image", ".", "This", "section", "provides", "technical", "details...
python
test
iotile/coretools
iotilecore/iotile/core/dev/registry.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/dev/registry.py#L390-L408
def SetBackingStore(cls, backing):
    """Set the global backing type used by the ComponentRegistry from this point forward.

    This function must be called before any operations that use the registry are
    initiated, otherwise they will work from different registries that will likely
    contain different data.

    Args:
        backing (str): The backing store type to use; one of 'json', 'sqlite'
            or 'memory'.

    Raises:
        ArgumentError: If ``backing`` is not one of the supported store types.
    """

    if backing not in ('json', 'sqlite', 'memory'):
        # Fixed: the previous message omitted 'memory' even though it is accepted.
        raise ArgumentError("Unknown backing store type that is not json, sqlite or memory",
                            backing=backing)

    if backing == 'json':
        cls.BackingType = JSONKVStore
        cls.BackingFileName = 'component_registry.json'
    elif backing == 'memory':
        cls.BackingType = InMemoryKVStore
        # In-memory stores have no persistent file.
        cls.BackingFileName = None
    else:
        cls.BackingType = SQLiteKVStore
        cls.BackingFileName = 'component_registry.db'
[ "def", "SetBackingStore", "(", "cls", ",", "backing", ")", ":", "if", "backing", "not", "in", "[", "'json'", ",", "'sqlite'", ",", "'memory'", "]", ":", "raise", "ArgumentError", "(", "\"Unknown backing store type that is not json or sqlite\"", ",", "backing", "="...
Set the global backing type used by the ComponentRegistry from this point forward This function must be called before any operations that use the registry are initiated otherwise they will work from different registries that will likely contain different data
[ "Set", "the", "global", "backing", "type", "used", "by", "the", "ComponentRegistry", "from", "this", "point", "forward" ]
python
train
benmoran56/esper
esper.py
https://github.com/benmoran56/esper/blob/5b6cd0c51718d5dcfa0e5613f824b5251cf092ac/esper.py#L259-L274
def try_component(self, entity: int, component_type: Type):
    """Yield a single optional Component for an Entity.

    If the Entity has a Component of the requested type it is yielded;
    otherwise nothing is yielded and the iterator is simply empty. This
    gives callers a quiet way to access optional Components that may or
    may not exist.

    :param entity: The Entity ID to retrieve the Component for.
    :param component_type: The Component instance you wish to retrieve.
    :return: An iterator containing the single Component instance
             requested, which is empty if the component doesn't exist.
    """
    components = self._entities[entity]
    if component_type in components:
        yield components[component_type]
[ "def", "try_component", "(", "self", ",", "entity", ":", "int", ",", "component_type", ":", "Type", ")", ":", "if", "component_type", "in", "self", ".", "_entities", "[", "entity", "]", ":", "yield", "self", ".", "_entities", "[", "entity", "]", "[", "...
Try to get a single component type for an Entity. This method will return the requested Component if it exists, but will pass silently if it does not. This allows a way to access optional Components that may or may not exist. :param entity: The Entity ID to retrieve the Component for. :param component_type: The Component instance you wish to retrieve. :return: An iterator containing the single Component instance requested, which is empty if the component doesn't exist.
[ "Try", "to", "get", "a", "single", "component", "type", "for", "an", "Entity", ".", "This", "method", "will", "return", "the", "requested", "Component", "if", "it", "exists", "but", "will", "pass", "silently", "if", "it", "does", "not", ".", "This", "all...
python
train
razor-x/scipy-data_fitting
scipy_data_fitting/data.py
https://github.com/razor-x/scipy-data_fitting/blob/c756a645da8629699b3f22244bfb7d5d4d88b179/scipy_data_fitting/data.py#L253-L294
def load_error(self):
    """
    Loads error values from `scipy_data_fitting.Data.path` using
    [`numpy.genfromtxt`][1] and returns a two element tuple where
    each element is of a form described by cases 3 and 4
    in `scipy_data_fitting.Data.error`.

    The columns to import are set by `scipy_data_fitting.Data.error_columns`.

    Values are scaled according to `scipy_data_fitting.Data.scale`.

    Arguments to [`numpy.genfromtxt`][1] are controlled
    by `scipy_data_fitting.Data.genfromtxt_args_error`.

    [1]: http://docs.scipy.org/doc/numpy/reference/generated/numpy.genfromtxt.html
    """
    # Build the flat list of file columns to read: a plain int contributes
    # one column, a two element sequence contributes both of its columns.
    usecols = []
    for v in self.error_columns:
        if v is None:
            pass
        elif isinstance(v, int):
            usecols.append(v)
        elif len(v) == 2:  # fixed: was `len(v) is 2` (identity comparison on ints)
            for n in v:
                usecols.append(n)

    self.genfromtxt_args_error['usecols'] = tuple(usecols)
    array = numpy.genfromtxt(self.path, **self.genfromtxt_args_error)

    # Walk the requested columns again, peeling consumed rows off the front
    # of `array` and scaling each error entry as we go.
    error = []
    for n, v in enumerate(self.error_columns):
        if v is None:
            error.append(None)
        elif isinstance(v, int):
            if len(usecols) == 1:  # fixed: was `len(usecols) is 1`
                # genfromtxt returns a 1D array when only one column is read.
                error.append(array * self.scale[n])
            else:
                error.append(array[0] * self.scale[n])
                array = numpy.delete(array, (0), axis=(0))
        elif len(v) == 2:  # fixed: was `len(v) is 2`
            error.append(array[0:2] * self.scale[n])
            array = numpy.delete(array, (0, 1), axis=(0))

    return tuple(error)
[ "def", "load_error", "(", "self", ")", ":", "usecols", "=", "[", "]", "for", "v", "in", "self", ".", "error_columns", ":", "if", "v", "is", "None", ":", "pass", "elif", "isinstance", "(", "v", ",", "int", ")", ":", "usecols", ".", "append", "(", ...
Loads error values from `scipy_data_fitting.Data.path` using [`numpy.genfromtxt`][1] and returns a two element tuple where each element is of a form described by cases 3 and 4 in `scipy_data_fitting.Data.error`. The columns to import are set by `scipy_data_fitting.Data.error_columns`. Values are scaled according to `scipy_data_fitting.Data.scale`. Arguments to [`numpy.genfromtxt`][1] are controlled by `scipy_data_fitting.Data.genfromtxt_args_error`. [1]: http://docs.scipy.org/doc/numpy/reference/generated/numpy.genfromtxt.html
[ "Loads", "error", "values", "from", "scipy_data_fitting", ".", "Data", ".", "path", "using", "[", "numpy", ".", "genfromtxt", "]", "[", "1", "]", "and", "returns", "a", "two", "element", "tuple", "where", "each", "element", "is", "of", "a", "form", "desc...
python
train
saltstack/salt
salt/modules/openvswitch.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/openvswitch.py#L251-L274
def bridge_to_parent(br):
    '''
    Look up the parent bridge of a bridge.

    Args:
        br: A string - bridge name

    Returns:
        Name of the parent bridge. This is the same as the bridge name if the
        bridge is not a fake bridge. If the bridge does not exist, False is
        returned.

    CLI Example:

    .. code-block:: bash

        salt '*' openvswitch.bridge_to_parent br0
    '''
    command = 'ovs-vsctl br-to-parent {0}'.format(br)
    ret = __salt__['cmd.run_all'](command)
    if ret['retcode'] == 0:
        return ret['stdout']
    return False
[ "def", "bridge_to_parent", "(", "br", ")", ":", "cmd", "=", "'ovs-vsctl br-to-parent {0}'", ".", "format", "(", "br", ")", "result", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ")", "if", "result", "[", "'retcode'", "]", "!=", "0", ":", "retu...
Returns the parent bridge of a bridge. Args: br: A string - bridge name Returns: Name of the parent bridge. This is the same as the bridge name if the bridge is not a fake bridge. If the bridge does not exist, False is returned. CLI Example: .. code-block:: bash salt '*' openvswitch.bridge_to_parent br0
[ "Returns", "the", "parent", "bridge", "of", "a", "bridge", "." ]
python
train
angr/angr
angr/sim_procedure.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/sim_procedure.py#L90-L100
def _describe_me(self):
    """
    return a 5-tuple of strings sufficient for formatting with ``%s%s%s%s%s`` to verbosely describe the procedure
    """
    cont_part = ''
    if self.is_continuation:
        cont_part = ' (cont: %s)' % self.run_func
    syscall_part = ' (syscall)' if self.is_syscall else ''
    inline_part = '' if self.use_state_arguments else ' (inline)'
    stub_part = ' (stub)' if self.is_stub else ''
    return (self.display_name, cont_part, syscall_part, inline_part, stub_part)
[ "def", "_describe_me", "(", "self", ")", ":", "return", "(", "self", ".", "display_name", ",", "' (cont: %s)'", "%", "self", ".", "run_func", "if", "self", ".", "is_continuation", "else", "''", ",", "' (syscall)'", "if", "self", ".", "is_syscall", "else", ...
return a 5-tuple of strings sufficient for formatting with ``%s%s%s%s%s`` to verbosely describe the procedure
[ "return", "a", "5", "-", "tuple", "of", "strings", "sufficient", "for", "formatting", "with", "%s%s%s%s%s", "to", "verbosely", "describe", "the", "procedure" ]
python
train
pypa/pipenv
pipenv/project.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/project.py#L462-L465
def register_proper_name(self, name):
    """Registers a proper name to the database."""
    path = self.proper_names_db_path
    # Append-mode keeps previously registered names intact.
    with path.open("a") as handle:
        handle.write(u"{0}\n".format(name))
[ "def", "register_proper_name", "(", "self", ",", "name", ")", ":", "with", "self", ".", "proper_names_db_path", ".", "open", "(", "\"a\"", ")", "as", "f", ":", "f", ".", "write", "(", "u\"{0}\\n\"", ".", "format", "(", "name", ")", ")" ]
Registers a proper name to the database.
[ "Registers", "a", "proper", "name", "to", "the", "database", "." ]
python
train
OpenVolunteeringPlatform/django-ovp-search
ovp_search/filters.py
https://github.com/OpenVolunteeringPlatform/django-ovp-search/blob/003ceecc0a87be31fe8195f65367c52631f72b57/ovp_search/filters.py#L78-L87
def by_skills(queryset, skill_string=None):
    """ Filter queryset by a comma-delimited skill list """
    if not skill_string:
        return queryset

    operator, skills = get_operator_and_items(skill_string)
    query = SQ()
    # Combine one sub-query per non-empty skill under the parsed operator.
    for skill in skills:
        if len(skill) > 0:
            query.add(SQ(skills=skill), operator)
    return queryset.filter(query)
[ "def", "by_skills", "(", "queryset", ",", "skill_string", "=", "None", ")", ":", "if", "skill_string", ":", "operator", ",", "items", "=", "get_operator_and_items", "(", "skill_string", ")", "q_obj", "=", "SQ", "(", ")", "for", "s", "in", "items", ":", "...
Filter queryset by a comma-delimited skill list
[ "Filter", "queryset", "by", "a", "comma", "delimeted", "skill", "list" ]
python
train
CS207-Final-Project-Group-10/cs207-FinalProject
solar_system/earth_sun.py
https://github.com/CS207-Final-Project-Group-10/cs207-FinalProject/blob/842e9c2d3ca1490cef18c086dfde81856d8d3a82/solar_system/earth_sun.py#L131-L170
def accel_ES(q: np.ndarray):
    """
    Compute the gravitational accelerations in the earth-sun system.
    q is a row vector of 6 elements: sun (x, y, z), earth (x, y, z)
    """
    # Two bodies, three spatial dimensions each.
    num_bodies: int = 2
    dims = 3 * num_bodies

    # Masses: body 0 is the sun, body 1 is the earth.
    m_sun = mass[0]
    m_earth = mass[1]

    # Positions of the sun and earth as 3-vectors.
    pos_sun = q[slices[0]]
    pos_earth = q[slices[1]]

    # Displacement from sun to earth, its length, and the unit direction.
    disp: np.ndarray = pos_earth - pos_sun
    dist: float = np.linalg.norm(disp)
    direction = disp / dist

    # Newtonian gravity: |F| = G * m0 * m1 / r^2
    force: float = (G * m_sun * m_earth) / (dist ** 2)

    # The force is attractive: the sun is pulled toward the earth and
    # the earth toward the sun (opposite sign along `direction`).
    accel: np.ndarray = np.zeros(dims)
    accel[slices[0]] += force * direction / m_sun
    accel[slices[1]] -= force * direction / m_earth

    return accel
[ "def", "accel_ES", "(", "q", ":", "np", ".", "ndarray", ")", ":", "# Number of celestial bodies", "num_bodies", ":", "int", "=", "2", "# Number of dimensions in arrays; 3 spatial dimensions times the number of bodies", "dims", "=", "3", "*", "num_bodies", "# Body 0 is the...
Compute the gravitational accelerations in the earth-sun system. q in row vector of 6 elements: sun (x, y, z), earth (x, y, z)
[ "Compute", "the", "gravitational", "accelerations", "in", "the", "earth", "-", "sun", "system", ".", "q", "in", "row", "vector", "of", "6", "elements", ":", "sun", "(", "x", "y", "z", ")", "earth", "(", "x", "y", "z", ")" ]
python
train
hydpy-dev/hydpy
hydpy/models/lland/lland_model.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/lland/lland_model.py#L279-L394
def calc_nbes_inzp_v1(self):
    """Calculate stand precipitation and update the interception
    storage accordingly.

    Required control parameters:
      |NHRU|
      |Lnk|

    Required derived parameter:
      |KInz|

    Required flux sequence:
      |NKor|

    Calculated flux sequence:
      |NBes|

    Updated state sequence:
      |Inzp|

    Additional requirements:
      |Model.idx_sim|

    Basic equation:
      :math:`NBes = \\Bigl \\lbrace
      {
      {PKor \\ | \\ Inzp = KInz}
      \\atop
      {0 \\ | \\ Inzp < KInz}
      }`

    For water areas (|WASSER|, |FLUSS| and |SEE|), both stand
    precipitation and the interception storage are always set to zero;
    their |KInz|, |Inzp| and |NKor| values are ignored completely. For
    all other land use classes, the interception storage is filled with
    corrected precipitation up to the month-specific interception
    capacity |KInz|; everything beyond that capacity becomes stand
    precipitation |NBes|. Note that capacities change discontinuously
    between consecutive months, which can release small stand
    precipitation events even without any precipitation.
    """
    con = self.parameters.control.fastaccess
    der = self.parameters.derived.fastaccess
    flu = self.sequences.fluxes.fastaccess
    sta = self.sequences.states.fastaccess
    month = der.moy[self.idx_sim]
    for k in range(con.nhru):
        if con.lnk[k] in (WASSER, FLUSS, SEE):
            # Water areas neither intercept nor pass on precipitation.
            flu.nbes[k] = 0.
            sta.inzp[k] = 0.
        else:
            capacity = der.kinz[con.lnk[k]-1, month]
            surplus = flu.nkor[k]+sta.inzp[k]-capacity
            flu.nbes[k] = max(surplus, 0.)
            sta.inzp[k] += flu.nkor[k]-flu.nbes[k]
[ "def", "calc_nbes_inzp_v1", "(", "self", ")", ":", "con", "=", "self", ".", "parameters", ".", "control", ".", "fastaccess", "der", "=", "self", ".", "parameters", ".", "derived", ".", "fastaccess", "flu", "=", "self", ".", "sequences", ".", "fluxes", "....
Calculate stand precipitation and update the interception storage accordingly. Required control parameters: |NHRU| |Lnk| Required derived parameter: |KInz| Required flux sequence: |NKor| Calculated flux sequence: |NBes| Updated state sequence: |Inzp| Additional requirements: |Model.idx_sim| Basic equation: :math:`NBes = \\Bigl \\lbrace { {PKor \\ | \\ Inzp = KInz} \\atop {0 \\ | \\ Inzp < KInz} }` Examples: Initialize five HRUs with different land usages: >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> nhru(5) >>> lnk(SIED_D, FEUCHT, GLETS, FLUSS, SEE) Define |KInz| values for July the selected land usages directly: >>> derived.kinz.sied_d_jul = 2.0 >>> derived.kinz.feucht_jul = 1.0 >>> derived.kinz.glets_jul = 0.0 >>> derived.kinz.fluss_jul = 1.0 >>> derived.kinz.see_jul = 1.0 Now we prepare a |MOY| object, that assumes that the first, second, and third simulation time steps are in June, July, and August respectively (we make use of the value defined above for July, but setting the values of parameter |MOY| this way allows for a more rigorous testing of proper indexing): >>> derived.moy.shape = 3 >>> derived.moy = 5, 6, 7 >>> model.idx_sim = 1 The dense settlement (|SIED_D|), the wetland area (|FEUCHT|), and both water areas (|FLUSS| and |SEE|) start with a initial interception storage of 1/2 mm, the glacier (|GLETS|) and water areas (|FLUSS| and |SEE|) start with 0 mm. In the first example, actual precipition is 1 mm: >>> states.inzp = 0.5, 0.5, 0.0, 1.0, 1.0 >>> fluxes.nkor = 1.0 >>> model.calc_nbes_inzp_v1() >>> states.inzp inzp(1.5, 1.0, 0.0, 0.0, 0.0) >>> fluxes.nbes nbes(0.0, 0.5, 1.0, 0.0, 0.0) Only for the settled area, interception capacity is not exceeded, meaning no stand precipitation occurs. Note that it is common in define zero interception capacities for glacier areas, but not mandatory. 
Also note that the |KInz|, |Inzp| and |NKor| values given for both water areas are ignored completely, and |Inzp| and |NBes| are simply set to zero. If there is no precipitation, there is of course also no stand precipitation and interception storage remains unchanged: >>> states.inzp = 0.5, 0.5, 0.0, 0.0, 0.0 >>> fluxes.nkor = 0. >>> model.calc_nbes_inzp_v1() >>> states.inzp inzp(0.5, 0.5, 0.0, 0.0, 0.0) >>> fluxes.nbes nbes(0.0, 0.0, 0.0, 0.0, 0.0) Interception capacities change discontinuously between consecutive months. This can result in little stand precipitation events in periods without precipitation: >>> states.inzp = 1.0, 0.0, 0.0, 0.0, 0.0 >>> derived.kinz.sied_d_jul = 0.6 >>> fluxes.nkor = 0.0 >>> model.calc_nbes_inzp_v1() >>> states.inzp inzp(0.6, 0.0, 0.0, 0.0, 0.0) >>> fluxes.nbes nbes(0.4, 0.0, 0.0, 0.0, 0.0)
[ "Calculate", "stand", "precipitation", "and", "update", "the", "interception", "storage", "accordingly", "." ]
python
train
chrisspen/burlap
burlap/dj.py
https://github.com/chrisspen/burlap/blob/a92b0a8e5206850bb777c74af8421ea8b33779bd/burlap/dj.py#L446-L517
def load_django_settings(self): """ Loads Django settings for the current site and sets them so Django internals can be run. """ r = self.local_renderer # Save environment variables so we can restore them later. _env = {} save_vars = ['ALLOW_CELERY', 'DJANGO_SETTINGS_MODULE'] for var_name in save_vars: _env[var_name] = os.environ.get(var_name) try: # Allow us to import local app modules. if r.env.local_project_dir: sys.path.insert(0, r.env.local_project_dir) #TODO:remove this once bug in django-celery has been fixed os.environ['ALLOW_CELERY'] = '0' # print('settings_module:', r.format(r.env.settings_module)) os.environ['DJANGO_SETTINGS_MODULE'] = r.format(r.env.settings_module) # os.environ['CELERY_LOADER'] = 'django' # os.environ['SITE'] = r.genv.SITE or r.genv.default_site # os.environ['ROLE'] = r.genv.ROLE or r.genv.default_role # In Django >= 1.7, fixes the error AppRegistryNotReady: Apps aren't loaded yet # Disabling, in Django >= 1.10, throws exception: # RuntimeError: Model class django.contrib.contenttypes.models.ContentType # doesn't declare an explicit app_label and isn't in an application in INSTALLED_APPS. # try: # from django.core.wsgi import get_wsgi_application # application = get_wsgi_application() # except (ImportError, RuntimeError): # raise # print('Unable to get wsgi application.') # traceback.print_exc() # In Django >= 1.7, fixes the error AppRegistryNotReady: Apps aren't loaded yet try: import django django.setup() except AttributeError: # This doesn't exist in Django < 1.7, so ignore it. pass # Load Django settings. 
settings = self.get_settings() try: from django.contrib import staticfiles from django.conf import settings as _settings # get_settings() doesn't raise ImportError but returns None instead if settings is not None: for k, v in settings.__dict__.items(): setattr(_settings, k, v) else: raise ImportError except (ImportError, RuntimeError): print('Unable to load settings.') traceback.print_exc() finally: # Restore environment variables. for var_name, var_value in _env.items(): if var_value is None: del os.environ[var_name] else: os.environ[var_name] = var_value return settings
[ "def", "load_django_settings", "(", "self", ")", ":", "r", "=", "self", ".", "local_renderer", "# Save environment variables so we can restore them later.", "_env", "=", "{", "}", "save_vars", "=", "[", "'ALLOW_CELERY'", ",", "'DJANGO_SETTINGS_MODULE'", "]", "for", "v...
Loads Django settings for the current site and sets them so Django internals can be run.
[ "Loads", "Django", "settings", "for", "the", "current", "site", "and", "sets", "them", "so", "Django", "internals", "can", "be", "run", "." ]
python
valid
gabstopper/smc-python
smc/examples/ip_lists.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/examples/ip_lists.py#L113-L125
def download_as_json(name): """ Download IPList as json. This would allow for easily manipulation of the IPList, but generally recommended only for smaller lists :param str name: name of IPList :return: None """ location = list(IPList.objects.filter(name)) if location: iplist = location[0] return iplist.download(as_type='json')
[ "def", "download_as_json", "(", "name", ")", ":", "location", "=", "list", "(", "IPList", ".", "objects", ".", "filter", "(", "name", ")", ")", "if", "location", ":", "iplist", "=", "location", "[", "0", "]", "return", "iplist", ".", "download", "(", ...
Download IPList as json. This would allow for easily manipulation of the IPList, but generally recommended only for smaller lists :param str name: name of IPList :return: None
[ "Download", "IPList", "as", "json", ".", "This", "would", "allow", "for", "easily", "manipulation", "of", "the", "IPList", "but", "generally", "recommended", "only", "for", "smaller", "lists" ]
python
train
GuiltyTargets/ppi-network-annotation
src/ppi_network_annotation/parsers.py
https://github.com/GuiltyTargets/ppi-network-annotation/blob/4d7b6713485f2d0a0957e6457edc1b1b5a237460/src/ppi_network_annotation/parsers.py#L172-L189
def parse_disease_associations(path: str, excluded_disease_ids: set): """Parse the disease-drug target associations file. :param str path: Path to the disease-drug target associations file. :param list excluded_disease_ids: Identifiers of the disease for which drug targets are being predicted. :return: Dictionary of drug target-disease mappings. """ if os.path.isdir(path) or not os.path.exists(path): logger.info("Couldn't find the disease associations file. Returning empty list.") return {} disease_associations = defaultdict(list) with open(path) as input_file: for line in input_file: target_id, disease_id = line.strip().split(" ") if disease_id not in excluded_disease_ids: disease_associations[target_id].append(disease_id) return disease_associations
[ "def", "parse_disease_associations", "(", "path", ":", "str", ",", "excluded_disease_ids", ":", "set", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", "or", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "logger", ...
Parse the disease-drug target associations file. :param str path: Path to the disease-drug target associations file. :param list excluded_disease_ids: Identifiers of the disease for which drug targets are being predicted. :return: Dictionary of drug target-disease mappings.
[ "Parse", "the", "disease", "-", "drug", "target", "associations", "file", "." ]
python
train
libtcod/python-tcod
tcod/libtcodpy.py
https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/libtcodpy.py#L1973-L1985
def path_new_using_map( m: tcod.map.Map, dcost: float = 1.41 ) -> tcod.path.AStar: """Return a new AStar using the given Map. Args: m (Map): A Map instance. dcost (float): The path-finding cost of diagonal movement. Can be set to 0 to disable diagonal movement. Returns: AStar: A new AStar instance. """ return tcod.path.AStar(m, dcost)
[ "def", "path_new_using_map", "(", "m", ":", "tcod", ".", "map", ".", "Map", ",", "dcost", ":", "float", "=", "1.41", ")", "->", "tcod", ".", "path", ".", "AStar", ":", "return", "tcod", ".", "path", ".", "AStar", "(", "m", ",", "dcost", ")" ]
Return a new AStar using the given Map. Args: m (Map): A Map instance. dcost (float): The path-finding cost of diagonal movement. Can be set to 0 to disable diagonal movement. Returns: AStar: A new AStar instance.
[ "Return", "a", "new", "AStar", "using", "the", "given", "Map", "." ]
python
train
google/grr
grr/client/grr_response_client/client_actions/cloud.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/client_actions/cloud.py#L34-L42
def IsCloud(self, request, bios_version, services): """Test to see if we're on a cloud machine.""" if request.bios_version_regex and bios_version: if re.match(request.bios_version_regex, bios_version): return True if request.service_name_regex and services: if re.search(request.service_name_regex, services): return True return False
[ "def", "IsCloud", "(", "self", ",", "request", ",", "bios_version", ",", "services", ")", ":", "if", "request", ".", "bios_version_regex", "and", "bios_version", ":", "if", "re", ".", "match", "(", "request", ".", "bios_version_regex", ",", "bios_version", "...
Test to see if we're on a cloud machine.
[ "Test", "to", "see", "if", "we", "re", "on", "a", "cloud", "machine", "." ]
python
train
cole/aiosmtplib
src/aiosmtplib/email.py
https://github.com/cole/aiosmtplib/blob/0cd00e5059005371cbdfca995feff9183a16a51f/src/aiosmtplib/email.py#L84-L105
def _extract_recipients( message: Message, resent_dates: List[Union[str, Header]] = None ) -> List[str]: """ Extract the recipients from the message object given. """ recipients = [] # type: List[str] if resent_dates: recipient_headers = ("Resent-To", "Resent-Cc", "Resent-Bcc") else: recipient_headers = ("To", "Cc", "Bcc") for header in recipient_headers: recipients.extend(message.get_all(header, [])) # type: ignore parsed_recipients = [ str(email.utils.formataddr(address)) for address in email.utils.getaddresses(recipients) ] return parsed_recipients
[ "def", "_extract_recipients", "(", "message", ":", "Message", ",", "resent_dates", ":", "List", "[", "Union", "[", "str", ",", "Header", "]", "]", "=", "None", ")", "->", "List", "[", "str", "]", ":", "recipients", "=", "[", "]", "# type: List[str]", "...
Extract the recipients from the message object given.
[ "Extract", "the", "recipients", "from", "the", "message", "object", "given", "." ]
python
train
insomnia-lab/libreant
archivant/archivant.py
https://github.com/insomnia-lab/libreant/blob/55d529435baf4c05a86b8341899e9f5e14e50245/archivant/archivant.py#L340-L349
def update_volume(self, volumeID, metadata): '''update existing volume metadata the given metadata will substitute the old one ''' log.debug('updating volume metadata: {}'.format(volumeID)) rawVolume = self._req_raw_volume(volumeID) normalized = self.normalize_volume(rawVolume) normalized['metadata'] = metadata _, newRawVolume = self.denormalize_volume(normalized) self._db.modify_book(volumeID, newRawVolume)
[ "def", "update_volume", "(", "self", ",", "volumeID", ",", "metadata", ")", ":", "log", ".", "debug", "(", "'updating volume metadata: {}'", ".", "format", "(", "volumeID", ")", ")", "rawVolume", "=", "self", ".", "_req_raw_volume", "(", "volumeID", ")", "no...
update existing volume metadata the given metadata will substitute the old one
[ "update", "existing", "volume", "metadata", "the", "given", "metadata", "will", "substitute", "the", "old", "one" ]
python
train
tobami/littlechef
littlechef/lib.py
https://github.com/tobami/littlechef/blob/aab8c94081b38100a69cc100bc4278ae7419c58e/littlechef/lib.py#L174-L191
def get_nodes_with_recipe(recipe_name, environment=None): """Get all nodes which include a given recipe, prefix-searches are also supported """ prefix_search = recipe_name.endswith("*") if prefix_search: recipe_name = recipe_name.rstrip("*") for n in get_nodes(environment): recipes = get_recipes_in_node(n) for role in get_roles_in_node(n, recursive=True): recipes.extend(get_recipes_in_role(role)) if prefix_search: if any(recipe.startswith(recipe_name) for recipe in recipes): yield n else: if recipe_name in recipes: yield n
[ "def", "get_nodes_with_recipe", "(", "recipe_name", ",", "environment", "=", "None", ")", ":", "prefix_search", "=", "recipe_name", ".", "endswith", "(", "\"*\"", ")", "if", "prefix_search", ":", "recipe_name", "=", "recipe_name", ".", "rstrip", "(", "\"*\"", ...
Get all nodes which include a given recipe, prefix-searches are also supported
[ "Get", "all", "nodes", "which", "include", "a", "given", "recipe", "prefix", "-", "searches", "are", "also", "supported" ]
python
train
robertpeteuil/aws-shortcuts
awss/core.py
https://github.com/robertpeteuil/aws-shortcuts/blob/cf453ca996978a4d88015d1cf6125bce8ca4873b/awss/core.py#L376-L400
def qry_helper(flag_id, qry_string, param_str, flag_filt=False, filt_st=""): """Dynamically add syntaxtical elements to query. This functions adds syntactical elements to the query string, and report title, based on the types and number of items added thus far. Args: flag_filt (bool): at least one filter item specified. qry_string (str): portion of the query constructed thus far. param_str (str): the title to display before the list. flag_id (bool): optional - instance-id was specified. filt_st (str): optional - syntax to add on end if filter specified. Returns: qry_string (str): the portion of the query that was passed in with the appropriate syntactical elements added. param_str (str): the title to display before the list. """ if flag_id or flag_filt: qry_string += ", " param_str += ", " if not flag_filt: qry_string += filt_st return (qry_string, param_str)
[ "def", "qry_helper", "(", "flag_id", ",", "qry_string", ",", "param_str", ",", "flag_filt", "=", "False", ",", "filt_st", "=", "\"\"", ")", ":", "if", "flag_id", "or", "flag_filt", ":", "qry_string", "+=", "\", \"", "param_str", "+=", "\", \"", "if", "not"...
Dynamically add syntaxtical elements to query. This functions adds syntactical elements to the query string, and report title, based on the types and number of items added thus far. Args: flag_filt (bool): at least one filter item specified. qry_string (str): portion of the query constructed thus far. param_str (str): the title to display before the list. flag_id (bool): optional - instance-id was specified. filt_st (str): optional - syntax to add on end if filter specified. Returns: qry_string (str): the portion of the query that was passed in with the appropriate syntactical elements added. param_str (str): the title to display before the list.
[ "Dynamically", "add", "syntaxtical", "elements", "to", "query", "." ]
python
train
log2timeline/plaso
plaso/engine/path_helper.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/engine/path_helper.py#L238-L286
def ExpandWindowsPath(cls, path, environment_variables): """Expands a Windows path containing environment variables. Args: path (str): Windows path with environment variables. environment_variables (list[EnvironmentVariableArtifact]): environment variables. Returns: str: expanded Windows path. """ if environment_variables is None: environment_variables = [] lookup_table = {} if environment_variables: for environment_variable in environment_variables: attribute_name = environment_variable.name.upper() attribute_value = environment_variable.value if not isinstance(attribute_value, py2to3.STRING_TYPES): continue lookup_table[attribute_name] = attribute_value path_segments = path.split('\\') # Make a copy of path_segments since this loop can change it. for index, path_segment in enumerate(list(path_segments)): if (len(path_segment) <= 2 or not path_segment.startswith('%') or not path_segment.endswith('%')): continue path_segment_upper_case = path_segment.upper() if path_segment_upper_case.startswith('%%ENVIRON_'): lookup_key = path_segment_upper_case[10:-2] else: lookup_key = path_segment_upper_case[1:-1] path_segment = lookup_table.get(lookup_key, path_segment) path_segment = path_segment.split('\\') expanded_path_segments = list(path_segments[:index]) expanded_path_segments.extend(path_segment) expanded_path_segments.extend(path_segments[index + 1:]) path_segments = expanded_path_segments if cls._IsWindowsDrivePathSegment(path_segments[0]): path_segments[0] = '' return '\\'.join(path_segments)
[ "def", "ExpandWindowsPath", "(", "cls", ",", "path", ",", "environment_variables", ")", ":", "if", "environment_variables", "is", "None", ":", "environment_variables", "=", "[", "]", "lookup_table", "=", "{", "}", "if", "environment_variables", ":", "for", "envi...
Expands a Windows path containing environment variables. Args: path (str): Windows path with environment variables. environment_variables (list[EnvironmentVariableArtifact]): environment variables. Returns: str: expanded Windows path.
[ "Expands", "a", "Windows", "path", "containing", "environment", "variables", "." ]
python
train
OCHA-DAP/hdx-python-api
src/hdx/data/dataset.py
https://github.com/OCHA-DAP/hdx-python-api/blob/212440f54f73805826a16db77dbcb6033b18a313/src/hdx/data/dataset.py#L1123-L1137
def add_region_location(self, region, locations=None, use_live=True): # type: (str, Optional[List[str]], bool) -> bool """Add all countries in a region. If a 3 digit UNStats M49 region code is not provided, value is parsed as a region name. If any country is already added, it is ignored. Args: region (str): M49 region, intermediate region or subregion to add locations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX. use_live (bool): Try to get use latest country data from web rather than file in package. Defaults to True. Returns: bool: True if all countries in region added or False if any already present. """ return self.add_country_locations(Country.get_countries_in_region(region, exception=HDXError, use_live=use_live), locations=locations)
[ "def", "add_region_location", "(", "self", ",", "region", ",", "locations", "=", "None", ",", "use_live", "=", "True", ")", ":", "# type: (str, Optional[List[str]], bool) -> bool", "return", "self", ".", "add_country_locations", "(", "Country", ".", "get_countries_in_...
Add all countries in a region. If a 3 digit UNStats M49 region code is not provided, value is parsed as a region name. If any country is already added, it is ignored. Args: region (str): M49 region, intermediate region or subregion to add locations (Optional[List[str]]): Valid locations list. Defaults to list downloaded from HDX. use_live (bool): Try to get use latest country data from web rather than file in package. Defaults to True. Returns: bool: True if all countries in region added or False if any already present.
[ "Add", "all", "countries", "in", "a", "region", ".", "If", "a", "3", "digit", "UNStats", "M49", "region", "code", "is", "not", "provided", "value", "is", "parsed", "as", "a", "region", "name", ".", "If", "any", "country", "is", "already", "added", "it"...
python
train
tensorflow/tensor2tensor
tensor2tensor/models/research/transformer_moe.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/transformer_moe.py#L315-L327
def transformer_moe_8k(): """Hyper parameters specifics for long sequence generation.""" hparams = transformer_moe_base() hparams.batch_size = 8192 hparams.max_length = 0 # max_length == batch_size hparams.eval_drop_long_sequences = True hparams.min_length_bucket = 256 # Avoid cyclic problems for big batches hparams.default_ff = "sep" hparams.hidden_size = 1024 return hparams
[ "def", "transformer_moe_8k", "(", ")", ":", "hparams", "=", "transformer_moe_base", "(", ")", "hparams", ".", "batch_size", "=", "8192", "hparams", ".", "max_length", "=", "0", "# max_length == batch_size", "hparams", ".", "eval_drop_long_sequences", "=", "True", ...
Hyper parameters specifics for long sequence generation.
[ "Hyper", "parameters", "specifics", "for", "long", "sequence", "generation", "." ]
python
train
pyQode/pyqode.core
pyqode/core/widgets/interactive.py
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/widgets/interactive.py#L98-L113
def set_writer(self, writer): """ Changes the writer function to handle writing to the text edit. A writer function must have the following prototype: .. code-block:: python def write(text_edit, text, color) :param writer: write function as described above. """ if self._writer != writer and self._writer: self._writer = None if writer: self._writer = writer
[ "def", "set_writer", "(", "self", ",", "writer", ")", ":", "if", "self", ".", "_writer", "!=", "writer", "and", "self", ".", "_writer", ":", "self", ".", "_writer", "=", "None", "if", "writer", ":", "self", ".", "_writer", "=", "writer" ]
Changes the writer function to handle writing to the text edit. A writer function must have the following prototype: .. code-block:: python def write(text_edit, text, color) :param writer: write function as described above.
[ "Changes", "the", "writer", "function", "to", "handle", "writing", "to", "the", "text", "edit", "." ]
python
train
RIPE-NCC/ripe-atlas-cousteau
ripe/atlas/cousteau/stream.py
https://github.com/RIPE-NCC/ripe-atlas-cousteau/blob/ffee2556aaa4df86525b88c269bb098de11678ec/ripe/atlas/cousteau/stream.py#L158-L165
def subscribe(self, stream_type, **parameters): """Subscribe to stream with give parameters.""" parameters["stream_type"] = stream_type if (stream_type == "result") and ("buffering" not in parameters): parameters["buffering"] = True self.socketIO.emit(self.EVENT_NAME_SUBSCRIBE, parameters)
[ "def", "subscribe", "(", "self", ",", "stream_type", ",", "*", "*", "parameters", ")", ":", "parameters", "[", "\"stream_type\"", "]", "=", "stream_type", "if", "(", "stream_type", "==", "\"result\"", ")", "and", "(", "\"buffering\"", "not", "in", "parameter...
Subscribe to stream with give parameters.
[ "Subscribe", "to", "stream", "with", "give", "parameters", "." ]
python
train
fjwCode/cerium
cerium/utils.py
https://github.com/fjwCode/cerium/blob/f6e06e0dcf83a0bc924828e9d6cb81383ed2364f/cerium/utils.py#L34-L53
def is_connectable(host: str, port: Union[int, str]) -> bool: """Tries to connect to the device to see if it is connectable. Args: host: The host to connect. port: The port to connect. Returns: True or False. """ socket_ = None try: socket_ = socket.create_connection((host, port), 1) result = True except socket.timeout: result = False finally: if socket_: socket_.close() return result
[ "def", "is_connectable", "(", "host", ":", "str", ",", "port", ":", "Union", "[", "int", ",", "str", "]", ")", "->", "bool", ":", "socket_", "=", "None", "try", ":", "socket_", "=", "socket", ".", "create_connection", "(", "(", "host", ",", "port", ...
Tries to connect to the device to see if it is connectable. Args: host: The host to connect. port: The port to connect. Returns: True or False.
[ "Tries", "to", "connect", "to", "the", "device", "to", "see", "if", "it", "is", "connectable", "." ]
python
train
ShenggaoZhu/midict
midict/__init__.py
https://github.com/ShenggaoZhu/midict/blob/2fad2edcfb753035b443a70fe15852affae1b5bb/midict/__init__.py#L1412-L1444
def update(self, *args, **kw): ''' Update the dictionary with items and names:: (items, names, **kw) (dict, names, **kw) (MIDict, names, **kw) Optional positional argument ``names`` is only allowed when ``self.indices`` is empty (no indices are set yet). ''' if len(args) > 1 and self.indices: raise ValueError('Only one positional argument is allowed when the' 'index names are already set.') if not self.indices: # empty; init again _MI_init(self, *args, **kw) return d = MIMapping(*args, **kw) if not d.indices: return names = force_list(self.indices.keys()) if len(d.indices) != len(names): raise ValueError('Length of update items (%s) does not match ' 'length of original items (%s)' % (len(d.indices), len(names))) for key in d: # use __setitem__() to handle duplicate self[key] = d[key]
[ "def", "update", "(", "self", ",", "*", "args", ",", "*", "*", "kw", ")", ":", "if", "len", "(", "args", ")", ">", "1", "and", "self", ".", "indices", ":", "raise", "ValueError", "(", "'Only one positional argument is allowed when the'", "'index names are al...
Update the dictionary with items and names:: (items, names, **kw) (dict, names, **kw) (MIDict, names, **kw) Optional positional argument ``names`` is only allowed when ``self.indices`` is empty (no indices are set yet).
[ "Update", "the", "dictionary", "with", "items", "and", "names", "::" ]
python
train
neuropsychology/NeuroKit.py
examples/UnderDev/eeg/eeg_time_frequency.py
https://github.com/neuropsychology/NeuroKit.py/blob/c9589348fbbde0fa7e986048c48f38e6b488adfe/examples/UnderDev/eeg/eeg_time_frequency.py#L20-L87
def eeg_name_frequencies(freqs): """ Name frequencies according to standart classifications. Parameters ---------- freqs : list or numpy.array list of floats containing frequencies to classify. Returns ---------- freqs_names : list Named frequencies Example ---------- >>> import neurokit as nk >>> >>> nk.eeg_name_frequencies([0.5, 1.5, 3, 5, 7, 15]) Notes ---------- *Details* - Delta: 1-3Hz - Theta: 4-7Hz - Alpha1: 8-9Hz - Alpha2: 10-12Hz - Beta1: 13-17Hz - Beta2: 18-30Hz - Gamma1: 31-40Hz - Gamma2: 41-50Hz - Mu: 8-13Hz *Authors* - Dominique Makowski (https://github.com/DominiqueMakowski) References ------------ - None """ freqs = list(freqs) freqs_names = [] for freq in freqs: if freq < 1: freqs_names.append("UltraLow") elif freq <= 3: freqs_names.append("Delta") elif freq <= 7: freqs_names.append("Theta") elif freq <= 9: freqs_names.append("Alpha1/Mu") elif freq <= 12: freqs_names.append("Alpha2/Mu") elif freq <= 13: freqs_names.append("Beta1/Mu") elif freq <= 17: freqs_names.append("Beta1") elif freq <= 30: freqs_names.append("Beta2") elif freq <= 40: freqs_names.append("Gamma1") elif freq <= 50: freqs_names.append("Gamma2") else: freqs_names.append("UltraHigh") return(freqs_names)
[ "def", "eeg_name_frequencies", "(", "freqs", ")", ":", "freqs", "=", "list", "(", "freqs", ")", "freqs_names", "=", "[", "]", "for", "freq", "in", "freqs", ":", "if", "freq", "<", "1", ":", "freqs_names", ".", "append", "(", "\"UltraLow\"", ")", "elif"...
Name frequencies according to standart classifications. Parameters ---------- freqs : list or numpy.array list of floats containing frequencies to classify. Returns ---------- freqs_names : list Named frequencies Example ---------- >>> import neurokit as nk >>> >>> nk.eeg_name_frequencies([0.5, 1.5, 3, 5, 7, 15]) Notes ---------- *Details* - Delta: 1-3Hz - Theta: 4-7Hz - Alpha1: 8-9Hz - Alpha2: 10-12Hz - Beta1: 13-17Hz - Beta2: 18-30Hz - Gamma1: 31-40Hz - Gamma2: 41-50Hz - Mu: 8-13Hz *Authors* - Dominique Makowski (https://github.com/DominiqueMakowski) References ------------ - None
[ "Name", "frequencies", "according", "to", "standart", "classifications", "." ]
python
train
jjgomera/iapws
iapws/iapws97.py
https://github.com/jjgomera/iapws/blob/1e5812aab38212fb8a63736f61cdcfa427d223b1/iapws/iapws97.py#L2220-L2270
def _Backward3a_T_Ph(P, h): """Backward equation for region 3a, T=f(P,h) Parameters ---------- P : float Pressure, [MPa] h : float Specific enthalpy, [kJ/kg] Returns ------- T : float Temperature, [K] References ---------- IAPWS, Revised Supplementary Release on Backward Equations for the Functions T(p,h), v(p,h) and T(p,s), v(p,s) for Region 3 of the IAPWS Industrial Formulation 1997 for the Thermodynamic Properties of Water and Steam, http://www.iapws.org/relguide/Supp-Tv%28ph,ps%293-2014.pdf, Eq 2 Examples -------- >>> _Backward3a_T_Ph(20,1700) 629.3083892 >>> _Backward3a_T_Ph(100,2100) 733.6163014 """ I = [-12, -12, -12, -12, -12, -12, -12, -12, -10, -10, -10, -8, -8, -8, -8, -5, -3, -2, -2, -2, -1, -1, 0, 0, 1, 3, 3, 4, 4, 10, 12] J = [0, 1, 2, 6, 14, 16, 20, 22, 1, 5, 12, 0, 2, 4, 10, 2, 0, 1, 3, 4, 0, 2, 0, 1, 1, 0, 1, 0, 3, 4, 5] n = [-0.133645667811215e-6, 0.455912656802978e-5, -0.146294640700979e-4, 0.639341312970080e-2, 0.372783927268847e3, -0.718654377460447e4, 0.573494752103400e6, -0.267569329111439e7, -0.334066283302614e-4, -0.245479214069597e-1, 0.478087847764996e2, 0.764664131818904e-5, 0.128350627676972e-2, 0.171219081377331e-1, -0.851007304583213e1, -0.136513461629781e-1, -0.384460997596657e-5, 0.337423807911655e-2, -0.551624873066791, 0.729202277107470, -0.992522757376041e-2, -.119308831407288, .793929190615421, .454270731799386, .20999859125991, -0.642109823904738e-2, -0.235155868604540e-1, 0.252233108341612e-2, -0.764885133368119e-2, 0.136176427574291e-1, -0.133027883575669e-1] Pr = P/100. nu = h/2300. suma = 0 for i, j, n in zip(I, J, n): suma += n*(Pr+0.240)**i*(nu-0.615)**j return 760*suma
[ "def", "_Backward3a_T_Ph", "(", "P", ",", "h", ")", ":", "I", "=", "[", "-", "12", ",", "-", "12", ",", "-", "12", ",", "-", "12", ",", "-", "12", ",", "-", "12", ",", "-", "12", ",", "-", "12", ",", "-", "10", ",", "-", "10", ",", "-...
Backward equation for region 3a, T=f(P,h) Parameters ---------- P : float Pressure, [MPa] h : float Specific enthalpy, [kJ/kg] Returns ------- T : float Temperature, [K] References ---------- IAPWS, Revised Supplementary Release on Backward Equations for the Functions T(p,h), v(p,h) and T(p,s), v(p,s) for Region 3 of the IAPWS Industrial Formulation 1997 for the Thermodynamic Properties of Water and Steam, http://www.iapws.org/relguide/Supp-Tv%28ph,ps%293-2014.pdf, Eq 2 Examples -------- >>> _Backward3a_T_Ph(20,1700) 629.3083892 >>> _Backward3a_T_Ph(100,2100) 733.6163014
[ "Backward", "equation", "for", "region", "3a", "T", "=", "f", "(", "P", "h", ")" ]
python
train
bitshares/uptick
uptick/api.py
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/api.py#L28-L37
def create(ctx): """ Create default config file """ import shutil this_dir, this_filename = os.path.split(__file__) default_config_file = os.path.join(this_dir, "apis/example-config.yaml") config_file = ctx.obj["configfile"] shutil.copyfile(default_config_file, config_file) print_message("Config file created: {}".format(config_file))
[ "def", "create", "(", "ctx", ")", ":", "import", "shutil", "this_dir", ",", "this_filename", "=", "os", ".", "path", ".", "split", "(", "__file__", ")", "default_config_file", "=", "os", ".", "path", ".", "join", "(", "this_dir", ",", "\"apis/example-confi...
Create default config file
[ "Create", "default", "config", "file" ]
python
train
PythonCharmers/python-future
src/future/types/newstr.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/types/newstr.py#L179-L218
def encode(self, encoding='utf-8', errors='strict'): """ Returns bytes Encode S using the codec registered for encoding. Default encoding is 'utf-8'. errors may be given to set a different error handling scheme. Default is 'strict' meaning that encoding errors raise a UnicodeEncodeError. Other possible values are 'ignore', 'replace' and 'xmlcharrefreplace' as well as any other name registered with codecs.register_error that can handle UnicodeEncodeErrors. """ from future.types.newbytes import newbytes # Py2 unicode.encode() takes encoding and errors as optional parameter, # not keyword arguments as in Python 3 str. # For the surrogateescape error handling mechanism, the # codecs.register_error() function seems to be inadequate for an # implementation of it when encoding. (Decoding seems fine, however.) # For example, in the case of # u'\udcc3'.encode('ascii', 'surrogateescape_handler') # after registering the ``surrogateescape_handler`` function in # future.utils.surrogateescape, both Python 2.x and 3.x raise an # exception anyway after the function is called because the unicode # string it has to return isn't encodable strictly as ASCII. if errors == 'surrogateescape': if encoding == 'utf-16': # Known to fail here. See test_encoding_works_normally() raise NotImplementedError('FIXME: surrogateescape handling is ' 'not yet implemented properly') # Encode char by char, building up list of byte-strings mybytes = [] for c in self: code = ord(c) if 0xD800 <= code <= 0xDCFF: mybytes.append(newbytes([code - 0xDC00])) else: mybytes.append(c.encode(encoding=encoding)) return newbytes(b'').join(mybytes) return newbytes(super(newstr, self).encode(encoding, errors))
[ "def", "encode", "(", "self", ",", "encoding", "=", "'utf-8'", ",", "errors", "=", "'strict'", ")", ":", "from", "future", ".", "types", ".", "newbytes", "import", "newbytes", "# Py2 unicode.encode() takes encoding and errors as optional parameter,", "# not keyword argu...
Returns bytes Encode S using the codec registered for encoding. Default encoding is 'utf-8'. errors may be given to set a different error handling scheme. Default is 'strict' meaning that encoding errors raise a UnicodeEncodeError. Other possible values are 'ignore', 'replace' and 'xmlcharrefreplace' as well as any other name registered with codecs.register_error that can handle UnicodeEncodeErrors.
[ "Returns", "bytes" ]
python
train
cltk/cltk
cltk/phonology/syllabify.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/phonology/syllabify.py#L23-L72
def get_onsets(text, vowels="aeiou", threshold=0.0002): """ Source: Resonances in Middle High German: New Methodologies in Prosody, 2017, C. L. Hench :param text: str list: text to be analysed :param vowels: str: valid vowels constituting the syllable :param threshold: minimum frequency count for valid onset, C. Hench noted that the algorithm produces the best result for an untagged wordset of MHG, when retaining onsets which appear in at least 0.02% of the words Example: Let's test it on the opening lines of Nibelungenlied >>> text = ['uns', 'ist', 'in', 'alten', 'mæren', 'wunders', 'vil', 'geseit', 'von', 'helden', 'lobebæren', 'von', 'grôzer', 'arebeit', 'von', 'fröuden', 'hôchgezîten', 'von', 'weinen', 'und', 'von', 'klagen', 'von', 'küener', 'recken', 'strîten', 'muget', 'ir', 'nu', 'wunder', 'hœren', 'sagen'] >>> vowels = "aeiouæœôîöü" >>> get_onsets(text, vowels=vowels) ['lt', 'm', 'r', 'w', 'nd', 'v', 'g', 's', 'h', 'ld', 'l', 'b', 'gr', 'z', 'fr', 'd', 'chg', 't', 'n', 'kl', 'k', 'ck', 'str'] Of course, this is an insignificant sample, but we could try and see how modifying the threshold affects the returned onset: >>> get_onsets(text, threshold = 0.05, vowels=vowels) ['m', 'r', 'w', 'nd', 'v', 'g', 's', 'h', 'b', 'z', 't', 'n'] """ onset_dict = defaultdict(lambda: 0) n = len(text) for word in text: onset = '' candidates = [] for l in word: if l not in vowels: onset += l else: if onset != '': candidates.append(onset) onset = '' for c in candidates: onset_dict[c] += 1 return [onset for onset, i in onset_dict.items() if i/n > threshold]
[ "def", "get_onsets", "(", "text", ",", "vowels", "=", "\"aeiou\"", ",", "threshold", "=", "0.0002", ")", ":", "onset_dict", "=", "defaultdict", "(", "lambda", ":", "0", ")", "n", "=", "len", "(", "text", ")", "for", "word", "in", "text", ":", "onset"...
Source: Resonances in Middle High German: New Methodologies in Prosody, 2017, C. L. Hench :param text: str list: text to be analysed :param vowels: str: valid vowels constituting the syllable :param threshold: minimum frequency count for valid onset, C. Hench noted that the algorithm produces the best result for an untagged wordset of MHG, when retaining onsets which appear in at least 0.02% of the words Example: Let's test it on the opening lines of Nibelungenlied >>> text = ['uns', 'ist', 'in', 'alten', 'mæren', 'wunders', 'vil', 'geseit', 'von', 'helden', 'lobebæren', 'von', 'grôzer', 'arebeit', 'von', 'fröuden', 'hôchgezîten', 'von', 'weinen', 'und', 'von', 'klagen', 'von', 'küener', 'recken', 'strîten', 'muget', 'ir', 'nu', 'wunder', 'hœren', 'sagen'] >>> vowels = "aeiouæœôîöü" >>> get_onsets(text, vowels=vowels) ['lt', 'm', 'r', 'w', 'nd', 'v', 'g', 's', 'h', 'ld', 'l', 'b', 'gr', 'z', 'fr', 'd', 'chg', 't', 'n', 'kl', 'k', 'ck', 'str'] Of course, this is an insignificant sample, but we could try and see how modifying the threshold affects the returned onset: >>> get_onsets(text, threshold = 0.05, vowels=vowels) ['m', 'r', 'w', 'nd', 'v', 'g', 's', 'h', 'b', 'z', 't', 'n']
[ "Source", ":", "Resonances", "in", "Middle", "High", "German", ":", "New", "Methodologies", "in", "Prosody", "2017", "C", ".", "L", ".", "Hench" ]
python
train
tensorforce/tensorforce
tensorforce/models/model.py
https://github.com/tensorforce/tensorforce/blob/520a8d992230e382f08e315ede5fc477f5e26bfb/tensorforce/models/model.py#L1481-L1498
def observe(self, terminal, reward, index=0): """ Adds an observation (reward and is-terminal) to the model without updating its trainable variables. Args: terminal (List[bool]): List of is-terminal signals. reward (List[float]): List of reward signals. index: (int) parallel episode you want to observe Returns: The value of the model-internal episode counter. """ fetches = self.episode_output feed_dict = self.get_feed_dict(terminal=terminal, reward=reward, index=index) episode = self.monitored_session.run(fetches=fetches, feed_dict=feed_dict) return episode
[ "def", "observe", "(", "self", ",", "terminal", ",", "reward", ",", "index", "=", "0", ")", ":", "fetches", "=", "self", ".", "episode_output", "feed_dict", "=", "self", ".", "get_feed_dict", "(", "terminal", "=", "terminal", ",", "reward", "=", "reward"...
Adds an observation (reward and is-terminal) to the model without updating its trainable variables. Args: terminal (List[bool]): List of is-terminal signals. reward (List[float]): List of reward signals. index: (int) parallel episode you want to observe Returns: The value of the model-internal episode counter.
[ "Adds", "an", "observation", "(", "reward", "and", "is", "-", "terminal", ")", "to", "the", "model", "without", "updating", "its", "trainable", "variables", "." ]
python
valid
redhat-cip/python-dciclient
dciclient/v1/api/file.py
https://github.com/redhat-cip/python-dciclient/blob/a4aa5899062802bbe4c30a075d8447f8d222d214/dciclient/v1/api/file.py#L26-L65
def create(context, name, content=None, file_path=None, mime='text/plain', jobstate_id=None, md5=None, job_id=None, test_id=None): """Method to create a file on the Control-Server This method allows one to upload a file to the Control-Server. The file to be uploaded can be specified in two different ways either by specifying its content directly or or by specifying the file_path where the file is located. content can be in the form of: string, bytes or a file-descriptor. """ if content and file_path: raise Exception('content and file_path are mutually exclusive') elif not content and not file_path: raise Exception( 'At least one of content or file_path must be specified' ) headers = {'DCI-NAME': name, 'DCI-MIME': mime, 'DCI-JOBSTATE-ID': jobstate_id, 'DCI-MD5': md5, 'DCI-JOB-ID': job_id, 'DCI-TEST-ID': test_id} headers = utils.sanitize_kwargs(**headers) uri = '%s/%s' % (context.dci_cs_api, RESOURCE) if content: if not hasattr(content, 'read'): if not isinstance(content, bytes): content = content.encode('utf-8') content = io.BytesIO(content) return context.session.post(uri, headers=headers, data=content) else: if not os.path.exists(file_path): raise FileErrorException() with open(file_path, 'rb') as f: return context.session.post(uri, headers=headers, data=f)
[ "def", "create", "(", "context", ",", "name", ",", "content", "=", "None", ",", "file_path", "=", "None", ",", "mime", "=", "'text/plain'", ",", "jobstate_id", "=", "None", ",", "md5", "=", "None", ",", "job_id", "=", "None", ",", "test_id", "=", "No...
Method to create a file on the Control-Server This method allows one to upload a file to the Control-Server. The file to be uploaded can be specified in two different ways either by specifying its content directly or or by specifying the file_path where the file is located. content can be in the form of: string, bytes or a file-descriptor.
[ "Method", "to", "create", "a", "file", "on", "the", "Control", "-", "Server" ]
python
train
pypa/pipenv
pipenv/patched/notpip/_vendor/html5lib/_inputstream.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_vendor/html5lib/_inputstream.py#L438-L455
def openStream(self, source): """Produces a file object from source. source can be either a file object, local filename or a string. """ # Already a file object if hasattr(source, 'read'): stream = source else: stream = BytesIO(source) try: stream.seek(stream.tell()) except: # pylint:disable=bare-except stream = BufferedStream(stream) return stream
[ "def", "openStream", "(", "self", ",", "source", ")", ":", "# Already a file object", "if", "hasattr", "(", "source", ",", "'read'", ")", ":", "stream", "=", "source", "else", ":", "stream", "=", "BytesIO", "(", "source", ")", "try", ":", "stream", ".", ...
Produces a file object from source. source can be either a file object, local filename or a string.
[ "Produces", "a", "file", "object", "from", "source", "." ]
python
train
django-treebeard/django-treebeard
treebeard/templatetags/admin_tree.py
https://github.com/django-treebeard/django-treebeard/blob/8042ee939cb45394909237da447f8925e3cc6aa3/treebeard/templatetags/admin_tree.py#L219-L231
def check_empty_dict(GET_dict): """ Returns True if the GET querstring contains on values, but it can contain empty keys. This is better than doing not bool(request.GET) as an empty key will return True """ empty = True for k, v in GET_dict.items(): # Don't disable on p(age) or 'all' GET param if v and k != 'p' and k != 'all': empty = False return empty
[ "def", "check_empty_dict", "(", "GET_dict", ")", ":", "empty", "=", "True", "for", "k", ",", "v", "in", "GET_dict", ".", "items", "(", ")", ":", "# Don't disable on p(age) or 'all' GET param", "if", "v", "and", "k", "!=", "'p'", "and", "k", "!=", "'all'", ...
Returns True if the GET querstring contains on values, but it can contain empty keys. This is better than doing not bool(request.GET) as an empty key will return True
[ "Returns", "True", "if", "the", "GET", "querstring", "contains", "on", "values", "but", "it", "can", "contain", "empty", "keys", ".", "This", "is", "better", "than", "doing", "not", "bool", "(", "request", ".", "GET", ")", "as", "an", "empty", "key", "...
python
train
google/dotty
efilter/parsers/common/ast_transforms.py
https://github.com/google/dotty/blob/b145131499be0c4b755fc2e2ac19be11a50bce6a/efilter/parsers/common/ast_transforms.py#L43-L46
def ComplementEquivalence(*args, **kwargs): """Change x != y to not(x == y).""" return ast.Complement( ast.Equivalence(*args, **kwargs), **kwargs)
[ "def", "ComplementEquivalence", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "ast", ".", "Complement", "(", "ast", ".", "Equivalence", "(", "*", "args", ",", "*", "*", "kwargs", ")", ",", "*", "*", "kwargs", ")" ]
Change x != y to not(x == y).
[ "Change", "x", "!", "=", "y", "to", "not", "(", "x", "==", "y", ")", "." ]
python
train
rameshg87/pyremotevbox
pyremotevbox/ZSI/TC.py
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/TC.py#L203-L229
def checktype(self, elt, ps): '''See if the type of the "elt" element is what we're looking for. Return the element's type. Parameters: elt -- the DOM element being parsed ps -- the ParsedSoap object. ''' typeName = _find_type(elt) if typeName is None or typeName == "": return (None,None) # Parse the QNAME. prefix,typeName = SplitQName(typeName) uri = ps.GetElementNSdict(elt).get(prefix) if uri is None: raise EvaluateException('Malformed type attribute (bad NS)', ps.Backtrace(elt)) #typeName = list[1] parselist,errorlist = self.get_parse_and_errorlist() if not parselist or \ (uri,typeName) in parselist or \ (_is_xsd_or_soap_ns(uri) and (None,typeName) in parselist): return (uri,typeName) raise EvaluateException( 'Type mismatch (%s namespace) (got %s wanted %s)' % \ (uri, typeName, errorlist), ps.Backtrace(elt))
[ "def", "checktype", "(", "self", ",", "elt", ",", "ps", ")", ":", "typeName", "=", "_find_type", "(", "elt", ")", "if", "typeName", "is", "None", "or", "typeName", "==", "\"\"", ":", "return", "(", "None", ",", "None", ")", "# Parse the QNAME.", "prefi...
See if the type of the "elt" element is what we're looking for. Return the element's type. Parameters: elt -- the DOM element being parsed ps -- the ParsedSoap object.
[ "See", "if", "the", "type", "of", "the", "elt", "element", "is", "what", "we", "re", "looking", "for", ".", "Return", "the", "element", "s", "type", ".", "Parameters", ":", "elt", "--", "the", "DOM", "element", "being", "parsed", "ps", "--", "the", "...
python
train
saltstack/salt
salt/modules/portage_config.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/portage_config.py#L326-L437
def append_to_package_conf(conf, atom='', flags=None, string='', overwrite=False): ''' Append a string or a list of flags for a given package or DEPEND atom to a given configuration file. CLI Example: .. code-block:: bash salt '*' portage_config.append_to_package_conf use string="app-admin/salt ldap -libvirt" salt '*' portage_config.append_to_package_conf use atom="> = app-admin/salt-0.14.1" flags="['ldap', '-libvirt']" ''' if flags is None: flags = [] if conf in SUPPORTED_CONFS: if not string: if '/' not in atom: atom = _p_to_cp(atom) if not atom: return string = '{0} {1}'.format(atom, ' '.join(flags)) new_flags = list(flags) else: atom = string.strip().split()[0] new_flags = [flag for flag in string.strip().split(' ') if flag][1:] if '/' not in atom: atom = _p_to_cp(atom) string = '{0} {1}'.format(atom, ' '.join(new_flags)) if not atom: return to_delete_if_empty = [] if conf == 'accept_keywords': if '-~ARCH' in new_flags: new_flags.remove('-~ARCH') to_delete_if_empty.append(atom) if '~ARCH' in new_flags: new_flags.remove('~ARCH') append_to_package_conf(conf, string=atom, overwrite=overwrite) if not new_flags: return # Next sort is just aesthetic, can be commented for a small performance # boost new_flags.sort(key=lambda x: x.lstrip('-')) complete_file_path = _get_config_file(conf, atom) pdir = os.path.dirname(complete_file_path) if not os.path.exists(pdir): os.makedirs(pdir, 0o755) try: shutil.copy(complete_file_path, complete_file_path + '.bak') except IOError: pass try: file_handler = salt.utils.files.fopen(complete_file_path, 'r+') # pylint: disable=resource-leakage except IOError: file_handler = salt.utils.files.fopen(complete_file_path, 'w+') # pylint: disable=resource-leakage new_contents = '' added = False try: for l in file_handler: l_strip = l.strip() if l_strip == '': new_contents += '\n' elif l_strip[0] == '#': new_contents += l elif l_strip.split()[0] == atom: if l_strip in to_delete_if_empty: continue if overwrite: new_contents += string.strip() + 
'\n' added = True else: old_flags = [flag for flag in l_strip.split(' ') if flag][1:] if conf == 'accept_keywords': if not old_flags: new_contents += l if not new_flags: added = True continue elif not new_flags: continue merged_flags = _merge_flags(new_flags, old_flags, conf) if merged_flags: new_contents += '{0} {1}\n'.format( atom, ' '.join(merged_flags)) else: new_contents += '{0}\n'.format(atom) added = True else: new_contents += l if not added: new_contents += string.strip() + '\n' except Exception as exc: log.error('Failed to write to %s: %s', complete_file_path, exc) else: file_handler.seek(0) file_handler.truncate(len(new_contents)) file_handler.write(new_contents) finally: file_handler.close() try: os.remove(complete_file_path + '.bak') except OSError: pass
[ "def", "append_to_package_conf", "(", "conf", ",", "atom", "=", "''", ",", "flags", "=", "None", ",", "string", "=", "''", ",", "overwrite", "=", "False", ")", ":", "if", "flags", "is", "None", ":", "flags", "=", "[", "]", "if", "conf", "in", "SUPP...
Append a string or a list of flags for a given package or DEPEND atom to a given configuration file. CLI Example: .. code-block:: bash salt '*' portage_config.append_to_package_conf use string="app-admin/salt ldap -libvirt" salt '*' portage_config.append_to_package_conf use atom="> = app-admin/salt-0.14.1" flags="['ldap', '-libvirt']"
[ "Append", "a", "string", "or", "a", "list", "of", "flags", "for", "a", "given", "package", "or", "DEPEND", "atom", "to", "a", "given", "configuration", "file", "." ]
python
train
numenta/nupic
src/nupic/swarming/hypersearch/extended_logger.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/swarming/hypersearch/extended_logger.py#L111-L121
def log(self, level, msg, *args, **kwargs): """ Log 'msg % args' with the integer severity 'level'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.log(level, "We have a %s", "mysterious problem", exc_info=1) """ self._baseLogger.log(self, level, self.getExtendedMsg(msg), *args, **kwargs)
[ "def", "log", "(", "self", ",", "level", ",", "msg", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_baseLogger", ".", "log", "(", "self", ",", "level", ",", "self", ".", "getExtendedMsg", "(", "msg", ")", ",", "*", "args", ...
Log 'msg % args' with the integer severity 'level'. To pass exception information, use the keyword argument exc_info with a true value, e.g. logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
[ "Log", "msg", "%", "args", "with", "the", "integer", "severity", "level", "." ]
python
valid
TheHive-Project/Cortex-Analyzers
analyzers/MaxMind/ipaddr.py
https://github.com/TheHive-Project/Cortex-Analyzers/blob/8dae6a8c4cf9af5554ae8c844985c4b44d4bd4bf/analyzers/MaxMind/ipaddr.py#L41-L78
def IPAddress(address, version=None): """Take an IP string/int and return an object of the correct type. Args: address: A string or integer, the IP address. Either IPv4 or IPv6 addresses may be supplied; integers less than 2**32 will be considered to be IPv4 by default. version: An Integer, 4 or 6. If set, don't try to automatically determine what the IP address type is. important for things like IPAddress(1), which could be IPv4, '0.0.0.1', or IPv6, '::1'. Returns: An IPv4Address or IPv6Address object. Raises: ValueError: if the string passed isn't either a v4 or a v6 address. """ if version: if version == 4: return IPv4Address(address) elif version == 6: return IPv6Address(address) try: return IPv4Address(address) except (AddressValueError, NetmaskValueError): pass try: return IPv6Address(address) except (AddressValueError, NetmaskValueError): pass raise ValueError('%r does not appear to be an IPv4 or IPv6 address' % address)
[ "def", "IPAddress", "(", "address", ",", "version", "=", "None", ")", ":", "if", "version", ":", "if", "version", "==", "4", ":", "return", "IPv4Address", "(", "address", ")", "elif", "version", "==", "6", ":", "return", "IPv6Address", "(", "address", ...
Take an IP string/int and return an object of the correct type. Args: address: A string or integer, the IP address. Either IPv4 or IPv6 addresses may be supplied; integers less than 2**32 will be considered to be IPv4 by default. version: An Integer, 4 or 6. If set, don't try to automatically determine what the IP address type is. important for things like IPAddress(1), which could be IPv4, '0.0.0.1', or IPv6, '::1'. Returns: An IPv4Address or IPv6Address object. Raises: ValueError: if the string passed isn't either a v4 or a v6 address.
[ "Take", "an", "IP", "string", "/", "int", "and", "return", "an", "object", "of", "the", "correct", "type", "." ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/build/build_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/build/build_client.py#L1885-L1909
def get_build_work_items_refs_from_commits(self, commit_ids, project, build_id, top=None): """GetBuildWorkItemsRefsFromCommits. Gets the work items associated with a build, filtered to specific commits. :param [str] commit_ids: A comma-delimited list of commit IDs. :param str project: Project ID or project name :param int build_id: The ID of the build. :param int top: The maximum number of work items to return, or the number of commits to consider if no commit IDs are specified. :rtype: [ResourceRef] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if build_id is not None: route_values['buildId'] = self._serialize.url('build_id', build_id, 'int') query_parameters = {} if top is not None: query_parameters['$top'] = self._serialize.query('top', top, 'int') content = self._serialize.body(commit_ids, '[str]') response = self._send(http_method='POST', location_id='5a21f5d2-5642-47e4-a0bd-1356e6731bee', version='5.0', route_values=route_values, query_parameters=query_parameters, content=content) return self._deserialize('[ResourceRef]', self._unwrap_collection(response))
[ "def", "get_build_work_items_refs_from_commits", "(", "self", ",", "commit_ids", ",", "project", ",", "build_id", ",", "top", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "...
GetBuildWorkItemsRefsFromCommits. Gets the work items associated with a build, filtered to specific commits. :param [str] commit_ids: A comma-delimited list of commit IDs. :param str project: Project ID or project name :param int build_id: The ID of the build. :param int top: The maximum number of work items to return, or the number of commits to consider if no commit IDs are specified. :rtype: [ResourceRef]
[ "GetBuildWorkItemsRefsFromCommits", ".", "Gets", "the", "work", "items", "associated", "with", "a", "build", "filtered", "to", "specific", "commits", ".", ":", "param", "[", "str", "]", "commit_ids", ":", "A", "comma", "-", "delimited", "list", "of", "commit",...
python
train
tensorflow/cleverhans
cleverhans/utils.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils.py#L247-L262
def create_logger(name): """ Create a logger object with the given name. If this is the first time that we call this method, then initialize the formatter. """ base = logging.getLogger("cleverhans") if len(base.handlers) == 0: ch = logging.StreamHandler() formatter = logging.Formatter('[%(levelname)s %(asctime)s %(name)s] ' + '%(message)s') ch.setFormatter(formatter) base.addHandler(ch) return base
[ "def", "create_logger", "(", "name", ")", ":", "base", "=", "logging", ".", "getLogger", "(", "\"cleverhans\"", ")", "if", "len", "(", "base", ".", "handlers", ")", "==", "0", ":", "ch", "=", "logging", ".", "StreamHandler", "(", ")", "formatter", "=",...
Create a logger object with the given name. If this is the first time that we call this method, then initialize the formatter.
[ "Create", "a", "logger", "object", "with", "the", "given", "name", "." ]
python
train
MillionIntegrals/vel
vel/rl/models/q_stochastic_policy_model.py
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/models/q_stochastic_policy_model.py#L74-L87
def step(self, observation, argmax_sampling=False): """ Select actions based on model's output """ policy_params, q = self(observation) actions = self.action_head.sample(policy_params, argmax_sampling=argmax_sampling) # log probability - we can do that, because we support only discrete action spaces logprobs = self.action_head.logprob(actions, policy_params) return { 'actions': actions, 'q': q, 'logprobs': policy_params, 'action:logprobs': logprobs }
[ "def", "step", "(", "self", ",", "observation", ",", "argmax_sampling", "=", "False", ")", ":", "policy_params", ",", "q", "=", "self", "(", "observation", ")", "actions", "=", "self", ".", "action_head", ".", "sample", "(", "policy_params", ",", "argmax_s...
Select actions based on model's output
[ "Select", "actions", "based", "on", "model", "s", "output" ]
python
train