repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
nerdvegas/rez
src/rez/build_process_.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/build_process_.py#L218-L253
def create_build_context(self, variant, build_type, build_path):
    """Create a context to build the variant within.

    Returns a 2-tuple of the resolved context and the filepath of the
    saved ``build.rxt`` file.
    """
    request = variant.get_requires(build_requires=True,
                                   private_build_requires=True)

    requested = ' '.join(quote(str(req)) for req in request)
    self._print("Resolving build environment: %s", requested)

    if build_type == BuildType.local:
        packages_path = self.package.config.packages_path
    else:
        packages_path = self.package.config.nonlocal_packages_path

    package_filter = None
    if self.package.config.is_overridden("package_filter"):
        from rez.package_filter import PackageFilterList
        package_filter = PackageFilterList.from_pod(
            self.package.config.package_filter)

    context = ResolvedContext(request,
                              package_paths=packages_path,
                              package_filter=package_filter,
                              building=True)

    if self.verbose:
        context.print_info()

    # Persist the context even if the resolve failed, so the user can
    # inspect and debug it afterwards.
    rxt_filepath = os.path.join(build_path, "build.rxt")
    context.save(rxt_filepath)

    if context.status != ResolverStatus.solved:
        raise BuildContextResolveError(context)

    return context, rxt_filepath
[ "def", "create_build_context", "(", "self", ",", "variant", ",", "build_type", ",", "build_path", ")", ":", "request", "=", "variant", ".", "get_requires", "(", "build_requires", "=", "True", ",", "private_build_requires", "=", "True", ")", "req_strs", "=", "m...
Create a context to build the variant within.
[ "Create", "a", "context", "to", "build", "the", "variant", "within", "." ]
python
train
nicolargo/glances
glances/plugins/glances_alert.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/plugins/glances_alert.py#L162-L167
def approx_equal(self, a, b, tolerance=0.0):
    """Compare a with b using the tolerance (if numerical).

    Numerical values (including negatives and numeric strings) are
    compared with a relative tolerance; anything non-numerical falls
    back to strict equality.

    The previous implementation used ``str(int(a)).isdigit()``, which
    raised ValueError/TypeError on non-numeric input, excluded negative
    numbers from the tolerance path, and crashed on numeric strings
    when subtracting them.
    """
    try:
        a_num = float(a)
        b_num = float(b)
    except (TypeError, ValueError):
        # Non-numerical values: fall back to strict equality.
        return a == b
    return abs(a_num - b_num) <= max(abs(a_num), abs(b_num)) * tolerance
[ "def", "approx_equal", "(", "self", ",", "a", ",", "b", ",", "tolerance", "=", "0.0", ")", ":", "if", "str", "(", "int", "(", "a", ")", ")", ".", "isdigit", "(", ")", "and", "str", "(", "int", "(", "b", ")", ")", ".", "isdigit", "(", ")", "...
Compare a with b using the tolerance (if numerical).
[ "Compare", "a", "with", "b", "using", "the", "tolerance", "(", "if", "numerical", ")", "." ]
python
train
Azure/msrest-for-python
msrest/serialization.py
https://github.com/Azure/msrest-for-python/blob/0732bc90bdb290e5f58c675ffdd7dbfa9acefc93/msrest/serialization.py#L582-L605
def url(self, name, data, data_type, **kwargs):
    """Serialize data intended for a URL path.

    :param data: The data to be serialized.
    :param str data_type: The type to be serialized from.
    :rtype: str
    :raises: TypeError if serialization fails.
    :raises: ValueError if data is None
    """
    if self.client_side_validation:
        data = self.validate(data, name, required=True, **kwargs)

    try:
        serialized = self.serialize_data(data, data_type, **kwargs)
        if data_type == 'bool':
            serialized = json.dumps(serialized)
        text = str(serialized)
        # Percent-encode unless the caller explicitly opted out.
        if kwargs.get('skip_quote') is True:
            result = text
        else:
            result = quote(text, safe='')
    except SerializationError:
        raise TypeError("{} must be type {}.".format(name, data_type))
    return result
[ "def", "url", "(", "self", ",", "name", ",", "data", ",", "data_type", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "client_side_validation", ":", "data", "=", "self", ".", "validate", "(", "data", ",", "name", ",", "required", "=", "True", ...
Serialize data intended for a URL path. :param data: The data to be serialized. :param str data_type: The type to be serialized from. :rtype: str :raises: TypeError if serialization fails. :raises: ValueError if data is None
[ "Serialize", "data", "intended", "for", "a", "URL", "path", "." ]
python
train
genialis/resolwe
resolwe/flow/models/data.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/models/data.py#L74-L87
def duplicate(self, contributor=None):
    """Duplicate (make a copy) ``Data`` objects.

    :param contributor: Duplication user
    """
    # Pair each original with its copy so inputs can be rewired to point
    # at the duplicated objects instead of the originals.
    pairs = [
        {'original': datum, 'copy': datum.duplicate(contributor=contributor)}
        for datum in self
    ]
    pairs = rewire_inputs(pairs)
    return [pair['copy'] for pair in pairs]
[ "def", "duplicate", "(", "self", ",", "contributor", "=", "None", ")", ":", "bundle", "=", "[", "{", "'original'", ":", "data", ",", "'copy'", ":", "data", ".", "duplicate", "(", "contributor", "=", "contributor", ")", "}", "for", "data", "in", "self",...
Duplicate (make a copy) ``Data`` objects. :param contributor: Duplication user
[ "Duplicate", "(", "make", "a", "copy", ")", "Data", "objects", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/oinspect.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/oinspect.py#L327-L330
def __head(self, h):
    """Return a header string with proper colors."""
    colors = self.color_table.active_colors
    return '%s%s%s' % (colors.header, h, colors.normal)
[ "def", "__head", "(", "self", ",", "h", ")", ":", "return", "'%s%s%s'", "%", "(", "self", ".", "color_table", ".", "active_colors", ".", "header", ",", "h", ",", "self", ".", "color_table", ".", "active_colors", ".", "normal", ")" ]
Return a header string with proper colors.
[ "Return", "a", "header", "string", "with", "proper", "colors", "." ]
python
test
googlefonts/ufo2ft
Lib/ufo2ft/fontInfoData.py
https://github.com/googlefonts/ufo2ft/blob/915b986558e87bee288765d9218cc1cd4ebf7f4c/Lib/ufo2ft/fontInfoData.py#L79-L88
def openTypeHeadCreatedFallback(info):
    """
    Fallback to the environment variable SOURCE_DATE_EPOCH if set,
    otherwise now.
    """
    epoch = os.environ.get("SOURCE_DATE_EPOCH")
    if epoch is None:
        return dateStringForNow()
    # Reproducible builds: derive the creation date from the epoch seconds.
    return datetime.utcfromtimestamp(int(epoch)).strftime(_date_format)
[ "def", "openTypeHeadCreatedFallback", "(", "info", ")", ":", "if", "\"SOURCE_DATE_EPOCH\"", "in", "os", ".", "environ", ":", "t", "=", "datetime", ".", "utcfromtimestamp", "(", "int", "(", "os", ".", "environ", "[", "\"SOURCE_DATE_EPOCH\"", "]", ")", ")", "r...
Fallback to the environment variable SOURCE_DATE_EPOCH if set, otherwise now.
[ "Fallback", "to", "the", "environment", "variable", "SOURCE_DATE_EPOCH", "if", "set", "otherwise", "now", "." ]
python
train
bukun/TorCMS
torcms/handlers/log_handler.py
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/log_handler.py#L151-L183
def pageview(self, cur_p=''):
    '''
    View the list of the Log.

    :param cur_p: requested page number as a string; empty or invalid
        values fall back to page 1 instead of raising ValueError.
    '''
    # Previously ``int(cur_p)`` raised on non-numeric query input; guard it.
    try:
        current_page_number = int(cur_p)
    except (TypeError, ValueError):
        current_page_number = 1
    current_page_number = max(current_page_number, 1)

    # NOTE: the original computed an unused ``pager_num`` from
    # MLog.total_number(); it was dead code and has been removed.
    kwd = {
        'pager': '',
        'title': '',
        'current_page': current_page_number,
    }

    postinfo = MLog.query_all_current_url()
    # Per-URL hit counts, aligned with the order of ``postinfo``.
    arr_num = [MLog.count_of_current_url(rec.current_url) for rec in postinfo]

    self.render('misc/log/pageview.html',
                kwd=kwd,
                infos=MLog.query_all_pageview(
                    current_page_num=current_page_number),
                postinfo=postinfo,
                arr_num=arr_num,
                format_date=tools.format_date,
                userinfo=self.userinfo)
[ "def", "pageview", "(", "self", ",", "cur_p", "=", "''", ")", ":", "if", "cur_p", "==", "''", ":", "current_page_number", "=", "1", "else", ":", "current_page_number", "=", "int", "(", "cur_p", ")", "current_page_number", "=", "1", "if", "current_page_numb...
View the list of the Log.
[ "View", "the", "list", "of", "the", "Log", "." ]
python
train
caseyjlaw/rtpipe
rtpipe/parsecal.py
https://github.com/caseyjlaw/rtpipe/blob/ac33e4332cf215091a63afbb3137850876d73ec0/rtpipe/parsecal.py#L586-L599
def calcdelay(self, ant1, ant2, skyfreq, pol):
    """ Calculates the relative delay (d1-d2) for a pair of antennas in ns. """
    # Restrict the selection to solutions matching this sky frequency
    # and polarization.
    match = ((self.skyfreq[self.select] == skyfreq) &
             (self.polarization[self.select] == pol))
    select = self.select[n.where(match)[0]]

    d1 = self.delay[select][n.where(ant1 == self.antnum[select])]
    d2 = self.delay[select][n.where(ant2 == self.antnum[select])]

    diff = d1 - d2
    # Fall back to a zero delay when no matching pair was found.
    return diff if len(diff) > 0 else n.array([0])
[ "def", "calcdelay", "(", "self", ",", "ant1", ",", "ant2", ",", "skyfreq", ",", "pol", ")", ":", "select", "=", "self", ".", "select", "[", "n", ".", "where", "(", "(", "self", ".", "skyfreq", "[", "self", ".", "select", "]", "==", "skyfreq", ")"...
Calculates the relative delay (d1-d2) for a pair of antennas in ns.
[ "Calculates", "the", "relative", "delay", "(", "d1", "-", "d2", ")", "for", "a", "pair", "of", "antennas", "in", "ns", "." ]
python
train
elifesciences/elife-tools
elifetools/parseJATS.py
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/parseJATS.py#L1740-L1754
def author_notes(soup):
    """
    Find the fn tags included in author-notes
    """
    notes = []
    section = raw_parser.author_notes(soup)
    if not section:
        return notes
    for fn_tag in raw_parser.fn(section):
        # Skip footnotes without a type and present-address footnotes.
        if 'fn-type' in fn_tag.attrs and fn_tag['fn-type'] != 'present-address':
            notes.append(node_text(fn_tag))
    return notes
[ "def", "author_notes", "(", "soup", ")", ":", "author_notes", "=", "[", "]", "author_notes_section", "=", "raw_parser", ".", "author_notes", "(", "soup", ")", "if", "author_notes_section", ":", "fn_nodes", "=", "raw_parser", ".", "fn", "(", "author_notes_section...
Find the fn tags included in author-notes
[ "Find", "the", "fn", "tags", "included", "in", "author", "-", "notes" ]
python
train
apache/incubator-heron
heron/tools/tracker/src/python/javaobj.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/tracker/src/python/javaobj.py#L45-L52
def load(file_object):
    """
    Deserializes Java primitive data and objects serialized by
    ObjectOutputStream from a file-like object.
    """
    unmarshaller = JavaObjectUnmarshaller(file_object)
    # Install the default transformer so common Java collections map to
    # native Python objects.
    unmarshaller.add_transformer(DefaultObjectTransformer())
    return unmarshaller.readObject()
[ "def", "load", "(", "file_object", ")", ":", "marshaller", "=", "JavaObjectUnmarshaller", "(", "file_object", ")", "marshaller", ".", "add_transformer", "(", "DefaultObjectTransformer", "(", ")", ")", "return", "marshaller", ".", "readObject", "(", ")" ]
Deserializes Java primitive data and objects serialized by ObjectOutputStream from a file-like object.
[ "Deserializes", "Java", "primitive", "data", "and", "objects", "serialized", "by", "ObjectOutputStream", "from", "a", "file", "-", "like", "object", "." ]
python
valid
pypa/pipenv
pipenv/vendor/distlib/_backport/shutil.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/shutil.py#L727-L761
def unpack_archive(filename, extract_dir=None, format=None):
    """Unpack an archive.

    `filename` is the name of the archive.

    `extract_dir` is the name of the target directory, where the archive
    is unpacked. If not provided, the current working directory is used.

    `format` is the archive format: one of "zip", "tar", or "gztar". Or
    any other registered format. If not provided, unpack_archive will use
    the filename extension and see if an unpacker was registered for that
    extension. In case none is found, a ValueError is raised.
    """
    if extract_dir is None:
        extract_dir = os.getcwd()

    if format is None:
        # No explicit format: infer one from the registered extensions.
        format = _find_unpack_format(filename)
        if format is None:
            raise ReadError("Unknown archive format '{0}'".format(filename))
        func = _UNPACK_FORMATS[format][1]
        kwargs = dict(_UNPACK_FORMATS[format][2])
        func(filename, extract_dir, **kwargs)
    else:
        try:
            format_info = _UNPACK_FORMATS[format]
        except KeyError:
            raise ValueError("Unknown unpack format '{0}'".format(format))
        format_info[1](filename, extract_dir, **dict(format_info[2]))
[ "def", "unpack_archive", "(", "filename", ",", "extract_dir", "=", "None", ",", "format", "=", "None", ")", ":", "if", "extract_dir", "is", "None", ":", "extract_dir", "=", "os", ".", "getcwd", "(", ")", "if", "format", "is", "not", "None", ":", "try",...
Unpack an archive. `filename` is the name of the archive. `extract_dir` is the name of the target directory, where the archive is unpacked. If not provided, the current working directory is used. `format` is the archive format: one of "zip", "tar", or "gztar". Or any other registered format. If not provided, unpack_archive will use the filename extension and see if an unpacker was registered for that extension. In case none is found, a ValueError is raised.
[ "Unpack", "an", "archive", "." ]
python
train
keras-rl/keras-rl
rl/memory.py
https://github.com/keras-rl/keras-rl/blob/e6efb0d8297ec38d704a3110b5d6ed74d09a05e3/rl/memory.py#L120-L144
def get_recent_state(self, current_observation):
    """Return list of last observations

    # Argument
        current_observation (object): Last observation

    # Returns
        A list of the last observations
    """
    # Walk backwards through recent history. Subsequent observations may
    # belong to different episodes; stop at episode boundaries so that an
    # experience never spans multiple episodes.
    state = [current_observation]
    last_idx = len(self.recent_observations) - 1
    for offset in range(self.window_length - 1):
        idx = last_idx - offset
        terminal = False
        if idx - 1 >= 0:
            terminal = self.recent_terminals[idx - 1]
        if idx < 0 or (terminal and not self.ignore_episode_boundaries):
            # The previously handled observation was terminal; adding more
            # would leak into a different episode.
            break
        state.insert(0, self.recent_observations[idx])
    # Left-pad with zeroed observations up to the window length.
    while len(state) < self.window_length:
        state.insert(0, zeroed_observation(state[0]))
    return state
[ "def", "get_recent_state", "(", "self", ",", "current_observation", ")", ":", "# This code is slightly complicated by the fact that subsequent observations might be", "# from different episodes. We ensure that an experience never spans multiple episodes.", "# This is probably not that important ...
Return list of last observations # Argument current_observation (object): Last observation # Returns A list of the last observations
[ "Return", "list", "of", "last", "observations" ]
python
train
mitsei/dlkit
dlkit/json_/repository/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/objects.py#L1671-L1689
def clear_data(self):
    """Removes the content data.

    raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
           ``Metadata.isReadOnly()`` is ``true``
    *compliance: mandatory -- This method must be implemented.*
    """
    metadata = self.get_data_metadata()
    if metadata.is_read_only() or metadata.is_required():
        raise errors.NoAccess()
    if self._my_map['data'] == self._data_default:
        # Already at the default: nothing stored, nothing to delete.
        return
    # Remove the stored blob from GridFS before resetting the map entry.
    grid_db = JSONClientValidated('repository', runtime=self._runtime).raw()
    gridfs.GridFS(grid_db).delete(self._my_map['data'])
    self._my_map['data'] = self._data_default
    del self._my_map['base64']
[ "def", "clear_data", "(", "self", ")", ":", "if", "(", "self", ".", "get_data_metadata", "(", ")", ".", "is_read_only", "(", ")", "or", "self", ".", "get_data_metadata", "(", ")", ".", "is_required", "(", ")", ")", ":", "raise", "errors", ".", "NoAcces...
Removes the content data. raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.*
[ "Removes", "the", "content", "data", "." ]
python
train
BetterWorks/django-anonymizer
anonymizer/replacers.py
https://github.com/BetterWorks/django-anonymizer/blob/2d25bb6e8b5e4230c58031c4b6d10cc536669b3e/anonymizer/replacers.py#L11-L15
def varchar(anon, obj, field, val):
    """
    Returns random data for a varchar field.
    """
    # Delegate generation to the anonymizer's faker, sized for the field.
    return anon.faker.varchar(field=field)
[ "def", "varchar", "(", "anon", ",", "obj", ",", "field", ",", "val", ")", ":", "return", "anon", ".", "faker", ".", "varchar", "(", "field", "=", "field", ")" ]
Returns random data for a varchar field.
[ "Returns", "random", "data", "for", "a", "varchar", "field", "." ]
python
train
calmjs/calmjs.parse
src/calmjs/parse/parsers/es5.py
https://github.com/calmjs/calmjs.parse/blob/369f0ee346c5a84c4d5c35a7733a0e63b02eac59/src/calmjs/parse/parsers/es5.py#L1316-L1323
def p_switch_statement(self, p):
    """switch_statement : SWITCH LPAREN expr RPAREN case_block"""
    # NOTE: the docstring above is the PLY grammar production and must
    # remain exactly as written.
    # A dedicated Switch node type corrects a subtly wrong interpretation
    # of this construct in the original slimit parser.
    # see: https://github.com/rspivak/slimit/issues/94
    node = self.asttypes.Switch(expr=p[3], case_block=p[5])
    node.setpos(p)
    p[0] = node
[ "def", "p_switch_statement", "(", "self", ",", "p", ")", ":", "# this uses a completely different type that corrects a", "# subtly wrong interpretation of this construct.", "# see: https://github.com/rspivak/slimit/issues/94", "p", "[", "0", "]", "=", "self", ".", "asttypes", "...
switch_statement : SWITCH LPAREN expr RPAREN case_block
[ "switch_statement", ":", "SWITCH", "LPAREN", "expr", "RPAREN", "case_block" ]
python
train
astropy/photutils
photutils/segmentation/properties.py
https://github.com/astropy/photutils/blob/cc9bb4534ab76bac98cb5f374a348a2573d10401/photutils/segmentation/properties.py#L957-L971
def perimeter(self):
    """
    The total perimeter of the source segment, approximated lines
    through the centers of the border pixels using a 4-connectivity.

    If any masked pixels make holes within the source segment, then the
    perimeter around the inner hole (e.g. an annulus) will also
    contribute to the total perimeter.
    """
    if self._is_completely_masked:
        # unit for table
        return np.nan * u.pix
    from skimage.measure import perimeter
    # Measure on the unmasked footprint of the segment.
    return perimeter(~self._total_mask, neighbourhood=4) * u.pix
[ "def", "perimeter", "(", "self", ")", ":", "if", "self", ".", "_is_completely_masked", ":", "return", "np", ".", "nan", "*", "u", ".", "pix", "# unit for table", "else", ":", "from", "skimage", ".", "measure", "import", "perimeter", "return", "perimeter", ...
The total perimeter of the source segment, approximated lines through the centers of the border pixels using a 4-connectivity. If any masked pixels make holes within the source segment, then the perimeter around the inner hole (e.g. an annulus) will also contribute to the total perimeter.
[ "The", "total", "perimeter", "of", "the", "source", "segment", "approximated", "lines", "through", "the", "centers", "of", "the", "border", "pixels", "using", "a", "4", "-", "connectivity", "." ]
python
train
flatangle/flatlib
flatlib/dignities/accidental.py
https://github.com/flatangle/flatlib/blob/44e05b2991a296c678adbc17a1d51b6a21bc867c/flatlib/dignities/accidental.py#L306-L322
def __sepApp(self, IDs, aspList):
    """ Returns true if the object last and next movement are
    separations and applications to objects in list IDs.
    It only considers aspects in aspList.

    This function is static since it does not test if the
    next application will be indeed perfected. It considers
    only a snapshot of the chart and not its astronomical
    movement.
    """
    sep, app = self.dyn.immediateAspects(self.obj.id, aspList)
    if sep is None or app is None:
        return False
    # Both the last separation and the next application must involve
    # objects from IDs (equivalent to the chained `== True` comparison).
    return sep['id'] in IDs and app['id'] in IDs
[ "def", "__sepApp", "(", "self", ",", "IDs", ",", "aspList", ")", ":", "sep", ",", "app", "=", "self", ".", "dyn", ".", "immediateAspects", "(", "self", ".", "obj", ".", "id", ",", "aspList", ")", "if", "sep", "is", "None", "or", "app", "is", "Non...
Returns true if the object last and next movement are separations and applications to objects in list IDs. It only considers aspects in aspList. This function is static since it does not test if the next application will be indeed perfected. It considers only a snapshot of the chart and not its astronomical movement.
[ "Returns", "true", "if", "the", "object", "last", "and", "next", "movement", "are", "separations", "and", "applications", "to", "objects", "in", "list", "IDs", ".", "It", "only", "considers", "aspects", "in", "aspList", ".", "This", "function", "is", "static...
python
train
marrow/WebCore
example/annotation.py
https://github.com/marrow/WebCore/blob/38d50f8022ca62976a1e5ff23f7714bd647b6532/example/annotation.py#L15-L42
def mul(self, a: int = None, b: int = None) -> 'json':
    """Multiply two values together and return the result via JSON.

    Python 3 function annotations are used to ensure that the arguments
    are integers. This requires the functionality of
    `web.ext.annotation:AnnotationExtension`.

    There are several ways to execute this method:

    * POST http://localhost:8080/mul
    * GET http://localhost:8080/mul?a=27&b=42
    * GET http://localhost:8080/mul/27/42

    The latter relies on the fact we can't descend past a callable method
    so the remaining path elements are used as positional arguments,
    whereas the others rely on keyword argument assignment from a
    form-encoded request body or query string arguments. (Security note:
    any data in the request body takes precedence over query string
    arguments!)

    You can easily test these on the command line using cURL:

        curl http://localhost:8080/mul/27/42  # HTTP GET
        curl -d a=27 -d b=42 http://localhost:8080/mul  # HTTP POST
    """
    if a and b:
        return dict(answer=a * b)
    return dict(message="Pass arguments a and b to multiply them together!")
[ "def", "mul", "(", "self", ",", "a", ":", "int", "=", "None", ",", "b", ":", "int", "=", "None", ")", "->", "'json'", ":", "if", "not", "a", "or", "not", "b", ":", "return", "dict", "(", "message", "=", "\"Pass arguments a and b to multiply them togeth...
Multiply two values together and return the result via JSON. Python 3 function annotations are used to ensure that the arguments are integers. This requires the functionality of `web.ext.annotation:AnnotationExtension`. There are several ways to execute this method: * POST http://localhost:8080/mul * GET http://localhost:8080/mul?a=27&b=42 * GET http://localhost:8080/mul/27/42 The latter relies on the fact we can't descend past a callable method so the remaining path elements are used as positional arguments, whereas the others rely on keyword argument assignment from a form-encoded request body or query string arguments. (Security note: any data in the request body takes presidence over query string arguments!) You can easily test these on the command line using cURL: curl http://localhost:8080/mul/27/42 # HTTP GET curl -d a=27 -d b=42 http://localhost:8080/mul # HTTP POST
[ "Multiply", "two", "values", "together", "and", "return", "the", "result", "via", "JSON", ".", "Python", "3", "function", "annotations", "are", "used", "to", "ensure", "that", "the", "arguments", "are", "integers", ".", "This", "requires", "the", "functionalit...
python
train
sendgrid/sendgrid-python
sendgrid/helpers/mail/mail.py
https://github.com/sendgrid/sendgrid-python/blob/266c2abde7a35dfcce263e06bedc6a0bbdebeac9/sendgrid/helpers/mail/mail.py#L833-L842
def category(self, categories):
    """Add categories assigned to this message

    :rtype: list(Category)
    """
    # Normalize a single category into a list, then add each in turn.
    if not isinstance(categories, list):
        categories = [categories]
    for item in categories:
        self.add_category(item)
[ "def", "category", "(", "self", ",", "categories", ")", ":", "if", "isinstance", "(", "categories", ",", "list", ")", ":", "for", "c", "in", "categories", ":", "self", ".", "add_category", "(", "c", ")", "else", ":", "self", ".", "add_category", "(", ...
Add categories assigned to this message :rtype: list(Category)
[ "Add", "categories", "assigned", "to", "this", "message" ]
python
train
pyviz/holoviews
holoviews/plotting/plotly/chart.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/plotly/chart.py#L175-L214
def get_extents(self, element, ranges, range_type='combined'):
    """
    Make adjustments to plot extents by computing
    stacked bar heights, adjusting the bar baseline
    and forcing the x-axis to be categorical.
    """
    # In batched mode, merge the overlay's tables into a single Bars
    # element and refresh the combined ranges of the overlay key dims.
    if self.batched:
        overlay = self.current_frame
        element = Bars(overlay.table(), kdims=element.kdims+overlay.kdims, vdims=element.vdims)
        for kd in overlay.kdims:
            ranges[kd.name]['combined'] = overlay.range(kd)
    xdim = element.kdims[0]
    ydim = element.vdims[0]

    # Compute stack heights: positive and negative values are summed
    # separately per category so stacked bars get the right y-extent.
    if self.stacked or self.stack_index:
        ds = Dataset(element)
        pos_range = ds.select(**{ydim.name: (0, None)}).aggregate(xdim, function=np.sum).range(ydim)
        neg_range = ds.select(**{ydim.name: (None, 0)}).aggregate(xdim, function=np.sum).range(ydim)
        y0, y1 = util.max_range([pos_range, neg_range])
    else:
        y0, y1 = ranges[ydim.name]['combined']

    # Overlaid plots get no extra padding; the parent handles it.
    padding = 0 if self.overlaid else self.padding
    _, ypad, _ = get_axis_padding(padding)
    y0, y1 = util.range_pad(y0, y1, ypad, self.logy)

    # Set y-baseline: bars grow from 0 unless negative values exist,
    # or from a small positive floor on log axes.
    if y0 < 0:
        y1 = max([y1, 0])
    elif self.logy:
        y0 = (ydim.range[0] or (10**(np.log10(y1)-2)) if y1 else 0.01)
    else:
        y0 = 0

    # Ensure x-axis is picked up as categorical: half-unit margins around
    # the n category positions.
    nx = len(element.dimension_values(0, False))
    return (-0.5, y0, nx-0.5, y1)
[ "def", "get_extents", "(", "self", ",", "element", ",", "ranges", ",", "range_type", "=", "'combined'", ")", ":", "if", "self", ".", "batched", ":", "overlay", "=", "self", ".", "current_frame", "element", "=", "Bars", "(", "overlay", ".", "table", "(", ...
Make adjustments to plot extents by computing stacked bar heights, adjusting the bar baseline and forcing the x-axis to be categorical.
[ "Make", "adjustments", "to", "plot", "extents", "by", "computing", "stacked", "bar", "heights", "adjusting", "the", "bar", "baseline", "and", "forcing", "the", "x", "-", "axis", "to", "be", "categorical", "." ]
python
train
jeffrimko/Auxly
lib/auxly/filesys.py
https://github.com/jeffrimko/Auxly/blob/5aae876bcb6ca117c81d904f9455764cdc78cd48/lib/auxly/filesys.py#L203-L208
def abspath(relpath, root=None):
    """Returns an absolute path based on the given root and relative path."""
    # A falsy root (None or empty) defaults to the current working directory.
    base = root or cwd()
    # If a file was given as the root, use its containing directory.
    if op.isfile(base):
        base = op.dirname(base)
    return op.abspath(op.join(base, relpath))
[ "def", "abspath", "(", "relpath", ",", "root", "=", "None", ")", ":", "root", "=", "root", "or", "cwd", "(", ")", "if", "op", ".", "isfile", "(", "root", ")", ":", "root", "=", "op", ".", "dirname", "(", "root", ")", "return", "op", ".", "abspa...
Returns an absolute path based on the given root and relative path.
[ "Returns", "an", "absolute", "path", "based", "on", "the", "given", "root", "and", "relative", "path", "." ]
python
train
CiscoDevNet/webexteamssdk
webexteamssdk/utils.py
https://github.com/CiscoDevNet/webexteamssdk/blob/6fc2cc3557e080ba4b2a380664cb2a0532ae45cd/webexteamssdk/utils.py#L168-L186
def dict_from_items_with_values(*dictionaries, **items):
    """Creates a dict with the inputted items; pruning any that are `None`.

    Args:
        *dictionaries(dict): Dictionaries of items to be pruned and included.
        **items: Items to be pruned and included.

    Returns:
        dict: A dictionary containing all of the items with a 'non-None'
            value.
    """
    result = {}
    # Keyword items are merged last, so they win over earlier dictionaries.
    for source in (*dictionaries, items):
        result.update((k, v) for k, v in source.items() if v is not None)
    return result
[ "def", "dict_from_items_with_values", "(", "*", "dictionaries", ",", "*", "*", "items", ")", ":", "dict_list", "=", "list", "(", "dictionaries", ")", "dict_list", ".", "append", "(", "items", ")", "result", "=", "{", "}", "for", "d", "in", "dict_list", "...
Creates a dict with the inputted items; pruning any that are `None`. Args: *dictionaries(dict): Dictionaries of items to be pruned and included. **items: Items to be pruned and included. Returns: dict: A dictionary containing all of the items with a 'non-None' value.
[ "Creates", "a", "dict", "with", "the", "inputted", "items", ";", "pruning", "any", "that", "are", "None", "." ]
python
test
iotile/coretools
iotilebuild/iotile/build/dev/resolverchain.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/dev/resolverchain.py#L108-L176
def update_dependency(self, tile, depinfo, destdir=None):
    """Attempt to install or update a dependency to the latest version.

    Args:
        tile (IOTile): An IOTile object describing the tile that has the
            dependency
        depinfo (dict): a dictionary from tile.dependencies specifying the
            dependency
        destdir (string): An optional folder into which to unpack the
            dependency

    Returns:
        string: a string indicating the outcome. Possible values are:
            "already installed"
            "installed"
            "updated"
            "not found"
    """
    if destdir is None:
        destdir = os.path.join(tile.folder, 'build', 'deps', depinfo['unique_id'])

    # has_version tracks whether a usable copy currently exists on disk;
    # had_version remembers whether one existed before we started, which
    # decides between the "installed" and "updated" outcomes.
    has_version = False
    had_version = False
    if os.path.exists(destdir):
        has_version = True
        had_version = True

    # Try each configured rule in order; the first resolver that finds the
    # dependency wins.
    for priority, rule in self.rules:
        if not self._check_rule(rule, depinfo):
            continue
        resolver = self._find_resolver(rule)
        if has_version:
            deptile = IOTile(destdir)
            # If the dependency is not up to date, don't do anything
            depstatus = self._check_dep(depinfo, deptile, resolver)
            if depstatus is False:
                # Stale copy: remove it and fall through to re-resolve.
                shutil.rmtree(destdir)
                has_version = False
            else:
                continue
        # Now try to resolve this dependency with the latest version
        result = resolver.resolve(depinfo, destdir)
        if not result['found'] and result.get('stop', False):
            # Resolver signalled a hard stop: abort the whole search.
            return 'not found'
        if not result['found']:
            continue
        settings = {
            'resolver': resolver.__class__.__name__,
            'factory_args': rule[2]
        }
        if 'settings' in result:
            settings['settings'] = result['settings']
        # Record how this dependency was resolved for later checks.
        self._save_depsettings(destdir, settings)
        if had_version:
            return "updated"
        return "installed"

    # No rule produced a new copy; report the pre-existing one if any.
    if has_version:
        return "already installed"
    return "not found"
[ "def", "update_dependency", "(", "self", ",", "tile", ",", "depinfo", ",", "destdir", "=", "None", ")", ":", "if", "destdir", "is", "None", ":", "destdir", "=", "os", ".", "path", ".", "join", "(", "tile", ".", "folder", ",", "'build'", ",", "'deps'"...
Attempt to install or update a dependency to the latest version. Args: tile (IOTile): An IOTile object describing the tile that has the dependency depinfo (dict): a dictionary from tile.dependencies specifying the dependency destdir (string): An optional folder into which to unpack the dependency Returns: string: a string indicating the outcome. Possible values are: "already installed" "installed" "updated" "not found"
[ "Attempt", "to", "install", "or", "update", "a", "dependency", "to", "the", "latest", "version", "." ]
python
train
ibis-project/ibis
ibis/expr/lineage.py
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/expr/lineage.py#L10-L28
def find_nodes(expr, node_types):
    """Depth-first search of the expression tree yielding nodes of a given
    type or set of types.

    Parameters
    ----------
    expr: ibis.expr.types.Expr
    node_types: type or tuple of types

    Yields
    ------
    op: type
        A node of given node_types
    """
    def children(op):
        # Expand only the expression-valued arguments of a node.
        return (arg for arg in op.args if isinstance(arg, ir.Expr))

    return _search_for_nodes([expr], children, node_types)
[ "def", "find_nodes", "(", "expr", ",", "node_types", ")", ":", "def", "extender", "(", "op", ")", ":", "return", "(", "arg", "for", "arg", "in", "op", ".", "args", "if", "isinstance", "(", "arg", ",", "ir", ".", "Expr", ")", ")", "return", "_search...
Depth-first search of the expression tree yielding nodes of a given type or set of types. Parameters ---------- expr: ibis.expr.types.Expr node_types: type or tuple of types Yields ------ op: type A node of given node_types
[ "Depth", "-", "first", "search", "of", "the", "expression", "tree", "yielding", "nodes", "of", "a", "given", "type", "or", "set", "of", "types", "." ]
python
train
tjcsl/ion
intranet/apps/eighth/models.py
https://github.com/tjcsl/ion/blob/5d722b0725d572039bb0929fd5715a4070c82c72/intranet/apps/eighth/models.py#L304-L322
def restricted_activities_available_to_user(cls, user): """Find the restricted activities available to the given user.""" if not user: return [] activities = set(user.restricted_activity_set.values_list("id", flat=True)) if user and user.grade and user.grade.number and user.grade.name: grade = user.grade else: grade = None if grade is not None and 9 <= grade.number <= 12: activities |= set(EighthActivity.objects.filter(**{'{}_allowed'.format(grade.name_plural): True}).values_list("id", flat=True)) for group in user.groups.all(): activities |= set(group.restricted_activity_set.values_list("id", flat=True)) return list(activities)
[ "def", "restricted_activities_available_to_user", "(", "cls", ",", "user", ")", ":", "if", "not", "user", ":", "return", "[", "]", "activities", "=", "set", "(", "user", ".", "restricted_activity_set", ".", "values_list", "(", "\"id\"", ",", "flat", "=", "Tr...
Find the restricted activities available to the given user.
[ "Find", "the", "restricted", "activities", "available", "to", "the", "given", "user", "." ]
python
train
jmgilman/Neolib
neolib/pyamf/remoting/gateway/__init__.py
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/remoting/gateway/__init__.py#L516-L542
def authenticate(func, c, expose_request=False): """ A decorator that facilitates authentication per method. Setting C{expose_request} to C{True} will set the underlying request object (if there is one), usually HTTP and set it to the first argument of the authenticating callable. If there is no request object, the default is C{None}. @raise TypeError: C{func} and authenticator must be callable. """ if not python.callable(func): raise TypeError('func must be callable') if not python.callable(c): raise TypeError('Authenticator must be callable') attr = func if isinstance(func, types.UnboundMethodType): attr = func.im_func if expose_request is True: c = globals()['expose_request'](c) setattr(attr, '_pyamf_authenticator', c) return func
[ "def", "authenticate", "(", "func", ",", "c", ",", "expose_request", "=", "False", ")", ":", "if", "not", "python", ".", "callable", "(", "func", ")", ":", "raise", "TypeError", "(", "'func must be callable'", ")", "if", "not", "python", ".", "callable", ...
A decorator that facilitates authentication per method. Setting C{expose_request} to C{True} will set the underlying request object (if there is one), usually HTTP and set it to the first argument of the authenticating callable. If there is no request object, the default is C{None}. @raise TypeError: C{func} and authenticator must be callable.
[ "A", "decorator", "that", "facilitates", "authentication", "per", "method", ".", "Setting", "C", "{", "expose_request", "}", "to", "C", "{", "True", "}", "will", "set", "the", "underlying", "request", "object", "(", "if", "there", "is", "one", ")", "usuall...
python
train
sci-bots/svg-model
svg_model/connections.py
https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/connections.py#L122-L141
def get_adjacency_matrix(df_connected): ''' Return matrix where $a_{i,j} = 1$ indicates polygon $i$ is connected to polygon $j$. Also, return mapping (and reverse mapping) from original keys in `df_connected` to zero-based integer index used for matrix rows and columns. ''' sorted_path_keys = np.sort(np.unique(df_connected[['source', 'target']] .values.ravel())) indexed_paths = pd.Series(sorted_path_keys) path_indexes = pd.Series(indexed_paths.index, index=sorted_path_keys) adjacency_matrix = np.zeros((path_indexes.shape[0], ) * 2, dtype=int) for i_key, j_key in df_connected[['source', 'target']].values: i, j = path_indexes.loc[[i_key, j_key]] adjacency_matrix[i, j] = 1 adjacency_matrix[j, i] = 1 return adjacency_matrix, indexed_paths, path_indexes
[ "def", "get_adjacency_matrix", "(", "df_connected", ")", ":", "sorted_path_keys", "=", "np", ".", "sort", "(", "np", ".", "unique", "(", "df_connected", "[", "[", "'source'", ",", "'target'", "]", "]", ".", "values", ".", "ravel", "(", ")", ")", ")", "...
Return matrix where $a_{i,j} = 1$ indicates polygon $i$ is connected to polygon $j$. Also, return mapping (and reverse mapping) from original keys in `df_connected` to zero-based integer index used for matrix rows and columns.
[ "Return", "matrix", "where", "$a_", "{", "i", "j", "}", "=", "1$", "indicates", "polygon", "$i$", "is", "connected", "to", "polygon", "$j$", "." ]
python
train
scarface-4711/denonavr
denonavr/denonavr.py
https://github.com/scarface-4711/denonavr/blob/59a136e27b43cb1d1e140cf67705087b3aa377cd/denonavr/denonavr.py#L1569-L1582
def previous_track(self): """Send previous track command to receiver command via HTTP post.""" # Use previous track button only for sources which support NETAUDIO if self._input_func in self._netaudio_func_list: body = {"cmd0": "PutNetAudioCommand/CurUp", "cmd1": "aspMainZone_WebUpdateStatus/", "ZoneName": "MAIN ZONE"} try: return bool(self.send_post_command( self._urls.command_netaudio_post, body)) except requests.exceptions.RequestException: _LOGGER.error( "Connection error: previous track command not sent.") return False
[ "def", "previous_track", "(", "self", ")", ":", "# Use previous track button only for sources which support NETAUDIO", "if", "self", ".", "_input_func", "in", "self", ".", "_netaudio_func_list", ":", "body", "=", "{", "\"cmd0\"", ":", "\"PutNetAudioCommand/CurUp\"", ",", ...
Send previous track command to receiver command via HTTP post.
[ "Send", "previous", "track", "command", "to", "receiver", "command", "via", "HTTP", "post", "." ]
python
train
numenta/htmresearch
projects/union_path_integration/noise_experiments/noise_simulation.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/union_path_integration/noise_experiments/noise_simulation.py#L97-L201
def doExperiment(cellDimensions, cellCoordinateOffsets, numObjects, featuresPerObject, objectWidth, numFeatures, useTrace, noiseFactor, moduleNoiseFactor, anchoringMethod="narrowing", randomLocation=False, threshold=16): """ Learn a set of objects. Then try to recognize each object. Output an interactive visualization. @param cellDimensions (pair) The cell dimensions of each module @param cellCoordinateOffsets (sequence) The "cellCoordinateOffsets" parameter for each module """ if not os.path.exists("traces"): os.makedirs("traces") features = generateFeatures(numFeatures) objects = generateObjects(numObjects, featuresPerObject, objectWidth, features) locationConfigs = [] scale = 5*cellDimensions[0] # One cell is about a quarter of a feature numModules = 20 perModRange = float(90.0 / float(numModules)) if anchoringMethod == "corners": cellCoordinateOffsets = (.0001, .5, .9999) if anchoringMethod == "discrete": cellCoordinateOffsets = (.5,) for i in xrange(numModules): orientation = float(i) * perModRange locationConfigs.append({ "cellDimensions": cellDimensions, "moduleMapDimensions": (scale, scale), "orientation": np.radians(orientation), "cellCoordinateOffsets": cellCoordinateOffsets, "activationThreshold": 8, "initialPermanence": 1.0, "connectedPermanence": 0.5, "learningThreshold": 8, "sampleSize": 20, "permanenceIncrement": 0.1, "permanenceDecrement": 0.0, "anchoringMethod": anchoringMethod, }) l4Overrides = { "initialPermanence": 1.0, "activationThreshold": threshold, "reducedBasalThreshold": threshold, "minThreshold": threshold, "sampleSize": numModules, "cellsPerColumn": 16, } column = PIUNCorticalColumn(locationConfigs, L4Overrides=l4Overrides) exp = PIUNExperiment(column, featureNames=features, numActiveMinicolumns=10, noiseFactor=noiseFactor, moduleNoiseFactor=moduleNoiseFactor) for objectDescription in objects: exp.learnObject(objectDescription, randomLocation=randomLocation, useNoise = False) print 'Learned object {}'.format(objectDescription["name"]) 
filename = "traces/{}-points-{}-cells-{}-objects-{}-feats-{}-random.html".format( len(cellCoordinateOffsets)**2, np.prod(cellDimensions), numObjects, numFeatures, randomLocation) convergence = collections.defaultdict(int) if useTrace: with io.open(filename, "w", encoding="utf8") as fileOut: with trace(fileOut, exp, includeSynapses=False): print "Logging to", filename for objectDescription in objects: steps = exp.inferObjectWithRandomMovements(objectDescription, randomLocation=randomLocation) convergence[steps] += 1 if steps is None: print 'Failed to infer object "{}"'.format(objectDescription["name"]) else: print 'Inferred object {} after {} steps'.format(objectDescription["name"], steps) else: for objectDescription in objects: steps = exp.inferObjectWithRandomMovements(objectDescription, randomLocation=randomLocation) convergence[steps] += 1 if steps is None: print 'Failed to infer object "{}"'.format(objectDescription["name"]) else: print 'Inferred object {} after {} steps'.format(objectDescription["name"], steps) for step, num in sorted(convergence.iteritems()): print "{}: {}".format(step, num) return(convergence)
[ "def", "doExperiment", "(", "cellDimensions", ",", "cellCoordinateOffsets", ",", "numObjects", ",", "featuresPerObject", ",", "objectWidth", ",", "numFeatures", ",", "useTrace", ",", "noiseFactor", ",", "moduleNoiseFactor", ",", "anchoringMethod", "=", "\"narrowing\"", ...
Learn a set of objects. Then try to recognize each object. Output an interactive visualization. @param cellDimensions (pair) The cell dimensions of each module @param cellCoordinateOffsets (sequence) The "cellCoordinateOffsets" parameter for each module
[ "Learn", "a", "set", "of", "objects", ".", "Then", "try", "to", "recognize", "each", "object", ".", "Output", "an", "interactive", "visualization", "." ]
python
train
IdentityPython/SATOSA
src/satosa/backends/github.py
https://github.com/IdentityPython/SATOSA/blob/49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb/src/satosa/backends/github.py#L48-L71
def start_auth(self, context, internal_request, get_state=stateID): """ :param get_state: Generates a state to be used in authentication call :type get_state: Callable[[str, bytes], str] :type context: satosa.context.Context :type internal_request: satosa.internal.InternalData :rtype satosa.response.Redirect """ oauth_state = get_state(self.config["base_url"], rndstr().encode()) context.state[self.name] = dict(state=oauth_state) request_args = dict( client_id=self.config['client_config']['client_id'], redirect_uri=self.redirect_url, state=oauth_state, allow_signup=self.config.get('allow_signup', False)) scope = ' '.join(self.config['scope']) if scope: request_args['scope'] = scope cis = self.consumer.construct_AuthorizationRequest( request_args=request_args) return Redirect(cis.request(self.consumer.authorization_endpoint))
[ "def", "start_auth", "(", "self", ",", "context", ",", "internal_request", ",", "get_state", "=", "stateID", ")", ":", "oauth_state", "=", "get_state", "(", "self", ".", "config", "[", "\"base_url\"", "]", ",", "rndstr", "(", ")", ".", "encode", "(", ")"...
:param get_state: Generates a state to be used in authentication call :type get_state: Callable[[str, bytes], str] :type context: satosa.context.Context :type internal_request: satosa.internal.InternalData :rtype satosa.response.Redirect
[ ":", "param", "get_state", ":", "Generates", "a", "state", "to", "be", "used", "in", "authentication", "call" ]
python
train
kiwiz/gkeepapi
gkeepapi/node.py
https://github.com/kiwiz/gkeepapi/blob/78aaae8b988b1cf616e3973f7f15d4c6d5e996cc/gkeepapi/node.py#L966-L972
def trashed(self): """Get the trashed state. Returns: bool: Whether this item is trashed. """ return self.timestamps.trashed is not None and self.timestamps.trashed > NodeTimestamps.int_to_dt(0)
[ "def", "trashed", "(", "self", ")", ":", "return", "self", ".", "timestamps", ".", "trashed", "is", "not", "None", "and", "self", ".", "timestamps", ".", "trashed", ">", "NodeTimestamps", ".", "int_to_dt", "(", "0", ")" ]
Get the trashed state. Returns: bool: Whether this item is trashed.
[ "Get", "the", "trashed", "state", "." ]
python
train
fastai/fastai
fastai/torch_core.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/torch_core.py#L353-L355
def rand_bool(p:float, size:Optional[List[int]]=None)->BoolOrTensor: "Draw 1 or shape=`size` random booleans (`True` occuring with probability `p`)." return uniform(0,1,size)<p
[ "def", "rand_bool", "(", "p", ":", "float", ",", "size", ":", "Optional", "[", "List", "[", "int", "]", "]", "=", "None", ")", "->", "BoolOrTensor", ":", "return", "uniform", "(", "0", ",", "1", ",", "size", ")", "<", "p" ]
Draw 1 or shape=`size` random booleans (`True` occuring with probability `p`).
[ "Draw", "1", "or", "shape", "=", "size", "random", "booleans", "(", "True", "occuring", "with", "probability", "p", ")", "." ]
python
train
sashahart/cookies
cookies.py
https://github.com/sashahart/cookies/blob/ab8185e06f221eaf65305f15e05852393723ac95/cookies.py#L794-L804
def validate(self, name, value): """Validate a cookie attribute with an appropriate validator. The value comes in already parsed (for example, an expires value should be a datetime). Called automatically when an attribute value is set. """ validator = self.attribute_validators.get(name, None) if validator: return True if validator(value) else False return True
[ "def", "validate", "(", "self", ",", "name", ",", "value", ")", ":", "validator", "=", "self", ".", "attribute_validators", ".", "get", "(", "name", ",", "None", ")", "if", "validator", ":", "return", "True", "if", "validator", "(", "value", ")", "else...
Validate a cookie attribute with an appropriate validator. The value comes in already parsed (for example, an expires value should be a datetime). Called automatically when an attribute value is set.
[ "Validate", "a", "cookie", "attribute", "with", "an", "appropriate", "validator", "." ]
python
train
crunchyroll/ef-open
efopen/ef_utils.py
https://github.com/crunchyroll/ef-open/blob/59fff3761af07a59f8f1c1682f2be004bdac15f7/efopen/ef_utils.py#L197-L236
def create_aws_clients(region, profile, *clients): """ Create boto3 clients for one or more AWS services. These are the services used within the libs: cloudformation, cloudfront, ec2, iam, lambda, route53, waf Args: region: the region in which to create clients that are region-specific (all but IAM) profile: Name of profile (in .aws/credentials). Pass the value None if using instance credentials on EC2 or Lambda clients: names of the clients to create (lowercase, must match what boto3 expects) Returns: A dictionary of <key>,<value> pairs for several AWS services, using the labels above as keys, e.g.: { "cloudfront": <cloudfront_client>, ... } Dictionary contains an extra record, "SESSION" - pointing to the session that created the clients """ if not profile: profile = None client_key = (region, profile) aws_clients = client_cache.get(client_key, {}) requested_clients = set(clients) new_clients = requested_clients.difference(aws_clients) if not new_clients: return aws_clients session = aws_clients.get("SESSION") try: if not session: session = boto3.Session(region_name=region, profile_name=profile) aws_clients["SESSION"] = session # build clients client_dict = {c: session.client(c) for c in new_clients} # append the session itself in case it's needed by the client code - can't get it from the clients themselves aws_clients.update(client_dict) # add the created clients to the cache client_cache[client_key] = aws_clients return aws_clients except ClientError as error: raise RuntimeError("Exception logging in with Session() and creating clients", error)
[ "def", "create_aws_clients", "(", "region", ",", "profile", ",", "*", "clients", ")", ":", "if", "not", "profile", ":", "profile", "=", "None", "client_key", "=", "(", "region", ",", "profile", ")", "aws_clients", "=", "client_cache", ".", "get", "(", "c...
Create boto3 clients for one or more AWS services. These are the services used within the libs: cloudformation, cloudfront, ec2, iam, lambda, route53, waf Args: region: the region in which to create clients that are region-specific (all but IAM) profile: Name of profile (in .aws/credentials). Pass the value None if using instance credentials on EC2 or Lambda clients: names of the clients to create (lowercase, must match what boto3 expects) Returns: A dictionary of <key>,<value> pairs for several AWS services, using the labels above as keys, e.g.: { "cloudfront": <cloudfront_client>, ... } Dictionary contains an extra record, "SESSION" - pointing to the session that created the clients
[ "Create", "boto3", "clients", "for", "one", "or", "more", "AWS", "services", ".", "These", "are", "the", "services", "used", "within", "the", "libs", ":", "cloudformation", "cloudfront", "ec2", "iam", "lambda", "route53", "waf", "Args", ":", "region", ":", ...
python
train
benspaulding/django-faq
faq/models.py
https://github.com/benspaulding/django-faq/blob/9a744e7c1943fd05bfa42c84b2ce003367c58e6e/faq/models.py#L14-L45
def _field_lookups(model, status=None): """ Abstraction of field lookups for managers. Returns a dictionary of field lookups for a queryset. The lookups will always filter by site. Optionally, if ``status`` is passed to the function the objects will also be filtered by the given status. This function saves from having to make two different on-site and published Managers each for `Topic` and `Question`, and having to move Managers out of the `FAQBase` model and into each of the `Topic` and `Question` models. """ # Import models here to avoid circular import fail. from faq.models import Topic, Question field_lookups = {} if model == Topic: field_lookups['sites__pk'] = settings.SITE_ID if model == Question: field_lookups['topic__sites__pk'] = settings.SITE_ID if status: field_lookups['topic__status'] = status # Both Topic & Question have a status field. if status: field_lookups['status'] = status return field_lookups
[ "def", "_field_lookups", "(", "model", ",", "status", "=", "None", ")", ":", "# Import models here to avoid circular import fail.", "from", "faq", ".", "models", "import", "Topic", ",", "Question", "field_lookups", "=", "{", "}", "if", "model", "==", "Topic", ":...
Abstraction of field lookups for managers. Returns a dictionary of field lookups for a queryset. The lookups will always filter by site. Optionally, if ``status`` is passed to the function the objects will also be filtered by the given status. This function saves from having to make two different on-site and published Managers each for `Topic` and `Question`, and having to move Managers out of the `FAQBase` model and into each of the `Topic` and `Question` models.
[ "Abstraction", "of", "field", "lookups", "for", "managers", "." ]
python
train
mfcloud/python-zvm-sdk
smtLayer/ReqHandle.py
https://github.com/mfcloud/python-zvm-sdk/blob/de9994ceca764f5460ce51bd74237986341d8e3c/smtLayer/ReqHandle.py#L299-L317
def printSysLog(self, logString): """ Log one or more lines. Optionally, add them to logEntries list. Input: Strings to be logged. """ if zvmsdklog.LOGGER.getloglevel() <= logging.DEBUG: # print log only when debug is enabled if self.daemon == '': self.logger.debug(self.requestId + ": " + logString) else: self.daemon.logger.debug(self.requestId + ": " + logString) if self.captureLogs is True: self.results['logEntries'].append(self.requestId + ": " + logString) return
[ "def", "printSysLog", "(", "self", ",", "logString", ")", ":", "if", "zvmsdklog", ".", "LOGGER", ".", "getloglevel", "(", ")", "<=", "logging", ".", "DEBUG", ":", "# print log only when debug is enabled", "if", "self", ".", "daemon", "==", "''", ":", "self",...
Log one or more lines. Optionally, add them to logEntries list. Input: Strings to be logged.
[ "Log", "one", "or", "more", "lines", ".", "Optionally", "add", "them", "to", "logEntries", "list", "." ]
python
train
gem/oq-engine
openquake/baselib/slots.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/baselib/slots.py#L23-L58
def with_slots(cls): """ Decorator for a class with _slots_. It automatically defines the methods __eq__, __ne__, assert_equal. """ def _compare(self, other): for slot in self.__class__._slots_: attr = operator.attrgetter(slot) source = attr(self) target = attr(other) if isinstance(source, numpy.ndarray): eq = numpy.array_equal(source, target) elif hasattr(source, '_slots_'): source.assert_equal(target) eq = True else: eq = source == target yield slot, source, target, eq def __eq__(self, other): return all(eq for slot, source, target, eq in _compare(self, other)) def __ne__(self, other): return not self.__eq__(other) def assert_equal(self, other, ignore=()): for slot, source, target, eq in _compare(self, other): if not eq and slot not in ignore: raise AssertionError('slot %s: %s is different from %s' % (slot, source, target)) cls._slots_ # raise an AttributeError for missing slots cls.__eq__ = __eq__ cls.__ne__ = __ne__ cls.assert_equal = assert_equal return cls
[ "def", "with_slots", "(", "cls", ")", ":", "def", "_compare", "(", "self", ",", "other", ")", ":", "for", "slot", "in", "self", ".", "__class__", ".", "_slots_", ":", "attr", "=", "operator", ".", "attrgetter", "(", "slot", ")", "source", "=", "attr"...
Decorator for a class with _slots_. It automatically defines the methods __eq__, __ne__, assert_equal.
[ "Decorator", "for", "a", "class", "with", "_slots_", ".", "It", "automatically", "defines", "the", "methods", "__eq__", "__ne__", "assert_equal", "." ]
python
train
kata198/indexedredis
IndexedRedis/fields/foreign.py
https://github.com/kata198/indexedredis/blob/f9c85adcf5218dac25acb06eedc63fc2950816fa/IndexedRedis/fields/foreign.py#L204-L223
def getObj(self): ''' getObj - @see ForeignLinkData.getObj Except this always returns a list ''' if self.obj: needPks = [ (i, self.pk[i]) for i in range(len(self.obj)) if self.obj[i] is None] if not needPks: return self.obj fetched = list(self.foreignModel.objects.getMultiple([needPk[1] for needPk in needPks])) i = 0 for objIdx, pk in needPks: self.obj[objIdx] = fetched[i] i += 1 return self.obj
[ "def", "getObj", "(", "self", ")", ":", "if", "self", ".", "obj", ":", "needPks", "=", "[", "(", "i", ",", "self", ".", "pk", "[", "i", "]", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "obj", ")", ")", "if", "self", ".", ...
getObj - @see ForeignLinkData.getObj Except this always returns a list
[ "getObj", "-", "@see", "ForeignLinkData", ".", "getObj" ]
python
valid
log2timeline/dfvfs
examples/recursive_hasher.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/examples/recursive_hasher.py#L104-L127
def _GetDisplayPath(self, path_spec, full_path, data_stream_name): """Retrieves a path to display. Args: path_spec (dfvfs.PathSpec): path specification of the file entry. full_path (str): full path of the file entry. data_stream_name (str): name of the data stream. Returns: str: path to display. """ display_path = '' if path_spec.HasParent(): parent_path_spec = path_spec.parent if parent_path_spec and parent_path_spec.type_indicator == ( dfvfs_definitions.TYPE_INDICATOR_TSK_PARTITION): display_path = ''.join([display_path, parent_path_spec.location]) display_path = ''.join([display_path, full_path]) if data_stream_name: display_path = ':'.join([display_path, data_stream_name]) return display_path
[ "def", "_GetDisplayPath", "(", "self", ",", "path_spec", ",", "full_path", ",", "data_stream_name", ")", ":", "display_path", "=", "''", "if", "path_spec", ".", "HasParent", "(", ")", ":", "parent_path_spec", "=", "path_spec", ".", "parent", "if", "parent_path...
Retrieves a path to display. Args: path_spec (dfvfs.PathSpec): path specification of the file entry. full_path (str): full path of the file entry. data_stream_name (str): name of the data stream. Returns: str: path to display.
[ "Retrieves", "a", "path", "to", "display", "." ]
python
train
python-wink/python-wink
src/pywink/devices/air_conditioner.py
https://github.com/python-wink/python-wink/blob/cf8bdce8c6518f30b91b23aa7aa32e89c2ce48da/src/pywink/devices/air_conditioner.py#L45-L56
def set_schedule_enabled(self, state): """ :param state: a boolean True (on) or False (off) :return: nothing """ desired_state = {"schedule_enabled": state} response = self.api_interface.set_device_state(self, { "desired_state": desired_state }) self._update_state_from_response(response)
[ "def", "set_schedule_enabled", "(", "self", ",", "state", ")", ":", "desired_state", "=", "{", "\"schedule_enabled\"", ":", "state", "}", "response", "=", "self", ".", "api_interface", ".", "set_device_state", "(", "self", ",", "{", "\"desired_state\"", ":", "...
:param state: a boolean True (on) or False (off) :return: nothing
[ ":", "param", "state", ":", "a", "boolean", "True", "(", "on", ")", "or", "False", "(", "off", ")", ":", "return", ":", "nothing" ]
python
train
randomdude999/rule_n
rule_n.py
https://github.com/randomdude999/rule_n/blob/4d8d72e71a9f1eaacb193d5b4383fba9f8cf67a6/rule_n.py#L73-L88
def _process_cell(i, state, finite=False): """Process 3 cells and return a value from 0 to 7. """ op_1 = state[i - 1] op_2 = state[i] if i == len(state) - 1: if finite: op_3 = state[0] else: op_3 = 0 else: op_3 = state[i + 1] result = 0 for i, val in enumerate([op_3, op_2, op_1]): if val: result += 2**i return result
[ "def", "_process_cell", "(", "i", ",", "state", ",", "finite", "=", "False", ")", ":", "op_1", "=", "state", "[", "i", "-", "1", "]", "op_2", "=", "state", "[", "i", "]", "if", "i", "==", "len", "(", "state", ")", "-", "1", ":", "if", "finite...
Process 3 cells and return a value from 0 to 7.
[ "Process", "3", "cells", "and", "return", "a", "value", "from", "0", "to", "7", "." ]
python
train
saltstack/salt
salt/modules/snapper.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/snapper.py#L350-L421
def create_snapshot(config='root', snapshot_type='single', pre_number=None, description=None, cleanup_algorithm='number', userdata=None, **kwargs): ''' Creates an snapshot config Configuration name. snapshot_type Specifies the type of the new snapshot. Possible values are single, pre and post. pre_number For post snapshots the number of the pre snapshot must be provided. description Description for the snapshot. If not given, the salt job will be used. cleanup_algorithm Set the cleanup algorithm for the snapshot. number Deletes old snapshots when a certain number of snapshots is reached. timeline Deletes old snapshots but keeps a number of hourly, daily, weekly, monthly and yearly snapshots. empty-pre-post Deletes pre/post snapshot pairs with empty diffs. userdata Set userdata for the snapshot (key-value pairs). Returns the number of the created snapshot. CLI example: .. code-block:: bash salt '*' snapper.create_snapshot ''' if not userdata: userdata = {} jid = kwargs.get('__pub_jid') if description is None and jid is not None: description = 'salt job {0}'.format(jid) if jid is not None: userdata['salt_jid'] = jid new_nr = None try: if snapshot_type == 'single': new_nr = snapper.CreateSingleSnapshot(config, description, cleanup_algorithm, userdata) elif snapshot_type == 'pre': new_nr = snapper.CreatePreSnapshot(config, description, cleanup_algorithm, userdata) elif snapshot_type == 'post': if pre_number is None: raise CommandExecutionError( "pre snapshot number 'pre_number' needs to be" "specified for snapshots of the 'post' type") new_nr = snapper.CreatePostSnapshot(config, pre_number, description, cleanup_algorithm, userdata) else: raise CommandExecutionError( "Invalid snapshot type '{0}'".format(snapshot_type)) except dbus.DBusException as exc: raise CommandExecutionError( 'Error encountered while listing changed files: {0}' .format(_dbus_exception_to_reason(exc, locals())) ) return new_nr
[ "def", "create_snapshot", "(", "config", "=", "'root'", ",", "snapshot_type", "=", "'single'", ",", "pre_number", "=", "None", ",", "description", "=", "None", ",", "cleanup_algorithm", "=", "'number'", ",", "userdata", "=", "None", ",", "*", "*", "kwargs", ...
Creates an snapshot config Configuration name. snapshot_type Specifies the type of the new snapshot. Possible values are single, pre and post. pre_number For post snapshots the number of the pre snapshot must be provided. description Description for the snapshot. If not given, the salt job will be used. cleanup_algorithm Set the cleanup algorithm for the snapshot. number Deletes old snapshots when a certain number of snapshots is reached. timeline Deletes old snapshots but keeps a number of hourly, daily, weekly, monthly and yearly snapshots. empty-pre-post Deletes pre/post snapshot pairs with empty diffs. userdata Set userdata for the snapshot (key-value pairs). Returns the number of the created snapshot. CLI example: .. code-block:: bash salt '*' snapper.create_snapshot
[ "Creates", "an", "snapshot" ]
python
train
pywbem/pywbem
pywbem_mock/_resolvermixin.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem_mock/_resolvermixin.py#L129-L147
def _init_qualifier(qualifier, qual_repo): """ Initialize the flavors of a qualifier from the qualifier repo and initialize propagated. """ qual_dict_entry = qual_repo[qualifier.name] qualifier.propagated = False if qualifier.tosubclass is None: if qual_dict_entry.tosubclass is None: qualifier.tosubclass = True else: qualifier.tosubclass = qual_dict_entry.tosubclass if qualifier.overridable is None: if qual_dict_entry.overridable is None: qualifier.overridable = True else: qualifier.overridable = qual_dict_entry.overridable if qualifier.translatable is None: qualifier.translatable = qual_dict_entry.translatable
[ "def", "_init_qualifier", "(", "qualifier", ",", "qual_repo", ")", ":", "qual_dict_entry", "=", "qual_repo", "[", "qualifier", ".", "name", "]", "qualifier", ".", "propagated", "=", "False", "if", "qualifier", ".", "tosubclass", "is", "None", ":", "if", "qua...
Initialize the flavors of a qualifier from the qualifier repo and initialize propagated.
[ "Initialize", "the", "flavors", "of", "a", "qualifier", "from", "the", "qualifier", "repo", "and", "initialize", "propagated", "." ]
python
train
laymonage/kbbi-python
kbbi/kbbi.py
https://github.com/laymonage/kbbi-python/blob/1a52ba8bcc6dc4c5c1215f9e00207aca264287d6/kbbi/kbbi.py#L275-L287
def serialisasi(self): """Mengembalikan hasil serialisasi objek Makna ini. :returns: Dictionary hasil serialisasi :rtype: dict """ return { "kelas": self.kelas, "submakna": self.submakna, "info": self.info, "contoh": self.contoh }
[ "def", "serialisasi", "(", "self", ")", ":", "return", "{", "\"kelas\"", ":", "self", ".", "kelas", ",", "\"submakna\"", ":", "self", ".", "submakna", ",", "\"info\"", ":", "self", ".", "info", ",", "\"contoh\"", ":", "self", ".", "contoh", "}" ]
Mengembalikan hasil serialisasi objek Makna ini. :returns: Dictionary hasil serialisasi :rtype: dict
[ "Mengembalikan", "hasil", "serialisasi", "objek", "Makna", "ini", "." ]
python
train
rix0rrr/gcl
gcl/ast_util.py
https://github.com/rix0rrr/gcl/blob/4e3bccc978a9c60aaaffd20f6f291c4d23775cdf/gcl/ast_util.py#L141-L148
def is_identifier_position(rootpath): """Return whether the cursor is in identifier-position in a member declaration.""" if len(rootpath) >= 2 and is_tuple_member_node(rootpath[-2]) and is_identifier(rootpath[-1]): return True if len(rootpath) >= 1 and is_tuple_node(rootpath[-1]): # No deeper node than tuple? Must be identifier position, otherwise we'd have a TupleMemberNode. return True return False
[ "def", "is_identifier_position", "(", "rootpath", ")", ":", "if", "len", "(", "rootpath", ")", ">=", "2", "and", "is_tuple_member_node", "(", "rootpath", "[", "-", "2", "]", ")", "and", "is_identifier", "(", "rootpath", "[", "-", "1", "]", ")", ":", "r...
Return whether the cursor is in identifier-position in a member declaration.
[ "Return", "whether", "the", "cursor", "is", "in", "identifier", "-", "position", "in", "a", "member", "declaration", "." ]
python
train
Microsoft/nni
examples/trials/mnist-batch-tune-keras/mnist-keras.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/examples/trials/mnist-batch-tune-keras/mnist-keras.py#L39-L60
def create_mnist_model(hyper_params, input_shape=(H, W, 1), num_classes=NUM_CLASSES): ''' Create simple convolutional model ''' layers = [ Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape), Conv2D(64, (3, 3), activation='relu'), MaxPooling2D(pool_size=(2, 2)), Flatten(), Dense(100, activation='relu'), Dense(num_classes, activation='softmax') ] model = Sequential(layers) if hyper_params['optimizer'] == 'Adam': optimizer = keras.optimizers.Adam(lr=hyper_params['learning_rate']) else: optimizer = keras.optimizers.SGD(lr=hyper_params['learning_rate'], momentum=0.9) model.compile(loss=keras.losses.categorical_crossentropy, optimizer=optimizer, metrics=['accuracy']) return model
[ "def", "create_mnist_model", "(", "hyper_params", ",", "input_shape", "=", "(", "H", ",", "W", ",", "1", ")", ",", "num_classes", "=", "NUM_CLASSES", ")", ":", "layers", "=", "[", "Conv2D", "(", "32", ",", "kernel_size", "=", "(", "3", ",", "3", ")",...
Create simple convolutional model
[ "Create", "simple", "convolutional", "model" ]
python
train
unfoldingWord-dev/python-gogs-client
gogs_client/interface.py
https://github.com/unfoldingWord-dev/python-gogs-client/blob/b7f27f4995abf914c0db8a424760f5b27331939d/gogs_client/interface.py#L33-L45
def authenticated_user(self, auth): """ Returns the user authenticated by ``auth`` :param auth.Authentication auth: authentication for user to retrieve :return: user authenticated by the provided authentication :rtype: GogsUser :raises NetworkFailure: if there is an error communicating with the server :raises ApiFailure: if the request cannot be serviced """ response = self.get("/user", auth=auth) return GogsUser.from_json(response.json())
[ "def", "authenticated_user", "(", "self", ",", "auth", ")", ":", "response", "=", "self", ".", "get", "(", "\"/user\"", ",", "auth", "=", "auth", ")", "return", "GogsUser", ".", "from_json", "(", "response", ".", "json", "(", ")", ")" ]
Returns the user authenticated by ``auth`` :param auth.Authentication auth: authentication for user to retrieve :return: user authenticated by the provided authentication :rtype: GogsUser :raises NetworkFailure: if there is an error communicating with the server :raises ApiFailure: if the request cannot be serviced
[ "Returns", "the", "user", "authenticated", "by", "auth" ]
python
train
openstack/horizon
openstack_dashboard/api/neutron.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/api/neutron.py#L921-L1006
def trunk_update(request, trunk_id, old_trunk, new_trunk): """Handle update to a trunk in (at most) three neutron calls. The JavaScript side should know only about the old and new state of a trunk. However it should not know anything about how the old and new are meant to be diffed and sent to neutron. We handle that here. This code was adapted from Heat, see: https://review.opendev.org/442496 Call #1) Update all changed properties but 'sub_ports'. PUT /v2.0/trunks/TRUNK_ID openstack network trunk set Call #2) Delete subports not needed anymore. PUT /v2.0/trunks/TRUNK_ID/remove_subports openstack network trunk unset --subport Call #3) Create new subports. PUT /v2.0/trunks/TRUNK_ID/add_subports openstack network trunk set --subport A single neutron port cannot be two subports at the same time (ie. have two segmentation (type, ID)s on the same trunk or to belong to two trunks). Therefore we have to delete old subports before creating new ones to avoid conflicts. """ LOG.debug("trunk_update(): trunk_id=%s", trunk_id) # NOTE(bence romsics): We want to do set operations on the subports, # however we receive subports represented as dicts. In Python # mutable objects like dicts are not hashable so they cannot be # inserted into sets. So we convert subport dicts to (immutable) # frozensets in order to do the set operations. def dict2frozenset(d): """Convert a dict to a frozenset. Create an immutable equivalent of a dict, so it's hashable therefore can be used as an element of a set or a key of another dictionary. """ return frozenset(d.items()) # cf. 
neutron_lib/api/definitions/trunk.py updatable_props = ('admin_state_up', 'description', 'name') prop_diff = { k: new_trunk[k] for k in updatable_props if old_trunk[k] != new_trunk[k]} subports_old = {dict2frozenset(d): d for d in old_trunk.get('sub_ports', [])} subports_new = {dict2frozenset(d): d for d in new_trunk.get('sub_ports', [])} old_set = set(subports_old.keys()) new_set = set(subports_new.keys()) delete = old_set - new_set create = new_set - old_set dicts_delete = [subports_old[fs] for fs in delete] dicts_create = [subports_new[fs] for fs in create] trunk = old_trunk if prop_diff: LOG.debug('trunk_update(): update properties of trunk %s: %s', trunk_id, prop_diff) body = _prepare_body_update_trunk(prop_diff) trunk = neutronclient(request).update_trunk( trunk_id, body=body).get('trunk') if dicts_delete: LOG.debug('trunk_update(): delete subports of trunk %s: %s', trunk_id, dicts_delete) body = _prepare_body_remove_subports(dicts_delete) trunk = neutronclient(request).trunk_remove_subports( trunk_id, body=body) if dicts_create: LOG.debug('trunk_update(): create subports of trunk %s: %s', trunk_id, dicts_create) body = _prepare_body_add_subports(dicts_create) trunk = neutronclient(request).trunk_add_subports( trunk_id, body=body) return Trunk(trunk)
[ "def", "trunk_update", "(", "request", ",", "trunk_id", ",", "old_trunk", ",", "new_trunk", ")", ":", "LOG", ".", "debug", "(", "\"trunk_update(): trunk_id=%s\"", ",", "trunk_id", ")", "# NOTE(bence romsics): We want to do set operations on the subports,", "# however we rec...
Handle update to a trunk in (at most) three neutron calls. The JavaScript side should know only about the old and new state of a trunk. However it should not know anything about how the old and new are meant to be diffed and sent to neutron. We handle that here. This code was adapted from Heat, see: https://review.opendev.org/442496 Call #1) Update all changed properties but 'sub_ports'. PUT /v2.0/trunks/TRUNK_ID openstack network trunk set Call #2) Delete subports not needed anymore. PUT /v2.0/trunks/TRUNK_ID/remove_subports openstack network trunk unset --subport Call #3) Create new subports. PUT /v2.0/trunks/TRUNK_ID/add_subports openstack network trunk set --subport A single neutron port cannot be two subports at the same time (ie. have two segmentation (type, ID)s on the same trunk or to belong to two trunks). Therefore we have to delete old subports before creating new ones to avoid conflicts.
[ "Handle", "update", "to", "a", "trunk", "in", "(", "at", "most", ")", "three", "neutron", "calls", "." ]
python
train
Microsoft/nni
src/sdk/pynni/nni/msg_dispatcher.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/msg_dispatcher.py#L157-L165
def _handle_final_metric_data(self, data): """Call tuner to process final results """ id_ = data['parameter_id'] value = data['value'] if id_ in _customized_parameter_ids: self.tuner.receive_customized_trial_result(id_, _trial_params[id_], value) else: self.tuner.receive_trial_result(id_, _trial_params[id_], value)
[ "def", "_handle_final_metric_data", "(", "self", ",", "data", ")", ":", "id_", "=", "data", "[", "'parameter_id'", "]", "value", "=", "data", "[", "'value'", "]", "if", "id_", "in", "_customized_parameter_ids", ":", "self", ".", "tuner", ".", "receive_custom...
Call tuner to process final results
[ "Call", "tuner", "to", "process", "final", "results" ]
python
train
aliyun/aliyun-log-python-sdk
aliyun/log/logclient.py
https://github.com/aliyun/aliyun-log-python-sdk/blob/ac383db0a16abf1e5ef7df36074374184b43516e/aliyun/log/logclient.py#L1712-L1740
def list_machine_group(self, project_name, offset=0, size=100): """ list machine group names in a project Unsuccessful opertaion will cause an LogException. :type project_name: string :param project_name: the Project name :type offset: int :param offset: the offset of all group name :type size: int :param size: the max return names count, -1 means all :return: ListMachineGroupResponse :raise: LogException """ # need to use extended method to get more if int(size) == -1 or int(size) > MAX_LIST_PAGING_SIZE: return list_more(self.list_machine_group, int(offset), int(size), MAX_LIST_PAGING_SIZE, project_name) headers = {} params = {} resource = "/machinegroups" params['offset'] = str(offset) params['size'] = str(size) (resp, header) = self._send("GET", project_name, None, resource, params, headers) return ListMachineGroupResponse(resp, header)
[ "def", "list_machine_group", "(", "self", ",", "project_name", ",", "offset", "=", "0", ",", "size", "=", "100", ")", ":", "# need to use extended method to get more\r", "if", "int", "(", "size", ")", "==", "-", "1", "or", "int", "(", "size", ")", ">", "...
list machine group names in a project Unsuccessful opertaion will cause an LogException. :type project_name: string :param project_name: the Project name :type offset: int :param offset: the offset of all group name :type size: int :param size: the max return names count, -1 means all :return: ListMachineGroupResponse :raise: LogException
[ "list", "machine", "group", "names", "in", "a", "project", "Unsuccessful", "opertaion", "will", "cause", "an", "LogException", ".", ":", "type", "project_name", ":", "string", ":", "param", "project_name", ":", "the", "Project", "name", ":", "type", "offset", ...
python
train
gem/oq-engine
openquake/commonlib/oqvalidation.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commonlib/oqvalidation.py#L461-L465
def imt_dt(self, dtype=F64): """ :returns: a numpy dtype {imt: float} """ return numpy.dtype([(imt, dtype) for imt in self.imtls])
[ "def", "imt_dt", "(", "self", ",", "dtype", "=", "F64", ")", ":", "return", "numpy", ".", "dtype", "(", "[", "(", "imt", ",", "dtype", ")", "for", "imt", "in", "self", ".", "imtls", "]", ")" ]
:returns: a numpy dtype {imt: float}
[ ":", "returns", ":", "a", "numpy", "dtype", "{", "imt", ":", "float", "}" ]
python
train
OpenTreeOfLife/peyotl
peyotl/phylesystem/phylesystem_shard.py
https://github.com/OpenTreeOfLife/peyotl/blob/5e4e52a0fdbd17f490aa644ad79fda6ea2eda7c0/peyotl/phylesystem/phylesystem_shard.py#L246-L279
def _determine_next_study_id(self): """Return the numeric part of the newest study_id Checks out master branch as a side effect! """ if self._doc_counter_lock is None: self._doc_counter_lock = Lock() prefix = self._new_study_prefix lp = len(prefix) n = 0 # this function holds the lock for quite awhile, # but it only called on the first instance of # of creating a new study with self._doc_counter_lock: with self._index_lock: for k in self.study_index.keys(): if k.startswith(prefix): try: pn = int(k[lp:]) if pn > n: n = pn except: pass nsi_contents = self._read_master_branch_resource(self._id_minting_file, is_json=True) if nsi_contents: self._next_study_id = nsi_contents['next_study_id'] if self._next_study_id <= n: m = 'next_study_id in {} is set lower than the ID of an existing study!' m = m.format(self._id_minting_file) raise RuntimeError(m) else: # legacy support for repo with no next_study_id.json file self._next_study_id = n self._advance_new_study_id()
[ "def", "_determine_next_study_id", "(", "self", ")", ":", "if", "self", ".", "_doc_counter_lock", "is", "None", ":", "self", ".", "_doc_counter_lock", "=", "Lock", "(", ")", "prefix", "=", "self", ".", "_new_study_prefix", "lp", "=", "len", "(", "prefix", ...
Return the numeric part of the newest study_id Checks out master branch as a side effect!
[ "Return", "the", "numeric", "part", "of", "the", "newest", "study_id" ]
python
train
The-Politico/politico-civic-election-night
electionnight/serializers/election.py
https://github.com/The-Politico/politico-civic-election-night/blob/a8aaf5be43872a7b84d2b0d7c2b6151d32d4d8b6/electionnight/serializers/election.py#L102-L104
def get_images(self, obj): """Object of images serialized by tag name.""" return {str(i.tag): i.image.url for i in obj.images.all()}
[ "def", "get_images", "(", "self", ",", "obj", ")", ":", "return", "{", "str", "(", "i", ".", "tag", ")", ":", "i", ".", "image", ".", "url", "for", "i", "in", "obj", ".", "images", ".", "all", "(", ")", "}" ]
Object of images serialized by tag name.
[ "Object", "of", "images", "serialized", "by", "tag", "name", "." ]
python
train
nprapps/mapturner
mapturner/__init__.py
https://github.com/nprapps/mapturner/blob/fc9747c9d1584af2053bff3df229a460ef2a5f62/mapturner/__init__.py#L171-L176
def unzip_file(self, zip_path, output_path): """ Unzip a local file into a specified directory. """ with zipfile.ZipFile(zip_path, 'r') as z: z.extractall(output_path)
[ "def", "unzip_file", "(", "self", ",", "zip_path", ",", "output_path", ")", ":", "with", "zipfile", ".", "ZipFile", "(", "zip_path", ",", "'r'", ")", "as", "z", ":", "z", ".", "extractall", "(", "output_path", ")" ]
Unzip a local file into a specified directory.
[ "Unzip", "a", "local", "file", "into", "a", "specified", "directory", "." ]
python
train
MartinThoma/mpu
mpu/package/cli.py
https://github.com/MartinThoma/mpu/blob/61bc36d0192ca90c0bcf9b8a5d7d0d8520e20ff6/mpu/package/cli.py#L88-L111
def _multiple_replace(text, search_replace_dict): """ Replace multiple things at once in a text. Parameters ---------- text : str search_replace_dict : dict Returns ------- replaced_text : str Examples -------- >>> d = {'a': 'b', 'b': 'c', 'c': 'd', 'd': 'e'} >>> _multiple_replace('abcdefghijklm', d) 'bcdeefghijklm' """ # Create a regular expression from all of the dictionary keys regex = re.compile("|".join(map(re.escape, search_replace_dict.keys()))) # For each match, look up the corresponding value in the dictionary return regex.sub(lambda match: search_replace_dict[match.group(0)], text)
[ "def", "_multiple_replace", "(", "text", ",", "search_replace_dict", ")", ":", "# Create a regular expression from all of the dictionary keys", "regex", "=", "re", ".", "compile", "(", "\"|\"", ".", "join", "(", "map", "(", "re", ".", "escape", ",", "search_replace_...
Replace multiple things at once in a text. Parameters ---------- text : str search_replace_dict : dict Returns ------- replaced_text : str Examples -------- >>> d = {'a': 'b', 'b': 'c', 'c': 'd', 'd': 'e'} >>> _multiple_replace('abcdefghijklm', d) 'bcdeefghijklm'
[ "Replace", "multiple", "things", "at", "once", "in", "a", "text", "." ]
python
train
numenta/htmresearch
htmresearch/frameworks/layers/physical_objects.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/layers/physical_objects.py#L110-L123
def sampleLocationFromFeature(self, feature): """ Samples a location from the provided specific feature. In the case of a sphere, there is only one feature. """ if feature == "surface": coordinates = [random.gauss(0, 1.) for _ in xrange(self.dimension)] norm = sqrt(sum([coord ** 2 for coord in coordinates])) return [self.radius * coord / norm for coord in coordinates] elif feature == "random": return self.sampleLocation() else: raise NameError("No such feature in {}: {}".format(self, feature))
[ "def", "sampleLocationFromFeature", "(", "self", ",", "feature", ")", ":", "if", "feature", "==", "\"surface\"", ":", "coordinates", "=", "[", "random", ".", "gauss", "(", "0", ",", "1.", ")", "for", "_", "in", "xrange", "(", "self", ".", "dimension", ...
Samples a location from the provided specific feature. In the case of a sphere, there is only one feature.
[ "Samples", "a", "location", "from", "the", "provided", "specific", "feature", "." ]
python
train
ensime/ensime-vim
ensime_shared/ensime.py
https://github.com/ensime/ensime-vim/blob/caa734e84f002b25446c615706283a74edd4ecfe/ensime_shared/ensime.py#L110-L126
def create_client(self, config_path): """Create an :class:`EnsimeClient` for a project, given its config file path. This will launch the ENSIME server for the project as a side effect. """ config = ProjectConfig(config_path) editor = Editor(self._vim) launcher = EnsimeLauncher(self._vim, config) if self.using_server_v2: client = EnsimeClientV2(editor, launcher) else: client = EnsimeClientV1(editor, launcher) self._create_ticker() return client
[ "def", "create_client", "(", "self", ",", "config_path", ")", ":", "config", "=", "ProjectConfig", "(", "config_path", ")", "editor", "=", "Editor", "(", "self", ".", "_vim", ")", "launcher", "=", "EnsimeLauncher", "(", "self", ".", "_vim", ",", "config", ...
Create an :class:`EnsimeClient` for a project, given its config file path. This will launch the ENSIME server for the project as a side effect.
[ "Create", "an", ":", "class", ":", "EnsimeClient", "for", "a", "project", "given", "its", "config", "file", "path", "." ]
python
train
ChrisBeaumont/soupy
soupy.py
https://github.com/ChrisBeaumont/soupy/blob/795f2f61f711f574d5218fc8a3375d02bda1104f/soupy.py#L1264-L1291
def either(*funcs): """ A utility function for selecting the first non-null query. Parameters: funcs: One or more functions Returns: A function that, when called with a :class:`Node`, will pass the input to each `func`, and return the first non-Falsey result. Examples: >>> s = Soupy("<p>hi</p>") >>> s.apply(either(Q.find('a'), Q.find('p').text)) Scalar('hi') """ def either(val): for func in funcs: result = val.apply(func) if result: return result return Null() return either
[ "def", "either", "(", "*", "funcs", ")", ":", "def", "either", "(", "val", ")", ":", "for", "func", "in", "funcs", ":", "result", "=", "val", ".", "apply", "(", "func", ")", "if", "result", ":", "return", "result", "return", "Null", "(", ")", "re...
A utility function for selecting the first non-null query. Parameters: funcs: One or more functions Returns: A function that, when called with a :class:`Node`, will pass the input to each `func`, and return the first non-Falsey result. Examples: >>> s = Soupy("<p>hi</p>") >>> s.apply(either(Q.find('a'), Q.find('p').text)) Scalar('hi')
[ "A", "utility", "function", "for", "selecting", "the", "first", "non", "-", "null", "query", "." ]
python
test
tophatmonocle/ims_lti_py
ims_lti_py/tool_config.py
https://github.com/tophatmonocle/ims_lti_py/blob/979244d83c2e6420d2c1941f58e52f641c56ad12/ims_lti_py/tool_config.py#L98-L103
def get_ext_param(self, ext_key, param_key): ''' Get specific param in set of provided extension parameters. ''' return self.extensions[ext_key][param_key] if self.extensions[ext_key]\ else None
[ "def", "get_ext_param", "(", "self", ",", "ext_key", ",", "param_key", ")", ":", "return", "self", ".", "extensions", "[", "ext_key", "]", "[", "param_key", "]", "if", "self", ".", "extensions", "[", "ext_key", "]", "else", "None" ]
Get specific param in set of provided extension parameters.
[ "Get", "specific", "param", "in", "set", "of", "provided", "extension", "parameters", "." ]
python
train
05bit/peewee-async
peewee_async.py
https://github.com/05bit/peewee-async/blob/d15f4629da1d9975da4ec37306188e68d288c862/peewee_async.py#L286-L292
async def scalar(self, query, as_tuple=False): """Get single value from ``select()`` query, i.e. for aggregation. :return: result is the same as after sync ``query.scalar()`` call """ query = self._swap_database(query) return (await scalar(query, as_tuple=as_tuple))
[ "async", "def", "scalar", "(", "self", ",", "query", ",", "as_tuple", "=", "False", ")", ":", "query", "=", "self", ".", "_swap_database", "(", "query", ")", "return", "(", "await", "scalar", "(", "query", ",", "as_tuple", "=", "as_tuple", ")", ")" ]
Get single value from ``select()`` query, i.e. for aggregation. :return: result is the same as after sync ``query.scalar()`` call
[ "Get", "single", "value", "from", "select", "()", "query", "i", ".", "e", ".", "for", "aggregation", "." ]
python
train
unionbilling/union-python
union/models.py
https://github.com/unionbilling/union-python/blob/551e4fc1a0b395b632781d80527a3660a7c67c0c/union/models.py#L72-L77
def delete(cls, id): ''' Destroy a Union object ''' client = cls._new_api_client() return client.make_request(cls, 'delete', url_params={'id': id})
[ "def", "delete", "(", "cls", ",", "id", ")", ":", "client", "=", "cls", ".", "_new_api_client", "(", ")", "return", "client", ".", "make_request", "(", "cls", ",", "'delete'", ",", "url_params", "=", "{", "'id'", ":", "id", "}", ")" ]
Destroy a Union object
[ "Destroy", "a", "Union", "object" ]
python
train
icometrix/dicom2nifti
scripts/dicomdiff.py
https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/scripts/dicomdiff.py#L14-L32
def dicom_diff(file1, file2): """ Shows the fields that differ between two DICOM images. Inspired by https://code.google.com/p/pydicom/source/browse/source/dicom/examples/DicomDiff.py """ datasets = compressed_dicom.read_file(file1), compressed_dicom.read_file(file2) rep = [] for dataset in datasets: lines = (str(dataset.file_meta)+"\n"+str(dataset)).split('\n') lines = [line + '\n' for line in lines] # add the newline to the end rep.append(lines) diff = difflib.Differ() for line in diff.compare(rep[0], rep[1]): if (line[0] == '+') or (line[0] == '-'): sys.stdout.write(line)
[ "def", "dicom_diff", "(", "file1", ",", "file2", ")", ":", "datasets", "=", "compressed_dicom", ".", "read_file", "(", "file1", ")", ",", "compressed_dicom", ".", "read_file", "(", "file2", ")", "rep", "=", "[", "]", "for", "dataset", "in", "datasets", "...
Shows the fields that differ between two DICOM images. Inspired by https://code.google.com/p/pydicom/source/browse/source/dicom/examples/DicomDiff.py
[ "Shows", "the", "fields", "that", "differ", "between", "two", "DICOM", "images", "." ]
python
train
consbio/ncdjango
ncdjango/interfaces/data/classify.py
https://github.com/consbio/ncdjango/blob/f807bfd1e4083ab29fbc3c4d4418be108383a710/ncdjango/interfaces/data/classify.py#L134-L144
def equal(data, num_breaks): """ Calculate equal interval breaks. Arguments: data -- Array of values to classify. num_breaks -- Number of breaks to perform. """ step = (numpy.amax(data) - numpy.amin(data)) / num_breaks return numpy.linspace(numpy.amin(data) + step, numpy.amax(data), num_breaks)
[ "def", "equal", "(", "data", ",", "num_breaks", ")", ":", "step", "=", "(", "numpy", ".", "amax", "(", "data", ")", "-", "numpy", ".", "amin", "(", "data", ")", ")", "/", "num_breaks", "return", "numpy", ".", "linspace", "(", "numpy", ".", "amin", ...
Calculate equal interval breaks. Arguments: data -- Array of values to classify. num_breaks -- Number of breaks to perform.
[ "Calculate", "equal", "interval", "breaks", "." ]
python
train
Kozea/pygal
pygal/graph/base.py
https://github.com/Kozea/pygal/blob/5e25c98a59a0642eecd9fcc5dbfeeb2190fbb5e7/pygal/graph/base.py#L177-L221
def setup(self, **kwargs): """Set up the transient state prior rendering""" # Keep labels in case of map if getattr(self, 'x_labels', None) is not None: self.x_labels = list(self.x_labels) if getattr(self, 'y_labels', None) is not None: self.y_labels = list(self.y_labels) self.state = State(self, **kwargs) if isinstance(self.style, type): self.style = self.style() self.series = self.prepare_values([ rs for rs in self.raw_series if not rs[1].get('secondary') ]) or [] self.secondary_series = self.prepare_values([ rs for rs in self.raw_series if rs[1].get('secondary') ], len(self.series)) or [] self.horizontal = getattr(self, 'horizontal', False) self.svg = Svg(self) self._x_labels = None self._y_labels = None self._x_2nd_labels = None self._y_2nd_labels = None self.nodes = {} self.margin_box = Margin( self.margin_top or self.margin, self.margin_right or self.margin, self.margin_bottom or self.margin, self.margin_left or self.margin ) self._box = Box() self.view = None if self.logarithmic and self.zero == 0: # Explicit min to avoid interpolation dependency positive_values = list( filter( lambda x: x > 0, [ val[1] or 1 if self._dual else val for serie in self.series for val in serie.safe_values ] ) ) self.zero = min(positive_values or (1, )) or 1 if self._len < 3: self.interpolate = None self._draw() self.svg.pre_render()
[ "def", "setup", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# Keep labels in case of map", "if", "getattr", "(", "self", ",", "'x_labels'", ",", "None", ")", "is", "not", "None", ":", "self", ".", "x_labels", "=", "list", "(", "self", ".", "x_labe...
Set up the transient state prior rendering
[ "Set", "up", "the", "transient", "state", "prior", "rendering" ]
python
train
jazzband/sorl-thumbnail
sorl/thumbnail/helpers.py
https://github.com/jazzband/sorl-thumbnail/blob/22ccd9781462a820f963f57018ad3dcef85053ed/sorl/thumbnail/helpers.py#L27-L39
def toint(number): """ Helper to return rounded int for a float or just the int it self. """ if isinstance(number, float): if number > 1: number = round(number, 0) else: # The following solves when image has small dimensions (like 1x54) # then scale factor 1 * 0.296296 and `number` will store `0` # that will later raise ZeroDivisionError. number = round(math.ceil(number), 0) return int(number)
[ "def", "toint", "(", "number", ")", ":", "if", "isinstance", "(", "number", ",", "float", ")", ":", "if", "number", ">", "1", ":", "number", "=", "round", "(", "number", ",", "0", ")", "else", ":", "# The following solves when image has small dimensions (lik...
Helper to return rounded int for a float or just the int it self.
[ "Helper", "to", "return", "rounded", "int", "for", "a", "float", "or", "just", "the", "int", "it", "self", "." ]
python
train
kisom/pypcapfile
pcapfile/protocols/linklayer/wifi.py
https://github.com/kisom/pypcapfile/blob/67520cfbb6c2e9ab3e7c181a8012ddc56ec5cad8/pcapfile/protocols/linklayer/wifi.py#L791-L811
def strip_flags(self, idx): """strip(1 byte) radiotap.flags :idx: int :return: int idx :return: collections.namedtuple """ flags = collections.namedtuple( 'flags', ['cfp', 'preamble', 'wep', 'fragmentation', 'fcs', 'datapad', 'badfcs', 'shortgi']) val, = struct.unpack_from('<B', self._rtap, idx) bits = format(val, '08b')[::-1] flags.cfp = int(bits[0]) flags.preamble = int(bits[1]) flags.wep = int(bits[2]) flags.fragmentation = int(bits[3]) flags.fcs = int(bits[4]) flags.datapad = int(bits[5]) flags.badfcs = int(bits[6]) flags.shortgi = int(bits[7]) return idx + 1, flags
[ "def", "strip_flags", "(", "self", ",", "idx", ")", ":", "flags", "=", "collections", ".", "namedtuple", "(", "'flags'", ",", "[", "'cfp'", ",", "'preamble'", ",", "'wep'", ",", "'fragmentation'", ",", "'fcs'", ",", "'datapad'", ",", "'badfcs'", ",", "'s...
strip(1 byte) radiotap.flags :idx: int :return: int idx :return: collections.namedtuple
[ "strip", "(", "1", "byte", ")", "radiotap", ".", "flags", ":", "idx", ":", "int", ":", "return", ":", "int", "idx", ":", "return", ":", "collections", ".", "namedtuple" ]
python
valid
tensorflow/probability
tensorflow_probability/python/distributions/hidden_markov_model.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/distributions/hidden_markov_model.py#L914-L917
def _log_matrix_vector(ms, vs): """Multiply tensor of matrices by vectors assuming values stored are logs.""" return tf.reduce_logsumexp(input_tensor=ms + vs[..., tf.newaxis, :], axis=-1)
[ "def", "_log_matrix_vector", "(", "ms", ",", "vs", ")", ":", "return", "tf", ".", "reduce_logsumexp", "(", "input_tensor", "=", "ms", "+", "vs", "[", "...", ",", "tf", ".", "newaxis", ",", ":", "]", ",", "axis", "=", "-", "1", ")" ]
Multiply tensor of matrices by vectors assuming values stored are logs.
[ "Multiply", "tensor", "of", "matrices", "by", "vectors", "assuming", "values", "stored", "are", "logs", "." ]
python
test
kentik/kentikapi-py
kentikapi/v5/tagging.py
https://github.com/kentik/kentikapi-py/blob/aa94c0b7eaf88409818b97967d7293e309e11bab/kentikapi/v5/tagging.py#L283-L304
def add_tcp_flag(self, tcp_flag): """Add a single TCP flag - will be OR'd into the existing bitmask""" if tcp_flag not in [1, 2, 4, 8, 16, 32, 64, 128]: raise ValueError("Invalid TCP flag. Valid: [1, 2, 4, 8, 16,32, 64, 128]") prev_size = 0 if self._json_dict.get('tcp_flags') is None: self._json_dict['tcp_flags'] = 0 else: prev_size = len(str(self._json_dict['tcp_flags'])) + len('tcp_flags') + 3 # str, key, key quotes, colon self._json_dict['tcp_flags'] |= tcp_flag # update size new_size = len(str(self._json_dict['tcp_flags'])) + len('tcp_flags') + 3 # str, key, key quotes, colon self._size += new_size - prev_size if prev_size == 0 and self._has_field: # add the comma and space self._size += 2 self._has_field = True
[ "def", "add_tcp_flag", "(", "self", ",", "tcp_flag", ")", ":", "if", "tcp_flag", "not", "in", "[", "1", ",", "2", ",", "4", ",", "8", ",", "16", ",", "32", ",", "64", ",", "128", "]", ":", "raise", "ValueError", "(", "\"Invalid TCP flag. Valid: [1, 2...
Add a single TCP flag - will be OR'd into the existing bitmask
[ "Add", "a", "single", "TCP", "flag", "-", "will", "be", "OR", "d", "into", "the", "existing", "bitmask" ]
python
train
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/mox.py#L331-L347
def _Verify(self): """Verify that all of the expected calls have been made. Raises: ExpectedMethodCallsError: if there are still more method calls in the expected queue. """ # If the list of expected calls is not empty, raise an exception if self._expected_calls_queue: # The last MultipleTimesGroup is not popped from the queue. if (len(self._expected_calls_queue) == 1 and isinstance(self._expected_calls_queue[0], MultipleTimesGroup) and self._expected_calls_queue[0].IsSatisfied()): pass else: raise ExpectedMethodCallsError(self._expected_calls_queue)
[ "def", "_Verify", "(", "self", ")", ":", "# If the list of expected calls is not empty, raise an exception", "if", "self", ".", "_expected_calls_queue", ":", "# The last MultipleTimesGroup is not popped from the queue.", "if", "(", "len", "(", "self", ".", "_expected_calls_queu...
Verify that all of the expected calls have been made. Raises: ExpectedMethodCallsError: if there are still more method calls in the expected queue.
[ "Verify", "that", "all", "of", "the", "expected", "calls", "have", "been", "made", "." ]
python
train
mayfield/shellish
shellish/command/supplement.py
https://github.com/mayfield/shellish/blob/df0f0e4612d138c34d8cb99b66ab5b8e47f1414a/shellish/command/supplement.py#L351-L361
def add_subparsers(self, prog=None, **kwargs): """ Supplement a proper `prog` keyword argument for the subprocessor. The superclass technique for getting the `prog` value breaks because of our VT100 escape codes injected by `format_help`. """ if prog is None: # Use a non-shellish help formatter to avoid vt100 codes. f = argparse.HelpFormatter(prog=self.prog) f.add_usage(self.usage, self._get_positional_actions(), self._mutually_exclusive_groups, '') prog = f.format_help().strip() return super().add_subparsers(prog=prog, **kwargs)
[ "def", "add_subparsers", "(", "self", ",", "prog", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "prog", "is", "None", ":", "# Use a non-shellish help formatter to avoid vt100 codes.", "f", "=", "argparse", ".", "HelpFormatter", "(", "prog", "=", "self...
Supplement a proper `prog` keyword argument for the subprocessor. The superclass technique for getting the `prog` value breaks because of our VT100 escape codes injected by `format_help`.
[ "Supplement", "a", "proper", "prog", "keyword", "argument", "for", "the", "subprocessor", ".", "The", "superclass", "technique", "for", "getting", "the", "prog", "value", "breaks", "because", "of", "our", "VT100", "escape", "codes", "injected", "by", "format_hel...
python
train
sergiocorreia/panflute
panflute/io.py
https://github.com/sergiocorreia/panflute/blob/65c2d570c26a190deb600cab5e2ad8a828a3302e/panflute/io.py#L187-L249
def run_filters(actions, prepare=None, finalize=None, input_stream=None, output_stream=None, doc=None, **kwargs): """ Receive a Pandoc document from the input stream (default is stdin), walk through it applying the functions in *actions* to each element, and write it back to the output stream (default is stdout). Notes: - It receives and writes the Pandoc documents as JSON--encoded strings; this is done through the :func:`.load` and :func:`.dump` functions. - It walks through the document once for every function in *actions*, so the actions are applied sequentially. - By default, it will read from stdin and write to stdout, but these can be modified. - It can also apply functions to the entire document at the beginning and end; this allows for global operations on the document. - If ``doc`` is a :class:`.Doc` instead of ``None``, ``run_filters`` will return the document instead of writing it to the output stream. :param actions: sequence of functions; each function takes (element, doc) as argument, so a valid header would be ``def action(elem, doc):`` :type actions: [:class:`function`] :param prepare: function executed at the beginning; right after the document is received and parsed :type prepare: :class:`function` :param finalize: function executed at the end; right before the document is converted back to JSON and written to stdout. 
:type finalize: :class:`function` :param input_stream: text stream used as input (default is :data:`sys.stdin`) :param output_stream: text stream used as output (default is :data:`sys.stdout`) :param doc: ``None`` unless running panflute as a filter, in which case this will be a :class:`.Doc` element :type doc: ``None`` | :class:`.Doc` :param \*kwargs: keyword arguments will be passed through to the *action* functions (so they can actually receive more than just two arguments (*element* and *doc*) """ load_and_dump = (doc is None) if load_and_dump: doc = load(input_stream=input_stream) if prepare is not None: prepare(doc) for action in actions: if kwargs: action = partial(action, **kwargs) doc = doc.walk(action, doc) if finalize is not None: finalize(doc) if load_and_dump: dump(doc, output_stream=output_stream) else: return(doc)
[ "def", "run_filters", "(", "actions", ",", "prepare", "=", "None", ",", "finalize", "=", "None", ",", "input_stream", "=", "None", ",", "output_stream", "=", "None", ",", "doc", "=", "None", ",", "*", "*", "kwargs", ")", ":", "load_and_dump", "=", "(",...
Receive a Pandoc document from the input stream (default is stdin), walk through it applying the functions in *actions* to each element, and write it back to the output stream (default is stdout). Notes: - It receives and writes the Pandoc documents as JSON--encoded strings; this is done through the :func:`.load` and :func:`.dump` functions. - It walks through the document once for every function in *actions*, so the actions are applied sequentially. - By default, it will read from stdin and write to stdout, but these can be modified. - It can also apply functions to the entire document at the beginning and end; this allows for global operations on the document. - If ``doc`` is a :class:`.Doc` instead of ``None``, ``run_filters`` will return the document instead of writing it to the output stream. :param actions: sequence of functions; each function takes (element, doc) as argument, so a valid header would be ``def action(elem, doc):`` :type actions: [:class:`function`] :param prepare: function executed at the beginning; right after the document is received and parsed :type prepare: :class:`function` :param finalize: function executed at the end; right before the document is converted back to JSON and written to stdout. :type finalize: :class:`function` :param input_stream: text stream used as input (default is :data:`sys.stdin`) :param output_stream: text stream used as output (default is :data:`sys.stdout`) :param doc: ``None`` unless running panflute as a filter, in which case this will be a :class:`.Doc` element :type doc: ``None`` | :class:`.Doc` :param \*kwargs: keyword arguments will be passed through to the *action* functions (so they can actually receive more than just two arguments (*element* and *doc*)
[ "Receive", "a", "Pandoc", "document", "from", "the", "input", "stream", "(", "default", "is", "stdin", ")", "walk", "through", "it", "applying", "the", "functions", "in", "*", "actions", "*", "to", "each", "element", "and", "write", "it", "back", "to", "...
python
train
micha030201/aionationstates
aionationstates/region_.py
https://github.com/micha030201/aionationstates/blob/dc86b86d994cbab830b69ab8023601c73e778b3a/aionationstates/region_.py#L451-L466
async def officers(self, root): """Regional Officers. Does not include the Founder or the Delegate, unless they have additional titles as Officers. In the correct order. Returns ------- an :class:`ApiQuery` of a list of :class:`Officer` """ officers = sorted( root.find('OFFICERS'), # I struggle to say what else this tag would be useful for. key=lambda elem: int(elem.find('ORDER').text) ) return [Officer(elem) for elem in officers]
[ "async", "def", "officers", "(", "self", ",", "root", ")", ":", "officers", "=", "sorted", "(", "root", ".", "find", "(", "'OFFICERS'", ")", ",", "# I struggle to say what else this tag would be useful for.", "key", "=", "lambda", "elem", ":", "int", "(", "ele...
Regional Officers. Does not include the Founder or the Delegate, unless they have additional titles as Officers. In the correct order. Returns ------- an :class:`ApiQuery` of a list of :class:`Officer`
[ "Regional", "Officers", ".", "Does", "not", "include", "the", "Founder", "or", "the", "Delegate", "unless", "they", "have", "additional", "titles", "as", "Officers", "." ]
python
train
elastic/elasticsearch-py
elasticsearch/client/xpack/security.py
https://github.com/elastic/elasticsearch-py/blob/2aab285c8f506f3863cbdaba3c90a685c510ba00/elasticsearch/client/xpack/security.py#L299-L309
def invalidate_api_key(self, body, params=None): """ `<https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-invalidate-api-key.html>`_ :arg body: The api key request to invalidate API key(s) """ if body in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument 'body'.") return self.transport.perform_request( "DELETE", "/_security/api_key", params=params, body=body )
[ "def", "invalidate_api_key", "(", "self", ",", "body", ",", "params", "=", "None", ")", ":", "if", "body", "in", "SKIP_IN_PATH", ":", "raise", "ValueError", "(", "\"Empty value passed for a required argument 'body'.\"", ")", "return", "self", ".", "transport", "."...
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-invalidate-api-key.html>`_ :arg body: The api key request to invalidate API key(s)
[ "<https", ":", "//", "www", ".", "elastic", ".", "co", "/", "guide", "/", "en", "/", "elasticsearch", "/", "reference", "/", "current", "/", "security", "-", "api", "-", "invalidate", "-", "api", "-", "key", ".", "html", ">", "_" ]
python
train
ga4gh/ga4gh-server
ga4gh/server/datamodel/__init__.py
https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/datamodel/__init__.py#L207-L247
def parse(cls, compoundIdStr): """ Parses the specified compoundId string and returns an instance of this CompoundId class. :raises: An ObjectWithIdNotFoundException if parsing fails. This is because this method is a client-facing method, and if a malformed identifier (under our internal rules) is provided, the response should be that the identifier does not exist. """ if not isinstance(compoundIdStr, basestring): raise exceptions.BadIdentifierException(compoundIdStr) try: deobfuscated = cls.deobfuscate(compoundIdStr) except TypeError: # When a string that cannot be converted to base64 is passed # as an argument, b64decode raises a TypeError. We must treat # this as an ID not found error. raise exceptions.ObjectWithIdNotFoundException(compoundIdStr) try: encodedSplits = cls.split(deobfuscated) splits = [cls.decode(split) for split in encodedSplits] except (UnicodeDecodeError, ValueError): # Sometimes base64 decoding succeeds but we're left with # unicode gibberish. This is also and IdNotFound. raise exceptions.ObjectWithIdNotFoundException(compoundIdStr) # pull the differentiator out of the splits before instantiating # the class, if the differentiator exists fieldsLength = len(cls.fields) if cls.differentiator is not None: differentiatorIndex = cls.fields.index( cls.differentiatorFieldName) if differentiatorIndex < len(splits): del splits[differentiatorIndex] else: raise exceptions.ObjectWithIdNotFoundException( compoundIdStr) fieldsLength -= 1 if len(splits) != fieldsLength: raise exceptions.ObjectWithIdNotFoundException(compoundIdStr) return cls(None, *splits)
[ "def", "parse", "(", "cls", ",", "compoundIdStr", ")", ":", "if", "not", "isinstance", "(", "compoundIdStr", ",", "basestring", ")", ":", "raise", "exceptions", ".", "BadIdentifierException", "(", "compoundIdStr", ")", "try", ":", "deobfuscated", "=", "cls", ...
Parses the specified compoundId string and returns an instance of this CompoundId class. :raises: An ObjectWithIdNotFoundException if parsing fails. This is because this method is a client-facing method, and if a malformed identifier (under our internal rules) is provided, the response should be that the identifier does not exist.
[ "Parses", "the", "specified", "compoundId", "string", "and", "returns", "an", "instance", "of", "this", "CompoundId", "class", "." ]
python
train
openearth/bmi-python
bmi/wrapper.py
https://github.com/openearth/bmi-python/blob/2f53f24d45515eb0711c2d28ddd6c1582045248f/bmi/wrapper.py#L562-L570
def get_time_step(self): """ returns current time step of simulation """ time_step = c_double() self.library.get_time_step.argtypes = [POINTER(c_double)] self.library.get_time_step.restype = None self.library.get_time_step(byref(time_step)) return time_step.value
[ "def", "get_time_step", "(", "self", ")", ":", "time_step", "=", "c_double", "(", ")", "self", ".", "library", ".", "get_time_step", ".", "argtypes", "=", "[", "POINTER", "(", "c_double", ")", "]", "self", ".", "library", ".", "get_time_step", ".", "rest...
returns current time step of simulation
[ "returns", "current", "time", "step", "of", "simulation" ]
python
train
eandersson/amqpstorm
examples/scalable_consumer.py
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/examples/scalable_consumer.py#L80-L101
def _create_connection(self): """Create a connection. :return: """ attempts = 0 while True: attempts += 1 if self._stopped.is_set(): break try: self._connection = Connection(self.hostname, self.username, self.password) break except amqpstorm.AMQPError as why: LOGGER.warning(why) if self.max_retries and attempts > self.max_retries: raise Exception('max number of retries reached') time.sleep(min(attempts * 2, 30)) except KeyboardInterrupt: break
[ "def", "_create_connection", "(", "self", ")", ":", "attempts", "=", "0", "while", "True", ":", "attempts", "+=", "1", "if", "self", ".", "_stopped", ".", "is_set", "(", ")", ":", "break", "try", ":", "self", ".", "_connection", "=", "Connection", "(",...
Create a connection. :return:
[ "Create", "a", "connection", "." ]
python
train
tornadoweb/tornado
tornado/netutil.py
https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/netutil.py#L591-L614
def ssl_wrap_socket( socket: socket.socket, ssl_options: Union[Dict[str, Any], ssl.SSLContext], server_hostname: str = None, **kwargs: Any ) -> ssl.SSLSocket: """Returns an ``ssl.SSLSocket`` wrapping the given socket. ``ssl_options`` may be either an `ssl.SSLContext` object or a dictionary (as accepted by `ssl_options_to_context`). Additional keyword arguments are passed to ``wrap_socket`` (either the `~ssl.SSLContext` method or the `ssl` module function as appropriate). """ context = ssl_options_to_context(ssl_options) if ssl.HAS_SNI: # In python 3.4, wrap_socket only accepts the server_hostname # argument if HAS_SNI is true. # TODO: add a unittest (python added server-side SNI support in 3.4) # In the meantime it can be manually tested with # python3 -m tornado.httpclient https://sni.velox.ch return context.wrap_socket(socket, server_hostname=server_hostname, **kwargs) else: return context.wrap_socket(socket, **kwargs)
[ "def", "ssl_wrap_socket", "(", "socket", ":", "socket", ".", "socket", ",", "ssl_options", ":", "Union", "[", "Dict", "[", "str", ",", "Any", "]", ",", "ssl", ".", "SSLContext", "]", ",", "server_hostname", ":", "str", "=", "None", ",", "*", "*", "kw...
Returns an ``ssl.SSLSocket`` wrapping the given socket. ``ssl_options`` may be either an `ssl.SSLContext` object or a dictionary (as accepted by `ssl_options_to_context`). Additional keyword arguments are passed to ``wrap_socket`` (either the `~ssl.SSLContext` method or the `ssl` module function as appropriate).
[ "Returns", "an", "ssl", ".", "SSLSocket", "wrapping", "the", "given", "socket", "." ]
python
train
gem/oq-engine
openquake/hazardlib/gsim/frankel_1996.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/frankel_1996.py#L129-L152
def _compute_mean(self, imt, mag, rhypo): """ Compute mean value from lookup table. Lookup table defines log10(IMT) (in g) for combinations of Mw and log10(rhypo) values. ``mag`` is therefore converted from Mblg to Mw using Atkinson and Boore 1987 conversion equation. Mean value is finally converted from base 10 to base e. """ mag = np.zeros_like(rhypo) + self._convert_magnitude(mag) # to avoid run time warning in case rhypo is zero set minimum distance # to 10, which is anyhow the minimum distance allowed by the tables rhypo[rhypo < 10] = 10 rhypo = np.log10(rhypo) # create lookup table and interpolate it at magnitude/distance values table = RectBivariateSpline( self.MAGS, self.DISTS, self.IMTS_TABLES[imt].T ) mean = table.ev(mag, rhypo) # convert mean from base 10 to base e return mean * np.log(10)
[ "def", "_compute_mean", "(", "self", ",", "imt", ",", "mag", ",", "rhypo", ")", ":", "mag", "=", "np", ".", "zeros_like", "(", "rhypo", ")", "+", "self", ".", "_convert_magnitude", "(", "mag", ")", "# to avoid run time warning in case rhypo is zero set minimum d...
Compute mean value from lookup table. Lookup table defines log10(IMT) (in g) for combinations of Mw and log10(rhypo) values. ``mag`` is therefore converted from Mblg to Mw using Atkinson and Boore 1987 conversion equation. Mean value is finally converted from base 10 to base e.
[ "Compute", "mean", "value", "from", "lookup", "table", "." ]
python
train
mikedh/trimesh
trimesh/collision.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/collision.py#L206-L223
def remove_object(self, name): """ Delete an object from the collision manager. Parameters ---------- name : str The identifier for the object """ if name in self._objs: self._manager.unregisterObject(self._objs[name]['obj']) self._manager.update(self._objs[name]['obj']) # remove objects from _objs geom_id = id(self._objs.pop(name)['geom']) # remove names self._names.pop(geom_id) else: raise ValueError('{} not in collision manager!'.format(name))
[ "def", "remove_object", "(", "self", ",", "name", ")", ":", "if", "name", "in", "self", ".", "_objs", ":", "self", ".", "_manager", ".", "unregisterObject", "(", "self", ".", "_objs", "[", "name", "]", "[", "'obj'", "]", ")", "self", ".", "_manager",...
Delete an object from the collision manager. Parameters ---------- name : str The identifier for the object
[ "Delete", "an", "object", "from", "the", "collision", "manager", "." ]
python
train
elsampsa/valkka-live
valkka/live/tools.py
https://github.com/elsampsa/valkka-live/blob/218bb2ecf71c516c85b1b6e075454bba13090cd8/valkka/live/tools.py#L125-L160
def getH264V4l2(verbose=False): """Find all V4l2 cameras with H264 encoding, and returns a list of tuples with .. (device file, device name), e.g. ("/dev/video2", "HD Pro Webcam C920 (/dev/video2)") """ import glob from subprocess import Popen, PIPE cams=[] for device in glob.glob("/sys/class/video4linux/*"): devname=device.split("/")[-1] devfile=os.path.join("/dev",devname) lis=("v4l2-ctl --list-formats -d "+devfile).split() p = Popen(lis, stdout=PIPE, stderr=PIPE) # p.communicate() # print(dir(p)) # print(p.returncode) # print(p.stderr.read().decode("utf-8")) st = p.stdout.read().decode("utf-8") # print(st) if (st.lower().find("h264")>-1): namefile=os.path.join(device, "name") # print(namefile) f=open(namefile, "r"); name=f.read(); f.close() fullname = name.strip() + " ("+devname+")" cams.append((devfile, fullname)) if (verbose): for cam in cams: print(cam) return cams
[ "def", "getH264V4l2", "(", "verbose", "=", "False", ")", ":", "import", "glob", "from", "subprocess", "import", "Popen", ",", "PIPE", "cams", "=", "[", "]", "for", "device", "in", "glob", ".", "glob", "(", "\"/sys/class/video4linux/*\"", ")", ":", "devname...
Find all V4l2 cameras with H264 encoding, and returns a list of tuples with .. (device file, device name), e.g. ("/dev/video2", "HD Pro Webcam C920 (/dev/video2)")
[ "Find", "all", "V4l2", "cameras", "with", "H264", "encoding", "and", "returns", "a", "list", "of", "tuples", "with", "..", "(", "device", "file", "device", "name", ")", "e", ".", "g", ".", "(", "/", "dev", "/", "video2", "HD", "Pro", "Webcam", "C920"...
python
train
tensorflow/tensor2tensor
tensor2tensor/layers/common_attention.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_attention.py#L456-L492
def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, start_index=0): """Adds a bunch of sinusoids of different frequencies to a Tensor. Each channel of the input Tensor is incremented by a sinusoid of a different frequency and phase. This allows attention to learn to use absolute and relative positions. Timing signals should be added to some precursors of both the query and the memory inputs to attention. The use of relative position is possible because sin(x+y) and cos(x+y) can be expressed in terms of y, sin(x) and cos(x). In particular, we use a geometric sequence of timescales starting with min_timescale and ending with max_timescale. The number of different timescales is equal to channels / 2. For each timescale, we generate the two sinusoidal signals sin(timestep/timescale) and cos(timestep/timescale). All of these sinusoids are concatenated in the channels dimension. Args: x: a Tensor with shape [batch, length, channels] min_timescale: a float max_timescale: a float start_index: index of first position Returns: a Tensor the same shape as x. """ length = common_layers.shape_list(x)[1] channels = common_layers.shape_list(x)[2] signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale, start_index) return x + common_layers.cast_like(signal, x)
[ "def", "add_timing_signal_1d", "(", "x", ",", "min_timescale", "=", "1.0", ",", "max_timescale", "=", "1.0e4", ",", "start_index", "=", "0", ")", ":", "length", "=", "common_layers", ".", "shape_list", "(", "x", ")", "[", "1", "]", "channels", "=", "comm...
Adds a bunch of sinusoids of different frequencies to a Tensor. Each channel of the input Tensor is incremented by a sinusoid of a different frequency and phase. This allows attention to learn to use absolute and relative positions. Timing signals should be added to some precursors of both the query and the memory inputs to attention. The use of relative position is possible because sin(x+y) and cos(x+y) can be expressed in terms of y, sin(x) and cos(x). In particular, we use a geometric sequence of timescales starting with min_timescale and ending with max_timescale. The number of different timescales is equal to channels / 2. For each timescale, we generate the two sinusoidal signals sin(timestep/timescale) and cos(timestep/timescale). All of these sinusoids are concatenated in the channels dimension. Args: x: a Tensor with shape [batch, length, channels] min_timescale: a float max_timescale: a float start_index: index of first position Returns: a Tensor the same shape as x.
[ "Adds", "a", "bunch", "of", "sinusoids", "of", "different", "frequencies", "to", "a", "Tensor", "." ]
python
train
hyperledger/indy-sdk
wrappers/python/indy/anoncreds.py
https://github.com/hyperledger/indy-sdk/blob/55240dc170308d7883c48f03f308130a6d077be6/wrappers/python/indy/anoncreds.py#L501-L564
async def prover_create_credential_req(wallet_handle: int, prover_did: str, cred_offer_json: str, cred_def_json: str, master_secret_id: str) -> (str, str): """ Creates a clam request for the given credential offer. The method creates a blinded master secret for a master secret identified by a provided name. The master secret identified by the name must be already stored in the secure wallet (see prover_create_master_secret) The blinded master secret is a part of the credential request. :param wallet_handle: wallet handler (created by open_wallet). :param prover_did: a DID of the prover :param cred_offer_json: credential offer as a json containing information about the issuer and a credential :param cred_def_json: credential definition json related to <cred_def_id> in <cred_offer_json> :param master_secret_id: the id of the master secret stored in the wallet :return: cred_req_json: Credential request json for creation of credential by Issuer { "prover_did" : string, "cred_def_id" : string, // Fields below can depend on Cred Def type "blinded_ms" : <blinded_master_secret>, "blinded_ms_correctness_proof" : <blinded_ms_correctness_proof>, "nonce": string } cred_req_metadata_json: Credential request metadata json for processing of received form Issuer credential. Note: cred_req_metadata_json mustn't be shared with Issuer. 
""" logger = logging.getLogger(__name__) logger.debug("prover_create_credential_req: >>> wallet_handle: %r, prover_did: %r, cred_offer_json: %r," " cred_def_json: %r, master_secret_id: %r", wallet_handle, prover_did, cred_offer_json, cred_def_json, master_secret_id) if not hasattr(prover_create_credential_req, "cb"): logger.debug("prover_create_credential_req: Creating callback") prover_create_credential_req.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p, c_char_p)) c_wallet_handle = c_int32(wallet_handle) c_prover_did = c_char_p(prover_did.encode('utf-8')) c_cred_offer_json = c_char_p(cred_offer_json.encode('utf-8')) c_cred_def_json = c_char_p(cred_def_json.encode('utf-8')) c_master_secret_id = c_char_p(master_secret_id.encode('utf-8')) (credential_req_json, credential_req_metadata_json) = await do_call('indy_prover_create_credential_req', c_wallet_handle, c_prover_did, c_cred_offer_json, c_cred_def_json, c_master_secret_id, prover_create_credential_req.cb) credential_req_json = credential_req_json.decode() credential_req_metadata_json = credential_req_metadata_json.decode() res = (credential_req_json, credential_req_metadata_json) logger.debug("prover_create_credential_req: <<< res: %r", res) return res
[ "async", "def", "prover_create_credential_req", "(", "wallet_handle", ":", "int", ",", "prover_did", ":", "str", ",", "cred_offer_json", ":", "str", ",", "cred_def_json", ":", "str", ",", "master_secret_id", ":", "str", ")", "->", "(", "str", ",", "str", ")"...
Creates a clam request for the given credential offer. The method creates a blinded master secret for a master secret identified by a provided name. The master secret identified by the name must be already stored in the secure wallet (see prover_create_master_secret) The blinded master secret is a part of the credential request. :param wallet_handle: wallet handler (created by open_wallet). :param prover_did: a DID of the prover :param cred_offer_json: credential offer as a json containing information about the issuer and a credential :param cred_def_json: credential definition json related to <cred_def_id> in <cred_offer_json> :param master_secret_id: the id of the master secret stored in the wallet :return: cred_req_json: Credential request json for creation of credential by Issuer { "prover_did" : string, "cred_def_id" : string, // Fields below can depend on Cred Def type "blinded_ms" : <blinded_master_secret>, "blinded_ms_correctness_proof" : <blinded_ms_correctness_proof>, "nonce": string } cred_req_metadata_json: Credential request metadata json for processing of received form Issuer credential. Note: cred_req_metadata_json mustn't be shared with Issuer.
[ "Creates", "a", "clam", "request", "for", "the", "given", "credential", "offer", "." ]
python
train
deepmind/pysc2
pysc2/lib/portspicker.py
https://github.com/deepmind/pysc2/blob/df4cc4b00f07a2242be9ba153d4a7f4ad2017897/pysc2/lib/portspicker.py#L43-L59
def pick_contiguous_unused_ports( num_ports, retry_interval_secs=3, retry_attempts=5): """Reserves and returns a list of `num_ports` contiguous unused ports.""" for _ in range(retry_attempts): start_port = portpicker.pick_unused_port() if start_port is not None: ports = [start_port + p for p in range(num_ports)] if all(portpicker.is_port_free(p) for p in ports): return ports else: return_ports(ports) time.sleep(retry_interval_secs) raise RuntimeError("Unable to obtain %d contiguous unused ports." % num_ports)
[ "def", "pick_contiguous_unused_ports", "(", "num_ports", ",", "retry_interval_secs", "=", "3", ",", "retry_attempts", "=", "5", ")", ":", "for", "_", "in", "range", "(", "retry_attempts", ")", ":", "start_port", "=", "portpicker", ".", "pick_unused_port", "(", ...
Reserves and returns a list of `num_ports` contiguous unused ports.
[ "Reserves", "and", "returns", "a", "list", "of", "num_ports", "contiguous", "unused", "ports", "." ]
python
train
balloob/pychromecast
pychromecast/socket_client.py
https://github.com/balloob/pychromecast/blob/831b09c4fed185a7bffe0ea330b7849d5f4e36b6/pychromecast/socket_client.py#L586-L608
def _cleanup(self): """ Cleanup open channels and handlers """ for channel in self._open_channels: try: self.disconnect_channel(channel) except Exception: # pylint: disable=broad-except pass for handler in self._handlers.values(): try: handler.tear_down() except Exception: # pylint: disable=broad-except pass try: self.socket.close() except Exception: # pylint: disable=broad-except self.logger.exception( "[%s:%s] _cleanup", self.fn or self.host, self.port) self._report_connection_status( ConnectionStatus(CONNECTION_STATUS_DISCONNECTED, NetworkAddress(self.host, self.port))) self.connecting = True
[ "def", "_cleanup", "(", "self", ")", ":", "for", "channel", "in", "self", ".", "_open_channels", ":", "try", ":", "self", ".", "disconnect_channel", "(", "channel", ")", "except", "Exception", ":", "# pylint: disable=broad-except", "pass", "for", "handler", "i...
Cleanup open channels and handlers
[ "Cleanup", "open", "channels", "and", "handlers" ]
python
train
mar10/wsgidav
wsgidav/dav_provider.py
https://github.com/mar10/wsgidav/blob/cec0d84222fc24bea01be1cea91729001963f172/wsgidav/dav_provider.py#L718-L807
def set_property_value(self, name, value, dry_run=False): """Set a property value or remove a property. value == None means 'remove property'. Raise HTTP_FORBIDDEN if property is read-only, or not supported. When dry_run is True, this function should raise errors, as in a real run, but MUST NOT change any data. This default implementation - raises HTTP_FORBIDDEN, if trying to modify a locking property - raises HTTP_FORBIDDEN, if trying to modify an immutable {DAV:} property - handles Windows' Win32LastModifiedTime to set the getlastmodified property, if enabled - stores everything else as dead property, if a property manager is present. - raises HTTP_FORBIDDEN, else Removing a non-existing prop is NOT an error. Note: RFC 4918 states that {DAV:}displayname 'SHOULD NOT be protected' A resource provider may override this method, to update supported custom live properties. """ assert value is None or xml_tools.is_etree_element(value) if name in _lockPropertyNames: # Locking properties are always read-only raise DAVError( HTTP_FORBIDDEN, err_condition=PRECONDITION_CODE_ProtectedProperty ) # Live property config = self.environ["wsgidav.config"] # hotfixes = config.get("hotfixes", {}) mutableLiveProps = config.get("mutable_live_props", []) # Accept custom live property updates on resources if configured. if ( name.startswith("{DAV:}") and name in _standardLivePropNames and name in mutableLiveProps ): # Please note that some properties should not be mutable according # to RFC4918. This includes the 'getlastmodified' property, which # it may still make sense to make mutable in order to support time # stamp changes from e.g. utime calls or the touch or rsync -a # commands. 
if name in ("{DAV:}getlastmodified", "{DAV:}last_modified"): try: return self.set_last_modified(self.path, value.text, dry_run) except Exception: _logger.warning( "Provider does not support set_last_modified on {}.".format( self.path ) ) # Unsupported or not allowed raise DAVError(HTTP_FORBIDDEN) # Handle MS Windows Win32LastModifiedTime, if enabled. # Note that the WebDAV client in Win7 and earler has issues and can't be used # with this so we ignore older clients. Others pre-Win10 should be tested. if name.startswith("{urn:schemas-microsoft-com:}"): agent = self.environ.get("HTTP_USER_AGENT", "None") win32_emu = config.get("hotfixes", {}).get("emulate_win32_lastmod", False) if win32_emu and "MiniRedir/6.1" not in agent: if "Win32LastModifiedTime" in name: return self.set_last_modified(self.path, value.text, dry_run) elif "Win32FileAttributes" in name: return True elif "Win32CreationTime" in name: return True elif "Win32LastAccessTime" in name: return True # Dead property pm = self.provider.prop_manager if pm and not name.startswith("{DAV:}"): refUrl = self.get_ref_url() if value is None: return pm.remove_property(refUrl, name, dry_run, self.environ) else: value = etree.tostring(value) return pm.write_property(refUrl, name, value, dry_run, self.environ) raise DAVError(HTTP_FORBIDDEN)
[ "def", "set_property_value", "(", "self", ",", "name", ",", "value", ",", "dry_run", "=", "False", ")", ":", "assert", "value", "is", "None", "or", "xml_tools", ".", "is_etree_element", "(", "value", ")", "if", "name", "in", "_lockPropertyNames", ":", "# L...
Set a property value or remove a property. value == None means 'remove property'. Raise HTTP_FORBIDDEN if property is read-only, or not supported. When dry_run is True, this function should raise errors, as in a real run, but MUST NOT change any data. This default implementation - raises HTTP_FORBIDDEN, if trying to modify a locking property - raises HTTP_FORBIDDEN, if trying to modify an immutable {DAV:} property - handles Windows' Win32LastModifiedTime to set the getlastmodified property, if enabled - stores everything else as dead property, if a property manager is present. - raises HTTP_FORBIDDEN, else Removing a non-existing prop is NOT an error. Note: RFC 4918 states that {DAV:}displayname 'SHOULD NOT be protected' A resource provider may override this method, to update supported custom live properties.
[ "Set", "a", "property", "value", "or", "remove", "a", "property", "." ]
python
valid
blockstack/blockstack-core
blockstack/lib/operations/tokentransfer.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/operations/tokentransfer.py#L50-L122
def check( state_engine, token_op, block_id, checked_ops ): """ Verify that a token transfer operation is permitted. * the token feature must exist * the sender must be unlocked---i.e. able to send at this point * the sender must have enough balance of the given token to send the amount requested * the token value must be positive * the consensus hash must be valid Return True if accepted Return False if not """ epoch_features = get_epoch_features(block_id) if EPOCH_FEATURE_TOKEN_TRANSFER not in epoch_features: log.warning("Token transfers are not enabled in this epoch") return False consensus_hash = token_op['consensus_hash'] address = token_op['address'] recipient_address = token_op['recipient_address'] token_type = token_op['token_units'] token_value = token_op['token_fee'] # token value must be positive if token_value <= 0: log.warning("Zero-value token transfer from {}".format(address)) return False # can't send to ourselves if address == recipient_address: log.warning('Cannot transfer token from the account to itself ({})'.format(address)) return False # consensus hash must be valid if not state_engine.is_consensus_hash_valid(block_id, consensus_hash): log.warning('Invalid consensus hash {}'.format(consensus_hash)) return False # sender account must exist account_info = state_engine.get_account(address, token_type) if account_info is None: log.warning("No account for {} ({})".format(address, token_type)) return False # sender must not be transfer-locked if block_id < account_info['lock_transfer_block_id']: log.warning('Account {} is blocked from transferring tokens until block height {}'.format(address, account_info['lock_transfer_block_id'])) return False # sender must have enough balance of the token account_balance = state_engine.get_account_balance(account_info) if account_balance < token_value: log.warning('Account {} has {} {}; tried to send {}'.format(address, account_balance, token_type, token_value)) return False receiver_account = 
state_engine.get_account(recipient_address, token_type) if receiver_account is not None: if not receiver_account['receive_whitelisted']: log.warning('Receiver account {} is not whitelisted'.format(recipient_address)) return False log.debug("Account {} will pay {} {} to {}".format(address, token_value, token_type, recipient_address)) # will execute a debit against the sender address token_operation_put_account_payment_info(token_op, address, token_type, token_value) # will execute a credit against the receiver address token_operation_put_account_credit_info(token_op, recipient_address, token_type, token_value) # preserve token_fee as a string to prevent overflow token_op['token_fee'] = '{}'.format(token_op['token_fee']) return True
[ "def", "check", "(", "state_engine", ",", "token_op", ",", "block_id", ",", "checked_ops", ")", ":", "epoch_features", "=", "get_epoch_features", "(", "block_id", ")", "if", "EPOCH_FEATURE_TOKEN_TRANSFER", "not", "in", "epoch_features", ":", "log", ".", "warning",...
Verify that a token transfer operation is permitted. * the token feature must exist * the sender must be unlocked---i.e. able to send at this point * the sender must have enough balance of the given token to send the amount requested * the token value must be positive * the consensus hash must be valid Return True if accepted Return False if not
[ "Verify", "that", "a", "token", "transfer", "operation", "is", "permitted", ".", "*", "the", "token", "feature", "must", "exist", "*", "the", "sender", "must", "be", "unlocked", "---", "i", ".", "e", ".", "able", "to", "send", "at", "this", "point", "*...
python
train
bfontaine/crosswords
crosswords/cli.py
https://github.com/bfontaine/crosswords/blob/042b3cdd00a59d193ee559368910a8faa54565f5/crosswords/cli.py#L33-L39
def print_languages_and_exit(lst, status=1, header=True): """print a list of languages and exit""" if header: print("Available languages:") for lg in lst: print("- %s" % lg) sys.exit(status)
[ "def", "print_languages_and_exit", "(", "lst", ",", "status", "=", "1", ",", "header", "=", "True", ")", ":", "if", "header", ":", "print", "(", "\"Available languages:\"", ")", "for", "lg", "in", "lst", ":", "print", "(", "\"- %s\"", "%", "lg", ")", "...
print a list of languages and exit
[ "print", "a", "list", "of", "languages", "and", "exit" ]
python
train
arkottke/pysra
pysra/propagation.py
https://github.com/arkottke/pysra/blob/c72fd389d6c15203c0c00728ac00f101bae6369d/pysra/propagation.py#L606-L612
def _estimate_strains(self): """Estimate the strains by running an EQL site response. This step was recommended in Section 8.3.1 of Zalachoris (2014). """ eql = EquivalentLinearCalculator() eql(self._motion, self._profile, self._loc_input)
[ "def", "_estimate_strains", "(", "self", ")", ":", "eql", "=", "EquivalentLinearCalculator", "(", ")", "eql", "(", "self", ".", "_motion", ",", "self", ".", "_profile", ",", "self", ".", "_loc_input", ")" ]
Estimate the strains by running an EQL site response. This step was recommended in Section 8.3.1 of Zalachoris (2014).
[ "Estimate", "the", "strains", "by", "running", "an", "EQL", "site", "response", "." ]
python
train
oscarlazoarjona/fast
build/lib/fast/atomic_structure.py
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/build/lib/fast/atomic_structure.py#L2021-L2039
def speed_average(Temperature,element,isotope): r"""This function calculates the average speed (in meters per second) of an atom in a vapour assuming a Maxwell-Boltzmann velocity distribution. This is simply sqrt(8*k_B*T/m/pi) where k_B is Boltzmann's constant, T is the temperature (in Kelvins) and m is the mass of the atom (in kilograms). >>> print speed_average(25+273.15,"Rb",85) 272.65940782 >>> print speed_average(25+273.15,"Cs",133) 217.938062809 """ atom = Atom(element, isotope) return sqrt(8*k_B*Temperature/atom.mass/pi)
[ "def", "speed_average", "(", "Temperature", ",", "element", ",", "isotope", ")", ":", "atom", "=", "Atom", "(", "element", ",", "isotope", ")", "return", "sqrt", "(", "8", "*", "k_B", "*", "Temperature", "/", "atom", ".", "mass", "/", "pi", ")" ]
r"""This function calculates the average speed (in meters per second) of an atom in a vapour assuming a Maxwell-Boltzmann velocity distribution. This is simply sqrt(8*k_B*T/m/pi) where k_B is Boltzmann's constant, T is the temperature (in Kelvins) and m is the mass of the atom (in kilograms). >>> print speed_average(25+273.15,"Rb",85) 272.65940782 >>> print speed_average(25+273.15,"Cs",133) 217.938062809
[ "r", "This", "function", "calculates", "the", "average", "speed", "(", "in", "meters", "per", "second", ")", "of", "an", "atom", "in", "a", "vapour", "assuming", "a", "Maxwell", "-", "Boltzmann", "velocity", "distribution", ".", "This", "is", "simply" ]
python
train
google/python-atfork
atfork/__init__.py
https://github.com/google/python-atfork/blob/0ba186bd3a75f823c720e711b39d73441da67ea4/atfork/__init__.py#L58-L71
def monkeypatch_os_fork_functions(): """ Replace os.fork* with wrappers that use ForkSafeLock to acquire all locks before forking and release them afterwards. """ builtin_function = type(''.join) if hasattr(os, 'fork') and isinstance(os.fork, builtin_function): global _orig_os_fork _orig_os_fork = os.fork os.fork = os_fork_wrapper if hasattr(os, 'forkpty') and isinstance(os.forkpty, builtin_function): global _orig_os_forkpty _orig_os_forkpty = os.forkpty os.forkpty = os_forkpty_wrapper
[ "def", "monkeypatch_os_fork_functions", "(", ")", ":", "builtin_function", "=", "type", "(", "''", ".", "join", ")", "if", "hasattr", "(", "os", ",", "'fork'", ")", "and", "isinstance", "(", "os", ".", "fork", ",", "builtin_function", ")", ":", "global", ...
Replace os.fork* with wrappers that use ForkSafeLock to acquire all locks before forking and release them afterwards.
[ "Replace", "os", ".", "fork", "*", "with", "wrappers", "that", "use", "ForkSafeLock", "to", "acquire", "all", "locks", "before", "forking", "and", "release", "them", "afterwards", "." ]
python
train
PyCQA/pylint
pylint/checkers/typecheck.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/checkers/typecheck.py#L385-L446
def _emit_no_member(node, owner, owner_name, ignored_mixins=True, ignored_none=True): """Try to see if no-member should be emitted for the given owner. The following cases are ignored: * the owner is a function and it has decorators. * the owner is an instance and it has __getattr__, __getattribute__ implemented * the module is explicitly ignored from no-member checks * the owner is a class and the name can be found in its metaclass. * The access node is protected by an except handler, which handles AttributeError, Exception or bare except. """ # pylint: disable=too-many-return-statements if node_ignores_exception(node, AttributeError): return False if ignored_none and isinstance(owner, astroid.Const) and owner.value is None: return False if is_super(owner) or getattr(owner, "type", None) == "metaclass": return False if ignored_mixins and owner_name[-5:].lower() == "mixin": return False if isinstance(owner, astroid.FunctionDef) and owner.decorators: return False if isinstance(owner, (astroid.Instance, astroid.ClassDef)): if owner.has_dynamic_getattr(): # Issue #2565: Don't ignore enums, as they have a `__getattr__` but it's not # invoked at this point. try: metaclass = owner.metaclass() except exceptions.MroError: return False if metaclass: return metaclass.qname() == "enum.EnumMeta" return False if not has_known_bases(owner): return False if isinstance(owner, objects.Super): # Verify if we are dealing with an invalid Super object. # If it is invalid, then there's no point in checking that # it has the required attribute. Also, don't fail if the # MRO is invalid. 
try: owner.super_mro() except (exceptions.MroError, exceptions.SuperError): return False if not all(map(has_known_bases, owner.type.mro())): return False if isinstance(owner, astroid.Module): try: owner.getattr("__getattr__") return False except astroid.NotFoundError: pass if node.attrname.startswith("_" + owner_name): # Test if an attribute has been mangled ('private' attribute) unmangled_name = node.attrname.split("_" + owner_name)[-1] try: if owner.getattr(unmangled_name, context=None) is not None: return False except astroid.NotFoundError: return True return True
[ "def", "_emit_no_member", "(", "node", ",", "owner", ",", "owner_name", ",", "ignored_mixins", "=", "True", ",", "ignored_none", "=", "True", ")", ":", "# pylint: disable=too-many-return-statements", "if", "node_ignores_exception", "(", "node", ",", "AttributeError", ...
Try to see if no-member should be emitted for the given owner. The following cases are ignored: * the owner is a function and it has decorators. * the owner is an instance and it has __getattr__, __getattribute__ implemented * the module is explicitly ignored from no-member checks * the owner is a class and the name can be found in its metaclass. * The access node is protected by an except handler, which handles AttributeError, Exception or bare except.
[ "Try", "to", "see", "if", "no", "-", "member", "should", "be", "emitted", "for", "the", "given", "owner", "." ]
python
test
aerkalov/ebooklib
ebooklib/epub.py
https://github.com/aerkalov/ebooklib/blob/305f2dd7f02923ffabf9586a5d16266113d00c4a/ebooklib/epub.py#L606-L616
def set_title(self, title): """ Set title. You can set multiple titles. :Args: - title: Title value """ self.title = title self.add_metadata('DC', 'title', self.title)
[ "def", "set_title", "(", "self", ",", "title", ")", ":", "self", ".", "title", "=", "title", "self", ".", "add_metadata", "(", "'DC'", ",", "'title'", ",", "self", ".", "title", ")" ]
Set title. You can set multiple titles. :Args: - title: Title value
[ "Set", "title", ".", "You", "can", "set", "multiple", "titles", "." ]
python
train
Devoxin/Lavalink.py
lavalink/PlayerManager.py
https://github.com/Devoxin/Lavalink.py/blob/63f55c3d726d24c4cfd3674d3cd6aab6f5be110d/lavalink/PlayerManager.py#L81-L83
def fetch(self, key: object, default=None): """ Retrieves the related value from the stored user data. """ return self._user_data.get(key, default)
[ "def", "fetch", "(", "self", ",", "key", ":", "object", ",", "default", "=", "None", ")", ":", "return", "self", ".", "_user_data", ".", "get", "(", "key", ",", "default", ")" ]
Retrieves the related value from the stored user data.
[ "Retrieves", "the", "related", "value", "from", "the", "stored", "user", "data", "." ]
python
valid
djordon/queueing-tool
queueing_tool/network/queue_network.py
https://github.com/djordon/queueing-tool/blob/ccd418cf647ac03a54f78ba5e3725903f541b808/queueing_tool/network/queue_network.py#L1605-L1636
def _get_queues(g, queues, edge, edge_type): """Used to specify edge indices from different types of arguments.""" INT = numbers.Integral if isinstance(queues, INT): queues = [queues] elif queues is None: if edge is not None: if isinstance(edge, tuple): if isinstance(edge[0], INT) and isinstance(edge[1], INT): queues = [g.edge_index[edge]] elif isinstance(edge[0], collections.Iterable): if np.array([len(e) == 2 for e in edge]).all(): queues = [g.edge_index[e] for e in edge] else: queues = [g.edge_index[edge]] elif edge_type is not None: if isinstance(edge_type, collections.Iterable): edge_type = set(edge_type) else: edge_type = set([edge_type]) tmp = [] for e in g.edges(): if g.ep(e, 'edge_type') in edge_type: tmp.append(g.edge_index[e]) queues = np.array(tmp, int) if queues is None: queues = range(g.number_of_edges()) return queues
[ "def", "_get_queues", "(", "g", ",", "queues", ",", "edge", ",", "edge_type", ")", ":", "INT", "=", "numbers", ".", "Integral", "if", "isinstance", "(", "queues", ",", "INT", ")", ":", "queues", "=", "[", "queues", "]", "elif", "queues", "is", "None"...
Used to specify edge indices from different types of arguments.
[ "Used", "to", "specify", "edge", "indices", "from", "different", "types", "of", "arguments", "." ]
python
valid
nickjj/ansigenome
ansigenome/utils.py
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/utils.py#L312-L336
def roles_dict(path, repo_prefix="", repo_sub_dir=""): """ Return a dict of role names and repo paths. """ exit_if_path_not_found(path) aggregated_roles = {} roles = os.walk(path).next()[1] # First scan all directories for role in roles: for sub_role in roles_dict(path + "/" + role, repo_prefix="", repo_sub_dir=role + "/"): aggregated_roles[role + "/" + sub_role] = role + "/" + sub_role # Then format them for role in roles: if is_role(os.path.join(path, role)): if isinstance(role, basestring): role_repo = "{0}{1}".format(repo_prefix, role_name(role)) aggregated_roles[role] = role_repo return aggregated_roles
[ "def", "roles_dict", "(", "path", ",", "repo_prefix", "=", "\"\"", ",", "repo_sub_dir", "=", "\"\"", ")", ":", "exit_if_path_not_found", "(", "path", ")", "aggregated_roles", "=", "{", "}", "roles", "=", "os", ".", "walk", "(", "path", ")", ".", "next", ...
Return a dict of role names and repo paths.
[ "Return", "a", "dict", "of", "role", "names", "and", "repo", "paths", "." ]
python
train
hotdoc/hotdoc
hotdoc/core/tree.py
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/core/tree.py#L597-L603
def format_page(self, page, link_resolver, output, extensions): """ Banana banana """ info('formatting %s' % page.source_file, 'formatting') extension = extensions[page.extension_name] extension.format_page(page, link_resolver, output)
[ "def", "format_page", "(", "self", ",", "page", ",", "link_resolver", ",", "output", ",", "extensions", ")", ":", "info", "(", "'formatting %s'", "%", "page", ".", "source_file", ",", "'formatting'", ")", "extension", "=", "extensions", "[", "page", ".", "...
Banana banana
[ "Banana", "banana" ]
python
train
globality-corp/microcosm-flask
microcosm_flask/conventions/crud_adapter.py
https://github.com/globality-corp/microcosm-flask/blob/c2eaf57f03e7d041eea343751a4a90fcc80df418/microcosm_flask/conventions/crud_adapter.py#L54-L81
def update_batch(self, **kwargs): """ Simplistic batch update operation implemented in terms of `replace()`. Assumes that: - Request and response schemas contains lists of items. - Request items define a primary key identifier - The entire batch succeeds or fails together. """ items = kwargs.pop("items") def transform(item): """ Transform the dictionary expected for replace (which uses the URI path's id) into the resource expected from individual resources (which uses plain id). """ item[self.identifier_key] = item.pop("id") return item return dict( items=[ self.replace(**transform(item)) for item in items ], )
[ "def", "update_batch", "(", "self", ",", "*", "*", "kwargs", ")", ":", "items", "=", "kwargs", ".", "pop", "(", "\"items\"", ")", "def", "transform", "(", "item", ")", ":", "\"\"\"\n Transform the dictionary expected for replace (which uses the URI path's i...
Simplistic batch update operation implemented in terms of `replace()`. Assumes that: - Request and response schemas contains lists of items. - Request items define a primary key identifier - The entire batch succeeds or fails together.
[ "Simplistic", "batch", "update", "operation", "implemented", "in", "terms", "of", "replace", "()", "." ]
python
train
tjguk/networkzero
networkzero/core.py
https://github.com/tjguk/networkzero/blob/0e3e81d2e9200b25a83ac07741612283599486d7/networkzero/core.py#L212-L306
def address(address=None): """Convert one of a number of inputs into a valid ip:port string. Elements which are not provided are filled in as follows: * IP Address: the system is asked for the set of IP addresses associated with the machine and the first one is used, preferring those matching `address` if it is a wildcard. * Port number: a random port is selected from the pool of dynamically-available port numbers. This means you can pass any of: nothing; a hostname; an IP address; an IP address with wildcards; a port number If an IP address is supplied but is invalid, an InvalidAddressError exception is raised. :param address: (optional) Any of: an IP address, a port number, or both :returns: a valid ip:port string for this machine """ address = str(address or "").strip() # # If the address is an ip:port pair, split into its component parts. # Otherwise, try to determine whether we're looking at an IP # or at a port and leave the other one blank # host_or_ip, port = split_address(address) # # If the port has been supplied, make sure it's numeric and that it's a valid # port number. If it hasn't been supplied, remove a random one from the pool # of possible dynamically-allocated ports and use that. # if port: try: port = int(port) except ValueError: raise AddressError("Port %s must be a number" % port) if port not in config.VALID_PORTS: raise AddressError("Port %d must be in range %d - %d" % ( port, min(config.VALID_PORTS), max(config.VALID_PORTS)) ) else: random.shuffle(PORT_POOL) port = PORT_POOL.pop() # # The address part could be an IP address (optionally including # wildcards to indicate a preference) or a hostname or nothing. # If it's a hostname we attempt to resolve it to an IP address. # It it's nothing or a wildcard we query the system for a matching IP address. # if (not host_or_ip) or is_valid_ip_pattern(host_or_ip): # # If a specific IP address is given, use that. 
# If an IP pattern is given (ie something with a wildcard in it) treat # that as no address with a preference for that wildcard. # prefer = None if "*" in host_or_ip: host_or_ip, prefer = None, [host_or_ip] # # If no IP (or only a wildcard) is specified, query the system for valid # addresses, preferring those which match the wildcard. NB if the preference # matches one we've previously used, we can return a cached address. But # different requests can specify different wildcard preferences. # if not host_or_ip: if _ip4 and _prefer == prefer: ip = _ip4 else: ip = _find_ip4(prefer) else: ip = host_or_ip else: # # Treat the string as a hostname and resolve to an IP4 address # try: ip = socket.gethostbyname(host_or_ip) except socket.gaierror as exc: _logger.error("gaierror %d for %s", exc.errno, host_or_ip) raise InvalidAddressError(host_or_ip, exc.errno) else: # # Bizarrely specific check because BT Internet "helpfully" # redirects DNS fails to this address which hosts a sponsored # landing page! # if ip == "92.242.132.15": raise InvalidAddressError(host_or_ip, 0) return "%s:%s" % (ip, port)
[ "def", "address", "(", "address", "=", "None", ")", ":", "address", "=", "str", "(", "address", "or", "\"\"", ")", ".", "strip", "(", ")", "#", "# If the address is an ip:port pair, split into its component parts.", "# Otherwise, try to determine whether we're looking at ...
Convert one of a number of inputs into a valid ip:port string. Elements which are not provided are filled in as follows: * IP Address: the system is asked for the set of IP addresses associated with the machine and the first one is used, preferring those matching `address` if it is a wildcard. * Port number: a random port is selected from the pool of dynamically-available port numbers. This means you can pass any of: nothing; a hostname; an IP address; an IP address with wildcards; a port number If an IP address is supplied but is invalid, an InvalidAddressError exception is raised. :param address: (optional) Any of: an IP address, a port number, or both :returns: a valid ip:port string for this machine
[ "Convert", "one", "of", "a", "number", "of", "inputs", "into", "a", "valid", "ip", ":", "port", "string", "." ]
python
train
ethereum/vyper
vyper/utils.py
https://github.com/ethereum/vyper/blob/c21a40a4f651ebd8426b29b8e2bb7d8b72b57cdd/vyper/utils.py#L242-L256
def check_valid_varname(varname, custom_units, custom_structs, constants, pos, error_prefix="Variable name invalid.", exc=None): """ Handle invalid variable names """ exc = VariableDeclarationException if exc is None else exc valid_varname, msg = is_varname_valid(varname, custom_units, custom_structs, constants) if not valid_varname: raise exc(error_prefix + msg, pos) return True
[ "def", "check_valid_varname", "(", "varname", ",", "custom_units", ",", "custom_structs", ",", "constants", ",", "pos", ",", "error_prefix", "=", "\"Variable name invalid.\"", ",", "exc", "=", "None", ")", ":", "exc", "=", "VariableDeclarationException", "if", "ex...
Handle invalid variable names
[ "Handle", "invalid", "variable", "names" ]
python
train
google/grr
grr/client_builder/grr_response_client_builder/builders/windows.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client_builder/grr_response_client_builder/builders/windows.py#L185-L190
def CopyFileInZip(from_zip, from_name, to_zip, to_name=None): """Read a file from a ZipFile and write it to a new ZipFile.""" data = from_zip.read(from_name) if to_name is None: to_name = from_name to_zip.writestr(to_name, data)
[ "def", "CopyFileInZip", "(", "from_zip", ",", "from_name", ",", "to_zip", ",", "to_name", "=", "None", ")", ":", "data", "=", "from_zip", ".", "read", "(", "from_name", ")", "if", "to_name", "is", "None", ":", "to_name", "=", "from_name", "to_zip", ".", ...
Read a file from a ZipFile and write it to a new ZipFile.
[ "Read", "a", "file", "from", "a", "ZipFile", "and", "write", "it", "to", "a", "new", "ZipFile", "." ]
python
train