text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def get_svc_stats(self, svcs):
    """Aggregate statistics over a collection of Service entities.

    :param svcs: iterable of service entities; anything that is not a
        plain dict is silently skipped
    :return: dict of integer counters keyed by ``services.<metric>``
    """
    counters = {
        "services.%s" % metric: 0
        for metric in (
            "total", "ok", "warning", "critical", "unknown",
            "flapping", "in_downtime", "checked", "scheduled",
            "active_checks", "passive_checks",
        )
    }
    for entity in svcs:
        # Keep the strict type check: subclasses of dict are rejected too.
        if type(entity) is not dict:
            continue
        clean = self._sanitize_entity(entity)
        counters["services.total"] += 1
        counters["services.flapping"] += self._trans_binary(clean["flapping"])
        counters["services.in_downtime"] += self._trans_dtime(clean["in_downtime"])
        counters["services.checked"] += self._trans_binary(clean["checked"])
        counters["services.scheduled"] += self._trans_binary(clean["scheduled"])
        counters["services.active_checks"] += clean["active_checks"]
        counters["services.passive_checks"] += clean["passive_checks"]
        # One bucket per state (ok/warning/critical/unknown).
        counters["services.%s" % self._trans_svc_state(clean["state"])] += 1
    return counters
[ "def", "get_svc_stats", "(", "self", ",", "svcs", ")", ":", "stats", "=", "{", "\"services.total\"", ":", "0", ",", "\"services.ok\"", ":", "0", ",", "\"services.warning\"", ":", "0", ",", "\"services.critical\"", ":", "0", ",", "\"services.unknown\"", ":", ...
39.363636
15.393939
def get(request, obj_id=None):
    """List all tags, or return a single tag when *obj_id* is given.

    :returns: json
    """
    res = Result()
    if obj_id:
        # pk 0 is a synthetic "TAGLESS" pseudo-tag for untagged content.
        if obj_id == '0':
            obj = {
                'id': 0,
                'name': 'TAGLESS',
                'artist': False,
            }
        else:
            obj = get_object_or_404(Tag, pk=obj_id).json()
        res.append(obj)
        return JsonResponse(res.asDict())
    else:
        if request.GET.get('count'):
            # NOTE(review): this relies on both querysets yielding tags in
            # the same order so that itags[i] and vtags[i] refer to the same
            # tag -- presumably guaranteed by the model's default ordering;
            # verify before changing.
            itags = Tag.objects.all().annotate(icount=Count('image'))
            vtags = Tag.objects.all().annotate(vcount=Count('video'))
            for i, tag in enumerate(itags):
                # Combined usage count across images and videos.
                tag.count = itags[i].icount + vtags[i].vcount
                res.append(tag.json())
        else:
            for tag in Tag.objects.all():
                res.append(tag.json())
        return JsonResponse(res.asDict())
[ "def", "get", "(", "request", ",", "obj_id", "=", "None", ")", ":", "res", "=", "Result", "(", ")", "if", "obj_id", ":", "if", "obj_id", "==", "'0'", ":", "obj", "=", "{", "'id'", ":", "0", ",", "'name'", ":", "'TAGLESS'", ",", "'artist'", ":", ...
27.354839
17.612903
def nas_auto_qos_set_cos(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") nas = ET.SubElement(config, "nas", xmlns="urn:brocade.com:mgmt:brocade-qos") auto_qos = ET.SubElement(nas, "auto-qos") set = ET.SubElement(auto_qos, "set") cos = ET.SubElement(set, "cos") cos.text = kwargs.pop('cos') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "nas_auto_qos_set_cos", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "nas", "=", "ET", ".", "SubElement", "(", "config", ",", "\"nas\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:broca...
38.25
11
def _retf16(ins):
    """Return from a procedure / function a Fixed Point (32 bits) value.

    Emits the f16 operand fetch for the return value, then the jump back
    to the caller's return label.
    """
    result = _f16_oper(ins.quad[1])
    result.extend([
        '#pragma opt require hl,de',
        'jp %s' % str(ins.quad[2]),
    ])
    return result
[ "def", "_retf16", "(", "ins", ")", ":", "output", "=", "_f16_oper", "(", "ins", ".", "quad", "[", "1", "]", ")", "output", ".", "append", "(", "'#pragma opt require hl,de'", ")", "output", ".", "append", "(", "'jp %s'", "%", "str", "(", "ins", ".", "...
34.142857
8.857143
def checkState(self):
    """Map the boolean ``self.data`` onto Qt's check-state constants.

    :return: ``Qt.Checked`` when data is True, ``Qt.Unchecked`` when False
    :raises ValueError: when ``self.data`` is not a bool singleton
    """
    # Identity checks on purpose: only the True/False singletons qualify.
    if self.data is True:
        return Qt.Checked
    if self.data is False:
        return Qt.Unchecked
    raise ValueError("Unexpected data: {!r}".format(self.data))
[ "def", "checkState", "(", "self", ")", ":", "if", "self", ".", "data", "is", "True", ":", "return", "Qt", ".", "Checked", "elif", "self", ".", "data", "is", "False", ":", "return", "Qt", ".", "Unchecked", "else", ":", "raise", "ValueError", "(", "\"U...
31.555556
12.888889
def find_target_container(portal_type, record):
    """Locate the folder that should hold the object described by *record*.

    :param portal_type: explicit portal_type; falls back to
        ``record["portal_type"]`` when falsy
    :param record: dict representation of a content object; its
        ``parent_uid``/``parent_path`` keys are consumed (popped)
    :returns: folder which contains the object
    :rtype: object
    """
    portal_type = portal_type or record.get("portal_type")
    container = get_container_for(portal_type)
    if container:
        return container

    # No type-mapped container: fall back to the recorded parent reference.
    uid = record.pop("parent_uid", None)
    path = record.pop("parent_path", None)

    if uid:
        target = get_object_by_uid(uid)
    elif path:
        target = get_object_by_path(path)
    else:
        target = None
        fail(404, "No target UID/PATH information found")
    if not target:
        fail(404, "No target container found")
    return target
[ "def", "find_target_container", "(", "portal_type", ",", "record", ")", ":", "portal_type", "=", "portal_type", "or", "record", ".", "get", "(", "\"portal_type\"", ")", "container", "=", "get_container_for", "(", "portal_type", ")", "if", "container", ":", "retu...
28.366667
19.4
def with_filter(self, filter_func):
    '''
    Returns a new service which will process requests with the specified
    filter. Filtering operations can include logging, automatic retrying,
    etc... The filter is a lambda which receives the HTTPRequest and
    another lambda. The filter can perform any pre-processing on the
    request, pass it off to the next lambda, and then perform any
    post-processing on the response.
    '''
    wrapped = ServiceBusService(
        service_namespace=self.service_namespace,
        authentication=self.authentication)
    inner = self._filter

    def chained(request):
        # Delegate to the previous filter so filters compose in order.
        return filter_func(request, inner)

    wrapped._filter = chained  # pylint: disable=protected-access
    return wrapped
[ "def", "with_filter", "(", "self", ",", "filter_func", ")", ":", "res", "=", "ServiceBusService", "(", "service_namespace", "=", "self", ".", "service_namespace", ",", "authentication", "=", "self", ".", "authentication", ")", "old_filter", "=", "self", ".", "...
40
22.8
def move(self, position, slowdown=0):
    """Move to the specified sample position.

    :param position: The target position.
    :param slowdown: The slowdown code, an integer in the range 0 to 14,
        used to scale the stepper motor speed. 0, the default, is the
        fastest rate and 14 the slowest.
    """
    # Command spec: name plus the expected argument types/ranges.
    command = 'MOVE', [Float, Integer, Integer(min=0, max=14)]
    self._write(command, position, 0, slowdown)
[ "def", "move", "(", "self", ",", "position", ",", "slowdown", "=", "0", ")", ":", "cmd", "=", "'MOVE'", ",", "[", "Float", ",", "Integer", ",", "Integer", "(", "min", "=", "0", ",", "max", "=", "14", ")", "]", "self", ".", "_write", "(", "cmd",...
40.363636
17.272727
def get_dict(dictionary, *keys, **kwargs):
    """Traverse *dictionary* through a sequence of *keys*.

    ``get_dict(d, k1, k2, k3)`` is ``d[k1][k2][k3]``, except that a
    missing key at any level returns *default* instead of raising.

    :param dictionary: the (nested) mapping to traverse
    :param keys: keys to follow, outermost first
    :param default: keyword-only; value returned when any key is missing
        (defaults to None)
    :return: the nested value, or *default*
    """
    # kwargs.get replaces the former LBYL 'default' in kwargs check.
    default = kwargs.get('default')
    current = dictionary
    # Iterate keys directly instead of range(len(keys)).
    for key in keys:
        if key not in current:
            return default
        current = current[key]
    return current
[ "def", "get_dict", "(", "dictionary", ",", "*", "keys", ",", "*", "*", "kwargs", ")", ":", "if", "'default'", "in", "kwargs", ":", "default", "=", "kwargs", "[", "'default'", "]", "else", ":", "default", "=", "None", "existing", "=", "dictionary", "for...
20.291667
19.708333
def update_portal(self, portal_obj):
    """ Implements the Update device Portals API.

    This function is extremely dangerous: the portal object you pass in
    completely overwrites the portal on the server.

    http://docs.exosite.com/portals/#update-portal
    """
    request_headers = {'User-Agent': self.user_agent()}
    request_headers.update(self.headers())
    url = self.portals_url() + '/portals/' + self.portal_id()
    response = requests.put(
        url,
        data=json.dumps(portal_obj),
        headers=request_headers,
        auth=self.auth())
    if HTTP_STATUS.OK != response.status_code:
        print("update_portal: Something went wrong: <{0}>: {1}".format(
            response.status_code, response.reason))
        # Raises for 4xx/5xx; other non-200 codes fall through (returns None).
        response.raise_for_status()
        return None
    return response.json()
[ "def", "update_portal", "(", "self", ",", "portal_obj", ")", ":", "headers", "=", "{", "'User-Agent'", ":", "self", ".", "user_agent", "(", ")", ",", "}", "headers", ".", "update", "(", "self", ".", "headers", "(", ")", ")", "r", "=", "requests", "."...
37.956522
17.26087
def strip_vl_extension(filename):
    """Strip the vega-lite extension (either '.vl.json' or '.json') from
    *filename*.

    The longer extension is tested first so 'chart.vl.json' -> 'chart'.
    Filenames with neither extension are returned unchanged.
    """
    # Replaces a for/else construct whose control flow obscured the intent.
    for ext in ('.vl.json', '.json'):
        if filename.endswith(ext):
            return filename[:-len(ext)]
    return filename
[ "def", "strip_vl_extension", "(", "filename", ")", ":", "for", "ext", "in", "[", "'.vl.json'", ",", "'.json'", "]", ":", "if", "filename", ".", "endswith", "(", "ext", ")", ":", "return", "filename", "[", ":", "-", "len", "(", "ext", ")", "]", "else"...
36.142857
9.285714
def parse_timing(self, nids=None):
    """Parse the timer data in the main output file(s) of Abinit.

    Requires ``timopt /= 0`` in the input file (usually ``timopt = -1``).

    Args:
        nids: optional list of node identifiers used to filter the tasks.

    Return:
        :class:`AbinitTimerParser` instance, None if error.
    """
    from .abitimer import AbinitTimerParser

    # Collect the main output files of the (possibly filtered) tasks.
    paths = [task.output_file.path for task in self.iflat_tasks(nids=nids)]

    parser = AbinitTimerParser()
    if parser.parse(paths):
        return parser
    return None
[ "def", "parse_timing", "(", "self", ",", "nids", "=", "None", ")", ":", "# Get the list of output files according to nids.", "paths", "=", "[", "task", ".", "output_file", ".", "path", "for", "task", "in", "self", ".", "iflat_tasks", "(", "nids", "=", "nids", ...
34.35
20.85
def by_title(cls, title, conn=None, google_user=None, google_password=None):
    """
    Open the first document with the given ``title`` that is returned
    by document search.

    :param title: exact document title to match
    :param conn: optional existing Connection; otherwise one is created
        from google_user/google_password
    :return: a class instance for the first exact-title match, or None
        when nothing matches
    """
    conn = Connection.connect(conn=conn, google_user=google_user,
                              google_password=google_password)
    query = DocsQuery(categories=['spreadsheet'], title=title)
    feed = conn.docs_client.GetResources(q=query)
    for entry in feed.entry:
        # The search may return partial matches; require an exact title.
        if entry.title.text == title:
            # Renamed from 'id' so the builtin id() is not shadowed.
            doc_id = entry.id.text.rsplit('%3A', 1)[-1]
            return cls.by_id(doc_id, conn=conn)
[ "def", "by_title", "(", "cls", ",", "title", ",", "conn", "=", "None", ",", "google_user", "=", "None", ",", "google_password", "=", "None", ")", ":", "conn", "=", "Connection", ".", "connect", "(", "conn", "=", "conn", ",", "google_user", "=", "google...
51.75
10.916667
def validateElement(self, doc, elem):
    """Try to validate the subtree under an element """
    # Unwrap the optional wrapper objects down to their raw _o handles
    # before handing them to the C-level binding.
    doc__o = doc._o if doc is not None else None
    elem__o = elem._o if elem is not None else None
    return libxml2mod.xmlValidateElement(self._o, doc__o, elem__o)
[ "def", "validateElement", "(", "self", ",", "doc", ",", "elem", ")", ":", "if", "doc", "is", "None", ":", "doc__o", "=", "None", "else", ":", "doc__o", "=", "doc", ".", "_o", "if", "elem", "is", "None", ":", "elem__o", "=", "None", "else", ":", "...
39.875
9.75
def get_datasource(self, source_id, datasource_id):
    """
    Get a Datasource object.

    :param source_id: id of the parent source
    :param datasource_id: id of the datasource to fetch
    :rtype: Datasource
    """
    url_params = {'source_id': source_id, 'datasource_id': datasource_id}
    target_url = self.client.get_url('DATASOURCE', 'GET', 'single', url_params)
    manager = self.client.get_manager(Datasource)
    return manager._get(target_url)
[ "def", "get_datasource", "(", "self", ",", "source_id", ",", "datasource_id", ")", ":", "target_url", "=", "self", ".", "client", ".", "get_url", "(", "'DATASOURCE'", ",", "'GET'", ",", "'single'", ",", "{", "'source_id'", ":", "source_id", ",", "'datasource...
40.75
23.75
def _apply_gradients_and_copy(self, opt, raw_grad_list, ps_var_grads): """ Apply averaged gradients to ps vars, and then copy the updated variables back to each tower. Args: raw_grad_list: Ngpu x Nvar x 2 gradient list from all towers ps_var_grads: Nvar x 2 (grad, ps_var) Returns: list of copy ops """ # TODO do this for variables together? with tf.name_scope('apply_gradients'): var_update_ops = [] for vid, (g, v) in enumerate(ps_var_grads): # TODO do we put momentum variables into local or global? apply_gradient_op = opt.apply_gradients([(g, v)]) barrier = self._add_sync_queues_and_barrier( 'param_update_barrier_{}'.format(vid), [apply_gradient_op]) with tf.control_dependencies([barrier]), \ tf.device(self.cpu_device): updated_value = v.read_value() for towerid in range(self.nr_gpu): var_update_ops.append( raw_grad_list[towerid][vid][1].assign(updated_value)) return var_update_ops
[ "def", "_apply_gradients_and_copy", "(", "self", ",", "opt", ",", "raw_grad_list", ",", "ps_var_grads", ")", ":", "# TODO do this for variables together?", "with", "tf", ".", "name_scope", "(", "'apply_gradients'", ")", ":", "var_update_ops", "=", "[", "]", "for", ...
44.62963
18.777778
def _supply_data(data_sink, context):
    """ Supply data to the data sink """
    try:
        data_sink.sink(context)
    except Exception as e:
        # Wrap any failure with the sink's name and help text so the
        # user can tell which sink in the pipeline blew up.
        ex = ValueError("An exception occurred while "
                        "supplying data to data sink '{ds}'\n\n"
                        "{e}\n\n"
                        "{help}".format(ds=context.name,
                                        e=str(e),
                                        help=context.help()))
        # Python 2 three-argument raise: re-raise the wrapper with the
        # ORIGINAL traceback so the real failure site is preserved.
        raise ex, None, sys.exc_info()[2]
[ "def", "_supply_data", "(", "data_sink", ",", "context", ")", ":", "try", ":", "data_sink", ".", "sink", "(", "context", ")", "except", "Exception", "as", "e", ":", "ex", "=", "ValueError", "(", "\"An exception occurred while \"", "\"supplying data to data sink '{...
33.416667
12.916667
def info(self, page, version=None):
    """Returns informations of *page*.

    Informations of the last version is returned if *version* is not set.
    """
    if version is None:
        return self._dokuwiki.send('wiki.getPageInfo', page)
    return self._dokuwiki.send('wiki.getPageInfoVersion', page, version)
[ "def", "info", "(", "self", ",", "page", ",", "version", "=", "None", ")", ":", "return", "(", "self", ".", "_dokuwiki", ".", "send", "(", "'wiki.getPageInfoVersion'", ",", "page", ",", "version", ")", "if", "version", "is", "not", "None", "else", "sel...
49.571429
10.714286
def _linalg_cho_factor(A, rho, lower=False, check_finite=True):
    """Patched version of :func:`sporco.linalg.cho_factor`.

    Factors whichever Gram matrix is smaller: A^T A + rho*I when A is
    tall (N >= M), otherwise A A^T + rho*I.
    """
    N, M = A.shape
    if N >= M:
        gram = A.T.dot(A) + rho * cp.identity(M, dtype=A.dtype)
    else:
        gram = A.dot(A.T) + rho * cp.identity(N, dtype=A.dtype)
    c, lwr = _cho_factor(gram, lower=lower, check_finite=check_finite)
    return c, lwr
[ "def", "_linalg_cho_factor", "(", "A", ",", "rho", ",", "lower", "=", "False", ",", "check_finite", "=", "True", ")", ":", "N", ",", "M", "=", "A", ".", "shape", "if", "N", ">=", "M", ":", "c", ",", "lwr", "=", "_cho_factor", "(", "A", ".", "T"...
35.769231
19.923077
def p_statement_list_1(self, p):
    '''statement_list : statement SEMICOLON statement_list'''
    # NOTE: the docstring above IS the PLY grammar rule for this
    # production -- it is load-bearing and must not be reworded.
    # Reuse the tail statement_list node as the result, prepending this
    # statement's node when the statement produced one.
    p[0] = p[3]
    if p[1] is not None:
        p[0].children.insert(0, p[1])
[ "def", "p_statement_list_1", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "p", "[", "3", "]", "if", "p", "[", "1", "]", "is", "not", "None", ":", "p", "[", "0", "]", ".", "children", ".", "insert", "(", "0", ",", "p", "[", "1...
37
13.4
def add_unique_postfix(fn):
    """Return *fn* unchanged if it does not exist yet; otherwise return
    '<name>(<i>)<ext>' for the smallest i >= 2 that is unused.

    __source__ = 'http://code.activestate.com/recipes/577200-make-unique-file-name/'
    """
    if not os.path.exists(fn):
        return fn
    path, name = os.path.split(fn)
    name, ext = os.path.splitext(name)
    # Candidate builder: '<name>(<i>)<ext>' in the original directory.
    make_fn = lambda i: os.path.join(path, '%s(%d)%s' % (name, i, ext))
    # Probe (2), (3), ... until an unused name is found.
    # (Python 2: xrange / sys.maxint.)
    for i in xrange(2, sys.maxint):
        uni_fn = make_fn(i)
        if not os.path.exists(uni_fn):
            return uni_fn
[ "def", "add_unique_postfix", "(", "fn", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "fn", ")", ":", "return", "fn", "path", ",", "name", "=", "os", ".", "path", ".", "split", "(", "fn", ")", "name", ",", "ext", "=", "os", ".",...
30.857143
17.142857
def _stmt_from_rule(model, rule_name, stmts): """Return the INDRA Statement corresponding to a given rule by name.""" stmt_uuid = None for ann in model.annotations: if ann.predicate == 'from_indra_statement': if ann.subject == rule_name: stmt_uuid = ann.object break if stmt_uuid: for stmt in stmts: if stmt.uuid == stmt_uuid: return stmt
[ "def", "_stmt_from_rule", "(", "model", ",", "rule_name", ",", "stmts", ")", ":", "stmt_uuid", "=", "None", "for", "ann", "in", "model", ".", "annotations", ":", "if", "ann", ".", "predicate", "==", "'from_indra_statement'", ":", "if", "ann", ".", "subject...
35.916667
9.666667
def get(self, id=None):
    """Fetch the department list from WeChat Work.

    https://work.weixin.qq.com/api/doc#90000/90135/90208

    Permission note: only departments within the scope of the app that
    owns the token are returned.

    :param id: department id; when given, returns that department and its
        sub-departments. When None, the full organisation tree is returned.
    :return: list of departments
    """
    if id is None:
        response = self._get('department/list')
    else:
        response = self._get('department/list', params={'id': id})
    return response['department']
[ "def", "get", "(", "self", ",", "id", "=", "None", ")", ":", "if", "id", "is", "None", ":", "res", "=", "self", ".", "_get", "(", "'department/list'", ")", "else", ":", "res", "=", "self", ".", "_get", "(", "'department/list'", ",", "params", "=", ...
24.705882
19.529412
def updatepLvlNextFunc(self):
    '''
    A method that creates the pLvlNextFunc attribute as a sequence of
    linear functions, indicating constant expected permanent income
    growth across permanent income levels.  Draws on the attribute
    PermGroFac, and installs a special retirement function when it
    exists.

    Parameters
    ----------
    None

    Returns
    -------
    None
    '''
    # Remember the current time flow so it can be restored at the end.
    orig_time = self.time_flow
    self.timeFwd()
    pLvlNextFunc = []
    for t in range(self.T_cycle):
        # Linear through the origin with slope PermGroFac[t]: expected
        # next-period pLvl = PermGroFac[t] * current pLvl.
        pLvlNextFunc.append(LinearInterp(np.array([0.,1.]),np.array([0.,self.PermGroFac[t]])))
    self.pLvlNextFunc = pLvlNextFunc
    # Register the new attribute as time-varying.
    self.addToTimeVary('pLvlNextFunc')
    # Restore the original (reversed) time flow if needed.
    if not orig_time:
        self.timeRev()
[ "def", "updatepLvlNextFunc", "(", "self", ")", ":", "orig_time", "=", "self", ".", "time_flow", "self", ".", "timeFwd", "(", ")", "pLvlNextFunc", "=", "[", "]", "for", "t", "in", "range", "(", "self", ".", "T_cycle", ")", ":", "pLvlNextFunc", ".", "app...
30.076923
24.692308
def create_external_feed_groups(self, url, group_id, header_match=None, verbosity=None):
    """
    Create an external feed.

    Create a new external feed for the course or group.
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - group_id: ID of the group.
    path["group_id"] = group_id

    # REQUIRED - url: the url to the external rss or atom feed.
    data["url"] = url

    # OPTIONAL - header_match: if given, only feed entries that contain
    # this string in their title will be imported.
    if header_match is not None:
        data["header_match"] = header_match

    # OPTIONAL - verbosity: defaults to "full".
    if verbosity is not None:
        self._validate_enum(verbosity, ["full", "truncate", "link_only"])
        data["verbosity"] = verbosity

    self.logger.debug("POST /api/v1/groups/{group_id}/external_feeds with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/groups/{group_id}/external_feeds".format(**path), data=data, params=params, single_item=True)
[ "def", "create_external_feed_groups", "(", "self", ",", "url", ",", "group_id", ",", "header_match", "=", "None", ",", "verbosity", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - group_id\...
39.096774
23.032258
def reset(self):
    """Reset the instance: clear the cached hline string and row size,
    and empty the header and rows."""
    self._hline_string = None
    self._row_size = None
    # Fresh, independent lists for header and rows.
    self._header, self._rows = [], []
[ "def", "reset", "(", "self", ")", ":", "self", ".", "_hline_string", "=", "None", "self", ".", "_row_size", "=", "None", "self", ".", "_header", "=", "[", "]", "self", ".", "_rows", "=", "[", "]" ]
19.7
16.3
async def get_guild_count(self, bot_id: int=None):
    """This function is a coroutine.

    Gets a guild count from discordbots.org

    Parameters
    ==========
    bot_id: int[Optional]
        The bot_id of the bot you want to lookup.
        Defaults to the Bot provided in Client init

    Returns
    =======
    stats: dict
        The guild count and shards of a bot.
        The date object is returned in a datetime.datetime object
    """
    target = self.bot_id if bot_id is None else bot_id
    return await self.http.get_guild_count(target)
[ "async", "def", "get_guild_count", "(", "self", ",", "bot_id", ":", "int", "=", "None", ")", ":", "if", "bot_id", "is", "None", ":", "bot_id", "=", "self", ".", "bot_id", "return", "await", "self", ".", "http", ".", "get_guild_count", "(", "bot_id", ")...
26.043478
21.043478
def create_raw(self, key, value):
    """Create method of CRUD operation for raw data.

    Args:
        key (string): The variable to write to the DB.
        value (any): The data to write to the DB.

    Returns:
        (string): Result of DB write, or None when key/value is missing.
    """
    # Guard clause instead of nested if/else.
    if key is None or value is None:
        self.tcex.log.warning(u'The key or value field was None.')
        return None
    return self.db.create(key.strip(), value)
[ "def", "create_raw", "(", "self", ",", "key", ",", "value", ")", ":", "data", "=", "None", "if", "key", "is", "not", "None", "and", "value", "is", "not", "None", ":", "data", "=", "self", ".", "db", ".", "create", "(", "key", ".", "strip", "(", ...
31.5
18.1875
def GetMemSharedSavedMB(self):
    '''Retrieves the estimated amount of physical memory on the host
    saved from copy-on-write (COW) shared guest physical memory.'''
    counter = c_uint()
    status = vmGuestLib.VMGuestLib_GetMemSharedSavedMB(self.handle.value, byref(counter))
    # The C API signals failure through its return code, not the out-param.
    if status != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(status)
    return counter.value
[ "def", "GetMemSharedSavedMB", "(", "self", ")", ":", "counter", "=", "c_uint", "(", ")", "ret", "=", "vmGuestLib", ".", "VMGuestLib_GetMemSharedSavedMB", "(", "self", ".", "handle", ".", "value", ",", "byref", "(", "counter", ")", ")", "if", "ret", "!=", ...
56.285714
26.571429
def transformation_get(node_id):
    """Get all the transformations of a node.

    The node id must be specified in the url.
    You can also pass transformation_type.
    """
    exp = experiment(session)

    # get the parameters
    transformation_type = request_parameter(parameter="transformation_type",
                                            parameter_type="known_class",
                                            default=models.Transformation)
    # request_parameter returns an error Response when the parameter is bad.
    # isinstance replaces the former type(...) == Response check so Response
    # subclasses are handled too.
    if isinstance(transformation_type, Response):
        return transformation_type

    # check the node exists
    node = models.Node.query.get(node_id)
    if node is None:
        return error_response(
            error_type="/node/transformations, node does not exist")

    # execute the request
    transformations = node.transformations(
        transformation_type=transformation_type)
    try:
        # ping the experiment
        exp.transformation_get_request(node=node,
                                       transformations=transformations)
        session.commit()
    except Exception:
        # Was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt.  The error_type string (with its existing
        # 'tranaformations' typo) is kept byte-identical for clients.
        return error_response(error_type="/node/tranaformations GET failed",
                              participant=node.participant)

    # return the data
    return success_response(field="transformations",
                            data=[t.__json__() for t in transformations],
                            request_type="transformations")
[ "def", "transformation_get", "(", "node_id", ")", ":", "exp", "=", "experiment", "(", "session", ")", "# get the parameters", "transformation_type", "=", "request_parameter", "(", "parameter", "=", "\"transformation_type\"", ",", "parameter_type", "=", "\"known_class\""...
35.789474
20
def update_metadata(self, key, value):
    """Set *key* in the metadata to *value*.

    Returns the previous value of *key*, or None if the key was not
    previously set.
    """
    metadata = self.contents['metadata']
    previous = metadata.get(key)
    metadata[key] = value
    self._log('Updated metadata: %s=%s' % (key, value))
    return previous
[ "def", "update_metadata", "(", "self", ",", "key", ",", "value", ")", ":", "old_value", "=", "self", ".", "contents", "[", "'metadata'", "]", ".", "get", "(", "key", ")", "self", ".", "contents", "[", "'metadata'", "]", "[", "key", "]", "=", "value",...
37.4
13.7
def result_report_class_wise_average(self):
    """Report class-wise averages

    Returns
    -------
    str
        result report in string format
    """
    def to_pct(value):
        # Scale a 0..1 metric to percent; keep None for missing metrics.
        # Replaces four copies of the same if/else block.
        return value * 100 if value is not None else None

    results = self.results_class_wise_average_metrics()

    output = self.ui.section_header(
        'Class-wise average metrics (macro-average)', indent=2) + '\n'

    if 'f_measure' in results and results['f_measure']:
        fm = results['f_measure']
        output += self.ui.line('F-measure', indent=2) + '\n'
        output += self.ui.data(field='F-measure (F1)',
                               value=to_pct(fm['f_measure']),
                               unit='%', indent=4) + '\n'
        output += self.ui.data(field='Precision',
                               value=to_pct(fm['precision']),
                               unit='%', indent=4) + '\n'
        output += self.ui.data(field='Recall',
                               value=to_pct(fm['recall']),
                               unit='%', indent=4) + '\n'

    if 'eer' in results and results['eer']:
        output += self.ui.line('Equal error rate', indent=2) + '\n'
        output += self.ui.data(field='Equal error rate (EER)',
                               value=to_pct(results['eer']['eer']),
                               unit='%', indent=4) + '\n'

    return output
[ "def", "result_report_class_wise_average", "(", "self", ")", ":", "results", "=", "self", ".", "results_class_wise_average_metrics", "(", ")", "output", "=", "self", ".", "ui", ".", "section_header", "(", "'Class-wise average metrics (macro-average)'", ",", "indent", ...
33.693878
28.265306
def backup_db(release=None, limit=5):
    """ Backup database and associate it with current release

    :param release: release name to tag the dump file with; defaults to
        the currently deployed release
    :param limit: how many dump files to keep around
    """
    assert "mysql_user" in env, "Missing mysqL_user in env"
    assert "mysql_password" in env, "Missing mysql_password in env"
    assert "mysql_host" in env, "Missing mysql_host in env"
    assert "mysql_db" in env, "Missing mysql_db in env"
    if not release:
        release = paths.get_current_release_name()
    # tail -n +N starts printing at the N-th line, so keeping `limit`
    # dumps means deleting from the (limit+1)-th newest onwards.
    max_versions = limit+1
    if not release:
        # Nothing deployed yet -- nothing to back up.
        return
    env.run("mkdir -p %s" % paths.get_backup_path("mysql"))
    backup_file = "mysql/%s.sql.gz" % release
    backup_path = paths.get_backup_path(backup_file)
    # NOTE(review): the password ends up on the mysqldump command line and
    # is therefore visible in the host's process list -- consider a
    # defaults-extra-file instead.
    env.run("mysqldump -u %s -p%s -h %s %s | gzip -c > %s" %
            (env.mysql_user, env.mysql_password, env.mysql_host,
             env.mysql_db, backup_path))
    # Remove older releases
    env.run("ls -dt %s/* | tail -n +%s | xargs rm -rf" % (
        paths.get_backup_path("mysql"),
        max_versions)
    )
[ "def", "backup_db", "(", "release", "=", "None", ",", "limit", "=", "5", ")", ":", "assert", "\"mysql_user\"", "in", "env", ",", "\"Missing mysqL_user in env\"", "assert", "\"mysql_password\"", "in", "env", ",", "\"Missing mysql_password in env\"", "assert", "\"mysq...
29.5625
22.0625
def _generate_placeholder(readable_text=None):
    """Generate a placeholder name to use while updating WeldObject.

    Parameters
    ----------
    readable_text : str, optional
        Appended to the name for a more understandable placeholder.

    Returns
    -------
    str
        Placeholder.
    """
    # Class-level counter guarantees uniqueness across calls.
    placeholder = '_interm_' + str(Cache._counter)
    Cache._counter += 1
    if readable_text is None:
        return placeholder
    assert isinstance(readable_text, str)
    return placeholder + '_' + readable_text
[ "def", "_generate_placeholder", "(", "readable_text", "=", "None", ")", ":", "name", "=", "'_interm_'", "+", "str", "(", "Cache", ".", "_counter", ")", "Cache", ".", "_counter", "+=", "1", "if", "readable_text", "is", "not", "None", ":", "assert", "isinsta...
25.272727
19.681818
def load_gene_exp_to_df(inst_path):
    '''
    Loads gene expression data from 10x in sparse matrix format and
    returns a Pandas dataframe.

    Expects ``matrix.mtx``, ``genes.tsv`` and ``barcodes.tsv`` under
    *inst_path* (which must end with a path separator).
    '''
    import pandas as pd
    from scipy import io
    from ast import literal_eval as make_tuple
    # (removed: unused 'from scipy import sparse' and a large block of
    #  commented-out dead code)

    # matrix
    mat = io.mmread(inst_path + 'matrix.mtx').todense()

    # genes -- files are now closed deterministically via 'with'
    with open(inst_path + 'genes.tsv', 'r') as f:
        gene_lines = f.readlines()

    ini_genes = []
    for inst_line in gene_lines:
        inst_line = inst_line.strip().split()
        # 10x genes.tsv rows are "<ensembl-id>\t<symbol>"; prefer the symbol.
        ini_genes.append(inst_line[1] if len(inst_line) > 1 else inst_line[0])

    # add a unique _1, _2, ... suffix ONLY to duplicated gene names
    gene_name_count = pd.Series(ini_genes).value_counts()
    duplicate_genes = gene_name_count[gene_name_count > 1].index.tolist()

    dup_index = {}
    genes = []
    for inst_row in ini_genes:
        if inst_row in duplicate_genes:
            # Running per-name counter gives stable, ordered suffixes.
            dup_index[inst_row] = dup_index.get(inst_row, 0) + 1
            genes.append(inst_row + '_' + str(dup_index[inst_row]))
        else:
            genes.append(inst_row)

    # barcodes
    with open(inst_path + 'barcodes.tsv', 'r') as f:
        bc_lines = f.readlines()

    cell_barcodes = []
    for inst_bc in bc_lines:
        inst_bc = inst_bc.strip().split('\t')
        # remove the "-1" style suffix from barcodes if present
        if '-' in inst_bc[0]:
            inst_bc[0] = inst_bc[0].split('-')[0]
        cell_barcodes.append(inst_bc[0])

    # labels may be stringified tuples; parse them when they are
    # (bare 'except' narrowed to the errors literal_eval actually raises)
    try:
        cell_barcodes = [make_tuple(x) for x in cell_barcodes]
    except (ValueError, SyntaxError):
        pass
    try:
        genes = [make_tuple(x) for x in genes]
    except (ValueError, SyntaxError):
        pass

    # make dataframe
    return pd.DataFrame(mat, index=genes, columns=cell_barcodes)
[ "def", "load_gene_exp_to_df", "(", "inst_path", ")", ":", "import", "pandas", "as", "pd", "from", "scipy", "import", "io", "from", "scipy", "import", "sparse", "from", "ast", "import", "literal_eval", "as", "make_tuple", "# matrix", "Matrix", "=", "io", ".", ...
23.545455
21.262626
def tree_probe(self, **kwargs):
    """
    Perform an os walk down a file system tree, starting from
    a **kwargs identified 'root', and return lists of files and
    directories found.

    kwargs:
        root = '/some/path'

    return {
        'status':   True,
        'l_dir':    l_dirs,
        'l_files':  l_files
    }
    """
    str_topDir = "."
    l_dirs = []
    l_files = []
    b_status = False
    str_path = ''
    l_dirsHere = []
    l_filesHere = []

    for k, v in kwargs.items():
        if k == 'root':
            str_topDir = v

    # Depth-limited walk (pftree.walklevel) instead of a full os.walk.
    for root, dirs, files in pftree.walklevel(str_topDir, self.maxdepth,
                                              followlinks = self.b_followLinks):
        # Any yielded directory means the probe succeeded.
        b_status = True
        # NOTE(review): str_path is computed but never used afterwards.
        str_path = root.split(os.sep)
        if dirs:
            l_dirsHere = [root + '/' + x for x in dirs]
            l_dirs.append(l_dirsHere)
            self.dp.qprint('Appending dirs to search space:\n', level = 3)
            self.dp.qprint("\n" + self.pp.pformat(l_dirsHere), level = 3)
        if files:
            l_filesHere = [root + '/' + y for y in files]
            # When an input-file filter is set, keep only the files whose
            # path contains that substring; no hits means drop them all.
            if len(self.str_inputFile):
                l_hit = [s for s in l_filesHere if self.str_inputFile in s]
                if l_hit:
                    l_filesHere = l_hit
                else:
                    l_filesHere = []
            if l_filesHere:
                l_files.append(l_filesHere)
                self.dp.qprint('Appending files to search space:\n', level = 3)
                self.dp.qprint("\n" + self.pp.pformat(l_filesHere), level = 3)

    # l_dir / l_files are lists-of-lists, one inner list per directory.
    return {
        'status': b_status,
        'l_dir': l_dirs,
        'l_files': l_files
    }
[ "def", "tree_probe", "(", "self", ",", "*", "*", "kwargs", ")", ":", "str_topDir", "=", "\".\"", "l_dirs", "=", "[", "]", "l_files", "=", "[", "]", "b_status", "=", "False", "str_path", "=", "''", "l_dirsHere", "=", "[", "]", "l_filesHere", "=", "[",...
35.160714
19.660714
def shared(self, value, name=None):
    """
    Create a shared theano scalar value.

    ints become int32 arrays, floats become FLOATX arrays, anything else
    is passed through untouched.
    """
    # Exact type checks on purpose (bool is NOT treated as int here).
    if type(value) == int:
        converted = np.array(value, dtype="int32")
    elif type(value) == float:
        converted = np.array(value, dtype=env.FLOATX)
    else:
        converted = value
    return theano.shared(converted, name=name)
[ "def", "shared", "(", "self", ",", "value", ",", "name", "=", "None", ")", ":", "if", "type", "(", "value", ")", "==", "int", ":", "final_value", "=", "np", ".", "array", "(", "value", ",", "dtype", "=", "\"int32\"", ")", "elif", "type", "(", "va...
31.333333
12.333333
def get_template_file(args): """Returns valid template file, generating the default template file if it doesn't exist and one wasn't specified on command line. :param args: Argument collection as generated by parseargs :return file""" if args.template is None: template_filename = os.getenv("HOME") + "/.mvmany.template" try: template_filename = open(template_filename, "r") except: with open(template_filename, "w") as file: print >> file, """#SBATCH --job-name=$jobname #SBATCH --nodes=1 #SBATCH --tasks-per-node=1 #SBATCH --cpus-per-task=1 #SBATCH --mem=$memory #SBATCH --time=$walltime #SBATCH --error $logpath/$jobname.e #SBATCH --output $respath/$jobname.txt cd $pwd $body """ print >> sys.stderr, """PLEASE NOTE: \n A default template file, %s, has been created. You are encouraged to configure it according to work with your cluster management software or personalize it with email notifications, etc.\n""" template_filename = open(template_filename, "r") return template_filename
[ "def", "get_template_file", "(", "args", ")", ":", "if", "args", ".", "template", "is", "None", ":", "template_filename", "=", "os", ".", "getenv", "(", "\"HOME\"", ")", "+", "\"/.mvmany.template\"", "try", ":", "template_filename", "=", "open", "(", "templa...
34.83871
19.354839
def from_locale(cls, locale): """ Create a new Language instance from a locale string :param locale: locale as string :return: Language instance with instance.locale() == locale if locale is valid else instance of Unknown Language """ locale = str(locale) if locale is 'unknown': return UnknownLanguage(locale) try: return cls._from_xyz('locale', locale) except NotALanguageException: log.warning('Unknown locale: {}'.format(locale)) return UnknownLanguage(locale)
[ "def", "from_locale", "(", "cls", ",", "locale", ")", ":", "locale", "=", "str", "(", "locale", ")", "if", "locale", "is", "'unknown'", ":", "return", "UnknownLanguage", "(", "locale", ")", "try", ":", "return", "cls", ".", "_from_xyz", "(", "'locale'", ...
40.785714
14.071429
def setwinsize(fd, rows_cols): """ set the terminal size of a tty file descriptor. borrowed logic from pexpect.py """ rows, cols = rows_cols TIOCSWINSZ = getattr(termios, 'TIOCSWINSZ', -2146929561) s = struct.pack('HHHH', rows, cols, 0, 0) fcntl.ioctl(fd, TIOCSWINSZ, s)
[ "def", "setwinsize", "(", "fd", ",", "rows_cols", ")", ":", "rows", ",", "cols", "=", "rows_cols", "TIOCSWINSZ", "=", "getattr", "(", "termios", ",", "'TIOCSWINSZ'", ",", "-", "2146929561", ")", "s", "=", "struct", ".", "pack", "(", "'HHHH'", ",", "row...
36.125
11.875
def remove_dataset(self, dataset=None, **kwargs): """ Remove a dataset from the Bundle. This removes all matching Parameters from the dataset, model, and constraint contexts (by default if the context tag is not provided). You must provide some sort of filter or this will raise an Error (so that all Parameters are not accidentally removed). :parameter str dataset: name of the dataset :parameter **kwargs: any other tags to do the filter (except qualifier and dataset) :raises ValueError: if no filter is provided """ self._kwargs_checks(kwargs) # Let's avoid deleting ALL parameters from the matching contexts if dataset is None and not len(kwargs.items()): raise ValueError("must provide some value to filter for datasets") # let's handle deps if kind was passed kind = kwargs.get('kind', None) if kind is not None: if isinstance(kind, str): kind = [kind] kind_deps = [] for kind_i in kind: dep = '{}_dep'.format(kind_i) if dep not in kind: kind_deps.append(dep) kind = kind + kind_deps kwargs['kind'] = kind if dataset is None: # then let's find the list of datasets that match the filter, # we'll then use dataset to do the removing. 
This avoids leaving # pararameters behind that don't specifically match the filter # (ie if kind is passed as 'rv' we still want to remove parameters # with datasets that are RVs but belong to a different kind in # another context like compute) dataset = self.filter(**kwargs).datasets kwargs['kind'] = None kwargs['dataset'] = dataset # Let's avoid the possibility of deleting a single parameter kwargs['qualifier'] = None # Let's also avoid the possibility of accidentally deleting system # parameters, etc kwargs.setdefault('context', ['dataset', 'model', 'constraint', 'compute']) # ps = self.filter(**kwargs) # logger.info('removing {} parameters (this is not undoable)'.\ # format(len(ps))) # print "*** kwargs", kwargs, len(ps) self.remove_parameters_all(**kwargs) # not really sure why we need to call this twice, but it seems to do # the trick self.remove_parameters_all(**kwargs) self._handle_dataset_selectparams() # TODO: check to make sure that trying to undo this # will raise an error saying this is not undo-able self._add_history(redo_func='remove_dataset', redo_kwargs={'dataset': dataset}, undo_func=None, undo_kwargs={}) return
[ "def", "remove_dataset", "(", "self", ",", "dataset", "=", "None", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_kwargs_checks", "(", "kwargs", ")", "# Let's avoid deleting ALL parameters from the matching contexts", "if", "dataset", "is", "None", "and", "not"...
38.445946
21.22973
def to_query_parameters(parameters): """Converts DB-API parameter values into query parameters. :type parameters: Mapping[str, Any] or Sequence[Any] :param parameters: A dictionary or sequence of query parameter values. :rtype: List[google.cloud.bigquery.query._AbstractQueryParameter] :returns: A list of query parameters. """ if parameters is None: return [] if isinstance(parameters, collections_abc.Mapping): return to_query_parameters_dict(parameters) return to_query_parameters_list(parameters)
[ "def", "to_query_parameters", "(", "parameters", ")", ":", "if", "parameters", "is", "None", ":", "return", "[", "]", "if", "isinstance", "(", "parameters", ",", "collections_abc", ".", "Mapping", ")", ":", "return", "to_query_parameters_dict", "(", "parameters"...
33.8125
19.625
def to_xml(self, f=None): """Get this domain as an XML DOM Document :param f: Optional File to dump directly to :type f: File or Stream :return: File object where the XML has been dumped to :rtype: file """ if not f: from tempfile import TemporaryFile f = TemporaryFile() print >> f, '<?xml version="1.0" encoding="UTF-8"?>' print >> f, '<Domain id="%s">' % self.name for item in self: print >> f, '\t<Item id="%s">' % item.name for k in item: print >> f, '\t\t<attribute id="%s">' % k values = item[k] if not isinstance(values, list): values = [values] for value in values: print >> f, '\t\t\t<value><![CDATA[', if isinstance(value, unicode): value = value.encode('utf-8', 'replace') else: value = unicode(value, errors='replace').encode('utf-8', 'replace') f.write(value) print >> f, ']]></value>' print >> f, '\t\t</attribute>' print >> f, '\t</Item>' print >> f, '</Domain>' f.flush() f.seek(0) return f
[ "def", "to_xml", "(", "self", ",", "f", "=", "None", ")", ":", "if", "not", "f", ":", "from", "tempfile", "import", "TemporaryFile", "f", "=", "TemporaryFile", "(", ")", "print", ">>", "f", ",", "'<?xml version=\"1.0\" encoding=\"UTF-8\"?>'", "print", ">>", ...
38.088235
14.264706
def removeLogicalInterfaceFromThingType(self, thingTypeId, logicalInterfaceId): """ Removes a logical interface from a thing type. Parameters: - thingTypeId (string) - the thing type - logicalInterfaceId (string) - the id returned by the platform on creation of the logical interface Throws APIException on failure. """ req = ApiClient.oneThingTypeLogicalInterfaceUrl % (self.host, thingTypeId, logicalInterfaceId) resp = requests.delete(req, auth=self.credentials, verify=self.verify) if resp.status_code == 204: self.logger.debug("Logical interface removed from a thing type") else: raise ibmiotf.APIException(resp.status_code, "HTTP error removing logical interface from a thing type", resp) return resp
[ "def", "removeLogicalInterfaceFromThingType", "(", "self", ",", "thingTypeId", ",", "logicalInterfaceId", ")", ":", "req", "=", "ApiClient", ".", "oneThingTypeLogicalInterfaceUrl", "%", "(", "self", ".", "host", ",", "thingTypeId", ",", "logicalInterfaceId", ")", "r...
54.666667
28.533333
def import_status(handler, host=None, core_name=None, verbose=False): ''' Submits an import command to the specified handler using specified options. This command can only be run if the minion is configured with solr.type: 'master' handler : str The name of the data import handler. host : str (None) The solr host to query. __opts__['host'] is default. core : str (None) The core the handler belongs to. verbose : boolean (False) Specifies verbose output Return : dict<str,obj>:: {'success':boolean, 'data':dict, 'errors':list, 'warnings':list} CLI Example: .. code-block:: bash salt '*' solr.import_status dataimport None music False ''' if not _is_master() and _get_none_or_value(host) is None: errors = ['solr.import_status can only be called by "master" minions'] return _get_return_dict(False, errors=errors) extra = ["command=status"] if verbose: extra.append("verbose=true") url = _format_url(handler, host=host, core_name=core_name, extra=extra) return _http_request(url)
[ "def", "import_status", "(", "handler", ",", "host", "=", "None", ",", "core_name", "=", "None", ",", "verbose", "=", "False", ")", ":", "if", "not", "_is_master", "(", ")", "and", "_get_none_or_value", "(", "host", ")", "is", "None", ":", "errors", "=...
32.176471
24.235294
def serialized(self, prepend_date=True): """Return a string fully representing the fact.""" name = self.serialized_name() datetime = self.serialized_time(prepend_date) return "%s %s" % (datetime, name)
[ "def", "serialized", "(", "self", ",", "prepend_date", "=", "True", ")", ":", "name", "=", "self", ".", "serialized_name", "(", ")", "datetime", "=", "self", ".", "serialized_time", "(", "prepend_date", ")", "return", "\"%s %s\"", "%", "(", "datetime", ","...
45.8
3.4
def add_markdown_cell(self, text): """Add a markdown cell to the notebook Parameters ---------- code : str Cell content """ markdown_cell = { "cell_type": "markdown", "metadata": {}, "source": [rst2md(text)] } self.work_notebook["cells"].append(markdown_cell)
[ "def", "add_markdown_cell", "(", "self", ",", "text", ")", ":", "markdown_cell", "=", "{", "\"cell_type\"", ":", "\"markdown\"", ",", "\"metadata\"", ":", "{", "}", ",", "\"source\"", ":", "[", "rst2md", "(", "text", ")", "]", "}", "self", ".", "work_not...
25.642857
15.142857
def add_unique_template_variables(self, options): """Update map template variables specific to heatmap visual""" # set line stroke dash interval based on line_stroke property if self.line_stroke in ["dashed", "--"]: self.line_dash_array = [6, 4] elif self.line_stroke in ["dotted", ":"]: self.line_dash_array = [0.5, 4] elif self.line_stroke in ["dash dot", "-."]: self.line_dash_array = [6, 4, 0.5, 4] elif self.line_stroke in ["solid", "-"]: self.line_dash_array = [1, 0] else: # default to solid line self.line_dash_array = [1, 0] # check if choropleth map should include 3-D extrusion self.extrude = all([bool(self.height_property), bool(self.height_stops)]) # common variables for vector and geojson-based choropleths options.update(dict( colorStops=self.color_stops, colorProperty=self.color_property, colorType=self.color_function_type, defaultColor=self.color_default, lineColor=self.line_color, lineDashArray=self.line_dash_array, lineStroke=self.line_stroke, lineWidth=self.line_width, extrudeChoropleth=self.extrude, highlightColor=self.highlight_color )) if self.extrude: options.update(dict( heightType=self.height_function_type, heightProperty=self.height_property, heightStops=self.height_stops, defaultHeight=self.height_default, )) # vector-based choropleth map variables if self.vector_source: options.update(vectorColorStops=self.generate_vector_color_map()) if self.extrude: options.update(vectorHeightStops=self.generate_vector_numeric_map('height')) # geojson-based choropleth map variables else: options.update(geojson_data=json.dumps(self.data, ensure_ascii=False))
[ "def", "add_unique_template_variables", "(", "self", ",", "options", ")", ":", "# set line stroke dash interval based on line_stroke property", "if", "self", ".", "line_stroke", "in", "[", "\"dashed\"", ",", "\"--\"", "]", ":", "self", ".", "line_dash_array", "=", "["...
40.52
16.12
def set_object_cache(self, notify_func=None, getbuffer_func=None): """ Set the object cache "notifyObjectCompiled" and "getBuffer" callbacks to the given Python functions. """ self._object_cache_notify = notify_func self._object_cache_getbuffer = getbuffer_func # Lifetime of the object cache is managed by us. self._object_cache = _ObjectCacheRef(self) # Note this doesn't keep a reference to self, to avoid reference # cycles. ffi.lib.LLVMPY_SetObjectCache(self, self._object_cache)
[ "def", "set_object_cache", "(", "self", ",", "notify_func", "=", "None", ",", "getbuffer_func", "=", "None", ")", ":", "self", ".", "_object_cache_notify", "=", "notify_func", "self", ".", "_object_cache_getbuffer", "=", "getbuffer_func", "# Lifetime of the object cac...
46.75
15.416667
def _convert_agent_types(ind, to_string=False, **kwargs): '''Convenience method to allow specifying agents by class or class name.''' if to_string: return serialize_distribution(ind, **kwargs) return deserialize_distribution(ind, **kwargs)
[ "def", "_convert_agent_types", "(", "ind", ",", "to_string", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "to_string", ":", "return", "serialize_distribution", "(", "ind", ",", "*", "*", "kwargs", ")", "return", "deserialize_distribution", "(", "in...
51
20.2
def task_view_link(self, ): """View the link of the current task :returns: None :rtype: None :raises: None """ if not self.cur_task: return e = self.cur_task.element if isinstance(e, djadapter.models.Asset): self.view_asset(e) else: self.view_shot(e)
[ "def", "task_view_link", "(", "self", ",", ")", ":", "if", "not", "self", ".", "cur_task", ":", "return", "e", "=", "self", ".", "cur_task", ".", "element", "if", "isinstance", "(", "e", ",", "djadapter", ".", "models", ".", "Asset", ")", ":", "self"...
24.714286
14.785714
def accept(self, evt): """ write setting to the preferences """ # determine if application is a script file or frozen exe (pyinstaller) frozen = getattr(sys, 'frozen', False) if frozen: app_file = sys.executable else: app_file = PathStr(__main__.__file__).abspath() if self.cb_startmenu.isChecked(): # TODO: allow only logo location # icon = app_file.dirname().join('media', 'logo.ico') StartMenuEntry(self.name, app_file, icon=self.icon, console=False).create() if self.cb_mime.isChecked(): # get admin rights if not isAdmin(): try: # run this file as __main__ with admin rights: if frozen: cmd = "from %s import embeddIntoOS\nembeddIntoOS('%s', '%s', '%s')" % ( __name__, '', self.ftype, self.name) # in this case there is no python.exe and no moduly.py to call # thats why we have to import the method and execute it runAsAdmin((sys.executable, '-exec', cmd)) else: runAsAdmin((sys.executable, __file__, app_file, self.ftype, self.name)) except: print('needs admin rights to work') else: embeddIntoOS(app_file, self.ftype, self.name) QtWidgets.QDialog.accept(self)
[ "def", "accept", "(", "self", ",", "evt", ")", ":", "# determine if application is a script file or frozen exe (pyinstaller)\r", "frozen", "=", "getattr", "(", "sys", ",", "'frozen'", ",", "False", ")", "if", "frozen", ":", "app_file", "=", "sys", ".", "executable...
42.837838
19.864865
def _path_has_ok_chars(path): """ Validate path for invalid characters. :param path: str possible filesystem path :return: path if it was ok otherwise raises error """ basename = os.path.basename(path) if any([bad_char in basename for bad_char in INVALID_PATH_CHARS]): raise argparse.ArgumentTypeError("{} contains invalid characters for a directory.".format(path)) return path
[ "def", "_path_has_ok_chars", "(", "path", ")", ":", "basename", "=", "os", ".", "path", ".", "basename", "(", "path", ")", "if", "any", "(", "[", "bad_char", "in", "basename", "for", "bad_char", "in", "INVALID_PATH_CHARS", "]", ")", ":", "raise", "argpar...
41.2
15.6
def analyze_section(section: SoS_Step, default_input: Optional[sos_targets] = None, default_output: Optional[sos_targets] = None, context={}, vars_and_output_only: bool = False) -> Dict[str, Any]: '''Analyze a section for how it uses input and output, what variables it uses, and input, output, etc.''' # analysis_key = (section.md5, section.step_name(), # default_input.target_name() if hasattr(default_input, 'target_name') else '', # default_output.target_name() if hasattr(default_output, 'target_name') else '', vars_and_output_only) #if analysis_key in analysis_cache: # return analysis_cache[analysis_key] # use a fresh env for analysis new_env, old_env = env.request_new() try: prepare_env(section.global_def, section.global_vars, context) env.sos_dict.set('step_name', section.step_name()) env.sos_dict.set('__null_func__', __null_func__) if 'STEP' in env.config['SOS_DEBUG'] or 'ALL' in env.config['SOS_DEBUG']: env.log_to_file( 'STEP', f'Analyzing {section.step_name()} {"(output only)" if vars_and_output_only else ""}' ) res = { 'step_name': section.step_name(), 'step_output': get_step_output(section, default_output), # variables starting with __ are internals... 'environ_vars': get_environ_vars(section), 'signature_vars': get_signature_vars(section), 'changed_vars': get_changed_vars(section) } if not vars_and_output_only: inps = get_step_input(section, default_input) res['step_input'] = inps[0] res['dynamic_input'] = inps[1] deps = get_step_depends(section) res['step_depends'] = deps[0] res['dynamic_depends'] = deps[1] # analysis_cache[analysis_key] = res finally: # restore env env.restore_to_old(new_env, old_env) # #1225 # The global section can contain a lot of variables, some of which can be large. Here we # found all variables that will be used in the step, including ones used in substep (signature_vars) # and ones that will be used in input statement etc. 
section.global_vars = { x: y for x, y in section.global_vars.items() if x in get_all_used_vars(section) } return res
[ "def", "analyze_section", "(", "section", ":", "SoS_Step", ",", "default_input", ":", "Optional", "[", "sos_targets", "]", "=", "None", ",", "default_output", ":", "Optional", "[", "sos_targets", "]", "=", "None", ",", "context", "=", "{", "}", ",", "vars_...
43.196429
21.267857
def getOverlayWidthInMeters(self, ulOverlayHandle): """Returns the width of the overlay quad in meters. By default overlays are rendered on a quad that is 1 meter across""" fn = self.function_table.getOverlayWidthInMeters pfWidthInMeters = c_float() result = fn(ulOverlayHandle, byref(pfWidthInMeters)) return result, pfWidthInMeters.value
[ "def", "getOverlayWidthInMeters", "(", "self", ",", "ulOverlayHandle", ")", ":", "fn", "=", "self", ".", "function_table", ".", "getOverlayWidthInMeters", "pfWidthInMeters", "=", "c_float", "(", ")", "result", "=", "fn", "(", "ulOverlayHandle", ",", "byref", "("...
53.428571
13.714286
def run_top_task(self, task_name=None, sort=None, **kwargs): """Finds and runs a pending task that in the first of the sorting list. Parameters ----------- task_name : str The task name. sort : List of tuple PyMongo sort comment, search "PyMongo find one sorting" and `collection level operations <http://api.mongodb.com/python/current/api/pymongo/collection.html>`__ for more details. kwargs : other parameters Users customized parameters such as description, version number. Examples --------- Monitors the database and pull tasks to run >>> while True: >>> print("waiting task from distributor") >>> db.run_top_task(task_name='mnist', sort=[("time", -1)]) >>> time.sleep(1) Returns -------- boolean : True for success, False for fail. """ if not isinstance(task_name, str): # is None: raise Exception("task_name should be string") self._fill_project_info(kwargs) kwargs.update({'status': 'pending'}) # find task and set status to running task = self.db.Task.find_one_and_update(kwargs, {'$set': {'status': 'running'}}, sort=sort) try: # get task info e.g. 
hyper parameters, python script if task is None: logging.info("[Database] Find Task FAIL: key: {} sort: {}".format(task_name, sort)) return False else: logging.info("[Database] Find Task SUCCESS: key: {} sort: {}".format(task_name, sort)) _datetime = task['time'] _script = task['script'] _id = task['_id'] _hyper_parameters = task['hyper_parameters'] _saved_result_keys = task['saved_result_keys'] logging.info(" hyper parameters:") for key in _hyper_parameters: globals()[key] = _hyper_parameters[key] logging.info(" {}: {}".format(key, _hyper_parameters[key])) # run task s = time.time() logging.info("[Database] Start Task: key: {} sort: {} push time: {}".format(task_name, sort, _datetime)) _script = _script.decode('utf-8') with tf.Graph().as_default(): # as graph: # clear all TF graphs exec(_script, globals()) # set status to finished _ = self.db.Task.find_one_and_update({'_id': _id}, {'$set': {'status': 'finished'}}) # return results __result = {} for _key in _saved_result_keys: logging.info(" result: {}={} {}".format(_key, globals()[_key], type(globals()[_key]))) __result.update({"%s" % _key: globals()[_key]}) _ = self.db.Task.find_one_and_update( { '_id': _id }, {'$set': { 'result': __result }}, return_document=pymongo.ReturnDocument.AFTER ) logging.info( "[Database] Finished Task: task_name - {} sort: {} push time: {} took: {}s". format(task_name, sort, _datetime, time.time() - s) ) return True except Exception as e: exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] logging.info("{} {} {} {} {}".format(exc_type, exc_obj, fname, exc_tb.tb_lineno, e)) logging.info("[Database] Fail to run task") # if fail, set status back to pending _ = self.db.Task.find_one_and_update({'_id': _id}, {'$set': {'status': 'pending'}}) return False
[ "def", "run_top_task", "(", "self", ",", "task_name", "=", "None", ",", "sort", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "isinstance", "(", "task_name", ",", "str", ")", ":", "# is None:", "raise", "Exception", "(", "\"task_name shoul...
44.166667
23.404762
def validity_duration(self): """ How long this parameter value is valid. .. note: There is also an option when subscribing to get updated when the parameter values expire. :type: :class:`~datetime.timedelta` """ if self._proto.HasField('expireMillis'): return timedelta(milliseconds=self._proto.expireMillis) return None
[ "def", "validity_duration", "(", "self", ")", ":", "if", "self", ".", "_proto", ".", "HasField", "(", "'expireMillis'", ")", ":", "return", "timedelta", "(", "milliseconds", "=", "self", ".", "_proto", ".", "expireMillis", ")", "return", "None" ]
33
16.666667
def isInNet(host, pattern, mask): """ Pattern and mask specification is done the same way as for SOCKS configuration. :param str host: a DNS hostname, or IP address. If a hostname is passed, it will be resolved into an IP address by this function. :param str pattern: an IP address pattern in the dot-separated format :param str mask: mask for the IP address pattern informing which parts of the IP address should be matched against. 0 means ignore, 255 means match. :returns: True iff the IP address of the host matches the specified IP address pattern. :rtype: bool """ host_ip = host if is_ipv4_address(host) else dnsResolve(host) if not host_ip or not is_ipv4_address(pattern) or not is_ipv4_address(mask): return False return _address_in_network(host_ip, pattern, mask)
[ "def", "isInNet", "(", "host", ",", "pattern", ",", "mask", ")", ":", "host_ip", "=", "host", "if", "is_ipv4_address", "(", "host", ")", "else", "dnsResolve", "(", "host", ")", "if", "not", "host_ip", "or", "not", "is_ipv4_address", "(", "pattern", ")", ...
51.75
27.25
def all(self, target=None, include_global=True): """ Get a dictionary of all aliases and their options. :param target: Include aliases for this specific field, model or app (optional). :param include_global: Include all non target-specific aliases (default ``True``). For example:: >>> aliases.all(target='my_app.MyModel') {'small': {'size': (100, 100)}, 'large': {'size': (400, 400)}} """ aliases = {} for target_part in self._get_targets(target, include_global): aliases.update(self._aliases.get(target_part, {})) return aliases
[ "def", "all", "(", "self", ",", "target", "=", "None", ",", "include_global", "=", "True", ")", ":", "aliases", "=", "{", "}", "for", "target_part", "in", "self", ".", "_get_targets", "(", "target", ",", "include_global", ")", ":", "aliases", ".", "upd...
36
21.777778
def log(x, base=None): """ log(x, base=e) Logarithmic function. """ _math = infer_math(x) if base is None: return _math.log(x) elif _math == math: return _math.log(x, base) else: # numpy has no option to set a base return _math.log(x) / _math.log(base)
[ "def", "log", "(", "x", ",", "base", "=", "None", ")", ":", "_math", "=", "infer_math", "(", "x", ")", "if", "base", "is", "None", ":", "return", "_math", ".", "log", "(", "x", ")", "elif", "_math", "==", "math", ":", "return", "_math", ".", "l...
25.083333
12
def get_pstats_print2list(fnames, filter_fnames=None, exclude_fnames=None, sort=None, sort_reverse=None, limit=None): """Print stats with a filter or exclude filenames, sort index and limit. :param list fnames: cProfile standard files to process. :param list filter_fnames: Relative paths to filter and show them. :param list exclude_fnames: Relative paths to avoid show them. :param str sort: Standard `pstats` key of value to sort the result. \n\t\t\t'calls' (call count) \n\t\t\t'cumulative' (cumulative time) \n\t\t\t'cumtime' (cumulative time) \n\t\t\t'file' (file name) \n\t\t\t'filename' (file name) \n\t\t\t'module' (file name) \n\t\t\t'ncalls' (call count) \n\t\t\t'pcalls' (primitive call count) \n\t\t\t'line' (line number) \n\t\t\t'name' (function name) \n\t\t\t'nfl' (name/file/line) \n\t\t\t'stdname' (standard name) \n\t\t\t'time' (internal time) \n\t\t\t'tottime' (internal time) :param bool sort_reverse: Reverse sort order. :param int limit: Limit max result. :returns: List of dicts with `pstats` print result after filters, sorted and limited. 
""" if isinstance(fnames, basestring): fnames = [fnames] fnames_expanded = [ os.path.expandvars(os.path.expanduser(fname)) for fname in fnames] stream = StringIO() try: stats = pstats.Stats(fnames[0], stream=stream) for fname in fnames_expanded[1:]: stats.add(fname) except TypeError: print("No cProfile stats valid.") return False except EOFError: print("Empty file cProfile stats valid.") return False except IOError: print("Error to open file.") return False stats.print_stats() stream.seek(0) field_list = get_field_list() line_stats_re = re.compile( r'(?P<%s>\d+/?\d+|\d+)\s+(?P<%s>\d+\.?\d+)\s+(?P<%s>\d+\.?\d+)\s+' r'(?P<%s>\d+\.?\d+)\s+(?P<%s>\d+\.?\d+)\s+(?P<%s>.*):(?P<%s>\d+)' r'\((?P<%s>.*)\)' % tuple(field_list)) stats_list = [] count = 0 for line in stream: line = line.strip('\r\n ') line_stats_match = line_stats_re.match(line) if line else None fname = line_stats_match.group('file') if line_stats_match else None if fname and is_fname_match(fname, filter_fnames) and \ not is_exclude(fname, exclude_fnames): data = dict([(field, line_stats_match.group(field)) for field in field_list]) data['rcalls'], data['calls'] = ( data.get('ncalls', '') + '/' + data.get('ncalls', '') ).split('/')[:2] data['factor'] = "%.2f" % ( (float(data['rcalls']) - float(data['calls']) + 1) * float(data['cumtime'])) data['cumulative'] = data['cumtime'] stats_list.append(data) count += 1 return sorted(stats_list, key=lambda key: float(key[sort or 'factor']), reverse=not sort_reverse)[:limit]
[ "def", "get_pstats_print2list", "(", "fnames", ",", "filter_fnames", "=", "None", ",", "exclude_fnames", "=", "None", ",", "sort", "=", "None", ",", "sort_reverse", "=", "None", ",", "limit", "=", "None", ")", ":", "if", "isinstance", "(", "fnames", ",", ...
41.432432
15.22973
def support_scripting(self): """ Returns True if scripting is available. Checks are done in the client library (redis-py) AND the redis server. Result is cached, so done only one time. """ if not hasattr(self, '_support_scripting'): try: self._support_scripting = self.redis_version >= (2, 5) \ and hasattr(self.connection, 'register_script') except: self._support_scripting = False return self._support_scripting
[ "def", "support_scripting", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_support_scripting'", ")", ":", "try", ":", "self", ".", "_support_scripting", "=", "self", ".", "redis_version", ">=", "(", "2", ",", "5", ")", "and", "hasatt...
41
18.076923
def get_sitecol_assetcol(oqparam, haz_sitecol=None, cost_types=()): """ :param oqparam: calculation parameters :param haz_sitecol: the hazard site collection :param cost_types: the expected cost types :returns: (site collection, asset collection, discarded) """ global exposure asset_hazard_distance = oqparam.asset_hazard_distance['default'] if exposure is None: # haz_sitecol not extracted from the exposure exposure = get_exposure(oqparam) if haz_sitecol is None: haz_sitecol = get_site_collection(oqparam) if oqparam.region_grid_spacing: haz_distance = oqparam.region_grid_spacing * 1.414 if haz_distance != asset_hazard_distance: logging.info('Using asset_hazard_distance=%d km instead of %d km', haz_distance, asset_hazard_distance) else: haz_distance = asset_hazard_distance if haz_sitecol.mesh != exposure.mesh: # associate the assets to the hazard sites sitecol, assets_by, discarded = geo.utils.assoc( exposure.assets_by_site, haz_sitecol, haz_distance, 'filter', exposure.asset_refs) assets_by_site = [[] for _ in sitecol.complete.sids] num_assets = 0 for sid, assets in zip(sitecol.sids, assets_by): assets_by_site[sid] = assets num_assets += len(assets) logging.info( 'Associated %d assets to %d sites', num_assets, len(sitecol)) else: # asset sites and hazard sites are the same sitecol = haz_sitecol assets_by_site = exposure.assets_by_site discarded = [] logging.info('Read %d sites and %d assets from the exposure', len(sitecol), sum(len(a) for a in assets_by_site)) assetcol = asset.AssetCollection( exposure, assets_by_site, oqparam.time_event) if assetcol.occupancy_periods: missing = set(cost_types) - set(exposure.cost_types['name']) - set( ['occupants']) else: missing = set(cost_types) - set(exposure.cost_types['name']) if missing and not oqparam.calculation_mode.endswith('damage'): raise InvalidFile('The exposure %s is missing %s' % (oqparam.inputs['exposure'], missing)) if (not oqparam.hazard_calculation_id and 'gmfs' not in oqparam.inputs and 'hazard_curves' not in 
oqparam.inputs and sitecol is not sitecol.complete): assetcol = assetcol.reduce_also(sitecol) return sitecol, assetcol, discarded
[ "def", "get_sitecol_assetcol", "(", "oqparam", ",", "haz_sitecol", "=", "None", ",", "cost_types", "=", "(", ")", ")", ":", "global", "exposure", "asset_hazard_distance", "=", "oqparam", ".", "asset_hazard_distance", "[", "'default'", "]", "if", "exposure", "is"...
44.803571
16.017857
def sg_one_hot(tensor, opt): r"""Converts a tensor into a one-hot tensor. See `tf.one_hot()` in tensorflow. Args: tensor: A `Tensor` ( automatically given by chain ) opt: depth: The number of classes. name: If provided, replace current tensor's name. Returns: A `Tensor`. """ assert opt.depth is not None, 'depth is mandatory.' return tf.one_hot(tensor, opt.depth, name=opt.name)
[ "def", "sg_one_hot", "(", "tensor", ",", "opt", ")", ":", "assert", "opt", ".", "depth", "is", "not", "None", ",", "'depth is mandatory.'", "return", "tf", ".", "one_hot", "(", "tensor", ",", "opt", ".", "depth", ",", "name", "=", "opt", ".", "name", ...
27.1875
19.25
def build_articles_from_article_xmls(article_xmls, detail="full",
                                     build_parts=None, remove_tags=None):
    """
    Given a list of article XML filenames, convert to article objects.

    Files that fail to parse (non-zero error count) are skipped.
    """
    articles = []
    for xml_filename in article_xmls:
        print("working on ", xml_filename)
        parsed, error_count = build_article_from_xml(
            xml_filename, detail, build_parts, remove_tags)
        # keep only clean parses
        if not error_count:
            articles.append(parsed)
    return articles
[ "def", "build_articles_from_article_xmls", "(", "article_xmls", ",", "detail", "=", "\"full\"", ",", "build_parts", "=", "None", ",", "remove_tags", "=", "None", ")", ":", "poa_articles", "=", "[", "]", "for", "article_xml", "in", "article_xmls", ":", "print", ...
35.1875
20.8125
def parse_setup(options: Union[List, str]) -> str:
    """Collapse a possible list of commands into a single string.

    Elements of a list are joined with newlines so they all run after each
    other in a bash script; a plain string is returned unchanged.
    """
    return options if isinstance(options, str) else "\n".join(options)
[ "def", "parse_setup", "(", "options", ":", "Union", "[", "List", ",", "str", "]", ")", "->", "str", ":", "if", "isinstance", "(", "options", ",", "str", ")", ":", "return", "options", "return", "\"\\n\"", ".", "join", "(", "options", ")" ]
35
19
def view_seq(self, seq):
    """View the given sequence on the sequence page

    :param seq: the sequence to view
    :type seq: :class:`jukeboxcore.djadapter.models.Sequence`
    :returns: None
    :rtype: None
    :raises: None
    """
    log.debug('Viewing sequence %s', seq.name)
    # NOTE(review): cur_seq is cleared first and only restored at the end --
    # presumably so handlers reacting to the widget updates below do not act
    # on a half-updated page; confirm.
    self.cur_seq = None
    self.pages_tabw.setCurrentIndex(2)
    # fill the simple info widgets
    self.seq_name_le.setText(seq.name)
    self.seq_prj_le.setText(seq.project.name)
    self.seq_desc_pte.setPlainText(seq.description)
    # build a tree model with one row per shot of the sequence
    shotrootdata = treemodel.ListItemData(['Name', "Description", "Duration", "Start", "End"])
    shotrootitem = treemodel.TreeItem(shotrootdata)
    for shot in seq.shot_set.all():
        shotdata = djitemdata.ShotItemData(shot)
        # the TreeItem registers itself with its parent (shotrootitem)
        treemodel.TreeItem(shotdata, shotrootitem)
    self.seq_shot_model = treemodel.TreeModel(shotrootitem)
    self.seq_shot_tablev.setModel(self.seq_shot_model)
    self.cur_seq = seq
[ "def", "view_seq", "(", "self", ",", "seq", ")", ":", "log", ".", "debug", "(", "'Viewing sequence %s'", ",", "seq", ".", "name", ")", "self", ".", "cur_seq", "=", "None", "self", ".", "pages_tabw", ".", "setCurrentIndex", "(", "2", ")", "self", ".", ...
40.333333
16
def insert(self, **kwargs):
    """
    Performs an INSERT statement on the model's table in the master database.

    :param values: A dictionary containing the values to be inserted.
        ``datetime``, ``dict`` and ``bool`` objects can be passed as is and
        will be correctly serialized by psycopg2.
    :type values: dict
    :returns: the inserted record (or model), or None when there is nothing
        to insert
    """
    if len(kwargs['values']) == 0:
        config.logger.warning('No values to insert.')
        return

    values = kwargs['values']
    if isinstance(values, self):
        # NOTE(review): isinstance() expects a class as its second argument;
        # this line only works if `self` is actually a class here (or its
        # metaclass defines __instancecheck__) -- confirm against callers.
        values = values.attributes.copy()

    if isinstance(values, dict):
        # Drop null values. Iterate over a snapshot: `values` may be the very
        # same dict as kwargs['values'], and deleting from a dict while
        # iterating it raises RuntimeError in Python 3.
        for (k, v) in list(values.items()):
            if v is None:
                del kwargs['values'][k]

    kwargs['stack'] = self.stack_mark(inspect.stack())
    kwargs['primary_key'] = self.primary_key

    # Stamp audit columns when the table defines them.
    column_names = self.table_schema().keys()
    now = datetime.utcnow()
    for field in ('created_at', 'updated_at'):
        if field in column_names:
            kwargs['values'][field] = now

    results = self.db_adapter(role='master').insert(**kwargs)
    return self.record_or_model(results)
[ "def", "insert", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "len", "(", "kwargs", "[", "'values'", "]", ")", "==", "0", ":", "config", ".", "logger", ".", "warning", "(", "'No values to insert.'", ")", "return", "values", "=", "kwargs", "[...
43.923077
16.076923
def create_gist(self, public, files, description=github.GithubObject.NotSet):
    """
    :calls: `POST /gists <http://developer.github.com/v3/gists>`_
    :param public: bool
    :param files: dict of string to :class:`github.InputFileContent.InputFileContent`
    :param description: string
    :rtype: :class:`github.Gist.Gist`
    """
    # validate arguments up front (note: itervalues/iteritems and `unicode`
    # make this body Python 2 only)
    assert isinstance(public, bool), public
    assert all(isinstance(element, github.InputFileContent) for element in files.itervalues()), files
    assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description
    post_parameters = {
        "public": public,
        # send each file's raw identity payload, not the wrapper object
        "files": {key: value._identity for key, value in files.iteritems()},
    }
    # description is optional; only include it when explicitly given
    if description is not github.GithubObject.NotSet:
        post_parameters["description"] = description
    headers, data = self._requester.requestJsonAndCheck(
        "POST",
        "/gists",
        input=post_parameters
    )
    return github.Gist.Gist(self._requester, headers, data, completed=True)
[ "def", "create_gist", "(", "self", ",", "public", ",", "files", ",", "description", "=", "github", ".", "GithubObject", ".", "NotSet", ")", ":", "assert", "isinstance", "(", "public", ",", "bool", ")", ",", "public", "assert", "all", "(", "isinstance", "...
47.913043
23.652174
def _normalize_dates(self, context):
    ''' Build a timeline from given (or not) start and end dates '''
    # date/datetime bounds are serialized to 'YYYY-MM-DD' strings first
    for bound in ('start', 'end'):
        if bound in context:
            if isinstance(context[bound], dt.date):
                context[bound] = dt.date.strftime(
                    context[bound], format='%Y-%m-%d')

    trading_dates = intuition.utils.build_trading_timeline(
        context.pop('start', None), context.pop('end', None))

    context['index'] = trading_dates
    # live trading when the timeline extends past the current UTC time
    context['live'] = (dt.datetime.now(tz=pytz.utc) < trading_dates[-1])
[ "def", "_normalize_dates", "(", "self", ",", "context", ")", ":", "if", "'start'", "in", "context", ":", "if", "isinstance", "(", "context", "[", "'start'", "]", ",", "dt", ".", "date", ")", ":", "context", "[", "'start'", "]", "=", "dt", ".", "date"...
41.111111
19.333333
def _nodes_replaced(self, object, name, old, new):
    """Handler for the nodes list being replaced wholesale.

    Order matters: the outgoing nodes are torn down before the incoming
    ones are registered.
    """
    # drop every node from the outgoing list ...
    self._delete_nodes(old)
    # ... then register the incoming ones
    self._add_nodes(new)
[ "def", "_nodes_replaced", "(", "self", ",", "object", ",", "name", ",", "old", ",", "new", ")", ":", "self", ".", "_delete_nodes", "(", "old", ")", "self", ".", "_add_nodes", "(", "new", ")" ]
33.2
6.2
def load_stream(self, key, binary=False):
    """
    Yield a managed file-like object from which the calling code can read
    previously-serialized data.

    :param key: the storage key to read
    :param binary: when True wrap the payload in BytesIO, otherwise StringIO
    :return: a generator yielding one readable stream-like object
    """
    payload = self.load_value(key, binary=binary)
    if binary:
        yield io.BytesIO(payload)
    else:
        yield io.StringIO(payload)
[ "def", "load_stream", "(", "self", ",", "key", ",", "binary", "=", "False", ")", ":", "value", "=", "self", ".", "load_value", "(", "key", ",", "binary", "=", "binary", ")", "yield", "io", ".", "BytesIO", "(", "value", ")", "if", "binary", "else", ...
35.6
14.6
def autoprops_decorate(cls,           # type: Type[T]
                       include=None,  # type: Union[str, Tuple[str]]
                       exclude=None   # type: Union[str, Tuple[str]]
                       ):
    # type: (...) -> Type[T]
    """
    To automatically generate all properties getters and setters from the class
    constructor manually, without using @autoprops decorator.

    * if a @contract annotation exist on the __init__ method, mentioning a
      contract for a given parameter, the parameter contract will be added on
      the generated setter method
    * The user may override the generated getter and/or setter by creating them
      explicitly in the class and annotating them with @getter_override or
      @setter_override. Note that the contract will still be dynamically added
      on the setter, even if the setter already has one (in such case a
      `UserWarning` will be issued)

    :param cls: the class on which to execute. Note that it won't be wrapped.
    :param include: a tuple of explicit attribute names to include (None means all)
    :param exclude: a tuple of explicit attribute names to exclude. In such
        case, include should be None.
    :return: the same class, modified in place
    """
    # refuse to combine with decorators known to clash with autoprops
    _check_known_decorators(cls, '@autoprops')

    # generate the getters/setters directly on the class; no wrapper class
    # is created, the input class itself is returned
    _execute_autoprops_on_class(cls, include=include, exclude=exclude)
    return cls
[ "def", "autoprops_decorate", "(", "cls", ",", "# type: Type[T]", "include", "=", "None", ",", "# type: Union[str, Tuple[str]]", "exclude", "=", "None", "# type: Union[str, Tuple[str]]", ")", ":", "# type: (...) -> Type[T]", "# first check that we do not conflict with other known ...
48.147059
31.029412
def Search(path):
    """Search sys.path to find a source file that matches path.

    The provided input path may have an unknown number of irrelevant outer
    directories (e.g., /garbage1/garbage2/real1/real2/x.py). This function
    does multiple search iterations until an actual Python module file that
    matches the input path is found. At each iteration, it strips one leading
    directory from the path and searches the directories at sys.path for a
    match (trying the .pyo, .pyc and .py extensions in that order). If no
    candidate matches, the input path is returned unchanged.

    Args:
      path: Path that describes a source file. Must contain .py file
        extension. Must not contain any leading os.sep character.

    Returns:
      Full path to the matched source file, if a match is found. Otherwise,
      returns the input path.

    Raises:
      AssertionError: if the provided path is an absolute path, or if it does
        not have a .py extension.
    """
    # the caller must already have stripped any leading os.sep
    assert not path.startswith(os.sep)

    # the extension is irrelevant for the search itself
    source_root, source_ext = os.path.splitext(path)
    assert source_ext == '.py'

    def candidate_suffixes(p):
        """Yield p with successively fewer leading directories."""
        while p:
            yield p
            (_, _, p) = p.partition(os.sep)

    # Longer suffixes first; shorter suffixes are tried only when the longer
    # ones produce no match. sys.path order gives earlier entries priority.
    for suffix in candidate_suffixes(source_root):
        for base_dir in sys.path:
            candidate = os.path.join(base_dir, suffix)
            # The order of extensions does not matter; os.path.exists follows
            # symlinks and flattens relative paths internally, while the
            # returned string keeps the (possibly symlinked) form so it can be
            # matched against sys.modules later.
            for extension in ('.pyo', '.pyc', '.py'):
                full_path = candidate + extension
                if os.path.exists(full_path):
                    return full_path

    # nothing matched in any sys.path directory
    return path
[ "def", "Search", "(", "path", ")", ":", "def", "SearchCandidates", "(", "p", ")", ":", "\"\"\"Generates all candidates for the fuzzy search of p.\"\"\"", "while", "p", ":", "yield", "p", "(", "_", ",", "_", ",", "p", ")", "=", "p", ".", "partition", "(", "...
33.588235
22.517647
def rand_email():
    """Random email.

    Usage Example::

        >>> rand_email()
        Z4Lljcbdw7m@npa.net
    """
    # local part: one letter followed by 4-14 alphanumerics
    local_part = random.choice(string.ascii_letters) + \
        rand_str(string.ascii_letters + string.digits, random.randint(4, 14))
    # 2-10 lowercase letters for the domain, plus a random TLD-like suffix
    domain = rand_str(string.ascii_lowercase, random.randint(2, 10))
    suffix = random.choice(_all_email_kinds)
    return "%s@%s%s" % (local_part, domain, suffix)
[ "def", "rand_email", "(", ")", ":", "name", "=", "random", ".", "choice", "(", "string", ".", "ascii_letters", ")", "+", "rand_str", "(", "string", ".", "ascii_letters", "+", "string", ".", "digits", ",", "random", ".", "randint", "(", "4", ",", "14", ...
30.769231
17.769231
def stat(filename, retry_params=None, _account_id=None):
    """Get GCSFileStat of a Google Cloud storage file.

    Args:
      filename: A Google Cloud Storage filename of form '/bucket/filename'.
      retry_params: An api_utils.RetryParams for this call to GCS. If None,
        the default one is used.
      _account_id: Internal-use only.

    Returns:
      a GCSFileStat object containing info about this file.

    Raises:
      errors.AuthorizationError: if authorization failed.
      errors.NotFoundError: if an object that's expected to exist doesn't.
    """
    common.validate_file_path(filename)
    api = storage_api._get_storage_api(retry_params=retry_params,
                                       account_id=_account_id)
    # a HEAD request is enough: all the stat info is carried in the headers
    status, headers, content = api.head_object(
        api_utils._quote_filename(filename))
    errors.check_status(status, [200], filename, resp_headers=headers,
                        body=content)
    # translate the HTTP headers into a GCSFileStat record
    file_stat = common.GCSFileStat(
        filename=filename,
        st_size=common.get_stored_content_length(headers),
        st_ctime=common.http_time_to_posix(headers.get('last-modified')),
        etag=headers.get('etag'),
        content_type=headers.get('content-type'),
        metadata=common.get_metadata(headers))

    return file_stat
[ "def", "stat", "(", "filename", ",", "retry_params", "=", "None", ",", "_account_id", "=", "None", ")", ":", "common", ".", "validate_file_path", "(", "filename", ")", "api", "=", "storage_api", ".", "_get_storage_api", "(", "retry_params", "=", "retry_params"...
37.75
19.15625
def update_route53_records(self, domain_name, dns_name):
    """
    Updates Route53 Records following GW domain creation

    :param domain_name: the domain to point at the distribution
    :param dns_name: the distribution's DNS name to alias/CNAME to
    :return: the raw change_resource_record_sets response
    """
    zone_id = self.get_hosted_zone_id_for_domain(domain_name)

    # an apex (zone root) domain cannot use a CNAME; it needs an A alias
    is_apex = self.route53.get_hosted_zone(Id=zone_id)['HostedZone']['Name'][:-1] == domain_name
    if is_apex:
        record_set = {
            'Name': domain_name,
            'Type': 'A',
            'AliasTarget': {
                'HostedZoneId': 'Z2FDTNDATAQYW2',  # This is a magic value that means "CloudFront"
                'DNSName': dns_name,
                'EvaluateTargetHealth': False
            }
        }
    else:
        record_set = {
            'Name': domain_name,
            'Type': 'CNAME',
            'ResourceRecords': [
                {
                    'Value': dns_name
                }
            ],
            'TTL': 60
        }

    # Related: https://github.com/boto/boto3/issues/157
    # and: http://docs.aws.amazon.com/Route53/latest/APIReference/CreateAliasRRSAPI.html
    # and policy: https://spin.atomicobject.com/2016/04/28/route-53-hosted-zone-managment/
    # pure_zone_id = zone_id.split('/hostedzone/')[1]

    # XXX: ClientError: An error occurred (InvalidChangeBatch) when calling the ChangeResourceRecordSets operation:
    # Tried to create an alias that targets d1awfeji80d0k2.cloudfront.net., type A in zone Z1XWOQP59BYF6Z,
    # but the alias target name does not lie within the target zone
    response = self.route53.change_resource_record_sets(
        HostedZoneId=zone_id,
        ChangeBatch={
            'Changes': [
                {
                    'Action': 'UPSERT',
                    'ResourceRecordSet': record_set
                }
            ]
        }
    )

    return response
[ "def", "update_route53_records", "(", "self", ",", "domain_name", ",", "dns_name", ")", ":", "zone_id", "=", "self", ".", "get_hosted_zone_id_for_domain", "(", "domain_name", ")", "is_apex", "=", "self", ".", "route53", ".", "get_hosted_zone", "(", "Id", "=", ...
37.92
23
def renew_voms_proxy(passwd="", vo=None, lifetime="196:00"):
    """
    Renews the voms proxy using a password *passwd*, an optional virtual
    organization name *vo*, and a default *lifetime* of 8 days. The password
    is written to a temporary file first and piped into the renewal command
    to ensure it is not visible in the process list.
    """
    with tmp_file() as (_, tmp):
        # stash the password inside the managed temp file
        with open(tmp, "w") as f:
            f.write(passwd)

        # cat the password into voms-proxy-init instead of passing it as an
        # argument, so it never shows up in `ps` output
        parts = ["cat '{}' | voms-proxy-init --valid '{}'".format(tmp, lifetime)]
        if vo:
            parts.append("-voms '{}'".format(vo))
        cmd = " ".join(parts)

        code, out, _ = interruptable_popen(
            cmd, shell=True, executable="/bin/bash",
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        if code != 0:
            raise Exception("proxy renewal failed: {}".format(out))
[ "def", "renew_voms_proxy", "(", "passwd", "=", "\"\"", ",", "vo", "=", "None", ",", "lifetime", "=", "\"196:00\"", ")", ":", "with", "tmp_file", "(", ")", "as", "(", "_", ",", "tmp", ")", ":", "with", "open", "(", "tmp", ",", "\"w\"", ")", "as", ...
47.235294
24.411765
def run(self, output): '''Generate the report to the given output. :param output: writable file-like object or file path ''' # Ensure folder exists. if self.folder_id not in self.folders.folders(self.user): print("E: folder not found: %s" % self.folder_name, file=sys.stderr) return # Create workbook. wb = self.workbook = xlsxwriter.Workbook(output) # Create the different styles used by this report generator. self.formats['title'] = wb.add_format({'font_size': '18', 'bold': True}) self.formats['default'] = wb.add_format({'align': 'top'}) self.formats['bold'] = wb.add_format({'bold': True}) self.formats['header'] = wb.add_format({ 'bold': True, 'align': 'center', 'valign': 'top', 'font_size': '14', 'font_color': '#506050', 'bg_color': '#f5f5f5', 'right': 1, 'border_color': 'white'}) self.formats['pre'] = wb.add_format({'font_name': 'Courier', 'valign': 'top'}) self.formats['link'] = wb.add_format({'valign': 'top', 'font_color': 'blue', 'underline': True}) self.formats['type_text'] = wb.add_format({ 'font_color': '#BF8645', 'valign': 'top', 'align': 'center'}) self.formats['type_image'] = wb.add_format({ 'font_color': '#84BF45', 'valign': 'top', 'align': 'center'}) # Generate report for a specific subfolder or *all* subfolders of # self.folder . if self.subfolder_id is None: self._generate_report_all() else: self._generate_report_single(self.subfolder_id) # done and outta here self.workbook.close()
[ "def", "run", "(", "self", ",", "output", ")", ":", "# Ensure folder exists.", "if", "self", ".", "folder_id", "not", "in", "self", ".", "folders", ".", "folders", "(", "self", ".", "user", ")", ":", "print", "(", "\"E: folder not found: %s\"", "%", "self"...
34.508772
20.298246
def add(entry_point, all_entry_points, auto_write, scripts_path):
    '''Add Scrim scripts for a python project

    Reads the project's setup.py, validates the requested console_scripts
    entry points (they must start with "py"), copies the script templates
    for each one, and finally prints the scripts= section to paste into the
    package setup.
    '''
    click.echo()
    # exactly one of the two selection modes must be provided
    if not entry_point and not all_entry_points:
        raise click.UsageError(
            'Missing required option: --entry_point or --all_entry_points'
        )
    if not os.path.exists('setup.py'):
        raise click.UsageError('No setup.py found.')

    setup_data = parse_setup('setup.py')
    console_scripts = get_console_scripts(setup_data)
    scripts = []

    if all_entry_points and console_scripts:
        # Make sure our entry points start with py
        for entry in console_scripts:
            if not entry.startswith('py'):
                click.echo('Your python entry_points must start with py.')
                click.echo('Found: ' + entry)
                raise click.Abort()
        # generate the scrim scripts for every entry point found in setup.py
        for entry in console_scripts:
            click.echo('Found entry_point: ' + entry)
            py_entry_point = entry
            # the scrim script name is the entry point without the "py" prefix
            entry_point = entry[2:]
            more_scripts = copy_templates(
                entry_point,
                py_entry_point,
                auto_write,
                scripts_path
            )
            for script in more_scripts:
                click.echo(' Created ' + script)
            scripts.extend(more_scripts)
    elif entry_point:
        if not entry_point.startswith('py'):
            click.echo('Your python entry_points must start with py.')
            raise click.Abort()
        # warn (and let the user bail out) when the requested entry point is
        # not declared in setup.py
        if entry_point not in console_scripts:
            click.echo(entry_point + ' not found in your setups entry_points')
            click.echo('You will need to add it afterward if you continue...')
            click.echo('')
            click.confirm('Do you want to continue?', abort=True)
        click.echo('\nCreating scripts for: ' + entry_point)
        py_entry_point = entry_point
        # the scrim script name is the entry point without the "py" prefix
        entry_point = entry_point[2:]
        more_scripts = copy_templates(
            entry_point,
            py_entry_point,
            auto_write,
            scripts_path
        )
        for script in more_scripts:
            click.echo(' Created ' + script)
        scripts.extend(more_scripts)

    # print the scripts= section the user should paste into setup()
    click.echo('\n\nAdd the following section to your package setup:\n')
    click.echo('scripts=[')
    for script in scripts:
        click.echo(" '{}',".format(script))
    click.echo('],')
[ "def", "add", "(", "entry_point", ",", "all_entry_points", ",", "auto_write", ",", "scripts_path", ")", ":", "click", ".", "echo", "(", ")", "if", "not", "entry_point", "and", "not", "all_entry_points", ":", "raise", "click", ".", "UsageError", "(", "'Missin...
33.157143
17.9
def _reload_version(self):
    """
    Packages installed by distutils (e.g. numpy or scipy), which uses an old
    safe_version, can have their version numbers mangled when converted to
    filenames (e.g., 1.11.0.dev0+2329eae to 1.11.0.dev0_2329eae). Such
    distributions will not be parsed properly downstream by Distribution and
    safe_version, so take an extra step and try to get the version number
    from the metadata file itself instead of the filename.
    """
    metadata_version = self._get_version()
    # only override when the metadata actually carries a version
    if metadata_version:
        self._version = metadata_version
    return self
[ "def", "_reload_version", "(", "self", ")", ":", "md_version", "=", "self", ".", "_get_version", "(", ")", "if", "md_version", ":", "self", ".", "_version", "=", "md_version", "return", "self" ]
40.375
13
def __process_url_wrapper_elements(self, elements):
    """
    Creates the url nodes for pelican.urlwrappers.Category and
    pelican.urlwrappers.Tag.

    :param elements: list of wrapper elements
    :type elements: list
    :return: the processes urls as HTML
    :rtype: str
    """
    nodes = []
    for wrapper, articles in elements:
        # the wrapper's modification time is that of its newest article
        newest_article = sorted(
            articles, key=self.__get_date_key, reverse=True)[0]
        nodes.append(self.__create_url_node_for_content(
            wrapper,
            'others',
            url=urljoin(self.url_site, wrapper.url),
            modification_time=self.__get_date_key(newest_article)
        ))
    return ''.join(nodes)
[ "def", "__process_url_wrapper_elements", "(", "self", ",", "elements", ")", ":", "urls", "=", "''", "for", "url_wrapper", ",", "articles", "in", "elements", ":", "urls", "+=", "self", ".", "__create_url_node_for_content", "(", "url_wrapper", ",", "'others'", ","...
39.941176
18.764706
def set_proxy(self, host, port, user=None, password=None):
    '''
    Sets the proxy server host and port for the HTTP CONNECT Tunnelling.

    host:
        Address of the proxy. Ex: '192.168.0.100'
    port:
        Port of the proxy. Ex: 6000
    user:
        User for proxy authorization.
    password:
        Password for proxy authorization.
    '''
    # delegate straight to the underlying HTTP client
    self._httpclient.set_proxy(host, port, user, password)
[ "def", "set_proxy", "(", "self", ",", "host", ",", "port", ",", "user", "=", "None", ",", "password", "=", "None", ")", ":", "self", ".", "_httpclient", ".", "set_proxy", "(", "host", ",", "port", ",", "user", ",", "password", ")" ]
32.285714
21.285714
def filter_alias_create_namespace(namespace):
    """
    Filter alias name and alias command inside alias create namespace to
    appropriate strings.

    Args:
        namespace: The alias create namespace.

    Returns:
        Filtered namespace where excessive whitespaces are removed in strings.
    """
    def collapse_whitespace(text):
        # split() with no argument drops every run of whitespace, including
        # leading/trailing, so joining with single spaces normalizes the string
        return ' '.join(text.strip().split())

    namespace.alias_name = collapse_whitespace(namespace.alias_name)
    namespace.alias_command = collapse_whitespace(namespace.alias_command)
    return namespace
[ "def", "filter_alias_create_namespace", "(", "namespace", ")", ":", "def", "filter_string", "(", "s", ")", ":", "return", "' '", ".", "join", "(", "s", ".", "strip", "(", ")", ".", "split", "(", ")", ")", "namespace", ".", "alias_name", "=", "filter_stri...
32.0625
23.0625
def build_block_with_transactions(
        self,
        transactions: Tuple[BaseTransaction, ...],
        parent_header: BlockHeader=None
) -> Tuple[BaseBlock, Tuple[Receipt, ...], Tuple[BaseComputation, ...]]:
    """
    Generate a block with the provided transactions. This does *not* import
    that block into your chain. If you want this new block in your chain,
    run :meth:`~import_block` with the result block from this method.

    :param transactions: an iterable of transactions to insert to the block
    :param parent_header: parent of the new block -- or canonical head if ``None``
    :return: (new block, receipts, computations)
    """
    # default to building on top of the canonical head when no parent is given
    base_header = self.ensure_header(parent_header)
    vm = self.get_vm(base_header)

    # execute every transaction against the VM state ...
    new_header, receipts, computations = vm.apply_all_transactions(transactions, base_header)
    # ... then seal transactions and receipts into the block
    new_block = vm.set_block_transactions(vm.block, new_header, transactions, receipts)

    return new_block, receipts, computations
[ "def", "build_block_with_transactions", "(", "self", ",", "transactions", ":", "Tuple", "[", "BaseTransaction", ",", "...", "]", ",", "parent_header", ":", "BlockHeader", "=", "None", ")", "->", "Tuple", "[", "BaseBlock", ",", "Tuple", "[", "Receipt", ",", "...
48.571429
25.809524
def make_mixture_prior(latent_size, mixture_components):
    """Creates the mixture of Gaussians prior distribution.

    Args:
      latent_size: The dimensionality of the latent representation.
      mixture_components: Number of elements of the mixture.

    Returns:
      random_prior: A `tfd.Distribution` instance representing the distribution
        over encodings in the absence of any evidence.
    """
    if mixture_components == 1:
        # See the module docstring for why we don't learn the parameters here.
        return tfd.MultivariateNormalDiag(
            loc=tf.zeros([latent_size]),
            scale_identity_multiplier=1.0)

    # learnable parameters: one location/scale pair per mixture component,
    # plus the mixture weights
    loc = tf.compat.v1.get_variable(
        name="loc", shape=[mixture_components, latent_size])
    raw_scale_diag = tf.compat.v1.get_variable(
        name="raw_scale_diag", shape=[mixture_components, latent_size])
    mixture_logits = tf.compat.v1.get_variable(
        name="mixture_logits", shape=[mixture_components])

    return tfd.MixtureSameFamily(
        components_distribution=tfd.MultivariateNormalDiag(
            loc=loc,
            # softplus keeps the learned scales strictly positive
            scale_diag=tf.nn.softplus(raw_scale_diag)),
        mixture_distribution=tfd.Categorical(logits=mixture_logits),
        name="prior")
[ "def", "make_mixture_prior", "(", "latent_size", ",", "mixture_components", ")", ":", "if", "mixture_components", "==", "1", ":", "# See the module docstring for why we don't learn the parameters here.", "return", "tfd", ".", "MultivariateNormalDiag", "(", "loc", "=", "tf",...
38.433333
19.033333
def process_get(self):
    """
    Analyse the GET request

    :return:
        * :attr:`USER_NOT_AUTHENTICATED` if the user is not authenticated or
          is requesting for authentication renewal
        * :attr:`USER_AUTHENTICATED` if the user is authenticated and is not
          requesting for authentication renewal
    :rtype: int
    """
    # generate a new LT
    self.gen_lt()

    authenticated = self.request.session.get("authenticated")
    if not authenticated or self.renew:
        # authentication will be needed, initialize the form to use
        self.init_form()
        return self.USER_NOT_AUTHENTICATED
    return self.USER_AUTHENTICATED
[ "def", "process_get", "(", "self", ")", ":", "# generate a new LT", "self", ".", "gen_lt", "(", ")", "if", "not", "self", ".", "request", ".", "session", ".", "get", "(", "\"authenticated\"", ")", "or", "self", ".", "renew", ":", "# authentication will be ne...
39.166667
18.611111
def get_edges_with_citations(self, citations: Iterable[Citation]) -> List[Edge]:
    """List every edge whose evidence cites one of the given citations."""
    query = self.session.query(Edge).join(Evidence)
    return query.filter(Evidence.citation.in_(citations)).all()
[ "def", "get_edges_with_citations", "(", "self", ",", "citations", ":", "Iterable", "[", "Citation", "]", ")", "->", "List", "[", "Edge", "]", ":", "return", "self", ".", "session", ".", "query", "(", "Edge", ")", ".", "join", "(", "Evidence", ")", ".",...
79
33.666667
def _init_channel(self):
    """
    Build the secure grpc channel used for both publisher and subscriber.

    When TLS_PEM_FILE is set in the environment, its contents are used as
    the root certificate; otherwise the system default certificates apply.

    :return: None
    """
    host = self._get_host()
    port = self._get_grpc_port()
    if 'TLS_PEM_FILE' in os.environ:
        # b is important -> binary
        with open(os.environ['TLS_PEM_FILE'], mode='rb') as f:
            pem_bytes = f.read()
        credentials = grpc.ssl_channel_credentials(root_certificates=pem_bytes)
    else:
        credentials = grpc.ssl_channel_credentials()
    self._channel = grpc.secure_channel(host + ":" + port, credentials=credentials)
    self._init_health_checker()
[ "def", "_init_channel", "(", "self", ")", ":", "host", "=", "self", ".", "_get_host", "(", ")", "port", "=", "self", ".", "_get_grpc_port", "(", ")", "if", "'TLS_PEM_FILE'", "in", "os", ".", "environ", ":", "with", "open", "(", "os", ".", "environ", ...
38.411765
20.764706
def create_permissions():
    """
    Creates all permissions and add them to the ADMIN Role.
    """
    current_app.appbuilder.add_permissions(update_perms=True)
    message = click.style("Created all permissions", fg="green")
    click.echo(message)
[ "def", "create_permissions", "(", ")", ":", "current_app", ".", "appbuilder", ".", "add_permissions", "(", "update_perms", "=", "True", ")", "click", ".", "echo", "(", "click", ".", "style", "(", "\"Created all permissions\"", ",", "fg", "=", "\"green\"", ")",...
38.166667
14.166667
def xenon_interactive_worker(
        machine, worker_config, input_queue=None, stderr_sink=None):
    """Uses Xenon to run a single remote interactive worker.

    Jobs are read from stdin, and results written to stdout.

    :param machine:
        Specification of the machine on which to run.
    :type machine: noodles.run.xenon.Machine

    :param worker_config:
        Job configuration. Specifies the command to be run remotely.
    :type worker_config: noodles.run.xenon.XenonJobConfig
    """
    if input_queue is None:
        input_queue = Queue()

    registry = worker_config.registry()

    @pull_map
    def serialise(obj):
        """Serialise incoming objects, yielding strings."""
        if isinstance(obj, JobMessage):
            print('serializing:', str(obj.node), file=sys.stderr)
        return (registry.to_json(obj, host='scheduler') + '\n').encode()

    @pull_map
    def echo(line):
        # trace every line sent to the remote worker on local stderr
        print('{} input: {}'.format(worker_config.name, line), file=sys.stderr)
        return line

    def do_iterate(source):
        # translate the queue's EndOfQueue sentinel into EndOfWork for the
        # remote side, then stop
        for x in source():
            if x is EndOfQueue:
                yield EndOfWork
                return
            yield x

    # submit the remote job; its stdin is fed from the input queue
    job, output_stream = machine.scheduler.submit_interactive_job(
        worker_config.xenon_job_description,
        echo(lambda: serialise(lambda: do_iterate(input_queue.source))))

    @sink_map
    def echo_stderr(text):
        """Print lines."""
        for line in text.split('\n'):
            print("{}: {}".format(worker_config.name, line), file=sys.stderr)

    if stderr_sink is None:
        stderr_sink = echo_stderr()

    @pull
    def read_output(source):
        """Handle output from job, sending stderr data to given
        `stderr_sink`, passing on lines from stdout."""
        # stdout chunks may split lines arbitrarily; buffer partial lines
        line_buffer = ""
        try:
            for chunk in source():
                if chunk.stdout:
                    lines = chunk.stdout.decode().splitlines(keepends=True)
                    if not lines:
                        continue

                    # first piece may complete a previously buffered line
                    if lines[0][-1] == '\n':
                        yield line_buffer + lines[0]
                        line_buffer = ""
                    else:
                        line_buffer += lines[0]

                    if len(lines) == 1:
                        continue

                    # middle pieces are guaranteed complete lines
                    yield from lines[1:-1]

                    # last piece may be incomplete; keep it buffered
                    if lines[-1][-1] == '\n':
                        yield lines[-1]
                    else:
                        line_buffer = lines[-1]

                if chunk.stderr:
                    for line in chunk.stderr.decode().split('\n'):
                        stripped_line = line.strip()
                        if stripped_line != '':
                            stderr_sink.send(stripped_line)
        except grpc.RpcError as e:
            # NOTE(review): catching grpc.RpcError looks out of place in a
            # Xenon-based worker -- confirm this is the intended exception
            # type for a closed output stream.
            return

    @pull_map
    def deserialise(line):
        result = registry.from_json(line, deref=False)
        return result

    return Connection(
        lambda: deserialise(lambda: read_output(lambda: output_stream)),
        input_queue.sink,
        aux=job)
[ "def", "xenon_interactive_worker", "(", "machine", ",", "worker_config", ",", "input_queue", "=", "None", ",", "stderr_sink", "=", "None", ")", ":", "if", "input_queue", "is", "None", ":", "input_queue", "=", "Queue", "(", ")", "registry", "=", "worker_config"...
30.814433
19.536082
def from_dict(cls, d):
    """
    Reconstructs the SimplestChemenvStrategy object from a dict representation
    of the SimplestChemenvStrategy object created using the as_dict method.

    :param d: dict representation of the SimplestChemenvStrategy object
    :return: StructureEnvironments object
    """
    # pull each serialized field out of the dict and feed it back to the
    # constructor as keyword arguments
    kwargs = {
        'distance_cutoff': d["distance_cutoff"],
        'angle_cutoff': d["angle_cutoff"],
        'additional_condition': d["additional_condition"],
        'continuous_symmetry_measure_cutoff': d["continuous_symmetry_measure_cutoff"],
        'symmetry_measure_type': d["symmetry_measure_type"],
    }
    return cls(**kwargs)
[ "def", "from_dict", "(", "cls", ",", "d", ")", ":", "return", "cls", "(", "distance_cutoff", "=", "d", "[", "\"distance_cutoff\"", "]", ",", "angle_cutoff", "=", "d", "[", "\"angle_cutoff\"", "]", ",", "additional_condition", "=", "d", "[", "\"additional_con...
58.272727
26.818182
def group(iterable, key):
    """
    groupby which sorts the input, discards the key and returns the output
    as a sequence of lists.
    """
    # groupby only merges adjacent equal keys, hence the sort first
    ordered = sorted(iterable, key=key)
    for _key, members in groupby(ordered, key=key):
        yield list(members)
[ "def", "group", "(", "iterable", ",", "key", ")", ":", "for", "_", ",", "grouped", "in", "groupby", "(", "sorted", "(", "iterable", ",", "key", "=", "key", ")", ",", "key", "=", "key", ")", ":", "yield", "list", "(", "grouped", ")" ]
33.285714
14.428571
def menuitemenabled(self, window_name, object_name): """ Verify a menu item is enabled @param window_name: Window name to look for, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to look for, either full name, LDTP's name convention, or a Unix glob. Or menu heirarchy @type object_name: string @return: 1 on success. @rtype: integer """ try: menu_handle = self._get_menu_handle(window_name, object_name, False) if menu_handle.AXEnabled: return 1 except LdtpServerException: pass return 0
[ "def", "menuitemenabled", "(", "self", ",", "window_name", ",", "object_name", ")", ":", "try", ":", "menu_handle", "=", "self", ".", "_get_menu_handle", "(", "window_name", ",", "object_name", ",", "False", ")", "if", "menu_handle", ".", "AXEnabled", ":", "...
34.045455
17.045455
def on_configurationdone_request(self, py_db, request): ''' :param ConfigurationDoneRequest request: ''' self.api.run(py_db) configuration_done_response = pydevd_base_schema.build_response(request) return NetCommand(CMD_RETURN, 0, configuration_done_response, is_json=True)
[ "def", "on_configurationdone_request", "(", "self", ",", "py_db", ",", "request", ")", ":", "self", ".", "api", ".", "run", "(", "py_db", ")", "configuration_done_response", "=", "pydevd_base_schema", ".", "build_response", "(", "request", ")", "return", "NetCom...
45
25.285714
def invert(self): ''' Return inverse mapping of dictionary with sorted values. USAGE >>> # Switch the keys and values >>> adv_dict({ ... 'A': [1, 2, 3], ... 'B': [4, 2], ... 'C': [1, 4], ... }).invert() {1: ['A', 'C'], 2: ['A', 'B'], 3: ['A'], 4: ['B', 'C']} ''' inv_map = {} for k, v in self.items(): if sys.version_info < (3, 0): acceptable_v_instance = isinstance(v, (str, int, float, long)) else: acceptable_v_instance = isinstance(v, (str, int, float)) if acceptable_v_instance: v = [v] elif not isinstance(v, list): raise Exception('Error: Non supported value format! Values may only' ' be numerical, strings, or lists of numbers and ' 'strings.') for val in v: inv_map[val] = inv_map.get(val, []) inv_map[val].append(k) inv_map[val].sort() return inv_map
[ "def", "invert", "(", "self", ")", ":", "inv_map", "=", "{", "}", "for", "k", ",", "v", "in", "self", ".", "items", "(", ")", ":", "if", "sys", ".", "version_info", "<", "(", "3", ",", "0", ")", ":", "acceptable_v_instance", "=", "isinstance", "(...
37.740741
17.074074
def frompath(path, accessor=None, ext=None, start=None, stop=None, recursive=False, npartitions=None, dims=None, dtype=None, labels=None, recount=False, engine=None, credentials=None): """ Load images from a path using the given accessor. Supports both local and remote filesystems. Parameters ---------- accessor : function Apply to each item after loading to yield an image. ext : str, optional, default=None File extension. npartitions : int, optional, default=None Number of partitions for computational engine, if None will use default for engine. dims : tuple, optional, default=None Dimensions of images. dtype : str, optional, default=None Numerical type of images. labels : array, optional, default = None Labels for records. If provided, should be one-dimensional. start, stop : nonnegative int, optional, default=None Indices of files to load, interpreted using Python slicing conventions. recursive : boolean, optional, default=False If true, will recursively descend directories from path, loading all files with an extension matching 'ext'. recount : boolean, optional, default=False Force subsequent record counting. """ from thunder.readers import get_parallel_reader reader = get_parallel_reader(path)(engine, credentials=credentials) data = reader.read(path, ext=ext, start=start, stop=stop, recursive=recursive, npartitions=npartitions) if spark and isinstance(engine, spark): if accessor: data = data.flatMap(accessor) if recount: nrecords = None def switch(record): ary, idx = record return (idx,), ary data = data.values().zipWithIndex().map(switch) else: nrecords = reader.nfiles return fromrdd(data, nrecords=nrecords, dims=dims, dtype=dtype, labels=labels, ordered=True) else: if accessor: data = [accessor(d) for d in data] flattened = list(itertools.chain(*data)) values = [kv[1] for kv in flattened] return fromarray(values, labels=labels)
[ "def", "frompath", "(", "path", ",", "accessor", "=", "None", ",", "ext", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "recursive", "=", "False", ",", "npartitions", "=", "None", ",", "dims", "=", "None", ",", "dtype", "="...
34.571429
21.84127
def checkerboard(img_spec1=None, img_spec2=None, patch_size=10, view_set=(0, 1, 2), num_slices=(10,), num_rows=2, rescale_method='global', background_threshold=0.05, annot=None, padding=5, output_path=None, figsize=None, ): """ Checkerboard mixer. Parameters ---------- img_spec1 : str or nibabel image-like object MR image (or path to one) to be visualized img_spec2 : str or nibabel image-like object MR image (or path to one) to be visualized patch_size : int or list or (int, int) or None size of checker patch (either square or rectangular) If None, number of voxels/patch are chosen such that, there will be 7 patches through the width/height. view_set : iterable Integers specifying the dimensions to be visualized. Choices: one or more of (0, 1, 2) for a 3D image num_slices : int or iterable of size as view_set number of slices to be selected for each view Must be of the same length as view_set, each element specifying the number of slices for each dimension. If only one number is given, same number will be chosen for all dimensions. num_rows : int number of rows (top to bottom) per each of 3 dimensions rescale_method : bool or str or list or None Range to rescale the intensity values to Default: 'global', min and max values computed based on ranges from both images. If false or None, no rescaling is done (does not work yet). background_threshold : float or str A threshold value below which all the background voxels will be set to zero. Default : 0.05. Other option is a string specifying a percentile: '5%', '10%'. Specify None if you don't want any thresholding. annot : str Text to display to annotate the visualization padding : int number of voxels to pad around each panel. output_path : str path to save the generate collage to. figsize : list Size of figure in inches to be passed on to plt.figure() e.g. [12, 12] or [20, 20] Returns ------- fig : figure handle handle to the collage figure generated. 
""" img_one, img_two = _preprocess_images(img_spec1, img_spec2, rescale_method=rescale_method, bkground_thresh=background_threshold, padding=padding) display_params = dict(interpolation='none', aspect='auto', origin='lower', cmap='gray', vmin=0.0, vmax=1.0) mixer = partial(_checker_mixer, checker_size=patch_size) collage = Collage(view_set=view_set, num_slices=num_slices, num_rows=num_rows, figsize=figsize, display_params=display_params) collage.transform_and_attach((img_one, img_two), func=mixer) collage.save(output_path=output_path, annot=annot) return collage
[ "def", "checkerboard", "(", "img_spec1", "=", "None", ",", "img_spec2", "=", "None", ",", "patch_size", "=", "10", ",", "view_set", "=", "(", "0", ",", "1", ",", "2", ")", ",", "num_slices", "=", "(", "10", ",", ")", ",", "num_rows", "=", "2", ",...
36.511628
22.860465
def from_array(array): """ Deserialize a new UserProfilePhotos from a given dictionary. :return: new UserProfilePhotos instance. :rtype: UserProfilePhotos """ if array is None or not array: return None # end if assert_type_or_raise(array, dict, parameter_name="array") from pytgbot.api_types.receivable.media import PhotoSize data = {} data['total_count'] = int(array.get('total_count')) data['photos'] = PhotoSize.from_array_list(array.get('photos'), list_level=2) data['_raw'] = array return UserProfilePhotos(**data)
[ "def", "from_array", "(", "array", ")", ":", "if", "array", "is", "None", "or", "not", "array", ":", "return", "None", "# end if", "assert_type_or_raise", "(", "array", ",", "dict", ",", "parameter_name", "=", "\"array\"", ")", "from", "pytgbot", ".", "api...
33.473684
19.157895
def from_file(cls, jss, filename): """Create a new JSSObject from an external XML file. Args: jss: A JSS object. filename: String path to an XML file. """ tree = ElementTree.parse(filename) root = tree.getroot() return cls(jss, root)
[ "def", "from_file", "(", "cls", ",", "jss", ",", "filename", ")", ":", "tree", "=", "ElementTree", ".", "parse", "(", "filename", ")", "root", "=", "tree", ".", "getroot", "(", ")", "return", "cls", "(", "jss", ",", "root", ")" ]
29.7
11.6