text
stringlengths
89
104k
code_tokens
list
avg_line_len
float64
7.91
980
score
float64
0
630
def block_layer(inputs, filters, block_fn, blocks, strides, is_training, name,
                data_format="channels_first", use_td=False,
                targeting_rate=None, keep_prob=None):
  """Creates one layer of blocks for the ResNet model.

  Args:
    inputs: `Tensor` of size `[batch, channels, height, width]`.
    filters: `int` number of filters for the first convolution of the layer.
    block_fn: `function` for the block to use within the model
    blocks: `int` number of blocks contained in the layer.
    strides: `int` stride to use for the first convolution of the layer. If
        greater than 1, this layer will downsample the input.
    is_training: `bool` for whether the model is training.
    name: `str` name for the Tensor output of the block layer.
    data_format: `str` either "channels_first" for `[batch, channels, height,
        width]` or "channels_last" for `[batch, height, width, channels]`.
    use_td: `str` one of "weight" or "unit". Set to False or "" to disable
        targeted dropout.
    targeting_rate: `float` proportion of weights to target with targeted
        dropout.
    keep_prob: `float` keep probability for targeted dropout.

  Returns:
    The output `Tensor` of the block layer.
  """
  # Bottleneck blocks end with 4x the number of filters as they start with
  filters_out = filters * 4 if block_fn is bottleneck_block else filters

  def shortcut_projection(shortcut_inputs):
    """Project the identity branch to the output filter count."""
    projected = conv2d_fixed_padding(
        inputs=shortcut_inputs,
        filters=filters_out,
        kernel_size=1,
        strides=strides,
        data_format=data_format,
        use_td=use_td,
        targeting_rate=targeting_rate,
        keep_prob=keep_prob,
        is_training=is_training)
    return batch_norm_relu(
        projected, is_training, relu=False, data_format=data_format)

  # Only the first block per block_layer uses the projection shortcut and
  # the (possibly downsampling) strides.
  outputs = block_fn(
      inputs, filters, is_training, shortcut_projection, strides, False,
      data_format, use_td=use_td, targeting_rate=targeting_rate,
      keep_prob=keep_prob)
  for block_index in range(1, blocks):
    is_last = block_index + 1 == blocks
    outputs = block_fn(
        outputs, filters, is_training, None, 1, is_last, data_format,
        use_td=use_td, targeting_rate=targeting_rate, keep_prob=keep_prob)
  return tf.identity(outputs, name)
[ "def", "block_layer", "(", "inputs", ",", "filters", ",", "block_fn", ",", "blocks", ",", "strides", ",", "is_training", ",", "name", ",", "data_format", "=", "\"channels_first\"", ",", "use_td", "=", "False", ",", "targeting_rate", "=", "None", ",", "keep_p...
32.545455
19.805195
def qloguniform(low, high, q, random_state):
    '''Sample from a log-uniform distribution, quantized to multiples of ``q``.

    low: a float, the lower bound of the underlying log-uniform distribution
    high: a float, the upper bound of the underlying log-uniform distribution
    q: the quantization step; the sample is rounded to the nearest multiple of q
    random_state: an object of numpy.random.RandomState

    Returns the loguniform(low, high) draw rounded to a multiple of q.
    '''
    return np.round(loguniform(low, high, random_state) / q) * q
[ "def", "qloguniform", "(", "low", ",", "high", ",", "q", ",", "random_state", ")", ":", "return", "np", ".", "round", "(", "loguniform", "(", "low", ",", "high", ",", "random_state", ")", "/", "q", ")", "*", "q" ]
36.25
18.25
def response(resp):
    '''post-response callback

    resp: requests response object

    Returns a list of result dicts; the first entry may carry the total
    result count when it can be parsed from the page.
    '''
    results = []
    dom = html.fromstring(resp.text)

    # Best-effort: the overall result count is optional, so a parse
    # failure is only logged. Narrowed from a bare `except:` so that
    # SystemExit/KeyboardInterrupt are not swallowed.
    try:
        number_of_results_string = re.sub('[^0-9]', '', dom.xpath(
            '//a[@class="active" and contains(@href,"/suchen/dudenonline")]/span/text()')[0]
        )
        results.append({'number_of_results': int(number_of_results_string)})
    except Exception:
        logger.debug("Couldn't read number of results.")

    for result in dom.xpath('//section[@class="wide" and not(contains(@style,"overflow:hidden"))]'):
        try:
            logger.debug("running for %s" % str(result))
            link = result.xpath('.//h2/a')[0]
            url = link.attrib.get('href')
            title = result.xpath('string(.//h2/a)')
            content = extract_text(result.xpath('.//p'))
            # append result
            results.append({'url': url, 'title': title, 'content': content})
        except Exception:
            # skip malformed result sections, keep parsing the rest
            logger.debug('result parse error in:\n%s',
                         etree.tostring(result, pretty_print=True))
            continue
    return results
[ "def", "response", "(", "resp", ")", ":", "results", "=", "[", "]", "dom", "=", "html", ".", "fromstring", "(", "resp", ".", "text", ")", "try", ":", "number_of_results_string", "=", "re", ".", "sub", "(", "'[^0-9]'", ",", "''", ",", "dom", ".", "x...
32.542857
25
def generate_help_text():
    """Return a formatted string listing commands, HTTPie options,
    and HTTP actions.
    """
    def render_section(title, entries):
        """Render one titled section of name/description pairs."""
        lines = ['{0}:'.format(title)]
        lines.extend('\t{0:<10}\t{1:<20}'.format(name, desc)
                     for name, desc in entries)
        return '\n'.join(lines) + '\n\n'

    sections = (
        ('Commands', ROOT_COMMANDS.items()),
        ('Options', OPTION_NAMES.items()),
        ('Actions', ACTIONS.items()),
        ('Headers', HEADER_NAMES.items()),
    )
    return ''.join(render_section(title, entries)
                   for title, entries in sections)
[ "def", "generate_help_text", "(", ")", ":", "def", "generate_cmds_with_explanations", "(", "summary", ",", "cmds", ")", ":", "text", "=", "'{0}:\\n'", ".", "format", "(", "summary", ")", "for", "cmd", ",", "explanation", "in", "cmds", ":", "text", "+=", "'...
43.866667
20.533333
def keyPressEvent(self, event):
    """
    Listens for the left/right keys and the escape key to control
    the slides.

    :param      event | <QtCore.Qt.QKeyEvent>
    """
    # Dispatch table: one navigation action per recognized key.
    actions = {
        QtCore.Qt.Key_Escape: self.cancel,
        QtCore.Qt.Key_Left: self.goBack,
        QtCore.Qt.Key_Right: self.goForward,
        QtCore.Qt.Key_Home: self.restart,
    }
    action = actions.get(event.key())
    if action is not None:
        action()

    # Always forward the event to the base class handler.
    super(XWalkthroughWidget, self).keyPressEvent(event)
[ "def", "keyPressEvent", "(", "self", ",", "event", ")", ":", "if", "event", ".", "key", "(", ")", "==", "QtCore", ".", "Qt", ".", "Key_Escape", ":", "self", ".", "cancel", "(", ")", "elif", "event", ".", "key", "(", ")", "==", "QtCore", ".", "Qt"...
33.823529
13.823529
def copychildren(self, newdoc=None, idsuffix=""):
    """Generator yielding a deep copy of each child of this element.

    Invokes :meth:`copy` on all children; parameters are passed through.
    """
    if idsuffix is True:
        # One random 32-bit hash per copy operation; the same suffix is
        # reused for every child so copied IDs stay mutually consistent.
        idsuffix = ".copy." + "%08x" % random.getrandbits(32)
    for child in self:
        if isinstance(child, AbstractElement):
            yield child.copy(newdoc, idsuffix)
[ "def", "copychildren", "(", "self", ",", "newdoc", "=", "None", ",", "idsuffix", "=", "\"\"", ")", ":", "if", "idsuffix", "is", "True", ":", "idsuffix", "=", "\".copy.\"", "+", "\"%08x\"", "%", "random", ".", "getrandbits", "(", "32", ")", "#random 32-bi...
52.777778
25.111111
def ms_zoom(self, viewer, event, data_x, data_y, msg=True):
    """Zoom the image by dragging the cursor left or right.
    """
    if not self.canzoom:
        return True

    # The settings value overrides the caller's msg preference.
    show_msg = self.settings.get('msg_zoom', msg)
    x, y = self.get_win_xy(viewer)

    state = event.state
    if state == 'move':
        self._zoom_xy(viewer, x, y)
    elif state == 'down':
        if show_msg:
            viewer.onscreen_message("Zoom (drag mouse L-R)", delay=1.0)
        # Remember where the drag started.
        self._start_x, self._start_y = x, y
    else:
        # Drag finished: clear any onscreen message.
        viewer.onscreen_message(None)
    return True
[ "def", "ms_zoom", "(", "self", ",", "viewer", ",", "event", ",", "data_x", ",", "data_y", ",", "msg", "=", "True", ")", ":", "if", "not", "self", ".", "canzoom", ":", "return", "True", "msg", "=", "self", ".", "settings", ".", "get", "(", "'msg_zoo...
30
16.285714
def onTWriteCallback__init(self, sim):
    """
    Process for injecting of this callback loop into simulator
    """
    # Run one pass of the callback loop first, then register it for
    # future writes on both interface signals.
    yield from self.onTWriteCallback(sim)
    for monitored in (self.intf.t, self.intf.o):
        monitored._sigInside.registerWriteCallback(
            self.onTWriteCallback, self.getEnable)
[ "def", "onTWriteCallback__init", "(", "self", ",", "sim", ")", ":", "yield", "from", "self", ".", "onTWriteCallback", "(", "sim", ")", "self", ".", "intf", ".", "t", ".", "_sigInside", ".", "registerWriteCallback", "(", "self", ".", "onTWriteCallback", ",", ...
36.272727
8.818182
def load_3MF(file_obj, postprocess=True, **kwargs):
    """
    Load a 3MF formatted file into a Trimesh scene.

    Parameters
    ------------
    file_obj: file object
    postprocess: bool, if True and every mesh name contains 'body',
        rename single-child geometries after their parent part

    Returns
    ------------
    kwargs: dict, with keys 'graph', 'geometry', 'base_frame'
    """
    # dict, {name in archive: BytesIo}
    archive = util.decompress(file_obj, file_type='zip')
    # load the XML into an LXML tree
    tree = etree.XML(archive['3D/3dmodel.model'].read())
    # { mesh id : mesh name}
    id_name = {}
    # { mesh id: (n,3) float vertices}
    v_seq = {}
    # { mesh id: (n,3) int faces}
    f_seq = {}
    # components are objects that contain other objects
    # {id : [other ids]}
    components = collections.defaultdict(list)
    for obj in tree.iter('{*}object'):
        # id is mandatory
        index = obj.attrib['id']
        # not required, so use a get call which will return None
        # if the tag isn't populated
        if 'name' in obj.attrib:
            name = obj.attrib['name']
        else:
            name = str(index)
        # store the name by index
        id_name[index] = name
        # if the object has actual geometry data, store it
        for mesh in obj.iter('{*}mesh'):
            vertices = mesh.find('{*}vertices')
            vertices = np.array([[i.attrib['x'],
                                  i.attrib['y'],
                                  i.attrib['z']]
                                 for i in vertices.iter('{*}vertex')],
                                dtype=np.float64)
            v_seq[index] = vertices
            faces = mesh.find('{*}triangles')
            faces = np.array([[i.attrib['v1'],
                               i.attrib['v2'],
                               i.attrib['v3']]
                              for i in faces.iter('{*}triangle')],
                             dtype=np.int64)
            f_seq[index] = faces
        # components are references to other geometries
        for c in obj.iter('{*}component'):
            mesh_index = c.attrib['objectid']
            transform = _attrib_to_transform(c.attrib)
            components[index].append((mesh_index, transform))
    # load information about the scene graph
    # each instance is a single geometry
    build_items = []
    # scene graph information stored here, aka "build" the scene
    build = tree.find('{*}build')
    for item in build.iter('{*}item'):
        # get a transform from the item's attributes
        transform = _attrib_to_transform(item.attrib)
        # the index of the geometry this item instantiates
        build_items.append((item.attrib['objectid'],
                            transform))
    # collect unit information from the tree
    if 'unit' in tree.attrib:
        metadata = {'units': tree.attrib['unit']}
    else:
        # the default units, defined by the specification
        metadata = {'units': 'millimeters'}
    # have one mesh per 3MF object
    # one mesh per geometry ID, store as kwargs for the object
    meshes = {}
    for gid in v_seq.keys():
        name = id_name[gid]
        meshes[name] = {'vertices': v_seq[gid],
                        'faces': f_seq[gid],
                        'metadata': metadata.copy()}
    # turn the item / component representation into
    # a MultiDiGraph to compound our pain
    g = nx.MultiDiGraph()
    # build items are the only things that exist according to 3MF
    # so we accomplish that by linking them to the base frame
    for gid, tf in build_items:
        g.add_edge('world', gid, matrix=tf)
    # components are instances which need to be linked to base
    # frame by a build_item
    for start, group in components.items():
        for i, (gid, tf) in enumerate(group):
            g.add_edge(start, gid, matrix=tf)
    # turn the graph into kwargs for a scene graph
    # flatten the scene structure and simplify to
    # a single unique node per instance
    graph_args = []
    # {parent id: set of child ids} collected for the optional rename pass
    parents = collections.defaultdict(set)
    for path in graph.multigraph_paths(G=g,
                                       source='world'):
        # collect all the transform on the path
        transforms = graph.multigraph_collect(G=g,
                                              traversal=path,
                                              attrib='matrix')
        # combine them into a single transform
        if len(transforms) == 1:
            transform = transforms[0]
        else:
            transform = util.multi_dot(transforms)
        # the last element of the path should be the geometry
        last = path[-1][0]
        # if someone included an undefined component, skip it
        if last not in id_name:
            log.debug('id {} included but not defined!'.format(last))
            continue
        # frame names unique
        name = id_name[last] + util.unique_id()
        # index in meshes
        geom = id_name[last]
        # collect parents if we want to combine later
        if len(path) > 2:
            parent = path[-2][0]
            parents[parent].add(last)
        graph_args.append({'frame_from': 'world',
                           'frame_to': name,
                           'matrix': transform,
                           'geometry': geom})
    # solidworks will export each body as its own mesh with the part
    # name as the parent so optionally rename and combine these bodies
    if postprocess and all('body' in i.lower() for i in meshes.keys()):
        # don't rename by default
        rename = {k: k for k in meshes.keys()}
        for parent, mesh_name in parents.items():
            # only handle the case where a parent has a single child
            # if there are multiple children we would do a combine op
            if len(mesh_name) != 1:
                continue
            # rename the part
            rename[id_name[next(iter(mesh_name))]] = id_name[parent].split(
                '(')[0]
        # apply the rename operation meshes
        meshes = {rename[k]: m for k, m in meshes.items()}
        # rename geometry references in the scene graph
        for arg in graph_args:
            if 'geometry' in arg:
                arg['geometry'] = rename[arg['geometry']]
    # construct the kwargs to load the scene
    kwargs = {'base_frame': 'world',
              'graph': graph_args,
              'geometry': meshes,
              'metadata': metadata}
    return kwargs
[ "def", "load_3MF", "(", "file_obj", ",", "postprocess", "=", "True", ",", "*", "*", "kwargs", ")", ":", "# dict, {name in archive: BytesIo}", "archive", "=", "util", ".", "decompress", "(", "file_obj", ",", "file_type", "=", "'zip'", ")", "# load the XML into an...
36.48538
15.526316
def compute_md5(self):
    """Compute and return the MD5 hex digest of the text file at ``self.path``.

    The file is read as text and re-encoded as UTF-8 before hashing.
    """
    import hashlib
    with open(self.path, "rt") as fh:
        text = fh.read()
    m = hashlib.md5(text.encode("utf-8"))
    return m.hexdigest()
[ "def", "compute_md5", "(", "self", ")", ":", "import", "hashlib", "with", "open", "(", "self", ".", "path", ",", "\"rt\"", ")", "as", "fh", ":", "text", "=", "fh", ".", "read", "(", ")", "m", "=", "hashlib", ".", "md5", "(", "text", ".", "encode"...
34.571429
9.428571
def set_param(self, param, value):
    '''Set a parameter in this configuration set.

    Stores ``value`` under ``param`` and pushes the whole updated set
    to the backing object as an nvlist.
    '''
    updated = self.data
    updated[param] = value
    self._object.configuration_data = utils.dict_to_nvlist(updated)
[ "def", "set_param", "(", "self", ",", "param", ",", "value", ")", ":", "self", ".", "data", "[", "param", "]", "=", "value", "self", ".", "_object", ".", "configuration_data", "=", "utils", ".", "dict_to_nvlist", "(", "self", ".", "data", ")" ]
48.75
15.75
def set_multi(self, mappings, time=0, compress_level=-1):
    """
    Set multiple keys with their values on the server.

    :param mappings: A dict with keys/values
    :type mappings: dict
    :param time: Time in seconds that your key will expire.
    :type time: int
    :param compress_level: How much to compress.
        0 = no compression, 1 = fastest, 9 = slowest but best,
        -1 = default compression level.
    :type compress_level: int
    :return: True in case of success and False in case of failure
    :rtype: bool
    """
    if not mappings:
        return False

    # Group the key/value pairs by the server that owns each key.
    per_server = defaultdict(dict)
    for key, value in mappings.items():
        per_server[self._get_server(key)][key] = value

    # One batched set per server; the call succeeds only if all do.
    outcomes = [server.set_multi(batch, time, compress_level)
                for server, batch in per_server.items()]
    return all(outcomes)
[ "def", "set_multi", "(", "self", ",", "mappings", ",", "time", "=", "0", ",", "compress_level", "=", "-", "1", ")", ":", "returns", "=", "[", "]", "if", "not", "mappings", ":", "return", "False", "server_mappings", "=", "defaultdict", "(", "dict", ")",...
37.846154
15.538462
def data(self):
    """Return an iterable of (dataframe, filestem) pairs for the
    current mode."""
    frames = (
        self.alignment_lengths,
        self.percentage_identity,
        self.alignment_coverage,
        self.similarity_errors,
        self.hadamard,
    )
    # Filestems come from the config table matching self.mode.
    stems_by_mode = {
        "ANIm": pyani_config.ANIM_FILESTEMS,
        "ANIb": pyani_config.ANIB_FILESTEMS,
        "ANIblastall": pyani_config.ANIBLASTALL_FILESTEMS,
    }
    return zip(frames, stems_by_mode[self.mode])
[ "def", "data", "(", "self", ")", ":", "stemdict", "=", "{", "\"ANIm\"", ":", "pyani_config", ".", "ANIM_FILESTEMS", ",", "\"ANIb\"", ":", "pyani_config", ".", "ANIB_FILESTEMS", ",", "\"ANIblastall\"", ":", "pyani_config", ".", "ANIBLASTALL_FILESTEMS", ",", "}", ...
31.529412
14.117647
def check_token(request):
    """
    Resource check is token valid.
    ---
    request_serializer: serializers.CheckToken
    type:
      username:
        required: true
        type: string
        description: token related user
    responseMessages:
        - code: 200
          message: Token is valid
        - code: 400
          message: Token is not valid
        - code: 401
          message: Unauthorized
    """
    # NOTE: the docstring above is consumed by the API schema generator
    # and is kept verbatim.
    serializer = serializers.CheckToken(data=request.data)
    serializer.is_valid(raise_exception=True)
    token = serializer.validated_data['token']
    username = token.user.username
    logger.debug('Token correct',
                 extra={'token': token, 'username': username})
    return Response({'username': username})
[ "def", "check_token", "(", "request", ")", ":", "serializer", "=", "serializers", ".", "CheckToken", "(", "data", "=", "request", ".", "data", ")", "serializer", ".", "is_valid", "(", "raise_exception", "=", "True", ")", "token", "=", "serializer", ".", "v...
27.576923
17.730769
def _data_flow_chain(self): """ Get a list of all elements in the data flow graph. The first element is the original source, the next one reads from the prior and so on and so forth. Returns ------- list: list of data sources """ if self.data_producer is None: return [] res = [] ds = self.data_producer while not ds.is_reader: res.append(ds) ds = ds.data_producer res.append(ds) res = res[::-1] return res
[ "def", "_data_flow_chain", "(", "self", ")", ":", "if", "self", ".", "data_producer", "is", "None", ":", "return", "[", "]", "res", "=", "[", "]", "ds", "=", "self", ".", "data_producer", "while", "not", "ds", ".", "is_reader", ":", "res", ".", "appe...
25.571429
19.761905
def do_POST(self): """ This method will be called for each POST request to one of the listener ports. It parses the CIM-XML export message and delivers the contained CIM indication to the stored listener object. """ # Accept header check described in DSP0200 accept = self.headers.get('Accept', 'text/xml') if accept not in ('text/xml', 'application/xml', '*/*'): self.send_http_error( 406, 'header-mismatch', _format("Invalid Accept header value: {0} (need text/xml, " "application/xml or */*)", accept)) return # Accept-Charset header check described in DSP0200 accept_charset = self.headers.get('Accept-Charset', 'UTF-8') tq_list = re.findall(TOKEN_QUALITY_FINDALL_PATTERN, accept_charset) found = False if tq_list is not None: for token, quality in tq_list: if token.lower() in ('utf-8', '*'): found = True break if not found: self.send_http_error( 406, 'header-mismatch', _format("Invalid Accept-Charset header value: {0} " "(need UTF-8 or *)", accept_charset)) return # Accept-Encoding header check described in DSP0200 accept_encoding = self.headers.get('Accept-Encoding', 'Identity') tq_list = re.findall(TOKEN_QUALITY_FINDALL_PATTERN, accept_encoding) identity_acceptable = False identity_found = False if tq_list is not None: for token, quality in tq_list: quality = 1 if quality == '' else float(quality) if token.lower() == 'identity': identity_found = True if quality > 0: identity_acceptable = True break if not identity_found: for token, quality in tq_list: quality = 1 if quality == '' else float(quality) if token == '*' and quality > 0: identity_acceptable = True break if not identity_acceptable: self.send_http_error( 406, 'header-mismatch', _format("Invalid Accept-Encoding header value: {0} " "(need Identity to be acceptable)", accept_encoding)) return # Accept-Language header check described in DSP0200. # Ignored, because this WBEM listener does not support multiple # languages, and hence any language is allowed to be returned. 
# Accept-Range header check described in DSP0200 accept_range = self.headers.get('Accept-Range', None) if accept_range is not None: self.send_http_error( 406, 'header-mismatch', _format("Accept-Range header is not permitted {0}", accept_range)) return # Content-Type header check described in DSP0200 content_type = self.headers.get('Content-Type', None) if content_type is None: self.send_http_error( 406, 'header-mismatch', "Content-Type header is required") return tc_list = re.findall(TOKEN_CHARSET_FINDALL_PATTERN, content_type) found = False if tc_list is not None: for token, charset in tc_list: if token.lower() in ('text/xml', 'application/xml') and \ (charset == '' or charset.lower() == 'utf-8'): found = True break if not found: self.send_http_error( 406, 'header-mismatch', _format("Invalid Content-Type header value: {0} " "(need text/xml or application/xml with " "charset=utf-8 or empty)", content_type)) return # Content-Encoding header check described in DSP0200 content_encoding = self.headers.get('Content-Encoding', 'identity') if content_encoding.lower() != 'identity': self.send_http_error( 406, 'header-mismatch', _format("Invalid Content-Encoding header value: {0}" "(listener supports only identity)", content_encoding)) return # Content-Language header check described in DSP0200. # Ignored, because this WBEM listener does not support multiple # languages, and hence any language is allowed in the request. # The following headers are ignored. They are not allowed to be used # by servers, but listeners are not required to reject them: # Content-Range, Expires, If-Range, Range. 
# Start processing the request content_len = int(self.headers.get('Content-Length', 0)) body = self.rfile.read(content_len) try: msgid, methodname, params = self.parse_export_request(body) except (CIMXMLParseError, XMLParseError) as exc: self.send_http_error(400, "request-not-well-formed", str(exc)) return except VersionError as exc: if str(exc).startswith("DTD"): self.send_http_error(400, "unsupported-dtd-version", str(exc)) elif str(exc).startswith("Protocol"): self.send_http_error(400, "unsupported-protocol-version", str(exc)) else: self.send_http_error(400, "unsupported-version", str(exc)) return if methodname == 'ExportIndication': if len(params) != 1 or 'NewIndication' not in params: self.send_error_response( msgid, methodname, CIM_ERR_INVALID_PARAMETER, _format("Expecting one parameter NewIndication, got {0!A}", params.keys())) return indication_inst = params['NewIndication'] if not isinstance(indication_inst, CIMInstance): self.send_error_response( msgid, methodname, CIM_ERR_INVALID_PARAMETER, _format("NewIndication parameter is not a CIM instance, " "but {0!A}", indication_inst)) return # server.listener created in WBEMListener.start function self.server.listener.deliver_indication(indication_inst, self.client_address[0]) self.send_success_response(msgid, methodname) else: self.send_error_response( msgid, methodname, CIM_ERR_NOT_SUPPORTED, _format("Unknown export method: {0!A}", methodname))
[ "def", "do_POST", "(", "self", ")", ":", "# Accept header check described in DSP0200", "accept", "=", "self", ".", "headers", ".", "get", "(", "'Accept'", ",", "'text/xml'", ")", "if", "accept", "not", "in", "(", "'text/xml'", ",", "'application/xml'", ",", "'...
42.228395
19.364198
def to_representation(self, obj):
    """Convert given internal object instance into representation dict.

    Representation dict may be later serialized to the content-type
    of choice in the resource HTTP method handler.

    This loops over all fields and retrieves source keys/attributes as
    field values with respect to optional field sources and converts each
    one using ``field.to_representation()`` method.

    Args:
        obj (object): internal object that needs to be represented

    Returns:
        dict: representation dictionary
    """
    out = {}
    for name, field in self.fields.items():
        if field.write_only:
            continue
        # Fields do not know their own names in the source object, so
        # resolve via the explicit source when provided, else the key.
        value = self.get_attribute(obj, field.source or name)
        if value is None:
            # Missing attributes are represented without invoking the
            # field, so fields never have to deal with None.
            out[name] = [] if field.many else None
        elif field.many:
            out[name] = [field.to_representation(item) for item in value]
        else:
            out[name] = field.to_representation(value)
    return out
[ "def", "to_representation", "(", "self", ",", "obj", ")", ":", "representation", "=", "{", "}", "for", "name", ",", "field", "in", "self", ".", "fields", ".", "items", "(", ")", ":", "if", "field", ".", "write_only", ":", "continue", "# note fields do no...
36.526316
24.315789
def add_splitrelation(self, splitrelation):
    """
    Add a <splitRelation> annotation to the graph.

    Parameters
    ----------
    splitrelation : etree.Element
        etree representation of a <splitRelation> element

    A <splitRelation> annotates its parent element (e.g. as an
    anaphora). Its parent can be either a <word> or a <node>.
    A <splitRelation> has a target attribute, which describes the
    targets (plural! e.g. antecedents) of the relation.

    Example
    -------
    <node xml:id="s2527_528" cat="NX" func="-" parent="s2527_529">
        <splitRelation type="split_antecedent" target="s2527_504 s2527_521"/>
        <word xml:id="s2527_32" form="beider" pos="PIDAT" morph="gpf"
            lemma="beide" func="-" parent="s2527_528" dephead="s2527_33"
            deprel="DET"/>
        <word xml:id="s2527_33" form="Firmen" pos="NN" morph="gpf"
            lemma="Firma" func="HD" parent="s2527_528" dephead="s2527_31"
            deprel="GMOD"/>
    </node>

    <word xml:id="s3456_12" form="ihr" pos="PPOSAT" morph="nsm"
        lemma="ihr" func="-" parent="s3456_507" dephead="s3456_14"
        deprel="DET">
        <splitRelation type="split_antecedent" target="s3456_505 s3456_9"/>
    </word>
    """
    if self.ignore_relations is False and self.ignore_splitrelations is False:
        source_id = self.get_element_id(splitrelation)
        # the target attribute looks like this: target="s2527_504 s2527_521"
        target_node_ids = splitrelation.attrib['target'].split()
        # we'll create an additional node which spans all target nodes
        target_span_id = '__'.join(target_node_ids)
        reltype = splitrelation.attrib['type']
        # mark the source element as a markable carrying this relation type
        self.add_node(source_id,
                      layers={self.ns, self.ns+':relation',
                              self.ns+':'+reltype, self.ns+':markable'})
        # synthetic span node covering all targets of the split relation
        self.add_node(target_span_id,
                      layers={self.ns, self.ns+':targetspan',
                              self.ns+':'+reltype, self.ns+':markable'})
        # pointing relation: source element -> synthetic target span
        self.add_edge(source_id, target_span_id,
                      layers={self.ns, self.ns+':coreference',
                              self.ns+':splitrelation', self.ns+':'+reltype},
                      edge_type=dg.EdgeTypes.pointing_relation)
        # spanning relations: synthetic span -> each individual target
        for target_node_id in target_node_ids:
            self.add_edge(target_span_id, target_node_id,
                          layers={self.ns, self.ns+':'+reltype},
                          edge_type=dg.EdgeTypes.spanning_relation)
[ "def", "add_splitrelation", "(", "self", ",", "splitrelation", ")", ":", "if", "self", ".", "ignore_relations", "is", "False", "and", "self", ".", "ignore_splitrelations", "is", "False", ":", "source_id", "=", "self", ".", "get_element_id", "(", "splitrelation",...
58.619048
32.52381
def search(description, all=False):
    """
    Gets a list of :class:`language_tags.Subtag.Subtag` objects where
    the description matches.

    :param description: a string or compiled regular expression. For example:
        ``search(re.compile('\\d{4}'))`` if the description of the returned
        subtag must contain four contiguous numerical digits.
    :type description: str or RegExp
    :param all: If set on True grandfathered and redundant tags will be
        included in the return list.
    :type all: bool, optional
    :return: list of :class:`language_tags.Subtag.Subtag` objects each
        including the description. The return list can be empty.
    """
    # If the input query is all lowercase, make a case-insensitive match.
    if isinstance(description, str):
        def list_to_string(l):
            joined = ', '.join(l)
            return joined.lower() if description.lower() == description else joined

        def test(record):
            return description in list_to_string(record['Description'])
    elif hasattr(description.search, '__call__'):
        def test(record):
            return description.search(', '.join(record['Description'])) is not None

    # Records without a 'Subtag' key (grandfathered/redundant) are only
    # considered when all=True.
    records = filter(
        lambda r: False if ('Subtag' not in r and not all) else test(r),
        registry)
    if six.PY3:
        records = list(records)

    # Sort by matched description string length. This is a quick way to
    # push precise matches towards the top.
    results = sorted(
        records,
        key=lambda r: min([abs(len(r_description) - len(description))
                           for r_description in r['Description']])) \
        if isinstance(description, str) else records

    # BUG FIX: grandfathered/redundant records carry a 'Tag' key, not
    # 'Subtag'; the original passed the literal list ['Tag'] to Tag()
    # instead of the record's actual tag string r['Tag'].
    return [Subtag(r['Subtag'], r['Type']) if 'Subtag' in r else Tag(r['Tag'])
            for r in results]
[ "def", "search", "(", "description", ",", "all", "=", "False", ")", ":", "# If the input query is all lowercase, make a case-insensitive match.", "if", "isinstance", "(", "description", ",", "str", ")", ":", "list_to_string", "=", "lambda", "l", ":", "', '", ".", ...
54.939394
33.848485
def multicolumn_store_with_uncompressed_write(mongo_server):
    """
    The database state created by this fixture is equivalent to the
    following operations using arctic 1.40 or previous:

        arctic.initialize_library('arctic_test.TEST', m.VERSION_STORE,
                                  segment='month')
        library = arctic.get_library('arctic_test.TEST')
        df = pd.DataFrame([[1,2], [3,4]], index=['x','y'],
                          columns=[['a','w'], ['a','v']])
        library.write('pandas', df)

    different from newer versions, the last write creates a
    uncompressed chunk.
    """
    # start from a clean database
    mongo_server.api.drop_database('arctic_test')

    library_name = 'arctic_test.TEST'
    arctic = m.Arctic(mongo_host=mongo_server.api)
    arctic.initialize_library(library_name, m.VERSION_STORE, segment='month')

    db = mongo_server.api.arctic_test
    # raw segment documents: segment 0 is compressed, segment 1 is the
    # uncompressed chunk produced by the legacy write path
    db.TEST.insert_many([
        {
            'parent': [bson.ObjectId('5ad0dc065c911d1188b512d8')],
            'data': bson.Binary(b'\x11\x00\x00\x002x\x01\x00\x01\x00\x80\x02\x00\x00\x00\x00\x00\x00\x00', 0),
            'symbol': 'pandas',
            'sha': bson.Binary(b'\xaa\\`\x0e\xc2D-\xc1_\xf7\xfd\x12\xfa\xd2\x17\x05`\x00\x98\xe2', 0),
            'compressed': True,
            '_id': bson.ObjectId('5ad0dc067934ecad404070be'),
            'segment': 0
        },
        {
            'parent': [bson.ObjectId('5ad0dc065c911d1188b512d8')],
            'data': bson.Binary(b'y\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00', 0),
            'symbol': 'pandas',
            'sha': bson.Binary(b'\xfe=WQ\xb5\xfdL\xb7\xcavd\x85o\x04]\x04\xdb\xa8]3', 0),
            'compressed': False,
            '_id': bson.ObjectId('5ad0dc077934ecad404070bf'),
            'segment': 1
        }
    ])
    # library metadata and the per-symbol version counter
    db.TEST.ARCTIC.update_one({"_id": "ARCTIC_META"},
                              {"$set": {"_id": "ARCTIC_META",
                                        "TYPE": "VersionStore",
                                        "QUOTA": 10737418240}})
    db.TEST.version_nums.insert_one({'symbol': 'pandas',
                                     '_id': bson.ObjectId('5ad0dc067934ecad404070bd'),
                                     'version': 2})
    # two version documents: v1 (initial write) and v2 (the append that
    # produced the uncompressed segment)
    db.TEST.versions.insert_many([
        {
            'append_count': 0,
            'dtype_metadata': {
                'index': ['index'],
                'columns': ["('a', 'a')", "('w', 'v')"]
            },
            'segment_count': 1,
            'dtype': '[(\'index\', \'S1\'), ("(\'a\', \'a\')", \'<i8\'), ("(\'w\', \'v\')", \'<i8\')]',
            'symbol': 'pandas',
            'up_to': 1,
            'metadata': None,
            'sha': bson.Binary(b'\xf2\x15h\x9d\x925\x95\xa5\x0e\x95J\xc4x\xfc\xfc\xd5\x80\xe0\x1d\xef', 0),
            'shape': [-1],
            'version': 1,
            'base_sha': bson.Binary(b'\xf2\x15h\x9d\x925\x95\xa5\x0e\x95J\xc4x\xfc\xfc\xd5\x80\xe0\x1d\xef', 0),
            '_id': bson.ObjectId('5ad0dc065c911d1188b512d8'),
            'type': 'pandasdf',
            'append_size': 0
        },
        {
            'append_count': 1,
            'dtype_metadata': {
                'index': ['index'],
                'columns': ["('a', 'a')", "('w', 'v')"]
            },
            'segment_count': 2,
            'sha': bson.Binary(b'1\x83[ZO\xec\x080D\x80f\xe4@\xe4\xd3\x94yG\xe2\x08', 0),
            'dtype': '[(\'index\', \'S1\'), ("(\'a\', \'a\')", \'<i8\'), ("(\'w\', \'v\')", \'<i8\')]',
            'symbol': 'pandas',
            'up_to': 2,
            'metadata': None,
            'base_version_id': bson.ObjectId('5ad0dc065c911d1188b512d8'),
            'shape': [-1],
            'version': 2,
            'base_sha': bson.Binary(b'\xf2\x15h\x9d\x925\x95\xa5\x0e\x95J\xc4x\xfc\xfc\xd5\x80\xe0\x1d\xef', 0),
            '_id': bson.ObjectId('5ad0dc075c911d1188b512d9'),
            'type': 'pandasdf',
            'append_size': 17
        }
    ])
    return {'symbol': 'pandas', 'store': arctic.get_library('arctic_test.TEST')}
[ "def", "multicolumn_store_with_uncompressed_write", "(", "mongo_server", ")", ":", "mongo_server", ".", "api", ".", "drop_database", "(", "'arctic_test'", ")", "library_name", "=", "'arctic_test.TEST'", "arctic", "=", "m", ".", "Arctic", "(", "mongo_host", "=", "mon...
44.107143
26.988095
def print_row(*argv):
    """Print one row of data.

    Expects at least three positional values: argv[0] (id, left-aligned
    width 3), argv[1] (name, left-aligned width 13) and argv[2]
    (allocation, right-aligned width 5). Any further values, e.g. a
    level in argv[3], are currently ignored.
    """
    row = f"{argv[0]:<3} {argv[1]:<13} {argv[2]:>5}"
    print(row)
[ "def", "print_row", "(", "*", "argv", ")", ":", "#for i in range(0, len(argv)):", "# row += f\"{argv[i]}\"", "# columns", "row", "=", "\"\"", "# id", "row", "+=", "f\"{argv[0]:<3}\"", "# name", "row", "+=", "f\" {argv[1]:<13}\"", "# allocation", "row", "+=", "f\" {arg...
19.125
20.4375
def serverUrl(self, value):
    """Setter for the server url (presumably a @property setter — the
    decorator is not visible in this chunk).

    Only stores *value* when it differs case-insensitively from the
    current url; a change in letter case alone is ignored.
    """
    current = self._serverUrl.lower()
    if current != value.lower():
        self._serverUrl = value
[ "def", "serverUrl", "(", "self", ",", "value", ")", ":", "if", "value", ".", "lower", "(", ")", "!=", "self", ".", "_serverUrl", ".", "lower", "(", ")", ":", "self", ".", "_serverUrl", "=", "value" ]
38
7.5
def compute_time_at_sun_angle(day, latitude, angle):
    """Compute the floating point time difference between mid-day and an angle.

    All the prayers are defined as certain angles from mid-day (Zuhr).

    This formula is taken from praytimes.org/calculation

    :param day: The day to which to compute for
    :param latitude: Latitude of the place of interest
        (the original docstring mislabeled this as longitude)
    :param angle: The angle at which to compute the time; must be non-zero
    :returns: The floating point time delta between Zuhr and the angle,
        the sign of the result corresponds to the sign of the angle
    :raises ValueError: if angle is 0 — the sign computation
        abs(angle)/angle would otherwise divide by zero
    """
    if angle == 0:
        raise ValueError('angle must be non-zero')
    positive_angle_rad = radians(abs(angle))
    angle_sign = abs(angle) / angle
    latitude_rad = radians(latitude)
    declination = radians(sun_declination(day))

    numerator = -sin(positive_angle_rad) - sin(latitude_rad) * sin(declination)
    denominator = cos(latitude_rad) * cos(declination)
    # acos argument may leave [-1, 1] at extreme latitudes; that raises
    # math domain error, same as the original behavior.
    time_diff = degrees(acos(numerator / denominator)) / 15
    return time_diff * angle_sign
[ "def", "compute_time_at_sun_angle", "(", "day", ",", "latitude", ",", "angle", ")", ":", "positive_angle_rad", "=", "radians", "(", "abs", "(", "angle", ")", ")", "angle_sign", "=", "abs", "(", "angle", ")", "/", "angle", "latitude_rad", "=", "radians", "(...
36.307692
22.076923
def embedded_images(X, images, exclusion_radius=None, ax=None, cmap=None,
                    zoom=1, seed=None, frameon=False):
    '''Annotate a 2d embedding with a spaced-out subset of its images.

    Repeatedly picks a random remaining point, draws its image at that
    location, then discards every point within ``exclusion_radius`` of it
    so the drawn images do not crowd each other. Useful when plotted over
    a scatterplot of an image embedding.
    '''
    assert X.shape[0] == images.shape[0], 'Unequal number of points and images'
    assert X.shape[1] == 2, 'X must be 2d'
    if ax is None:
        ax = plt.gca()
    if exclusion_radius is None:
        # TODO: make a smarter default based on image size and axis limits
        exclusion_radius = 1.
    if seed is not None:
        np.random.seed(seed)
    remaining_X, remaining_images = X, images
    while remaining_X.shape[0] > 0:
        idx = np.random.choice(remaining_X.shape[0])
        offset_img = OffsetImage(remaining_images[idx], zoom=zoom, cmap=cmap)
        box = AnnotationBbox(offset_img, remaining_X[idx], xycoords='data',
                             frameon=frameon)
        ax.add_artist(box)
        dist = np.sqrt(np.square(remaining_X[idx] - remaining_X).sum(axis=1))
        keep = (dist > exclusion_radius).ravel()
        remaining_X = remaining_X[keep]
        remaining_images = remaining_images[keep]
    # NOTE: returns the plt.show function object (not called), as the
    # original did.
    return plt.show
[ "def", "embedded_images", "(", "X", ",", "images", ",", "exclusion_radius", "=", "None", ",", "ax", "=", "None", ",", "cmap", "=", "None", ",", "zoom", "=", "1", ",", "seed", "=", "None", ",", "frameon", "=", "False", ")", ":", "assert", "X", ".", ...
42.32
19.6
def _backtrack(ex):
    """Search for a satisfying point of expression *ex* by backtracking.

    Returns a dict mapping the restricted variables to 0/1 if *ex* is
    satisfiable, otherwise None.
    """
    if ex is Zero:
        return None
    if ex is One:
        return dict()
    top_var = ex.top
    # Try restricting the top variable to 0, then to 1 (same order as
    # the original tuple of points).
    for bit in (0, 1):
        assignment = {top_var: bit}
        sub_solution = _backtrack(ex.restrict(assignment))
        if sub_solution is not None:
            sub_solution.update(assignment)
            return sub_solution
    return None
[ "def", "_backtrack", "(", "ex", ")", ":", "if", "ex", "is", "Zero", ":", "return", "None", "elif", "ex", "is", "One", ":", "return", "dict", "(", ")", "else", ":", "v", "=", "ex", ".", "top", "points", "=", "{", "v", ":", "0", "}", ",", "{", ...
25.222222
15.444444
def check_image_state(self, image_id, wait=True):
    '''
        method for checking the state of an image on AWS EC2

    :param image_id: string with AWS id of image
    :param wait: [optional] boolean to wait for image while pending
    :return: string reporting state of image
    '''
    title = '%s.check_image_state' % self.__class__.__name__

    # validate inputs
    input_fields = {
        'image_id': image_id
    }
    for key, value in input_fields.items():
        object_title = '%s(%s=%s)' % (title, key, str(value))
        self.fields.validate(value, '.%s' % key, object_title)

    # notify state check
    self.iam.printer('Querying AWS region %s for state of image %s.' % (self.iam.region_name, image_id))

    # check connection to API
    # NOTE(review): bare except hides the real failure cause; narrowing
    # to botocore exceptions would be safer.
    try:
        self.connection.describe_instances()
    except:
        raise AWSConnectionError(title)

    # check existence of image
    try:
        response = self.connection.describe_images(ImageIds=[ image_id ])
    except:
        raise ValueError('\nImage %s does not exist in your permission scope.' % image_id)
    if not 'Images' in response.keys():
        raise ValueError('\nImage %s does not exist in your permission scope.' % image_id)
    elif not response['Images'][0]:
        raise ValueError('\nImage %s does not exist in your permission scope.' % image_id)

    # check into state of image
    # Poll (up to 4 tries, ~3s apart) until the response carries a State.
    elif not 'State' in response['Images'][0].keys():
        from time import sleep
        from timeit import timeit as timer
        self.iam.printer('Checking into the status of image %s' % image_id, flush=True)
        state_timeout = 0
        while not 'State' in response['Images'][0].keys():
            self.iam.printer('.', flush=True)
            sleep(3)
            state_timeout += 1
            response = self.connection.describe_images(
                ImageIds=[ image_id ]
            )
            if state_timeout > 3:
                raise Exception('\nFailure to determine status of image %s.' % image_id)
        self.iam.printer(' done.')

    image_state = response['Images'][0]['State']

    # return None if image has already been deregistered or is invalid
    if image_state == 'deregistered':
        self.iam.printer('Image %s has already been deregistered.' % image_id)
        return None
    elif image_state == 'invalid' or image_state == 'transient' or image_state == 'failed':
        self.iam.printer('Image %s is %s.' % (image_id, image_state))
        return None

    # wait while image is pending
    elif image_state == 'pending':
        self.iam.printer('Image %s is %s.' % (image_id, image_state), flush=True)
        if not wait:
            return image_state
        else:
            from time import sleep
            # NOTE(review): `timeit` runs a benchmark of a no-op statement;
            # it looks like `timeit.default_timer` was intended as the clock
            # here — confirm before relying on the adaptive delay.
            from timeit import timeit as timer
            delay = 3
            state_timeout = 0
            # Poll until available, aiming for roughly one request every
            # 3 seconds (response time is subtracted from the sleep).
            while image_state != 'available':
                self.iam.printer('.', flush=True)
                sleep(delay)
                t3 = timer()
                response = self.connection.describe_images(
                    ImageIds=[ image_id ]
                )
                t4 = timer()
                state_timeout += 1
                response_time = t4 - t3
                if 3 - response_time > 0:
                    delay = 3 - response_time
                else:
                    delay = 0
                if state_timeout > 300:
                    raise Exception('\nTimeout. Failure initializing image %s on AWS in less than 15min' % image_id)
                image_state = response['Images'][0]['State']
            self.iam.printer(' done.')

    # report outcome
    self.iam.printer('Image %s is %s.' % (image_id, image_state))
    return image_state
[ "def", "check_image_state", "(", "self", ",", "image_id", ",", "wait", "=", "True", ")", ":", "title", "=", "'%s.check_image_state'", "%", "self", ".", "__class__", ".", "__name__", "# validate inputs", "input_fields", "=", "{", "'image_id'", ":", "image_id", ...
40.285714
21.285714
def catch_all(path):
    """Fallback route handler: respond with a JSON 404 payload.

    The payload reports the offending URL and a link back to the API root.
    """
    body = {
        'error': 'Invalid URL: /{}'.format(path),
        'links': {'root': '{}{}'.format(request.url_root, PREFIX[1:])},
    }
    return (body, HTTPStatus.NOT_FOUND)
[ "def", "catch_all", "(", "path", ")", ":", "return", "(", "dict", "(", "error", "=", "'Invalid URL: /{}'", ".", "format", "(", "path", ")", ",", "links", "=", "dict", "(", "root", "=", "'{}{}'", ".", "format", "(", "request", ".", "url_root", ",", "P...
46.4
16.2
def _assert_in_buildroot(self, filepath):
    """Raises an error if the given filepath isn't in the buildroot.

    Returns the normalized, absolute form of the path.

    :param filepath: path to check; a relative path is assumed to be
      relative to the build root and is joined onto it.
    :raises ValueError: if an absolute filepath escapes the build root.
    """
    filepath = os.path.normpath(filepath)
    root = get_buildroot()
    # Idiom fix: use os.path.isabs rather than comparing against
    # os.path.abspath; equivalent here since filepath is already normalized.
    if not os.path.isabs(filepath):
        # If not absolute, assume relative to the build root.
        return os.path.join(root, filepath)
    if '..' in os.path.relpath(filepath, root).split(os.path.sep):
        # The path wasn't in the buildroot. This is an error because it violates the pants being
        # hermetic.
        raise ValueError('Received a file_option that was not inside the build root:\n'
                         ' file_option: {filepath}\n'
                         ' build_root: {buildroot}\n'
                         .format(filepath=filepath, buildroot=root))
    return filepath
[ "def", "_assert_in_buildroot", "(", "self", ",", "filepath", ")", ":", "filepath", "=", "os", ".", "path", ".", "normpath", "(", "filepath", ")", "root", "=", "get_buildroot", "(", ")", "if", "not", "os", ".", "path", ".", "abspath", "(", "filepath", "...
45.421053
18.842105
def serve():
    """main entry point

    Bootstraps the crossdock test server: installs a Jaeger tracer,
    starts a TChannel server and a Tornado HTTP app, then runs the
    IOLoop forever (this call blocks).
    """
    logging.getLogger().setLevel(logging.DEBUG)
    logging.info('Python Tornado Crossdock Server Starting ...')

    # Install a tracer that samples everything but reports nowhere
    # (NullReporter) — suitable for the crossdock test harness.
    tracer = Tracer(
        service_name='python',
        reporter=NullReporter(),
        sampler=ConstSampler(decision=True))
    opentracing.tracer = tracer

    # TChannel endpoint for the crossdock RPC behaviors.
    tchannel = TChannel(name='python', hostport=':%d' % DEFAULT_SERVER_PORT,
                        trace=True)
    register_tchannel_handlers(tchannel=tchannel)
    tchannel.listen()

    # Plain HTTP endpoint on the client port.
    app = tornado.web.Application(debug=True)
    register_http_handlers(app)
    app.listen(DEFAULT_CLIENT_PORT)

    # Blocks until the process is stopped.
    tornado.ioloop.IOLoop.current().start()
[ "def", "serve", "(", ")", ":", "logging", ".", "getLogger", "(", ")", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "logging", ".", "info", "(", "'Python Tornado Crossdock Server Starting ...'", ")", "tracer", "=", "Tracer", "(", "service_name", "=", "...
30.52381
17.190476
def exciter(self, Xexc, Pexc, Vexc):
    """ Exciter model.

    Computes the state derivatives F for all exciters.

    :param Xexc: exciter state matrix (per exciter: Efd, Uf, Ur)
    :param Pexc: exciter parameter matrix
    :param Vexc: exciter input matrix (column 1 is the terminal voltage U)
    :returns: matrix of state derivatives, same shape as Xexc

    Based on Exciter.m from MatDyn by Stijn Cole, developed at Katholieke
    Universiteit Leuven. See U{http://www.esat.kuleuven.be/electa/teaching/
    matdyn/} for more information.
    """
    exciters = self.exciters
    F = zeros(Xexc.shape)
    typ1 = [e.generator._i for e in exciters if e.model == CONST_EXCITATION]
    typ2 = [e.generator._i for e in exciters if e.model == IEEE_DC1A]

    # Exciter type 1: constant excitation
    F[typ1, :] = 0.0

    # Exciter type 2: IEEE DC1A
    Efd = Xexc[typ2, 0]
    Uf = Xexc[typ2, 1]
    Ur = Xexc[typ2, 2]
    Ka = Pexc[typ2, 0]
    Ta = Pexc[typ2, 1]
    Ke = Pexc[typ2, 2]
    Te = Pexc[typ2, 3]
    Kf = Pexc[typ2, 4]
    Tf = Pexc[typ2, 5]
    Aex = Pexc[typ2, 6]
    Bex = Pexc[typ2, 7]
    Ur_min = Pexc[typ2, 8]
    Ur_max = Pexc[typ2, 9]
    Uref = Pexc[typ2, 10]
    Uref2 = Pexc[typ2, 11]
    U = Vexc[typ2, 1]

    Ux = Aex * exp(Bex * Efd)
    dUr = 1 / Ta * (Ka * (Uref - U + Uref2 - Uf) - Ur)
    dUf = 1 / Tf * (Kf / Te * (Ur - Ux - Ke * Efd) - Uf)
    # Anti-windup limiting of the regulator output Ur.
    # Bug fixes vs the original:
    #  * the lower clamp compared Ur against Ur_max instead of Ur_min;
    #  * sum(flatnonzero(cond)) >= 1 misses a violation at index 0
    #    (that index contributes 0 to the sum) — use .any() instead.
    if (Ur > Ur_max).any():
        Ur2 = Ur_max
    elif (Ur < Ur_min).any():
        Ur2 = Ur_min
    else:
        Ur2 = Ur
    dEfd = 1 / Te * (Ur2 - Ux - Ke * Efd)

    F[typ2, :] = c_[dEfd, dUf, dUr]

    # Exciter type 3:

    # Exciter type 4:

    return F
[ "def", "exciter", "(", "self", ",", "Xexc", ",", "Pexc", ",", "Vexc", ")", ":", "exciters", "=", "self", ".", "exciters", "F", "=", "zeros", "(", "Xexc", ".", "shape", ")", "typ1", "=", "[", "e", ".", "generator", ".", "_i", "for", "e", "in", "...
26.660714
20.071429
def _load_class(class_path, default): """ Loads the class from the class_path string """ if class_path is None: return default component = class_path.rsplit('.', 1) result_processor = getattr( importlib.import_module(component[0]), component[1], default ) if len(component) > 1 else default return result_processor
[ "def", "_load_class", "(", "class_path", ",", "default", ")", ":", "if", "class_path", "is", "None", ":", "return", "default", "component", "=", "class_path", ".", "rsplit", "(", "'.'", ",", "1", ")", "result_processor", "=", "getattr", "(", "importlib", "...
27.692308
14.461538
def parse_spec(self, spec):
    """Parse the given spec into a `specs.Spec` object.

    Trailing ``::`` selects all descendant addresses, trailing ``:``
    selects sibling addresses, otherwise a single address is returned
    (defaulting the name to the path's basename when no ``:name`` part
    is given).

    :param spec: a single spec string.
    :return: a single specs.Specs object.
    :raises: CmdLineSpecParser.BadSpecError if the address selector could not be parsed.
    """
    if spec.endswith('::'):
        return DescendantAddresses(self._normalize_spec_path(spec[:-2]))
    if spec.endswith(':'):
        return SiblingAddresses(self._normalize_spec_path(spec[:-1]))
    head, sep, tail = spec.rpartition(':')
    if sep:
        spec_path = self._normalize_spec_path(head)
        name = tail
    else:
        spec_path = self._normalize_spec_path(spec)
        name = os.path.basename(spec_path)
    return SingleAddress(spec_path, name)
[ "def", "parse_spec", "(", "self", ",", "spec", ")", ":", "if", "spec", ".", "endswith", "(", "'::'", ")", ":", "spec_path", "=", "spec", "[", ":", "-", "len", "(", "'::'", ")", "]", "return", "DescendantAddresses", "(", "self", ".", "_normalize_spec_pa...
39.368421
17.578947
def set_edist_powerlaw(self, emin_mev, emax_mev, delta, ne_cc):
    """Set the energy distribution function to a power law.

    **Call signature**

    *emin_mev*
      The minimum energy of the distribution, in MeV
    *emax_mev*
      The maximum energy of the distribution, in MeV
    *delta*
      The power-law index of the distribution
    *ne_cc*
      The number density of energetic electrons, in cm^-3.
    Returns
      *self* for convenience in chaining.

    Raises ValueError when any argument is out of range.
    """
    if not (emin_mev >= 0):
        raise ValueError('must have emin_mev >= 0; got %r' % (emin_mev,))
    if not (emax_mev >= emin_mev):
        raise ValueError('must have emax_mev >= emin_mev; got %r, %r' % (emax_mev, emin_mev))
    # Bug fix: the original delta/ne_cc messages used two %r placeholders
    # with a one-element tuple, so these raises crashed with TypeError
    # instead of the intended ValueError.
    if not (delta >= 0):
        raise ValueError('must have delta >= 0; got %r' % (delta,))
    if not (ne_cc >= 0):
        raise ValueError('must have ne_cc >= 0; got %r' % (ne_cc,))

    self.in_vals[IN_VAL_EDIST] = EDIST_PLW
    self.in_vals[IN_VAL_EMIN] = emin_mev
    self.in_vals[IN_VAL_EMAX] = emax_mev
    self.in_vals[IN_VAL_DELTA1] = delta
    self.in_vals[IN_VAL_NB] = ne_cc
    return self
[ "def", "set_edist_powerlaw", "(", "self", ",", "emin_mev", ",", "emax_mev", ",", "delta", ",", "ne_cc", ")", ":", "if", "not", "(", "emin_mev", ">=", "0", ")", ":", "raise", "ValueError", "(", "'must have emin_mev >= 0; got %r'", "%", "(", "emin_mev", ",", ...
38.580645
18.774194
def ipv4_prefix_to_mask(prefix):
    """
    ipv4 cidr prefix to net mask

    :param prefix: cidr prefix , rang in (0, 32)
    :type prefix: int
    :return: dot separated ipv4 net mask code, eg: 255.255.255.0
    :rtype: str
    :raises ValueError: if prefix is outside [0, 32]
    """
    if not 0 <= prefix <= 32:
        raise ValueError("invalid cidr prefix for ipv4")
    # Top `prefix` bits set, remaining low bits cleared.
    mask = ((1 << 32) - 1) ^ ((1 << (32 - prefix)) - 1)
    octets = [str((mask >> shift) & 0xFF) for shift in (24, 16, 8, 0)]
    return '.'.join(octets)
[ "def", "ipv4_prefix_to_mask", "(", "prefix", ")", ":", "if", "prefix", ">", "32", "or", "prefix", "<", "0", ":", "raise", "ValueError", "(", "\"invalid cidr prefix for ipv4\"", ")", "else", ":", "mask", "=", "(", "(", "1", "<<", "32", ")", "-", "1", ")...
30.52381
14.333333
def focus_left(pymux):
    " Move focus to the left. "
    def target_x(wp):
        # Subtract 2 in order to skip over the pane border.
        return wp.xpos - 2

    def target_y(wp):
        return wp.ypos

    _move_focus(pymux, target_x, target_y)
[ "def", "focus_left", "(", "pymux", ")", ":", "_move_focus", "(", "pymux", ",", "lambda", "wp", ":", "wp", ".", "xpos", "-", "2", ",", "# 2 in order to skip over the border.", "lambda", "wp", ":", "wp", ".", "ypos", ")" ]
37.6
17.6
def mdct(x, L):
    """Modified Discrete Cosine Transform (MDCT)

    Returns the Modified Discrete Cosine Transform with fixed
    window size L of the signal x.

    The window is based on a sine window.

    Parameters
    ----------
    x : ndarray, shape (N,)
        The signal
    L : int
        The window length

    Returns
    -------
    y : ndarray, shape (L/2, 2 * N / L)
        The MDCT coefficients

    See also
    --------
    imdct
    """
    # Bug fix: np.float / np.complex were deprecated in NumPy 1.20 and
    # removed in 1.24; use the explicit fixed-width dtypes instead.
    x = np.asarray(x, dtype=np.float64)
    N = x.size
    # Number of frequency channels
    K = L // 2
    # Test length
    if N % K != 0:
        raise RuntimeError('Input length must be a multiple of the half of '
                           'the window size')
    # Pad edges with zeros
    xx = np.zeros(L // 4 + N + L // 4)
    xx[L // 4:-L // 4] = x
    x = xx
    del xx
    # Number of frames
    P = N // K
    if P < 2:
        raise ValueError('Signal too short')
    # Framing
    x = _framing(x, L)
    # Windowing: long sine window, with flattened edge windows on the
    # first and last frames.
    aL = np.arange(L, dtype=np.float64)
    w_long = np.sin((np.pi / L) * (aL + 0.5))
    w_edge_L = w_long.copy()
    w_edge_L[:L // 4] = 0.
    w_edge_L[L // 4:L // 2] = 1.
    w_edge_R = w_long.copy()
    w_edge_R[L // 2:L // 2 + L // 4] = 1.
    w_edge_R[L // 2 + L // 4:] = 0.
    x[:, 0] *= w_edge_L
    x[:, 1:-1] *= w_long[:, None]
    x[:, -1] *= w_edge_R
    # Pre-twiddle
    x = x.astype(np.complex128)
    x *= np.exp((-1j * np.pi / L) * aL)[:, None]
    # FFT
    y = fft(x, axis=0)
    # Post-twiddle
    y = y[:L // 2, :]
    y *= np.exp((-1j * np.pi * (L // 2 + 1.) / L) *
                (0.5 + aL[:L // 2]))[:, None]
    # Real part and scaling
    y = math.sqrt(2. / K) * np.real(y)
    return y
[ "def", "mdct", "(", "x", ",", "L", ")", ":", "x", "=", "np", ".", "asarray", "(", "x", ",", "dtype", "=", "np", ".", "float", ")", "N", "=", "x", ".", "size", "# Number of frequency channels", "K", "=", "L", "//", "2", "# Test length", "if", "N",...
20.948718
21.217949
def update_redirect_to_from_json(page, redirect_to_complete_slugs):
    """
    The second pass of create_and_update_from_json_data used to update the
    redirect_to field: resolve the first matching slug (per language) to a
    Page and point *page* at it.

    Returns a messages list to be appended to the messages from the
    first pass (one message if no slug resolved to an existing page).
    """
    messages = []
    slug = ''
    found = False
    for lang, slug in list(redirect_to_complete_slugs.items()):
        target = Page.objects.from_path(slug, lang, exclude_drafts=False)
        if target:
            page.redirect_to = target
            page.save()
            found = True
            break
    if not found:
        messages.append(_("Could not find page for redirect-to field"
            " '%s'") % (slug,))
    return messages
[ "def", "update_redirect_to_from_json", "(", "page", ",", "redirect_to_complete_slugs", ")", ":", "messages", "=", "[", "]", "s", "=", "''", "for", "lang", ",", "s", "in", "list", "(", "redirect_to_complete_slugs", ".", "items", "(", ")", ")", ":", "r", "="...
31.05
20.05
def reduce_prod(attrs, inputs, proto_obj):
    """Reduce the array along a given axis by product value"""
    # ONNX calls the attribute 'axes'; MXNet's prod expects 'axis'.
    renamed_attrs = translation_utils._fix_attribute_names(attrs, {'axes': 'axis'})
    return 'prod', renamed_attrs, inputs
[ "def", "reduce_prod", "(", "attrs", ",", "inputs", ",", "proto_obj", ")", ":", "new_attrs", "=", "translation_utils", ".", "_fix_attribute_names", "(", "attrs", ",", "{", "'axes'", ":", "'axis'", "}", ")", "return", "'prod'", ",", "new_attrs", ",", "inputs" ...
54.5
11
def led(host, seq, anim, f, d):
    """
    Control the drones LED.

    Parameters:
    seq -- sequence number
    anim -- Integer: animation to play
    f -- Float: frequency in HZ of the animation
    d -- Integer: total duration in seconds of the animation
    """
    params = [anim, float(f), d]
    at(host, 'LED', seq, params)
[ "def", "led", "(", "host", ",", "seq", ",", "anim", ",", "f", ",", "d", ")", ":", "at", "(", "host", ",", "'LED'", ",", "seq", ",", "[", "anim", ",", "float", "(", "f", ")", ",", "d", "]", ")" ]
27.636364
12.363636
def roll_qtrday(other, n, month, day_option, modby=3):
    """Possibly increment or decrement the number of periods to shift
    based on rollforward/rollbackward conventions.

    Parameters
    ----------
    other : cftime.datetime
    n : number of periods to increment, before adjusting for rolling
    month : int reference month giving the first month of the year
    day_option : 'start', 'end'
        The convention to use in finding the day in a given month against
        which to compare for rollforward/rollbackward decisions.
    modby : int 3 for quarters, 12 for years

    Returns
    -------
    n : int number of periods to increment

    See Also
    --------
    _get_day_of_month : Find the day in a month provided an offset.
    """
    months_since = other.month % modby - month % modby

    if n > 0:
        # pretend to roll back if on same month but before compare_day
        before_compare_day = months_since < 0 or (
            months_since == 0
            and other.day < _get_day_of_month(other, day_option))
        if before_compare_day:
            n -= 1
    else:
        # make sure to roll forward, so negate
        after_compare_day = months_since > 0 or (
            months_since == 0
            and other.day > _get_day_of_month(other, day_option))
        if after_compare_day:
            n += 1
    return n
[ "def", "roll_qtrday", "(", "other", ",", "n", ",", "month", ",", "day_option", ",", "modby", "=", "3", ")", ":", "months_since", "=", "other", ".", "month", "%", "modby", "-", "month", "%", "modby", "if", "n", ">", "0", ":", "if", "months_since", "...
32.487179
20.692308
def by_category(self):
    """Returns every :class:`CategoryChannel` and their associated channels.

    These channels and categories are sorted in the official Discord UI order.

    If the channels do not have a category, then the first element of
    the tuple is ``None``.

    Returns
    --------
    List[Tuple[Optional[:class:`CategoryChannel`], List[:class:`abc.GuildChannel`]]]:
        The categories and their associated channels.
    """
    # Bucket non-category channels under their category_id.
    grouped = defaultdict(list)
    for channel in self._channels.values():
        if isinstance(channel, CategoryChannel):
            continue
        grouped[channel.category_id].append(channel)

    def sort_key(pair):
        category, members = pair
        position = (category.position, category.id) if category else (-1, -1)
        return (position, members)

    lookup = self._channels.get
    pairs = [(lookup(category_id), members)
             for category_id, members in grouped.items()]
    pairs.sort(key=sort_key)
    # Sort each category's channels in UI order.
    for _, members in pairs:
        members.sort(key=lambda c: (c._sorting_bucket, c.position, c.id))
    return pairs
[ "def", "by_category", "(", "self", ")", ":", "grouped", "=", "defaultdict", "(", "list", ")", "for", "channel", "in", "self", ".", "_channels", ".", "values", "(", ")", ":", "if", "isinstance", "(", "channel", ",", "CategoryChannel", ")", ":", "continue"...
34.866667
23.366667
def parse_data(self, logfile):
    """Parse every line of the log stream into Line objects.

    Lines that parse successfully are collected in ``self._valid_lines``;
    the rest are kept verbatim (stripped) in ``self._invalid_lines``.
    ``self.total_lines`` is set to the combined count afterwards.

    :param logfile: [required] Log file data stream.
    :type logfile: str
    """
    for raw in logfile:
        text = raw.strip()
        parsed = Line(text)
        if parsed.valid:
            self._valid_lines.append(parsed)
        else:
            self._invalid_lines.append(text)
    self.total_lines = len(self._valid_lines) + len(self._invalid_lines)
[ "def", "parse_data", "(", "self", ",", "logfile", ")", ":", "for", "line", "in", "logfile", ":", "stripped_line", "=", "line", ".", "strip", "(", ")", "parsed_line", "=", "Line", "(", "stripped_line", ")", "if", "parsed_line", ".", "valid", ":", "self", ...
33.5
17.0625
def update_settings(self):
    """After changing the settings, we need to recreate the whole image."""
    # Redraw the main image and its markers with the new settings.
    self.display()
    self.display_markers()
    # Redraw annotations only when an annotation set is loaded.
    if self.parent.notes.annot is not None:
        self.parent.notes.display_notes()
[ "def", "update_settings", "(", "self", ")", ":", "self", ".", "display", "(", ")", "self", ".", "display_markers", "(", ")", "if", "self", ".", "parent", ".", "notes", ".", "annot", "is", "not", "None", ":", "self", ".", "parent", ".", "notes", ".", ...
41.5
9
def imeicsum(text):
    '''
    Calculate the imei check byte.

    Implements the Luhn checksum over the first 14 digits of *text*:
    every second digit (odd index) is doubled, the decimal digits of all
    results are summed, and the check digit brings the total to a
    multiple of 10.

    :param text: string whose first 14 characters are the IMEI digits
    :returns: the check digit as a one-character string
    '''
    digs = []
    for i in range(14):
        v = int(text[i])
        if i % 2:
            v *= 2
        # Idiom fix: extend with the digits of v instead of abusing a
        # list comprehension for its side effects.
        digs.extend(int(x) for x in str(v))

    remd = sum(digs) % 10
    chek = 0 if remd == 0 else 10 - remd
    return str(chek)
[ "def", "imeicsum", "(", "text", ")", ":", "digs", "=", "[", "]", "for", "i", "in", "range", "(", "14", ")", ":", "v", "=", "int", "(", "text", "[", "i", "]", ")", "if", "i", "%", "2", ":", "v", "*=", "2", "[", "digs", ".", "append", "(", ...
16
24.5
def disconnect(self, driver):
    """Disconnect from the console.

    Backs out of any nested CLI modes by sending 'exit' until the device
    reports 'global' mode, then sends Ctrl-D (EOT) to close the console.
    All failures are logged rather than raised, so this is best-effort.
    """
    self.log("TELNETCONSOLE disconnect")
    try:
        # Walk back up to the top-level ('global') mode.
        while self.device.mode != 'global':
            self.device.send('exit', timeout=10)
    except OSError:
        self.log("TELNETCONSOLE already disconnected")
    except pexpect.TIMEOUT:
        self.log("TELNETCONSOLE unable to get the root prompt")
    try:
        # chr(4) is Ctrl-D / EOT: terminates the console session.
        self.device.ctrl.send(chr(4))
    except OSError:
        self.log("TELNETCONSOLE already disconnected")
[ "def", "disconnect", "(", "self", ",", "driver", ")", ":", "self", ".", "log", "(", "\"TELNETCONSOLE disconnect\"", ")", "try", ":", "while", "self", ".", "device", ".", "mode", "!=", "'global'", ":", "self", ".", "device", ".", "send", "(", "'exit'", ...
35.933333
15.8
def newDocProp(self, name, value):
    """Create a new property carried by a document.

    Wraps xmlNewDocProp; raises treeError when the C call fails.
    """
    raw = libxml2mod.xmlNewDocProp(self._o, name, value)
    if raw is None:
        raise treeError('xmlNewDocProp() failed')
    return xmlAttr(_obj=raw)
[ "def", "newDocProp", "(", "self", ",", "name", ",", "value", ")", ":", "ret", "=", "libxml2mod", ".", "xmlNewDocProp", "(", "self", ".", "_o", ",", "name", ",", "value", ")", "if", "ret", "is", "None", ":", "raise", "treeError", "(", "'xmlNewDocProp() ...
45
12.833333
def statistical_distances(samples1, samples2, earth_mover_dist=True,
                          energy_dist=True):
    """Compute measures of the statistical distance between samples.

    Always includes the two-sample KS p-value and statistic; optionally
    appends the Earth mover's (Wasserstein) and energy distances.

    Parameters
    ----------
    samples1: 1d array
    samples2: 1d array
    earth_mover_dist: bool, optional
        Whether or not to compute the Earth mover's distance between
        the samples.
    energy_dist: bool, optional
        Whether or not to compute the energy distance between the
        samples.

    Returns
    -------
    1d array
    """
    ks_result = scipy.stats.ks_2samp(samples1, samples2)
    distances = [ks_result.pvalue, ks_result.statistic]
    if earth_mover_dist:
        distances.append(scipy.stats.wasserstein_distance(samples1, samples2))
    if energy_dist:
        distances.append(scipy.stats.energy_distance(samples1, samples2))
    return np.asarray(distances)
[ "def", "statistical_distances", "(", "samples1", ",", "samples2", ",", "earth_mover_dist", "=", "True", ",", "energy_dist", "=", "True", ")", ":", "out", "=", "[", "]", "temp", "=", "scipy", ".", "stats", ".", "ks_2samp", "(", "samples1", ",", "samples2", ...
31.481481
20.777778
def _embedding_dim(vocab_size): """Calculate a reasonable embedding size for a vocabulary. Rule of thumb is 6 * 4th root of vocab_size. Args: vocab_size: Size of the input vocabulary. Returns: The embedding size to use. Raises: ValueError: if `vocab_size` is invalid. """ if not vocab_size or (vocab_size <= 0): raise ValueError("Invalid vocab_size %g." % vocab_size) return int(round(6.0 * math.sqrt(math.sqrt(vocab_size))))
[ "def", "_embedding_dim", "(", "vocab_size", ")", ":", "if", "not", "vocab_size", "or", "(", "vocab_size", "<=", "0", ")", ":", "raise", "ValueError", "(", "\"Invalid vocab_size %g.\"", "%", "vocab_size", ")", "return", "int", "(", "round", "(", "6.0", "*", ...
29.666667
16.4
def getMember(self, address, id, headers=None, query_params=None, content_type="application/json"):
    """
    Get network member settings
    It is method for GET /network/{id}/member/{address}

    Note: the parameter name ``id`` shadows the builtin but is kept for
    interface compatibility.
    """
    base = self.client.base_url
    uri = base + "/network/" + id + "/member/" + address
    return self.client.get(uri, None, headers, query_params, content_type)
[ "def", "getMember", "(", "self", ",", "address", ",", "id", ",", "headers", "=", "None", ",", "query_params", "=", "None", ",", "content_type", "=", "\"application/json\"", ")", ":", "uri", "=", "self", ".", "client", ".", "base_url", "+", "\"/network/\"",...
51.857143
21.571429
def _choose_read_fs(authority, cache, read_path, version_check, hasher):
    '''
    Context manager returning the appropriate up-to-date readable filesystem

    Use ``cache`` if it is a valid filessystem and has a file at
    ``read_path``, otherwise use ``authority``. If the file at
    ``read_path`` is out of date, update the file in ``cache`` before
    returning it.

    NOTE(review): this is a generator function; presumably it is wrapped
    with ``contextlib.contextmanager`` where it is decorated/used —
    confirm at the (not visible) decoration site.
    '''
    if cache and cache.fs.isfile(read_path):
        # Cached copy exists: serve it when its hash passes version_check.
        if version_check(hasher(cache.fs.open(read_path, 'rb'))):
            yield cache.fs
        # Stale cache: refresh from the authority when it has the file.
        elif authority.fs.isfile(read_path):
            fs.utils.copyfile(
                authority.fs, read_path, cache.fs, read_path)
            yield cache.fs
        else:
            # Neither copy is usable; ensure parent directories exist on
            # both filesystems and hand back the cache.
            _makedirs(authority.fs, fs.path.dirname(read_path))
            _makedirs(cache.fs, fs.path.dirname(read_path))
            yield cache.fs
    else:
        # No usable cache: read from the authority, creating parent
        # directories when the file does not exist yet.
        if not authority.fs.isfile(read_path):
            _makedirs(authority.fs, fs.path.dirname(read_path))
        yield authority.fs
[ "def", "_choose_read_fs", "(", "authority", ",", "cache", ",", "read_path", ",", "version_check", ",", "hasher", ")", ":", "if", "cache", "and", "cache", ".", "fs", ".", "isfile", "(", "read_path", ")", ":", "if", "version_check", "(", "hasher", "(", "ca...
31.875
23.5625
def get_segment(sla,N,last=True,mid=None,first=None,remove_edges=True,truncate_if_continents=True): ''' Intelligent segmentation of data. :keyword remove_edges: discard data at track edges. :keyword truncate_if_continents: Force truncating data if a continent is found within a segment of data. :keyword last: Get segments of data sticked to the last point in track :keyword first: Get segments of data sticked to the first point in track :keyword mid: Get segments of data sticked to the middle point in track ''' #Set defaults if first is not None : last=None mid=None elif mid is not None : last=None first=None dumsla=sla.copy() nx=sla.shape[1] nt=sla.shape[0] #Save input mask dumsla.data[dumsla.mask]=dumsla.fill_value if len(dumsla.mask.shape) > 0: mask=np.ma.array(dumsla.mask.copy(),mask=np.zeros(sla.shape,dtype=bool)) else: mask=np.array([dumsla.mask]*sla.size).reshape(sla.shape) mask=np.ma.array(mask,mask=np.zeros(sla.shape,dtype=bool)) dumsla.mask=mask#np.array([dumsla.mask]*sla.size).reshape(sla.shape) #~ dumsla.mask[:]=False #Get edges if remove_edges : xid=np.ma.array(np.repeat(np.arange(nx),nt).reshape(nx,nt).transpose(),mask=mask.data) else : xid=np.ma.array(np.repeat(np.arange(nx),nt).reshape(nx,nt).transpose(),mask=np.zeros(sla.shape,dtype=bool)) left=xid.min(axis=1) right=xid.max(axis=1) #Shift towards end if last : st=(right-N).astype(int) +1 en=(right).astype(int) + 1 elif mid : midpt=nx/2 rlag=right-midpt llag=midpt-left odd = np.int(N)&1 and True or False if not odd : nr=nl=np.int(N)/2 else : nr=np.int(N)/2 + 1 nl=np.int(N)/2 # for i,jk in enumerate(zip(*(llag,rlag))): # j,k=jk # st=0 st=np.repeat(midpt-nl,nt) en=np.repeat(midpt+nr,nt) elif first : st=(left).astype(int) en=(left+N).astype(int) if not remove_edges : st[st < 0] = 0 en[en > nx] = nx for i in np.arange(nt) : dumsla.mask[i,:st[i]]=True dumsla.mask[i,en[i]:]=True mask.mask[i,:st[i]]=True mask.mask[i,en[i]:]=True #Update nt cycempty=dumsla.mask.sum(axis=1) == N ind=np.arange(nt)[~cycempty] 
nt=(~cycempty).sum() #Reform stuff dumsla=dumsla.compressed().reshape(nt,N) mask=mask.compressed().reshape(nt,N) if truncate_if_continents : empty=mask.sum(axis=0) == nt if empty.sum() > 0 : dumsla=dumsla[:,~empty] mask=mask[:,~empty] print '[WARNING] Points over land mass - removed {} pts'.format(empty.sum()) return np.ma.array(dumsla,mask=mask), ind
[ "def", "get_segment", "(", "sla", ",", "N", ",", "last", "=", "True", ",", "mid", "=", "None", ",", "first", "=", "None", ",", "remove_edges", "=", "True", ",", "truncate_if_continents", "=", "True", ")", ":", "#Set defaults\r", "if", "first", "is", "n...
31.791209
21.747253
def _to_diagonally_dominant_weighted(mat): """Make matrix weighted diagonally dominant using the Laplacian.""" mat += np.diag(np.sum(np.abs(mat), axis=1) + 0.01) return mat
[ "def", "_to_diagonally_dominant_weighted", "(", "mat", ")", ":", "mat", "+=", "np", ".", "diag", "(", "np", ".", "sum", "(", "np", ".", "abs", "(", "mat", ")", ",", "axis", "=", "1", ")", "+", "0.01", ")", "return", "mat" ]
45.25
10.5
def fcs(args):
    """
    %prog fcs fcsfile

    Process the results from Genbank contaminant screen. An example of the file
    looks like:

    contig name, length, span(s), apparent source
    contig0746      11760   1..141  vector
    contig0751      14226   13476..14226    vector
    contig0800      124133  30512..30559    primer/adapter

    Emits one BED-style line (contig, 0-based start, end, label) per span,
    after widening spans whose overhang to a contig edge is below --cutoff.
    """
    p = OptionParser(fcs.__doc__)
    p.add_option("--cutoff", default=200,
            help="Skip small components less than [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    fcsfile, = args
    cutoff = opts.cutoff
    # NOTE(review): fp is never closed; a `with open(...)` would be safer.
    fp = open(fcsfile)
    for row in fp:
        if row[0] == "#":
            continue

        # Tab-separated when tabs are present, otherwise any whitespace.
        sep = "\t" if "\t" in row else None
        atoms = row.rstrip().split(sep, 3)
        contig, length = atoms[:2]
        length = int(length)
        label = atoms[-1]
        label = label.replace(" ", "_")

        # Three fields means no span column: cover the whole contig.
        if len(atoms) == 3:
            ranges = "{0}..{1}".format(1, length)
        else:
            assert len(atoms) == 4
            ranges = atoms[2]

        for ab in ranges.split(","):
            a, b = ab.split("..")
            a, b = int(a), int(b)
            assert a <= b
            # Extend the span to the contig edge when the remaining
            # overhang is smaller than the cutoff.
            ahang = a - 1
            bhang = length - b
            if ahang < cutoff:
                a = 1
            if bhang < cutoff:
                b = length
            print("\t".join(str(x) for x in (contig, a - 1, b, label)))
[ "def", "fcs", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "fcs", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--cutoff\"", ",", "default", "=", "200", ",", "help", "=", "\"Skip small components less than [default: %default]\"", ")", "opts", ...
28.156863
16.823529
def remove_get_department_uids(portal):
    """Removes getDepartmentUIDs indexes and metadata
    """
    logger.info("Removing filtering by department ...")
    index_removals = (
        ("bika_catalog", "getDepartmentUIDs"),
        ("bika_setup_catalog", "getDepartmentUID"),
        (CATALOG_ANALYSIS_REQUEST_LISTING, "getDepartmentUIDs"),
        (CATALOG_WORKSHEET_LISTING, "getDepartmentUIDs"),
        (CATALOG_ANALYSIS_LISTING, "getDepartmentUID"),
    )
    for catalog, index_name in index_removals:
        del_index(portal, catalog, index_name)

    metadata_removals = (
        (CATALOG_ANALYSIS_REQUEST_LISTING, "getDepartmentUIDs"),
        (CATALOG_WORKSHEET_LISTING, "getDepartmentUIDs"),
        (CATALOG_ANALYSIS_LISTING, "getDepartmentUID"),
    )
    for catalog, column in metadata_removals:
        del_metadata(portal, catalog, column)
[ "def", "remove_get_department_uids", "(", "portal", ")", ":", "logger", ".", "info", "(", "\"Removing filtering by department ...\"", ")", "del_index", "(", "portal", ",", "\"bika_catalog\"", ",", "\"getDepartmentUIDs\"", ")", "del_index", "(", "portal", ",", "\"bika_...
54.461538
22.307692
def has_no_flat_neurites(neuron, tol=0.1, method='ratio'): '''Check that a neuron has no flat neurites Arguments: neuron(Neuron): The neuron object to test tol(float): tolerance method(string): way of determining flatness, 'tolerance', 'ratio' \ as described in :meth:`neurom.check.morphtree.get_flat_neurites` Returns: CheckResult with result ''' return CheckResult(len(get_flat_neurites(neuron, tol, method)) == 0)
[ "def", "has_no_flat_neurites", "(", "neuron", ",", "tol", "=", "0.1", ",", "method", "=", "'ratio'", ")", ":", "return", "CheckResult", "(", "len", "(", "get_flat_neurites", "(", "neuron", ",", "tol", ",", "method", ")", ")", "==", "0", ")" ]
35.846154
24.615385
def move_to_step(self, step): """ Use in cases when you need to move in given step depending on input """ if step not in self._scenario_steps.keys(): raise UndefinedState("step {} not defined in scenario".format(step)) try: session_id = session.sessionId self.session_machines.set_state(session_id, step) except UninitializedStateMachine as e: logger.error(e) return statement(INTERNAL_ERROR_MSG)
[ "def", "move_to_step", "(", "self", ",", "step", ")", ":", "if", "step", "not", "in", "self", ".", "_scenario_steps", ".", "keys", "(", ")", ":", "raise", "UndefinedState", "(", "\"step {} not defined in scenario\"", ".", "format", "(", "step", ")", ")", "...
41.083333
14.583333
def getGeometry(self,ra=None,dec=None): """Return an array of rectangles that represent the 'ra,dec' corners of the FOV""" import math,ephem ccds=[] if ra is None: ra=self.ra if dec is None: dec=self.dec self.ra=ephem.hours(ra) self.dec=ephem.degrees(dec) for geo in self.geometry[self.camera]: ycen=math.radians(geo["dec"])+dec xcen=math.radians(geo["ra"])/math.cos(ycen)+ra dy=math.radians(geo["ddec"]) dx=math.radians(geo["dra"]/math.cos(ycen)) ccds.append([xcen-dx/2.0,ycen-dy/2.0,xcen+dx/2.0,ycen+dy/2.0]) return ccds
[ "def", "getGeometry", "(", "self", ",", "ra", "=", "None", ",", "dec", "=", "None", ")", ":", "import", "math", ",", "ephem", "ccds", "=", "[", "]", "if", "ra", "is", "None", ":", "ra", "=", "self", ".", "ra", "if", "dec", "is", "None", ":", ...
33.1
17.1
def set_or_reset_runtime_param(self, key, value): """Maintains the context of the runtime settings for invoking a command. This should be called by a click.option callback, and only called once for each setting for each command invocation. If the setting exists, it follows that the runtime settings are stale, so the entire runtime settings are reset. """ if self._runtime.has_option('general', key): self._runtime = self._new_parser() if value is None: return settings._runtime.set('general', key.replace('tower_', ''), six.text_type(value))
[ "def", "set_or_reset_runtime_param", "(", "self", ",", "key", ",", "value", ")", ":", "if", "self", ".", "_runtime", ".", "has_option", "(", "'general'", ",", "key", ")", ":", "self", ".", "_runtime", "=", "self", ".", "_new_parser", "(", ")", "if", "v...
39.058824
20.117647
def get_story(self, id): """Fetches a single story by id. get /v1/public/stories/{storyId} :param id: ID of Story :type params: int :returns: StoryDataWrapper >>> m = Marvel(public_key, private_key) >>> response = m.get_story(29) >>> print response.data.result.title Caught in the heart of a nuclear explosion, mild-mannered scientist Bruce Banner finds himself... """ url = "%s/%s" % (Story.resource_url(), id) response = json.loads(self._call(url).text) return StoryDataWrapper(self, response)
[ "def", "get_story", "(", "self", ",", "id", ")", ":", "url", "=", "\"%s/%s\"", "%", "(", "Story", ".", "resource_url", "(", ")", ",", "id", ")", "response", "=", "json", ".", "loads", "(", "self", ".", "_call", "(", "url", ")", ".", "text", ")", ...
31.736842
17.684211
def create_app(self, args): """创建应用 在指定区域创建一个新应用,所属应用为当前请求方。 Args: - args: 请求参数(json),参考 http://kirk-docs.qiniu.com/apidocs/ Returns: - result 成功返回所创建的应用信息,若失败则返回None - ResponseInfo 请求的Response信息 """ url = '{0}/v3/apps'.format(self.host) return http._post_with_qiniu_mac(url, args, self.auth)
[ "def", "create_app", "(", "self", ",", "args", ")", ":", "url", "=", "'{0}/v3/apps'", ".", "format", "(", "self", ".", "host", ")", "return", "http", ".", "_post_with_qiniu_mac", "(", "url", ",", "args", ",", "self", ".", "auth", ")" ]
25.466667
20
def change_active_pointer_grab(self, event_mask, cursor, time, onerror = None): """Change the dynamic parameters of a pointer grab. See XChangeActivePointerGrab(3X11).""" request.ChangeActivePointerGrab(display = self.display, onerror = onerror, cursor = cursor, time = time, event_mask = event_mask)
[ "def", "change_active_pointer_grab", "(", "self", ",", "event_mask", ",", "cursor", ",", "time", ",", "onerror", "=", "None", ")", ":", "request", ".", "ChangeActivePointerGrab", "(", "display", "=", "self", ".", "display", ",", "onerror", "=", "onerror", ",...
59.625
16.5
def fallbacks(enable=True): """ Temporarily switch all language fallbacks on or off. Example: with fallbacks(False): lang_has_slug = bool(self.slug) May be used to enable fallbacks just when they're needed saving on some processing or check if there is a value for the current language (not knowing the language) """ current_enable_fallbacks = settings.ENABLE_FALLBACKS settings.ENABLE_FALLBACKS = enable try: yield finally: settings.ENABLE_FALLBACKS = current_enable_fallbacks
[ "def", "fallbacks", "(", "enable", "=", "True", ")", ":", "current_enable_fallbacks", "=", "settings", ".", "ENABLE_FALLBACKS", "settings", ".", "ENABLE_FALLBACKS", "=", "enable", "try", ":", "yield", "finally", ":", "settings", ".", "ENABLE_FALLBACKS", "=", "cu...
28.526316
20.947368
def name(self): """ Returns name of the node so if its path then only last part is returned. """ org = safe_unicode(self.path.rstrip('/').split('/')[-1]) return u'%s @ %s' % (org, self.changeset.short_id)
[ "def", "name", "(", "self", ")", ":", "org", "=", "safe_unicode", "(", "self", ".", "path", ".", "rstrip", "(", "'/'", ")", ".", "split", "(", "'/'", ")", "[", "-", "1", "]", ")", "return", "u'%s @ %s'", "%", "(", "org", ",", "self", ".", "chan...
35.142857
10.571429
def model(self, autoGenerate=False): """ Returns the default Table class that is associated with this \ schema instance. :param autoGenerate | <bool> :return <subclass of Table> """ if self.__model is None and autoGenerate: self.__model = orb.system.generateModel(self) self.setModel(self.__model) return self.__model
[ "def", "model", "(", "self", ",", "autoGenerate", "=", "False", ")", ":", "if", "self", ".", "__model", "is", "None", "and", "autoGenerate", ":", "self", ".", "__model", "=", "orb", ".", "system", ".", "generateModel", "(", "self", ")", "self", ".", ...
32.307692
12
def daemon_run(no_error, restart, record_path, keep_json, check_duplicate, use_polling, log_level): """ Run RASH index daemon. This daemon watches the directory ``~/.config/rash/data/record`` and translate the JSON files dumped by ``record`` command into sqlite3 DB at ``~/.config/rash/data/db.sqlite``. ``rash init`` will start RASH automatically by default. But there are alternative ways to start daemon. If you want to organize background process in one place such as supervisord_, it is good to add `--restart` option to force stop other daemon process if you accidentally started it in other place. Here is an example of supervisord_ setup:: [program:rash-daemon] command=rash daemon --restart .. _supervisord: http://supervisord.org/ Alternatively, you can call ``rash index`` in cron job to avoid using daemon. It is useful if you want to use RASH on NFS, as it looks like watchdog does not work on NFS.:: # Refresh RASH DB every 10 minutes */10 * * * * rash index """ # Probably it makes sense to use this daemon to provide search # API, so that this daemon is going to be the only process that # is connected to the DB? from .config import ConfigStore from .indexer import Indexer from .log import setup_daemon_log_file, LogForTheFuture from .watchrecord import watch_record, install_sigterm_handler install_sigterm_handler() cfstore = ConfigStore() if log_level: cfstore.daemon_log_level = log_level flogger = LogForTheFuture() # SOMEDAY: make PID checking/writing atomic if possible flogger.debug('Checking old PID file %r.', cfstore.daemon_pid_path) if os.path.exists(cfstore.daemon_pid_path): flogger.debug('Old PID file exists. Reading from it.') with open(cfstore.daemon_pid_path, 'rt') as f: pid = int(f.read().strip()) flogger.debug('Checking if old process with PID=%d is alive', pid) try: os.kill(pid, 0) # check if `pid` is alive except OSError: flogger.info( 'Process with PID=%d is already dead. 
' 'So just go on and use this daemon.', pid) else: if restart: flogger.info('Stopping old daemon with PID=%d.', pid) stop_running_daemon(cfstore, pid) else: message = ('There is already a running daemon (PID={0})!' .format(pid)) if no_error: flogger.debug(message) # FIXME: Setup log handler and flogger.dump(). # Note that using the default log file is not safe # since it has already been used. return else: raise RuntimeError(message) else: flogger.debug('Daemon PID file %r does not exists. ' 'So just go on and use this daemon.', cfstore.daemon_pid_path) with open(cfstore.daemon_pid_path, 'w') as f: f.write(str(os.getpid())) try: setup_daemon_log_file(cfstore) flogger.dump() indexer = Indexer(cfstore, check_duplicate, keep_json, record_path) indexer.index_all() watch_record(indexer, use_polling) finally: os.remove(cfstore.daemon_pid_path)
[ "def", "daemon_run", "(", "no_error", ",", "restart", ",", "record_path", ",", "keep_json", ",", "check_duplicate", ",", "use_polling", ",", "log_level", ")", ":", "# Probably it makes sense to use this daemon to provide search", "# API, so that this daemon is going to be the o...
38.181818
19.568182
def AddTrainingOperators(model, softmax, label): """Adds training operators to the model.""" xent = model.LabelCrossEntropy([softmax, label], 'xent') # compute the expected loss loss = model.AveragedLoss(xent, "loss") # track the accuracy of the model AddAccuracy(model, softmax, label) # use the average loss we just computed to add gradient operators to the # model model.AddGradientOperators([loss]) # do a simple stochastic gradient descent ITER = brew.iter(model, "iter") # set the learning rate schedule LR = model.LearningRate( ITER, "LR", base_lr=-0.1, policy="step", stepsize=1, gamma=0.999) # ONE is a constant value that is used in the gradient update. We only need # to create it once, so it is explicitly placed in param_init_net. ONE = model.param_init_net.ConstantFill([], "ONE", shape=[1], value=1.0) # Now, for each parameter, we do the gradient updates. for param in model.params: # Note how we get the gradient of each parameter - ModelHelper keeps # track of that. param_grad = model.param_to_grad[param] # The update is a simple weighted sum: param = param + param_grad * LR model.WeightedSum([param, ONE, param_grad, LR], param)
[ "def", "AddTrainingOperators", "(", "model", ",", "softmax", ",", "label", ")", ":", "xent", "=", "model", ".", "LabelCrossEntropy", "(", "[", "softmax", ",", "label", "]", ",", "'xent'", ")", "# compute the expected loss", "loss", "=", "model", ".", "Averag...
49.84
16.92
def get(self, tag, default=None): """Get a metadata value. Each metadata value is referenced by a ``tag`` -- a short string such as ``'xlen'`` or ``'audit'``. In the sidecar file these tag names are prepended with ``'Xmp.pyctools.'``, which corresponds to a custom namespace in the XML file. :param str tag: The tag name. :returns: The metadata value associated with ``tag``. :rtype: :py:class:`str` """ full_tag = 'Xmp.pyctools.' + tag if full_tag in self.data: return self.data[full_tag] return default
[ "def", "get", "(", "self", ",", "tag", ",", "default", "=", "None", ")", ":", "full_tag", "=", "'Xmp.pyctools.'", "+", "tag", "if", "full_tag", "in", "self", ".", "data", ":", "return", "self", ".", "data", "[", "full_tag", "]", "return", "default" ]
31.526316
19.368421
def menu(self, venue_id, date): """Get the menu for the venue corresponding to venue_id, on date. :param venue_id: A string representing the id of a venue, e.g. "abc". :param date: A string representing the date of a venue's menu, e.g. "2015-09-20". >>> commons_menu = din.menu("593", "2015-09-20") """ query = "&date=" + date response = self._request(V2_ENDPOINTS['MENUS'] + venue_id + query) return response
[ "def", "menu", "(", "self", ",", "venue_id", ",", "date", ")", ":", "query", "=", "\"&date=\"", "+", "date", "response", "=", "self", ".", "_request", "(", "V2_ENDPOINTS", "[", "'MENUS'", "]", "+", "venue_id", "+", "query", ")", "return", "response" ]
32.666667
21.666667
def request_info_of_jids(self, peer_jids: Union[str, List[str]]): """ Requests basic information (username, display name, picture) of some peer JIDs. When the information arrives, the callback on_peer_info_received() will fire. :param peer_jids: The JID(s) for which to request the information. If you want to request information for more than one JID, supply a list of strings. Otherwise, supply a string """ return self._send_xmpp_element(roster.BatchPeerInfoRequest(peer_jids))
[ "def", "request_info_of_jids", "(", "self", ",", "peer_jids", ":", "Union", "[", "str", ",", "List", "[", "str", "]", "]", ")", ":", "return", "self", ".", "_send_xmpp_element", "(", "roster", ".", "BatchPeerInfoRequest", "(", "peer_jids", ")", ")" ]
60.777778
36.111111
def read_proximity(self, timeout_sec=1): """Read the sensor proximity and return it as an unsigned 16-bit value. The larger the value the closer an object is to the sensor. """ # Ask for a proximity measurement and wait for the response. self._device.write8(VCNL40xx_COMMAND, VCNL40xx_MEASUREPROXIMITY) self._wait_response(VCNL40xx_PROXIMITYREADY, timeout_sec) # Return the proximity response. return self._device.readU16BE(VCNL40xx_PROXIMITYDATA)
[ "def", "read_proximity", "(", "self", ",", "timeout_sec", "=", "1", ")", ":", "# Ask for a proximity measurement and wait for the response.", "self", ".", "_device", ".", "write8", "(", "VCNL40xx_COMMAND", ",", "VCNL40xx_MEASUREPROXIMITY", ")", "self", ".", "_wait_respo...
55.888889
14.777778
async def statistics(self, tube_name=None): """ Returns queue statistics (coroutine) :param tube_name: If specified, statistics by a specific tube is returned, else statistics about all tubes is returned """ args = None if tube_name is not None: args = (tube_name,) res = await self._conn.call('{}.statistics'.format(self._namespace), args) if self._conn.version < (1, 7): # pragma: nocover return res.body[0][0] return res.body[0]
[ "async", "def", "statistics", "(", "self", ",", "tube_name", "=", "None", ")", ":", "args", "=", "None", "if", "tube_name", "is", "not", "None", ":", "args", "=", "(", "tube_name", ",", ")", "res", "=", "await", "self", ".", "_conn", ".", "call", "...
34.6875
16.9375
async def main(): redis = await create_pool(RedisSettings()) job = await redis.enqueue_job('the_task') # get the job's id print(job.job_id) """ > 68362958a244465b9be909db4b7b5ab4 (or whatever) """ # get information about the job, will include results if the job has finished, but # doesn't await the job's result debug(await job.info()) """ > docs/examples/job_results.py:23 main JobDef( function='the_task', args=(), kwargs={}, job_try=None, enqueue_time=datetime.datetime(2019, 4, 23, 13, 58, 56, 781000), score=1556027936781 ) (JobDef) """ # get the Job's status print(await job.status()) """ > JobStatus.queued """ # poll redis for the job result, if the job raised an exception, # it will be raised here # (You'll need the worker running at the same time to get a result here) print(await job.result(timeout=5)) """ > 42 """
[ "async", "def", "main", "(", ")", ":", "redis", "=", "await", "create_pool", "(", "RedisSettings", "(", ")", ")", "job", "=", "await", "redis", ".", "enqueue_job", "(", "'the_task'", ")", "# get the job's id", "print", "(", "job", ".", "job_id", ")", "# ...
24.538462
17.410256
def guess_content_type(self, pathname): """Guess the content type for the given path. :param path: The path of file for which to guess the content type. :return: Returns the content type or ``None`` if the content type could not be determined. Usage: >>> db = ContentTypesDatabase() >>> db.add_config_file('content-types.yaml') >>> g = db.guess_content_type >>> assert g("__init__.py") == "python" >>> assert g("Makefile") == "Makefile" >>> assert g("Makefile.gmake") == "Makefile" >>> assert g("Makefile.py") == "python" >>> assert g("foobar.rb") == "ruby" >>> assert g("wscript") == "python" >>> assert g("foo.coffee") == "coffee-script" >>> assert g("Rakefile") == "ruby" >>> assert g("foobar.xml") == "xml" >>> assert g("foobar.html") == "html" >>> assert g("foo7a738fg") == None >>> assert g("foo.rst") == "structured-text" >>> assert g("foo.md") == "structured-text" >>> assert g("foo.markdown") == "structured-text" """ file_basename = os.path.basename(pathname) content_type = None # Try to determine from the path. if not content_type and self._filename_map.has_key(file_basename): content_type = self._filename_map[file_basename] #logger.debug("Content type of '%s' is '%s' (determined from full "\ # "path).", pathname, content_type) # Try to determine from the suffix. if not content_type and '.' in file_basename: extension = "." + file_basename.split(".")[-1] extension = extension_case_transform_func(extension) try: content_type = self._extension_map[extension] #logger.debug("Content type of '%s' is '%s' (determined from "\ # "suffix '%s').", pathname, content_type, extension) except KeyError: pass # Try to determine from the registered set of regular expression patterns. if not content_type: for regexp, _content_type in self._regexp_map.iteritems(): if regexp.search(file_basename): content_type = _content_type #logger.debug( # "Content type of '%s' is '%s' (matches regexp '%s')", # pathname, content_type, regexp.pattern) break # Try to determine from the file contents. 
if os.path.exists(pathname): with open(pathname, 'rb') as f: content = f.read() if content.startswith("<?xml"): # cheap XML sniffing content_type = "XML" # TODO: Try to determine from mime-type. return content_type
[ "def", "guess_content_type", "(", "self", ",", "pathname", ")", ":", "file_basename", "=", "os", ".", "path", ".", "basename", "(", "pathname", ")", "content_type", "=", "None", "# Try to determine from the path.", "if", "not", "content_type", "and", "self", "."...
42.632353
18.161765
def ensure_local_image( local_image: str, parent_image: str = SC_PARENT_IMAGE, java_image: str = SC_JAVA_IMAGE, starcraft_base_dir: str = SCBW_BASE_DIR, starcraft_binary_link: str = SC_BINARY_LINK, ) -> None: """ Check if `local_image` is present locally. If it is not, pull parent images and build. This includes pulling starcraft binary. :raises docker.errors.ImageNotFound :raises docker.errors.APIError """ logger.info(f"checking if there is local image {local_image}") docker_images = docker_client.images.list(local_image) if len(docker_images) and docker_images[0].short_id is not None: logger.info(f"image {local_image} found locally.") return logger.info("image not found locally, creating...") pkg_docker_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), "local_docker") base_dir = os.path.join(starcraft_base_dir, "docker") logger.info(f"copying files from {pkg_docker_dir} to {base_dir}.") distutils.dir_util.copy_tree(pkg_docker_dir, base_dir) starcraft_zip_file = f"{base_dir}/starcraft.zip" if not os.path.exists(starcraft_zip_file): logger.info(f"downloading starcraft.zip to {starcraft_zip_file}") download_file(starcraft_binary_link, starcraft_zip_file) logger.info(f"pulling image {parent_image}, this may take a while...") pulled_image = docker_client.images.pull(parent_image) pulled_image.tag(java_image) logger.info(f"building local image {local_image}, this may take a while...") docker_client.images.build(path=base_dir, dockerfile="game.dockerfile", tag=local_image) logger.info(f"successfully built image {local_image}")
[ "def", "ensure_local_image", "(", "local_image", ":", "str", ",", "parent_image", ":", "str", "=", "SC_PARENT_IMAGE", ",", "java_image", ":", "str", "=", "SC_JAVA_IMAGE", ",", "starcraft_base_dir", ":", "str", "=", "SCBW_BASE_DIR", ",", "starcraft_binary_link", ":...
44.421053
22.105263
def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: ExecutionStepContextContext for this ExecutionStepContextInstance :rtype: twilio.rest.studio.v1.flow.execution.execution_step.execution_step_context.ExecutionStepContextContext """ if self._context is None: self._context = ExecutionStepContextContext( self._version, flow_sid=self._solution['flow_sid'], execution_sid=self._solution['execution_sid'], step_sid=self._solution['step_sid'], ) return self._context
[ "def", "_proxy", "(", "self", ")", ":", "if", "self", ".", "_context", "is", "None", ":", "self", ".", "_context", "=", "ExecutionStepContextContext", "(", "self", ".", "_version", ",", "flow_sid", "=", "self", ".", "_solution", "[", "'flow_sid'", "]", "...
45.625
24.125
def append_cell_value(self, column_family_id, column, value): """Appends a value to an existing cell. .. note:: This method adds a read-modify rule protobuf to the accumulated read-modify rules on this row, but does not make an API request. To actually send an API request (with the rules) to the Google Cloud Bigtable API, call :meth:`commit`. For example: .. literalinclude:: snippets_table.py :start-after: [START bigtable_row_append_cell_value] :end-before: [END bigtable_row_append_cell_value] :type column_family_id: str :param column_family_id: The column family that contains the column. Must be of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. :type column: bytes :param column: The column within the column family where the cell is located. :type value: bytes :param value: The value to append to the existing value in the cell. If the targeted cell is unset, it will be treated as containing the empty string. """ column = _to_bytes(column) value = _to_bytes(value) rule_pb = data_v2_pb2.ReadModifyWriteRule( family_name=column_family_id, column_qualifier=column, append_value=value ) self._rule_pb_list.append(rule_pb)
[ "def", "append_cell_value", "(", "self", ",", "column_family_id", ",", "column", ",", "value", ")", ":", "column", "=", "_to_bytes", "(", "column", ")", "value", "=", "_to_bytes", "(", "value", ")", "rule_pb", "=", "data_v2_pb2", ".", "ReadModifyWriteRule", ...
40.083333
23.25
def timed_call(self, ms, callback, *args, **kwargs): """ Invoke a callable on the main event loop thread at a specified time in the future. Parameters ---------- ms : int The time to delay, in milliseconds, before executing the callable. callback : callable The callable object to execute at some point in the future. *args, **kwargs Any additional positional and keyword arguments to pass to the callback. """ return self.loop.timed_call(ms, callback, *args, **kwargs)
[ "def", "timed_call", "(", "self", ",", "ms", ",", "callback", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "loop", ".", "timed_call", "(", "ms", ",", "callback", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
30.894737
22.210526
def update_x(self, x, indices=None): """ Update partial or entire x. Args: x (numpy.ndarray or list): to-be-updated x indices (numpy.ndarray or list or optional): to-be-updated qubit indices Returns: Pauli: self Raises: QiskitError: when updating whole x, the number of qubits must be the same. """ x = _make_np_bool(x) if indices is None: if len(self._x) != len(x): raise QiskitError("During updating whole x, you can not change " "the number of qubits.") self._x = x else: if not isinstance(indices, list) and not isinstance(indices, np.ndarray): indices = [indices] for p, idx in enumerate(indices): self._x[idx] = x[p] return self
[ "def", "update_x", "(", "self", ",", "x", ",", "indices", "=", "None", ")", ":", "x", "=", "_make_np_bool", "(", "x", ")", "if", "indices", "is", "None", ":", "if", "len", "(", "self", ".", "_x", ")", "!=", "len", "(", "x", ")", ":", "raise", ...
32.222222
21.333333
def CQO(cpu): """ RDX:RAX = sign-extend of RAX. """ res = Operators.SEXTEND(cpu.RAX, 64, 128) cpu.RAX = Operators.EXTRACT(res, 0, 64) cpu.RDX = Operators.EXTRACT(res, 64, 64)
[ "def", "CQO", "(", "cpu", ")", ":", "res", "=", "Operators", ".", "SEXTEND", "(", "cpu", ".", "RAX", ",", "64", ",", "128", ")", "cpu", ".", "RAX", "=", "Operators", ".", "EXTRACT", "(", "res", ",", "0", ",", "64", ")", "cpu", ".", "RDX", "="...
30.857143
7.714286
def listen(self): """Starts the listen loop. If threading is enabled, then the loop will be started in its own thread. Args: None Returns: None """ self.listening = True if self.threading: from threading import Thread self.listen_thread = Thread(target=self.listen_loop) self.listen_thread.daemon = True self.listen_thread.start() self.scheduler_thread = Thread(target=self.scheduler) self.scheduler_thread.daemon = True self.scheduler_thread.start() else: self.listen_loop()
[ "def", "listen", "(", "self", ")", ":", "self", ".", "listening", "=", "True", "if", "self", ".", "threading", ":", "from", "threading", "import", "Thread", "self", ".", "listen_thread", "=", "Thread", "(", "target", "=", "self", ".", "listen_loop", ")",...
25.48
19.76
def _separate_objects_by_boxes(self, objects: Set[Object]) -> Dict[Box, List[Object]]: """ Given a set of objects, separate them by the boxes they belong to and return a dict. """ objects_per_box: Dict[Box, List[Object]] = defaultdict(list) for box in self.boxes: for object_ in objects: if object_ in box.objects: objects_per_box[box].append(object_) return objects_per_box
[ "def", "_separate_objects_by_boxes", "(", "self", ",", "objects", ":", "Set", "[", "Object", "]", ")", "->", "Dict", "[", "Box", ",", "List", "[", "Object", "]", "]", ":", "objects_per_box", ":", "Dict", "[", "Box", ",", "List", "[", "Object", "]", "...
46.1
16.9
def init_logging(logfile=None, loglevel=logging.INFO, configfile=None): """ Configures the logging using either basic filename + loglevel or passed config file path. This is performed separately from L{init_config()} in order to support the case where logging should happen independent of (usu. *after*) other aspects of the configuration initialization. For example, if logging may need to be initialized within a daemon context. @param logfile: An explicitly specified logfile destination. If this is specified in addition to default logging, a warning will be issued. @type logfile: C{str} @param loglevel: Which level to use when logging to explicitly specified file or stdout. @type loglevel: C{int} @param configfile: The path to a configuration file. This takes precedence over any explicitly specified logfile/loglevel (but a warning will be logged if both are specified). If the file is not specified or does not exist annd no logfile was specified, then the default.cfg configuration file will be used to initialize logging. @type configfile: C{str} """ # If a config file was specified, we will use that in place of the # explicitly use_configfile = False if configfile and os.path.exists(configfile): testcfg = ConfigParser() read = testcfg.read(configfile) use_configfile = (read and testcfg.has_section('loggers')) if use_configfile: logging.config.fileConfig(configfile) if logfile: msg = "Config file conflicts with explicitly specified logfile; config file takes precedence." logging.warn(msg) else: format = '%(asctime)s [%(threadName)s] %(name)s - %(levelname)s - %(message)s' if logfile: logging.basicConfig( filename=logfile, level=loglevel, format=format) else: logging.basicConfig(level=loglevel, format=format)
[ "def", "init_logging", "(", "logfile", "=", "None", ",", "loglevel", "=", "logging", ".", "INFO", ",", "configfile", "=", "None", ")", ":", "# If a config file was specified, we will use that in place of the", "# explicitly", "use_configfile", "=", "False", "if", "con...
47.642857
30.785714
def wrap(access_pyxb, read_only=False): """Work with the AccessPolicy in a SystemMetadata PyXB object. Args: access_pyxb : AccessPolicy PyXB object The AccessPolicy to modify. read_only: bool Do not update the wrapped AccessPolicy. When only a single AccessPolicy operation is needed, there's no need to use this context manager. Instead, use the generated context manager wrappers. """ w = AccessPolicyWrapper(access_pyxb) yield w if not read_only: w.get_normalized_pyxb()
[ "def", "wrap", "(", "access_pyxb", ",", "read_only", "=", "False", ")", ":", "w", "=", "AccessPolicyWrapper", "(", "access_pyxb", ")", "yield", "w", "if", "not", "read_only", ":", "w", ".", "get_normalized_pyxb", "(", ")" ]
29.333333
20.055556
def initPos(self, startpos=0.0): """ initialize the elements position [m] in lattice, the starting point is 0 [m] for the first element by default. :param startpos: starting point, 0 [m] by default """ spos = startpos for ele in self._lattice_eleobjlist: # print("{name:<10s}: {pos:<10.3f}".format(name=ele.name, pos=spos)) ele.setPosition(spos) spos += ele.getLength()
[ "def", "initPos", "(", "self", ",", "startpos", "=", "0.0", ")", ":", "spos", "=", "startpos", "for", "ele", "in", "self", ".", "_lattice_eleobjlist", ":", "# print(\"{name:<10s}: {pos:<10.3f}\".format(name=ele.name, pos=spos))", "ele", ".", "setPosition", "(", "spo...
41.090909
14.727273
def annotate_snv(adpter, variant): """Annotate an SNV/INDEL variant Args: adapter(loqusdb.plugin.adapter) variant(cyvcf2.Variant) """ variant_id = get_variant_id(variant) variant_obj = adapter.get_variant(variant={'_id':variant_id}) annotated_variant = annotated_variant(variant, variant_obj) return annotated_variant
[ "def", "annotate_snv", "(", "adpter", ",", "variant", ")", ":", "variant_id", "=", "get_variant_id", "(", "variant", ")", "variant_obj", "=", "adapter", ".", "get_variant", "(", "variant", "=", "{", "'_id'", ":", "variant_id", "}", ")", "annotated_variant", ...
30
14.916667
def check_experiment_id(args): '''check if the id is valid ''' update_experiment() experiment_config = Experiments() experiment_dict = experiment_config.get_all_experiments() if not experiment_dict: print_normal('There is no experiment running...') return None if not args.id: running_experiment_list = [] for key in experiment_dict.keys(): if isinstance(experiment_dict[key], dict): if experiment_dict[key].get('status') != 'STOPPED': running_experiment_list.append(key) elif isinstance(experiment_dict[key], list): # if the config file is old version, remove the configuration from file experiment_config.remove_experiment(key) if len(running_experiment_list) > 1: print_error('There are multiple experiments, please set the experiment id...') experiment_information = "" for key in running_experiment_list: experiment_information += (EXPERIMENT_DETAIL_FORMAT % (key, experiment_dict[key]['status'], \ experiment_dict[key]['port'], experiment_dict[key].get('platform'), experiment_dict[key]['startTime'], experiment_dict[key]['endTime'])) print(EXPERIMENT_INFORMATION_FORMAT % experiment_information) exit(1) elif not running_experiment_list: print_error('There is no experiment running!') return None else: return running_experiment_list[0] if experiment_dict.get(args.id): return args.id else: print_error('Id not correct!') return None
[ "def", "check_experiment_id", "(", "args", ")", ":", "update_experiment", "(", ")", "experiment_config", "=", "Experiments", "(", ")", "experiment_dict", "=", "experiment_config", ".", "get_all_experiments", "(", ")", "if", "not", "experiment_dict", ":", "print_norm...
45.583333
20.75
def data_filler_detailed_registration(self, number_of_rows, db):
    '''creates and fills the table with detailed regis. information

    Args:
        number_of_rows: how many fake registration records to generate.
        db: the datastore object; each record is persisted via ``db.save``.
    '''
    # FIX: removed the unused local `data_list = list()` that was never
    # appended to or read.
    try:
        detailed_registration = db
        for _ in range(number_of_rows):
            post_det_reg = {
                "id": rnd_id_generator(self),
                "email": self.faker.safe_email(),
                "password": self.faker.md5(raw_output=False),
                "lastname": self.faker.last_name(),
                "name": self.faker.first_name(),
                "adress": self.faker.address(),
                "phone": self.faker.phone_number()
            }
            detailed_registration.save(post_det_reg)
        # NOTE(review): success is logged at WARNING to match the module's
        # existing pattern; INFO would normally be more appropriate.
        logger.warning(
            'detailed_registration Commits are successful after write job!',
            extra=d)
    except Exception as e:
        logger.error(e, extra=d)
[ "def", "data_filler_detailed_registration", "(", "self", ",", "number_of_rows", ",", "db", ")", ":", "try", ":", "detailed_registration", "=", "db", "data_list", "=", "list", "(", ")", "for", "i", "in", "range", "(", "0", ",", "number_of_rows", ")", ":", "...
38.12
19.16
def historicalData(self, reqId, date, open, high, low, close, volume, barCount, WAP, hasGaps):
    """historicalData(EWrapper self, TickerId reqId, IBString const & date, double open, double high, double low, double close, int volume, int barCount, double WAP, int hasGaps)

    SWIG-generated delegating wrapper: forwards one bar of historical
    market data straight to the underlying C++ ``EWrapper`` implementation
    in the ``_swigibpy`` extension module.

    NOTE: the parameter names ``open`` and ``close`` shadow Python builtins,
    but they are part of the generated signature and must be kept as-is.
    """
    # Pure delegation -- no Python-side logic.
    return _swigibpy.EWrapper_historicalData(self, reqId, date, open, high, low, close, volume, barCount, WAP, hasGaps)
[ "def", "historicalData", "(", "self", ",", "reqId", ",", "date", ",", "open", ",", "high", ",", "low", ",", "close", ",", "volume", ",", "barCount", ",", "WAP", ",", "hasGaps", ")", ":", "return", "_swigibpy", ".", "EWrapper_historicalData", "(", "self",...
134
45.666667
def p_list_andnot(self, p):
    'list : list ANDNOT list'
    # NOTE: the docstring above is a PLY grammar production and is parsed by
    # the parser generator -- it must not be edited as prose.
    # Set difference on the two operands' indices: keep only the rows of the
    # left result whose index labels do not appear in the right result.
    # NOTE(review): passing a set to `.loc` works on older pandas versions
    # but newer releases prefer a list-like -- confirm the pandas version.
    p[0] = p[1].loc[set(p[1].index) - set(p[3].index)]
[ "def", "p_list_andnot", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", ".", "loc", "[", "set", "(", "p", "[", "1", "]", ".", "index", ")", "-", "set", "(", "p", "[", "3", "]", ".", "index", ")", "]" ]
39.333333
12.666667
def layer_norm_compute(x, epsilon, scale, bias, layer_collection=None):
  """Raw layer-normalization computation over the last axis of x."""
  # Keep the original (pre-cast) scale/bias pair around before the casts
  # below convert them to tensors matching x's dtype.
  params = (scale, bias)

  epsilon = cast_like(epsilon, x)
  scale = cast_like(scale, x)
  bias = cast_like(bias, x)

  # Mean and variance along the feature (last) dimension.
  mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
  variance = tf.reduce_mean(
      tf.squared_difference(x, mean), axis=[-1], keepdims=True)

  # Normalize, then apply the learned affine transform.
  normalized = (x - mean) * tf.rsqrt(variance + epsilon)
  return normalized * scale + bias
[ "def", "layer_norm_compute", "(", "x", ",", "epsilon", ",", "scale", ",", "bias", ",", "layer_collection", "=", "None", ")", ":", "# Save these before they get converted to tensors by the casting below", "params", "=", "(", "scale", ",", "bias", ")", "epsilon", ",",...
32.375
25.3125
def _reset(self, **kwargs):
    """
    Reset after repopulating from API (or when initializing).
    """
    # Copy every provided keyword straight onto the instance.
    for name, value in kwargs.items():
        setattr(self, name, value)

    # Backfill class-declared defaults, but only for attributes that were
    # not populated above and whose declared default is not None.
    for attr, default in self.ATTRIBUTES.items():
        if default is not None and not hasattr(self, attr):
            setattr(self, attr, default)
[ "def", "_reset", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# set object attributes from params", "for", "key", "in", "kwargs", ":", "setattr", "(", "self", ",", "key", ",", "kwargs", "[", "key", "]", ")", "# set defaults (if need be) where the default is ...
38.5
15.166667
def column_family_name(cls, include_keyspace=True):
    """
    Returns the column family name if it's been defined;
    otherwise derives one from the class name.
    """
    if cls.__table_name__:
        cf_name = cls.__table_name__.lower()
    else:
        # Polymorphic subclasses share their base class's table name.
        if cls._is_polymorphic and not cls._is_polymorphic_base:
            return cls._polymorphic_base.column_family_name(
                include_keyspace=include_keyspace)

        # Derive snake_case from the CamelCase class name.
        camel_boundary = re.compile(r'([a-z])([A-Z])')
        cf_name = camel_boundary.sub(
            lambda m: '{}_{}'.format(m.group(1), m.group(2).lower()),
            cls.__name__)

    # Cassandra limits table-name length; keep the last 48 characters,
    # lowercase the result, and strip any leading underscores.
    cf_name = cf_name[-48:].lower()
    cf_name = re.sub(r'^_+', '', cf_name)

    if not include_keyspace:
        return cf_name
    return '{}.{}'.format(cls._get_keyspace(), cf_name)
[ "def", "column_family_name", "(", "cls", ",", "include_keyspace", "=", "True", ")", ":", "cf_name", "=", "''", "if", "cls", ".", "__table_name__", ":", "cf_name", "=", "cls", ".", "__table_name__", ".", "lower", "(", ")", "else", ":", "# get polymorphic base...
45.173913
20.304348
def WriteTo(self, values):
    """Writes values to a byte stream.

    Args:
      values (tuple[object, ...]): values to copy to the byte stream.

    Returns:
      bytes: byte stream packed according to ``self._struct``.

    Raises:
      IOError: if the values cannot be packed into the byte stream.
    """
    try:
        packed = self._struct.pack(*values)
    except (TypeError, struct.error) as error:
        raise IOError('Unable to write stream with error: {0!s}'.format(
            error))
    return packed
[ "def", "WriteTo", "(", "self", ",", "values", ")", ":", "try", ":", "return", "self", ".", "_struct", ".", "pack", "(", "*", "values", ")", "except", "(", "TypeError", ",", "struct", ".", "error", ")", "as", "exception", ":", "raise", "IOError", "(",...
26.555556
20.611111
def receive_connection(host="localhost", port=8080):
    """Wait for and then return a connected socket.

    Opens a TCP listening socket and blocks until a single client
    connects; the listening socket is then closed and only the accepted
    client socket is returned.

    Args:
        host (str): interface to bind to. Defaults to "localhost",
            matching the original hard-coded behavior.
        port (int): TCP port to listen on. Defaults to 8080, matching the
            original hard-coded behavior.

    Returns:
        socket.socket: the accepted client socket.
    """
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # Allow quick restarts without waiting for TIME_WAIT to expire.
        server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server.bind((host, port))
        server.listen(1)
        client = server.accept()[0]
    finally:
        # FIX: close the listening socket even if bind/listen/accept
        # raises; the original leaked it on any setup failure.
        server.close()
    return client
[ "def", "receive_connection", "(", ")", ":", "server", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "server", ".", "setsockopt", "(", "socket", ".", "SOL_SOCKET", ",", "socket", ".", "SO_REUSEADDR", ",...
31
19.230769
def hash(self):
    """Return an hash string computed on the PSF data."""
    components = []
    # Sort attributes by name so the digest is stable across runs.
    for _name, value in sorted(self.__dict__.items()):
        if callable(value):
            continue
        if isinstance(value, np.ndarray):
            # NOTE(review): ndarray.tostring() is deprecated in favor of
            # tobytes() in modern numpy -- kept for exact behavior parity.
            components.append(value.tostring())
        else:
            components.append(str(value))
    return hashlib.md5(repr(components).encode()).hexdigest()
[ "def", "hash", "(", "self", ")", ":", "hash_list", "=", "[", "]", "for", "key", ",", "value", "in", "sorted", "(", "self", ".", "__dict__", ".", "items", "(", ")", ")", ":", "if", "not", "callable", "(", "value", ")", ":", "if", "isinstance", "("...
42.5
13.8
def get_rows(self, infer_nrows, skiprows=None):
    """
    Read rows from self.f, skipping as specified.

    Every consumed line (skipped or not) is buffered and re-exposed via
    ``self.buffer`` so later readers still see the skipped rows; only the
    non-skipped lines are returned for column-spec detection.

    Parameters
    ----------
    infer_nrows : int
        Number of rows to read from self.f, not counting rows that are
        skipped.
    skiprows : set, optional
        Indices of rows to skip.

    Returns
    -------
    detect_rows : list of str
        A list containing the rows to read.
    """
    skipped = set() if skiprows is None else skiprows
    kept = []
    consumed = []
    for idx, line in enumerate(self.f):
        consumed.append(line)
        if idx in skipped:
            continue
        kept.append(line)
        if len(kept) >= infer_nrows:
            break
    # Replay everything we pulled off self.f for downstream consumers.
    self.buffer = iter(consumed)
    return kept
[ "def", "get_rows", "(", "self", ",", "infer_nrows", ",", "skiprows", "=", "None", ")", ":", "if", "skiprows", "is", "None", ":", "skiprows", "=", "set", "(", ")", "buffer_rows", "=", "[", "]", "detect_rows", "=", "[", "]", "for", "i", ",", "row", "...
30.675676
15.216216
def get_homogenous_list_type(list_):
    """
    Returns the best matching python type even if it is an ndarray;
    assumes all items in the list are of the same type (not checked).
    """
    # TODO Expand and make work correctly
    # Pick a representative item: the array itself for ndarrays, the
    # first element for non-empty lists, otherwise nothing.
    if HAVE_NUMPY and isinstance(list_, np.ndarray):
        sample = list_
    elif isinstance(list_, list) and len(list_) > 0:
        sample = list_[0]
    else:
        return None

    # Order matters: float before int, and both before the bool/str
    # checks, mirroring the original if/elif chain.
    for predicate, candidate in ((is_float, float), (is_int, int),
                                 (is_bool, bool), (is_str, str)):
        if predicate(sample):
            return candidate
    return get_type(sample)
[ "def", "get_homogenous_list_type", "(", "list_", ")", ":", "# TODO Expand and make work correctly", "if", "HAVE_NUMPY", "and", "isinstance", "(", "list_", ",", "np", ".", "ndarray", ")", ":", "item", "=", "list_", "elif", "isinstance", "(", "list_", ",", "list",...
27.807692
16.269231
def locked_coroutine(f):
    """
    Method decorator that replaces asyncio.coroutine and guarantees that
    this specific method of this class instance will not be executed twice
    at the same time.

    Args:
        f: the coroutine function to serialize; its first positional
           argument must be the instance the lock is stored on.

    Returns:
        A wrapped coroutine function that acquires a per-instance,
        per-method lock before delegating to ``f``.

    NOTE(review): ``asyncio.coroutine`` and generator-based coroutines were
    removed in Python 3.11; this decorator only runs on older interpreters
    and should eventually be migrated to ``async def`` / ``await``.
    """
    import functools

    @asyncio.coroutine
    @functools.wraps(f)  # FIX: preserve f's name/docstring on the wrapper
    def new_function(*args, **kwargs):
        # Lazily store one lock per decorated method as an attribute of
        # the instance (args[0]), keyed by the method name.
        lock_var_name = "__" + f.__name__ + "_lock"
        if not hasattr(args[0], lock_var_name):
            setattr(args[0], lock_var_name, asyncio.Lock())
        with (yield from getattr(args[0], lock_var_name)):
            return (yield from f(*args, **kwargs))

    return new_function
[ "def", "locked_coroutine", "(", "f", ")", ":", "@", "asyncio", ".", "coroutine", "def", "new_function", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# In the instance of the class we will store", "# a lock has an attribute.", "lock_var_name", "=", "\"__\"",...
33.473684
16.315789
def cmd(send, msg, args):
    """Reports the difference between now and some specified time.

    Syntax: {command} <time>

    """
    parser = arguments.ArgParser(args['config'])
    parser.add_argument('date', nargs='*', action=arguments.DateParser)

    try:
        cmdargs = parser.parse_args(msg)
    except arguments.ArgumentException as e:
        send(str(e))
        return

    if not cmdargs.date:
        send("Time until when?")
        return

    delta = dateutil.relativedelta.relativedelta(cmdargs.date,
                                                 datetime.datetime.now())

    # Build the countdown message, skipping zero-valued units.
    parts = ["%s is " % cmdargs.date.strftime("%x")]
    for amount, unit in ((delta.years, "years"), (delta.months, "months"),
                         (delta.days, "days"), (delta.hours, "hours"),
                         (delta.minutes, "minutes"),
                         (delta.seconds, "seconds")):
        if amount:
            parts.append("%d %s " % (amount, unit))
    parts.append("away")
    send("".join(parts))
[ "def", "cmd", "(", "send", ",", "msg", ",", "args", ")", ":", "parser", "=", "arguments", ".", "ArgParser", "(", "args", "[", "'config'", "]", ")", "parser", ".", "add_argument", "(", "'date'", ",", "nargs", "=", "'*'", ",", "action", "=", "arguments...
31.03125
16.5625
def remove_team(name, profile="github"):
    '''
    Remove a github team.

    name
        The name of the team to be removed.

    profile
        The name of the profile configuration to use. Defaults to ``github``.

    CLI Example:

    .. code-block:: bash

        salt myminion github.remove_team 'team_name'

    .. versionadded:: 2016.11.0
    '''
    team_info = get_team(name, profile=profile)
    if not team_info:
        log.error('Team %s to be removed does not exist.', name)
        return False

    try:
        client = _get_client(profile)
        org_name = _get_config_value(profile, 'org_name')
        organization = client.get_organization(org_name)
        organization.get_team(team_info['id']).delete()
        # Refresh the team cache; deletion succeeded iff the name is gone.
        return list_teams(ignore_cache=True, profile=profile).get(name) is None
    except github.GithubException:
        log.exception('Error deleting a team')
        return False
[ "def", "remove_team", "(", "name", ",", "profile", "=", "\"github\"", ")", ":", "team_info", "=", "get_team", "(", "name", ",", "profile", "=", "profile", ")", "if", "not", "team_info", ":", "log", ".", "error", "(", "'Team %s to be removed does not exist.'", ...
27.181818
22.393939