Dataset schema:
  _id               string (2-7 chars)
  title             string (1-88 chars)
  partition         string (3 classes)
  text              string (75-19.8k chars)
  language          string (1 value)
  meta_information  dict
q263400
PyMataCommandHandler._string_data
validation
def _string_data(self, data):
    """
    This method handles the incoming string data message from Firmata.
    The string is printed to the console.

    :param data: Message data from Firmata

    :return: No return value.
    """
    print("_string_data:")
    string_to_print = []
    # Firmata sends each character as two 7-bit bytes; take every other byte.
    for i in data[::2]:
        string_to_print.append(chr(i))
    print("".join(string_to_print))
python
{ "resource": "" }
q263401
PyMataCommandHandler.run
validation
def run(self):
    """
    This method starts the thread that continuously runs to receive and
    interpret messages coming from Firmata. This must be the last method
    in this file.

    It also checks the deque for messages to be sent to Firmata.
    """
    # To add a command to the command dispatch table, append here.
    self.command_dispatch.update({self.REPORT_VERSION: [self.report_version, 2]})
    self.command_dispatch.update({self.REPORT_FIRMWARE: [self.report_firmware, 1]})
    self.command_dispatch.update({self.ANALOG_MESSAGE: [self.analog_message, 2]})
    self.command_dispatch.update({self.DIGITAL_MESSAGE: [self.digital_message, 2]})
    self.command_dispatch.update({self.ENCODER_DATA: [self.encoder_data, 3]})
    self.command_dispatch.update({self.SONAR_DATA: [self.sonar_data, 3]})
    self.command_dispatch.update({self.STRING_DATA: [self._string_data, 2]})
    self.command_dispatch.update({self.I2C_REPLY: [self.i2c_reply, 2]})
    self.command_dispatch.update({self.CAPABILITY_RESPONSE: [self.capability_response, 2]})
    self.command_dispatch.update({self.PIN_STATE_RESPONSE: [self.pin_state_response, 2]})
    self.command_dispatch.update({self.ANALOG_MAPPING_RESPONSE: [self.analog_mapping_response, 2]})
    self.command_dispatch.update({self.STEPPER_DATA: [self.stepper_version_response, 2]})

    while not self.is_stopped():
        if len(self.pymata.command_deque):
            # get next byte from the deque and process it
            data = self.pymata.command_deque.popleft()

            # this list will be populated with the received data for the command
            command_data = []

            # process sysex commands
            if data == self.START_SYSEX:
                # next char is the actual sysex command
                # wait until we can get data from the deque
                while len(self.pymata.command_deque) == 0:
                    pass
                sysex_command = self.pymata.command_deque.popleft()
                # retrieve the associated command_dispatch entry for this command
                dispatch_entry = self.command_dispatch.get(sysex_command)

                # get a "pointer" to the method that will process this command
                method = dispatch_entry[0]

                # now get the rest of the data excluding the END_SYSEX byte
                end_of_sysex = False
                while not end_of_sysex:
                    # wait for more data to arrive
                    while len(self.pymata.command_deque) == 0:
                        pass
                    data = self.pymata.command_deque.popleft()
                    if data != self.END_SYSEX:
                        command_data.append(data)
                    else:
                        end_of_sysex = True

                        # invoke the method to process the command
                        method(command_data)

                # go to the beginning of the loop to process the next command
                continue
            # is this a command byte in the range of 0x80-0xff - these are the non-sysex messages
            elif 0x80 <= data <= 0xff:
                # look up the method for the command in the command dispatch table
                # for the digital reporting the command value is modified with port number
                # the handler needs the port to properly process, so decode that from the
                # command and place in command_data
                if 0x90 <= data <= 0x9f:
                    port = data & 0xf
                    command_data.append(port)
                    data = 0x90
                # the pin number for analog data is embedded in the command, so decode it
                elif 0xe0 <= data <= 0xef:
                    pin = data & 0xf
                    command_data.append(pin)
                    data = 0xe0
                else:
                    pass

                dispatch_entry = self.command_dispatch.get(data)

                # this calls the method retrieved from the dispatch table
                method = dispatch_entry[0]

                # get the number of parameters that this command provides
                num_args = dispatch_entry[1]

                # look at the number of args that the selected method requires
                # now get that number of bytes to pass to the called method
                for i in range(num_args):
                    while len(self.pymata.command_deque) == 0:
                        pass
                    data = self.pymata.command_deque.popleft()
                    command_data.append(data)

                # go execute the command with the argument list
                method(command_data)

                # go to the beginning of the loop to process the next command
                continue
        else:
            time.sleep(.1)
python
{ "resource": "" }
q263402
Haul.retrieve_url
validation
def retrieve_url(self, url):
    """
    Use requests to fetch remote content
    """
    try:
        r = requests.get(url)
    except requests.ConnectionError:
        raise exceptions.RetrieveError('Connection fail')

    if r.status_code >= 400:
        raise exceptions.RetrieveError('Connected, but status code is %s' % (r.status_code))

    real_url = r.url
    content = r.content

    try:
        content_type = r.headers['Content-Type']
    except KeyError:
        content_type, encoding = mimetypes.guess_type(real_url, strict=False)

    self.response = r

    return content_type.lower(), content
python
{ "resource": "" }
q263403
HaulResult.image_urls
validation
def image_urls(self):
    """
    Combine finder_image_urls and extender_image_urls,
    remove duplicates but keep order
    """
    all_image_urls = self.finder_image_urls[:]
    for image_url in self.extender_image_urls:
        if image_url not in all_image_urls:
            all_image_urls.append(image_url)
    return all_image_urls
python
{ "resource": "" }
q263404
background_image_finder
validation
def background_image_finder(pipeline_index, soup, finder_image_urls=[], *args, **kwargs):
    """
    Find image URL in background-image

    Example:
        <div style="width: 100%; height: 100%;
                    background-image: url(http://distilleryimage10.ak.instagram.com/bde04558a43b11e28e5d22000a1f979a_7.jpg);"
             class="Image iLoaded iWithTransition Frame"
             src="http://distilleryimage10.ak.instagram.com/bde04558a43b11e28e5d22000a1f979a_7.jpg"></div>
    to
        http://distilleryimage10.ak.instagram.com/bde04558a43b11e28e5d22000a1f979a_7.jpg
    """
    now_finder_image_urls = []

    for tag in soup.find_all(style=True):
        style_string = tag['style']
        if 'background-image' in style_string.lower():
            style = cssutils.parseStyle(style_string)
            background_image = style.getProperty('background-image')
            if background_image:
                for property_value in background_image.propertyValue:
                    background_image_url = str(property_value.value)
                    if background_image_url:
                        if (background_image_url not in finder_image_urls) and \
                           (background_image_url not in now_finder_image_urls):
                            now_finder_image_urls.append(background_image_url)

    output = {}
    output['finder_image_urls'] = finder_image_urls + now_finder_image_urls

    return output
python
{ "resource": "" }
q263405
StrictRedisCluster._getnodenamefor
validation
def _getnodenamefor(self, name):
    "Return the node name where the ``name`` would land to"
    return 'node_' + str(
        (abs(binascii.crc32(b(name)) & 0xffffffff) % self.no_servers) + 1)
python
{ "resource": "" }
q263406
StrictRedisCluster.getnodefor
validation
def getnodefor(self, name):
    "Return the node where the ``name`` would land to"
    node = self._getnodenamefor(name)
    return {node: self.cluster['nodes'][node]}
python
{ "resource": "" }
q263407
StrictRedisCluster.object
validation
def object(self, infotype, key):
    "Return the encoding, idletime, or refcount about the key"
    redisent = self.redises[self._getnodenamefor(key) + '_slave']
    return getattr(redisent, 'object')(infotype, key)
python
{ "resource": "" }
q263408
StrictRedisCluster._rc_brpoplpush
validation
def _rc_brpoplpush(self, src, dst, timeout=0):
    """
    Pop a value off the tail of ``src``, push it on the head of ``dst``
    and then return it.

    This command blocks until a value is in ``src`` or until ``timeout``
    seconds elapse, whichever is first. A ``timeout`` value of 0 blocks
    forever. Not atomic
    """
    rpop = self.brpop(src, timeout)
    if rpop is not None:
        self.lpush(dst, rpop[1])
        return rpop[1]
    return None
python
{ "resource": "" }
q263409
StrictRedisCluster._rc_rpoplpush
validation
def _rc_rpoplpush(self, src, dst):
    """
    RPOP a value off of the ``src`` list and LPUSH it
    on to the ``dst`` list. Returns the value.
    """
    rpop = self.rpop(src)
    if rpop is not None:
        self.lpush(dst, rpop)
        return rpop
    return None
python
{ "resource": "" }
q263410
StrictRedisCluster._rc_smove
validation
def _rc_smove(self, src, dst, value):
    """
    Move ``value`` from set ``src`` to set ``dst``

    not atomic
    """
    if self.type(src) != b("set"):
        return self.smove(src + "{" + src + "}", dst, value)
    if self.type(dst) != b("set"):
        return self.smove(dst + "{" + dst + "}", src, value)
    if self.srem(src, value):
        return 1 if self.sadd(dst, value) else 0
    return 0
python
{ "resource": "" }
q263411
StrictRedisCluster._rc_sunion
validation
def _rc_sunion(self, src, *args):
    """
    Returns the members of the set resulting from the union between
    the first set and all the successive sets.
    """
    args = list_or_args(src, args)
    src_set = self.smembers(args.pop(0))
    # note: the original guard ``if src_set is not set([])`` compared identity
    # and was always true, so the union is computed unconditionally
    for key in args:
        src_set.update(self.smembers(key))
    return src_set
python
{ "resource": "" }
q263412
StrictRedisCluster._rc_sunionstore
validation
def _rc_sunionstore(self, dst, src, *args):
    """
    Store the union of sets ``src``, ``args`` into a new set named ``dest``.
    Returns the number of keys in the new set.
    """
    args = list_or_args(src, args)
    result = self.sunion(*args)
    # the original checked ``result is not set([])``, which is always true;
    # a truthiness check is what was intended (avoid sadd with no members)
    if result:
        return self.sadd(dst, *list(result))
    return 0
python
{ "resource": "" }
q263413
StrictRedisCluster._rc_msetnx
validation
def _rc_msetnx(self, mapping):
    """
    Sets each key in the ``mapping`` dict to its corresponding value if
    none of the keys are already set
    """
    for k in iterkeys(mapping):
        if self.exists(k):
            return False
    return self._rc_mset(mapping)
python
{ "resource": "" }
q263414
StrictRedisCluster._rc_rename
validation
def _rc_rename(self, src, dst):
    """
    Rename key ``src`` to ``dst``
    """
    if src == dst:
        return self.rename(src + "{" + src + "}", src)
    if not self.exists(src):
        return self.rename(src + "{" + src + "}", src)

    self.delete(dst)
    ktype = self.type(src)
    kttl = self.ttl(src)

    if ktype == b('none'):
        return False

    if ktype == b('string'):
        self.set(dst, self.get(src))
    elif ktype == b('hash'):
        self.hmset(dst, self.hgetall(src))
    elif ktype == b('list'):
        for k in self.lrange(src, 0, -1):
            self.rpush(dst, k)
    elif ktype == b('set'):
        for k in self.smembers(src):
            self.sadd(dst, k)
    elif ktype == b('zset'):
        for k, v in self.zrange(src, 0, -1, withscores=True):
            self.zadd(dst, v, k)

    # Handle keys with an expire time set
    kttl = -1 if kttl is None or kttl < 0 else int(kttl)
    if kttl != -1:
        self.expire(dst, kttl)

    return self.delete(src)
python
{ "resource": "" }
q263415
StrictRedisCluster._rc_renamenx
validation
def _rc_renamenx(self, src, dst):
    "Rename key ``src`` to ``dst`` if ``dst`` doesn't already exist"
    if self.exists(dst):
        return False
    return self._rc_rename(src, dst)
python
{ "resource": "" }
q263416
StrictRedisCluster._rc_keys
validation
def _rc_keys(self, pattern='*'):
    "Returns a list of keys matching ``pattern``"
    result = []
    for alias, redisent in iteritems(self.redises):
        if alias.find('_slave') == -1:
            continue
        result.extend(redisent.keys(pattern))
    return result
python
{ "resource": "" }
q263417
StrictRedisCluster._rc_dbsize
validation
def _rc_dbsize(self):
    "Returns the number of keys in the current database"
    result = 0
    for alias, redisent in iteritems(self.redises):
        if alias.find('_slave') == -1:
            continue
        result += redisent.dbsize()
    return result
python
{ "resource": "" }
q263418
Base.prepare
validation
def prepare(self):
    """Prepare the data in the instance state for serialization.
    """
    # Create a collection for the attributes and elements of
    # this instance.
    attributes, elements = OrderedDict(), []

    # Initialize the namespace map.
    nsmap = dict([self.meta.namespace])

    # Iterate through all declared items.
    for name, item in self._items.items():
        if isinstance(item, Attribute):
            # Prepare the item as an attribute.
            attributes[name] = item.prepare(self)
        elif isinstance(item, Element):
            # Update the nsmap.
            nsmap.update([item.namespace])

            # Prepare the item as an element.
            elements.append(item)

    # Return the collected attributes and elements.
    return attributes, elements, nsmap
python
{ "resource": "" }
q263419
verify
validation
def verify(xml, stream):
    """
    Verify the signature of an XML document with the given certificate.
    Returns `True` if the document is signed with a valid signature.
    Returns `False` if the document is not signed or if the signature is
    invalid.

    :param lxml.etree._Element xml: The document to verify
    :param file stream: The certificate (public key) to verify the document with
    :rtype: Boolean
    """
    # Import xmlsec here to delay initializing the C library in
    # case we don't need it.
    import xmlsec

    # Find the <Signature/> node.
    signature_node = xmlsec.tree.find_node(xml, xmlsec.Node.SIGNATURE)
    if signature_node is None:
        # No `signature` node found; we cannot verify
        return False

    # Create a digital signature context (no key manager is needed).
    ctx = xmlsec.SignatureContext()

    # Register <Response/> and <Assertion/>
    ctx.register_id(xml)
    for assertion in xml.xpath("//*[local-name()='Assertion']"):
        ctx.register_id(assertion)

    # Load the public key.
    key = None
    for fmt in [
            xmlsec.KeyFormat.PEM,
            xmlsec.KeyFormat.CERT_PEM]:
        stream.seek(0)
        try:
            key = xmlsec.Key.from_memory(stream, fmt)
            break
        except ValueError:
            # xmlsec now throws when it can't load the key
            pass

    # Set the key on the context.
    ctx.key = key

    # Verify the signature.
    try:
        ctx.verify(signature_node)
        return True
    except Exception:
        return False
python
{ "resource": "" }
q263420
GalleryAdmin.get_queryset
validation
def get_queryset(self, request):
    """
    Add number of photos to each gallery.
    """
    qs = super(GalleryAdmin, self).get_queryset(request)
    return qs.annotate(photo_count=Count('photos'))
python
{ "resource": "" }
q263421
GalleryAdmin.save_model
validation
def save_model(self, request, obj, form, change):
    """
    Set currently authenticated user as the author of the gallery.
    """
    obj.author = request.user
    obj.save()
python
{ "resource": "" }
q263422
GalleryAdmin.save_formset
validation
def save_formset(self, request, form, formset, change):
    """
    For each photo, set its author to the currently authenticated user.
    """
    instances = formset.save(commit=False)
    for instance in instances:
        if isinstance(instance, Photo):
            instance.author = request.user
        instance.save()
python
{ "resource": "" }
q263423
Ranges.parse_byteranges
validation
def parse_byteranges(cls, environ):
    """
    Outputs a list of tuples with ranges or the empty list
    According to the rfc, start or end values can be omitted
    """
    r = []
    s = environ.get(cls.header_range, '').replace(' ', '').lower()
    if s:
        l = s.split('=')
        if len(l) == 2:
            unit, vals = tuple(l)
            if unit == 'bytes' and vals:
                gen_rng = (tuple(rng.split('-'))
                           for rng in vals.split(',') if '-' in rng)
                for start, end in gen_rng:
                    if start or end:
                        r.append((int(start) if start else None,
                                  int(end) if end else None))
    return r
python
{ "resource": "" }
q263424
Ranges.check_ranges
validation
def check_ranges(cls, ranges, length):
    """Removes errored ranges"""
    result = []
    for start, end in ranges:
        if isinstance(start, int) or isinstance(end, int):
            if isinstance(start, int) and not (0 <= start < length):
                continue
            elif isinstance(start, int) and isinstance(end, int) and not (start <= end):
                continue
            elif start is None and end == 0:
                continue
            result.append((start, end))
    return result
python
{ "resource": "" }
q263425
Ranges.convert_ranges
validation
def convert_ranges(cls, ranges, length):
    """Converts to valid byte ranges"""
    result = []
    for start, end in ranges:
        if end is None:
            result.append((start, length - 1))
        elif start is None:
            s = length - end
            result.append((0 if s < 0 else s, length - 1))
        else:
            result.append((start, end if end < length else length - 1))
    return result
python
{ "resource": "" }
q263426
Ranges.condense_ranges
validation
def condense_ranges(cls, ranges):
    """Sorts and removes overlaps"""
    result = []
    if ranges:
        ranges.sort(key=lambda tup: tup[0])
        result.append(ranges[0])
        for i in range(1, len(ranges)):
            if result[-1][1] + 1 >= ranges[i][0]:
                result[-1] = (result[-1][0], max(result[-1][1], ranges[i][1]))
            else:
                result.append(ranges[i])
    return result
python
{ "resource": "" }
q263427
social_widget_render
validation
def social_widget_render(parser, token):
    """ Renders the selected social widget. You can specify optional settings
    that will be passed to widget template.

    Sample usage:
    {% social_widget_render widget_template key1=val1 key2=val2 %}

    For example to render Twitter follow button you can use code like this:
    {% social_widget_render 'twitter/follow_button.html' username="ev" %}
    """
    bits = token.split_contents()
    tag_name = bits[0]

    if len(bits) < 2:
        raise TemplateSyntaxError("'%s' takes at least one argument" % tag_name)

    args = []
    kwargs = {}

    bits = bits[1:]

    if len(bits):
        for bit in bits:
            match = kwarg_re.match(bit)
            if not match:
                raise TemplateSyntaxError("Malformed arguments to %s tag" % tag_name)
            name, value = match.groups()
            if name:
                # Replacing hyphens with underscores because
                # variable names cannot contain hyphens.
                name = name.replace('-', '_')
                kwargs[name] = parser.compile_filter(value)
            else:
                args.append(parser.compile_filter(value))
    return SocialWidgetNode(args, kwargs)
python
{ "resource": "" }
q263428
Sparse3DMatrix.add
validation
def add(self, addend_mat, axis=1):
    """
    In-place addition

    :param addend_mat: A matrix to be added on the Sparse3DMatrix object
    :param axis: The dimension along which addend_mat is added
    :return: Nothing (as it performs in-place operations)
    """
    if self.finalized:
        if axis == 0:
            raise NotImplementedError('The method is not yet implemented for the axis.')
        elif axis == 1:
            for hid in xrange(self.shape[1]):
                self.data[hid] = self.data[hid] + addend_mat
        elif axis == 2:
            raise NotImplementedError('The method is not yet implemented for the axis.')
        else:
            raise RuntimeError('The axis should be 0, 1, or 2.')
    else:
        raise RuntimeError('The original matrix must be finalized.')
python
{ "resource": "" }
q263429
Sparse3DMatrix.multiply
validation
def multiply(self, multiplier, axis=None):
    """
    In-place multiplication

    :param multiplier: A matrix or vector to be multiplied
    :param axis: The dim along which 'multiplier' is multiplied
    :return: Nothing (as it performs in-place operations)
    """
    if self.finalized:
        if multiplier.ndim == 1:
            if axis == 0:  # multiplier is np.array of length |haplotypes|
                raise NotImplementedError('The method is not yet implemented for the axis.')
            elif axis == 1:  # multiplier is np.array of length |loci|
                sz = len(multiplier)
                multiplier_mat = lil_matrix((sz, sz))
                multiplier_mat.setdiag(multiplier)
                for hid in xrange(self.shape[1]):
                    self.data[hid] = self.data[hid] * multiplier_mat
            elif axis == 2:  # multiplier is np.array of length |reads|
                for hid in xrange(self.shape[1]):
                    self.data[hid].data *= multiplier[self.data[hid].indices]
            else:
                raise RuntimeError('The axis should be 0, 1, or 2.')
        elif multiplier.ndim == 2:
            if axis == 0:  # multiplier is sp.sparse matrix of shape |reads| x |haplotypes|
                for hid in xrange(self.shape[1]):
                    self.data[hid].data *= multiplier[self.data[hid].indices, hid]
            elif axis == 1:  # multiplier is sp.sparse matrix of shape |reads| x |loci|
                for hid in xrange(self.shape[1]):
                    self.data[hid] = self.data[hid].multiply(multiplier)
            elif axis == 2:  # multiplier is np.matrix of shape |haplotypes| x |loci|
                for hid in xrange(self.shape[1]):
                    multiplier_vec = multiplier[hid, :]
                    multiplier_vec = multiplier_vec.ravel()
                    self.data[hid].data *= multiplier_vec.repeat(np.diff(self.data[hid].indptr))
            else:
                raise RuntimeError('The axis should be 0, 1, or 2.')
        elif isinstance(multiplier, Sparse3DMatrix):  # multiplier is a Sparse3DMatrix object
            for hid in xrange(self.shape[1]):
                self.data[hid] = self.data[hid].multiply(multiplier.data[hid])
        else:
            raise RuntimeError('The multiplier should be 1, 2 dimensional numpy array or a Sparse3DMatrix object.')
    else:
        raise RuntimeError('The original matrix must be finalized.')
python
{ "resource": "" }
q263430
EMfactory.update_probability_at_read_level
validation
def update_probability_at_read_level(self, model=3):
    """
    Updates the probability of read origin at read level

    :param model: Normalization model (1: Gene->Allele->Isoform,
        2: Gene->Isoform->Allele, 3: Gene->Isoform*Allele, 4: Gene*Isoform*Allele)
    :return: Nothing (as it performs in-place operations)
    """
    self.probability.reset()  # reset to alignment incidence matrix
    if model == 1:
        self.probability.multiply(self.allelic_expression, axis=APM.Axis.READ)
        self.probability.normalize_reads(axis=APM.Axis.HAPLOGROUP, grouping_mat=self.t2t_mat)
        haplogroup_sum_mat = self.allelic_expression * self.t2t_mat
        self.probability.multiply(haplogroup_sum_mat, axis=APM.Axis.READ)
        self.probability.normalize_reads(axis=APM.Axis.GROUP, grouping_mat=self.t2t_mat)
        self.probability.multiply(haplogroup_sum_mat.sum(axis=0), axis=APM.Axis.HAPLOTYPE)
        self.probability.normalize_reads(axis=APM.Axis.READ)
    elif model == 2:
        self.probability.multiply(self.allelic_expression, axis=APM.Axis.READ)
        self.probability.normalize_reads(axis=APM.Axis.LOCUS)
        self.probability.multiply(self.allelic_expression.sum(axis=0), axis=APM.Axis.HAPLOTYPE)
        self.probability.normalize_reads(axis=APM.Axis.GROUP, grouping_mat=self.t2t_mat)
        self.probability.multiply((self.allelic_expression * self.t2t_mat).sum(axis=0), axis=APM.Axis.HAPLOTYPE)
        self.probability.normalize_reads(axis=APM.Axis.READ)
    elif model == 3:
        self.probability.multiply(self.allelic_expression, axis=APM.Axis.READ)
        self.probability.normalize_reads(axis=APM.Axis.GROUP, grouping_mat=self.t2t_mat)
        self.probability.multiply((self.allelic_expression * self.t2t_mat).sum(axis=0), axis=APM.Axis.HAPLOTYPE)
        self.probability.normalize_reads(axis=APM.Axis.READ)
    elif model == 4:
        self.probability.multiply(self.allelic_expression, axis=APM.Axis.READ)
        self.probability.normalize_reads(axis=APM.Axis.READ)
    else:
        raise RuntimeError('The read normalization model should be 1, 2, 3, or 4.')
python
{ "resource": "" }
q263431
EMfactory.run
validation
def run(self, model, tol=0.001, max_iters=999, verbose=True):
    """
    Runs EM iterations

    :param model: Normalization model (1: Gene->Allele->Isoform,
        2: Gene->Isoform->Allele, 3: Gene->Isoform*Allele, 4: Gene*Isoform*Allele)
    :param tol: Tolerance for termination
    :param max_iters: Maximum number of iterations until termination
    :param verbose: Display information on how EM is running
    :return: Nothing (as it performs in-place operations)
    """
    orig_err_states = np.seterr(all='raise')
    np.seterr(under='ignore')
    if verbose:
        print
        print "Iter No  Time (hh:mm:ss)    Total change (TPM)  "
        print "-------  ---------------  ----------------------"
    num_iters = 0
    err_sum = 1000000.0
    time0 = time.time()
    target_err = 1000000.0 * tol
    while err_sum > target_err and num_iters < max_iters:
        prev_isoform_expression = self.get_allelic_expression().sum(axis=0)
        prev_isoform_expression *= (1000000.0 / prev_isoform_expression.sum())
        self.update_allelic_expression(model=model)
        curr_isoform_expression = self.get_allelic_expression().sum(axis=0)
        curr_isoform_expression *= (1000000.0 / curr_isoform_expression.sum())
        err = np.abs(curr_isoform_expression - prev_isoform_expression)
        err_sum = err.sum()
        num_iters += 1
        if verbose:
            time1 = time.time()
            delmin, s = divmod(int(time1 - time0), 60)
            h, m = divmod(delmin, 60)
            print " %5d      %4d:%02d:%02d     %9.1f / 1000000" % (num_iters, h, m, s, err_sum)
python
{ "resource": "" }
q263432
EMfactory.report_read_counts
validation
def report_read_counts(self, filename, grp_wise=False, reorder='as-is', notes=None):
    """
    Exports expected read counts

    :param filename: File name for output
    :param grp_wise: whether the report is at isoform level or gene level
    :param reorder: whether the report should be in 'decreasing' or
        'increasing' order, or left 'as-is'
    :return: Nothing but the method writes a file
    """
    expected_read_counts = self.probability.sum(axis=APM.Axis.READ)
    if grp_wise:
        lname = self.probability.gname
        expected_read_counts = expected_read_counts * self.grp_conv_mat
    else:
        lname = self.probability.lname
    total_read_counts = expected_read_counts.sum(axis=0)
    if reorder == 'decreasing':
        report_order = np.argsort(total_read_counts.flatten())
        report_order = report_order[::-1]
    elif reorder == 'increasing':
        report_order = np.argsort(total_read_counts.flatten())
    elif reorder == 'as-is':
        report_order = np.arange(len(lname))  # report in the original locus order
    cntdata = np.vstack((expected_read_counts, total_read_counts))
    fhout = open(filename, 'w')
    fhout.write("locus\t" + "\t".join(self.probability.hname) + "\ttotal")
    if notes is not None:
        fhout.write("\tnotes")
    fhout.write("\n")
    for locus_id in report_order:
        lname_cur = lname[locus_id]
        fhout.write("\t".join([lname_cur] + map(str, cntdata[:, locus_id].ravel())))
        if notes is not None:
            fhout.write("\t%s" % notes[lname_cur])
        fhout.write("\n")
    fhout.close()
python
{ "resource": "" }
q263433
EMfactory.report_depths
validation
def report_depths(self, filename, tpm=True, grp_wise=False, reorder='as-is', notes=None):
    """
    Exports expected depths

    :param filename: File name for output
    :param grp_wise: whether the report is at isoform level or gene level
    :param reorder: whether the report should be in 'decreasing' or
        'increasing' order, or left 'as-is'
    :return: Nothing but the method writes a file
    """
    if grp_wise:
        lname = self.probability.gname
        depths = self.allelic_expression * self.grp_conv_mat
    else:
        lname = self.probability.lname
        depths = self.allelic_expression
    if tpm:
        depths *= (1000000.0 / depths.sum())
    total_depths = depths.sum(axis=0)
    if reorder == 'decreasing':
        report_order = np.argsort(total_depths.flatten())
        report_order = report_order[::-1]
    elif reorder == 'increasing':
        report_order = np.argsort(total_depths.flatten())
    elif reorder == 'as-is':
        report_order = np.arange(len(lname))  # report in the original locus order
    cntdata = np.vstack((depths, total_depths))
    fhout = open(filename, 'w')
    fhout.write("locus\t" + "\t".join(self.probability.hname) + "\ttotal")
    if notes is not None:
        fhout.write("\tnotes")
    fhout.write("\n")
    for locus_id in report_order:
        lname_cur = lname[locus_id]
        fhout.write("\t".join([lname_cur] + map(str, cntdata[:, locus_id].ravel())))
        if notes is not None:
            fhout.write("\t%s" % notes[lname_cur])
        fhout.write("\n")
    fhout.close()
python
{ "resource": "" }
q263434
EMfactory.export_posterior_probability
validation
def export_posterior_probability(self, filename, title="Posterior Probability"):
    """
    Writes the posterior probability of read origin

    :param filename: File name for output
    :param title: The title of the posterior probability matrix
    :return: Nothing but the method writes a file in EMASE format (PyTables)
    """
    self.probability.save(h5file=filename, title=title)
python
{ "resource": "" }
q263435
AlignmentPropertyMatrix.print_read
validation
def print_read(self, rid):
    """
    Prints nonzero rows of the read wanted
    """
    if self.rname is not None:
        print self.rname[rid]
        print '--'
    r = self.get_read_data(rid)
    aligned_loci = np.unique(r.nonzero()[1])
    for locus in aligned_loci:
        nzvec = r[:, locus].todense().transpose()[0].A.flatten()
        if self.lname is not None:
            print self.lname[locus],
        else:
            print locus,
        print nzvec
python
{ "resource": "" }
q263436
_roman
validation
def _roman(data, scheme_map, **kw):
    """Transliterate `data` with the given `scheme_map`. This function is used
    when the source scheme is a Roman scheme.

    :param data: the data to transliterate
    :param scheme_map: a dict that maps between characters in the old scheme
                       and characters in the new scheme
    """
    vowels = scheme_map.vowels
    marks = scheme_map.marks
    virama = scheme_map.virama
    consonants = scheme_map.consonants
    non_marks_viraama = scheme_map.non_marks_viraama
    max_key_length_from_scheme = scheme_map.max_key_length_from_scheme
    to_roman = scheme_map.to_scheme.is_roman

    togglers = kw.pop('togglers', set())
    suspend_on = kw.pop('suspend_on', set())
    suspend_off = kw.pop('suspend_off', set())
    if kw:
        raise TypeError('Unexpected keyword argument %s' % list(kw.keys())[0])

    buf = []
    i = 0
    had_consonant = found = False
    len_data = len(data)
    append = buf.append

    # If true, don't transliterate. The toggle token is discarded.
    toggled = False
    # If true, don't transliterate. The suspend token is retained.
    # `suspended` overrides `toggled`.
    suspended = False

    while i <= len_data:
        # The longest token in the source scheme has length
        # `max_key_length_from_scheme`. Iterate over `data` while taking
        # `max_key_length_from_scheme` characters at a time. If we don't find
        # the character group in our scheme map, lop off a character and
        # try again.
        #
        # If we've finished reading through `data`, then `token` will be empty
        # and the loop below will be skipped.
        token = data[i:i + max_key_length_from_scheme]
        while token:
            if token in togglers:
                toggled = not toggled
                i += 2  # skip over the token
                found = True  # force the token to fill up again
                break
            if token in suspend_on:
                suspended = True
            elif token in suspend_off:
                suspended = False

            if toggled or suspended:
                token = token[:-1]
                continue

            # Catch the pattern CV, where C is a consonant and V is a vowel.
            # V should be rendered as a vowel mark, a.k.a. a "dependent"
            # vowel. But due to the nature of Brahmic scripts, 'a' is implicit
            # and has no vowel mark. If we see 'a', add nothing.
            if had_consonant and token in vowels:
                mark = marks.get(token, '')
                if mark:
                    append(mark)
                elif to_roman:
                    append(vowels[token])
                found = True
            # Catch any non_marks_viraama character, including consonants,
            # punctuation, and regular vowels. Due to the implicit 'a', we
            # must explicitly end any lingering consonants before we can
            # handle the current token.
            elif token in non_marks_viraama:
                if had_consonant:
                    append(virama[''])
                append(non_marks_viraama[token])
                found = True

            if found:
                had_consonant = token in consonants
                i += len(token)
                break
            else:
                token = token[:-1]

        # We've exhausted the token; this must be some other character. Due to
        # the implicit 'a', we must explicitly end any lingering consonants
        # before we can handle the current token.
        if not found:
            if had_consonant:
                append(virama[''])
            if i < len_data:
                append(data[i])
                had_consonant = False
            i += 1

        found = False

    return ''.join(buf)
python
{ "resource": "" }
q263437
_brahmic
validation
def _brahmic(data, scheme_map, **kw):
    """Transliterate `data` with the given `scheme_map`. This function is used
    when the source scheme is a Brahmic scheme.

    :param data: the data to transliterate
    :param scheme_map: a dict that maps between characters in the old scheme
                       and characters in the new scheme
    """
    if scheme_map.from_scheme.name == northern.GURMUKHI:
        data = northern.GurmukhiScheme.replace_tippi(text=data)
    marks = scheme_map.marks
    virama = scheme_map.virama
    consonants = scheme_map.consonants
    non_marks_viraama = scheme_map.non_marks_viraama
    to_roman = scheme_map.to_scheme.is_roman
    max_key_length_from_scheme = scheme_map.max_key_length_from_scheme

    buf = []
    i = 0
    to_roman_had_consonant = found = False
    append = buf.append
    # logging.debug(pprint.pformat(scheme_map.consonants))

    # We don't just translate each Brahmic character one after another in
    # order to prefer concise transliterations when possible - for example
    # ज्ञ -> jn in optitrans rather than j~n.
    while i <= len(data):
        # The longest token in the source scheme has length
        # `max_key_length_from_scheme`. Iterate over `data` while taking
        # `max_key_length_from_scheme` characters at a time. If we don't find
        # the character group in our scheme map, lop off a character and
        # try again.
        #
        # If we've finished reading through `data`, then `token` will be empty
        # and the loop below will be skipped.
        token = data[i:i + max_key_length_from_scheme]
        while token:
            if len(token) == 1:
                if token in marks:
                    append(marks[token])
                    found = True
                elif token in virama:
                    append(virama[token])
                    found = True
                else:
                    if to_roman_had_consonant:
                        append('a')
                    append(non_marks_viraama.get(token, token))
                    found = True
            else:
                if token in non_marks_viraama:
                    if to_roman_had_consonant:
                        append('a')
                    append(non_marks_viraama.get(token))
                    found = True

            if found:
                to_roman_had_consonant = to_roman and token in consonants
                i += len(token)
                break
            else:
                token = token[:-1]

        # Continuing the outer while loop.
        # We've exhausted the token; this must be some other character. Due to
        # the implicit 'a', we must explicitly end any lingering consonants
        # before we can handle the current token.
        if not found:
            if to_roman_had_consonant:
                append(next(iter(virama.values())))
            if i < len(data):
                append(data[i])
                to_roman_had_consonant = False
            i += 1

        found = False

    if to_roman_had_consonant:
        append('a')
    return ''.join(buf)
python
{ "resource": "" }
q263438
detect
validation
def detect(text):
    """Detect the input's transliteration scheme.

    :param text: some text data, either a `unicode` or a `str` encoded
                 in UTF-8.
    """
    if sys.version_info < (3, 0):
        # Verify encoding
        try:
            text = text.decode('utf-8')
        except UnicodeError:
            pass

    # Brahmic schemes are all within a specific range of code points.
    for L in text:
        code = ord(L)
        if code >= BRAHMIC_FIRST_CODE_POINT:
            for name, start_code in BLOCKS:
                if start_code <= code <= BRAHMIC_LAST_CODE_POINT:
                    return name

    # Romanizations
    if Regex.IAST_OR_KOLKATA_ONLY.search(text):
        if Regex.KOLKATA_ONLY.search(text):
            return Scheme.Kolkata
        else:
            return Scheme.IAST

    if Regex.ITRANS_ONLY.search(text):
        return Scheme.ITRANS

    if Regex.SLP1_ONLY.search(text):
        return Scheme.SLP1

    if Regex.VELTHUIS_ONLY.search(text):
        return Scheme.Velthuis

    if Regex.ITRANS_OR_VELTHUIS_ONLY.search(text):
        return Scheme.ITRANS

    return Scheme.HK
python
{ "resource": "" }
q263439
_setup
validation
def _setup():
    """Add a variety of default schemes."""
    s = str.split
    if sys.version_info < (3, 0):
        # noinspection PyUnresolvedReferences
        s = unicode.split

    def pop_all(some_dict, some_list):
        for scheme in some_list:
            some_dict.pop(scheme)

    global SCHEMES
    SCHEMES = copy.deepcopy(sanscript.SCHEMES)
    pop_all(SCHEMES, [sanscript.ORIYA, sanscript.BENGALI, sanscript.GUJARATI])
    SCHEMES[HK].update({
        'vowels': s("""a A i I u U R RR lR lRR E ai O au""") + s("""e o"""),
        'marks': s("""A i I u U R RR lR lRR E ai O au""") + s("""e o"""),
        'consonants': sanscript.SCHEMES[HK]['consonants'] + s("""n2 r2 zh""")
    })
    SCHEMES[ITRANS].update({
        'vowels': s("""a A i I u U R RR LLi LLI E ai O au""") + s("""e o"""),
        'marks': s("""A i I u U R RR LLi LLI E ai O au""") + s("""e o"""),
        'consonants': sanscript.SCHEMES[ITRANS]['consonants'] + s("""n2 r2 zh""")
    })
    pop_all(SCHEMES[ITRANS].synonym_map, s("""e o"""))
    SCHEMES[OPTITRANS].update({
        'vowels': s("""a A i I u U R RR LLi LLI E ai O au""") + s("""e o"""),
        'marks': s("""A i I u U R RR LLi LLI E ai O au""") + s("""e o"""),
        'consonants': sanscript.SCHEMES[OPTITRANS]['consonants'] + s("""n2 r2 zh""")
    })
    pop_all(SCHEMES[OPTITRANS].synonym_map, s("""e o"""))
python
{ "resource": "" }
q263440
to_utf8
validation
def to_utf8(y):
    """
    converts an array of integers to a utf8 string
    """
    out = []
    for x in y:
        if x < 0x080:
            out.append(x)
        elif x < 0x0800:
            out.append((x >> 6) | 0xC0)
            out.append((x & 0x3F) | 0x80)
        elif x < 0x10000:
            out.append((x >> 12) | 0xE0)
            out.append(((x >> 6) & 0x3F) | 0x80)
            out.append((x & 0x3F) | 0x80)
        else:
            out.append((x >> 18) | 0xF0)
            # the original omitted the 0x80 continuation marker on this byte;
            # every UTF-8 continuation byte must be OR-ed with 0x80
            out.append(((x >> 12) & 0x3F) | 0x80)
            out.append(((x >> 6) & 0x3F) | 0x80)
            out.append((x & 0x3F) | 0x80)
    return ''.join(map(chr, out))
python
{ "resource": "" }
q263441
Parser.set_script
validation
def set_script(self, i):
    """
    set the value of delta to reflect the current codepage
    """
    if i in range(1, 10):
        n = i - 1
    else:
        raise IllegalInput("Invalid Value for ATR %s" % (hex(i)))
    if n > -1:  # n = -1 is the default script ..
        self.curr_script = n
        self.delta = n * DELTA
    return
python
{ "resource": "" }
q263442
_unrecognised
validation
def _unrecognised(chr):
    """ Handle unrecognised characters. """
    if options['handleUnrecognised'] == UNRECOGNISED_ECHO:
        return chr
    elif options['handleUnrecognised'] == UNRECOGNISED_SUBSTITUTE:
        return options['substituteChar']
    else:
        # the original ``raise (KeyError, chr)`` raised a tuple, which is
        # invalid in Python 3; raise the exception properly instead
        raise KeyError(chr)
python
{ "resource": "" }
q263443
DevanagariCharacterBlock._equivalent
validation
def _equivalent(self, char, prev, next, implicitA):
    """ Transliterate a Latin character equivalent to Devanagari.

    Add VIRAMA for ligatures.
    Convert standalone to dependent vowels.
    """
    result = []
    if char.isVowel == False:
        result.append(char.chr)
        if char.isConsonant \
                and ((next is not None and next.isConsonant)
                     or next is None):
            result.append(DevanagariCharacter._VIRAMA)
    else:
        if prev is None or prev.isConsonant == False:
            result.append(char.chr)
        else:
            if char._dependentVowel is not None:
                result.append(char._dependentVowel)
    return result
python
{ "resource": "" }
q263444
Scheme.from_devanagari
validation
def from_devanagari(self, data):
    """A convenience method"""
    from indic_transliteration import sanscript
    return sanscript.transliterate(data=data, _from=sanscript.DEVANAGARI, _to=self.name)
python
{ "resource": "" }
q263445
generate
validation
def generate(grammar=None, num=1, output=sys.stdout, max_recursion=10, seed=None):
    """Load and generate ``num`` number of top-level rules from the specified grammar.

    :param list grammar: The grammar file to load and generate data from
    :param int num: The number of times to generate data
    :param output: The output destination (an open, writable stream-type object.
        default=``sys.stdout``)
    :param int max_recursion: The maximum reference-recursion when generating data
        (default=``10``)
    :param int seed: The seed to initialize the PRNG with. If None, will not
        initialize it.
    """
    if seed is not None:
        gramfuzz.rand.seed(seed)

    fuzzer = gramfuzz.GramFuzzer()
    fuzzer.load_grammar(grammar)

    cat_group = os.path.basename(grammar).replace(".py", "")

    results = fuzzer.gen(cat_group=cat_group, num=num, max_recursion=max_recursion)
    for res in results:
        output.write(res)
python
{ "resource": "" }
q263446
Q.build
validation
def build(self, pre=None, shortest=False):
    """Build the ``Quote`` instance

    :param list pre: The prerequisites list
    :param bool shortest: Whether or not the shortest reference-chain (most minimal)
        version of the field should be generated.
    """
    res = super(Q, self).build(pre, shortest=shortest)

    if self.escape:
        return repr(res)
    elif self.html_js_escape:
        return ("'" + res.encode("string_escape")
                .replace("<", "\\x3c")
                .replace(">", "\\x3e") + "'")
    else:
        return "".join([self.quote, res, self.quote])
python
{ "resource": "" }
q263447
make_present_participles
validation
def make_present_participles(verbs):
    """Make the list of verbs into present participles

    E.g.:

        empower -> empowering
        drive -> driving
    """
    res = []
    for verb in verbs:
        parts = verb.split()
        if parts[0].endswith("e"):
            parts[0] = parts[0][:-1] + "ing"
        else:
            parts[0] = parts[0] + "ing"
        res.append(" ".join(parts))
    return res
python
{ "resource": "" }
q263448
MailerMessageManager.clear_sent_messages
validation
def clear_sent_messages(self, offset=None):
    """ Deletes sent MailerMessage records """
    if offset is None:
        offset = getattr(settings, 'MAILQUEUE_CLEAR_OFFSET',
                         defaults.MAILQUEUE_CLEAR_OFFSET)

    if type(offset) is int:
        offset = datetime.timedelta(hours=offset)

    delete_before = timezone.now() - offset
    self.filter(sent=True, last_attempt__lte=delete_before).delete()
python
{ "resource": "" }
q263449
_loadNamelistIncludes
validation
def _loadNamelistIncludes(item, unique_glyphs, cache):
    """Load the includes of an encoding Namelist file.

    This is an implementation detail of readNamelist.
    """
    includes = item["includes"] = []
    charset = item["charset"] = set() | item["ownCharset"]
    noCharcode = item["noCharcode"] = set() | item["ownNoCharcode"]

    dirname = os.path.dirname(item["fileName"])
    for include in item["header"]["includes"]:
        includeFile = os.path.join(dirname, include)
        try:
            includedItem = readNamelist(includeFile, unique_glyphs, cache)
        except NamelistRecursionError:
            continue
        if includedItem in includes:
            continue
        includes.append(includedItem)
        charset |= includedItem["charset"]
        noCharcode |= includedItem["ownNoCharcode"]
    return item
python
{ "resource": "" }
q263450
__readNamelist
validation
def __readNamelist(cache, filename, unique_glyphs):
    """Return a dict with the data of an encoding Namelist file.

    This is an implementation detail of readNamelist.
    """
    if filename in cache:
        item = cache[filename]
    else:
        cps, header, noncodes = parseNamelist(filename)
        item = {
            "fileName": filename,
            "ownCharset": cps,
            "header": header,
            "ownNoCharcode": noncodes,
            "includes": None,  # placeholder
            "charset": None,  # placeholder
            "noCharcode": None
        }
        cache[filename] = item

    if unique_glyphs or item["charset"] is not None:
        return item

    # full-charset/includes are requested and not cached yet
    _loadNamelistIncludes(item, unique_glyphs, cache)
    return item
python
{ "resource": "" }
q263451
_readNamelist
validation
def _readNamelist(currentlyIncluding, cache, namFilename, unique_glyphs):
    """ Detect infinite recursion and prevent it.

    This is an implementation detail of readNamelist.

    Raises NamelistRecursionError if namFilename is in the process of being
    included
    """
    # normalize
    filename = os.path.abspath(os.path.normcase(namFilename))
    if filename in currentlyIncluding:
        raise NamelistRecursionError(filename)
    currentlyIncluding.add(filename)
    try:
        result = __readNamelist(cache, filename, unique_glyphs)
    finally:
        currentlyIncluding.remove(filename)
    return result
python
{ "resource": "" }
q263452
codepointsInNamelist
validation
def codepointsInNamelist(namFilename, unique_glyphs=False, cache=None):
    """Returns the set of codepoints contained in a given Namelist file.

    This is a replacement for CodepointsInSubset and implements the
    "#$ include" header format.

    Args:
      namFilename: The path to the Namelist file.
      unique_glyphs: Optional, whether to only include glyphs unique to subset.

    Returns:
      A set containing the glyphs in the subset.
    """
    key = 'charset' if not unique_glyphs else 'ownCharset'
    internals_dir = os.path.dirname(os.path.abspath(__file__))
    target = os.path.join(internals_dir, namFilename)
    result = readNamelist(target, unique_glyphs, cache)
    return result[key]
python
{ "resource": "" }
q263453
TTFont.get_orthographies
validation
def get_orthographies(self, _library=library):
    ''' Returns list of CharsetInfo about supported orthographies '''
    results = []
    for charset in _library.charsets:
        if self._charsets:
            cn = getattr(charset, 'common_name', False)
            abbr = getattr(charset, 'abbreviation', False)
            nn = getattr(charset, 'short_name', False)
            naive = getattr(charset, 'native_name', False)

            if cn and cn.lower() in self._charsets:
                results.append(charset)
            elif nn and nn.lower() in self._charsets:
                results.append(charset)
            elif naive and naive.lower() in self._charsets:
                results.append(charset)
            elif abbr and abbr.lower() in self._charsets:
                results.append(charset)
        else:
            results.append(charset)

    for result in results:
        yield CharsetInfo(self, result)
python
{ "resource": "" }
q263454
BaseOAuth.generate_oauth2_headers
validation
def generate_oauth2_headers(self):
    """Generates header for oauth2
    """
    encoded_credentials = base64.b64encode(
        ('{0}:{1}'.format(self.consumer_key, self.consumer_secret)).encode('utf-8'))
    headers = {
        'Authorization': 'Basic {0}'.format(encoded_credentials.decode('utf-8')),
        'Content-Type': 'application/x-www-form-urlencoded'
    }
    return headers
python
{ "resource": "" }
q263455
BaseOAuth.oauth2_access_parser
validation
def oauth2_access_parser(self, raw_access):
    """Parse oauth2 access
    """
    parsed_access = json.loads(raw_access.content.decode('utf-8'))
    self.access_token = parsed_access['access_token']
    self.token_type = parsed_access['token_type']
    self.refresh_token = parsed_access['refresh_token']
    self.guid = parsed_access['xoauth_yahoo_guid']

    credentials = {
        'access_token': self.access_token,
        'token_type': self.token_type,
        'refresh_token': self.refresh_token,
        'guid': self.guid
    }
    return credentials
python
{ "resource": "" }
q263456
BaseOAuth.refresh_access_token
validation
def refresh_access_token(self):
    """Refresh access token
    """
    logger.debug("REFRESHING TOKEN")
    self.token_time = time.time()
    credentials = {
        'token_time': self.token_time
    }

    if self.oauth_version == 'oauth1':
        self.access_token, self.access_token_secret = self.oauth.get_access_token(
            self.access_token, self.access_token_secret,
            params={"oauth_session_handle": self.session_handle})
        credentials.update({
            'access_token': self.access_token,
            'access_token_secret': self.access_token_secret,
            'session_handle': self.session_handle,
            'token_time': self.token_time
        })
    else:
        headers = self.generate_oauth2_headers()
        raw_access = self.oauth.get_raw_access_token(
            data={"refresh_token": self.refresh_token,
                  'redirect_uri': self.callback_uri,
                  'grant_type': 'refresh_token'},
            headers=headers)
        credentials.update(self.oauth2_access_parser(raw_access))

    return credentials
python
{ "resource": "" }
q263457
get_data
validation
def get_data(filename):
    """Calls right function according to file extension
    """
    name, ext = get_file_extension(filename)
    func = json_get_data if ext == '.json' else yaml_get_data
    return func(filename)
python
{ "resource": "" }
q263458
write_data
validation
def write_data(data, filename):
    """Call right func to save data according to file extension
    """
    name, ext = get_file_extension(filename)
    func = json_write_data if ext == '.json' else yaml_write_data
    return func(data, filename)
python
{ "resource": "" }
q263459
json_write_data
validation
def json_write_data(json_data, filename):
    """Write json data into a file
    """
    with open(filename, 'w') as fp:
        json.dump(json_data, fp, indent=4, sort_keys=True, ensure_ascii=False)
        return True
    return False
python
{ "resource": "" }
q263460
json_get_data
validation
def json_get_data(filename):
    """Get data from json file
    """
    with open(filename) as fp:
        json_data = json.load(fp)
        return json_data
    return False
python
{ "resource": "" }
q263461
yaml_get_data
validation
def yaml_get_data(filename):
    """Get data from .yml file
    """
    with open(filename, 'rb') as fd:
        # note: yaml.load without an explicit Loader is unsafe on untrusted input;
        # prefer yaml.safe_load where possible
        yaml_data = yaml.load(fd)
        return yaml_data
    return False
python
{ "resource": "" }
q263462
yaml_write_data
validation
def yaml_write_data(yaml_data, filename):
    """Write data into a .yml file
    """
    with open(filename, 'w') as fd:
        yaml.dump(yaml_data, fd, default_flow_style=False)
        return True
    return False
python
{ "resource": "" }
q263463
RBFize.transform
validation
def transform(self, X):
    '''
    Turns distances into RBF values.

    Parameters
    ----------
    X : array
        The raw pairwise distances.

    Returns
    -------
    X_rbf : array of same shape as X
        The distances in X passed through the RBF kernel.
    '''
    X = check_array(X)
    X_rbf = np.empty_like(X) if self.copy else X

    X_in = X
    if not self.squared:
        np.power(X_in, 2, out=X_rbf)
        X_in = X_rbf

    if self.scale_by_median:
        scale = self.median_ if self.squared else self.median_ ** 2
        gamma = self.gamma * scale
    else:
        gamma = self.gamma

    np.multiply(X_in, -gamma, out=X_rbf)
    np.exp(X_rbf, out=X_rbf)
    return X_rbf
python
{ "resource": "" }
q263464
ProjectPSD.fit
validation
def fit(self, X, y=None):
    '''
    Learn the linear transformation to clipped eigenvalues.

    Note that if min_eig isn't zero and any of the original eigenvalues
    were exactly zero, this will leave those eigenvalues as zero.

    Parameters
    ----------
    X : array, shape [n, n]
        The *symmetric* input similarities. If X is asymmetric, it will be
        treated as if it were symmetric based on its lower-triangular part.
    '''
    n = X.shape[0]
    if X.shape != (n, n):
        raise TypeError("Input must be a square matrix.")

    # TODO: only get negative eigs somehow?
    memory = get_memory(self.memory)
    vals, vecs = memory.cache(scipy.linalg.eigh, ignore=['overwrite_a'])(
        X, overwrite_a=not self.copy)
    vals = vals.reshape(-1, 1)

    if self.min_eig == 0:
        inner = vals > self.min_eig
    else:
        with np.errstate(divide='ignore'):
            inner = np.where(vals >= self.min_eig, 1,
                             np.where(vals == 0, 0, self.min_eig / vals))

    self.clip_ = np.dot(vecs, inner * vecs.T)
    return self
python
{ "resource": "" }
q263465
FlipPSD.fit
validation
def fit(self, X, y=None):
    '''
    Learn the linear transformation to flipped eigenvalues.

    Parameters
    ----------
    X : array, shape [n, n]
        The *symmetric* input similarities. If X is asymmetric, it will be
        treated as if it were symmetric based on its lower-triangular part.
    '''
    n = X.shape[0]
    if X.shape != (n, n):
        raise TypeError("Input must be a square matrix.")

    # TODO: only get negative eigs somehow?
    memory = get_memory(self.memory)
    vals, vecs = memory.cache(scipy.linalg.eigh, ignore=['overwrite_a'])(
        X, overwrite_a=not self.copy)
    vals = vals[:, None]

    self.flip_ = np.dot(vecs, np.sign(vals) * vecs.T)
    return self
python
{ "resource": "" }
q263466
FlipPSD.transform
validation
def transform(self, X):
    '''
    Transforms X according to the linear transformation corresponding to
    flipping the input eigenvalues.

    Parameters
    ----------
    X : array, shape [n_test, n]
        The test similarities to training points.

    Returns
    -------
    Xt : array, shape [n_test, n]
        The transformed test similarities to training points.
    '''
    n = self.flip_.shape[0]
    if X.ndim != 2 or X.shape[1] != n:
        msg = "X should have {} columns, the number of samples at fit time"
        raise TypeError(msg.format(self.flip_.shape[0]))
    return np.dot(X, self.flip_)
python
{ "resource": "" }
q263467
FlipPSD.fit_transform
validation
def fit_transform(self, X, y=None):
    '''
    Flips the negative eigenvalues of X.

    Parameters
    ----------
    X : array, shape [n, n]
        The *symmetric* input similarities. If X is asymmetric, it will be
        treated as if it were symmetric based on its lower-triangular part.

    Returns
    -------
    Xt : array, shape [n, n]
        The transformed training similarities.
    '''
    n = X.shape[0]
    if X.shape != (n, n):
        raise TypeError("Input must be a square matrix.")

    memory = get_memory(self.memory)
    discard_X = not self.copy and self.negatives_likely
    vals, vecs = memory.cache(scipy.linalg.eigh, ignore=['overwrite_a'])(
        X, overwrite_a=discard_X)
    vals = vals[:, None]

    # stored as flip_ for consistency with transform(); the original
    # assigned to clip_, which transform() never reads
    self.flip_ = np.dot(vecs, np.sign(vals) * vecs.T)

    if discard_X or vals[0, 0] < 0:
        del X
        np.abs(vals, out=vals)
        X = np.dot(vecs, vals * vecs.T)
        del vals, vecs

        # should be symmetric, but make sure because floats
        X = Symmetrize(copy=False).fit_transform(X)
    return X
python
{ "resource": "" }
q263468
ShiftPSD.fit
validation
def fit(self, X, y=None):
    '''
    Learn the transformation to shifted eigenvalues. Only depends
    on the input dimension.

    Parameters
    ----------
    X : array, shape [n, n]
        The *symmetric* input similarities.
    '''
    n = X.shape[0]
    if X.shape != (n, n):
        raise TypeError("Input must be a square matrix.")

    self.train_ = X
    memory = get_memory(self.memory)
    lo, = memory.cache(scipy.linalg.eigvalsh)(X, eigvals=(0, 0))
    self.shift_ = max(self.min_eig - lo, 0)
    return self
python
{ "resource": "" }
q263469
ShiftPSD.transform
validation
def transform(self, X):
    '''
    Transforms X according to the linear transformation corresponding to
    shifting the input eigenvalues to all be at least ``self.min_eig``.

    Parameters
    ----------
    X : array, shape [n_test, n]
        The test similarities to training points.

    Returns
    -------
    Xt : array, shape [n_test, n]
        The transformed test similarities to training points. Only different
        from X if X is the training data.
    '''
    n = self.train_.shape[0]
    if X.ndim != 2 or X.shape[1] != n:
        msg = "X should have {} columns, the number of samples at fit time"
        raise TypeError(msg.format(n))

    if self.copy:
        X = X.copy()

    if self.shift_ != 0 and X is self.train_ or (
            X.shape == self.train_.shape and np.allclose(X, self.train_)):
        X[xrange(n), xrange(n)] += self.shift_
    return X
python
{ "resource": "" }
q263470
L2DensityTransformer.fit
validation
def fit(self, X, y=None):
    '''
    Picks the elements of the basis to use for the given data.

    Only depends on the dimension of X. If it's more convenient, you can
    pass a single integer for X, which is the dimension to use.

    Parameters
    ----------
    X : an integer, a :class:`Features` instance, or a list of bag features
        The input data, or just its dimension, since only the dimension is
        needed here.
    '''
    if is_integer(X):
        dim = X
    else:
        X = as_features(X)
        dim = X.dim
    M = self.smoothness

    # figure out the smooth-enough elements of our basis
    inds = np.mgrid[(slice(M + 1),) * dim].reshape(dim, (M + 1) ** dim).T
    self.inds_ = inds[(inds ** 2).sum(axis=1) <= M ** 2]
    return self
python
{ "resource": "" }
q263471
L2DensityTransformer.transform
validation
def transform(self, X):
    '''
    Transform a list of bag features into its projection series
    representation.

    Parameters
    ----------
    X : :class:`skl_groups.features.Features` or list of bag feature arrays
        New data to transform. The data should all lie in [0, 1];
        use :class:`skl_groups.preprocessing.BagMinMaxScaler` if not.

    Returns
    -------
    X_new : integer array, shape ``[len(X), dim_]``
        X transformed into the new space.
    '''
    self._check_fitted()
    M = self.smoothness
    dim = self.dim_
    inds = self.inds_
    do_check = self.do_bounds_check

    X = as_features(X)
    if X.dim != dim:
        msg = "model fit for dimension {} but got dim {}"
        raise ValueError(msg.format(dim, X.dim))

    Xt = np.empty((len(X), self.inds_.shape[0]))
    Xt.fill(np.nan)

    if self.basis == 'cosine':  # TODO: put this in a C extension?
        coefs = (np.pi * np.arange(M + 1))[..., :]
        for i, bag in enumerate(X):
            if do_check:
                if np.min(bag) < 0 or np.max(bag) > 1:
                    raise ValueError("Bag {} not in [0, 1]".format(i))

            # apply each phi func to each dataset point: n x dim x M
            phi = coefs * bag[..., np.newaxis]
            np.cos(phi, out=phi)
            phi[:, :, 1:] *= np.sqrt(2)

            # B is the evaluation of each tensor-prodded basis func
            # at each point: n x inds.shape[0]
            B = reduce(op.mul, (phi[:, i, inds[:, i]] for i in xrange(dim)))

            Xt[i, :] = np.mean(B, axis=0)
    else:
        raise ValueError("unknown basis '{}'".format(self.basis))

    return Xt
python
{ "resource": "" }
q263472
VersiontoolsEnchancedDistributionMetadata.get_version
validation
def get_version(self):
    """
    Get distribution version.

    This method is enhanced compared to original distutils implementation.
    If the version string is set to a special value then instead of using
    the actual value the real version is obtained by querying versiontools.

    If versiontools package is not installed then the version is obtained
    from the standard section of the ``PKG-INFO`` file. This file is
    automatically created by any source distribution. This method is less
    useful as it cannot take advantage of version control information that
    is automatically loaded by versiontools. It has the advantage of not
    requiring versiontools installation and that it does not depend on
    ``setup_requires`` feature of ``setuptools``.
    """
    if (self.name is not None and self.version is not None
            and self.version.startswith(":versiontools:")):
        return (self.__get_live_version() or self.__get_frozen_version()
                or self.__fail_to_get_any_version())
    else:
        return self.__base.get_version(self)
python
{ "resource": "" }
q263473
VersiontoolsEnchancedDistributionMetadata.__get_live_version
validation
def __get_live_version(self):
    """
    Get a live version string using versiontools
    """
    try:
        import versiontools
    except ImportError:
        return None
    else:
        return str(versiontools.Version.from_expression(self.name))
python
{ "resource": "" }
q263474
BagPreprocesser.fit
validation
def fit(self, X, y=None, **params):
    '''
    Fit the transformer on the stacked points.

    Parameters
    ----------
    X : :class:`Features` or list of arrays of shape ``[n_samples[i], n_features]``
        Training set. If a Features object, it will be stacked.

    any other keyword argument :
        Passed on as keyword arguments to the transformer's ``fit()``.
    '''
    X = as_features(X, stack=True)
    self.transformer.fit(X.stacked_features, y, **params)
    return self
python
{ "resource": "" }
q263475
BagPreprocesser.transform
validation
def transform(self, X, **params):
    '''
    Transform the stacked points.

    Parameters
    ----------
    X : :class:`Features` or list of bag feature arrays
        New data to transform.

    any other keyword argument :
        Passed on as keyword arguments to the transformer's ``transform()``.

    Returns
    -------
    X_new : :class:`Features`
        Transformed features.
    '''
    X = as_features(X, stack=True)
    X_new = self.transformer.transform(X.stacked_features, **params)
    return self._gather_outputs(X, X_new)
python
{ "resource": "" }
q263476
BagPreprocesser.fit_transform
validation
def fit_transform(self, X, y=None, **params):
    '''
    Fit and transform the stacked points.

    Parameters
    ----------
    X : :class:`Features` or list of bag feature arrays
        Data to train on and transform.

    any other keyword argument :
        Passed on as keyword arguments to the transformer's ``transform()``.

    Returns
    -------
    X_new : :class:`Features`
        Transformed features.
    '''
    X = as_features(X, stack=True)
    X_new = self.transformer.fit_transform(X.stacked_features, y, **params)
    return self._gather_outputs(X, X_new)
python
{ "resource": "" }
q263477
MinMaxScaler.fit
validation
def fit(self, X, y=None):
    """Compute the minimum and maximum to be used for later scaling.

    Parameters
    ----------
    X : array-like, shape [n_samples, n_features]
        The data used to compute the per-feature minimum and maximum
        used for later scaling along the features axis.
    """
    X = check_array(X, copy=self.copy,
                    dtype=[np.float64, np.float32, np.float16, np.float128])

    feature_range = self.feature_range
    if feature_range[0] >= feature_range[1]:
        raise ValueError("Minimum of desired feature range must be smaller"
                         " than maximum. Got %s." % str(feature_range))
    if self.fit_feature_range is not None:
        fit_feature_range = self.fit_feature_range
        if fit_feature_range[0] >= fit_feature_range[1]:
            raise ValueError("Minimum of desired (fit) feature range must "
                             "be smaller than maximum. Got %s."
                             % str(feature_range))
        if (fit_feature_range[0] < feature_range[0] or
                fit_feature_range[1] > feature_range[1]):
            raise ValueError("fit_feature_range must be a subset of "
                             "feature_range. Got %s, fit %s."
                             % (str(feature_range), str(fit_feature_range)))
        feature_range = fit_feature_range

    data_min = np.min(X, axis=0)
    data_range = np.max(X, axis=0) - data_min
    # Do not scale constant features
    data_range[data_range == 0.0] = 1.0
    self.scale_ = (feature_range[1] - feature_range[0]) / data_range
    self.min_ = feature_range[0] - data_min * self.scale_
    self.data_range = data_range
    self.data_min = data_min
    return self
python
{ "resource": "" }
q263478
MinMaxScaler.transform
validation
def transform(self, X):
    """Scaling features of X according to feature_range.

    Parameters
    ----------
    X : array-like with shape [n_samples, n_features]
        Input data that will be transformed.
    """
    X = check_array(X, copy=self.copy)
    X *= self.scale_
    X += self.min_
    if self.truncate:
        np.maximum(self.feature_range[0], X, out=X)
        np.minimum(self.feature_range[1], X, out=X)
    return X
python
{ "resource": "" }
q263479
MinMaxScaler.inverse_transform
validation
def inverse_transform(self, X):
    """Undo the scaling of X according to feature_range.

    Note that if truncate is true, any truncated points will not
    be restored exactly.

    Parameters
    ----------
    X : array-like with shape [n_samples, n_features]
        Input data that will be transformed.
    """
    X = check_array(X, copy=self.copy)
    X -= self.min_
    X /= self.scale_
    return X
python
{ "resource": "" }
q263480
BagOfWords.fit
validation
def fit(self, X, y=None):
    '''
    Choose the codewords based on a training set.

    Parameters
    ----------
    X : :class:`skl_groups.features.Features` or list of arrays of shape ``[n_samples[i], n_features]``
        Training set. If a Features object, it will be stacked.
    '''
    self.kmeans_fit_ = copy(self.kmeans)
    X = as_features(X, stack=True)
    self.kmeans_fit_.fit(X.stacked_features)
    return self
python
{ "resource": "" }
q263481
BagOfWords.transform
validation
def transform(self, X):
    '''
    Transform a list of bag features into its bag-of-words representation.

    Parameters
    ----------
    X : :class:`skl_groups.features.Features` or list of bag feature arrays
        New data to transform.

    Returns
    -------
    X_new : integer array, shape [len(X), kmeans.n_clusters]
        X transformed into the new space.
    '''
    self._check_fitted()
    X = as_features(X, stack=True)
    assignments = self.kmeans_fit_.predict(X.stacked_features)
    return self._group_assignments(X, assignments)
python
{ "resource": "" }
q263482
is_categorical_type
validation
def is_categorical_type(ary):
    "Checks whether the array is either integral or boolean."
    ary = np.asanyarray(ary)
    return is_integer_type(ary) or ary.dtype.kind == 'b'
python
{ "resource": "" }
q263483
as_integer_type
validation
def as_integer_type(ary):
    '''
    Returns argument as an integer array, converting floats if convertible.

    Raises ValueError if it's a float array with nonintegral values.
    '''
    ary = np.asanyarray(ary)
    if is_integer_type(ary):
        return ary
    rounded = np.rint(ary)
    if np.any(rounded != ary):
        raise ValueError("argument array must contain only integers")
    return rounded.astype(int)
python
{ "resource": "" }
q263484
ProgressLogger.start
validation
def start(self, total):
    '''
    Signal the start of the process.

    Parameters
    ----------
    total : int
        The total number of steps in the process, or None if unknown.
    '''
    self.logger.info(json.dumps(['START', self.name, total]))
python
{ "resource": "" }
q263485
_build_indices
validation
def _build_indices(X, flann_args):
    "Builds FLANN indices for each bag."
    # TODO: should probably multithread this
    logger.info("Building indices...")
    indices = [None] * len(X)
    for i, bag in enumerate(plog(X, name="index building")):
        indices[i] = idx = FLANNIndex(**flann_args)
        idx.build_index(bag)
    return indices
python
{ "resource": "" }
q263486
_get_rhos
validation
def _get_rhos(X, indices, Ks, max_K, save_all_Ks, min_dist):
    "Gets within-bag distances for each bag."
    logger.info("Getting within-bag distances...")

    if max_K >= X.n_pts.min():
        msg = "asked for K = {}, but there's a bag with only {} points"
        raise ValueError(msg.format(max_K, X.n_pts.min()))

    # need to throw away the closest neighbor, which will always be self
    # thus K=1 corresponds to column 1 in the result array
    which_Ks = slice(1, None) if save_all_Ks else Ks

    indices = plog(indices, name="within-bag distances")
    rhos = [None] * len(X)
    for i, (idx, bag) in enumerate(zip(indices, X)):
        r = np.sqrt(idx.nn_index(bag, max_K + 1)[1][:, which_Ks])
        np.maximum(min_dist, r, out=r)
        rhos[i] = r
    return rhos
python
{ "resource": "" }
q263487
linear
validation
def linear(Ks, dim, num_q, rhos, nus):
    r'''
    Estimates the linear inner product \int p q between two distributions,
    based on kNN distances.
    '''
    return _get_linear(Ks, dim)(num_q, rhos, nus)
python
{ "resource": "" }
q263488
quadratic
validation
def quadratic(Ks, dim, rhos, required=None):
    r'''
    Estimates \int p^2 based on kNN distances.

    In here because it's used in the l2 distance, above.

    Returns array of shape (num_Ks,).
    '''
    # Estimated with alpha=1, beta=0:
    #   B_{k,d,1,0} is the same as B_{k,d,0,1} in linear()
    # and the full estimator is
    #   B / (n - 1) * mean(rho ^ -dim)
    N = rhos.shape[0]
    Ks = np.asarray(Ks)
    Bs = (Ks - 1) / np.pi ** (dim / 2) * gamma(dim / 2 + 1)  # shape (num_Ks,)
    est = Bs / (N - 1) * np.mean(rhos ** (-dim), axis=0)
    return est
python
{ "resource": "" }
q263489
topological_sort
validation
def topological_sort(deps):
    '''
    Topologically sort a DAG, represented by a dict of child => set of
    parents. The dependency dict is destroyed during operation.

    Uses the Kahn algorithm: http://en.wikipedia.org/wiki/Topological_sorting
    Not a particularly good implementation, but we're just running it on
    tiny graphs.
    '''
    order = []
    available = set()

    def _move_available():
        to_delete = []
        for n, parents in iteritems(deps):
            if not parents:
                available.add(n)
                to_delete.append(n)
        for n in to_delete:
            del deps[n]

    _move_available()
    while available:
        n = available.pop()
        order.append(n)
        for parents in itervalues(deps):
            parents.discard(n)
        _move_available()

    # anything still in deps has parents that can never be satisfied
    # (the original checked `available` here, which is always empty after
    # the loop and so could never detect a cycle)
    if deps:
        raise ValueError("dependency cycle found")
    return order
python
{ "resource": "" }
q263490
KNNDivergenceEstimator._get_Ks
validation
def _get_Ks(self):
    "Ks as an array and type-checked."
    Ks = as_integer_type(self.Ks)
    if Ks.ndim != 1:
        raise TypeError("Ks should be 1-dim, got shape {}".format(Ks.shape))
    if Ks.min() < 1:
        raise ValueError("Ks should be positive; got {}".format(Ks.min()))
    return Ks
python
{ "resource": "" }
q263491
KNNDivergenceEstimator._flann_args
validation
def _flann_args(self, X=None):
    "The dictionary of arguments to give to FLANN."
    args = {'cores': self._n_jobs}
    if self.flann_algorithm == 'auto':
        if X is None or X.dim > 5:
            args['algorithm'] = 'linear'
        else:
            args['algorithm'] = 'kdtree_single'
    else:
        args['algorithm'] = self.flann_algorithm
    if self.flann_args:
        args.update(self.flann_args)

    # check that arguments are correct
    try:
        FLANNParameters().update(args)
    except AttributeError as e:
        msg = "flann_args contains an invalid argument:\n {}"
        raise TypeError(msg.format(e))

    return args
python
{ "resource": "" }
q263492
KNNDivergenceEstimator.fit
validation
def fit(self, X, y=None, get_rhos=False):
    '''
    Sets up for divergence estimation "from" new data "to" X.
    Builds FLANN indices for each bag, and maybe gets within-bag distances.

    Parameters
    ----------
    X : list of arrays or :class:`skl_groups.features.Features`
        The bags to search "to".

    get_rhos : boolean, optional, default False
        Compute within-bag distances :attr:`rhos_`. These are only needed
        for some divergence functions or if do_sym is passed, and they'll
        be computed (and saved) during :meth:`transform` if they're not
        computed here.

        If you're using Jensen-Shannon divergence, a higher max_K may be
        needed once it sees the number of points in the transformed bags,
        so the computation here might be wasted.
    '''
    self.features_ = X = as_features(X, stack=True, bare=True)

    # if we're using a function that needs to pick its K vals itself,
    # then we need to set max_K here. when we transform(), might have to
    # re-do this :|
    Ks = self._get_Ks()
    _, _, _, max_K, save_all_Ks, _ = _choose_funcs(
        self.div_funcs, Ks, X.dim, X.n_pts, None, self.version)

    if max_K >= X.n_pts.min():
        msg = "asked for K = {}, but there's a bag with only {} points"
        raise ValueError(msg.format(max_K, X.n_pts.min()))

    memory = self.memory
    if isinstance(memory, string_types):
        memory = Memory(cachedir=memory, verbose=0)

    # (renamed from `id` to avoid shadowing the builtin)
    self.indices_ = idxs = memory.cache(_build_indices)(X, self._flann_args())
    if get_rhos:
        self.rhos_ = _get_rhos(X, idxs, Ks, max_K, save_all_Ks, self.min_dist)
    elif hasattr(self, 'rhos_'):
        del self.rhos_
    return self
python
{ "resource": "" }
q263493
Features.make_stacked
validation
def make_stacked(self):
    "If unstacked, convert to stacked. If stacked, do nothing."
    if self.stacked:
        return

    self._boundaries = bounds = np.r_[0, np.cumsum(self.n_pts)]
    self.stacked_features = stacked = np.vstack(self.features)
    self.features = np.array(
        [stacked[bounds[i-1]:bounds[i]] for i in xrange(1, len(bounds))],
        dtype=object)
    self.stacked = True
python
{ "resource": "" }
q263494
Features.copy
validation
def copy(self, stack=False, copy_meta=False, memo=None):
    '''
    Copies the Features object. Makes a copy of the features array.

    Parameters
    ----------
    stack : boolean, optional, default False
        Whether to stack the copy if this one is unstacked.

    copy_meta : boolean, optional, default False
        Also copy the metadata. If False, metadata in both points to the
        same object.
    '''
    if self.stacked:
        fs = deepcopy(self.stacked_features, memo)
        n_pts = self.n_pts.copy()
    elif stack:
        fs = np.vstack(self.features)
        n_pts = self.n_pts.copy()
    else:
        fs = deepcopy(self.features, memo)
        n_pts = None
    meta = deepcopy(self.meta, memo) if copy_meta else self.meta
    return Features(fs, n_pts, copy=False, **meta)
python
{ "resource": "" }
q263495
Features.bare
validation
def bare(self):
    "Make a Features object with no metadata; points to the same features."
    if not self.meta:
        return self
    elif self.stacked:
        return Features(self.stacked_features, self.n_pts, copy=False)
    else:
        return Features(self.features, copy=False)
python
{ "resource": "" }
q263496
MeanMapKernel.fit
validation
def fit(self, X, y=None):
    '''
    Specify the data to which kernel values should be computed.

    Parameters
    ----------
    X : list of arrays or :class:`skl_groups.features.Features`
        The bags to compute "to".
    '''
    self.features_ = as_features(X, stack=True, bare=True)
    # TODO: could precompute things like squared norms if kernel == "rbf".
    # Probably should add support to sklearn instead of hacking it here.
    return self
python
{ "resource": "" }
q263497
BagMean.transform
validation
def transform(self, X):
    '''
    Transform a list of bag features into a matrix of its mean features.

    Parameters
    ----------
    X : :class:`skl_groups.features.Features` or list of bag feature arrays
        Data to transform.

    Returns
    -------
    X_new : array, shape ``[len(X), X.dim]``
        X transformed into its means.
    '''
    X = as_features(X)
    return np.vstack([np.mean(bag, axis=0) for bag in X])
python
{ "resource": "" }
q263498
Client.run
validation
def run(self):
    """Start listening to the server"""
    logger.info(u'Started listening')
    while not self._stop:
        xml = self._readxml()

        # Exit on invalid XML
        if xml is None:
            break

        # Raw xml only
        if not self.modelize:
            logger.info(u'Raw xml: %s' % xml)
            self.results.put(xml)
            continue

        # Model objects + raw xml as fallback
        if xml.tag == 'RECOGOUT':
            sentence = Sentence.from_shypo(xml.find('SHYPO'), self.encoding)
            logger.info(u'Modelized recognition: %r' % sentence)
            self.results.put(sentence)
        else:
            logger.info(u'Unmodelized xml: %s' % xml)
            self.results.put(xml)
    logger.info(u'Stopped listening')
python
{ "resource": "" }
q263499
Client.connect
validation
def connect(self):
    """Connect to the server

    :raise ConnectionError: If socket cannot establish a connection

    """
    try:
        logger.info(u'Connecting %s:%d' % (self.host, self.port))
        self.sock.connect((self.host, self.port))
    except socket.error:
        raise ConnectionError()
    self.state = CONNECTED
python
{ "resource": "" }