code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def remove(self):
    """Remove the generated schema artifacts.

    Calls clean() first, deletes the schema zip file if present, and finally
    removes the schema root directory when it has been left empty.
    """
    self.clean()
    if os.path.isfile(self.schema_zip_file):
        os.remove(self.schema_zip_file)
    root = self.schema_root_dir
    if os.path.isdir(root) and os.listdir(root) == []:
        os.rmdir(root)
The `schema_mof_dir` directory is removed if it exists and the `schema_zip_file` is removed if it exists. If the `schema_root_dir` is empty after these removals that directory is removed.
def earth_gyro(RAW_IMU, ATTITUDE):
    """Return the earth-frame gyro vector.

    Rotates the raw body-frame rates (converted via degrees() and scaled by
    1e-3) into the earth frame using the ATTITUDE rotation.
    """
    dcm = rotation(ATTITUDE)
    body_rates = Vector3(degrees(RAW_IMU.xgyro),
                         degrees(RAW_IMU.ygyro),
                         degrees(RAW_IMU.zgyro)) * 0.001
    return dcm * body_rates
return earth frame gyro vector
def _read_hopopt_options(self, length):
    """Read HOPOPT options.

    Positional arguments:
        * length -- int, total byte length of the options to consume

    Returns:
        * tuple -- option type enums in the order first seen
        * dict -- extracted HOPOPT options, keyed by option abbreviation

    Raises:
        ProtocolError: if the consumed byte count does not match `length`.
    """
    counter = 0
    optkind = list()
    options = dict()
    while counter < length:
        # a zero/empty code terminates the option list early
        code = self._read_unpack(1)
        if not code:
            break
        # look up abbreviation/description; unknown codes fall back to 'none'
        abbr, desc = _HOPOPT_OPT.get(code, ('none', 'Unassigned'))
        data = _HOPOPT_PROC(abbr)(self, code, desc=desc)
        enum = _OPT_TYPE.get(code)
        counter += data['length']
        if enum in optkind:
            # repeated option: collect occurrences into a tuple of Info
            if isinstance(options[abbr], tuple):
                options[abbr] += (Info(data),)
            else:
                options[abbr] = (Info(options[abbr]), Info(data))
        else:
            optkind.append(enum)
            options[abbr] = data
    # the options must consume exactly `length` bytes
    if counter != length:
        raise ProtocolError(f'{self.alias}: invalid format')
    return tuple(optkind), options
Read HOPOPT options. Positional arguments: * length -- int, length of options Returns: * dict -- extracted HOPOPT options
def _update_metadata_for_video(self, metadata_href, video):
    """Refresh stored metadata for `video` when Brightcove reports a newer
    `updated_at` than the bundle currently holds."""
    current = self.clarify_client.get_metadata(metadata_href)
    stored = current.get('data')
    if stored.get('updated_at') == video['updated_at']:
        return
    self.log('Updating metadata for video {0}'.format(video['id']))
    if not self.dry_run:
        fresh = self._metadata_from_video(video)
        self.clarify_client.update_metadata(metadata_href, metadata=fresh)
        self.sync_stats['updated'] += 1
Update the metadata for the video if video has been updated in Brightcove since the bundle metadata was last updated.
def _pipeline_cell(args, cell_body):
    """Implements the BigQuery pipeline cell magic (validate / execute / deploy).

    Args:
        args: arguments following '%bigquery pipeline'.
        cell_body: optional cell contents interpreted as YAML/JSON or SQL.
    Returns:
        QueryStats for a dry run, or the query results for a run.
    Raises:
        Exception: for the unsupported 'deploy' action.
    """
    action = args['action']
    if action == 'deploy':
        raise Exception('Deploying a pipeline is not yet supported')
    # collect UDFs from the notebook environment so the query can resolve them
    udf_env = {}
    for name, item in datalab.utils.commands.notebook_environment().items():
        if isinstance(item, datalab.bigquery._udf.UDF):
            udf_env[name] = item
    query = _get_query_argument(args, cell_body, udf_env)
    if args['verbose']:
        print(query.sql)
    if action == 'dryrun':
        print(query.sql)
        result = query.execute_dry_run()
        return datalab.bigquery._query_stats.QueryStats(
            total_bytes=result['totalBytesProcessed'],
            is_cached=result['cacheHit'])
    if action == 'run':
        return query.execute(args['target'],
                             table_mode=args['mode'],
                             use_cache=not args['nocache'],
                             allow_large_results=args['large'],
                             dialect=args['dialect'],
                             billing_tier=args['billing']).results
Implements the BigQuery cell magic used to validate, execute or deploy BQ pipelines. The supported syntax is: %%bigquery pipeline [-q|--sql <query identifier>] <other args> <action> [<YAML or JSON cell_body or inline SQL>] Args: args: the arguments following '%bigquery pipeline'. cell_body: optional contents of the cell interpreted as YAML or JSON. Returns: The QueryResultsTable
def THUMBNAIL_OPTIONS(self):
    """Thumbnail options for uploaded images: crop to a configurable
    2-tuple size (setting DJNG_THUMBNAIL_SIZE, default (200, 200))."""
    from django.core.exceptions import ImproperlyConfigured

    size = self._setting('DJNG_THUMBNAIL_SIZE', (200, 200))
    well_formed = (isinstance(size, (list, tuple)) and len(size) == 2
                   and isinstance(size[0], int) and isinstance(size[1], int))
    if not well_formed:
        raise ImproperlyConfigured("'DJNG_THUMBNAIL_SIZE' must be a 2-tuple of integers.")
    return {'crop': True, 'size': size}
Set the size as a 2-tuple for thumbnailed images after uploading them.
def generic_stitch(cube, arrays):
    """Attach each named array to `cube`.

    For every (name, array) pair, ensures the cube's class has an
    ArrayDescriptor registered under that name, then sets the array as an
    instance attribute.

    :param cube: object whose class receives the descriptors.
    :param arrays: mapping of attribute name -> array.
    """
    # bug fix: dict.iteritems() is Python 2 only; .items() works everywhere
    for name, ary in arrays.items():
        if name not in type(cube).__dict__:
            setattr(type(cube), name, ArrayDescriptor(name))
        setattr(cube, name, ary)
Creates descriptors associated with array name and then sets the array as a member variable
def get_public_inline_preview_url(self, id, submission_id=None):
    """Determine the URL that should be used for inline preview of the file.

    GET /api/v1/files/{id}/public_url; `submission_id` is passed as a query
    parameter when provided.
    """
    path = {"id": id}
    data = {}
    params = {}
    if submission_id is not None:
        params["submission_id"] = submission_id
    self.logger.debug("GET /api/v1/files/{id}/public_url with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/files/{id}/public_url".format(**path), data=data, params=params, no_data=True)
Get public inline preview url. Determine the URL that should be used for inline preview of the file.
def _check_type(self, check_type, properties): if 'PrimitiveType' in properties: return properties['PrimitiveType'] == check_type if properties['Type'] == 'List': if 'ItemType' in properties: return properties['ItemType'] == check_type else: return properties['PrimitiveItemType'] == check_type return False
Decode a properties type looking for a specific type.
def do_add_item(self, args):
    """Add item command: echo whichever item argument was supplied
    (food, sport, other), falling back to 'no items'."""
    chosen = args.food or args.sport or args.other or 'no items'
    self.poutput("You added {}".format(chosen))
Add item command help
def _close(self, id):
    """Respond to a CLOSE command: terminate the identified connection with
    a clean CONNECTION_DONE and return an empty acknowledgement."""
    self.connections[id].connectionLost(Failure(CONNECTION_DONE))
    return {}
Respond to a CLOSE command, dumping some data onto the stream. As with WRITE, this returns an empty acknowledgement. An occurrence of I{Close} on the wire, together with the response generated by this method, might have this appearance:: C: -Command: Close C: -Ask: 1 C: Id: glyph@divmod.com->radix@twistedmatrix.com:q2q-example:0 C: S: -Answer: 1 S:
def get_message_content(self, message_id, timeout=None):
    """Call the get-content API and return a Content wrapper.

    Retrieves image/video/audio data sent by users via a streamed GET to
    /v2/bot/message/{message_id}/content.

    :param str message_id: Message ID
    :param timeout: (optional) request timeout; float or (connect, read) tuple
    :return: Content instance wrapping the streamed response
    """
    endpoint = '/v2/bot/message/{message_id}/content'.format(message_id=message_id)
    response = self._get(endpoint, stream=True, timeout=timeout)
    return Content(response)
Call get content API. https://devdocs.line.me/en/#get-content Retrieve image, video, and audio data sent by users. :param str message_id: Message ID :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a (connect timeout, read timeout) float tuple. Default is self.http_client.timeout :type timeout: float | tuple(float, float) :rtype: :py:class:`linebot.models.responses.Content` :return: Content instance
def subscriptions(self):
    """Fetch (lazily, cached on first access) the Subscriptions associated
    with this user."""
    if not hasattr(self, '_subscriptions'):
        self._subscriptions = Subscriptions(self.resource.subscriptions,
                                            self.client)
    return self._subscriptions
Fetch and return Subscriptions associated with this user.
def draw_str(self, x, y, string, fg=Ellipsis, bg=Ellipsis):
    """Draw a string starting at (x, y), wrapping at the right edge.

    Wrapping past the bottom of the console raises TDLError mid-stream, but
    the characters already yielded are still written out. \r and \n are drawn
    as normal tiles; no special encoding is done.

    Args:
        x (int): starting x-coordinate.
        y (int): starting y-coordinate.
        string: a string or iterable of character codes.
        fg: foreground color (Ellipsis keeps the console default).
        bg: background color (Ellipsis keeps the console default).

    Raises:
        TDLError: when drawing runs past the bottom row.
    """
    x, y = self._normalizePoint(x, y)
    fg, bg = _format_color(fg, self._fg), _format_color(bg, self._bg)
    width, height = self.get_size()
    # default arguments freeze the current values inside the generator
    def _drawStrGen(x=x, y=y, string=string, width=width, height=height):
        for char in _format_str(string):
            if y == height:
                # past the last row: stop with an error after partial output
                raise TDLError('End of console reached.')
            yield((x, y), char)
            x += 1
            if x == width:
                # wrap to the first column of the next row
                x = 0
                y += 1
    self._set_batch(_drawStrGen(), fg, bg)
Draws a string starting at x and y. A string that goes past the right side will wrap around. A string wrapping to below the console will raise :any:`tdl.TDLError` but will still be written out. This means you can safely ignore the errors with a try..except block if you're fine with partially written strings. \\r and \\n are drawn on the console as normal character tiles. No special encoding is done and any string will translate to the character table as is. For a string drawing operation that respects special characters see :any:`print_str`. Args: x (int): x-coordinate to start at. y (int): y-coordinate to start at. string (Union[Text, Iterable[int]]): A string or an iterable of numbers. Special characters are ignored and rendered as any other character. fg (Optional[Union[Tuple[int, int, int], int, Ellipsis]]) bg (Optional[Union[Tuple[int, int, int], int, Ellipsis]]) Raises: AssertionError: Having x or y values that can't be placed inside of the console will raise an AssertionError. You can use always use ``((x, y) in console)`` to check if a tile is drawable. .. seealso:: :any:`print_str`
def _precesion(date): t = date.change_scale('TT').julian_century zeta = (2306.2181 * t + 0.30188 * t ** 2 + 0.017998 * t ** 3) / 3600. theta = (2004.3109 * t - 0.42665 * t ** 2 - 0.041833 * t ** 3) / 3600. z = (2306.2181 * t + 1.09468 * t ** 2 + 0.018203 * t ** 3) / 3600. return zeta, theta, z
Precession in degrees
def update(self, callback=None, errback=None, **kwargs):
    """Update the monitor configuration from keyword arguments.

    Raises MonitorException if the monitor has not been loaded yet. On a
    successful REST update the local data is refreshed and either the
    callback result or self is returned.
    """
    if not self.data:
        raise MonitorException('monitor not loaded')

    def on_success(result, *args):
        self.data = result
        return callback(self) if callback else self

    return self._rest.update(self.data['id'], {}, callback=on_success,
                             errback=errback, **kwargs)
Update monitor configuration. Pass a list of keywords and their values to update.
def run_kernel(self, func, gpu_args, threads, grid):
    """Run the OpenCL kernel `func` and block until it completes.

    :param func: an OpenCL kernel callable.
    :param gpu_args: kernel arguments (buffers / scalars), in code order.
    :param threads: work-group size per dimension (3-tuple).
    :param grid: number of work groups per dimension (3-tuple).
    """
    # OpenCL expects the total NDRange size, i.e. groups * group-size
    global_size = (grid[0] * threads[0],
                   grid[1] * threads[1],
                   grid[2] * threads[2])
    event = func(self.queue, global_size, threads, *gpu_args)
    event.wait()
runs the OpenCL kernel passed as 'func' :param func: An OpenCL Kernel :type func: pyopencl.Kernel :param gpu_args: A list of arguments to the kernel, order should match the order in the code. Allowed values are either variables in global memory or single values passed by value. :type gpu_args: list( pyopencl.Buffer, numpy.int32, ...) :param threads: A tuple listing the number of work items in each dimension of the work group. :type threads: tuple(int, int, int) :param grid: A tuple listing the number of work groups in each dimension of the NDRange. :type grid: tuple(int, int)
def execute(self):
    """Run the Qt main event loop for all main windows and return its
    exit code."""
    logger.info("Starting Argos event loop...")
    exit_code = self.qApplication.exec_()
    logger.info("Argos event loop finished with exit code: {}".format(exit_code))
    return exit_code
Executes all main windows by starting the Qt main application
def persist_booking(booking, user):
    """Tie an in-progress session booking to `user` at login time.

    Any other in-progress bookings of this user are deleted first (a user is
    assumed to have at most one), then the session link is dropped and the
    booking is saved against the user. Does nothing when booking is None.
    """
    if booking is None:
        return
    stale = Booking.objects.filter(
        user=user, booking_status__slug='inprogress').exclude(pk=booking.pk)
    stale.delete()
    booking.session = None
    booking.user = user
    booking.save()
Ties an in-progress booking from a session to a user when the user logs in. If we don't do this, the booking will be lost, because on a login, the old session will be deleted and a new one will be created. Since the booking has a FK to the session, it would be deleted as well when the user logs in. We assume that a user can only have one booking that is in-progress. Therefore we will delete any existing in-progress bookings of this user before tying the one from the session to the user. TODO: Find a more generic solution for this, as this assumes that there is a status called inprogress and that a user can only have one such booking. :param booking: The booking that should be tied to the user. :user: The user the booking should be tied to.
def filter_objects(self, objects, perm=None):
    """Keep only objects for which the user holds `perm`.

    When perm is None, the model's 'view' permission name is used.
    Returns a filter iterator over `objects`.
    """
    if perm is None:
        perm = build_permission_name(self.model_class, 'view')

    def _allowed(candidate):
        return self.user.has_perm(perm, obj=candidate)

    return filter(_allowed, objects)
Return only objects with specified permission in objects list. If perm not specified, 'view' perm will be used.
def setReferenceVoltage(self, caldb, calv):
    """Set the calibration reference point used to compute output amplitudes.

    :param caldb: calibration intensity in dB SPL
    :type caldb: float
    :param calv: voltage that produced that calibration intensity
    :type calv: float
    """
    self.caldb, self.calv = caldb, calv
Sets the reference point to determine what outgoing voltage will produce what intensity, used to calculate the proper output amplitude of components :param caldb: calibration intensity in dbSPL :type caldb: float :param calv: calibration voltage that was used to record the intensity provided :type calv: float
def list_projects(self):
    """Return the list of projects owned by the user, each passed through
    the project formatter."""
    payload = self._run(url_path="projects/list")
    raw = payload['result'].get('projects', [])
    return [self._project_formatter(entry) for entry in raw]
Returns the list of projects owned by user.
def get_monomials(variables, degree):
    """Generate all noncommutative monomials up to `degree`.

    :param variables: noncommutative sympy operators to build monomials from.
    :param degree: maximum degree; -1 yields an empty list.
    :returns: list of monomials (adjoints included for non-Hermitian vars).
    """
    if degree == -1:
        return []
    if not variables:
        return [S.One]
    # prepend the scalar 1 so each expansion round can also keep a monomial
    padded = variables[:]
    padded.insert(0, 1)
    monomials = [S.One]
    monomials.extend(var for var in variables)
    for var in variables:
        if not is_hermitian(var):
            monomials.append(var.adjoint())
    for _ in range(1, degree):
        candidates = []
        for var in padded:
            for tail in monomials:
                candidates.append(var * tail)
                if var != 1 and not is_hermitian(var):
                    candidates.append(var.adjoint() * tail)
        monomials = unique(candidates[:])
    return monomials
Generates all noncommutative monomials up to a degree :param variables: The noncommutative variables to generate monomials from :type variables: list of :class:`sympy.physics.quantum.operator.Operator` or :class:`sympy.physics.quantum.operator.HermitianOperator`. :param degree: The maximum degree. :type degree: int. :returns: list of monomials.
def serialize(self):
    """Return a serializable dict view of this result, recursing into
    children."""
    serialized_children = [child.serialize() for child in self.children]
    return {
        'name': self.name,
        'weight': self.weight,
        'value': self.value,
        'msgs': self.msgs,
        'children': serialized_children,
    }
Returns a serializable dictionary that represents the result object
def works(self, member_id):
    """Return a Works iterable for the given member.

    args: member_id (int) — the member identifier.
    return: Works() scoped to this member's endpoint context.
    """
    member_context = '%s/%s' % (self.ENDPOINT, str(member_id))
    return Works(context=member_context)
This method retrieves an iterable of Works of the given member. args: Member ID (Integer) return: Works()
def SetDecodedStreamSize(self, decoded_stream_size):
    """Set the decoded stream size, when it can be determined separately.

    Args:
        decoded_stream_size (int): size of the decoded stream in bytes.

    Raises:
        IOError: if the file-like object is already open.
        ValueError: if the decoded stream size is negative.
    """
    if self._is_open:
        raise IOError('Already open.')
    if decoded_stream_size < 0:
        raise ValueError((
            'Invalid decoded stream size: {0:d} value out of '
            'bounds.').format(decoded_stream_size))
    self._decoded_stream_size = decoded_stream_size
Sets the decoded stream size. This function is used to set the decoded stream size if it can be determined separately. Args: decoded_stream_size (int): size of the decoded stream in bytes. Raises: IOError: if the file-like object is already open. OSError: if the file-like object is already open. ValueError: if the decoded stream size is invalid.
def ignore_further_calls_to_server(self, server):
    """Drop `server` from the API server list so it is no longer called,
    logging the removal."""
    log.error(u'ignoring further calls to {}'.format(server))
    self.api.servers.remove(server)
Takes a server out of the list.
def sample_tmatrix(C, nsample=1, nsteps=None, reversible=False, mu=None,
                   T0=None, return_statdist=False):
    r"""Sample transition matrices from the posterior distribution.

    :param C: (M, M) count matrix (ndarray or scipy.sparse; sparse input is
        densified with a warning, since only dense sampling is supported).
    :param nsample: number of samples to draw.
    :param nsteps: Gibbs sweeps per returned sample (None = auto).
    :param reversible: restrict sampling to detailed-balance matrices.
    :param mu: fixed stationary distribution to condition on.
    :param T0: starting transition matrix for the MC chain.
    :param return_statdist: also return the stationary distribution.
    :return: sampled transition matrix (or matrices when nsample > 1).
    """
    # bug fix: a stray bare `r` (the stranded raw-string prefix of the
    # docstring) evaluated here as an undefined name
    if issparse(C):
        _showSparseConversionWarning()
        C = C.toarray()
    sampler = tmatrix_sampler(C, reversible=reversible, mu=mu, T0=T0,
                              nsteps=nsteps)
    return sampler.sample(nsamples=nsample, return_statdist=return_statdist)
r"""samples transition matrices from the posterior distribution Parameters ---------- C : (M, M) ndarray or scipy.sparse matrix Count matrix nsample : int number of samples to be drawn nstep : int, default=None number of full Gibbs sampling sweeps internally done for each sample returned. This option is meant to ensure approximately uncorrelated samples for every call to sample(). If None, the number of steps will be automatically determined based on the other options and the matrix size. nstep>1 will only be used for reversible sampling, because nonreversible sampling generates statistically independent transition matrices every step. reversible : bool If true sample from the ensemble of transition matrices restricted to those obeying a detailed balance condition, else draw from the whole ensemble of stochastic matrices. mu : array_like A fixed stationary distribution. Transition matrices with that stationary distribution will be sampled T0 : ndarray, shape=(n, n) or scipy.sparse matrix Starting point of the MC chain of the sampling algorithm. Has to obey the required constraints. return_statdist : bool, optional, default = False if true, will also return the stationary distribution. Returns ------- P : ndarray(n,n) or array of ndarray(n,n) sampled transition matrix (or multiple matrices if nsample > 1) Notes ----- The transition matrix sampler generates transition matrices from the posterior distribution. The posterior distribution is given as a product of Dirichlet distributions .. math:: \mathbb{P}(T|C) \propto \prod_{i=1}^{M} \left( \prod_{j=1}^{M} p_{ij}^{c_{ij}} \right) See also -------- tmatrix_sampler
def featured_event_query(self, **kwargs):
    """Query the Yelp Featured Event API.

    Requires either a 'location' keyword or both 'latitude' and 'longitude'.
    """
    has_location = kwargs.get('location')
    has_coords = kwargs.get('latitude') and kwargs.get('longitude')
    if not has_location and not has_coords:
        raise ValueError('A valid location (parameter "location") or latitude/longitude combination '
                         '(parameters "latitude" and "longitude") must be provided.')
    return self._query(FEATURED_EVENT_API_URL, **kwargs)
Query the Yelp Featured Event API. documentation: https://www.yelp.com/developers/documentation/v3/featured_event required parameters: * one of either: * location - text specifying a location to search for * latitude and longitude
def _check_valid_basic(self, get_params): try: if get_params(self.variable): return self.default except: pass return not self.default
Simple check that the variable is set
def save_ds9(output, filename):
    """Save ds9 region output into filename.

    Parameters
    ----------
    output : str
        Full output to be exported as a ds9 region file.
    filename : str
        Output file name.
    """
    # `with` guarantees the handle is closed even if the write fails
    with open(filename, 'wt') as ds9_file:
        ds9_file.write(output)
Save ds9 region output info filename. Parameters ---------- output : str String containing the full output to be exported as a ds9 region file. filename : str Output file name.
def get_chatlist(chatfile):
    """Try reading ids of saved chats from `chatfile`.

    Returns an empty set when no file is given or when reading fails.
    """
    if not chatfile:
        return set()
    try:
        with open(chatfile) as handle:
            return {int(line) for line in handle}
    except (OSError, IOError) as exc:
        LOGGER.error('could not load saved chats:\n%s', exc)
        return set()
Try reading ids of saved chats from file. If we fail, return empty set
def add_include(self, name, included_scope, module):
    """Register an imported module into this scope under `name`.

    The name must not already be registered; the scope and its surface are
    both recorded.
    """
    assert name not in self.included_scopes
    self.included_scopes[name] = included_scope
    self.add_surface(name, module)
Register an imported module into this scope. Raises ``ThriftCompilerError`` if the name has already been used.
def attach_pipeline(self, pipeline, name, chunks=None, eager=True):
    """Register a pipeline to be computed at the start of each day.

    chunks may be None (default schedule: 5 days then 126-day chunks), an
    int (fixed chunk size), or an iterator of chunk sizes. Raises
    DuplicatePipelineName when `name` is already attached. Returns the
    pipeline unchanged.
    """
    if chunks is None:
        chunk_schedule = chain([5], repeat(126))
    elif isinstance(chunks, int):
        chunk_schedule = repeat(chunks)
    else:
        chunk_schedule = chunks
    if name in self._pipelines:
        raise DuplicatePipelineName(name=name)
    self._pipelines[name] = AttachedPipeline(pipeline, iter(chunk_schedule), eager)
    return pipeline
Register a pipeline to be computed at the start of each day. Parameters ---------- pipeline : Pipeline The pipeline to have computed. name : str The name of the pipeline. chunks : int or iterator, optional The number of days to compute pipeline results for. Increasing this number will make it longer to get the first results but may improve the total runtime of the simulation. If an iterator is passed, we will run in chunks based on values of the iterator. Default is True. eager : bool, optional Whether or not to compute this pipeline prior to before_trading_start. Returns ------- pipeline : Pipeline Returns the pipeline that was attached unchanged. See Also -------- :func:`zipline.api.pipeline_output`
def index_to_coords(index, shape):
    """Convert a flat (row-major) index into coordinates for `shape`.

    :param index: flat index into an array of the given shape.
    :param shape: tuple of dimension sizes.
    :return: tuple of per-dimension coordinates.
    """
    coords = []
    for i in range(1, len(shape)):  # bug fix: xrange is Python 2 only
        # np.product was removed in NumPy 2.0; np.prod is the supported name
        divisor = int(np.prod(shape[i:]))
        value = index // divisor
        coords.append(value)
        index -= value * divisor
    coords.append(index)
    return tuple(coords)
convert index to coordinates given the shape
def remove_leading_zeros(num: str) -> str:
    """Strip leading zeros while preserving an 'M' or '-' prefix.

    An empty string is returned unchanged; an all-zero value collapses
    to '0'.
    """
    if not num:
        return num
    for prefix in ('M', '-'):
        if num.startswith(prefix):
            stripped = prefix + num[1:].lstrip('0')
            break
    else:
        stripped = num.lstrip('0')
    return '0' if stripped in ('', 'M', '-') else stripped
Strips zeros while handling -, M, and empty strings
def _assert_occurrence(probe, target, amount=1): occ = len(probe) if occ > amount: msg = 'more than' elif occ < amount: msg = 'less than' elif not occ: msg = 'no' else: msg = None if msg: raise CommandExecutionError('Found {0} expected occurrences in "{1}" expression'.format(msg, target)) return occ
Raise an exception, if there are different amount of specified occurrences in src.
def print_mhc_peptide(neoepitope_info, peptides, pepmap, outfile, netmhc=False):
    """Print one neoepitope record (from merge_mhc_peptide_calls) to outfile.

    :param neoepitope_info: namedtuple-like record with allele, pept, pred,
        core, normal_pept, normal_pred (and peptide_name for netmhc records).
    :param dict peptides: pepname -> peptide sequence for all IARs considered.
    :param dict pepmap: contents of the peptide map file.
    :param file outfile: open file descriptor for output.
    :param bool netmhc: whether this record is a netMHCIIpan record (these
        carry their peptide name directly instead of being matched by
        substring).
    """
    if netmhc:
        peptide_names = [neoepitope_info.peptide_name]
    else:
        peptide_names = [pep_name for pep_name, sequence in peptides.items()
                         if neoepitope_info.pept in sequence]
    ni = neoepitope_info._asdict()
    # an all-N normal peptide means no usable normal call was made
    if ni['normal_pept'] == 'N' * len(ni['pept']):
        ni['normal_pept'] = ni['normal_pred'] = 'NA'
    for peptide_name in peptide_names:
        print('{ni[allele]}\t'
              '{ni[pept]}\t'
              '{ni[normal_pept]}\t'
              '{pname}\t'
              '{ni[core]}\t'
              '0\t'
              '{ni[tumor_pred]}\t'
              '{ni[normal_pred]}\t'
              '{pmap}'.format(ni=ni, pname=peptide_name,
                              pmap=pepmap[peptide_name]),
              file=outfile)
    return None
Accept data about one neoepitope from merge_mhc_peptide_calls and print it to outfile. This is a generic module to reduce code redundancy. :param pandas.core.frame neoepitope_info: object containing with allele, pept, pred, core, normal_pept, normal_pred :param dict peptides: Dict of pepname: pep sequence for all IARS considered :param dict pepmap: Dict containing teh contents from the peptide map file. :param file outfile: An open file descriptor to the output file :param bool netmhc: Does this record correspond to a netmhcIIpan record? These are processed differently.
def parse_trailer(header):
    """Parse the "Trailer" header into a list of comma-separated token
    names."""
    pos = 0
    names = []
    while pos < len(header):
        token, pos = expect_re(re_token, header, pos)
        if token:
            names.append(token)
        _, pos = accept_ws(header, pos)
        _, pos = expect_lit(',', header, pos)
        _, pos = accept_ws(header, pos)
    return names
Parse the "Trailer" header.
def inspect(self, tab_width=2, ident_char='-'):
    """Render the project file tree rooted at self.path as a string.

    :param tab_width: indent width per nesting level.
    :param ident_char: character used to show indentation level.
    :returns: a newline-joined string describing the project structure.
    """
    startpath = self.path
    rendered = []
    for root, dirs, files in os.walk(startpath):
        depth = root.replace(startpath, '').count(os.sep)
        indent = ident_char * tab_width * depth
        basename = os.path.basename(root)
        if depth == 0:
            rendered.append('{}{}/'.format(indent, basename))
        else:
            rendered.append('|{}{}/'.format(indent, basename))
        file_indent = ident_char * tab_width * (depth + 1)
        for fname in files:
            rendered.append('|{}{}'.format(file_indent, fname))
    return '\n'.join(rendered)
Inspects a project file structure based on the instance folder property. :param tab_width: width size for subfolders and files. :param ident_char: char to be used to show indentation level Returns A string containing the project structure.
async def delete_shade_from_scene(self, shade_id, scene_id):
    """Delete a shade from a scene."""
    query = {ATTR_SCENE_ID: scene_id, ATTR_SHADE_ID: shade_id}
    return await self.request.delete(self._base_path, params=query)
Delete a shade from a scene.
def SendSms(self, *TargetNumbers, **Properties):
    """Create and send an SMS message.

    :Parameters:
        TargetNumbers : str
            One or more target SMS numbers.
        Properties
            Message properties; same names as `SmsMessage` properties.

    :return: the sent `SmsMessage` object.
    :raises TypeError: if a keyword does not name an SmsMessage property.
    """
    sms = self.CreateSms(smsMessageTypeOutgoing, *TargetNumbers)
    for name, value in Properties.items():
        if isinstance(getattr(sms.__class__, name, None), property):
            setattr(sms, name, value)
        else:
            # bug fix: the original referenced undefined `prop` here, turning
            # the intended TypeError into a NameError
            raise TypeError('Unknown property: %s' % name)
    sms.Send()
    return sms
Creates and sends an SMS message. :Parameters: TargetNumbers : str One or more target SMS numbers. Properties Message properties. Properties available are same as `SmsMessage` object properties. :return: An sms message object. The message is already sent at this point. :rtype: `SmsMessage`
def inverted(self):
    """Return an inverted copy of this query.

    :return <orb.Query>
    """
    flipped = self.copy()
    flipped.setInverted(not self.isInverted())
    return flipped
Returns an inverted copy of this query. :return <orb.Query>
def seek(self, offset, whence=SEEK_SET):
    """Reposition the file pointer.

    NOTE(review): for SEEK_END this seeks to filesize - offset (offset
    measured back from the end), not the usual filesize + offset
    convention — confirm callers pass positive offsets here. Unknown
    `whence` values are silently ignored.
    """
    if whence == SEEK_SET:
        self.__sf.seek(offset)
    elif whence == SEEK_CUR:
        self.__sf.seek(self.tell() + offset)
    elif whence == SEEK_END:
        self.__sf.seek(self.__sf.filesize - offset)
Reposition the file pointer.
def from_request(cls, header_data, ignore_bad_cookies=False):
    """Construct a Cookies object from request header data."""
    jar = cls()
    jar.parse_request(header_data, ignore_bad_cookies=ignore_bad_cookies)
    return jar
Construct a Cookies object from request header data.
def _from_dict(cls, _dict):
    """Initialize a SpeechRecognitionResult object from a json dictionary.

    Requires a 'final'/'final_results' flag and 'alternatives';
    'keywords_result' and 'word_alternatives' are optional.
    """
    args = {}
    if 'final' not in _dict and 'final_results' not in _dict:
        raise ValueError(
            'Required property \'final\' not present in SpeechRecognitionResult JSON'
        )
    args['final_results'] = _dict.get('final') or _dict.get('final_results')
    if 'alternatives' not in _dict:
        raise ValueError(
            'Required property \'alternatives\' not present in SpeechRecognitionResult JSON'
        )
    args['alternatives'] = [
        SpeechRecognitionAlternative._from_dict(x)
        for x in (_dict.get('alternatives'))
    ]
    if 'keywords_result' in _dict:
        args['keywords_result'] = _dict.get('keywords_result')
    if 'word_alternatives' in _dict:
        args['word_alternatives'] = [
            WordAlternativeResults._from_dict(x)
            for x in (_dict.get('word_alternatives'))
        ]
    return cls(**args)
Initialize a SpeechRecognitionResult object from a json dictionary.
def vinet_v(p, v0, k0, k0p, min_strain=0.01):
    """Find unit-cell volume at the given pressure (vectorized wrapper
    around vinet_v_single).

    :param p: pressure in GPa
    :param v0: unit-cell volume in A^3 at 1 bar
    :param k0: bulk modulus at reference conditions
    :param k0p: pressure derivative of the bulk modulus
    :param min_strain: minimum v/v0 to search volume for
    :return: unit-cell volume at high pressure in A^3
    """
    if isuncertainties([p, v0, k0, k0p]):
        vectorized = np.vectorize(uct.wrap(vinet_v_single), excluded=[1, 2, 3, 4])
    else:
        vectorized = np.vectorize(vinet_v_single, excluded=[1, 2, 3, 4])
    return vectorized(p, v0, k0, k0p, min_strain=min_strain)
find volume at given pressure :param p: pressure in GPa :param v0: unit-cell volume in A^3 at 1 bar :param k0: bulk modulus at reference conditions :param k0p: pressure derivative of bulk modulus at reference conditions :param min_strain: defining minimum v/v0 value to search volume for :return: unit cell volume at high pressure in A^3 :note: wrapper function vetorizing vinet_v_single
def _set_key(self): if self.roll: self.date = time.strftime(self.date_format, time.gmtime(self.start_time)) self.final_key = '{}:{}'.format(self.key, self.date) else: self.final_key = self.key
sets the final key to be used currently
def send_one_ping(sock: socket, dest_addr: str, icmp_id: int, seq: int, size: int):
    """Send one ICMP echo request to the given destination.

    ICMP header: type (8b), code (8b), checksum (16b), id (16b), seq (16b).
    ICMP payload: a timestamp (double) followed by 'Q' padding bytes.

    Args:
        sock: raw socket to send on.
        dest_addr: IP address or domain name (resolved here).
        icmp_id: ICMP packet id, usually the pid.
        seq: ICMP packet sequence number.
        size: payload size in bytes (header size is subtracted from the
            padding here — assumes callers pass total-minus-time sizing;
            TODO confirm against the docstring's "payload only" claim).

    Raises:
        HostUnknown: if a domain name cannot be resolved.
    """
    try:
        dest_addr = socket.gethostbyname(dest_addr)
    except socket.gaierror as e:
        print("Cannot resolve {}: Unknown host".format(dest_addr))
        raise errors.HostUnknown(dest_addr) from e
    # first pass: build the header with a zero checksum placeholder
    pseudo_checksum = 0
    icmp_header = struct.pack(ICMP_HEADER_FORMAT, IcmpType.ECHO_REQUEST, ICMP_DEFAULT_CODE, pseudo_checksum, icmp_id, seq)
    padding = (size - struct.calcsize(ICMP_TIME_FORMAT) - struct.calcsize(ICMP_HEADER_FORMAT)) * "Q"
    icmp_payload = struct.pack(ICMP_TIME_FORMAT, default_timer()) + padding.encode()
    # second pass: recompute the header with the real checksum (network order)
    real_checksum = checksum(icmp_header + icmp_payload)
    icmp_header = struct.pack(ICMP_HEADER_FORMAT, IcmpType.ECHO_REQUEST, ICMP_DEFAULT_CODE, socket.htons(real_checksum), icmp_id, seq)
    packet = icmp_header + icmp_payload
    sock.sendto(packet, (dest_addr, 0))
Sends one ping to the given destination. ICMP Header (bits): type (8), code (8), checksum (16), id (16), sequence (16) ICMP Payload: time (double), data ICMP Wikipedia: https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol Args: sock: Socket. dest_addr: The destination address, can be an IP address or a domain name. Ex. "192.168.1.1"/"example.com" icmp_id: ICMP packet id, usually is same as pid. seq: ICMP packet sequence, usually increases from 0 in the same process. size: The ICMP packet payload size in bytes. Note this is only for the payload part. Raises: HostUnknown: If destination address is a domain name and cannot be resolved.
def nextpow2(value):
    """Compute the nearest power of two greater than or equal to `value`.

    Values in (0, 1) map to 1, zero maps to 0, and negative values raise
    ValueError.
    """
    if value >= 1:
        exponent = np.ceil(np.log2(value)).astype(int)
        return 2 ** exponent
    if value > 0:
        return 1
    if value == 0:
        return 0
    raise ValueError('Value must be positive')
Compute the nearest power of two greater or equal to the input value
def _create_complete_graph(node_ids):
    """Create a complete undirected graph over the given node ids.

    Args:
        node_ids: a list of node ids.
    Returns:
        A networkx.Graph with an edge between every pair of nodes.
    """
    graph = nx.Graph()
    graph.add_nodes_from(node_ids)
    for pair in combinations(node_ids, 2):
        graph.add_edge(*pair)
    return graph
Create a complete graph from the list of node ids. Args: node_ids: a list of node ids Returns: An undirected graph (as a networkx.Graph)
def update(self, duration):
    """Add a recorded duration to the histogram and mark the meter;
    negative durations are ignored."""
    if duration < 0:
        return
    self.histogram.update(duration)
    self.meter.mark()
Add a recorded duration.
def c_time_locale():
    """Context-manager body: switch LC_TIME to the C locale for the duration
    of the with-block, then restore the previous locale.

    NOTE(review): presumably decorated with @contextlib.contextmanager at the
    definition site — confirm in the original module.
    """
    saved = locale.getlocale(locale.LC_TIME)
    locale.setlocale(locale.LC_TIME, 'C')
    yield
    locale.setlocale(locale.LC_TIME, saved)
Context manager with C LC_TIME locale
def read_scoring_scheme(f, gap_open, gap_extend, gap1="-", gap2=None, **kwargs):
    """Initialize a scoring scheme from a blastz-style text blob.

    :param f: an open file object, or the name of a file to open.
    :param gap_open: gap-open penalty.
    :param gap_extend: gap-extension penalty.
    :param gap1: gap character for the first sequence.
    :param gap2: gap character for the second sequence (default: same).
    :return: the scoring scheme built by build_scoring_scheme.
    """
    close_it = False
    # bug fixes: `file()` is a Python 2 builtin (NameError on Python 3),
    # and type(f) == str should be an isinstance check
    if isinstance(f, str):
        f = open(f, "rt")
        close_it = True
    try:
        text = "".join([line for line in f])
    finally:
        # close even if reading fails, but only if we opened the file
        if close_it:
            f.close()
    return build_scoring_scheme(text, gap_open, gap_extend, gap1=gap1,
                                gap2=gap2, **kwargs)
Initialize scoring scheme from a file containing a blastz style text blob. f can be either a file or the name of a file.
def get_modifications(self):
    """Extract INDRA Modification Statements from the BioPAX model,
    appending one batch per modification class (the generic 'modification'
    type is skipped)."""
    for modtype, modclass in modtype_to_modclass.items():
        if modtype == 'modification':
            continue
        self.statements += self._get_generic_modification(modclass)
Extract INDRA Modification Statements from the BioPAX model. To extract Modifications, this method reuses the structure of BioPAX Pattern's org.biopax.paxtools.pattern.PatternBox.constrolsStateChange pattern with additional constraints to specify the type of state change occurring (phosphorylation, deubiquitination, etc.).
def check_column_existence(col_name, df, presence=True):
    """Check that `col_name` is (or is not) a column of `df`.

    Parameters
    ----------
    col_name : str
        Column whose presence in `df` is checked.
    df : pandas DataFrame
        Dataframe checked for the column.
    presence : bool, optional
        True checks for PRESENCE, False for ABSENCE. Default True.

    Returns
    -------
    None.
    """
    found = col_name in df.columns
    if presence and not found:
        raise ValueError("Ensure that `{}` is in `df.columns`.".format(col_name))
    if not presence and found:
        raise ValueError("Ensure that `{}` is not in `df.columns`.".format(col_name))
    return None
Checks whether or not `col_name` is in `df` and raises a helpful error msg if the desired condition is not met. Parameters ---------- col_name : str. Should represent a column whose presence in `df` is to be checked. df : pandas DataFrame. The dataframe that will be checked for the presence of `col_name`. presence : bool, optional. If True, then this function checks for the PRESENCE of `col_name` from `df`. If False, then this function checks for the ABSENCE of `col_name` in `df`. Default == True. Returns ------- None.
def _convert(self, image, output=None):
    """Private method for converting a single PNG image to a PDF, sized to
    the image's own dimensions."""
    with Image.open(image) as im:
        width, height = im.size
        canvas_objs = CanvasObjects()
        canvas_objs.add(CanvasImg(image, 1.0, w=width, h=height))
        drawer = WatermarkDraw(canvas_objs, tempdir=self.tempdir,
                               pagesize=(width, height))
        return drawer.write(output)
Private method for converting a single PNG image to a PDF.
def rdopkg_runner():
    """Build the default rdopkg action runner with the rdopkg action
    modules loaded and aliases filled in."""
    manager = ActionManager()
    manager.add_actions_modules(actions)
    manager.fill_aliases()
    return ActionRunner(action_manager=manager)
default rdopkg action runner including rdopkg action modules
def parse_create(prs, conn):
    """Wire the 'create' subcommand into the argument parser.

    Arguments:
        prs: argparse parser object.
        conn: dictionary of connection information.
    """
    subparser = prs.add_parser('create', help='create record of specific zone')
    set_option(subparser, 'domain')
    conn_options(subparser, conn)
    subparser.set_defaults(func=create)
Create record. Arguments: prs: parser object of argparse conn: dictionary of connection information
def create_logout_request(self, destination, issuer_entity_id, subject_id=None, name_id=None, reason=None, expire=None, message_id=0, consent=None, extensions=None, sign=False, session_indexes=None, sign_alg=None, digest_alg=None):
    """Construct a SAML LogoutRequest.

    :param destination: Destination of the request.
    :param issuer_entity_id: Entity ID of the IdP the request targets.
    :param subject_id: Identifier of the subject (used to derive name_id).
    :param name_id: A NameID instance identifying the subject.
    :param reason: URI reference indicating the reason for logout.
    :param expire: Time after which the recipient may discard the message.
    :param message_id: Request identifier.
    :param consent: Whether the principal has given consent.
    :param extensions: Possible extensions.
    :param sign: Whether the request should be signed.
    :param session_indexes: SessionIndex instances or plain values.
    :return: A LogoutRequest instance.
    :raises SAMLError: when no subject identification can be determined.
    """
    # derive a NameID from subject_id; IdPs map it through the user store
    if subject_id:
        if self.entity_type == "idp":
            name_id = NameID(text=self.users.get_entityid(subject_id, issuer_entity_id, False))
        else:
            name_id = NameID(text=subject_id)
    if not name_id:
        raise SAMLError("Missing subject identification")
    args = {}
    if session_indexes:
        # accept both SessionIndex instances and raw values
        sis = []
        for si in session_indexes:
            if isinstance(si, SessionIndex):
                sis.append(si)
            else:
                sis.append(SessionIndex(text=si))
        args["session_index"] = sis
    return self._message(LogoutRequest, destination, message_id, consent, extensions, sign, name_id=name_id, reason=reason, not_on_or_after=expire, issuer=self._issuer(), sign_alg=sign_alg, digest_alg=digest_alg, **args)
Constructs a LogoutRequest :param destination: Destination of the request :param issuer_entity_id: The entity ID of the IdP the request is target at. :param subject_id: The identifier of the subject :param name_id: A NameID instance identifying the subject :param reason: An indication of the reason for the logout, in the form of a URI reference. :param expire: The time at which the request expires, after which the recipient may discard the message. :param message_id: Request identifier :param consent: Whether the principal have given her consent :param extensions: Possible extensions :param sign: Whether the query should be signed or not. :param session_indexes: SessionIndex instances or just values :return: A LogoutRequest instance
def _config(self):
    """Value to be written to the device's config register: standby time
    bits (in normal mode) plus IIR filter bits."""
    register = 0
    if self.mode == MODE_NORMAL:
        register += (self._t_standby << 5)
    if self._iir_filter:
        register += (self._iir_filter << 2)
    return register
Value to be written to the device's config register
def render_payment_form(self):
    """Display the DirectPayment form for entering payment information."""
    self.context[self.form_context_name] = self.payment_form_cls()
    return TemplateResponse(self.request, self.payment_template, self.context)
Display the DirectPayment for entering payment information.
def setup_runner(self):
    """Build and return the ApplicationRunner instance for this service."""
    extra = {
        'config': self.config,
        'handlers': self.handlers,
    }
    return ApplicationRunner(
        url=self.config['transport_host'],
        realm=u'realm1',
        extra=extra,
    )
Set up and return the ApplicationRunner instance for this service.
def open_report_template_path(self):
    """Open a directory-picker dialog to choose the report template path."""
    current_path = self.leReportTemplatePath.text()
    chosen = QFileDialog.getExistingDirectory(
        self,
        self.tr('Templates directory'),
        current_path,
        QFileDialog.ShowDirsOnly)
    # Empty string means the user cancelled; keep the previous value.
    if chosen:
        self.leReportTemplatePath.setText(chosen)
Open File dialog to choose the report template path.
def instance(cls, interval=5):
    """Return the singleton messenger, creating it on first use.

    :param int interval: Milliseconds between message-processing runs.
        Only honoured on the first call; ignored once the instance exists.
    """
    existing = cls._instance
    if not existing:
        existing = _Messenger(interval)
        cls._instance = existing
    return existing
Returns the existing instance of messenger. If one does not exist it will be created and returned. :param int interval: Number of milliseconds between message-processing runs. Note that this parameter is used only the first time the instance is requested; on every subsequent call it is ignored because the existing instance of :class:`._Messenger` is returned.
def _run_automation(self, conf):
    """Run one automation pass: fill the form, run the scenario, delay.

    :param conf: Automation configuration passed through to each step.
    """
    # Locate the form described by conf and populate its fields.
    self._fill_form(self._find_form(conf))
    # Execute the configured scenario.
    self._run_scenario(conf)
    # Apply the configured delay.
    self._delay(conf)
1. Fill form. 2. Run scenario. 3. Delay.
def _initialize_trunk_interfaces_to_none(self, switch_ip, replay=True):
    """Initialize all configured interfaces on a Nexus switch.

    :param switch_ip: IP address of the switch to initialize.
    :param replay: When True, fetch all known interfaces; when False,
        restrict to configured-only interfaces (cfg_only=True).
    """
    try:
        # cfg_only is the inverse of replay: during replay we want every
        # interface the driver knows about, otherwise configured only.
        switch_ifs = self._mdriver._get_switch_interfaces(
            switch_ip, cfg_only=(False if replay else True))
        if not switch_ifs:
            LOG.debug("Skipping switch %s which has no configured "
                      "interfaces", switch_ip)
            return
        self._driver.initialize_all_switch_interfaces(
            switch_ifs, switch_ip)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.warning("Unable to initialize interfaces to "
                        "switch %(switch_ip)s",
                        {'switch_ip': switch_ip})
            self._mdriver.register_switch_as_inactive(switch_ip,
                                                      'replay init_interface')
            # NOTE(review): this early return sits inside
            # save_and_reraise_exception, which re-raises on context exit;
            # confirm whether the intent was to suppress the re-raise when
            # replay is enabled (that would need reraise=False).
            if self._mdriver.is_replay_enabled():
                return
Initialize all nexus interfaces to trunk allowed none.
def begin_application(self, req, res):
    """Entry point for the application middleware chain (asyncio).

    Schedules handling of the client request as a task on the event
    loop and returns immediately without awaiting its completion.
    """
    self.loop.create_task(self.http_application.handle_client_request(req, res))
Entry point for the application middleware chain for an asyncio event loop.
def branch_rate(self, filename=None):
    """Return the branch rate of the coverage report.

    With no *filename* the global rate is returned; otherwise the rate
    of that file's class element.
    """
    element = (self.xml if filename is None
               else self._get_class_element_by_filename(filename))
    return float(element.attrib['branch-rate'])
Return the global branch rate of the coverage report. If the `filename` file is given, return the branch rate of the file.
def init(parser=None):
    """Initialize the module's argument parser.

    Uses *parser* when provided, otherwise builds a fresh default
    argparse.ArgumentParser. Sets the module globals ``p`` and
    ``subparsers``.
    """
    global p, subparsers
    p = argparse.ArgumentParser() if parser is None else parser
    arg = p.add_argument
    subparsers = p.add_subparsers()
module needs to be initialized by 'init'. Can be called with parser to use a pre-built parser, otherwise a simple default parser is created
def folder_created_message(self, request, folder):
    """Flash a success message after a folder has been created.

    :param request: Current request, used to attach the flash message.
    :param folder: The newly created folder, interpolated into the text.
    """
    # Translate the constant msgid first, then interpolate. The previous
    # `_("...".format(folder))` built a per-folder msgid that could never
    # match a gettext catalog entry, silently defeating translation.
    messages.success(request, _("Folder {} was created").format(folder))
Send messages.success message after successful folder creation.
def parse_view(query):
    """Parse an asql query into a View object.

    Any WHERE clause is discarded before parsing, and a trailing
    semicolon is ensured.

    Args:
        query (str): asql query text.

    Returns:
        View: the parsed view.
    """
    # Truncate at the first 'where' (case-insensitive), if present.
    where_pos = query.lower().find('where')
    if where_pos != -1:
        query = query[:where_pos]
    if not query.endswith(';'):
        query = query.strip() + ';'
    parsed = _view_stmt.parseString(query)
    return View(parsed)
Parses asql query to view object. Args: query (str): asql query Returns: View instance: parsed view.
def create_os_dummy_rtr(self, tenant_id, fw_dict, is_fw_virt=False):
    """Create the dummy interface and attach it to the Openstack router.

    Attaches a dummy interface to the tenant's router (skipped for
    virtual firewalls) and records the outcome in the FW router DB.

    :param tenant_id: Tenant owning the router.
    :param fw_dict: Firewall description carrying 'router_id' and
        optionally 'tenant_name'.
    :param is_fw_virt: When True, no dummy interface is attached.
    :return: False on invalid router/interface data; True otherwise,
        including when attachment failed but the failure was recorded.
    """
    res = fw_const.OS_DUMMY_RTR_CREATE_SUCCESS
    tenant_name = fw_dict.get('tenant_name')
    # Pre-initialize so store_fw_db_router below never hits an unbound
    # local when an exception fires before these are assigned (the old
    # code raised UnboundLocalError, masking the real failure).
    net_id = subnet_id = None
    rtr_id = fw_dict.get('router_id')
    try:
        if rtr_id is None:
            LOG.error("Invalid router id, attaching dummy interface"
                      " failed")
            return False
        if not is_fw_virt:
            net_id, subnet_id = (
                self._attach_dummy_intf_rtr(tenant_id, tenant_name,
                                            rtr_id))
            if net_id is None or subnet_id is None:
                LOG.error("Invalid net_id or subnet_id, creating dummy"
                          " interface failed")
                return False
    except Exception as exc:
        LOG.error("Creation of Openstack Router failed "
                  "tenant %(tenant)s, Exception %(exc)s",
                  {'tenant': tenant_id, 'exc': str(exc)})
        res = fw_const.OS_DUMMY_RTR_CREATE_FAIL
    self.store_fw_db_router(tenant_id, net_id, subnet_id, rtr_id, res)
    return True
Create the dummy interface and attach it to router. Attach the dummy interface to the Openstack router and store the info in DB.
def close_pages_for_specific_sm_id(self, sm_id):
    """Close every states-editor tab belonging to state machine *sm_id*."""
    # Collect matching identifiers first (preserving the original
    # two-phase approach — close_page presumably mutates self.tabs).
    matching = [
        identifier for identifier, tab_info in self.tabs.items()
        if tab_info["state_m"].state.get_state_machine().state_machine_id == sm_id
    ]
    for identifier in matching:
        self.close_page(identifier, delete=False)
Closes all tabs of the states editor for a specific sm_id
def root(self) -> "GameNode":
    """Return the root node of the tree, i.e. the game itself."""
    current = self
    # Walk parent links until a node with no parent is reached.
    while current.parent:
        current = current.parent
    return current
Gets the root node, i.e., the game.
def reference_year(self, index):
    """Return the publication year of the reference at *index*.

    Falls back to the first 4-digit run in the date string when the
    date cannot be parsed; returns "" when no year can be extracted.
    """
    ref_date = self.reference_date(index)
    try:
        return parse(ref_date).year
    except ValueError:
        # Unparseable date: grab the first thing that looks like a year.
        year_match = re.search(r"\d{4}", ref_date)
        return int(year_match.group()) if year_match else ""
Return the reference publication year.
def autocomplete(self):
    """Execute a solr autocomplete search and return processed results."""
    # Faceting is never used for autocomplete queries.
    self.facet = False
    params = self.solr_params()
    logging.info("PARAMS=" + str(params))
    search_results = self.solr.search(**params)
    logging.info("Docs found: {}".format(search_results.hits))
    return self._process_autocomplete_results(search_results)
Execute solr autocomplete
# NOTE(review): this generator is presumably wrapped by
# @contextlib.contextmanager at its (not visible here) decoration site;
# confirm before using it directly.
def file(mode='w+b', suffix='', prefix='tmp', dir=None, ignore_missing=False):
    """Create a named temporary file, yield it, and unlink it on exit.

    Unlike tempfile's delete-on-close behaviour, the file survives being
    closed, so it can be handed to an external program before cleanup.

    :param mode: Passed on to tempfile.NamedTemporaryFile.
    :param suffix: Passed on to tempfile.NamedTemporaryFile.
    :param prefix: Passed on to tempfile.NamedTemporaryFile.
    :param dir: Passed on to tempfile.NamedTemporaryFile.
    :param ignore_missing: When True, the file having already been
        removed (e.g. moved elsewhere) is not an error at cleanup time.
    """
    # Guard against callers still passing the removed integer `bufsize`.
    if isinstance(suffix, int):
        raise ValueError('Passed an integer as suffix. Did you want to '
                         'specify the deprecated parameter `bufsize`?')
    # delete=False keeps the file on disk after close; unlinked manually.
    fp = tempfile.NamedTemporaryFile(mode=mode, suffix=suffix, prefix=prefix,
                                     dir=dir, delete=False)
    try:
        yield fp
    finally:
        try:
            os.unlink(fp.name)
        except OSError as e:
            # A missing file (ENOENT) is tolerated only when requested.
            if e.errno != ENOENT or ignore_missing is False:
                raise
Create a temporary file. A contextmanager that creates and returns a named temporary file and removes it on exit. Differs from temporary file functions in :mod:`tempfile` by not deleting the file once it is closed, making it safe to write and close the file and then processing it with an external program. If the temporary file is moved elsewhere later on, `ignore_missing` should be set to `True`. :param mode: Passed on to :func:`tempfile.NamedTemporaryFile`. :param suffix: Passed on to :func:`tempfile.NamedTemporaryFile`. :param prefix: Passed on to :func:`tempfile.NamedTemporaryFile`. :param dir: Passed on to :func:`tempfile.NamedTemporaryFile`. :param ignore_missing: If set to `True`, no exception will be raised if the temporary file has been deleted when trying to clean it up. :return: A file object with a `.name`.
def query_tracking_code(tracking_code, year=None):
    """Return the list of events associated with *tracking_code*.

    Performs two remote requests: one to resolve the destination for
    the tracking code, then one for the detailed event list.

    :param tracking_code: Carrier tracking code to look up.
    :param year: Year of the shipment; defaults to the current year.
    :return: List of processed tracking events ([] when nothing found).
    """
    payload = {
        'Anio': year or datetime.now().year,
        'Tracking': tracking_code,
    }
    response = _make_request(TRACKING_URL, payload)
    # An empty 'd' means the service knows nothing about this code.
    if not response['d']:
        return []
    data = response['d'][0]
    # 'RetornoCadena6' carries the destination required by the detail query.
    destination = data['RetornoCadena6']
    payload.update({
        'Destino': destination,
    })
    response = _make_request(TRACKING_DETAIL_URL, payload)
    return _process_detail(response['d'])
Given a tracking_code, return a list of events related to the tracking code.
def create_dataset(
    self,
    parent,
    dataset,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """Create an AutoML dataset under *parent*.

    :param parent: Resource name of the project to create the dataset in.
    :param dataset: Dataset message (or dict of the same form) to create.
    :param retry: Retry policy; DEFAULT uses the method's configuration.
    :param timeout: Time in seconds to wait (per attempt when retrying).
    :param metadata: Extra (key, value) metadata pairs for the request.
    :return: The created Dataset.
    :raises google.api_core.exceptions.GoogleAPICallError: On failure.
    """
    # Lazily wrap the transport method once, applying the configured
    # retry/timeout defaults for CreateDataset.
    if "create_dataset" not in self._inner_api_calls:
        self._inner_api_calls[
            "create_dataset"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.create_dataset,
            default_retry=self._method_configs["CreateDataset"].retry,
            default_timeout=self._method_configs["CreateDataset"].timeout,
            client_info=self._client_info,
        )

    request = service_pb2.CreateDatasetRequest(parent=parent, dataset=dataset)
    if metadata is None:
        metadata = []
    metadata = list(metadata)
    # Attach a routing header so the backend routes by parent resource.
    try:
        routing_header = [("parent", parent)]
    except AttributeError:
        pass
    else:
        routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
            routing_header
        )
        metadata.append(routing_metadata)

    return self._inner_api_calls["create_dataset"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
Creates a dataset. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> >>> parent = client.location_path('[PROJECT]', '[LOCATION]') >>> >>> # TODO: Initialize `dataset`: >>> dataset = {} >>> >>> response = client.create_dataset(parent, dataset) Args: parent (str): The resource name of the project to create the dataset for. dataset (Union[dict, ~google.cloud.automl_v1beta1.types.Dataset]): The dataset to create. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.Dataset` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
def zip(value=data, mu=mu, psi=psi):
    """Zero-inflated Poisson log-likelihood of *value* given mu and psi."""
    # Contribution of an observed zero: mixture of the structural-zero
    # mass (1 - psi) and the Poisson probability of zero (psi * e^-mu).
    zero_loglik = np.log((1. - psi) + psi * np.exp(-mu))
    total = 0.0
    for x in value:
        if x:
            # Non-zero counts come from the Poisson component, weight psi.
            total += np.log(psi) + poisson_like(x, mu)
        else:
            total += zero_loglik
    return total
Zero-inflated Poisson likelihood
def run_command(cmd_to_run):
    """Run *cmd_to_run* in a subprocess, capturing stderr and stdout.

    Output is piped to temporary files rather than subprocess.PIPE to
    avoid PIPE's problems with large buffers. Blocks until the command
    has completed.

    :param cmd_to_run: Command passed straight to subprocess.Popen.
    :return: Tuple of (stderr, stdout) as strings.
    """
    with tempfile.TemporaryFile() as stdout_file, \
            tempfile.TemporaryFile() as stderr_file:
        popen = subprocess.Popen(cmd_to_run, stdout=stdout_file,
                                 stderr=stderr_file)
        popen.wait()
        stderr_file.seek(0)
        stdout_file.seek(0)
        stderr = stderr_file.read()
        stdout = stdout_file.read()
        # TemporaryFile is binary; decode unconditionally. (Replaces the
        # former `six.PY3` gate — this codebase is Python-3-only, so the
        # third-party six dependency was unnecessary here.)
        stderr = stderr.decode()
        stdout = stdout.decode()
    return stderr, stdout
Wrapper around subprocess that pipes the stderr and stdout from `cmd_to_run` to temporary files. Using the temporary files gets around subprocess.PIPE's issues with handling large buffers. Note: this command will block the python process until `cmd_to_run` has completed. Returns a tuple, containing the stderr and stdout as strings.
def load_crawler(self, crawler, url, ignore_regex):
    """Load *crawler* into a new CrawlerProcess, primed with *url*.

    :param crawler: Crawler class to schedule.
    :param url: Start URL for the crawler.
    :param ignore_regex: Regex of URLs the crawler should ignore.
    """
    scrapy_options = self.cfg.get_scrapy_options()
    self.process = CrawlerProcess(scrapy_options)
    self.process.crawl(crawler,
                       self.helper,
                       url=url,
                       config=self.cfg,
                       ignore_regex=ignore_regex)
Loads the given crawler with the given url. :param class crawler: class of the crawler to load :param str url: url to start the crawler with :param regex ignore_regex: to be able to ignore urls that match this regex code
def _document_root(self, fully_qualified=True):
    """Build the <mets> root Element for the document.

    :param fully_qualified: When True the mets namespace is bound to the
        "mets" prefix; otherwise it becomes the default (unprefixed)
        namespace.
    """
    mets_prefix = "mets" if fully_qualified else None
    nsmap = {
        "xsi": utils.NAMESPACES["xsi"],
        "xlink": utils.NAMESPACES["xlink"],
        mets_prefix: utils.NAMESPACES["mets"],
    }
    attrib = {
        "{}schemaLocation".format(utils.lxmlns("xsi")): utils.SCHEMA_LOCATIONS
    }
    # OBJID is optional; only emit the attribute when it is set.
    if self.objid:
        attrib["OBJID"] = self.objid
    return etree.Element(utils.lxmlns("mets") + "mets", nsmap=nsmap,
                         attrib=attrib)
Return the mets Element for the document root.
def parse(self, words):
    """Parse a word-representation of a number (ints and floats).

    Args:
        words (str): Description of an arbitrary number.

    Returns:
        float: The parsed value.
    """
    # Fast path: the input is already a plain numeric literal.
    try:
        return float(words)
    except Exception:
        pass
    tokens = words.split(' ')
    last = tokens[-1]
    # Normalize a trailing fraction or ordinal word to its numeric form
    # before handing off to the float parser.
    if last in self.__fractions__:
        tokens[-1] = self.__fractions__[last]
    elif last in self.__ordinals__:
        tokens[-1] = self.__ordinals__[last]
    return self.parseFloat(' '.join(tokens))
A general method for parsing word-representations of numbers. Supports floats and integers. Args: words (str): Description of an arbitrary number. Returns: A double representation of the words.
def generate(self):
    """Generate the Lambda function source and return it as a string.

    :rtype: str
    :returns: lambda function source
    """
    body = self._get_source()
    # Inject the endpoint configuration in place of the empty placeholder.
    body = body.replace('endpoints = {}',
                        'endpoints = ' + self._config_src)
    # Apply the configured logging level.
    body = body.replace(
        'logger.setLevel(logging.INFO)',
        'logger.setLevel(logging.%s)' % self.config.logging_level)
    return self._docstring + body
Generate Lambda function source; return it as a string. :rtype: str :returns: lambda function source
def write(self, text):
    """Write *text*, automatically switching encodings as needed.

    Unencodable characters are delegated to self._handle_character_failed
    and skipped.
    """
    if self.disabled:
        # Switching is off: write everything with the current encoding
        # and let any encoding errors surface downstream.
        self.write_with_encoding(self.encoding, text)
        return
    # split_writable_text presumably splits text into a prefix writable
    # in the given encoding plus the remainder — TODO confirm.
    to_write, text = split_writable_text(self.encoder, text, self.encoding)
    if to_write:
        self.write_with_encoding(self.encoding, to_write)
    # Consume the remainder, choosing a suitable encoding per run of
    # characters.
    while text:
        encoding = self.encoder.find_suitable_encoding(text[0])
        if not encoding:
            # No encoding can represent this character; report and skip.
            self._handle_character_failed(text[0])
            text = text[1:]
            continue
        to_write, text = split_writable_text(self.encoder, text, encoding)
        if to_write:
            self.write_with_encoding(encoding, to_write)
Write the text, automatically switching encodings.
def _detect_encoding(data=None): import locale enc_list = ['utf-8', 'latin-1', 'iso8859-1', 'iso8859-2', 'utf-16', 'cp720'] code = locale.getpreferredencoding(False) if data is None: return code if code.lower() not in enc_list: enc_list.insert(0, code.lower()) for c in enc_list: try: for line in data: line.decode(c) except (UnicodeDecodeError, UnicodeError, AttributeError): continue return c print("Encoding not detected. Please pass encoding value manually")
Return the default system encoding. If data is passed, try to decode the data with the default system encoding or from a short list of encoding types to test. Args: data - list of lists Returns: enc - system encoding
def get_approximate_times(times: List[int]) -> List[int]:
    """Expand each encoded time into a +/-30-minute window.

    For every value in *times* (hour/minute packed via
    HOUR_TO_TWENTY_FOUR) the window start and end are appended, each
    re-encoded the same way. Used for utterances such as "about 7pm",
    which should also match the half-hour on either side.
    """
    windows: List[int] = []
    for encoded in times:
        hour = int(encoded / HOUR_TO_TWENTY_FOUR) % 24
        minute = encoded % HOUR_TO_TWENTY_FOUR
        # Anchor on today's date purely to get datetime arithmetic
        # across hour boundaries; only hour/minute of the results are
        # used afterwards.
        anchor = datetime.now().replace(hour=hour, minute=minute)
        for shifted in (anchor - timedelta(minutes=30),
                        anchor + timedelta(minutes=30)):
            windows.append(shifted.hour * HOUR_TO_TWENTY_FOUR + shifted.minute)
    return windows
Given a list of times that follow a word such as ``about``, we return a list of times that could appear in the query as a result of this. For example if ``about 7pm`` appears in the utterance, then we also want to add ``1830`` and ``1930``.
def save(self):
    """Persist this object to the database.

    Each non-relation field stores itself through the pipeline. When
    self.notify is set, a create/update event is published on both the
    class channel and the instance channel.

    :return: self, allowing call chaining.
    """
    redis = type(self).get_redis()
    pipe = to_pipeline(redis)
    pipe.hset(self.key(), 'id', self.id)
    for fieldname, field in self.proxy:
        # Relations are not stored here; each plain field knows how to
        # serialize itself into the pipeline.
        if not isinstance(field, Relation):
            field.save(getattr(self, fieldname), pipe, commit=False)
    # Register this id in the class-wide member set.
    pipe.sadd(type(self).members_key(), self.id)
    pipe.execute()
    if self.notify:
        data = json.dumps({
            'event': 'create' if not self._persisted else 'update',
            'data': self.to_json(),
        })
        # Notify both class-level and instance-level subscribers.
        redis.publish(type(self).cls_key(), data)
        redis.publish(self.key(), data)
    self._persisted = True
    return self
Persists this object to the database. Each field knows how to store itself so we don't have to worry about it
def load_word_file(filename):
    """Load a bundled words file and return it as a list of lines.

    :param filename: Name of the file under the package's ``words/`` dir.
    :return: List of lines (trailing newlines preserved).
    """
    words_file = resource_filename(__name__, "words/%s" % filename)
    # `with` guarantees the handle is closed even if reading raises,
    # fixing the leak in the old open/readlines/close sequence.
    with open(words_file, 'r') as handle:
        return handle.readlines()
Loads a words file as a list of lines
def clean(self, *args, **kwargs):
    """Validate that comments are allowed on the parent node and layer.

    Only enforced on creation (no pk yet); the layer-level check applies
    only when the nodeshot layers app is installed.

    :raises ValidationError: When the node or its layer disallows comments.
    """
    if not self.pk:
        node = self.node
        if node.participation_settings.comments_allowed is False:
            raise ValidationError("Comments not allowed for this node")
        # Layer-level settings exist only when the layers app is enabled.
        if 'nodeshot.core.layers' in settings.INSTALLED_APPS:
            layer = node.layer
            if layer.participation_settings.comments_allowed is False:
                raise ValidationError("Comments not allowed for this layer")
Check if comments can be inserted for parent node or parent layer
def dict_to_switch(d):
    """Convert a dictionary with integer keys to a switch-like lookup.

    Returns a function mapping key -> d[key]. The `_always_inline_`
    attribute and `unrolling_iterable` are hints for the RPython
    translator/JIT so the lookup can be compiled down to a switch.
    """
    def lookup(query):
        return d[query]
    # Hint to the RPython translator: always inline this call.
    lookup._always_inline_ = True
    # NOTE(review): `unrolling_items` is built but not referenced here;
    # presumably consumed by translator machinery — confirm before
    # removing.
    unrolling_items = unrolling_iterable(d.items())
    return lookup
Convert of dictionary with integer keys to a switch statement.
def close_handle(self):
    """Close the handle to the thread.

    Normally unnecessary: handles created by WinAppDbg are closed
    automatically when the garbage collector claims them.
    """
    try:
        if hasattr(self.hThread, 'close'):
            # Handle wrapper object: use its own close method.
            self.hThread.close()
        elif self.hThread not in (None, win32.INVALID_HANDLE_VALUE):
            # Raw handle value: close it through the Win32 API.
            win32.CloseHandle(self.hThread)
    finally:
        # Always drop the reference, even if closing raised.
        self.hThread = None
Closes the handle to the thread. @note: Normally you don't need to call this method. All handles created by I{WinAppDbg} are automatically closed when the garbage collector claims them.
def notifications_view(request):
    """Show the current user their notifications and mark them read."""
    # Snapshot the list before marking read so the unread state is
    # still available to the template.
    notifications = list(request.user.notifications.all())
    request.user.notifications.mark_all_as_read()
    context = {
        "page_name": "Your Notifications",
        "notifications": notifications,
    }
    return render_to_response("list_notifications.html", context,
                              context_instance=RequestContext(request))
Show a user their notifications.
def identify_gwf(origin, filepath, fileobj, *args, **kwargs):
    """Identify a filename or file object as GWF.

    Checks the 4-byte signature when a file object is given, then falls
    back to the file extension. A cache file (.lcf/.cache) also
    identifies as GWF when its first entry points at a .gwf file.
    """
    if fileobj is not None:
        position = fileobj.tell()
        fileobj.seek(0)
        try:
            signature = fileobj.read(4)
        finally:
            # Restore the caller's read position whatever happens.
            fileobj.seek(position)
        if signature == GWF_SIGNATURE:
            return True
    if filepath is None:
        return None
    if filepath.endswith('.gwf'):
        return True
    if filepath.endswith(('.lcf', '.cache')):
        try:
            cache = read_cache(filepath)
        except IOError:
            return False
        if cache[0].path.endswith('.gwf'):
            return True
Identify a filename or file object as GWF This function is overloaded in that it will also identify a cache file as 'gwf' if the first entry in the cache contains a GWF file extension
def services(self):
    """Return the Services admin object, or None when unavailable.

    Gives access to the ArcGIS Server admin information about services
    and folders.
    """
    if self._resources is None:
        self.__init()
    # The resource listing advertises whether services are exposed.
    if "services" not in self._resources:
        return None
    return _services.Services(url=self._url + "/services",
                              securityHandler=self._securityHandler,
                              proxy_url=self._proxy_url,
                              proxy_port=self._proxy_port,
                              initialize=True)
Gets the services object which will provide the ArcGIS Server's admin information about services and folders.
def get_machines(self, origin, hostnames):
    """Return the set of machines matching *hostnames*.

    :raises CommandError: If any requested hostname cannot be found.
    """
    # Track which hostnames are still unaccounted for; each match pops
    # its entry so duplicates in the response are filtered out too.
    pending = {hostname: True for hostname in hostnames}
    found = [
        machine for machine in origin.Machines.read(hostnames=pending)
        if pending.pop(machine.hostname, False)
    ]
    if pending:
        raise CommandError(
            "Unable to find %s %s." % (
                "machines" if len(pending) > 1 else "machine",
                ','.join(pending)))
    return found
Return a set of machines based on `hostnames`. Any hostname that is not found will result in an error.
def connected(G, method_name, **kwargs):
    """Apply a networkx.connected analysis method to each graph in *G*.

    Deprecated: delegates straight to ``GraphCollection.analyze``.

    :param G: The GraphCollection to analyze.
    :param method_name: Name of the method in networkx.connected.
    :return: Result of ``G.analyze`` (keyed by graph index).
    """
    warnings.warn(
        "To be removed in 0.8. Use GraphCollection.analyze instead.",
        DeprecationWarning)
    return G.analyze(['connected', method_name], **kwargs)
Performs analysis methods from networkx.connected on each graph in the collection. Parameters ---------- G : :class:`.GraphCollection` The :class:`.GraphCollection` to analyze. The specified method will be applied to each graph in ``G``. method : string Name of method in networkx.connected. **kwargs : kwargs Keyword arguments, passed directly to method. Returns ------- results : dict Keys are graph indices, values are output of method for that graph. Raises ------ ValueError If name is not in networkx.connected, or if no such method exists.
def render_or_send(func, message):
    """Render an email message for debugging, or actually send it.

    When the current request did not come through *func*'s own endpoint
    the message is sent for real; authenticated superusers additionally
    get the message rendered in the debug template.
    """
    # func.__name__ replaces the Python-2-only func.func_name attribute,
    # which raises AttributeError under Python 3.
    if request.endpoint != func.__name__:
        mail.send(message)

    if (current_user.is_authenticated() and current_user.superuser):
        return render_template('debug_email.html', message=message)
Renders an email message for debugging or actually sends it.