def file_list(load):
    '''
    Return a list of all files in a specified environment
    '''
    ret = set()
    try:
        for container in __opts__['azurefs']:
            if container.get('saltenv', 'base') != load['saltenv']:
                continue
            container_list = _get_container_path(container) + '.list'
            lk = container_list + '.lk'
            salt.fileserver.wait_lock(lk, container_list, 5)
            if not os.path.exists(container_list):
                continue
            with salt.utils.files.fopen(container_list, 'r') as fp_:
                ret.update(set(salt.utils.json.load(fp_)))
    except Exception as exc:
        log.error('azurefs: an error occurred retrieving file lists. '
                  'It should be resolved next time the fileserver '
                  'updates. Please do not manually modify the azurefs '
                  'cache directory.')
    return list(ret)
def reset_training_state(self, dones, batch_info):
    """ A hook for a model to react when an episode finishes during training """
    for idx, done in enumerate(dones):
        if done > 0.5:
            self.processes[idx].reset()
def start(self):
    """
    Connect to the Cytomine server and switch to the job connection.
    Incurs data flows.
    """
    run_by_ui = False
    if not self.current_user.algo:
        # If user connects as a human (CLI execution)
        self._job = Job(self._project.id, self._software.id).save()
        user_job = User().fetch(self._job.userJob)
        self.set_credentials(user_job.publicKey, user_job.privateKey)
    else:
        # If the user executes the job through the Cytomine interface
        self._job = Job().fetch(self.current_user.job)
        run_by_ui = True

    # set job state to RUNNING
    self._job.status = Job.RUNNING
    self._job.update()

    # add software parameters
    if not run_by_ui and self._parameters is not None:
        parameters = vars(self._parameters)
        for software_param in self._software.parameters:
            name = software_param["name"]
            if name in parameters:
                value = parameters[name]
            else:
                value = software_param["defaultParamValue"]
            JobParameter(self._job.id, software_param["id"], value).save()
def _get_channel(host, timeout):
    """
    Create communication channel for given `host`.

    Args:
        host (str): Specified --host.
        timeout (int): Set `timeout` for returned `channel`.

    Returns:
        Object: Pika channel object.
    """
    connection = create_blocking_connection(host)

    # register timeout
    if timeout >= 0:
        connection.add_timeout(
            timeout,
            lambda: sys.stderr.write("Timed out!\n") or sys.exit(1)
        )

    return connection.channel()
def yield_to_ioloop(self):
    """Function that will allow Rejected to process IOLoop events while
    in a tight-loop inside an asynchronous consumer.

    .. code-block:: python
       :caption: Example Usage

        class Consumer(consumer.Consumer):

            @gen.coroutine
            def process(self):
                for iteration in range(0, 1000000):
                    yield self.yield_to_ioloop()

    """
    try:
        yield self._yield_condition.wait(
            self._message.channel.connection.ioloop.time() + 0.001)
    except gen.TimeoutError:
        pass
def page(self, enabled=values.unset, date_created_after=values.unset,
         date_created_before=values.unset, friendly_name=values.unset,
         page_token=values.unset, page_number=values.unset,
         page_size=values.unset):
    """
    Retrieve a single page of CompositionHookInstance records from the API.
    Request is executed immediately

    :param bool enabled: Only show Composition Hooks enabled or disabled.
    :param datetime date_created_after: Only show Composition Hooks created on or after this ISO8601 date-time with timezone.
    :param datetime date_created_before: Only show Composition Hooks created before this ISO8601 date-time with timezone.
    :param unicode friendly_name: Only show Composition Hooks with friendly name that match this name.
    :param str page_token: PageToken provided by the API
    :param int page_number: Page Number, this value is simply for client state
    :param int page_size: Number of records to return, defaults to 50

    :returns: Page of CompositionHookInstance
    :rtype: twilio.rest.video.v1.composition_hook.CompositionHookPage
    """
    params = values.of({
        'Enabled': enabled,
        'DateCreatedAfter': serialize.iso8601_datetime(date_created_after),
        'DateCreatedBefore': serialize.iso8601_datetime(date_created_before),
        'FriendlyName': friendly_name,
        'PageToken': page_token,
        'Page': page_number,
        'PageSize': page_size,
    })

    response = self._version.page(
        'GET',
        self._uri,
        params=params,
    )

    return CompositionHookPage(self._version, response, self._solution)
def connect_patch_namespaced_pod_proxy(self, name, namespace, **kwargs):
    """
    connect PATCH requests to proxy of Pod
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.connect_patch_namespaced_pod_proxy(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the PodProxyOptions (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str path: Path is the URL path to use for the current proxy request to pod.
    :return: str
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.connect_patch_namespaced_pod_proxy_with_http_info(name, namespace, **kwargs)
    else:
        (data) = self.connect_patch_namespaced_pod_proxy_with_http_info(name, namespace, **kwargs)
        return data
def _get_default_field_names(self, declared_fields, model_info):
    """
    Return default field names for serializer.
    """
    return (
        [model_info.pk.name] +
        list(declared_fields.keys()) +
        list(model_info.fields.keys()) +
        list(model_info.relations.keys())
    )
def get_authuser_by_name(cls, request):
    """ Get user by username

    Used by Token-based auth. Is added as request method to populate
    `request.user`.
    """
    username = authenticated_userid(request)
    if username:
        return cls.get_item(username=username)
def main(argv=None):
    """Run roll() from a command line interface"""
    args = docopt.docopt(__doc__, argv=argv, version=__version__)
    verbose = bool(args['--verbose'])

    f_roll = dice.roll
    kwargs = {}
    if args['--min']:
        f_roll = dice.roll_min
    elif args['--max']:
        f_roll = dice.roll_max
    if args['--max-dice']:
        try:
            kwargs['max_dice'] = int(args['--max-dice'])
        except ValueError:
            print("Invalid value for --max-dice: '%s'" % args['--max-dice'])
            exit(1)

    expr = ' '.join(args['<expression>'])
    try:
        roll, kwargs = f_roll(expr, raw=True, return_kwargs=True, **kwargs)
        if verbose:
            print('Result: ', end='')
        print(str(roll.evaluate_cached(**kwargs)))
        if verbose:
            print('Breakdown:')
            print(dice.utilities.verbose_print(roll, **kwargs))
    except DiceBaseException as e:
        print('Whoops! Something went wrong:')
        print(e.pretty_print())
        exit(1)
def _set_error_handler_callbacks(self, app):
    """
    Sets the error handler callbacks used by this extension
    """
    @app.errorhandler(NoAuthorizationError)
    def handle_auth_error(e):
        return self._unauthorized_callback(str(e))

    @app.errorhandler(CSRFError)
    def handle_csrf_error(e):
        return self._unauthorized_callback(str(e))

    @app.errorhandler(ExpiredSignatureError)
    def handle_expired_error(e):
        try:
            token = ctx_stack.top.expired_jwt
            return self._expired_token_callback(token)
        except TypeError:
            msg = (
                "jwt.expired_token_loader callback now takes the expired token "
                "as an additional parameter. Example: expired_callback(token)"
            )
            warn(msg, DeprecationWarning)
            return self._expired_token_callback()

    @app.errorhandler(InvalidHeaderError)
    def handle_invalid_header_error(e):
        return self._invalid_token_callback(str(e))

    @app.errorhandler(InvalidTokenError)
    def handle_invalid_token_error(e):
        return self._invalid_token_callback(str(e))

    @app.errorhandler(JWTDecodeError)
    def handle_jwt_decode_error(e):
        return self._invalid_token_callback(str(e))

    @app.errorhandler(WrongTokenError)
    def handle_wrong_token_error(e):
        return self._invalid_token_callback(str(e))

    @app.errorhandler(InvalidAudienceError)
    def handle_invalid_audience_error(e):
        return self._invalid_token_callback(str(e))

    @app.errorhandler(RevokedTokenError)
    def handle_revoked_token_error(e):
        return self._revoked_token_callback()

    @app.errorhandler(FreshTokenRequired)
    def handle_fresh_token_required(e):
        return self._needs_fresh_token_callback()

    @app.errorhandler(UserLoadError)
    def handler_user_load_error(e):
        # The identity is already saved before this exception was raised,
        # otherwise a different exception would be raised, which is why we
        # can safely call get_jwt_identity() here
        identity = get_jwt_identity()
        return self._user_loader_error_callback(identity)

    @app.errorhandler(UserClaimsVerificationError)
    def handle_failed_user_claims_verification(e):
        return self._verify_claims_failed_callback()
def promise_method(func):
    """
    A decorator which ensures that once a method has been marked as
    resolved (via Class.__resolved) it will then propagate the attribute
    (function) call upstream.
    """
    name = func.__name__

    @wraps(func)
    def wrapped(self, *args, **kwargs):
        cls_name = type(self).__name__
        if getattr(self, '_%s__resolved' % (cls_name,)):
            return getattr(getattr(self, '_%s__wrapped' % (cls_name,)), name)(*args, **kwargs)
        return func(self, *args, **kwargs)
    return wrapped
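Since the decorator resolves its forwarding target via name-mangled attributes, a small self-contained sketch helps show the mechanics. Everything below except promise_method itself (assumed in scope from the snippet above) is hypothetical: LazyList, RealList, and resolve() are illustration-only names.

class RealList:
    """Hypothetical resolved object the promise forwards to."""

    def __init__(self, items):
        self.items = items

    def count(self):
        return len(self.items)


class LazyList:
    """Hypothetical promise: before resolution local methods run;
    after resolution, calls are forwarded to the wrapped object."""

    def __init__(self):
        self.__resolved = False   # mangles to _LazyList__resolved
        self.__wrapped = None     # mangles to _LazyList__wrapped

    def resolve(self, wrapped):
        self.__wrapped = wrapped
        self.__resolved = True

    @promise_method
    def count(self):
        return 0  # placeholder behaviour before the promise resolves


lazy = LazyList()
print(lazy.count())            # 0 -- unresolved, the local method runs
lazy.resolve(RealList([1, 2, 3]))
print(lazy.count())            # 3 -- forwarded to RealList.count()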
def open_report_template_path(self):
    """Open File dialog to choose the report template path."""
    # noinspection PyCallByClass,PyTypeChecker
    directory_name = QFileDialog.getExistingDirectory(
        self,
        self.tr('Templates directory'),
        self.leReportTemplatePath.text(),
        QFileDialog.ShowDirsOnly)
    if directory_name:
        self.leReportTemplatePath.setText(directory_name)
def set(self, key, value, time=0, compress_level=-1):
    """
    Set a value for a key on server.

    :param key: Key's name
    :type key: str
    :param value: A value to be stored on server.
    :type value: object
    :param time: Time in seconds that your key will expire.
    :type time: int
    :param compress_level: How much to compress.
        0 = no compression, 1 = fastest, 9 = slowest but best,
        -1 = default compression level.
    :type compress_level: int
    :return: True in case of success and False in case of failure
    :rtype: bool
    """
    returns = []
    for server in self.servers:
        returns.append(server.set(key, value, time, compress_level=compress_level))

    return any(returns)
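To make the fan-out semantics concrete: set() writes to every server and reports success if any of them accepted the value. A runnable sketch of that pattern with a hypothetical stub server (StubServer is invented for illustration, not part of the client library):

class StubServer:
    """Hypothetical stand-in for one memcached server connection."""

    def __init__(self, healthy):
        self.healthy = healthy
        self.store = {}

    def set(self, key, value, time, compress_level=-1):
        if not self.healthy:
            return False  # simulate a server that rejects the write
        self.store[key] = value
        return True


servers = [StubServer(healthy=False), StubServer(healthy=True)]
returns = [s.set('greeting', 'hello', time=60) for s in servers]
print(any(returns))  # True: the write counts as a success if any server took it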
def add_all(self, items, overflow_policy=OVERFLOW_POLICY_OVERWRITE):
    """
    Adds all of the items in the specified collection to the tail of the Ringbuffer.
    An add_all is likely to outperform multiple calls to add(object) due to better
    io utilization and a reduced number of executed operations. The items are added
    in the order of the Iterator of the collection.

    If there is no space in the Ringbuffer, the action is determined by overflow
    policy as :const:`OVERFLOW_POLICY_OVERWRITE` or :const:`OVERFLOW_POLICY_FAIL`.

    :param items: (Collection), the specified collection which contains the items to be added.
    :param overflow_policy: (int), the OverflowPolicy to be used when there is no space (optional).
    :return: (long), the sequenceId of the last written item, or -1 if the last write failed.
    """
    check_not_empty(items, "items can't be empty")
    if len(items) > MAX_BATCH_SIZE:
        raise AssertionError("Batch size can't be greater than %d" % MAX_BATCH_SIZE)
    for item in items:
        check_not_none(item, "item can't be None")

    item_list = [self._to_data(x) for x in items]
    return self._encode_invoke(ringbuffer_add_all_codec, value_list=item_list,
                               overflow_policy=overflow_policy)
def get(self, using=None, **kwargs):
    """
    The get index API allows to retrieve information about the index.

    Any additional keyword arguments will be passed to
    ``Elasticsearch.indices.get`` unchanged.
    """
    return self._get_connection(using).indices.get(index=self._name, **kwargs)
def find(*args, **kwargs):
    """
    kwargs supported:
        user: the username
        start_at: start datetime object
        end_at: end datetime object
        limit: # of objects to fetch
    """
    # default end_at to now and start_at to 1 day before end_at
    end_at = kwargs.get('end_at', datetime.datetime.now())
    start_at = kwargs.get('start_at', end_at - datetime.timedelta(days=1))

    # default # of records to fetch to be 50
    limit = kwargs.get('limit', 50)

    # if no user is defined, fetch for ALL
    user = kwargs.get('user', None)

    if user:
        return request_log.find(start_at, end_at, limit, spec={'user': user})
    else:
        return request_log.find(start_at, end_at, limit)
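The interesting part of find() is how it derives its defaults: end_at falls back to "now", start_at to one day before end_at, and limit to 50. A minimal standalone sketch of just that defaulting logic, runnable without the request_log backend (resolve_window is a hypothetical name):

import datetime

def resolve_window(**kwargs):
    # end_at defaults to now; start_at defaults to one day before end_at
    end_at = kwargs.get('end_at', datetime.datetime.now())
    start_at = kwargs.get('start_at', end_at - datetime.timedelta(days=1))
    limit = kwargs.get('limit', 50)
    return start_at, end_at, limit

start, end, limit = resolve_window(limit=10)
assert end - start == datetime.timedelta(days=1)
print(start, end, limit)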
def recursively_preempt_states(self):
    """ Preempt the state and all of its child states. """
    super(ContainerState, self).recursively_preempt_states()
    # notify the transition condition variable to let the state instantaneously stop
    self._transitions_cv.acquire()
    self._transitions_cv.notify_all()
    self._transitions_cv.release()
    for state in self.states.values():
        state.recursively_preempt_states()
def set_running_mean(self, running_mean):
    """
    Set the running mean of the layer.
    Only use this method for a BatchNormalization layer.

    :param running_mean: a Numpy array.
    """
    callBigDlFunc(self.bigdl_type, "setRunningMean",
                  self.value, JTensor.from_ndarray(running_mean))
    return self
def clear(self):
    """Clear all work items from the session.

    This removes any associated results as well.
    """
    with self._conn:
        self._conn.execute('DELETE FROM results')
        self._conn.execute('DELETE FROM work_items')
def _create_sagemaker_model(self, instance_type, accelerator_type=None, tags=None):
    """Create a SageMaker Model Entity

    Args:
        instance_type (str): The EC2 instance type that this Model will be used for,
            this is only used to determine if the image needs GPU support or not.
        accelerator_type (str): Type of Elastic Inference accelerator to attach to an
            endpoint for model loading and inference, for example, 'ml.eia1.medium'.
            If not specified, no Elastic Inference accelerator will be attached to the
            endpoint.
        tags (List[dict[str, str]]): Optional. The list of tags to add to the model.
            Example:
                >>> tags = [{'Key': 'tagname', 'Value': 'tagvalue'}]
            For more information about tags, see
            https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.add_tags
    """
    container_def = self.prepare_container_def(instance_type, accelerator_type=accelerator_type)
    self.name = self.name or utils.name_from_image(container_def['Image'])
    enable_network_isolation = self.enable_network_isolation()
    self.sagemaker_session.create_model(self.name, self.role, container_def,
                                        vpc_config=self.vpc_config,
                                        enable_network_isolation=enable_network_isolation,
                                        tags=tags)
def H12(self):
    "Information measure of correlation 1."
    maxima = np.vstack((self.hx, self.hy)).max(0)
    return (self.H9() - self.hxy1) / maxima
def _scale_y_values(self, values, new_min, new_max, scale_old_from_zero=True):
    '''
    Take values and transmute them into a new range
    '''
    # Scale Y values - Create a scaled list of values to use for the visual graph
    scaled_values = []
    y_min_value = min(values)
    if scale_old_from_zero:
        y_min_value = 0
    y_max_value = max(values)
    new_min = 0
    old_range = (y_max_value - y_min_value) or 1  # Prevents division by zero if all values are the same
    new_range = (new_max - new_min)  # max_height is new_max
    for old_value in values:
        new_value = (((old_value - y_min_value) * new_range) / old_range) + new_min
        scaled_values.append(new_value)
    return scaled_values
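The scaling is the standard linear rescale new = (old - old_min) * new_range / old_range + new_min. A worked standalone example of the same arithmetic, scaling from zero as the method does by default:

values = [0, 5, 20]
old_min = 0                               # scale_old_from_zero=True behaviour
old_range = (max(values) - old_min) or 1  # guard against all-equal input
new_min, new_max = 0, 10
new_range = new_max - new_min

scaled = [((v - old_min) * new_range) / old_range + new_min for v in values]
print(scaled)  # [0.0, 2.5, 10.0]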
def acceptance_fractions(mean_acceptance_fractions, burn=None, ax=None):
    """
    Plot the mean acceptance fractions for each MCMC step.

    :param mean_acceptance_fractions:
        The acceptance fractions at each MCMC step.

    :type mean_acceptance_fractions:
        :class:`numpy.array`

    :param burn: [optional]
        The burn-in point. If provided, a dashed vertical line will be shown
        at the burn-in point.

    :type burn:
        int

    :param ax: [optional]
        The axes to plot the mean acceptance fractions on.

    :type ax:
        :class:`matplotlib.axes.AxesSubplot`

    :returns:
        The acceptance fractions figure.
    """
    factor = 2.0
    lbdim = 0.2 * factor
    trdim = 0.2 * factor
    whspace = 0.10
    dimy = lbdim + factor + trdim
    dimx = lbdim + factor + trdim

    if ax is None:
        fig, ax = plt.subplots()
    else:
        fig = ax.figure

    lm = lbdim / dimx
    bm = lbdim / dimy
    trm = (lbdim + factor) / dimy
    fig.subplots_adjust(left=lm, bottom=bm, right=trm, top=trm,
                        wspace=whspace, hspace=whspace)

    ax.plot(mean_acceptance_fractions, color="k", lw=2)
    if burn is not None:
        ax.axvline(burn, linestyle=":", color="k")
    ax.set_xlim(0, len(mean_acceptance_fractions))

    ax.xaxis.set_major_locator(MaxNLocator(5))
    [l.set_rotation(45) for l in ax.get_xticklabels()]
    ax.yaxis.set_major_locator(MaxNLocator(5))
    [l.set_rotation(45) for l in ax.get_yticklabels()]

    ax.set_xlabel("Step")
    ax.set_ylabel("$\\langle{}a_f\\rangle$")

    fig.tight_layout()
    return fig
def systemInformationType17():
    """SYSTEM INFORMATION TYPE 17 Section 9.1.43e"""
    a = L2PseudoLength(l2pLength=0x01)
    b = TpPd(pd=0x6)
    c = MessageType(mesType=0x3e)  # 00111110
    d = Si17RestOctets()
    packet = a / b / c / d
    return packet
def _register_self(self, logfile, key=JobDetails.topkey, status=JobStatus.unknown):
    """Runs this link, captures output to logfile, and records the job in self.jobs"""
    fullkey = JobDetails.make_fullkey(self.full_linkname, key)
    if fullkey in self.jobs:
        job_details = self.jobs[fullkey]
        job_details.status = status
    else:
        job_details = self._register_job(key, self.args, logfile, status)
def add_output(summary_output, long_output, helper):
    """
    If the summary output is empty, we don't add it as summary; otherwise we
    would have empty spaces (e.g.: '. . . . .') in our summary report.
    """
    if summary_output != '':
        helper.add_summary(summary_output)
    helper.add_long_output(long_output)
def build_system_error(cls, errors=None):
    """Utility method to build a HTTP 500 System Error response"""
    errors = [errors] if not isinstance(errors, list) else errors
    return cls(Status.SYSTEM_ERROR, errors)
def format_list(extracted_list):
    """Format a list of traceback entry tuples for printing.

    Given a list of tuples as returned by extract_tb() or
    extract_stack(), return a list of strings ready for printing.
    Each string in the resulting list corresponds to the item with the
    same index in the argument list. Each string ends in a newline;
    the strings may contain internal newlines as well, for those items
    whose source text line is not None.
    """
    list = []
    for filename, lineno, name, line in extracted_list:
        item = '  File "%s", line %d, in %s\n' % (filename, lineno, name)
        if line:
            item = item + '    %s\n' % line.strip()
        list.append(item)
    return list
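This mirrors the standard library's traceback.format_list. A runnable example pairing the same contract with extract_tb, which produces exactly the entry tuples the function consumes:

import sys
import traceback

try:
    1 / 0
except ZeroDivisionError:
    extracted = traceback.extract_tb(sys.exc_info()[2])
    # traceback.format_list has the same contract as format_list above
    for entry in traceback.format_list(extracted):
        print(entry, end='')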
def update_version_records(self):
    """
    Update rash_info table if necessary.
    """
    from .__init__ import __version__ as version
    with self.connection(commit=True) as connection:
        for vrec in self.get_version_records():
            if (vrec.rash_version == version and
                    vrec.schema_version == schema_version):
                return  # no need to insert the new one!
        connection.execute(
            'INSERT INTO rash_info (rash_version, schema_version) '
            'VALUES (?, ?)',
            [version, schema_version])
def connect(host="localhost", user=None, password="",
            db=None, port=3306, unix_socket=None,
            charset='', sql_mode=None,
            read_default_file=None, conv=decoders, use_unicode=None,
            client_flag=0, cursorclass=Cursor, init_command=None,
            connect_timeout=None, read_default_group=None,
            no_delay=None, autocommit=False, echo=False,
            local_infile=False, loop=None, ssl=None, auth_plugin='',
            program_name='', server_public_key=None):
    """See connections.Connection.__init__() for information about defaults."""
    coro = _connect(host=host, user=user, password=password, db=db,
                    port=port, unix_socket=unix_socket, charset=charset,
                    sql_mode=sql_mode, read_default_file=read_default_file,
                    conv=conv, use_unicode=use_unicode,
                    client_flag=client_flag, cursorclass=cursorclass,
                    init_command=init_command,
                    connect_timeout=connect_timeout,
                    read_default_group=read_default_group,
                    no_delay=no_delay, autocommit=autocommit, echo=echo,
                    local_infile=local_infile, loop=loop, ssl=ssl,
                    auth_plugin=auth_plugin, program_name=program_name)
    return _ConnectionContextManager(coro)
def get_index_line(self, lnum):
    """ Take the 1-indexed line number and return its index information """
    if lnum < 1:
        sys.stderr.write("ERROR: line number should be greater than zero\n")
        sys.exit()
    elif lnum > len(self._lines):
        sys.stderr.write("ERROR: line number is beyond the end of the index\n")
        sys.exit()
    return self._lines[lnum - 1]
def patch(self, path=None, url_kwargs=None, **kwargs):
    """
    Sends a PATCH request.

    :param path: The HTTP path (either absolute or relative).
    :param url_kwargs: Parameters to override in the generated URL.
        See `~hyperlink.URL`.
    :param **kwargs: Optional arguments that ``request`` takes.
    :return: response object
    """
    return self._session.patch(self._url(path, url_kwargs), **kwargs)
def air_range(self) -> Union[int, float]:
    """ Does not include upgrades """
    if self._weapons:
        weapon = next(
            (weapon for weapon in self._weapons
             if weapon.type in {TargetType.Air.value, TargetType.Any.value}),
            None,
        )
        if weapon:
            return weapon.range
    return 0
def calc_chunklen(alph_len):
    '''
    Computes the ideal conversion ratio for the given alphabet.
    A ratio is considered ideal when the number of bits in one output
    encoding chunk that don't add up to one input encoding chunk is minimal.
    '''
    binlen, enclen = min([
        (i, i * 8 / math.log(alph_len, 2))
        for i in range(1, 7)
    ], key=lambda k: k[1] % 1)
    return binlen, int(enclen)
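A worked example, calling the function above: for a 64-symbol alphabet, 3*8 / log2(64) = 4.0 leaves zero fractional bits, so the search picks 3 input bytes per 4 output symbols, which matches how base64 chunks its input; a 16-symbol alphabet yields one byte per two hex digits.

# calc_chunklen is assumed in scope from the snippet above
print(calc_chunklen(64))  # (3, 4): 3 bytes -> 4 base64-style symbols
print(calc_chunklen(16))  # (1, 2): 1 byte  -> 2 hex digits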
def draw_flow(img, flow, step=16, dtype=uint8):
    """
    draws flow vectors on image
    this came from opencv/examples directory

    another way:
    http://docs.opencv.org/trunk/doc/py_tutorials/py_gui/py_drawing_functions/py_drawing_functions.html
    """
    maxval = iinfo(img.dtype).max
    # scaleFact = 1.  # arbitrary factor to make flow visible
    canno = (0, maxval, 0)  # green color
    h, w = img.shape[:2]
    y, x = mgrid[step//2:h:step, step//2:w:step].reshape(2, -1)
    fx, fy = flow[y, x].T

    # create line endpoints
    lines = vstack([x, y, (x+fx), (y+fy)]).T.reshape(-1, 2, 2)
    lines = int32(lines + 0.5)

    # create image
    if img.ndim == 2:  # assume gray
        vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    else:  # already RGB
        vis = img

    # draw line
    cv2.polylines(vis, lines, isClosed=False, color=canno,
                  thickness=1, lineType=8)

    # draw filled green circles
    for (x1, y1), (x2, y2) in lines:
        cv2.circle(vis, center=(x1, y1), radius=1, color=canno, thickness=-1)

    return vis
def get_prior(self, twig=None, **kwargs):
    """
    [NOT IMPLEMENTED]

    :raises NotImplementedError: because it isn't
    """
    raise NotImplementedError

    kwargs['context'] = 'prior'
    return self.filter(twig=twig, **kwargs)
def _load_from_socket(port, auth_secret):
    """
    Load data from a given socket. This is a blocking method; it only
    returns when the socket connection has been closed.
    """
    (sockfile, sock) = local_connect_and_auth(port, auth_secret)
    # The barrier() call may block forever, so no timeout
    sock.settimeout(None)
    # Make a barrier() function call.
    write_int(BARRIER_FUNCTION, sockfile)
    sockfile.flush()

    # Collect result.
    res = UTF8Deserializer().loads(sockfile)

    # Release resources.
    sockfile.close()
    sock.close()

    return res
def estimate_intraday(returns, positions, transactions, EOD_hour=23):
    """
    Intraday strategies will often not hold positions at the day end.
    This attempts to find the point in the day that best represents
    the activity of the strategy on that day, and effectively resamples
    the end-of-day positions with the positions at this point of day.
    The point of day is found by detecting when our exposure in the
    market is at its maximum point. Note that this is an estimate.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in create_full_tear_sheet.
    positions : pd.DataFrame
        Daily net position values.
         - See full explanation in create_full_tear_sheet.
    transactions : pd.DataFrame
        Prices and amounts of executed trades. One row per trade.
         - See full explanation in create_full_tear_sheet.

    Returns
    -------
    pd.DataFrame
        Daily net position values, resampled for intraday behavior.
    """

    # Construct DataFrame of transaction amounts
    txn_val = transactions.copy()
    txn_val.index.names = ['date']
    txn_val['value'] = txn_val.amount * txn_val.price
    txn_val = txn_val.reset_index().pivot_table(
        index='date', values='value',
        columns='symbol').replace(np.nan, 0)

    # Cumulate transaction amounts each day
    txn_val['date'] = txn_val.index.date
    txn_val = txn_val.groupby('date').cumsum()

    # Calculate exposure, then take peak of exposure every day
    txn_val['exposure'] = txn_val.abs().sum(axis=1)
    condition = (txn_val['exposure'] == txn_val.groupby(
        pd.TimeGrouper('24H'))['exposure'].transform(max))
    txn_val = txn_val[condition].drop('exposure', axis=1)

    # Compute cash delta
    txn_val['cash'] = -txn_val.sum(axis=1)

    # Shift EOD positions to positions at start of next trading day
    positions_shifted = positions.copy().shift(1).fillna(0)
    starting_capital = positions.iloc[0].sum() / (1 + returns[0])
    positions_shifted.cash[0] = starting_capital

    # Format and add start positions to intraday position changes
    txn_val.index = txn_val.index.normalize()
    corrected_positions = positions_shifted.add(txn_val, fill_value=0)
    corrected_positions.index.name = 'period_close'
    corrected_positions.columns.name = 'sid'

    return corrected_positions
def object_to_id(self, obj):
    """
    Searches elasticsearch for objects with the same username and secret,
    and (if set) the same domain, host_ip and service_id.
    """
    # Not sure yet if this is advisable... Older passwords can be overwritten...
    search = Credential.search()
    search = search.filter("term", username=obj.username)
    search = search.filter("term", secret=obj.secret)
    if obj.domain:
        search = search.filter("term", domain=obj.domain)
    else:
        search = search.exclude("exists", field="domain")
    if obj.host_ip:
        search = search.filter("term", host_ip=obj.host_ip)
    else:
        search = search.exclude("exists", field="host_ip")
    if obj.service_id:
        search = search.filter("term", service_id=obj.service_id)
    else:
        search = search.exclude("exists", field="service_id")
    if search.count():
        result = search[0].execute()[0]
        return result.meta.id
    else:
        return None
def get_consensus_at(self, block_id):
    """
    Get the consensus hash at a given block.
    Return the consensus hash if we have one for this block.
    Return None if we don't.
    """
    query = 'SELECT consensus_hash FROM snapshots WHERE block_id = ?;'
    args = (block_id,)

    con = self.db_open(self.impl, self.working_dir)
    rows = self.db_query_execute(con, query, args, verbose=False)
    res = None
    for r in rows:
        res = r['consensus_hash']

    con.close()
    return res
def is_readable(path=None):
    """
    Test if the supplied filesystem path can be read

    :param path: A filesystem path
    :return: True if the path is a file that can be read. Otherwise, False
    """
    if os.path.isfile(path) and os.access(path, os.R_OK):
        return True
    return False
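A quick runnable check of the helper above against a file we create and a path that should not exist (assuming is_readable is in scope):

import os
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    path = tmp.name

print(is_readable(path))             # True: regular file with read access
print(is_readable('/no/such/file'))  # False: not a file at all
os.unlink(path)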
def update_link(self):
    """ Redirects all links to self (the new linked object). """
    name = repr(self)
    if not name:
        return self
    links = self.__class__._get_links()
    to_be_changed = list()
    if name in links:
        for wal in links[name]:
            if wal.ref_obj and self is not wal():
                to_be_changed.append((wal.ref_obj, wal.attr))
    for o, a in to_be_changed:
        setattr(o, a, self)
    self.clean_up_link_dict()
    return self
def _warcprox_opts(self, args):
    '''
    Takes args as produced by the argument parser built by
    _build_arg_parser and builds warcprox arguments object suitable
    to pass to warcprox.main.init_controller. Copies some arguments,
    renames some, populates some with defaults appropriate for
    brozzler-easy, etc.
    '''
    warcprox_opts = warcprox.Options()
    warcprox_opts.address = 'localhost'
    # let the OS choose an available port; discover it later using
    # sock.getsockname()[1]
    warcprox_opts.port = 0
    warcprox_opts.cacert = args.cacert
    warcprox_opts.certs_dir = args.certs_dir
    warcprox_opts.directory = args.warcs_dir
    warcprox_opts.gzip = True
    warcprox_opts.prefix = 'brozzler'
    warcprox_opts.size = 1000 * 1000 * 1000
    warcprox_opts.rollover_idle_time = 3 * 60
    warcprox_opts.digest_algorithm = 'sha1'
    warcprox_opts.base32 = True
    warcprox_opts.stats_db_file = None
    warcprox_opts.playback_port = None
    warcprox_opts.playback_index_db_file = None
    warcprox_opts.rethinkdb_big_table_url = (
        'rethinkdb://%s/%s/captures' % (
            args.rethinkdb_servers, args.rethinkdb_db))
    warcprox_opts.queue_size = 500
    warcprox_opts.max_threads = None
    warcprox_opts.profile = False
    warcprox_opts.onion_tor_socks_proxy = args.onion_tor_socks_proxy
    return warcprox_opts
def winddir_text(pts):
    "Convert wind direction from 0..15 to compass point text"
    global _winddir_text_array
    if pts is None:
        return None
    if not isinstance(pts, int):
        pts = int(pts + 0.5) % 16
    if not _winddir_text_array:
        _ = pywws.localisation.translation.ugettext
        _winddir_text_array = (
            _(u'N'), _(u'NNE'), _(u'NE'), _(u'ENE'),
            _(u'E'), _(u'ESE'), _(u'SE'), _(u'SSE'),
            _(u'S'), _(u'SSW'), _(u'SW'), _(u'WSW'),
            _(u'W'), _(u'WNW'), _(u'NW'), _(u'NNW'),
        )
    return _winddir_text_array[pts]
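The lookup reduces to rounding a 0..15 reading to the nearest compass point and indexing a 16-entry table. A standalone sketch without the pywws localisation machinery (POINTS and winddir_text_plain are illustration-only names):

POINTS = ('N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE',
          'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW')

def winddir_text_plain(pts):
    if pts is None:
        return None
    if not isinstance(pts, int):
        pts = int(pts + 0.5) % 16  # round a float reading to the nearest point
    return POINTS[pts]

print(winddir_text_plain(3))     # 'ENE'
print(winddir_text_plain(15.7))  # 'N': rounds to 16, wraps back to 0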
def quotes(self, security, start, end):
    """
    Get historical prices for the given ticker security.
    Date format is 'YYYYMMDD'. Yields Quote objects.
    """
    try:
        url = 'http://www.google.com/finance/historical?q=%s&startdate=%s&enddate=%s&output=csv' % (
            security.symbol, start, end)
        try:
            page = self._request(url)
        except UfException as ufExcep:
            # if symbol is not right, we will get a 400
            if Errors.NETWORK_400_ERROR == ufExcep.getCode():
                raise UfException(Errors.STOCK_SYMBOL_ERROR,
                                  "Can't find data for stock %s, security error?" % security)
            raise ufExcep

        days = page.readlines()
        values = [day.split(',') for day in days]
        # sample values: [['Date', 'Open', 'High', 'Low', 'Close', 'Volume'],
        #                 ['2009-12-31', '112.77', '112.80', '111.39', '111.44', '90637900']...]
        for value in values[1:]:
            date = convertGoogCSVDate(value[0])
            try:
                yield Quote(date, value[1].strip(), value[2].strip(), value[3].strip(),
                            value[4].strip(), value[5].strip(), None)
            except Exception:
                LOG.warning("Exception when processing %s at date %s for value %s"
                            % (security, date, value))
    except BaseException:
        raise UfException(Errors.UNKNOWN_ERROR,
                          "Unknown Error in GoogleFinance.getHistoricalPrices %s"
                          % traceback.format_exc())
def set_formatter(name, func):
    """Replace the formatter function used by the trace decorator to
    handle formatting a specific kind of argument.

    There are several kinds of arguments that trace discriminates between:

    * instance argument - the object bound to an instance method.
    * class argument - the class object bound to a class method.
    * positional arguments (named) - values bound to distinct names.
    * positional arguments (default) - named positional arguments with
      default values specified in the function declaration.
    * positional arguments (anonymous) - an arbitrary number of values
      that are all bound to the '*' variable.
    * keyword arguments - zero or more name-value pairs that are placed
      in a dictionary and bound to the double-star variable.

    \var{name} - specifies the name of the formatter to be modified.

    * instance argument - "self", "instance" or "this"
    * class argument - "class"
    * named argument - "named", "param" or "parameter"
    * default argument - "default", "optional"
    * anonymous argument - "anonymous", "arbitrary" or "unnamed"
    * keyword argument - "keyword", "pair" or "pairs"

    \var{func} - a function to format an argument.

    * For all but anonymous formatters this function must accept two
      arguments: the variable name and the value to which it is bound.
    * The anonymous formatter function is passed only one argument
      corresponding to an anonymous value.
    * if \var{func} is "None" then the default formatter will be used.
    """
    if name in ('self', 'instance', 'this'):
        global af_self
        af_self = _formatter_self if func is None else func
    elif name == 'class':
        global af_class
        af_class = _formatter_class if func is None else func
    elif name in ('named', 'param', 'parameter'):
        global af_named
        af_named = _formatter_named if func is None else func
    elif name in ('default', 'optional'):
        global af_default
        af_default = _formatter_defaults if func is None else func
    elif name in ('anonymous', 'arbitrary', 'unnamed'):
        global af_anonymous
        af_anonymous = chop if func is None else func
    elif name in ('keyword', 'pair', 'pairs'):
        global af_keyword
        af_keyword = _formatter_named if func is None else func
    else:
        raise ValueError('unknown trace formatter %r' % name)
def read_original_textlc(lcpath):
    '''
    Read .epdlc and .tfalc light curves and return a corresponding labelled
    dict (if LC from <2012) or astropy table (if >=2012). Each has different
    keys that can be accessed via .keys()

    Input:
        lcpath: path (string) to light curve data, which is a textfile with
        HAT LC data.

    Example:
        dat = read_original_textlc('HAT-115-0003266.epdlc')
    '''

    LOGINFO('reading original HAT text LC: {:s}'.format(lcpath))

    N_lines_to_parse_comments = 50
    with open(lcpath, 'rb') as file:
        head = [next(file) for ind in range(N_lines_to_parse_comments)]

    N_comment_lines = len([l for l in head if l.decode('UTF-8')[0] == '#'])

    # if there are too many comment lines, fail out
    if N_comment_lines == N_lines_to_parse_comments:
        LOGERROR(
            'LC file {fpath} has too many comment lines'.format(fpath=lcpath)
        )
        return None

    first_data_line = list(
        filter(None, head[N_comment_lines].decode('UTF-8').split())
    )
    N_cols = len(first_data_line)

    # There are different column formats depending on when HAT pipeline was
    # run, and also different formats for different types of LCs:
    #   pre-2012:  .epdlc -> 17 columns
    #   pre-2012:  .tfalc -> 20 columns
    #   post-2012: .epdlc or .tfalc -> 32 columns
    if N_cols == 17:
        colformat = 'pre2012-epdlc'
    elif N_cols == 20:
        colformat = 'pre2012-tfalc'
    elif N_cols == 32:
        colformat = 'post2012-hatlc'
    else:
        LOGERROR("can't handle this column format yet, "
                 "file: {fpath}, ncols: {ncols}".format(fpath=lcpath,
                                                        ncols=N_cols))
        return None

    # deal with pre-2012 column format
    if colformat == 'pre2012-epdlc':
        col_names = ['framekey', 'rjd',
                     'aim_000', 'aie_000', 'aiq_000',
                     'aim_001', 'aie_001', 'aiq_001',
                     'aim_002', 'aie_002', 'aiq_002',
                     'arm_000', 'arm_001', 'arm_002',
                     'aep_000', 'aep_001', 'aep_002']
        col_dtypes = ['U8', float,
                      float, float, 'U1',
                      float, float, 'U1',
                      float, float, 'U1',
                      float, float, float,
                      float, float, float]
        dtype_pairs = [el for el in zip(col_names, col_dtypes)]

        data = np.genfromtxt(lcpath, names=col_names, dtype=col_dtypes,
                             skip_header=N_comment_lines, delimiter=None)
        out = {}
        for ix in range(len(data.dtype.names)):
            out[data.dtype.names[ix]] = data[data.dtype.names[ix]]

    elif colformat == 'pre2012-tfalc':
        col_names = ['framekey', 'rjd',
                     'aim_000', 'aie_000', 'aiq_000',
                     'aim_001', 'aie_001', 'aiq_001',
                     'aim_002', 'aie_002', 'aiq_002',
                     'arm_000', 'arm_001', 'arm_002',
                     'aep_000', 'aep_001', 'aep_002',
                     'atf_000', 'atf_001', 'atf_002']
        col_dtypes = ['U8', float,
                      float, float, 'U1',
                      float, float, 'U1',
                      float, float, 'U1',
                      float, float, float,
                      float, float, float,
                      float, float, float]
        dtype_pairs = [el for el in zip(col_names, col_dtypes)]

        data = np.genfromtxt(lcpath, names=col_names, dtype=col_dtypes,
                             skip_header=N_comment_lines, delimiter=None)
        out = {}
        for ix in range(len(data.dtype.names)):
            out[data.dtype.names[ix]] = data[data.dtype.names[ix]]

    elif colformat == 'post2012-hatlc':
        col_names = ['hatid', 'framekey', 'fld', 'bjd',
                     'aim_000', 'aie_000', 'aiq_000',
                     'aim_001', 'aie_001', 'aiq_001',
                     'aim_002', 'aie_002', 'aiq_002',
                     'arm_000', 'arm_001', 'arm_002',
                     'aep_000', 'aep_001', 'aep_002',
                     'atf_000', 'atf_001', 'atf_002',
                     'xcc', 'ycc', 'bgv', 'bge',
                     'fsv', 'fdv', 'fkv', 'iha', 'izd', 'rjd']

        out = astascii.read(lcpath, names=col_names, comment='#')

    return out
def is_type(msg_type, msg):
    """ Return whether the message is of the given type """
    for prop in MessageType.FIELDS[msg_type]["must"]:
        if msg.get(prop, False) is False:
            return False
    for prop in MessageType.FIELDS[msg_type]["prohibit"]:
        if msg.get(prop, False) is not False:
            return False
    return True
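The check only makes sense against a must/prohibit field table. A runnable sketch with a hypothetical MessageType.FIELDS (the real table lives elsewhere in the module; the TEXT entry below is invented for illustration):

class MessageType:
    TEXT = 'text'
    # hypothetical table: a text message must carry MsgId and Content,
    # and must not carry PicUrl
    FIELDS = {
        TEXT: {'must': ['MsgId', 'Content'], 'prohibit': ['PicUrl']},
    }

def is_type(msg_type, msg):
    for prop in MessageType.FIELDS[msg_type]['must']:
        if msg.get(prop, False) is False:
            return False
    for prop in MessageType.FIELDS[msg_type]['prohibit']:
        if msg.get(prop, False) is not False:
            return False
    return True

print(is_type(MessageType.TEXT, {'MsgId': 1, 'Content': 'hi'}))  # True
print(is_type(MessageType.TEXT,
              {'MsgId': 1, 'Content': 'hi', 'PicUrl': 'x'}))     # False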
def generate_output_list(self, source, key, val, line='2', hr=True,
                         show_name=False, colorize=True):
    """
    The function for generating CLI output RDAP list results.

    Args:
        source (:obj:`str`): The parent key 'network' or 'objects'
            (required).
        key (:obj:`str`): The event key 'events' or 'events_actor'
            (required).
        val (:obj:`dict`): The event dictionary (required).
        line (:obj:`str`): The line number (0-4). Determines indentation.
            Defaults to '0'.
        hr (:obj:`bool`): Enable human readable key translations. Defaults
            to True.
        show_name (:obj:`bool`): Show human readable name (default is to
            only show short). Defaults to False.
        colorize (:obj:`bool`): Colorize the console output with ANSI
            colors. Defaults to True.

    Returns:
        str: The generated output.
    """
    output = generate_output(
        line=line,
        short=HR_RDAP[source][key]['_short'] if hr else key,
        name=HR_RDAP[source][key]['_name'] if (hr and show_name) else None,
        is_parent=False if (val is None or len(val) == 0) else True,
        value='None' if (val is None or len(val) == 0) else None,
        colorize=colorize
    )

    if val is not None:
        for item in val:
            output += generate_output(
                line=str(int(line) + 1),
                value=item,
                colorize=colorize
            )

    return output
def _doElegant(self):
    """ perform elegant tracking """
    cmdlist = ['bash', self.sim_script, self.elegant_file,
               self.sim_path, self.sim_exec]
    subprocess.call(cmdlist)
def simxGetLastErrors(clientID, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    errors = []
    errorCnt = ct.c_int()
    errorStrings = ct.POINTER(ct.c_char)()
    ret = c_GetLastErrors(clientID, ct.byref(errorCnt), ct.byref(errorStrings), operationMode)
    if ret == 0:
        s = 0
        for i in range(errorCnt.value):
            a = bytearray()
            while errorStrings[s] != b'\0':
                if sys.version_info[0] == 3:
                    a.append(int.from_bytes(errorStrings[s], 'big'))
                else:
                    a.append(errorStrings[s])
                s += 1
            s += 1  # skip null
            if sys.version_info[0] == 3:
                errors.append(str(a, 'utf-8'))
            else:
                errors.append(str(a))
    return ret, errors
def appendImport(self, statement):
    '''append additional import statement(s).

    statement -- tuple or list or str
    '''
    if type(statement) in (list, tuple):
        self.extras += statement
    else:
        self.extras.append(statement)
def _utc_float(self):
    """Return UTC as a floating point Julian date."""
    tai = self.tai
    leap_dates = self.ts.leap_dates
    leap_offsets = self.ts.leap_offsets
    leap_reverse_dates = leap_dates + leap_offsets / DAY_S
    i = searchsorted(leap_reverse_dates, tai, 'right')
    return tai - leap_offsets[i] / DAY_S
async def load(self, file_path, locale=None,
               key: int = 0, pos: int = 1,
               neg: Optional[ColRanges] = None):
    """ Start the loading/watching process """
    if neg is None:
        neg: ColRanges = [(2, None)]

    await self.start(file_path, locale, kwargs={
        'key': key,
        'pos': pos,
        'neg': neg,
    })
def bump(ctx, verbose=False, pypi=False):
    """Bump a development version."""
    cfg = config.load()
    scm = scm_provider(cfg.project_root, commit=False, ctx=ctx)

    # Check for uncommitted changes
    if not scm.workdir_is_clean():
        notify.warning("You have uncommitted changes, will create a time-stamped version!")

    pep440 = scm.pep440_dev_version(verbose=verbose, non_local=pypi)

    # Rewrite 'setup.cfg'  TODO: refactor to helper, see also release-prep
    # with util.rewrite_file(cfg.rootjoin('setup.cfg')) as lines:
    #     ...
    setup_cfg = cfg.rootjoin('setup.cfg')
    if not pep440:
        notify.info("Working directory contains a release version!")
    elif os.path.exists(setup_cfg):
        with io.open(setup_cfg, encoding='utf-8') as handle:
            data = handle.readlines()
        changed = False
        for i, line in enumerate(data):
            if re.match(r"#? *tag_build *= *.*", line):
                verb, _ = data[i].split('=', 1)
                data[i] = '{}= {}\n'.format(verb, pep440)
                changed = True
        if changed:
            notify.info("Rewriting 'setup.cfg'...")
            with io.open(setup_cfg, 'w', encoding='utf-8') as handle:
                handle.write(''.join(data))
        else:
            notify.warning("No 'tag_build' setting found in 'setup.cfg'!")
    else:
        notify.warning("Cannot rewrite 'setup.cfg', none found!")

    if os.path.exists(setup_cfg):
        # Update metadata and print version
        egg_info = shell.capture("python setup.py egg_info", echo=True if verbose else None)
        for line in egg_info.splitlines():
            if line.endswith('PKG-INFO'):
                pkg_info_file = line.split(None, 1)[1]
                with io.open(pkg_info_file, encoding='utf-8') as handle:
                    notify.info('\n'.join(
                        i for i in handle.readlines()
                        if i.startswith('Version:')
                    ).strip())
        ctx.run("python setup.py -q develop", echo=True if verbose else None)
def get_directed_graph_paths(element, arrow_length):
    """
    Computes paths for a directed graph which include an arrow to
    indicate the directionality of each edge.
    """
    edgepaths = element._split_edgepaths
    edges = edgepaths.split(datatype='array', dimensions=edgepaths.kdims)
    arrows = []
    for e in edges:
        sx, sy = e[0]
        ex, ey = e[1]
        rad = np.arctan2(ey-sy, ex-sx)
        xa0 = ex - np.cos(rad+np.pi/8)*arrow_length
        ya0 = ey - np.sin(rad+np.pi/8)*arrow_length
        xa1 = ex - np.cos(rad-np.pi/8)*arrow_length
        ya1 = ey - np.sin(rad-np.pi/8)*arrow_length
        arrow = np.array([(sx, sy), (ex, ey), (np.nan, np.nan),
                          (xa0, ya0), (ex, ey), (xa1, ya1)])
        arrows.append(arrow)
    return arrows
def error(self, *args):
    """Log an error.  By default this will also raise an exception."""
    if _canShortcutLogging(self.logCategory, ERROR):
        return
    errorObject(self.logObjectName(), self.logCategory,
                *self.logFunction(*args))
def on_ready_to_stop(self):
    """Invoked when the consumer is ready to stop."""
    # Set the state to shutting down if it wasn't set as that during loop
    self.set_state(self.STATE_SHUTTING_DOWN)

    # Reset any signal handlers
    signal.signal(signal.SIGABRT, signal.SIG_IGN)
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGPROF, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, signal.SIG_IGN)

    # Allow the consumer to gracefully stop and then stop the IOLoop
    if self.consumer:
        self.stop_consumer()

    # Clear IOLoop constructs
    self.consumer_lock = None

    # Stop the IOLoop
    if self.ioloop:
        LOGGER.debug('Stopping IOLoop')
        self.ioloop.stop()

    # Note that shutdown is complete and set the state accordingly
    self.set_state(self.STATE_STOPPED)
    LOGGER.info('Shutdown complete')
def get_namespace_view(self):
    """
    Return the namespace view

    This is a dictionary with the following structure

    {'a': {'color': '#800000', 'size': 1, 'type': 'str', 'view': '1'}}

    Here:
    * 'a' is the variable name
    * 'color' is the color used to show it
    * 'size' and 'type' are self-evident
    * and 'view' is its value or the text shown in the last column
    """
    from spyder_kernels.utils.nsview import make_remote_view

    settings = self.namespace_view_settings
    if settings:
        ns = self._get_current_namespace()
        view = repr(make_remote_view(ns, settings, EXCLUDED_NAMES))
        return view
    else:
        return repr(None)
def _make_links_from(self, body):
    '''Creates linked navigators from a HAL response body'''
    ld = utils.CurieDict(self._core.default_curie, {})
    for rel, link in body.get('_links', {}).items():
        if rel != 'curies':
            if isinstance(link, list):
                ld[rel] = utils.LinkList(
                    (self._navigator_or_thunk(lnk), lnk) for lnk in link)
            else:
                ld[rel] = self._navigator_or_thunk(link)
    return ld
def _set_fabric_priority(self, v, load=False):
    """
    Setter method for fabric_priority, mapped from YANG variable
    /cee_map/remap/fabric_priority (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_fabric_priority is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_fabric_priority() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=fabric_priority.fabric_priority, is_container='container',
                         presence=False, yang_name="fabric-priority",
                         rest_name="fabric-priority", parent=self,
                         path_helper=self._path_helper, extmethods=self._extmethods,
                         register_paths=True,
                         extensions={u'tailf-common': {u'info': u' CoS for fabric priority'}},
                         namespace='urn:brocade.com:mgmt:brocade-cee-map',
                         defining_module='brocade-cee-map', yang_type='container',
                         is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """fabric_priority must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=fabric_priority.fabric_priority, is_container='container', presence=False, yang_name="fabric-priority", rest_name="fabric-priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u' CoS for fabric priority'}}, namespace='urn:brocade.com:mgmt:brocade-cee-map', defining_module='brocade-cee-map', yang_type='container', is_config=True)""",
        })

    self.__fabric_priority = t
    if hasattr(self, '_set'):
        self._set()
def py_run(command_options="", return_std=False, stdout=None, stderr=None):
    """Run pylint from python

    ``command_options`` is a string containing ``pylint`` command line options;
    ``return_std`` (boolean) indicates return of created standard output
    and error (see below);
    ``stdout`` and ``stderr`` are 'file-like' objects in which standard output
    could be written.

    Calling agent is responsible for stdout/err management (creation, close).
    Default standard output and error are those from sys,
    or standalone ones (``subprocess.PIPE``) are used
    if they are not set and ``return_std``.

    If ``return_std`` is set to ``True``, this function returns a 2-uple
    containing standard output and error related to created process,
    as follows: ``(stdout, stderr)``.

    To silently run Pylint on a module, and get its standard output and error:
        >>> (pylint_stdout, pylint_stderr) = py_run('module_name.py', True)
    """
    # Detect if we use Python as executable or not, else default to `python`
    executable = sys.executable if "python" in sys.executable else "python"

    # Create command line to call pylint
    epylint_part = [executable, "-c", "from pylint import epylint;epylint.Run()"]
    options = shlex.split(command_options, posix=not sys.platform.startswith("win"))
    cli = epylint_part + options

    # Providing standard output and/or error if not set
    if stdout is None:
        if return_std:
            stdout = PIPE
        else:
            stdout = sys.stdout
    if stderr is None:
        if return_std:
            stderr = PIPE
        else:
            stderr = sys.stderr
    # Call pylint in a subprocess
    process = Popen(
        cli,
        shell=False,
        stdout=stdout,
        stderr=stderr,
        env=_get_env(),
        universal_newlines=True,
    )
    proc_stdout, proc_stderr = process.communicate()
    # Return standard output and error
    if return_std:
        return StringIO(proc_stdout), StringIO(proc_stderr)
    return None
Run pylint from Python. ``command_options`` is a string containing ``pylint`` command line options; ``return_std`` (boolean) indicates return of created standard output and error (see below); ``stdout`` and ``stderr`` are 'file-like' objects in which standard output could be written. Calling agent is responsible for stdout/err management (creation, close). Default standard output and error are those from sys, or standalone ones (``subprocess.PIPE``) are used if they are not set and ``return_std``. If ``return_std`` is set to ``True``, this function returns a 2-tuple containing standard output and error related to created process, as follows: ``(stdout, stderr)``. To silently run Pylint on a module, and get its standard output and error: >>> (pylint_stdout, pylint_stderr) = py_run('module_name.py', True)
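A minimal usage sketch for ``py_run``: lint a module and capture both streams as StringIO objects. The module name here is hypothetical, and pylint must be installed.

from pylint import epylint

# Lint a hypothetical module; return_std=True yields (stdout, stderr).
pylint_stdout, pylint_stderr = epylint.py_run('my_module.py', True)
print(pylint_stdout.getvalue())   # the lint report
print(pylint_stderr.getvalue())   # pylint's own errors, if any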
def _filter_records(self, records, rtype=None, name=None, content=None, identifier=None):  # pylint: disable=too-many-arguments,no-self-use
    """
    Filter DNS records by identifier, type, name or content.
    """
    if not records:
        return []
    if identifier is not None:
        LOGGER.debug('Filtering %d records by id: %s', len(records), identifier)
        records = [record for record in records if record['id'] == identifier]
    if rtype is not None:
        LOGGER.debug('Filtering %d records by type: %s', len(records), rtype)
        records = [record for record in records if record['type'] == rtype]
    if name is not None:
        LOGGER.debug('Filtering %d records by name: %s', len(records), name)
        if name.endswith('.'):
            name = name[:-1]
        records = [record for record in records if name == record['name']]
    if content is not None:
        LOGGER.debug('Filtering %d records by content: %s', len(records), content.lower())
        records = [record for record in records
                   if record['content'].lower() == content.lower()]
    return records
Filter DNS records by identifier, type, name or content.
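An illustrative call, assuming ``provider`` is an instance of the containing provider class; the records below are made up.

records = [
    {'id': '1', 'type': 'A',   'name': 'example.com',     'content': '192.0.2.1'},
    {'id': '2', 'type': 'TXT', 'name': 'www.example.com', 'content': 'HELLO'},
]
# Content matching is case-insensitive, and a trailing dot on `name` is stripped.
matched = provider._filter_records(records, rtype='TXT', content='hello')
assert matched == [records[1]]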
def dump_weights(tf_save_dir, outfile, options):
    """
    Dump the trained weights from a model to a HDF5 file.
    """
    def _get_outname(tf_name):
        outname = re.sub(':0$', '', tf_name)
        # Strip the leading 'lm/' scope.  Note: str.lstrip('lm/') would be
        # wrong here, since it removes any of the characters 'l', 'm', '/'
        # from the left rather than the literal prefix.
        outname = re.sub('^lm/', '', outname)
        outname = re.sub('/rnn/', '/RNN/', outname)
        outname = re.sub('/multi_rnn_cell/', '/MultiRNNCell/', outname)
        outname = re.sub('/cell_', '/Cell', outname)
        outname = re.sub('/lstm_cell/', '/LSTMCell/', outname)
        if '/RNN/' in outname:
            if 'projection' in outname:
                outname = re.sub('projection/kernel', 'W_P_0', outname)
            else:
                outname = re.sub('/kernel', '/W_0', outname)
            outname = re.sub('/bias', '/B', outname)
        return outname

    ckpt_file = tf.train.latest_checkpoint(tf_save_dir)

    config = tf.ConfigProto(allow_soft_placement=True)
    with tf.Graph().as_default():
        with tf.Session(config=config) as sess:
            with tf.variable_scope('lm'):
                LanguageModel(options, False)  # Create graph
                # we use the "Saver" class to load the variables
                loader = tf.train.Saver()
                loader.restore(sess, ckpt_file)

            with h5py.File(outfile, 'w') as fout:
                for v in tf.trainable_variables():
                    if v.name.find('softmax') >= 0:
                        # don't dump these
                        continue
                    outname = _get_outname(v.name)
                    # print("Saving variable {0} with name {1}".format(
                    #     v.name, outname))
                    shape = v.get_shape().as_list()
                    dset = fout.create_dataset(outname, shape, dtype='float32')
                    values = sess.run([v])[0]
                    dset[...] = values
Dump the trained weights from a model to a HDF5 file.
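Why the prefix strip above uses ``re.sub`` rather than ``str.lstrip``: ``lstrip('lm/')`` removes any of the characters 'l', 'm' and '/' from the left, not the literal prefix. A standalone sketch of the difference:

import re

name = 'lm/lstm/kernel'
print(name.lstrip('lm/'))        # 'stm/kernel'  -- leading l/m// characters eaten
print(re.sub('^lm/', '', name))  # 'lstm/kernel' -- only the prefix removed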
def save(self, obj): """ save an object :param obj: the object :return: the saved object """ if obj not in self.session: self.session.add(obj) else: obj = self.session.merge(obj) self.session.flush() self.session.refresh(obj) return obj
save an object :param obj: the object :return: the saved object
def product_requests_button(page, page_perms, is_parent=False): """Renders a 'requests' button on the page index showing the number of times the product has been requested. Attempts to only show such a button for valid product/variant pages """ # Is this page the 'product' model? # It is generally safe to assume either the page will have a 'variants' # member or will be an instance of longclaw.utils.ProductVariant if hasattr(page, 'variants') or isinstance(page, ProductVariant): yield widgets.PageListingButton( 'View Requests', reverse('productrequests_admin', kwargs={'pk': page.id}), priority=40 )
Renders a 'requests' button on the page index showing the number of times the product has been requested. Attempts to only show such a button for valid product/variant pages
def getTransfer(self, iso_packets=0):
    """
    Get a USBTransfer instance for asynchronous use.

    iso_packets: the number of isochronous transfer descriptors to
    allocate.
    """
    result = USBTransfer(
        self.__handle, iso_packets,
        self.__inflight_add, self.__inflight_remove,
    )
    self.__transfer_set.add(result)
    return result
Get a USBTransfer instance for asynchronous use. iso_packets: the number of isochronous transfer descriptors to allocate.
async def parse_tag_results(soup):
    """
    Parse a page of tag or trait results (both share the same format).

    :param soup: BS4 Class Object
    :return: A list of tag names
    """
    cells = soup.find_all('td', class_='tc3')
    tags = []
    for item in cells:
        tags.append(item.a.string)
    return tags
Parse a page of tag or trait results (both share the same format). :param soup: BS4 Class Object :return: A list of tag names
def rpc_get_definition(self, filename, source, offset): """Get the location of the definition for the symbol at the offset. """ return self._call_backend("rpc_get_definition", None, filename, get_source(source), offset)
Get the location of the definition for the symbol at the offset.
def get_all_entities(self, membership_cache=None, entities_by_kind=None, return_models=False, is_active=True):
    """
    Returns a list of all entity ids in this group or optionally returns a queryset for all
    entity models. In order to reduce queries for multiple group lookups, it is expected that
    the membership_cache and entities_by_kind are built outside of this method and passed in
    as arguments.
    :param membership_cache: A group cache dict generated from
        `EntityGroup.objects.get_membership_cache()`
    :type membership_cache: dict
    :param entities_by_kind: An entities by kind dict generated from the `get_entities_by_kind`
        function
    :type entities_by_kind: dict
    :param return_models: If True, returns an Entity queryset, if False, returns a set of entity ids
    :type return_models: bool
    :param is_active: Flag to control entities being returned. Defaults to True for active entities only
    :type is_active: bool
    """
    # If cache args were not passed, generate the cache
    if membership_cache is None:
        membership_cache = EntityGroup.objects.get_membership_cache([self.id], is_active=is_active)
    if entities_by_kind is None:
        entities_by_kind = get_entities_by_kind(membership_cache=membership_cache)

    # Build set of all entity ids for this group
    entity_ids = set()

    # This group does have entities
    if membership_cache.get(self.id):
        # Loop over each membership in this group
        for entity_id, entity_kind_id in membership_cache[self.id]:
            if entity_id:
                if entity_kind_id:
                    # All sub entities of this kind under this entity
                    entity_ids.update(entities_by_kind[entity_kind_id][entity_id])
                else:
                    # Individual entity
                    entity_ids.add(entity_id)
            else:
                # All entities of this kind
                entity_ids.update(entities_by_kind[entity_kind_id]['all'])

    # Check if a queryset needs to be returned
    if return_models:
        return Entity.objects.filter(id__in=entity_ids)

    return entity_ids
Returns a list of all entity ids in this group or optionally returns a queryset for all entity models. In order to reduce queries for multiple group lookups, it is expected that the membership_cache and entities_by_kind are built outside of this method and passed in as arguments. :param membership_cache: A group cache dict generated from `EntityGroup.objects.get_membership_cache()` :type membership_cache: dict :param entities_by_kind: An entities by kind dict generated from the `get_entities_by_kind` function :type entities_by_kind: dict :param return_models: If True, returns an Entity queryset, if False, returns a set of entity ids :type return_models: bool :param is_active: Flag to control entities being returned. Defaults to True for active entities only :type is_active: bool
def _parse_author(self): r"""Parse the author from TeX source. Sets the ``_authors`` attribute. Goal is to parse:: \author{ A.~Author, B.~Author, and C.~Author} Into:: ['A. Author', 'B. Author', 'C. Author'] """ command = LatexCommand( 'author', {'name': 'authors', 'required': True, 'bracket': '{'}) try: parsed = next(command.parse(self._tex)) except StopIteration: self._logger.warning('lsstdoc has no author') self._authors = [] return try: content = parsed['authors'] except KeyError: self._logger.warning('lsstdoc has no author') self._authors = [] return # Clean content content = content.replace('\n', ' ') content = content.replace('~', ' ') content = content.strip() # Split content into list of individual authors authors = [] for part in content.split(','): part = part.strip() for split_part in part.split('and '): split_part = split_part.strip() if len(split_part) > 0: authors.append(split_part) self._authors = authors
r"""Parse the author from TeX source. Sets the ``_authors`` attribute. Goal is to parse:: \author{ A.~Author, B.~Author, and C.~Author} Into:: ['A. Author', 'B. Author', 'C. Author']
def save_config_variables(self):
    """Save the configuration variables in non-volatile memory.

    This method should be used in conjunction with *write_config_variables*.

    :rtype: boolean

    :Example:

    >>> alpha.save_config_variables()
    True
    """
    command = 0x43
    byte_list = [0x3F, 0x3C, 0x3F, 0x3C, 0x43]
    success = [0xF3, 0x43, 0x3F, 0x3C, 0x3F, 0x3C]
    resp = []

    # Send the command byte and then wait for 10 ms
    r = self.cnxn.xfer([command])[0]

    sleep(10e-3)

    # Append the response of the command byte to the list
    resp.append(r)

    # Send the rest of the config bytes
    for each in byte_list:
        r = self.cnxn.xfer([each])[0]
        resp.append(r)

    sleep(0.1)

    return resp == success
Save the configuration variables in non-volatile memory. This method should be used in conjunction with *write_config_variables*. :rtype: boolean :Example: >>> alpha.save_config_variables() True
def zip_process(**kwargs):
    """
    Zip or unzip a file or directory tree.

    Recognized kwargs: ``action`` ('zip' to compress, anything else to
    extract), ``path`` (the source to compress, or the extraction target),
    ``payloadFile`` (the archive to extract) and ``arcroot`` (the
    archive-internal root used when compressing).

    :return: a dict describing the outcome of the operation.
    """
    str_localPath   = ""
    str_zipFileName = ""
    str_action      = "zip"
    str_arcroot     = ""
    for k, v in kwargs.items():
        if k == 'path':        str_localPath   = v
        if k == 'action':      str_action      = v
        if k == 'payloadFile': str_zipFileName = v
        if k == 'arcroot':     str_arcroot     = v

    if str_action == 'zip':
        str_mode        = 'w'
        str_zipFileName = '%s.zip' % uuid.uuid4()
    else:
        str_mode        = 'r'

    try:
        ziphandler = zipfile.ZipFile(str_zipFileName, str_mode, zipfile.ZIP_DEFLATED)
        if str_mode == 'w':
            if os.path.isdir(str_localPath):
                zipdir(str_localPath, ziphandler, arcroot=str_arcroot)
            else:
                if len(str_arcroot):
                    str_arcname = str_arcroot.split('/')[-1] + str_localPath.split(str_arcroot)[1]
                else:
                    str_arcname = str_localPath
                try:
                    ziphandler.write(str_localPath, arcname=str_arcname)
                except OSError:
                    ziphandler.close()
                    os.remove(str_zipFileName)
                    return {
                        'msg':    json.dumps({"msg": "No file or directory found for '%s'" % str_localPath}),
                        'status': False
                    }
        if str_mode == 'r':
            ziphandler.extractall(str_localPath)
        ziphandler.close()
        str_msg  = '%s operation successful' % str_action
        b_status = True
    except Exception:
        str_msg  = '%s operation failed' % str_action
        b_status = False
    return {
        'msg':           str_msg,
        'fileProcessed': str_zipFileName,
        'status':        b_status,
        'path':          str_localPath,
        'zipmode':       str_mode,
        # Guard the stat call so the failure path cannot itself raise.
        'filesize':      "{:,}".format(os.stat(str_zipFileName).st_size) if os.path.isfile(str_zipFileName) else "0",
        'timestamp':     '%s' % datetime.datetime.now()
    }
Zip or unzip a file or directory tree. Recognized kwargs: ``action`` ('zip' to compress, anything else to extract), ``path`` (the source to compress, or the extraction target), ``payloadFile`` (the archive to extract) and ``arcroot`` (the archive-internal root used when compressing). :return: a dict describing the outcome of the operation.
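A round-trip usage sketch (all paths are hypothetical): compress a directory, then extract the resulting archive elsewhere.

result = zip_process(action='zip', path='/tmp/data', arcroot='/tmp/data')
if result['status']:
    zip_process(action='unzip',
                path='/tmp/restored',
                payloadFile=result['fileProcessed'])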
def enrich(self, column):
    """
    This method adds a new column depending on the extension
    of the file.

    :param column: column where the file path is found
    :type column: string

    :return: returns the original dataframe with a new column
        named as 'filetype' that contains information about its
        extension
    :rtype: pandas.DataFrame
    """
    if column not in self.data:
        return self.data

    # Insert a new column with default values
    self.data["filetype"] = 'Other'

    # Insert 'Code' only in those rows that are
    # detected as being source code thanks to its extension
    reg = r"\.c$|\.h$|\.cc$|\.cpp$|\.cxx$|\.c\+\+$|\.cp$|\.py$|\.js$|\.java$|\.rs$|\.go$"
    self.data.loc[self.data[column].str.contains(reg), 'filetype'] = 'Code'

    return self.data
This method adds a new column depending on the extension of the file. :param column: column where the file path is found :type column: string :return: returns the original dataframe with a new column named as 'filetype' that contains information about its extension :rtype: pandas.DataFrame
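A sketch of the intended effect, assuming the enricher object wraps a DataFrame exposed as ``self.data`` (the ``FileType`` constructor name is an assumption):

import pandas as pd

df = pd.DataFrame({'filepath': ['src/main.py', 'docs/README.md', 'lib/util.go']})
enricher = FileType(df)                # hypothetical constructor
enriched = enricher.enrich('filepath')
print(enriched['filetype'].tolist())   # ['Code', 'Other', 'Code']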
def find_transport_reactions(model): """ Return a list of all transport reactions. Parameters ---------- model : cobra.Model The metabolic model under investigation. Notes ----- A transport reaction is defined as follows: 1. It contains metabolites from at least 2 compartments and 2. at least 1 metabolite undergoes no chemical reaction, i.e., the formula and/or annotation stays the same on both sides of the equation. A notable exception is transport via PTS, which also contains the following restriction: 3. The transported metabolite(s) are transported into a compartment through the exchange of a phosphate group. An example of transport via PTS would be pep(c) + glucose(e) -> glucose-6-phosphate(c) + pyr(c) Reactions similar to transport via PTS (referred to as "modified transport reactions") follow a similar pattern: A(x) + B-R(y) -> A-R(y) + B(y) Such modified transport reactions can be detected, but only when a formula field exists for all metabolites in a particular reaction. If this is not the case, transport reactions are identified through annotations, which cannot detect modified transport reactions. """ transport_reactions = [] transport_rxn_candidates = set(model.reactions) - set(model.boundary) \ - set(find_biomass_reaction(model)) transport_rxn_candidates = set( [rxn for rxn in transport_rxn_candidates if len(rxn.compartments) >= 2] ) # Add all labeled transport reactions sbo_matches = set([rxn for rxn in transport_rxn_candidates if rxn.annotation is not None and 'sbo' in rxn.annotation and rxn.annotation['sbo'] in TRANSPORT_RXN_SBO_TERMS]) if len(sbo_matches) > 0: transport_reactions += list(sbo_matches) # Find unlabeled transport reactions via formula or annotation checks for rxn in transport_rxn_candidates: # Check if metabolites have formula field rxn_mets = set([met.formula for met in rxn.metabolites]) if (None not in rxn_mets) and (len(rxn_mets) != 0): if is_transport_reaction_formulae(rxn): transport_reactions.append(rxn) elif is_transport_reaction_annotations(rxn): transport_reactions.append(rxn) return set(transport_reactions)
Return a list of all transport reactions. Parameters ---------- model : cobra.Model The metabolic model under investigation. Notes ----- A transport reaction is defined as follows: 1. It contains metabolites from at least 2 compartments and 2. at least 1 metabolite undergoes no chemical reaction, i.e., the formula and/or annotation stays the same on both sides of the equation. A notable exception is transport via PTS, which also contains the following restriction: 3. The transported metabolite(s) are transported into a compartment through the exchange of a phosphate group. An example of transport via PTS would be pep(c) + glucose(e) -> glucose-6-phosphate(c) + pyr(c) Reactions similar to transport via PTS (referred to as "modified transport reactions") follow a similar pattern: A(x) + B-R(y) -> A-R(y) + B(y) Such modified transport reactions can be detected, but only when a formula field exists for all metabolites in a particular reaction. If this is not the case, transport reactions are identified through annotations, which cannot detect modified transport reactions.
def findentry(self, item):
    """Case-insensitively check whether an item is in the list.

    Returns the matching entry (with its stored casing), or None."""
    if not isinstance(item, str):
        raise TypeError(
            'Members of this object must be strings. '
            'You supplied "%s"' % type(item))
    for entry in self:
        if item.lower() == entry.lower():
            return entry
    return None
Case-insensitively check whether an item is in the list. Returns the matching entry (with its stored casing), or None.
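Usage sketch, assuming the containing class is a list subclass holding strings (``CaselessList`` is a hypothetical name):

names = CaselessList(['Alice', 'Bob'])
print(names.findentry('ALICE'))   # 'Alice' -- the stored casing is returned
print(names.findentry('Carol'))   # None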
def new_thing(self, name, **stats): """Create a new thing, located here, and return it.""" return self.character.new_thing( name, self.name, **stats )
Create a new thing, located here, and return it.
def get(block_id):
    """Processing block detail resource."""
    _url = get_root_url()
    try:
        block = next(DB.get_block_details([block_id]))
        response = block
        response['links'] = {
            'self': '{}'.format(request.url),
            'list': '{}/processing-blocks'.format(_url),
            'home': '{}'.format(_url)
        }
        return response
    except IndexError as error:
        response = dict(message='Unable to GET Processing Block',
                        id='{}'.format(block_id),
                        error=str(error))
        response['links'] = {
            'list': '{}/processing-blocks'.format(_url),
            'home': '{}'.format(_url)
        }
        return response, HTTPStatus.NOT_FOUND
Processing block detail resource.
def length(self, t0=0, t1=1, error=None, min_depth=None):
    """Returns the length of the line segment between t0 and t1.

    ``error`` and ``min_depth`` are accepted for interface compatibility
    with curved segments and are ignored here.
    """
    return abs(self.end - self.start)*(t1-t0)
Returns the length of the line segment between t0 and t1. ``error`` and ``min_depth`` are accepted for interface compatibility with curved segments and are ignored here.
def from_layer(cls, font, layerName=None, copy=False, skipExportGlyphs=None): """Return a mapping of glyph names to glyph objects from `font`.""" if layerName is not None: layer = font.layers[layerName] else: layer = font.layers.defaultLayer if copy: self = _copyLayer(layer, obj_type=cls) self.lib = deepcopy(layer.lib) else: self = cls((g.name, g) for g in layer) self.lib = layer.lib # If any glyphs in the skipExportGlyphs list are used as components, decompose # them in the containing glyphs... if skipExportGlyphs: for glyph in self.values(): if any(c.baseGlyph in skipExportGlyphs for c in glyph.components): deepCopyContours(self, glyph, glyph, Transform(), skipExportGlyphs) if hasattr(glyph, "removeComponent"): # defcon for c in [ component for component in glyph.components if component.baseGlyph in skipExportGlyphs ]: glyph.removeComponent(c) else: # ufoLib2 glyph.components[:] = [ c for c in glyph.components if c.baseGlyph not in skipExportGlyphs ] # ... and then remove them from the glyph set, if even present. for glyph_name in skipExportGlyphs: if glyph_name in self: del self[glyph_name] self.name = layer.name if layerName is not None else None return self
Return a mapping of glyph names to glyph objects from `font`.
def authenticate(self, token_value): """Check that the password is valid. This allows for revoking of a user's preview rights by changing the valid passwords. """ try: backend_path, user_id = token_value.split(':', 1) except (ValueError, AttributeError): return False backend = auth.load_backend(backend_path) return bool(backend.get_user(user_id))
Check that the password is valid. This allows for revoking of a user's preview rights by changing the valid passwords.
def dataset_archive(dataset, signature, data_home=None, ext=".zip"): """ Checks to see if the dataset archive file exists in the data home directory, found with ``get_data_home``. By specifying the signature, this function also checks to see if the archive is the latest version by comparing the sha256sum of the local archive with the specified signature. Parameters ---------- dataset : str The name of the dataset; should either be a folder in data home or specified in the yellowbrick.datasets.DATASETS variable. signature : str The SHA 256 signature of the dataset, used to determine if the archive is the latest version of the dataset or not. data_home : str, optional The path on disk where data is stored. If not passed in, it is looked up from YELLOWBRICK_DATA or the default returned by ``get_data_home``. ext : str, default: ".zip" The extension of the archive file. Returns ------- exists : bool True if the dataset archive exists and is the latest version. """ data_home = get_data_home(data_home) path = os.path.join(data_home, dataset+ext) if os.path.exists(path) and os.path.isfile(path): return sha256sum(path) == signature return False
Checks to see if the dataset archive file exists in the data home directory, found with ``get_data_home``. By specifying the signature, this function also checks to see if the archive is the latest version by comparing the sha256sum of the local archive with the specified signature. Parameters ---------- dataset : str The name of the dataset; should either be a folder in data home or specified in the yellowbrick.datasets.DATASETS variable. signature : str The SHA 256 signature of the dataset, used to determine if the archive is the latest version of the dataset or not. data_home : str, optional The path on disk where data is stored. If not passed in, it is looked up from YELLOWBRICK_DATA or the default returned by ``get_data_home``. ext : str, default: ".zip" The extension of the archive file. Returns ------- exists : bool True if the dataset archive exists and is the latest version.
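A sketch of the freshness check this enables before downloading (the dataset name and signature are hypothetical):

SIGNATURE = 'e9c48...a41'  # expected sha256 of concrete.zip, truncated here
if not dataset_archive('concrete', SIGNATURE):
    print('archive missing or stale; fetch a fresh copy')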
def register_actions(self, shortcut_manager): """Register callback methods for triggered actions :param rafcon.gui.shortcut_manager.ShortcutManager shortcut_manager: Shortcut Manager Object holding mappings between shortcuts and actions. """ shortcut_manager.add_callback_for_action('rename', self.rename_selected_state) super(StatesEditorController, self).register_actions(shortcut_manager)
Register callback methods for triggered actions :param rafcon.gui.shortcut_manager.ShortcutManager shortcut_manager: Shortcut Manager Object holding mappings between shortcuts and actions.
def _symlink_local_src(self, gopath, go_local_src, required_links): """Creates symlinks from the given gopath to the source files of the given local package. Also duplicates directory structure leading to source files of package within gopath, in order to provide isolation to the package. Adds the symlinks to the source files to required_links. """ source_list = [os.path.join(get_buildroot(), src) for src in go_local_src.sources_relative_to_buildroot()] rel_list = go_local_src.sources_relative_to_target_base() source_iter = zip(source_list, rel_list) return self._symlink_lib(gopath, go_local_src, source_iter, required_links)
Creates symlinks from the given gopath to the source files of the given local package. Also duplicates directory structure leading to source files of package within gopath, in order to provide isolation to the package. Adds the symlinks to the source files to required_links.
def gf_poly_mul_simple(p, q): # simple equivalent way of multiplying two polynomials without precomputation, but thus it's slower
    '''Multiply two polynomials in a Galois field'''
    # Pre-allocate the result array
    r = bytearray(len(p) + len(q) - 1)
    # Compute the polynomial multiplication (just like the outer product of two vectors, we multiply each coefficient of p with all coefficients of q)
    for j in range(len(q)):
        for i in range(len(p)):
            r[i + j] ^= gf_mul(p[i], q[j]) # equivalent to: r[i + j] = gf_add(r[i+j], gf_mul(p[i], q[j])) -- you can see it's your usual polynomial multiplication
    return r
Multiply two polynomials in a Galois field
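A worked example in GF(2^8), where coefficient addition is XOR: multiplying (x + 1) by (x + 2) gives x^2 + (1 XOR 2)x + 2.

# Coefficients are listed highest degree first.
product = gf_poly_mul_simple(bytearray([1, 1]), bytearray([1, 2]))
print(list(product))  # [1, 3, 2]  ->  x^2 + 3x + 2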
def kwargs(self): """Returns a dict of the kwargs for this Struct which were not interpreted by the baseclass. This excludes fields like `extends`, `merges`, and `abstract`, which are consumed by SerializableFactory.create and Validatable.validate. """ return {k: v for k, v in self._kwargs.items() if k not in self._INTERNAL_FIELDS}
Returns a dict of the kwargs for this Struct which were not interpreted by the baseclass. This excludes fields like `extends`, `merges`, and `abstract`, which are consumed by SerializableFactory.create and Validatable.validate.
def ToTsvExcel(self, columns_order=None, order_by=()): """Returns a file in tab-separated-format readable by MS Excel. Returns a file in UTF-16 little endian encoding, with tabs separating the values. Args: columns_order: Delegated to ToCsv. order_by: Delegated to ToCsv. Returns: A tab-separated little endian UTF16 file representing the table. """ csv_result = self.ToCsv(columns_order, order_by, separator="\t") if not isinstance(csv_result, six.text_type): csv_result = csv_result.decode("utf-8") return csv_result.encode("UTF-16LE")
Returns a file in tab-separated-format readable by MS Excel. Returns a file in UTF-16 little endian encoding, with tabs separating the values. Args: columns_order: Delegated to ToCsv. order_by: Delegated to ToCsv. Returns: A tab-separated little endian UTF16 file representing the table.
def process_notes(notes, ref_data): '''Add reference information to the bottom of a notes file `:ref:` tags are removed and the actual reference data is appended ''' ref_keys = ref_data.keys() found_refs = set() for k in ref_keys: if k in notes: found_refs.add(k) # The block to append reference_sec = '\n\n' reference_sec += '-------------------------------------------------\n' reference_sec += ' REFERENCES MENTIONED ABOVE\n' reference_sec += ' (not necessarily references for the basis sets)\n' reference_sec += '-------------------------------------------------\n' # Add reference data if len(found_refs) == 0: return notes for r in sorted(found_refs): rtxt = references.reference_text(ref_data[r]) reference_sec += r + '\n' reference_sec += textwrap.indent(rtxt, ' ' * 4) reference_sec += '\n\n' return notes + reference_sec
Add reference information to the bottom of a notes file `:ref:` tags are removed and the actual reference data is appended
def escape(s):
    """Convert the characters &, <, >, ' and " in string s to HTML-safe
    sequences. Use this if you need to display text that might contain
    such characters in HTML. Marks return value as markup string.
    """
    if hasattr(s, '__html__'):
        return s.__html__()

    if isinstance(s, six.binary_type):
        # Decode bytes directly; six.text_type(str(s), 'utf8') breaks on
        # Python 3, where str(b'..') yields the repr "b'..'".
        s = s.decode('utf8')
    elif not isinstance(s, six.text_type):
        s = six.text_type(s)

    return (s
            .replace('&', '&amp;')
            .replace('>', '&gt;')
            .replace('<', '&lt;')
            .replace("'", '&#39;')
            .replace('"', '&#34;')
            )
Convert the characters &, <, >, ' and " in string s to HTML-safe sequences. Use this if you need to display text that might contain such characters in HTML. Marks return value as markup string.
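A quick check of the behaviour; ampersands are replaced first so that the '&' introduced by the later substitutions is not escaped again.

print(escape('<a href="x">Tom & Jerry\'s</a>'))
# &lt;a href=&#34;x&#34;&gt;Tom &amp; Jerry&#39;s&lt;/a&gt;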
def from_object(obj):
    """
    Convert an object representing contact information to an instance
    `Contact`.

    @param obj: an object containing the following attributes:

        * `name`: an item of the enumeration `ContactName` representing the
          type of this contact information.

        * `value`: value of this contact information, represented by a
          string, such as ``+84.01272170781``, the formatted value for a
          telephone number property.

        * `is_primary`: indicate whether this contact property is the first
          to be used to contact the entity that this contact information
          corresponds to.  There is only one primary contact property for
          a given property name (e.g., `EMAIL`, `PHONE`, `WEBSITE`).

        * `is_verified`: indicate whether this contact information has been
          verified, whether it has been grabbed from a trusted Social
          Networking Service (SNS), or whether through a challenge/response
          process.

    @raise ValueError: if the value of this contact information is null.
    """
    return obj if isinstance(obj, Contact) \
        else Contact(cast.string_to_enum(obj.name, Contact.ContactName),
                     obj.value,
                     is_primary=obj.is_primary and cast.string_to_boolean(obj.is_primary, strict=True),
                     is_verified=obj.is_verified and cast.string_to_boolean(obj.is_verified, strict=True))
Convert an object representing contact information to an instance `Contact`.

@param obj: an object containing the following attributes:

* `name`: an item of the enumeration `ContactName` representing the type of this contact information.

* `value`: value of this contact information, represented by a string, such as ``+84.01272170781``, the formatted value for a telephone number property.

* `is_primary`: indicate whether this contact property is the first to be used to contact the entity that this contact information corresponds to. There is only one primary contact property for a given property name (e.g., `EMAIL`, `PHONE`, `WEBSITE`).

* `is_verified`: indicate whether this contact information has been verified, whether it has been grabbed from a trusted Social Networking Service (SNS), or whether through a challenge/response process.

@raise ValueError: if the value of this contact information is null.
def virtual_machines_list_available_sizes(name, resource_group, **kwargs): # pylint: disable=invalid-name ''' .. versionadded:: 2019.2.0 Lists all available virtual machine sizes to which the specified virtual machine can be resized. :param name: The name of the virtual machine. :param resource_group: The resource group name assigned to the virtual machine. CLI Example: .. code-block:: bash salt-call azurearm_compute.virtual_machines_list_available_sizes testvm testgroup ''' result = {} compconn = __utils__['azurearm.get_client']('compute', **kwargs) try: sizes = __utils__['azurearm.paged_object_to_list']( compconn.virtual_machines.list_available_sizes( resource_group_name=resource_group, vm_name=name ) ) for size in sizes: result[size['name']] = size except CloudError as exc: __utils__['azurearm.log_cloud_error']('compute', str(exc), **kwargs) result = {'error': str(exc)} return result
.. versionadded:: 2019.2.0 Lists all available virtual machine sizes to which the specified virtual machine can be resized. :param name: The name of the virtual machine. :param resource_group: The resource group name assigned to the virtual machine. CLI Example: .. code-block:: bash salt-call azurearm_compute.virtual_machines_list_available_sizes testvm testgroup
def _create_ucsm_host_to_service_profile_mapping(self): """Reads list of Service profiles and finds associated Server.""" ucsm_ips = list(CONF.ml2_cisco_ucsm.ucsms) for ucsm_ip in ucsm_ips: with self.ucsm_connect_disconnect(ucsm_ip) as handle: try: sp_list_temp = handle.ConfigResolveClass('lsServer', None, inHierarchical=False) if sp_list_temp and sp_list_temp.OutConfigs is not None: sp_list = sp_list_temp.OutConfigs.GetChild() or [] for sp in sp_list: if sp.PnDn: server_name = self._get_server_name(handle, sp, ucsm_ip) if (server_name and not sp.OperSrcTemplName): LOG.debug('Server %s info retrieved ' 'from UCSM %s', server_name, ucsm_ip) key = (ucsm_ip, server_name) self.ucsm_sp_dict[key] = str(sp.Dn) self.ucsm_host_dict[server_name] = ucsm_ip except Exception as e: # Raise a Neutron exception. Include a description of # the original exception. raise cexc.UcsmConfigReadFailed(ucsm_ip=ucsm_ip, exc=e)
Reads list of Service profiles and finds associated Server.
def _update_valid_moves(self): ''' Updates self.valid_moves according to the latest game state. Assumes that the board and all hands are non-empty. ''' left_end = self.board.left_end() right_end = self.board.right_end() moves = [] for d in self.hands[self.turn]: if left_end in d: moves.append((d, True)) # do not double count moves if both of the board's ends have # the same value, and a domino can be placed on both of them if right_end in d and left_end != right_end: moves.append((d, False)) self.valid_moves = tuple(moves)
Updates self.valid_moves according to the latest game state. Assumes that the board and all hands are non-empty.
def upload_files(selected_file, selected_host, only_link, file_name):
    """
    Uploads the selected file to the host.  This works because every
    pomf.se-based site exposes essentially the same upload API.
    """
    try:
        answer = requests.post(
            url=selected_host[0]+"upload.php",
            files={'files[]':selected_file})
        file_name_1 = re.findall(r'"url": *"((h.+\/){0,1}(.+?))"[,\}]', \
                answer.text.replace("\\", ""))[0][2]
        if only_link:
            return [selected_host[1]+file_name_1, "{}: {}{}".format(file_name, selected_host[1], file_name_1)]
        else:
            return "{}: {}{}".format(file_name, selected_host[1], file_name_1)
    except requests.exceptions.ConnectionError:
        print(file_name + ' couldn\'t be uploaded to ' + selected_host[0])
Uploads the selected file to the host. This works because every pomf.se-based site exposes essentially the same upload API.
async def read_frame(self) -> DataFrame: """Read a single frame from the local buffer. If no frames are available but the stream is still open, waits until more frames arrive. Otherwise, raises StreamConsumedError. When a stream is closed, a single `None` is added to the data frame Queue to wake up any waiting `read_frame` coroutines. """ if self._data_frames.qsize() == 0 and self.closed: raise StreamConsumedError(self.id) frame = await self._data_frames.get() self._data_frames.task_done() if frame is None: raise StreamConsumedError(self.id) return frame
Read a single frame from the local buffer. If no frames are available but the stream is still open, waits until more frames arrive. Otherwise, raises StreamConsumedError. When a stream is closed, a single `None` is added to the data frame Queue to wake up any waiting `read_frame` coroutines.
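A consumption sketch that drains a stream until it is exhausted; ``stream`` is a hypothetical instance of the containing class, and ``StreamConsumedError`` is assumed importable from the same library.

async def drain(stream):
    """Collect every remaining data frame from `stream`."""
    frames = []
    try:
        while True:
            frames.append(await stream.read_frame())
    except StreamConsumedError:
        return frames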
def canGoBack(self):
    """
    Returns whether or not this wizard can move backward.

    :return     <bool>
    """
    try:
        backId = self._navigation.index(self.currentId()) - 1
    except ValueError:
        # The current page is not in the navigation history.
        return False
    return backId >= 0
Returns whether or not this wizard can move backward. :return <bool>
def upsert(self, *fields):
    """
    Update or Insert this document depending on whether it exists or not.
    The presence of an `_id` value in the document is used to determine if
    the document exists.

    NOTE: This method is not the same as specifying the `upsert` flag when
    calling MongoDB. When called for a document with an `_id` value, this
    method will call the database to see if a record with that Id exists,
    if not it will call `insert`, if so it will call `update`. This
    operation is therefore not atomic and much slower than the equivalent
    MongoDB operation (due to the extra call).
    """
    # If no `_id` is provided then we insert the document
    if not self._id:
        return self.insert()

    # If an `_id` is provided then we need to check if it exists before
    # performing the `upsert`.
    #
    if self.count({'_id': self._id}) == 0:
        self.insert()
    else:
        self.update(*fields)
Update or Insert this document depending on whether it exists or not. The presence of an `_id` value in the document is used to determine if the document exists. NOTE: This method is not the same as specifying the `upsert` flag when calling MongoDB. When called for a document with an `_id` value, this method will call the database to see if a record with that Id exists, if not it will call `insert`, if so it will call `update`. This operation is therefore not atomic and much slower than the equivalent MongoDB operation (due to the extra call).
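A usage sketch with a hypothetical ``User`` document class built on this base:

user = User(name='ada')
user.upsert()            # no _id yet, so this inserts
user.name = 'ada lovelace'
user.upsert('name')      # _id now exists in the database, so this updates 'name'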
def load_config(path=None, defaults=None):
    """
    Loads and parses an INI style configuration file using Python's built-in
    ConfigParser module. If path is specified, load it.

    If ``defaults`` (a list of strings) is given, try to load each entry as a
    file, without throwing any error if the operation fails.

    If ``defaults`` is not given, the locations listed in the DEFAULT_FILES
    constant are tried.

    To completely disable defaults loading, pass in an empty list or
    ``False``.

    Returns the ConfigParser instance used to load and parse the files.
    """
    if defaults is None:
        defaults = DEFAULT_FILES

    # SafeConfigParser was renamed to ConfigParser in Python 3.2 and removed
    # in 3.12; read_file() likewise supersedes the deprecated readfp().
    config = configparser.ConfigParser(allow_no_value=True)

    if defaults:
        config.read(defaults)

    if path:
        with open(path) as fh:
            config.read_file(fh)

    return config
Loads and parses an INI style configuration file using Python's built-in ConfigParser module. If path is specified, load it. If ``defaults`` (a list of strings) is given, try to load each entry as a file, without throwing any error if the operation fails. If ``defaults`` is not given, the locations listed in the DEFAULT_FILES constant are tried. To completely disable defaults loading, pass in an empty list or ``False``. Returns the ConfigParser instance used to load and parse the files.
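A minimal call sketch (the INI path and section names are hypothetical):

config = load_config(path='app.ini', defaults=False)
if config.has_section('server'):
    print(config.get('server', 'host'))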
def dms2dd(degrees, minutes, seconds): """ Convert latitude/longitude of a location that is in degrees, minutes, seconds to decimal degrees Parameters ---------- degrees : degrees of latitude/longitude minutes : minutes of latitude/longitude seconds : seconds of latitude/longitude Returns ------- degrees : decimal degrees of location Examples -------- Convert 180 degrees 4 minutes 23 seconds to decimal degrees: >>> ipmag.dms2dd(180,4,23) 180.07305555555556 """ dd = float(degrees) + old_div(float(minutes), 60) + \ old_div(float(seconds), (60 * 60)) return dd
Convert latitude/longitude of a location that is in degrees, minutes, seconds to decimal degrees Parameters ---------- degrees : degrees of latitude/longitude minutes : minutes of latitude/longitude seconds : seconds of latitude/longitude Returns ------- degrees : decimal degrees of location Examples -------- Convert 180 degrees 4 minutes 23 seconds to decimal degrees: >>> ipmag.dms2dd(180,4,23) 180.07305555555556
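The arithmetic behind the docstring example, spelled out as a check:

# 180 deg 4 min 23 s  ->  180 + 4/60 + 23/3600
assert abs(dms2dd(180, 4, 23) - (180 + 4/60 + 23/3600)) < 1e-12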