positive: string, lengths 100 to 30.3k
anchor: string, lengths 1 to 15k
def incremental_fit(self, train_x, train_y): """ Incrementally fit the regressor. """ if not self._first_fitted: raise ValueError("The first_fit function needs to be called first.") train_x, train_y = np.array(train_x), np.array(train_y) # Incrementally compute K up_right_k = edit_distance_matrix(self._x, train_x) down_left_k = np.transpose(up_right_k) down_right_k = edit_distance_matrix(train_x) up_k = np.concatenate((self._distance_matrix, up_right_k), axis=1) down_k = np.concatenate((down_left_k, down_right_k), axis=1) temp_distance_matrix = np.concatenate((up_k, down_k), axis=0) k_matrix = bourgain_embedding_matrix(temp_distance_matrix) diagonal = np.diag_indices_from(k_matrix) diagonal = (diagonal[0][-len(train_x) :], diagonal[1][-len(train_x) :]) k_matrix[diagonal] += self.alpha try: self._l_matrix = cholesky(k_matrix, lower=True) # Line 2 except LinAlgError: return self self._x = np.concatenate((self._x, train_x), axis=0) self._y = np.concatenate((self._y, train_y), axis=0) self._distance_matrix = temp_distance_matrix self._alpha_vector = cho_solve((self._l_matrix, True), self._y) # Line 3 return self
Incrementally fit the regressor.
def add_post(self, *args, **kwargs): """ Shortcut for add_route with method POST """ return self.add_route(hdrs.METH_POST, *args, **kwargs)
Shortcut for add_route with method POST
def disabled(name, **kwargs): ''' Ensure a job is disabled in the schedule name The unique name that is given to the scheduled job. persist Whether the job should persist between minion restarts, defaults to True. ''' ret = {'name': name, 'result': True, 'changes': {}, 'comment': []} current_schedule = __salt__['schedule.list'](show_all=True, return_yaml=False) if name in current_schedule: if 'test' in __opts__ and __opts__['test']: kwargs['test'] = True result = __salt__['schedule.disable_job'](name, **kwargs) ret['comment'].append(result['comment']) else: result = __salt__['schedule.disable_job'](name, **kwargs) if not result['result']: ret['result'] = result['result'] ret['comment'] = result['comment'] return ret else: ret['comment'].append('Disabled job {0} from schedule'.format(name)) else: ret['comment'].append('Job {0} not present in schedule'.format(name)) ret['comment'] = '\n'.join(ret['comment']) return ret
Ensure a job is disabled in the schedule name The unique name that is given to the scheduled job. persist Whether the job should persist between minion restarts, defaults to True.
def paginate_stream_index(self, bucket, index, startkey, endkey=None, max_results=1000, return_terms=None, continuation=None, timeout=None, term_regex=None): """ Iterates over a streaming paginated index query. This is equivalent to calling :meth:`stream_index` and then successively calling :meth:`~riak.client.index_page.IndexPage.next_page` until all results are exhausted. Because limiting the result set is necessary to invoke pagination, the ``max_results`` option has a default of ``1000``. The caller should explicitly close each yielded page, either using :func:`contextlib.closing` or calling ``close()`` explicitly. Consuming the entire page will also close the stream. If it does not, the associated connection might not be returned to the pool. Example:: from contextlib import closing # Using contextlib.closing for page in client.paginate_stream_index(mybucket, 'name_bin', 'Smith'): with closing(page): for key in page: do_something(key) # Explicit close() for page in client.paginate_stream_index(mybucket, 'name_bin', 'Smith'): for key in page: do_something(key) page.close() :param bucket: the bucket whose index will be queried :type bucket: RiakBucket :param index: the index to query :type index: string :param startkey: the sole key to query, or beginning of the query range :type startkey: string, integer :param endkey: the end of the query range (optional if equality) :type endkey: string, integer :param return_terms: whether to include the secondary index value :type return_terms: boolean :param max_results: the maximum number of results to return (page size), defaults to 1000 :type max_results: integer :param continuation: the opaque continuation returned from a previous paginated request :type continuation: string :param timeout: a timeout value in milliseconds, or 'infinity' :type timeout: int :param term_regex: a regular expression used to filter index terms :type term_regex: string :rtype: generator over instances of :class:`~riak.client.index_page.IndexPage` """ # TODO FUTURE: implement "retry on connection closed" # as in stream_mapred page = self.stream_index(bucket, index, startkey, endkey=endkey, max_results=max_results, return_terms=return_terms, continuation=continuation, timeout=timeout, term_regex=term_regex) yield page while page.has_next_page(): page = page.next_page() yield page
Iterates over a streaming paginated index query. This is equivalent to calling :meth:`stream_index` and then successively calling :meth:`~riak.client.index_page.IndexPage.next_page` until all results are exhausted. Because limiting the result set is necessary to invoke pagination, the ``max_results`` option has a default of ``1000``. The caller should explicitly close each yielded page, either using :func:`contextlib.closing` or calling ``close()`` explicitly. Consuming the entire page will also close the stream. If it does not, the associated connection might not be returned to the pool. Example:: from contextlib import closing # Using contextlib.closing for page in client.paginate_stream_index(mybucket, 'name_bin', 'Smith'): with closing(page): for key in page: do_something(key) # Explicit close() for page in client.paginate_stream_index(mybucket, 'name_bin', 'Smith'): for key in page: do_something(key) page.close() :param bucket: the bucket whose index will be queried :type bucket: RiakBucket :param index: the index to query :type index: string :param startkey: the sole key to query, or beginning of the query range :type startkey: string, integer :param endkey: the end of the query range (optional if equality) :type endkey: string, integer :param return_terms: whether to include the secondary index value :type return_terms: boolean :param max_results: the maximum number of results to return (page size), defaults to 1000 :type max_results: integer :param continuation: the opaque continuation returned from a previous paginated request :type continuation: string :param timeout: a timeout value in milliseconds, or 'infinity' :type timeout: int :param term_regex: a regular expression used to filter index terms :type term_regex: string :rtype: generator over instances of :class:`~riak.client.index_page.IndexPage`
def filter_networks(self, predicate): """ Return a new Class1AffinityPredictor containing a subset of this predictor's neural networks. Parameters ---------- predicate : Class1NeuralNetwork -> boolean Function specifying which neural networks to include Returns ------- Class1AffinityPredictor """ allele_to_allele_specific_models = {} for (allele, models) in self.allele_to_allele_specific_models.items(): allele_to_allele_specific_models[allele] = [ m for m in models if predicate(m) ] class1_pan_allele_models = [ m for m in self.class1_pan_allele_models if predicate(m) ] return Class1AffinityPredictor( allele_to_allele_specific_models=allele_to_allele_specific_models, class1_pan_allele_models=class1_pan_allele_models, allele_to_fixed_length_sequence=self.allele_to_fixed_length_sequence, )
Return a new Class1AffinityPredictor containing a subset of this predictor's neural networks. Parameters ---------- predicate : Class1NeuralNetwork -> boolean Function specifying which neural networks to include Returns ------- Class1AffinityPredictor
def make_nb(config, path=None, stop_on_error=True, just_tests=False): """Create a Jupyter notebook of sciunit tests for the given configuration.""" root, nb_name = nb_name_from_path(config, path) clean = lambda varStr: re.sub(r'\W|^(?=\d)', '_', varStr) name = clean(nb_name) mpl_style = config.get('misc', 'matplotlib', fallback='inline') cells = [new_markdown_cell('## Sciunit Testing Notebook for %s' % nb_name)] add_code_cell(cells, ( "%%matplotlib %s\n" "from IPython.display import display\n" "from importlib.machinery import SourceFileLoader\n" "%s = SourceFileLoader('scidash', '%s/__init__.py').load_module()") % (mpl_style, name, root)) if just_tests: add_code_cell(cells, ( "for test in %s.tests.tests:\n" " score_array = test.judge(%s.models.models, stop_on_error=%r)\n" " display(score_array)") % (name, name, stop_on_error)) else: add_code_cell(cells, ( "for suite in %s.suites.suites:\n" " score_matrix = suite.judge(" "%s.models.models, stop_on_error=%r)\n" " display(score_matrix)") % (name, name, stop_on_error)) write_nb(root, nb_name, cells)
Create a Jupyter notebook of sciunit tests for the given configuration.
def cli(env, package_keyname, location, preset, verify, billing, complex_type, quantity, extras, order_items): """Place or verify an order. This CLI command is used for placing/verifying an order of the specified package in the given location (denoted by a datacenter's long name). Orders made via the CLI can then be converted to be made programmatically by calling SoftLayer.OrderingManager.place_order() with the same keynames. Packages for ordering can be retrieved from `slcli order package-list` Presets for ordering can be retrieved from `slcli order preset-list` (not all packages have presets) Items can be retrieved from `slcli order item-list`. In order to find required items for the order, use `slcli order category-list`, and then provide the --category option for each category code in `slcli order item-list`. Example:: # Order an hourly VSI with 4 CPU, 16 GB RAM, 100 GB SAN disk, # Ubuntu 16.04, and 1 Gbps public & private uplink in dal13 slcli order place --billing hourly CLOUD_SERVER DALLAS13 \\ GUEST_CORES_4 \\ RAM_16_GB \\ REBOOT_REMOTE_CONSOLE \\ 1_GBPS_PUBLIC_PRIVATE_NETWORK_UPLINKS \\ BANDWIDTH_0_GB_2 \\ 1_IP_ADDRESS \\ GUEST_DISK_100_GB_SAN \\ OS_UBUNTU_16_04_LTS_XENIAL_XERUS_MINIMAL_64_BIT_FOR_VSI \\ MONITORING_HOST_PING \\ NOTIFICATION_EMAIL_AND_TICKET \\ AUTOMATED_NOTIFICATION \\ UNLIMITED_SSL_VPN_USERS_1_PPTP_VPN_USER_PER_ACCOUNT \\ NESSUS_VULNERABILITY_ASSESSMENT_REPORTING \\ --extras '{"virtualGuests": [{"hostname": "test", "domain": "softlayer.com"}]}' \\ --complex-type SoftLayer_Container_Product_Order_Virtual_Guest """ manager = ordering.OrderingManager(env.client) if extras: try: extras = json.loads(extras) except ValueError as err: raise exceptions.CLIAbort("There was an error when parsing the --extras value: {}".format(err)) args = (package_keyname, location, order_items) kwargs = {'preset_keyname': preset, 'extras': extras, 'quantity': quantity, 'complex_type': complex_type, 'hourly': bool(billing == 'hourly')} if verify: table = formatting.Table(COLUMNS) order_to_place = manager.verify_order(*args, **kwargs) for price in order_to_place['orderContainers'][0]['prices']: cost_key = 'hourlyRecurringFee' if billing == 'hourly' else 'recurringFee' table.add_row([ price['item']['keyName'], price['item']['description'], price[cost_key] if cost_key in price else formatting.blank() ]) else: if not (env.skip_confirmations or formatting.confirm( "This action will incur charges on your account. Continue?")): raise exceptions.CLIAbort("Aborting order.") order = manager.place_order(*args, **kwargs) table = formatting.KeyValueTable(['name', 'value']) table.align['name'] = 'r' table.align['value'] = 'l' table.add_row(['id', order['orderId']]) table.add_row(['created', order['orderDate']]) table.add_row(['status', order['placedOrder']['status']]) env.fout(table)
Place or verify an order. This CLI command is used for placing/verifying an order of the specified package in the given location (denoted by a datacenter's long name). Orders made via the CLI can then be converted to be made programmatically by calling SoftLayer.OrderingManager.place_order() with the same keynames. Packages for ordering can be retrieved from `slcli order package-list` Presets for ordering can be retrieved from `slcli order preset-list` (not all packages have presets) Items can be retrieved from `slcli order item-list`. In order to find required items for the order, use `slcli order category-list`, and then provide the --category option for each category code in `slcli order item-list`. Example:: # Order an hourly VSI with 4 CPU, 16 GB RAM, 100 GB SAN disk, # Ubuntu 16.04, and 1 Gbps public & private uplink in dal13 slcli order place --billing hourly CLOUD_SERVER DALLAS13 \\ GUEST_CORES_4 \\ RAM_16_GB \\ REBOOT_REMOTE_CONSOLE \\ 1_GBPS_PUBLIC_PRIVATE_NETWORK_UPLINKS \\ BANDWIDTH_0_GB_2 \\ 1_IP_ADDRESS \\ GUEST_DISK_100_GB_SAN \\ OS_UBUNTU_16_04_LTS_XENIAL_XERUS_MINIMAL_64_BIT_FOR_VSI \\ MONITORING_HOST_PING \\ NOTIFICATION_EMAIL_AND_TICKET \\ AUTOMATED_NOTIFICATION \\ UNLIMITED_SSL_VPN_USERS_1_PPTP_VPN_USER_PER_ACCOUNT \\ NESSUS_VULNERABILITY_ASSESSMENT_REPORTING \\ --extras '{"virtualGuests": [{"hostname": "test", "domain": "softlayer.com"}]}' \\ --complex-type SoftLayer_Container_Product_Order_Virtual_Guest
def custom_action(sender, action, instance, user=None, **kwargs): """ Manually trigger a custom action (or even a standard action). """ opts = get_opts(instance) model = '.'.join([opts.app_label, opts.object_name]) distill_model_event(instance, model, action, user_override=user)
Manually trigger a custom action (or even a standard action).
def _dump_stats(self): ''' Dumps the stats out ''' extras = {} if 'total' in self.stats_dict: self.logger.debug("Compiling total/fail dump stats") for key in self.stats_dict['total']: final = 'total_{t}'.format(t=key) extras[final] = self.stats_dict['total'][key].value() for key in self.stats_dict['fail']: final = 'fail_{t}'.format(t=key) extras[final] = self.stats_dict['fail'][key].value() if 'plugins' in self.stats_dict: self.logger.debug("Compiling plugin dump stats") for name in self.stats_dict['plugins']: for key in self.stats_dict['plugins'][name]: final = 'plugin_{n}_{t}'.format(n=name, t=key) extras[final] = self.stats_dict['plugins'][name][key].value() if not self.logger.json: self.logger.info('Kafka Monitor Stats Dump:\n{0}'.format( json.dumps(extras, indent=4, sort_keys=True))) else: self.logger.info('Kafka Monitor Stats Dump', extra=extras)
Dumps the stats out
def get(cls, xuid, scid, clip_id): ''' Gets a specific game clip :param xuid: xuid of an xbox live user :param scid: scid of a clip :param clip_id: id of a clip ''' url = ( 'https://gameclipsmetadata.xboxlive.com/users' '/xuid(%(xuid)s)/scids/%(scid)s/clips/%(clip_id)s' % { 'xuid': xuid, 'scid': scid, 'clip_id': clip_id, } ) resp = xbox.client._get(url) # scid does not seem to matter when fetching clips, # as long as it looks like a uuid it should be fine. # perhaps we'll raise an exception in future if resp.status_code == 404: msg = 'Could not find clip: xuid=%s, scid=%s, clip_id=%s' % ( xuid, scid, clip_id, ) raise ClipNotFound(msg) data = resp.json() # as we don't have the user object let's # create a lazily evaluated proxy object # that will fetch it only when required user = UserProxy(xuid) return cls(user, data['gameClip'])
Gets a specific game clip :param xuid: xuid of an xbox live user :param scid: scid of a clip :param clip_id: id of a clip
def currency_pair(code): '''Construct a :class:`ccy_pair` from a six letter string.''' c = str(code) c1 = currency(c[:3]) c2 = currency(c[3:]) return ccy_pair(c1, c2)
Construct a :class:`ccy_pair` from a six letter string.
def devices(self) -> selectiontools.Selection: """The additional devices defined for the respective `reader` or `writer` element contained within a |Selection| object. ToDo If the `reader` or `writer` element does not define its own additional devices, |XMLInterface.devices| of |XMLInterface| is used. >>> from hydpy.core.examples import prepare_full_example_1 >>> prepare_full_example_1() >>> from hydpy import HydPy, TestIO, XMLInterface >>> hp = HydPy('LahnH') >>> with TestIO(): ... hp.prepare_network() ... interface = XMLInterface('single_run.xml') >>> series_io = interface.series_io >>> for seq in (series_io.readers + series_io.writers): ... print(seq.info, seq.devices.nodes, seq.devices.elements) all input data Nodes() \ Elements("land_dill", "land_lahn_1", "land_lahn_2", "land_lahn_3") precipitation Nodes() Elements("land_lahn_1", "land_lahn_2") soilmoisture Nodes("dill") Elements("land_dill", "land_lahn_1") averaged Nodes() Elements() """ devices = self.find('devices') master = self while devices is None: master = master.master devices = master.find('devices') return _query_devices(devices)
The additional devices defined for the respective `reader` or `writer` element contained within a |Selection| object. ToDo If the `reader` or `writer` element does not define its own additional devices, |XMLInterface.devices| of |XMLInterface| is used. >>> from hydpy.core.examples import prepare_full_example_1 >>> prepare_full_example_1() >>> from hydpy import HydPy, TestIO, XMLInterface >>> hp = HydPy('LahnH') >>> with TestIO(): ... hp.prepare_network() ... interface = XMLInterface('single_run.xml') >>> series_io = interface.series_io >>> for seq in (series_io.readers + series_io.writers): ... print(seq.info, seq.devices.nodes, seq.devices.elements) all input data Nodes() \ Elements("land_dill", "land_lahn_1", "land_lahn_2", "land_lahn_3") precipitation Nodes() Elements("land_lahn_1", "land_lahn_2") soilmoisture Nodes("dill") Elements("land_dill", "land_lahn_1") averaged Nodes() Elements()
def std(self, axis): """Returns d-1 dimensional histogram of (estimated) std value along axis NB this is very different from just std of the histogram values (which describe bin counts) """ def weighted_std(values, weights, axis): # Stolen from http://stackoverflow.com/questions/2413522 average = np.average(values, weights=weights, axis=axis) average = average[self._simsalabim_slice(axis)] variance = np.average((values-average)**2, weights=weights, axis=axis) return np.sqrt(variance) axis = self.get_axis_number(axis) std_hist = weighted_std(self.all_axis_bin_centers(axis), weights=self.histogram, axis=axis) if self.dimensions == 2: new_hist = Hist1d else: new_hist = Histdd return new_hist.from_histogram(histogram=std_hist, bin_edges=itemgetter(*self.other_axes(axis))(self.bin_edges), axis_names=self.axis_names_without(axis))
Returns d-1 dimensional histogram of (estimated) std value along axis NB this is very different from just std of the histogram values (which describe bin counts)
def inputs_outputs(self): """Get information on method inputs & outputs.""" r = fapi.get_inputs_outputs(self.namespace, self.name, self.snapshot_id, self.api_url) fapi._check_response_code(r, 200) return r.json()
Get information on method inputs & outputs.
def open(self, options=None, mimetype='application/octet-stream'): """ open: return a file-like object for self. The method can be used with the 'with' statement. """ return self.connection.open(self, options, mimetype)
open: return a file-like object for self. The method can be used with the 'with' statement.
def path(self): """Return the full path to the Group, including any parent Groups.""" # If root, return the empty string so child paths start with '/' if self.dataset is self: return '' else: # Otherwise recurse return self.dataset.path + '/' + self.name
Return the full path to the Group, including any parent Groups.
def mouseMoveEvent(self, event): """Override Qt method. Show code analysis; if the left button is pressed, select lines. """ line_number = self.editor.get_linenumber_from_mouse_event(event) block = self.editor.document().findBlockByNumber(line_number-1) data = block.userData() # this disables pyflakes messages if there is an active drag/selection # operation check = self._released == -1 if data and data.code_analysis and check: self.editor.show_code_analysis_results(line_number, data) else: self.editor.hide_tooltip() if event.buttons() == Qt.LeftButton: self._released = line_number self.editor.select_lines(self._pressed, self._released)
Override Qt method. Show code analysis; if the left button is pressed, select lines.
def generate(data, dimOrder, maxWindowSize, overlapPercent, transforms = []): """ Generates a set of sliding windows for the specified dataset. """ # Determine the dimensions of the input data width = data.shape[dimOrder.index('w')] height = data.shape[dimOrder.index('h')] # Generate the windows return generateForSize(width, height, dimOrder, maxWindowSize, overlapPercent, transforms)
Generates a set of sliding windows for the specified dataset.
def setValidityErrorHandler(self, err_func, warn_func, arg=None): """ Register error and warning handlers for RelaxNG validation. These will be called back as f(msg,arg) """ libxml2mod.xmlRelaxNGSetValidErrors(self._o, err_func, warn_func, arg)
Register error and warning handlers for RelaxNG validation. These will be called back as f(msg,arg)
def expunge(self, instance): '''Remove *instance* from the :class:`Session`. Instance could be a :class:`Model` or an id. :parameter instance: a :class:`Model` or an *id* :rtype: the :class:`Model` removed from session or ``None`` if it was not in the session. ''' instance = self.pop(instance) if instance is not None: instance.session = None return instance
Remove *instance* from the :class:`Session`. Instance could be a :class:`Model` or an id. :parameter instance: a :class:`Model` or an *id* :rtype: the :class:`Model` removed from session or ``None`` if it was not in the session.
def get_calc_ids(datadir=None): """ Extract the available calculation IDs from the datadir, in order. """ datadir = datadir or get_datadir() if not os.path.exists(datadir): return [] calc_ids = set() for f in os.listdir(datadir): mo = re.match(CALC_REGEX, f) if mo: calc_ids.add(int(mo.group(2))) return sorted(calc_ids)
Extract the available calculation IDs from the datadir, in order.
def query_ssos(self, target_name, lunation_count=None): """Send a query to the SSOS web service, looking for available observations using the given track. :param target_name: name of target to query against SSOIS db :param lunation_count: ignored :rtype: SSOSData """ # we observe ~ a week either side of new moon # but we don't know when in the dark run the discovery happened # so be generous with the search boundaries, add extra 2 weeks # current date just has to be the night of the triplet, from mp_ephem import horizons search_start_date = Time('1999-01-01', scale='utc') search_end_date = Time(datetime.datetime.now().strftime('%Y-%m-%d'), scale='utc') logger.info("Sending query to SSOS start_date: {} end_data: {}\n".format(search_start_date, search_end_date)) query = Query(target_name, search_start_date=search_start_date, search_end_date=search_end_date) logger.debug("Parsing query results...") tracks_data = self.ssos_parser.parse(query.get()) tracks_data.mpc_observations = {} start_time = Time(search_start_date) stop_time = Time(search_end_date) step_size = 5 * units.hour self.orbit = horizons.Body(target_name, start_time, stop_time, step_size) ref_sky_coord = None for source in tracks_data.get_sources(): astrom_observations = tracks_data.observations source_readings = source.get_readings() for idx in range(len(source_readings)): source_reading = source_readings[idx] assert isinstance(source_reading, SourceReading) if ref_sky_coord is None or source_reading.sky_coord.separation(ref_sky_coord) > 40 * units.arcsec: ref_sky_coord = source_reading.sky_coord source_reading.reference_sky_coord = ref_sky_coord astrom_observation = astrom_observations[idx] self.orbit.predict(Time(astrom_observation.mjd, format='mjd', scale='utc')) source_reading.pa = self.orbit.pa # why are these being recorded just in pixels? Because the error ellipse is drawn in pixels. # TODO: Modify error ellipse drawing routine to use WCS but be sure # that this does not cause trouble with the use of dra/ddec for cutout computer source_reading.dx = self.orbit.dra source_reading.dy = self.orbit.ddec logger.debug("Sending back set of observations that might contain the target: {}".format(tracks_data)) return tracks_data
Send a query to the SSOS web service, looking for available observations using the given track. :param target_name: name of target to query against SSOIS db :param lunation_count: ignored :rtype: SSOSData
def getServiceJobsToStart(self, maxWait): """ :param float maxWait: Time in seconds to wait to get a job before returning. :return: a tuple of (serviceJobStoreID, memory, cores, disk, ..) representing a service job to start. :rtype: toil.job.ServiceJobNode """ try: serviceJob = self._serviceJobGraphsToStart.get(timeout=maxWait) assert self.jobsIssuedToServiceManager >= 0 self.jobsIssuedToServiceManager -= 1 return serviceJob except Empty: return None
:param float maxWait: Time in seconds to wait to get a job before returning. :return: a tuple of (serviceJobStoreID, memory, cores, disk, ..) representing a service job to start. :rtype: toil.job.ServiceJobNode
def first_unique_char(s): """ :type s: str :rtype: int """ if len(s) == 1: return 0 ban = [] for i in range(len(s)): if all(s[i] != s[k] for k in range(i + 1, len(s))) and s[i] not in ban: return i else: ban.append(s[i]) return -1
:type s: str :rtype: int
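A minimal usage sketch for the first_unique_char helper in the pair above, assuming the function is defined in the current scope; the expected indices follow from its contract (index of the first character that occurs exactly once, or -1).

# Quick sanity checks (illustrative inputs, not from the original source).
assert first_unique_char("leetcode") == 0      # 'l' occurs only once
assert first_unique_char("loveleetcode") == 2  # 'v' is the first unique character
assert first_unique_char("aabb") == -1         # no unique character
print("first_unique_char checks passed")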
def get_client(self, destination_params, job_id, **kwargs): """Build a client given specific destination parameters and job_id.""" destination_params = _parse_destination_params(destination_params) destination_params.update(**kwargs) job_manager_interface_class = self.job_manager_interface_class job_manager_interface_args = dict(destination_params=destination_params, **self.job_manager_interface_args) job_manager_interface = job_manager_interface_class(**job_manager_interface_args) return self.client_class(destination_params, job_id, job_manager_interface, **self.extra_client_kwds)
Build a client given specific destination parameters and job_id.
def ls(self, folder="", begin_from_file="", num=-1, get_grants=False, all_grant_data=False): """ Gets the list of file names (keys) in an S3 folder Parameters ---------- folder : string Path to file on S3 num: integer, optional number of results to return, by default it returns all results. begin_from_file: string, optional which file to start from on S3. This is useful in case you are iterating over lists of files and you need to page the result by starting the listing from a certain file and fetching a certain num (number) of files. Examples -------- >>> from s3utils import S3utils >>> s3utils = S3utils( ... AWS_ACCESS_KEY_ID = 'your access key', ... AWS_SECRET_ACCESS_KEY = 'your secret key', ... AWS_STORAGE_BUCKET_NAME = 'your bucket name', ... S3UTILS_DEBUG_LEVEL = 1, #change it to 0 for less verbose ... ) >>> print(s3utils.ls("test/")) {u'test/myfolder/', u'test/myfolder/em/', u'test/myfolder/hoho/', u'test/myfolder/hoho/.DS_Store', u'test/myfolder/hoho/haha/', u'test/myfolder/hoho/haha/ff', u'test/myfolder/hoho/haha/photo.JPG'} """ # S3 object key can't start with / folder = re.sub(r"^/", "", folder) bucket_files = self.bucket.list(prefix=folder, marker=begin_from_file) # in case listing grants if get_grants: list_of_files = OrderedDict() for (i, v) in enumerate(bucket_files): file_info = {v.name: self.__get_grants(v.name, all_grant_data)} list_of_files.update(file_info) if i == num: break else: list_of_files = set([]) for (i, v) in enumerate(bucket_files): list_of_files.add(v.name) if i == num: break return list_of_files
Gets the list of file names (keys) in an S3 folder Parameters ---------- folder : string Path to file on S3 num: integer, optional number of results to return, by default it returns all results. begin_from_file: string, optional which file to start from on S3. This is useful in case you are iterating over lists of files and you need to page the result by starting the listing from a certain file and fetching a certain num (number) of files. Examples -------- >>> from s3utils import S3utils >>> s3utils = S3utils( ... AWS_ACCESS_KEY_ID = 'your access key', ... AWS_SECRET_ACCESS_KEY = 'your secret key', ... AWS_STORAGE_BUCKET_NAME = 'your bucket name', ... S3UTILS_DEBUG_LEVEL = 1, #change it to 0 for less verbose ... ) >>> print(s3utils.ls("test/")) {u'test/myfolder/', u'test/myfolder/em/', u'test/myfolder/hoho/', u'test/myfolder/hoho/.DS_Store', u'test/myfolder/hoho/haha/', u'test/myfolder/hoho/haha/ff', u'test/myfolder/hoho/haha/photo.JPG'}
def unpack(self, buff=None, offset=0): """Unpack *buff* into this object. This method will convert a binary data into a readable value according to the attribute format. Args: buff (bytes): Binary buffer. offset (int): Where to begin unpacking. Raises: :exc:`~.exceptions.UnpackException`: If unpack fails. """ band_type = UBInt16(enum_ref=MeterBandType) band_type.unpack(buff, offset) self.__class__ = MeterBandType(band_type.value).find_class() length = UBInt16() length.unpack(buff, offset=offset+2) super().unpack(buff[:offset+length.value], offset)
Unpack *buff* into this object. This method will convert a binary data into a readable value according to the attribute format. Args: buff (bytes): Binary buffer. offset (int): Where to begin unpacking. Raises: :exc:`~.exceptions.UnpackException`: If unpack fails.
def walk(start, ofn, cyc=None): """ Non recursive DFS to detect cycles :param start: start vertex in graph :param ofn: function to get the list of outgoing edges of a vertex :param cyc: list of existing cycles, cycles are represented in a list started with minimum vertex. :return: cycles :rtype: list of lists """ ctx, stk = {}, [start] cyc = [] if cyc == None else cyc while len(stk): top = stk[-1] if top not in ctx: ctx.update({top:list(ofn(top))}) if len(ctx[top]): n = ctx[top][0] if n in stk: # cycles found, # normalize the representation of cycles, # start from the smallest vertex, ex. # 4 -> 5 -> 2 -> 7 -> 9 would produce # (2, 7, 9, 4, 5) nc = stk[stk.index(n):] ni = nc.index(min(nc)) nc = nc[ni:] + nc[:ni] + [min(nc)] if nc not in cyc: cyc.append(nc) ctx[top].pop(0) else: stk.append(n) else: ctx.pop(top) stk.pop() if len(stk): ctx[stk[-1]].remove(top) return cyc
Non recursive DFS to detect cycles :param start: start vertex in graph :param ofn: function to get the list of outgoing edges of a vertex :param cyc: list of existing cycles, cycles are represented in a list started with minimum vertex. :return: cycles :rtype: list of lists
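A small, hedged usage sketch for the walk cycle detector above; the adjacency dictionary and the ofn lambda below are illustrative assumptions, not part of the original code. Each reported cycle starts and ends at its smallest vertex, as the docstring describes.

# Hypothetical directed graph: 1 -> 2 -> 3 -> 1 forms a cycle, 3 -> 4 does not.
graph = {1: [2], 2: [3], 3: [1, 4], 4: []}
cycles = walk(1, lambda v: graph.get(v, []))
print(cycles)  # expected: [[1, 2, 3, 1]]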
def default_privileges_revoke(name, object_name, object_type, defprivileges=None, prepend='public', maintenance_db=None, user=None, host=None, port=None, password=None, runas=None): ''' .. versionadded:: 2019.0.0 Revoke default privileges on a postgres object CLI Example: .. code-block:: bash salt '*' postgres.default_privileges_revoke user_name table_name table \\ SELECT,UPDATE maintenance_db=db_name name Name of the role whose default privileges should be revoked object_name Name of the object on which the revoke is to be performed object_type The object type, which can be one of the following: - table - sequence - schema - group - function privileges Comma separated list of privileges to revoke, from the list below: - INSERT - CREATE - TRUNCATE - TRIGGER - SELECT - USAGE - UPDATE - EXECUTE - REFERENCES - DELETE - ALL maintenance_db The database to connect to user database username if different from config or default password user password if any password for a specified user host Database host if different from config or default port Database port if different from config or default runas System user all operations should be performed on behalf of ''' object_type, defprivileges, _defprivs = _mod_defpriv_opts(object_type, defprivileges) _validate_default_privileges(object_type, _defprivs, defprivileges) if not has_default_privileges(name, object_name, object_type, defprivileges, prepend=prepend, maintenance_db=maintenance_db, user=user, host=host, port=port, password=password, runas=runas): log.info('The object: %s of type: %s does not' ' have default privileges: %s set', object_name, object_type, defprivileges) return False _grants = ','.join(_defprivs) if object_type in ['table', 'sequence']: on_part = '{0}.{1}'.format(prepend, object_name) else: on_part = object_name if object_type == 'group': query = 'ALTER DEFAULT PRIVILEGES REVOKE {0} FROM {1}'.format(object_name, name) else: query = 'ALTER DEFAULT PRIVILEGES IN SCHEMA {2} REVOKE {0} ON {1}S FROM {3}'.format( _grants, object_type.upper(), prepend, name) ret = _psql_prepare_and_run(['-c', query], user=user, host=host, port=port, maintenance_db=maintenance_db, password=password, runas=runas) return ret['retcode'] == 0
.. versionadded:: 2019.0.0 Revoke default privileges on a postgres object CLI Example: .. code-block:: bash salt '*' postgres.default_privileges_revoke user_name table_name table \\ SELECT,UPDATE maintenance_db=db_name name Name of the role whose default privileges should be revoked object_name Name of the object on which the revoke is to be performed object_type The object type, which can be one of the following: - table - sequence - schema - group - function privileges Comma separated list of privileges to revoke, from the list below: - INSERT - CREATE - TRUNCATE - TRIGGER - SELECT - USAGE - UPDATE - EXECUTE - REFERENCES - DELETE - ALL maintenance_db The database to connect to user database username if different from config or default password user password if any password for a specified user host Database host if different from config or default port Database port if different from config or default runas System user all operations should be performed on behalf of
def czdivide(a, b, null=0): ''' czdivide(a, b) returns the quotient a / b as a numpy array object. Like numpy's divide function or a/b syntax, czdivide will thread over the latest dimension possible. Unlike numpy's divide, czdivide works with sparse matrices. Additionally, czdivide multiplies a by the zinv of b, so divide-by-zero entries are replaced with 0 in the result. The optional argument null (default: 0) may be given to specify that zeros in the array b should instead be replaced with the given value in the result. Note that if this value is not equal to 0, then any sparse array passed as argument b must be reified. The czdivide function never raises an error due to divide-by-zero; if you desire this behavior, use the cdivide function instead. ''' if null == 0: return a.multiply(zinv(b)) if sps.issparse(a) else a * zinv(b) elif sps.issparse(b): b = b.toarray() else: b = np.asarray(b) z = np.isclose(b, 0) q = np.logical_not(z) zi = q / (b + z) if sps.issparse(a): r = a.multiply(zi).tocsr() else: r = np.asarray(a) * zi r[np.ones(a.shape, dtype=bool)*z] = null return r
czdivide(a, b) returns the quotient a / b as a numpy array object. Like numpy's divide function or a/b syntax, czdivide will thread over the latest dimension possible. Unlike numpy's divide, czdivide works with sparse matrices. Additionally, czdivide multiplies a by the zinv of b, so divide-by-zero entries are replaced with 0 in the result. The optional argument null (default: 0) may be given to specify that zeros in the array b should instead be replaced with the given value in the result. Note that if this value is not equal to 0, then any sparse array passed as argument b must be reified. The czdivide function never raises an error due to divide-by-zero; if you desire this behavior, use the cdivide function instead.
def splitname(path): """Split a path into a directory, name, and extensions.""" dirpath, filename = os.path.split(path) # we don't use os.path.splitext here because we want all extensions, # not just the last, to be put in exts name, exts = filename.split(os.extsep, 1) return dirpath, name, exts
Split a path into a directory, name, and extensions.
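A short usage sketch for splitname above, assuming it is importable as defined; it splits on the first extension separator, so everything after the base name is kept together as the extensions string.

import os
# Hypothetical path; on POSIX this prints ('/tmp/data', 'archive', 'tar.gz').
print(splitname(os.path.join('/tmp/data', 'archive.tar.gz')))
# Caveat: a filename without any extension leaves only one part after
# filename.split(os.extsep, 1), so the two-name unpacking would fail.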
def reclassify(layer, exposure_key=None, overwrite_input=False): """Reclassify a continuous raster layer. Issue https://github.com/inasafe/inasafe/issues/3182 This function is a wrapper for the code from https://github.com/chiatt/gdal_reclassify For instance if you want to reclassify like this table : Original Value | Class - ∞ < val <= 0 | 1 0 < val <= 0.5 | 2 0.5 < val <= 5 | 3 5 < val < + ∞ | 6 You need a dictionary : ranges = OrderedDict() ranges[1] = [None, 0] ranges[2] = [0.0, 0.5] ranges[3] = [0.5, 5] ranges[6] = [5, None] :param layer: The raster layer. :type layer: QgsRasterLayer :param overwrite_input: Option for the output layer. True will overwrite the input layer. False will create a temporary layer. :type overwrite_input: bool :param exposure_key: The exposure key. :type exposure_key: str :return: The classified raster layer. :rtype: QgsRasterLayer .. versionadded:: 4.0 """ output_layer_name = reclassify_raster_steps['output_layer_name'] output_layer_name = output_layer_name % layer.keywords['layer_purpose'] if exposure_key: classification_key = active_classification( layer.keywords, exposure_key) thresholds = active_thresholds_value_maps(layer.keywords, exposure_key) layer.keywords['thresholds'] = thresholds layer.keywords['classification'] = classification_key else: classification_key = layer.keywords.get('classification') thresholds = layer.keywords.get('thresholds') if not thresholds: raise InvalidKeywordsForProcessingAlgorithm( 'thresholds are missing from the layer %s' % layer.keywords['layer_purpose']) if not classification_key: raise InvalidKeywordsForProcessingAlgorithm( 'classification is missing from the layer %s' % layer.keywords['layer_purpose']) ranges = {} value_map = {} hazard_classes = definition(classification_key)['classes'] for hazard_class in hazard_classes: ranges[hazard_class['value']] = thresholds[hazard_class['key']] value_map[hazard_class['key']] = [hazard_class['value']] if overwrite_input: output_raster = layer.source() else: output_raster = unique_filename(suffix='.tiff', dir=temp_dir()) driver = gdal.GetDriverByName('GTiff') raster_file = gdal.Open(layer.source()) band = raster_file.GetRasterBand(1) no_data = band.GetNoDataValue() source = band.ReadAsArray() destination = source.copy() for value, interval in list(ranges.items()): v_min = interval[0] v_max = interval[1] if v_min is None: destination[np.where(source <= v_max)] = value elif v_max is None: destination[np.where(source > v_min)] = value elif v_min < v_max: destination[np.where((v_min < source) & (source <= v_max))] = value # Tag no data cells destination[np.where(source == no_data)] = no_data_value # Create the new file. output_file = driver.Create( output_raster, raster_file.RasterXSize, raster_file.RasterYSize, 1) output_file.GetRasterBand(1).WriteArray(destination) output_file.GetRasterBand(1).SetNoDataValue(no_data_value) # CRS output_file.SetProjection(raster_file.GetProjection()) output_file.SetGeoTransform(raster_file.GetGeoTransform()) output_file.FlushCache() del output_file if not isfile(output_raster): raise FileNotFoundError reclassified = QgsRasterLayer(output_raster, output_layer_name) # We transfer keywords to the output. 
reclassified.keywords = layer.keywords.copy() reclassified.keywords['layer_mode'] = 'classified' value_map = {} hazard_classes = definition(classification_key)['classes'] for hazard_class in reversed(hazard_classes): value_map[hazard_class['key']] = [hazard_class['value']] reclassified.keywords['value_map'] = value_map reclassified.keywords['title'] = output_layer_name check_layer(reclassified) return reclassified
Reclassify a continuous raster layer. Issue https://github.com/inasafe/inasafe/issues/3182 This function is a wrapper for the code from https://github.com/chiatt/gdal_reclassify For instance if you want to reclassify like this table : Original Value | Class - ∞ < val <= 0 | 1 0 < val <= 0.5 | 2 0.5 < val <= 5 | 3 5 < val < + ∞ | 6 You need a dictionary : ranges = OrderedDict() ranges[1] = [None, 0] ranges[2] = [0.0, 0.5] ranges[3] = [0.5, 5] ranges[6] = [5, None] :param layer: The raster layer. :type layer: QgsRasterLayer :param overwrite_input: Option for the output layer. True will overwrite the input layer. False will create a temporary layer. :type overwrite_input: bool :param exposure_key: The exposure key. :type exposure_key: str :return: The classified raster layer. :rtype: QgsRasterLayer .. versionadded:: 4.0
def _get_parameter(self, parameter, dest_addr_long=None): """ Fetches and returns the value of the specified parameter. """ frame = self._send_and_wait( command=parameter, dest_addr_long=dest_addr_long) return frame["parameter"]
Fetches and returns the value of the specified parameter.
def result(self, wait=0): """ return the full list of results from the chain when it finishes. blocks until timeout. :param int wait: how many milliseconds to wait for a result :return: an unsorted list of results """ if self.started: return result_group(self.group, wait=wait, count=self.length(), cached=self.cached)
return the full list of results from the chain when it finishes. blocks until timeout. :param int wait: how many milliseconds to wait for a result :return: an unsorted list of results
def flatten(iterable): """Fully flattens an iterable: In: flatten([1,2,3,4,[5,6,[7,8]]]) Out: [1,2,3,4,5,6,7,8] """ container = iterable.__class__ placeholder = [] for item in iterable: try: placeholder.extend(flatten(item)) except TypeError: placeholder.append(item) return container(placeholder)
Fully flattens an iterable: In: flatten([1,2,3,4,[5,6,[7,8]]]) Out: [1,2,3,4,5,6,7,8]
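A brief sketch exercising flatten above with illustrative inputs; the function rebuilds the outermost container type, so a nested tuple comes back as a flat tuple.

print(flatten([1, 2, 3, 4, [5, 6, [7, 8]]]))  # [1, 2, 3, 4, 5, 6, 7, 8]
print(flatten((1, (2, 3), (4, (5,)))))        # (1, 2, 3, 4, 5)
# Note: string elements are themselves iterable and never raise TypeError,
# so they would recurse until the recursion limit; this sketch sticks to numbers.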
def save(self, conflict_resolver=choose_mine): '''Save all options in memory to the `config_file`. Options are read once more from the file (to allow other writers to save configuration), keys in conflict are resolved, and the final results are written back to the file. :param conflict_resolver: a simple lambda or function to choose when an option key is provided from an outside source (THEIRS, usually a file on disk) but is also already set on this ConfigStruct (MINE) ''' config = self._load(conflict_resolver) # in case some other process has added items with open(self._config_file, 'wb') as cf: config.write(cf)
Save all options in memory to the `config_file`. Options are read once more from the file (to allow other writers to save configuration), keys in conflict are resolved, and the final results are written back to the file. :param conflict_resolver: a simple lambda or function to choose when an option key is provided from an outside source (THEIRS, usually a file on disk) but is also already set on this ConfigStruct (MINE)
def add_param(self, name, **kwargs): '''Add a parameter to this group. Parameters ---------- name : str Name of the parameter to add to this group. The name will automatically be case-normalized. Additional keyword arguments will be passed to the `Param` constructor. ''' self.params[name.upper()] = Param(name.upper(), **kwargs)
Add a parameter to this group. Parameters ---------- name : str Name of the parameter to add to this group. The name will automatically be case-normalized. Additional keyword arguments will be passed to the `Param` constructor.
def glob(self, filename): """Returns a list of files that match the given pattern(s).""" # Only support prefix with * at the end and no ? in the string star_i = filename.find('*') quest_i = filename.find('?') if quest_i >= 0: raise NotImplementedError( "{} not supported by compat glob".format(filename)) if star_i != len(filename) - 1: # Just return empty so we can use glob from directory watcher # # TODO: Remove and instead handle in GetLogdirSubdirectories. # However, we would need to handle it for all non-local registered # filesystems in some way. return [] filename = filename[:-1] client = boto3.client("s3") bucket, path = self.bucket_and_path(filename) p = client.get_paginator("list_objects") keys = [] for r in p.paginate(Bucket=bucket, Prefix=path): for o in r.get("Contents", []): key = o["Key"][len(path):] if key: # Skip the base dir, which would add an empty string keys.append(filename + key) return keys
Returns a list of files that match the given pattern(s).
def parseFilename(filename): """ Parse out filename from any specified extensions. Returns rootname and string version of extension name. """ # Parse out any extension specified in filename _indx = filename.find('[') if _indx > 0: # Read extension name provided _fname = filename[:_indx] _extn = filename[_indx + 1:-1] else: _fname = filename _extn = None return _fname, _extn
Parse out filename from any specified extensions. Returns rootname and string version of extension name.
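A quick illustrative call of parseFilename above; the FITS-style names are assumed examples showing how a bracketed extension is split off while a plain name yields None.

# Hypothetical inputs using the square-bracket extension convention.
print(parseFilename('image.fits[sci,1]'))  # ('image.fits', 'sci,1')
print(parseFilename('image.fits'))         # ('image.fits', None)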
def _make_callsites(self, stack_pointer_tracker=None): """ Simplify all function call statements. :return: None """ # Computing reaching definitions rd = self.project.analyses.ReachingDefinitions(func=self.function, func_graph=self.graph, observe_all=True) for key in self._blocks: block = self._blocks[key] csm = self.project.analyses.AILCallSiteMaker(block, reaching_definitions=rd) if csm.result_block: ail_block = csm.result_block simp = self.project.analyses.AILBlockSimplifier(ail_block, stack_pointer_tracker=stack_pointer_tracker) self._blocks[key] = simp.result_block self._update_graph()
Simplify all function call statements. :return: None
def vector_angle_cos(u, v): ''' vector_angle_cos(u, v) yields the cosine of the angle between the two vectors u and v. If u or v (or both) is a (d x n) matrix of n vectors, the result will be a length n vector of the cosines. ''' u = np.asarray(u) v = np.asarray(v) return (u * v).sum(0) / np.sqrt((u ** 2).sum(0) * (v ** 2).sum(0))
vector_angle_cos(u, v) yields the cosine of the angle between the two vectors u and v. If u or v (or both) is a (d x n) matrix of n vectors, the result will be a length n vector of the cosines.
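A minimal numeric sketch for vector_angle_cos above with assumed inputs: orthogonal vectors give 0, parallel vectors give 1, and a (d x n) matrix argument yields one cosine per column.

import numpy as np
print(vector_angle_cos([1, 0], [0, 1]))   # 0.0 (orthogonal)
print(vector_angle_cos([2, 0], [5, 0]))   # 1.0 (parallel)
u = np.array([[1, 1], [0, 1]])  # columns: (1, 0) and (1, 1)
v = np.array([[0, 1], [1, 1]])  # columns: (0, 1) and (1, 1)
print(vector_angle_cos(u, v))   # approximately [0. 1.]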
def error(self, amplexception): """ Receives notification of an error. """ msg = '\t'+str(amplexception).replace('\n', '\n\t') print('Error:\n{:s}'.format(msg)) raise amplexception
Receives notification of an error.
def make_chars_uppercase( lst: Union[list, tuple, str, set], uppercase: int ) -> Union[list, tuple, str, set]: """Make uppercase some randomly selected characters. The characters can be in a (mix of) string, list, tuple or set. Keyword arguments: lst -- the object to make all chars uppercase, which can be a (mix of) list, tuple, string or set. uppercase -- Number of characters to be set as uppercase. """ if not isinstance(lst, (list, tuple, str, set)): raise TypeError('lst must be a list, a tuple, a set or a string') if not isinstance(uppercase, int): raise TypeError('uppercase must be an integer') if uppercase < 0: raise ValueError('uppercase must be bigger than zero') lowercase = Aux.lowercase_count(lst) if uppercase == 0 or lowercase == 0: return lst elif uppercase >= lowercase: # Make it all uppercase return Aux.make_all_uppercase(lst) arr = list(lst) # Check if at least an element is supported # This is required to avoid an infinite loop below supported = False for element in arr: if isinstance(element, (list, tuple, str, set)): supported = True break if supported: # Pick a word at random, then make a character uppercase count = 0 while count < uppercase: windex = randbelow(len(arr)) element = arr[windex] # Skip unsupported types or empty ones if element: aux = element if isinstance(element, str): aux = Aux._make_one_char_uppercase(element) elif isinstance(element, (list, tuple, set)): aux = Aux.make_chars_uppercase(element, 1) if aux != element: arr[windex] = aux count += 1 if isinstance(lst, set): return set(arr) elif isinstance(lst, str): return ''.join(arr) elif isinstance(lst, tuple): return tuple(arr) return arr
Make uppercase some randomly selected characters. The characters can be in a (mix of) string, list, tuple or set. Keyword arguments: lst -- the object to make all chars uppercase, which can be a (mix of) list, tuple, string or set. uppercase -- Number of characters to be set as uppercase.
def p_SingleType_any(p): """SingleType : any TypeSuffixStartingWithArray""" p[0] = helper.unwrapTypeSuffix(model.SimpleType( model.SimpleType.ANY), p[2])
SingleType : any TypeSuffixStartingWithArray
def get_object(self, request, object_id, from_field=None): """ our implementation of get_object allows for cloning when updating an object, not cloning when the button 'save but not clone' is pushed and at no other time will clone be called """ # from_field breaks in 1.7.8 obj = super(VersionedAdmin, self).get_object(request, object_id) # Only clone if update view as get_object() is also called for change, # delete, and history views if request.method == 'POST' and \ obj and \ obj.is_latest and \ 'will_not_clone' not in request.path and \ 'delete' not in request.path and \ 'restore' not in request.path: obj = obj.clone() return obj
our implementation of get_object allows for cloning when updating an object, not cloning when the button 'save but not clone' is pushed and at no other time will clone be called
def do_rmfit(self, arg): """Removes a fit function from a variable. See 'fit'.""" if arg in self.curargs["fits"]: del self.curargs["fits"][arg] #We also need to remove the variable entry if it exists. if "timing" in arg: fitvar = "{}|fit".format(arg) else: fitvar = "{}.fit".format(arg) if fitvar in self.curargs["dependents"]: self.curargs["dependents"].remove(fitvar)
Removes a fit function from a variable. See 'fit'.
def alias_log_entry(self, log_entry_id, alias_id): """Adds an ``Id`` to a ``LogEntry`` for the purpose of creating compatibility. The primary ``Id`` of the ``LogEntry`` is determined by the provider. The new ``Id`` performs as an alias to the primary ``Id``. If the alias is a pointer to another log entry, it is reassigned to the given log entry ``Id``. arg: log_entry_id (osid.id.Id): the ``Id`` of a ``LogEntry`` arg: alias_id (osid.id.Id): the alias ``Id`` raise: AlreadyExists - ``alias_id`` is already assigned raise: NotFound - ``log_entry_id`` not found raise: NullArgument - ``log_entry_id`` or ``alias_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceAdminSession.alias_resources_template self._alias_id(primary_id=log_entry_id, equivalent_id=alias_id)
Adds an ``Id`` to a ``LogEntry`` for the purpose of creating compatibility. The primary ``Id`` of the ``LogEntry`` is determined by the provider. The new ``Id`` performs as an alias to the primary ``Id``. If the alias is a pointer to another log entry, it is reassigned to the given log entry ``Id``. arg: log_entry_id (osid.id.Id): the ``Id`` of a ``LogEntry`` arg: alias_id (osid.id.Id): the alias ``Id`` raise: AlreadyExists - ``alias_id`` is already assigned raise: NotFound - ``log_entry_id`` not found raise: NullArgument - ``log_entry_id`` or ``alias_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def submit_row(context): """ Overrides 'django.contrib.admin.templatetags.admin_modify.submit_row'. Manipulates the context going into that function by hiding all of the buttons in the submit row if the key `readonly` is set in the context. """ ctx = original_submit_row(context) if context.get('readonly', False): ctx.update({ 'show_delete_link': False, 'show_save_as_new': False, 'show_save_and_add_another': False, 'show_save_and_continue': False, 'show_save': False, }) return ctx
Overrides 'django.contrib.admin.templatetags.admin_modify.submit_row'. Manipulates the context going into that function by hiding all of the buttons in the submit row if the key `readonly` is set in the context.
def indel_at( self, position, check_insertions=True, check_deletions=True, one_based=True ): """Does the read contain an indel at the given position? Return True if the read contains an insertion at the given position (position must be the base before the insertion event) or if the read contains a deletion where the base at position is deleted. Return False otherwise.""" (insertions, deletions) = self.get_indels( one_based=one_based ) if check_insertions: for insertion in insertions: if insertion[0] == position: return True if check_deletions: for deletion in deletions: if deletion[0] < position < deletion[0] + deletion[1] + 1: return True return False
Does the read contain an indel at the given position? Return True if the read contains an insertion at the given position (position must be the base before the insertion event) or if the read contains a deletion where the base at position is deleted. Return False otherwise.
def get_cats(self): '''Get top keywords categories''' start_url = 'http://top.taobao.com/index.php?from=tbsy' rs = self.fetch(start_url) if not rs: return None soup = BeautifulSoup(rs.content, convertEntities=BeautifulSoup.HTML_ENTITIES, markupMassage=hexentityMassage) cats = [{'id':'TR_%s'%li['id'].encode('utf-8').upper(), 'title':li.a.text.encode('utf-8').strip()} for li in soup.find('div', id='nav').findAll('li') if li['id']!='index'] threadPool = ThreadPool(len(cats) if len(cats)<=5 else 5) for cat in cats: threadPool.run(self.get_cats_thread, callback=None, cat=cat) cats = threadPool.killAllWorkers(None) return cats
Get top keywords categories
def cmdargs(mysqldump: str, username: str, password: str, database: str, verbose: bool, with_drop_create_database: bool, max_allowed_packet: str, hide_password: bool = False) -> List[str]: """ Returns command arguments for a ``mysqldump`` call. Args: mysqldump: ``mysqldump`` executable filename username: user name password: password database: database name verbose: verbose output? with_drop_create_database: produce commands to ``DROP`` the database and recreate it? max_allowed_packet: passed to ``mysqldump`` hide_password: obscure the password (will break the arguments but provide a safe version to show the user)? Returns: list of command-line arguments """ ca = [ mysqldump, "-u", username, "-p{}".format("*****" if hide_password else password), "--max_allowed_packet={}".format(max_allowed_packet), "--hex-blob", # preferable to raw binary in our .sql file ] if verbose: ca.append("--verbose") if with_drop_create_database: ca.extend([ "--add-drop-database", "--databases", database ]) else: ca.append(database) pass return ca
Returns command arguments for a ``mysqldump`` call. Args: mysqldump: ``mysqldump`` executable filename username: user name password: password database: database name verbose: verbose output? with_drop_create_database: produce commands to ``DROP`` the database and recreate it? max_allowed_packet: passed to ``mysqldump`` hide_password: obscure the password (will break the arguments but provide a safe version to show the user)? Returns: list of command-line arguments
def _find_all_simple(path): """ Find all files under 'path' """ results = ( os.path.join(base, file) for base, dirs, files in os.walk(path, followlinks=True) for file in files ) return filter(os.path.isfile, results)
Find all files under 'path'
def process_xml(xml_string): """Return a TripsProcessor by processing a TRIPS EKB XML string. Parameters ---------- xml_string : str A TRIPS extraction knowledge base (EKB) string to be processed. http://trips.ihmc.us/parser/api.html Returns ------- tp : TripsProcessor A TripsProcessor containing the extracted INDRA Statements in tp.statements. """ tp = TripsProcessor(xml_string) if tp.tree is None: return None tp.get_modifications_indirect() tp.get_activations_causal() tp.get_activations_stimulate() tp.get_complexes() tp.get_modifications() tp.get_active_forms() tp.get_active_forms_state() tp.get_activations() tp.get_translocation() tp.get_regulate_amounts() tp.get_degradations() tp.get_syntheses() tp.get_conversions() tp.get_simple_increase_decrease() return tp
Return a TripsProcessor by processing a TRIPS EKB XML string. Parameters ---------- xml_string : str A TRIPS extraction knowledge base (EKB) string to be processed. http://trips.ihmc.us/parser/api.html Returns ------- tp : TripsProcessor A TripsProcessor containing the extracted INDRA Statements in tp.statements.
def string_to_bytes(string, size): """Convert string to bytes and add padding.""" if len(string) > size: raise PyVLXException("string_to_bytes::string_to_large") encoded = bytes(string, encoding='utf-8') return encoded + bytes(size-len(encoded))
Convert string to bytes and add padding.
def resnet_model_fn(features, labels, mode, model_class, resnet_size, weight_decay, learning_rate_fn, momentum, data_format, version, loss_scale, loss_filter_fn=None, dtype=resnet_model.DEFAULT_DTYPE, label_smoothing=0.0, enable_lars=False): """Shared functionality for different resnet model_fns. Initializes the ResnetModel representing the model layers and uses that model to build the necessary EstimatorSpecs for the `mode` in question. For training, this means building losses, the optimizer, and the train op that get passed into the EstimatorSpec. For evaluation and prediction, the EstimatorSpec is returned without a train op, but with the necessary parameters for the given mode. Args: features: tensor representing input images labels: tensor representing class labels for all input images mode: current estimator mode; should be one of `tf.estimator.ModeKeys.TRAIN`, `EVALUATE`, `PREDICT` model_class: a class representing a TensorFlow model that has a __call__ function. We assume here that this is a subclass of ResnetModel. resnet_size: A single integer for the size of the ResNet model. weight_decay: weight decay loss rate used to regularize learned variables. learning_rate_fn: function that returns the current learning rate given the current global_step momentum: momentum term used for optimization data_format: Input format ('channels_last', 'channels_first', or None). If set to None, the format is dependent on whether a GPU is available. version: Integer representing which version of the ResNet network to use. See README for details. Valid values: [1, 2] loss_scale: The factor to scale the loss for numerical stability. A detailed summary is present in the arg parser help text. loss_filter_fn: function that takes a string variable name and returns True if the var should be included in loss calculation, and False otherwise. If None, batch_normalization variables will be excluded from the loss. dtype: the TensorFlow dtype to use for calculations. Returns: EstimatorSpec parameterized according to the input params and the current mode. """ # Generate a summary node for the images tf.summary.image('images', features, max_outputs=6) # Checks that features/images have same data type being used for calculations. assert features.dtype == dtype features = tf.cast(features, dtype) model = model_class(resnet_size, data_format, version=version, dtype=dtype) logits = model(features, mode == tf.estimator.ModeKeys.TRAIN) # This acts as a no-op if the logits are already in fp32 (provided logits are # not a SparseTensor). If dtype is is low precision, logits must be cast to # fp32 for numerical stability. logits = tf.cast(logits, tf.float32) num_examples_metric = tf_mlperf_log.sum_metric(tensor=tf.shape(logits)[0], name=_NUM_EXAMPLES_NAME) predictions = { 'classes': tf.argmax(logits, axis=1), 'probabilities': tf.nn.softmax(logits, name='softmax_tensor') } if mode == tf.estimator.ModeKeys.PREDICT: # Return the predictions and the specification for serving a SavedModel return tf.estimator.EstimatorSpec( mode=mode, predictions=predictions, export_outputs={ 'predict': tf.estimator.export.PredictOutput(predictions) }) # Calculate loss, which includes softmax cross entropy and L2 regularization. 
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_LOSS_FN, value=mlperf_log.CCE) if label_smoothing != 0.0: one_hot_labels = tf.one_hot(labels, 1001) cross_entropy = tf.losses.softmax_cross_entropy( logits=logits, onehot_labels=one_hot_labels, label_smoothing=label_smoothing) else: cross_entropy = tf.losses.sparse_softmax_cross_entropy( logits=logits, labels=labels) # Create a tensor named cross_entropy for logging purposes. tf.identity(cross_entropy, name='cross_entropy') tf.summary.scalar('cross_entropy', cross_entropy) # If no loss_filter_fn is passed, assume we want the default behavior, # which is that batch_normalization variables are excluded from loss. def exclude_batch_norm(name): return 'batch_normalization' not in name loss_filter_fn = loss_filter_fn or exclude_batch_norm mlperf_log.resnet_print(key=mlperf_log.MODEL_EXCLUDE_BN_FROM_L2, value=not loss_filter_fn('batch_normalization')) # Add weight decay to the loss. mlperf_log.resnet_print(key=mlperf_log.MODEL_L2_REGULARIZATION, value=weight_decay) l2_loss = weight_decay * tf.add_n( # loss is computed using fp32 for numerical stability. [tf.nn.l2_loss(tf.cast(v, tf.float32)) for v in tf.trainable_variables() if loss_filter_fn(v.name)]) tf.summary.scalar('l2_loss', l2_loss) loss = cross_entropy + l2_loss if mode == tf.estimator.ModeKeys.TRAIN: global_step = tf.train.get_or_create_global_step() learning_rate = learning_rate_fn(global_step) log_id = mlperf_log.resnet_print(key=mlperf_log.OPT_LR, deferred=True) learning_rate = tf_mlperf_log.log_deferred(op=learning_rate, log_id=log_id, every_n=100) # Create a tensor named learning_rate for logging purposes tf.identity(learning_rate, name='learning_rate') tf.summary.scalar('learning_rate', learning_rate) mlperf_log.resnet_print(key=mlperf_log.OPT_NAME, value=mlperf_log.SGD_WITH_MOMENTUM) mlperf_log.resnet_print(key=mlperf_log.OPT_MOMENTUM, value=momentum) if enable_lars: optimizer = tf.contrib.opt.LARSOptimizer( learning_rate, momentum=momentum, weight_decay=weight_decay, skip_list=['batch_normalization', 'bias']) else: optimizer = tf.train.MomentumOptimizer( learning_rate=learning_rate, momentum=momentum ) if loss_scale != 1: # When computing fp16 gradients, often intermediate tensor values are # so small, they underflow to 0. To avoid this, we multiply the loss by # loss_scale to make these tensor values loss_scale times bigger. scaled_grad_vars = optimizer.compute_gradients(loss * loss_scale) # Once the gradient computation is complete we can scale the gradients # back to the correct scale before passing them to the optimizer. 
unscaled_grad_vars = [(grad / loss_scale, var) for grad, var in scaled_grad_vars] minimize_op = optimizer.apply_gradients(unscaled_grad_vars, global_step) else: minimize_op = optimizer.minimize(loss, global_step) update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) train_op = tf.group(minimize_op, update_ops, num_examples_metric[1]) else: train_op = None accuracy = tf.metrics.accuracy(labels, predictions['classes']) accuracy_top_5 = tf.metrics.mean(tf.nn.in_top_k(predictions=logits, targets=labels, k=5, name='top_5_op')) metrics = {'accuracy': accuracy, 'accuracy_top_5': accuracy_top_5, _NUM_EXAMPLES_NAME: num_examples_metric} # Create a tensor named train_accuracy for logging purposes tf.identity(accuracy[1], name='train_accuracy') tf.identity(accuracy_top_5[1], name='train_accuracy_top_5') tf.summary.scalar('train_accuracy', accuracy[1]) tf.summary.scalar('train_accuracy_top_5', accuracy_top_5[1]) return tf.estimator.EstimatorSpec( mode=mode, predictions=predictions, loss=loss, train_op=train_op, eval_metric_ops=metrics)
Shared functionality for different resnet model_fns. Initializes the ResnetModel representing the model layers and uses that model to build the necessary EstimatorSpecs for the `mode` in question. For training, this means building losses, the optimizer, and the train op that get passed into the EstimatorSpec. For evaluation and prediction, the EstimatorSpec is returned without a train op, but with the necessary parameters for the given mode. Args: features: tensor representing input images labels: tensor representing class labels for all input images mode: current estimator mode; should be one of `tf.estimator.ModeKeys.TRAIN`, `EVALUATE`, `PREDICT` model_class: a class representing a TensorFlow model that has a __call__ function. We assume here that this is a subclass of ResnetModel. resnet_size: A single integer for the size of the ResNet model. weight_decay: weight decay loss rate used to regularize learned variables. learning_rate_fn: function that returns the current learning rate given the current global_step momentum: momentum term used for optimization data_format: Input format ('channels_last', 'channels_first', or None). If set to None, the format is dependent on whether a GPU is available. version: Integer representing which version of the ResNet network to use. See README for details. Valid values: [1, 2] loss_scale: The factor to scale the loss for numerical stability. A detailed summary is present in the arg parser help text. loss_filter_fn: function that takes a string variable name and returns True if the var should be included in loss calculation, and False otherwise. If None, batch_normalization variables will be excluded from the loss. dtype: the TensorFlow dtype to use for calculations. Returns: EstimatorSpec parameterized according to the input params and the current mode.
def fromimporterror(cls, bundle, importerid, rsid, exception, endpoint): # type: (Bundle, Tuple[str, str], Tuple[Tuple[str, str], int], Optional[Tuple[Any, Any, Any]], EndpointDescription) -> RemoteServiceAdminEvent """ Creates a RemoteServiceAdminEvent object from an import error """ return RemoteServiceAdminEvent( RemoteServiceAdminEvent.IMPORT_ERROR, bundle, importerid, rsid, None, None, exception, endpoint, )
Creates a RemoteServiceAdminEvent object from an import error
def dup_finder(file_path, directory=".", enable_scandir=False): """ Check a directory for duplicates of the specified file. This is meant for a single file only, for checking a directory for dups, use directory_duplicates. This is designed to be as fast as possible by doing lighter checks before progressing to more extensive ones, in order they are: 1. File size 2. First twenty bytes 3. Full SHA256 compare .. code:: python list(reusables.dup_finder( "test_structure\\files_2\\empty_file")) # ['C:\\Reusables\\test\\data\\fake_dir', # 'C:\\Reusables\\test\\data\\test_structure\\Files\\empty_file_1', # 'C:\\Reusables\\test\\data\\test_structure\\Files\\empty_file_2', # 'C:\\Reusables\\test\\data\\test_structure\\files_2\\empty_file'] :param file_path: Path to file to check for duplicates of :param directory: Directory to dig recursively into to look for duplicates :param enable_scandir: on python < 3.5 enable external scandir package :return: generators """ size = os.path.getsize(file_path) if size == 0: for empty_file in remove_empty_files(directory, dry_run=True): yield empty_file else: with open(file_path, 'rb') as f: first_twenty = f.read(20) file_sha256 = file_hash(file_path, "sha256") for root, directories, files in _walk(directory, enable_scandir=enable_scandir): for each_file in files: test_file = os.path.join(root, each_file) if os.path.getsize(test_file) == size: try: with open(test_file, 'rb') as f: test_first_twenty = f.read(20) except OSError: logger.warning("Could not open file to compare - " "{0}".format(test_file)) else: if first_twenty == test_first_twenty: if file_hash(test_file, "sha256") == file_sha256: yield os.path.abspath(test_file)
Check a directory for duplicates of the specified file. This is meant for a single file only, for checking a directory for dups, use directory_duplicates. This is designed to be as fast as possible by doing lighter checks before progressing to more extensive ones, in order they are: 1. File size 2. First twenty bytes 3. Full SHA256 compare .. code:: python list(reusables.dup_finder( "test_structure\\files_2\\empty_file")) # ['C:\\Reusables\\test\\data\\fake_dir', # 'C:\\Reusables\\test\\data\\test_structure\\Files\\empty_file_1', # 'C:\\Reusables\\test\\data\\test_structure\\Files\\empty_file_2', # 'C:\\Reusables\\test\\data\\test_structure\\files_2\\empty_file'] :param file_path: Path to file to check for duplicates of :param directory: Directory to dig recursively into to look for duplicates :param enable_scandir: on python < 3.5 enable external scandir package :return: generators
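A hedged usage sketch (paths are illustrative; it assumes the reusables package that provides dup_finder above):

import reusables

# Find every duplicate of report.csv anywhere under the data directory.
for duplicate in reusables.dup_finder("data/report.csv", directory="data"):
    print("duplicate found:", duplicate)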
def aggregate(raster, ndv, block_size):
    '''
    Aggregate raster to smaller resolution, by adding cells.
    Usage:
        aggregate(raster, ndv, block_size)
    where:
        raster is a Numpy array created by importing the raster (e.g. geotiff)
        ndv is the NoData Value for the raster (can be read using the get_geo_info function)
        block_size is a tuple of factors by which the raster will be shrunk
    Example:
        raster = HMISea.tif
        ndv, xsize, ysize, geot, projection, datatype = get_geo_info(raster)
        costs = load_tiff(raster)
        costs2 = aggregate(costs, ndv, (10,10))
    '''
    raster2 = block_reduce(raster, block_size, func=np.ma.sum)
    return raster2
Aggregate raster to smaller resolution, by adding cells.
Usage:
    aggregate(raster, ndv, block_size)
where:
    raster is a Numpy array created by importing the raster (e.g. geotiff)
    ndv is the NoData Value for the raster (can be read using the get_geo_info function)
    block_size is a tuple of factors by which the raster will be shrunk
Example:
    raster = HMISea.tif
    ndv, xsize, ysize, geot, projection, datatype = get_geo_info(raster)
    costs = load_tiff(raster)
    costs2 = aggregate(costs, ndv, (10,10))
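A small self-contained sketch of the underlying operation (skimage's block_reduce summing fixed-size blocks); the NoData handling of the surrounding module is omitted here:

import numpy as np
from skimage.measure import block_reduce

raster = np.arange(16, dtype=float).reshape(4, 4)
coarse = block_reduce(raster, (2, 2), func=np.sum)
# Each output cell is the sum of a 2x2 block of input cells,
# e.g. coarse[0, 0] == raster[0:2, 0:2].sum() == 10.0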
def check_extract_from_egg(pth, todir=None): r""" Check if path points to a file inside a python egg file, extract the file from the egg to a cache directory (following pkg_resources convention) and return [(extracted path, egg file path, relative path inside egg file)]. Otherwise, just return [(original path, None, None)]. If path points to an egg file directly, return a list with all files from the egg formatted like above. Example: >>> check_extract_from_egg(r'C:\Python26\Lib\site-packages\my.egg\mymodule\my.pyd') [(r'C:\Users\UserName\AppData\Roaming\Python-Eggs\my.egg-tmp\mymodule\my.pyd', r'C:\Python26\Lib\site-packages\my.egg', r'mymodule/my.pyd')] """ rv = [] if os.path.altsep: pth = pth.replace(os.path.altsep, os.path.sep) components = pth.split(os.path.sep) for i, name in enumerate(components): if name.lower().endswith(".egg"): eggpth = os.path.sep.join(components[:i + 1]) if os.path.isfile(eggpth): # eggs can also be directories! try: egg = zipfile.ZipFile(eggpth) except zipfile.BadZipfile, e: raise SystemExit("Error: %s %s" % (eggpth, e)) if todir is None: # Use the same directory as setuptools/pkg_resources. So, # if the specific egg was accessed before (not necessarily # by pyinstaller), the extracted contents already exist # (pkg_resources puts them there) and can be used. todir = os.path.join(pkg_resouces_get_default_cache(), name + "-tmp") if components[i + 1:]: members = ["/".join(components[i + 1:])] else: members = egg.namelist() for member in members: pth = os.path.join(todir, member) if not os.path.isfile(pth): dirname = os.path.dirname(pth) if not os.path.isdir(dirname): os.makedirs(dirname) f = open(pth, "wb") f.write(egg.read(member)) f.close() rv.append((pth, eggpth, member)) return rv return [(pth, None, None)]
r""" Check if path points to a file inside a python egg file, extract the file from the egg to a cache directory (following pkg_resources convention) and return [(extracted path, egg file path, relative path inside egg file)]. Otherwise, just return [(original path, None, None)]. If path points to an egg file directly, return a list with all files from the egg formatted like above. Example: >>> check_extract_from_egg(r'C:\Python26\Lib\site-packages\my.egg\mymodule\my.pyd') [(r'C:\Users\UserName\AppData\Roaming\Python-Eggs\my.egg-tmp\mymodule\my.pyd', r'C:\Python26\Lib\site-packages\my.egg', r'mymodule/my.pyd')]
def hist_3d_index(x, y, z, shape): """ Fast 3d histogram of 3D indices with C++ inner loop optimization. Is more than 2 orders faster than np.histogramdd(). The indices are given in x, y, z coordinates and have to fit into a histogram of the dimensions shape. Parameters ---------- x : array like y : array like z : array like shape : tuple tuple with x,y,z dimensions: (x, y, z) Returns ------- np.ndarray with given shape """ if len(shape) != 3: raise InvalidInputError('The shape has to describe a 3-d histogram') # change memory alignment for c++ library x = np.ascontiguousarray(x.astype(np.int32)) y = np.ascontiguousarray(y.astype(np.int32)) z = np.ascontiguousarray(z.astype(np.int32)) result = np.zeros(shape=shape, dtype=np.uint32).ravel() # ravel hist in c-style, 3D --> 1D analysis_functions.hist_3d(x, y, z, shape[0], shape[1], shape[2], result) return np.reshape(result, shape)
Fast 3d histogram of 3D indices with C++ inner loop optimization. Is more than 2 orders faster than np.histogramdd(). The indices are given in x, y, z coordinates and have to fit into a histogram of the dimensions shape. Parameters ---------- x : array like y : array like z : array like shape : tuple tuple with x,y,z dimensions: (x, y, z) Returns ------- np.ndarray with given shape
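For reference, a pure-NumPy sketch of the same computation (much slower than the C++ path above, but it shows exactly what is counted):

import numpy as np

def hist_3d_numpy(x, y, z, shape):
    # Accumulate one count per (x, y, z) index triple.
    result = np.zeros(shape, dtype=np.uint32)
    np.add.at(result, (x, y, z), 1)
    return result

counts = hist_3d_numpy(np.array([0, 0, 1]), np.array([1, 1, 0]),
                       np.array([2, 2, 0]), (2, 2, 3))
# counts[0, 1, 2] == 2 and counts[1, 0, 0] == 1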
def setComplete(self, basepath):
    """Set complete flag for this comic, i.e. all comics are downloaded."""
    if self.endOfLife:
        filename = self.getCompleteFile(basepath)
        if not os.path.exists(filename):
            with open(filename, 'w') as f:
                f.write('All comics should be downloaded here.')
Set complete flag for this comic, i.e. all comics are downloaded.
def fetch(self): """ Fetch & return a new `Image` object representing the image's current state :rtype: Image :raises DOAPIError: if the API endpoint replies with an error (e.g., if the image no longer exists) """ api = self.doapi_manager return api._image(api.request(self.url)["image"])
Fetch & return a new `Image` object representing the image's current state :rtype: Image :raises DOAPIError: if the API endpoint replies with an error (e.g., if the image no longer exists)
def p0f(pkt):
    """Passive OS fingerprinting: which OS emitted this TCP packet?
    p0f(packet) -> accuracy, [list of guesses]
    """
    db, sig = packet2p0f(pkt)
    if db:
        pb = db.get_base()
    else:
        pb = []
    if not pb:
        warning("p0f base empty.")
        return []
    #s = len(pb[0][0])
    r = []
    max = len(sig[4].split(",")) + 5
    for b in pb:
        d = p0f_correl(sig, b)
        if d == max:
            r.append((b[6], b[7], b[1] - pkt[IP].ttl))
    return r
Passive OS fingerprinting: which OS emitted this TCP packet?
p0f(packet) -> accuracy, [list of guesses]
def debug_callback(event, *args, **kwds): '''Example callback, useful for debugging. ''' l = ['event %s' % (event.type,)] if args: l.extend(map(str, args)) if kwds: l.extend(sorted('%s=%s' % t for t in kwds.items())) print('Debug callback (%s)' % ', '.join(l))
Example callback, useful for debugging.
def save(): """ save function """ results = {} cpu_number = 0 while True: try: _file = open( CPU_PREFIX + 'cpu{}/cpufreq/scaling_governor'.format(cpu_number)) except: break governor = _file.read().strip() results.setdefault(cpu_number, {})['governor'] = governor _file.close() try: _file = open( CPU_PREFIX + 'cpu{}/cpufreq/scaling_cur_freq'.format(cpu_number)) except: break results[cpu_number]['freq'] = _file.read().strip() _file.close() cpu_number += 1 return results
save function
def _save_percolator(self): """ Saves the query field as an elasticsearch percolator """ index = Content.search_objects.mapping.index query_filter = self.get_content(published=False).to_dict() q = {} if "query" in query_filter: q = {"query": query_filter.get("query", {})} else: # We don't know how to save this return # We'll need this data, to decide which special coverage section to use q["sponsored"] = bool(self.tunic_campaign_id) # Elasticsearch v1.4 percolator "field_value_factor" does not # support missing fields, so always need to include q["start_date"] = self.start_date # NOTE: set end_date to datetime.max if special coverage has no end date # (i.e. is a neverending special coverage) q["end_date"] = self.end_date if self.end_date else datetime.max.replace(tzinfo=pytz.UTC) # Elasticsearch v1.4 percolator range query does not support DateTime range queries # (PercolateContext.nowInMillisImpl is not implemented). if q["start_date"]: q['start_date_epoch'] = datetime_to_epoch_seconds(q["start_date"]) if q["end_date"]: q['end_date_epoch'] = datetime_to_epoch_seconds(q["end_date"]) # Store manually included IDs for percolator retrieval scoring (boost # manually included content). if self.query: q['included_ids'] = self.query.get('included_ids', []) es.index( index=index, doc_type=".percolator", body=q, id=self.es_id )
Saves the query field as an elasticsearch percolator
def create(self): """ Create the subqueue to change the default behavior of Lock to semaphore. """ self.queue = self.scheduler.queue.addSubQueue(self.priority, LockEvent.createMatcher(self.context, self.key), maxdefault = self.size, defaultQueueClass = CBQueue.AutoClassQueue.initHelper('locker', subqueuelimit = 1))
Create the subqueue to change the default behavior of Lock to semaphore.
def tostr(self): """Export SVG as a string""" element = _transform.SVGFigure(self.width, self.height) element.append(self) svgstr = element.to_str() return svgstr
Export SVG as a string
def load_bytecode_definitions(*, path=None) -> dict: """Load bytecode definitions from JSON file. If no path is provided the default bytecode.json will be loaded. :param path: Either None or a path to a JSON file to load containing bytecode definitions. """ if path is not None: with open(path, 'rb') as file_in: j = json.load(file_in) else: try: j = json.loads(pkgutil.get_data('jawa.util', 'bytecode.json')) except json.JSONDecodeError: # Unfortunately our best way to handle missing/malformed/empty # bytecode.json files since it may not actually be backed by a # "real" file. return {} for definition in j.values(): # If the entry has any operands take the text labels and convert # them into pre-cached struct objects and operand types. operands = definition['operands'] if operands: definition['operands'] = [ [getattr(OperandFmts, oo[0]), OperandTypes[oo[1]]] for oo in operands ] # Return one dict that contains both mnemonic keys and opcode keys. return {**j, **{v['op']: v for v in j.values()}}
Load bytecode definitions from JSON file. If no path is provided the default bytecode.json will be loaded. :param path: Either None or a path to a JSON file to load containing bytecode definitions.
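A hedged usage sketch (the import path and concrete keys are assumptions based on the code above, which indexes each definition by both mnemonic and numeric opcode):

from jawa.util.bytecode import load_bytecode_definitions  # assumed location

definitions = load_bytecode_definitions()
if definitions:
    # Pick any mnemonic key; its entry is also reachable through its opcode.
    mnemonic = next(k for k in definitions if isinstance(k, str))
    entry = definitions[mnemonic]
    assert definitions[entry['op']] is entry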
def version(): ''' Return the system version for this minion .. versionchanged:: 2016.11.4 Added support for AIX .. versionchanged:: 2018.3.0 Added support for OpenBSD CLI Example: .. code-block:: bash salt '*' status.version ''' def linux_version(): ''' linux specific implementation of version ''' try: with salt.utils.files.fopen('/proc/version', 'r') as fp_: return salt.utils.stringutils.to_unicode(fp_.read()).strip() except IOError: return {} def bsd_version(): ''' bsd specific implementation of version ''' return __salt__['cmd.run']('sysctl -n kern.version') # dict that returns a function that does the right thing per platform get_version = { 'Linux': linux_version, 'FreeBSD': bsd_version, 'OpenBSD': bsd_version, 'AIX': lambda: __salt__['cmd.run']('oslevel -s'), } errmsg = 'This method is unsupported on the current operating system!' return get_version.get(__grains__['kernel'], lambda: errmsg)()
Return the system version for this minion .. versionchanged:: 2016.11.4 Added support for AIX .. versionchanged:: 2018.3.0 Added support for OpenBSD CLI Example: .. code-block:: bash salt '*' status.version
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, object_pairs_hook=None, **kw): r"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a DSON document) to a Python object. If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name must be specified. Encodings that are not ASCII based (such as UCS-2) are not allowed and should be decoded to ``unicode`` first. ``object_hook`` is an optional function that will be called with the result of any object literal decode (a ``dict``). The return value of ``object_hook`` will be used instead of the ``dict``. This feature can be used to implement custom decoders (e.g. DSON-RPC class hinting). ``object_pairs_hook`` is an optional function that will be called with the result of any object literal decoded with an ordered list of pairs. The return value of ``object_pairs_hook`` will be used instead of the ``dict``. This feature can be used to implement custom decoders that rely on the order that the key and value pairs are decoded (for example, collections.OrderedDict will remember the order of insertion). If ``object_hook`` is also defined, the ``object_pairs_hook`` takes priority. ``parse_float``, if specified, will be called with the string of every DSON float to be decoded. By default this is equivalent to float(num_str). This can be used to use another datatype or parser for DSON floats (e.g. decimal.Decimal). ``parse_int``, if specified, will be called with the string of every DSON int to be decoded. By default this is equivalent to int(num_str). This can be used to use another datatype or parser for DSON integers (e.g. float). ``parse_constant``, if specified, will be called with one of the following strings: -Infinity, Infinity, NaN, null, true, false. This can be used to raise an exception if invalid DSON numbers are encountered. To use a custom ``DSONDecoder`` subclass, specify it with the ``cls`` kwarg; otherwise ``DSONDecoder`` is used. """ if (cls is None and encoding is None and object_hook is None and parse_int is None and parse_float is None and parse_constant is None and object_pairs_hook is None and not kw): return _default_decoder.decode(s) if cls is None: cls = DSONDecoder if object_hook is not None: kw['object_hook'] = object_hook if object_pairs_hook is not None: kw['object_pairs_hook'] = object_pairs_hook if parse_float is not None: kw['parse_float'] = parse_float if parse_int is not None: kw['parse_int'] = parse_int if parse_constant is not None: kw['parse_constant'] = parse_constant return cls(encoding=encoding, **kw).decode(s)
r"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a DSON document) to a Python object. If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name must be specified. Encodings that are not ASCII based (such as UCS-2) are not allowed and should be decoded to ``unicode`` first. ``object_hook`` is an optional function that will be called with the result of any object literal decode (a ``dict``). The return value of ``object_hook`` will be used instead of the ``dict``. This feature can be used to implement custom decoders (e.g. DSON-RPC class hinting). ``object_pairs_hook`` is an optional function that will be called with the result of any object literal decoded with an ordered list of pairs. The return value of ``object_pairs_hook`` will be used instead of the ``dict``. This feature can be used to implement custom decoders that rely on the order that the key and value pairs are decoded (for example, collections.OrderedDict will remember the order of insertion). If ``object_hook`` is also defined, the ``object_pairs_hook`` takes priority. ``parse_float``, if specified, will be called with the string of every DSON float to be decoded. By default this is equivalent to float(num_str). This can be used to use another datatype or parser for DSON floats (e.g. decimal.Decimal). ``parse_int``, if specified, will be called with the string of every DSON int to be decoded. By default this is equivalent to int(num_str). This can be used to use another datatype or parser for DSON integers (e.g. float). ``parse_constant``, if specified, will be called with one of the following strings: -Infinity, Infinity, NaN, null, true, false. This can be used to raise an exception if invalid DSON numbers are encountered. To use a custom ``DSONDecoder`` subclass, specify it with the ``cls`` kwarg; otherwise ``DSONDecoder`` is used.
def core_profile_check(self) -> None: ''' Core profile check. FOR DEBUG PURPOSES ONLY ''' profile_mask = self.info['GL_CONTEXT_PROFILE_MASK'] if profile_mask != 1: warnings.warn('The window should request a CORE OpenGL profile') version_code = self.version_code if not version_code: major, minor = map(int, self.info['GL_VERSION'].split('.', 2)[:2]) version_code = major * 100 + minor * 10 if version_code < 330: warnings.warn('The window should support OpenGL 3.3+ (version_code=%d)' % version_code)
Core profile check. FOR DEBUG PURPOSES ONLY
def _from_specs(self, dims, spacing=(1.0,1.0,1.0), origin=(0.0, 0.0, 0.0)): """ Create VTK image data directly from numpy arrays. A uniform grid is defined by the node spacings for each axis (uniform along each individual axis) and the number of nodes on each axis. These are relative to a specified origin (default is ``(0.0, 0.0, 0.0)``). Parameters ---------- dims : tuple(int) Length 3 tuple of ints specifying how many nodes along each axis spacing : tuple(float) Length 3 tuple of floats/ints specifying the node spacings for each axis origin : tuple(float) Length 3 tuple of floats/ints specifying minimum value for each axis """ xn, yn, zn = dims[0], dims[1], dims[2] xs, ys, zs = spacing[0], spacing[1], spacing[2] xo, yo, zo = origin[0], origin[1], origin[2] self.SetDimensions(xn, yn, zn) self.SetOrigin(xo, yo, zo) self.SetSpacing(xs, ys, zs)
Create VTK image data directly from numpy arrays. A uniform grid is defined by the node spacings for each axis (uniform along each individual axis) and the number of nodes on each axis. These are relative to a specified origin (default is ``(0.0, 0.0, 0.0)``). Parameters ---------- dims : tuple(int) Length 3 tuple of ints specifying how many nodes along each axis spacing : tuple(float) Length 3 tuple of floats/ints specifying the node spacings for each axis origin : tuple(float) Length 3 tuple of floats/ints specifying minimum value for each axis
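A small sketch of what dims/spacing/origin describe: the node coordinates along each axis are simply origin + spacing * index (the values below are illustrative):

import numpy as np

dims, spacing, origin = (3, 2, 2), (1.0, 2.0, 5.0), (10.0, 0.0, 0.0)
x = origin[0] + spacing[0] * np.arange(dims[0])  # [10., 11., 12.]
y = origin[1] + spacing[1] * np.arange(dims[1])  # [0., 2.]
z = origin[2] + spacing[2] * np.arange(dims[2])  # [0., 5.]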
def data_complete(self): """ Return True if all the expected datadir files are present """ return task.data_complete(self.datadir, self.sitedir, self._get_container_name)
Return True if all the expected datadir files are present
def extract_formats(config_handle): """Get application formats. See :class:`gogoutils.Formats` for available options. Args: config_handle (configparser.ConfigParser): Instance of configurations. Returns: dict: Formats in ``{$format_type: $format_pattern}``. """ configurations = dict(config_handle) formats = dict(configurations.get('formats', {})) return formats
Get application formats. See :class:`gogoutils.Formats` for available options. Args: config_handle (configparser.ConfigParser): Instance of configurations. Returns: dict: Formats in ``{$format_type: $format_pattern}``.
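A hedged usage sketch with a hand-built configuration (assumes extract_formats above is in scope; section contents are illustrative):

import configparser

config = configparser.ConfigParser()
config.read_string("[formats]\ndomain = example.com\napp = {project}-{repo}\n")
print(extract_formats(config))
# {'domain': 'example.com', 'app': '{project}-{repo}'}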
def get_center(self, element): """Get center coordinates of an element :param element: either a WebElement, PageElement or element locator as a tuple (locator_type, locator_value) :returns: dict with center coordinates """ web_element = self.get_web_element(element) location = web_element.location size = web_element.size return {'x': location['x'] + (size['width'] / 2), 'y': location['y'] + (size['height'] / 2)}
Get center coordinates of an element :param element: either a WebElement, PageElement or element locator as a tuple (locator_type, locator_value) :returns: dict with center coordinates
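The arithmetic itself, applied to a hypothetical element geometry (no browser required):

location = {'x': 100, 'y': 40}
size = {'width': 80, 'height': 20}
center = {'x': location['x'] + size['width'] / 2,
          'y': location['y'] + size['height'] / 2}
# center == {'x': 140.0, 'y': 50.0}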
def addImagingColumns(msname, ack=True):
    """ Add the columns to an MS needed for the casa imager.

    It adds the columns MODEL_DATA, CORRECTED_DATA, and IMAGING_WEIGHT.
    It also sets the CHANNEL_SELECTION keyword needed for the older casa imagers.
    A column is not added if already existing.

    """
    # numpy is needed
    import numpy as np

    # Open the MS
    t = table(msname, readonly=False, ack=False)
    cnames = t.colnames()
    # Get the description of the DATA column.
    try:
        cdesc = t.getcoldesc('DATA')
    except:
        raise ValueError('Column DATA does not exist')
    # Determine if the DATA storage specification is tiled.
    hasTiled = False
    try:
        dminfo = t.getdminfo("DATA")
        if dminfo['TYPE'][:5] == 'Tiled':
            hasTiled = True
    except:
        hasTiled = False
    # Use TiledShapeStMan if needed.
    if not hasTiled:
        dminfo = {'TYPE': 'TiledShapeStMan',
                  'SPEC': {'DEFAULTTILESHAPE': [4, 32, 128]}}
    # Add the columns (if not existing). Use the description of the DATA column.
    if 'MODEL_DATA' in cnames:
        six.print_("Column MODEL_DATA not added; it already exists")
    else:
        dminfo['NAME'] = 'modeldata'
        cdesc['comment'] = 'The model data column'
        t.addcols(maketabdesc(makecoldesc('MODEL_DATA', cdesc)), dminfo)
        if ack:
            six.print_("added column MODEL_DATA")
    if 'CORRECTED_DATA' in cnames:
        six.print_("Column CORRECTED_DATA not added; it already exists")
    else:
        dminfo['NAME'] = 'correcteddata'
        cdesc['comment'] = 'The corrected data column'
        t.addcols(maketabdesc(makecoldesc('CORRECTED_DATA', cdesc)), dminfo)
        if ack:
            six.print_("added column CORRECTED_DATA")
    if 'IMAGING_WEIGHT' in cnames:
        six.print_("Column IMAGING_WEIGHT not added; it already exists")
    else:
        # Add IMAGING_WEIGHT which is 1-dim and has type float.
        # It needs a shape, otherwise the CASA imager complains.
        shp = []
        if 'shape' in cdesc:
            shp = cdesc['shape']
        if len(shp) > 0:
            shp = [shp[0]]  # use nchan from shape
        else:
            shp = [t.getcell('DATA', 0).shape[0]]  # use nchan from actual data
        cd = makearrcoldesc('IMAGING_WEIGHT', 0, ndim=1, shape=shp,
                            valuetype='float')
        dminfo = {'TYPE': 'TiledShapeStMan',
                  'SPEC': {'DEFAULTTILESHAPE': [32, 128]}}
        dminfo['NAME'] = 'imagingweight'
        t.addcols(maketabdesc(cd), dminfo)
        if ack:
            six.print_("added column IMAGING_WEIGHT")
    # Add or overwrite keyword CHANNEL_SELECTION.
    if 'CHANNEL_SELECTION' in t.colkeywordnames('MODEL_DATA'):
        t.removecolkeyword('MODEL_DATA', 'CHANNEL_SELECTION')
    # Define the CHANNEL_SELECTION keyword containing the channels of
    # all spectral windows.
    tspw = table(t.getkeyword('SPECTRAL_WINDOW'), ack=False)
    nchans = tspw.getcol('NUM_CHAN')
    chans = [[0, nch] for nch in nchans]
    t.putcolkeyword('MODEL_DATA', 'CHANNEL_SELECTION', np.int32(chans))
    if ack:
        six.print_("defined keyword CHANNEL_SELECTION in column MODEL_DATA")
    # Flush the table to make sure it is written.
    t.flush()
Add the columns to an MS needed for the casa imager. It adds the columns MODEL_DATA, CORRECTED_DATA, and IMAGING_WEIGHT. It also sets the CHANNEL_SELECTION keyword needed for the older casa imagers. A column is not added if already existing.
def add_mag_drifts(inst):
    """Adds ion drifts in magnetic coordinates using ion drifts in S/C coordinates
    along with pre-calculated unit vectors for magnetic coordinates.

    Note
    ----
        Requires ion drifts under labels 'iv_*' where * = (x,y,z) along with
        unit vector labels 'unit_zon_*', 'unit_fa_*', and 'unit_mer_*',
        where the unit vectors are expressed in S/C coordinates. These
        vectors are calculated by add_mag_drift_unit_vectors.

    Parameters
    ----------
    inst : pysat.Instrument
        Instrument object will be modified to include new ion drift magnitudes

    Returns
    -------
    None
        Instrument object modified in place

    """

    inst['iv_zon'] = {'data': inst['unit_zon_x'] * inst['iv_x'] + inst['unit_zon_y']*inst['iv_y'] + inst['unit_zon_z']*inst['iv_z'],
                      'units': 'm/s',
                      'long_name': 'Zonal ion velocity',
                      'notes': ('Ion velocity relative to co-rotation along zonal '
                                'direction, normal to meridional plane. Positive east. '
                                'Velocity obtained using ion velocities relative '
                                'to co-rotation in the instrument frame along '
                                'with the corresponding unit vectors expressed in '
                                'the instrument frame. '),
                      'label': 'Zonal Ion Velocity',
                      'axis': 'Zonal Ion Velocity',
                      'desc': 'Zonal ion velocity',
                      'scale': 'Linear',
                      'value_min': -500.,
                      'value_max': 500.}

    inst['iv_fa'] = {'data': inst['unit_fa_x'] * inst['iv_x'] + inst['unit_fa_y'] * inst['iv_y'] + inst['unit_fa_z'] * inst['iv_z'],
                     'units': 'm/s',
                     'long_name': 'Field-Aligned ion velocity',
                     'notes': ('Ion velocity relative to co-rotation along magnetic '
                               'field line. Positive along the field. '
                               'Velocity obtained using ion velocities relative '
                               'to co-rotation in the instrument frame along '
                               'with the corresponding unit vectors expressed in '
                               'the instrument frame. '),
                     'label': 'Field-Aligned Ion Velocity',
                     'axis': 'Field-Aligned Ion Velocity',
                     'desc': 'Field-Aligned Ion Velocity',
                     'scale': 'Linear',
                     'value_min': -500.,
                     'value_max': 500.}

    inst['iv_mer'] = {'data': inst['unit_mer_x'] * inst['iv_x'] + inst['unit_mer_y']*inst['iv_y'] + inst['unit_mer_z']*inst['iv_z'],
                      'units': 'm/s',
                      'long_name': 'Meridional ion velocity',
                      'notes': ('Velocity along meridional direction, perpendicular '
                                'to field and within meridional plane. Positive is '
                                'up at magnetic equator. '
                                'Velocity obtained using ion velocities relative '
                                'to co-rotation in the instrument frame along '
                                'with the corresponding unit vectors expressed in '
                                'the instrument frame. '),
                      'label': 'Meridional Ion Velocity',
                      'axis': 'Meridional Ion Velocity',
                      'desc': 'Meridional Ion Velocity',
                      'scale': 'Linear',
                      'value_min': -500.,
                      'value_max': 500.}

    return
Adds ion drifts in magnetic coordinates using ion drifts in S/C coordinates
along with pre-calculated unit vectors for magnetic coordinates.

Note
----
    Requires ion drifts under labels 'iv_*' where * = (x,y,z) along with
    unit vector labels 'unit_zon_*', 'unit_fa_*', and 'unit_mer_*',
    where the unit vectors are expressed in S/C coordinates. These
    vectors are calculated by add_mag_drift_unit_vectors.

Parameters
----------
inst : pysat.Instrument
    Instrument object will be modified to include new ion drift magnitudes

Returns
-------
None
    Instrument object modified in place
def _words_at_the_beginning(word, tree, prefix=""): ''' We return all portions of the tree corresponding to the beginning of `word`. This is used recursively, so we pass the prefix so we can return meaningful words+translations. ''' l = [] if "" in tree: l.append([prefix, tree[""]]) if len(word) > 0 and word[0] in tree: l.extend(_words_at_the_beginning( word[1:], tree[word[0]], prefix=prefix+word[0] )) return l
We return all portions of the tree corresponding to the beginning of `word`. This is used recursively, so we pass the prefix so we can return meaningful words+translations.
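A toy example of the tree shape this helper expects (hypothetical data, and it assumes the private helper above is in scope): each node maps a character to a subtree, and an empty-string key marks a complete word with its translation:

tree = {'c': {'a': {'t': {'': 'feline'},
                    'r': {'': 'vehicle'}}}}
# Walking "cats" yields the prefix "cat" and its translation.
print(_words_at_the_beginning('cats', tree))  # [['cat', 'feline']]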
def help_center_categories_list(self, locale=None, sort_by=None, sort_order=None, **kwargs): "https://developer.zendesk.com/rest_api/docs/help_center/categories#list-categories" api_path = "/api/v2/help_center/categories.json" if locale: api_opt_path = "/api/v2/help_center/{locale}/categories.json" api_path = api_opt_path.format(locale=locale) api_query = {} if "query" in kwargs.keys(): api_query.update(kwargs["query"]) del kwargs["query"] if sort_by: api_query.update({ "sort_by": sort_by, }) if sort_order: api_query.update({ "sort_order": sort_order, }) return self.call(api_path, query=api_query, **kwargs)
https://developer.zendesk.com/rest_api/docs/help_center/categories#list-categories
def set_noise_filter(self, user_gpio, steady, active): """ Sets a noise filter on a GPIO. Level changes on the GPIO are ignored until a level which has been stable for [*steady*] microseconds is detected. Level changes on the GPIO are then reported for [*active*] microseconds after which the process repeats. user_gpio:= 0-31 steady:= 0-300000 active:= 0-1000000 Returns 0 if OK, otherwise PI_BAD_USER_GPIO, or PI_BAD_FILTER. This filter affects the GPIO samples returned to callbacks set up with [*callback*] and [*wait_for_edge*]. It does not affect levels read by [*read*], [*read_bank_1*], or [*read_bank_2*]. Level changes before and after the active period may be reported. Your software must be designed to cope with such reports. ... pi.set_noise_filter(23, 1000, 5000) ... """ # pigpio message format # I p1 user_gpio # I p2 steady # I p3 4 ## extension ## # I active extents = [struct.pack("I", active)] res = yield from self._pigpio_aio_command_ext(_PI_CMD_FN, user_gpio, steady, 4, extents) return _u2i(res)
Sets a noise filter on a GPIO. Level changes on the GPIO are ignored until a level which has been stable for [*steady*] microseconds is detected. Level changes on the GPIO are then reported for [*active*] microseconds after which the process repeats. user_gpio:= 0-31 steady:= 0-300000 active:= 0-1000000 Returns 0 if OK, otherwise PI_BAD_USER_GPIO, or PI_BAD_FILTER. This filter affects the GPIO samples returned to callbacks set up with [*callback*] and [*wait_for_edge*]. It does not affect levels read by [*read*], [*read_bank_1*], or [*read_bank_2*]. Level changes before and after the active period may be reported. Your software must be designed to cope with such reports. ... pi.set_noise_filter(23, 1000, 5000) ...
def simplify(self, e=None): """ Simplifies `e`. If `e` is None, simplifies the constraints of this state. """ if e is None: return self._solver.simplify() elif isinstance(e, (int, float, bool)): return e elif isinstance(e, claripy.ast.Base) and e.op in claripy.operations.leaf_operations_concrete: return e elif isinstance(e, SimActionObject) and e.op in claripy.operations.leaf_operations_concrete: return e.ast elif not isinstance(e, (SimActionObject, claripy.ast.Base)): return e else: return self._claripy_simplify(e)
Simplifies `e`. If `e` is None, simplifies the constraints of this state.
def time_to_first_byte(self): """ The aggregate time to first byte for all pages. """ ttfb = [] for page in self.pages: if page.time_to_first_byte is not None: ttfb.append(page.time_to_first_byte) return round(mean(ttfb), self.decimal_precision)
The aggregate time to first byte for all pages.
def from_pem(cls, data, password=None): """Creates a key from PKCS#8 formatted data loaded from a PEM file. See the function `import_from_pem` for details. :param data(bytes): The data contained in a PEM file. :param password(bytes): An optional password to unwrap the key. """ obj = cls() obj.import_from_pem(data, password) return obj
Creates a key from PKCS#8 formatted data loaded from a PEM file. See the function `import_from_pem` for details. :param data(bytes): The data contained in a PEM file. :param password(bytes): An optional password to unwrap the key.
def _add_header_client_encryption_hmac(request_bytes, key, iv, custom_headers): """ :type request_bytes: bytes :type key: bytes :type iv: bytes :type custom_headers: dict[str, str] :rtype: None """ hashed = hmac.new(key, iv + request_bytes, sha1) hashed_base64 = base64.b64encode(hashed.digest()).decode() custom_headers[_HEADER_CLIENT_ENCRYPTION_HMAC] = hashed_base64
:type request_bytes: bytes :type key: bytes :type iv: bytes :type custom_headers: dict[str, str] :rtype: None
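A standalone sketch of the same computation using only the standard library (the key, IV, body and header name below are illustrative placeholders, not the real constants):

import base64
import hmac
from hashlib import sha1

key, iv, body = b'0' * 16, b'1' * 16, b'{"amount": "1.00"}'
# HMAC-SHA1 over the IV concatenated with the request body, then base64.
digest = hmac.new(key, iv + body, sha1).digest()
headers = {'Client-Encryption-Hmac': base64.b64encode(digest).decode()}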
def list_env(saltenv='base'): ''' Return all of the file paths found in an environment ''' ret = {} if saltenv not in __opts__['pillar_roots']: return ret for f_root in __opts__['pillar_roots'][saltenv]: ret[f_root] = {} for root, dirs, files in salt.utils.path.os_walk(f_root): sub = ret[f_root] if root != f_root: # grab subroot ref sroot = root above = [] # Populate the above dict while not os.path.samefile(sroot, f_root): base = os.path.basename(sroot) if base: above.insert(0, base) sroot = os.path.dirname(sroot) for aroot in above: sub = sub[aroot] for dir_ in dirs: sub[dir_] = {} for fn_ in files: sub[fn_] = 'f' return ret
Return all of the file paths found in an environment
def get_valid_location(location): """Check if the given location represents a valid cellular component.""" # If we're given None, return None if location is not None and cellular_components.get(location) is None: loc = cellular_components_reverse.get(location) if loc is None: raise InvalidLocationError(location) else: return loc return location
Check if the given location represents a valid cellular component.
def parse_state_machine_path(path):
    """Parser for argparse checking for a proper state machine path

    :param str path: Input path from the user
    :return: The path
    :raises argparse.ArgumentTypeError: if the path does not contain a statemachine.json file
    """
    sm_root_file = join(path, storage.STATEMACHINE_FILE)
    if exists(sm_root_file):
        return path
    else:
        sm_root_file = join(path, storage.STATEMACHINE_FILE_OLD)
        if exists(sm_root_file):
            return path
    raise argparse.ArgumentTypeError("Failed to open {0}: {1} not found in path".format(path, storage.STATEMACHINE_FILE))
Parser for argparse checking for a proper state machine path

:param str path: Input path from the user
:return: The path
:raises argparse.ArgumentTypeError: if the path does not contain a statemachine.json file
def is_module_or_package(path): """Return True if path is a Python module/package""" is_module = osp.isfile(path) and osp.splitext(path)[1] in ('.py', '.pyw') is_package = osp.isdir(path) and osp.isfile(osp.join(path, '__init__.py')) return is_module or is_package
Return True if path is a Python module/package
def GetSortedEvents(self, time_range=None): """Retrieves the events in increasing chronological order. Args: time_range (Optional[TimeRange]): time range used to filter events that fall in a specific period. Yield: EventObject: event. """ filter_expression = None if time_range: filter_expression = [] if time_range.start_timestamp: filter_expression.append( '_timestamp >= {0:d}'.format(time_range.start_timestamp)) if time_range.end_timestamp: filter_expression.append( '_timestamp <= {0:d}'.format(time_range.end_timestamp)) filter_expression = ' AND '.join(filter_expression) event_generator = self._GetAttributeContainers( self._CONTAINER_TYPE_EVENT, filter_expression=filter_expression, order_by='_timestamp') for event in event_generator: if hasattr(event, 'event_data_row_identifier'): event_data_identifier = identifiers.SQLTableIdentifier( 'event_data', event.event_data_row_identifier) event.SetEventDataIdentifier(event_data_identifier) del event.event_data_row_identifier yield event
Retrieves the events in increasing chronological order. Args: time_range (Optional[TimeRange]): time range used to filter events that fall in a specific period. Yield: EventObject: event.
def browser_authorize(self): """ Open a browser to the authorization url and spool up a CherryPy server to accept the response """ url = self.authorize_url() # Open the web browser in a new thread for command-line browser support threading.Timer(1, webbrowser.open, args=(url,)).start() server_config = { 'server.socket_host': '0.0.0.0', 'server.socket_port': 443, 'server.ssl_module': 'pyopenssl', 'server.ssl_certificate': 'tests/files/certificate.cert', 'server.ssl_private_key': 'tests/files/key.key', } cherrypy.config.update(server_config) cherrypy.quickstart(self)
Open a browser to the authorization url and spool up a CherryPy server to accept the response
def run(data): """HLA typing with OptiType, parsing output from called genotype files. """ hlas = [] for hla_fq in tz.get_in(["hla", "fastq"], data, []): hla_type = re.search("[.-](?P<hlatype>HLA-[\w-]+).fq", hla_fq).group("hlatype") if hla_type in SUPPORTED_HLAS: if utils.file_exists(hla_fq): hlas.append((hla_type, hla_fq)) if len(hlas) > 0: out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "align", dd.get_sample_name(data), "hla", "OptiType-HLA-A_B_C")) # When running UMIs and hla typing we want to pick the original fastqs if len(hlas) > len(SUPPORTED_HLAS): hlas = [x for x in hlas if os.path.basename(x[1]).find("-cumi") == -1] if len(hlas) == len(SUPPORTED_HLAS): hla_fq = combine_hla_fqs(hlas, out_dir + "-input.fq", data) if utils.file_exists(hla_fq): out_file = glob.glob(os.path.join(out_dir, "*", "*_result.tsv")) if len(out_file) > 0: out_file = out_file[0] else: out_file = _call_hla(hla_fq, out_dir, data) out_file = _prepare_calls(out_file, os.path.dirname(out_dir), data) data["hla"].update({"call_file": out_file, "hlacaller": "optitype"}) return data
HLA typing with OptiType, parsing output from called genotype files.
def bulk_overwrite(self, entities_and_kinds): """ Update the group to the given entities and sub-entity groups. After this operation, the only members of this EntityGroup will be the given entities, and sub-entity groups. :type entities_and_kinds: List of (Entity, EntityKind) pairs. :param entities_and_kinds: A list of entity, entity-kind pairs to set to the EntityGroup. In the pairs the entity-kind can be ``None``, to add a single entity, or some entity kind to add all sub-entities of that kind. """ EntityGroupMembership.objects.filter(entity_group=self).delete() return self.bulk_add_entities(entities_and_kinds)
Update the group to the given entities and sub-entity groups. After this operation, the only members of this EntityGroup will be the given entities, and sub-entity groups. :type entities_and_kinds: List of (Entity, EntityKind) pairs. :param entities_and_kinds: A list of entity, entity-kind pairs to set to the EntityGroup. In the pairs the entity-kind can be ``None``, to add a single entity, or some entity kind to add all sub-entities of that kind.
def signal_handler_mapping(self): """A dict mapping (signal number) -> (a method handling the signal).""" # Could use an enum here, but we never end up doing any matching on the specific signal value, # instead just iterating over the registered signals to set handlers, so a dict is probably # better. return { signal.SIGINT: self.handle_sigint, signal.SIGQUIT: self.handle_sigquit, signal.SIGTERM: self.handle_sigterm, }
A dict mapping (signal number) -> (a method handling the signal).
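A sketch of how such a mapping is typically consumed (handler names are illustrative):

import signal

def handle_sigint(signum, frame): ...
def handle_sigterm(signum, frame): ...

mapping = {signal.SIGINT: handle_sigint, signal.SIGTERM: handle_sigterm}
for signum, handler in mapping.items():
    signal.signal(signum, handler)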
def _get_geom_type(type_bytes):
    """Get the GeoJSON geometry type label from a WKB type byte string.

    :param type_bytes:
        4 byte string in big endian byte order containing a WKB type number.
        It may also contain a "has SRID" flag in the high byte (the first
        byte, since this is big endian byte order), indicated as 0x20. If the
        SRID flag is not set, the high byte will always be null (0x00).
    :returns:
        3-tuple of GeoJSON geometry type label, the bytes representing the
        geometry type, and a separate "has SRID" flag. If the input
        `type_bytes` contains an SRID flag, it will be removed.

        >>> # Z Point, with SRID flag
        >>> _get_geom_type(b'\\x20\\x00\\x03\\xe9') == (
        ...     'Point', b'\\x00\\x00\\x03\\xe9', True)
        True

        >>> # 2D MultiLineString, without SRID flag
        >>> _get_geom_type(b'\\x00\\x00\\x00\\x05') == (
        ...     'MultiLineString', b'\\x00\\x00\\x00\\x05', False)
        True
    """
    # slice off the high byte, which may contain the SRID flag
    high_byte = type_bytes[0]
    if six.PY3:
        high_byte = bytes([high_byte])
    has_srid = high_byte == b'\x20'
    if has_srid:
        # replace the high byte with a null byte
        type_bytes = as_bin_str(b'\x00' + type_bytes[1:])
    else:
        type_bytes = as_bin_str(type_bytes)

    # look up the geometry type
    geom_type = _BINARY_TO_GEOM_TYPE.get(type_bytes)
    return geom_type, type_bytes, has_srid
Get the GeoJSON geometry type label from a WKB type byte string.

:param type_bytes:
    4 byte string in big endian byte order containing a WKB type number.
    It may also contain a "has SRID" flag in the high byte (the first
    byte, since this is big endian byte order), indicated as 0x20. If the
    SRID flag is not set, the high byte will always be null (0x00).
:returns:
    3-tuple of GeoJSON geometry type label, the bytes representing the
    geometry type, and a separate "has SRID" flag. If the input
    `type_bytes` contains an SRID flag, it will be removed.

    >>> # Z Point, with SRID flag
    >>> _get_geom_type(b'\\x20\\x00\\x03\\xe9') == (
    ...     'Point', b'\\x00\\x00\\x03\\xe9', True)
    True

    >>> # 2D MultiLineString, without SRID flag
    >>> _get_geom_type(b'\\x00\\x00\\x00\\x05') == (
    ...     'MultiLineString', b'\\x00\\x00\\x00\\x05', False)
    True
def close(self): """ Closes the event streaming. """ if not self._response.raw.closed: # find the underlying socket object # based on api.client._get_raw_response_socket sock_fp = self._response.raw._fp.fp if hasattr(sock_fp, 'raw'): sock_raw = sock_fp.raw if hasattr(sock_raw, 'sock'): sock = sock_raw.sock elif hasattr(sock_raw, '_sock'): sock = sock_raw._sock elif hasattr(sock_fp, 'channel'): # We're working with a paramiko (SSH) channel, which doesn't # support cancelable streams with the current implementation raise DockerException( 'Cancellable streams not supported for the SSH protocol' ) else: sock = sock_fp._sock if hasattr(urllib3.contrib, 'pyopenssl') and isinstance( sock, urllib3.contrib.pyopenssl.WrappedSocket): sock = sock.socket sock.shutdown(socket.SHUT_RDWR) sock.close()
Closes the event streaming.
def version_tuple(self): """tuple[int]: version tuple or None if version is not set or invalid.""" try: return tuple([int(digit, 10) for digit in self.version.split('.')]) except (AttributeError, TypeError, ValueError): return None
tuple[int]: version tuple or None if version is not set or invalid.
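The same parsing applied to a hypothetical version string:

assert tuple(int(d, 10) for d in "2.7.1".split('.')) == (2, 7, 1)
# A missing or malformed version ("", None, "not.a.version") trips the
# except clause above and the property returns None instead of raising.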
def remove_child(self, index): """ Removes child at given index from the Node children. Usage:: >>> node_a = AbstractCompositeNode("MyNodeA") >>> node_b = AbstractCompositeNode("MyNodeB", node_a) >>> node_c = AbstractCompositeNode("MyNodeC", node_a) >>> node_a.remove_child(1) True >>> [child.name for child in node_a.children] [u'MyNodeB'] :param index: Node index. :type index: int :return: Removed child. :rtype: AbstractNode or AbstractCompositeNode or Object """ if index < 0 or index > len(self.__children): return child = self.__children.pop(index) child.parent = None return child
Removes child at given index from the Node children. Usage:: >>> node_a = AbstractCompositeNode("MyNodeA") >>> node_b = AbstractCompositeNode("MyNodeB", node_a) >>> node_c = AbstractCompositeNode("MyNodeC", node_a) >>> node_a.remove_child(1) True >>> [child.name for child in node_a.children] [u'MyNodeB'] :param index: Node index. :type index: int :return: Removed child. :rtype: AbstractNode or AbstractCompositeNode or Object
def fit(self, Xs=None, ys=None, Xt=None, yt=None):
    """Build a coupling matrix from source and target sets of samples
    (Xs, ys) and (Xt, yt)

    Parameters
    ----------
    Xs : array-like, shape (n_source_samples, n_features)
        The training input samples.
    ys : array-like, shape (n_source_samples,)
        The class labels
    Xt : array-like, shape (n_target_samples, n_features)
        The training input samples.
    yt : array-like, shape (n_target_samples,)
        The class labels. If some target samples are unlabeled, fill the
        yt's elements with -1.

        Warning: Note that, due to this convention -1 cannot be used as a
        class label
    Returns
    -------
    self : object
        Returns self.
    """

    # check that the necessary input parameters are here
    if check_params(Xs=Xs, Xt=Xt):

        # pairwise distance
        self.cost_ = dist(Xs, Xt, metric=self.metric)
        self.cost_ = cost_normalization(self.cost_, self.norm)

        if (ys is not None) and (yt is not None):

            if self.limit_max != np.infty:
                self.limit_max = self.limit_max * np.max(self.cost_)

            # assumes labeled source samples occupy the first rows
            # and labeled target samples occupy the first columns
            classes = [c for c in np.unique(ys) if c != -1]

            for c in classes:
                idx_s = np.where((ys != c) & (ys != -1))
                idx_t = np.where(yt == c)

                # all the coefficients corresponding to a source sample and a
                # target sample with different labels get an infinite cost
                for j in idx_t[0]:
                    self.cost_[idx_s[0], j] = self.limit_max

        # distribution estimation
        self.mu_s = self.distribution_estimation(Xs)
        self.mu_t = self.distribution_estimation(Xt)

        # store arrays of samples
        self.xs_ = Xs
        self.xt_ = Xt

    return self
Build a coupling matrix from source and target sets of samples (Xs, ys) and (Xt, yt) Parameters ---------- Xs : array-like, shape (n_source_samples, n_features) The training input samples. ys : array-like, shape (n_source_samples,) The class labels Xt : array-like, shape (n_target_samples, n_features) The training input samples. yt : array-like, shape (n_target_samples,) The class labels. If some target samples are unlabeled, fill the yt's elements with -1. Warning: Note that, due to this convention -1 cannot be used as a class label Returns ------- self : object Returns self.
def sync_proxy(self, mri, block): """Abstract method telling the ClientComms to sync this proxy Block with its remote counterpart. Should wait until it is connected Args: mri (str): The mri for the remote block block (BlockModel): The local proxy Block to keep in sync """ # Send a root Subscribe to the server subscribe = Subscribe(path=[mri], delta=True) done_queue = Queue() def handle_response(response): # Called from tornado if not isinstance(response, Delta): # Return or Error is the end of our subscription, log and ignore self.log.debug("Proxy got response %r", response) done_queue.put(None) else: cothread.Callback( self._handle_response, response, block, done_queue) subscribe.set_callback(handle_response) IOLoopHelper.call(self._send_request, subscribe) done_queue.get(timeout=DEFAULT_TIMEOUT)
Abstract method telling the ClientComms to sync this proxy Block with its remote counterpart. Should wait until it is connected Args: mri (str): The mri for the remote block block (BlockModel): The local proxy Block to keep in sync