positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def request(self, method, url, **kwargs):
    """Constructs a :class:`requests.Request`, prepares it and sends it.

    Raises HTTPErrors by default.

    :param method: method for the new :class:`Request` object.
    :type method: :class:`str`
    :param url: URL for the new :class:`Request` object.
    :type url: :class:`str`
    :param kwargs: keyword arguments of :meth:`requests.Session.request`
    :returns: a response object
    :rtype: :class:`requests.Response`
    :raises: :class:`requests.HTTPError`
    """
    if oauthlib.oauth2.is_secure_transport(url):
        # HTTPS: dispatch through this class's normal MRO so the
        # OAuth2 machinery (token injection) participates.
        m = super(OAuthSession, self).request
    else:
        # Plain HTTP: skip past OAuth2Session in the MRO — presumably to
        # avoid oauthlib refusing to send a token over an insecure
        # transport (NOTE(review): confirm intent).
        m = super(requests_oauthlib.OAuth2Session, self).request
    log.debug("%s \"%s\" with %s", method, url, kwargs)
    response = m(method, url, **kwargs)
    # Fail fast on 4xx/5xx instead of returning an error response.
    response.raise_for_status()
    return response
Constructs a :class:`requests.Request`, prepares it and sends it. Raises HTTPErrors by default. :param method: method for the new :class:`Request` object. :type method: :class:`str` :param url: URL for the new :class:`Request` object. :type url: :class:`str` :param kwargs: keyword arguments of :meth:`requests.Session.request` :returns: a response object :rtype: :class:`requests.Response` :raises: :class:`requests.HTTPError`
def can_pay_cost(self, card):
    """Return whether the player has the resources to play *card*.

    When the ``spells_cost_health`` effect is active, spells are paid
    with hero health instead of mana.
    """
    pays_with_health = self.spells_cost_health and card.type == CardType.SPELL
    if pays_with_health:
        # Paying with health must leave the hero alive, hence strict >.
        return self.hero.health > card.cost
    return self.mana >= card.cost
Returns whether the player can pay the resource cost of a card.
def calculate_similarity(container1=None, container2=None, comparison=None, metric=None):
    '''calculate_similarity will calculate similarity of two containers by files content,
    default will calculate 2.0*len(intersect) / total package1 + total package2

    Parameters
    ==========
    container1: container 1
    container2: container 2 must be defined or comparison provided
    metric: a function to take a total1, total2, and intersect count
            (we can make this more general if / when more are added)
            valid are currently files.txt or folders.txt
    comparison: the comparison result object for the tree. If provided,
                will skip over function to obtain it.
    '''
    if metric is None:
        metric = information_coefficient
    # Idiom fix: compare to None with "is", not "==" (PEP 8); also avoids
    # invoking arbitrary __eq__ on a comparison object.  Only run the
    # (potentially expensive) container comparison when none was supplied.
    if comparison is None:
        comparison = compare_containers(container1=container1,
                                        container2=container2)
    return metric(total1=comparison['total1'],
                  total2=comparison['total2'],
                  intersect=comparison["intersect"])
calculate_similarity will calculate similarity of two containers by files content, default will calculate 2.0*len(intersect) / total package1 + total package2 Parameters ========== container1: container 1 container2: container 2 must be defined or metric a function to take a total1, total2, and intersect count (we can make this more general if / when more are added) valid are currently files.txt or folders.txt comparison: the comparison result object for the tree. If provided, will skip over function to obtain it.
def _parse_datetime_default_value(property_name, default_value_string):
    """Parse and return the default value for a datetime property."""
    # OrientDB doesn't use ISO-8601 datetime format, so the value is parsed
    # manually; strptime() raises if the string doesn't match the format.
    parsed = time.strptime(default_value_string, ORIENTDB_DATETIME_FORMAT)
    # struct_time[0:6] is (year, month, day, hour, minute, second);
    # microseconds are zeroed and the result is timezone-naive.
    return datetime.datetime(*parsed[:6], 0, None)
Parse and return the default value for a datetime property.
def sc_cuts_coarse(self, viewer, event, msg=True):
    """Adjust cuts interactively by setting the low AND high cut levels.
    This function adjusts it coarsely.

    :param viewer: the viewer whose cut levels are adjusted
    :param event: scroll event; only its ``direction`` is used
    :param msg: whether to show an onscreen message for the change
    :returns: True to indicate the event was handled
    """
    if self.cancut:
        # adjust the cut by 10% on each end
        self._adjust_cuts(viewer, event.direction, 0.1, msg=msg)
    return True
Adjust cuts interactively by setting the low AND high cut levels. This function adjusts it coarsely.
def delete(self, row):
    """Remove the track value stored for *row*."""
    index = self._get_key_index(row)
    self.keys.pop(index)
Delete a track value
def extract_blocks(ubi):
    """Get a list of UBI block objects from file

    Arguments:.
    Obj:ubi    -- UBI object.

    Returns:
    Dict -- Of block objects keyed by PEB number.
    """
    blocks = {}
    ubi.file.seek(ubi.file.start_offset)
    peb_count = 0
    cur_offset = 0
    bad_blocks = []

    # range instead of xrange, as xrange breaks > 4GB end_offset.
    for i in range(ubi.file.start_offset, ubi.file.end_offset, ubi.file.block_size):
        try:
            buf = ubi.file.read(ubi.file.block_size)
        except Exception as e:
            if settings.warn_only_block_read_errors:
                error(extract_blocks, 'Error', 'PEB: %s: %s' % (ubi.first_peb_num + peb_count, str(e)))
                continue
            else:
                # 'Fatal' severity — error() is expected to abort here.
                error(extract_blocks, 'Fatal', 'PEB: %s: %s' % (ubi.first_peb_num + peb_count, str(e)))

        if buf.startswith(UBI_EC_HDR_MAGIC):
            # Valid erase-counter header magic: record this PEB.
            blk = description(buf)
            blk.file_offset = i
            blk.peb_num = ubi.first_peb_num + peb_count
            blk.size = ubi.file.block_size
            blocks[blk.peb_num] = blk
            peb_count += 1
            log(extract_blocks, blk)
            verbose_log(extract_blocks, 'file addr: %s' % (ubi.file.last_read_addr()))

            # Collect any header-level errors for diagnostics only; the
            # block is still kept in the result dict.
            ec_hdr_errors = ''
            vid_hdr_errors = ''

            if blk.ec_hdr.errors:
                ec_hdr_errors = ','.join(blk.ec_hdr.errors)

            if blk.vid_hdr and blk.vid_hdr.errors:
                vid_hdr_errors = ','.join(blk.vid_hdr.errors)

            if ec_hdr_errors or vid_hdr_errors:
                if blk.peb_num not in bad_blocks:
                    bad_blocks.append(blk.peb_num)
                    log(extract_blocks, 'PEB: %s has possible issue EC_HDR [%s], VID_HDR [%s]' % (blk.peb_num, ec_hdr_errors, vid_hdr_errors))

            verbose_display(blk)

        else:
            # Not a UBI block: advance the presumed start of the UBI area.
            # NOTE(review): this branch also fires for non-UBI data found
            # *after* valid blocks, moving first_peb_num/start_offset —
            # confirm that is intended.
            cur_offset += ubi.file.block_size
            ubi.first_peb_num = cur_offset/ubi.file.block_size
            ubi.file.start_offset = cur_offset

    return blocks
Get a list of UBI block objects from file Arguments:. Obj:ubi -- UBI object. Returns: Dict -- Of block objects keyed by PEB number.
def highres_imu_encode(self, time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag, abs_pressure, diff_pressure, pressure_alt, temperature, fields_updated):
    '''
    The IMU readings in SI units in NED body frame

    time_usec                 : Timestamp (microseconds, synced to UNIX time or since system boot) (uint64_t)
    xacc                      : X acceleration (m/s^2) (float)
    yacc                      : Y acceleration (m/s^2) (float)
    zacc                      : Z acceleration (m/s^2) (float)
    xgyro                     : Angular speed around X axis (rad / sec) (float)
    ygyro                     : Angular speed around Y axis (rad / sec) (float)
    zgyro                     : Angular speed around Z axis (rad / sec) (float)
    xmag                      : X Magnetic field (Gauss) (float)
    ymag                      : Y Magnetic field (Gauss) (float)
    zmag                      : Z Magnetic field (Gauss) (float)
    abs_pressure              : Absolute pressure in millibar (float)
    diff_pressure             : Differential pressure in millibar (float)
    pressure_alt              : Altitude calculated from pressure (float)
    temperature               : Temperature in degrees celsius (float)
    fields_updated            : Bitmask for fields that have updated since last message, bit 0 = xacc, bit 12: temperature (uint16_t)
    '''
    # Pure constructor wrapper: packs the arguments into a MAVLink message
    # object; no validation or serialization happens at this point.
    return MAVLink_highres_imu_message(time_usec, xacc, yacc, zacc, xgyro, ygyro, zgyro, xmag, ymag, zmag, abs_pressure, diff_pressure, pressure_alt, temperature, fields_updated)
The IMU readings in SI units in NED body frame time_usec : Timestamp (microseconds, synced to UNIX time or since system boot) (uint64_t) xacc : X acceleration (m/s^2) (float) yacc : Y acceleration (m/s^2) (float) zacc : Z acceleration (m/s^2) (float) xgyro : Angular speed around X axis (rad / sec) (float) ygyro : Angular speed around Y axis (rad / sec) (float) zgyro : Angular speed around Z axis (rad / sec) (float) xmag : X Magnetic field (Gauss) (float) ymag : Y Magnetic field (Gauss) (float) zmag : Z Magnetic field (Gauss) (float) abs_pressure : Absolute pressure in millibar (float) diff_pressure : Differential pressure in millibar (float) pressure_alt : Altitude calculated from pressure (float) temperature : Temperature in degrees celsius (float) fields_updated : Bitmask for fields that have updated since last message, bit 0 = xacc, bit 12: temperature (uint16_t)
def file(self, name, *args, **kwargs):
    """Open a file located under ``self.root``.

    :param name: Filename appended to self.root
    :param args: passed to open()
    :param kwargs: passed to open()
    :rtype: file
    """
    # self(name) resolves the filename against the root directory.
    resolved = self(name)
    return open(resolved, *args, **kwargs)
Open a file. :param name: Filename appended to self.root :param args: passed to open() :param kwargs: passed to open() :rtype: file
def exists(self, record_key):
    ''' a method to determine if a record exists in collection

    :param record_key: string with key of record
    :return: boolean reporting status
    '''
    title = '%s.exists' % self.__class__.__name__

    # validate inputs
    for key, value in {'record_key': record_key}.items():
        object_title = '%s(%s=%s)' % (title, key, str(value))
        self.fields.validate(value, '.%s' % key, object_title)

    # probe for the record's metadata; a not_found lookup error means the
    # record is absent, anything else is a connection problem
    try:
        self.dropbox.files_get_metadata('/%s' % record_key)
    except Exception as err:
        if "LookupError('not_found'" in str(err):
            return False
        raise DropboxConnectionError(title)
    return True
a method to determine if a record exists in collection :param record_key: string with key of record :return: boolean reporting status
def get_package_version(self, feed, group_id, artifact_id, version, show_deleted=None):
    """GetPackageVersion.
    [Preview API] Get information about a package version.
    :param str feed: Name or ID of the feed.
    :param str group_id: Group ID of the package.
    :param str artifact_id: Artifact ID of the package.
    :param str version: Version of the package.
    :param bool show_deleted: True to show information for deleted packages.
    :rtype: :class:`<Package> <azure.devops.v5_1.maven.models.Package>`
    """
    # Build URL route segments, skipping parameters that were not given.
    route_values = {}
    for route_key, param_name, value in (
            ('feed', 'feed', feed),
            ('groupId', 'group_id', group_id),
            ('artifactId', 'artifact_id', artifact_id),
            ('version', 'version', version)):
        if value is not None:
            route_values[route_key] = self._serialize.url(param_name, value, 'str')
    query_parameters = {}
    if show_deleted is not None:
        query_parameters['showDeleted'] = self._serialize.query('show_deleted', show_deleted, 'bool')
    response = self._send(http_method='GET',
                          location_id='180ed967-377a-4112-986b-607adb14ded4',
                          version='5.1-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('Package', response)
GetPackageVersion. [Preview API] Get information about a package version. :param str feed: Name or ID of the feed. :param str group_id: Group ID of the package. :param str artifact_id: Artifact ID of the package. :param str version: Version of the package. :param bool show_deleted: True to show information for deleted packages. :rtype: :class:`<Package> <azure.devops.v5_1.maven.models.Package>`
def delete_subscription(self, subscription_id):
    """Delete a single subscription identified by *subscription_id*."""
    endpoint = self.SUBSCRIPTIONS_ID_URL % subscription_id
    conn = Connection(self.token)
    conn.set_url(self.production, endpoint)
    return conn.delete_request()
Delete single subscription
def save_selected_state_as():
    """Save selected state as separate state machine

    :return True if successfully stored, False if the storing process was canceled or stopped by condition fail
    :rtype bool:
    :raises exceptions.ValueError: If dialog response ids are out of bounds
    """
    state_machine_manager_model = rafcon.gui.singleton.state_machine_manager_model
    selection = state_machine_manager_model.get_selected_state_machine_model().selection
    selected_state = selection.get_selected_state()
    state_machine_id = state_machine_manager_model.get_selected_state_machine_model().state_machine.state_machine_id
    # Only a single selected state can be saved as its own state machine.
    if len(selection.states) == 1:
        state_m = copy.copy(selected_state)
        sm_m = StateMachineModel(StateMachine(root_state=state_m.state))
        sm_m.root_state = state_m
        path = interface.create_folder_func("Please choose a root folder and a folder name for the state-machine your "
                                            "state is saved in. The default folder name is the name of state.",
                                            format_default_folder_name(selected_state.state.name))
        if path:
            storage.save_state_machine_to_path(sm_m.state_machine, base_path=path)
            sm_m.store_meta_data()
        else:
            logger.warning("No valid path specified")
            return False

        def open_as_state_machine_saved_state_as_separate_state_machine():
            # Helper: open the freshly stored state machine in the editor.
            logger.debug("Open state machine.")
            try:
                open_state_machine(path=path, recent_opened_notification=True)
            except (ValueError, IOError) as e:
                logger.error('Error while trying to open state machine: {0}'.format(e))

        # check if state machine is in library path
        root_window = rafcon.gui.singleton.main_window_controller.get_root_window()
        if library_manager.is_os_path_within_library_root_paths(path):
            _library_path, _library_name = \
                library_manager.get_library_path_and_name_for_os_path(sm_m.state_machine.file_system_path)
            overwrote_old_lib = library_manager.is_library_in_libraries(_library_path, _library_name)

            # NOTE(review): string reconstructed from a garbled source line —
            # the newline placement should be confirmed against the original.
            message_string = "You stored your state machine in a path that is within the library root paths. \n" \
                             "Thereby your state machine can be used as a library state.\n\n" \
                             "Do you want to:"
            table_header = ["Option", "Description"]
            table_data = [(True, "Substitute the original state by this new library state."),
                          (True, "Open the newly created library state machine.")]
            if overwrote_old_lib:
                table_data.append((False, "Refresh all open state machines, as an already existing library was "
                                          "overwritten."))

            dialog = RAFCONCheckBoxTableDialog(message_string,
                                               button_texts=("Apply", "Cancel"),
                                               table_header=table_header, table_data=table_data,
                                               message_type=Gtk.MessageType.QUESTION,
                                               parent=root_window, width=800, standalone=False)
            response_id = dialog.run()

            if response_id == 1:  # Apply pressed
                if overwrote_old_lib and dialog.list_store[2][0]:  # refresh all open state machine selected
                    logger.debug("Refresh all is triggered.")
                    refresh_all()
                else:  # if not all was refreshed at least the libraries are refreshed
                    logger.debug("Library refresh is triggered.")
                    refresh_libraries()

                if dialog.list_store[0][0]:  # Substitute saved state with Library selected
                    logger.debug("Substitute saved state with Library.")
                    if dialog.list_store[0][0] or dialog.list_store[0][1]:
                        refresh_libraries()
                    state_machine_manager_model.selected_state_machine_id = state_machine_id
                    [library_path, library_name] = library_manager.get_library_path_and_name_for_os_path(path)
                    state = library_manager.get_library_instance(library_path, library_name)
                    try:
                        substitute_selected_state(state, as_template=False)
                    except ValueError as e:
                        logger.error('Error while trying to open state machine: {0}'.format(e))
                if dialog.list_store[1][0]:  # Open as state machine saved state as separate state machine selected
                    open_as_state_machine_saved_state_as_separate_state_machine()
            elif response_id in [2, -4]:  # Cancel or Close pressed
                pass
            else:
                raise ValueError("Response id: {} is not considered".format(response_id))
            dialog.destroy()
        else:
            # Offer to open saved state machine dialog
            # NOTE(review): string reconstructed from a garbled source line.
            message_string = "Should the newly created state machine be \nopened?"
            dialog = RAFCONButtonDialog(message_string, ["Open", "Do not open"],
                                        message_type=Gtk.MessageType.QUESTION, parent=root_window)
            response_id = dialog.run()
            if response_id == 1:  # Apply pressed
                open_as_state_machine_saved_state_as_separate_state_machine()
            elif response_id in [2, -4]:  # Cancel or Close pressed
                pass
            else:
                raise ValueError("Response id: {} is not considered".format(response_id))
            dialog.destroy()
        return True
    else:
        logger.warning("Multiple states can not be saved as state machine directly. Group them before.")
        return False
Save selected state as separate state machine :return True if successfully stored, False if the storing process was canceled or stopped by condition fail :rtype bool: :raises exceptions.ValueError: If dialog response ids are out of bounds
def _send_block_request(self, transaction):
    """
    A former request resulted in a block wise transfer. With this method, the block wise transfer
    will be continued, including triggering of the retry mechanism.

    :param transaction: The former transaction including the request which should be continued.
    """
    # Re-run the request through the message layer so it is tracked anew.
    updated = self._messageLayer.send_request(transaction.request)
    request = updated.request
    # ... but don't forget to reset the acknowledge flag
    request.acknowledged = False
    self.send_datagram(request)
    # Confirmable messages need the retransmission timer armed again.
    if request.type == defines.Types["CON"]:
        self._start_retransmission(updated, request)
A former request resulted in a block wise transfer. With this method, the block wise transfer will be continued, including triggering of the retry mechanism. :param transaction: The former transaction including the request which should be continued.
def issue(self, id, fields=None, expand=None):
    """Get an issue Resource from the server.

    :param id: ID or key of the issue to get
    :type id: Union[Issue, str]
    :param fields: comma-separated string of issue fields to include in the results
    :type fields: Optional[str]
    :param expand: extra information to fetch inside each resource
    :type expand: Optional[str]
    :rtype: Issue
    """
    # Passing an already-fetched Issue through is a no-op.
    if isinstance(id, Issue):
        return id
    params = {key: value
              for key, value in (('fields', fields), ('expand', expand))
              if value is not None}
    result = Issue(self._options, self._session)
    result.find(id, params=params)
    return result
Get an issue Resource from the server. :param id: ID or key of the issue to get :type id: Union[Issue, str] :param fields: comma-separated string of issue fields to include in the results :type fields: Optional[str] :param expand: extra information to fetch inside each resource :type expand: Optional[str] :rtype: Issue
def const(const):
    '''Convenience wrapper to yield the value of a constant'''
    try:
        return getattr(_c, const)
    except AttributeError:
        # The name is a string but no such constant is defined.
        raise FSQEnvError(errno.EINVAL, u'No such constant:'
                          u' {0}'.format(const))
    except TypeError:
        # getattr() rejects non-string attribute names.
        raise TypeError(errno.EINVAL, u'const name must be a string or'
                        u' unicode object, not:'
                        u' {0}'.format(const.__class__.__name__))
Convenience wrapper to yield the value of a constant
async def generate_access_token(self, user):
    """
    Generate an access token for a given user.
    """
    payload = await self._get_payload(user)
    token = jwt.encode(payload, self._get_secret(True),
                       algorithm=self._get_algorithm())
    # jwt.encode returns bytes here; expose a plain string to callers.
    return token.decode("utf-8")
Generate an access token for a given user.
def list_datastore_full(service_instance, datastore):
    '''
    Returns a dictionary with the basic information for the given datastore:
    name, type, url, capacity, free, used, usage, hosts

    service_instance
        The Service Instance Object from which to obtain datastores.

    datastore
        Name of the datastore.
    '''
    ds_ref = get_mor_by_name(service_instance, vim.Datastore, datastore)
    if not ds_ref:
        raise salt.exceptions.VMwareObjectRetrievalError(
            'Datastore \'{0}\' does not exist.'.format(datastore)
        )

    summary = ds_ref.summary
    # stringified pyVmomi values can carry quotes; strip them for output
    items = {
        'name': str(summary.name).replace("'", ""),
        'type': str(summary.type).replace("'", ""),
        'url': str(summary.url).replace("'", ""),
        'capacity': summary.capacity / 1024 / 1024,
        'free': summary.freeSpace / 1024 / 1024,
    }
    items['used'] = items['capacity'] - items['free']
    items['usage'] = (float(items['used']) / float(items['capacity'])) * 100
    items['hosts'] = []
    for host in ds_ref.host:
        # host.key stringifies as "<type>:<moid>"; keep only the moid part
        host_key = str(host.key).replace("'", "").split(":", 1)[1]
        host_object = get_mor_by_moid(service_instance, vim.HostSystem, host_key)
        items['hosts'].append(host_object.name)
    return items
Returns a dictionary with the basic information for the given datastore: name, type, url, capacity, free, used, usage, hosts service_instance The Service Instance Object from which to obtain datastores. datastore Name of the datastore.
def _create_err(self, errclass: str, *args) -> "Err":
    """Instantiate an error of *errclass*, register it, and return it."""
    err = self._new_err(errclass, *args)
    self._add(err)
    return err
Create an error
def clean(df, error_rate=0):
    """ Superficially cleans data, i.e. changing simple things about formatting.
    Parameters:
    df - DataFrame
        DataFrame to clean
    error_rate - float {0 <= error_rate <= 1}, default 0
        Maximum amount of errors/inconsistencies caused explicitly by cleaning,
        expressed as a percentage of total dataframe rows (0 = 0%, .5 = 50%, etc.)
        Ex: na values from coercing a column of data to numeric
    """
    # Work on a copy so the caller's DataFrame is never mutated.
    df = df.copy()

    # Change colnames
    basics.clean_colnames(df)
    # Eventually use a more advanced function to clean colnames
    print('Changed colnames to {}'.format(df.columns))

    # Remove extra whitespace
    obj_col_list = df.select_dtypes(include='object').columns
    for col_name in obj_col_list:
        df[col_name] = basics.col_strip(df, col_name)
        print("Stripped extra whitespace from '{}'".format(col_name))

    # Coerce columns if possible
    for col_name in obj_col_list:
        new_dtype = coerce_col(df, col_name, error_rate)
        if new_dtype is not None:
            print("Coerced '{}' to datatype '{}'".format(col_name, new_dtype))

    # Scrub columns (only the columns still of object dtype after coercion)
    obj_col_list = df.select_dtypes(include='object').columns
    for col_name in obj_col_list:
        scrubf, scrubb = smart_scrub(df, col_name, 1 - error_rate)
        if scrubf is not None or scrubb is not None:
            print("Scrubbed '{}' from the front and '{}' from the back of column '{}'"
                  .format(scrubf, scrubb, col_name))

    # Coerce columns if possible (again — scrubbing may have made
    # previously mixed columns parseable)
    for col_name in obj_col_list:
        new_dtype = coerce_col(df, col_name, error_rate)
        if new_dtype is not None:
            print("Coerced '{}' to datatype '{}'".format(col_name, new_dtype))

    return df
Superficially cleans data, i.e. changing simple things about formatting. Parameters: df - DataFrame DataFrame to clean error_rate - float {0 <= error_rate <= 1}, default 0 Maximum amount of errors/inconsistencies caused explicitly by cleaning, expressed as a percentage of total dataframe rows (0 = 0%, .5 = 50%, etc.) Ex: na values from coercing a column of data to numeric
def copy(self, path, dest, raise_if_exists=False):
    """
    Copies the contents of a single file path to dest

    :param path: source path to read
    :param dest: destination path to write
    :param raise_if_exists: when True, refuse to overwrite an existing dest
    :raises RuntimeError: if raise_if_exists is set and dest already exists
    """
    if raise_if_exists and dest in self.get_all_data():
        # Bug fix: report the *destination* that exists, not the source path.
        raise RuntimeError('Destination exists: %s' % dest)
    contents = self.get_all_data()[path]
    self.get_all_data()[dest] = contents
Copies the contents of a single file path to dest
def __get_return_value_no_withargs(self, *args, **kwargs):
    """
    Pre-conditions:
    (1) The user has created a stub and specified the stub behaviour
    (2) The user has called the stub function with the specified "args" and "kwargs"
    (3) No 'withArgs' conditions were applicable in this case

    Args:
        args: tuple, the arguments inputed by the user
        kwargs: dictionary, the keyword arguments inputed by the user
    Returns:
        any type, the appropriate return value, based on the stub's behaviour setup and the user input
    """
    c = self._conditions
    call_count = self._wrapper.callCount
    # if there might be applicable onCall conditions
    if call_count in c["oncall"]:
        # indices of pure onCall conditions (no args/kwargs constraint set)
        index_list = [i for i, x in enumerate(c["oncall"])
                      if x and not c["args"][i] and not c["kwargs"][i]]
        # later-registered conditions take precedence, hence reversed()
        for i in reversed(index_list):
            # if the onCall condition applies
            if call_count == c["oncall"][i]:
                return c["action"][i](*args, **kwargs)
    # else all conditions did not match
    return c["default"](*args, **kwargs)
Pre-conditions: (1) The user has created a stub and specified the stub behaviour (2) The user has called the stub function with the specified "args" and "kwargs" (3) No 'withArgs' conditions were applicable in this case Args: args: tuple, the arguments inputed by the user kwargs: dictionary, the keyword arguments inputed by the user Returns: any type, the appropriate return value, based on the stub's behaviour setup and the user input
def get_sub_array_ids(self):
    """Get list of sub array ids.

    :returns: sorted list of distinct ``sub_array_id`` values taken from
        the details of all scheduling block instances.
    """
    scheduling_block_ids = list(self.get_sched_block_instance_ids())
    # Fetch details once for the full id list.  The original re-queried
    # the details inside the accumulation loop (quadratic work) before
    # de-duplicating, which produced the same final result.
    block_details = self.get_block_details(scheduling_block_ids)
    sub_array_ids = {details['sub_array_id'] for details in block_details}
    return sorted(sub_array_ids)
Get list of sub array ids
def wiki_pages(self, extra_params=None):
    """
    All Wiki Pages with access to this Space
    """
    rel_path = self._build_rel_path('wiki_pages')
    return self.api._get_json(WikiPage, space=self, rel_path=rel_path,
                              extra_params=extra_params)
All Wiki Pages with access to this Space
def create_compiler_path(xml_generator, compiler_path):
    """
    Try to guess a path for the compiler.

    If you want to use a specific compiler, please provide the compiler
    path manually, as the guess may not be what you are expecting.
    Providing the path can be done by passing it as an argument
    (compiler_path) to the xml_generator_configuration_t() or by defining
    it in your pygccxml configuration file.
    """
    if xml_generator == 'castxml' and compiler_path is None:
        if platform.system() == 'Windows':
            # Look for msvc first, then fall back to mingw.
            compiler_path = _query_compiler_path('where', 'cl')
            if compiler_path == '':
                compiler_path = _query_compiler_path('where', 'mingw')
        else:
            # OS X or Linux: look for clang first, then gcc.
            compiler_path = _query_compiler_path('which', 'clang++')
            if compiler_path == '':
                compiler_path = _query_compiler_path('which', 'c++')

        if compiler_path == "":
            compiler_path = None

    return compiler_path


def _query_compiler_path(locator, name):
    """Run the platform lookup tool (`where`/`which`) for *name*.

    Returns the located path, or '' when the tool found nothing.
    """
    p = subprocess.Popen(
        [locator, name],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    path = p.stdout.read().decode("utf-8").rstrip()
    p.wait()
    p.stdout.close()
    p.stderr.close()
    return path
Try to guess a path for the compiler. If you want to use a specific compiler, please provide the compiler path manually, as the guess may not be what you are expecting. Providing the path can be done by passing it as an argument (compiler_path) to the xml_generator_configuration_t() or by defining it in your pygccxml configuration file.
def _set_circ_chstats(self, v, load=False):
    """
    Setter method for circ_chstats, mapped from YANG variable /isis_state/interface_detail/isis_intf/circ_chstats (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_circ_chstats is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_circ_chstats() directly.

    YANG Description: ISIS circuit change statistics
    """
    # Unwrap values that carry their own YANG type constructor.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce the incoming value into the generated container type;
        # TypeError/ValueError signal an incompatible value.
        t = YANGDynClass(v,base=circ_chstats.circ_chstats, is_container='container', presence=False, yang_name="circ-chstats", rest_name="circ-chstats", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-circuit-change-stats', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """circ_chstats must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=circ_chstats.circ_chstats, is_container='container', presence=False, yang_name="circ-chstats", rest_name="circ-chstats", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-circuit-change-stats', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)""",
        })
    self.__circ_chstats = t
    # Notify the change machinery, if this object participates in one.
    if hasattr(self, '_set'):
        self._set()
Setter method for circ_chstats, mapped from YANG variable /isis_state/interface_detail/isis_intf/circ_chstats (container) If this variable is read-only (config: false) in the source YANG file, then _set_circ_chstats is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_circ_chstats() directly. YANG Description: ISIS circuit change statistics
def load_session_from_file(self, username: str, filename: Optional[str] = None) -> None:
    """Internally stores :class:`requests.Session` object loaded from file.

    If filename is None, the file with the default session path is loaded.

    :raises FileNotFoundError: If the file does not exist.
    """
    path = filename if filename is not None else get_default_session_filename(username)
    with open(path, 'rb') as sessionfile:
        self.context.load_session_from_file(username, sessionfile)
        self.context.log("Loaded session from %s." % path)
Internally stores :class:`requests.Session` object loaded from file. If filename is None, the file with the default session path is loaded. :raises FileNotFoundError: If the file does not exist.
def clean(self): """Remove internal fields""" doc = self._resource result = {k: v for k, v in doc.iteritems() if k not in self.internal_fields} if '_id' in doc and 'id' not in result: result['id'] = doc['_id'] return result
Remove internal fields
def get_state_all(self):
    """Returns all device states"""
    # Map every known device name to its current state.
    return {device: self.get_state(device)
            for device in self.get_device_names()}
Returns all device states
def _get_timestamp(dirname_full, remove):
    """
    Get the timestamp from the timestamp file.
    Optionally mark it for removal if we're going to write another one.
    """
    record_filename = os.path.join(dirname_full, RECORD_FILENAME)
    if not os.path.exists(record_filename):
        return None
    mtime = os.stat(record_filename).st_mtime
    print('Found timestamp {}:{}'.format(dirname_full, datetime.fromtimestamp(mtime)))
    if Settings.record_timestamp and remove:
        # Remember this record so a later pass can delete the stale file.
        OLD_TIMESTAMPS.add(record_filename)
    return mtime
Get the timestamp from the timestamp file. Optionally mark it for removal if we're going to write another one.
def replace_parent(self, parent_simples):
    """If ``&`` (or the legacy xCSS equivalent ``self``) appears in this
    selector, replace it with the given iterable of parent selectors.

    Returns a tuple of simple selectors.
    """
    assert parent_simples

    ancestors = parent_simples[:-1]
    parent = parent_simples[-1]

    did_replace = False
    new_tokens = []
    # Only the first parent reference is substituted; later ones pass through.
    for token in self.tokens:
        if not did_replace and token in ('&', 'self'):
            did_replace = True
            new_tokens.extend(parent.tokens)
            if token == 'self':
                warn(FutureWarning(
                    "The xCSS 'self' selector is deprecated and will be "
                    "removed in 2.0. Use & instead. ({0!r})"
                    .format(self)
                ))
        else:
            new_tokens.append(token)

    if not did_replace:
        # This simple selector doesn't contain a parent reference so just
        # stick it on the end
        return parent_simples + (self,)

    # This simple selector was merged into the direct parent.
    merged_self = type(self)(parent.combinator, new_tokens)
    selector = ancestors + (merged_self,)
    # Our combinator goes on the first ancestor, i.e., substituting "foo
    # bar baz" into "+ &.quux" produces "+ foo bar baz.quux". This means a
    # potential conflict with the first ancestor's combinator!
    root = selector[0]
    if not _is_combinator_subset_of(self.combinator, root.combinator):
        raise ValueError(
            "Can't sub parent {0!r} into {1!r}: "
            "combinators {2!r} and {3!r} conflict!"
            .format(
                parent_simples, self, self.combinator, root.combinator))
    # Re-root with our own combinator applied to the first ancestor.
    root = type(self)(self.combinator, root.tokens)
    selector = (root,) + selector[1:]
    return tuple(selector)
If ``&`` (or the legacy xCSS equivalent ``self``) appears in this selector, replace it with the given iterable of parent selectors. Returns a tuple of simple selectors.
def get_description(self, twig=None, **kwargs):
    """Return the description of the parameter matching the filter.

    :param twig: (optional) twig used to filter for the parameter
    :param kwargs: additional filter criteria forwarded to :meth:`get_parameter`
    :return: the matched parameter's description
    """
    param = self.get_parameter(twig=twig, **kwargs)
    return param.get_description()
TODO: add documentation
def typedefs( self, name=None, function=None, header_dir=None, header_file=None, recursive=None, allow_empty=None): """returns a set of typedef declarations, that are matched defined criteria""" return ( self._find_multiple( self._impl_matchers[scopedef_t.typedef], name=name, function=function, decl_type=self._impl_decl_types[ scopedef_t.typedef], header_dir=header_dir, header_file=header_file, recursive=recursive, allow_empty=allow_empty) )
returns a set of typedef declarations, that are matched defined criteria
def debug(msg: str, resource: Optional['Resource'] = None, stream_id: Optional[int] = None) -> None: """ Logs a message to the Pulumi CLI's debug channel, associating it with a resource and stream_id if provided. :param str msg: The message to send to the Pulumi CLI. :param Optional[Resource] resource: If provided, associate this message with the given resource in the Pulumi CLI. :param Optional[int] stream_id: If provided, associate this message with a stream of other messages. """ engine = get_engine() if engine is not None: _log(engine, engine_pb2.DEBUG, msg, resource, stream_id) else: print("debug: " + msg, file=sys.stderr)
Logs a message to the Pulumi CLI's debug channel, associating it with a resource and stream_id if provided. :param str msg: The message to send to the Pulumi CLI. :param Optional[Resource] resource: If provided, associate this message with the given resource in the Pulumi CLI. :param Optional[int] stream_id: If provided, associate this message with a stream of other messages.
def parse(cls, parser, token): """ Custom parsing for the ``{% ajax_comment_tags for ... %}`` tag. """ # Process the template line. tag_name, args, kwargs = parse_token_kwargs( parser, token, allowed_kwargs=cls.allowed_kwargs, compile_args=False, # Only overrule here, keep at render() phase. compile_kwargs=cls.compile_kwargs ) # remove "for" keyword, so all other args can be resolved in render(). if args[0] == 'for': args.pop(0) # And apply the compilation afterwards for i in range(len(args)): args[i] = parser.compile_filter(args[i]) cls.validate_args(tag_name, *args, **kwargs) return cls(tag_name, *args, **kwargs)
Custom parsing for the ``{% ajax_comment_tags for ... %}`` tag.
def set_timezone(tz=None, deploy=False): ''' Set the timezone of the Palo Alto proxy minion. A commit will be required before this is processed. CLI Example: Args: tz (str): The name of the timezone to set. deploy (bool): If true then commit the full candidate configuration, if false only set pending change. .. code-block:: bash salt '*' panos.set_timezone UTC salt '*' panos.set_timezone UTC deploy=True ''' if not tz: raise CommandExecutionError("Timezone name option must not be none.") ret = {} query = {'type': 'config', 'action': 'set', 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/timezone', 'element': '<timezone>{0}</timezone>'.format(tz)} ret.update(__proxy__['panos.call'](query)) if deploy is True: ret.update(commit()) return ret
Set the timezone of the Palo Alto proxy minion. A commit will be required before this is processed. CLI Example: Args: tz (str): The name of the timezone to set. deploy (bool): If true then commit the full candidate configuration, if false only set pending change. .. code-block:: bash salt '*' panos.set_timezone UTC salt '*' panos.set_timezone UTC deploy=True
def detect_hooks(): """ Returns True if the import hooks are installed, False if not. """ flog.debug('Detecting hooks ...') present = any([hasattr(hook, 'RENAMER') for hook in sys.meta_path]) if present: flog.debug('Detected.') else: flog.debug('Not detected.') return present
Returns True if the import hooks are installed, False if not.
def add_files(self, files): """ Adds files to this project. :param files: List of files or a Collection object. """ for file in files: file.copy(project=self.id)
Adds files to this project. :param files: List of files or a Collection object.
def __header(self, line): """Build the header (contain the number of CPU). CPU0 CPU1 CPU2 CPU3 0: 21 0 0 0 IO-APIC 2-edge timer """ self.cpu_number = len(line.split()) return self.cpu_number
Build the header (contain the number of CPU). CPU0 CPU1 CPU2 CPU3 0: 21 0 0 0 IO-APIC 2-edge timer
def restore(self, time=None): """ Undeletes the object. Returns True if undeleted, False if it was already not deleted """ if self.deleted: time = time if time else self.deleted_at if time == self.deleted_at: self.deleted = False self.save() return True else: return False return False
Undeletes the object. Returns True if undeleted, False if it was already not deleted
def perturb_vec(q, cone_half_angle=2): r"""Perturb a vector randomly qp = perturb_vec(q, cone_half_angle=2) Parameters ---------- q : (n,) numpy array Vector to perturb cone_half_angle : float Maximum angle to perturb the vector in degrees Returns ------- perturbed : (n,) numpy array Perturbed numpy array Author ------ Shankar Kulumani GWU skulumani@gwu.edu References ---------- .. [1] https://stackoverflow.com/questions/2659257/perturb-vector-by-some-angle """ rand_vec = tan_rand(q) cross_vector = attitude.unit_vector(np.cross(q, rand_vec)) s = np.random.uniform(0, 1, 1) r = np.random.uniform(0, 1, 1) h = np.cos(np.deg2rad(cone_half_angle)) phi = 2 * np.pi * s z = h + ( 1- h) * r sinT = np.sqrt(1 - z**2) x = np.cos(phi) * sinT y = np.sin(phi) * sinT perturbed = rand_vec * x + cross_vector * y + q * z return perturbed
r"""Perturb a vector randomly qp = perturb_vec(q, cone_half_angle=2) Parameters ---------- q : (n,) numpy array Vector to perturb cone_half_angle : float Maximum angle to perturb the vector in degrees Returns ------- perturbed : (n,) numpy array Perturbed numpy array Author ------ Shankar Kulumani GWU skulumani@gwu.edu References ---------- .. [1] https://stackoverflow.com/questions/2659257/perturb-vector-by-some-angle
def tenses(self, verb, parse=True): """ Returns a list of possible tenses for the given inflected verb. """ verb = verb.lower() a = set() b = self.lemma(verb, parse=parse) v = [] if b in self: v = self[b] elif parse is True: # rule-based v = self.find_lexeme(b) # For each tense in the verb lexeme that matches the given tense, # 1) retrieve the tense tuple, # 2) retrieve the tense tuples for which that tense is a default. for i, tense in enumerate(v): if tense == verb: for id, index in self._format.items(): if i == index: a.add(id) for id1, id2 in self._default.items(): if id2 in a: a.add(id1) for id1, id2 in self._default.items(): if id2 in a: a.add(id1) a = (TENSES[id][:-2] for id in a) a = Tenses(sorted(a)) return a
Returns a list of possible tenses for the given inflected verb.
def schedule_forced_svc_check(self, service, check_time): """Schedule a forced check on a service Format of the line that triggers function call:: SCHEDULE_FORCED_SVC_CHECK;<host_name>;<service_description>;<check_time> :param service: service to check :type service: alignak.object.service.Service :param check_time: time to check :type check_time: int :return: None """ service.schedule(self.daemon.hosts, self.daemon.services, self.daemon.timeperiods, self.daemon.macromodulations, self.daemon.checkmodulations, self.daemon.checks, force=True, force_time=check_time) self.send_an_element(service.get_update_status_brok())
Schedule a forced check on a service Format of the line that triggers function call:: SCHEDULE_FORCED_SVC_CHECK;<host_name>;<service_description>;<check_time> :param service: service to check :type service: alignak.object.service.Service :param check_time: time to check :type check_time: int :return: None
def _process_macro_args(self): """Macro args are pretty tricky! Arg names themselves are simple, but you can set arbitrary default values, including doing stuff like: {% macro my_macro(arg="x" + ("}% {# {% endmacro %}" * 2)) %} Which makes you a jerk, but is valid jinja. """ # we are currently after the first parenthesis (+ any whitespace) after # the macro args started. You can either have the close paren, or a # name. while self._parenthesis_stack: match = self._expect_match('macro arguments', MACRO_ARGS_END_PATTERN, MACRO_ARG_PATTERN) self.advance(match.end()) matchgroups = match.groupdict() if matchgroups.get('macro_end') is not None: self._parenthesis_stack.pop() # we got an argument. let's see what it has elif matchgroups.get('value') is not None: # we have to process a single macro argument. This mutates # the parenthesis stack! If it finds a comma, it will continue # the loop. self._process_macro_default_arg() elif matchgroups.get('more_args') is not None: continue else: raise dbt.exceptions.InternalException( 'unhandled regex in _process_macro_args(), no match: {}' .format(matchgroups) )
Macro args are pretty tricky! Arg names themselves are simple, but you can set arbitrary default values, including doing stuff like: {% macro my_macro(arg="x" + ("}% {# {% endmacro %}" * 2)) %} Which makes you a jerk, but is valid jinja.
def prepend_path(orig, text, pathsep=os.pathsep): """Returns a $PATH-like environment variable with `text` prepended. `orig` is the original variable value, or None. `pathsep` is the character separating path elements, defaulting to `os.pathsep`. Example: newpath = cli.prepend_path(oldpath, '/mypackage/bin') See also `prepend_environ_path`. """ if orig is None: orig = '' if not len(orig): return text return ''.join([text, pathsep, orig])
Returns a $PATH-like environment variable with `text` prepended. `orig` is the original variable value, or None. `pathsep` is the character separating path elements, defaulting to `os.pathsep`. Example: newpath = cli.prepend_path(oldpath, '/mypackage/bin') See also `prepend_environ_path`.
def parse_line(self, line): """Parse a line into a dictionary""" match = re.findall(self.date_regex, line) if match: fields = self.fields elif self.backup_format_regex and not match: match = re.findall(self.backup_date_regex, line) fields = self.backup_fields if match: entry = {} entry['raw_text'] = line entry['parser'] = self.name matchlist = list(zip(fields, match[0])) for f, v in matchlist: entry[f] = v if 'date_stamp' in entry.keys(): if self.datestamp_type == 'standard': entry = logdissect.utils.convert_standard_datestamp(entry) elif self.datestamp_type == 'iso': entry = logdissect.utils.convert_iso_datestamp( entry) elif self.datestamp_type == 'webaccess': entry = logdissect.utils.convert_webaccess_datestamp( entry) elif self.datestamp_type == 'nodate': entry, self.datedata = \ logdissect.utils.convert_nodate_datestamp( entry, self.datedata) elif self.datestamp_type == 'unix': entry = logdissect.utils.convert_unix_datestamp( entry) if self.datestamp_type == 'now': entry = logdissect.utils.convert_now_datestamp( entry) entry = self.post_parse_action(entry) return entry else: return None
Parse a line into a dictionary
def get_final_numbers(filename, out_dir): """Copy the final_files file and get the number of markers and samples. :param filename: the name of the file. :param out_dir: the output directory. :type filename: str :type out_dir: str :returns: the final number of markers and samples :rtype: tuple """ # Copying the file shutil.copy(filename, out_dir) # Reading the number of markers and samples nb_samples = None nb_markers = None with open(filename, "r") as i_file: for line in i_file: row = line.rstrip("\r\n").split("\t") if len(row) == 1: continue path, ext = os.path.splitext(row[0]) if ext in {".bim", ".tped", ".map"}: nb_markers = row[1] elif ext in {".fam", ".ped", ".tfam"}: nb_samples = row[1] assert nb_samples assert nb_markers return nb_markers, nb_samples
Copy the final_files file and get the number of markers and samples. :param filename: the name of the file. :param out_dir: the output directory. :type filename: str :type out_dir: str :returns: the final number of markers and samples :rtype: tuple
def mutect_caller(align_bams, items, ref_file, assoc_files, region=None, out_file=None): """Run the MuTect paired analysis algorithm. """ config = items[0]["config"] if out_file is None: out_file = "%s-paired-variants.vcf.gz" % os.path.splitext(align_bams[0])[0] if not file_exists(out_file): base_config = items[0]["config"] broad_runner = broad.runner_from_config(base_config, "mutect") out_file_mutect = (out_file.replace(".vcf", "-mutect.vcf") if "vcf" in out_file else out_file + "-mutect.vcf") broad_runner, params = \ _mutect_call_prep(align_bams, items, ref_file, assoc_files, region, out_file_mutect) if (not isinstance(region, (list, tuple)) and not all(has_aligned_reads(x, region) for x in align_bams)): paired = vcfutils.get_paired(items) vcfutils.write_empty_vcf(out_file, samples=[x for x in (paired.tumor_name, paired.normal_name) if x]) return out_file_orig = "%s-orig%s" % utils.splitext_plus(out_file_mutect) if not file_exists(out_file_orig): with file_transaction(config, out_file_orig) as tx_out_file: # Rationale: MuTect writes another table to stdout, which we don't need params += ["--vcf", tx_out_file, "-o", os.devnull] broad_runner.run_mutect(params) is_paired = "-I:normal" in params if not utils.file_uptodate(out_file_mutect, out_file_orig): out_file_mutect = _fix_mutect_output(out_file_orig, config, out_file_mutect, is_paired) indelcaller = vcfutils.get_indelcaller(base_config) if ("scalpel" in indelcaller.lower() and region and isinstance(region, (tuple, list)) and chromhacks.is_autosomal_or_sex(region[0])): # Scalpel InDels out_file_indels = (out_file.replace(".vcf", "-somaticIndels.vcf") if "vcf" in out_file else out_file + "-somaticIndels.vcf") if scalpel.is_installed(items[0]["config"]): if not is_paired: vcfutils.check_paired_problems(items) scalpel._run_scalpel_caller(align_bams, items, ref_file, assoc_files, region=region, out_file=out_file_indels) else: scalpel._run_scalpel_paired(align_bams, items, ref_file, assoc_files, region=region, 
out_file=out_file_indels) out_file = vcfutils.combine_variant_files(orig_files=[out_file_mutect, out_file_indels], out_file=out_file, ref_file=items[0]["sam_ref"], config=items[0]["config"], region=region) else: utils.symlink_plus(out_file_mutect, out_file) elif "pindel" in indelcaller.lower(): from bcbio.structural import pindel out_file_indels = (out_file.replace(".vcf", "-somaticIndels.vcf") if "vcf" in out_file else out_file + "-somaticIndels.vcf") if pindel.is_installed(items[0]["config"]): pindel._run_tumor_pindel_caller(align_bams, items, ref_file, assoc_files, region=region, out_file=out_file_indels) out_file = vcfutils.combine_variant_files(orig_files=[out_file_mutect, out_file_indels], out_file=out_file, ref_file=ref_file, config=items[0]["config"], region=region) else: utils.symlink_plus(out_file_mutect, out_file) elif (("somaticindeldetector" in indelcaller.lower() or "sid" in indelcaller.lower()) and "appistry" in broad_runner.get_mutect_version()): # SomaticIndelDetector InDels out_file_indels = (out_file.replace(".vcf", "-somaticIndels.vcf") if "vcf" in out_file else out_file + "-somaticIndels.vcf") params_indels = _SID_call_prep(align_bams, items, ref_file, assoc_files, region, out_file_indels) with file_transaction(config, out_file_indels) as tx_out_file: params_indels += ["-o", tx_out_file] broad_runner.run_mutect(params_indels) out_file = vcfutils.combine_variant_files(orig_files=[out_file_mutect, out_file_indels], out_file=out_file, ref_file=items[0]["sam_ref"], config=items[0]["config"], region=region) else: utils.symlink_plus(out_file_mutect, out_file) return out_file
Run the MuTect paired analysis algorithm.
def copy_assets(self, assets_path): """Banana banana """ if not os.path.exists(assets_path): os.mkdir(assets_path) extra_files = self._get_extra_files() for ex_files in Formatter.get_extra_files_signal(self): extra_files.extend(ex_files) for src, dest in extra_files: dest = os.path.join(assets_path, dest) destdir = os.path.dirname(dest) if not os.path.exists(destdir): os.makedirs(destdir) if os.path.isfile(src): shutil.copy(src, dest) elif os.path.isdir(src): recursive_overwrite(src, dest)
Banana banana
def get_library_meta(self): ''' Fetches the meta data for the current library. The data could be in the superlib meta data file. If we can't find the data None is returned. ''' parent_dir = os.path.dirname(self.library_dir) if self.test_file_exists(os.path.join(self.library_dir,'meta'),['libraries.json']): with open(os.path.join(self.library_dir,'meta','libraries.json'),'r') as f: meta_data = json.load(f) if isinstance(meta_data,list): for lib in meta_data: if lib['key'] == self.library_key: return lib elif 'key' in meta_data and meta_data['key'] == self.library_key: return meta_data if not self.test_dir_exists(os.path.join(self.library_dir,'meta')) \ and self.test_file_exists(os.path.join(parent_dir,'meta'),['libraries.json']): with open(os.path.join(parent_dir,'meta','libraries.json'),'r') as f: libraries_json = json.load(f) if isinstance(libraries_json,list): for lib in libraries_json: if lib['key'] == self.library_key: return lib return None
Fetches the meta data for the current library. The data could be in the superlib meta data file. If we can't find the data None is returned.
def stop(self, unique_id, configs=None): """Stop the service. If the deployer has not started a service with`unique_id` the deployer will raise an Exception There are two configs that will be considered: 'terminate_only': if this config is passed in then this method is the same as terminate(unique_id) (this is also the behavior if stop_command is None and not overridden) 'stop_command': overrides the default stop_command :param unique_id: :param configs: :return: """ # the following is necessay to set the configs for this function as the combination of the # default configurations and the parameter with the parameter superceding the defaults but # not modifying the defaults if configs is None: configs = {} tmp = self.default_configs.copy() tmp.update(configs) configs = tmp logger.debug("stopping " + unique_id) if unique_id in self.processes: hostname = self.processes[unique_id].hostname else: logger.error("Can't stop {0}: process not known".format(unique_id)) raise DeploymentError("Can't stop {0}: process not known".format(unique_id)) if configs.get('terminate_only', False): self.terminate(unique_id, configs) else: stop_command = configs.get('stop_command') or self.default_configs.get('stop_command') env = configs.get("env", {}) if stop_command is not None: install_path = self.processes[unique_id].install_path with get_ssh_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ssh: log_output(exec_with_env(ssh, "cd {0}; {1}".format(install_path, stop_command), msg="Failed to stop {0}".format(unique_id), env=env)) else: self.terminate(unique_id, configs) if 'delay' in configs: time.sleep(configs['delay'])
Stop the service. If the deployer has not started a service with`unique_id` the deployer will raise an Exception There are two configs that will be considered: 'terminate_only': if this config is passed in then this method is the same as terminate(unique_id) (this is also the behavior if stop_command is None and not overridden) 'stop_command': overrides the default stop_command :param unique_id: :param configs: :return:
def get_resampled_coordinates(lons, lats): """ Resample polygon line segments and return the coordinates of the new vertices. This limits distortions when projecting a polygon onto a spherical surface. Parameters define longitudes and latitudes of a point collection in the form of lists or numpy arrays. :return: A tuple of two numpy arrays: longitudes and latitudes of resampled vertices. """ num_coords = len(lons) assert num_coords == len(lats) lons1 = numpy.array(lons) lats1 = numpy.array(lats) lons2 = numpy.concatenate((lons1[1:], lons1[:1])) lats2 = numpy.concatenate((lats1[1:], lats1[:1])) distances = geodetic.geodetic_distance(lons1, lats1, lons2, lats2) resampled_lons = [lons[0]] resampled_lats = [lats[0]] for i in range(num_coords): next_point = (i + 1) % num_coords lon1, lat1 = lons[i], lats[i] lon2, lat2 = lons[next_point], lats[next_point] distance = distances[i] num_points = int(distance / UPSAMPLING_STEP_KM) + 1 if num_points >= 2: # We need to increase the resolution of this arc by adding new # points. new_lons, new_lats, _ = geodetic.npoints_between( lon1, lat1, 0, lon2, lat2, 0, num_points) resampled_lons.extend(new_lons[1:]) resampled_lats.extend(new_lats[1:]) else: resampled_lons.append(lon2) resampled_lats.append(lat2) # NB: we cut off the last point because it repeats the first one return numpy.array(resampled_lons[:-1]), numpy.array(resampled_lats[:-1])
Resample polygon line segments and return the coordinates of the new vertices. This limits distortions when projecting a polygon onto a spherical surface. Parameters define longitudes and latitudes of a point collection in the form of lists or numpy arrays. :return: A tuple of two numpy arrays: longitudes and latitudes of resampled vertices.
def _get_var_name(self, register_name, mode): """Get variable name for a register considering pre and post mode. """ var_name = { "pre": self._translator.get_name_init(register_name), "post": self._translator.get_name_curr(register_name), } return var_name[mode]
Get variable name for a register considering pre and post mode.
def delete(gandi, fqdn, name, type, force): """Delete record entry for a domain.""" domains = gandi.dns.list() domains = [domain['fqdn'] for domain in domains] if fqdn not in domains: gandi.echo('Sorry domain %s does not exist' % fqdn) gandi.echo('Please use one of the following: %s' % ', '.join(domains)) return if not force: if not name and not type: prompt = ("Are you sure to delete all records for domain %s ?" % fqdn) elif name and not type: prompt = ("Are you sure to delete all '%s' name records for " "domain %s ?" % (name, fqdn)) else: prompt = ("Are you sure to delete all '%s' records of type %s " "for domain %s ?" % (name, type, fqdn)) proceed = click.confirm(prompt) if not proceed: return result = gandi.dns.del_record(fqdn, name, type) gandi.echo('Delete successful.') return result
Delete record entry for a domain.
def root(self): """ Reset all nodes and branches to 'root'. """ str_treeRoot = '/' self.l_cwd = [str_treeRoot] self.snode_current = self.snode_root self.sbranch_current = self.sbranch_root
Reset all nodes and branches to 'root'.
def fetch( self, limit=None, offset=0, start_cursor=None, end_cursor=None, client=None, eventual=False, ): """Execute the Query; return an iterator for the matching entities. For example:: >>> from google.cloud import datastore >>> client = datastore.Client() >>> query = client.query(kind='Person') >>> query.add_filter('name', '=', 'Sally') >>> list(query.fetch()) [<Entity object>, <Entity object>, ...] >>> list(query.fetch(1)) [<Entity object>] :type limit: int :param limit: (Optional) limit passed through to the iterator. :type offset: int :param offset: (Optional) offset passed through to the iterator. :type start_cursor: bytes :param start_cursor: (Optional) cursor passed through to the iterator. :type end_cursor: bytes :param end_cursor: (Optional) cursor passed through to the iterator. :type client: :class:`google.cloud.datastore.client.Client` :param client: (Optional) client used to connect to datastore. If not supplied, uses the query's value. :type eventual: bool :param eventual: (Optional) Defaults to strongly consistent (False). Setting True will use eventual consistency, but cannot be used inside a transaction or will raise ValueError. :rtype: :class:`Iterator` :returns: The iterator for the query. """ if client is None: client = self._client return Iterator( self, client, limit=limit, offset=offset, start_cursor=start_cursor, end_cursor=end_cursor, eventual=eventual, )
Execute the Query; return an iterator for the matching entities. For example:: >>> from google.cloud import datastore >>> client = datastore.Client() >>> query = client.query(kind='Person') >>> query.add_filter('name', '=', 'Sally') >>> list(query.fetch()) [<Entity object>, <Entity object>, ...] >>> list(query.fetch(1)) [<Entity object>] :type limit: int :param limit: (Optional) limit passed through to the iterator. :type offset: int :param offset: (Optional) offset passed through to the iterator. :type start_cursor: bytes :param start_cursor: (Optional) cursor passed through to the iterator. :type end_cursor: bytes :param end_cursor: (Optional) cursor passed through to the iterator. :type client: :class:`google.cloud.datastore.client.Client` :param client: (Optional) client used to connect to datastore. If not supplied, uses the query's value. :type eventual: bool :param eventual: (Optional) Defaults to strongly consistent (False). Setting True will use eventual consistency, but cannot be used inside a transaction or will raise ValueError. :rtype: :class:`Iterator` :returns: The iterator for the query.
def datetimes_to_durations( start_times, end_times, fill_date=datetime.today(), freq="D", dayfirst=False, na_values=None ): """ This is a very flexible function for transforming arrays of start_times and end_times to the proper format for lifelines: duration and event observation arrays. Parameters ---------- start_times: an array, Series or DataFrame iterable representing start times. These can be strings, or datetime objects. end_times: an array, Series or DataFrame iterable representing end times. These can be strings, or datetimes. These values can be None, or an empty string, which corresponds to censorship. fill_date: datetime, optional (default=datetime.Today()) the date to use if end_times is a None or empty string. This corresponds to last date of observation. Anything after this date is also censored. freq: string, optional (default='D') the units of time to use. See Pandas 'freq'. Default 'D' for days. dayfirst: boolean, optional (default=False) convert assuming European-style dates, i.e. day/month/year. na_values : list, optional list of values to recognize as NA/NaN. Ex: ['', 'NaT'] Returns ------- T: numpy array array of floats representing the durations with time units given by freq. C: numpy array boolean array of event observations: 1 if death observed, 0 else. 
Examples -------- >>> from lifelines.utils import datetimes_to_durations >>> >>> start_dates = ['2015-01-01', '2015-04-01', '2014-04-05'] >>> end_dates = ['2016-02-02', None, '2014-05-06'] >>> >>> T, E = datetimes_to_durations(start_dates, end_dates, freq="D") >>> T # array([ 397., 1414., 31.]) >>> E # array([ True, False, True]) """ fill_date = pd.to_datetime(fill_date) freq_string = "timedelta64[%s]" % freq start_times = pd.Series(start_times).copy() end_times = pd.Series(end_times).copy() C = ~(pd.isnull(end_times).values | end_times.isin(na_values or [""])) end_times[~C] = fill_date start_times_ = pd.to_datetime(start_times, dayfirst=dayfirst) end_times_ = pd.to_datetime(end_times, dayfirst=dayfirst, errors="coerce") deaths_after_cutoff = end_times_ > fill_date C[deaths_after_cutoff] = False T = (end_times_ - start_times_).values.astype(freq_string).astype(float) if (T < 0).sum(): warnings.warn("Warning: some values of start_times are after end_times") return T, C.values
This is a very flexible function for transforming arrays of start_times and end_times to the proper format for lifelines: duration and event observation arrays. Parameters ---------- start_times: an array, Series or DataFrame iterable representing start times. These can be strings, or datetime objects. end_times: an array, Series or DataFrame iterable representing end times. These can be strings, or datetimes. These values can be None, or an empty string, which corresponds to censorship. fill_date: datetime, optional (default=datetime.Today()) the date to use if end_times is a None or empty string. This corresponds to last date of observation. Anything after this date is also censored. freq: string, optional (default='D') the units of time to use. See Pandas 'freq'. Default 'D' for days. dayfirst: boolean, optional (default=False) convert assuming European-style dates, i.e. day/month/year. na_values : list, optional list of values to recognize as NA/NaN. Ex: ['', 'NaT'] Returns ------- T: numpy array array of floats representing the durations with time units given by freq. C: numpy array boolean array of event observations: 1 if death observed, 0 else. Examples -------- >>> from lifelines.utils import datetimes_to_durations >>> >>> start_dates = ['2015-01-01', '2015-04-01', '2014-04-05'] >>> end_dates = ['2016-02-02', None, '2014-05-06'] >>> >>> T, E = datetimes_to_durations(start_dates, end_dates, freq="D") >>> T # array([ 397., 1414., 31.]) >>> E # array([ True, False, True])
def _get_jacobian_hessian_strategy(self): """ Figure out how to calculate the jacobian and hessian. Will return a tuple describing how best to calculate the jacobian and hessian, repectively. If None, it should be calculated using the available analytical method. :return: tuple of jacobian_method, hessian_method """ if self.jacobian is not None and self.hessian is None: jacobian = None hessian = 'cs' elif self.jacobian is None and self.hessian is None: jacobian = 'cs' hessian = soBFGS(exception_strategy='damp_update') else: jacobian = None hessian = None return jacobian, hessian
Figure out how to calculate the jacobian and hessian. Will return a tuple describing how best to calculate the jacobian and hessian, repectively. If None, it should be calculated using the available analytical method. :return: tuple of jacobian_method, hessian_method
def create(self, name, **kwargs): ''' Create a dataset, including the field types. Optionally, specify args such as: description : description of the dataset columns : list of columns (see docs/tests for list structure) category : must exist in /admin/metadata tags : list of tag strings row_identifier : field name of primary key new_backend : whether to create the dataset in the new backend WARNING: This api endpoint might be deprecated. ''' new_backend = kwargs.pop("new_backend", False) resource = _format_old_api_request(content_type="json") if new_backend: resource += "?nbe=true" payload = {"name": name} if "row_identifier" in kwargs: payload["metadata"] = { "rowIdentifier": kwargs.pop("row_identifier", None) } payload.update(kwargs) payload = _clear_empty_values(payload) return self._perform_update("post", resource, payload)
Create a dataset, including the field types. Optionally, specify args such as: description : description of the dataset columns : list of columns (see docs/tests for list structure) category : must exist in /admin/metadata tags : list of tag strings row_identifier : field name of primary key new_backend : whether to create the dataset in the new backend WARNING: This api endpoint might be deprecated.
def plot_elbow_curve(clf, X, title='Elbow Plot', cluster_ranges=None, ax=None, figsize=None, title_fontsize="large", text_fontsize="medium"): """Plots elbow curve of different values of K for KMeans clustering. Args: clf: Clusterer instance that implements ``fit`` and ``fit_predict`` methods and a ``score`` parameter. X (array-like, shape (n_samples, n_features)): Data to cluster, where n_samples is the number of samples and n_features is the number of features. title (string, optional): Title of the generated plot. Defaults to "Elbow Plot" cluster_ranges (None or :obj:`list` of int, optional): List of n_clusters for which to plot the explained variances. Defaults to ``range(1, 12, 2)``. copy (boolean, optional): Determines whether ``fit`` is used on **clf** or on a copy of **clf**. ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to plot the curve. If None, the plot is drawn on a new set of axes. figsize (2-tuple, optional): Tuple denoting figure size of the plot e.g. (6, 6). Defaults to ``None``. title_fontsize (string or int, optional): Matplotlib-style fontsizes. Use e.g. "small", "medium", "large" or integer-values. Defaults to "large". text_fontsize (string or int, optional): Matplotlib-style fontsizes. Use e.g. "small", "medium", "large" or integer-values. Defaults to "medium". Returns: ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was drawn. Example: >>> import scikitplot.plotters as skplt >>> kmeans = KMeans(random_state=1) >>> skplt.plot_elbow_curve(kmeans, cluster_ranges=range(1, 11)) <matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490> >>> plt.show() .. image:: _static/examples/plot_elbow_curve.png :align: center :alt: Elbow Curve """ if cluster_ranges is None: cluster_ranges = range(1, 12, 2) else: cluster_ranges = sorted(cluster_ranges) if not hasattr(clf, 'n_clusters'): raise TypeError('"n_clusters" attribute not in classifier. 
' 'Cannot plot elbow method.') clfs = [] for i in cluster_ranges: current_clf = clone(clf) setattr(current_clf, "n_clusters", i) clfs.append(current_clf.fit(X).score(X)) if ax is None: fig, ax = plt.subplots(1, 1, figsize=figsize) ax.set_title(title, fontsize=title_fontsize) ax.plot(cluster_ranges, np.absolute(clfs), 'b*-') ax.grid(True) ax.set_xlabel('Number of clusters', fontsize=text_fontsize) ax.set_ylabel('Sum of Squared Errors', fontsize=text_fontsize) ax.tick_params(labelsize=text_fontsize) return ax
Plots elbow curve of different values of K for KMeans clustering. Args: clf: Clusterer instance that implements ``fit`` and ``fit_predict`` methods and a ``score`` parameter. X (array-like, shape (n_samples, n_features)): Data to cluster, where n_samples is the number of samples and n_features is the number of features. title (string, optional): Title of the generated plot. Defaults to "Elbow Plot" cluster_ranges (None or :obj:`list` of int, optional): List of n_clusters for which to plot the explained variances. Defaults to ``range(1, 12, 2)``. copy (boolean, optional): Determines whether ``fit`` is used on **clf** or on a copy of **clf**. ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to plot the curve. If None, the plot is drawn on a new set of axes. figsize (2-tuple, optional): Tuple denoting figure size of the plot e.g. (6, 6). Defaults to ``None``. title_fontsize (string or int, optional): Matplotlib-style fontsizes. Use e.g. "small", "medium", "large" or integer-values. Defaults to "large". text_fontsize (string or int, optional): Matplotlib-style fontsizes. Use e.g. "small", "medium", "large" or integer-values. Defaults to "medium". Returns: ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was drawn. Example: >>> import scikitplot.plotters as skplt >>> kmeans = KMeans(random_state=1) >>> skplt.plot_elbow_curve(kmeans, cluster_ranges=range(1, 11)) <matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490> >>> plt.show() .. image:: _static/examples/plot_elbow_curve.png :align: center :alt: Elbow Curve
def check_version(dstore): """ :param dstore: a DataStore instance :returns: a message if the stored version is different from the current version """ ds_version = dstore.hdf5.attrs['engine_version'] if ds_version != __version__: return (': the datastore is at version %s, but the exporter at ' 'version %s' % (ds_version, __version__)) else: return ''
:param dstore: a DataStore instance :returns: a message if the stored version is different from the current version
def counter(self): """ Rolling counter that ensures same-machine and same-time cuids don't collide. """ self._counter += 1 if self._counter >= DISCRETE_VALUES: self._counter = 0 return self._counter
Rolling counter that ensures same-machine and same-time cuids don't collide.
def parseInline(self, src): """Parses CSS inline source string using the current cssBuilder. Use to parse a tag's 'sytle'-like attribute.""" self.cssBuilder.beginInline() try: try: src, properties = self._parseDeclarationGroup(src.strip(), braces=False) except self.ParseError as err: err.setFullCSSSource(src, inline=True) raise result = self.cssBuilder.inline(properties) finally: self.cssBuilder.endInline() return result
Parses CSS inline source string using the current cssBuilder. Use to parse a tag's 'sytle'-like attribute.
def create_table(cls): """ create_table Manually create a temporary table for model in test data base. :return: """ schema_editor = getattr(connection, 'schema_editor', None) if schema_editor: with schema_editor() as schema_editor: schema_editor.create_model(cls) else: raw_sql, _ = connection.creation.sql_create_model( cls, no_style(), []) cls.delete_table() cursor = connection.cursor() try: cursor.execute(*raw_sql) finally: cursor.close()
create_table Manually create a temporary table for model in test data base. :return:
def base64(self, charset=None): '''Data encoded as base 64''' return b64encode(self.bytes()).decode(charset or self.charset)
Data encoded as base 64
def get_index_config(self, project_name, logstore_name): """ get index config detail of a logstore Unsuccessful opertaion will cause an LogException. :type project_name: string :param project_name: the Project name :type logstore_name: string :param logstore_name: the logstore name :return: GetIndexResponse :raise: LogException """ headers = {} params = {} resource = "/logstores/" + logstore_name + "/index" (resp, header) = self._send("GET", project_name, None, resource, params, headers) return GetIndexResponse(resp, header)
get index config detail of a logstore Unsuccessful opertaion will cause an LogException. :type project_name: string :param project_name: the Project name :type logstore_name: string :param logstore_name: the logstore name :return: GetIndexResponse :raise: LogException
def _copy(self): """Copies this instance. Its IonEvent (if any) is not preserved. Keeping this protected until/unless we decide there's use for it publicly. """ args, kwargs = self._to_constructor_args(self) value = self.__class__(*args, **kwargs) value.ion_event = None value.ion_type = self.ion_type value.ion_annotations = self.ion_annotations return value
Copies this instance. Its IonEvent (if any) is not preserved. Keeping this protected until/unless we decide there's use for it publicly.
def insert_data(self, node, data, start, end): """loops through all the data and inserts them into the empty tree""" for item in data: self.recursive_insert(node, [item[0], item[1]], item[-1], start, end)
loops through all the data and inserts them into the empty tree
def blackbox_network(): """A micro-network to demonstrate blackboxing. Diagram:: +----------+ +-------------------->+ A (COPY) + <---------------+ | +----------+ | | +----------+ | | +-----------+ B (COPY) + <-------------+ | v v +----------+ | | +-+-----+-+ +-+-----+-+ | | | | | C (AND) | | F (AND) | | | | | +-+-----+-+ +-+-----+-+ | | ^ ^ | | +----------+ | | | +---------> + D (COPY) +---------------+ | | +----------+ | | +----------+ | +-------------------> + E (COPY) +-----------------+ +----------+ Connectivity Matrix: +---+---+---+---+---+---+---+ | . | A | B | C | D | E | F | +---+---+---+---+---+---+---+ | A | 0 | 0 | 1 | 0 | 0 | 0 | +---+---+---+---+---+---+---+ | B | 0 | 0 | 1 | 0 | 0 | 0 | +---+---+---+---+---+---+---+ | C | 0 | 0 | 0 | 1 | 1 | 0 | +---+---+---+---+---+---+---+ | D | 0 | 0 | 0 | 0 | 0 | 1 | +---+---+---+---+---+---+---+ | E | 0 | 0 | 0 | 0 | 0 | 1 | +---+---+---+---+---+---+---+ | F | 1 | 1 | 0 | 0 | 0 | 0 | +---+---+---+---+---+---+---+ In the documentation example, the state is (0, 0, 0, 0, 0, 0). """ num_nodes = 6 num_states = 2 ** num_nodes tpm = np.zeros((num_states, num_nodes)) for index, previous_state in enumerate(all_states(num_nodes)): current_state = [0 for i in range(num_nodes)] if previous_state[5] == 1: current_state[0] = 1 current_state[1] = 1 if previous_state[0] == 1 and previous_state[1]: current_state[2] = 1 if previous_state[2] == 1: current_state[3] = 1 current_state[4] = 1 if previous_state[3] == 1 and previous_state[4] == 1: current_state[5] = 1 tpm[index, :] = current_state cm = np.array([ [0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 1, 0], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 1], [1, 1, 0, 0, 0, 0] ]) return Network(tpm, cm, node_labels=LABELS[:tpm.shape[1]])
A micro-network to demonstrate blackboxing. Diagram:: +----------+ +-------------------->+ A (COPY) + <---------------+ | +----------+ | | +----------+ | | +-----------+ B (COPY) + <-------------+ | v v +----------+ | | +-+-----+-+ +-+-----+-+ | | | | | C (AND) | | F (AND) | | | | | +-+-----+-+ +-+-----+-+ | | ^ ^ | | +----------+ | | | +---------> + D (COPY) +---------------+ | | +----------+ | | +----------+ | +-------------------> + E (COPY) +-----------------+ +----------+ Connectivity Matrix: +---+---+---+---+---+---+---+ | . | A | B | C | D | E | F | +---+---+---+---+---+---+---+ | A | 0 | 0 | 1 | 0 | 0 | 0 | +---+---+---+---+---+---+---+ | B | 0 | 0 | 1 | 0 | 0 | 0 | +---+---+---+---+---+---+---+ | C | 0 | 0 | 0 | 1 | 1 | 0 | +---+---+---+---+---+---+---+ | D | 0 | 0 | 0 | 0 | 0 | 1 | +---+---+---+---+---+---+---+ | E | 0 | 0 | 0 | 0 | 0 | 1 | +---+---+---+---+---+---+---+ | F | 1 | 1 | 0 | 0 | 0 | 0 | +---+---+---+---+---+---+---+ In the documentation example, the state is (0, 0, 0, 0, 0, 0).
def set_cookie( # type: ignore self, key: str, value: AnyStr='', max_age: Optional[Union[int, timedelta]]=None, expires: Optional[datetime]=None, path: str='/', domain: Optional[str]=None, secure: bool=False, httponly: bool=False, ) -> None: """Set a cookie in the response headers. The arguments are the standard cookie morsels and this is a wrapper around the stdlib SimpleCookie code. """ if isinstance(value, bytes): value = value.decode() # type: ignore cookie = create_cookie(key, value, max_age, expires, path, domain, secure, httponly) # type: ignore # noqa: E501 self.headers.add('Set-Cookie', cookie.output(header=''))
Set a cookie in the response headers. The arguments are the standard cookie morsels and this is a wrapper around the stdlib SimpleCookie code.
def evaluate_world_model( agent_type, loop_hparams, planner_hparams, model_dir, policy_dir, random_starts_step_limit, debug_video_path, log_every_steps ): """Evaluates the world model.""" if debug_video_path: debug_video_path = os.path.join(debug_video_path, "0.avi") storage_env = rl_utils.setup_env(loop_hparams, batch_size=1, max_num_noops=0) stacked_env = rl_utils.BatchStackWrapper( storage_env, loop_hparams.frame_stack_size ) policy_hparams = trainer_lib.create_hparams(loop_hparams.base_algo_params) agent = make_agent_from_hparams( agent_type, storage_env, stacked_env, loop_hparams, policy_hparams, planner_hparams, model_dir, policy_dir, # TODO(koz4k): Loop over eval_sampling_temps? sampling_temp=loop_hparams.eval_sampling_temps[0], ) collect_frames_for_random_starts( storage_env, stacked_env, agent, loop_hparams.frame_stack_size, random_starts_step_limit, log_every_steps ) return rl_utils.evaluate_world_model( storage_env, loop_hparams, model_dir, debug_video_path, split=None )
Evaluates the world model.
def keyPressEvent(self, event): """Reimplement Qt method to allow cyclic behavior.""" if event.key() == Qt.Key_Down: self.select_row(1) elif event.key() == Qt.Key_Up: self.select_row(-1)
Reimplement Qt method to allow cyclic behavior.
def select(self, return_models=False, nest=False, bypass_safe_limit=False, sql=None, sql_args=None): """ Executes the SELECT statement and returns the rows as a list of dictionaries or a list of model instances :type return_models: bool :param return_models: Set to True to return a list of models instead of a list of dictionaries. Defaults to False :type nest: bool :param nest: Set to True to treat all double underscores in keynames as nested data. This will convert all keys with double underscores to dictionaries keyed off of the left side of the underscores. Ex: {"id": 1", "account__id": 1, "account__name": "Name"} becomes {"id": 1, "account": {"id": 1, "name": "Name"}} :type bypass_safe_limit: bool :param bypass_safe_limit: Ignores the safe_limit option even if the safe_limit is enabled :type sql: str or None :param sql: The sql to execute in the SELECT statement. If one is not specified, then the query will use ``self.get_sql()`` :type sql_args: str or None :param sql_args: The sql args to be used in the SELECT statement. 
If none are specified, then the query wil use ``self.get_args()`` :rtype: list of dict :return: list of dictionaries of the rows """ # Check if we need to set a safe limit if bypass_safe_limit is False: if Query.enable_safe_limit: if self.count() > Query.safe_limit: self.limit(Query.safe_limit) # determine which sql to use if sql is None: sql = self.get_sql() # determine which sql args to use if sql_args is None: sql_args = self.get_args() # get the cursor to execute the query cursor = self.get_cursor() # execute the query cursor.execute(sql, sql_args) # get the results as a list of dictionaries rows = self._fetch_all_as_dict(cursor) # check if models should be returned instead of dictionaries if return_models: # set nesting to true, so the nested models can easily load the data nest = True # build model map of map name to model model_map = {} for join_item in self.joins: model_map[join_item.right_table.field_prefix] = join_item.right_table.model # check if results should be nested if nest: # convert keys with double underscores to dictionaries for row in rows: _row = row.copy() for key, value in _row.items(): set_value_for_keypath(row, key, value, True, '__') if '__' in key: row.pop(key) # create models if needed if return_models: model_class = self.tables[0].model new_rows = [] for row in rows: model = model_class() # assign all non-model keys first because django 1.5 requires # that the model has an id set before setting a property that is # a foreign key for key, value in row.items(): if key not in model_map: setattr(model, key, value) # assign all model instances for key, value in row.items(): if key in model_map: child_model = model_map[key]() for child_key, child_value in value.items(): setattr(child_model, child_key, child_value) value = child_model setattr(model, key, value) new_rows.append(model) rows = new_rows return rows
Executes the SELECT statement and returns the rows as a list of dictionaries or a list of model instances :type return_models: bool :param return_models: Set to True to return a list of models instead of a list of dictionaries. Defaults to False :type nest: bool :param nest: Set to True to treat all double underscores in keynames as nested data. This will convert all keys with double underscores to dictionaries keyed off of the left side of the underscores. Ex: {"id": 1", "account__id": 1, "account__name": "Name"} becomes {"id": 1, "account": {"id": 1, "name": "Name"}} :type bypass_safe_limit: bool :param bypass_safe_limit: Ignores the safe_limit option even if the safe_limit is enabled :type sql: str or None :param sql: The sql to execute in the SELECT statement. If one is not specified, then the query will use ``self.get_sql()`` :type sql_args: str or None :param sql_args: The sql args to be used in the SELECT statement. If none are specified, then the query wil use ``self.get_args()`` :rtype: list of dict :return: list of dictionaries of the rows
def run(generate_pks, show_pks, host, port, uri): """Connect sandman to <URI> and start the API server/admin interface.""" app.config['SQLALCHEMY_DATABASE_URI'] = uri app.config['SANDMAN_GENERATE_PKS'] = generate_pks app.config['SANDMAN_SHOW_PKS'] = show_pks app.config['SERVER_HOST'] = host app.config['SERVER_PORT'] = port activate(name='sandmanctl') app.run(host=host, port=int(port), debug=True)
Connect sandman to <URI> and start the API server/admin interface.
def minlen(min_length, strict=False # type: bool ): """ 'Minimum length' validation_function generator. Returns a validation_function to check that len(x) >= min_length (strict=False, default) or len(x) > min_length (strict=True) :param min_length: minimum length for x :param strict: Boolean flag to switch between len(x) >= min_length (strict=False) and len(x) > min_length (strict=True) :return: """ if strict: def minlen_(x): if len(x) > min_length: return True else: # raise Failure('minlen: len(x) > ' + str(min_length) + ' does not hold for x=' + str(x)) raise TooShort(wrong_value=x, min_length=min_length, strict=True) else: def minlen_(x): if len(x) >= min_length: return True else: # raise Failure('minlen: len(x) >= ' + str(min_length) + ' does not hold for x=' + str(x)) raise TooShort(wrong_value=x, min_length=min_length, strict=False) minlen_.__name__ = 'length_{}greater_than_{}'.format('strictly_' if strict else '', min_length) return minlen_
'Minimum length' validation_function generator. Returns a validation_function to check that len(x) >= min_length (strict=False, default) or len(x) > min_length (strict=True) :param min_length: minimum length for x :param strict: Boolean flag to switch between len(x) >= min_length (strict=False) and len(x) > min_length (strict=True) :return:
def electric_field_amplitude_top(P, a, Omega=1e6, units="ad-hoc"): """Return the amplitude of the electric field for a top hat beam. This is the amplitude of a laser beam of power P (in Watts) and a top-hat\ intensity distribution of radius a (in meters). The value of E0 is given in\ rescaled units according to the frequency scale Omega (in Hertz)\ understood as absolute frequency (as opposed to angular frequency). >>> print(electric_field_amplitude_top(0.001, 0.001)) 27.8404157371 """ e0 = hbar*Omega/(e*a0) # This is the electric field scale. E0 = sqrt((c*mu0*P)/(Pi*a**2)) if units == "ad-hoc": E0 = E0/e0 return E0
Return the amplitude of the electric field for a top hat beam. This is the amplitude of a laser beam of power P (in Watts) and a top-hat\ intensity distribution of radius a (in meters). The value of E0 is given in\ rescaled units according to the frequency scale Omega (in Hertz)\ understood as absolute frequency (as opposed to angular frequency). >>> print(electric_field_amplitude_top(0.001, 0.001)) 27.8404157371
def create_from_taskfile(self, taskfile): """Create a new TaskFileInfo and return it for the given taskfile :param taskfile: the taskfile to represent :type taskfile: :class:`jukeboxcore.djadapter.models.TaskFile` :returns: a taskfileinfo :rtype: :class:`TaskFileInfo` :raises: None """ return TaskFileInfo(task=taskfile.task, version=taskfile.version, releasetype=taskfile.releasetype, descriptor=taskfile.descriptor, typ=taskfile.typ)
Create a new TaskFileInfo and return it for the given taskfile :param taskfile: the taskfile to represent :type taskfile: :class:`jukeboxcore.djadapter.models.TaskFile` :returns: a taskfileinfo :rtype: :class:`TaskFileInfo` :raises: None
def editor_multi_agent_example(): """This editor example shows how to interact with holodeck worlds that have multiple agents. This is specifically for when working with UE4 directly and not a prebuilt binary. """ agent_definitions = [ AgentDefinition("uav0", agents.UavAgent, [Sensors.PIXEL_CAMERA, Sensors.LOCATION_SENSOR]), AgentDefinition("uav1", agents.UavAgent, [Sensors.LOCATION_SENSOR, Sensors.VELOCITY_SENSOR]) ] env = HolodeckEnvironment(agent_definitions, start_world=False) cmd0 = np.array([0, 0, -2, 10]) cmd1 = np.array([0, 0, 5, 10]) for i in range(10): env.reset() env.act("uav0", cmd0) env.act("uav1", cmd1) for _ in range(1000): states = env.tick() uav0_terminal = states["uav0"][Sensors.TERMINAL] uav1_reward = states["uav1"][Sensors.REWARD]
This editor example shows how to interact with holodeck worlds that have multiple agents. This is specifically for when working with UE4 directly and not a prebuilt binary.
def execute_route(self, meta_data, request_pdu): """ Execute configured route based on requests meta data and request PDU. :param meta_data: A dict with meta data. It must at least contain key 'unit_id'. :param request_pdu: A bytearray containing request PDU. :return: A bytearry containing reponse PDU. """ try: function = create_function_from_request_pdu(request_pdu) results =\ function.execute(meta_data['unit_id'], self.route_map) try: # ReadFunction's use results of callbacks to build response # PDU... return function.create_response_pdu(results) except TypeError: # ...other functions don't. return function.create_response_pdu() except ModbusError as e: function_code = get_function_code_from_request_pdu(request_pdu) return pack_exception_pdu(function_code, e.error_code) except Exception as e: log.exception('Could not handle request: {0}.'.format(e)) function_code = get_function_code_from_request_pdu(request_pdu) return pack_exception_pdu(function_code, ServerDeviceFailureError.error_code)
Execute configured route based on requests meta data and request PDU. :param meta_data: A dict with meta data. It must at least contain key 'unit_id'. :param request_pdu: A bytearray containing request PDU. :return: A bytearry containing reponse PDU.
def post(method, hmc, uri, uri_parms, body, logon_required, wait_for_completion): """Operation: Stop CPC (requires DPM mode).""" assert wait_for_completion is True # async not supported yet cpc_oid = uri_parms[0] try: cpc = hmc.cpcs.lookup_by_oid(cpc_oid) except KeyError: raise InvalidResourceError(method, uri) if not cpc.dpm_enabled: raise CpcNotInDpmError(method, uri, cpc) cpc.properties['status'] = 'not-operating'
Operation: Stop CPC (requires DPM mode).
def check_no_element_by_selector(self, selector): """Assert an element does not exist matching the given selector.""" elems = find_elements_by_jquery(world.browser, selector) if elems: raise AssertionError("Expected no matching elements, found {}.".format( len(elems)))
Assert an element does not exist matching the given selector.
def best_assemblyfile(self): """ Determine whether the contigs.fasta output file from the assembler is present. If not, set the .bestassembly attribute to 'NA' """ for sample in self.metadata: try: # Set the name of the filtered assembly file filtered_outputfile = os.path.join(self.path, 'raw_assemblies', '{}.fasta'.format(sample.name)) # Set the name of the unfiltered spades assembly output file if os.path.isfile(sample.general.assemblyfile): size = os.path.getsize(sample.general.assemblyfile) # Ensure that the assembly isn't just an empty file if size == 0: sample.general.bestassemblyfile = 'NA' else: sample.general.bestassemblyfile = sample.general.assemblyfile shutil.copyfile(sample.general.bestassemblyfile, filtered_outputfile) else: sample.general.bestassemblyfile = 'NA' # Add the name and path of the filtered file to the metadata sample.general.filteredfile = filtered_outputfile except AttributeError: sample.general.assemblyfile = 'NA' sample.general.bestassemblyfile = 'NA'
Determine whether the contigs.fasta output file from the assembler is present. If not, set the .bestassembly attribute to 'NA'
def _other_to_dict(self, other): """When serializing models, this allows attached models (children, parents, etc.) to also be serialized. """ if isinstance(other, ModelBase): return other.to_dict() elif isinstance(other, list): # TODO: what if it's not a list? return [self._other_to_dict(i) for i in other] else: return other
When serializing models, this allows attached models (children, parents, etc.) to also be serialized.
def _apply_list(self, methods): """Return a single callable that applies a list of methods to a value. If a method returns None, the last value is kept; if it returns some other value, that replaces the last value. Exceptions are not caught. """ def call(value): for method in methods: newvalue = method(self, value) if newvalue is not None: value = newvalue return value return call
Return a single callable that applies a list of methods to a value. If a method returns None, the last value is kept; if it returns some other value, that replaces the last value. Exceptions are not caught.
def get_symbol_map(self): """ If you need the symbol map, use this method. The symbol map is an array of string pairs mapping common tokens to X Keysym strings, such as "alt" to "Alt_L" :return: array of strings. """ # todo: make sure we return a list of strings! sm = _libxdo.xdo_get_symbol_map() # Return value is like: # ['alt', 'Alt_L', ..., None, None, None, ...] # We want to return only values up to the first None. # todo: any better solution than this? i = 0 ret = [] while True: c = sm[i] if c is None: return ret ret.append(c) i += 1
If you need the symbol map, use this method. The symbol map is an array of string pairs mapping common tokens to X Keysym strings, such as "alt" to "Alt_L" :return: array of strings.
def update_security_group(self, security_group, body=None): """Updates a security group.""" return self.put(self.security_group_path % security_group, body=body)
Updates a security group.
def execute(st, **kwargs): """ Work around for Python3 exec function which doesn't allow changes to the local namespace because of scope. This breaks a lot of the old functionality in the code which was origionally in Python2. So this function runs just like exec except that it returns the output of the input statement to the local namespace. It may break if you start feeding it multiline monoliths of statements (haven't tested) but you shouldn't do that anyway (bad programming). Parameters ----------- st : the statement you want executed and for which you want the return kwargs : anything that may need to be in this namespace to execute st Returns ------- The return value of executing the input statement """ namespace = kwargs exec("b = {}".format(st), namespace) return namespace['b']
Work around for Python3 exec function which doesn't allow changes to the local namespace because of scope. This breaks a lot of the old functionality in the code which was origionally in Python2. So this function runs just like exec except that it returns the output of the input statement to the local namespace. It may break if you start feeding it multiline monoliths of statements (haven't tested) but you shouldn't do that anyway (bad programming). Parameters ----------- st : the statement you want executed and for which you want the return kwargs : anything that may need to be in this namespace to execute st Returns ------- The return value of executing the input statement
def main(args=None, **kwargs): """Run the Tango SDP Master device server.""" LOG.info('Starting %s', __service_id__) return run([SDPMasterDevice], verbose=True, msg_stream=sys.stdout, args=args, **kwargs)
Run the Tango SDP Master device server.
def global_state_code(self): """ Returns global variables for generating function from ``func_code`` as code. Includes compiled regular expressions and imports. """ self._generate_func_code() if not self._compile_regexps: return '\n'.join( [ 'from fastjsonschema import JsonSchemaException', '', '', ] ) regexs = ['"{}": re.compile(r"{}")'.format(key, value.pattern) for key, value in self._compile_regexps.items()] return '\n'.join( [ 'import re', 'from fastjsonschema import JsonSchemaException', '', '', 'REGEX_PATTERNS = {', ' ' + ',\n '.join(regexs), '}', '', ] )
Returns global variables for generating function from ``func_code`` as code. Includes compiled regular expressions and imports.
def handle_exception(klass, exc, tb, coro=None): """run all the registered exception handlers the first 3 arguments to this function match the output of ``sys.exc_info()`` :param klass: the exception klass :type klass: type :param exc: the exception instance :type exc: Exception :param tb: the traceback object :type tb: Traceback :param coro: behave as though the exception occurred in this coroutine (defaults to the current coroutine) :type coro: greenlet exception handlers run would be all those added with :func:`global_exception_handler`, and any added for the relevant coroutine with :func:`local_exception_handler`. """ if coro is None: coro = compat.getcurrent() replacement = [] for weak in state.local_exception_handlers.get(coro, ()): func = weak() if func is None: continue try: func(klass, exc, tb) except Exception: continue replacement.append(weak) if replacement: state.local_exception_handlers[coro][:] = replacement replacement = [] for weak in state.global_exception_handlers: func = weak() if func is None: continue try: func(klass, exc, tb) except Exception: continue replacement.append(weak) state.global_exception_handlers[:] = replacement
run all the registered exception handlers the first 3 arguments to this function match the output of ``sys.exc_info()`` :param klass: the exception klass :type klass: type :param exc: the exception instance :type exc: Exception :param tb: the traceback object :type tb: Traceback :param coro: behave as though the exception occurred in this coroutine (defaults to the current coroutine) :type coro: greenlet exception handlers run would be all those added with :func:`global_exception_handler`, and any added for the relevant coroutine with :func:`local_exception_handler`.
def versioning_model_classname(manager, model): """Get the name of the versioned model class.""" if manager.options.get('use_module_name', True): return '%s%sVersion' % ( model.__module__.title().replace('.', ''), model.__name__) else: return '%sVersion' % (model.__name__,)
Get the name of the versioned model class.
def generated_target_ps(is_suffix, type, prop_set): """ Returns suffix that should be used when generating target of 'type', with the specified properties. If not suffix were specified for 'type', returns suffix for base type, if any. """ if __debug__: from .property_set import PropertySet assert isinstance(is_suffix, (int, bool)) assert isinstance(type, basestring) assert isinstance(prop_set, PropertySet) key = (is_suffix, type, prop_set) v = __target_suffixes_cache.get(key, None) if not v: v = generated_target_ps_real(is_suffix, type, prop_set.raw()) __target_suffixes_cache [key] = v return v
Returns suffix that should be used when generating target of 'type', with the specified properties. If not suffix were specified for 'type', returns suffix for base type, if any.
def _q_to_dcm(self, q): """ Create DCM (Matrix3) from q :param q: array q which represents a quaternion [w, x, y, z] :returns: Matrix3 """ assert(len(q) == 4) arr = super(Quaternion, self)._q_to_dcm(q) return self._dcm_array_to_matrix3(arr)
Create DCM (Matrix3) from q :param q: array q which represents a quaternion [w, x, y, z] :returns: Matrix3
def read_actions(): """Yields actions for pressed keys.""" while True: key = get_key() # Handle arrows, j/k (qwerty), and n/e (colemak) if key in (const.KEY_UP, const.KEY_CTRL_N, 'k', 'e'): yield const.ACTION_PREVIOUS elif key in (const.KEY_DOWN, const.KEY_CTRL_P, 'j', 'n'): yield const.ACTION_NEXT elif key in (const.KEY_CTRL_C, 'q'): yield const.ACTION_ABORT elif key in ('\n', '\r'): yield const.ACTION_SELECT
Yields actions for pressed keys.
def stats_on_depth(d, depth):
    """Print node statistics for the given depth of dict tree *d*.

    Counts the nodes at *depth*, classifying each as a leaf (no children)
    or a root (has children), and prints the tallies.
    """
    root_nodes = 0
    leaf_nodes = 0
    for _, node in DictTree.kv_depth(d, depth):
        # A node with no entries of its own is a leaf.
        if DictTree.length(node) == 0:
            leaf_nodes += 1
        else:
            root_nodes += 1
    print("On depth %s, having %s root nodes, %s leaf nodes. "
          "%s nodes in total." % (depth, root_nodes, leaf_nodes,
                                  root_nodes + leaf_nodes))
Display the node stats info on specific depth in this dict
def _get_log_entries(self) -> List[Tuple[int, bytes, List[int], bytes]]: """ Return the log entries for this computation and its children. They are sorted in the same order they were emitted during the transaction processing, and include the sequential counter as the first element of the tuple representing every entry. """ if self.is_error: return [] else: return sorted(itertools.chain( self._log_entries, *(child._get_log_entries() for child in self.children) ))
Return the log entries for this computation and its children. They are sorted in the same order they were emitted during the transaction processing, and include the sequential counter as the first element of the tuple representing every entry.
def autoconf(self):
    """Implements Munin Plugin Auto-Configuration Option.

    @return: True if plugin can be auto-configured, False otherwise.
    """
    info = ApacheInfo(self._host, self._port,
                      self._user, self._password,
                      self._statuspath, self._ssl)
    # NOTE(review): a successful constructor call never yields None, so this
    # effectively returns True unless ApacheInfo(...) raises — confirm that
    # is the intended auto-configuration contract.
    return info is not None
Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise.
def __sort_up(self):
    """Sort the updatable objects into ascending order (lazy: only when
    the sort flag has been set)."""
    if not self.__do_need_sort_up:
        return
    # __up_cmp is an old-style comparison function; adapt it to a key.
    self.__up_objects.sort(key=cmp_to_key(self.__up_cmp))
    self.__do_need_sort_up = False
Sort the updatable objects according to ascending order
def get_transaction_by_index(
        self,
        block_number: BlockNumber,
        transaction_index: int,
        transaction_class: Type['BaseTransaction']) -> 'BaseTransaction':
    """
    Return the transaction at `transaction_index` within the canonical
    block numbered `block_number`.

    Raises TransactionNotFound if the block is not in the canonical chain
    or has no transaction at that index.
    """
    try:
        block_header = self.get_canonical_block_header_by_number(block_number)
    except HeaderNotFound:
        raise TransactionNotFound("Block {} is not in the canonical chain".format(block_number))
    # Each block stores its transactions in a trie keyed by RLP-encoded index.
    transaction_db = HexaryTrie(self.db, root_hash=block_header.transaction_root)
    encoded_index = rlp.encode(transaction_index)
    if encoded_index not in transaction_db:
        raise TransactionNotFound(
            "No transaction is at index {} of block {}".format(transaction_index, block_number))
    return rlp.decode(transaction_db[encoded_index], sedes=transaction_class)
Returns the transaction at the specified `transaction_index` from the block specified by `block_number` from the canonical chain. Raises TransactionNotFound if the block is not in the canonical chain, or if the block has no transaction at that index.
def vector_angle_between(vector1, vector2, **kwargs):
    """ Computes the angle between the two input vectors.

    If the keyword argument ``degrees`` is set to *True*, then the angle will be in degrees.
    Otherwise, it will be in radians. By default, ``degrees`` is set to *True*.

    :param vector1: vector
    :type vector1: list, tuple
    :param vector2: vector
    :type vector2: list, tuple
    :return: angle between the vectors
    :rtype: float
    """
    degrees = kwargs.get('degrees', True)
    magn1 = vector_magnitude(vector1)
    magn2 = vector_magnitude(vector2)
    acos_val = vector_dot(vector1, vector2) / (magn1 * magn2)
    # Floating-point rounding can push the cosine fractionally outside
    # [-1, 1] for (anti)parallel vectors, which would make math.acos raise
    # "math domain error"; clamp to the valid domain first.
    acos_val = max(-1.0, min(1.0, acos_val))
    angle_radians = math.acos(acos_val)
    if degrees:
        return math.degrees(angle_radians)
    return angle_radians
Computes the angle between the two input vectors. If the keyword argument ``degrees`` is set to *True*, then the angle will be in degrees. Otherwise, it will be in radians. By default, ``degrees`` is set to *True*. :param vector1: vector :type vector1: list, tuple :param vector2: vector :type vector2: list, tuple :return: angle between the vectors :rtype: float