code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def from_dict(d: Dict[str, Any]) -> 'BugZooException':
    """Reconstruct a BugZoo exception from a dictionary-based description.

    Expects ``d`` of the form ``{'error': {'kind': ..., 'message': ...,
    'data': ...}}`` as produced by the corresponding serialisation code.
    """
    assert 'error' in d
    d = d['error']
    # 'kind' names an exception class defined in this module.
    cls = getattr(sys.modules[__name__], d['kind'])
    assert issubclass(cls, BugZooException)
    return cls.from_message_and_data(d['message'], d.get('data', {}))
Reconstructs a BugZoo exception from a dictionary-based description.
def subsets(self):
    """Subsets that make up each split of the dataset for the language pair.

    Filters ``self._subsets`` down to the sub-datasets whose source/target
    languages match ``self.builder_config.language_pair``.
    """
    source, target = self.builder_config.language_pair
    filtered_subsets = {}
    for split, ss_names in self._subsets.items():
        filtered_subsets[split] = []
        for ss_name in ss_names:
            ds = DATASET_MAP[ss_name]
            # Keep only sub-datasets translating `source` -> `target`.
            if ds.target != target or source not in ds.sources:
                logging.info(
                    "Skipping sub-dataset that does not include language pair: %s",
                    ss_name)
            else:
                filtered_subsets[split].append(ss_name)
    logging.info("Using sub-datasets: %s", filtered_subsets)
    return filtered_subsets
Subsets that make up each split of the dataset for the language pair.
def map_statements(self):
    """Run the ontology mapping on the statements.

    For every agent of every statement, look up mapped database references
    for each existing ``db_refs`` entry and attach any mappings that are not
    already present. Modifies the statements in place.
    """
    for stmt in self.statements:
        for agent in stmt.agent_list():
            if agent is None:
                continue
            all_mappings = []
            for db_name, db_id in agent.db_refs.items():
                if isinstance(db_id, list):
                    # Scored entries look like [(id, score), ...]; map the
                    # top-ranked id.
                    db_id = db_id[0][0]
                mappings = self._map_id(db_name, db_id)
                all_mappings += mappings
            for map_db_name, map_db_id, score, orig_db_name in all_mappings:
                # Never overwrite an existing reference.
                if map_db_name in agent.db_refs:
                    continue
                if self.scored:
                    # Propagate the original entry's score; fall back to 1.0
                    # if the entry is not in scored (list-of-tuples) form.
                    try:
                        orig_score = agent.db_refs[orig_db_name][0][1]
                    except Exception:
                        orig_score = 1.0
                    agent.db_refs[map_db_name] = \
                        [(map_db_id, score * orig_score)]
                else:
                    if map_db_name in ('UN', 'HUME'):
                        # These namespaces are stored in scored form even
                        # in unscored mode.
                        agent.db_refs[map_db_name] = [(map_db_id, 1.0)]
                    else:
                        agent.db_refs[map_db_name] = map_db_id
Run the ontology mapping on the statements.
def create(self, project_name, template_name, substitutions):
    """Launch the project creation.

    :param project_name: name of the project to create.
    :param template_name: name of the template to instantiate.
    :param substitutions: iterable of ``"key,value"`` strings used to fill
        the template placeholders.
    """
    self.project_name = project_name
    self.template_name = template_name
    for subs in substitutions:
        # Split on the first comma only so substitution values may
        # themselves contain commas.
        current_sub = subs.split(',', 1)
        current_key = current_sub[0].strip()
        current_val = current_sub[1].strip()
        self.substitutes_dict[current_key] = current_val
    self.term.print_info(u"Creating project '{0}' with template {1}"
                         .format(self.term.text_in_color(project_name,
                                                         TERM_PINK),
                                 template_name))
    self.make_directories()
    self.make_files()
    self.make_posthook()
Launch the project creation.
def pack(value, nbits=None):
    """Pack ``value`` into an array of 8-bit unsigned integers.

    The result is little endian. If ``nbits`` is None, the minimal number
    of bits required to represent ``value`` is used.

    Args:
        value (int): the integer value to pack.
        nbits (int): optional number of bits used to represent the value.

    Returns:
        An array of ``ctypes.c_uint8`` representing the packed ``value``.

    Raises:
        ValueError: if ``nbits <= 0``.
    """
    if nbits is None:
        nbits = pack_size(value) * BITS_PER_BYTE
    elif nbits <= 0:
        raise ValueError('Given number of bits must be greater than 0.')
    num_bytes = int(math.ceil(nbits / float(BITS_PER_BYTE)))
    packed = (ctypes.c_uint8 * num_bytes)()
    for byte_index in range(num_bytes):
        # Extract the byte_index-th byte, least significant first.
        packed[byte_index] = (value >> (byte_index * BITS_PER_BYTE)) & 0xFF
    return packed
Packs a given value into an array of 8-bit unsigned integers. If ``nbits`` is not present, calculates the minimal number of bits required to represent the given ``value``. The result is little endian. Args: value (int): the integer value to pack nbits (int): optional number of bits to use to represent the value Returns: An array of ``ctypes.c_uint8`` representing the packed ``value``. Raises: ValueError: if ``value < 0`` and ``nbits`` is ``None`` or ``nbits <= 0``. TypeError: if ``nbits`` or ``value`` are not numbers.
def _query_response_to_snapshot(response_pb, collection, expected_prefix):
    """Parse a query response protobuf to a document snapshot.

    Args:
        response_pb (RunQueryResponse): the query response protobuf.
        collection (CollectionReference): reference to the collection that
            initiated the query.
        expected_prefix (str): the expected prefix for fully-qualified
            document names returned in the query results.

    Returns:
        Optional[DocumentSnapshot]: a snapshot of the returned data, or
        None if ``response_pb.document`` is not set.
    """
    if not response_pb.HasField("document"):
        return None
    document_id = _helpers.get_doc_id(response_pb.document, expected_prefix)
    reference = collection.document(document_id)
    data = _helpers.decode_dict(response_pb.document.fields, collection._client)
    snapshot = document.DocumentSnapshot(
        reference,
        data,
        exists=True,
        read_time=response_pb.read_time,
        create_time=response_pb.document.create_time,
        update_time=response_pb.document.update_time,
    )
    return snapshot
Parse a query response protobuf to a document snapshot. Args: response_pb (google.cloud.proto.firestore.v1beta1.\ firestore_pb2.RunQueryResponse): A collection (~.firestore_v1beta1.collection.CollectionReference): A reference to the collection that initiated the query. expected_prefix (str): The expected prefix for fully-qualified document names returned in the query results. This can be computed directly from ``collection`` via :meth:`_parent_info`. Returns: Optional[~.firestore.document.DocumentSnapshot]: A snapshot of the data returned in the query. If ``response_pb.document`` is not set, the snapshot will be :data:`None`.
def get_variables_substitution_dictionaries(self, lhs_graph, rhs_graph):
    """Look for sub-isomorphisms of ``rhs_graph`` into ``lhs_graph``.

    :param lhs_graph: the (bigger) graph to look for sub-isomorphisms in.
    :param rhs_graph: the smaller graph.
    :return: the matching-name mappings (three empty dicts if ``rhs_graph``
        is falsy).
    """
    if not rhs_graph:
        return {}, {}, {}
    # Both graphs must be registered in the shared namespace before
    # matching can be performed.
    self.matching_code_container.add_graph_to_namespace(lhs_graph)
    self.matching_code_container.add_graph_to_namespace(rhs_graph)
    return self.__collect_variables_that_match_graph(lhs_graph, rhs_graph)
Looks for sub-isomorphisms of rhs into lhs :param lhs_graph: The graph to look sub-isomorphisms into (the bigger graph) :param rhs_graph: The smaller graph :return: The list of matching names
def login(self, request, session, creds, segments):
    """Check the credentials of a user.

    Extends guard's implementation to preauthenticate users if they have a
    valid persistent session.

    @param request: the HTTP request being handled.
    @param session: the user's current session.
    @param creds: the credentials the user presented.
    @param segments: the remaining segments of the URL.
    @return: a deferred firing with the user's avatar.
    """
    self._maybeCleanSessions()
    if isinstance(creds, credentials.Anonymous):
        # Anonymous user with a persistent session cookie: upgrade to a
        # preauthenticated credential.
        preauth = self.authenticatedUserForKey(session.uid)
        if preauth is not None:
            self.savorSessionCookie(request)
            creds = userbase.Preauthenticated(preauth)

    def cbLoginSuccess(input):
        user = request.args.get('username')
        if user is not None:
            cookieValue = session.uid
            if request.args.get('rememberMe'):
                # Persist the session so the user stays logged in.
                self.createSessionForKey(cookieValue, creds.username)
                self.savorSessionCookie(request)
        return input

    return (
        guard.SessionWrapper.login(
            self, request, session, creds, segments)
        .addCallback(cbLoginSuccess))
Called to check the credentials of a user. Here we extend guard's implementation to preauthenticate users if they have a valid persistent session. @type request: L{nevow.inevow.IRequest} @param request: The HTTP request being handled. @type session: L{nevow.guard.GuardSession} @param session: The user's current session. @type creds: L{twisted.cred.credentials.ICredentials} @param creds: The credentials the user presented. @type segments: L{tuple} @param segments: The remaining segments of the URL. @return: A deferred firing with the user's avatar.
def condition_from_code(condcode):
    """Get the condition name from the condition code.

    Returns a dict describing the condition, or None for unknown codes.
    """
    cond_data = __BRCONDITIONS.get(condcode)
    if cond_data is None:
        return None
    return {
        CONDCODE: condcode,
        CONDITION: cond_data[0],
        DETAILED: cond_data[1],
        EXACT: cond_data[2],
        EXACTNL: cond_data[3],
    }
Get the condition name from the condition code.
def f(s):
    """Basic support for 3.6-style f-strings, in 3.5!

    Formats ``s`` with ``str.format_map`` using the caller's globals and
    locals (builtins included), so ``f("hello a is {a}")`` behaves like
    ``f"hello a is {a}"``. Expressions (``{2-3}``) and function calls
    (``{len(x)}``) inside the braces are not supported.
    """
    caller = sys._getframe(1)
    namespace = dict(builtins.__dict__)
    namespace.update(caller.f_globals)
    namespace.update(caller.f_locals)
    return s.format_map(namespace)
Basic support for 3.6's f-strings, in 3.5! Formats "s" using appropriate globals and locals dictionaries. This f-string: f"hello a is {a}" simply becomes f("hello a is {a}") In other words, just throw parentheses around the string, and you're done! Implemented internally using str.format_map(). This means it doesn't support expressions: f("two minus three is {2-3}") And it doesn't support function calls: f("how many elements? {len(my_list)}") But most other f-string features work.
def get_plugin_tabwidget(self, plugin):
    """Get the tabwidget of the plugin's current tab manager.

    Falls back to the ``tabwidget`` attribute for tab managers that do not
    expose ``tabs``.
    """
    try:
        tab_widget = plugin.get_current_tab_manager().tabs
    except AttributeError:
        tab_widget = plugin.get_current_tab_manager().tabwidget
    return tab_widget
Get the tabwidget of the plugin's current tab manager.
def permissions_match(self, path):
    """Determine if the file owner and permissions match.

    :param path: the path to check.
    :return: True if the file at ``path`` is compliant with the configured
        user, group and mode.
    """
    audit = FilePermissionAudit(path, self.user, self.group, self.mode)
    return audit.is_compliant(path)
Determines if the file owner and permissions match. :param path: the path to check.
def index_of_coincidence(*texts):
    """Calculate the index of coincidence for one or more ``texts``.

    The results are averaged over multiple texts to return the delta index
    of coincidence.

    Examples:
        >>> index_of_coincidence("aabbc")
        0.2

    Args:
        *texts: the texts to analyze.

    Returns:
        Decimal value of the index of coincidence.

    Raises:
        ValueError: if ``texts`` is empty, or (via the helper) if any text
            is shorter than 2 characters.
    """
    if not texts:
        raise ValueError("texts must not be empty")
    return statistics.mean(
        _calculate_index_of_coincidence(frequency_analyze(text), len(text))
        for text in texts)
Calculate the index of coincidence for one or more ``texts``. The results are averaged over multiple texts to return the delta index of coincidence. Examples: >>> index_of_coincidence("aabbc") 0.2 >>> index_of_coincidence("aabbc", "abbcc") 0.2 Args: *texts (variable length argument list): The texts to analyze Returns: Decimal value of the index of coincidence Raises: ValueError: If texts is empty ValueError: If any text is less that 2 character long
def add_view(
    self,
    baseview,
    name,
    href="",
    icon="",
    label="",
    category="",
    category_icon="",
    category_label="",
):
    """Add your views associated with menus using this method.

    :param baseview: a BaseView type class, instantiated or not (it will
        be instantiated for you if needed).
    :param name: the string name that identifies the menu.
    :param href: override the generated href for the menu; an url string
        or an endpoint name. If not provided, the view's default_view is
        used.
    :param icon: Font-Awesome icon name, optional.
    :param label: label displayed on the menu; defaults to ``name``.
    :param category: menu category to include the menu in; if not provided
        the view is accessible as a top menu.
    :param category_icon: Font-Awesome icon name for the category,
        optional.
    :param category_label: label displayed for the category; defaults to
        ``name``.

    Examples::

        appbuilder.add_view(MyModelView(), "My View")
        # or not instantiated
        appbuilder.add_view(MyModelView, "My View")
        # submenu with an icon
        appbuilder.add_view(MyOtherModelView, "Other View",
                            icon='fa-phone', category="Others")
    """
    baseview = self._check_and_init(baseview)
    log.info(LOGMSG_INF_FAB_ADD_VIEW.format(baseview.__class__.__name__, name))
    if not self._view_exists(baseview):
        baseview.appbuilder = self
        self.baseviews.append(baseview)
        self._process_inner_views()
        if self.app:
            # Only register with Flask if an app is already bound.
            self.register_blueprint(baseview)
            self._add_permission(baseview)
    self.add_link(
        name=name,
        href=href,
        icon=icon,
        label=label,
        category=category,
        category_icon=category_icon,
        category_label=category_label,
        baseview=baseview,
    )
    return baseview
Add your views associated with menus using this method. :param baseview: A BaseView type class instantiated or not. This method will instantiate the class for you if needed. :param name: The string name that identifies the menu. :param href: Override the generated href for the menu. You can use an url string or an endpoint name if non provided default_view from view will be set as href. :param icon: Font-Awesome icon name, optional. :param label: The label that will be displayed on the menu, if absent param name will be used :param category: The menu category where the menu will be included, if non provided the view will be acessible as a top menu. :param category_icon: Font-Awesome icon name for the category, optional. :param category_label: The label that will be displayed on the menu, if absent param name will be used Examples:: appbuilder = AppBuilder(app, db) # Register a view, rendering a top menu without icon. appbuilder.add_view(MyModelView(), "My View") # or not instantiated appbuilder.add_view(MyModelView, "My View") # Register a view, a submenu "Other View" from "Other" with a phone icon. appbuilder.add_view( MyOtherModelView, "Other View", icon='fa-phone', category="Others" ) # Register a view, with category icon and translation. appbuilder.add_view( YetOtherModelView, "Other View", icon='fa-phone', label=_('Other View'), category="Others", category_icon='fa-envelop', category_label=_('Other View') ) # Add a link appbuilder.add_link("google", href="www.google.com", icon = "fa-google-plus")
def _unbuffered(self, proc, stream='stdout'):
    """Unbuffered output handler: yield stripped lines from the process.

    :type proc: subprocess.Popen
    :type stream: str -- name of the stream attribute to read ('stdout'
        or 'stderr').

    Spawns the working handler in a background thread (if configured) and
    yields each line of the chosen stream, closing it when done.
    """
    if self.working_handler is not None:
        t = Thread(target=self._handle_process, args=(proc, stream))
        t.start()
    out = getattr(proc, stream)
    try:
        # NOTE(review): iter(out.readline, "") assumes a text-mode stream;
        # a bytes stream would never match the "" sentinel -- confirm the
        # process is opened with universal_newlines/text mode.
        for line in iter(out.readline, ""):
            yield line.rstrip()
    finally:
        out.close()
Unbuffered output handler. :type proc: subprocess.Popen :type stream: six.text_types :return:
def add_user(config, group, username):
    """Add the specified user to the specified LDAP group.

    Known failure modes are reported on stdout rather than raised.
    """
    client = Client()
    client.prepare_connection()
    group_api = API(client)
    try:
        group_api.add_user(group, username)
    except ldap_tools.exceptions.NoGroupsFound:
        print("Group ({}) not found".format(group))
    except ldap_tools.exceptions.TooManyResults:
        print("Query for group ({}) returned multiple results.".format(
            group))
    # NOTE(review): ldap3.TYPE_OR_VALUE_EXISTS is assumed to be an
    # exception type here -- confirm against the ldap3 version in use.
    except ldap3.TYPE_OR_VALUE_EXISTS:
        print("{} already exists in {}".format(username, group))
Add specified user to specified group.
def _try_resolve_sam_resource_id_refs(self, input, supported_resource_id_refs):
    """Try to resolve SAM resource id references on the given template.

    If ``input`` looks like one of the supported intrinsics, the
    appropriate resolution is applied; otherwise ``input`` is returned
    unmodified.

    :param dict input: dictionary that may represent an intrinsic function.
    :param dict supported_resource_id_refs: maps old logical ids to new
        ones.
    :return: modified input dictionary with id references resolved.
    """
    if not self._is_intrinsic_dict(input):
        return input
    # An intrinsic dict has exactly one key: the function name.
    function_type = list(input.keys())[0]
    return self.supported_intrinsics[function_type].resolve_resource_id_refs(input, supported_resource_id_refs)
Try to resolve SAM resource id references on the given template. If the given object looks like one of the supported intrinsics, it calls the appropriate resolution on it. If not, this method returns the original input unmodified. :param dict input: Dictionary that may represent an intrinsic function :param dict supported_resource_id_refs: Dictionary that maps old logical ids to new ones. :return: Modified input dictionary with id references resolved
def addhash(frame, **kw):
    """Add hashes to the given frame (as returned from firsthash).

    Keyword arguments are the same as for ``genhash``. The hashes are
    appended as a new 'hash' field of ``frame['data']`` in place, and the
    frame is returned for convenience.
    """
    hashes = genhash(frame, **kw)
    frame['data'] = rfn.rec_append_fields(frame['data'], 'hash', hashes)
    return frame
Helper function to add hashes to the given frame (the dictionary returned from firsthash). Parameters: ----------- frame : frame to hash. Keywords: --------- same as genhash Returns the frame with hashes added; the addition also happens in place.
def do_counter_reset(self, element, decl, pseudo):
    """Clear (or set) the counters named in a counter-reset declaration.

    Identifier terms name counters; a following number term gives an
    explicit reset value, otherwise the counter is reset to 0.
    """
    step = self.state[self.state['current_step']]
    counter_name = ''
    for term in decl.value:
        if type(term) is ast.WhitespaceToken:
            continue
        elif type(term) is ast.IdentToken:
            # A new identifier flushes the previous pending counter
            # (reset to zero) and becomes the new pending name.
            if counter_name:
                step['counters'][counter_name] = 0
            counter_name = term.value
        elif type(term) is ast.LiteralToken:
            if counter_name:
                step['counters'][counter_name] = 0
            counter_name = ''
        elif type(term) is ast.NumberToken:
            # A number gives an explicit reset value for the pending name.
            if counter_name:
                step['counters'][counter_name] = int(term.value)
            counter_name = ''
        else:
            log(WARN, u"Unrecognized counter-reset term {}"
                .format(type(term)).encode('utf-8'))
    # Flush a trailing pending counter.
    if counter_name:
        step['counters'][counter_name] = 0
Clear specified counters.
def from_text(text, origin=None, rdclass=dns.rdataclass.IN,
              relativize=True, zone_factory=Zone, filename=None,
              allow_include=False, check_origin=True):
    """Build a zone object from a master file format string.

    @param text: the master file format input
    @param origin: the origin of the zone; if not specified, the first
        $ORIGIN statement in the master file determines it.
    @param rdclass: the zone's rdata class; default is class IN.
    @param relativize: should names be relativized? Default True.
    @param zone_factory: the zone factory to use.
    @param filename: filename to emit when describing where an error
        occurred; default '<string>'.
    @param allow_include: is $INCLUDE allowed?
    @param check_origin: should sanity checks of the origin node be done?
        Default True.
    @raises dns.zone.NoSOA: no SOA RR was found at the zone origin
    @raises dns.zone.NoNS: no NS RRset was found at the zone origin
    @rtype: dns.zone.Zone object
    """
    if filename is None:
        filename = '<string>'
    tok = dns.tokenizer.Tokenizer(text, filename)
    reader = _MasterReader(tok, origin, rdclass, relativize, zone_factory,
                           allow_include=allow_include,
                           check_origin=check_origin)
    reader.read()
    return reader.zone
Build a zone object from a master file format string. @param text: the master file format input @type text: string. @param origin: The origin of the zone; if not specified, the first $ORIGIN statement in the master file will determine the origin of the zone. @type origin: dns.name.Name object or string @param rdclass: The zone's rdata class; the default is class IN. @type rdclass: int @param relativize: should names be relativized? The default is True @type relativize: bool @param zone_factory: The zone factory to use @type zone_factory: function returning a Zone @param filename: The filename to emit when describing where an error occurred; the default is '<string>'. @type filename: string @param allow_include: is $INCLUDE allowed? @type allow_include: bool @param check_origin: should sanity checks of the origin node be done? The default is True. @type check_origin: bool @raises dns.zone.NoSOA: No SOA RR was found at the zone origin @raises dns.zone.NoNS: No NS RRset was found at the zone origin @rtype: dns.zone.Zone object
def general_node_label(self, node):
    """Return a short text description of a graph node (for debugging)."""
    attrs = self.G.node[node]
    if attrs['is_event']:
        return 'event type=' + attrs['type']
    return 'entity text=' + attrs['text']
Used for debugging - gives a short text description of a graph node.
def use_comparative_vault_view(self):
    """Switch this session to the comparative view.

    The returns from the lookup methods may then omit or translate
    elements based on this session (such as authorization) and not result
    in an error. Used when greater interoperability is desired at the
    expense of precision.

    *compliance: mandatory -- This method must be implemented.*
    """
    self._catalog_view = COMPARATIVE
    if self._catalog_session is not None:
        # Keep the underlying catalog session's view in sync.
        self._catalog_session.use_comparative_catalog_view()
The returns from the lookup methods may omit or translate elements based on this session, such as authorization, and not result in an error. This view is used when greater interoperability is desired at the expense of precision. *compliance: mandatory -- This method is must be implemented.*
def get_table_cache_key(db_alias, table):
    """Generate a cache key from a SQL table.

    :arg db_alias: alias of the used database
    :arg table: name of the SQL table
    :return: a SHA1 hex-digest cache key
    :rtype: str
    """
    raw_key = '{}:{}'.format(db_alias, table)
    return sha1(raw_key.encode('utf-8')).hexdigest()
Generates a cache key from a SQL table. :arg db_alias: Alias of the used database :type db_alias: str or unicode :arg table: Name of the SQL table :type table: str or unicode :return: A cache key (SHA1 hex digest) :rtype: str
def concat(dfs):
    """Concatenate a series of `pyam.IamDataFrame`-like objects together.

    :param dfs: non-string iterable of IamDataFrame (or castable) objects.
    :raises TypeError: if ``dfs`` is a string or not iterable.
    """
    if isstr(dfs) or not hasattr(dfs, '__iter__'):
        msg = 'Argument must be a non-string iterable (e.g., list or tuple)'
        raise TypeError(msg)
    _df = None
    for df in dfs:
        # Cast anything that is not already an IamDataFrame.
        df = df if isinstance(df, IamDataFrame) else IamDataFrame(df)
        if _df is None:
            # Deep-copy so the caller's first frame is not mutated by the
            # in-place appends below.
            _df = copy.deepcopy(df)
        else:
            _df.append(df, inplace=True)
    return _df
Concatenate a series of `pyam.IamDataFrame`-like objects together
def deploy_gateway(collector):
    """Deploy the API gateway to a particular stage."""
    configuration = collector.configuration
    aws_syncr = configuration['aws_syncr']
    aws_syncr, amazon, stage, gateway = find_gateway(aws_syncr, configuration)
    gateway.deploy(aws_syncr, amazon, stage)
    if not configuration['amazon'].changes:
        log.info("No changes were made!!")
Deploy the apigateway to a particular stage
def list_files(tag=None, sat_id=None, data_path=None, format_str=None):
    """Produce a list of ICON EUV files.

    ``tag`` selects the data level ('level_1' or 'level_2'); ``sat_id`` is
    accepted for interface compatibility but unused here.

    Notes
    -----
    Currently fixed to level-2 style file names.
    """
    # desc is reserved for an optional descriptor; currently always None.
    desc = None
    level = tag
    if level == 'level_1':
        code = 'L1'
        desc = None
    elif level == 'level_2':
        code = 'L2'
        desc = None
    else:
        raise ValueError('Unsupported level supplied: ' + level)
    if format_str is None:
        format_str = 'ICON_' + code + '_EUV_Daytime'
        if desc is not None:
            format_str += '_' + desc + '_'
        format_str += '_{year:4d}-{month:02d}-{day:02d}_v{version:02d}r{revision:03d}.NC'
    return pysat.Files.from_os(data_path=data_path, format_str=format_str)
Produce a list of ICON EUV files. Notes ----- Currently fixed to level-2
def empty_topic(self, topic):
    """Empty all the queued messages for an existing topic.

    :param topic: the topic name (validated before the request is made).
    """
    nsq.assert_valid_topic_name(topic)
    return self._request('POST', '/topic/empty', fields={'topic': topic})
Empty all the queued messages for an existing topic.
def create_image(self, instance_id, name, description=None, no_reboot=False):
    """Create an AMI from an instance in the running or stopped state.

    :type instance_id: string
    :param instance_id: the ID of the instance to image.
    :type name: string
    :param name: the name of the new image.
    :type description: string
    :param description: optional human-readable description of the AMI.
    :type no_reboot: bool
    :param no_reboot: if True, do not attempt to shut down the instance
        before bundling; file system integrity is then the owner's
        responsibility.
    :rtype: string
    :return: the new image id.
    """
    params = {'InstanceId': instance_id, 'Name': name}
    if description:
        params['Description'] = description
    if no_reboot:
        params['NoReboot'] = 'true'
    img = self.get_object('CreateImage', params, Image, verb='POST')
    return img.id
Will create an AMI from the instance in the running or stopped state. :type instance_id: string :param instance_id: the ID of the instance to image. :type name: string :param name: The name of the new image :type description: string :param description: An optional human-readable string describing the contents and purpose of the AMI. :type no_reboot: bool :param no_reboot: An optional flag indicating that the bundling process should not attempt to shutdown the instance before bundling. If this flag is True, the responsibility of maintaining file system integrity is left to the owner of the instance. :rtype: string :return: The new image id
def _get_child_mock(mock, **kw):
    """Intercept child-mock creation and raise instead.

    Raises AttributeError naming the attribute access (``.name``) or call
    (``()``) that would have created the child mock.
    """
    suffix = "." + kw["name"] if "name" in kw else "()"
    raise AttributeError(_extract_mock_name(mock) + suffix)
Intercepts call to generate new mocks and raises instead
def medial_wall_to_nan(in_file, subjects_dir, target_subject, newpath=None):
    """Convert values on the medial wall to NaNs.

    Only applies to fsaverage-family targets (names starting with 'fs');
    other inputs are returned unchanged. Vertices not listed in the
    subject's ``?h.cortex.label`` are treated as medial wall.
    """
    import nibabel as nb
    import numpy as np
    import os

    fn = os.path.basename(in_file)
    if not target_subject.startswith('fs'):
        return in_file
    # fn[:2] is the hemisphere prefix ('lh' or 'rh').
    cortex = nb.freesurfer.read_label(os.path.join(
        subjects_dir, target_subject, 'label',
        '{}.cortex.label'.format(fn[:2])))
    func = nb.load(in_file)
    # Indices NOT present in the cortex label are medial-wall vertices.
    medial = np.delete(np.arange(len(func.darrays[0].data)), cortex)
    for darray in func.darrays:
        darray.data[medial] = np.nan
    out_file = os.path.join(newpath or os.getcwd(), fn)
    func.to_filename(out_file)
    return out_file
Convert values on medial wall to NaNs
def publishToOther(self, roomId, name, data):
    """Publish only to people in the room other than myself."""
    recipients = [member for member in self.getRoom(roomId)
                  if member is not self]
    self.publishToRoom(roomId, name, data, recipients)
Publish to only other people than myself
def set_inteface_up(ifindex, auth, url, devid=None, devip=None):
    """Issue a RESTful call to "undo shut" (bring up) an interface on the
    target device.

    :param ifindex: int or str value of the target interface.
    :param auth: requests auth object (usually auth.creds from
        pyhpeimc.auth).
    :param url: base url of the IMC RS interface (usually auth.url).
    :param devid: int or str value of the target device.
    :param devip: ipv4 address of the target device; if given it is
        resolved to ``devid``.
    :return: HTTP status code (204 on success), or an error string when
        the request itself fails.
    :rtype: int
    """
    if devip is not None:
        devid = get_dev_details(devip, auth, url)['id']
    set_int_up_url = ("/imcrs/plat/res/device/" + str(devid) +
                      "/interface/" + str(ifindex) + "/up")
    f_url = url + set_int_up_url
    try:
        response = requests.put(f_url, auth=auth, headers=HEADERS)
        # Return the status code for every response (previously only 204
        # was returned, so failures silently produced None).
        return response.status_code
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " set_inteface_up: An Error has occured"
function takes the devid and ifindex of a specific device and interface and issues a RESTFUL call to "undo shut" the specified interface on the target device. :param devid: int or str value of the target device :param devip: ipv4 address of the target devices :param ifindex: int or str value of the target interface :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :return: HTTP status code 204 with no values. :rtype: int >>> from pyhpeimc.auth import * >>> from pyhpeimc.plat.device import * >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> int_down_response = set_interface_down( '9', auth.creds, auth.url, devid = '10') 204 >>> int_up_response = set_inteface_up( '9', auth.creds, auth.url, devid = '10') >>> int_down_response = set_interface_down( '9', auth.creds, auth.url, devid = '10') 204 >>> int_up_response = set_inteface_up('9', auth.creds, auth.url, devip = '10.101.0.221') >>> assert type(int_up_response) is int >>> assert int_up_response is 204
def is_mention_line(cls, word):
    """Detect links and mentions.

    :param word: token to be evaluated.
    :return: True if the token is an @-mention or an http(s) link.
    """
    # str.startswith accepts a tuple of prefixes; equivalent to the
    # previous if/elif chain.
    return word.startswith(('@', 'http://', 'https://'))
Detects links and mentions :param word: Token to be evaluated
def wploader(self):
    """Per-sysid waypoint loader, created lazily on first access."""
    if self.target_system not in self.wploader_by_sysid:
        self.wploader_by_sysid[self.target_system] = mavwp.MAVWPLoader()
    return self.wploader_by_sysid[self.target_system]
per-sysid wploader
def get_version(version):
    """Return a PEP 440-compliant version number from VERSION.

    ``version`` is a 5-tuple like ``(major, minor, micro, stage, serial)``
    where ``stage`` is one of 'final', 'dev', 'post', 'a', 'b' or 'rc'.
    Created by modifying django.utils.version.get_version.
    """
    assert len(version) == 5
    # Drop a trailing zero micro version: (1, 2, 0, ...) -> "1.2".
    parts = version[:2] if version[2] == 0 else version[:3]
    major = '.'.join(str(part) for part in parts)
    if version[3] == 'final':
        return major
    sub = ''.join(str(part) for part in version[3:5])
    if version[3] == 'dev':
        # Prefer the git changeset timestamp for dev builds.
        timestamp = get_git_changeset()
        sub = 'dev%s' % (timestamp if timestamp else version[4])
        return '%s.%s' % (major, sub)
    if version[3] == 'post':
        return '%s.%s' % (major, sub)
    if version[3] in ('a', 'b', 'rc'):
        return '%s%s' % (major, sub)
    raise ValueError('Invalid version: %s' % str(version))
Returns a PEP 440-compliant version number from VERSION. Created by modifying django.utils.version.get_version
def _scalar_coef_op_left(func):
    """Decorator for operator overloading with ScalarCoefs on the left.

    The wrapped operator accepts either another ScalarCoefs (whose
    coefficient vector must have the same length) or a plain number.

    :raises ValueError: if the two ScalarCoefs sizes mismatch.
    :raises TypeError: if the right operand is neither ScalarCoefs nor a
        number.
    """
    @wraps(func)
    def verif(self, scoef):
        if isinstance(scoef, ScalarCoefs):
            if len(self._vec) == len(scoef._vec):
                return ScalarCoefs(func(self, self._vec, scoef._vec),
                                   self.nmax, self.mmax)
            else:
                raise ValueError(err_msg['SC_sz_msmtch'] % \
                                 (self.nmax, self.mmax,
                                  scoef.nmax, scoef.mmax))
        elif isinstance(scoef, numbers.Number):
            return ScalarCoefs(func(self, self._vec, scoef),
                               self.nmax, self.mmax)
        else:
            raise TypeError(err_msg['no_combi_SC'])
    return verif
decorator for operator overloading when ScalarCoef is on the left
def rasterToWKB(cls, rasterPath, srid, noData, raster2pgsql):
    """Convert a raster file to Well Known Binary text using the
    raster2pgsql executable that comes with PostGIS (the format rasters
    are stored in inside a PostGIS database).

    :param rasterPath: path of the raster file to convert.
    :param srid: spatial reference identifier, passed to ``-s``.
    :param noData: no-data value, passed to ``-N``.
    :param raster2pgsql: path to the raster2pgsql executable.
    :return: the WKB text extracted from the generated SQL.
    :raises RuntimeError: if raster2pgsql produced no SQL output.
    """
    raster2pgsqlProcess = subprocess.Popen(
        [raster2pgsql, '-s', srid, '-N', noData, rasterPath, 'n_a'],
        stdout=subprocess.PIPE)
    sql, error = raster2pgsqlProcess.communicate()
    if not sql:
        # A bare `raise` here had no active exception to re-raise and
        # itself produced an unhelpful RuntimeError; raise explicitly
        # with the captured error output instead.
        raise RuntimeError(
            'raster2pgsql produced no output: {}'.format(error))
    # The WKB is the first single-quoted token of the generated SQL.
    wellKnownBinary = sql.split("'")[1]
    return wellKnownBinary
Accepts a raster file and converts it to Well Known Binary text using the raster2pgsql executable that comes with PostGIS. This is the format that rasters are stored in a PostGIS database.
def write_extra_resources(self, compile_context):
    """Produce plugin and annotation processor files for the target.

    Overrides write_extra_resources: Scalac/Javac plugin targets get a
    plugin-info file; annotation processors get a processor-info file.
    """
    target = compile_context.target
    if isinstance(target, ScalacPlugin):
        self._write_scalac_plugin_info(compile_context.classes_dir.path, target)
    elif isinstance(target, JavacPlugin):
        self._write_javac_plugin_info(compile_context.classes_dir.path, target)
    elif isinstance(target, AnnotationProcessor) and target.processors:
        processor_info_file = os.path.join(compile_context.classes_dir.path,
                                           _PROCESSOR_INFO_FILE)
        self._write_processor_info(processor_info_file, target.processors)
Override write_extra_resources to produce plugin and annotation processor files.
def to_binary(self, filename):
    """Write the parameter ensemble to a jco-style binary file.

    Parameters
    ----------
    filename : str
        the filename to write

    Returns
    -------
    None

    Note
    ----
    Back-transforms in place with respect to log10 before writing, then
    re-applies the transform afterwards, leaving the ensemble unchanged.
    """
    retrans = False
    if self.istransformed:
        self._back_transform(inplace=True)
        retrans = True
    if self.isnull().values.any():
        warnings.warn("NaN in par ensemble", PyemuWarning)
    self.as_pyemu_matrix().to_coo(filename)
    if retrans:
        self._transform(inplace=True)
write the parameter ensemble to a jco-style binary file Parameters ---------- filename : str the filename to write Returns ------- None Note ---- this function back-transforms inplace with respect to log10 before writing
def weekdays(first_day=None):
    """Return a list of the seven weekday names.

    Arguments
    ---------
    first_day : str, default None
        The first day of the week. If not given, 'Monday' is used.

    Returns
    -------
    list
        Weekday names starting at ``first_day``.
    """
    if first_day is None:
        first_day = 'Monday'
    offset = _lower_weekdays().index(first_day.lower())
    # The doubled list lets the 7-day window wrap past the end.
    return _double_weekdays()[offset:offset + 7]
Returns a list of weekday names. Arguments --------- first_day : str, default None The first day of the week. If not given, 'Monday' is used. Returns ------- list A list of weekday names.
def register(self, name, fun, description=None):
    """Register a function on this service under ``name``.

    :param name: public name of the method.
    :param fun: callable to expose.
    :param description: optional human-readable description.
    """
    self.methods[name], self.descriptions[name] = fun, description
Register function on this service
def get_all_fix_names(fixer_pkg, remove_prefix=True):
    """Return a sorted list of all available fix names in the given package."""
    pkg = __import__(fixer_pkg, [], [], ["*"])
    fixer_dir = os.path.dirname(pkg.__file__)
    names = []
    for entry in sorted(os.listdir(fixer_dir)):
        if not (entry.startswith("fix_") and entry.endswith(".py")):
            continue
        stem = entry[:-3]  # strip ".py"
        names.append(stem[4:] if remove_prefix else stem)
    return names
Return a sorted list of all available fix names in the given package.
def __get_obj_by_discriminator(self, payload, obj_type):
    """Get the correct subclass using the discriminator in the payload.

    :param payload: payload for deserialization.
    :param obj_type: parent class to deserialize the payload into.
    :return: subclass of the parent class resolved from the discriminator.
    :raises SerializationException: if the discriminator cannot be
        resolved to a child model.
    """
    obj_cast = cast(Any, obj_type)
    namespaced_class_name = obj_cast.get_real_child_model(payload)
    if not namespaced_class_name:
        raise SerializationException(
            "Couldn't resolve object by discriminator type "
            "for {} class".format(obj_type))
    return self.__load_class_from_name(namespaced_class_name)
Get correct subclass instance using the discriminator in payload. :param payload: Payload for deserialization :type payload: str :param obj_type: parent class for deserializing payload into :type obj_type: Union[object, str] :return: Subclass of provided parent class, that resolves to the discriminator in payload. :rtype: object :raises: :py:class:`ask_sdk_core.exceptions.SerializationException`
def add_listener(self, callback, event_type=None):
    """Add a listener that is called back when the client receives an event.

    Args:
        callback (func(roomchunk)): callback called when an event arrives.
        event_type (str): the event_type to filter for.

    Returns:
        uuid.UUID: unique id of the listener, can be used to identify it.
    """
    uid = uuid4()
    self.listeners.append({
        'uid': uid,
        'callback': callback,
        'event_type': event_type,
    })
    return uid
Add a listener that will send a callback when the client recieves an event. Args: callback (func(roomchunk)): Callback called when an event arrives. event_type (str): The event_type to filter for. Returns: uuid.UUID: Unique id of the listener, can be used to identify the listener.
def _if_not_freed(f):
    """Decorator: run the method only if the memory view hasn't been closed.

    Raises OSError when the wrapping object has already been freed.
    """
    @add_signature_to_docstring(f)
    @functools.wraps(f)
    def f_(self, *args, **kwargs):
        if self._freed:
            raise OSError
        return f(self, *args, **kwargs)
    return f_
Run the method iff. the memory view hasn't been closed.
def init_app(self, app):
    """Initialize an application for use with the OpenERP server.

    Sets default ``OPENERP_*`` config values, exposes
    ``get_data_from_record`` to Jinja templates, connects a default
    client, and installs the before-request hook.
    """
    app.config.setdefault('OPENERP_SERVER', 'http://localhost:8069')
    app.config.setdefault('OPENERP_DATABASE', 'openerp')
    app.config.setdefault('OPENERP_DEFAULT_USER', 'admin')
    app.config.setdefault('OPENERP_DEFAULT_PASSWORD', 'admin')
    app.jinja_env.globals.update(
        get_data_from_record=get_data_from_record
    )
    cnx = Client(
        server=app.config['OPENERP_SERVER'],
        db=app.config['OPENERP_DATABASE'],
        user=app.config['OPENERP_DEFAULT_USER'],
        password=app.config['OPENERP_DEFAULT_PASSWORD']
    )
    self.default_user = cnx.user
    app.before_request(self.before_request)
This callback can be used to initialize an application for use with the OpenERP server.
def Process(self, parser_mediator, plist_name, top_level, **kwargs):
    """Check if it is a valid Apple account plist file name.

    Args:
        parser_mediator (ParserMediator): mediates interactions between
            parsers and other components, such as storage and dfvfs.
        plist_name (str): name of the plist.
        top_level (dict[str, object]): plist top-level key.

    Raises:
        errors.WrongPlistPlugin: if the plist name does not start with
            ``PLIST_PATH``.
    """
    if not plist_name.startswith(self.PLIST_PATH):
        raise errors.WrongPlistPlugin(self.NAME, plist_name)
    # Delegate to the base class using the canonical plist path.
    super(AppleAccountPlugin, self).Process(
        parser_mediator, plist_name=self.PLIST_PATH, top_level=top_level)
Check if it is a valid Apple account plist file name. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. plist_name (str): name of the plist. top_level (dict[str, object]): plist top-level key.
def observer_update(self, message):
    """Called when an update from an observer is received.

    Forwards one JSON message per item, grouped by whether the item was
    added, changed or removed.
    """
    for change_type in ('added', 'changed', 'removed'):
        for item in message[change_type]:
            payload = {
                'msg': change_type,
                'observer': message['observer'],
                'primary_key': message['primary_key'],
                'order': item['order'],
                'item': item['data'],
            }
            self.send_json(payload)
Called when update from observer is received.
def modify_phonetic_representation(self, phonetic_representation):
    """Return a compact phonetic representation given a CMUdict-formatted
    representation.

    :param list phonetic_representation: a phonetic representation in
        standard CMUdict formatting, i.e. a list of phonemes like
        ['HH', 'EH0', 'L', 'OW1'].
    :returns: a string where each multi-character phoneme is mapped to a
        single ascii character, which is useful for calculating phonetic
        similarity scores. Note the input list is modified in place.
    """
    # Strip stress digits, e.g. 'EH0' -> 'EH'. Raw string fixes the
    # invalid '\d' escape sequence flagged by modern Pythons.
    for i in range(len(phonetic_representation)):
        phonetic_representation[i] = re.sub(r'\d+', '',
                                            phonetic_representation[i])
    multis = ['AA', 'AE', 'AH', 'AO', 'AW', 'AY', 'CH', 'DH', 'EH', 'ER',
              'EY', 'HH', 'IH', 'IY', 'JH', 'NG', 'OW', 'OY', 'SH', 'TH',
              'UH', 'UW', 'ZH']
    singles = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',
               'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w']
    # Map each multi-character phoneme to its single-character stand-in;
    # single-character phonemes (e.g. 'L', 'T') pass through unchanged.
    for i in range(len(phonetic_representation)):
        if phonetic_representation[i] in multis:
            phonetic_representation[i] = \
                singles[multis.index(phonetic_representation[i])]
    phonetic_representation = ''.join(phonetic_representation)
    return phonetic_representation
Returns a compact phonetic representation given a CMUdict-formatted representation. :param list phonetic_representation: a phonetic representation in standard CMUdict formatting, i.e. a list of phonemes like ['HH', 'EH0', 'L', 'OW1'] :returns: A string representing a custom phonetic representation, where each phoneme is mapped to a single ascii character. Changing the phonetic representation from a list to a string is useful for calculating phonetic simlarity scores.
def _connect_docker(spec):
    """Return ContextService arguments for a Docker connection."""
    return {
        'method': 'docker',
        'kwargs': {
            'username': spec.remote_user(),
            'container': spec.remote_addr(),
            'python_path': spec.python_path(),
            # Prefer the SSH timeout when set; fall back to the generic one.
            'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(),
            'remote_name': get_remote_name(spec),
        }
    }
Return ContextService arguments for a Docker connection.
def GetNumCoresOnHosts(hosts, private_key):
    """Return a list of the number of cores for each host requested in hosts.

    Hosts reporting a non-integer core count contribute 0.
    """
    results = runner.Runner(host_list=hosts, private_key=private_key,
                            module_name='setup').run()
    num_cores_list = []
    # .items() works on both Python 2 and 3; iteritems() is Python-2 only.
    for _, props in results['contacted'].items():
        cores = props['ansible_facts']['ansible_processor_cores']
        val = 0
        try:
            val = int(cores)
        except ValueError:
            pass
        num_cores_list.append(val)
    return num_cores_list
Returns list of the number of cores for each host requested in hosts.
def _handle_actionpush(self, length):
    """Handle the ActionPush action.

    Reads ``length`` bytes worth of push records from the source stream
    and yields one ActionPush object per record, with the decoded value
    stored under an attribute named after its type.
    """
    init_pos = self._src.tell()
    while self._src.tell() < init_pos + length:
        obj = _make_object("ActionPush")
        obj.Type = unpack_ui8(self._src)
        # Maps the record type code to (attribute name, value reader).
        push_types = {
            0: ("String", self._get_struct_string),
            1: ("Float", lambda: unpack_float(self._src)),
            2: ("Null", lambda: None),
            4: ("RegisterNumber", lambda: unpack_ui8(self._src)),
            5: ("Boolean", lambda: unpack_ui8(self._src)),
            6: ("Double", lambda: unpack_double(self._src)),
            7: ("Integer", lambda: unpack_ui32(self._src)),
            8: ("Constant8", lambda: unpack_ui8(self._src)),
            9: ("Constant16", lambda: unpack_ui16(self._src)),
        }
        name, func = push_types[obj.Type]
        setattr(obj, name, func())
        yield obj
Handle the ActionPush action.
def random_sample(self):
    """Create one random point within the bounds of the space.

    Returns
    -------
    ndarray
        Array of shape ``(dim,)`` with one uniformly drawn coordinate per
        bounded dimension, in the order of ``self._bounds``.
    """
    sample = np.empty((1, self.dim))
    for col, (lower, upper) in enumerate(self._bounds):
        # Draw each coordinate in the bounds order so the RNG call
        # sequence matches one uniform() call per dimension.
        sample[0, col] = self.random_state.uniform(lower, upper, size=1)
    return sample.ravel()
Creates random points within the bounds of the space. Returns ---------- data: ndarray [num x dim] array points with dimensions corresponding to `self._keys` Example ------- >>> target_func = lambda p1, p2: p1 + p2 >>> pbounds = {'p1': (0, 1), 'p2': (1, 100)} >>> space = TargetSpace(target_func, pbounds, random_state=0) >>> space.random_points(1) array([[ 55.33253689, 0.54488318]])
def GetArtifactsForCollection(os_name, artifact_list):
    """Wrapper for the ArtifactArranger.

    Extends the artifact list by dependencies and sorts the artifacts so
    that collecting them in the returned order resolves all dependencies.

    Args:
        os_name: String specifying the OS name.
        artifact_list: List of requested artifact names.

    Returns:
        A dependency-ordered list of artifact names.
    """
    return ArtifactArranger(os_name, artifact_list).GetArtifactsInProperOrder()
Wrapper for the ArtifactArranger. Extend the artifact list by dependencies and sort the artifacts to resolve the dependencies. Args: os_name: String specifying the OS name. artifact_list: List of requested artifact names. Returns: A list of artifacts such that if they are collected in the given order their dependencies are resolved.
def set_metadata(candidates, traces, dependencies, pythons):
    """Add "metadata" (markers and Python-version specifiers) to candidates
    based on the dependency tree.

    :param candidates: key-candidate mapping; candidates get their markers
        set in place.
    :param traces: graph trace (from `traces.trace_graph`) describing
        dependency relationships between candidates.
    :param dependencies: key-collection mapping of what dependencies each
        candidate requested.
    :param pythons: key-str mapping of Requires-Python information.
    """
    metasets_mapping = _calculate_metasets_mapping(
        # deepcopy: the mapping computation consumes/mutates the traces.
        dependencies, pythons, copy.deepcopy(traces),
    )
    for key, candidate in candidates.items():
        candidate.markers = _format_metasets(metasets_mapping[key])
Add "metadata" to candidates based on the dependency tree. Metadata for a candidate includes markers and a specifier for Python version requirements. :param candidates: A key-candidate mapping. Candidates in the mapping will have their markers set. :param traces: A graph trace (produced by `traces.trace_graph`) providing information about dependency relationships between candidates. :param dependencies: A key-collection mapping containing what dependencies each candidate in `candidates` requested. :param pythons: A key-str mapping containing Requires-Python information of each candidate. Keys in mappings and entries in the trace are identifiers of a package, as implemented by the `identify` method of the resolver's provider. The candidates are modified in-place.
def get_fn_plan(callback=None, out_callback=None, name='pycbc_cufft',
                parameters=None):
    """Get the IFFT execute and plan functions from a freshly compiled library.

    :param callback: optional input-callback source rendered into the kernel
    :param out_callback: optional output-callback source
    :param name: basename for the compiled library
    :param parameters: extra template parameters (defaults to an empty list)
    :returns: tuple ``(fn, plan)`` of ctypes function pointers
    """
    if parameters is None:
        parameters = []
    source = fftsrc.render(input_callback=callback,
                           output_callback=out_callback,
                           parameters=parameters)
    path = compile(source, name)
    lib = ctypes.cdll.LoadLibrary(path)
    fn = lib.execute
    fn.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
                   ctypes.c_void_p, ctypes.c_void_p]
    plan = lib.create_plan
    plan.restype = ctypes.c_void_p
    # BUG FIX: was `plan.argyptes` — a typo that silently created an unused
    # attribute, leaving create_plan's argument types undeclared.
    plan.argtypes = [ctypes.c_uint]
    return fn, plan
Get the IFFT execute and plan functions
def write_padding_bits(buff, version, length):
    """Pad the data stream with zero bits up to the next codeword boundary.

    :param buff: The bit buffer (list-like of 0/1 ints).
    :param version: The code version; versions M1 and M3 receive no
        byte-boundary padding here.
    :param int length: Data stream length in bits.
    """
    if version not in (consts.VERSION_M1, consts.VERSION_M3):
        # BUG FIX: `8 - (length % 8)` appended a full byte of zeros when the
        # stream was already byte-aligned; `-length % 8` appends nothing in
        # that case and the correct remainder otherwise.
        buff.extend([0] * (-length % 8))
Writes zero padding bits if the data stream does not end on a codeword (byte) boundary. :param buff: The byte buffer. :param int version: The code version; versions M1 and M3 receive no padding here. :param int length: Data stream length in bits.
def canparse(argparser, args):
    """Return True when *argparser* accepts *args*, False otherwise.

    Temporarily swaps the parser's ``error`` hook for one that raises
    ValueError, so a failed parse does not exit the process; the hook is
    always restored afterwards.
    """
    saved_error = argparser.error
    argparser.error = _raise_ValueError
    try:
        argparser.parse_args(args)
        return True
    except ValueError:
        return False
    finally:
        argparser.error = saved_error
Determines if argparser can parse args.
def touch(args):
    """
    find . -type l | %prog touch

    Linux commands `touch` wouldn't modify mtime for links, this script can.
    Use find to pipe in all the symlinks.
    """
    # The docstring above doubles as the usage text shown by OptionParser.
    parser = OptionParser(touch.__doc__)
    opts, args = parser.parse_args(args)

    for link_name in sys.stdin:
        link_name = link_name.strip()
        # Skip anything that is not a symlink, and broken symlinks.
        if not (op.islink(link_name) and op.exists(link_name)):
            continue
        # Re-create the link pointing at its resolved target to bump mtime.
        lnsf(get_abs_path(link_name), link_name)
find . -type l | %prog touch Linux commands `touch` wouldn't modify mtime for links, this script can. Use find to pipe in all the symlinks.
def load_model_by_id(self, model_id):
    """Load the graph model stored as ``<model_id>.json`` under ``self.path``.

    Parameters
    ----------
    model_id : int
        Model index.

    Returns
    -------
    Graph
        The model graph representation.
    """
    model_file = os.path.join(self.path, "{0}.json".format(model_id))
    with open(model_file) as fin:
        # Strip embedded newlines before handing the JSON string over.
        json_str = fin.read().replace("\n", "")
    return json_to_graph(json_str)
Get the model by model_id Parameters ---------- model_id : int model index Returns ------- load_model : Graph the model graph representation
async def toggle(self):
    """Toggle playback between pause and resume.

    No-op unless the player state is 'ready' and a streamer exists.
    Errors from the underlying player are logged, not raised.
    """
    self.logger.debug("toggle command")
    # Guard clauses: nothing to toggle without a ready state and a streamer.
    if self.state != 'ready':
        return
    if self.streamer is None:
        return
    try:
        if self.streamer.is_playing():
            await self.pause()
        else:
            await self.resume()
    except Exception as e:
        # FIX: was the module-level `logger` while the rest of the method
        # used `self.logger`; also dropped a redundant trailing `pass`.
        self.logger.error(e)
Toggles between pause and resume command
def collect_static() -> bool:
    """Run Django's ``collectstatic`` command in silent mode.

    :return: always ``True``
    """
    from django.core.management import execute_from_command_line

    wf('Collecting static files... ', False)
    argv = ['./manage.py', 'collectstatic', '-c', '--noinput', '-v0']
    execute_from_command_line(argv)
    wf('[+]\n')
    return True
Runs Django ``collectstatic`` command in silent mode. :return: always ``True``
def mangle_mako_loop(node, printer):
    """Convert a ``for`` loop into one wrapped in a loop-context manager
    when access to the ``loop`` variable is detected in the loop body.

    Returns the (possibly rewritten) statement text for the loop line.
    """
    detector = LoopVariable()
    node.accept_visitor(detector)
    if not detector.detected:
        # No `loop` usage: emit the original text untouched.
        return node.text
    node.nodes[-1].has_loop_context = True
    match = _FOR_LOOP.match(node.text)
    if not match:
        raise SyntaxError("Couldn't apply loop context: %s" % node.text)
    printer.writelines(
        'loop = __M_loop._enter(%s)' % match.group(2),
        'try:'
    )
    return 'for %s in loop:' % match.group(1)
converts a for loop into a context manager wrapped around a for loop when access to the `loop` variable has been detected in the for loop body
def call_fsm(method):
    """Lift an FSM-level operation to regular-expression objects.

    Returns a function that converts each lego argument to an FSM over a
    shared alphabet, applies the same-named function from ``fsm.fsm``,
    and converts the resulting FSM back to a regular expression.
    """
    from functools import wraps

    fsm_method = getattr(fsm.fsm, method.__name__)

    # FIX: wrap with functools.wraps so the returned function keeps the
    # original method's name and docstring instead of "new_method".
    @wraps(method)
    def new_method(*legos):
        # Union of every operand's alphabet so the FSMs are compatible.
        alphabet = set().union(*[lego.alphabet() for lego in legos])
        return from_fsm(
            fsm_method(*[lego.to_fsm(alphabet) for lego in legos]))
    return new_method
Take a method which acts on 0 or more regular expression objects... return a new method which simply converts them all to FSMs, calls the FSM method on them instead, then converts the result back to a regular expression. We do this for several of the more annoying operations.
def permission_denied(self, request, message=None):
    """Raise the appropriate exception for a request that is not permitted.

    Unauthenticated requests get NotAuthenticated; authenticated ones
    get PermissionDenied carrying *message* as detail.
    """
    if request.successful_authenticator:
        raise exceptions.PermissionDenied(detail=message)
    raise exceptions.NotAuthenticated()
If request is not permitted, determine what kind of exception to raise.
def server(self):
    """Return the size in bytes of the remote file at ``self.registry``.

    Falls back to a single-space string when the URL cannot be opened or
    the Content-Length header is absent.

    NOTE(review): returning the str sentinel " " from a size accessor is
    surprising — confirm callers rely on it before changing to e.g. 0.
    """
    try:
        tar = urllib2.urlopen(self.registry)
        meta = tar.info()
        # First Content-Length header value, converted to int.
        return int(meta.getheaders("Content-Length")[0])
    except (urllib2.URLError, IndexError):
        # IndexError covers a missing Content-Length header.
        return " "
Returns the size of remote files
def control_low_limit(self) -> Optional[Union[int, float]]:
    """Control low limit setting for a special sensor.

    For LS-10/LS-20 base units only.
    """
    limit = self._get_field_value(SpecialDevice.PROP_CONTROL_LOW_LIMIT)
    return limit
Control low limit setting for a special sensor. For LS-10/LS-20 base units only.
def get_usages(self):
    """Return a dictionary mapping full usage Ids to their plain values."""
    return {key: usage.value for key, usage in self.items()}
Return a dictionary mapping full usages Ids to plain values
def _process_mark_toggle(self, p_todo_id, p_force=None): if p_force in ['mark', 'unmark']: action = p_force else: action = 'mark' if p_todo_id not in self.marked_todos else 'unmark' if action == 'mark': self.marked_todos.add(p_todo_id) return True else: self.marked_todos.remove(p_todo_id) return False
Adds p_todo_id to marked_todos attribute and returns True if p_todo_id is not already marked. Removes p_todo_id from marked_todos and returns False otherwise. p_force parameter accepting 'mark' or 'unmark' values, if set, can force desired action without checking p_todo_id presence in marked_todos.
def detect_aromatic_rings_in_ligand(self):
    """Detect aromatic rings in the ligand with RDKit and store them.

    Keeps rings of 5-6 atoms whose members are all aromatic, recording
    the ring atoms' names in ``self.ligrings`` (ring index -> list of
    atom names) and the total ring count in ``self.ligand_ring_num``.
    """
    self.ligrings = {}
    try:
        ring_info = self.topology_data.mol.GetRingInfo()
        self.ligand_ring_num = ring_info.NumRings()
    except Exception as e:
        # Fallback: re-read the ligand from "lig.pdb" when the stored
        # molecule cannot provide ring info.
        m = Chem.MolFromPDBFile("lig.pdb")
        ring_info = m.GetRingInfo()
        self.ligand_ring_num = ring_info.NumRings()
    i=0
    for ring in range(self.ligand_ring_num):
        # Size 5-6 and every member atom flagged aromatic by RDKit.
        if 4 < len(ring_info.AtomRings()[ring]) <= 6 and False not in [self.topology_data.mol.GetAtomWithIdx(x).GetIsAromatic() for x in ring_info.AtomRings()[ring]]:
            atom_ids_in_ring = []
            for atom in ring_info.AtomRings()[ring]:
                # Translate RDKit atom index to the universe's atom name.
                atom_ids_in_ring.append(self.topology_data.universe.ligand.atoms[atom].name)
            self.ligrings[i]=atom_ids_in_ring
            i+=1
Using rdkit to detect aromatic rings in ligand - size 4-6 atoms and all atoms are part of the ring. Saves this data in self.ligrings.
def lines_diff(before_lines, after_lines, check_modified=False):
    """Diff two iterables of lines.

    Each line is wrapped in a LineComparator (honoring *check_modified*)
    before being handed to ``diff``; returns its list of diff dicts.
    """
    before = [LineComparator(line, check_modified=check_modified)
              for line in before_lines]
    after = [LineComparator(line, check_modified=check_modified)
             for line in after_lines]
    return diff(before, after, check_modified=check_modified)
Diff the lines of two iterables. Parameters ---------- before_lines : iterable Iterable containing lines used as the baseline version. after_lines : iterable Iterable containing lines to be compared against the baseline. check_modified : bool, optional Whether the comparison should also detect modified lines, not only added/removed ones. Returns ------- diff_result : A list of dictionaries containing diff information.
def _run_command(self, cmdline): try: if self.verbose: print(cmdline) subprocess.check_call(cmdline, shell=True) except subprocess.CalledProcessError: print('when running: ', cmdline) raise
Run a subcommand, quietly. Prints the full command on error.
def truncate_to(value: Decimal, currency: str) -> Decimal:
    """Truncate *value* to the number of decimals for *currency*.

    Unknown currencies default to 2 decimal places.
    """
    places = DECIMALS.get(currency.upper(), 2)
    return truncate(value, places)
Truncates a value to the number of decimals corresponding to the currency
def register_validator(flag_name, checker, message='Flag validation failed', flag_values=_flagvalues.FLAGS):
    """Adds a single-flag constraint enforced during program execution.

    The constraint is validated when flags are first parsed and after
    every change of the flag's value.

    Args:
      flag_name: str, name of the flag to be checked.
      checker: callable taking the flag value, returning bool (or raising
        flags.ValidationError with a custom message).
      message: str, error text shown when checker returns False.
      flag_values: flags.FlagValues, optional FlagValues instance to
        validate against.

    Raises:
      AttributeError: If flag_name is not a registered flag name.
    """
    validator = SingleFlagValidator(flag_name, checker, message)
    _add_validator(flag_values, validator)
Adds a constraint, which will be enforced during program execution. The constraint is validated when flags are initially parsed, and after each change of the corresponding flag's value. Args: flag_name: str, name of the flag to be checked. checker: callable, a function to validate the flag. input - A single positional argument: The value of the corresponding flag (string, boolean, etc. This value will be passed to checker by the library). output - bool, True if validator constraint is satisfied. If constraint is not satisfied, it should either return False or raise flags.ValidationError(desired_error_message). message: str, error text to be shown to the user if checker returns False. If checker raises flags.ValidationError, message from the raised error will be shown. flag_values: flags.FlagValues, optional FlagValues instance to validate against. Raises: AttributeError: Raised when flag_name is not registered as a valid flag name.
def from_dict(cls, async_dict):
    """Return an Async job rebuilt from a dict produced by Async.to_dict.

    FIX: the parameter was named ``async`` — a reserved keyword since
    Python 3.7, which made this method a SyntaxError on modern
    interpreters. Renamed; positional call sites are unaffected.
    """
    async_options = decode_async_options(async_dict)
    # The 'job' entry carries the callable spec; everything else is options.
    target, args, kwargs = async_options.pop('job')
    return cls(target, args, kwargs, **async_options)
Return an async job from a dict output by Async.to_dict.
def _compile_re(self, expression): meta_words = "|".join(self.sanitize_words) expression = expression.replace("META_WORDS_HERE", meta_words) return re.compile(expression, re.IGNORECASE)
Compile given regular expression for current sanitize words
def iter_child_nodes(node):
    """Iterate over all direct child nodes of *node*.

    Yields field values that are AST nodes, and AST elements of
    list-valued fields, in field order.
    """
    for _name, value in iter_fields(node):
        if isinstance(value, AST):
            yield value
        elif isinstance(value, list):
            for entry in value:
                if isinstance(entry, AST):
                    yield entry
Iterate over all child nodes of a node.
def name_from_path(self):
    """Derive a name from ``self.path``.

    If the file is literally named 'catalog', the parent directory's
    name is used instead; dots are mapped to underscores.
    """
    base = os.path.basename(self.path)
    name, _ext = os.path.splitext(base)
    if name == 'catalog':
        parent = os.path.dirname(self.path)
        name = os.path.basename(parent)
    return name.replace('.', '_')
If catalog is named 'catalog' take name from parent directory
def _snpeff_args_from_config(data):
    """Retrieve snpEff arguments supplied through the input configuration.

    Builds the argument list from: always-on HGVS output, user-supplied
    resource options, paired (tumor/normal) cancer mode, and the
    effects-transcripts / clinical-reporting canonical-transcript modes.
    """
    config = data["config"]
    args = ["-hgvs"]
    # Pass through any raw options configured under resources: snpeff.
    resources = config_utils.get_resources("snpeff", config)
    if resources.get("options"):
        args += [str(x) for x in resources.get("options", [])]
    # Paired tumor/normal samples enable snpEff's cancer annotation mode.
    if vcfutils.get_paired_phenotype(data):
        args += ["-cancer"]

    effects_transcripts = dd.get_effects_transcripts(data)
    if effects_transcripts in set(["canonical_cancer"]):
        # Use a curated cancer canonical-transcript list shipped with the
        # snpEff data directory; fail loudly if it is missing.
        _, snpeff_base_dir = get_db(data)
        canon_list_file = os.path.join(snpeff_base_dir, "transcripts",
                                       "%s.txt" % effects_transcripts)
        if not utils.file_exists(canon_list_file):
            raise ValueError("Cannot find expected file for effects_transcripts: %s" %
                             canon_list_file)
        args += ["-canonList", canon_list_file]
    elif effects_transcripts == "canonical" or tz.get_in(("config", "algorithm", "clinical_reporting"), data):
        # Plain canonical-transcript annotation.
        args += ["-canon"]
    return args
Retrieve snpEff arguments supplied through input configuration.
def parse(self, generator):
    """Parse an iterable source of strings into a generator of blocks.

    For each line, the first rule whose predicate matches produces the
    block via its handler (which may consume further lines from the
    stream); lines matching no rule yield an empty dict.
    """
    stream = iter(generator)
    for line in stream:
        parsed = {}
        for predicate, handler in self.rules:
            if predicate(line):
                parsed = handler(line, stream)
                break
        yield parsed
Parse an iterable source of strings into a generator
def walk(self, visitor):
    """Apply *visitor* to this node and, recursively, to every child.

    @param visitor: A function taking a single node.
    @return: self (for chaining)
    @rtype: L{Element}
    """
    visitor(self)
    for child in self.children:
        child.walk(visitor)
    return self
Walk the branch and call the visitor function on each node. @param visitor: A function. @return: self @rtype: L{Element}
def _setup_events(plugin):
    """Register the plugin for each of its declared event hooks.

    Only honored when ``plugin.events`` is a non-empty list or tuple;
    entries not present in _EVENT_VALS are silently skipped.
    """
    declared = plugin.events
    if not declared or not isinstance(declared, (list, tuple)):
        return
    for event in declared:
        if event in _EVENT_VALS:
            register('event', event, plugin)
Handles setup or teardown of event hook registration for the provided plugin. `plugin` ``Plugin`` class.
def unbare_repo(func):
    """Decorator: methods wrapped with this raise InvalidGitRepositoryError
    when invoked on a bare repository."""
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        if not self.repo.bare:
            return func(self, *args, **kwargs)
        raise InvalidGitRepositoryError("Method '%s' cannot operate on bare repositories" % func.__name__)
    return wrapper
Methods with this decorator raise InvalidGitRepositoryError if they encounter a bare repository
def setup(self, bitdepth=16):
    """Configure the client data format for signed-integer PCM reads.

    Must be called before reading from the file.

    :param bitdepth: desired PCM sample size in bits (default 16).
    """
    file_fmt = self.get_file_format()
    client_fmt = copy.copy(file_fmt)
    client_fmt.mFormatID = AUDIO_ID_PCM
    client_fmt.mFormatFlags = PCM_IS_SIGNED_INT | PCM_IS_PACKED
    client_fmt.mBitsPerChannel = bitdepth
    # Packed interleaved samples: bytes per packet spans all channels.
    client_fmt.mBytesPerPacket = (file_fmt.mChannelsPerFrame *
                                  client_fmt.mBitsPerChannel // 8)
    client_fmt.mFramesPerPacket = 1
    client_fmt.mBytesPerFrame = client_fmt.mBytesPerPacket
    self.set_client_format(client_fmt)
Set the client format parameters, specifying the desired PCM audio data format to be read from the file. Must be called before reading from the file.
def alter(self, operation, timeout=None, metadata=None, credentials=None):
    """Run a modification (alter) via this client.

    Retries exactly once after re-login when the failure is an expired
    JWT; any other error is re-raised unchanged.
    """
    meta = self.add_login_metadata(metadata)
    try:
        return self.any_client().alter(operation, timeout=timeout,
                                       metadata=meta,
                                       credentials=credentials)
    except Exception as error:
        if not util.is_jwt_expired(error):
            raise error
        self.retry_login()
        meta = self.add_login_metadata(metadata)
        return self.any_client().alter(operation, timeout=timeout,
                                       metadata=meta,
                                       credentials=credentials)
Runs a modification via this client.
def context_export(zap_helper, name, file_path):
    """Export the context *name* to *file_path*, raising ZAPError on failure."""
    with zap_error_handler():
        status = zap_helper.zap.context.export_context(name, file_path)
        if status != 'OK':
            raise ZAPError('Exporting context to file failed: {}'.format(status))
    console.info('Exported context {0} to {1}'.format(name, file_path))
Export a given context to a file.
def detectWebOSTV(self):
    """Return True when the user agent identifies a WebOS smart TV."""
    agent = self.__userAgent
    return (UAgentInfo.deviceWebOStv in agent
            and UAgentInfo.smartTV2 in agent)
Return detection of a WebOS smart TV Detects if the current browser is on a WebOS smart TV.
def soap(self):
    """Single log out using the HTTP SOAP binding."""
    logger.debug("- SOAP -")
    request_dict = self.unpack_soap()
    logger.debug("_dict: %s", request_dict)
    return self.operation(request_dict, BINDING_SOAP)
Single log out using HTTP_SOAP binding
def show_loadbalancer(self, lbaas_loadbalancer, **_params):
    """Fetches information for a load balancer."""
    path = self.lbaas_loadbalancer_path % lbaas_loadbalancer
    return self.get(path, params=_params)
Fetches information for a load balancer.
def items (self):
    """Return the list of (key, value) pairs without updating usage counts."""
    return [(key, entry[1])
            for key, entry in super(LFUCache, self).items()]
Return list of items, not updating usage count.
def parse_debug(self, inputstring):
    """Parse *inputstring* as debug code (stripped, headerless output)."""
    preargs = {"strip": True}
    postargs = {"header": "none", "initial": "none", "final_endline": False}
    return self.parse(inputstring, self.file_parser, preargs, postargs)
Parse debug code.
def check_package_data(dist, attr, value):
    """Verify that *value* maps package names (str) to iterables of globs."""
    def _entry_ok(name, patterns):
        # Keys must be strings; values must be iterable (lists of globs).
        if not isinstance(name, str):
            return False
        try:
            iter(patterns)
        except TypeError:
            return False
        return True

    if isinstance(value, dict) and all(_entry_ok(k, v) for k, v in value.items()):
        return
    raise DistutilsSetupError(
        attr + " must be a dictionary mapping package names to lists of "
        "wildcard patterns"
    )
Verify that value is a dictionary of package names to glob lists
def skip(self):
    """Return True when the current request path matches the skip list."""
    if not isinstance(self.skip_list, list) or not self.skip_list:
        return False
    return any(request.path.startswith('/{0}'.format(entry))
               for entry in self.skip_list)
Checks the skip list.
def path_to_pattern(path, metadata=None):
    """Strip cache-source prefix information from *path*.

    Returns None when *path* is not a str. When *metadata* carries cache
    entries, everything left of the 'urlpath' cache regex is removed.

    Parameters
    ----------
    path : str
        Path to data, optionally containing format strings.
    metadata : dict, optional
        Extra class arguments, including any cache information.

    Returns
    -------
    pattern : str
        Pattern-style path with the cached prefix removed.
    """
    if not isinstance(path, str):
        return None
    result = path
    cache = metadata.get('cache') if metadata else None
    if cache:
        regex = next(entry.get('regex') for entry in cache
                     if entry.get('argkey') == 'urlpath')
        result = result.split(regex)[-1]
    return result
Remove source information from path when using chaching Returns None if path is not str Parameters ---------- path : str Path to data optionally containing format_strings metadata : dict, optional Extra arguments to the class, contains any cache information Returns ------- pattern : str Pattern style path stripped of everything to the left of cache regex.
def is45(msg):
    """Check if a message is likely to be BDS code 4,5 (meteorological
    hazard report).

    Validates every status/value field triple, the reserved low bits,
    and sanity-checks the decoded temperature range.

    Args:
        msg (String): 28 bytes hexadecimal message string

    Returns:
        bool: True or False
    """
    if allzeros(msg):
        return False
    d = hex2bin(data(msg))
    # Status/value field triples: each wrongstatus() call checks one
    # status bit against its value field span.
    if wrongstatus(d, 1, 2, 3):
        return False
    if wrongstatus(d, 4, 5, 6):
        return False
    if wrongstatus(d, 7, 8, 9):
        return False
    if wrongstatus(d, 10, 11, 12):
        return False
    if wrongstatus(d, 13, 14, 15):
        return False
    if wrongstatus(d, 16, 17, 26):
        return False
    if wrongstatus(d, 27, 28, 38):
        return False
    if wrongstatus(d, 39, 40, 51):
        return False
    # Bits 52-56 are reserved and must be zero.
    if bin2int(d[51:56]) != 0:
        return False
    # Decoded temperature outside a plausible range rules out BDS 4,5.
    temp = temp45(msg)
    if temp:
        if temp > 60 or temp < -80:
            return False
    return True
Check if a message is likely to be BDS code 4,5. Meteorological hazard report Args: msg (String): 28 bytes hexadecimal message string Returns: bool: True or False
def _fields(self): return ( ('reference_name', self.reference_name,), ('annotation_name', self.annotation_name), ('annotation_version', self.annotation_version), ('cache_directory_path', self.cache_directory_path), ('decompress_on_download', self.decompress_on_download), ('copy_local_files_to_cache', self.copy_local_files_to_cache) )
Fields used for hashing, string representation, equality comparison
def wait(self):
    """Wait for all running containers to stop.

    If waiting fails, stop all containers and re-raise the error.
    """
    proxy = SpawningProxy(self.containers, abort_on_error=True)
    try:
        proxy.wait()
    except Exception:
        self.stop()
        raise
Wait for all running containers to stop.
def serialize(ad_objects, output_format='json', indent=2, attributes_only=False):
    """Serialize a list of ADObjects to the given format.

    :param ad_objects list: A list of ADObjects to serialize
    :param output_format str: 'json' or 'yaml'; anything else yields None
    :param indent int: The number of indentation spaces, defaults to 2
    :param attributes_only: Serialize only the sorted attribute names found
        in the first record
    :return: A serialized, formatted representation of the objects
    :rtype: str
    """
    if attributes_only:
        ad_objects = sorted(ad_objects[0].keys())
    if output_format == 'json':
        return json.dumps(ad_objects, indent=indent, ensure_ascii=False,
                          sort_keys=True)
    if output_format == 'yaml':
        return yaml.dump(sorted(ad_objects), indent=indent)
Serialize the object to the specified format :param ad_objects list: A list of ADObjects to serialize :param output_format str: The output format, json or yaml. Defaults to json :param indent int: The number of spaces to indent, defaults to 2 :param attributes only: Only serialize the attributes found in the first record of the list of ADObjects :return: A serialized, formatted representation of the list of ADObjects :rtype: str
def _build_qname(self, uri=None, namespaces=None):
    """Extract a qualified (nice) name for *uri*.

    Falls back to this entity's own uri/namespaces when the arguments
    are missing or falsy.
    """
    return uri2niceString(uri or self.uri, namespaces or self.namespaces)
extracts a qualified name for a uri
def delete(cls, id):
    """Destroy the Union object identified by *id*."""
    api = cls._new_api_client()
    return api.make_request(cls, 'delete', url_params={'id': id})
Destroy a Union object