def Scroll_up(self, n, dl=0):
    """Scroll the mouse wheel up n times."""
    self.Delay(dl)
    self.mouse.scroll(vertical=n)

def update_portal(self, portal_obj):
    """Implements the Update device Portals API.

    This function is extremely dangerous. The portal object you pass in
    will completely overwrite the portal.
    http://docs.exosite.com/portals/#update-portal
    """
    headers = {
        'User-Agent': self.user_agent(),
    }
    headers.update(self.headers())
    r = requests.put(
        self.portals_url() + '/portals/' + self.portal_id(),
        data=json.dumps(portal_obj),
        headers=headers,
        auth=self.auth())
    if HTTP_STATUS.OK == r.status_code:
        return r.json()
    else:
        print("update_portal: Something went wrong: <{0}>: {1}".format(
            r.status_code, r.reason))
        r.raise_for_status()

def merge_single_qubit_gates_into_phased_x_z(
        circuit: circuits.Circuit,
        atol: float = 1e-8) -> None:
    """Canonicalizes runs of single-qubit rotations in a circuit.

    Specifically, any run of non-parameterized single-qubit gates will be
    replaced by an optional PhasedX operation followed by an optional Z
    operation.

    Args:
        circuit: The circuit to rewrite. This value is mutated in-place.
        atol: Absolute tolerance to angle error. Larger values allow more
            negligible gates to be dropped, smaller values increase accuracy.
    """
    def synth(qubit: ops.Qid, matrix: np.ndarray) -> List[ops.Operation]:
        out_gates = decompositions.single_qubit_matrix_to_phased_x_z(
            matrix, atol)
        return [gate(qubit) for gate in out_gates]

    MergeSingleQubitGates(synthesizer=synth).optimize_circuit(circuit)

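A minimal usage sketch, assuming a cirq release that still exports this optimizer at the top level (the qubit and gates are made up):

import cirq

q = cirq.LineQubit(0)
circuit = cirq.Circuit([cirq.X(q) ** 0.5, cirq.Z(q) ** 0.25, cirq.Y(q) ** 0.5])
# The run of three single-qubit rotations collapses in place.
cirq.merge_single_qubit_gates_into_phased_x_z(circuit)
print(circuit)  # e.g. an optional PhasedX followed by an optional Z on q
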
def transform(self):
    """Check the field values in self.dcmf1 and self.dcmf2 and return True
    if all the field values are the same, False otherwise. Note that if
    either dataset is missing, np.inf is returned instead of a bool.

    Returns
    -------
    bool
    """
    if self.dcmf1 is None or self.dcmf2 is None:
        return np.inf
    for field_name in self.field_weights:
        if (str(getattr(self.dcmf1, field_name, '')) !=
                str(getattr(self.dcmf2, field_name, ''))):
            return False
    return True

def GetMetadataLegacy(client, token=None):
    """Builds ExportedMetadata object for a given client id.

    Note: This is a legacy aff4-only implementation.
    TODO(user): deprecate as soon as REL_DB migration is done.

    Args:
        client: RDFURN of a client or VFSGRRClient object itself.
        token: Security token.

    Returns:
        ExportedMetadata object with metadata of the client.
    """
    if isinstance(client, rdfvalue.RDFURN):
        client_fd = aff4.FACTORY.Open(client, mode="r", token=token)
    else:
        client_fd = client

    metadata = ExportedMetadata()
    metadata.client_urn = client_fd.urn
    metadata.client_age = client_fd.urn.age
    metadata.hostname = utils.SmartUnicode(
        client_fd.Get(client_fd.Schema.HOSTNAME, ""))
    metadata.os = utils.SmartUnicode(client_fd.Get(client_fd.Schema.SYSTEM, ""))
    metadata.uname = utils.SmartUnicode(client_fd.Get(client_fd.Schema.UNAME, ""))
    metadata.os_release = utils.SmartUnicode(
        client_fd.Get(client_fd.Schema.OS_RELEASE, ""))
    metadata.os_version = utils.SmartUnicode(
        client_fd.Get(client_fd.Schema.OS_VERSION, ""))

    kb = client_fd.Get(client_fd.Schema.KNOWLEDGE_BASE)
    usernames = ""
    if kb:
        usernames = [user.username for user in kb.users] or ""
    metadata.usernames = utils.SmartUnicode(usernames)

    metadata.mac_address = utils.SmartUnicode(
        client_fd.Get(client_fd.Schema.MAC_ADDRESS, ""))

    system_labels = set()
    user_labels = set()
    for l in client_fd.GetLabels():
        if l.owner == "GRR":
            system_labels.add(l.name)
        else:
            user_labels.add(l.name)
    metadata.labels = ",".join(sorted(system_labels | user_labels))
    metadata.system_labels = ",".join(sorted(system_labels))
    metadata.user_labels = ",".join(sorted(user_labels))

    metadata.hardware_info = client_fd.Get(client_fd.Schema.HARDWARE_INFO)
    metadata.kernel_version = client_fd.Get(client_fd.Schema.KERNEL)

    return metadata

def get_injuries_by_team(self, season, week, team_id):
    """Injuries by week and team."""
    result = self._method_call("Injuries/{season}/{week}/{team_id}", "stats",
                               season=season, week=week, team_id=team_id)
    return result

def get_cluster(self, label):
    """Returns a connection to a mongo cluster.

    Args:
        label (string): the label of a cluster.

    Returns:
        A connection to the cluster labeled with `label`.

    Raises:
        AttributeError: there is no cluster with the given label in the config.
    """
    for cluster in self._clusters:
        if label == cluster['label']:
            return self._get_connection(cluster)
    raise AttributeError('No such cluster %s.' % label)

def calcFontScaling(self):
    '''Calculates the current font size and left position for the current window.'''
    self.ypx = self.figure.get_size_inches()[1] * self.figure.dpi
    self.xpx = self.figure.get_size_inches()[0] * self.figure.dpi
    self.fontSize = self.vertSize * (self.ypx / 2.0)
    self.leftPos = self.axes.get_xlim()[0]
    self.rightPos = self.axes.get_xlim()[1]

async def probe_message(self, _message, context):
    """Handle a probe message.

    See :meth:`AbstractDeviceAdapter.probe`.
    """
    client_id = context.user_data
    await self.probe(client_id)

def issubset(self, other):
    """Test whether the resources available in this machine description are
    a (non-strict) subset of those available in another machine.

    .. note::
        This test being False does not imply that this machine is a
        superset of the other machine; machines may have disjoint resources.
    """
    return (set(self).issubset(set(other)) and
            set(self.iter_links()).issubset(set(other.iter_links())) and
            all(set(self[chip]).issubset(other[chip]) and
                all(self[chip][r] <= other[chip][r]
                    for r in self[chip])
                for chip in self))

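The chip-level check above is plain nested-dict arithmetic; a standalone sketch of that inner comparison, with made-up chip and resource names:

small = {"chip0": {"cores": 16, "sdram": 64}}
big = {"chip0": {"cores": 18, "sdram": 128}}
# Every resource named on each chip of `small` must exist in `big`
# with at least the same quantity.
is_subset = all(
    set(small[chip]) <= set(big.get(chip, {})) and
    all(small[chip][r] <= big[chip][r] for r in small[chip])
    for chip in small
)
print(is_subset)  # True
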
def key_exists(self, namespace, key):
    """Checks a namespace for the existence of a specific key.

    Args:
        namespace (str): Namespace to check in.
        key (str): Name of the key to check for.

    Returns:
        `True` if key exists in the namespace, else `False`.
    """
    return namespace in self.__data and key in self.__data[namespace]

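The backing store is a plain two-level dict, so the `and` short-circuits if the namespace is absent; a minimal standalone sketch (the data is made up):

data = {"aws": {"region": "us-east-1"}}

def key_exists(namespace, key):
    return namespace in data and key in data[namespace]

print(key_exists("aws", "region"))   # True
print(key_exists("gcp", "region"))   # False, without raising KeyError
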
def execute(self, shell=True):
    """Executes the command set on this class.

    Args:
        shell (boolean): Set True if the command is a shell command. Default: True.
    """
    process = Popen(self.command, stdout=PIPE, stderr=PIPE, shell=shell)
    self.output, self.errors = process.communicate()

def remove(item):
    """Delete item, whether it's a file, a folder, or a folder full of other
    files and folders.
    """
    if os.path.isdir(item):
        shutil.rmtree(item)
    else:
        # Assume it's a file. Error if not.
        os.remove(item)

def scale_degree_to_bitmap(scale_degree, modulo=False, length=BITMAP_LENGTH):
    """Create a bitmap representation of a scale degree.

    Note that values in the bitmap may be negative, indicating that the
    semitone is to be removed.

    Parameters
    ----------
    scale_degree : str
        Spelling of a relative scale degree, e.g. 'b3', '7', '#5'.
    modulo : bool, default=False
        If a scale degree exceeds the length of the bit-vector, modulo the
        scale degree back into the bit-vector; otherwise it is discarded.
    length : int, default=BITMAP_LENGTH
        Length of the bit-vector to produce.

    Returns
    -------
    bitmap : np.ndarray, in [-1, 0, 1], len=`length`
        Bitmap representation of this scale degree.
    """
    sign = 1
    if scale_degree.startswith("*"):
        sign = -1
        scale_degree = scale_degree.strip("*")
    edit_map = [0] * length
    sd_idx = scale_degree_to_semitone(scale_degree)
    if sd_idx < length or modulo:
        edit_map[sd_idx % length] = sign
    return np.array(edit_map)

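The semitone lookup is delegated to scale_degree_to_semitone; a self-contained sketch of the bitmap construction using an abbreviated, hypothetical semitone table:

import numpy as np

SEMITONES = {'1': 0, 'b3': 3, '3': 4, '5': 7, 'b7': 10, '7': 11}  # abbreviated

def to_bitmap(degree, length=12):
    sign = -1 if degree.startswith('*') else 1   # '*' marks a removed semitone
    index = SEMITONES[degree.lstrip('*')]
    bitmap = np.zeros(length, dtype=int)
    bitmap[index % length] = sign
    return bitmap

print(to_bitmap('b3'))   # 1 at index 3
print(to_bitmap('*5'))   # -1 at index 7: remove the fifth
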
def triplify(self, data, parent=None):
    """Recursively generate statements from the data supplied."""
    if data is None:
        return

    if self.is_object:
        for res in self._triplify_object(data, parent):
            yield res
    elif self.is_array:
        for item in data:
            for res in self.items.triplify(item, parent):
                yield res
    else:
        # TODO: figure out if I ever want to check for reverse here.
        type_name = typecast.name(data)
        obj = typecast.stringify(type_name, data)
        if obj is not None:
            obj = obj.strip()
        yield (parent, self.predicate, obj, type_name)

def randrange(seq):
    """Yields random values from @seq until @seq is empty."""
    seq = seq.copy()
    choose = rng().choice
    remove = seq.remove
    for x in range(len(seq)):
        y = choose(seq)
        remove(y)
        yield y

def next_partname(self, template):
    """Return a |PackURI| instance representing a partname matching *template*.

    The returned part-name has the next available numeric suffix to
    distinguish it from other parts of its type. *template* is a printf
    (%)-style template string containing a single replacement item, a '%d'
    to be used to insert the integer portion of the partname.
    Example: "/word/header%d.xml"
    """
    partnames = {part.partname for part in self.iter_parts()}
    for n in range(1, len(partnames) + 2):
        candidate_partname = template % n
        if candidate_partname not in partnames:
            return PackURI(candidate_partname)

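Because only len(partnames) suffixes can be taken, scanning len(partnames) + 1 candidates is guaranteed to find a free one; a standalone sketch of the same search over made-up part names:

existing = {"/word/header1.xml", "/word/header2.xml"}
template = "/word/header%d.xml"
candidate = next(template % n for n in range(1, len(existing) + 2)
                 if template % n not in existing)
print(candidate)  # /word/header3.xml
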
def main():
    parser = argparse.ArgumentParser(description="An interface to CarbonBlack environments")
    #profiles = auth.CredentialStore("response").get_profiles()
    parser.add_argument('-e', '--environment',
                        choices=auth.CredentialStore("response").get_profiles(),
                        help="specify a specific instance you want to work with. "
                             "If not defined '-t production' will be used implicitly.")
    parser.add_argument('-t', '--envtypes', type=str, default='production',
                        help="specify any combination of envtypes. "
                             "Default=All 'production' envtypes. Ignored if -e is set.")
    #parser.add_argument('--debug', action='store_true', help='print debugging info')
    #parser.add_argument('--warnings', action='store_true',
    #                    help="Warn before printing large executions")
    subparsers = parser.add_subparsers(dest='command')  #title='subcommands', help='additional help')

    cbinterface_commands = ['query', 'proc', 'collect', 'remediate', 'enumerate_usb', 'vxdetect']

    parser_vx = subparsers.add_parser('vxdetect',
                                      help="search cbsandbox for processes in vxstream report, show detections")
    parser_vx.add_argument('vxstream_report', help='path to vxstream report')
    parser_vx.add_argument('-p', '--print-process-tree', action='store_true',
                           help='print the process tree')

    parser_usb = subparsers.add_parser('enumerate_usb',
                                       help="Show recent removable drive activity on the sensor")
    parser_usb.add_argument('sensor', help='hostname of the sensor')
    parser_usb.add_argument('-s', '--start-time', action='store',
                            help='how far back to query (default:ALL time)')

    parser_proc = subparsers.add_parser('proc', help="analyze a process GUID. 'proc -h' for more")
    parser_proc.add_argument('process', help="the process GUID to analyze")
    parser_proc.add_argument('--warnings', action='store_true',
                             help="Warn before printing large executions")
    parser_proc.add_argument('-w', '--walk-tree', action='store_true',
                             help="walk and analyze the process tree")
    parser_proc.add_argument('-wp', '--walk-parents', action='store_true',
                             help="print details on the process ancestry")
    #parser_proc.add_argument('-d', '--detection', action='store_true',
    #                         help="show detections that would result in ACE alerts")
    parser_proc.add_argument('-i', '--proc-info', action='store_true',
                             help="show binary and process information")
    parser_proc.add_argument('-c', '--show-children', action='store_true',
                             help="only print process children event details")
    parser_proc.add_argument('-nc', '--netconns', action='store_true',
                             help="print network connections")
    parser_proc.add_argument('-fm', '--filemods', action='store_true',
                             help="print file modifications")
    parser_proc.add_argument('-rm', '--regmods', action='store_true',
                             help="print registry modifications")
    parser_proc.add_argument('-um', '--unsigned-modloads', action='store_true',
                             help="print unsigned modloads")
    parser_proc.add_argument('-ml', '--modloads', action='store_true',
                             help="print modloads")
    parser_proc.add_argument('-cp', '--crossprocs', action='store_true',
                             help="print crossprocs")
    #parser_proc.add_argument('-intel', '--intel-hits', action='store_true',
    #                         help="show intel (feed/WL) hits that do not result in ACE alerts")
    parser_proc.add_argument('--no-analysis', action='store_true',
                             help="Don't fetch and print process activity")
    parser_proc.add_argument('--json', action='store_true',
                             help='output process summary in json')
    parser_proc.add_argument('--segment-limit', action='store', type=int, default=None,
                             help='stop processing events into json after this many process segments')

    facet_args = [
        'process_name', 'childproc_name', 'username', 'parent_name', 'path',
        'hostname', 'parent_pid', 'comms_ip', 'process_md5', 'start', 'group',
        'interface_ip', 'modload_count', 'childproc_count', 'cmdline',
        'regmod_count', 'process_pid', 'parent_id', 'os_type',
        'rocessblock_count', 'crossproc_count', 'netconn_count', 'parent_md5',
        'host_type', 'last_update', 'filemod_count']

    parser_query = subparsers.add_parser('query',
                                         help="execute a process search query. 'query -h' for more")
    parser_query.add_argument('query', help="the process search query you'd like to execute")
    parser_query.add_argument('-s', '--start-time', action='store',
                              help="Only return processes with events after given date/time stamp "
                                   "(server's clock). Format:'Y-m-d H:M:S' eastern time")
    parser_query.add_argument('-e', '--end-time', action='store',
                              help="Set the maximum last update time. Format:'Y-m-d H:M:S' eastern time")
    parser_query.add_argument('--facet', action='store', choices=facet_args,
                              help='stats info on single field across query results (ex. process_name)')
    parser_query.add_argument('--no-warnings', action='store_true',
                              help="Don't warn before printing large query results")
    parser_query.add_argument('-lh', '--logon-history', action='store_true',
                              help="Display available logon history for given username or hostname")

    parser_collect = subparsers.add_parser('collect', help='perform LR collection tasks on a host')
    parser_collect.add_argument('sensor', help="the hostname/sensor to collect from")
    parser_collect.add_argument('-f', '--filepath', action='store', help='collect file')
    parser_collect.add_argument('-c', '--command-exec', action='store', help='command to execute')
    parser_collect.add_argument('-p', '--process-list', action='store_true',
                                help='show processes running on sensor')
    parser_collect.add_argument('-m', '--memdump', action='store', const='ALLMEM', nargs='?',
                                help='dump memory on a specific process-id')
    parser_collect.add_argument('-lr', '--regkeypath', action='store',
                                help='List all registry values from the specified registry key.')
    parser_collect.add_argument('-r', '--regkeyvalue', action='store',
                                help='Returns the associated value of the specified registry key.')
    parser_collect.add_argument('-i', '--info', action='store_true', help='print sensor information')
    parser_collect.add_argument('-gst', '--get-task', action='store_true',
                                help='get scheduled tasks or specific task')
    parser_collect.add_argument('-mc', '--multi-collect', action='store',
                                help='path to ini file listing files and regs to collect')

    remediate_file_example = """Example remediate ini file:

    [files]
    file1=C:\\Users\\user\\Desktop\\testfile.txt

    [process_names]
    proc1=cmd.exe
    proc2=notepad++.exe

    [directories]
    directory1=C:\\Users\\user\\Desktop\\nanocore

    [scheduled_tasks]
    task1=\\monkey_task
    task1=\\Microsoft\\windows\\some\\flake\\task

    [pids]
    pid1=10856

    [registry_paths]
    reg1=HKLM\\Software\\Microsoft\\Windows\\CurrentVersion\\Run\\calc
    reg2=HKLM\\Software\\Microsoft\\Windows\\CurrentVersion\\Run\\hippo"""

    parser_remediate = subparsers.add_parser('remediate', help='remediate a host')
    parser_remediate.add_argument('sensor', help="the hostname/sensor needing remediation")
    parser_remediate.add_argument('-i', '--isolate', help='toggle host isolation',
                                  default=False, action='store_true')
    parser_remediate.add_argument('-f', '--remediation-filepath',
                                  help="path to the remediation ini file; 'help' as the filepath for example")
    parser_remediate.add_argument('-dst', '--delete-scheduled-task',
                                  help="path of scheduled task to delete")
    parser_remediate.add_argument('-kpname', '--kill-process-name',
                                  help="kill all processes with this name")
    parser_remediate.add_argument('-kpid', '--kill-pid', help="a process id to kill")
    parser_remediate.add_argument('-df', '--delete-file', help="path to file needing deletion")
    parser_remediate.add_argument('-dr', '--delete-regkey', help="path to regkey value needing deletion")
    parser_remediate.add_argument('-dd', '--delete-directory', help="path to directory needing deletion")

    args = parser.parse_args()

    if args.command == 'remediate' and args.remediation_filepath == 'help':
        print(remediate_file_example)
        parser.parse_args(['remediate', '-h'])

    if args.command is None:
        print("\n\n*****")
        print("You must specify one of the following commands:\n")
        print(cbinterface_commands)
        print("\n*****\n\n")
        parser.parse_args(['-h'])

    #args.debug = True
    #if args.debug:
    # configure some more logging
    root = logging.getLogger()
    root.addHandler(logging.StreamHandler())
    logging.getLogger("cbapi").setLevel(logging.ERROR)
    logging.getLogger("lerc_api").setLevel(logging.WARNING)

    '''All VxStream related stuff may be removed in a future version'''
    if args.command == 'vxdetect':
        cb = CbResponseAPI(profile='vxstream')
        process_list = parse_vxstream_report(cb, args.vxstream_report)
        if args.print_process_tree:
            print()
            print(process_list)
            print()
        return 0

    # Set up environment profiles
    profile = None
    profiles = []
    if args.environment:
        print("Using {} environment ..".format(args.environment))
        profiles.append(args.environment)
    else:
        # a little hack for getting our environment type variable defined
        default_profile = auth.default_profile
        default_profile['envtype'] = 'production'
        query_envtype = set(args.envtypes.lower().split(','))
        for profile in auth.CredentialStore("response").get_profiles():
            credentials = auth.CredentialStore("response").get_credentials(profile=profile)
            profile_envtype = set(credentials['envtype'].lower().split(','))
            if query_envtype.issubset(profile_envtype):
                profiles.append(profile)

    # Process Querying #
    if args.command == 'query':
        for profile in profiles:
            handle_proxy(profile)
            print("\nSearching {} environment..".format(profile))
            q = CBquery(profile=profile)
            q.process_query(args)
        return 0

    # Select correct environment by sensor hostname and get the sensor object
    sensor = None
    if args.command == 'collect' or args.command == 'remediate' or args.command == 'enumerate_usb':
        cb_results = sensor_search(profiles, args.sensor)
        if not isinstance(cb_results, list):
            # an error occurred
            return cb_results
        else:
            if not cb_results:
                LOGGER.info("A sensor with hostname {} wasn't found in any environments".format(args.sensor))
                return 0
            elif len(cb_results) > 1:
                LOGGER.error("A sensor by hostname {} was found in multiple environments".format(args.sensor))
                for r in cb_results:
                    print("Results:")
                    print("Profile {}: {} (SID:{})".format(r[1], r[0].hostname, r[0].id))
                return 1
        results = cb_results[0]
        profile = results[1]
        sensor = results[0]

    # Show USB Regmod activity
    if args.command == 'enumerate_usb':
        enumerate_usb(sensor, args.start_time)

    # lerc install arguments can differ by company/environment
    # same lazy hack to define in cb config
    config = {}
    try:
        default_profile = auth.default_profile
        default_profile['lerc_install_cmd'] = None
        config = auth.CredentialStore("response").get_credentials(profile=profile)
    except:
        pass

    # Collection #
    if args.command == 'collect':
        hyper_lr = hyperLiveResponse(sensor)

        if args.info:
            print(hyper_lr)
            return True

        # start a cb lr session
        lr_session = hyper_lr.go_live()

        if args.multi_collect:
            filepaths = regpaths = full_collect = None
            config = ConfigParser()
            config.read(args.multi_collect)
            try:
                filepaths = config.items("files")
            except:
                filepaths = []
            try:
                regpaths = config.items("registry_paths")
            except:
                regpaths = []
            try:
                full_collect = config.get('full_collect', 'action')
            except:
                pass

            if regpaths is not None:
                for regpath in regpaths:
                    if isinstance(regpath, tuple):
                        regpath = regpath[1]
                    print("~ Trying to get {}".format(regpath))
                    try:
                        result = lr_session.get_registry_value(regpath)
                        if result:
                            localfname = args.sensor + '_regkey_' + result['value_name'] + ".txt"
                            with open(localfname, 'wb') as f:
                                f.write(bytes(result['value_data'], 'UTF-8'))
                            print("\t+ Data written to: {}".format(localfname))
                    except Exception as e:
                        print("[!] Error: {}".format(str(e)))
            if filepaths is not None:
                for filepath in filepaths:
                    try:
                        hyper_lr.getFile_with_timeout(filepath[1])
                    except Exception as e:
                        print("[!] Error: {}".format(str(e)))
            if full_collect == 'True':
                return False  #LR_collection(hyper_lr, args)
            return True
        elif args.filepath:
            hyper_lr.getFile_with_timeout(args.filepath)
        elif args.process_list:
            hyper_lr.print_processes()
        elif args.memdump:
            # get config items
            config = ConfigParser()
            config.read(CONFIG_PATH)
            #if config.has_section('memory'):
            #    if
            cb_compress = config['memory'].getboolean('cb_default_compress')
            custom_compress = config['memory'].getboolean('custom_compress')
            custom_compress_file = config['memory']['custom_compress_file']
            auto_collect_mem = config['memory'].getboolean('auto_collect_mem_file')
            lerc_collect_mem = config['memory'].getboolean('lerc_collect_mem')
            path_to_procdump = config['memory']['path_to_procdump']
            if args.memdump == "ALLMEM":
                return hyper_lr.dump_sensor_memory(cb_compress=cb_compress,
                                                   custom_compress=custom_compress,
                                                   custom_compress_file=custom_compress_file,
                                                   auto_collect_result=auto_collect_mem)
            else:
                return hyper_lr.dump_process_memory(args.memdump,
                                                    path_to_procdump=path_to_procdump)
        elif args.command_exec:
            print("executing '{}' on {}".format(args.command_exec, args.sensor))
            result = lr_session.create_process(args.command_exec, wait_timeout=60,
                                               wait_for_output=True)
            print("\n-------------------------")
            result = result.decode('utf-8')
            print(result + "\n-------------------------")
            print()
        elif args.regkeypath:
            print("\n\t{}".format(args.regkeypath))
            results = lr_session.list_registry_keys(args.regkeypath)
            for result in results:
                print("\t-------------------------")
                print("\tName: {}".format(result['value_name']))
                print("\tType: {}".format(result['value_type']))
                print("\tData: {}".format(result['value_data']))
                print()
        elif args.regkeyvalue:
            print("\n\t{}".format(args.regkeyvalue))
            result = lr_session.get_registry_value(args.regkeyvalue)
            print("\t-------------------------")
            print("\tName: {}".format(result['value_name']))
            print("\tType: {}".format(result['value_type']))
            print("\tData: {}".format(result['value_data']))
            print()
        elif args.get_task:
            return hyper_lr.get_scheduled_tasks()
        else:
            # perform full live response collection
            if config['lerc_install_cmd']:
                result = hyper_lr.get_lerc_status()
                if not result or result == 'UNINSTALLED' or result == 'UNKNOWN':
                    if not hyper_lr.deploy_lerc(config['lerc_install_cmd']):
                        LOGGER.warn("LERC deployment failed")
            else:
                LOGGER.info("{} environment is not configured for LERC deployment".format(profile))
            return LR_collection(hyper_lr, args)

    # Remediation #
    if args.command == 'remediate':
        return Remediation(sensor, args)

    # Process Investigation #
    process_tree = None
    if args.command == 'proc':
        proc = proc_search_environments(profiles, args.process)
        if not proc:
            return 1
        sp = SuperProcess(proc)
        if args.proc_info:
            print(sp)
        elif args.walk_tree:
            sp.walk_process_tree()
            print()
            print(sp.process_tree)
            for process in sp.process_tree:
                if process.is_suppressed:
                    print("+ [DATA SUPPRESSED] {} (PID:{}) - {}".format(
                        process.name, process.pid, process.id))
                    continue
                print("+ {} (PID:{}) - {}".format(process.name, process.pid, process.id))
                if args.filemods:
                    process.print_filemods()
                    args.no_analysis = True
                if args.netconns:
                    process.print_netconns()
                    args.no_analysis = True
                if args.regmods:
                    process.print_regmods()
                    args.no_analysis = True
                if args.unsigned_modloads:
                    process.print_unsigned_modloads()
                    args.no_analysis = True
                if args.modloads:
                    process.print_modloads()
                    args.no_analysis = True
                if args.crossprocs:
                    process.print_crossprocs()
                    args.no_analysis = True
                if args.walk_parents:
                    sp.show_ancestry()
                    args.no_analysis = True
                if args.no_analysis != True:
                    if args.json:
                        if args.segment_limit:
                            print(process.events_to_json(segment_limit=args.segment_limit))
                        else:
                            print(process.events_to_json())
                    else:
                        process.default_print()
        else:
            print()
            print(sp.process_tree)
            if args.walk_parents:
                sp.show_ancestry()
                args.no_analysis = True
            if args.filemods:
                sp.print_filemods()
                args.no_analysis = True
            if args.netconns:
                sp.print_netconns()
                args.no_analysis = True
            if args.regmods:
                sp.print_regmods()
                args.no_analysis = True
            if args.unsigned_modloads:
                sp.print_unsigned_modloads()
                args.no_analysis = True
            if args.modloads:
                sp.print_modloads()
                args.no_analysis = True
            if args.crossprocs:
                sp.print_crossprocs()
                args.no_analysis = True
            if args.show_children:
                sp.print_child_events()
                args.no_analysis = True
            if args.no_analysis != True:
                if args.json:
                    if args.segment_limit:
                        print(sp.events_to_json(segment_limit=args.segment_limit))
                    else:
                        print(sp.events_to_json())
                else:
                    sp.default_print()

        print()
        return True

def edit_config_input_target_config_target_running_running(self, **kwargs):
    """Auto Generated Code"""
    config = ET.Element("config")
    edit_config = ET.Element("edit_config")
    config = edit_config
    input = ET.SubElement(edit_config, "input")
    target = ET.SubElement(input, "target")
    config_target = ET.SubElement(target, "config-target")
    running = ET.SubElement(config_target, "running")
    running = ET.SubElement(running, "running")

    callback = kwargs.pop('callback', self._callback)
    return callback(config)

def checkCache(fnm, strip=0, upx=0):
    """
    Cache prevents preprocessing binary files again and again.
    """
    # On darwin a cache is required anyway to keep the libraries
    # with relative install names. Caching on darwin does not work
    # since we need to modify binary headers to use relative paths
    # to dll dependencies and starting with '@executable_path'.
    if ((not strip and not upx and not is_darwin and not is_win)
            or fnm.lower().endswith(".manifest")):
        return fnm
    if strip:
        strip = 1
    else:
        strip = 0
    if upx:
        upx = 1
    else:
        upx = 0

    # Load cache index
    cachedir = os.path.join(CONFIGDIR, 'bincache%d%d' % (strip, upx))
    if not os.path.exists(cachedir):
        os.makedirs(cachedir)
    cacheindexfn = os.path.join(cachedir, "index.dat")
    if os.path.exists(cacheindexfn):
        cache_index = _load_data(cacheindexfn)
    else:
        cache_index = {}

    # Verify if the file we're looking for is present in the cache.
    basenm = os.path.normcase(os.path.basename(fnm))
    digest = cacheDigest(fnm)
    cachedfile = os.path.join(cachedir, basenm)
    cmd = None
    if basenm in cache_index:
        if digest != cache_index[basenm]:
            os.remove(cachedfile)
        else:
            # On Mac OS X we need relative paths to dll dependencies
            # starting with @executable_path
            if is_darwin:
                dylib.mac_set_relative_dylib_deps(cachedfile)
            return cachedfile
    if upx:
        if strip:
            fnm = checkCache(fnm, 1, 0)
        bestopt = "--best"
        # FIXME: Linux builds of UPX do not seem to contain LZMA (they assert out)
        # A better configure-time check is due.
        if config["hasUPX"] >= (3,) and os.name == "nt":
            bestopt = "--lzma"
        upx_executable = "upx"
        if config.get('upx_dir'):
            upx_executable = os.path.join(config['upx_dir'], upx_executable)
        cmd = [upx_executable, bestopt, "-q", cachedfile]
    else:
        if strip:
            # -S = strip only debug symbols.
            # The default strip behaviour breaks some shared libraries
            # under Mac OSX
            cmd = ["strip", "-S", cachedfile]
    shutil.copy2(fnm, cachedfile)
    os.chmod(cachedfile, 0755)

    if pyasm and fnm.lower().endswith(".pyd"):
        # If python.exe has dependent assemblies, check for embedded manifest
        # of cached pyd file because we may need to 'fix it' for pyinstaller
        try:
            res = winmanifest.GetManifestResources(os.path.abspath(cachedfile))
        except winresource.pywintypes.error, e:
            if e.args[0] == winresource.ERROR_BAD_EXE_FORMAT:
                # Not a win32 PE file
                pass
            else:
                logger.error(os.path.abspath(cachedfile))
                raise
        else:
            if winmanifest.RT_MANIFEST in res and len(res[winmanifest.RT_MANIFEST]):
                for name in res[winmanifest.RT_MANIFEST]:
                    for language in res[winmanifest.RT_MANIFEST][name]:
                        try:
                            manifest = winmanifest.Manifest()
                            manifest.filename = ":".join([
                                cachedfile,
                                str(winmanifest.RT_MANIFEST),
                                str(name),
                                str(language)])
                            manifest.parse_string(
                                res[winmanifest.RT_MANIFEST][name][language],
                                False)
                        except Exception, exc:
                            logger.error("Cannot parse manifest resource %s, "
                                         "%s from", name, language)
                            logger.error(cachedfile)
                            logger.exception(exc)
                        else:
                            # Fix the embedded manifest (if any):
                            # Extension modules built with Python 2.6.5 have
                            # an empty <dependency> element, we need to add
                            # dependentAssemblies from python.exe for
                            # pyinstaller
                            olen = len(manifest.dependentAssemblies)
                            _depNames = set([dep.name for dep
                                             in manifest.dependentAssemblies])
                            for pydep in pyasm:
                                if not pydep.name in _depNames:
                                    logger.info("Adding %r to dependent "
                                                "assemblies of %r",
                                                pydep.name, cachedfile)
                                    manifest.dependentAssemblies.append(pydep)
                                    # add a single name, not its characters
                                    _depNames.add(pydep.name)
                            if len(manifest.dependentAssemblies) > olen:
                                try:
                                    manifest.update_resources(
                                        os.path.abspath(cachedfile),
                                        [name], [language])
                                except Exception, e:
                                    logger.error(os.path.abspath(cachedfile))
                                    raise

    if cmd:
        try:
            compat.exec_command(*cmd)
        except OSError, e:
            raise SystemExit("Execution failed: %s" % e)

    # update cache index
    cache_index[basenm] = digest
    _save_data(cacheindexfn, cache_index)

    # On Mac OS X we need relative paths to dll dependencies
    # starting with @executable_path
    if is_darwin:
        dylib.mac_set_relative_dylib_deps(cachedfile)
    return cachedfile

def reset_defaults(self):
    """Reset login, password and URL in QgsSettings."""
    self.save_login.setChecked(False)
    self.save_password.setChecked(False)
    self.save_url.setChecked(False)

    set_setting(GEONODE_USER, '')
    set_setting(GEONODE_PASSWORD, '')
    set_setting(GEONODE_URL, '')

    self.login.setText('')
    self.password.setText('')
    self.url.setText('')

def run_process(command, environ):
    """Run the specified process and wait until it finishes.
    Use environ dict for environment variables.
    """
    log.info('running %r with %r', command, environ)
    env = dict(os.environ)
    env.update(environ)
    try:
        p = subprocess.Popen(args=command, env=env)
    except OSError as e:
        raise OSError('cannot run %r: %s' % (command, e))
    log.debug('subprocess %d is running', p.pid)
    ret = p.wait()
    log.debug('subprocess %d exited: %d', p.pid, ret)
    return ret

def make_unique_script_attr(attributes):
    """Filter out duplicate `Script` TransactionAttributeUsage types.

    Args:
        attributes: a list of TransactionAttribute objects.

    Returns:
        list:
    """
    filtered_attr = []
    script_list = []
    for attr in attributes:
        if attr.Usage != TransactionAttributeUsage.Script:
            filtered_attr.append(attr)
        else:
            data = attr.Data
            if isinstance(data, UInt160):
                # convert it to equal type
                data = attr.Data.ToArray()
            # only add if it's not already in the list
            if data not in script_list:
                script_list.append(data)
                filtered_attr.append(attr)
    return filtered_attr

def _validate_indexers(
        self, indexers: Mapping,
) -> List[Tuple[Any, Union[slice, Variable]]]:
    """Here we make sure
    + each indexer has a valid key
    + each indexer is of a valid data type
    + string indexers are cast to the appropriate datetime type if the
      associated index is a DatetimeIndex or CFTimeIndex
    """
    from .dataarray import DataArray

    invalid = [k for k in indexers if k not in self.dims]
    if invalid:
        raise ValueError("dimensions %r do not exist" % invalid)

    # all indexers should be int, slice, np.ndarrays, or Variable
    indexers_list = []  # type: List[Tuple[Any, Union[slice, Variable]]]

    for k, v in indexers.items():
        if isinstance(v, slice):
            indexers_list.append((k, v))
            continue

        if isinstance(v, Variable):
            pass
        elif isinstance(v, DataArray):
            v = v.variable
        elif isinstance(v, tuple):
            v = as_variable(v)
        elif isinstance(v, Dataset):
            raise TypeError('cannot use a Dataset as an indexer')
        elif isinstance(v, Sequence) and len(v) == 0:
            v = IndexVariable((k, ), np.zeros((0,), dtype='int64'))
        else:
            v = np.asarray(v)

            if v.dtype.kind == 'U' or v.dtype.kind == 'S':
                index = self.indexes[k]
                if isinstance(index, pd.DatetimeIndex):
                    v = v.astype('datetime64[ns]')
                elif isinstance(index, xr.CFTimeIndex):
                    v = _parse_array_of_cftime_strings(v, index.date_type)

            if v.ndim == 0:
                v = Variable((), v)
            elif v.ndim == 1:
                v = IndexVariable((k,), v)
            else:
                raise IndexError(
                    "Unlabeled multi-dimensional array cannot be "
                    "used for indexing: {}".format(k))

        if v.ndim == 1:
            v = v.to_index_variable()

        indexers_list.append((k, v))

    return indexers_list

def rateServiceTypeInResult(discoveryResponse):
    """Gives a quality rating for a given service type in a result; higher is better.

    Several UPnP devices reply to a discovery request with multiple responses
    with different service type announcements. To find the most specific one
    we need to be able to rate the service types against each other. Usually
    this is an internal method and is just exported for convenience reasons.

    :param DiscoveryResponse discoveryResponse: the response to rate
    :return: a rating of the quality of the given service type
    :rtype: int
    """
    if discoveryResponse is None:
        return 0

    serviceType = discoveryResponse.service
    if serviceType.startswith("urn:dslforum-org:device"):
        return 11
    if serviceType.startswith("urn:dslforum-org:service"):
        return 10
    if serviceType.startswith("urn:dslforum-org:"):
        return 9
    if serviceType.startswith("urn:schemas-upnp-org:device"):
        return 8
    if serviceType.startswith("urn:schemas-upnp-org:service"):
        return 7
    if serviceType.startswith("urn:schemas-upnp-org:"):
        return 6
    if serviceType.startswith("urn:schemas-"):
        # other schemas, schema-any-com for example
        return 5
    if serviceType.startswith("urn:"):
        return 4
    if serviceType.startswith("upnp:rootdevice"):
        return 3
    if serviceType.startswith("uuid:"):
        # no service, just the uuid given
        return 2
    return 1

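A hedged usage sketch: picking the most specific announcement with max() over this rating. Response is a hypothetical stand-in for DiscoveryResponse; only the .service attribute is read:

from collections import namedtuple

Response = namedtuple("Response", "service")

candidates = [
    Response("upnp:rootdevice"),
    Response("urn:schemas-upnp-org:service:WANIPConnection:1"),
    Response("urn:dslforum-org:device:InternetGatewayDevice:1"),
]
best = max(candidates, key=rateServiceTypeInResult)
print(best.service)  # the dslforum device announcement wins (rating 11)
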
async def create_link_secret(self, label: str) -> None:
    """Create link secret (a.k.a. master secret) used in proofs by
    HolderProver, if the current link secret does not already correspond
    to the input link secret label.

    Raise WalletState if wallet is closed, or any other IndyError causing
    failure to set link secret in wallet.

    :param label: label for link secret; indy-sdk uses label to generate link secret
    """
    LOGGER.debug('Wallet.create_link_secret >>> label: %s', label)

    if not self.handle:
        LOGGER.debug('Wallet.create_link_secret <!< Wallet %s is closed', self.name)
        raise WalletState('Wallet {} is closed'.format(self.name))

    try:
        await anoncreds.prover_create_master_secret(self.handle, label)
        await self._write_link_secret_label(label)
    except IndyError as x_indy:
        if x_indy.error_code == ErrorCode.AnoncredsMasterSecretDuplicateNameError:
            LOGGER.warning(
                'Wallet %s link secret already current: abstaining from updating label record',
                self.name)
            await self._write_link_secret_label(label)
        else:
            LOGGER.debug(
                'Wallet.create_link_secret <!< cannot create link secret for wallet %s, indy error code %s',
                self.name,
                x_indy.error_code)
            raise

    LOGGER.debug('Wallet.create_link_secret <<<')

def export_disks(
    self,
    standalone=True,
    dst_dir=None,
    compress=False,
    collect_only=False,
    with_threads=True,
    *args,
    **kwargs
):
    """Thin method that just uses the provider."""
    return self.provider.export_disks(
        standalone, dst_dir, compress, collect_only, with_threads,
        *args, **kwargs
    )

def user_invite(self, username, email, roles):
    """Invite a user to the tenant."""
    uri = 'openstack/users'
    data = {
        "username": username,
        "email": email,
        "roles": list(set(roles))
    }
    post_body = json.dumps(data)
    resp, body = self.post(uri, body=post_body)
    self.expected_success(200, resp.status)
    body = json.loads(body)
    return rest_client.ResponseBody(resp, body)

def remove_tar_files(file_list):
    """Public function that removes temporary tar archive files in a local directory."""
    for f in file_list:
        if file_exists(f) and f.endswith('.tar'):
            os.remove(f)

def get_GET_array(request, var_name, fail_silently=True):
    """Returns the GET array's contents for the specified variable."""
    vals = request.GET.getlist(var_name)
    if not vals:
        if fail_silently:
            return []
        else:
            # Python 2 raise syntax, as in the original source.
            raise Exception, _("No array called '%(varname)s' in GET variables") % {'varname': var_name}
    return vals

def get_stock_codes(self, cached=True, as_json=False):
    """Returns a dictionary with stock code as key and stock name as value.

    It also implements cache functionality and hits the server only if the
    user insists or the cache is empty.

    :return: dict
    """
    url = self.stocks_csv_url
    req = Request(url, None, self.headers)
    res_dict = {}
    if cached is not True or self.__CODECACHE__ is None:
        # raises HTTPError and URLError
        res = self.opener.open(req)
        if res is not None:
            # for py3 compat convert byte file-like object to
            # string file-like object
            res = byte_adaptor(res)
            for line in res.read().split('\n'):
                if line != '' and re.search(',', line):
                    (code, name) = line.split(',')[0:2]
                    res_dict[code] = name
                # else just skip the evaluation, line may not be a valid csv
        else:
            raise Exception('no response received')
        self.__CODECACHE__ = res_dict
    return self.render_response(self.__CODECACHE__, as_json)

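A standalone sketch of the line parsing above, with made-up CSV rows:

raw = "20MICRONS,20 Microns Limited\n3IINFOTECH,3i Infotech Limited\n"
codes = {}
for line in raw.split('\n'):
    if line != '' and ',' in line:
        # only the first two comma-separated fields are kept
        code, name = line.split(',')[0:2]
        codes[code] = name
print(codes['3IINFOTECH'])  # 3i Infotech Limited
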
def add_attachment(self, attachment):
    """Adds an attachment to the SlackMessage payload.

    This public method appends a SlackAttachment to the attachment list.

    :param attachment: SlackAttachment object
    :return: None
    """
    log = logging.getLogger(self.cls_logger + '.add_attachment')
    if not isinstance(attachment, SlackAttachment):
        msg = 'attachment must be of type: SlackAttachment'
        log.error(msg)
        raise ValueError(msg)
    self.attachments.append(attachment.attachment)
    log.debug('Added attachment: {a}'.format(a=attachment))

def create_channel(cls, address="spanner.googleapis.com:443", credentials=None):
    """Create and return a gRPC channel object.

    Args:
        address (str): The host for the channel to use.
        credentials (~.Credentials): The authorization credentials to
            attach to requests. These credentials identify this
            application to the service. If none are specified, the
            client will attempt to ascertain the credentials from the
            environment.

    Returns:
        grpc.Channel: A gRPC channel object.
    """
    grpc_gcp_config = grpc_gcp.api_config_from_text_pb(
        pkg_resources.resource_string(__name__, _SPANNER_GRPC_CONFIG)
    )
    options = [(grpc_gcp.API_CONFIG_CHANNEL_ARG, grpc_gcp_config)]
    return google.api_core.grpc_helpers.create_channel(
        address, credentials=credentials, scopes=cls._OAUTH_SCOPES
    )

def replace(self, record_id, fields, typecast=False):
    """Replaces a record by its record id. All Fields are updated to match
    the new ``fields`` provided. If a field is not included in ``fields``,
    its value will be set to null. To update only selected fields, use
    :any:`update`.

    >>> record = airtable.match('Seat Number', '22A')
    >>> fields = {'PassangerName': 'Mike', 'Passport': 'YASD232-23'}
    >>> airtable.replace(record['id'], fields)

    Args:
        record_id(``str``): Id of Record to update
        fields(``dict``): Fields to replace with.
            Must be dictionary with Column names as Key.
        typecast(``boolean``): Automatic data conversion from string values.

    Returns:
        record (``dict``): New record
    """
    record_url = self.record_url(record_id)
    return self._put(record_url, json_data={"fields": fields, "typecast": typecast})

def _get_all_headers(self, method, endpoint, request_bytes, custom_headers):
    """
    :type method: str
    :type endpoint: str
    :type request_bytes: bytes
    :type custom_headers: dict[str, str]

    :rtype: dict[str, str]
    """
    headers = self._get_default_headers()
    headers.update(custom_headers)

    if self._api_context.token is not None:
        headers[self.HEADER_AUTHENTICATION] = self._api_context.token
        headers[self.HEADER_SIGNATURE] = security.sign_request(
            self._api_context.installation_context.private_key_client,
            method,
            endpoint,
            request_bytes,
            headers
        )

    return headers

def add_user_role(self, user, role):
    """Add role to given user.

    Args:
        user (string): User name.
        role (string): Role to assign.

    Raises:
        requests.HTTPError on failure.
    """
    self.project_service.set_auth(self._token_project)
    self.project_service.add_user_role(user, role)

def get_namespace(self, key):
    """
    Returns a :class:`~bang.util.SharedNamespace` for the given
    :attr:`key`. These are used by
    :class:`~bang.deployers.deployer.Deployer` objects of the same
    ``deployer_class`` to coordinate control over multiple deployed
    instances of like resources.

    E.g. With 5 clones of an application server, 5
    :class:`~bang.deployers.deployer.Deployer` objects in separate,
    concurrent processes will use the same shared namespace to ensure
    that each object/process controls a distinct server.

    :param str key: Unique ID for the namespace.
        :class:`~bang.deployers.deployer.Deployer` objects that call
        :meth:`get_namespace` with the same :attr:`key` will receive
        the same :class:`~bang.util.SharedNamespace` object.
    """
    namespace = self.shared_namespaces.get(key)
    if namespace:
        return namespace
    ns = SharedNamespace(self.manager)
    self.shared_namespaces[key] = ns
    return ns

def _save_nb(nb_name):
    """Attempts to save notebook. If unsuccessful, shows a warning."""
    display(Javascript('IPython.notebook.save_checkpoint();'))
    display(Javascript('IPython.notebook.save_notebook();'))
    print('Saving notebook...', end=' ')
    if _wait_for_save(nb_name):
        print("Saved '{}'.".format(nb_name))
    else:
        logging.warning(
            "Could not save your notebook (timed out waiting for "
            "IPython save). Make sure your notebook is saved "
            "and export again."
        )

def transform(self, X, lenscale=None):
    r"""Apply the sigmoid basis function to X.

    Parameters
    ----------
    X : ndarray
        (N, d) array of observations where N is the number of samples, and
        d is the dimensionality of X.
    lenscale : float
        The length scale (scalar) of the sigmoid bases to apply to X. If
        not input, this uses the value of the initial length scale.

    Returns
    -------
    ndarray :
        of shape (N, D) where D is the number of centres.
    """
    N, d = X.shape
    lenscale = self._check_dim(d, lenscale)

    return expit(cdist(X / lenscale, self.C / lenscale, 'euclidean'))

r""" Apply the sigmoid basis function to X. Parameters ---------- X: ndarray (N, d) array of observations where N is the number of samples, and d is the dimensionality of X. lenscale: float the length scale (scalar) of the RBFs to apply to X. If not input, this uses the value of the initial length scale. Returns ------- ndarray: of shape (N, D) where D is number of centres.
def _opt_to_args(cls, opt, val):
    """Convert a named option and optional value to command line argument
    notation, correctly handling options that take no value or that have
    special representations (e.g. verify and verbose).
    """
    no_value = (
        "alloptions", "all-logs", "batch", "build", "debug", "experimental",
        "list-plugins", "list-presets", "list-profiles", "noreport", "quiet",
        "verify"
    )
    count = ("verbose",)
    if opt in no_value:
        return ["--%s" % opt]
    if opt in count:
        return ["--%s" % opt for d in range(0, int(val))]
    return ["--" + opt + "=" + val]

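A standalone sketch of the three argument shapes, with an abbreviated option list:

def opt_to_args(opt, val=None,
                no_value=("batch", "quiet", "verify"),
                count=("verbose",)):
    if opt in no_value:
        return ["--%s" % opt]          # flag with no value
    if opt in count:
        return ["--%s" % opt] * int(val)  # repeated flag, e.g. -vv
    return ["--%s=%s" % (opt, val)]    # ordinary key=value option

print(opt_to_args("batch"))               # ['--batch']
print(opt_to_args("verbose", "3"))        # ['--verbose', '--verbose', '--verbose']
print(opt_to_args("only-plugins", "ssh")) # ['--only-plugins=ssh']
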
def control_valve_noise_g_2011(m, P1, P2, T1, rho, gamma, MW, Kv, d, Di,
                               t_pipe, Fd, FL, FLP=None, FP=None,
                               rho_pipe=7800.0, c_pipe=5000.0,
                               P_air=101325.0, rho_air=1.2, c_air=343.0,
                               An=-3.8, Stp=0.2, T2=None, beta=0.93):
    r'''Calculates the sound made by a gas flowing through a control valve
    according to the standard IEC 60534-8-3 (2011) [1]_.

    Parameters
    ----------
    m : float
        Mass flow rate of gas through the control valve, [kg/s]
    P1 : float
        Inlet pressure of the gas before valves and reducers [Pa]
    P2 : float
        Outlet pressure of the gas after valves and reducers [Pa]
    T1 : float
        Inlet gas temperature, [K]
    rho : float
        Density of the gas at the inlet [kg/m^3]
    gamma : float
        Specific heat capacity ratio [-]
    MW : float
        Molecular weight of the gas [g/mol]
    Kv : float
        Metric Kv valve flow coefficient (flow rate of water at a pressure
        drop of 1 bar) [m^3/hr]
    d : float
        Diameter of the valve [m]
    Di : float
        Internal diameter of the pipe before and after the valve [m]
    t_pipe : float
        Wall thickness of the pipe after the valve, [m]
    Fd : float
        Valve style modifier (0.1 to 1; varies tremendously depending on the
        type of valve and position; do not use the default at all!) [-]
    FL : float
        Liquid pressure recovery factor of a control valve without attached
        fittings (normally 0.8-0.9 at full open and decreasing as opened
        further to below 0.5; use default very cautiously!) [-]
    FLP : float, optional
        Combined liquid pressure recovery factor with piping geometry factor,
        for a control valve with attached fittings [-]
    FP : float, optional
        Piping geometry factor [-]
    rho_pipe : float, optional
        Density of the pipe wall material at flowing conditions, [kg/m^3]
    c_pipe : float, optional
        Speed of sound of the pipe wall material at flowing conditions, [m/s]
    P_air : float, optional
        Pressure of the air surrounding the valve and pipe wall, [Pa]
    rho_air : float, optional
        Density of the air surrounding the valve and pipe wall, [kg/m^3]
    c_air : float, optional
        Speed of sound of the air surrounding the valve and pipe wall, [m/s]
    An : float, optional
        Valve correction factor for acoustic efficiency [-]
    Stp : float, optional
        Strouhal number at the peak `fp`; between 0.1 and 0.3 typically, [-]
    T2 : float, optional
        Outlet gas temperature; assumed `T1` if not provided (a PH flash
        should be used to obtain this if possible), [K]
    beta : float, optional
        Valve outlet / expander inlet contraction coefficient, [-]

    Returns
    -------
    LpAe1m : float
        A-weighted sound pressure level 1 m from the pipe wall, 1 m distance
        downstream of the valve (at reference sound pressure level 2E-5), [dBA]

    Notes
    -----
    For formulas see [1]_. This takes on the order of 100 us to compute.
    For values of `An`, see [1]_.

    This model was checked against six examples in [1]_; they match to all
    given decimals.

    Several additional formulas are given for multihole trim valves, control
    valves with two or more fixed area stages, and multipath, multistage trim
    valves.

    Examples
    --------
    >>> control_valve_noise_g_2011(m=2.22, P1=1E6, P2=7.2E5, T1=450, rho=5.3,
    ... gamma=1.22, MW=19.8, Kv=77.85, d=0.1, Di=0.2031, FL=None, FLP=0.792,
    ... FP=0.98, Fd=0.296, t_pipe=0.008, rho_pipe=8000.0, c_pipe=5000.0,
    ... rho_air=1.293, c_air=343.0, An=-3.8, Stp=0.2)
    91.67702674629604

    References
    ----------
    .. [1] IEC 60534-8-3 : Industrial-Process Control Valves - Part 8-3:
       Noise Considerations - Control Valve Aerodynamic Noise Prediction
       Method.
    '''
    k = gamma  # alias
    C = Kv_to_Cv(Kv)
    N14 = 4.6E-3
    N16 = 4.89E4
    fs = 1.0  # structural loss factor reference frequency, Hz
    P_air_std = 101325.0
    if T2 is None:
        T2 = T1
    x = (P1 - P2)/P1

    # FLP/FP when fittings attached
    FL_term = FLP/FP if FP is not None else FL

    P_vc = P1*(1.0 - x/FL_term**2)
    x_vcc = 1.0 - (2.0/(k + 1.0))**(k/(k - 1.0))
    xc = FL_term**2*x_vcc
    alpha = (1.0 - x_vcc)/(1.0 - xc)
    xB = 1.0 - 1.0/alpha*(1.0/k)**((k/(k - 1.0)))
    xCE = 1.0 - 1.0/(22.0*alpha)

    # Regime determination check - thresholds should be ordered or the
    # classification below won't work
    assert xc < x_vcc
    assert x_vcc < xB
    assert xB < xCE

    regime = None
    if x <= xc:
        regime = 1
    elif xc < x <= x_vcc:
        regime = 2
    elif x_vcc < x <= xB:
        regime = 3
    elif xB < x <= xCE:
        regime = 4
    else:
        regime = 5

    Dj = N14*Fd*(C*(FL_term))**0.5
    Mj5 = (2.0/(k - 1.0)*(22.0**((k - 1.0)/k) - 1.0))**0.5
    if regime == 1:
        Mvc = ((2.0/(k - 1.0))
               *((1.0 - x/FL_term**2)**((1.0 - k)/k) - 1.0))**0.5
    elif regime in (2, 3, 4):
        Mj = ((2.0/(k - 1.0))
              *((1.0/(alpha*(1.0 - x)))**((k - 1.0)/k) - 1.0))**0.5
        Mj = min(Mj, Mj5)
    elif regime == 5:
        pass

    if regime == 1:
        Tvc = T1*(1.0 - x/(FL_term)**2)**((k - 1.0)/k)
        cvc = (k*P1/rho*(1 - x/(FL_term)**2)**((k - 1.0)/k))**0.5
        Wm = 0.5*m*(Mvc*cvc)**2
    else:
        Tvcc = 2.0*T1/(k + 1.0)
        cvcc = (2.0*k*P1/(k + 1.0)/rho)**0.5
        Wm = 0.5*m*cvcc*cvcc

    if regime == 1:
        fp = Stp*Mvc*cvc/Dj
    elif regime in (2, 3):
        fp = Stp*Mj*cvcc/Dj
    elif regime == 4:
        fp = 1.4*Stp*cvcc/Dj/(Mj*Mj - 1.0)**0.5
    elif regime == 5:
        fp = 1.4*Stp*cvcc/Dj/(Mj5*Mj5 - 1.0)**0.5

    if regime == 1:
        eta = 10.0**An*FL_term**2*(Mvc)**3
    elif regime == 2:
        eta = 10.0**An*x/x_vcc*Mj**(6.6*FL_term*FL_term)
    elif regime == 3:
        eta = 10.0**An*Mj**(6.6*FL_term*FL_term)
    elif regime == 4:
        eta = 0.5*10.0**An*Mj*Mj*(2.0**0.5)**(6.6*FL_term*FL_term)
    elif regime == 5:
        eta = 0.5*10.0**An*Mj5*Mj5*(2.0**0.5)**(6.6*FL_term*FL_term)

    Wa = eta*Wm
    rho2 = rho*(P2/P1)
    # Speed of sound
    c2 = (k*R*T2/(MW/1000.))**0.5
    Mo = 4.0*m/(pi*d*d*rho2*c2)
    M2 = 4.0*m/(pi*Di*Di*rho2*c2)
    Lg = 16.0*log10(1.0/(1.0 - min(M2, 0.3)))  # dB

    if M2 > 0.3:
        Up = 4.0*m/(pi*rho2*Di*Di)
        UR = Up*Di*Di/(beta*d*d)
        WmR = 0.5*m*UR*UR*((1.0 - d*d/(Di*Di))**2 + 0.2)
        fpR = Stp*UR/d
        MR = UR/c2
        # Value listed in appendix here is wrong, "based on another
        # earlier standard. Calculation thereon is wrong". Assumed
        # correct, matches spreadsheet to three decimals.
        eta_R = 10**An*MR**3
        WaR = eta_R*WmR
        L_piR = 10.0*log10((3.2E9)*WaR*rho2*c2/(Di*Di)) + Lg

    L_pi = 10.0*log10((3.2E9)*Wa*rho2*c2/(Di*Di)) + Lg

    fr = c_pipe/(pi*Di)
    fo = 0.25*fr*(c2/c_air)
    fg = 3**0.5*c_air**2/(pi*t_pipe*c_pipe)
    if d > 0.15:
        dTL = 0.0
    elif 0.05 <= d <= 0.15:
        dTL = -16660.0*d**3 + 6370.0*d**2 - 813.0*d + 35.8
    else:
        dTL = 9.0

    P_air_ratio = P_air/P_air_std
    LpAe1m_sum = 0.0
    LPis = []
    LPIRs = []
    L_pe1m_fis = []
    for fi, A_weight in zip(fis_l_2015, A_weights_l_2015):
        # This gets adjusted when Ma > 0.3
        fi_turb_ratio = fi/fp
        t1 = 1.0 + (0.5*fi_turb_ratio)**2.5
        t2 = 1.0 + (0.5/fi_turb_ratio)**1.7
        # Formula forgot to use log10, but log10 is needed for the numbers
        Lpif = L_pi - 8.0 - 10.0*log10(t1*t2)
        LPis.append(Lpif)

        if M2 > 0.3:
            fiR_turb_ratio = fi/fpR
            t1 = 1.0 + (0.5*fiR_turb_ratio)**2.5
            t2 = 1.0 + (0.5/fiR_turb_ratio)**1.7
            # Again, log10 is missing
            LpiRf = L_piR - 8.0 - 10.0*log10(t1*t2)
            LPIRs.append(LpiRf)
            LpiSf = 10.0*log10(10**(0.1*Lpif) + 10.0**(0.1*LpiRf))

        if fi < fo:
            Gx = (fo/fr)**(2.0/3.0)*(fi/fo)**4.0
            if fo < fg:
                Gy = (fo/fg)
            else:
                Gy = 1.0
        else:
            if fi < fr:
                Gx = (fi/fr)**0.5
            else:
                Gx = 1.0
            if fi < fg:
                Gy = fi/fg
            else:
                Gy = 1.0

        eta_s = (0.01/fi)**0.5
        den = (rho2*c2 + 2.0*pi*t_pipe*fi*rho_pipe*eta_s)/(415.0*Gy) + 1.0
        TL_fi = 10.0*log10(8.25E-7*(c2/(t_pipe*fi))**2*Gx/den*P_air_ratio) - dTL
        if M2 > 0.3:
            term = LpiSf
        else:
            term = Lpif
        L_pe1m_fi = term + TL_fi - 10.0*log10((Di + 2.0*t_pipe + 2.0)
                                              /(Di + 2.0*t_pipe))
        L_pe1m_fis.append(L_pe1m_fi)
        LpAe1m_sum += 10.0**(0.1*(L_pe1m_fi + A_weight))

    LpAe1m = 10.0*log10(LpAe1m_sum)
    return LpAe1m

Calculates the sound made by a gas flowing through a control valve
according to the standard IEC 60534-8-3 (2011) [1]_.

Parameters
----------
m : float
    Mass flow rate of gas through the control valve, [kg/s]
P1 : float
    Inlet pressure of the gas before valves and reducers [Pa]
P2 : float
    Outlet pressure of the gas after valves and reducers [Pa]
T1 : float
    Inlet gas temperature, [K]
rho : float
    Density of the gas at the inlet [kg/m^3]
gamma : float
    Specific heat capacity ratio [-]
MW : float
    Molecular weight of the gas [g/mol]
Kv : float
    Metric Kv valve flow coefficient (flow rate of water at a pressure drop
    of 1 bar) [m^3/hr]
d : float
    Diameter of the valve [m]
Di : float
    Internal diameter of the pipe before and after the valve [m]
t_pipe : float
    Wall thickness of the pipe after the valve, [m]
Fd : float
    Valve style modifier (0.1 to 1; varies tremendously depending on the
    type of valve and position; do not use the default at all!) [-]
FL : float
    Liquid pressure recovery factor of a control valve without attached
    fittings (normally 0.8-0.9 at full open and decreasing as opened
    further to below 0.5; use default very cautiously!) [-]
FLP : float, optional
    Combined liquid pressure recovery factor with piping geometry factor,
    for a control valve with attached fittings [-]
FP : float, optional
    Piping geometry factor [-]
rho_pipe : float, optional
    Density of the pipe wall material at flowing conditions, [kg/m^3]
c_pipe : float, optional
    Speed of sound of the pipe wall material at flowing conditions, [m/s]
P_air : float, optional
    Pressure of the air surrounding the valve and pipe wall, [Pa]
rho_air : float, optional
    Density of the air surrounding the valve and pipe wall, [kg/m^3]
c_air : float, optional
    Speed of sound of the air surrounding the valve and pipe wall, [m/s]
An : float, optional
    Valve correction factor for acoustic efficiency
Stp : float, optional
    Strouhal number at the peak `fp`; between 0.1 and 0.3 typically, [-]
T2 : float, optional
    Outlet gas temperature; assumed `T1` if not provided (a PH flash should
    be used to obtain this if possible), [K]
beta : float, optional
    Valve outlet / expander inlet contraction coefficient, [-]

Returns
-------
LpAe1m : float
    A-weighted sound pressure level 1 m from the pipe wall, 1 m distance
    downstream of the valve (at reference sound pressure level 2E-5), [dBA]

Notes
-----
For formulas see [1]_. This takes on the order of 100 us to compute.
For values of `An`, see [1]_.

This model was checked against six examples in [1]_; they match to all
given decimals.

Several additional formulas are given for multihole trim valves, control
valves with two or more fixed area stages, and multipath, multistage trim
valves.

Examples
--------
>>> control_valve_noise_g_2011(m=2.22, P1=1E6, P2=7.2E5, T1=450, rho=5.3,
... gamma=1.22, MW=19.8, Kv=77.85, d=0.1, Di=0.2031, FL=None, FLP=0.792,
... FP=0.98, Fd=0.296, t_pipe=0.008, rho_pipe=8000.0, c_pipe=5000.0,
... rho_air=1.293, c_air=343.0, An=-3.8, Stp=0.2)
91.67702674629604

References
----------
.. [1] IEC 60534-8-3 : Industrial-Process Control Valves - Part 8-3: Noise
   Considerations - Control Valve Aerodynamic Noise Prediction Method.
def filter(self, func=None, **query):
    """
    Tables can be filtered in one of two ways:
    - Simple keyword arguments return rows where values match *exactly*
    - Pass in a function and return rows where that function evaluates to True

    In either case, a new TableFu instance is returned
    """
    if callable(func):
        # filter() returns an iterator on Python 3; materialize it so the
        # header row can be prepended
        result = list(filter(func, self))
        result.insert(0, self.default_columns)
        return TableFu(result, **self.options)
    else:
        result = self
        for column, value in query.items():
            result = result.filter(lambda r: r[column] == value)
        return result
Tables can be filtered in one of two ways: - Simple keyword arguments return rows where values match *exactly* - Pass in a function and return rows where that function evaluates to True In either case, a new TableFu instance is returned
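A minimal usage sketch (the `table` instance and column names below are hypothetical, for illustration only):

# Keyword form: rows whose 'state' cell is exactly 'CA'
ca_rows = table.filter(state='CA')

# Callable form: rows satisfying an arbitrary predicate
long_names = table.filter(lambda row: len(row['name']) > 10)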
def getElements(self, zero_based=True, pared=False):
    """
    Get the elements of the mesh as a list of point index lists.

    :param zero_based: use zero based index of points if true otherwise
    use 1-based index of points.
    :param pared: use the pared down list of points
    :return: A list of point index lists
    """
    points = self._points[:]
    elements = self._elements[:]
    offset = 0
    if not zero_based:
        offset = 1

    np = None
    if pared:
        np = NodePare()
        np.addPoints(points)
        np.parePoints()

    if pared or not zero_based:
        modified_elements = []
        for element in elements:
            modified_element = [index + offset if np is None else np.getParedIndex(index) + offset
                                for index in element]
            modified_elements.append(modified_element)
        elements = modified_elements

    return elements
Get the elements of the mesh as a list of point index lists. :param zero_based: use zero based index of points if true otherwise use 1-based index of points. :param pared: use the pared down list of points :return: A list of point index lists
def get_bond(iface): ''' Return the content of a bond script CLI Example: .. code-block:: bash salt '*' ip.get_bond bond0 ''' path = os.path.join(_DEB_NETWORK_CONF_FILES, '{0}.conf'.format(iface)) return _read_file(path)
Return the content of a bond script CLI Example: .. code-block:: bash salt '*' ip.get_bond bond0
def update(self, date_expiry=values.unset, ttl=values.unset, mode=values.unset, status=values.unset, participants=values.unset): """ Update the SessionInstance :param datetime date_expiry: The ISO 8601 date when the Session should expire :param unicode ttl: When the session will expire :param SessionInstance.Mode mode: The Mode of the Session :param SessionInstance.Status status: The new status of the resource :param dict participants: The Participant objects to include in the session :returns: Updated SessionInstance :rtype: twilio.rest.proxy.v1.service.session.SessionInstance """ return self._proxy.update( date_expiry=date_expiry, ttl=ttl, mode=mode, status=status, participants=participants, )
Update the SessionInstance :param datetime date_expiry: The ISO 8601 date when the Session should expire :param unicode ttl: When the session will expire :param SessionInstance.Mode mode: The Mode of the Session :param SessionInstance.Status status: The new status of the resource :param dict participants: The Participant objects to include in the session :returns: Updated SessionInstance :rtype: twilio.rest.proxy.v1.service.session.SessionInstance
def dok15_s(k15):
    """
    calculates least-squares matrix for 15 measurements from Jelinek [1976]
    """
    A, B = design(15)  # get design matrix for 15 measurements
    sbar = np.dot(B, k15)  # get mean s
    t = (sbar[0] + sbar[1] + sbar[2])  # trace
    bulk = old_div(t, 3.)  # bulk susceptibility
    Kbar = np.dot(A, sbar)  # get best fit values for K
    dels = k15 - Kbar  # get deltas
    dels, sbar = old_div(dels, t), old_div(sbar, t)  # normalize by trace
    So = sum(dels**2)
    sigma = np.sqrt(old_div(So, 9.))  # standard deviation
    return sbar, sigma, bulk
calculates least-squares matrix for 15 measurements from Jelinek [1976]
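A hedged usage sketch; the 15 values below are invented stand-ins for a real 15-position susceptibility measurement, and `design` is assumed to be defined in the same module:

import numpy as np

k15 = np.array([1.02, 0.98, 1.01, 0.99, 1.00,
                1.03, 0.97, 1.02, 0.98, 1.00,
                1.01, 0.99, 1.00, 1.00, 1.00])
sbar, sigma, bulk = dok15_s(k15)  # normalized mean tensor, std. dev., bulk susceptibility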
def register(self, type): """ Registers a custom formatting function with ub.repr2 """ def _decorator(func): if isinstance(type, tuple): for t in type: self.func_registry[t] = func else: self.func_registry[type] = func return func return _decorator
Registers a custom formatting function with ub.repr2
def apply_rotation_scheme(self, backups_by_frequency, most_recent_backup): """ Apply the user defined rotation scheme to the result of :func:`group_backups()`. :param backups_by_frequency: A :class:`dict` in the format generated by :func:`group_backups()`. :param most_recent_backup: The :class:`~datetime.datetime` of the most recent backup. :raises: :exc:`~exceptions.ValueError` when the rotation scheme dictionary is empty (this would cause all backups to be deleted). .. note:: This method mutates the given data structure by removing all backups that should be removed to apply the user defined rotation scheme. """ if not self.rotation_scheme: raise ValueError("Refusing to use empty rotation scheme! (all backups would be deleted)") for frequency, backups in backups_by_frequency.items(): # Ignore frequencies not specified by the user. if frequency not in self.rotation_scheme: backups.clear() else: # Reduce the number of backups in each time slot of this # rotation frequency to a single backup (the oldest one or the # newest one). for period, backups_in_period in backups.items(): index = -1 if self.prefer_recent else 0 selected_backup = sorted(backups_in_period)[index] backups[period] = [selected_backup] # Check if we need to rotate away backups in old periods. retention_period = self.rotation_scheme[frequency] if retention_period != 'always': # Remove backups created before the minimum date of this # rotation frequency? (relative to the most recent backup) if self.strict: minimum_date = most_recent_backup - SUPPORTED_FREQUENCIES[frequency] * retention_period for period, backups_in_period in list(backups.items()): for backup in backups_in_period: if backup.timestamp < minimum_date: backups_in_period.remove(backup) if not backups_in_period: backups.pop(period) # If there are more periods remaining than the user # requested to be preserved we delete the oldest one(s). items_to_preserve = sorted(backups.items())[-retention_period:] backups_by_frequency[frequency] = dict(items_to_preserve)
Apply the user defined rotation scheme to the result of :func:`group_backups()`. :param backups_by_frequency: A :class:`dict` in the format generated by :func:`group_backups()`. :param most_recent_backup: The :class:`~datetime.datetime` of the most recent backup. :raises: :exc:`~exceptions.ValueError` when the rotation scheme dictionary is empty (this would cause all backups to be deleted). .. note:: This method mutates the given data structure by removing all backups that should be removed to apply the user defined rotation scheme.
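A sketch of how this method is driven, assuming an object exposing `rotation_scheme` and a `group_backups()` method as described (names and values are illustrative):

# Keep 7 daily and 4 weekly backups, and every monthly backup forever
rotator.rotation_scheme = {'daily': 7, 'weekly': 4, 'monthly': 'always'}
grouped = rotator.group_backups(backups)
rotator.apply_rotation_scheme(grouped, max(b.timestamp for b in backups))
# `grouped` has now been pruned in place to only the backups worth keeping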
def ExportMigrations(): """Exports counts of unapplied migrations. This is meant to be called during app startup, ideally by django_prometheus.apps.AppConfig. """ # Import MigrationExecutor lazily. MigrationExecutor checks at # import time that the apps are ready, and they are not when # django_prometheus is imported. ExportMigrations() should be # called in AppConfig.ready(), which signals that all apps are # ready. from django.db.migrations.executor import MigrationExecutor if 'default' in connections and ( type(connections['default']) == DatabaseWrapper): # This is the case where DATABASES = {} in the configuration, # i.e. the user is not using any databases. Django "helpfully" # adds a dummy database and then throws when you try to # actually use it. So we don't do anything, because trying to # export stats would crash the app on startup. return for alias in connections.databases: executor = MigrationExecutor(connections[alias]) ExportMigrationsForDatabase(alias, executor)
Exports counts of unapplied migrations. This is meant to be called during app startup, ideally by django_prometheus.apps.AppConfig.
def wake_lock_size(self): """Get the size of the current wake lock.""" output = self.adb_shell(WAKE_LOCK_SIZE_CMD) if not output: return None return int(output.split("=")[1].strip())
Get the size of the current wake lock.
def increment_slug(s): """Generate next slug for a series. Some docstore types will use slugs (see above) as document ids. To support unique ids, we'll serialize them as follows: TestUserA/my-test TestUserA/my-test-2 TestUserA/my-test-3 ... """ slug_parts = s.split('-') # advance (or add) the serial counter on the end of this slug # noinspection PyBroadException try: # if it's an integer, increment it slug_parts[-1] = str(1 + int(slug_parts[-1])) except: # there's no counter! add one now slug_parts.append('2') return '-'.join(slug_parts)
Generate next slug for a series. Some docstore types will use slugs (see above) as document ids. To support unique ids, we'll serialize them as follows: TestUserA/my-test TestUserA/my-test-2 TestUserA/my-test-3 ...
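For example (the behavior follows directly from the split on '-'):

increment_slug('TestUserA/my-test')    # -> 'TestUserA/my-test-2'
increment_slug('TestUserA/my-test-2')  # -> 'TestUserA/my-test-3'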
def main(): """Main function.""" table_data = [ ['Long String', ''], # One row. Two columns. Long string will replace this empty string. ] table = SingleTable(table_data) # Calculate newlines. max_width = table.column_max_width(1) wrapped_string = '\n'.join(wrap(LONG_STRING, max_width)) table.table_data[0][1] = wrapped_string print(table.table)
Main function.
def get_tag(self, tagname, tagidx): """ :returns: the tag associated to the given tagname and tag index """ return '%s=%s' % (tagname, decode(getattr(self, tagname)[tagidx]))
:returns: the tag associated to the given tagname and tag index
def _GetBytes(partition_key): """Gets the bytes representing the value of the partition key. """ if isinstance(partition_key, six.string_types): return bytearray(partition_key, encoding='utf-8') else: raise ValueError("Unsupported " + str(type(partition_key)) + " for partitionKey.")
Gets the bytes representing the value of the partition key.
def print_version(ctx, value): """Print the current version of sandman and exit.""" if not value: return import pkg_resources version = None try: version = pkg_resources.get_distribution('sandman').version finally: del pkg_resources click.echo(version) ctx.exit()
Print the current version of sandman and exit.
def bust_self(self, obj): """Remove the value that is being stored on `obj` for this :class:`.cached_property` object. :param obj: The instance on which to bust the cache. """ if self.func.__name__ in obj.__dict__: delattr(obj, self.func.__name__)
Remove the value that is being stored on `obj` for this :class:`.cached_property` object. :param obj: The instance on which to bust the cache.
def load_template_source(self, template_name, template_dirs=None):
    """Template loader that loads templates from zipped modules."""
    # Get every app's folder
    log.error("Calling zip loader")
    for folder in app_template_dirs:
        if ".zip/" in folder.replace("\\", "/"):
            lib_file, relative_folder = get_zip_file_and_relative_path(folder)
            # logging needs a format string; extra positional args alone break formatting
            log.error("%s %s", lib_file, relative_folder)
            try:
                z = zipfile.ZipFile(lib_file)
                log.error("%s", relative_folder + template_name)
                template_path_in_zip = os.path.join(relative_folder, template_name).replace("\\", "/")
                source = z.read(template_path_in_zip)
            except (IOError, KeyError):
                import traceback
                log.error(traceback.format_exc())
                try:
                    z.close()
                except Exception:
                    pass
                continue
            z.close()
            # We found a template, so return the source.
            template_path = "%s:%s" % (lib_file, template_path_in_zip)
            return (source, template_path)

    # If we reach here, the template couldn't be loaded
    raise TemplateDoesNotExist(template_name)
Template loader that loads templates from zipped modules.
def has_ended(self): """Tests if this assessment has ended. return: (boolean) - ``true`` if the assessment has ended, ``false`` otherwise *compliance: mandatory -- This method must be implemented.* """ assessment_offered = self.get_assessment_offered() now = DateTime.utcnow() # There's got to be a better way to do this: if self._my_map['completionTime'] is not None: return True elif assessment_offered.has_deadline() and assessment_offered.has_duration(): if self._my_map['actualStartTime'] is None: return now >= assessment_offered.get_deadline() else: return (now >= assessment_offered.get_deadline() and now >= self._my_map['actualStartTime'] + assessment_offered.get_duration()) elif assessment_offered.has_deadline(): return now >= assessment_offered.get_deadline() elif assessment_offered.has_duration() and self._my_map['actualStartTime'] is not None: return now >= self._my_map['actualStartTime'] + assessment_offered.get_duration() else: return False
Tests if this assessment has ended. return: (boolean) - ``true`` if the assessment has ended, ``false`` otherwise *compliance: mandatory -- This method must be implemented.*
def p_expr_number(p): "number : NUMBER" p[0] = node.number(p[1], lineno=p.lineno(1), lexpos=p.lexpos(1))
number : NUMBER
def config(self): """uses "global config" for cfg""" if self._config: return self._config else: self._config = p_config.ProsperConfig(self.config_path) return self._config
uses "global config" for cfg
def _SnakeCaseToCamelCase(path_name): """Converts a path name from snake_case to camelCase.""" result = [] after_underscore = False for c in path_name: if c.isupper(): raise Error('Fail to print FieldMask to Json string: Path name ' '{0} must not contain uppercase letters.'.format(path_name)) if after_underscore: if c.islower(): result.append(c.upper()) after_underscore = False else: raise Error('Fail to print FieldMask to Json string: The ' 'character after a "_" must be a lowercase letter ' 'in path name {0}.'.format(path_name)) elif c == '_': after_underscore = True else: result += c if after_underscore: raise Error('Fail to print FieldMask to Json string: Trailing "_" ' 'in path name {0}.'.format(path_name)) return ''.join(result)
Converts a path name from snake_case to camelCase.
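For example:

_SnakeCaseToCamelCase('foo_bar_baz')  # -> 'fooBarBaz'
_SnakeCaseToCamelCase('foo_')         # raises Error (trailing "_")
_SnakeCaseToCamelCase('Foo_bar')      # raises Error (uppercase letter)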
def Rsky(self): """Projected sky separation of stars """ return np.sqrt(self.position.x**2 + self.position.y**2)
Projected sky separation of stars
def bbox(self): """(left, top, right, bottom) tuple.""" if not hasattr(self, '_bbox'): self._bbox = extract_bbox(self) return self._bbox
(left, top, right, bottom) tuple.
def do_array(self, parent=None, ident=0): """ Handles a TC_ARRAY opcode :param parent: :param ident: Log indentation level :return: A list of deserialized objects """ # TC_ARRAY classDesc newHandle (int)<size> values[size] log_debug("[array]", ident) _, classdesc = self._read_and_exec_opcode( ident=ident + 1, expect=( self.TC_CLASSDESC, self.TC_PROXYCLASSDESC, self.TC_NULL, self.TC_REFERENCE, ), ) array = JavaArray(classdesc) self._add_reference(array, ident) (size,) = self._readStruct(">i") log_debug("size: {0}".format(size), ident) type_char = classdesc.name[0] assert type_char == self.TYPE_ARRAY type_char = classdesc.name[1] if type_char == self.TYPE_OBJECT or type_char == self.TYPE_ARRAY: for _ in range(size): _, res = self._read_and_exec_opcode(ident=ident + 1) log_debug("Object value: {0}".format(res), ident) array.append(res) elif type_char == self.TYPE_BYTE: array = JavaByteArray(self.object_stream.read(size), classdesc) elif self.use_numpy_arrays: import numpy array = numpy.fromfile( self.object_stream, dtype=JavaObjectConstants.NUMPY_TYPE_MAP[type_char], count=size, ) else: for _ in range(size): res = self._read_value(type_char, ident) log_debug("Native value: {0}".format(repr(res)), ident) array.append(res) return array
Handles a TC_ARRAY opcode :param parent: :param ident: Log indentation level :return: A list of deserialized objects
def users(self): """Returns the list of users in the database""" result = self.db.read("", {"q": "ls"}) if result is None or result.json() is None: return [] users = [] for u in result.json(): usr = self(u["name"]) usr.metadata = u users.append(usr) return users
Returns the list of users in the database
def _default_commands(self):
    """ Build the list of CLI commands by finding subclasses of the Command
    class

    Also allows commands to be installed using the "enaml_native_command"
    entry point. This entry point should return a Command subclass

    """
    commands = [c() for c in find_commands(Command)]

    #: Get commands installed via entry points
    for ep in pkg_resources.iter_entry_points(
            group="enaml_native_command"):
        c = ep.load()
        if not issubclass(c, Command):
            print("Warning: entry point {} did not return a valid enaml "
                  "cli command! This command will be ignored!".format(
                      ep.name))
            continue  # actually skip it, as the warning promises
        commands.append(c())
    return commands
Build the list of CLI commands by finding subclasses of the Command class Also allows commands to be installed using the "enaml_native_command" entry point. This entry point should return a Command subclass
def expand_branch_name(self, name): """ Expand branch names to their unambiguous form. :param name: The name of a local or remote branch (a string). :returns: The unambiguous form of the branch name (a string). This internal method is used by methods like :func:`find_revision_id()` and :func:`find_revision_number()` to detect and expand remote branch names into their unambiguous form which is accepted by commands like ``git rev-parse`` and ``git rev-list --count``. """ # If no name is given we pick the default revision. if not name: return self.default_revision # Run `git for-each-ref' once and remember the results. branches = list(self.find_branches_raw()) # Check for an exact match against a local branch. for prefix, other_name, revision_id in branches: if prefix == 'refs/heads/' and name == other_name: # If we find a local branch whose name exactly matches the name # given by the caller then we consider the argument given by # the caller unambiguous. logger.debug("Branch name %r matches local branch.", name) return name # Check for an exact match against a remote branch. for prefix, other_name, revision_id in branches: if prefix.startswith('refs/remotes/') and name == other_name: # If we find a remote branch whose name exactly matches the # name given by the caller then we expand the name given by the # caller into the full %(refname) emitted by `git for-each-ref'. unambiguous_name = prefix + name logger.debug("Branch name %r matches remote branch %r.", name, unambiguous_name) return unambiguous_name # As a fall back we return the given name without expanding it. # This code path might not be necessary but was added out of # conservativeness, with the goal of trying to guarantee # backwards compatibility. logger.debug("Failed to expand branch name %r.", name) return name
Expand branch names to their unambiguous form. :param name: The name of a local or remote branch (a string). :returns: The unambiguous form of the branch name (a string). This internal method is used by methods like :func:`find_revision_id()` and :func:`find_revision_number()` to detect and expand remote branch names into their unambiguous form which is accepted by commands like ``git rev-parse`` and ``git rev-list --count``.
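A hedged illustration, assuming 'main' exists locally while 'feature-x' exists only on the remote 'origin':

repo.expand_branch_name('main')       # -> 'main' (local branch, already unambiguous)
repo.expand_branch_name('feature-x')  # -> 'refs/remotes/origin/feature-x'
repo.expand_branch_name('')           # -> repo.default_revision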
def forceValue(self, newVal, noteEdited=False): """Force-set a parameter entry to the given value""" if newVal is None: newVal = "" self.choice.set(newVal) if noteEdited: self.widgetEdited(val=newVal, skipDups=False)
Force-set a parameter entry to the given value
def list_connections(self, status=None): ''' list connections ''' if status is None: status = 'ALL' response, status_code = self.__pod__.Connection.get_v1_connection_list( sessionToken=self.__session__, status=status ).result() self.logger.debug('%s: %s' % (status_code, response)) return status_code, response
list connections
def get_forced_variation(self, experiment, user_id): """ Determine if a user is forced into a variation for the given experiment and return that variation. Args: experiment: Object representing the experiment for which user is to be bucketed. user_id: ID for the user. Returns: Variation in which the user with ID user_id is forced into. None if no variation. """ forced_variations = experiment.forcedVariations if forced_variations and user_id in forced_variations: variation_key = forced_variations.get(user_id) variation = self.config.get_variation_from_key(experiment.key, variation_key) if variation: self.logger.info('User "%s" is forced in variation "%s".' % (user_id, variation_key)) return variation return None
Determine if a user is forced into a variation for the given experiment and return that variation. Args: experiment: Object representing the experiment for which user is to be bucketed. user_id: ID for the user. Returns: Variation in which the user with ID user_id is forced into. None if no variation.
def _next_server(self):
    """
    Chooses next available server to connect.
    """
    if self.options["dont_randomize"]:
        server = self._server_pool.pop(0)
        self._server_pool.append(server)
    else:
        shuffle(self._server_pool)

    s = None
    for server in self._server_pool:
        if self.options["max_reconnect_attempts"] > 0 and (
                server.reconnects > self.options["max_reconnect_attempts"]):
            continue
        s = server
        break  # take the first eligible server, honoring the rotation above

    return s
Chooses next available server to connect.
def substring(ctx, full, start, length): ''' Yields one string ''' full = next(string_arg(ctx, full), '') start = int(next(to_number(start))) length = int(next(to_number(length))) yield full[start-1:start-1+length]
Yields one string
def get_solvers(self, refresh=False, order_by='avg_load', **filters): """Return a filtered list of solvers handled by this client. Args: refresh (bool, default=False): Force refresh of cached list of solvers/properties. order_by (callable/str/None, default='avg_load'): Solver sorting key function (or :class:`Solver` attribute/item dot-separated path). By default, solvers are sorted by average load. To explicitly not sort the solvers (and use the API-returned order), set ``order_by=None``. Signature of the `key` `callable` is:: key :: (Solver s, Ord k) => s -> k Basic structure of the `key` string path is:: "-"? (attr|item) ( "." (attr|item) )* For example, to use solver property named ``max_anneal_schedule_points``, available in ``Solver.properties`` dict, you can either specify a callable `key`:: key=lambda solver: solver.properties['max_anneal_schedule_points'] or, you can use a short string path based key:: key='properties.max_anneal_schedule_points' Solver derived properties, available as :class:`Solver` properties can also be used (e.g. ``num_active_qubits``, ``online``, ``avg_load``, etc). Ascending sort order is implied, unless the key string path does not start with ``-``, in which case descending sort is used. Note: the sort used for ordering solvers by `key` is **stable**, meaning that if multiple solvers have the same value for the key, their relative order is preserved, and effectively they are in the same order as returned by the API. Note: solvers with ``None`` for key appear last in the list of solvers. When providing a key callable, ensure all values returned are of the same type (particularly in Python 3). For solvers with undefined key value, return ``None``. **filters: See `Filtering forms` and `Operators` below. Solver filters are defined, similarly to Django QuerySet filters, with keyword arguments of form `<key1>__...__<keyN>[__<operator>]=<value>`. Each `<operator>` is a predicate (boolean) function that acts on two arguments: value of feature `<name>` (described with keys path `<key1.key2...keyN>`) and the required `<value>`. Feature `<name>` can be: 1) a derived solver property, available as an identically named :class:`Solver`'s property (`name`, `qpu`, `software`, `online`, `num_active_qubits`, `avg_load`) 2) a solver parameter, available in :obj:`Solver.parameters` 3) a solver property, available in :obj:`Solver.properties` 4) a path describing a property in nested dictionaries Filtering forms are: * <derived_property>__<operator> (object <value>) * <derived_property> (bool) This form ensures the value of solver's property bound to `derived_property`, after applying `operator` equals the `value`. The default operator is `eq`. For example:: >>> client.get_solvers(avg_load__gt=0.5) but also:: >>> client.get_solvers(online=True) >>> # identical to: >>> client.get_solvers(online__eq=True) * <parameter>__<operator> (object <value>) * <parameter> (bool) This form ensures that the solver supports `parameter`. General operator form can be used but usually does not make sense for parameters, since values are human-readable descriptions. The default operator is `available`. Example:: >>> client.get_solvers(flux_biases=True) >>> # identical to: >>> client.get_solvers(flux_biases__available=True) * <property>__<operator> (object <value>) * <property> (bool) This form ensures the value of the solver's `property`, after applying `operator` equals the righthand side `value`. The default operator is `eq`. 
Note: if a non-existing parameter/property name/key is given, the
default operator is `eq`.

Operators are:

* `available` (<name>: str, <value>: bool):
    Test availability of <name> feature.
* `eq`, `lt`, `lte`, `gt`, `gte` (<name>: str, <value>: any):
    Standard relational operators that compare feature <name> value
    with <value>.
* `regex` (<name>: str, <value>: str):
    Test regular expression matching feature value.
* `covers` (<name>: str, <value>: single value or range expressed as
  2-tuple/list):
    Test feature <name> value (which should be a *range*) covers a
    given value or a subrange.
* `within` (<name>: str, <value>: range expressed as 2-tuple/list):
    Test feature <name> value (which can be a *single value* or a
    *range*) is within a given range.
* `in` (<name>: str, <value>: container type):
    Test feature <name> value is *in* <value> container.
* `contains` (<name>: str, <value>: any):
    Test feature <name> value (container type) *contains* <value>.
* `issubset` (<name>: str, <value>: container type):
    Test feature <name> value (container type) is a subset of <value>.
* `issuperset` (<name>: str, <value>: container type):
    Test feature <name> value (container type) is a superset of <value>.

Derived properties are:

* `name` (str): Solver name/id.
* `qpu` (bool): Is solver QPU based?
* `software` (bool): Is solver software based?
* `online` (bool, default=True): Is solver online?
* `num_active_qubits` (int): Number of active qubits. Less than or
  equal to `num_qubits`.
* `avg_load` (float): Solver's average load (similar to Unix load average).

Common solver parameters are:

* `flux_biases`: Should solver accept flux biases?
* `anneal_schedule`: Should solver accept anneal schedule?

Common solver properties are:

* `num_qubits` (int): Number of qubits available.
* `vfyc` (bool): Should solver work on "virtual full-yield chip"?
* `max_anneal_schedule_points` (int): Piecewise linear annealing
  schedule points.
* `h_range` ([int,int]), j_range ([int,int]): Biases/couplings values range.
* `num_reads_range` ([int,int]): Range of allowed values for `num_reads`
  parameter.

Returns:
    list[Solver]: List of all solvers that satisfy the conditions.

Note:
    Client subclasses (e.g. :class:`dwave.cloud.qpu.Client` or
    :class:`dwave.cloud.sw.Client`) already filter solvers by resource
    type, so for `qpu` and `software` filters to have effect, call
    :meth:`.get_solvers` on base class :class:`~dwave.cloud.client.Client`.

Examples::

    client.get_solvers(
        num_qubits__gt=2000,    # we need more than 2000 qubits
        num_qubits__lt=4000,    # ...
but fewer than 4000 qubits num_qubits__within=(2000, 4000), # an alternative to the previous two lines num_active_qubits=1089, # we want a particular number of active qubits vfyc=True, # we require a fully yielded Chimera vfyc__in=[False, None], # inverse of the previous filter vfyc__available=False, # we want solvers that do not advertize the vfyc property anneal_schedule=True, # we need support for custom anneal schedule max_anneal_schedule_points__gte=4, # we need at least 4 points for our anneal schedule num_reads_range__covers=1000, # our solver must support returning 1000 reads extended_j_range__covers=[-2, 2], # we need extended J range to contain subrange [-2,2] couplings__contains=[0, 128], # coupling (edge between) qubits (0,128) must exist couplings__issuperset=[[0,128], [0,4]], # two couplings required: (0,128) and (0,4) qubits__issuperset={0, 4, 215}, # qubits 0, 4 and 215 must exist supported_problem_types__issubset={'ising', 'qubo'}, # require Ising, QUBO or both to be supported name='DW_2000Q_3', # full solver name/ID match name__regex='.*2000.*', # partial/regex-based solver name match chip_id__regex='DW_.*', # chip ID prefix must be DW_ topology__type__eq="chimera" # topology.type must be chimera ) """ def covers_op(prop, val): """Does LHS `prop` (range) fully cover RHS `val` (range or item)?""" # `prop` must be a 2-element list/tuple range. if not isinstance(prop, (list, tuple)) or not len(prop) == 2: raise ValueError("2-element list/tuple range required for LHS value") llo, lhi = min(prop), max(prop) # `val` can be a single value, or a range (2-list/2-tuple). if isinstance(val, (list, tuple)) and len(val) == 2: # val range within prop range? rlo, rhi = min(val), max(val) return llo <= rlo and lhi >= rhi else: # val item within prop range? return llo <= val <= lhi def within_op(prop, val): """Is LHS `prop` (range or item) fully covered by RHS `val` (range)?""" try: return covers_op(val, prop) except ValueError: raise ValueError("2-element list/tuple range required for RHS value") def _set(iterable): """Like set(iterable), but works for lists as items in iterable. Before constructing a set, lists are converted to tuples. 
""" first = next(iter(iterable)) if isinstance(first, list): return set(tuple(x) for x in iterable) return set(iterable) def with_valid_lhs(op): @wraps(op) def _wrapper(prop, val): if prop is None: return False return op(prop, val) return _wrapper # available filtering operators ops = { 'lt': with_valid_lhs(operator.lt), 'lte': with_valid_lhs(operator.le), 'gt': with_valid_lhs(operator.gt), 'gte': with_valid_lhs(operator.ge), 'eq': operator.eq, 'available': lambda prop, val: prop is not None if val else prop is None, 'regex': with_valid_lhs(lambda prop, val: re.match("^{}$".format(val), prop)), # range operations 'covers': with_valid_lhs(covers_op), 'within': with_valid_lhs(within_op), # membership tests 'in': lambda prop, val: prop in val, 'contains': with_valid_lhs(lambda prop, val: val in prop), # set tests 'issubset': with_valid_lhs(lambda prop, val: _set(prop).issubset(_set(val))), 'issuperset': with_valid_lhs(lambda prop, val: _set(prop).issuperset(_set(val))), } def predicate(solver, query, val): # needs to handle kwargs like these: # key=val # key__op=val # key__key=val # key__key__op=val # LHS is split on __ in `query` assert len(query) >= 1 potential_path, potential_op_name = query[:-1], query[-1] if potential_op_name in ops: # op is explicit, and potential path is correct op_name = potential_op_name else: # op is implied and depends on property type, path is the whole query op_name = None potential_path = query path = '.'.join(potential_path) if path in solver.derived_properties: op = ops[op_name or 'eq'] return op(getattr(solver, path), val) elif pluck(solver.parameters, path, None) is not None: op = ops[op_name or 'available'] return op(pluck(solver.parameters, path), val) elif pluck(solver.properties, path, None) is not None: op = ops[op_name or 'eq'] return op(pluck(solver.properties, path), val) else: op = ops[op_name or 'eq'] return op(None, val) # param validation sort_reverse = False if not order_by: sort_key = None elif isinstance(order_by, six.string_types): if order_by[0] == '-': sort_reverse = True order_by = order_by[1:] if not order_by: sort_key = None else: sort_key = lambda solver: pluck(solver, order_by, None) elif callable(order_by): sort_key = order_by else: raise TypeError("expected string or callable for 'order_by'") # default filters: filters.setdefault('online', True) predicates = [] for lhs, val in filters.items(): query = lhs.split('__') predicates.append(partial(predicate, query=query, val=val)) _LOGGER.debug("Filtering solvers with predicates=%r", predicates) # optimization for case when exact solver name/id is known: # we can fetch only that solver # NOTE: in future, complete feature-based filtering will be on server-side query = dict(refresh_=refresh) if 'name' in filters: query['name'] = filters['name'] if 'name__eq' in filters: query['name'] = filters['name__eq'] # filter solvers = self._fetch_solvers(**query) solvers = [s for s in solvers if all(p(s) for p in predicates)] # sort: undefined (None) key values go last if sort_key is not None: solvers_with_keys = [(sort_key(solver), solver) for solver in solvers] solvers_with_invalid_keys = [(key, solver) for key, solver in solvers_with_keys if key is None] solvers_with_valid_keys = [(key, solver) for key, solver in solvers_with_keys if key is not None] solvers_with_valid_keys.sort(key=operator.itemgetter(0)) solvers = [solver for key, solver in chain(solvers_with_valid_keys, solvers_with_invalid_keys)] # reverse if necessary (as a separate step from sorting, so it works for invalid keys # and plain 
list reverse without sorting) if sort_reverse: solvers.reverse() return solvers
Return a filtered list of solvers handled by this client. Args: refresh (bool, default=False): Force refresh of cached list of solvers/properties. order_by (callable/str/None, default='avg_load'): Solver sorting key function (or :class:`Solver` attribute/item dot-separated path). By default, solvers are sorted by average load. To explicitly not sort the solvers (and use the API-returned order), set ``order_by=None``. Signature of the `key` `callable` is:: key :: (Solver s, Ord k) => s -> k Basic structure of the `key` string path is:: "-"? (attr|item) ( "." (attr|item) )* For example, to use solver property named ``max_anneal_schedule_points``, available in ``Solver.properties`` dict, you can either specify a callable `key`:: key=lambda solver: solver.properties['max_anneal_schedule_points'] or, you can use a short string path based key:: key='properties.max_anneal_schedule_points' Solver derived properties, available as :class:`Solver` properties can also be used (e.g. ``num_active_qubits``, ``online``, ``avg_load``, etc). Ascending sort order is implied, unless the key string path does not start with ``-``, in which case descending sort is used. Note: the sort used for ordering solvers by `key` is **stable**, meaning that if multiple solvers have the same value for the key, their relative order is preserved, and effectively they are in the same order as returned by the API. Note: solvers with ``None`` for key appear last in the list of solvers. When providing a key callable, ensure all values returned are of the same type (particularly in Python 3). For solvers with undefined key value, return ``None``. **filters: See `Filtering forms` and `Operators` below. Solver filters are defined, similarly to Django QuerySet filters, with keyword arguments of form `<key1>__...__<keyN>[__<operator>]=<value>`. Each `<operator>` is a predicate (boolean) function that acts on two arguments: value of feature `<name>` (described with keys path `<key1.key2...keyN>`) and the required `<value>`. Feature `<name>` can be: 1) a derived solver property, available as an identically named :class:`Solver`'s property (`name`, `qpu`, `software`, `online`, `num_active_qubits`, `avg_load`) 2) a solver parameter, available in :obj:`Solver.parameters` 3) a solver property, available in :obj:`Solver.properties` 4) a path describing a property in nested dictionaries Filtering forms are: * <derived_property>__<operator> (object <value>) * <derived_property> (bool) This form ensures the value of solver's property bound to `derived_property`, after applying `operator` equals the `value`. The default operator is `eq`. For example:: >>> client.get_solvers(avg_load__gt=0.5) but also:: >>> client.get_solvers(online=True) >>> # identical to: >>> client.get_solvers(online__eq=True) * <parameter>__<operator> (object <value>) * <parameter> (bool) This form ensures that the solver supports `parameter`. General operator form can be used but usually does not make sense for parameters, since values are human-readable descriptions. The default operator is `available`. Example:: >>> client.get_solvers(flux_biases=True) >>> # identical to: >>> client.get_solvers(flux_biases__available=True) * <property>__<operator> (object <value>) * <property> (bool) This form ensures the value of the solver's `property`, after applying `operator` equals the righthand side `value`. The default operator is `eq`. Note: if a non-existing parameter/property name/key given, the default operator is `eq`. 
Operators are:

* `available` (<name>: str, <value>: bool):
    Test availability of <name> feature.
* `eq`, `lt`, `lte`, `gt`, `gte` (<name>: str, <value>: any):
    Standard relational operators that compare feature <name> value
    with <value>.
* `regex` (<name>: str, <value>: str):
    Test regular expression matching feature value.
* `covers` (<name>: str, <value>: single value or range expressed as
  2-tuple/list):
    Test feature <name> value (which should be a *range*) covers a
    given value or a subrange.
* `within` (<name>: str, <value>: range expressed as 2-tuple/list):
    Test feature <name> value (which can be a *single value* or a
    *range*) is within a given range.
* `in` (<name>: str, <value>: container type):
    Test feature <name> value is *in* <value> container.
* `contains` (<name>: str, <value>: any):
    Test feature <name> value (container type) *contains* <value>.
* `issubset` (<name>: str, <value>: container type):
    Test feature <name> value (container type) is a subset of <value>.
* `issuperset` (<name>: str, <value>: container type):
    Test feature <name> value (container type) is a superset of <value>.

Derived properties are:

* `name` (str): Solver name/id.
* `qpu` (bool): Is solver QPU based?
* `software` (bool): Is solver software based?
* `online` (bool, default=True): Is solver online?
* `num_active_qubits` (int): Number of active qubits. Less than or
  equal to `num_qubits`.
* `avg_load` (float): Solver's average load (similar to Unix load average).

Common solver parameters are:

* `flux_biases`: Should solver accept flux biases?
* `anneal_schedule`: Should solver accept anneal schedule?

Common solver properties are:

* `num_qubits` (int): Number of qubits available.
* `vfyc` (bool): Should solver work on "virtual full-yield chip"?
* `max_anneal_schedule_points` (int): Piecewise linear annealing
  schedule points.
* `h_range` ([int,int]), j_range ([int,int]): Biases/couplings values range.
* `num_reads_range` ([int,int]): Range of allowed values for `num_reads`
  parameter.

Returns:
    list[Solver]: List of all solvers that satisfy the conditions.

Note:
    Client subclasses (e.g. :class:`dwave.cloud.qpu.Client` or
    :class:`dwave.cloud.sw.Client`) already filter solvers by resource
    type, so for `qpu` and `software` filters to have effect, call
    :meth:`.get_solvers` on base class :class:`~dwave.cloud.client.Client`.

Examples::

    client.get_solvers(
        num_qubits__gt=2000,    # we need more than 2000 qubits
        num_qubits__lt=4000,    # ...
but fewer than 4000 qubits num_qubits__within=(2000, 4000), # an alternative to the previous two lines num_active_qubits=1089, # we want a particular number of active qubits vfyc=True, # we require a fully yielded Chimera vfyc__in=[False, None], # inverse of the previous filter vfyc__available=False, # we want solvers that do not advertize the vfyc property anneal_schedule=True, # we need support for custom anneal schedule max_anneal_schedule_points__gte=4, # we need at least 4 points for our anneal schedule num_reads_range__covers=1000, # our solver must support returning 1000 reads extended_j_range__covers=[-2, 2], # we need extended J range to contain subrange [-2,2] couplings__contains=[0, 128], # coupling (edge between) qubits (0,128) must exist couplings__issuperset=[[0,128], [0,4]], # two couplings required: (0,128) and (0,4) qubits__issuperset={0, 4, 215}, # qubits 0, 4 and 215 must exist supported_problem_types__issubset={'ising', 'qubo'}, # require Ising, QUBO or both to be supported name='DW_2000Q_3', # full solver name/ID match name__regex='.*2000.*', # partial/regex-based solver name match chip_id__regex='DW_.*', # chip ID prefix must be DW_ topology__type__eq="chimera" # topology.type must be chimera )
def atan(x): """ Inverse tangent """ if isinstance(x, UncertainFunction): mcpts = np.arctan(x._mcpts) return UncertainFunction(mcpts) else: return np.arctan(x)
Inverse tangent
def run_cmd(cmd, echo=False, fail_silently=False, **kwargs):
    r"""Call given command with ``subprocess.call`` function.

    :param cmd: Command to run.
    :type cmd: tuple or str
    :param echo:
        If enabled show command to call and its output in STDOUT, otherwise
        hide all output. By default: False
    :param fail_silently: Do not raise exception on error. By default: False
    :param \*\*kwargs:
        Additional keyword arguments to be passed to ``subprocess.call``
        function. STDOUT and STDERR streams would be setup inside of function
        to ensure hiding command output in case of disabling ``echo``.
    """
    out, err = None, None
    retcode = None  # guard against UnboundLocalError if the call raises

    if echo:
        cmd_str = cmd if isinstance(cmd, string_types) else ' '.join(cmd)
        kwargs['stdout'], kwargs['stderr'] = sys.stdout, sys.stderr
        print_message('$ {0}'.format(cmd_str))
    else:
        out, err = get_temp_streams()
        kwargs['stdout'], kwargs['stderr'] = out, err

    try:
        retcode = subprocess.call(cmd, **kwargs)
    except subprocess.CalledProcessError as e:  # don't shadow the ``err`` stream
        if fail_silently:
            return False
        print_error(str(e) if IS_PY3 else unicode(e))  # noqa
    finally:
        if out:
            out.close()
        if err:
            err.close()

    if retcode and echo and not fail_silently:
        print_error('Command {0!r} returned non-zero exit status {1}'.
                    format(cmd_str, retcode))

    return retcode
r"""Call given command with ``subprocess.call`` function. :param cmd: Command to run. :type cmd: tuple or str :param echo: If enabled show command to call and its output in STDOUT, otherwise hide all output. By default: False :param fail_silently: Do not raise exception on error. By default: False :param \*\*kwargs: Additional keyword arguments to be passed to ``subprocess.call`` function. STDOUT and STDERR streams would be setup inside of function to ensure hiding command output in case of disabling ``echo``.
def from_json(cls, data): """Create a Data Collection from a dictionary. Args: { "header": A Ladybug Header, "values": An array of values, "datetimes": An array of datetimes, "validated_a_period": Boolean for whether header analysis_period is valid } """ assert 'header' in data, 'Required keyword "header" is missing!' assert 'values' in data, 'Required keyword "values" is missing!' assert 'datetimes' in data, 'Required keyword "datetimes" is missing!' collection = cls(Header.from_json(data['header']), data['values'], [DateTime.from_json(dat) for dat in data['datetimes']]) if 'validated_a_period' in data: collection._validated_a_period = data['validated_a_period'] return collection
Create a Data Collection from a dictionary. Args: { "header": A Ladybug Header, "values": An array of values, "datetimes": An array of datetimes, "validated_a_period": Boolean for whether header analysis_period is valid }
def ensure_directory(path): """ Ensure directory exists for a given file path. """ dirname = os.path.dirname(path) if not os.path.exists(dirname): os.makedirs(dirname)
Ensure directory exists for a given file path.
def extras_to_string(extras): # type: (Iterable[S]) -> S """Turn a list of extras into a string""" if isinstance(extras, six.string_types): if extras.startswith("["): return extras else: extras = [extras] if not extras: return "" return "[{0}]".format(",".join(sorted(set(extras))))
Turn a list of extras into a string
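For example:

extras_to_string(['socks', 'security'])  # -> '[security,socks]' (sorted, deduplicated)
extras_to_string('[socks]')              # -> '[socks]' (already formatted)
extras_to_string([])                     # -> ''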
def convert_body_to_unicode(resp): """ If the request or responses body is bytes, decode it to a string (for python3 support) """ if type(resp) is not dict: # Some of the tests just serialize and deserialize a string. return _convert_string_to_unicode(resp) else: body = resp.get('body') if body is not None: try: body['string'] = _convert_string_to_unicode( body['string'] ) except (KeyError, TypeError, AttributeError): # The thing we were converting either wasn't a dictionary or # didn't have the keys we were expecting. # For example request object has no 'string' key. resp['body'] = _convert_string_to_unicode(body) return resp
If the request or responses body is bytes, decode it to a string (for python3 support)
def process_word(word: str, to_lower: bool = False, append_case: Optional[str] = None) -> Tuple[str]: """Converts word to a tuple of symbols, optionally converts it to lowercase and adds capitalization label. Args: word: input word to_lower: whether to lowercase append_case: whether to add case mark ('<FIRST_UPPER>' for first capital and '<ALL_UPPER>' for all caps) Returns: a preprocessed word """ if all(x.isupper() for x in word) and len(word) > 1: uppercase = "<ALL_UPPER>" elif word[0].isupper(): uppercase = "<FIRST_UPPER>" else: uppercase = None if to_lower: word = word.lower() if word.isdigit(): answer = ["<DIGIT>"] elif word.startswith("http://") or word.startswith("www."): answer = ["<HTTP>"] else: answer = list(word) if to_lower and uppercase is not None: if append_case == "first": answer = [uppercase] + answer elif append_case == "last": answer = answer + [uppercase] return tuple(answer)
Converts word to a tuple of symbols, optionally converts it to lowercase and adds capitalization label. Args: word: input word to_lower: whether to lowercase append_case: whether to add case mark ('<FIRST_UPPER>' for first capital and '<ALL_UPPER>' for all caps) Returns: a preprocessed word
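For example:

process_word('Moscow', to_lower=True, append_case='first')
# -> ('<FIRST_UPPER>', 'm', 'o', 's', 'c', 'o', 'w')
process_word('NASA', to_lower=True, append_case='first')
# -> ('<ALL_UPPER>', 'n', 'a', 's', 'a')
process_word('1234')
# -> ('<DIGIT>',)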
def template_sunmoon(self, **kwargs): """ return the file name for sun or moon template files """ kwargs_copy = self.base_dict.copy() kwargs_copy.update(**kwargs) kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs)) kwargs_copy['component'] = kwargs.get( 'component', self.component(**kwargs)) self._replace_none(kwargs_copy) localpath = NameFactory.templatesunmoon_format.format(**kwargs_copy) if kwargs.get('fullpath', False): return self.fullpath(localpath=localpath) return localpath
return the file name for sun or moon template files
def _preserve_settings(method: T.Callable) -> T.Callable: """Decorator that ensures ObservableProperty-specific attributes are kept when using methods to change deleter, getter or setter.""" @functools.wraps(method) def _wrapper( old: "ObservableProperty", handler: T.Callable ) -> "ObservableProperty": new = method(old, handler) # type: ObservableProperty new.event = old.event new.observable = old.observable return new return _wrapper
Decorator that ensures ObservableProperty-specific attributes are kept when using methods to change deleter, getter or setter.
def run(self): """ Set up the process environment in preparation for running an Ansible module. This monkey-patches the Ansible libraries in various places to prevent it from trying to kill the process on completion, and to prevent it from reading sys.stdin. :returns: Module result dictionary. """ self.setup() if self.detach: self.econtext.detach() try: return self._run() finally: self.revert()
Set up the process environment in preparation for running an Ansible module. This monkey-patches the Ansible libraries in various places to prevent it from trying to kill the process on completion, and to prevent it from reading sys.stdin. :returns: Module result dictionary.
def path_to_text(self, path): ''' Transform local PDF file to string. Args: path: path to PDF file. Returns: string. ''' rsrcmgr = PDFResourceManager() retstr = StringIO() codec = 'utf-8' laparams = LAParams() device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams) fp = open(path, 'rb') interpreter = PDFPageInterpreter(rsrcmgr, device) password = "" maxpages = 0 caching = True pagenos = set() pages_data = PDFPage.get_pages( fp, pagenos, maxpages=maxpages, password=password, caching=caching, check_extractable=True ) for page in pages_data: interpreter.process_page(page) text = retstr.getvalue() text = text.replace("\n", "") fp.close() device.close() retstr.close() return text
Transform local PDF file to string. Args: path: path to PDF file. Returns: string.
def dropwhile(self, func=None):
    """
    Return a new Collection with the first few items removed.

    Parameters:

    func : function(Node) -> bool

    Returns:

    A new Collection, discarding all leading items for which
    bool(func(item)) == True; the first item where it evaluates
    to False, and every item after it, is kept
    (itertools.dropwhile semantics)
    """
    func = _make_callable(func)
    return Collection(dropwhile(func, self._items))
Return a new Collection with the first few items removed. Parameters: func : function(Node) -> bool Returns: A new Collection, discarding all leading items for which bool(func(item)) == True; the first item where it evaluates to False, and every item after it, is kept (itertools.dropwhile semantics)
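A minimal usage sketch (hypothetical Collection of nodes with a numeric `depth` attribute):

# Drop leading shallow nodes; keep everything from the first deep node on
rest = collection.dropwhile(lambda node: node.depth < 2)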
def apply_tfa_magseries(lcfile, timecol, magcol, errcol, templateinfo, mintemplatedist_arcmin=10.0, lcformat='hat-sql', lcformatdir=None, interp='nearest', sigclip=5.0): '''This applies the TFA correction to an LC given TFA template information. Parameters ---------- lcfile : str This is the light curve file to apply the TFA correction to. timecol,magcol,errcol : str These are the column keys in the lcdict for the LC file to apply the TFA correction to. templateinfo : dict or str This is either the dict produced by `tfa_templates_lclist` or the pickle produced by the same function. mintemplatedist_arcmin : float This sets the minimum distance required from the target object for objects in the TFA template ensemble. Objects closer than this distance will be removed from the ensemble. lcformat : str This is the `formatkey` associated with your light curve format, which you previously passed in to the `lcproc.register_lcformat` function. This will be used to look up how to find and read the light curves specified in `basedir` or `use_list_of_filenames`. lcformatdir : str or None If this is provided, gives the path to a directory when you've stored your lcformat description JSONs, other than the usual directories lcproc knows to search for them in. Use this along with `lcformat` to specify an LC format JSON file that's not currently registered with lcproc. interp : str This is passed to scipy.interpolate.interp1d as the kind of interpolation to use when reforming this light curve to the timebase of the TFA templates. sigclip : float or sequence of two floats or None This is the sigma clip to apply to this light curve before running TFA on it. Returns ------- str This returns the filename of the light curve file generated after TFA applications. This is a pickle (that can be read by `lcproc.read_pklc`) in the same directory as `lcfile`. The `magcol` will be encoded in the filename, so each `magcol` in `lcfile` gets its own output file. ''' try: formatinfo = get_lcformat(lcformat, use_lcformat_dir=lcformatdir) if formatinfo: (dfileglob, readerfunc, dtimecols, dmagcols, derrcols, magsarefluxes, normfunc) = formatinfo else: LOGERROR("can't figure out the light curve format") return None except Exception as e: LOGEXCEPTION("can't figure out the light curve format") return None # get the templateinfo from a pickle if necessary if isinstance(templateinfo,str) and os.path.exists(templateinfo): with open(templateinfo,'rb') as infd: templateinfo = pickle.load(infd) lcdict = readerfunc(lcfile) if ((isinstance(lcdict, (tuple, list))) and isinstance(lcdict[0], dict)): lcdict = lcdict[0] objectid = lcdict['objectid'] # this is the initial template array tmagseries = templateinfo[magcol][ 'template_magseries' ][::] # if the object itself is in the template ensemble, remove it if objectid in templateinfo[magcol]['template_objects']: LOGWARNING('object %s found in the TFA template ensemble, removing...' % objectid) templateind = templateinfo[magcol]['template_objects'] == objectid # get the objects in the tmagseries not corresponding to the current # object's index tmagseries = tmagseries[~templateind,:] # check if there are close matches to the current object in the templates object_matches = coordutils.conesearch_kdtree( templateinfo[magcol]['template_radecl_kdtree'], lcdict['objectinfo']['ra'], lcdict['objectinfo']['decl'], mintemplatedist_arcmin/60.0 ) if len(object_matches) > 0: LOGWARNING( "object %s is within %.1f arcminutes of %s " "template objects. 
Will remove these objects " "from the template applied to this object." % (objectid, mintemplatedist_arcmin, len(object_matches)) ) removalind = np.full( templateinfo[magcol]['template_objects'].size, False, dtype=np.bool ) removalind[np.array(object_matches)] = True tmagseries = tmagseries[~removalind,:] # # finally, proceed to TFA # # this is the normal matrix normal_matrix = np.dot(tmagseries, tmagseries.T) # get the inverse of the matrix normal_matrix_inverse = spla.pinv2(normal_matrix) # get the timebase from the template timebase = templateinfo[magcol]['timebase'] # use this to reform the target lc in the same manner as that for a TFA # template LC reformed_targetlc = _reform_templatelc_for_tfa(( lcfile, lcformat, lcformatdir, timecol, magcol, errcol, timebase, interp, sigclip )) # calculate the scalar products of the target and template magseries scalar_products = np.dot(tmagseries, reformed_targetlc['mags']) # calculate the corrections corrections = np.dot(normal_matrix_inverse, scalar_products) # finally, get the corrected time series for the target object corrected_magseries = ( reformed_targetlc['origmags'] - np.dot(tmagseries.T, corrections) ) outdict = { 'times':timebase, 'mags':corrected_magseries, 'errs':reformed_targetlc['errs'], 'mags_median':np.median(corrected_magseries), 'mags_mad': np.median(np.abs(corrected_magseries - np.median(corrected_magseries))), 'work':{'tmagseries':tmagseries, 'normal_matrix':normal_matrix, 'normal_matrix_inverse':normal_matrix_inverse, 'scalar_products':scalar_products, 'corrections':corrections, 'reformed_targetlc':reformed_targetlc}, } # we'll write back the tfa times and mags to the lcdict lcdict['tfa'] = outdict outfile = os.path.join( os.path.dirname(lcfile), '%s-tfa-%s-pklc.pkl' % ( squeeze(objectid).replace(' ','-'), magcol ) ) with open(outfile,'wb') as outfd: pickle.dump(lcdict, outfd, pickle.HIGHEST_PROTOCOL) return outfile
This applies the TFA correction to an LC given TFA template information.

Parameters
----------
lcfile : str
    This is the light curve file to apply the TFA correction to.

timecol,magcol,errcol : str
    These are the column keys in the lcdict for the LC file to apply the
    TFA correction to.

templateinfo : dict or str
    This is either the dict produced by `tfa_templates_lclist` or the
    pickle produced by the same function.

mintemplatedist_arcmin : float
    This sets the minimum distance required from the target object for
    objects in the TFA template ensemble. Objects closer than this distance
    will be removed from the ensemble.

lcformat : str
    This is the `formatkey` associated with your light curve format, which
    you previously passed in to the `lcproc.register_lcformat` function.
    This will be used to look up how to find and read the light curve file
    specified in `lcfile`.

lcformatdir : str or None
    If this is provided, gives the path to a directory where you've stored
    your lcformat description JSONs, other than the usual directories lcproc
    knows to search for them in. Use this along with `lcformat` to specify
    an LC format JSON file that's not currently registered with lcproc.

interp : str
    This is passed to scipy.interpolate.interp1d as the kind of
    interpolation to use when reforming this light curve to the timebase of
    the TFA templates.

sigclip : float or sequence of two floats or None
    This is the sigma clip to apply to this light curve before running TFA
    on it.

Returns
-------
str
    This returns the filename of the light curve file generated after the
    TFA correction is applied. This is a pickle (that can be read by
    `lcproc.read_pklc`) in the same directory as `lcfile`. The `magcol` will
    be encoded in the filename, so each `magcol` in `lcfile` gets its own
    output file.
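A minimal usage sketch for `apply_tfa_magseries` follows. The file paths, column names, and the import path are illustrative assumptions, not taken from this document.

# hedged usage sketch: all paths, column names, and the import below are
# hypothetical placeholders; only the function signature comes from above
from astrobase.lcproc.tfa import apply_tfa_magseries

corrected_pklc = apply_tfa_magseries(
    '/data/lcs/HAT-123-0001234-lc.sqlite',  # hypothetical LC file
    'rjd',                                  # time column key in the lcdict
    'aep_000',                              # magnitude column to correct
    'aie_000',                              # error column
    '/data/tfa-templates.pkl',              # pickle from tfa_templates_lclist
    mintemplatedist_arcmin=10.0,
    lcformat='hat-sql',
)
print('TFA-corrected light curve written to:', corrected_pklc)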
def delete_expired_requests():
    """Delete expired inclusion requests."""
    # a request is expired once its expiry date is in the past; note that
    # SQLAlchemy column expressions need `filter`, not `filter_by`
    InclusionRequest.query.filter(
        InclusionRequest.expiry_date <= datetime.utcnow()).delete()
    db.session.commit()
Delete expired inclusion requests.
def get_user(self, username="~"):
    """
    get info about a user (if no user is specified, use the one initiating
    the request)

    :param username: str, name of user to get info about, default="~"
    :return: dict
    """
    url = self._build_url("users/%s/" % username, _prepend_namespace=False)
    response = self._get(url)
    check_response(response)
    return response
get info about a user (if no user is specified, use the one initiating
the request)

:param username: str, name of user to get info about, default="~"
:return: dict
def from_name(cls, name):
    """Retrieve the webacc id associated with a webacc name."""
    result = cls.list({'items_per_page': 500})
    webaccs = {}
    for webacc in result:
        webaccs[webacc['name']] = webacc['id']
    return webaccs.get(name)
Retrieve the webacc id associated with a webacc name.
def switch_opt(default, shortname, help_msg):
    """Define a switchable ConfOpt.

    This creates a boolean option. If you use it in your CLI, it can be
    switched on and off by prepending + or - to its name: +opt / -opt.

    Args:
        default (bool): the default value of the switch option.
        shortname (str): short name of the option, no shortname will be used
            if it is set to None.
        help_msg (str): short description of the option.

    Returns:
        :class:`~loam.manager.ConfOpt`: a configuration option with the
            given properties.
    """
    return ConfOpt(bool(default), True, shortname,
                   dict(action=internal.Switch), True, help_msg, None)
Define a switchable ConfOpt.

This creates a boolean option. If you use it in your CLI, it can be
switched on and off by prepending + or - to its name: +opt / -opt.

Args:
    default (bool): the default value of the switch option.
    shortname (str): short name of the option, no shortname will be used
        if it is set to None.
    help_msg (str): short description of the option.

Returns:
    :class:`~loam.manager.ConfOpt`: a configuration option with the given
        properties.
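As a sketch of the +opt/-opt behavior described above (the option and program names are made up, and how the resulting ConfOpt is attached to a loam configuration manager is not shown in this document):

# hedged sketch: 'verbose' and 'myprog' are hypothetical; only switch_opt
# itself comes from the code above
verbose = switch_opt(default=False, shortname='v',
                     help_msg='print verbose output')

# on the command line, the generated option can then be toggled both ways:
#   myprog +verbose   -> True
#   myprog -verbose   -> False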
def set_working_dir(self, working_dir): """ Sets the working directory for this hypervisor. :param working_dir: path to the working directory """ # encase working_dir in quotes to protect spaces in the path yield from self.send('hypervisor working_dir "{}"'.format(working_dir)) self._working_dir = working_dir log.debug("Working directory set to {}".format(self._working_dir))
Sets the working directory for this hypervisor. :param working_dir: path to the working directory
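Since `set_working_dir` uses `yield from`, it is an old-style asyncio coroutine and must be driven by an event loop. A hedged usage sketch, assuming `hypervisor` is an already-constructed instance of the class defining the method above:

# hedged usage sketch: `hypervisor` is a hypothetical instance; its
# construction is not shown in this document
import asyncio

loop = asyncio.get_event_loop()
loop.run_until_complete(hypervisor.set_working_dir('/tmp/project 1'))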
def run(cls, raw_data):
    """Decode a raw Kafka record and dispatch it to its registered
    entrypoint, returning a Result."""
    logger.debug("{}.ReceivedFromKafka: {}".format(
        cls.__name__, raw_data
    ))
    try:
        kmsg = cls._onmessage(cls.TRANSPORT.loads(raw_data))
    except Exception as exc:
        logger.error(
            "{}.ImportError: Failed to load data from kafka: {}".format(
                cls.__name__, exc
            ),
            extra=dict(kafka_raw_data=raw_data)
        )
        return Result.from_exception(exc)

    try:
        cls.start_processing(kmsg)
        if kmsg.entrypoint not in cls.ENTRYPOINTS:
            raise ValidationError(
                "Entrypoint '{}' not registered".format(kmsg.entrypoint),
                extra=dict(
                    uuid=kmsg.uuid, entrypoint=kmsg.entrypoint,
                    allowed=list(cls.ENTRYPOINTS.keys())
                )
            )
        result = cls.ENTRYPOINTS[kmsg.entrypoint].from_Message(
            kmsg
        ).execute()
    except Exception as exc:
        result = Result.from_exception(exc, kmsg.uuid)
    finally:
        cls.stop_processing()

    # noinspection PyUnboundLocalVariable
    if result and result.retcode < 300:
        return cls._onsuccess(kmsg=kmsg, result=result)
    else:
        return cls._onerror(kmsg=kmsg, result=result)
Decode a raw Kafka record and dispatch it to its registered entrypoint, returning a Result.
def user_show(self, id, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/users#show-user" api_path = "/api/v2/users/{id}.json" api_path = api_path.format(id=id) return self.call(api_path, **kwargs)
https://developer.zendesk.com/rest_api/docs/core/users#show-user
def parse(cls, xml_path):
    """
    Parses an xml_path with the inherited xml parser

    :param xml_path: path to the XML file to parse
    :return: the parsed element tree
    """
    parser = etree.XMLParser(target=cls.xml_parse())
    return etree.parse(xml_path, parser)
Parses an xml_path with the inherited xml parser

:param xml_path: path to the XML file to parse
:return: the parsed element tree
def stop_plugins(watcher_plugin, health_plugin): """ Stops all plugins. """ logging.debug("Stopping health-check monitor...") health_plugin.stop() logging.debug("Stopping config change observer...") watcher_plugin.stop()
Stops all plugins.
def wind_speed_hub(self, weather_df): r""" Calculates the wind speed at hub height. The method specified by the parameter `wind_speed_model` is used. Parameters ---------- weather_df : pandas.DataFrame DataFrame with time series for wind speed `wind_speed` in m/s and roughness length `roughness_length` in m. The columns of the DataFrame are a MultiIndex where the first level contains the variable name (e.g. wind_speed) and the second level contains the height at which it applies (e.g. 10, if it was measured at a height of 10 m). See documentation of :func:`ModelChain.run_model` for an example on how to create the weather_df DataFrame. Returns ------- wind_speed_hub : pandas.Series or numpy.array Wind speed in m/s at hub height. Notes ----- If `weather_df` contains wind speeds at different heights the given wind speed(s) closest to the hub height are used. """ if self.power_plant.hub_height in weather_df['wind_speed']: wind_speed_hub = weather_df['wind_speed'][ self.power_plant.hub_height] elif self.wind_speed_model == 'logarithmic': logging.debug('Calculating wind speed using logarithmic wind ' 'profile.') closest_height = weather_df['wind_speed'].columns[ min(range(len(weather_df['wind_speed'].columns)), key=lambda i: abs(weather_df['wind_speed'].columns[i] - self.power_plant.hub_height))] wind_speed_hub = wind_speed.logarithmic_profile( weather_df['wind_speed'][closest_height], closest_height, self.power_plant.hub_height, weather_df['roughness_length'].iloc[:, 0], self.obstacle_height) elif self.wind_speed_model == 'hellman': logging.debug('Calculating wind speed using hellman equation.') closest_height = weather_df['wind_speed'].columns[ min(range(len(weather_df['wind_speed'].columns)), key=lambda i: abs(weather_df['wind_speed'].columns[i] - self.power_plant.hub_height))] wind_speed_hub = wind_speed.hellman( weather_df['wind_speed'][closest_height], closest_height, self.power_plant.hub_height, weather_df['roughness_length'].iloc[:, 0], self.hellman_exp) elif self.wind_speed_model == 'interpolation_extrapolation': logging.debug('Calculating wind speed using linear inter- or ' 'extrapolation.') wind_speed_hub = tools.linear_interpolation_extrapolation( weather_df['wind_speed'], self.power_plant.hub_height) elif self.wind_speed_model == 'log_interpolation_extrapolation': logging.debug('Calculating wind speed using logarithmic inter- or ' 'extrapolation.') wind_speed_hub = tools.logarithmic_interpolation_extrapolation( weather_df['wind_speed'], self.power_plant.hub_height) else: raise ValueError("'{0}' is an invalid value. ".format( self.wind_speed_model) + "`wind_speed_model` must be " "'logarithmic', 'hellman', 'interpolation_extrapolation' " + "or 'log_interpolation_extrapolation'.") return wind_speed_hub
r""" Calculates the wind speed at hub height. The method specified by the parameter `wind_speed_model` is used. Parameters ---------- weather_df : pandas.DataFrame DataFrame with time series for wind speed `wind_speed` in m/s and roughness length `roughness_length` in m. The columns of the DataFrame are a MultiIndex where the first level contains the variable name (e.g. wind_speed) and the second level contains the height at which it applies (e.g. 10, if it was measured at a height of 10 m). See documentation of :func:`ModelChain.run_model` for an example on how to create the weather_df DataFrame. Returns ------- wind_speed_hub : pandas.Series or numpy.array Wind speed in m/s at hub height. Notes ----- If `weather_df` contains wind speeds at different heights the given wind speed(s) closest to the hub height are used.
def decompose(self, noise=False, verbosity=0, logic='or', **kwargs): """ Use prune to remove links between distant points: prune is None: no pruning prune={int > 0}: prunes links beyond `prune` nearest neighbours prune='estimate': searches for the smallest value that retains a fully connected graph """ matrix = self.get_dm(noise) # get local scale estimate est_scale = None # ADJUST MASK if self._pruning_option == options.PRUNING_NONE: # Set kp to max value kp = len(matrix) - 1 mask = np.ones(matrix.shape, dtype=bool) elif self._pruning_option == options.PRUNING_MANUAL: # Manually set value of kp kp = self._manual_pruning mask = kmask(matrix, self._manual_pruning, logic=logic) elif self._pruning_option == options.PRUNING_ESTIMATE: # Must estimate value of kp kp, mask, est_scale = binsearch_mask(matrix, logic=logic) else: raise ValueError("Unexpected error: 'kp' not set") # ADJUST SCALE if self._scale_option == options.LOCAL_SCALE_MEDIAN: dist = np.median(matrix, axis=1) scale = np.outer(dist, dist) elif self._scale_option == options.LOCAL_SCALE_MANUAL: scale = kscale(matrix, self._manual_scale) elif self._scale_option == options.LOCAL_SCALE_ESTIMATE: if est_scale is None: _, _, scale = binsearch_mask(matrix, logic=logic) else: # Nothing to be done - est_scale was set during the PRUNING_ESTIMATE scale = est_scale else: raise ValueError("Unexpected error: 'scale' not set") # ZeroDivisionError safety check if not (scale > 1e-5).all(): if verbosity > 0: print('Rescaling to avoid zero-div error') _, _, scale = binsearch_mask(matrix, logic=logic) assert (scale > 1e-5).all() aff = affinity(matrix, mask, scale) aff.flat[::len(aff)+1] = 1.0 return aff
Use prune to remove links between distant points: prune is None: no pruning prune={int > 0}: prunes links beyond `prune` nearest neighbours prune='estimate': searches for the smallest value that retains a fully connected graph
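The LOCAL_SCALE_MEDIAN branch above pairs each point's median distance with every other point's, in the style of Zelnik-Manor & Perona self-tuning local scaling. A hedged sketch of that kind of affinity; `local_scale_affinity` below is a stand-in for the `affinity` function used in the method, whose definition is not shown in this document:

# hedged sketch: A[i, j] = exp(-d[i, j]**2 / (sigma_i * sigma_j)), with
# sigma_i taken as point i's median distance; the helper is a stand-in,
# not the actual `affinity` implementation
import numpy as np

def local_scale_affinity(dist, mask, scale):
    aff = np.where(mask, np.exp(-dist ** 2 / scale), 0.0)
    aff.flat[::len(aff) + 1] = 1.0  # unit self-affinity on the diagonal
    return aff

rng = np.random.default_rng(0)
pts = rng.normal(size=(5, 2))
dist = np.linalg.norm(pts[:, None] - pts[None, :], axis=-1)
med = np.median(dist, axis=1)   # median-distance local scale, as in the
scale = np.outer(med, med)      # LOCAL_SCALE_MEDIAN branch above
aff = local_scale_affinity(dist, np.ones_like(dist, dtype=bool), scale)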
def _find_classes_param(self): """ Searches the wrapped model for the classes_ parameter. """ for attr in ["classes_"]: try: return getattr(self.estimator, attr) except AttributeError: continue raise YellowbrickTypeError( "could not find classes_ param on {}".format( self.estimator.__class__.__name__ ) )
Searches the wrapped model for the classes_ parameter.
def stop(self): """Stop the Client, disconnect from queue """ if self.__end.is_set(): return self.__end.set() self.__send_retry_requests_timer.cancel() self.__threadpool.stop() self.__crud_threadpool.stop() self.__amqplink.stop() self.__network_retry_thread.join() # Clear out remaining pending requests with self.__requests: shutdown = LinkShutdownException('Client stopped') for req in self.__requests.values(): req.exception = shutdown req._set() self.__clear_references(req, remove_request=False) if self.__requests: logger.warning('%d unfinished request(s) discarded', len(self.__requests)) self.__requests.clear() # self.__network_retry_thread = None self.__network_retry_queue = None self.__container_params = None
Stop the Client, disconnect from queue
def execute_action(self, agent, action): """Change agent's location and/or location's status; track performance. Score 10 for each dirt cleaned; -1 for each move.""" if action == 'Right': agent.location = loc_B agent.performance -= 1 elif action == 'Left': agent.location = loc_A agent.performance -= 1 elif action == 'Suck': if self.status[agent.location] == 'Dirty': agent.performance += 10 self.status[agent.location] = 'Clean'
Change agent's location and/or location's status; track performance. Score 10 for each dirt cleaned; -1 for each move.
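A tiny worked example of the scoring above; the environment and agent stubs are simplified stand-ins for the real framework classes, and `execute_action` is reused as defined:

# hedged worked example: loc_A/loc_B and the stub classes are stand-ins
loc_A, loc_B = (0, 0), (1, 0)

class StubAgent:
    def __init__(self):
        self.location = loc_A
        self.performance = 0

class StubVacuumEnvironment:
    def __init__(self):
        self.status = {loc_A: 'Dirty', loc_B: 'Dirty'}
    execute_action = execute_action  # reuse the function defined above

env, agent = StubVacuumEnvironment(), StubAgent()
for action in ['Suck', 'Right', 'Suck']:
    env.execute_action(agent, action)
print(agent.performance)  # 10 - 1 + 10 = 19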