code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def nmap_smb_vulnscan():
    """Scan SMB services (port 445) from the database for SMB signing and MS17-010.

    Pulls every "up" port-445 service not yet tagged 'smb_vulnscan', runs
    nmap's smb-security-mode and smb-vuln-ms17-010 NSE scripts against
    them, tags affected services ('smb_signing_disabled' / 'MS17-010'),
    and records summary stats via Logger.
    """
    service_search = ServiceSearch()
    # '!smb_vulnscan' excludes services already scanned on a previous run.
    services = service_search.get_services(ports=['445'], tags=['!smb_vulnscan'], up=True)
    services = [service for service in services]  # materialize the query cursor
    service_dict = {}
    for service in services:
        # Tag as scanned up front so a re-run skips these hosts.
        service.add_tag('smb_vulnscan')
        service_dict[str(service.address)] = service
    # -Pn/-n/--disable-arp-ping: no ping, no DNS; run both NSE scripts on 445 only.
    nmap_args = "-Pn -n --disable-arp-ping --script smb-security-mode.nse,smb-vuln-ms17-010.nse -p 445".split(" ")
    if services:
        result = nmap(nmap_args, [str(s.address) for s in services])
        parser = NmapParser()
        report = parser.parse_fromstring(result)
        smb_signing = 0
        ms17 = 0
        for nmap_host in report.hosts:
            for script_result in nmap_host.scripts_results:
                # NSE script output fields live under 'elements'.
                script_result = script_result.get('elements', {})
                service = service_dict[str(nmap_host.address)]
                if script_result.get('message_signing', '') == 'disabled':
                    print_success("({}) SMB Signing disabled".format(nmap_host.address))
                    service.add_tag('smb_signing_disabled')
                    smb_signing += 1
                if script_result.get('CVE-2017-0143', {}).get('state', '') == 'VULNERABLE':
                    print_success("({}) Vulnerable for MS17-010".format(nmap_host.address))
                    service.add_tag('MS17-010')
                    ms17 += 1
                # Persist any newly added tags.
                # NOTE(review): reconstructed from single-line source; this call
                # could also sit one level up (once per host) — confirm upstream.
                service.update(tags=service.tags)
        print_notification("Completed, 'smb_signing_disabled' tag added to systems with smb signing disabled, 'MS17-010' tag added to systems that did not apply MS17-010.")
        stats = {'smb_signing': smb_signing, 'MS17_010': ms17, 'scanned_services': len(services)}
        Logger().log('smb_vulnscan', 'Scanned {} smb services for vulnerabilities'.format(len(services)), stats)
    else:
        print_notification("No services found to scan.")
Scans available smb services in the database for smb signing and ms17-010.
def main(command_line=True, **kwargs):
    """
    NAME
        iodp_jr6_magic.py

    DESCRIPTION
        converts shipboard .jr6 format files to magic_measurements format files

    SYNTAX
        iodp_jr6_magic.py [command line options]

    OPTIONS
        -h: prints the help message and quits.
        -f FILE: specify input file, or
        -F FILE: specify output file, default is magic_measurements.txt
        -fsa FILE: specify er_samples.txt file for sample name lookup ,
           default is 'er_samples.txt'
        -loc HOLE : specify hole name (U1456A)
        -A: don't average replicate measurements

    INPUT
        JR6 .jr6 format file
    """

    def fix_separation(filename, new_filename):
        # JR6 files can run a negative number into the preceding column
        # (e.g. "1.23-4.56"); insert a space before every '-' and collapse
        # the doubled spaces so each line splits into the expected columns.
        with open(filename, 'r') as old_file:
            data = old_file.readlines()
        with open(new_filename, 'w') as new_file:
            for line in data:
                new_line = line.replace('-', ' -')
                new_line = new_line.replace('  ', ' ')
                new_file.write(new_line)
        return new_filename

    # initialize defaults
    noave = 0
    volume = 2.5 ** 3  # default volume: 2.5cm cube (overridden per sample below)
    samp_con, Z = '5', ""
    er_location_name = "unknown"
    args = sys.argv
    meth_code = "LP-NO"
    version_num = pmag.get_version()
    dir_path = '.'
    MagRecs = []
    samp_file = 'er_samples.txt'
    meas_file = 'magic_measurements.txt'
    mag_file = ''
    #
    # get command line arguments
    #
    if command_line:
        if '-WD' in sys.argv:
            ind = sys.argv.index('-WD')
            dir_path = sys.argv[ind + 1]
        if '-ID' in sys.argv:
            ind = sys.argv.index('-ID')
            input_dir_path = sys.argv[ind + 1]
        else:
            input_dir_path = dir_path
        output_dir_path = dir_path
        if "-h" in args:
            print(main.__doc__)
            return False
        if '-F' in args:
            ind = args.index("-F")
            meas_file = args[ind + 1]
        if '-fsa' in args:
            ind = args.index("-fsa")
            samp_file = args[ind + 1]
            if samp_file[0] != '/':
                samp_file = os.path.join(input_dir_path, samp_file)
            try:
                # early sanity check; a missing file is reported again below
                ErSamps, file_type = pmag.magic_read(samp_file)
            except Exception:
                print(samp_file, ' not found: ')
                print('   download csv file and import to MagIC with iodp_samples_magic.py')
        if '-f' in args:
            ind = args.index("-f")
            mag_file = args[ind + 1]
        if "-loc" in args:
            ind = args.index("-loc")
            er_location_name = args[ind + 1]
        if "-A" in args:
            noave = 1
    if not command_line:
        dir_path = kwargs.get('dir_path', '.')
        input_dir_path = kwargs.get('input_dir_path', dir_path)
        output_dir_path = dir_path
        meas_file = kwargs.get('meas_file', 'magic_measurements.txt')
        mag_file = kwargs.get('mag_file', '')
        samp_file = kwargs.get('samp_file', 'er_samples.txt')
        specnum = kwargs.get('specnum', 1)
        samp_con = kwargs.get('samp_con', '1')
        if len(str(samp_con)) > 1:
            samp_con, Z = samp_con.split('-')
        else:
            Z = ''
        er_location_name = kwargs.get('er_location_name', '')
        noave = kwargs.get('noave', 0)  # default (0) means DO average
        meth_code = kwargs.get('meth_code', "LP-NO")

    # format variables
    meth_code = meth_code + ":FS-C-DRILL-IODP:SP-SS-C:SO-V"
    meth_code = meth_code.strip(":")
    if mag_file:
        mag_file = os.path.join(input_dir_path, mag_file)
    samp_file = os.path.join(input_dir_path, samp_file)
    meas_file = os.path.join(output_dir_path, meas_file)

    # validate variables
    if not mag_file:
        print("You must provide an IODP_jr6 format file")
        return False, "You must provide an IODP_jr6 format file"
    if not os.path.exists(mag_file):
        error = ('The input file you provided: {} does not exist.\nMake sure you have '
                 'specified the correct filename AND correct input directory name.'.format(mag_file))
        print(error)
        return False, error
    if not os.path.exists(samp_file):
        error = ("Your input directory:\n{}\nmust contain an er_samples.txt file, or you "
                 "must explicitly provide one".format(input_dir_path))
        print(error)
        return False, error

    # parse data: normalize column separation, then read measurement lines
    temp = os.path.join(output_dir_path, 'temp.txt')
    fix_separation(mag_file, temp)
    samples, filetype = pmag.magic_read(samp_file)
    with open(temp, 'r') as finput:
        lines = finput.readlines()
    os.remove(temp)
    for line in lines:
        MagRec = {}
        line = line.split()
        spec_text_id = line[0].split('_')[1]
        SampRecs = pmag.get_dictitem(samples, 'er_sample_alternatives', spec_text_id, 'has')
        if len(SampRecs) > 0:  # found one
            MagRec['er_specimen_name'] = SampRecs[0]['er_sample_name']
            MagRec['er_sample_name'] = MagRec['er_specimen_name']
            MagRec['er_site_name'] = MagRec['er_specimen_name']
            MagRec["er_citation_names"] = "This study"
            MagRec['er_location_name'] = er_location_name
            MagRec['magic_software_packages'] = version_num
            MagRec["treatment_temp"] = '%8.3e' % (273)  # room temp in kelvin
            MagRec["measurement_temp"] = '%8.3e' % (273)  # room temp in kelvin
            MagRec["measurement_flag"] = 'g'
            MagRec["measurement_standard"] = 'u'
            MagRec["measurement_number"] = '1'
            MagRec["treatment_ac_field"] = '0'
            volume = float(SampRecs[0]['sample_volume'])
            x = float(line[4])
            y = float(line[3])
            negz = float(line[2])
            cart = np.array([x, y, -negz]).transpose()
            direction = pmag.cart2dir(cart).transpose()
            expon = float(line[5])
            magn_volume = direction[2] * (10.0 ** expon)
            moment = magn_volume * volume
            MagRec["measurement_magn_moment"] = str(moment)
            MagRec["measurement_magn_volume"] = str(magn_volume)
            MagRec["measurement_dec"] = '%7.1f' % (direction[0])
            MagRec["measurement_inc"] = '%7.1f' % (direction[1])
            step = line[1]
            if step == 'NRM':
                meas_type = "LT-NO"
            elif step[0:2] == 'AD':
                meas_type = "LT-AF-Z"
                treat = float(step[2:])
                MagRec["treatment_ac_field"] = '%8.3e' % (treat * 1e-3)  # convert from mT to tesla
            elif step[0:2] == 'TD':
                meas_type = "LT-T-Z"
                treat = float(step[2:])
                MagRec["treatment_temp"] = '%8.3e' % (treat + 273.)  # temp in kelvin
            elif step[0:3] == 'ARM':
                meas_type = "LT-AF-I"
                # BUG FIX: this branch referenced the undefined name 'row';
                # the treatment value lives in 'step' like the other branches.
                treat = float(step[3:])
                MagRec["treatment_ac_field"] = '%8.3e' % (treat * 1e-3)  # convert from mT to tesla
                MagRec["treatment_dc_field"] = '%8.3e' % (50e-6)  # assume 50uT DC field
                MagRec["measurement_description"] = 'Assumed DC field - actual unknown'
            elif step[0:3] == 'IRM':
                meas_type = "LT-IRM"
                treat = float(step[3:])
                MagRec["treatment_dc_field"] = '%8.3e' % (treat * 1e-3)  # convert from mT to tesla
            else:
                # BUG FIX: previously referenced undefined 'row' and returned
                # an accidental 3-tuple; callers expect (status, message).
                print('unknown treatment type for ', step)
                return False, 'unknown treatment type for {}'.format(step)
            MagRec['magic_method_codes'] = meas_type
            MagRecs.append(MagRec.copy())
        else:
            # BUG FIX: 'row' was undefined here; report the specimen id we looked up.
            print('sample name not found: ', spec_text_id)
    MagOuts = pmag.measurements_methods(MagRecs, noave)
    file_created, error_message = pmag.magic_write(meas_file, MagOuts, 'magic_measurements')
    if file_created:
        return True, meas_file
    else:
        return False, 'Results not written to file'
NAME iodp_jr6_magic.py DESCRIPTION converts shipboard .jr6 format files to magic_measurements format files SYNTAX iodp_jr6_magic.py [command line options] OPTIONS -h: prints the help message and quits. -f FILE: specify input file, or -F FILE: specify output file, default is magic_measurements.txt -fsa FILE: specify er_samples.txt file for sample name lookup , default is 'er_samples.txt' -loc HOLE : specify hole name (U1456A) -A: don't average replicate measurements INPUT JR6 .jr6 format file
def use(cls, ec):
    """Return a Model class bound to *ec*.

    ``ec`` may be an engine name (string) or a Session object; strings are
    resolved through ``get_model`` while any other object is handed to
    ``cls._use``.
    """
    if isinstance(ec, (str, unicode)):
        return get_model(cls._alias, ec, signal=False)
    return cls._use(ec)
use will duplicate a new Model class and bind ec. ec is an Engine name or Session object
def is_orderable(cls):
    """Return the ORDERABLE_MODELS settings entry for *cls*, or False.

    Looks up the class's "app.model" label in settings.ORDERABLE_MODELS;
    a missing/empty setting or an unlisted class yields False.
    """
    orderables = getattr(settings, 'ORDERABLE_MODELS', None)
    if not orderables:
        return False
    label = resolve_labels(cls)['app_model']
    if label in settings.ORDERABLE_MODELS:
        return settings.ORDERABLE_MODELS[label]
    return False
Checks if the provided class is specified as an orderable in settings.ORDERABLE_MODELS. If it is return its settings.
def ensure_matplotlib_figure(obj):
    """Return the matplotlib Figure behind *obj*.

    Accepts the pyplot module itself (its current figure is used), a
    Figure, or any artist exposing a ``figure`` attribute.

    :raises ValueError: if *obj* cannot be reduced to a Figure, or the
        resulting figure's current axes holds no data.
    """
    import matplotlib
    from matplotlib.figure import Figure
    if obj == matplotlib.pyplot:
        obj = obj.gcf()
    elif not isinstance(obj, Figure):
        # Many matplotlib artists reference their parent figure.
        obj = getattr(obj, "figure", obj)
        if not isinstance(obj, Figure):
            raise ValueError(
                "Only matplotlib.pyplot or matplotlib.pyplot.Figure objects are accepted.")
    if not obj.gca().has_data():
        raise ValueError(
            "You attempted to log an empty plot, pass a figure directly or ensure the global plot isn't closed.")
    return obj
Extract the current figure from a matplotlib object or return the object if it's a figure. raises ValueError if the object can't be converted.
def set(
    self,
    key,
    value,
    loader_identifier=None,
    tomlfy=False,
    dotted_lookup=True,
    is_secret=False,
):
    """Set a value storing references for the loader

    :param key: The key to store (normalized to stripped upper-case)
    :param value: The value to store
    :param loader_identifier: Optional loader name e.g: toml, yaml etc.
    :param tomlfy: Bool define if value is parsed by toml (defaults False)
    :param dotted_lookup: Bool, when True a dotted key like "a.b" is
        routed through ``_dotted_set`` (defaults True)
    :param is_secret: Bool define if secret values is hidden on logs.
    """
    # Dotted keys delegate to _dotted_set, which sets each path component.
    if "." in key and dotted_lookup is True:
        return self._dotted_set(
            key, value, loader_identifier=loader_identifier, tomlfy=tomlfy
        )
    value = parse_conf_data(value, tomlfy=tomlfy)
    key = key.strip().upper()
    existing = getattr(self, key, None)
    if existing is not None and existing != value:
        # merge semantics are applied before the overwrite
        value = self._merge_before_set(key, existing, value, is_secret)
    if isinstance(value, dict):
        # wrap dicts for attribute-style (dot) access
        value = DynaBox(value, box_it_up=True)
    # expose the value both as an attribute and in the store mapping
    setattr(self, key, value)
    self.store[key] = value
    self._deleted.discard(key)  # a re-set key is no longer "deleted"
    # set loader identifiers so cleaners know which keys to clean
    if loader_identifier and loader_identifier in self.loaded_by_loaders:
        self.loaded_by_loaders[loader_identifier][key] = value
    elif loader_identifier:
        self.loaded_by_loaders[loader_identifier] = {key: value}
    elif loader_identifier is None:
        # if .set is called without loader identifier it becomes
        # a default value and goes away only when explicitly unset
        self._defaults[key] = value
Set a value storing references for the loader :param key: The key to store :param value: The value to store :param loader_identifier: Optional loader name e.g: toml, yaml etc. :param tomlfy: Bool define if value is parsed by toml (defaults False) :param is_secret: Bool define if secret values is hidden on logs.
def _write_init_models(self, filenames): """ Write init file Args: filenames (dict): dict of filename and classes """ self.write(destination=self.output_directory, filename="__init__.py", template_name="__init_model__.py.tpl", filenames=self._prepare_filenames(filenames), class_prefix=self._class_prefix, product_accronym=self._product_accronym, header=self.header_content)
Write init file Args: filenames (dict): dict of filename and classes
def link(self, stream_instance):
    """Connect this node's input to *stream_instance*.

    ``stream_instance`` may be any iterable (used directly as the input
    stream) or another pipeline element exposing an ``output_stream``
    attribute.

    :raises RuntimeError: if the instance is neither iterable nor exposes
        a truthy ``output_stream``.
    """
    # BUG FIX: collections.Iterable was removed in Python 3.10; the ABC
    # lives in collections.abc (available since Python 3.3).
    import collections.abc
    if isinstance(stream_instance, collections.abc.Iterable):
        self.input_stream = stream_instance
    elif getattr(stream_instance, 'output_stream', None):
        self.input_stream = stream_instance.output_stream
    else:
        raise RuntimeError('Calling link() with unknown instance type %s'
                           % type(stream_instance))
Set my input stream
def _is_gs_folder(cls, result): """Return ``True`` if GS standalone folder object. GS will create a 0 byte ``<FOLDER NAME>_$folder$`` key as a pseudo-directory place holder if there are no files present. """ return (cls.is_key(result) and result.size == 0 and result.name.endswith(cls._gs_folder_suffix))
Return ``True`` if GS standalone folder object. GS will create a 0 byte ``<FOLDER NAME>_$folder$`` key as a pseudo-directory place holder if there are no files present.
def buttonbox(msg="", title=" ", choices=("Button[1]", "Button[2]", "Button[3]"), image=None, root=None, default_choice=None, cancel_choice=None):
    """
    Display a msg, a title, an image, and a set of buttons.
    The buttons are defined by the members of the choices list.

    :param str msg: the msg to be displayed
    :param str title: the window title
    :param list choices: a list or tuple of the choices to be displayed
    :param str image: Filename of image to display
    :param str default_choice: The choice you want highlighted
      when the gui appears
    :param str cancel_choice: If the user presses the 'X' close, which
      button should be pressed
    :return: the text of the button that the user selected
    """
    # Module-level Tk state shared with __put_buttons_in_buttonframe.
    global boxRoot, __replyButtonText, buttonsFrame

    # If default is not specified, select the first button.  This matches old
    # behavior.
    if default_choice is None:
        default_choice = choices[0]

    # Initialize __replyButtonText to the first choice.
    # This is what will be used if the window is closed by the close button.
    __replyButtonText = choices[0]

    if root:
        # Embed as a child dialog of an existing Tk root.
        root.withdraw()
        boxRoot = Toplevel(master=root)
        boxRoot.withdraw()
    else:
        boxRoot = Tk()
        boxRoot.withdraw()

    boxRoot.title(title)
    boxRoot.iconname('Dialog')
    boxRoot.geometry(st.rootWindowPosition)
    boxRoot.minsize(400, 100)

    # ------------- define the messageFrame ---------------------------------
    messageFrame = Frame(master=boxRoot)
    messageFrame.pack(side=TOP, fill=BOTH)

    # ------------- define the imageFrame ---------------------------------
    if image:
        tk_Image = None
        try:
            tk_Image = ut.load_tk_image(image)
        except Exception as inst:
            # best-effort: an unloadable image is reported but not fatal
            print(inst)
        if tk_Image:
            imageFrame = Frame(master=boxRoot)
            imageFrame.pack(side=TOP, fill=BOTH)
            label = Label(imageFrame, image=tk_Image)
            label.image = tk_Image  # keep a reference!
            label.pack(side=TOP, expand=YES, fill=X, padx='1m', pady='1m')

    # ------------- define the buttonsFrame ---------------------------------
    buttonsFrame = Frame(master=boxRoot)
    buttonsFrame.pack(side=TOP, fill=BOTH)

    # -------------------- place the widgets in the frames -------------------
    messageWidget = Message(messageFrame, text=msg, width=400)
    messageWidget.configure(
        font=(st.PROPORTIONAL_FONT_FAMILY, st.PROPORTIONAL_FONT_SIZE))
    messageWidget.pack(side=TOP, expand=YES, fill=X, padx='3m', pady='3m')

    __put_buttons_in_buttonframe(choices, default_choice, cancel_choice)

    # -------------- the action begins -----------
    # mainloop() blocks until a button handler stores the reply and quits.
    boxRoot.deiconify()
    boxRoot.mainloop()
    boxRoot.destroy()
    if root:
        root.deiconify()
    return __replyButtonText
Display a msg, a title, an image, and a set of buttons. The buttons are defined by the members of the choices list. :param str msg: the msg to be displayed :param str title: the window title :param list choices: a list or tuple of the choices to be displayed :param str image: Filename of image to display :param str default_choice: The choice you want highlighted when the gui appears :param str cancel_choice: If the user presses the 'X' close, which button should be pressed :return: the text of the button that the user selected
def draw_flat_samples(**kwargs):
    '''
    Draw component masses uniformly and order them.

    Parameters
    ----------
    **kwargs: string
        Keyword arguments as model parameters and number of samples
        (``nsamples``, ``min_mass``, ``max_mass``)

    Returns
    -------
    array
        The first mass
    array
        The second mass
    '''
    # PDF doesnt match with sampler (kept from original)
    nsamples = kwargs.get('nsamples', 1)
    min_mass = kwargs.get('min_mass', 1.)
    max_mass = kwargs.get('max_mass', 2.)
    draw_a = np.random.uniform(min_mass, max_mass, nsamples)
    draw_b = np.random.uniform(min_mass, max_mass, nsamples)
    primary = np.maximum(draw_a, draw_b)
    secondary = np.minimum(draw_a, draw_b)
    return primary, secondary
Draw samples for uniform in mass Parameters ---------- **kwargs: string Keyword arguments as model parameters and number of samples Returns ------- array The first mass array The second mass
def safe_urlencode(params, doseq=0):
    """
    UTF-8-safe wrapper around the stdlib ``urlencode``.

    Prior to Python 3.x, ``urlencode`` chokes on UTF-8 values which can't
    fail down to ascii, so everything is pre-encoded to bytes first.
    """
    if IS_PY3:
        return urlencode(params, doseq)

    if hasattr(params, "items"):
        params = params.items()

    encoded = []
    for key, val in params:
        key = key.encode("utf-8")
        if isinstance(val, (list, tuple)):
            encoded.append((key, [force_bytes(item) for item in val]))
        else:
            encoded.append((key, force_bytes(val)))
    return urlencode(encoded, doseq)
UTF-8-safe version of the stdlib urlencode. The stdlib urlencode prior to Python 3.x chokes on UTF-8 values which can't fall back to ascii.
def handle_json_GET_routepatterns(self, params):
    """Given a route_id generate a list of patterns of the route. For each
    pattern include some basic information and a few sample trips.

    Each returned pattern tuple is
    (name, pattern_id, start_sample_index, sample, num_after_sample,
     has_non_zero_trip_type flag as 0/1), sorted by name.
    """
    schedule = self.server.schedule
    route = schedule.GetRoute(params.get('route', None))
    if not route:
        self.send_error(404)
        return
    time = int(params.get('time', 0))
    date = params.get('date', "")
    sample_size = 3  # For each pattern return the start time for this many trips
    pattern_id_trip_dict = route.GetPatternIdTripDict()
    patterns = []
    for pattern_id, trips in pattern_id_trip_dict.items():
        time_stops = trips[0].GetTimeStops()
        if not time_stops:
            continue
        has_non_zero_trip_type = False;
        # NOTE(review): comment below is stale — nothing is removed from
        # 'trips'; a filtered copy is built instead.
        # Iterating over a copy so we can remove from trips inside the loop
        trips_with_service = []
        for trip in trips:
            service_id = trip.service_id
            service_period = schedule.GetServicePeriod(service_id)
            # Drop trips whose service period is inactive on the given date.
            if date and not service_period.IsActiveOn(date):
                continue
            trips_with_service.append(trip)
            if trip['trip_type'] and trip['trip_type'] != '0':
                has_non_zero_trip_type = True
        # We're only interested in the trips that do run on the specified date
        trips = trips_with_service
        name = u'%s to %s, %d stops' % (time_stops[0][2].stop_name, time_stops[-1][2].stop_name, len(time_stops))
        transitfeed.SortListOfTripByTime(trips)
        num_trips = len(trips)
        if num_trips <= sample_size:
            start_sample_index = 0
            num_after_sample = 0
        else:
            # Will return sample_size trips that start after the 'time' param.
            # Linear search because I couldn't find a built-in way to do a binary
            # search with a custom key.
            start_sample_index = len(trips)
            for i, trip in enumerate(trips):
                if trip.GetStartTime() >= time:
                    start_sample_index = i
                    break
            num_after_sample = num_trips - (start_sample_index + sample_size)
            if num_after_sample < 0:
                # Less than sample_size trips start after 'time' so return all the
                # last sample_size trips.
                num_after_sample = 0
                start_sample_index = num_trips - sample_size
        sample = []
        for t in trips[start_sample_index:start_sample_index + sample_size]:
            sample.append( (t.GetStartTime(), t.trip_id) )
        patterns.append((name, pattern_id, start_sample_index, sample, num_after_sample, (0,1)[has_non_zero_trip_type]))
    patterns.sort()
    return patterns
Given a route_id generate a list of patterns of the route. For each pattern include some basic information and a few sample trips.
def start(self):
    """ Handle EventHub events for SmartContract decorators

    Registers handlers on the shared ``events`` hub for contract
    creation/migration and runtime notifications, and subscribes to the
    blockchain's PersistCompleted change event.
    """
    # buffers flushed elsewhere (e.g. on persist-completed)
    self._events_to_write = []
    self._new_contracts_to_write = []

    # one handler serves both creation and migration events
    @events.on(SmartContractEvent.CONTRACT_CREATED)
    @events.on(SmartContractEvent.CONTRACT_MIGRATED)
    def call_on_success_event(sc_event: SmartContractEvent):
        self.on_smart_contract_created(sc_event)

    @events.on(SmartContractEvent.RUNTIME_NOTIFY)
    def call_on_event(sc_event: NotifyEvent):
        self.on_smart_contract_event(sc_event)

    # fire our callback every time a block finishes persisting
    Blockchain.Default().PersistCompleted.on_change += self.on_persist_completed
Handle EventHub events for SmartContract decorators
def update_table(self, tablename, throughput=None, global_indexes=None, index_updates=None):
    """
    Update the throughput of a table and/or its global indexes.

    Parameters
    ----------
    tablename : str
        Name of the table to update
    throughput : :class:`~dynamo3.fields.Throughput`, optional
        The new throughput of the table
    global_indexes : dict, optional
        DEPRECATED. Use index_updates now.
        Map of index name to :class:`~dynamo3.fields.Throughput`
    index_updates : list of :class:`~dynamo3.fields.IndexUpdate`, optional
        List of IndexUpdates to perform
    """
    kwargs = {'TableName': tablename}
    all_attrs = set()
    if throughput is not None:
        kwargs['ProvisionedThroughput'] = throughput.schema()
    if index_updates is not None:
        serialized = []
        for update in index_updates:
            all_attrs.update(update.get_attrs())
            serialized.append(update.serialize())
        kwargs['GlobalSecondaryIndexUpdates'] = serialized
    elif global_indexes is not None:
        # Legacy path: each entry becomes a bare throughput Update.
        legacy = []
        for index_name, index_throughput in six.iteritems(global_indexes):
            legacy.append({
                'Update': {
                    'IndexName': index_name,
                    'ProvisionedThroughput': index_throughput.schema(),
                }
            })
        kwargs['GlobalSecondaryIndexUpdates'] = legacy
    if all_attrs:
        kwargs['AttributeDefinitions'] = [attr.definition() for attr in all_attrs]
    return self.call('update_table', **kwargs)
Update the throughput of a table and/or global indexes Parameters ---------- tablename : str Name of the table to update throughput : :class:`~dynamo3.fields.Throughput`, optional The new throughput of the table global_indexes : dict, optional DEPRECATED. Use index_updates now. Map of index name to :class:`~dynamo3.fields.Throughput` index_updates : list of :class:`~dynamo3.fields.IndexUpdate`, optional List of IndexUpdates to perform
def getChecked(self):
    """Gets the checked attributes

    :returns: list<str> -- checked attribute names, in layout order
    """
    layout = self.layout()
    widgets = (layout.itemAt(index).widget() for index in range(layout.count()))
    return [str(widget.text()) for widget in widgets if widget.isChecked()]
Gets the checked attributes :returns: list<str> -- checked attribute names
def get_passage(self, objectId, subreference):
    """ Retrieve the passage identified by the parameters

    :param objectId: Collection Identifier
    :type objectId: str
    :param subreference: Subreference of the passage
    :type subreference: str
    :return: An object bearing metadata and its text
    :rtype: InteractiveTextualNode
    """
    # Delegate straight to the resolver; metadata is always requested.
    return self.resolver.getTextualNode(
        textId=objectId, subreference=subreference, metadata=True
    )
Retrieve the passage identified by the parameters :param objectId: Collection Identifier :type objectId: str :param subreference: Subreference of the passage :type subreference: str :return: An object bearing metadata and its text :rtype: InteractiveTextualNode
def stage_name(self):
    """ Return the stage name for this job instance.

    Job instances can be created in ways that populate ``self.data``
    differently, so prefer a truthy 'stage_name' entry there and fall
    back to the owning stage's data.

    :return: stage name.
    """
    if 'stage_name' in self.data and self.data.stage_name:
        return self.data.get('stage_name')
    return self.stage.data.name
Get stage name of current job instance. Because instantiating job instance could be performed in different ways and those return different results, we have to check where from to get name of the stage. :return: stage name.
def _write_family(family, filename): """ Write a family to a csv file. :type family: :class:`eqcorrscan.core.match_filter.Family` :param family: Family to write to file :type filename: str :param filename: File to write to. """ with open(filename, 'w') as f: for detection in family.detections: det_str = '' for key in detection.__dict__.keys(): if key == 'event' and detection.__dict__[key] is not None: value = str(detection.event.resource_id) elif key in ['threshold', 'detect_val', 'threshold_input']: value = format(detection.__dict__[key], '.32f').rstrip('0') else: value = str(detection.__dict__[key]) det_str += key + ': ' + value + '; ' f.write(det_str + '\n') return
Write a family to a csv file. :type family: :class:`eqcorrscan.core.match_filter.Family` :param family: Family to write to file :type filename: str :param filename: File to write to.
def _get_MAP_spikes(F, c_hat, theta, dt, tol=1E-6, maxiter=100, verbosity=0):
    """
    Used internally by deconvolve to compute the maximum a posteriori
    spike train for a given set of fluorescence traces and model
    parameters. See the documentation for deconvolve for the meaning of
    the arguments

    Interior-point (log-barrier) scheme: the outer loop shrinks the
    barrier weight ``z``; the middle loop takes damped Newton steps for a
    fixed ``z``; the inner loop is a backtracking line search.

    Returns: n_hat_best, c_hat_best, LL_best
    """
    npix, nt = F.shape
    sigma, alpha, beta, lamb, gamma = theta
    # we project everything onto the alpha mask so that we only ever have to
    # deal with 1D vector norms
    alpha_ss = np.dot(alpha, alpha)
    c = np.dot(alpha, F) - np.dot(alpha, beta)
    # used for computing the LL and gradient
    scale_var = 1. / (2 * sigma * sigma)
    lD = lamb * dt
    # used for computing the gradient (M.T.dot(lamb * dt))
    grad_lnprior = np.zeros(nt, dtype=DTYPE)
    grad_lnprior[1:] = lD
    grad_lnprior[:-1] -= lD * gamma
    # initialize the weight of the barrier term to 1
    z = 1.
    # initial estimate of spike probabilities (first difference of the
    # calcium estimate, discounted by gamma)
    n_hat = c_hat[1:] - gamma * c_hat[:-1]
    # assert not np.any(n_hat < 0), "spike probabilities < 0"
    # (actual - predicted) fluorescence
    res = c - alpha_ss * c_hat
    # best overall posterior log-likelihood of the fluorescence
    LL_best = _post_LL(n_hat, res, scale_var, lD, z)
    LL_barrier = LL_best
    nloop1 = 0
    terminate_interior = False
    # in the outer loop we'll progressively reduce the weight of the barrier
    # term and check the interior point termination criteria
    while not terminate_interior:
        nloop2 = 0
        terminate_barrier = False
        # converge for this barrier weight
        while not terminate_barrier:
            # by projecting everything onto alpha, we reduce this to a 1D
            # vector norm
            res = c - alpha_ss * c_hat
            # compute direction of newton step
            d = _direction(n_hat, res, alpha_ss, gamma, scale_var,
                           grad_lnprior, z)
            terminate_linesearch = False
            # find the largest step we can take in direction d without
            # violating the non-negativity constraint on n_hat
            s_upper_bnd = -n_hat / (d[1:] - gamma * d[:-1])
            # we are only interested in positive step sizes
            feasible = (s_upper_bnd > 0)
            if np.any(feasible):
                # largest allowable step size is 1.
                s = min(1., 0.999 * np.min(s_upper_bnd[feasible]))
            else:
                # if there is no step size that will keep n_hat >= 0, just
                # reduce the barrier weight and try again
                terminate_linesearch = True
                terminate_barrier = True
                if verbosity >= 2:
                    print("skipping linesearch: no positive step size will "
                          "keep n_hat >= 0")
            nloop3 = 0
            # backtracking line search for the largest step size that increases
            # the posterior log-likelihood of the fluorescence
            while not terminate_linesearch:
                # update estimated calcium
                c_hat_line = c_hat + (s * d)
                # update spike probabilities
                n_hat_line = c_hat_line[1:] - gamma * c_hat_line[:-1]
                # assert not np.any(n_hat_line < 0), "spike probabilities < 0"
                # (actual - predicted) fluorescence
                res = c - alpha_ss * c_hat_line
                # compute the new posterior log-likelihood
                LL_line = _post_LL(n_hat_line, res, scale_var, lD, z)
                # assert not np.any(np.isnan(LL1)), "nan LL"
                if verbosity >= 2:
                    print('spikes: iter=%3i, %3i, %3i; z=%-10.4g; s=%-10.4g;'
                          ' LL=%-10.4g' % (nloop1, nloop2, nloop3, z, s,
                                           LL_line))
                # if the step size gets too small without making any progress,
                # we terminate the linesearch and reduce the barrier weight
                if s < S_TOL:
                    if verbosity >= 2:
                        print('--> terminated linesearch: s < %.3g on %i '
                              'iterations' % (S_TOL, nloop3))
                    terminate_linesearch = True
                    terminate_barrier = True
                # only update c_hat & LL if LL improved
                if LL_line > LL_barrier:
                    LL_barrier, n_hat, c_hat = LL_line, n_hat_line, c_hat_line
                    terminate_linesearch = True
                # reduce the step size
                else:
                    s /= S_FAC
                nloop3 += 1
            # if d gets too small, reduce the barrier weight
            if (np.linalg.norm(d) < D_TOL):
                terminate_barrier = True
            nloop2 += 1
        # only test for convergence if we were actually able to enter the
        # linesearch
        if nloop3:
            # relative improvement of the posterior log-likelihood
            delta_LL = -(LL_barrier - LL_best) / LL_best
            LL_best = LL_barrier
            if (delta_LL < tol):
                terminate_interior = True
        elif z < Z_TOL:
            if verbosity >= 2:
                print('MAP spike train failed to converge before z < %.3g'
                      % Z_TOL)
            terminate_interior = True
        elif nloop1 > maxiter:
            if verbosity >= 2:
                print('MAP spike train failed to converge within maxiter (%i)'
                      % maxiter)
            terminate_interior = True
        # increment the outer loop counter, reduce the barrier weight
        nloop1 += 1
        z /= Z_FAC
    # NOTE(review): indentation reconstructed from a single-line source;
    # confirm loop nesting against the upstream project before relying on it.
    return n_hat, c_hat, LL_best
Used internally by deconvolve to compute the maximum a posteriori spike train for a given set of fluorescence traces and model parameters. See the documentation for deconvolve for the meaning of the arguments Returns: n_hat_best, c_hat_best, LL_best
def _socket_reconnect_and_wait_ready(self):
    """
    sync_socket & async_socket recreate: reconnect to the server and poll
    until the handshake reply arrives, then invoke the reconnect hook.

    :return: (ret, msg) — NOTE: as written this always returns
        ``(RET_OK, '')`` regardless of intermediate failures; failures are
        routed to ``_wait_reconnect`` instead.
    """
    logger.info("Start connecting: host={}; port={};".format(self.__host, self.__port))
    with self._lock:
        self._status = ContextStatus.Connecting
        # logger.info("try connecting: host={}; port={};".format(self.__host, self.__port))
        ret, msg, conn_id = self._net_mgr.connect((self.__host, self.__port), self, 5)
        if ret == RET_OK:
            self._conn_id = conn_id
        else:
            logger.warning(msg)
    if ret == RET_OK:
        # Busy-wait (10ms granularity) for the handshake result filled in
        # by the network thread via _sync_req_ret.
        while True:
            with self._lock:
                if self._sync_req_ret is not None:
                    if self._sync_req_ret.ret == RET_OK:
                        self._status = ContextStatus.Ready
                    else:
                        ret, msg = self._sync_req_ret.ret, self._sync_req_ret.msg
                    self._sync_req_ret = None
                    break
            sleep(0.01)
    if ret == RET_OK:
        ret, msg = self.on_api_socket_reconnected()
    else:
        # schedule another reconnect attempt
        self._wait_reconnect()
    return RET_OK, ''
sync_socket & async_socket recreate :return: (ret, msg)
def pick(self, filenames: Iterable[str]) -> str:
    """Pick one filename based on priority rules.

    Rules are tried from highest priority down; within a rule, candidates
    are tried in reverse-lexicographic order so e.g. "v2" beats "v1".
    If nothing matches, the reverse-lexicographically first name wins.
    """
    candidates = sorted(filenames, reverse=True)
    for priority in sorted(self.rules, reverse=True):
        for pattern in self.rules[priority]:
            for candidate in candidates:
                if pattern.search(candidate):
                    return candidate
    return candidates[0]
Pick one filename based on priority rules.
def solveConsKinkedR(solution_next,IncomeDstn,LivPrb,DiscFac,CRRA,Rboro,Rsave,
                     PermGroFac,BoroCnstArt,aXtraGrid,vFuncBool,CubicBool):
    '''
    Solve one period of a consumption-saving problem with CRRA utility,
    risky income (permanent and transitory shocks), and a kinked interest
    rate: borrowing at Rboro and saving at Rsave (requires Rboro >= Rsave).
    Currently only a linear-spline consumption function can be built (not
    cubic); a value function can be generated if requested.

    Parameters
    ----------
    solution_next : ConsumerSolution
        Solution to the following period's one period problem.
    IncomeDstn : [np.array]
        Three arrays forming a discrete approximation to the income process
        between this period and the next: event probabilities, permanent
        shocks, transitory shocks (in that order).
    LivPrb : float
        Probability of being alive at the beginning of the next period.
    DiscFac : float
        Intertemporal discount factor for future utility.
    CRRA : float
        Coefficient of relative risk aversion.
    Rboro : float
        Interest factor applied when end-of-period assets are negative.
    Rsave : float
        Interest factor applied when end-of-period assets are positive.
    PermGroFac : float
        Expected permanent income growth factor at the end of this period.
    BoroCnstArt : float or None
        Artificial borrowing constraint on minimum end-of-period assets;
        None means no artificial constraint (and it is irrelevant whenever
        it is looser than the natural borrowing constraint).
    aXtraGrid : np.array
        Grid of "extra" end-of-period asset values above the absolute
        minimum acceptable level.
    vFuncBool : boolean
        Whether to compute and include a value function in the solution.
    CubicBool : boolean
        Whether to use cubic (vs linear) interpolation.

    Returns
    -------
    solution_now : ConsumerSolution
        Solution to the single period problem: consumption function cFunc,
        marginal value function vPfunc, minimum acceptable normalized
        market resources mNrmMin, normalized human wealth hNrm, and
        bounding MPCs MPCmin and MPCmax; possibly also a value function
        vFunc.
    '''
    kink_solver = ConsKinkedRsolver(solution_next, IncomeDstn, LivPrb,
                                    DiscFac, CRRA, Rboro, Rsave, PermGroFac,
                                    BoroCnstArt, aXtraGrid, vFuncBool,
                                    CubicBool)
    kink_solver.prepareToSolve()
    return kink_solver.solve()
Solves a single period consumption-saving problem with CRRA utility and risky income (subject to permanent and transitory shocks), and different interest factors on borrowing and saving. Restriction: Rboro >= Rsave. Currently cannot construct a cubic spline consumption function, only linear. Can gen- erate a value function if requested. Parameters ---------- solution_next : ConsumerSolution The solution to next period's one period problem. IncomeDstn : [np.array] A list containing three arrays of floats, representing a discrete approximation to the income process between the period being solved and the one immediately following (in solution_next). Order: event probabilities, permanent shocks, transitory shocks. LivPrb : float Survival probability; likelihood of being alive at the beginning of the succeeding period. DiscFac : float Intertemporal discount factor for future utility. CRRA : float Coefficient of relative risk aversion. Rboro: float Interest factor on assets between this period and the succeeding period when assets are negative. Rsave: float Interest factor on assets between this period and the succeeding period when assets are positive. PermGroFac : float Expected permanent income growth factor at the end of this period. BoroCnstArt: float or None Borrowing constraint for the minimum allowable assets to end the period with. If it is less than the natural borrowing constraint, then it is irrelevant; BoroCnstArt=None indicates no artificial bor- rowing constraint. aXtraGrid: np.array Array of "extra" end-of-period asset values-- assets above the absolute minimum acceptable level. vFuncBool: boolean An indicator for whether the value function should be computed and included in the reported solution. CubicBool: boolean Indicator for whether the solver should use cubic or linear interpolation. Returns ------- solution_now : ConsumerSolution The solution to the single period consumption-saving problem. 
Includes a consumption function cFunc (using cubic or linear splines), a marginal value function vPfunc, a minimum acceptable level of normalized market resources mNrmMin, normalized human wealth hNrm, and bounding MPCs MPCmin and MPCmax. It might also have a value function vFunc.
def main(argv=None):
    """Entry point for the ``ben-elastic`` command-line tool.

    Parses CLI arguments, exports the campaign directory to Elasticsearch,
    and -- when ``argv`` was supplied programmatically -- returns the
    exporter so the caller can inspect it.
    """
    opts = cli_common(__doc__, argv=argv)
    exporter = ESExporter(opts['CAMPAIGN-DIR'], opts['--es'])
    exporter.export()
    return exporter if argv is not None else None
ben-elastic entry point
def get_sources(src_dir='src', ending='.cpp'):
    """Return the paths of files in *src_dir* whose names end with *ending*."""
    names = (fn for fn in os.listdir(src_dir) if fn.endswith(ending))
    return [os.path.join(src_dir, fn) for fn in names]
Function to get a list of files ending with `ending` in `src_dir`.
def command_gen(self, *names):
    '''
    Run registered generator functions by name.

    Each argument is either a bare generator name or a ``name:count``
    pair::

        ./manage.py sqla:gen docs
        ./manage.py sqla:gen docs:10

    The matching callable from ``self.generators`` is invoked with the
    session and count; the session is committed once all have run.
    '''
    if not names:
        sys.exit('Please provide generator names')
    for spec in names:
        gen_name, count = spec, 0
        if ':' in spec:
            gen_name, raw_count = spec.split(':', 1)
            count = int(raw_count)
        create = self.generators[gen_name]
        print('Generating `{0}` count={1}'.format(gen_name, count))
        create(self.session, count)
    self.session.commit()
Runs generator functions. Run `docs` generator function:: ./manage.py sqla:gen docs Run `docs` generator function with `count=10`:: ./manage.py sqla:gen docs:10
def pdftojpg(filehandle, meta):
    """Convert a PDF into a JPEG, replacing ``filehandle.stream`` in place.

    The rendered image is written to a fresh ``BytesIO`` buffer which is
    assigned back onto the FileStorage-like object that was passed in.

    Optional ``meta`` keys:

    * ``resolution``: int or (int, int) used by wand for rendering,
      defaults to 300.
    * ``width``: new image width after resizing, defaults to 1080.
    * ``bgcolor``: new background color, defaults to ``'white'``.
    """
    dpi = meta.get('resolution', 300)
    target_width = meta.get('width', 1080)
    background = Color(meta.get('bgcolor', 'white'))

    out_buffer = BytesIO()
    with Image(blob=filehandle.stream, resolution=dpi) as img:
        img.background_color = background
        img.alpha_channel = False
        img.format = 'jpeg'
        # Preserve the aspect ratio while scaling to the requested width.
        scale = target_width / img.width
        img.resize(target_width, int(scale * img.height))
        img.compression_quality = 90
        img.save(file=out_buffer)
    out_buffer.seek(0)
    filehandle.stream = out_buffer
    return filehandle
Converts a PDF to a JPG and places it back onto the FileStorage instance passed to it as a BytesIO object. Optional meta arguments are: * resolution: int or (int, int) used for wand to determine resolution, defaults to 300. * width: new width of the image for resizing, defaults to 1080 * bgcolor: new background color, defaults to 'white'
def filter(self, query: Query, entity: type) -> Tuple[Query, Any]:
    """Fold every child node's filter into this node's combinator.

    The query is threaded through each child in order (children may add
    joins or other query state), and the per-child filter expressions are
    combined via ``self._method``.

    :param query: The sqlalchemy query.
    :type query: Query
    :param entity: The entity model of the query.
    :type entity: type
    :return: A tuple of the updated query and the combined filter to
        apply to it.
    :rtype: Tuple[Query, Any]
    """
    child_filters = []
    threaded_query = query
    for node in self._childs:
        threaded_query, expr = node.filter(threaded_query, entity)
        child_filters.append(expr)
    return threaded_query, self._method(*child_filters)
Apply the `_method` to all child nodes of this node. :param query: The SQLAlchemy query. :type query: Query :param entity: The entity model of the query. :type entity: type :return: A tuple with in first place the updated query and in second place the combined filter to apply to the query. :rtype: Tuple[Query, Any]
def clear(self):
    """Reset every cell of the grid to the EMPTY marker.

    Rebuilds ``self.grid`` as a fresh grid_height x grid_width nested
    list rather than mutating the existing rows in place.
    """
    width = self.grid_width
    self.grid = [[EMPTY] * width for _ in range(self.grid_height)]
Clears grid to be EMPTY
def writeRunSetInfoToLog(self, runSet):
    """
    Append a header block describing ``runSet`` to the txt_file, and
    remember the '-' separator line on the run set for later output.
    """
    parts = ["\n\n"]
    if runSet.name:
        parts.append(runSet.name + "\n")
    parts.append(
        "Run set {0} of {1} with options '{2}' and propertyfile '{3}'\n\n".format(
            runSet.index, len(self.benchmark.run_sets),
            " ".join(runSet.options), runSet.propertyfile))

    titleLine = self.create_output_line(
        runSet, "inputfile", "status", "cpu time",
        "wall time", "host", self.benchmark.columns, True)
    # Cache the separator so per-run output can reuse it.
    runSet.simpleLine = "-" * len(titleLine)
    parts.append(titleLine + "\n" + runSet.simpleLine + "\n")

    # write into txt_file
    self.txt_file.append("".join(parts))
This method writes the information about a run set into the txt_file.
def dmp_path(regex, kwargs=None, name=None, app_name=None):
    '''
    Build a DMP-style, convention-based URL pattern.

    This is a convenience analogous to Django's ``url()``, ``re_path()``
    and ``path()`` helpers, resolving to view functions by convention.
    Routing values come either from named groups in ``regex`` or from the
    ``kwargs`` dict:

        dmp_app        name in INSTALLED_APPS (default DEFAULT_APP)
        dmp_page       module name: project_dir/{dmp_app}/views/{dmp_page}.py
                       (default DEFAULT_PAGE)
        dmp_function   function or View class name within the module
                       (default 'process_request')
        dmp_urlparams  urlparams string to parse (default '')
    '''
    return PagePattern(regex, kwargs, name, app_name)
Creates a DMP-style, convention-based pattern that resolves to various view functions based on the 'dmp_page' value. The following should exist as 1) regex named groups or 2) items in the kwargs dict: dmp_app Should resolve to a name in INSTALLED_APPS. If missing, defaults to DEFAULT_APP. dmp_page The page name, which should resolve to a module: project_dir/{dmp_app}/views/{dmp_page}.py If missing, defaults to DEFAULT_PAGE. dmp_function The function name (or View class name) within the module. If missing, defaults to 'process_request' dmp_urlparams The urlparams string to parse. If missing, defaults to ''. The reason for this convenience function is to be similar to Django functions like url(), re_path(), and path().
def ex6_2(n):
    """
    Generate the triangle pulse of Example 6-2 in Chapter 6.

    The hard-coded signal is x[n] = 8 - n on the support -2 <= n <= 5 and
    zero elsewhere, so the supplied index array should cover at least
    [-2, 5].

    Parameters
    ----------
    n : time index ndarray covering at least -2 to +5.

    Returns
    -------
    x : ndarray of signal samples in x

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from sk_dsp_comm import sigsys as ss
    >>> n = np.arange(-5,8)
    >>> x = ss.ex6_2(n)
    >>> plt.stem(n,x) # creates a stem plot of x vs n
    """
    idx = np.asarray(n)
    x = np.zeros(len(idx))
    # Fill only the support of the pulse; everything else stays zero.
    support = (idx >= -2) & (idx <= 5)
    x[support] = 8 - idx[support]
    return x
Generate a triangle pulse as described in Example 6-2 of Chapter 6. You need to supply an index array n that covers at least [-2, 5]. The function returns the hard-coded signal of the example. Parameters ---------- n : time index ndarray covering at least -2 to +5. Returns ------- x : ndarray of signal samples in x Examples -------- >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from sk_dsp_comm import sigsys as ss >>> n = np.arange(-5,8) >>> x = ss.ex6_2(n) >>> plt.stem(n,x) # creates a stem plot of x vs n
def addResource(self, key, filePath, text):
    """Upload an organization file resource (e.g. logo or custom banner).

    Resources can be used by any member of the organization; they consume
    storage quota and are scanned for viruses.

    Inputs:
       key - The name the resource should be stored under.
       filePath - path of file to upload
       text - Some text to be written (for example, JSON or JavaScript)
              directly to the resource from a web client.
    """
    params = {
        "f": "json",
        "token": self._securityHandler.token,
        "key": key,
        "text": text,
    }
    # NOTE(review): the raw path (not an open handle) is handed to _post
    # here -- presumably _post opens the file itself; confirm against _post.
    files = {'file': filePath}
    return self._post(url=self.root + "/addresource",
                      param_dict=params,
                      files=files,
                      securityHandler=self._securityHandler,
                      proxy_url=self._proxy_url,
                      proxy_port=self._proxy_port)
The add resource operation allows the administrator to add a file resource, for example, the organization's logo or custom banner. The resource can be used by any member of the organization. File resources use storage space from your quota and are scanned for viruses. Inputs: key - The name the resource should be stored under. filePath - path of file to upload text - Some text to be written (for example, JSON or JavaScript) directly to the resource from a web client.
def reading_order(e1, e2):
    """
    Comparator sorting bboxes top to bottom, then left to right.

    Boxes whose top or bottom edges round to the same value are treated
    as belonging to the same line and ordered by their left edge;
    otherwise the higher box comes first.
    """
    b1, b2 = e1.bbox, e2.bbox
    same_line = (round(b1[y0]) == round(b2[y0])
                 or round(b1[y1]) == round(b2[y1]))
    if same_line:
        return float_cmp(b1[x0], b2[x0])
    return float_cmp(b1[y0], b2[y0])
A comparator to sort bboxes from top to bottom, left to right
def _adjustSyllabification(adjustedPhoneList, syllableList):
    '''
    Inserts spaces into a syllable if needed

    Originally the phone list and syllable list contained the same number
    of phones.  But the adjustedPhoneList may have some insertions which
    are not accounted for in the syllableList.

    Walks the adjusted phone list syllable by syllable, growing each
    syllable's window to absorb inserted blank markers (u"''") and
    mirroring those blanks into the syllable at matching positions.
    '''
    i = 0  # start index of the current syllable's window in adjustedPhoneList
    retSyllableList = []
    for syllableNum, syllable in enumerate(syllableList):
        j = len(syllable)
        # The last syllable absorbs all remaining phones.
        if syllableNum == len(syllableList) - 1:
            j = len(adjustedPhoneList) - i
        tmpPhoneList = adjustedPhoneList[i:i + j]

        # Every blank in the window pushes one real phone past its end, so
        # repeatedly extend the window until a pass finds no new blanks.
        numBlanks = -1
        phoneList = tmpPhoneList[:]
        while numBlanks != 0:
            numBlanks = tmpPhoneList.count(u"''")
            if numBlanks > 0:
                tmpPhoneList = adjustedPhoneList[i + j:i + j + numBlanks]
                phoneList.extend(tmpPhoneList)
                j += numBlanks

        # Mirror the blanks into the syllable (mutates the input syllable).
        for k, phone in enumerate(phoneList):
            if phone == u"''":
                syllable.insert(k, u"''")

        i += j
        retSyllableList.append(syllable)

    return retSyllableList
Inserts spaces into a syllable if needed Originally the phone list and syllable list contained the same number of phones. But the adjustedPhoneList may have some insertions which are not accounted for in the syllableList.
def publish(self, topic, obj, reference_message=None):
    """
    Publish ``obj`` on ``topic`` over the pubsub connection, wrapped in a
    protocol-conformant Event (including serialization/encryption for the
    wire).  This method should *not* be subclassed.
    """
    logging.debug("Publishing topic (%s): \n%s" % (topic, obj))

    event = Event(data=obj, type=topic)
    if hasattr(obj, "sender"):
        event.sender = obj.sender

    if reference_message:
        def _origin_hash(msg):
            # Walk the known locations of the hash, most specific first.
            if hasattr(msg, "original_incoming_event_hash"):
                return msg.original_incoming_event_hash
            source = getattr(msg, "source", None)
            if source is not None and hasattr(source, "hash"):
                return source.hash
            if source is not None and hasattr(source, "original_incoming_event_hash"):
                return source.original_incoming_event_hash
            if hasattr(msg, "hash"):
                return msg.hash
            return None

        found = _origin_hash(reference_message)
        if found:
            event.original_incoming_event_hash = found

    return self.publish_to_backend(
        self._localize_topic(topic),
        self.encrypt(event)
    )
Sends an object out over the pubsub connection, properly formatted, and conforming to the protocol. Handles pickling for the wire, etc. This method should *not* be subclassed.
def _getch_unix(_getall=False):
    """
    Read raw character(s) from stdin on a POSIX terminal, without waiting
    for a newline and without echo.

    # --- current algorithm ---
    # 1. switch to char-by-char input mode
    # 2. turn off echo
    # 3. wait for at least one char to appear
    # 4. read the rest of the character buffer (_getall=True)
    # 5. return list of characters (_getall on)
    #    or a single char (_getall off)
    """
    import sys, termios

    fd = sys.stdin.fileno()
    # save old terminal settings
    old_settings = termios.tcgetattr(fd)

    chars = []
    try:
        # change terminal settings - turn off canonical mode and echo.
        # in canonical mode read from stdin returns one line at a time
        # and we need one char at a time (see DESIGN.rst for more info)
        newattr = list(old_settings)
        newattr[3] &= ~termios.ICANON
        newattr[3] &= ~termios.ECHO
        newattr[6][termios.VMIN] = 1   # block until one char received
        newattr[6][termios.VTIME] = 0
        # TCSANOW below means apply settings immediately
        termios.tcsetattr(fd, termios.TCSANOW, newattr)

        # [ ] this fails when stdin is redirected, like
        #       ls -la | pager.py
        # [ ] also check on Windows
        ch = sys.stdin.read(1)
        chars = [ch]

        if _getall:
            # move rest of chars (if any) from input buffer
            # change terminal settings - enable non-blocking read
            newattr = termios.tcgetattr(fd)
            newattr[6][termios.VMIN] = 0   # CC structure
            newattr[6][termios.VTIME] = 0
            termios.tcsetattr(fd, termios.TCSANOW, newattr)

            while True:
                ch = sys.stdin.read(1)
                if ch != '':
                    chars.append(ch)
                else:
                    break
    finally:
        # restore terminal settings. Do this when all output is
        # finished - TCSADRAIN flag
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)

    if _getall:
        return chars
    else:
        return chars[0]
# --- current algorithm --- # 1. switch to char-by-char input mode # 2. turn off echo # 3. wait for at least one char to appear # 4. read the rest of the character buffer (_getall=True) # 5. return list of characters (_getall on) # or a single char (_getall off)
async def send_nym(self, did: str, verkey: str = None, alias: str = None, role: Role = None) -> None:
    """
    Send input anchor's cryptonym (including DID, verification key, plus
    optional alias and role) to the distributed ledger.

    Raise BadLedgerTxn on failure, BadIdentifier for bad DID, or BadRole
    for bad role.

    :param did: anchor DID to send to ledger
    :param verkey: optional anchor verification key
    :param alias: optional alias
    :param role: anchor role on the ledger (default value of USER)
    """

    LOGGER.debug(
        'AnchorSmith.send_nym >>> did: %s, verkey: %s, alias: %s, role: %s', did, verkey, alias, role)

    # Validate the DID before touching the ledger.
    if not ok_did(did):
        LOGGER.debug('AnchorSmith.send_nym <!< Bad DID %s', did)
        raise BadIdentifier('Bad DID {}'.format(did))

    # Role defaults to USER when none was given; token() supplies the
    # ledger's representation of the role.
    req_json = await ledger.build_nym_request(self.did, did, verkey, alias, (role or Role.USER).token())
    # Sign with this anchor's key and submit the transaction.
    await self._sign_submit(req_json)

    LOGGER.debug('AnchorSmith.send_nym <<<')
Send input anchor's cryptonym (including DID, verification key, plus optional alias and role) to the distributed ledger. Raise BadLedgerTxn on failure, BadIdentifier for bad DID, or BadRole for bad role. :param did: anchor DID to send to ledger :param verkey: optional anchor verification key :param alias: optional alias :param role: anchor role on the ledger (default value of USER)
def parse_legacy_argstring(argstring):
    '''
    Pre-parse legacy CLI input into an argument list:

    ``arg1,arg2`` => ``['arg1', 'arg2']``
    ``[item1, item2],arg2`` => ``[['item1', 'item2'], arg2]``
    '''
    # Pad the structural tokens so shlex sees them as separate words.
    padded = (argstring.replace(',', ' , ')
                       .replace('[', ' [ ')
                       .replace(']', ' ] '))

    args = []
    current = []      # tokens of the argument currently being assembled
    items = []        # tokens collected inside a [...] list
    in_list = False
    for token in shlex.split(padded):
        if token == '[' and not in_list:
            in_list = True
            continue
        if token == ']' and in_list:
            in_list = False
            args.append(items)
            items = []
            continue
        if token == ',':
            # Commas inside a list are simply separators and are dropped.
            if not in_list and current:
                args.append(''.join(current))
                current = []
            continue
        # Restore any broken up ,[]s inside quoted words.
        token = token.replace(' , ', ',').replace(' [ ', '[').replace(' ] ', ']')
        (items if in_list else current).append(token)
    if current:
        args.append(' '.join(current))
    return args
Preparses CLI input: ``arg1,arg2`` => ``['arg1', 'arg2']`` ``[item1, item2],arg2`` => ``[['item1', 'item2'], arg2]``
def handle_read(self):
    """Handle output received from the remote shell.

    One step of the per-connection state machine: fast-path prompt
    detection, password-prompt answering, line-by-line dispatch to
    callbacks or printing, and ssh host-key warnings during startup.
    """
    if self.state == STATE_DEAD:
        return
    global nr_handle_read
    nr_handle_read += 1
    new_data = self._handle_read_chunk()
    if self.debug:
        self.print_debug(b'==> ' + new_data)
    if self.handle_read_fast_case(self.read_buffer):
        return
    lf_pos = new_data.find(b'\n')
    if lf_pos >= 0:
        # Optimization: we knew there were no '\n' in the previous read
        # buffer, so we searched only in the new_data and we offset the
        # found index by the length of the previous buffer
        lf_pos += len(self.read_buffer) - len(new_data)
    elif self.state is STATE_NOT_STARTED and \
            options.password is not None and \
            b'password:' in self.read_buffer.lower():
        # No full line yet, but we are being asked for a password:
        # answer it and drop the prompt from the buffer.
        self.dispatch_write('{}\n'.format(options.password).encode())
        self.read_buffer = b''
        return
    while lf_pos >= 0:
        # For each line in the buffer
        line = self.read_buffer[:lf_pos + 1]
        if callbacks.process(line):
            # A registered callback consumed the line.
            pass
        elif self.state in (STATE_IDLE, STATE_RUNNING):
            self.print_lines(line)
        elif self.state is STATE_NOT_STARTED:
            self.read_in_state_not_started += line
            # During startup, surface ssh host-key problems to the user.
            if b'The authenticity of host' in line:
                msg = line.strip(b'\n') + b' Closing connection.'
                self.disconnect()
            elif b'REMOTE HOST IDENTIFICATION HAS CHANGED' in line:
                msg = b'Remote host identification has changed.'
            else:
                msg = None
            if msg:
                self.print_lines(msg + b' Consider manually connecting or '
                                 b'using ssh-keyscan.')
        # Go to the next line in the buffer
        self.read_buffer = self.read_buffer[lf_pos + 1:]
        if self.handle_read_fast_case(self.read_buffer):
            return
        lf_pos = self.read_buffer.find(b'\n')
    if self.state is STATE_NOT_STARTED and not self.init_string_sent:
        self.dispatch_write(self.init_string)
        self.init_string_sent = True
We got some output from a remote shell; this is one step of the connection state machine
def config_diff(args):
    """Diff one method configuration definition against another across
    workspaces.  methodConfigVersion lines are ignored unless the verbose
    argument is set.
    """
    first_lines = config_get(args).splitlines()
    first_name = args.config

    # Retarget ``args`` at the second workspace/config before re-fetching.
    args.project = args.Project
    args.workspace = args.Workspace
    if args.Config is not None:
        args.config = args.Config
    if args.Namespace is not None:
        args.namespace = args.Namespace
    second_lines = config_get(args).splitlines()

    if not args.verbose:
        first_lines = skip_cfg_ver(first_lines)
        second_lines = skip_cfg_ver(second_lines)
    return list(unified_diff(first_lines, second_lines,
                             first_name, args.config, lineterm=''))
Compare method configuration definitions across workspaces. Ignores methodConfigVersion if the verbose argument is not set
def create_build(self, tarball_url, env=None, app_name=None):
    """Create a Heroku app-setups build and return the response data.

    :param tarball_url: URL of a tarball containing an ``app.json``.
    :param env: Dict containing environment variable overrides.
    :param app_name: Name of the Heroku app to create.
    :returns: Response data as a ``dict``.
    """
    payload = {'source_blob': {'url': tarball_url}}
    if env:
        payload['overrides'] = {'env': env}
    if app_name:
        payload['app'] = {'name': app_name}
    return self.api_request('POST', '/app-setups', data=payload)
Creates an app-setups build. Returns response data as a dict. :param tarball_url: URL of a tarball containing an ``app.json``. :param env: Dict containing environment variable overrides. :param app_name: Name of the Heroku app to create. :returns: Response data as a ``dict``.
def compact_interval_string(value_list):
    """Compact a list of integers into a comma-separated string of intervals.

    Args:
      value_list: A list of sortable integers such as a list of numbers

    Returns:
      A compact string representation, such as "1-5,8,12-15".
      An empty input yields ''.
    """
    if not value_list:
        return ''

    # Sort a copy: the previous implementation sorted in place, mutating
    # the caller's list as a side effect.
    values = sorted(value_list)

    # Start by simply building up a list of separate contiguous intervals
    interval_list = []
    curr = [values[0]]
    for val in values[1:]:
        if val > curr[-1] + 1:
            interval_list.append((curr[0], curr[-1]))
            curr = [val]
        else:
            curr.append(val)
    interval_list.append((curr[0], curr[-1]))

    # For each interval collapse it down to "first-last" or just "first"
    # if first == last.
    return ','.join(
        '{}-{}'.format(first, last) if first != last else str(first)
        for first, last in interval_list)
Compact a list of integers into a comma-separated string of intervals. Args: value_list: A list of sortable integers such as a list of numbers Returns: A compact string representation, such as "1-5,8,12-15"
def write(self, inline):
    """
    Write a line to stdout unless the calling module is blacklisted.

    Tries to get the name of the calling module to see if it should be
    filtered.  If there is no calling frame, falls back to the current
    frame in case there's a traceback before any calling module exists.
    """
    frame = inspect.currentframe().f_back
    if frame:
        mod = frame.f_globals.get('__name__')
    else:
        mod = sys._getframe(0).f_globals.get('__name__')
    # Idiom fix: 'mod not in' instead of 'not mod in'.
    if mod not in self.modulenames:
        self.stdout.write(inline)
Write a line to stdout if it isn't in a blacklist Try to get the name of the calling module to see if we want to filter it. If there is no calling module, use current frame in case there's a traceback before there is any calling module
def rollout(self, batch_info: BatchInfo, model: RlModel, number_of_steps: int) -> Rollout:
    """
    Roll the model out in the environment for ``number_of_steps`` steps,
    storing each transition in the experience replay buffer, and return
    the collected trajectories converted to transitions.
    """
    assert not model.is_recurrent, "Replay env roller does not support recurrent models"

    accumulator = TensorAccumulator()
    episode_information = []  # List of dictionaries with episode information

    for step_idx in range(number_of_steps):
        step = model.step(self.last_observation)

        # Optionally perturb the policy's actions with exploration noise.
        if self.action_noise is not None:
            step['actions'] = self.action_noise(step['actions'], batch_info=batch_info)

        replay_extra_information = {}

        accumulator.add('observations', self.last_observation_cpu)

        # Add step to the tensor accumulator
        for name, tensor in step.items():
            tensor_cpu = tensor.cpu()
            accumulator.add(name, tensor_cpu)
            if name != 'actions':
                replay_extra_information[name] = tensor_cpu.numpy()

        actions_numpy = step['actions'].detach().cpu().numpy()
        new_obs, new_rewards, new_dones, new_infos = self.environment.step(actions_numpy)

        # Store rollout in the experience replay buffer
        self.replay_buffer.store_transition(
            frame=self.last_observation_cpu.numpy(),
            action=actions_numpy,
            reward=new_rewards,
            done=new_dones,
            extra_info=replay_extra_information
        )

        if self.ret_rms is not None:
            # Track running statistics of the discounted returns so
            # rewards can be normalized below.
            self.accumulated_returns = new_rewards + self.discount_factor * self.accumulated_returns
            self.ret_rms.update(self.accumulated_returns)

        # Done is flagged true when the episode has ended AND the frame we see is already a first frame from the
        # next episode
        dones_tensor = torch.from_numpy(new_dones.astype(np.float32)).clone()
        accumulator.add('dones', dones_tensor)

        if self.action_noise is not None:
            self.action_noise.reset_training_state(dones_tensor, batch_info=batch_info)

        # Reset accumulated returns on episode boundaries.
        self.accumulated_returns = self.accumulated_returns * (1.0 - new_dones.astype(np.float32))

        self.last_observation_cpu = torch.from_numpy(new_obs).clone()
        self.last_observation = self.last_observation_cpu.to(self.device)

        if self.ret_rms is not None:
            # Normalize rewards by the running return std, then clip.
            # NOTE(review): the clipping bound reuses self.clip_obs (an
            # observation-clipping value) -- confirm this is intended.
            new_rewards = np.clip(new_rewards / np.sqrt(self.ret_rms.var + 1e-8), -self.clip_obs,
                                  self.clip_obs)

        accumulator.add('rewards', torch.from_numpy(new_rewards.astype(np.float32)).clone())

        episode_information.append(new_infos)

    accumulated_tensors = accumulator.result()

    return Trajectories(
        num_steps=accumulated_tensors['observations'].size(0),
        num_envs=accumulated_tensors['observations'].size(1),
        environment_information=episode_information,
        transition_tensors=accumulated_tensors,
        rollout_tensors={}
    ).to_transitions()
Calculate env rollout
def _sendline(self, line):
    """Send exactly one line to the device.

    Args:
        line str: data to send to the device (CRLF is appended)
    """
    self.lines = []
    # Drain any stale device output so the next read starts clean.
    try:
        self._read()
    except socket.error:
        logging.debug('Nothing cleared')

    logger.debug('sending [%s]', line)
    self._write(line + '\r\n')

    # wait for write to complete
    time.sleep(0.5)
Send exactly one line to the device Args: line str: data send to device
def decorate(decorator_cls, *args, **kwargs):
    """Build a decorator function that applies ``decorator_cls``.

    A Python-side subclass of ``decorator_cls`` is created once per class
    and cached in the module-level ``_wrappers`` registry; the returned
    decorator instantiates that subclass around the target function and
    copies its metadata via ``_update_wrapper``.
    """
    global _wrappers
    if _wrappers.get(decorator_cls, None) is None:
        class PythonWrapper(decorator_cls):
            pass
        PythonWrapper.__name__ = decorator_cls.__name__ + "PythonWrapper"
        _wrappers[decorator_cls] = PythonWrapper
    wrapper_cls = _wrappers[decorator_cls]

    def decorator(fn):
        wrapped = wrapper_cls(fn, *args, **kwargs)
        _update_wrapper(wrapped, fn)
        return wrapped
    return decorator
Creates a decorator function that applies the decorator_cls that was passed in.
def import_orm(self):
    """
    Import ORM classes for oedb access depending on the input data source
    configured in self.config, which is loaded from 'config_db_tables.cfg'.

    Returns
    -------
    dict
        Mapping of ORM table classes ('orm_*' keys) plus per-table
        SQLAlchemy version-filter conditions ('version_condition_*' keys).
    """
    orm = {}

    data_source = self.config['input_data_source']['input_data']
    mv_grid_districts_name = self.config[data_source]['mv_grid_districts']
    mv_stations_name = self.config[data_source]['mv_stations']
    lv_load_areas_name = self.config[data_source]['lv_load_areas']
    lv_grid_district_name = self.config[data_source]['lv_grid_district']
    lv_stations_name = self.config[data_source]['lv_stations']
    conv_generators_name = self.config[data_source]['conv_generators']
    re_generators_name = self.config[data_source]['re_generators']

    from egoio.db_tables import model_draft as orm_model_draft, \
        supply as orm_supply, \
        demand as orm_demand, \
        grid as orm_grid

    if data_source == 'model_draft':
        orm['orm_mv_grid_districts'] = orm_model_draft.__getattribute__(mv_grid_districts_name)
        orm['orm_mv_stations'] = orm_model_draft.__getattribute__(mv_stations_name)
        orm['orm_lv_load_areas'] = orm_model_draft.__getattribute__(lv_load_areas_name)
        orm['orm_lv_grid_district'] = orm_model_draft.__getattribute__(lv_grid_district_name)
        orm['orm_lv_stations'] = orm_model_draft.__getattribute__(lv_stations_name)
        orm['orm_conv_generators'] = orm_model_draft.__getattribute__(conv_generators_name)
        orm['orm_re_generators'] = orm_model_draft.__getattribute__(re_generators_name)
        # model_draft tables are unversioned: make every version filter a
        # no-op condition that is always true.
        orm['version_condition_mvgd'] = 1 == 1
        orm['version_condition_mv_stations'] = 1 == 1
        orm['version_condition_la'] = 1 == 1
        orm['version_condition_lvgd'] = 1 == 1
        orm['version_condition_mvlvst'] = 1 == 1
        orm['version_condition_re'] = 1 == 1
        orm['version_condition_conv'] = 1 == 1
    elif data_source == 'versioned':
        orm['orm_mv_grid_districts'] = orm_grid.__getattribute__(mv_grid_districts_name)
        orm['orm_mv_stations'] = orm_grid.__getattribute__(mv_stations_name)
        orm['orm_lv_load_areas'] = orm_demand.__getattribute__(lv_load_areas_name)
        orm['orm_lv_grid_district'] = orm_grid.__getattribute__(lv_grid_district_name)
        orm['orm_lv_stations'] = orm_grid.__getattribute__(lv_stations_name)
        orm['orm_conv_generators'] = orm_supply.__getattribute__(conv_generators_name)
        orm['orm_re_generators'] = orm_supply.__getattribute__(re_generators_name)
        orm['data_version'] = self.config[data_source]['version']
        # Versioned tables: restrict every query to the configured version.
        orm['version_condition_mvgd'] =\
            orm['orm_mv_grid_districts'].version == orm['data_version']
        orm['version_condition_mv_stations'] = \
            orm['orm_mv_stations'].version == orm['data_version']
        orm['version_condition_la'] =\
            orm['orm_lv_load_areas'].version == orm['data_version']
        orm['version_condition_lvgd'] =\
            orm['orm_lv_grid_district'].version == orm['data_version']
        orm['version_condition_mvlvst'] =\
            orm['orm_lv_stations'].version == orm['data_version']
        orm['version_condition_re'] =\
            orm['orm_re_generators'].columns.version == orm['data_version']
        orm['version_condition_conv'] =\
            orm['orm_conv_generators'].columns.version == orm['data_version']
    else:
        logger.error("Invalid data source {} provided. Please re-check the file "
                     "`config_db_tables.cfg`".format(data_source))
        raise NameError("{} is no valid data source!".format(data_source))

    return orm
Import ORM classes for oedb access depending on the input data source configured in self.config, which is loaded from 'config_db_tables.cfg' Returns ------- dict Mapping of ORM table classes and SQLAlchemy version-filter conditions
def dumps(self): r"""Turn the Latex Object into a string in Latex format.""" string = "" if self.row_height is not None: row_height = Command('renewcommand', arguments=[ NoEscape(r'\arraystretch'), self.row_height]) string += row_height.dumps() + '%\n' if self.col_space is not None: col_space = Command('setlength', arguments=[ NoEscape(r'\tabcolsep'), self.col_space]) string += col_space.dumps() + '%\n' return string + super().dumps()
r"""Turn the Latex Object into a string in Latex format.
def visit_List(self, node: ast.List) -> List[Any]: """Visit the elements and assemble the results into a list.""" if isinstance(node.ctx, ast.Store): raise NotImplementedError("Can not compute the value of a Store on a list") result = [self.visit(node=elt) for elt in node.elts] self.recomputed_values[node] = result return result
Visit the elements and assemble the results into a list.
def reindex_like(self, other, method=None, tolerance=None, copy=True): """Conform this object onto the indexes of another object, filling in missing values with NaN. Parameters ---------- other : Dataset or DataArray Object with an 'indexes' attribute giving a mapping from dimension names to pandas.Index objects, which provides coordinates upon which to index the variables in this dataset. The indexes on this other object need not be the same as the indexes on this dataset. Any mis-matched index values will be filled in with NaN, and any mis-matched dimension names will simply be ignored. method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional Method to use for filling index values from other not found in this dataset: * None (default): don't fill gaps * pad / ffill: propagate last valid index value forward * backfill / bfill: propagate next valid index value backward * nearest: use nearest valid index value (requires pandas>=0.16) tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Requires pandas>=0.17. copy : bool, optional If ``copy=True``, data in the return value is always copied. If ``copy=False`` and reindexing is unnecessary, or can be performed with only slice operations, then the output may share memory with the input. In either case, a new xarray object is always returned. Returns ------- reindexed : Dataset Another dataset, with this dataset's data but coordinates from the other object. See Also -------- Dataset.reindex align """ indexers = alignment.reindex_like_indexers(self, other) return self.reindex(indexers=indexers, method=method, copy=copy, tolerance=tolerance)
Conform this object onto the indexes of another object, filling in missing values with NaN. Parameters ---------- other : Dataset or DataArray Object with an 'indexes' attribute giving a mapping from dimension names to pandas.Index objects, which provides coordinates upon which to index the variables in this dataset. The indexes on this other object need not be the same as the indexes on this dataset. Any mis-matched index values will be filled in with NaN, and any mis-matched dimension names will simply be ignored. method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional Method to use for filling index values from other not found in this dataset: * None (default): don't fill gaps * pad / ffill: propagate last valid index value forward * backfill / bfill: propagate next valid index value backward * nearest: use nearest valid index value (requires pandas>=0.16) tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Requires pandas>=0.17. copy : bool, optional If ``copy=True``, data in the return value is always copied. If ``copy=False`` and reindexing is unnecessary, or can be performed with only slice operations, then the output may share memory with the input. In either case, a new xarray object is always returned. Returns ------- reindexed : Dataset Another dataset, with this dataset's data but coordinates from the other object. See Also -------- Dataset.reindex align
def get(self, url=None, delimiter="/"): """Path is an s3 url. Ommiting the path or providing "s3://" as the path will return a list of all buckets. Otherwise, all subdirectories and their contents will be shown. """ params = {'Delimiter': delimiter} bucket, obj_key = _parse_url(url) if bucket: params['Bucket'] = bucket else: return self.call("ListBuckets", response_data_key="Buckets") if obj_key: params['Prefix'] = obj_key objects = self.call("ListObjects", response_data_key="Contents", **params) if objects: for obj in objects: obj['url'] = "s3://{0}/{1}".format(bucket, obj['Key']) return objects
Path is an s3 url. Omitting the path or providing "s3://" as the path will return a list of all buckets. Otherwise, all subdirectories and their contents will be shown.
def get_id_constraints(pkname, pkey): """Returns primary key consraints. :pkname: if a string, returns a dict with pkname=pkey. pkname and pkey must be enumerables of matching length. """ if isinstance(pkname, str): return {pkname: pkey} else: return dict(zip(pkname, pkey))
Returns primary key constraints. :pkname: if a string, returns a dict with pkname=pkey; otherwise pkname and pkey must be enumerables of matching length and are zipped into a dict.
def _get_lane_properties(self, node): """ Parses the given XML node Args: node (xml): XML node. .. code-block:: xml <bpmn2:lane id="Lane_8" name="Lane 8"> <bpmn2:extensionElements> <camunda:properties> <camunda:property value="foo,bar" name="perms"/> </camunda:properties> </bpmn2:extensionElements> </bpmn2:lane> Returns: {'perms': 'foo,bar'} """ lane_name = self.get_lane(node.get('id')) lane_data = {'name': lane_name} for a in self.xpath(".//bpmn:lane[@name='%s']/*/*/" % lane_name): lane_data[a.attrib['name']] = a.attrib['value'].strip() return lane_data
Parses the given XML node Args: node (xml): XML node. .. code-block:: xml <bpmn2:lane id="Lane_8" name="Lane 8"> <bpmn2:extensionElements> <camunda:properties> <camunda:property value="foo,bar" name="perms"/> </camunda:properties> </bpmn2:extensionElements> </bpmn2:lane> Returns: {'perms': 'foo,bar'}
def get_volume_object_info(self, location): """ Fetches information about single volume object - usually file :param location: object location :return: """ param = {'location': location} data = self._api.get(url=self._URL['object'].format( id=self.id), params=param).json() return VolumeObject(api=self._api, **data)
Fetches information about single volume object - usually file :param location: object location :return:
def start(self, exceptions): """Start the Heartbeat Checker. :param list exceptions: :return: """ if not self._interval: return False self._running.set() with self._lock: self._threshold = 0 self._reads_since_check = 0 self._writes_since_check = 0 self._exceptions = exceptions LOGGER.debug('Heartbeat Checker Started') return self._start_new_timer()
Start the Heartbeat Checker. :param list exceptions: :return:
def GetFormattedMessages(self, event): """Retrieves the formatted messages related to the event. Args: event (EventObject): event. Returns: tuple: containing: str: full message string or None if no event formatter was found. str: short message string or None if no event formatter was found. """ event_formatter = self.GetEventFormatter(event) if not event_formatter: return None, None return event_formatter.GetMessages(self._formatter_mediator, event)
Retrieves the formatted messages related to the event. Args: event (EventObject): event. Returns: tuple: containing: str: full message string or None if no event formatter was found. str: short message string or None if no event formatter was found.
def generic_visit(self, node): """Surround node statement with a try/except block to catch errors. This method is called for every node of the parsed code, and only changes statement lines. Args: node (ast.AST): node statement to surround. """ if (isinstance(node, ast.stmt) and not isinstance(node, ast.FunctionDef)): new_node = self.wrap_with_try(node) # handling try except statement if isinstance(node, self.ast_try_except): self.try_except_handler(node) return new_node # Run recursively on all sub nodes super(ErrorsCatchTransformer, self).generic_visit(node) return new_node # Run recursively on all sub nodes return super(ErrorsCatchTransformer, self).generic_visit(node)
Surround node statement with a try/except block to catch errors. This method is called for every node of the parsed code, and only changes statement lines. Args: node (ast.AST): node statement to surround.
def calculate(self, **state): """ Calculate dynamic viscosity at the specified temperature and composition: :param T: [K] temperature :param y: [mass fraction] composition dictionary , e.g. \ {'SiO2': 0.25, 'CaO': 0.25, 'MgO': 0.25, 'FeO': 0.25} :returns: [Pa.s] dynamic viscosity The **state parameter contains the keyword argument(s) specified above\ that are used to describe the state of the material. """ T = state['T'] y = state['y'] x = amount_fractions(y) return super().calculate(T=T, x=x)
Calculate dynamic viscosity at the specified temperature and composition: :param T: [K] temperature :param y: [mass fraction] composition dictionary , e.g. \ {'SiO2': 0.25, 'CaO': 0.25, 'MgO': 0.25, 'FeO': 0.25} :returns: [Pa.s] dynamic viscosity The **state parameter contains the keyword argument(s) specified above\ that are used to describe the state of the material.
def search_directory(self, **kwargs): """ SearchAccount is deprecated, using SearchDirectory :param query: Query string - should be an LDAP-style filter string (RFC 2254) :param limit: The maximum number of accounts to return (0 is default and means all) :param offset: The starting offset (0, 25, etc) :param domain: The domain name to limit the search to :param applyCos: applyCos - Flag whether or not to apply the COS policy to account. Specify 0 (false) if only requesting attrs that aren't inherited from COS :param applyConfig: whether or not to apply the global config attrs to account. specify 0 (false) if only requesting attrs that aren't inherited from global config :param sortBy: Name of attribute to sort on. Default is the account name. :param types: Comma-separated list of types to return. Legal values are: accounts|distributionlists|aliases|resources|domains|coses (default is accounts) :param sortAscending: Whether to sort in ascending order. Default is 1 (true) :param countOnly: Whether response should be count only. Default is 0 (false) :param attrs: Comma-seperated list of attrs to return ("displayName", "zimbraId", "zimbraAccountStatus") :return: dict of list of "account" "alias" "dl" "calresource" "domain" "cos" """ search_response = self.request('SearchDirectory', kwargs) result = {} items = { "account": zobjects.Account.from_dict, "domain": zobjects.Domain.from_dict, "dl": zobjects.DistributionList.from_dict, "cos": zobjects.COS.from_dict, "calresource": zobjects.CalendarResource.from_dict # "alias": TODO, } for obj_type, func in items.items(): if obj_type in search_response: if isinstance(search_response[obj_type], list): result[obj_type] = [ func(v) for v in search_response[obj_type]] else: result[obj_type] = func(search_response[obj_type]) return result
SearchAccount is deprecated, using SearchDirectory :param query: Query string - should be an LDAP-style filter string (RFC 2254) :param limit: The maximum number of accounts to return (0 is default and means all) :param offset: The starting offset (0, 25, etc) :param domain: The domain name to limit the search to :param applyCos: applyCos - Flag whether or not to apply the COS policy to account. Specify 0 (false) if only requesting attrs that aren't inherited from COS :param applyConfig: whether or not to apply the global config attrs to account. specify 0 (false) if only requesting attrs that aren't inherited from global config :param sortBy: Name of attribute to sort on. Default is the account name. :param types: Comma-separated list of types to return. Legal values are: accounts|distributionlists|aliases|resources|domains|coses (default is accounts) :param sortAscending: Whether to sort in ascending order. Default is 1 (true) :param countOnly: Whether response should be count only. Default is 0 (false) :param attrs: Comma-seperated list of attrs to return ("displayName", "zimbraId", "zimbraAccountStatus") :return: dict of list of "account" "alias" "dl" "calresource" "domain" "cos"
def update(self, obj, **kwargs): "Update the tree item when the object name changes" # search for the old name: child = self.tree.FindItem(self.root, kwargs['name']) if DEBUG: print "update child", child, kwargs if child: self.tree.ScrollTo(child) self.tree.SetCurrentItem(child) self.tree.SelectItem(child) child.Selected = True # update the new name self.tree.SetItemText(child, obj.name, 0)
Update the tree item when the object name changes
def mtie_phase_fast(phase, rate=1.0, data_type="phase", taus=None): """ fast binary decomposition algorithm for MTIE See: STEFANO BREGNI "Fast Algorithms for TVAR and MTIE Computation in Characterization of Network Synchronization Performance" """ rate = float(rate) phase = np.asarray(phase) k_max = int(np.floor(np.log2(len(phase)))) phase = phase[0:pow(2, k_max)] # truncate data to 2**k_max datapoints assert len(phase) == pow(2, k_max) #k = 1 taus = [ pow(2,k) for k in range(k_max)] #while k <= k_max: # tau = pow(2, k) # taus.append(tau) #print tau # k += 1 print("taus N=", len(taus), " ",taus) devs = np.zeros(len(taus)) deverrs = np.zeros(len(taus)) ns = np.zeros(len(taus)) taus_used = np.array(taus) # [(1.0/rate)*t for t in taus] # matrices to store results mtie_max = np.zeros((len(phase)-1, k_max)) mtie_min = np.zeros((len(phase)-1, k_max)) for kidx in range(k_max): k = kidx+1 imax = len(phase)-pow(2, k)+1 #print k, imax tie = np.zeros(imax) ns[kidx]=imax #print np.max( tie ) for i in range(imax): if k == 1: mtie_max[i, kidx] = max(phase[i], phase[i+1]) mtie_min[i, kidx] = min(phase[i], phase[i+1]) else: p = int(pow(2, k-1)) mtie_max[i, kidx] = max(mtie_max[i, kidx-1], mtie_max[i+p, kidx-1]) mtie_min[i, kidx] = min(mtie_min[i, kidx-1], mtie_min[i+p, kidx-1]) #for i in range(imax): tie[i] = mtie_max[i, kidx] - mtie_min[i, kidx] #print tie[i] devs[kidx] = np.amax(tie) # maximum along axis #print "maximum %2.4f" % devs[kidx] #print np.amax( tie ) #for tau in taus: #for devs = np.array(devs) print("devs N=",len(devs)," ",devs) print("taus N=", len(taus_used), " ",taus_used) return remove_small_ns(taus_used, devs, deverrs, ns)
fast binary decomposition algorithm for MTIE See: STEFANO BREGNI "Fast Algorithms for TVAR and MTIE Computation in Characterization of Network Synchronization Performance"
def trust_key(keyid=None, fingerprint=None, trust_level=None, user=None):
    '''
    Set the trust level for a key in GPG keychain

    keyid
        The keyid of the key to set the trust level for.

    fingerprint
        The fingerprint of the key to set the trust level for.

    trust_level
        The trust level to set for the specified key, must be one
        of the following:
        expired, unknown, not_trusted, marginally, fully, ultimately

    user
        Which user's keychain to access, defaults to user Salt is running as.
        Passing the user as ``salt`` will set the GnuPG home directory to the
        ``/etc/salt/gpgkeys``.

    CLI Example:

    .. code-block:: bash

        salt '*' gpg.trust_key keyid='3FAD9F1E' trust_level='marginally'
        salt '*' gpg.trust_key fingerprint='53C96788253E58416D20BCD352952C84C3252192' trust_level='not_trusted'
        salt '*' gpg.trust_key keys=3FAD9F1E trust_level='ultimately' user='username'
    '''
    ret = {
        'res': True,
        'message': ''
    }

    _VALID_TRUST_LEVELS = ['expired', 'unknown',
                           'not_trusted', 'marginally',
                           'fully', 'ultimately']

    # fingerprint and keyid are mutually exclusive selectors.
    if fingerprint and keyid:
        ret['res'] = False
        ret['message'] = 'Only specify one argument, fingerprint or keyid'
        return ret

    if not fingerprint:
        if keyid:
            # Resolve the keyid to a fingerprint via the local keychain.
            key = get_key(keyid, user=user)
            if key:
                if 'fingerprint' not in key:
                    ret['res'] = False
                    ret['message'] = 'Fingerprint not found for keyid {0}'.format(keyid)
                    return ret
                fingerprint = key['fingerprint']
            else:
                ret['res'] = False
                ret['message'] = 'KeyID {0} not in GPG keychain'.format(keyid)
                return ret
        else:
            ret['res'] = False
            ret['message'] = 'Required argument, fingerprint or keyid'
            return ret

    if trust_level not in _VALID_TRUST_LEVELS:
        return 'ERROR: Valid trust levels - {0}'.format(','.join(_VALID_TRUST_LEVELS))

    # gpg --import-ownertrust reads "<fingerprint>:<numeric trust>" lines
    # from stdin.
    stdin = '{0}:{1}\n'.format(fingerprint, NUM_TRUST_DICT[trust_level])
    cmd = [_gpg(), '--import-ownertrust']
    _user = user

    if user == 'salt':
        # The special 'salt' user keeps its keyring under the salt config
        # dir, and the gpg command itself is executed as root.
        homeDir = os.path.join(__salt__['config.get']('config_dir'), 'gpgkeys')
        cmd.extend(['--homedir', homeDir])
        _user = 'root'
    res = __salt__['cmd.run_all'](cmd,
                                  stdin=stdin,
                                  runas=_user,
                                  python_shell=False)

    if not res['retcode'] == 0:
        ret['res'] = False
        ret['message'] = res['stderr']
    else:
        if res['stderr']:
            # Parse the numeric trust value(s) gpg reports on stderr:
            # two digits -> trust changed from old to new, otherwise the
            # first digit is the trust level that was set.
            _match = re.findall(r'\d', res['stderr'])
            if len(_match) == 2:
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Changing ownership trust from {0} to {1}.'.format(
                    INV_NUM_TRUST_DICT[_match[0]], INV_NUM_TRUST_DICT[_match[1]]
                )
            else:
                ret['fingerprint'] = fingerprint
                ret['message'] = 'Setting ownership trust to {0}.'.format(INV_NUM_TRUST_DICT[_match[0]])
        else:
            ret['message'] = res['stderr']
    return ret
Set the trust level for a key in GPG keychain keyid The keyid of the key to set the trust level for. fingerprint The fingerprint of the key to set the trust level for. trust_level The trust level to set for the specified key, must be one of the following: expired, unknown, not_trusted, marginally, fully, ultimately user Which user's keychain to access, defaults to user Salt is running as. Passing the user as ``salt`` will set the GnuPG home directory to the ``/etc/salt/gpgkeys``. CLI Example: .. code-block:: bash salt '*' gpg.trust_key keyid='3FAD9F1E' trust_level='marginally' salt '*' gpg.trust_key fingerprint='53C96788253E58416D20BCD352952C84C3252192' trust_level='not_trusted' salt '*' gpg.trust_key keys=3FAD9F1E trust_level='ultimately' user='username'
def _block(self, rdd, bsize, dtype): """Execute the blocking process on the given rdd. Parameters ---------- rdd : pyspark.rdd.RDD Distributed data to block bsize : int or None The desired size of the blocks Returns ------- rdd : pyspark.rdd.RDD Blocked rdd. """ return rdd.mapPartitions(lambda x: _block_tuple(x, dtype, bsize))
Execute the blocking process on the given rdd. Parameters ---------- rdd : pyspark.rdd.RDD Distributed data to block bsize : int or None The desired size of the blocks Returns ------- rdd : pyspark.rdd.RDD Blocked rdd.
def _pop_import_LOAD_ATTRs(module_name, queue): """ Pop LOAD_ATTR instructions for an import of the form:: import a.b.c as d which should generate bytecode like this:: 1 0 LOAD_CONST 0 (0) 3 LOAD_CONST 1 (None) 6 IMPORT_NAME 0 (a.b.c.d) 9 LOAD_ATTR 1 (b) 12 LOAD_ATTR 2 (c) 15 LOAD_ATTR 3 (d) 18 STORE_NAME 3 (d) """ popped = popwhile(is_a(instrs.LOAD_ATTR), queue, side='left') if popped: expected = module_name.split('.', maxsplit=1)[1] actual = '.'.join(map(op.attrgetter('arg'), popped)) if expected != actual: raise DecompilationError( "Decompiling import of module %s, but LOAD_ATTRS imply %s" % ( expected, actual, ) ) return popped
Pop LOAD_ATTR instructions for an import of the form:: import a.b.c as d which should generate bytecode like this:: 1 0 LOAD_CONST 0 (0) 3 LOAD_CONST 1 (None) 6 IMPORT_NAME 0 (a.b.c.d) 9 LOAD_ATTR 1 (b) 12 LOAD_ATTR 2 (c) 15 LOAD_ATTR 3 (d) 18 STORE_NAME 3 (d)
def on_all_ok(self): """ This method is called when all tasks reach S_OK Ir runs `mrgddb` in sequential on the local machine to produce the final DDB file in the outdir of the `Work`. """ # Merge DDB files. out_ddb = self.merge_ddb_files() return self.Results(node=self, returncode=0, message="DDB merge done")
This method is called when all tasks reach S_OK. It runs `mrgddb` in sequential on the local machine to produce the final DDB file in the outdir of the `Work`.
def linear_reaction_coefficients(model, reactions=None): """Coefficient for the reactions in a linear objective. Parameters ---------- model : cobra model the model object that defined the objective reactions : list an optional list for the reactions to get the coefficients for. All reactions if left missing. Returns ------- dict A dictionary where the key is the reaction object and the value is the corresponding coefficient. Empty dictionary if there are no linear terms in the objective. """ linear_coefficients = {} reactions = model.reactions if not reactions else reactions try: objective_expression = model.solver.objective.expression coefficients = objective_expression.as_coefficients_dict() except AttributeError: return linear_coefficients for rxn in reactions: forward_coefficient = coefficients.get(rxn.forward_variable, 0) reverse_coefficient = coefficients.get(rxn.reverse_variable, 0) if forward_coefficient != 0: if forward_coefficient == -reverse_coefficient: linear_coefficients[rxn] = float(forward_coefficient) return linear_coefficients
Coefficient for the reactions in a linear objective. Parameters ---------- model : cobra model the model object that defined the objective reactions : list an optional list for the reactions to get the coefficients for. All reactions if left missing. Returns ------- dict A dictionary where the key is the reaction object and the value is the corresponding coefficient. Empty dictionary if there are no linear terms in the objective.
def describe_alarms(self, action_prefix=None, alarm_name_prefix=None, alarm_names=None, max_records=None, state_value=None, next_token=None): """ Retrieves alarms with the specified names. If no name is specified, all alarms for the user are returned. Alarms can be retrieved by using only a prefix for the alarm name, the alarm state, or a prefix for any action. :type action_prefix: string :param action_name: The action name prefix. :type alarm_name_prefix: string :param alarm_name_prefix: The alarm name prefix. AlarmNames cannot be specified if this parameter is specified. :type alarm_names: list :param alarm_names: A list of alarm names to retrieve information for. :type max_records: int :param max_records: The maximum number of alarm descriptions to retrieve. :type state_value: string :param state_value: The state value to be used in matching alarms. :type next_token: string :param next_token: The token returned by a previous call to indicate that there is more data. :rtype list """ params = {} if action_prefix: params['ActionPrefix'] = action_prefix if alarm_name_prefix: params['AlarmNamePrefix'] = alarm_name_prefix elif alarm_names: self.build_list_params(params, alarm_names, 'AlarmNames.member.%s') if max_records: params['MaxRecords'] = max_records if next_token: params['NextToken'] = next_token if state_value: params['StateValue'] = state_value return self.get_list('DescribeAlarms', params, [('MetricAlarms', MetricAlarms)])[0]
Retrieves alarms with the specified names. If no name is specified, all alarms for the user are returned. Alarms can be retrieved by using only a prefix for the alarm name, the alarm state, or a prefix for any action. :type action_prefix: string :param action_name: The action name prefix. :type alarm_name_prefix: string :param alarm_name_prefix: The alarm name prefix. AlarmNames cannot be specified if this parameter is specified. :type alarm_names: list :param alarm_names: A list of alarm names to retrieve information for. :type max_records: int :param max_records: The maximum number of alarm descriptions to retrieve. :type state_value: string :param state_value: The state value to be used in matching alarms. :type next_token: string :param next_token: The token returned by a previous call to indicate that there is more data. :rtype list
def _check_hla_alleles( alleles, valid_alleles=None): """ Given a list of HLA alleles and an optional list of valid HLA alleles, return a set of alleles that we will pass into the MHC binding predictor. """ require_iterable_of(alleles, string_types, "HLA alleles") # Don't run the MHC predictor twice for homozygous alleles, # only run it for unique alleles alleles = { normalize_allele_name(allele.strip().upper()) for allele in alleles } if valid_alleles: # For some reason netMHCpan drops the '*' in names, so # 'HLA-A*03:01' becomes 'HLA-A03:01' missing_alleles = [ allele for allele in alleles if allele not in valid_alleles ] if len(missing_alleles) > 0: raise UnsupportedAllele( "Unsupported HLA alleles: %s" % missing_alleles) return list(alleles)
Given a list of HLA alleles and an optional list of valid HLA alleles, return a set of alleles that we will pass into the MHC binding predictor.
def which(program, add_win_suffixes=True): """Mimic 'which' command behavior. Adapted from https://stackoverflow.com/a/377028 """ def is_exe(fpath): """Determine if program exists and is executable.""" return os.path.isfile(fpath) and os.access(fpath, os.X_OK) fpath, fname = os.path.split(program) if add_win_suffixes and platform.system().lower() == 'windows' and not ( fname.endswith('.exe') or fname.endswith('.cmd')): fnames = [fname + '.exe', fname + '.cmd'] else: fnames = [fname] for i in fnames: if fpath: exe_file = os.path.join(fpath, i) if is_exe(exe_file): return exe_file else: for path in os.environ['PATH'].split(os.pathsep): exe_file = os.path.join(path, i) if is_exe(exe_file): return exe_file return None
Mimic 'which' command behavior. Adapted from https://stackoverflow.com/a/377028
def prettify_json_file(file_list): """ prettify JSON testcase format """ for json_file in set(file_list): if not json_file.endswith(".json"): logger.log_warning("Only JSON file format can be prettified, skip: {}".format(json_file)) continue logger.color_print("Start to prettify JSON file: {}".format(json_file), "GREEN") dir_path = os.path.dirname(json_file) file_name, file_suffix = os.path.splitext(os.path.basename(json_file)) outfile = os.path.join(dir_path, "{}.pretty.json".format(file_name)) with io.open(json_file, 'r', encoding='utf-8') as stream: try: obj = json.load(stream) except ValueError as e: raise SystemExit(e) with io.open(outfile, 'w', encoding='utf-8') as out: json.dump(obj, out, indent=4, separators=(',', ': ')) out.write('\n') print("success: {}".format(outfile))
prettify JSON testcase format
def latexify(obj, **kwargs): """Render an object in LaTeX appropriately. """ if hasattr(obj, '__pk_latex__'): return obj.__pk_latex__(**kwargs) if isinstance(obj, text_type): from .unicode_to_latex import unicode_to_latex return unicode_to_latex(obj) if isinstance(obj, bool): # isinstance(True, int) = True, so gotta handle this first. raise ValueError('no well-defined LaTeXification of bool %r' % obj) if isinstance(obj, float): nplaces = kwargs.get('nplaces') if nplaces is None: return '$%f$' % obj return '$%.*f$' % (nplaces, obj) if isinstance(obj, int): return '$%d$' % obj if isinstance(obj, binary_type): if all(c in _printable_ascii for c in obj): return obj.decode('ascii') raise ValueError('no safe LaTeXification of binary string %r' % obj) raise ValueError('can\'t LaTeXify %r' % obj)
Render an object in LaTeX appropriately.
def _asarray(self, vec): """Convert ``x`` to an array. Here the indices are changed such that the "outer" indices come last in order to have the access order as `numpy.linalg.svd` needs it. This is the inverse of `_asvector`. """ shape = self.domain[0, 0].shape + self.pshape arr = np.empty(shape, dtype=self.domain.dtype) for i, xi in enumerate(vec): for j, xij in enumerate(xi): arr[..., i, j] = xij.asarray() return arr
Convert ``x`` to an array. Here the indices are changed such that the "outer" indices come last in order to have the access order as `numpy.linalg.svd` needs it. This is the inverse of `_asvector`.
def file_download(context, id, file_id, target): """file_download(context, id, path) Download a job file >>> dcictl job-download-file [OPTIONS] :param string id: ID of the job to download file [required] :param string file_id: ID of the job file to download [required] :param string target: Destination file [required] """ dci_file.download(context, id=id, file_id=file_id, target=target)
file_download(context, id, path) Download a job file >>> dcictl job-download-file [OPTIONS] :param string id: ID of the job to download file [required] :param string file_id: ID of the job file to download [required] :param string target: Destination file [required]
def add(addon, dev, interactive): """Add a dependency. Examples: $ django add dynamic-rest==1.5.0 + dynamic-rest == 1.5.0 """ application = get_current_application() application.add( addon, dev=dev, interactive=interactive )
Add a dependency. Examples: $ django add dynamic-rest==1.5.0 + dynamic-rest == 1.5.0
def get_locale(): ''' Get the current system locale CLI Example: .. code-block:: bash salt '*' locale.get_locale ''' ret = '' lc_ctl = salt.utils.systemd.booted(__context__) # localectl on SLE12 is installed but the integration is still broken in latest SP3 due to # config is rewritten by by many %post installation hooks in the older packages. # If you use it -- you will break your config. This is not the case in SLE15 anymore. if lc_ctl and not (__grains__['os_family'] in ['Suse'] and __grains__['osmajorrelease'] in [12]): ret = (_parse_dbus_locale() if dbus is not None else _localectl_status()['system_locale']).get('LANG', '') else: if 'Suse' in __grains__['os_family']: cmd = 'grep "^RC_LANG" /etc/sysconfig/language' elif 'RedHat' in __grains__['os_family']: cmd = 'grep "^LANG=" /etc/sysconfig/i18n' elif 'Debian' in __grains__['os_family']: # this block only applies to Debian without systemd cmd = 'grep "^LANG=" /etc/default/locale' elif 'Gentoo' in __grains__['os_family']: cmd = 'eselect --brief locale show' return __salt__['cmd.run'](cmd).strip() elif 'Solaris' in __grains__['os_family']: cmd = 'grep "^LANG=" /etc/default/init' else: # don't waste time on a failing cmd.run raise CommandExecutionError('Error: "{0}" is unsupported!'.format(__grains__['oscodename'])) if cmd: try: ret = __salt__['cmd.run'](cmd).split('=')[1].replace('"', '') except IndexError as err: log.error('Error occurred while running "%s": %s', cmd, err) return ret
Get the current system locale CLI Example: .. code-block:: bash salt '*' locale.get_locale
def libvlc_media_list_player_new(p_instance): '''Create new media_list_player. @param p_instance: libvlc instance. @return: media list player instance or NULL on error. ''' f = _Cfunctions.get('libvlc_media_list_player_new', None) or \ _Cfunction('libvlc_media_list_player_new', ((1,),), class_result(MediaListPlayer), ctypes.c_void_p, Instance) return f(p_instance)
Create new media_list_player. @param p_instance: libvlc instance. @return: media list player instance or NULL on error.
def cv(params, dtrain, num_boost_round=10, nfold=3, stratified=False, folds=None,
       metrics=(), obj=None, feval=None, maximize=False, early_stopping_rounds=None,
       fpreproc=None, as_pandas=True, verbose_eval=None, show_stdv=True,
       seed=0, callbacks=None, shuffle=True):
    # pylint: disable = invalid-name
    """Cross-validation with given parameters.

    Parameters
    ----------
    params : dict
        Booster params.
    dtrain : DMatrix
        Data to be trained.
    num_boost_round : int
        Number of boosting iterations.
    nfold : int
        Number of folds in CV.
    stratified : bool
        Perform stratified sampling.
    folds : a KFold or StratifiedKFold instance or list of fold indices
        Sklearn KFolds or StratifiedKFolds object.
        Alternatively may explicitly pass sample indices for each fold.
        For ``n`` folds, **folds** should be a length ``n`` list of tuples.
        Each tuple is ``(in,out)`` where ``in`` is a list of indices to be used
        as the training samples for the ``n`` th fold and ``out`` is a list of
        indices to be used as the testing samples for the ``n`` th fold.
    metrics : string or list of strings
        Evaluation metrics to be watched in CV.
    obj : function
        Custom objective function.
    feval : function
        Custom evaluation function.
    maximize : bool
        Whether to maximize feval.
    early_stopping_rounds: int
        Activates early stopping. CV error needs to decrease at least
        every <early_stopping_rounds> round(s) to continue.
        Last entry in evaluation history is the one from best iteration.
    fpreproc : function
        Preprocessing function that takes (dtrain, dtest, param) and returns
        transformed versions of those.
    as_pandas : bool, default True
        Return pd.DataFrame when pandas is installed.
        If False or pandas is not installed, return np.ndarray
    verbose_eval : bool, int, or None, default None
        Whether to display the progress. If None, progress will be displayed
        when np.ndarray is returned. If True, progress will be displayed at
        boosting stage. If an integer is given, progress will be displayed
        at every given `verbose_eval` boosting stage.
    show_stdv : bool, default True
        Whether to display the standard deviation in progress.
        Results are not affected, and always contains std.
    seed : int
        Seed used to generate the folds (passed to numpy.random.seed).
    callbacks : list of callback functions
        List of callback functions that are applied at end of each iteration.
        It is possible to use predefined callbacks by using
        :ref:`Callback API <callback_api>`.
        Example:

        .. code-block:: python

            [xgb.callback.reset_learning_rate(custom_rates)]
    shuffle : bool
        Shuffle data before creating folds.

    Returns
    -------
    evaluation history : list(string)
    """
    if stratified is True and not SKLEARN_INSTALLED:
        raise XGBoostError('sklearn needs to be installed in order to use stratified cv')
    if isinstance(metrics, str):
        # Allow a single metric name to be passed as a plain string.
        metrics = [metrics]

    if isinstance(params, list):
        # params given as [(key, value), ...]: collect every 'eval_metric'
        # entry before dict() collapses duplicate keys to the last one.
        _metrics = [x[1] for x in params if x[0] == 'eval_metric']
        params = dict(params)
        if 'eval_metric' in params:
            params['eval_metric'] = _metrics
    else:
        # Shallow-copy so the caller's dict is not mutated by the pop below.
        params = dict((k, v) for k, v in params.items())

    if (not metrics) and 'eval_metric' in params:
        # No explicit metrics argument: fall back to params['eval_metric'].
        if isinstance(params['eval_metric'], list):
            metrics = params['eval_metric']
        else:
            metrics = [params['eval_metric']]

    # Metrics are handled via the `metrics` list from here on.
    params.pop("eval_metric", None)

    results = {}
    cvfolds = mknfold(dtrain, nfold, params, seed, metrics, fpreproc,
                      stratified, folds, shuffle)

    # setup callbacks
    callbacks = [] if callbacks is None else callbacks
    if early_stopping_rounds is not None:
        callbacks.append(callback.early_stop(early_stopping_rounds,
                                             maximize=maximize,
                                             verbose=False))
    # NOTE: the bool check must precede the int check -- bool is a subclass
    # of int, so verbose_eval=True would otherwise match isinstance(..., int).
    if isinstance(verbose_eval, bool) and verbose_eval:
        callbacks.append(callback.print_evaluation(show_stdv=show_stdv))
    else:
        if isinstance(verbose_eval, int):
            callbacks.append(callback.print_evaluation(verbose_eval, show_stdv=show_stdv))

    # Split callbacks into those run before vs. after each boosting round.
    callbacks_before_iter = [
        cb for cb in callbacks if cb.__dict__.get('before_iteration', False)]
    callbacks_after_iter = [
        cb for cb in callbacks if not cb.__dict__.get('before_iteration', False)]

    for i in range(num_boost_round):
        for cb in callbacks_before_iter:
            cb(CallbackEnv(model=None,
                           cvfolds=cvfolds,
                           iteration=i,
                           begin_iteration=0,
                           end_iteration=num_boost_round,
                           rank=0,
                           evaluation_result_list=None))
        # One boosting step on every fold, then aggregate per-fold metrics.
        for fold in cvfolds:
            fold.update(i, obj)
        res = aggcv([f.eval(i, feval) for f in cvfolds])

        # Accumulate per-round mean/std for each metric key.
        for key, mean, std in res:
            if key + '-mean' not in results:
                results[key + '-mean'] = []
            if key + '-std' not in results:
                results[key + '-std'] = []
            results[key + '-mean'].append(mean)
            results[key + '-std'].append(std)
        try:
            for cb in callbacks_after_iter:
                cb(CallbackEnv(model=None,
                               cvfolds=cvfolds,
                               iteration=i,
                               begin_iteration=0,
                               end_iteration=num_boost_round,
                               rank=0,
                               evaluation_result_list=res))
        except EarlyStopException as e:
            # Early stop: truncate history to the best iteration and finish.
            for k in results:
                results[k] = results[k][:(e.best_iteration + 1)]
            break
    if as_pandas:
        try:
            import pandas as pd
            results = pd.DataFrame.from_dict(results)
        except ImportError:
            # pandas unavailable: silently fall back to the plain dict.
            pass

    return results
Cross-validation with given parameters. Parameters ---------- params : dict Booster params. dtrain : DMatrix Data to be trained. num_boost_round : int Number of boosting iterations. nfold : int Number of folds in CV. stratified : bool Perform stratified sampling. folds : a KFold or StratifiedKFold instance or list of fold indices Sklearn KFolds or StratifiedKFolds object. Alternatively may explicitly pass sample indices for each fold. For ``n`` folds, **folds** should be a length ``n`` list of tuples. Each tuple is ``(in,out)`` where ``in`` is a list of indices to be used as the training samples for the ``n`` th fold and ``out`` is a list of indices to be used as the testing samples for the ``n`` th fold. metrics : string or list of strings Evaluation metrics to be watched in CV. obj : function Custom objective function. feval : function Custom evaluation function. maximize : bool Whether to maximize feval. early_stopping_rounds: int Activates early stopping. CV error needs to decrease at least every <early_stopping_rounds> round(s) to continue. Last entry in evaluation history is the one from best iteration. fpreproc : function Preprocessing function that takes (dtrain, dtest, param) and returns transformed versions of those. as_pandas : bool, default True Return pd.DataFrame when pandas is installed. If False or pandas is not installed, return np.ndarray verbose_eval : bool, int, or None, default None Whether to display the progress. If None, progress will be displayed when np.ndarray is returned. If True, progress will be displayed at boosting stage. If an integer is given, progress will be displayed at every given `verbose_eval` boosting stage. show_stdv : bool, default True Whether to display the standard deviation in progress. Results are not affected, and always contains std. seed : int Seed used to generate the folds (passed to numpy.random.seed). callbacks : list of callback functions List of callback functions that are applied at end of each iteration. 
It is possible to use predefined callbacks by using :ref:`Callback API <callback_api>`. Example: .. code-block:: python [xgb.callback.reset_learning_rate(custom_rates)] shuffle : bool Shuffle data before creating folds. Returns ------- evaluation history : list(string)
def resample_melody_series(times, frequencies, voicing,
                           times_new, kind='linear'):
    """Resamples frequency and voicing time series to a new timescale. Maintains
    any zero ("unvoiced") values in frequencies.

    If ``times`` and ``times_new`` are equivalent, no resampling will be
    performed.

    Parameters
    ----------
    times : np.ndarray
        Times of each frequency value
    frequencies : np.ndarray
        Array of frequency values, >= 0
    voicing : np.ndarray
        Boolean array which indicates voiced or unvoiced
    times_new : np.ndarray
        Times to resample frequency and voicing sequences to
    kind : str
        kind parameter to pass to scipy.interpolate.interp1d.
        (Default value = 'linear')

    Returns
    -------
    frequencies_resampled : np.ndarray
        Frequency array resampled to new timebase
    voicing_resampled : np.ndarray, dtype=bool
        Boolean voicing array resampled to new timebase

    """
    # If the timebases are already the same, no need to interpolate
    if times.shape == times_new.shape and np.allclose(times, times_new):
        # FIX: use builtin ``bool`` -- the ``np.bool`` alias was removed in
        # NumPy 1.24 and raises AttributeError there; ``bool`` is equivalent.
        return frequencies, voicing.astype(bool)
    # Warn when the delta between the original times is not constant,
    # unless times[0] == 0. and frequencies[0] == frequencies[1] (see logic at
    # the beginning of to_cent_voicing)
    if not (np.allclose(np.diff(times), np.diff(times).mean()) or
            (np.allclose(np.diff(times[1:]), np.diff(times[1:]).mean()) and
             frequencies[0] == frequencies[1])):
        warnings.warn(
            "Non-uniform timescale passed to resample_melody_series. Pitch "
            "will be linearly interpolated, which will result in undesirable "
            "behavior if silences are indicated by missing values. Silences "
            "should be indicated by nonpositive frequency values.")
    # Round to avoid floating point problems
    times = np.round(times, 10)
    times_new = np.round(times_new, 10)
    # Add in an additional sample if we'll be asking for a time too large
    if times_new.max() > times.max():
        times = np.append(times, times_new.max())
        frequencies = np.append(frequencies, 0)
        voicing = np.append(voicing, 0)
    # We need to fix zero transitions if interpolation is not zero or nearest
    if kind != 'zero' and kind != 'nearest':
        # Fill in zero values with the last reported frequency
        # to avoid erroneous values when resampling
        frequencies_held = np.array(frequencies)
        for n, frequency in enumerate(frequencies[1:]):
            if frequency == 0:
                frequencies_held[n + 1] = frequencies_held[n]
        # Linearly interpolate frequencies
        frequencies_resampled = scipy.interpolate.interp1d(times,
                                                           frequencies_held,
                                                           kind)(times_new)
        # Retain zeros: zero-order interpolation of the raw frequencies marks
        # where the original signal was silent; zero out those samples.
        frequency_mask = scipy.interpolate.interp1d(times,
                                                    frequencies,
                                                    'zero')(times_new)
        frequencies_resampled *= (frequency_mask != 0)
    else:
        frequencies_resampled = scipy.interpolate.interp1d(times,
                                                           frequencies,
                                                           kind)(times_new)
    # Use nearest-neighbor for voicing if it was used for frequencies
    if kind == 'nearest':
        voicing_resampled = scipy.interpolate.interp1d(times,
                                                       voicing,
                                                       kind)(times_new)
    # otherwise, always use zeroth order
    else:
        voicing_resampled = scipy.interpolate.interp1d(times,
                                                       voicing,
                                                       'zero')(times_new)
    # FIX: bool instead of the removed np.bool alias (see above).
    return frequencies_resampled, voicing_resampled.astype(bool)
Resamples frequency and voicing time series to a new timescale. Maintains any zero ("unvoiced") values in frequencies. If ``times`` and ``times_new`` are equivalent, no resampling will be performed. Parameters ---------- times : np.ndarray Times of each frequency value frequencies : np.ndarray Array of frequency values, >= 0 voicing : np.ndarray Boolean array which indicates voiced or unvoiced times_new : np.ndarray Times to resample frequency and voicing sequences to kind : str kind parameter to pass to scipy.interpolate.interp1d. (Default value = 'linear') Returns ------- frequencies_resampled : np.ndarray Frequency array resampled to new timebase voicing_resampled : np.ndarray, dtype=bool Boolean voicing array resampled to new timebase
def set_eol_chars(self, text):
    """Set widget end-of-line (EOL) characters from text (analyzes text)"""
    if not is_text_string(text): # testing for QString (PyQt API#1)
        text = to_text_string(text)
    eol_chars = sourcecode.get_eol_chars(text)
    # NOTE(review): the document is marked modified only when BOTH the newly
    # detected EOL and the previously stored one are non-None -- presumably
    # "EOL convention changed on an already-analyzed document"; confirm this
    # is intended rather than `eol_chars != self.eol_chars`.
    is_document_modified = eol_chars is not None and self.eol_chars is not None
    self.eol_chars = eol_chars
    if is_document_modified:
        self.document().setModified(True)
        # Notify listeners (if the signal is wired up) of the new EOL mode.
        if self.sig_eol_chars_changed is not None:
            self.sig_eol_chars_changed.emit(eol_chars)
Set widget end-of-line (EOL) characters from text (analyzes text)
def flip(self):
    """
    Provide flip view to compare how key/value pair is defined in each
    environment for administrative usage.

    :rtype: dict
    """
    self._load()
    inverted = {}
    for group in self.config.keys():
        entries = self.config[group]
        for key in entries:
            # key -> {group: value} mapping, created lazily per key.
            inverted.setdefault(key, {})[group] = entries[key]
    return inverted
Provide flip view to compare how key/value pair is defined in each environment for administrative usage. :rtype: dict
def contains_rva(self, rva): """Check whether the section contains the address provided.""" # Check if the SizeOfRawData is realistic. If it's bigger than the size of # the whole PE file minus the start address of the section it could be # either truncated or the SizeOfRawData contains a misleading value. # In either of those cases we take the VirtualSize # if len(self.pe.__data__) - self.pe.adjust_FileAlignment( self.PointerToRawData, self.pe.OPTIONAL_HEADER.FileAlignment ) < self.SizeOfRawData: # PECOFF documentation v8 says: # VirtualSize: The total size of the section when loaded into memory. # If this value is greater than SizeOfRawData, the section is zero-padded. # This field is valid only for executable images and should be set to zero # for object files. # size = self.Misc_VirtualSize else: size = max(self.SizeOfRawData, self.Misc_VirtualSize) VirtualAddress_adj = self.pe.adjust_SectionAlignment( self.VirtualAddress, self.pe.OPTIONAL_HEADER.SectionAlignment, self.pe.OPTIONAL_HEADER.FileAlignment ) # Check whether there's any section after the current one that starts before the # calculated end for the current one. If so, cut the current section's size # to fit in the range up to where the next section starts. if (self.next_section_virtual_address is not None and self.next_section_virtual_address > self.VirtualAddress and VirtualAddress_adj + size > self.next_section_virtual_address): size = self.next_section_virtual_address - VirtualAddress_adj return VirtualAddress_adj <= rva < VirtualAddress_adj + size
Check whether the section contains the address provided.
def find_best_root(self, force_positive=True, slope=None):
    """
    determine the position on the tree that minimizes the bilinear
    product of the inverse covariance and the data vectors.

    Parameters
    ----------
    force_positive : bool
        when True, only accept candidate roots whose regression slope
        is non-negative.
    slope : float, optional
        fix the regression slope to this value instead of fitting it.

    Returns
    -------
    best_root : (dict)
        dictionary with the node, the fraction `x` at which the branch
        is to be split, and the regression parameters; None if no valid
        root was found.
    """
    self._calculate_averages()

    # Scan every non-root branch; keep the split point with minimal chisq.
    best_root = {"chisq": np.inf}
    for n in self.tree.find_clades():
        if n==self.tree.root:
            continue

        tv = self.tip_value(n)
        bv = self.branch_value(n)
        var = self.branch_variance(n)
        x, chisq = self._optimal_root_along_branch(n, tv, bv, var, slope=slope)

        if (chisq<best_root["chisq"]):
            # Combine the averages of the two subtrees created by splitting
            # the branch at fraction x (ingroup + outgroup contributions).
            tmpQ = self.propagate_averages(n, tv, bv*x, var*x) \
                 + self.propagate_averages(n, tv, bv*(1-x), var*(1-x), outgroup=True)
            reg = base_regression(tmpQ, slope=slope)
            if reg["slope"]>=0 or (force_positive==False):
                best_root = {"node":n, "split":x}
                best_root.update(reg)

    if 'node' not in best_root:
        print("TreeRegression.find_best_root: No valid root found!", force_positive)
        return None

    if 'hessian' in best_root:
        # calculate differentials with respect to x by re-evaluating the
        # regression at split positions slightly below and above the optimum
        deriv = []
        n = best_root["node"]
        tv = self.tip_value(n)
        bv = self.branch_value(n)
        var = self.branch_variance(n)
        for dx in [-0.001, 0.001]:
            # Clamp the perturbed split fraction into [0, 1].
            y = min(1.0, max(0.0, best_root["split"]+dx))
            tmpQ = self.propagate_averages(n, tv, bv*y, var*y) \
                 + self.propagate_averages(n, tv, bv*(1-y), var*(1-y), outgroup=True)
            reg = base_regression(tmpQ, slope=slope)
            deriv.append([y,reg['chisq'], tmpQ[tavgii], tmpQ[davgii]])

        # Extend the 2x2 regression hessian with a third row/column for the
        # split coordinate; [2,2] is a finite-difference second derivative.
        estimator_hessian = np.zeros((3,3))
        estimator_hessian[:2,:2] = best_root['hessian']
        estimator_hessian[2,2] = (deriv[0][1] + deriv[1][1] - 2.0*best_root['chisq'])/(deriv[0][0] - deriv[1][0])**2
        # estimator_hessian[2,0] = (deriv[0][2] - deriv[1][2])/(deriv[0][0] - deriv[1][0])
        # estimator_hessian[2,1] = (deriv[0][3] - deriv[1][3])/(deriv[0][0] - deriv[1][0])
        # NOTE(review): the two assignments above are commented out, so the
        # symmetrization below copies zeros -- the cross-terms between the
        # split coordinate and the regression parameters are deliberately(?)
        # dropped. Confirm this is intended.
        estimator_hessian[0,2] = estimator_hessian[2,0]
        estimator_hessian[1,2] = estimator_hessian[2,1]
        best_root['hessian'] = estimator_hessian
        best_root['cov'] = np.linalg.inv(estimator_hessian)

    return best_root
determine the position on the tree that minimizes the bilinear product of the inverse covariance and the data vectors. Returns ------- best_root : (dict) dictionary with the node, the fraction `x` at which the branch is to be split, and the regression parameters
def pipe_dateformat(context=None, _INPUT=None, conf=None, **kwargs):
    """Formats a datetime value. Loopable.

    Parameters
    ----------
    context : pipe2py.Context object
    _INPUT : pipedatebuilder pipe like object (iterable of date timetuples)
    conf : {
        'format': {'value': <'%B %d, %Y'>},
        'timezone': {'value': <'EST'>}
    }

    Yields
    ------
    _OUTPUT : formatted dates
    """
    conf = DotDict(conf)
    loop_with = kwargs.pop('with', None)
    date_format = conf.get('format', **kwargs)
    # timezone = conf.get('timezone', **kwargs)

    for item in _INPUT:
        # When 'with' is given, format the sub-field it names; else the item.
        _with = item.get(loop_with, **kwargs) if loop_with else item

        try:
            # todo: check that all PHP formats are covered by Python
            date_string = time.strftime(date_format, _with)
        except TypeError as e:
            # Item is not a valid timetuple: report (in verbose mode) and
            # skip it rather than aborting the whole stream.
            # FIX: parenthesized print form -- the original used Python-2-only
            # `print x` statements, a SyntaxError under Python 3; single-arg
            # print(...) behaves identically on both interpreters.
            if context and context.verbose:
                print('Error formatting date: %s' % item)
                print(e)
            continue
        else:
            yield date_string
Formats a datetime value. Loopable. Parameters ---------- context : pipe2py.Context object _INPUT : pipedatebuilder pipe like object (iterable of date timetuples) conf : { 'format': {'value': <'%B %d, %Y'>}, 'timezone': {'value': <'EST'>} } Yields ------ _OUTPUT : formatted dates
def add(self, device):
    """Add device."""
    if isinstance(device, Device):
        self.__devices.append(device)
    else:
        # Reject anything that is not a Device instance.
        raise TypeError()
Add device.
def connect(self, From, to, protocolName, clientFactory, chooser):
    """
    Issue an INBOUND command, creating a virtual connection to the peer,
    given identifying information about the endpoint to connect to, and a
    protocol factory.

    @param clientFactory: a *Client* ProtocolFactory instance which will
    generate a protocol upon connect.

    @return: a Deferred which fires with the protocol instance that was
    connected, or fails with AttemptsFailed if the connection was not
    possible.
    """
    publicIP = self._determinePublicIP()

    A = dict(From=From,
             to=to,
             protocol=protocolName)
    if self.service.dispatcher is not None:
        # Tell them exactly where they can shove it
        A['udp_source'] = (publicIP,
                           self.service.sharedUDPPortnum)
    else:
        # Don't tell them because we don't know
        log.msg("dispatcher unavailable when connecting")

    D = self.callRemote(Inbound, **A)

    def _connected(answer):
        # The peer answered with candidate listeners; let the caller-supplied
        # chooser pick which ones to try (may itself return a Deferred).
        listenersD = defer.maybeDeferred(chooser, answer['listeners'])

        def gotListeners(listeners):
            # Fire off a connection attempt for every chosen listener in
            # parallel; collect (success, result) pairs via DeferredList.
            allConnectionAttempts = []
            for listener in listeners:
                d = self.attemptConnectionMethods(
                    listener['methods'],
                    listener['id'],
                    From, to,
                    protocolName, clientFactory,
                    )
                allConnectionAttempts.append(d)
            return defer.DeferredList(allConnectionAttempts)

        listenersD.addCallback(gotListeners)

        def finishedAllAttempts(results):
            succeededAny = False
            failures = []
            if not results:
                # chooser selected nothing -- no path was even attempted.
                return Failure(NoAttemptsMade(
                        "there was no available path for connections "
                        "(%r->%r/%s)" % (From, to, protocolName)))
            for succeeded, result in results:
                if succeeded:
                    succeededAny = True
                    randomConnection = result
                    break
                else:
                    failures.append(result)

            if not succeededAny:
                return Failure(
                    AttemptsFailed(
                        [failure.getBriefTraceback() for failure in failures]))

            # XXX TODO: this connection is really random; connectQ2Q should
            # not return one of the connections it's made, put it into your
            # protocol's connectionMade handler

            return randomConnection

        return listenersD.addCallback(finishedAllAttempts)

    return D.addCallback(_connected)
Issue an INBOUND command, creating a virtual connection to the peer, given identifying information about the endpoint to connect to, and a protocol factory. @param clientFactory: a *Client* ProtocolFactory instance which will generate a protocol upon connect. @return: a Deferred which fires with the protocol instance that was connected, or fails with AttemptsFailed if the connection was not possible.
def pickle_dump(self):
    """Save the status of the object in pickle format."""
    # Serialize this object into <workdir>/<PICKLE_FNAME>.
    target = os.path.join(self.workdir, self.PICKLE_FNAME)
    with open(target, mode="wb") as fh:
        pickle.dump(self, fh)
Save the status of the object in pickle format.
def mcp_als(X, rank, mask, random_state=None, init='randn', **options): """Fits CP Decomposition with missing data using Alternating Least Squares (ALS). Parameters ---------- X : (I_1, ..., I_N) array_like A tensor with ``X.ndim >= 3``. rank : integer The `rank` sets the number of components to be computed. mask : (I_1, ..., I_N) array_like A binary tensor with the same shape as ``X``. All entries equal to zero correspond to held out or missing data in ``X``. All entries equal to one correspond to observed entries in ``X`` and the decomposition is fit to these datapoints. random_state : integer, ``RandomState``, or ``None``, optional (default ``None``) If integer, sets the seed of the random number generator; If RandomState instance, random_state is the random number generator; If None, use the RandomState instance used by ``numpy.random``. init : str, or KTensor, optional (default ``'randn'``). Specifies initial guess for KTensor factor matrices. If ``'randn'``, Gaussian random numbers are used to initialize. If ``'rand'``, uniform random numbers are used to initialize. If KTensor instance, a copy is made to initialize the optimization. options : dict, specifying fitting options. tol : float, optional (default ``tol=1E-5``) Stopping tolerance for reconstruction error. max_iter : integer, optional (default ``max_iter = 500``) Maximum number of iterations to perform before exiting. min_iter : integer, optional (default ``min_iter = 1``) Minimum number of iterations to perform before exiting. max_time : integer, optional (default ``max_time = np.inf``) Maximum computational time before exiting. verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``) Display progress. Returns ------- result : FitResult instance Object which holds the fitted results. It provides the factor matrices in form of a KTensor, ``result.factors``. Notes ----- Fitting CP decompositions with missing data can be exploited to perform cross-validation. 
References ---------- Williams, A. H. "Solving Least-Squares Regression with Missing Data." http://alexhwilliams.info/itsneuronalblog/2018/02/26/censored-lstsq/ """ # Check inputs. optim_utils._check_cpd_inputs(X, rank) # Initialize problem. U, _ = optim_utils._get_initial_ktensor(init, X, rank, random_state, scale_norm=False) result = FitResult(U, 'MCP_ALS', **options) normX = np.linalg.norm((X * mask)) # Main optimization loop. while result.still_optimizing: # Iterate over each tensor mode. for n in range(X.ndim): # i) Normalize factors to prevent singularities. U.rebalance() # ii) Unfold data and mask along the nth mode. unf = unfold(X, n) # i_n x N m = unfold(mask, n) # i_n x N # iii) Form Khatri-Rao product of factors matrices. components = [U[j] for j in range(X.ndim) if j != n] krt = khatri_rao(components).T # N x r # iv) Broadcasted solve of linear systems. # Left hand side of equations, R x R x X.shape[n] # Right hand side of equations, X.shape[n] x R x 1 lhs_stack = np.matmul(m[:, None, :] * krt[None, :, :], krt.T[None, :, :]) rhs_stack = np.dot(unf * m, krt.T)[:, :, None] # vi) Update factor. U[n] = np.linalg.solve(lhs_stack, rhs_stack).reshape(X.shape[n], rank) # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Update the optimization result, checks for convergence. # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Compute objective function # grams *= U[-1].T.dot(U[-1]) # obj = np.sqrt(np.sum(grams) - 2*sci.sum(p*U[-1]) + normX**2) / normX obj = linalg.norm(mask * (U.full() - X)) / normX # Update result result.update(obj) # Finalize and return the optimization result. return result.finalize()
Fits CP Decomposition with missing data using Alternating Least Squares (ALS). Parameters ---------- X : (I_1, ..., I_N) array_like A tensor with ``X.ndim >= 3``. rank : integer The `rank` sets the number of components to be computed. mask : (I_1, ..., I_N) array_like A binary tensor with the same shape as ``X``. All entries equal to zero correspond to held out or missing data in ``X``. All entries equal to one correspond to observed entries in ``X`` and the decomposition is fit to these datapoints. random_state : integer, ``RandomState``, or ``None``, optional (default ``None``) If integer, sets the seed of the random number generator; If RandomState instance, random_state is the random number generator; If None, use the RandomState instance used by ``numpy.random``. init : str, or KTensor, optional (default ``'randn'``). Specifies initial guess for KTensor factor matrices. If ``'randn'``, Gaussian random numbers are used to initialize. If ``'rand'``, uniform random numbers are used to initialize. If KTensor instance, a copy is made to initialize the optimization. options : dict, specifying fitting options. tol : float, optional (default ``tol=1E-5``) Stopping tolerance for reconstruction error. max_iter : integer, optional (default ``max_iter = 500``) Maximum number of iterations to perform before exiting. min_iter : integer, optional (default ``min_iter = 1``) Minimum number of iterations to perform before exiting. max_time : integer, optional (default ``max_time = np.inf``) Maximum computational time before exiting. verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``) Display progress. Returns ------- result : FitResult instance Object which holds the fitted results. It provides the factor matrices in form of a KTensor, ``result.factors``. Notes ----- Fitting CP decompositions with missing data can be exploited to perform cross-validation. References ---------- Williams, A. H. "Solving Least-Squares Regression with Missing Data." 
http://alexhwilliams.info/itsneuronalblog/2018/02/26/censored-lstsq/
def display_task_progress(
    self, instance, project, region, request_id=None, user=None,
    poll_interval=60):
  """Displays the overall progress of tasks in a Turbinia job.

  Polls the Turbinia server every ``poll_interval`` seconds and prints a
  status update whenever the number of completed tasks grows; returns once
  every task has finished.

  Args:
    instance (string): The name of the Turbinia instance
    project (string): The project containing the disk to process
    region (string): Region where turbinia is configured.
    request_id (string): The request ID provided by Turbinia.
    user (string): The username to filter tasks by.
    poll_interval (int): The interval at which to poll for new results.
  """
  total_completed = 0
  while True:
    task_results = self.client.get_task_data(
        instance, project, region, request_id=request_id, user=user)

    # Index tasks by id; a task with a non-None 'successful' field has
    # finished (successfully or not), otherwise it is still pending.
    tasks = {task['id']: task for task in task_results}
    completed_tasks = set()
    pending_tasks = set()
    for task in tasks.values():
      if task.get('successful') is not None:
        completed_tasks.add(task['id'])
      else:
        pending_tasks.add(task['id'])

    # Print a full status report on the first poll (no completions yet) and
    # every time additional tasks have completed since the last report.
    if len(completed_tasks) > total_completed or not completed_tasks:
      total_completed = len(completed_tasks)
      print('Task status update (completed: {0:d} | pending: {1:d})'.format(
          len(completed_tasks), len(pending_tasks)))
      print('Completed tasks:')
      for task_id in completed_tasks:
        self._print_task_data(tasks[task_id])
      print('Pending tasks:')
      for task_id in pending_tasks:
        self._print_task_data(tasks[task_id])

    # All tasks done (and at least one exists): stop polling.
    if len(completed_tasks) == len(task_results) and completed_tasks:
      print('All {0:d} Tasks completed'.format(len(task_results)))
      return

    time.sleep(poll_interval)
Displays the overall progress of tasks in a Turbinia job. Args: instance (string): The name of the Turbinia instance project (string): The project containing the disk to process region (string): Region where turbinia is configured. request_id (string): The request ID provided by Turbinia. user (string): The username to filter tasks by. poll_interval (int): The interval at which to poll for new results.
def is_valid_filename(filename, return_ext=False):
    """Check whether the argument is a filename."""
    ext = Path(filename).suffixes
    if len(ext) > 2:
        logg.warn('Your filename has more than two extensions: {}.\n'
                  'Only considering the two last: {}.'.format(ext, ext[-2:]))
        ext = ext[-2:]
    # gzipped/bzipped text file, e.g. '.csv.gz'
    if len(ext) == 2 and ext[0][1:] in text_exts and ext[1][1:] in ('gz', 'bz2'):
        return ext[0][1:] if return_ext else True
    # plain recognized extension
    if ext and ext[-1][1:] in avail_exts:
        return ext[-1][1:] if return_ext else True
    # special-cased compound extensions
    joined = ''.join(ext)
    if joined == '.soft.gz':
        return 'soft.gz' if return_ext else True
    if joined == '.mtx.gz':
        return 'mtx.gz' if return_ext else True
    # not recognized: raise only when the caller asked for the extension
    if return_ext:
        raise ValueError('"{}" does not end on a valid extension.\n'
                         'Please, provide one of the available extensions.\n{}\n'
                         'Text files with .gz and .bz2 extensions are also supported.'
                         .format(filename, avail_exts))
    return False
Check whether the argument is a filename.
def fuzzy_get_value(obj, approximate_key, default=None, **kwargs):
    """ Like fuzzy_get, but assume the obj is dict-like and return the value without the key

    Notes:
      Argument order is in reverse order relative to `fuzzywuzzy.process.extractOne()`
        but in the same order as get(self, key) method on dicts

    Arguments:
      obj (dict-like): object to run the get method on using the key that is most similar to one within the dict
      approximate_key (str): key to look for a fuzzy match within the dict keys
      default (obj): the value to return if a similar key cannot be found in the `possible_keys`
      similarity (str): fractional similarity between the approximate_key and the dict key
        (0.9 means 90% of characters must be identical)
      tuple_joiner (str): Character to use as delimiter/joiner between tuple elements.
        Used to create keys of any tuples to be able to use fuzzywuzzy string matching on it.
      key_and_value (bool): Whether to return both the key and its value (True) or just the value (False).
        Default is the same behavior as dict.get (i.e. key_and_value=False)
      dict_keys (list of str): if you already have a set of keys to search, this will save this function
        a little time and RAM

    Examples:
      >>> fuzzy_get_value({'seller': 2.7, 'sailor': set('e')}, 'sail') == set(['e'])
      True
      >>> fuzzy_get_value({'seller': 2.7, 'sailor': set('e'), 'camera': object()}, 'SLR')
      2.7
      >>> fuzzy_get_value({'seller': 2.7, 'sailor': set('e'), 'camera': object()}, 'I') == set(['e'])
      True
      >>> fuzzy_get_value({'word': tuple('word'), 'noun': tuple('noun')}, 'woh!', similarity=.3)
      ('w', 'o', 'r', 'd')
      >>> df = pd.DataFrame(np.arange(6*2).reshape(2,6), columns=('alpha','beta','omega','begin','life','end'))
      >>> fuzzy_get_value(df, 'life')[0], fuzzy_get(df, 'omega')[0]
      (4, 2)
    """
    # NOTE(review): the `default` parameter is accepted but never used or
    # forwarded to fuzzy_get -- confirm whether it should be passed through.
    dict_obj = OrderedDict(obj)
    try:
        # Integer-like keys index positionally into the ordered keys.
        return dict_obj[list(dict_obj.keys())[int(approximate_key)]]
    except (ValueError, IndexError):
        # Not an integer, or out of range: fall through to fuzzy matching.
        pass
    return fuzzy_get(dict_obj, approximate_key, key_and_value=False, **kwargs)
Like fuzzy_get, but assume the obj is dict-like and return the value without the key Notes: Argument order is in reverse order relative to `fuzzywuzzy.process.extractOne()` but in the same order as get(self, key) method on dicts Arguments: obj (dict-like): object to run the get method on using the key that is most similar to one within the dict approximate_key (str): key to look for a fuzzy match within the dict keys default (obj): the value to return if a similar key cannot be found in the `possible_keys` similarity (str): fractional similarity between the approximate_key and the dict key (0.9 means 90% of characters must be identical) tuple_joiner (str): Character to use as delimiter/joiner between tuple elements. Used to create keys of any tuples to be able to use fuzzywuzzy string matching on it. key_and_value (bool): Whether to return both the key and its value (True) or just the value (False). Default is the same behavior as dict.get (i.e. key_and_value=False) dict_keys (list of str): if you already have a set of keys to search, this will save this function a little time and RAM Examples: >>> fuzzy_get_value({'seller': 2.7, 'sailor': set('e')}, 'sail') == set(['e']) True >>> fuzzy_get_value({'seller': 2.7, 'sailor': set('e'), 'camera': object()}, 'SLR') 2.7 >>> fuzzy_get_value({'seller': 2.7, 'sailor': set('e'), 'camera': object()}, 'I') == set(['e']) True >>> fuzzy_get_value({'word': tuple('word'), 'noun': tuple('noun')}, 'woh!', similarity=.3) ('w', 'o', 'r', 'd') >>> df = pd.DataFrame(np.arange(6*2).reshape(2,6), columns=('alpha','beta','omega','begin','life','end')) >>> fuzzy_get_value(df, 'life')[0], fuzzy_get(df, 'omega')[0] (4, 2)
def render(self, template, context=None, at_paths=None, at_encoding=anytemplate.compat.ENCODING, **kwargs):
    """Render the given template file to a string.

    :param template: Template file path
    :param context: A dict or dict-like object to instantiate given
        template file; ``None`` is treated as an empty context
    :param at_paths: Template search paths
    :param at_encoding: Template encoding
    :param kwargs: Keyword arguments passed to the template engine to
        render templates with specific features enabled.

    :return: Rendered string
    """
    kwargs = self.filter_options(kwargs, self.render_valid_options())
    paths = anytemplate.utils.mk_template_paths(template, at_paths)
    # Log before defaulting `context`; the original checked `context is None`
    # only after resetting it to {}, so "without" could never be logged.
    LOGGER.debug("Render template %s %s context, options=%s",
                 template, "without" if context is None else "with a",
                 str(kwargs))
    if context is None:
        context = {}
    return self.render_impl(template, context, at_paths=paths,
                            at_encoding=at_encoding, **kwargs)
:param template: Template file path :param context: A dict or dict-like object to instantiate given template file :param at_paths: Template search paths :param at_encoding: Template encoding :param kwargs: Keyword arguments passed to the template engine to render templates with specific features enabled. :return: Rendered string
def covar(X, remove_mean=False, modify_data=False, weights=None, sparse_mode='auto', sparse_tol=0.0):
    """ Computes the covariance matrix of X

    Computes

    .. math:
        C_XX &=& X^\top X

    while exploiting zero or constant columns in the data matrix.
    WARNING: Directly use moments_XX if you can. This function does an
    additional constant-matrix multiplication and does not return the mean.

    Parameters
    ----------
    X : ndarray (T, M)
        Data matrix
    remove_mean : bool
        True: remove column mean from the data, False: don't remove mean.
    modify_data : bool
        If remove_mean=True, the mean will be removed in the data matrix X,
        without creating an independent copy. This option is faster but
        might lead to surprises because your input array is changed.
    weights : None or ndarray(T, )
        weights assigned to each trajectory point of X. If None, all data
        points have weight one. If ndarray, each data point is assigned a
        separate weight.
    sparse_mode : str
        one of: 'dense' (always dense), 'sparse' (sparse when possible),
        'auto' (automatic).
    sparse_tol: float
        Threshold for considering a column to be zero:
        if max(abs(X[:, i])) < sparse_tol, row/column i of the covariance
        matrix will be set to zero.

    Returns
    -------
    C_XX : ndarray (M, M)
        Covariance matrix of X

    See also
    --------
    moments_XX
    """
    # moments_XX returns (total weight, mean, raw second moment); only the
    # weight and the moment are needed for the covariance.
    weight_sum, _mean, second_moment = moments_XX(
        X, remove_mean=remove_mean, weights=weights, modify_data=modify_data,
        sparse_mode=sparse_mode, sparse_tol=sparse_tol)
    return second_moment / float(weight_sum)
Computes the covariance matrix of X Computes .. math: C_XX &=& X^\top X while exploiting zero or constant columns in the data matrix. WARNING: Directly use moments_XX if you can. This function does an additional constant-matrix multiplication and does not return the mean. Parameters ---------- X : ndarray (T, M) Data matrix remove_mean : bool True: remove column mean from the data, False: don't remove mean. modify_data : bool If remove_mean=True, the mean will be removed in the data matrix X, without creating an independent copy. This option is faster but might lead to surprises because your input array is changed. weights : None or ndarray(T, ) weights assigned to each trajectory point of X. If None, all data points have weight one. If ndarray, each data point is assigned a separate weight. sparse_mode : str one of: * 'dense' : always use dense mode * 'sparse' : always use sparse mode if possible * 'auto' : automatic sparse_tol: float Threshold for considering column to be zero in order to save computing effort when the data is sparse or almost sparse. If max(abs(X[:, i])) < sparse_tol, then row i (and also column i if Y is not given) of the covariance matrix will be set to zero. If Y is given and max(abs(Y[:, i])) < sparse_tol, then column i of the covariance matrix will be set to zero. Returns ------- C_XX : ndarray (M, M) Covariance matrix of X See also -------- moments_XX
def asset_path(path, format_kwargs=None, keep_slash=False):
    """Get absolute path to asset in package.

    ``path`` can be just a package name like 'package' or it can be a package
    name and a relative file system path like 'package:util'.

    If ``path`` ends with a slash, it will be stripped unless ``keep_slash``
    is set (for use with ``rsync``, for example).

    ``format_kwargs``, when given, is applied to ``path`` with
    ``str.format_map`` before any other processing.  (Fixed: the default used
    to be a shared mutable ``{}``; ``None`` is backward-compatible since the
    mapping is only read.)

    Raises ``ValueError`` if the package cannot be imported or is a
    namespace package (which has no ``__file__``).

    >>> file_path = os.path.normpath(__file__)
    >>> dir_name = os.path.dirname(file_path)
    >>> file_name = os.path.basename(file_path)
    >>> os.chdir(dir_name)
    >>>
    >>> asset_path('runcommands.util') == dir_name
    True
    >>> asset_path('runcommands.util:path.py') == file_path
    True
    >>> asset_path('runcommands.util:{name}.py', format_kwargs={'name': 'path'}) == file_path
    True
    >>> asset_path('runcommands.util:dir/') == (dir_name + '/dir')
    True
    >>> asset_path('runcommands.util:dir/', keep_slash=True) == (dir_name + '/dir/')
    True

    """
    if format_kwargs:
        path = path.format_map(format_kwargs)
    # Remember whether the caller asked for a trailing slash before normpath
    # strips it.
    has_slash = path.endswith(os.sep)
    if ':' in path:
        package_name, *rel_path = path.split(':', 1)
    else:
        package_name, rel_path = path, ()
    try:
        package = importlib.import_module(package_name)
    except ImportError:
        raise ValueError(
            'Could not get asset path for {path}; could not import package: {package_name}'
            .format_map(locals()))
    # Namespace packages have no __file__ to anchor a file-system path.
    if not hasattr(package, '__file__'):
        raise ValueError("Can't compute path relative to namespace package")
    package_path = os.path.dirname(package.__file__)
    path = os.path.join(package_path, *rel_path)
    path = os.path.normpath(path)
    if has_slash and keep_slash:
        path = '{path}{slash}'.format(path=path, slash=os.sep)
    return path
Get absolute path to asset in package. ``path`` can be just a package name like 'package' or it can be a package name and a relative file system path like 'package:util'. If ``path`` ends with a slash, it will be stripped unless ``keep_slash`` is set (for use with ``rsync``, for example). >>> file_path = os.path.normpath(__file__) >>> dir_name = os.path.dirname(file_path) >>> file_name = os.path.basename(file_path) >>> os.chdir(dir_name) >>> >>> asset_path('runcommands.util') == dir_name True >>> asset_path('runcommands.util:path.py') == file_path True >>> asset_path('runcommands.util:{name}.py', format_kwargs={'name': 'path'}) == file_path True >>> asset_path('runcommands.util:dir/') == (dir_name + '/dir') True >>> asset_path('runcommands.util:dir/', keep_slash=True) == (dir_name + '/dir/') True
def price_options(S=100.0, K=100.0, sigma=0.25, r=0.05, days=260, paths=10000):
    """ Price European and Asian options using a Monte Carlo method.

    Simulates geometric-Brownian-motion price paths with one step per day and
    discounts the expected payoffs at the risk-free rate.

    Parameters
    ----------
    S : float
        The initial price of the stock.
    K : float
        The strike price of the option.
    sigma : float
        The volatility of the stock.
    r : float
        The risk free interest rate.
    days : int
        The number of days until the option expires.
    paths : int
        The number of Monte Carlo paths used to price the option.

    Returns
    -------
    A tuple of (E. call, E. put, A. call, A. put) option prices.
    """
    import numpy as np
    from math import exp, sqrt

    step = 1.0 / days
    # Per-step deterministic drift and volatility scale.
    drift = exp((r - 0.5 * sigma ** 2) * step)
    vol = sigma * sqrt(step)

    prices = S * np.ones(paths, dtype='float64')
    price_total = np.zeros(paths, dtype='float64')
    for _day in range(days):
        shocks = np.random.standard_normal(paths)
        growth = drift * np.exp(vol * shocks)
        prices = prices * growth
        price_total = price_total + prices

    averages = price_total / days
    floor = np.zeros(paths, dtype='float64')
    discount = exp(-r * step * days)

    euro_put = discount * np.mean(np.maximum(floor, K - prices))
    asian_put = discount * np.mean(np.maximum(floor, K - averages))
    euro_call = discount * np.mean(np.maximum(floor, prices - K))
    asian_call = discount * np.mean(np.maximum(floor, averages - K))
    return (euro_call, euro_put, asian_call, asian_put)
Price European and Asian options using a Monte Carlo method. Parameters ---------- S : float The initial price of the stock. K : float The strike price of the option. sigma : float The volatility of the stock. r : float The risk free interest rate. days : int The number of days until the option expires. paths : int The number of Monte Carlo paths used to price the option. Returns ------- A tuple of (E. call, E. put, A. call, A. put) option prices.
def _add_constraints(self, relation):
    """Add the given relation as one or more constraints.

    Return a list of the names (GLPK row indices) of the constraints added.
    """
    expr = relation.expression
    num_rows = sum(1 for _ in expr.value_sets())
    if num_rows == 0:
        return []

    # glp_add_rows returns the index of the first newly allocated row.
    first_row = swiglpk.glp_add_rows(self._p, num_rows)
    names = []
    for offset, terms in enumerate(expr.value_sets()):
        row = first_row + offset
        terms = list(terms)
        # GLPK arrays are 1-based; slot 0 is deliberately unused.
        indices = swiglpk.intArray(1 + len(terms))
        values = swiglpk.doubleArray(1 + len(terms))
        for pos, (variable, coeff) in enumerate(terms, start=1):
            indices[pos] = self._variables[variable]
            values[pos] = float(coeff)
        swiglpk.glp_set_mat_row(
            self._p, row, len(terms), indices, values)

        # Move the constant offset to the right-hand side of the relation.
        bound = -float(expr.offset)
        if relation.sense == RelationSense.Greater:
            swiglpk.glp_set_row_bnds(
                self._p, row, swiglpk.GLP_LO, bound, 0)
        elif relation.sense == RelationSense.Less:
            swiglpk.glp_set_row_bnds(
                self._p, row, swiglpk.GLP_UP, 0, bound)
        else:
            swiglpk.glp_set_row_bnds(
                self._p, row, swiglpk.GLP_FX, bound, 0)

        names.append(row)

    self._do_presolve = True
    return names
Add the given relation as one or more constraints. Return a list of the names of the constraints added.
def format_path(path):
    '''Formats a path as a string, placing / between each component.

    @param path A path in rtctree format, as a tuple with the port name as
                the second component.

    Examples:

    >>> format_path((['localhost:30000', 'manager', 'comp0.rtc'], None))
    'localhost:30000/manager/comp0.rtc'
    >>> format_path((['localhost', 'manager', 'comp0.rtc'], 'in'))
    'localhost/manager/comp0.rtc:in'
    >>> format_path((['/', 'localhost', 'manager', 'comp0.rtc'], None))
    '/localhost/manager/comp0.rtc'
    >>> format_path((['/', 'localhost', 'manager', 'comp0.rtc'], 'in'))
    '/localhost/manager/comp0.rtc:in'
    >>> format_path((['manager', 'comp0.rtc'], None))
    'manager/comp0.rtc'
    >>> format_path((['comp0.rtc'], None))
    'comp0.rtc'

    '''
    components, port_name = path
    suffix = (':' + port_name) if port_name else ''
    # A bare-string path is singular: no slashes are inserted.
    if type(components) is str:
        return components + suffix
    # A leading '/' component marks an absolute path; it becomes the prefix
    # rather than a joined element.
    if components[0] == '/':
        return '/' + '/'.join(components[1:]) + suffix
    return '/'.join(components) + suffix
Formats a path as a string, placing / between each component. @param path A path in rtctree format, as a tuple with the port name as the second component. Examples: >>> format_path((['localhost:30000', 'manager', 'comp0.rtc'], None)) 'localhost:30000/manager/comp0.rtc' >>> format_path((['localhost', 'manager', 'comp0.rtc'], 'in')) 'localhost/manager/comp0.rtc:in' >>> format_path((['/', 'localhost', 'manager', 'comp0.rtc'], None)) '/localhost/manager/comp0.rtc' >>> format_path((['/', 'localhost', 'manager', 'comp0.rtc'], 'in')) '/localhost/manager/comp0.rtc:in' >>> format_path((['manager', 'comp0.rtc'], None)) 'manager/comp0.rtc' >>> format_path((['comp0.rtc'], None)) 'comp0.rtc'
def add_pegasus_profile(self, namespace, key, value):
    """ Add a Pegasus profile to this job which will be written to the dax as
    <profile namespace="NAMESPACE" key="KEY">VALUE</profile>
    This can be used to add classads to particular jobs in the DAX
    @param namespace: A valid Pegasus namespace, e.g. condor.
    @param key: The name of the attribute.
    @param value: The value of the attribute.
    """
    # All three fields are stored as strings, matching the text written
    # to the DAX.
    entry = (str(namespace), str(key), str(value))
    self.__pegasus_profile.append(entry)
Add a Pegasus profile to this job which will be written to the dax as <profile namespace="NAMESPACE" key="KEY">VALUE</profile> This can be used to add classads to particular jobs in the DAX @param namespace: A valid Pegasus namespace, e.g. condor. @param key: The name of the attribute. @param value: The value of the attribute.
def repackage_to_staging(output_path):
    """Repackage it from local installed location and copy it to GCS."""

    import google.datalab.ml as ml

    this_dir = os.path.dirname(__file__)
    # __file__ lives at [package_root]/mltoolbox/image/classification, so the
    # package root is three directories up.
    package_root = os.path.join(this_dir, '../../../')
    # setup.py is deployed alongside this module for repackaging purposes.
    setup_py = os.path.join(this_dir, 'setup.py')
    staging_package_url = os.path.join(output_path, 'staging', 'image_classification.tar.gz')
    ml.package_and_copy(package_root, setup_py, staging_package_url)
    return staging_package_url
Repackage it from local installed location and copy it to GCS.
def create_qualification_type(Name=None, Keywords=None, Description=None, QualificationTypeStatus=None, RetryDelayInSeconds=None, Test=None, AnswerKey=None, TestDurationInSeconds=None, AutoGranted=None, AutoGrantedValue=None):
    """
    The CreateQualificationType operation creates a new Qualification type,
    which is represented by a QualificationType data structure.
    See also: AWS API Documentation

    :example: response = client.create_qualification_type(
        Name='string',
        Keywords='string',
        Description='string',
        QualificationTypeStatus='Active'|'Inactive',
        RetryDelayInSeconds=123,
        Test='string',
        AnswerKey='string',
        TestDurationInSeconds=123,
        AutoGranted=True|False,
        AutoGrantedValue=123
    )

    :type Name: string
    :param Name: [REQUIRED] The name you give to the Qualification type. The
        type name is used to represent the Qualification to Workers, and to
        find the type using a Qualification type search. It must be unique
        across all of your Qualification types.

    :type Keywords: string
    :param Keywords: One or more words or phrases that describe the
        Qualification type, separated by commas. The keywords of a type make
        the type easier to find during a search.

    :type Description: string
    :param Description: [REQUIRED] A long description for the Qualification
        type. On the Amazon Mechanical Turk website, the long description is
        displayed when a Worker examines a Qualification type.

    :type QualificationTypeStatus: string
    :param QualificationTypeStatus: [REQUIRED] The initial status of the
        Qualification type. Constraints: Valid values are: Active | Inactive

    :type RetryDelayInSeconds: integer
    :param RetryDelayInSeconds: The number of seconds that a Worker must wait
        after requesting a Qualification of the Qualification type before the
        worker can retry the Qualification request. Constraints: None. If not
        specified, retries are disabled and Workers can request a
        Qualification of this type only once, even if the Worker has not been
        granted the Qualification. It is not possible to disable retries for a
        Qualification type after it has been created with retries enabled. If
        you want to disable retries, you must delete the existing
        retry-enabled Qualification type and then create a new Qualification
        type with retries disabled.

    :type Test: string
    :param Test: The questions for the Qualification test a Worker must answer
        correctly to obtain a Qualification of this type. If this parameter is
        specified, TestDurationInSeconds must also be specified. Constraints:
        Must not be longer than 65535 bytes. Must be a QuestionForm data
        structure. This parameter cannot be specified if AutoGranted is true.
        If not specified, the Worker may request the Qualification without
        answering any questions.

    :type AnswerKey: string
    :param AnswerKey: The answers to the Qualification test specified in the
        Test parameter, in the form of an AnswerKey data structure.
        Constraints: Must not be longer than 65535 bytes. If not specified,
        you must process Qualification requests manually.

    :type TestDurationInSeconds: integer
    :param TestDurationInSeconds: The number of seconds the Worker has to
        complete the Qualification test, starting from the time the Worker
        requests the Qualification.

    :type AutoGranted: boolean
    :param AutoGranted: Specifies whether requests for the Qualification type
        are granted immediately, without prompting the Worker with a
        Qualification test. Constraints: If the Test parameter is specified,
        this parameter cannot be true.

    :type AutoGrantedValue: integer
    :param AutoGrantedValue: The Qualification value to use for automatically
        granted Qualifications. This parameter is used only if the AutoGranted
        parameter is true.

    :rtype: dict
    :return: {
        'QualificationType': {
            'QualificationTypeId': 'string',
            'CreationTime': datetime(2015, 1, 1),
            'Name': 'string',
            'Description': 'string',
            'Keywords': 'string',
            'QualificationTypeStatus': 'Active'|'Inactive',
            'Test': 'string',
            'TestDurationInSeconds': 123,
            'AnswerKey': 'string',
            'RetryDelayInSeconds': 123,
            'IsRequestable': True|False,
            'AutoGranted': True|False,
            'AutoGrantedValue': 123
        }
    }
    """
    # Auto-generated client stub: the real implementation is provided by the
    # service client at runtime.
    pass
The CreateQualificationType operation creates a new Qualification type, which is represented by a QualificationType data structure. See also: AWS API Documentation :example: response = client.create_qualification_type( Name='string', Keywords='string', Description='string', QualificationTypeStatus='Active'|'Inactive', RetryDelayInSeconds=123, Test='string', AnswerKey='string', TestDurationInSeconds=123, AutoGranted=True|False, AutoGrantedValue=123 ) :type Name: string :param Name: [REQUIRED] The name you give to the Qualification type. The type name is used to represent the Qualification to Workers, and to find the type using a Qualification type search. It must be unique across all of your Qualification types. :type Keywords: string :param Keywords: One or more words or phrases that describe the Qualification type, separated by commas. The keywords of a type make the type easier to find during a search. :type Description: string :param Description: [REQUIRED] A long description for the Qualification type. On the Amazon Mechanical Turk website, the long description is displayed when a Worker examines a Qualification type. :type QualificationTypeStatus: string :param QualificationTypeStatus: [REQUIRED] The initial status of the Qualification type. Constraints: Valid values are: Active | Inactive :type RetryDelayInSeconds: integer :param RetryDelayInSeconds: The number of seconds that a Worker must wait after requesting a Qualification of the Qualification type before the worker can retry the Qualification request. Constraints: None. If not specified, retries are disabled and Workers can request a Qualification of this type only once, even if the Worker has not been granted the Qualification. It is not possible to disable retries for a Qualification type after it has been created with retries enabled. If you want to disable retries, you must delete existing retry-enabled Qualification type and then create a new Qualification type with retries disabled. 
:type Test: string :param Test: The questions for the Qualification test a Worker must answer correctly to obtain a Qualification of this type. If this parameter is specified, TestDurationInSeconds must also be specified. Constraints: Must not be longer than 65535 bytes. Must be a QuestionForm data structure. This parameter cannot be specified if AutoGranted is true. Constraints: None. If not specified, the Worker may request the Qualification without answering any questions. :type AnswerKey: string :param AnswerKey: The answers to the Qualification test specified in the Test parameter, in the form of an AnswerKey data structure. Constraints: Must not be longer than 65535 bytes. Constraints: None. If not specified, you must process Qualification requests manually. :type TestDurationInSeconds: integer :param TestDurationInSeconds: The number of seconds the Worker has to complete the Qualification test, starting from the time the Worker requests the Qualification. :type AutoGranted: boolean :param AutoGranted: Specifies whether requests for the Qualification type are granted immediately, without prompting the Worker with a Qualification test. Constraints: If the Test parameter is specified, this parameter cannot be true. :type AutoGrantedValue: integer :param AutoGrantedValue: The Qualification value to use for automatically granted Qualifications. This parameter is used only if the AutoGranted parameter is true. :rtype: dict :return: { 'QualificationType': { 'QualificationTypeId': 'string', 'CreationTime': datetime(2015, 1, 1), 'Name': 'string', 'Description': 'string', 'Keywords': 'string', 'QualificationTypeStatus': 'Active'|'Inactive', 'Test': 'string', 'TestDurationInSeconds': 123, 'AnswerKey': 'string', 'RetryDelayInSeconds': 123, 'IsRequestable': True|False, 'AutoGranted': True|False, 'AutoGrantedValue': 123 } }