def send_to_default_exchange(self, sess_id, message=None):
    """
    Send messages through RabbitMQ's default exchange,
    which will be delivered through routing_key (sess_id).

    This method is only used for un-authenticated users, i.e. during the login process.

    Args:
        sess_id string: Session id
        message dict: Message object.
    """
    msg = json.dumps(message, cls=ZEngineJSONEncoder)
    log.debug("Sending following message to %s queue through default exchange:\n%s" % (sess_id, msg))
    self.get_channel().publish(exchange='', routing_key=sess_id, body=msg)
def send_to_prv_exchange(self, user_id, message=None):
    """
    Send messages through a logged-in user's private exchange.

    Args:
        user_id string: User key
        message dict: Message object
    """
    exchange = 'prv_%s' % user_id.lower()
    msg = json.dumps(message, cls=ZEngineJSONEncoder)
    log.debug("Sending following message to user's \"%s\" exchange:\n%s " % (exchange, msg))
    self.get_channel().publish(exchange=exchange, routing_key='', body=msg)
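A standalone sketch of the same two publish patterns using pika's blocking client. The queue and exchange names here are hypothetical, and a local RabbitMQ broker is assumed; the ZEngine channel wrapper above is not required.

import json
import pika

connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()

# Default exchange: the routing key doubles as the queue name (session id).
channel.basic_publish(exchange='', routing_key='sess_42',
                      body=json.dumps({'cmd': 'login_ok'}))

# Private per-user exchange: routing key is empty, the exchange name selects the user.
channel.exchange_declare(exchange='prv_alice', exchange_type='fanout')
channel.basic_publish(exchange='prv_alice', routing_key='',
                      body=json.dumps({'cmd': 'notify'}))

connection.close()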
def compose(self, other):
    """
    compose 2 graphs to CGR

    :param other: Molecule or CGR Container
    :return: CGRContainer
    """
    if not isinstance(other, Compose):
        raise TypeError('CGRContainer or MoleculeContainer [sub]class expected')
    cgr = self._get_subclass('CGRContainer')
    common = self._node.keys() & other
    if not common:
        if not (isinstance(self, cgr) or isinstance(other, cgr)):
            return cgr() | self | other
        return self | other

    unique_reactant = self._node.keys() - common
    unique_product = other._node.keys() - common
    h = cgr()
    atoms = h._node
    bonds = []
    common_adj = {n: {} for n in common}
    common_bonds = []

    r_atoms = {}
    r_skin = defaultdict(list)
    if isinstance(self, cgr):
        for n in unique_reactant:
            h.add_atom(self._node[n], n)
            for m, bond in self._adj[n].items():
                if m not in atoms:
                    if m in common:  # bond to common atoms is broken bond
                        r_bond = bond._reactant
                        if r_bond is None:  # skip None>None
                            continue
                        r_skin[n].append(m)
                        bond = DynBond.__new__(DynBond)
                        bond.__init_copy__(r_bond, None)
                    bonds.append((n, m, bond))
        for n in common:
            r_atoms[n] = self._node[n]._reactant
            for m, bond in self._adj[n].items():
                if m not in r_atoms and m in common:
                    tmp = [bond._reactant, None]
                    common_adj[n][m] = common_adj[m][n] = tmp
                    common_bonds.append((n, m, tmp))
    else:
        for n in unique_reactant:
            atom = DynAtom.__new__(DynAtom)  # add unique atom into CGR
            atom.__init_copy__(self._node[n], self._node[n])
            h.add_atom(atom, n)
            for m, r_bond in self._adj[n].items():  # unique atom neighbors
                if m not in atoms:  # bond not analyzed yet
                    bond = DynBond.__new__(DynBond)
                    if m in common:  # bond to common atoms
                        r_skin[n].append(m)
                        bond.__init_copy__(r_bond, None)
                    else:  # static bond
                        bond.__init_copy__(r_bond, r_bond)
                    bonds.append((n, m, bond))
        for n in common:
            r_atoms[n] = self._node[n]
            for m, bond in self._adj[n].items():
                if m not in r_atoms and m in common:  # analyze only common atoms bonds
                    tmp = [bond, None]  # reactant state only
                    common_adj[n][m] = common_adj[m][n] = tmp
                    common_bonds.append((n, m, tmp))

    p_atoms = {}
    p_skin = defaultdict(list)
    if isinstance(other, cgr):
        for n in unique_product:
            h.add_atom(other._node[n], n)
            for m, bond in other._adj[n].items():
                if m not in atoms:
                    if m in common:  # bond to common atoms is new bond
                        p_bond = bond._product
                        if p_bond is None:  # skip None>None
                            continue
                        p_skin[n].append(m)
                        bond = DynBond.__new__(DynBond)
                        bond.__init_copy__(None, p_bond)
                    bonds.append((n, m, bond))
        for n in common:
            p_atoms[n] = other._node[n]._product
            n_bonds = common_adj[n]
            for m, bond in other._adj[n].items():
                if m in n_bonds:
                    n_bonds[m][1] = bond._product
                elif m not in p_atoms and m in common:  # new bond of reaction
                    p_bond = bond._product
                    if p_bond is None:  # skip None>None
                        continue
                    bond = DynBond.__new__(DynBond)
                    bond.__init_copy__(None, p_bond)
                    bonds.append((n, m, bond))
    else:
        for n in unique_product:
            atom = DynAtom.__new__(DynAtom)
            atom.__init_copy__(other._node[n], other._node[n])
            h.add_atom(atom, n)
            for m, p_bond in other._adj[n].items():
                if m not in atoms:
                    bond = DynBond.__new__(DynBond)
                    if m in common:
                        p_skin[n].append(m)
                        bond.__init_copy__(None, p_bond)
                    else:
                        bond.__init_copy__(p_bond, p_bond)
                    bonds.append((n, m, bond))
        for n in common:
            p_atoms[n] = other._node[n]
            n_bonds = common_adj[n]
            for m, p_bond in other._adj[n].items():
                if m in n_bonds:  # set product state of changed bond
                    n_bonds[m][1] = p_bond
                elif m not in p_atoms and m in common:  # new bond of reaction
                    bond = DynBond.__new__(DynBond)
                    bond.__init_copy__(None, p_bond)
                    bonds.append((n, m, bond))

    for n, r_atom in r_atoms.items():  # prepare common DynAtoms
        p_atom = p_atoms[n]
        if r_atom.element != p_atom.element or r_atom.isotope != p_atom.isotope:
            raise ValueError('atom-to-atom mapping invalid')
        atom = DynAtom.__new__(DynAtom)
        atom.__init_copy__(r_atom, p_atom)
        h.add_atom(atom, n)

    for n, m, (r_bond, p_bond) in common_bonds:
        if r_bond is p_bond is None:  # skip None>None
            continue
        bond = DynBond.__new__(DynBond)
        bond.__init_copy__(r_bond, p_bond)
        h.add_bond(n, m, bond)
    for n, m, bond in bonds:
        h.add_bond(n, m, bond)
    return h
def decompose(self):
    """
    decompose CGR into a pair of Molecules which represent the reactant and
    product states of the reaction

    :return: tuple of two molecules
    """
    mc = self._get_subclass('MoleculeContainer')
    reactants = mc()
    products = mc()
    for n, atom in self.atoms():
        reactants.add_atom(atom._reactant, n)
        products.add_atom(atom._product, n)
    for n, m, bond in self.bonds():
        if bond._reactant is not None:
            reactants.add_bond(n, m, bond._reactant)
        if bond._product is not None:
            products.add_bond(n, m, bond._product)
    return reactants, products
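A self-contained toy model of the decompose step above: a "CGR" is stored as per-atom and per-bond (reactant_state, product_state) pairs, with None marking a bond absent on one side. The dict representation and all names are illustrative, not the CGRtools API.

cgr_atoms = {1: ('C', 'C'), 2: ('O', 'O'), 3: ('H', 'H')}
cgr_bonds = {(1, 2): (1, 2),     # single bond becomes double
             (2, 3): (1, None)}  # O-H bond is broken

def decompose(atoms, bonds):
    reactants = {'atoms': {}, 'bonds': {}}
    products = {'atoms': {}, 'bonds': {}}
    for n, (r, p) in atoms.items():
        reactants['atoms'][n] = r
        products['atoms'][n] = p
    for nm, (r, p) in bonds.items():
        if r is not None:  # keep only bonds that exist on that side
            reactants['bonds'][nm] = r
        if p is not None:
            products['bonds'][nm] = p
    return reactants, products

reactants, products = decompose(cgr_atoms, cgr_bonds)
print(reactants['bonds'])  # {(1, 2): 1, (2, 3): 1}
print(products['bonds'])   # {(1, 2): 2}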
def cycle_data(self, verbose=False, result_cycle=None, result_size=None, result_edges=None, changelog=True):
    """Get data from JIRA for cycle/flow times and story points size change.

    Build a numerically indexed data frame with the following 'fixed'
    columns: `key`, `url`, `issue_type`, `summary`, `status`, and
    `resolution` from JIRA, as well as the value of any fields set in the
    `fields` dict in `settings`. If `known_values` is set (a dict of lists,
    with field names as keys and a list of known values for each field as
    values) and a field in `fields` contains a list of values, only the
    first value in the list of known values will be used.

    If `query_attribute` is set in `settings`, a column with this name will
    be added, and populated with the `value` key, if any, from each criteria
    block under `queries` in settings.

    In addition, `cycle_time` will be set to the time delta between the
    first `accepted`-type column and the first `complete` column, or None.

    The remaining columns are the names of the items in the configured
    cycle, in order.

    Each cell contains the last date/time stamp when the relevant status was
    set. If an item moves backwards through the cycle, subsequent date/time
    stamps in the cycle are erased.
    """
    cycle_names = [s['name'] for s in self.settings['cycle']]
    accepted_steps = set(s['name'] for s in self.settings['cycle'] if s['type'] == StatusTypes.accepted)
    completed_steps = set(s['name'] for s in self.settings['cycle'] if s['type'] == StatusTypes.complete)

    series = {
        'key': {'data': [], 'dtype': str},
        'url': {'data': [], 'dtype': str},
        'issue_type': {'data': [], 'dtype': str},
        'summary': {'data': [], 'dtype': str},
        'status': {'data': [], 'dtype': str},
        'resolution': {'data': [], 'dtype': str},
        'cycle_time': {'data': [], 'dtype': 'timedelta64[ns]'},
        'completed_timestamp': {'data': [], 'dtype': 'datetime64[ns]'},
        'created_timestamp': {'data': [], 'dtype': 'datetime64[ns]'}
    }

    if sys.platform.startswith('win'):
        # Open a line-buffered (buffering=1) text file for reading and writing,
        # overwriting any existing file. Windows users seem to have a problem
        # with the spooled temporary file.
        buffer = open("cycledata.tmp", "w+", 1)
    else:
        buffer = tempfile.SpooledTemporaryFile(max_size=50000, mode='w+t')

    df_size_history = pd.DataFrame(columns=['key', 'fromDate', 'toDate', 'size'])
    df_size_history.to_csv(buffer, columns=['key', 'fromDate', 'toDate', 'size'],
                           header=True, index=None, sep='\t', encoding='utf-8')

    for cycle_name in cycle_names:
        series[cycle_name] = {'data': [], 'dtype': 'datetime64[ns]'}

    for name in self.fields.keys():
        series[name] = {'data': [], 'dtype': 'object'}

    if self.settings['query_attribute']:
        series[self.settings['query_attribute']] = {'data': [], 'dtype': str}

    for criteria in self.settings['queries']:
        for issue in self.find_issues(criteria, order='updatedDate DESC', verbose=verbose, changelog=changelog):
            # Deal with the differences in strings between Python 2 & 3
            if sys.version_info > (3, 0):
                # Python 3 code in this block
                item = {
                    'key': issue.key,
                    'url': "%s/browse/%s" % (self.jira._options['server'], issue.key,),
                    'issue_type': issue.fields.issuetype.name,
                    'summary': issue.fields.summary,
                    'status': issue.fields.status.name,
                    'resolution': issue.fields.resolution.name if issue.fields.resolution else None,
                    'cycle_time': None,
                    'completed_timestamp': None,
                    'created_timestamp': issue.fields.created[:19]
                }
            else:
                # Python 2 code in this block
                item = {
                    'key': issue.key,
                    'url': "%s/browse/%s" % (self.jira._options['server'], issue.key,),
                    'issue_type': issue.fields.issuetype.name,
                    'summary': issue.fields.summary.encode('utf-8'),
                    'status': issue.fields.status.name,
                    'resolution': issue.fields.resolution.name if issue.fields.resolution else None,
                    'cycle_time': None,
                    'completed_timestamp': None,
                    'created_timestamp': issue.fields.created[:19]
                }

            for name, field_name in self.fields.items():
                item[name] = self.resolve_field_value(issue, name, field_name)

            if self.settings['query_attribute']:
                item[self.settings['query_attribute']] = criteria.get('value', None)

            for cycle_name in cycle_names:
                item[cycle_name] = None

            # Get the relationships for this issue.
            # Note: it seems that having an Epic parent does not record an Epic
            # Link, just the name "Epic Name". Creating the Epic relationship
            # requires more work, and each Jira instance uses different custom
            # fields for Epic data, so that code was removed.
            edges = []  # Source, Target, Inward Link, Outward Link, Type
            issuelinks = issue.fields.issuelinks
            for link in issuelinks:
                inwardissue = None
                outwardissue = None
                try:
                    inwardissue = link.inwardIssue.key
                except AttributeError:
                    outwardissue = link.outwardIssue.key
                if inwardissue is not None:
                    data = {'LinkID': link.id, 'Source': inwardissue, 'Target': issue.key,
                            'InwardLink': link.type.inward, 'OutwardLink': link.type.outward,
                            'LinkType': link.type.name}
                else:
                    data = {'LinkID': link.id, 'Source': issue.key, 'Target': outwardissue,
                            'InwardLink': link.type.inward, 'OutwardLink': link.type.outward,
                            'LinkType': link.type.name}
                edges.append(data)

            if len(edges) > 0:
                try:
                    df_edges
                except NameError:
                    df_edges = pd.DataFrame(edges)
                else:
                    df_links = pd.DataFrame(edges)
                    df_edges = df_edges.append(df_links)
            # Got all the relationships for this issue

            # Get Story Points size-change history
            rows = []
            try:
                for snapshot in self.iter_size_changes(issue):
                    data = {'key': snapshot.key, 'fromDate': snapshot.date, 'size': snapshot.size}
                    rows.append(data)
                df = pd.DataFrame(rows)
                # Create the toDate column
                df_toDate = df['fromDate'].shift(-1)
                df_toDate.loc[len(df_toDate) - 1] = datetime.datetime.now(pytz.utc)
                df['toDate'] = df_toDate
            except Exception:
                df = pd.DataFrame(columns=['key', 'fromDate', 'toDate', 'size'])

            # Round datetimes down to full dates
            df['fromDate'] = df['fromDate'].apply(lambda dt: datetime.datetime(dt.year, dt.month, dt.day))
            df['toDate'] = df['toDate'].apply(lambda dt: datetime.datetime(dt.year, dt.month, dt.day))

            # If we only have one row of size changes and the current issue has
            # a size, then it must have been created with a size value. That
            # size is not recorded in the size-change history, so update the
            # single row we have with the current issue size.
            if item.get('StoryPoints') is not None and df.shape[0] == 1:
                df.loc[df.index[0], 'size'] = item['StoryPoints']

            # Append to csv file
            df.to_csv(buffer, columns=['key', 'fromDate', 'toDate', 'size'], header=None,
                      mode='a', sep='\t', date_format='%Y-%m-%d', encoding='utf-8')

            # If the first column in the item lifecycle was skipped, put the created date in it.
            if item[cycle_names[0]] is None:
                item[cycle_names[0]] = dateutil.parser.parse(item['created_timestamp'])

            # Record date of status changes
            for snapshot in self.iter_changes(issue, True):
                snapshot_cycle_step = self.settings['cycle_lookup'].get(snapshot.status.lower(), None)
                if snapshot_cycle_step is None:
                    if verbose:
                        print(issue.key, "transitioned to unknown JIRA status", snapshot.status)
                    continue

                snapshot_cycle_step_name = snapshot_cycle_step['name']

                # Keep the first time we entered a step
                if item[snapshot_cycle_step_name] is None:
                    item[snapshot_cycle_step_name] = snapshot.date

                # Wipe any subsequent dates, in case this was a move backwards
                found_cycle_name = False
                for cycle_name in cycle_names:
                    if not found_cycle_name and cycle_name == snapshot_cycle_step_name:
                        found_cycle_name = True
                        continue
                    elif found_cycle_name and item[cycle_name] is not None:
                        if verbose:
                            print(issue.key, "moved backwards to", snapshot_cycle_step_name,
                                  "wiping date for subsequent step", cycle_name)
                        item[cycle_name] = None

            # Wipe timestamps if items have moved backwards; calculate cycle time
            previous_timestamp = None
            accepted_timestamp = None
            completed_timestamp = None

            for cycle_name in cycle_names:
                if item[cycle_name] is not None:
                    previous_timestamp = item[cycle_name]
                    if accepted_timestamp is None and previous_timestamp is not None and cycle_name in accepted_steps:
                        accepted_timestamp = previous_timestamp
                    if completed_timestamp is None and previous_timestamp is not None and cycle_name in completed_steps:
                        completed_timestamp = previous_timestamp

            if accepted_timestamp is not None and completed_timestamp is not None:
                item['cycle_time'] = completed_timestamp - accepted_timestamp
                item['completed_timestamp'] = completed_timestamp

            for k, v in item.items():
                series[k]['data'].append(v)

    data = {}
    for k, v in series.items():
        data[k] = pd.Series(v['data'], dtype=v['dtype'])

    result_cycle = pd.DataFrame(
        data,
        columns=['key', 'url', 'issue_type', 'summary', 'status', 'resolution'] +
                sorted(self.fields.keys()) +
                ([self.settings['query_attribute']] if self.settings['query_attribute'] else []) +
                ['cycle_time', 'completed_timestamp'] + cycle_names
    )

    result_size = pd.DataFrame()
    buffer.seek(0)
    result_size = result_size.from_csv(buffer, sep='\t')
    buffer.close()

    try:
        df_edges
    except NameError:
        df_edges = pd.DataFrame()

    try:
        # Specify dataframe sort order
        df_edges = df_edges[['Source', 'OutwardLink', 'Target', 'InwardLink', 'LinkType', 'LinkID']]
    except KeyError:
        print('Info: No issue edges found.')

    result_edges = df_edges

    # There may be no result_size data, as we might not have any change history
    try:
        result_size.set_index('key')
    except KeyError:
        result_size = pd.DataFrame(index=['key'], columns=['fromDate', 'toDate', 'size'])

    result_size['toDate'] = pd.to_datetime(result_size['toDate'], format='%Y-%m-%d')
    result_size['fromDate'] = pd.to_datetime(result_size['fromDate'], format='%Y-%m-%d')

    return result_cycle, result_size, result_edges
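The backwards-move rule in cycle_data is easiest to see in isolation. A toy sketch, with made-up step names and dates:

cycle_names = ['Backlog', 'In Process', 'Done']  # illustrative cycle
item = {'Backlog': '2020-01-01', 'In Process': '2020-01-05', 'Done': '2020-01-09'}

def record_transition(item, step, date):
    if item.get(step) is None:
        item[step] = date  # keep the first time we entered a step
    seen = False
    for name in cycle_names:  # wipe everything after the step we moved to
        if name == step:
            seen = True
        elif seen:
            item[name] = None

record_transition(item, 'In Process', '2020-01-10')  # moved backwards from Done
print(item)  # {'Backlog': '2020-01-01', 'In Process': '2020-01-05', 'Done': None}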
def size_history(self, size_data):
    """Return a DataFrame, indexed by day, with columns containing the story
    size for each issue.

    In addition, columns are sorted by Jira issue key: first by project,
    then by issue number.
    """
    def my_merge(df1, df2):
        # http://stackoverflow.com/questions/34411495/pandas-merge-several-dataframes
        res = pd.merge(df1, df2, how='outer', left_index=True, right_index=True)
        cols = sorted(res.columns)
        pairs = []
        for col1, col2 in zip(cols[:-1], cols[1:]):
            if col1.endswith('_x') and col2.endswith('_y'):
                pairs.append((col1, col2))
        for col1, col2 in pairs:
            res[col1[:-2]] = res[col1].combine_first(res[col2])
            res = res.drop([col1, col2], axis=1)
        return res

    dfs_key = []
    # Group the size data by issue key and build one per-day frame per issue
    for name, group in size_data.groupby('key'):
        dfs = []
        for row in group.itertuples():
            dates = pd.date_range(start=row.fromDate, end=row.toDate)
            sizes = [row.size] * len(dates)
            data = {'date': dates, 'size': sizes}
            df2 = pd.DataFrame(data, columns=['date', 'size'])
            df2['date'] = pd.to_datetime(df2['date'], format='%Y-%m-%d')
            df2.set_index(['date'], inplace=True)
            dfs.append(df2)
        df_key = reduce(my_merge, dfs)
        df_key.columns = [name if x == 'size' else x for x in df_key.columns]
        dfs_key.append(df_key)
    df_all = reduce(my_merge, dfs_key)

    # Sort the columns based on Jira project code and issue number
    mykeys = df_all.columns.values.tolist()
    mykeys.sort(key=lambda x: x.split('-')[0] + '-' + str(int(x.split('-')[1])).zfill(6))
    df_all = df_all[mykeys]

    # Reindex to make sure we have all dates
    start, end = df_all.index.min(), df_all.index.max()
    df_all = df_all.reindex(pd.date_range(start, end, freq='D'), method='ffill')
    return df_all
def cfd(self, cycle_data, size_history=None, pointscolumn=None, stacked=True):
    """Return the data to build a cumulative flow diagram: a DataFrame,
    indexed by day, with columns containing cumulative counts for each of
    the items in the configured cycle.

    In addition, a column called `cycle_time` contains the approximate
    average cycle time of that day based on the first "accepted" status and
    the first "complete" status.

    If stacked is True, return a dataframe suitable for plotting as a
    stacked area chart; otherwise return one for plotting as a non-stacked
    or line chart.
    """
    # Helper: calculate the column sums, where the incoming matrix columns
    # represent items in workflow states. States progress from left to
    # right; for stacked charts we want to zero out everything but the
    # right-most value so items are not also counted in prior states.
    def cumulativeColumnStates(df, stacked):
        """Return a one-row dataframe with the sum of the column items."""

        # Helpers to keep only the right-most non-zero cell in each row
        def last_number(lst):
            if all(map(lambda x: x == 0, lst)):
                return 0
            elif lst[-1] != 0:
                return len(lst) - 1
            else:
                return last_number(lst[:-1])

        def fill_others(lst):
            new_lst = [0] * len(lst)
            new_lst[last_number(lst)] = lst[last_number(lst)]
            return new_lst

        df_zeroed = df.fillna(value=0)  # get rid of non-numeric items (makes a copy)
        if stacked:
            df_result = df_zeroed.apply(lambda x: fill_others(x.values.tolist()), axis=1)
        else:
            df_result = df_zeroed
        sum_row = df_result[df.columns].sum()  # sum columns
        return pd.DataFrame(data=sum_row).T  # transpose into a row dataframe and return

    # Helper: zero out all but the right-most state of each row
    def keeprightmoststate(df):
        """Incoming matrix columns represent items in workflow states, which
        progress from left to right. Zero out every item other than the
        right-most value.
        """
        def last_number(lst):
            if all(map(lambda x: x == 0, lst)):
                return 0
            elif lst[-1] != 0:
                return len(lst) - 1
            else:
                return last_number(lst[:-1])

        def fill_others(lst):
            new_lst = [0] * len(lst)
            new_lst[last_number(lst)] = lst[last_number(lst)]
            return new_lst

        df_zeroed = df.fillna(value=0)
        df_result = df_zeroed.apply(lambda x: fill_others(x.values.tolist()), axis=1)
        return df_result

    # Helper: compare date values in cells
    def hide_greater_than_date(cell, adate):
        try:
            celldatetime = datetime.date(cell.year, cell.month, cell.day)
        except Exception:
            return True
        if celldatetime > adate:
            return True
        return False  # we have a date value in the cell and it is on or before the input date

    # Helper: append a dataframe to a CSV file, checking column compatibility
    def appendDFToCSV(df, csvFilePath, sep="\t", date_format='%Y-%m-%d', encoding='utf-8'):
        import os
        if not os.path.isfile(csvFilePath):
            df.to_csv(csvFilePath, mode='a', index=False, sep=sep,
                      date_format=date_format, encoding=encoding)
        elif len(df.columns) != len(pd.read_csv(csvFilePath, nrows=1, sep=sep).columns):
            raise Exception("Columns do not match! Dataframe has " + str(len(df.columns)) +
                            " columns. CSV file has " +
                            str(len(pd.read_csv(csvFilePath, nrows=1, sep=sep).columns)) + " columns.")
        elif not (df.columns == pd.read_csv(csvFilePath, nrows=1, sep=sep).columns).all():
            raise Exception("Columns and column order of dataframe and csv file do not match!")
        else:
            df.to_csv(csvFilePath, mode='a', index=False, sep=sep, header=False,
                      date_format=date_format, encoding=encoding)

    # List of all state-change columns that may have a date value in them
    cycle_names = [s['name'] for s in self.settings['cycle']]

    # Create the list of columns that we want to return in our results
    # dataframe. Make a COPY of the list so that we don't modify the reference.
    slice_columns = list(self.settings['none_sized_statuses'])
    if pointscolumn:
        for size_state in self.settings['sized_statuses']:
            sizedStateName = size_state + 'Sized'
            slice_columns.append(sizedStateName)
        # Check that it works if we use all columns as sized.
        slice_columns = []
        for size_state in cycle_names:
            sizedStateName = size_state + 'Sized'
            slice_columns.append(sizedStateName)
    else:
        slice_columns = cycle_names

    # Build a dataframe of just the "date" columns
    df = cycle_data[cycle_names].copy()

    # Strip out times from all dates
    df = pd.DataFrame(
        np.array(df.values, dtype='<M8[ns]').astype('<M8[D]').astype('<M8[ns]'),
        columns=df.columns,
        index=df.index
    )

    # No history provided, thus we return a dataframe with just the column headers.
    if size_history is None:
        return df

    # Get the set of dates on which an issue changed state
    state_changes_on_dates_set = set()
    for state in cycle_names:
        state_changes_on_dates_set = state_changes_on_dates_set.union(set(df[state]))

    # Remove non-timestamp values and sort the list
    state_changes_on_dates = list(filter(lambda x: type(x.date()) == datetime.date,
                                         sorted(list(state_changes_on_dates_set))))

    # Replace missing NaT values (happens if a status is skipped) with the subsequent timestamp
    df = df.fillna(method='bfill', axis=1)

    if pointscolumn:
        storypoints = cycle_data[pointscolumn]  # as at today
    ids = cycle_data['key']

    # Create a blank results dataframe
    df_results = pd.DataFrame()

    # For each date on which we had an issue state change, count and sum the
    # totals for each of the given states, e.g.
    # 'Open', 'Analysis', 'Backlog', 'In Process', 'Done', 'Withdrawn'
    timenowstr = datetime.datetime.now().strftime('-run-%Y-%m-%d_%H-%M-%S')
    for date_index, statechangedate in enumerate(state_changes_on_dates):
        if date_index % 10 == 0:  # print out progress on every tenth pass
            print("CFD state change {} of {}".format(date_index, len(state_changes_on_dates)))
        if type(statechangedate.date()) == datetime.date:
            filterdate = datetime.date(statechangedate.year, statechangedate.month, statechangedate.day)

            # Apply the function to each cell; a cell is only visible if the
            # issue was in that state on or before the filter date
            df_filtered = df.applymap(lambda x: 0 if hide_greater_than_date(x, filterdate) else 1)

            if stacked:
                df_filtered = keeprightmoststate(df_filtered)

            if pointscolumn and (size_history is not None):
                storypoints_series_on = size_history.loc[filterdate.isoformat()].T
                df_size_on_day = pd.Series.to_frame(storypoints_series_on)
                df_size_on_day.columns = [pointscolumn]

                # Make sure we get size data in the same sequence as ids
                left = pd.Series.to_frame(ids)
                right = df_size_on_day
                result = left.join(right, on=['key'])  # http://pandas.pydata.org/pandas-docs/stable/merging.html
                df_countable = pd.concat([result, df_filtered], axis=1)

                # For debugging and analytics, append the day's state to file
                df_countable['date'] = filterdate.isoformat()
                if stacked:
                    file_name = "daily-cfd-stacked-run-at" + timenowstr + ".csv"
                else:
                    file_name = "daily-cfd-run-at" + timenowstr + ".csv"
                appendDFToCSV(df_countable, file_name)
            else:
                df_countable = df_filtered

            # Because we size issues with story points, we need to add some
            # additional columns for each state, based on size rather than count
            if pointscolumn:
                for size_state in self.settings['sized_statuses']:
                    sizedStateName = size_state + 'Sized'
                    df_countable[sizedStateName] = df_countable.apply(
                        lambda row: (row[pointscolumn] * row[size_state]), axis=1)

            df_slice = df_countable.loc[:, slice_columns].copy()
            df_sub_sum = cumulativeColumnStates(df_slice, stacked)
            final_table = df_sub_sum.rename(index={0: filterdate})

            # Append to results
            df_results = df_results.append(final_table)

    df_results.sort_index(inplace=True)
    df = df_results

    # Reindex to make sure we have all dates
    start, end = df.index.min(), df.index.max()
    try:
        # If we have no change history we will not have any data in the df
        # and will get a ValueError on reindex
        df = df.reindex(pd.date_range(start, end, freq='D'), method='ffill')
    except ValueError:
        pass

    return df
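The "keep right-most state" transform used for stacked CFDs can be shown standalone. This iterative version is assumed equivalent to the recursive helper above: an item visible in several workflow states only counts in the right-most (most advanced) one.

def fill_others(lst):
    new_lst = [0] * len(lst)
    for i in range(len(lst) - 1, -1, -1):  # scan from the right
        if lst[i] != 0:
            new_lst[i] = lst[i]
            break
    return new_lst

print(fill_others([1, 1, 0]))  # [0, 1, 0] -> counted only in 'In Process'
print(fill_others([1, 1, 1]))  # [0, 0, 1] -> counted only in 'Done'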
def histogram(self, cycle_data, bins=10):
    """Return histogram data for the cycle times in `cycle_data`.

    Returns a Series named "Items" with the bin counts, indexed by
    human-readable bin-edge labels (e.g. "0.0 to 5.0").
    """
    values, edges = np.histogram(cycle_data['cycle_time'].astype('timedelta64[D]').dropna(), bins=bins)

    index = []
    for i, v in enumerate(edges):
        if i == 0:
            continue
        index.append("%.01f to %.01f" % (edges[i - 1], edges[i],))

    return pd.Series(values, name="Items", index=index)
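A hedged usage sketch of the same binning logic on synthetic cycle times; .dt.days stands in for astype('timedelta64[D]'), which newer pandas versions reject.

import numpy as np
import pandas as pd

cycle_times = pd.to_timedelta([2, 3, 5, 8, 13], unit='D').to_series()
values, edges = np.histogram(cycle_times.dt.days.dropna(), bins=3)
labels = ["%.01f to %.01f" % (edges[i - 1], edges[i]) for i in range(1, len(edges))]
print(pd.Series(values, name="Items", index=labels))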
def throughput_data(self, cycle_data, frequency='1D', pointscolumn=None):
    """Return a data frame with a `completed_timestamp` index of the given
    frequency (e.g. daily) and a single column: either `count`, the number
    of items completed at that timestamp, or `sum`, the sum of the value
    specified by pointscolumn (expected to be 'StoryPoints').
    """
    if len(cycle_data) < 1:
        return None  # no completed items yet, return None
    if pointscolumn:
        return cycle_data[['completed_timestamp', pointscolumn]] \
            .rename(columns={pointscolumn: 'sum'}) \
            .groupby('completed_timestamp').sum() \
            .resample(frequency).sum() \
            .fillna(0)
    else:
        return cycle_data[['completed_timestamp', 'key']] \
            .rename(columns={'key': 'count'}) \
            .groupby('completed_timestamp').count() \
            .resample(frequency).sum() \
            .fillna(0)
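A hedged sketch of the count branch with synthetic data; no JIRA required, and the column names follow the snippets above.

import pandas as pd

cycle_data = pd.DataFrame({
    'completed_timestamp': pd.to_datetime(['2020-01-01', '2020-01-01', '2020-01-03']),
    'key': ['PROJ-1', 'PROJ-2', 'PROJ-3'],
})
throughput = (cycle_data[['completed_timestamp', 'key']]
              .rename(columns={'key': 'count'})
              .groupby('completed_timestamp').count()
              .resample('1D').sum()
              .fillna(0))
print(throughput)  # daily counts: 2, 0, 1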
def scatterplot(self, cycle_data):
    """Return scatterplot data for the cycle times in `cycle_data`.

    Returns a data frame containing only those items in `cycle_data` where
    values are set for `completed_timestamp` and `cycle_time`, with those
    two columns as the first two, both normalised to whole days, and with
    `completed_timestamp` renamed to `completed_date`.
    """
    columns = list(cycle_data.columns)
    columns.remove('cycle_time')
    columns.remove('completed_timestamp')
    columns = ['completed_timestamp', 'cycle_time'] + columns

    data = (
        cycle_data[columns]
        .dropna(subset=['cycle_time', 'completed_timestamp'])
        .rename(columns={'completed_timestamp': 'completed_date'})
    )

    data['cycle_time'] = data['cycle_time'].astype('timedelta64[D]')
    data['completed_date'] = data['completed_date'].map(pd.Timestamp.date)
    return data
def _is_ready(self, topic_name):
    '''
    Is NSQ running, and does it have space to receive messages?
    '''
    url = 'http://%s/stats?format=json&topic=%s' % (self.nsqd_http_address, topic_name)

    # Checking for ephemeral channels
    if '#' in topic_name:
        topic_name, tag = topic_name.split("#", 1)

    data = self.session.get(url).json()
    '''
    data = {u'start_time': 1516164866, u'version': u'1.0.0-compat',
            u'health': u'OK', u'topics': [{u'message_count': 19019,
            u'paused': False, u'topic_name': u'test_topic', u'channels': [],
            u'depth': 19019, u'backend_depth': 9019,
            u'e2e_processing_latency': {u'count': 0, u'percentiles': None}}]}
    '''
    topics = data.get('topics', [])
    topics = [t for t in topics if t['topic_name'] == topic_name]

    if not topics:
        raise Exception('topic_missing_at_nsq')

    topic = topics[0]
    depth = topic['depth']
    depth += sum(c.get('depth', 0) for c in topic['channels'])
    self.log.debug('nsq_depth_check', topic=topic_name, depth=depth, max_depth=self.nsq_max_depth)

    if depth < self.nsq_max_depth:
        return
    else:
        raise Exception('nsq_is_full_waiting_to_clear')
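The depth computation is easy to exercise against a hand-written /stats payload; the shape is copied from the docstring sample above, so no NSQ instance is needed.

stats = {'topics': [{'topic_name': 'test_topic', 'depth': 19019,
                     'channels': [{'depth': 5}, {'depth': 7}]}]}
topic = next(t for t in stats['topics'] if t['topic_name'] == 'test_topic')
depth = topic['depth'] + sum(c.get('depth', 0) for c in topic['channels'])
print(depth)  # 19031 -> compared against nsq_max_depth before publishing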
def _matcher(self, other):
    """
    QueryContainer < MoleculeContainer
    QueryContainer < QueryContainer[more general]
    QueryContainer < QueryCGRContainer[more general]
    """
    if isinstance(other, MoleculeContainer):
        return GraphMatcher(other, self, lambda x, y: y == x, lambda x, y: y == x)
    elif isinstance(other, (QueryContainer, QueryCGRContainer)):
        return GraphMatcher(other, self, lambda x, y: x == y, lambda x, y: x == y)
    raise TypeError('only query-molecule, query-query or query-cgr_query possible')
def _matcher(self, other):
    """
    QueryCGRContainer < CGRContainer
    QueryContainer < QueryCGRContainer[more general]
    """
    if isinstance(other, CGRContainer):
        return GraphMatcher(other, self, lambda x, y: y == x, lambda x, y: y == x)
    elif isinstance(other, QueryCGRContainer):
        return GraphMatcher(other, self, lambda x, y: x == y, lambda x, y: x == y)
    raise TypeError('only cgr_query-cgr or cgr_query-cgr_query possible')
def calculate2d(self, force=False, scale=1):
    """
    recalculate 2d coordinates. currently rings can be calculated badly.

    :param scale: rescale calculated positions.
    :param force: ignore existing coordinates of atoms
    """
    dist = {}
    # length forces
    for n, m_bond in self._adj.items():
        dist[n] = {}
        for m in m_bond:
            dist[n][m] = .825

    # angle forces
    for n, m_bond in self._adj.items():
        if len(m_bond) == 2:  # single-single or single-double bonds have angle = 120, others 180
            (m1, b1), (m2, b2) = m_bond.items()
            dist[m1][m2] = dist[m2][m1] = 1.43 if b1.order + b2.order in (2, 3) else 1.7  # +.05
        elif len(m_bond) == 3:
            m1, m2, m3 = m_bond
            dist[m1][m2] = dist[m1][m3] = dist[m2][m3] = dist[m3][m2] = dist[m2][m1] = dist[m3][m1] = 1.43
        elif len(m_bond) == 4:
            #    1
            #
            # 2  X  4
            #
            #    3
            m1, m2, m3, m4 = m_bond
            dist[m1][m2] = dist[m1][m4] = dist[m2][m1] = dist[m2][m3] = 1.17
            dist[m3][m2] = dist[m3][m4] = dist[m4][m1] = dist[m4][m3] = 1.17
            dist[m1][m3] = dist[m3][m1] = dist[m2][m4] = dist[m4][m2] = 1.7  # +.05

    # cycle forces
    for r in self.sssr:
        if len(r) == 6:
            #   6
            #
            # 1   5
            #
            # 2   4
            #
            #   3
            m1, m2, m3, m4, m5, m6 = r
            dist[m1][m4] = dist[m4][m1] = dist[m2][m5] = dist[m5][m2] = dist[m3][m6] = dist[m6][m3] = 1.7  # +.05

    if force:
        pos = None
    else:
        pos = {n: (atom.x or uniform(0, .01), atom.y or uniform(0, .01)) for n, atom in self.atoms()}

    for n, xy in kamada_kawai_layout(self, dist=dict(dist), pos=pos, scale=scale).items():
        atom = self._node[n]
        atom.x, atom.y = xy
    self.flush_cache()
def possible_moves(self, position):
    """
    Finds all possible knight moves

    :type: position Board
    :rtype: list
    """
    for direction in [0, 1, 2, 3]:
        angles = self._rotate_direction_ninety_degrees(direction)
        for angle in angles:
            try:
                end_loc = self.location.shift(angle).shift(direction).shift(direction)
                if position.is_square_empty(end_loc):
                    status = notation_const.MOVEMENT
                elif not position.piece_at_square(end_loc).color == self.color:
                    status = notation_const.CAPTURE
                else:
                    continue
                yield Move(end_loc=end_loc, piece=self, status=status, start_loc=self.location)
            except IndexError:
                pass
def centers_list(self):
    """
    get a list of lists of atoms of reaction centers
    """
    center = set()
    adj = defaultdict(set)
    for n, atom in self.atoms():
        if atom._reactant != atom._product:
            center.add(n)
    for n, m, bond in self.bonds():
        if bond._reactant != bond._product:
            adj[n].add(m)
            adj[m].add(n)
            center.add(n)
            center.add(m)

    out = []
    while center:
        n = center.pop()
        if n in adj:
            c = set(self.__plain_bfs(adj, n))
            out.append(list(c))
            center.difference_update(c)
        else:
            out.append([n])
    return out
def center_atoms(self):
    """
    get list of atoms of reaction center (atoms with dynamic: bonds, charges, radicals).
    """
    nodes = set()
    for n, atom in self.atoms():
        if atom._reactant != atom._product:
            nodes.add(n)
    for n, m, bond in self.bonds():
        if bond._reactant != bond._product:
            nodes.add(n)
            nodes.add(m)
    return list(nodes)
def center_bonds(self):
    """
    get list of bonds of reaction center (bonds with dynamic orders).
    """
    return [(n, m) for n, m, bond in self.bonds() if bond._reactant != bond._product]
def reset_query_marks(self):
    """
    set or reset hyb and neighbors marks to atoms.
    """
    for i, atom in self.atoms():
        neighbors = 0
        hybridization = 1
        p_neighbors = 0
        p_hybridization = 1
        # hyb: 1 - sp3; 2 - sp2; 3 - sp1; 4 - aromatic
        for j, bond in self._adj[i].items():
            isnth = self._node[j].element != 'H'
            order = bond.order
            if order:
                if isnth:
                    neighbors += 1
                if hybridization not in (3, 4):
                    if order == 4:
                        hybridization = 4
                    elif order == 3:
                        hybridization = 3
                    elif order == 2:
                        if hybridization == 2:
                            hybridization = 3
                        else:
                            hybridization = 2
            order = bond.p_order
            if order:
                if isnth:
                    p_neighbors += 1
                if p_hybridization not in (3, 4):
                    if order == 4:
                        p_hybridization = 4
                    elif order == 3:
                        p_hybridization = 3
                    elif order == 2:
                        if p_hybridization == 2:
                            p_hybridization = 3
                        else:
                            p_hybridization = 2
        atom._reactant._neighbors = neighbors
        atom._reactant._hybridization = hybridization
        atom._product._neighbors = p_neighbors
        atom._product._hybridization = p_hybridization
        atom.__dict__.clear()  # flush cache
    self.flush_cache()
def substructure(self, atoms, meta=False, as_view=True):
    """
    create substructure containing atoms from nbunch list

    :param atoms: list of atoms numbers of substructure
    :param meta: if True metadata will be copied to substructure
    :param as_view: if True, the returned graph-view provides a read-only view
        of the original structure scaffold without actually copying any data
    """
    s = super().substructure(atoms, meta, as_view)
    if as_view:
        s.reset_query_marks = frozen
    return s
def _matcher(self, other):
    """
    CGRContainer < CGRContainer
    """
    if isinstance(other, CGRContainer):
        return GraphMatcher(other, self, lambda x, y: x == y, lambda x, y: x == y)
    raise TypeError('only cgr-cgr possible')
def __plain_bfs(adj, source):
    """modified NX fast BFS node generator"""
    seen = set()
    nextlevel = {source}
    while nextlevel:
        thislevel = nextlevel
        nextlevel = set()
        for v in thislevel:
            if v not in seen:
                yield v
                seen.add(v)
                nextlevel.update(adj[v])
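Restated standalone, the generator yields exactly the connected component containing the source; the adjacency mapping below is illustrative.

from collections import defaultdict

adj = defaultdict(set, {1: {2}, 2: {1, 3}, 3: {2}, 4: set()})

def plain_bfs(adj, source):
    seen = set()
    nextlevel = {source}
    while nextlevel:
        thislevel = nextlevel
        nextlevel = set()
        for v in thislevel:
            if v not in seen:
                yield v
                seen.add(v)
                nextlevel.update(adj[v])

print(sorted(plain_bfs(adj, 1)))  # [1, 2, 3] -- the component containing 1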
def token(self):
    """
    Returns the authorization token provided by Cocaine.

    The real meaning of the token is determined by its type. For example,
    an OAUTH2 token will have "bearer" type.

    :return: A tuple of token type and body.
    """
    if self._token is None:
        token_type = os.getenv(TOKEN_TYPE_KEY, '')
        token_body = os.getenv(TOKEN_BODY_KEY, '')
        self._token = _Token(token_type, token_body)
    return self._token
def _send(self):
    """
    Send a message lazily formatted with args.

    External log attributes can be passed via the named attribute `extra`,
    as in logging from the standard library.

    Note:
        * Attrs must be a dict, otherwise the whole message will be skipped.
        * The key field in an attr is converted to string.
        * The value is sent as is if it is an instance of
          (str, unicode, int, float, long, bool); otherwise we convert
          the value to string.
    """
    buff = BytesIO()
    while True:
        msgs = list()
        try:
            msg = yield self.queue.get()
            # We need to connect first, as we issue the verbosity request just
            # after connection, and channels should strictly go in ascending order
            if not self._connected:
                yield self.connect()

            try:
                while True:
                    msgs.append(msg)
                    counter = next(self.counter)
                    msgpack_pack([counter, EMIT, msg], buff)
                    msg = self.queue.get_nowait()
            except queues.QueueEmpty:
                pass

            try:
                yield self.pipe.write(buff.getvalue())
            except Exception:
                pass
            # Clean the buffer or we will end up without memory
            buff.truncate(0)
        except Exception:
            for message in msgs:
                self._log_to_fallback(message)
def moves_in_direction(self, direction, position):
    """
    Finds moves in a given direction

    :type: direction: lambda
    :type: position: Board
    :rtype: list
    """
    current_square = self.location
    while True:
        try:
            current_square = direction(current_square)
        except IndexError:
            return

        if self.contains_opposite_color_piece(current_square, position):
            yield self.create_move(current_square, notation_const.CAPTURE)

        if not position.is_square_empty(current_square):
            return

        yield self.create_move(current_square, notation_const.MOVEMENT)
def possible_moves(self, position):
    """
    Returns all possible rook moves.

    :type: position: Board
    :rtype: list
    """
    for move in itertools.chain(*[self.moves_in_direction(fn, position) for fn in self.cross_fn]):
        yield move
def overlap_status(a, b):
    """Check overlap between two arrays.

    Parameters
    ----------
    a, b : array-like
        Arrays to check. Assumed to be in the same unit.

    Returns
    -------
    result : {'full', 'partial', 'none'}
        * 'full' - ``a`` is within or same as ``b``
        * 'partial' - ``a`` partially overlaps with ``b``
        * 'none' - ``a`` does not overlap ``b``
    """
    # Get the endpoints
    a1, a2 = a.min(), a.max()
    b1, b2 = b.min(), b.max()

    # Do the comparison
    if a1 >= b1 and a2 <= b2:
        result = 'full'
    elif a2 < b1 or b2 < a1:
        result = 'none'
    else:
        result = 'partial'

    return result
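A quick usage sketch, assuming the overlap_status function above is in scope; plain numpy arrays suffice.

import numpy as np

print(overlap_status(np.array([2, 3]), np.array([1, 4])))  # 'full'
print(overlap_status(np.array([0, 2]), np.array([1, 4])))  # 'partial'
print(overlap_status(np.array([5, 6]), np.array([1, 4])))  # 'none'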
def validate_totalflux(totalflux):
    """Check integrated flux for invalid values.

    Parameters
    ----------
    totalflux : float
        Integrated flux.

    Raises
    ------
    synphot.exceptions.SynphotError
        Input is zero, negative, or not a number.
    """
    if totalflux <= 0.0:
        raise exceptions.SynphotError('Integrated flux is <= 0')
    elif np.isnan(totalflux):
        raise exceptions.SynphotError('Integrated flux is NaN')
    elif np.isinf(totalflux):
        raise exceptions.SynphotError('Integrated flux is infinite')
def validate_wavelengths(wavelengths):
    """Check wavelengths for ``synphot`` compatibility.

    Wavelengths must satisfy these conditions:

        * valid unit type, if given
        * no zeroes
        * monotonic ascending or descending
        * no duplicate values

    Parameters
    ----------
    wavelengths : array-like or `~astropy.units.quantity.Quantity`
        Wavelength values.

    Raises
    ------
    synphot.exceptions.SynphotError
        Wavelengths unit type is invalid.

    synphot.exceptions.DuplicateWavelength
        Wavelength array contains duplicate entries.

    synphot.exceptions.UnsortedWavelength
        Wavelength array is not monotonic.

    synphot.exceptions.ZeroWavelength
        Negative or zero wavelength occurs in wavelength array.
    """
    if isinstance(wavelengths, u.Quantity):
        units.validate_wave_unit(wavelengths.unit)
        wave = wavelengths.value
    else:
        wave = wavelengths

    if np.isscalar(wave):
        wave = [wave]

    wave = np.asarray(wave)

    # Check for zeroes
    if np.any(wave <= 0):
        raise exceptions.ZeroWavelength(
            'Negative or zero wavelength occurs in wavelength array',
            rows=np.where(wave <= 0)[0])

    # Check for monotonicity
    sorted_wave = np.sort(wave)
    if not np.alltrue(sorted_wave == wave):
        if np.alltrue(sorted_wave[::-1] == wave):
            pass  # monotonic descending is allowed
        else:
            raise exceptions.UnsortedWavelength(
                'Wavelength array is not monotonic',
                rows=np.where(sorted_wave != wave)[0])

    # Check for duplicate values
    if wave.size > 1:
        dw = sorted_wave[1:] - sorted_wave[:-1]
        if np.any(dw == 0):
            raise exceptions.DuplicateWavelength(
                'Wavelength array contains duplicate entries',
                rows=np.where(dw == 0)[0])
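A hedged usage sketch, assuming the function above and the `exceptions` module from its namespace are in scope: a zero wavelength should raise ZeroWavelength.

import numpy as np

try:
    validate_wavelengths(np.array([0.0, 100.0, 200.0]))
except exceptions.ZeroWavelength as exc:
    print('rejected:', exc)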
def generate_wavelengths(minwave=500, maxwave=26000, num=10000, delta=None,
                         log=True, wave_unit=u.AA):
    """Generate wavelength array to be used for spectrum sampling.

    .. math::

        minwave \\le \\lambda < maxwave

    Parameters
    ----------
    minwave, maxwave : float
        Lower and upper limits of the wavelengths.
        These must be values in linear space regardless of ``log``.

    num : int
        The number of wavelength values.
        This is only used when ``delta=None``.

    delta : float or `None`
        Delta between wavelength values.
        When ``log=True``, this is the spacing in log space.

    log : bool
        If `True`, the wavelength values are evenly spaced in log scale.
        Otherwise, spacing is linear.

    wave_unit : str or `~astropy.units.core.Unit`
        Wavelength unit. Default is Angstrom.

    Returns
    -------
    waveset : `~astropy.units.quantity.Quantity`
        Generated wavelength set.

    waveset_str : str
        Info string associated with the result.
    """
    wave_unit = units.validate_unit(wave_unit)

    if delta is not None:
        num = None
    waveset_str = 'Min: {0}, Max: {1}, Num: {2}, Delta: {3}, Log: {4}'.format(
        minwave, maxwave, num, delta, log)

    # Log space
    if log:
        logmin = np.log10(minwave)
        logmax = np.log10(maxwave)
        if delta is None:
            waveset = np.logspace(logmin, logmax, num, endpoint=False)
        else:
            waveset = 10 ** np.arange(logmin, logmax, delta)
    # Linear space
    else:
        if delta is None:
            waveset = np.linspace(minwave, maxwave, num, endpoint=False)
        else:
            waveset = np.arange(minwave, maxwave, delta)

    return waveset.astype(np.float64) * wave_unit, waveset_str
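A hedged usage sketch, assuming generate_wavelengths above is in scope: ten log-spaced wavelength values in Angstrom.

waves, info = generate_wavelengths(minwave=1000, maxwave=2000, num=10, log=True)
print(info)       # Min: 1000, Max: 2000, Num: 10, Delta: None, Log: True
print(waves[:3])  # first few values as an astropy Quantity in Angstrom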
def merge_wavelengths(waveset1, waveset2, threshold=1e-12):
    """Return the union of the two sets of wavelengths using
    :func:`numpy.union1d`.

    The merged wavelengths may sometimes contain numbers which are nearly
    equal but differ at levels as small as 1e-14. Having values this close
    together can cause problems down the line. So, here we test whether any
    such small differences are present, with a small difference defined as
    less than ``threshold``. If a small difference is present, the lower of
    the too-close pair is removed.

    Parameters
    ----------
    waveset1, waveset2 : array-like or `None`
        Wavelength values, assumed to be in the same unit already.
        Also see :func:`~synphot.models.get_waveset`.

    threshold : float, optional
        Merged wavelength values are considered "too close together"
        when the difference is smaller than this number.
        The default is 1e-12.

    Returns
    -------
    out_wavelengths : array-like or `None`
        Merged wavelengths. `None` if undefined.
    """
    if waveset1 is None and waveset2 is None:
        out_wavelengths = None
    elif waveset1 is not None and waveset2 is None:
        out_wavelengths = waveset1
    elif waveset1 is None and waveset2 is not None:
        out_wavelengths = waveset2
    else:
        out_wavelengths = np.union1d(waveset1, waveset2)
        delta = out_wavelengths[1:] - out_wavelengths[:-1]
        i_good = np.where(delta > threshold)

        # Remove "too close together" duplicates
        if len(i_good[0]) < delta.size:
            out_wavelengths = np.append(
                out_wavelengths[i_good], out_wavelengths[-1])

    return out_wavelengths
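A usage sketch of the near-duplicate collapsing, assuming merge_wavelengths above is in scope.

import numpy as np

w1 = np.array([1.0, 2.0, 3.0])
w2 = np.array([2.0 + 1e-14, 4.0])
# Prints [1. 2. 3. 4.]; the lower value (2.0) of the too-close pair was
# dropped, and the kept entry is actually 2.0 + 1e-14.
print(merge_wavelengths(w1, w2))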
def download_data(cdbs_root, verbose=True, dry_run=False):
    """Download CDBS data files to given root directory.

    Download is skipped if a data file already exists.

    Parameters
    ----------
    cdbs_root : str
        Root directory for CDBS data files.

    verbose : bool
        Print extra information to screen.

    dry_run : bool
        Go through the logic but skip the actual download.
        This would return a list of files that *would have been*
        downloaded without network calls.
        Use this option for debugging or testing.

    Raises
    ------
    OSError
        Problem with directory.

    Returns
    -------
    file_list : list of str
        A list of downloaded files.
    """
    from .config import conf  # Avoid potential circular import

    if not os.path.exists(cdbs_root):
        os.makedirs(cdbs_root, exist_ok=True)
        if verbose:  # pragma: no cover
            print('Created {}'.format(cdbs_root))
    elif not os.path.isdir(cdbs_root):
        raise OSError('{} must be a directory'.format(cdbs_root))

    host = 'http://ssb.stsci.edu/cdbs/'
    file_list = []

    if not cdbs_root.endswith(os.sep):
        cdbs_root += os.sep

    # See https://github.com/astropy/astropy/issues/8524
    for cfgitem in conf.__class__.__dict__.values():
        if (not isinstance(cfgitem, ConfigItem) or
                not cfgitem.name.endswith('file')):
            continue

        url = cfgitem()
        if not url.startswith(host):
            if verbose:  # pragma: no cover
                print('{} is not from {}, skipping download'.format(url, host))
            continue

        dst = url.replace(host, cdbs_root).replace('/', os.sep)

        if os.path.exists(dst):
            if verbose:  # pragma: no cover
                print('{} already exists, skipping download'.format(dst))
            continue

        # Create sub-directories, if needed.
        subdirs = os.path.dirname(dst)
        os.makedirs(subdirs, exist_ok=True)

        if not dry_run:  # pragma: no cover
            try:
                src = download_file(url)
                copyfile(src, dst)
            except Exception as exc:
                print('Download failed - {}'.format(str(exc)))
                continue

        file_list.append(dst)
        if verbose:  # pragma: no cover
            print('{} downloaded to {}'.format(url, dst))

    return file_list
async def main(loop):
    """Demonstrate functionality of PyVLX."""
    pyvlx = PyVLX('pyvlx.yaml', loop=loop)
    # Alternative:
    # pyvlx = PyVLX(host="192.168.2.127", password="velux123", loop=loop)

    # Running scenes:
    await pyvlx.load_scenes()
    await pyvlx.scenes["All Windows Closed"].run()

    # Changing position of windows:
    await pyvlx.load_nodes()
    await pyvlx.nodes['Bath'].open()
    await pyvlx.nodes['Bath'].close()
    await pyvlx.nodes['Bath'].set_position(Position(position_percent=45))

    # Changing of on-off switches:
    # await pyvlx.nodes['CoffeeMaker'].set_on()
    # await pyvlx.nodes['CoffeeMaker'].set_off()

    # You can easily rename nodes:
    # await pyvlx.nodes["Window 10"].rename("Window 11")

    await pyvlx.disconnect()
def get_payload(self):
    """Return Payload."""
    if self.password is None:
        raise PyVLXException("password is none")
    if len(self.password) > self.MAX_SIZE:
        raise PyVLXException("password is too long")
    return string_to_bytes(self.password, self.MAX_SIZE)
def add(self, node):
    """Add Node, replace existing node if node with node_id is present."""
    if not isinstance(node, Node):
        raise TypeError()
    for i, j in enumerate(self.__nodes):
        if j.node_id == node.node_id:
            self.__nodes[i] = node
            return
    self.__nodes.append(node)
async def load(self, node_id=None):
    """Load nodes from KLF 200; if no node_id is specified, all nodes are loaded."""
    if node_id is not None:
        await self._load_node(node_id=node_id)
    else:
        await self._load_all_nodes()
async def _load_node(self, node_id):
    """Load single node via API."""
    get_node_information = GetNodeInformation(pyvlx=self.pyvlx, node_id=node_id)
    await get_node_information.do_api_call()
    if not get_node_information.success:
        raise PyVLXException("Unable to retrieve node information")
    notification_frame = get_node_information.notification_frame
    node = convert_frame_to_node(self.pyvlx, notification_frame)
    if node is not None:
        self.add(node)
async def _load_all_nodes(self):
    """Load all nodes via API."""
    get_all_nodes_information = GetAllNodesInformation(pyvlx=self.pyvlx)
    await get_all_nodes_information.do_api_call()
    if not get_all_nodes_information.success:
        raise PyVLXException("Unable to retrieve node information")
    self.clear()
    for notification_frame in get_all_nodes_information.notification_frames:
        node = convert_frame_to_node(self.pyvlx, notification_frame)
        if node is not None:
            self.add(node)
async def handle_frame(self, frame):
    """Handle incoming API frame, return True if this was the expected frame."""
    if isinstance(frame, FrameGetAllNodesInformationConfirmation):
        self.number_of_nodes = frame.number_of_nodes
        # We are still waiting for FrameGetAllNodesInformationNotification
        return False
    if isinstance(frame, FrameGetAllNodesInformationNotification):
        self.notification_frames.append(frame)
    if isinstance(frame, FrameGetAllNodesInformationFinishedNotification):
        if self.number_of_nodes != len(self.notification_frames):
            PYVLXLOG.warning("Number of received nodes does not match expected number")
        self.success = True
        return True
    return False
async def handle_frame(self, frame):
    """Handle incoming API frame, return True if this was the expected frame."""
    if isinstance(frame, FrameGetNodeInformationConfirmation) and frame.node_id == self.node_id:
        # We are still waiting for GetNodeInformationNotification
        return False
    if isinstance(frame, FrameGetNodeInformationNotification) and frame.node_id == self.node_id:
        self.notification_frame = frame
        self.success = True
        return True
    return False
Create loop task. def start(self): """Create loop task.""" self.run_task = self.pyvlx.loop.create_task( self.loop())
Stop heartbeat. async def stop(self): """Stop heartbeat.""" self.stopped = True self.loop_event.set() # Waiting for shutdown of loop() await self.stopped_event.wait()
Pulse every timeout seconds until stopped. async def loop(self): """Pulse every timeout seconds until stopped.""" while not self.stopped: self.timeout_handle = self.pyvlx.connection.loop.call_later( self.timeout_in_seconds, self.loop_timeout) await self.loop_event.wait() if not self.stopped: self.loop_event.clear() await self.pulse() self.cancel_loop_timeout() self.stopped_event.set()
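The three heartbeat methods above follow a common asyncio keep-alive pattern: a timer races against an event that stop() can set early. A minimal standalone sketch of the same pattern follows; the names here are illustrative and not part of pyvlx.

import asyncio

class Heartbeat:
    """Minimal sketch of the timer-vs-event keep-alive pattern used above."""

    def __init__(self, interval=30):
        self.interval = interval
        self.stopped = False
        self.wakeup = asyncio.Event()

    async def pulse(self):
        print("pulse")  # stand-in for the GetState API call

    async def loop(self):
        while not self.stopped:
            # Arm a timer that sets the event after `interval` seconds ...
            handle = asyncio.get_running_loop().call_later(
                self.interval, self.wakeup.set)
            # ... then wait for either the timer or an early stop().
            await self.wakeup.wait()
            self.wakeup.clear()
            handle.cancel()
            if not self.stopped:
                await self.pulse()

    def stop(self):
        self.stopped = True
        self.wakeup.set()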
Send get state request to API to keep the connection alive. async def pulse(self): """Send get state request to API to keep the connection alive.""" get_state = GetState(pyvlx=self.pyvlx) await get_state.do_api_call() if not get_state.success: raise PyVLXException("Unable to send get state.")
Return Payload. def get_payload(self): """Return Payload.""" payload = bytes([self.gateway_state.value, self.gateway_sub_state.value]) payload += bytes(4) # State date, reserved for future use return payload
Init frame from binary data. def from_payload(self, payload): """Init frame from binary data.""" self.gateway_state = GatewayState(payload[0]) self.gateway_sub_state = GatewaySubState(payload[1])
Convert string to bytes and add padding. def string_to_bytes(string, size): """Convert string to bytes and add padding.""" if len(string) > size: raise PyVLXException("string_to_bytes::string_too_large") encoded = bytes(string, encoding='utf-8') return encoded + bytes(size-len(encoded))
Convert bytes to string. def bytes_to_string(raw): """Convert bytes to string.""" ret = bytes() for byte in raw: if byte == 0x00: return ret.decode("utf-8") ret += bytes([byte]) return ret.decode("utf-8")
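A quick round-trip sanity check of the two helpers above; this sketch assumes both functions are in scope (their exact module is not shown here).

padded = string_to_bytes("Bath", 64)      # 4 name bytes + 60 zero bytes of padding
assert len(padded) == 64
assert bytes_to_string(padded) == "Bath"  # decoding stops at the first 0x00 byte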
Return Payload. def get_payload(self): """Return Payload.""" payload = bytes([self.node_id]) payload += bytes([self.state]) payload += bytes(self.current_position.raw) payload += bytes(self.target.raw) payload += bytes(self.current_position_fp1.raw) payload += bytes(self.current_position_fp2.raw) payload += bytes(self.current_position_fp3.raw) payload += bytes(self.current_position_fp4.raw) payload += bytes([self.remaining_time >> 8 & 255, self.remaining_time & 255]) payload += struct.pack(">I", self.timestamp) return payload
Init frame from binary data. def from_payload(self, payload): """Init frame from binary data.""" self.node_id = payload[0] self.state = payload[1] self.current_position = Parameter(payload[2:4]) self.target = Parameter(payload[4:6]) self.current_position_fp1 = Parameter(payload[6:8]) self.current_position_fp2 = Parameter(payload[8:10]) self.current_position_fp3 = Parameter(payload[10:12]) self.current_position_fp4 = Parameter(payload[12:14]) self.remaining_time = payload[14] * 256 + payload[15] # @VELUX: the timestamp appears to be wrong; only the two lower bytes # seem to be transmitted. self.timestamp = struct.unpack(">I", payload[16:20])[0]
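get_payload and from_payload are byte-level inverses; the arithmetic can be checked standalone, with no pyvlx imports needed:

import struct

remaining_time = 0x1234
hi, lo = remaining_time >> 8 & 255, remaining_time & 255
# from_payload recombines the two bytes exactly as get_payload split them:
assert hi * 256 + lo == remaining_time
timestamp = 1500000000
packed = struct.pack(">I", timestamp)  # 4 bytes, big-endian, as in get_payload
assert struct.unpack(">I", packed)[0] == timestamp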
Enable house status monitor. async def house_status_monitor_enable(pyvlx): """Enable house status monitor.""" status_monitor_enable = HouseStatusMonitorEnable(pyvlx=pyvlx) await status_monitor_enable.do_api_call() if not status_monitor_enable.success: raise PyVLXException("Unable to enable house status monitor.")
Disable house status monitor. async def house_status_monitor_disable(pyvlx): """Disable house status monitor.""" status_monitor_disable = HouseStatusMonitorDisable(pyvlx=pyvlx) await status_monitor_disable.do_api_call() if not status_monitor_disable.success: raise PyVLXException("Unable to disable house status monitor.")
Handle incoming API frame, return True if this was the expected frame. async def handle_frame(self, frame): """Handle incoming API frame, return True if this was the expected frame.""" if not isinstance(frame, FrameHouseStatusMonitorEnableConfirmation): return False self.success = True return True
Handle incoming API frame, return True if this was the expected frame. async def handle_frame(self, frame): """Handle incoming API frame, return True if this was the expected frame.""" if not isinstance(frame, FrameHouseStatusMonitorDisableConfirmation): return False self.success = True return True
An 'argument type' for integrations with the argparse module. For more information, see https://docs.python.org/2/library/argparse.html#type Of particular interest to us is this bit: ``type=`` can take any callable that takes a single string argument and returns the converted value I.e., ``type`` can be a function (such as this function) or a class which implements the ``__call__`` method. Example usage of the bitmath.BitmathType argparser type: >>> import bitmath >>> import argparse >>> parser = argparse.ArgumentParser() >>> parser.add_argument("--file-size", type=bitmath.BitmathType) >>> parser.parse_args("--file-size 1337MiB".split()) Namespace(file_size=MiB(1337.0)) Invalid usage includes any input that the bitmath.parse_string function already rejects. Additionally, **UNQUOTED** arguments with spaces in them are rejected (shlex.split used in the following examples to conserve single quotes in the parse_args call): >>> parser = argparse.ArgumentParser() >>> parser.add_argument("--file-size", type=bitmath.BitmathType) >>> import shlex >>> # The following is ACCEPTABLE USAGE: ... >>> parser.parse_args(shlex.split("--file-size '1337 MiB'")) Namespace(file_size=MiB(1337.0)) >>> # The following is INCORRECT USAGE because the string "1337 MiB" is not quoted! ... >>> parser.parse_args(shlex.split("--file-size 1337 MiB")) error: argument --file-size: 1337 can not be parsed into a valid bitmath object def BitmathType(bmstring): """An 'argument type' for integrations with the argparse module. For more information, see https://docs.python.org/2/library/argparse.html#type Of particular interest to us is this bit: ``type=`` can take any callable that takes a single string argument and returns the converted value I.e., ``type`` can be a function (such as this function) or a class which implements the ``__call__`` method. Example usage of the bitmath.BitmathType argparser type: >>> import bitmath >>> import argparse >>> parser = argparse.ArgumentParser() >>> parser.add_argument("--file-size", type=bitmath.BitmathType) >>> parser.parse_args("--file-size 1337MiB".split()) Namespace(file_size=MiB(1337.0)) Invalid usage includes any input that the bitmath.parse_string function already rejects. Additionally, **UNQUOTED** arguments with spaces in them are rejected (shlex.split used in the following examples to conserve single quotes in the parse_args call): >>> parser = argparse.ArgumentParser() >>> parser.add_argument("--file-size", type=bitmath.BitmathType) >>> import shlex >>> # The following is ACCEPTABLE USAGE: ... >>> parser.parse_args(shlex.split("--file-size '1337 MiB'")) Namespace(file_size=MiB(1337.0)) >>> # The following is INCORRECT USAGE because the string "1337 MiB" is not quoted! ... >>> parser.parse_args(shlex.split("--file-size 1337 MiB")) error: argument --file-size: 1337 can not be parsed into a valid bitmath object """ try: argvalue = bitmath.parse_string(bmstring) except ValueError: raise argparse.ArgumentTypeError("'%s' can not be parsed into a valid bitmath object" % bmstring) else: return argvalue
Updates the widget with the current NIST/SI speed. Basically, this calculates the average rate of update and figures out how to make a "pretty" prefix unit def update(self, pbar): """Updates the widget with the current NIST/SI speed. Basically, this calculates the average rate of update and figures out how to make a "pretty" prefix unit""" if pbar.seconds_elapsed < 2e-6 or pbar.currval < 2e-6: scaled = bitmath.Byte() else: speed = pbar.currval / pbar.seconds_elapsed scaled = bitmath.Byte(speed).best_prefix(system=self.system) return scaled.format(self.format)
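Assuming this widget is bitmath.integrations.BitmathFileTransferSpeed (as in the bitmath documentation) and the progressbar package is installed, usage might look like the sketch below; the exact ProgressBar constructor arguments depend on the installed progressbar version.

import progressbar
from bitmath.integrations import BitmathFileTransferSpeed

# Show a human-readable transfer rate (e.g. "4.2 MiB/s") while updating.
widgets = ['Transfer: ', BitmathFileTransferSpeed()]
pbar = progressbar.ProgressBar(widgets=widgets, maxval=1024 * 1024).start()
for i in range(0, 1024 * 1024 + 1, 4096):
    pbar.update(i)
pbar.finish()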
Handle incoming API frame, return True if this was the expected frame. async def handle_frame(self, frame): """Handle incoming API frame, return True if this was the expected frame.""" if isinstance(frame, FrameCommandSendConfirmation) and frame.session_id == self.session_id: if frame.status == CommandSendConfirmationStatus.ACCEPTED: self.success = True return not self.wait_for_completion if isinstance(frame, FrameCommandRemainingTimeNotification) and frame.session_id == self.session_id: # Ignoring FrameCommandRemainingTimeNotification return False if isinstance(frame, FrameCommandRunStatusNotification) and frame.session_id == self.session_id: # The purpose of FrameCommandRunStatusNotification is not yet fully understood. # Ignoring these packets for now return False if isinstance(frame, FrameSessionFinishedNotification) and frame.session_id == self.session_id: return True return False
Construct initiating frame. def request_frame(self): """Construct initiating frame.""" self.session_id = get_new_session_id() return FrameCommandSendRequest(node_ids=[self.node_id], parameter=self.parameter, session_id=self.session_id)
Read configuration file. def read_config(self, path): """Read configuration file.""" PYVLXLOG.info('Reading config file: %s', path) try: with open(path, 'r') as filehandle: doc = yaml.safe_load(filehandle) self.test_configuration(doc, path) self.host = doc['config']['host'] self.password = doc['config']['password'] if 'port' in doc['config']: self.port = doc['config']['port'] except FileNotFoundError as ex: raise PyVLXException('file does not exist: {0}'.format(ex))
Set up apiv2 when using PyQt4 and Python 2. def setup_apiv2(): """ Set up apiv2 when using PyQt4 and Python 2. """ # setup PyQt api to version 2 if sys.version_info[0] == 2: logging.getLogger(__name__).debug( 'setting up SIP API to version 2') import sip try: sip.setapi("QString", 2) sip.setapi("QVariant", 2) except ValueError: logging.getLogger(__name__).critical( "failed to set up sip api to version 2 for PyQt4") raise ImportError('PyQt4')
Auto-detects and uses the first available QT_API by importing the bindings in the following order: 1) PyQt5 2) PyQt4 3) PySide def autodetect(): """ Auto-detects and uses the first available QT_API by importing the bindings in the following order: 1) PyQt5 2) PyQt4 3) PySide """ logging.getLogger(__name__).debug('auto-detecting QT_API') try: logging.getLogger(__name__).debug('trying PyQt5') import PyQt5 os.environ[QT_API] = PYQT5_API[0] logging.getLogger(__name__).debug('imported PyQt5') except ImportError: try: logging.getLogger(__name__).debug('trying PyQt4') setup_apiv2() import PyQt4 os.environ[QT_API] = PYQT4_API[0] logging.getLogger(__name__).debug('imported PyQt4') except ImportError: try: logging.getLogger(__name__).debug('trying PySide') import PySide os.environ[QT_API] = PYSIDE_API[0] logging.getLogger(__name__).debug('imported PySide') except ImportError: raise PythonQtError('No Qt bindings could be found')
Read roller shutter from config. def from_config(cls, pyvlx, item): """Read roller shutter from config.""" name = item['name'] ident = item['id'] subtype = item['subtype'] typeid = item['typeId'] return cls(pyvlx, ident, name, subtype, typeid)
Handle incoming API frame, return True if this was the expected frame. async def handle_frame(self, frame): """Handle incoming API frame, return True if this was the expected frame.""" if isinstance(frame, FrameGetSceneListConfirmation): self.count_scenes = frame.count_scenes if self.count_scenes == 0: self.success = True return True # We are still waiting for FrameGetSceneListNotification(s) return False if isinstance(frame, FrameGetSceneListNotification): self.scenes.extend(frame.scenes) if frame.remaining_scenes != 0: # We are still waiting for FrameGetSceneListNotification(s) return False if self.count_scenes != len(self.scenes): PYVLXLOG.warning("Number of received scenes does not match expected number") self.success = True return True return False
Return Payload. def get_payload(self): """Return Payload.""" ret = bytes([len(self.scenes)]) for number, name in self.scenes: ret += bytes([number]) ret += string_to_bytes(name, 64) ret += bytes([self.remaining_scenes]) return ret
Init frame from binary data. def from_payload(self, payload): """Init frame from binary data.""" number_of_objects = payload[0] self.remaining_scenes = payload[-1] predicted_len = number_of_objects * 65 + 2 if len(payload) != predicted_len: raise PyVLXException('scene_list_notification_wrong_length') self.scenes = [] for i in range(number_of_objects): scene = payload[(i*65+1):(i*65+66)] number = scene[0] name = bytes_to_string(scene[1:]) self.scenes.append((number, name))
Read FITS or ASCII spectrum from a remote location. Parameters ---------- filename : str Spectrum filename. encoding, cache, show_progress See :func:`~astropy.utils.data.get_readable_fileobj`. kwargs : dict Keywords acceptable by :func:`read_fits_spec` (if FITS) or :func:`read_ascii_spec` (if ASCII). Returns ------- header : dict Metadata. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. def read_remote_spec(filename, encoding='binary', cache=True, show_progress=True, **kwargs): """Read FITS or ASCII spectrum from a remote location. Parameters ---------- filename : str Spectrum filename. encoding, cache, show_progress See :func:`~astropy.utils.data.get_readable_fileobj`. kwargs : dict Keywords acceptable by :func:`read_fits_spec` (if FITS) or :func:`read_ascii_spec` (if ASCII). Returns ------- header : dict Metadata. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. """ with get_readable_fileobj(filename, encoding=encoding, cache=cache, show_progress=show_progress) as fd: header, wavelengths, fluxes = read_spec(fd, fname=filename, **kwargs) return header, wavelengths, fluxes
Read FITS or ASCII spectrum. Parameters ---------- filename : str or file pointer Spectrum file name or pointer. fname : str Filename. This is *only* used if ``filename`` is a pointer. kwargs : dict Keywords acceptable by :func:`read_fits_spec` (if FITS) or :func:`read_ascii_spec` (if ASCII). Returns ------- header : dict Metadata. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. Raises ------ synphot.exceptions.SynphotError Read failed. def read_spec(filename, fname='', **kwargs): """Read FITS or ASCII spectrum. Parameters ---------- filename : str or file pointer Spectrum file name or pointer. fname : str Filename. This is *only* used if ``filename`` is a pointer. kwargs : dict Keywords acceptable by :func:`read_fits_spec` (if FITS) or :func:`read_ascii_spec` (if ASCII). Returns ------- header : dict Metadata. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. Raises ------ synphot.exceptions.SynphotError Read failed. """ if isinstance(filename, str): fname = filename elif not fname: # pragma: no cover raise exceptions.SynphotError('Cannot determine filename.') if fname.endswith('fits') or fname.endswith('fit'): read_func = read_fits_spec else: read_func = read_ascii_spec return read_func(filename, **kwargs)
Read ASCII spectrum. ASCII table must have the following columns: #. Wavelength data #. Flux data It can have more than 2 columns but the rest is ignored. Comments are discarded. Parameters ---------- filename : str or file pointer Spectrum file name or pointer. wave_unit, flux_unit : str or `~astropy.units.core.Unit` Wavelength and flux units, which default to Angstrom and FLAM, respectively. kwargs : dict Keywords accepted by :func:`astropy.io.ascii.ui.read`. Returns ------- header : dict This is just an empty dictionary, so returned values are the same as :func:`read_fits_spec`. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. They are set to 'float64' precision. def read_ascii_spec(filename, wave_unit=u.AA, flux_unit=units.FLAM, **kwargs): """Read ASCII spectrum. ASCII table must have the following columns: #. Wavelength data #. Flux data It can have more than 2 columns but the rest is ignored. Comments are discarded. Parameters ---------- filename : str or file pointer Spectrum file name or pointer. wave_unit, flux_unit : str or `~astropy.units.core.Unit` Wavelength and flux units, which default to Angstrom and FLAM, respectively. kwargs : dict Keywords accepted by :func:`astropy.io.ascii.ui.read`. Returns ------- header : dict This is just an empty dictionary, so returned values are the same as :func:`read_fits_spec`. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. They are set to 'float64' precision. """ header = {} dat = ascii.read(filename, **kwargs) wave_unit = units.validate_unit(wave_unit) flux_unit = units.validate_unit(flux_unit) wavelengths = dat.columns[0].data.astype(np.float64) * wave_unit fluxes = dat.columns[1].data.astype(np.float64) * flux_unit return header, wavelengths, fluxes
Read FITS spectrum. Wavelength and flux units are extracted from ``TUNIT1`` and ``TUNIT2`` keywords, respectively, from data table (not primary) header. If these keywords are not present, units are taken from ``wave_unit`` and ``flux_unit`` instead. Parameters ---------- filename : str or file pointer Spectrum file name or pointer. ext: int FITS extension with table data. Default is 1. wave_col, flux_col : str Wavelength and flux column names (case-insensitive). wave_unit, flux_unit : str or `~astropy.units.core.Unit` Wavelength and flux units, which default to Angstrom and FLAM, respectively. These are *only* used if ``TUNIT1`` and ``TUNIT2`` keywords are not present in table (not primary) header. Returns ------- header : dict Primary header only. Extension header is discarded. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. def read_fits_spec(filename, ext=1, wave_col='WAVELENGTH', flux_col='FLUX', wave_unit=u.AA, flux_unit=units.FLAM): """Read FITS spectrum. Wavelength and flux units are extracted from ``TUNIT1`` and ``TUNIT2`` keywords, respectively, from data table (not primary) header. If these keywords are not present, units are taken from ``wave_unit`` and ``flux_unit`` instead. Parameters ---------- filename : str or file pointer Spectrum file name or pointer. ext: int FITS extension with table data. Default is 1. wave_col, flux_col : str Wavelength and flux column names (case-insensitive). wave_unit, flux_unit : str or `~astropy.units.core.Unit` Wavelength and flux units, which default to Angstrom and FLAM, respectively. These are *only* used if ``TUNIT1`` and ``TUNIT2`` keywords are not present in table (not primary) header. Returns ------- header : dict Primary header only. Extension header is discarded. wavelengths, fluxes : `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. """ fs = fits.open(filename) header = dict(fs[str('PRIMARY')].header) wave_dat = fs[ext].data.field(wave_col).copy() flux_dat = fs[ext].data.field(flux_col).copy() fits_wave_unit = fs[ext].header.get('TUNIT1') fits_flux_unit = fs[ext].header.get('TUNIT2') if fits_wave_unit is not None: try: wave_unit = units.validate_unit(fits_wave_unit) except (exceptions.SynphotError, ValueError) as e: # pragma: no cover warnings.warn( '{0} from FITS header is not valid wavelength unit, using ' '{1}: {2}'.format(fits_wave_unit, wave_unit, e), AstropyUserWarning) if fits_flux_unit is not None: try: flux_unit = units.validate_unit(fits_flux_unit) except (exceptions.SynphotError, ValueError) as e: # pragma: no cover warnings.warn( '{0} from FITS header is not valid flux unit, using ' '{1}: {2}'.format(fits_flux_unit, flux_unit, e), AstropyUserWarning) wave_unit = units.validate_unit(wave_unit) flux_unit = units.validate_unit(flux_unit) wavelengths = wave_dat * wave_unit fluxes = flux_dat * flux_unit if isinstance(filename, str): fs.close() return header, wavelengths, fluxes
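A hedged usage sketch, assuming these readers live in synphot.specio and a table-format FITS spectrum exists at the placeholder path:

from synphot import specio

# Units fall back to Angstrom/FLAM when TUNIT1/TUNIT2 are absent.
header, wavelengths, fluxes = specio.read_fits_spec('my_spec.fits', ext=1)
print(wavelengths.unit, fluxes.unit)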
Write FITS spectrum. .. warning:: If data is being written out as single-precision but wavelengths are in double-precision, some rows may be omitted. Parameters ---------- filename : str Output spectrum filename. wavelengths, fluxes : array-like or `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. pri_header, ext_header : dict Metadata to be added to primary and given extension FITS header, respectively. Do *not* use this to define column names and units. overwrite : bool Overwrite existing file. Defaults to `False`. trim_zero : bool Remove rows with zero-flux. Default is `True`. pad_zero_ends : bool Pad each end of the spectrum with a row of zero flux like :func:`synphot.spectrum.BaseSpectrum.taper`. This is unnecessary if input is already tapered. precision : {`None`, 'single', 'double'} Precision of values in output file. Use native flux precision by default. epsilon : float Single-precision :math:`\\epsilon` value, taken from IRAF SYNPHOT FAQ. This is the minimum separation in wavelengths necessary for SYNPHOT to read the entries as distinct single-precision numbers. This is *only* used if ``precision='single'`` but data are in double-precision. Default from the FAQ is 0.00032. wave_col, flux_col : str Wavelength and flux column names (case-insensitive). wave_unit, flux_unit : str or `~astropy.units.core.Unit` Wavelength and flux units, which default to Angstrom and FLAM, respectively. These are *only* used if wavelengths and fluxes are not in astropy quantities. Raises ------ synphot.exceptions.SynphotError Wavelengths and fluxes have different shapes or value precision is not supported. def write_fits_spec(filename, wavelengths, fluxes, pri_header={}, ext_header={}, overwrite=False, trim_zero=True, pad_zero_ends=True, precision=None, epsilon=0.00032, wave_col='WAVELENGTH', flux_col='FLUX', wave_unit=u.AA, flux_unit=units.FLAM): """Write FITS spectrum. .. warning:: If data is being written out as single-precision but wavelengths are in double-precision, some rows may be omitted. Parameters ---------- filename : str Output spectrum filename. wavelengths, fluxes : array-like or `~astropy.units.quantity.Quantity` Wavelength and flux of the spectrum. pri_header, ext_header : dict Metadata to be added to primary and given extension FITS header, respectively. Do *not* use this to define column names and units. overwrite : bool Overwrite existing file. Defaults to `False`. trim_zero : bool Remove rows with zero-flux. Default is `True`. pad_zero_ends : bool Pad each end of the spectrum with a row of zero flux like :func:`synphot.spectrum.BaseSpectrum.taper`. This is unnecessary if input is already tapered. precision : {`None`, 'single', 'double'} Precision of values in output file. Use native flux precision by default. epsilon : float Single-precision :math:`\\epsilon` value, taken from IRAF SYNPHOT FAQ. This is the minimum separation in wavelengths necessary for SYNPHOT to read the entries as distinct single-precision numbers. This is *only* used if ``precision='single'`` but data are in double-precision. Default from the FAQ is 0.00032. wave_col, flux_col : str Wavelength and flux column names (case-insensitive). wave_unit, flux_unit : str or `~astropy.units.core.Unit` Wavelength and flux units, which default to Angstrom and FLAM, respectively. These are *only* used if wavelengths and fluxes are not in astropy quantities. Raises ------ synphot.exceptions.SynphotError Wavelengths and fluxes have different shapes or value precision is not supported.
""" if isinstance(wavelengths, u.Quantity): wave_unit = wavelengths.unit wave_value = wavelengths.value else: wave_value = wavelengths if isinstance(fluxes, u.Quantity): flux_unit = fluxes.unit flux_value = fluxes.value else: flux_value = fluxes wave_unit = units.validate_unit(wave_unit).to_string().upper() flux_unit = units.validate_unit(flux_unit).to_string().upper() if wave_value.shape != flux_value.shape: raise exceptions.SynphotError( 'Wavelengths have shape {0} but fluxes have shape {1}'.format( wave_value.shape, flux_value.shape)) # Remove rows with zero flux. Putting this before precision logic to avoid # keeping duplicate wavelengths with zero flux. if trim_zero: idx = np.where(flux_value != 0) wave_value = wave_value[idx] flux_value = flux_value[idx] n_thrown = wave_value.size - len(idx[0]) if n_thrown != 0: log.info('{0} zero-flux rows are thrown out'.format(n_thrown)) # Only these Numpy types are supported # 'f' np.float32 # 'd' np.float64 pcodes = {'d': 'D', 'f': 'E'} # Numpy to FITS conversion # Use native flux precision if precision is None: precision = flux_value.dtype.char if precision not in pcodes: raise exceptions.SynphotError('flux is not float32 or float64') # Use user specified precision else: precision = precision.lower() if precision == 'single': precision = 'f' elif precision == 'double': precision = 'd' else: raise exceptions.SynphotError( 'precision must be single or double') # Now check wavelength precision wave_precision = wave_value.dtype.char if wave_precision not in pcodes: raise exceptions.SynphotError( 'wavelength is not float32 or float64') # If wavelength is double-precision but data is written out as # single-precision, wavelength values have to be recalculated # so that they will still be sorted with no duplicates. if wave_precision == 'd' and precision == 'f': orig_size = wave_value.size idx = np.where(np.abs(wave_value[1:] - wave_value[:-1]) > epsilon) wave_value = np.append(wave_value[idx], wave_value[-1]) flux_value = np.append(flux_value[idx], flux_value[-1]) n_thrown = orig_size - wave_value.size if n_thrown != 0: warnings.warn( '{0} rows are thrown out in converting wavelengths from ' 'double- to single-precision'.format(n_thrown), AstropyUserWarning) # Keep one zero at each end if pad_zero_ends: w1 = wave_value[0] ** 2 / wave_value[1] w2 = wave_value[-1] ** 2 / wave_value[-2] wave_value = np.insert(wave_value, [0, wave_value.size], [w1, w2]) flux_value = np.insert(flux_value, [0, flux_value.size], [0.0, 0.0]) # Construct the columns cw = fits.Column(name=wave_col, array=wave_value, unit=wave_unit, format=pcodes[precision]) cf = fits.Column(name=flux_col, array=flux_value, unit=flux_unit, format=pcodes[precision]) # These are written to the primary header: # 1. Filename # 2. Origin # 3. User dictionary (can overwrite defaults) hdr_hdu = fits.PrimaryHDU() hdr_hdu.header['filename'] = (os.path.basename(filename), 'name of file') hdr_hdu.header['origin'] = ('synphot', 'Version {0}'.format(__version__)) for key, val in pri_header.items(): hdr_hdu.header[key] = val # Make the extension HDU and include user dictionary in extension header. tab_hdu = fits.BinTableHDU.from_columns(fits.ColDefs([cw, cf])) for key, val in ext_header.items(): tab_hdu.header[key] = val # Write to file hdulist = fits.HDUList([hdr_hdu]) hdulist.append(tab_hdu) hdulist.writeto(filename, overwrite=overwrite)
Flux equivalencies between PHOTLAM and VEGAMAG. Parameters ---------- wav : `~astropy.units.quantity.Quantity` Quantity associated with values being converted (e.g., wavelength or frequency). vegaflux : `~astropy.units.quantity.Quantity` Flux of Vega at ``wav``. Returns ------- eqv : list List of equivalencies. def spectral_density_vega(wav, vegaflux): """Flux equivalencies between PHOTLAM and VEGAMAG. Parameters ---------- wav : `~astropy.units.quantity.Quantity` Quantity associated with values being converted (e.g., wavelength or frequency). vegaflux : `~astropy.units.quantity.Quantity` Flux of Vega at ``wav``. Returns ------- eqv : list List of equivalencies. """ vega_photlam = vegaflux.to( PHOTLAM, equivalencies=u.spectral_density(wav)).value def converter(x): """Set nan/inf to -99 mag.""" val = -2.5 * np.log10(x / vega_photlam) result = np.zeros(val.shape, dtype=np.float64) - 99 mask = np.isfinite(val) if result.ndim > 0: result[mask] = val[mask] elif mask: result = np.asarray(val) return result def iconverter(x): return vega_photlam * 10**(-0.4 * x) return [(PHOTLAM, VEGAMAG, converter, iconverter)]
Flux equivalencies between PHOTLAM and count/OBMAG. Parameters ---------- wav : `~astropy.units.quantity.Quantity` Quantity associated with values being converted (e.g., wavelength or frequency). area : `~astropy.units.quantity.Quantity` Telescope collecting area. Returns ------- eqv : list List of equivalencies. def spectral_density_count(wav, area): """Flux equivalencies between PHOTLAM and count/OBMAG. Parameters ---------- wav : `~astropy.units.quantity.Quantity` Quantity associated with values being converted (e.g., wavelength or frequency). area : `~astropy.units.quantity.Quantity` Telescope collecting area. Returns ------- eqv : list List of equivalencies. """ from .binning import calculate_bin_widths, calculate_bin_edges wav = wav.to(u.AA, equivalencies=u.spectral()) area = area.to(AREA) bin_widths = calculate_bin_widths(calculate_bin_edges(wav)) factor = bin_widths.value * area.value def converter_count(x): return x * factor def iconverter_count(x): return x / factor def converter_obmag(x): return -2.5 * np.log10(x * factor) def iconverter_obmag(x): return 10**(-0.4 * x) / factor return [(PHOTLAM, u.count, converter_count, iconverter_count), (PHOTLAM, OBMAG, converter_obmag, iconverter_obmag)]
Perform conversion for :ref:`supported flux units <synphot-flux-units>`. Parameters ---------- wavelengths : array-like or `~astropy.units.quantity.Quantity` Wavelength values. If not a Quantity, assumed to be in Angstrom. fluxes : array-like or `~astropy.units.quantity.Quantity` Flux values. If not a Quantity, assumed to be in PHOTLAM. out_flux_unit : str or `~astropy.units.core.Unit` Output flux unit. area : float or `~astropy.units.quantity.Quantity` Area that fluxes cover. If not a Quantity, assumed to be in :math:`cm^{2}`. This value *must* be provided for conversions involving OBMAG and count, otherwise it is not needed. vegaspec : `~synphot.spectrum.SourceSpectrum` Vega spectrum from :func:`~synphot.spectrum.SourceSpectrum.from_vega`. This is *only* used for conversions involving VEGAMAG. Returns ------- out_flux : `~astropy.units.quantity.Quantity` Converted flux values. Raises ------ astropy.units.core.UnitsError Conversion failed. synphot.exceptions.SynphotError Area or Vega spectrum is not given when needed. def convert_flux(wavelengths, fluxes, out_flux_unit, **kwargs): """Perform conversion for :ref:`supported flux units <synphot-flux-units>`. Parameters ---------- wavelengths : array-like or `~astropy.units.quantity.Quantity` Wavelength values. If not a Quantity, assumed to be in Angstrom. fluxes : array-like or `~astropy.units.quantity.Quantity` Flux values. If not a Quantity, assumed to be in PHOTLAM. out_flux_unit : str or `~astropy.units.core.Unit` Output flux unit. area : float or `~astropy.units.quantity.Quantity` Area that fluxes cover. If not a Quantity, assumed to be in :math:`cm^{2}`. This value *must* be provided for conversions involving OBMAG and count, otherwise it is not needed. vegaspec : `~synphot.spectrum.SourceSpectrum` Vega spectrum from :func:`~synphot.spectrum.SourceSpectrum.from_vega`. This is *only* used for conversions involving VEGAMAG. Returns ------- out_flux : `~astropy.units.quantity.Quantity` Converted flux values. Raises ------ astropy.units.core.UnitsError Conversion failed. synphot.exceptions.SynphotError Area or Vega spectrum is not given when needed. """ if not isinstance(fluxes, u.Quantity): fluxes = fluxes * PHOTLAM out_flux_unit = validate_unit(out_flux_unit) out_flux_unit_name = out_flux_unit.to_string() in_flux_unit_name = fluxes.unit.to_string() # No conversion necessary if in_flux_unit_name == out_flux_unit_name: return fluxes in_flux_type = fluxes.unit.physical_type out_flux_type = out_flux_unit.physical_type # Wavelengths must be a Quantity if not isinstance(wavelengths, u.Quantity): wavelengths = wavelengths * u.AA eqv = u.spectral_density(wavelengths) # Use built-in astropy equivalencies try: out_flux = fluxes.to(out_flux_unit, eqv) # Use PHOTLAM as in-between unit except u.UnitConversionError: # Convert input unit to PHOTLAM if fluxes.unit == PHOTLAM: flux_photlam = fluxes elif in_flux_type != 'unknown': flux_photlam = fluxes.to(PHOTLAM, eqv) else: flux_photlam = _convert_flux( wavelengths, fluxes, PHOTLAM, **kwargs) # Convert PHOTLAM to output unit if out_flux_unit == PHOTLAM: out_flux = flux_photlam elif out_flux_type != 'unknown': out_flux = flux_photlam.to(out_flux_unit, eqv) else: out_flux = _convert_flux( wavelengths, flux_photlam, out_flux_unit, **kwargs) return out_flux
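For example, converting PHOTLAM to FLAM needs neither an area nor a Vega spectrum (a short sketch using names from synphot.units):

from astropy import units as u
from synphot.units import convert_flux, PHOTLAM, FLAM

wave = [1000.0, 2000.0] * u.AA
flux = [1.0, 1.0] * PHOTLAM
print(convert_flux(wave, flux, FLAM))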
Flux conversion for PHOTLAM <-> X. def _convert_flux(wavelengths, fluxes, out_flux_unit, area=None, vegaspec=None): """Flux conversion for PHOTLAM <-> X.""" flux_unit_names = (fluxes.unit.to_string(), out_flux_unit.to_string()) if PHOTLAM.to_string() not in flux_unit_names: raise exceptions.SynphotError( 'PHOTLAM must be one of the conversion units but got ' '{0}.'.format(flux_unit_names)) # VEGAMAG if VEGAMAG.to_string() in flux_unit_names: from .spectrum import SourceSpectrum if not isinstance(vegaspec, SourceSpectrum): raise exceptions.SynphotError('Vega spectrum is missing.') flux_vega = vegaspec(wavelengths) out_flux = fluxes.to( out_flux_unit, equivalencies=spectral_density_vega(wavelengths, flux_vega)) # OBMAG or count elif (u.count in (fluxes.unit, out_flux_unit) or OBMAG.to_string() in flux_unit_names): if area is None: raise exceptions.SynphotError( 'Area is compulsory for conversion involving count or OBMAG.') elif not isinstance(area, u.Quantity): area = area * AREA out_flux = fluxes.to( out_flux_unit, equivalencies=spectral_density_count(wavelengths, area)) else: raise u.UnitsError('{0} and {1} are not convertible'.format( fluxes.unit, out_flux_unit)) return out_flux
Validate unit. To be compatible with existing SYNPHOT data files: * 'angstroms' and 'inversemicrons' are accepted although unrecognized by astropy units * 'transmission', 'extinction', and 'emissivity' are converted to astropy dimensionless unit Parameters ---------- input_unit : str or `~astropy.units.core.Unit` Unit to validate. Returns ------- output_unit : `~astropy.units.core.Unit` Validated unit. Raises ------ synphot.exceptions.SynphotError Invalid unit. def validate_unit(input_unit): """Validate unit. To be compatible with existing SYNPHOT data files: * 'angstroms' and 'inversemicrons' are accepted although unrecognized by astropy units * 'transmission', 'extinction', and 'emissivity' are converted to astropy dimensionless unit Parameters ---------- input_unit : str or `~astropy.units.core.Unit` Unit to validate. Returns ------- output_unit : `~astropy.units.core.Unit` Validated unit. Raises ------ synphot.exceptions.SynphotError Invalid unit. """ if isinstance(input_unit, str): input_unit_lowcase = input_unit.lower() # Backward-compatibility if input_unit_lowcase == 'angstroms': output_unit = u.AA elif input_unit_lowcase == 'inversemicrons': output_unit = u.micron ** -1 elif input_unit_lowcase in ('transmission', 'extinction', 'emissivity'): output_unit = THROUGHPUT elif input_unit_lowcase == 'jy': output_unit = u.Jy # Work around mag unit limitations elif input_unit_lowcase in ('stmag', 'mag(st)'): output_unit = u.STmag elif input_unit_lowcase in ('abmag', 'mag(ab)'): output_unit = u.ABmag else: try: # astropy.units is case-sensitive output_unit = u.Unit(input_unit) except ValueError: # synphot is case-insensitive output_unit = u.Unit(input_unit_lowcase) elif isinstance(input_unit, (u.UnitBase, u.LogUnit)): output_unit = input_unit else: raise exceptions.SynphotError( '{0} must be a recognized string or ' 'astropy.units.core.Unit'.format(input_unit)) return output_unit
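A few doctest-style checks of the backward-compatibility behavior described above (assuming the function is importable from synphot.units):

from astropy import units as u
from synphot.units import validate_unit

assert validate_unit('angstroms') == u.AA                        # legacy spelling
assert validate_unit('transmission').physical_type == 'dimensionless'
assert validate_unit(u.Jy) is u.Jy                               # Unit passes through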
Like :func:`validate_unit` but specific to wavelength. def validate_wave_unit(wave_unit): """Like :func:`validate_unit` but specific to wavelength.""" output_unit = validate_unit(wave_unit) unit_type = output_unit.physical_type if unit_type not in ('length', 'wavenumber', 'frequency'): raise exceptions.SynphotError( 'wavelength physical type is not length, wave number, or ' 'frequency: {0}'.format(unit_type)) return output_unit
Validate quantity (value and unit). .. note:: For flux conversion, use :func:`convert_flux` instead. Parameters ---------- input_value : number, array-like, or `~astropy.units.quantity.Quantity` Quantity to validate. If not a Quantity, assumed to be already in output unit. output_unit : str or `~astropy.units.core.Unit` Output quantity unit. equivalencies : list of equivalence pairs, optional See `astropy.units`. Returns ------- output_value : `~astropy.units.quantity.Quantity` Validated quantity in given unit. def validate_quantity(input_value, output_unit, equivalencies=[]): """Validate quantity (value and unit). .. note:: For flux conversion, use :func:`convert_flux` instead. Parameters ---------- input_value : number, array-like, or `~astropy.units.quantity.Quantity` Quantity to validate. If not a Quantity, assumed to be already in output unit. output_unit : str or `~astropy.units.core.Unit` Output quantity unit. equivalencies : list of equivalence pairs, optional See `astropy.units`. Returns ------- output_value : `~astropy.units.quantity.Quantity` Validated quantity in given unit. """ output_unit = validate_unit(output_unit) if isinstance(input_value, u.Quantity): output_value = input_value.to(output_unit, equivalencies=equivalencies) else: output_value = input_value * output_unit return output_value
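For instance:

from astropy import units as u
from synphot.units import validate_quantity

assert validate_quantity(5, u.AA).unit == u.AA        # bare number: unit assumed
assert validate_quantity(1 * u.nm, u.AA).value == 10  # Quantity: converted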
Add device. def add(self, device): """Add device.""" if not isinstance(device, Device): raise TypeError() self.__devices.append(device)
Import data from JSON response. def data_import(self, json_response): """Import data from JSON response.""" if 'data' not in json_response: raise PyVLXException('no element data found: {0}'.format( json.dumps(json_response))) data = json_response['data'] for item in data: if 'category' not in item: raise PyVLXException('no element category: {0}'.format( json.dumps(item))) category = item['category'] if category == 'Window opener': self.load_window_opener(item) elif category in ['Roller shutter', 'Dual Shutter']: self.load_roller_shutter(item) elif category in ['Blind']: self.load_blind(item) else: self.pyvlx.logger.warning( 'Could not parse product: %s', category)
Load window opener from JSON. def load_window_opener(self, item): """Load window opener from JSON.""" window = Window.from_config(self.pyvlx, item) self.add(window)
Load roller shutter from JSON. def load_roller_shutter(self, item): """Load roller shutter from JSON.""" rollershutter = RollerShutter.from_config(self.pyvlx, item) self.add(rollershutter)
Load blind from JSON. def load_blind(self, item): """Load blind from JSON.""" blind = Blind.from_config(self.pyvlx, item) self.add(blind)
Return tuple containing columns and rows of controlling terminal, trying harder than shutil.get_terminal_size to find a tty before returning fallback. Theoretically, stdout, stderr, and stdin could all be different ttys that could cause us to get the wrong measurements (instead of using the fallback) but the much more common case is that IO is piped. def get_terminal_size(fallback=(80, 24)): """ Return tuple containing columns and rows of controlling terminal, trying harder than shutil.get_terminal_size to find a tty before returning fallback. Theoretically, stdout, stderr, and stdin could all be different ttys that could cause us to get the wrong measurements (instead of using the fallback) but the much more common case is that IO is piped. """ for stream in [sys.__stdout__, sys.__stderr__, sys.__stdin__]: try: # Make WINSIZE call to terminal data = fcntl.ioctl(stream.fileno(), TIOCGWINSZ, b"\x00\x00\x00\x00") except OSError: pass else: # Unpack two shorts from ioctl call lines, columns = struct.unpack("hh", data) break else: columns, lines = fallback return columns, lines
Run checks on self.files, printing diff of styled/unstyled output to stdout. def run_diff(self): """ Run checks on self.files, printing diff of styled/unstyled output to stdout. """ files = tuple(self.files) # Use same header as more. header, footer = (termcolor.colored("{0}\n{{}}\n{0}\n".format( ":" * 14), "cyan"), "\n") if len(files) > 1 else ("", "") for file in files: print(header.format(file), end="") try: results = self._check(file) except Error as e: termcolor.cprint(e.msg, "yellow", file=sys.stderr) continue # Display results if results.diffs: print() print(*self.diff(results.original, results.styled), sep="\n") print() conjunction = "And" else: termcolor.cprint("Looks good!", "green") conjunction = "But" if results.diffs: for type, c in sorted(self._warn_chars): color, verb = ("on_green", "insert") if type == "+" else ("on_red", "delete") termcolor.cprint(c, None, color, end="") termcolor.cprint(" means that you should {} a {}.".format( verb, "newline" if c == "\\n" else "tab"), "yellow") if results.comment_ratio < results.COMMENT_MIN: termcolor.cprint("{} consider adding more comments!".format(conjunction), "yellow") if (results.comment_ratio < results.COMMENT_MIN or self._warn_chars) and results.diffs: print()
Run checks on self.files, printing json object containing information relevant to the CS50 IDE plugin at the end. def run_json(self): """ Run checks on self.files, printing json object containing information relevant to the CS50 IDE plugin at the end. """ checks = {} for file in self.files: try: results = self._check(file) except Error as e: checks[file] = { "error": e.msg } else: checks[file] = { "score": results.score, "comments": results.comment_ratio >= results.COMMENT_MIN, "diff": "<pre>{}</pre>".format("\n".join(self.html_diff(results.original, results.styled))), } json.dump(checks, sys.stdout, indent=4) print()
Run checks on self.files, printing raw percentage to stdout. def run_score(self): """ Run checks on self.files, printing raw percentage to stdout. """ diffs = 0 lines = 0 for file in self.files: try: results = self._check(file) except Error as e: termcolor.cprint(e.msg, "yellow", file=sys.stderr) continue diffs += results.diffs lines += results.lines try: print(max(1 - diffs / lines, 0.0)) except ZeroDivisionError: print(0.0)
Run appropriate check based on `file`'s extension and return it, otherwise raise an Error def _check(self, file): """ Run appropriate check based on `file`'s extension and return it, otherwise raise an Error """ if not os.path.exists(file): raise Error("file \"{}\" not found".format(file)) _, extension = os.path.splitext(file) try: check = self.extension_map[extension[1:]] except KeyError: magic_type = magic.from_file(file) for name, cls in self.magic_map.items(): if name in magic_type: check = cls break else: raise Error("unknown file type \"{}\", skipping...".format(file)) try: with open(file) as f: code = "\n".join(line.rstrip() for line in f) except UnicodeDecodeError: raise Error("file does not seem to contain text, skipping...") # Ensure we don't warn about adding trailing newline try: if code[-1] != '\n': code += '\n' except IndexError: pass return check(code)
Returns a generator yielding the side-by-side diff of `old` and `new`. def split_diff(old, new): """ Returns a generator yielding the side-by-side diff of `old` and `new`. """ return map(lambda l: l.rstrip(), icdiff.ConsoleDiff(cols=COLUMNS).make_table(old.splitlines(), new.splitlines()))
Returns a generator yielding a unified diff between `old` and `new`. def unified(old, new): """ Returns a generator yielding a unified diff between `old` and `new`. """ for diff in difflib.ndiff(old.splitlines(), new.splitlines()): if diff[0] == " ": yield diff elif diff[0] == "?": continue else: yield termcolor.colored(diff, "red" if diff[0] == "-" else "green", attrs=["bold"])
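A usage sketch for the generator above (assumes `unified` and the termcolor dependency are in scope); removed lines print red, added lines green:

old = "int main() {\nreturn 0;\n}"
new = "int main(void)\n{\n    return 0;\n}"
for line in unified(old, new):
    print(line)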
Return HTML formatted character-based diff between old and new (used for CS50 IDE). def html_diff(self, old, new): """ Return HTML formatted character-based diff between old and new (used for CS50 IDE). """ def html_transition(old_type, new_type): tags = [] for tag in [("/", old_type), ("", new_type)]: if tag[1] not in ["+", "-"]: continue tags.append("<{}{}>".format(tag[0], "ins" if tag[1] == "+" else "del")) return "".join(tags) # html.escape replaces cgi.escape, which was removed in Python 3.8 return self._char_diff(old, new, html_transition, fmt=html.escape)
Return color-coded character-based diff between `old` and `new`. def char_diff(self, old, new): """ Return color-coded character-based diff between `old` and `new`. """ def color_transition(old_type, new_type): new_color = termcolor.colored("", None, "on_red" if new_type == "-" else "on_green" if new_type == "+" else None) return "{}{}".format(termcolor.RESET, new_color[:-len(termcolor.RESET)]) return self._char_diff(old, new, color_transition)
Returns a char-based diff between `old` and `new` where each character is formatted by `fmt` and transitions between blocks are determined by `transition`. def _char_diff(self, old, new, transition, fmt=lambda c: c): """ Returns a char-based diff between `old` and `new` where each character is formatted by `fmt` and transitions between blocks are determined by `transition`. """ differ = difflib.ndiff(old, new) # Type of difference. dtype = None # Buffer for current line. line = [] while True: # Get next diff or None if we're at the end. d = next(differ, (None,)) if d[0] != dtype: line += transition(dtype, d[0]) dtype = d[0] if dtype is None: break if d[2] == "\n": if dtype != " ": self._warn_chars.add((dtype, "\\n")) # Show added/removed newlines. line += [fmt(r"\n"), transition(dtype, " ")] # Don't yield a line if we are removing a newline if dtype != "-": yield "".join(line) line.clear() line.append(transition(" ", dtype)) elif dtype != " " and d[2] == "\t": # Show added/removed tabs. line.append(fmt("\\t")) self._warn_chars.add((dtype, "\\t")) else: line.append(fmt(d[2])) # Flush buffer before quitting. last = "".join(line) # Only print last line if it contains non-ANSI characters. if re.sub(r"\x1b[^m]*m", "", last): yield last
Count lines of code (by default ignores empty lines, but child could override to do more). def count_lines(self, code): """ Count lines of code (by default ignores empty lines, but child could override to do more). """ return sum(bool(line.strip()) for line in code.splitlines())
Run `command` passing it stdin from `input`, throwing a DependencyError if command is not found. Throws Error if exit code of command is not `exit` (unless `exit` is None). def run(command, input=None, exit=0, shell=False): """ Run `command` passing it stdin from `input`, throwing a DependencyError if command is not found. Throws Error if exit code of command is not `exit` (unless `exit` is None). """ if isinstance(input, str): input = input.encode() # Only pipe stdin if we have input to pipe. stdin = {} if input is None else {"stdin": subprocess.PIPE} try: child = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **stdin) except FileNotFoundError as e: # Extract name of command. name = command.split(' ', 1)[0] if isinstance(command, str) else command[0] raise DependencyError(name) stdout, _ = child.communicate(input=input) if exit is not None and child.returncode != exit: raise Error("failed to stylecheck code") return stdout.decode()
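A usage sketch, assuming `run` and its Error/DependencyError classes are in scope; `cat` stands in for a real formatter such as clang-format:

styled = run(["cat"], input="int main(){}\n")
assert styled == "int main(){}\n"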
Create and return frame from raw bytes. def frame_from_raw(raw): """Create and return frame from raw bytes.""" command, payload = extract_from_frame(raw) frame = create_frame(command) if frame is None: PYVLXLOG.warning("Command %s not implemented, raw: %s", command, ":".join("{:02x}".format(c) for c in raw)) return None frame.validate_payload_len(payload) frame.from_payload(payload) return frame
Create and return empty Frame from Command. def create_frame(command): """Create and return empty Frame from Command.""" frame_classes = { Command.GW_ERROR_NTF: FrameErrorNotification, Command.GW_COMMAND_SEND_REQ: FrameCommandSendRequest, Command.GW_COMMAND_SEND_CFM: FrameCommandSendConfirmation, Command.GW_COMMAND_RUN_STATUS_NTF: FrameCommandRunStatusNotification, Command.GW_COMMAND_REMAINING_TIME_NTF: FrameCommandRemainingTimeNotification, Command.GW_SESSION_FINISHED_NTF: FrameSessionFinishedNotification, Command.GW_PASSWORD_ENTER_REQ: FramePasswordEnterRequest, Command.GW_PASSWORD_ENTER_CFM: FramePasswordEnterConfirmation, Command.GW_CS_DISCOVER_NODES_REQ: FrameDiscoverNodesRequest, Command.GW_CS_DISCOVER_NODES_CFM: FrameDiscoverNodesConfirmation, Command.GW_CS_DISCOVER_NODES_NTF: FrameDiscoverNodesNotification, Command.GW_GET_SCENE_LIST_REQ: FrameGetSceneListRequest, Command.GW_GET_SCENE_LIST_CFM: FrameGetSceneListConfirmation, Command.GW_GET_SCENE_LIST_NTF: FrameGetSceneListNotification, Command.GW_GET_NODE_INFORMATION_REQ: FrameGetNodeInformationRequest, Command.GW_GET_NODE_INFORMATION_CFM: FrameGetNodeInformationConfirmation, Command.GW_GET_NODE_INFORMATION_NTF: FrameGetNodeInformationNotification, Command.GW_GET_ALL_NODES_INFORMATION_REQ: FrameGetAllNodesInformationRequest, Command.GW_GET_ALL_NODES_INFORMATION_CFM: FrameGetAllNodesInformationConfirmation, Command.GW_GET_ALL_NODES_INFORMATION_NTF: FrameGetAllNodesInformationNotification, Command.GW_GET_ALL_NODES_INFORMATION_FINISHED_NTF: FrameGetAllNodesInformationFinishedNotification, Command.GW_ACTIVATE_SCENE_REQ: FrameActivateSceneRequest, Command.GW_ACTIVATE_SCENE_CFM: FrameActivateSceneConfirmation, Command.GW_GET_VERSION_REQ: FrameGetVersionRequest, Command.GW_GET_VERSION_CFM: FrameGetVersionConfirmation, Command.GW_GET_PROTOCOL_VERSION_REQ: FrameGetProtocolVersionRequest, Command.GW_GET_PROTOCOL_VERSION_CFM: FrameGetProtocolVersionConfirmation, Command.GW_SET_NODE_NAME_REQ: FrameSetNodeNameRequest, Command.GW_SET_NODE_NAME_CFM: FrameSetNodeNameConfirmation, Command.GW_NODE_INFORMATION_CHANGED_NTF: FrameNodeInformationChangedNotification, Command.GW_GET_STATE_REQ: FrameGetStateRequest, Command.GW_GET_STATE_CFM: FrameGetStateConfirmation, Command.GW_SET_UTC_REQ: FrameSetUTCRequest, Command.GW_SET_UTC_CFM: FrameSetUTCConfirmation, Command.GW_ACTIVATION_LOG_UPDATED_NTF: FrameActivationLogUpdatedNotification, Command.GW_HOUSE_STATUS_MONITOR_ENABLE_REQ: FrameHouseStatusMonitorEnableRequest, Command.GW_HOUSE_STATUS_MONITOR_ENABLE_CFM: FrameHouseStatusMonitorEnableConfirmation, Command.GW_HOUSE_STATUS_MONITOR_DISABLE_REQ: FrameHouseStatusMonitorDisableRequest, Command.GW_HOUSE_STATUS_MONITOR_DISABLE_CFM: FrameHouseStatusMonitorDisableConfirmation, Command.GW_NODE_STATE_POSITION_CHANGED_NTF: FrameNodeStatePositionChangedNotification, } frame_class = frame_classes.get(command) if frame_class is None: return None return frame_class()
Handle incoming API frame, return True if this was the expected frame. async def handle_frame(self, frame): """Handle incoming API frame, return True if this was the expected frame.""" if not isinstance(frame, FramePasswordEnterConfirmation): return False if frame.status == PasswordEnterConfirmationStatus.FAILED: PYVLXLOG.warning('Failed to authenticate with password "%s****"', self.password[:2]) self.success = False if frame.status == PasswordEnterConfirmationStatus.SUCCESSFUL: self.success = True return True
Return Payload. def get_payload(self): """Return Payload.""" return bytes( [self.major_version >> 8 & 255, self.major_version & 255, self.minor_version >> 8 & 255, self.minor_version & 255])
Init frame from binary data. def from_payload(self, payload): """Init frame from binary data.""" self.major_version = payload[0] * 256 + payload[1] self.minor_version = payload[2] * 256 + payload[3]
Handle data received. def data_received(self, data): """Handle data received.""" self.tokenizer.feed(data) while self.tokenizer.has_tokens(): raw = self.tokenizer.get_next_token() frame = frame_from_raw(raw) if frame is not None: self.frame_received_cb(frame)