docstring | function | __index_level_0__
|---|---|---|
Plots one or more spectra stacked in subplots sharing the same x-axis.
Args:
ss: list of Spectrum objects
title=None: window title
num_rows=None: (optional) number of rows for subplot grid. If not passed,
num_rows will be the number of plots, and the number of columns will be 1.
If passed, number of columns is calculated automatically.
setup: PlotSpectrumSetup object
|
def plot_spectra_stacked(ss, title=None, num_rows=None, setup=_default_setup):
draw_spectra_stacked(ss, title, num_rows, setup)
plt.show()
| 920,663
|
Plots one or more spectra in the same plot.
Args:
ss: list of Spectrum objects
title=None: window title
setup: PlotSpectrumSetup object
|
def plot_spectra_overlapped(ss, title=None, setup=_default_setup):
plt.figure()
draw_spectra_overlapped(ss, title, setup)
plt.show()
| 920,664
|
Plots spectra, overlapped, in small wavelength intervals into a PDF file,
one interval per page of the PDF file.
Args:
ss: list of Spectrum objects
aint: wavelength interval for each plot
pdf_filename: name of output file
setup: PlotSpectrumSetup object
**Note** overrides setup.fmt_xlabel; leaves y-label and title blank
|
def plot_spectra_pieces_pdf(ss, aint=10, pdf_filename='pieces.pdf', setup=_default_setup):
import f311.explorer as ex
xmin, xmax, ymin_, ymax, _, yspan = calc_max_min(ss)
ymin = ymin_ if setup.ymin is None else setup.ymin
num_pages = int(math.ceil((xmax-xmin)/aint)) # rightmost point may be left out...or not
# num_spectra = len(ss)
a99.format_BLB()
pdf = matplotlib.backends.backend_pdf.PdfPages(pdf_filename)
logger = a99.get_python_logger()
for h in range(num_pages):
fig = plt.figure()
lambda0 = xmin+h*aint
lambda1 = lambda0+aint
logger.info("Printing page {0:d}/{1:d} ([{2:g}, {3:g}])".format(h+1, num_pages, lambda0, lambda1))
for i, s in enumerate(ss):
s_cut = ex.cut_spectrum(s, lambda0, lambda1)
ax = plt.gca()
ax.plot(s_cut.x, s_cut.y, label=s.title)
if setup.flag_xlabel and setup.fmt_xlabel:
plt.xlabel('Wavelength (interval: [{0:g}, {1:g}])'.format(lambda0, lambda1))
xspan = lambda1-lambda0
ax.set_xlim([lambda0 - xspan * _T, lambda1 + xspan * _T])
ax.set_ylim([ymin - yspan * _T, ymax + yspan * _T])
if setup.flag_legend:
leg = plt.legend(loc=0)
a99.format_legend(leg)
plt.tight_layout()
pdf.savefig(fig)
plt.close()
# for fig in xrange(1, figure().number): ## will open an empty extra figure :(
# pdf.savefig( fig )
pdf.close()
logger.info("File {0!s} successfully created.".format(pdf_filename))
| 920,666
|
Plots spectra into a PDF file, one spectrum per page.
Args:
ss: list of Spectrum objects
pdf_filename: name of output file
|
def plot_spectra_pages_pdf(ss, pdf_filename='pages.pdf', setup=_default_setup):
logger = a99.get_python_logger()
xmin, xmax, ymin_, ymax, xspan, yspan = calc_max_min(ss)
ymin = ymin_ if setup.ymin is None else setup.ymin
num_pages = len(ss)
a99.format_BLB()
pdf = matplotlib.backends.backend_pdf.PdfPages(pdf_filename)
for i, s in enumerate(ss):
title = s.title
fig = plt.figure()
plt.plot(s.x, s.y, c=_FAV_COLOR)
if setup.flag_xlabel and setup.fmt_xlabel:
_set_plot(plt.xlabel, setup.fmt_xlabel, s)
if setup.flag_ylabel and setup.fmt_ylabel:
_set_plot(plt.ylabel, setup.fmt_ylabel, s)
_set_plot(plt.title, setup.fmt_title, s)
plt.xlim([xmin-xspan*_T, xmax+xspan*_T])
plt.ylim([ymin-yspan*_T, ymax+yspan*_T])
plt.tight_layout()
plt.subplots_adjust(top=0.94) # workaround for cropped title
logger.info("Printing page {0:d}/{1:d} ('{2!s}')".format(i+1, num_pages, title))
pdf.savefig(fig)
plt.close()
pdf.close()
logger.info("File {0!s} successfully created.".format(pdf_filename))
| 920,667
|
Resolves a Django-like path, e.g. object2__object3, on an object.
Args:
obj: The object the view is displaying.
path (str, optional): Double-underscore-separated attribute path to resolve.
Returns:
The object at the end of the resolved path.
|
def _resolve_path(obj, path=None):
if path:
for attr_name in path.split('__'):
obj = getattr(obj, attr_name)
return obj
| 920,804
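A minimal usage sketch; the classes and attribute names below are hypothetical:
class Profile:
    display_name = 'Ada'

class User:
    profile = Profile()

class Order:
    user = User()

_resolve_path(Order(), 'user__profile__display_name')  # 'Ada'
_resolve_path(Order())  # no path given: returns the Order instance itself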
|
Given a field, will place all subfields into a dictionary
Parameters:
* field - tuple: The field to get subfields for
Returns: a dictionary, codes as keys and a list of values as the value
|
def field_get_subfields(field):
pairs = {}
for key, value in field[0]:
if key in pairs:
pairs[key].append(value)
else:
pairs[key] = [value]
return pairs
| 921,661
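A usage sketch with a hypothetical field tuple whose first element is an iterable of (code, value) pairs:
field = ([('a', 'Title'), ('b', 'Subtitle'), ('a', 'Alt title')],)
field_get_subfields(field)
# {'a': ['Title', 'Alt title'], 'b': ['Subtitle']}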
|
Takes the model_class and generates a fixture for it
Args:
model_class (MongoEngine Document): model for which a fixture
is needed
kwargs (dict): any overrides instead of random values
Returns:
dict for now, other fixture types are not implemented yet
|
def make_fixture(model_class, **kwargs):
all_fields = get_fields(model_class)
# build a real list: map() returns an iterator on Python 3 and has no .remove()
fields_for_random_generation = [
getattr(model_class, x) for x in all_fields
]
overrides = {}
for kwarg, value in kwargs.items():
if kwarg in all_fields:
kwarg_field = getattr(model_class, kwarg)
fields_for_random_generation.remove(kwarg_field)
overrides.update({kwarg_field: value})
random_values = get_random_values(fields_for_random_generation)
values = dict(overrides, **random_values)
assert len(all_fields) == len(values), (
"Mismatch in values, {} != {}".format(
len(all_fields), len(values)
)
)
data = {k.name: v for k, v in values.items()}
return model_class(**data)
| 922,024
|
String Field has three constraints (apart from anything
in the super class)
Args:
field (StringField): actual string field object from a
model declaration
Returns:
random string value
|
def make_string_field_value(cls, field):
if field.regex is not None:
raise NotImplementedError
string_range = cls.get_range(field)
return cls.get_random_string(string_range)
| 922,242
|
Returns a normalized version of a DataFrame or Series
Parameters:
df - DataFrame or Series
The data to normalize
style - function or string, default 'mean'
The style to use when computing the norms. Takes 'mean' or 'minmax' to
do mean or min-max normalization respectively. User-defined functions that take
a pandas Series as input and return a normalized pandas Series are also accepted
|
def normalize(df, style = 'mean'):
if style == 'mean':
df_mean,df_std = df.mean(),df.std()
return (df-df_mean)/df_std
elif style == 'minmax':
col_min,col_max = df.min(),df.max()
return (df-col_min)/(col_max-col_min)
else:
return style(df)
| 922,288
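A usage sketch on a small Series (assuming pandas is available):
import pandas as pd

s = pd.Series([1.0, 2.0, 3.0, 4.0])
normalize(s)                    # mean normalization: (s - s.mean()) / s.std()
normalize(s, style='minmax')    # rescales values into [0, 1]
normalize(s, style=lambda x: x / x.max())  # any custom normalizer works too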
|
Returns a column name that isn't in the specified DataFrame
Parameters:
df - DataFrame
DataFrame to analyze
col_name - string, default 'unnamed_col'
Column name to use as the base value for the generated column name
|
def colname_gen(df,col_name = 'unnamed_col'):
if col_name not in df.keys():
return col_name
id_number = 0
while True:
# build each candidate from the original base name so digits
# do not accumulate across iterations
candidate = col_name + str(id_number)
if candidate in df.keys():
id_number += 1
else:
return candidate
| 922,427
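With the corrected version above, a usage sketch:
import pandas as pd

df = pd.DataFrame({'unnamed_col': [1], 'unnamed_col0': [2]})
colname_gen(df)           # 'unnamed_col1': first candidate not already in df
colname_gen(df, 'price')  # 'price': unused, so the base name is returned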
|
Cleans the column names on a DataFrame
Parameters:
df - DataFrame
The DataFrame to clean
|
def clean_colnames(df):
col_list = []
for index in range(_dutils.cols(df)):
col_list.append(df.columns[index].strip().lower().replace(' ','_'))
df.columns = col_list
| 922,428
|
Performs str.strip() on a column of a DataFrame
Parameters:
df - DataFrame
DataFrame to operate on
col_name - string
Name of column to strip
dest - bool, default False
Whether to apply the result to the DataFrame or return it.
True is apply, False is return.
|
def col_strip(df,col_name,dest = False):
if dest:
df[col_name] = df[col_name].str.strip()
else:
return df[col_name].str.strip()
| 922,429
|
Coerces a column in a DataFrame to numeric
Parameters:
df - DataFrame
DataFrame to operate on
col_name - string
Name of column to coerce
dest - bool, default False
Whether to apply the result to the DataFrame or return it.
True is apply, False is return.
|
def col_to_numeric(df,col_name, dest = False):
new_col = _pd.to_numeric(df[col_name], errors = 'coerce')
if dest:
set_col(df,col_name,new_col)
else:
return new_col
| 922,431
|
Coerces a column in a DataFrame to datetime
Parameters:
df - DataFrame
DataFrame to operate on
col_name - string
Name of column to coerce
dest - bool, default False
Whether to apply the result to the DataFrame or return it.
True is apply, False is return.
|
def col_to_dt(df,col_name,set_format = None,infer_format = True, dest = False):
new_col = _pd.to_datetime(df[col_name],errors = 'coerce',
format = set_format,infer_datetime_format = infer_format)
if dest:
set_col(df,col_name,new_col)
else:
return new_col
| 922,432
|
Coerces a column in a DataFrame to categorical
Parameters:
df - DataFrame
DataFrame to operate on
col_name - string
Name of column to coerce
dest - bool, default False
Whether to apply the result to the DataFrame or return it.
True is apply, False is return.
|
def col_to_cat(df,col_name, dest = False):
new_col = df[col_name].astype('category')
if dest:
set_col(df,col_name,new_col)
else:
return new_col
| 922,433
|
Changes a column name in a DataFrame
Parameters:
df - DataFrame
DataFrame to operate on
col_name - string
Name of column to change
new_col_name - string
New name of column
|
def col_rename(df,col_name,new_col_name):
col_list = list(df.columns)
for index,value in enumerate(col_list):
if value == col_name:
col_list[index] = new_col_name
break
df.columns = col_list
| 922,434
|
Changes a column of a DataFrame according to a given function
Parameters:
df - DataFrame
DataFrame to operate on
col_name - string
Name of column to modify
func - function
The function to use to modify the column
|
def col_mod(df,col_name,func,*args,**kwargs):
backup = df[col_name].copy()
try:
return_val = func(df,col_name,*args,**kwargs)
if return_val is not None:
set_col(df,col_name,return_val)
except Exception: # restore the original column if func raises
df[col_name] = backup
| 922,435
|
Performs str.strip() on a list of columns of a DataFrame
Parameters:
df - DataFrame
DataFrame to operate on
col_list - list of strings
names of columns to strip
dest - bool, default False
Whether to apply the result to the DataFrame or return it.
True is apply, False is return.
|
def cols_strip(df,col_list, dest = False):
if not dest:
return _pd.DataFrame({col_name:col_strip(df,col_name) for col_name in col_list})
for col_name in col_list:
col_strip(df,col_name,dest)
| 922,436
|
Coerces a list of columns to numeric
Parameters:
df - DataFrame
DataFrame to operate on
col_list - list of strings
names of columns to coerce
dest - bool, default False
Whether to apply the result to the DataFrame or return it.
True is apply, False is return.
|
def cols_to_numeric(df, col_list,dest = False):
if not dest:
return _pd.DataFrame({col_name:col_to_numeric(df,col_name) for col_name in col_list})
for col_name in col_list:
col_to_numeric(df,col_name,dest)
| 922,437
|
Coerces a list of columns to datetime
Parameters:
df - DataFrame
DataFrame to operate on
col_list - list of strings
names of columns to coerce
dest - bool, default False
Whether to apply the result to the DataFrame or return it.
True is apply, False is return.
|
def cols_to_dt(df, col_list,set_format = None,infer_format = True,dest = False):
if not dest:
return _pd.DataFrame({col_name:col_to_dt(df,col_name,set_format,infer_format) for col_name in col_list})
for col_name in col_list:
col_to_dt(df,col_name,set_format,infer_format,dest)
| 922,438
|
Coerces a list of columns to categorical
Parameters:
df - DataFrame
DataFrame to operate on
col_list - list of strings
names of columns to coerce
dest - bool, default False
Whether to apply the result to the DataFrame or return it.
True is apply, False is return.
|
def cols_to_cat(df, col_list,dest = False):
# Convert a list of columns to categorical
if not dest:
return _pd.DataFrame({col_name:col_to_cat(df,col_name) for col_name in col_list})
for col_name in col_list:
col_to_cat(df,col_name,dest)
| 922,439
|
Applies a function over a list of columns and returns the result
Parameters:
df - DataFrame
DataFrame to operate on
col_list - list of strings
names of columns to apply the function to
func - function
function to use
|
def cols_(df,col_list,func,*args,**kwargs):
return _pd.DataFrame({col_name:func(df,col_name,*args,**kwargs) for col_name in col_list})
| 922,440
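A usage sketch (the DataFrame and column names are hypothetical); this mirrors what cols_strip does when dest=False:
stripped = cols_(df, ['first_name', 'last_name'], col_strip)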
|
Rename a set of columns in a DataFrame
Parameters:
df - DataFrame
DataFrame to operate on
col_names - list of strings
names of columns to change
new_col_names - list of strings
new names for old columns (order should be same as col_names)
|
def cols_rename(df,col_names, new_col_names):
assert len(col_names) == len(new_col_names)
for old_name,new_name in zip(col_names,new_col_names):
col_rename(df,old_name,new_name)
| 922,441
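A usage sketch (column names are hypothetical):
cols_rename(df, ['old_a', 'old_b'], ['new_a', 'new_b'])
# df.columns now contains 'new_a' and 'new_b' in place of the old names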
|
Returns dictionary of datatypes in a DataFrame (uses string representation)
Parameters:
df - DataFrame
The DataFrame to return the object types of
Pandas datatypes are as follows:
object,number,bool,datetime,category,timedelta,datetimetz
This method uses queues and iterates over the columns in linear time.
It does extra steps to ensure that no further work with numpy datatypes needs
to be done.
|
def col_dtypes(df): # Does some work to reduce possibility of errors and stuff
test_list = [col_isobj,col_isnum,col_isbool,col_isdt,col_iscat,col_istdelt,col_isdtz]
deque_list = [(deque(col_method(df)),name) \
for col_method,name in zip(test_list,_globals.__dtype_names) if len(col_method(df))]
type_dict = {}
for que, name in deque_list:
while len(que):
type_dict[que.popleft()] = name
return type_dict
| 922,442
|
Returns a list of columns that are of type object. If col_name is specified, returns
whether the column in the DataFrame is of type 'object' instead.
Parameters:
df - DataFrame
DataFrame to check
col_name - string, default None
If specified, this function will return True if df[col_name] is of type 'object'
|
def col_isobj(df, col_name = None):
col_list = df.select_dtypes(include = 'object').columns
if col_name is None:
return col_list
else:
return col_name in col_list
| 922,443
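The whole col_is* family below shares this shape; a usage sketch:
import pandas as pd

df = pd.DataFrame({'name': ['a', 'b'], 'score': [1.5, 2.0]})
col_isobj(df)          # Index(['name'], dtype='object')
col_isobj(df, 'name')  # True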
|
Returns a list of columns that are of type 'number'. If col_name is specified, returns
whether the column in the DataFrame is of type 'number' instead.
Parameters:
df - DataFrame
DataFrame to check
col_name - string, default None
If specified, this function will return True if df[col_name] is of type 'number'
|
def col_isnum(df,col_name = None):
col_list = df.select_dtypes(include = 'number').columns
if col_name is None:
return col_list
else:
return col_name in col_list
| 922,444
|
Returns a list of columns that are of type 'bool'. If col_name is specified, returns
whether the column in the DataFrame is of type 'bool' instead.
Parameters:
df - DataFrame
DataFrame to check
col_name - string, default None
If specified, this function will return True if df[col_name] is of type 'bool'
|
def col_isbool(df,col_name = None):
col_list = df.select_dtypes(include = 'bool').columns
if col_name is None:
return col_list
else:
return col_name in col_list
| 922,445
|
Returns a list of columns that are of type 'datetime'. If col_name is specified, returns
whether the column in the DataFrame is of type 'datetime' instead.
Parameters:
df - DataFrame
DataFrame to check
col_name - string, default None
If specified, this function will return True if df[col_name] is of type 'datetime'
|
def col_isdt(df,col_name = None):
col_list = df.select_dtypes(include = 'datetime').columns
if col_name is None:
return col_list
else:
return col_name in col_list
| 922,446
|
Returns a list of columns that are of type 'category'. If col_name is specified, returns
whether the column in the DataFrame is of type 'category' instead.
Parameters:
df - DataFrame
DataFrame to check
col_name - string, default None
If specified, this function will return True if df[col_name] is of type 'category'
|
def col_iscat(df,col_name = None):
col_list = df.select_dtypes(include = 'category').columns
if col_name is None:
return col_list
else:
return col_name in col_list
| 922,447
|
Returns a list of columns that are of type 'timedelta'. If col_name is specified, returns
whether the column in the DataFrame is of type 'timedelta' instead.
Parameters:
df - DataFrame
DataFrame to check
col_name - string, default None
If specified, this function will return True if df[col_name] is of type 'timedelta'
|
def col_istdelt(df,col_name = None):
col_list = df.select_dtypes(include = 'timedelta').columns
if col_name is None:
return col_list
else:
return col_name in col_list
| 922,448
|
Returns a list of columns that are of type 'datetimetz'. If col_name is specified, returns
whether the column in the DataFrame is of type 'datetimetz' instead.
Parameters:
df - DataFrame
DataFrame to check
col_name - string, default None
If specified, this function will return True if df[col_name] is of type 'datetimetz'
|
def col_isdtz(df,col_name = None):
col_list = df.select_dtypes(include = 'datetimetz').columns
if col_name is None:
return col_list
else:
return col_name in col_list
| 922,449
|
Initialization.
Arguments:
node_name (str): The name of the node.
external_id (str): The external ID of the node.
|
def __init__(self, node_name: str, external_id: str = None):
# We must not only declare the properties but also initialize them,
# otherwise the IDE will show warnings wherever the properties are accessed.
self.are_neighbors_cached: bool = False
self.edges_where_source: List["DBEdge"] = []
self.edges_where_target: List["DBEdge"] = []
self.name: str = node_name
self.external_id: Optional[str] = external_id.strip() if external_id is not None else None
raise NotImplementedError("DBNode is just an abstract base class that defines the "
"interface of actual node model objects. {}".format(node_name))
| 922,451
|
Initialization.
Arguments:
source_name (str): The name of the source node of the edge.
target_name (str): The name of the target node of the edge.
weight (float): The weight of the edge.
|
def __init__(self, source_name: str, target_name: str, weight: float = 1):
# We must not only declare the properties but also initialize them,
# otherwise the IDE will show warnings wherever the properties are accessed.
self.source_name: str = source_name
self.target_name: str = target_name
self.weight: float = weight
raise NotImplementedError("DBEdge is just an abstract base class that defines "
"the interface of actual edge model objects. "
"{} - {} ({})".format(source_name, target_name, weight))
| 922,452
|
Takes in a dataframe and returns a dataframe with
information on the data-types present in each column.
Parameters:
df - DataFrame
Dataframe to summarize
|
def dtypes_summary(df):
output_df = pd.DataFrame([])
row_count = df.shape[0]
row_indexes = ['rows_numerical','rows_string','rows_date_time','category_count','largest_category','rows_na','rows_total']
for colname in df:
data = df[colname] # data is the pandas series associated with this column
# number of numerical values in the column
rows_numerical = pd.to_numeric(data,errors = 'coerce').count()
# number of values that can't be coerced to a numerical
rows_string = row_count - rows_numerical
# number of values that can be coerced to a date-time object
rows_date_time = pd.to_datetime(data,errors = 'coerce',infer_datetime_format = True).count()
# categories in column
value_counts = data.value_counts().reset_index()
# number of distinct values in the column
categories = len(value_counts)
# largest category
largest_category = value_counts.iloc[0,1]
# number of null/missing values
rows_na = data.isnull().sum()
# build the output list
output_data = [rows_numerical, rows_string, rows_date_time, categories,
largest_category,rows_na,row_count]
# add to dataframe
output_df.loc[:,colname] = pd.Series(output_data)
# row names
output_df.index = row_indexes
return output_df
| 922,534
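A usage sketch:
import pandas as pd

df = pd.DataFrame({'amount': ['1', '2', 'oops'], 'city': ['NY', 'NY', 'LA']})
summary = dtypes_summary(df)
# summary.loc['rows_numerical', 'amount'] -> 2 ('oops' fails numeric coercion)
# summary.loc['largest_category', 'city'] -> 2 ('NY' appears twice)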
|
Finds outliers in the dataframe.
Parameters:
df - DataFrame
The DataFrame to analyze.
sensitivity - number, default 1.5
The value to multiply by the inter-quartile range when determining outliers. This number is used
for categorical data as well.
|
def df_outliers(df,sensitivity = 1.5):
outlier_df = df.copy()
dtypes = _basics.col_dtypes(df)
for col_name in df.columns:
outlier_df.loc[~outliers(df[col_name],'bool',dtypes[col_name],sensitivity),col_name] = np.nan
outlier_df = outlier_df.dropna(how = 'all')
return outlier_df
| 922,535
|
Takes a series of ordered frequencies and returns the number of leading entries whose cumulative sum falls below the given quantile of the total
Parameters:
series - Series
The series to analyze
q - number
Quantile to get the value of
|
def cum_percentile(series,q):
total = series.sum()
cum_sum = series.cumsum()
return sum(cum_sum < total*q)
| 922,537
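A worked example:
import pandas as pd

freqs = pd.Series([50, 30, 15, 5])  # ordered frequencies, total 100
freqs.cumsum()              # 50, 80, 95, 100
cum_percentile(freqs, 0.5)  # 0: no cumulative sum is below 50
cum_percentile(freqs, 0.9)  # 2: the first two sums (50, 80) are below 90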
|
u"""Create user for the Merchant given in the X-Mcash-Merchant header.
Arguments:
user_id:
Identifier for the user
roles:
Role
netmask:
Limit user connections by netmask, for example 192.168.1.0/24
secret:
Secret used when authenticating with mCASH
pubkey:
RSA key used for authenticating by signing
|
def create_user(self, user_id,
roles=None, netmask=None,
secret=None, pubkey=None):
arguments = {'id': user_id,
'roles': roles,
'netmask': netmask,
'secret': secret,
'pubkey': pubkey}
return self.do_req('POST', self.merchant_api_base_url + '/user/', arguments).json()
| 922,743
|
Update user. Returns the raw response object.
Arguments:
user_id:
User id of user to update
roles:
Role
netmask:
Limit user connections by netmask, for example 192.168.1.0/24
secret:
Secret used when authenticating with mCASH
pubkey:
RSA key used for authenticating by signing
|
def update_user(self, user_id,
roles=None, netmask=None,
secret=None, pubkey=None):
arguments = {'roles': roles,
'netmask': netmask,
'secret': secret,
'pubkey': pubkey}
return self.do_req('PUT',
self.merchant_api_base_url + '/user/' +
user_id + '/', arguments)
| 922,744
|
Create POS resource
Arguments:
name:
Human-readable name of the POS, used for displaying payment
request origin to end user
pos_type:
POS type
location:
Merchant location
pos_id:
The ID of the POS that is to be created. Has to be unique for
the merchant
|
def create_pos(self, name, pos_type,
pos_id, location=None):
arguments = {'name': name,
'type': pos_type,
'id': pos_id,
'location': location}
return self.do_req('POST', self.merchant_api_base_url + '/pos/', arguments).json()
| 922,745
|
Update POS resource. Returns the raw response object.
Arguments:
pos_id:
POS id as chosen on registration
name:
Human-readable name of the POS, used for displaying payment
request origin to end user
pos_type:
POS type
location:
Merchant location
|
def update_pos(self, pos_id, name, pos_type, location=None):
arguments = {'name': name,
'type': pos_type,
'location': location}
return self.do_req('PUT',
self.merchant_api_base_url + '/pos/' +
pos_id + '/', arguments)
| 922,746
|
post a chat message
Arguments:
channel_id:
Scan token
|
def post_chat_message(self, merchant_id, channel_id, message):
return self.do_req('POST',
self.base_url + '/chat/v1/merchant/%s/channel/%s/message/' % (merchant_id, channel_id),
message)
| 922,749
|
Register new shortlink
Arguments:
callback_uri:
URI called by mCASH when user scans shortlink
description:
Shortlink description displayed in confirmation dialogs
serial_number:
Serial number on printed QR codes. This field is only used when
registering printed stickers issued by mCASH
|
def create_shortlink(self, callback_uri=None,
description=None, serial_number=None):
arguments = {'callback_uri': callback_uri,
'description': description,
'serial_number': serial_number}
return self.do_req('POST', self.merchant_api_base_url + '/shortlink/',
arguments).json()
| 922,751
|
Update existing shortlink registration
Arguments:
shortlink_id:
Shortlink id assigned by mCASH
|
def update_shortlink(self, shortlink_id, callback_uri=None,
description=None):
arguments = {'callback_uri': callback_uri,
'description': description}
return self.do_req('PUT',
self.merchant_api_base_url + '/shortlink/' +
shortlink_id + '/', arguments)
| 922,752
|
Retrieve registered shortlink info
Arguments:
shortlink_id_or_url:
Shortlink id or url, assigned by mCASH
|
def get_shortlink(self, shortlink_id_or_url):
if "://" not in shortlink_id_or_url:
shortlink_id_or_url = self.merchant_api_base_url + '/shortlink/' + shortlink_id_or_url + '/'
return self.do_req('GET', shortlink_id_or_url).json()
| 922,753
|
Update ledger info
Arguments:
ledger_id:
Ledger id assigned by mCASH
description:
Description of the Ledger and its usage
|
def update_ledger(self, ledger_id, description=None):
arguments = {'description': description}
return self.do_req('PUT',
self.merchant_api_base_url + '/ledger/' +
ledger_id + '/', arguments)
| 922,755
|
Get report info
Arguments:
ledger_id:
Id for ledger for report
report_id:
Report id assigned by mCASH
|
def get_report(self, ledger_id, report_id):
return self.do_req('GET',
self.merchant_api_base_url + '/ledger/' +
ledger_id + '/report/' +
report_id + '/').json()
| 922,757
|
Gets all surveys in account
Args:
None
Returns:
list: a list of all surveys
|
def get_surveys(self):
payload = {
'Request': 'getSurveys',
'Format': 'JSON'
}
r = self._session.get(QUALTRICS_URL, params=payload)
output = r.json()
return output['Result']['Surveys']
| 922,983
|
Retrieves the content of an input given a DataSource. The input acts like a filter over the outputs of the DataSource.
Args:
name (str): The name of the input.
ds (openflow.DataSource): The DataSource that will feed the data.
Returns:
pandas.DataFrame: The content of the input.
|
def get_input(self, name, ds):
columns = self.inputs.get(name)
df = ds.get_dataframe()
# set defaults
for column in columns:
if column not in df.columns:
df[column] = self.defaults.get(column)
return df[columns]
| 923,001
|
This method runs the refactoring on all the Protobuf files in the
Dropsonde repo.
Args:
dest_dir (str): directory where the Protobuf files lives.
namespace (str): the desired package name (i.e. "dropsonde.py2")
namespace_path (str): the desired path corresponding to the package
name (i.e. "dropsonde/py2")
|
def proto_refactor_files(dest_dir, namespace, namespace_path):
for dn, dns, fns in os.walk(dest_dir):
for fn in fns:
fn = os.path.join(dn, fn)
if fnmatch.fnmatch(fn, '*.proto'):
data = proto_refactor(fn, namespace, namespace_path)
with open(fn, 'w') as f:
f.write(data)
| 923,688
|
Copies the source Protobuf files into a build directory.
Args:
source_dir (str): source directory of the Protobuf files
dest_dir (str): destination directory of the Protobuf files
|
def clone_source_dir(source_dir, dest_dir):
if os.path.isdir(dest_dir):
print('removing', dest_dir)
shutil.rmtree(dest_dir)
shutil.copytree(source_dir, dest_dir)
| 923,689
|
Runs the "protoc" command on the refactored Protobuf files to generate
the source python/python3 files.
Args:
lang (str): the language to compile with "protoc"
(i.e. python, python3)
output_dir (str): the output directory for the generated source files
proto_path (str): the root protobuf build path in which to run "protoc"
refactored_dir (str): the input directory of the Protobuf files
|
def protoc_command(lang, output_dir, proto_path, refactored_dir):
proto_files = glob.glob(os.path.join(refactored_dir, '*.proto'))
cmd = ['protoc', '-I', proto_path, '--{}_out'.format(lang), output_dir]
cmd.extend(proto_files)
print(' '.join(cmd))
p = subprocess.Popen(
cmd, stdout=sys.stdout, stderr=sys.stderr, stdin=sys.stdin,
cwd=proto_path)
p.communicate()
| 923,690
|
Initialize darun attributes
Args:
filename (str): Absolute path of file name as a string with `hdf5` extension
damethod (str): Name of the assimilation method used, i.e. `enkf`.
date (str): Date of the experiment `MM-DD-YYYY:HHHH`
ensize (int): ensemble size
|
def __init__(self, filename, damethod, date, ensize):
self.filename = filename
self.damethod = damethod
self.date = date
self.ensize = ensize
# Create the file
self.dafile = h5py.File(self.filename, "a")
# Set the meta-data as attributes on the root group
self.dafile.attrs['damethod'] = self.damethod
self.dafile.attrs['date'] = self.date
self.dafile.attrs['ensize'] = self.ensize
# Create main groups for the darunfile
self.dafile.create_group("Observation")
self.dafile.create_group("Parameter")
self.dafile.create_group("State")
self.dafile.create_group("StateObservation")
self.dafile.create_group("Simulation")
self.dafile.create_group("Inflation")
| 923,826
|
Returns a tuple representing the index of an item in a 2D matrix.
Arguments:
- val (str) Value to look for
- matrix (list) 2D matrix to search for val in
Returns:
- (tuple) Ordered pair representing location of val
|
def find_in_matrix_2d(val, matrix):
dim = len(matrix[0])
item_index = 0
for row in matrix:
for i in row:
if i == val:
break
item_index += 1
if i == val:
break
loc = (int(item_index / dim), item_index % dim)
return loc
| 924,144
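A usage sketch:
matrix = [['a', 'b', 'c'],
          ['d', 'e', 'f']]
find_in_matrix_2d('e', matrix)  # (1, 1): second row, second column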
|
Computes a modified Levenshtein distance between two strings, comparing the
lowercase versions of each string and accounting for QWERTY distance.
Arguments:
- a (str) String to compare to 'b'
- b (str) String to compare to 'a'
Returns:
- (int) Number representing closeness of 'a' and 'b' (lower is better)
|
def compute_distance(a, b):
# check simple cases first
if not a:
return len(b)
if not b:
return len(a)
if a == b or str.lower(a) == str.lower(b):
return 0
# lowercase each string
a = str.lower(a)
b = str.lower(b)
# create empty vectors to store costs
vector_1 = [-1] * (len(b) + 1)
vector_2 = [-1] * (len(b) + 1)
# set default values
for i in range(len(vector_1)):
vector_1[i] = i
# compute distance
for i in range(len(a)):
vector_2[0] = i + 1
for j in range(len(b)):
penalty = 0 if a[i] == b[j] else compute_qwerty_distance(a[i], b[j])
vector_2[j + 1] = min(vector_2[j] + 1, vector_1[j + 1] + 1, vector_1[j] + penalty)
for j in range(len(vector_1)):
vector_1[j] = vector_2[j]
return vector_2[len(b)]
| 924,146
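A usage sketch; exact scores for substitutions depend on compute_qwerty_distance:
compute_distance('hello', 'Hello')  # 0: comparison is case-insensitive
compute_distance('', 'abc')         # 3: length of the non-empty string
# A typo on an adjacent key (e.g. 'hwllo' for 'hello') scores lower than one
# across the keyboard, because substitutions are weighted by QWERTY distance.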
|
Reads file for configuration defaults.
Arguments:
- path (str) Absolute filepath (usually ~/.licenser)
Returns:
- (dict) Defaults for name, email, license, .txt extension
|
def get_defaults(path):
defaults = {}
if os.path.isfile(path):
with open(path) as f:
for line in f:
line = line.strip()
if '=' not in line or line.startswith('#'):
continue
k, v = line.split('=', 1)
v = v.strip('"').strip("'")
defaults[k] = v
return defaults
else:
return {}
| 924,147
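For illustration, a ~/.licenser file like the following (hypothetical values) parses into {'name': 'Ada Lovelace', 'email': 'ada@example.com', 'license': 'MIT'}:
# licenser defaults
name="Ada Lovelace"
email=ada@example.com
license=MIT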
|
Returns the closest match to the requested license.
Arguments:
- name (str) License to use
Returns:
- (str) License that most closely matches the 'name' parameter
|
def get_license(name):
filenames = os.listdir(cwd + licenses_loc)
licenses = dict(zip(filenames, [-1] * len(filenames)))
for l in licenses:
licenses[l] = compute_distance(name, l)
return min(licenses, key=(lambda k: licenses[k]))
| 924,148
|
Parse command line args & override defaults.
Arguments:
- path (str) Absolute filepath
Returns:
- (tuple) Name, email, license, project, ext, year
|
def get_args(path):
defaults = get_defaults(path)
licenses = ', '.join(os.listdir(cwd + licenses_loc))
p = parser(description='tool for adding open source licenses to your projects. available licenses: %s' % licenses)
_name = False if defaults.get('name') else True
_email = False if defaults.get('email') else True
_license = False if defaults.get('license') else True
p.add_argument('-n', dest='name', required=_name, help='name')
p.add_argument('-e', dest='email', required=_email, help='email')
p.add_argument('-l', dest='license', required=_license, help='license')
p.add_argument('-p', dest='project', required=False, help='project')
p.add_argument('-v', '--version', action='version', version='%(prog)s {version}'.format(version=version))
p.add_argument('--txt', action='store_true', required=False, help='add .txt to filename')
args = p.parse_args()
name = args.name if args.name else defaults.get('name')
email = args.email if args.email else defaults.get('email')
license = get_license(args.license) if args.license else defaults.get('license')
project = args.project if args.project else os.getcwd().split('/')[-1]
ext = '.txt' if args.txt else ''
year = str(date.today().year)
return (name, email, license, project, ext, year)
| 924,149
|
Creates a LICENSE or LICENSE.txt file in the current directory. Reads from
the 'assets' folder and looks for placeholders enclosed in curly braces.
Arguments:
- (tuple) Name, email, license, project, ext, year
|
def generate_license(args):
with open(cwd + licenses_loc + args[2]) as f:
license = f.read()
license = license.format(name=args[0],
email=args[1],
license=args[2],
project=args[3],
year=args[5])
with open('LICENSE' + args[4], 'w') as f:
f.write(license)
print('licenser: license file added to current directory')
| 924,150
|
Add autodetected commands as entry points.
Args:
dist: The distutils Distribution object for the project being
installed.
_: The keyword used in the setup function. Unused.
value: The value set to the keyword in the setup function. If the value
is not True, this function will do nothing.
|
def setup_keyword(dist, _, value):
# type: (setuptools.dist.Distribution, str, bool) -> None
if value is not True:
return
dist.entry_points = _ensure_entry_points_is_dict(dist.entry_points)
for command, subcommands in six.iteritems(_get_commands(dist)):
entry_point = '{command} = rcli.dispatcher:main'.format(
command=command)
entry_points = dist.entry_points.setdefault('console_scripts', [])
if entry_point not in entry_points:
entry_points.append(entry_point)
dist.entry_points.setdefault('rcli', []).extend(subcommands)
| 924,337
|
Read rcli configuration and write it out to the egg info.
Args:
cmd: An egg info command instance to use for writing.
basename: The basename of the file to write.
filename: The full path of the file to write into the egg info.
|
def egg_info_writer(cmd, basename, filename):
# type: (setuptools.command.egg_info.egg_info, str, str) -> None
setupcfg = next((f for f in setuptools.findall()
if os.path.basename(f) == 'setup.cfg'), None)
if not setupcfg:
return
parser = six.moves.configparser.ConfigParser() # type: ignore
parser.read(setupcfg)
if not parser.has_section('rcli') or not parser.items('rcli'):
return
config = dict(parser.items('rcli')) # type: typing.Dict[str, typing.Any]
for k, v in six.iteritems(config):
if v.lower() in ('y', 'yes', 'true'):
config[k] = True
elif v.lower() in ('n', 'no', 'false'):
config[k] = False
else:
try:
config[k] = json.loads(v)
except ValueError:
pass
cmd.write_file(basename, filename, json.dumps(config))
| 924,339
|
Find all commands belonging to the given distribution.
Args:
dist: The Distribution to search for docopt-compatible docstrings that
can be used to generate command entry points.
Returns:
A dictionary containing a mapping of primary commands to sets of
subcommands.
|
def _get_commands(dist # type: setuptools.dist.Distribution
):
# type: (...) -> typing.Dict[str, typing.Set[str]]
py_files = (f for f in setuptools.findall()
if os.path.splitext(f)[1].lower() == '.py')
pkg_files = (f for f in py_files if _get_package_name(f) in dist.packages)
commands = {} # type: typing.Dict[str, typing.Set[str]]
for file_name in pkg_files:
with open(file_name) as py_file:
module = typing.cast(ast.Module, ast.parse(py_file.read()))
module_name = _get_module_name(file_name)
_append_commands(commands, module_name, _get_module_commands(module))
_append_commands(commands, module_name, _get_class_commands(module))
_append_commands(commands, module_name, _get_function_commands(module))
return commands
| 924,340
|
Append entry point strings representing the given Command objects.
Args:
dct: The dictionary to append with entry point strings. Each key will
be a primary command with a value containing a list of entry point
strings representing a Command.
module_name: The name of the module in which the command object
resides.
commands: A list of Command objects to convert to entry point strings.
|
def _append_commands(dct, # type: typing.Dict[str, typing.Set[str]]
module_name, # type: str
commands # type:typing.Iterable[_EntryPoint]
):
# type: (...) -> None
for command in commands:
entry_point = '{command}{subcommand} = {module}{callable}'.format(
command=command.command,
subcommand=(':{}'.format(command.subcommand)
if command.subcommand else ''),
module=module_name,
callable=(':{}'.format(command.callable)
if command.callable else ''),
)
dct.setdefault(command.command, set()).add(entry_point)
| 924,341
|
Yield all Command objects represented by the python module.
Module commands consist of a docopt-style module docstring and a callable
Command class.
Args:
module: An ast.Module object used to retrieve docopt-style commands.
Yields:
Command objects that represent entry points to append to setup.py.
|
def _get_module_commands(module):
# type: (ast.Module) -> typing.Generator[_EntryPoint, None, None]
cls = next((n for n in module.body
if isinstance(n, ast.ClassDef) and n.name == 'Command'), None)
if not cls:
return
methods = (n.name for n in cls.body if isinstance(n, ast.FunctionDef))
if '__call__' not in methods:
return
docstring = ast.get_docstring(module)
for commands, _ in usage.parse_commands(docstring):
yield _EntryPoint(commands[0], next(iter(commands[1:]), None), None)
| 924,342
|
Yield all Command objects represented by python functions in the module.
Function commands consist of all top-level functions that contain
docopt-style docstrings.
Args:
module: An ast.Module object used to retrieve docopt-style commands.
Yields:
Command objects that represent entry points to append to setup.py.
|
def _get_function_commands(module):
# type: (ast.Module) -> typing.Generator[_EntryPoint, None, None]
nodes = (n for n in module.body if isinstance(n, ast.FunctionDef))
for func in nodes:
docstring = ast.get_docstring(func)
for commands, _ in usage.parse_commands(docstring):
yield _EntryPoint(commands[0], next(iter(commands[1:]), None),
func.name)
| 924,343
|
Gets a logger
Arguments:
name - the name you wish to log as
Returns:
A logger!
|
def get_logger(name):
logger = logging.getLogger(name)
logger.addHandler(logging.NullHandler())
return logger
| 924,561
|
Calculate sunrise or sunset based on:
Parameters:
jd: Julian Day
lat: latitude
lon: longitude
stage: sunrise or sunset
|
def _calc(cls, **kwargs):
zenith = 90.833333 # official value
jd = kwargs.get("jd", None)
lat = kwargs.get("lat", None)
lon = kwargs.get("lon", None)
stage = kwargs.get("stage", None)
if jd is None or stage is None or lat is None or lon is None:
raise ValueError("Must supply an 'jd', 'lat, 'lon', and 'stage' parameter")
if stage != SunCycles.RISING and stage != SunCycles.SETTING:
raise ValueError("'stage' parameter must be %s or %s" % (SunCycles.RISING, SunCycles.SETTING))
longhr = lon / 15.
if stage == SunCycles.RISING:
apx = jd + ( (6 - longhr) / 24 )
elif stage == SunCycles.SETTING:
apx = jd + ( (18 - longhr) / 24 )
sun_mean_anom = ( 0.9856 * apx ) - 3.289 # sun's mean anomaly
#sun's longitude
sun_lon = sun_mean_anom + (1.916 * np.sin( np.radians(sun_mean_anom) )) \
+ (0.02 * np.sin( np.radians(2 * sun_mean_anom) )) + 282.634
if sun_lon > 360:
sun_lon = sun_lon - 360
elif sun_lon < 0:
sun_lon = sun_lon + 360
right_ascension = np.degrees(np.arctan( 0.91764 * np.tan( np.radians(sun_lon) ) )) # sun's right ascension
if right_ascension > 360:
right_ascension = right_ascension - 360
elif right_ascension < 0:
right_ascension = right_ascension + 360
# put sun's right ascension value in the same quadrant as the sun's
# true longitude
lQuad = 90. * np.floor(sun_lon / 90.)
raQuad = 90. * np.floor(right_ascension / 90.)
right_ascension = right_ascension + ( lQuad - raQuad)
right_ascension = right_ascension / 15. # Convert to hours
# Sun's declination
sinDecl = 0.39782 * np.sin( np.radians(sun_lon) )
cosDecl = np.cos( np.arcsin( sinDecl ) )
# Sun's local hour angle
cosHr = (np.cos( np.radians(zenith) ) - ( sinDecl * np.sin(np.radians(lat)) )) \
/ ( cosDecl * np.cos( np.radians(lat) ) )
if cosHr > 1: # Sun doesn't rise at this location on this date
return -1, -1
elif cosHr < -1: # Sun doesn't set at this location on this date
return -1, -1
elif stage == SunCycles.RISING: # Sunrise
hr = 360 - np.degrees(np.arccos(cosHr))
elif stage == SunCycles.SETTING: # Sunset
hr = np.degrees(np.arccos(cosHr))
hr = hr / 15. # Convert angle to hours
localTime = hr + right_ascension - ( 0.06571 * apx ) - 6.622 # local mean time of rise/set
UTtime = localTime - longhr # adjust to UTC
if UTtime < 0:
UTtime = UTtime + 24
elif UTtime > 24:
UTtime = UTtime - 24
hour = np.floor(UTtime)
minute = (UTtime - hour) * 60
if minute == 60:
hour = hour + 1
minute = 0
return hour, minute
| 924,913
|
Calculate the vertical swimming speed of this behavior.
Takes into account the vertical swimming speed and the
variance.
Parameters:
method: "gaussian" (default) or "random"
"random" (vss - variance) < X < (vss + variance)
|
def calculate_vss(self, method=None):
if self.variance == float(0):
return self.vss
else:
# Sample from a Gaussian distribution and return
if method == "gaussian" or method is None:
return gauss(self.vss, self.variance)
elif method == "random":
return uniform(self.vss - self.variance, self.vss + self.variance)
else:
raise ValueError("Method of vss calculation not recognized, please use 'gaussian' or 'random'")
| 925,150
|
Transforms a DataFrame in place. Computes all outputs of the DataFrame.
Args:
df (pandas.DataFrame): DataFrame to transform.
|
def transform(self, df):
for name, function in self.outputs:
df[name] = function(df)
| 925,237
|
Preprocesses then transforms the return of fetch().
Args:
force_computation (bool, optional) : Defaults to False. If set to True, forces the computation of DataFrame at each call.
Returns:
pandas.DataFrame: Preprocessed and transformed DataFrame.
|
def get_dataframe(self, force_computation=False):
# returns df if it was already computed
if self.df is not None and not force_computation: return self.df
self.df = self.fetch(self.context)
# compute df = transform(preprocess(df))
self.df = self.preprocess(self.df)
self.transform(self.df)
return self.df
| 925,238
|
Instantiate the exception with a descriptive message.
Args:
parameter: The CLI parameter with the invalid value.
value: The invalid value passed to the CLI parameter.
valid_values: The values that would have been accepted by the
parameter.
|
def __init__(self, parameter, value, valid_values=None):
# type: (str, str, typing.Sequence[str]) -> None
msg = 'Invalid value "{value}" supplied to {parameter}.'.format(
parameter=parameter, value=value)
if valid_values:
msg += ' Valid options are: {}'.format(', '.join(valid_values))
super(InvalidCliValueError, self).__init__(msg)
| 925,604
|
Instantiate the exception with a descriptive message.
Args:
type_: The type to which the cast was attempting to convert the
value.
value: The value that was attempted to be cast.
|
def __init__(self, type_, value):
# type: (type, typing.Any) -> None
self.type_ = type_
self.value = value
super(CastError, self).__init__(
'Unable to cast "{}" to {}.'.format(value, type_.__name__))
| 925,605
|
Call the function with args normalized and cast to the correct types.
Args:
func: The function to call.
args: The arguments parsed by docopt.
Returns:
The return value of func.
|
def call(func, args):
assert hasattr(func, '__call__'), 'Cannot call func: {}'.format(
func.__name__)
raw_func = (
func if isinstance(func, FunctionType) else func.__class__.__call__)
hints = collections.defaultdict(lambda: Any, get_type_hints(raw_func))
argspec = _getargspec(raw_func)
named_args = {}
varargs = ()
for k, nk, v in _normalize(args):
if nk == argspec.varargs:
hints[nk] = Tuple[hints[nk], ...]
elif nk not in argspec.args and argspec.varkw in hints:
hints[nk] = hints[argspec.varkw]
try:
value = cast(hints[nk], v)
except TypeError as e:
_LOGGER.exception(e)
six.raise_from(exc.InvalidCliValueError(k, v), e)
if nk == argspec.varargs:
varargs = value
elif (nk in argspec.args or argspec.varkw) and (
nk not in named_args or named_args[nk] is None):
named_args[nk] = value
return func(*varargs, **named_args)
| 925,916
|
Yield a 3-tuple containing the key, a normalized key, and the value.
Args:
args: The arguments parsed by docopt.
Yields:
A 3-tuple that contains the docopt parameter name, the parameter name
normalized to be a valid python identifier, and the value assigned to
the parameter.
|
def _normalize(args):
# type: (Dict[str, Any]) -> Generator[Tuple[str, str, Any], None, None]
for k, v in six.iteritems(args):
nk = re.sub(r'\W|^(?=\d)', '_', k).strip('_').lower()
do_not_shadow = dir(six.moves.builtins) # type: ignore
if keyword.iskeyword(nk) or nk in do_not_shadow:
nk += '_'
_LOGGER.debug('Normalized "%s" to "%s".', k, nk)
yield k, nk, v
| 925,919
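A sketch of the normalization:
args = {'--log-level': 'DEBUG', '<command>': 'run', 'import': True}
{k: nk for k, nk, _ in _normalize(args)}
# {'--log-level': 'log_level', '<command>': 'command', 'import': 'import_'}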
|
Starts filesystem analysis. Detects supported filesystems and
loads the :attr:`partitions` array.
Args:
filename - Path to file or device for reading.
Raises:
IOError - File/device does not exist or is not readable.
|
def load(self, filename, bs=512):
self.__filename = filename
self.__volumes = []
# Detect partitioning scheme
self.__partition_scheme = rawdisk.scheme.common.detect_scheme(filename)
plugin_objects = [plugin.plugin_object for plugin in self.__fs_plugins]
fs_detector = FilesystemDetector(fs_plugins=plugin_objects)
if self.__partition_scheme == PartitionScheme.SCHEME_MBR:
self.__load_mbr_volumes(filename, fs_detector, bs)
elif self.__partition_scheme == PartitionScheme.SCHEME_GPT:
self.__load_gpt_volumes(filename, fs_detector, bs)
else:
self.logger.warning('Partitioning scheme could not be determined.')
# try detecting standalone volume
volume = fs_detector.detect_standalone(filename, offset=0)
if volume is not None:
volume.load(filename, offset=0)
self.__volumes.append(volume)
else:
self.logger.warning(
'Were not able to detect standalone volume type')
| 925,969
|
Create a Xeoma object for interacting with a Xeoma web server
Arguments:
base_url: the url of the Xeoma web server
login: the Xeoma web server username
password: the Xeoma web server password
|
def __init__(self, base_url, login=None, password=None):
self._base_url = base_url.rstrip('/')
self._login = login
self._password = password
| 925,973
|
Grab a single image from the Xeoma web server
Arguments:
image_name: the name of the image to fetch (i.e. image01)
username: the username to directly access this image
password: the password to directly access this image
|
async def async_get_camera_image(self, image_name, username=None, password=None):
try:
data = await self.async_fetch_image_data(
image_name, username, password)
if data is None:
raise XeomaError('Unable to authenticate with Xeoma web '
'server')
return data
except asyncio.TimeoutError:
raise XeomaError('Connection timeout while fetching camera image.')
except aiohttp.ClientError as e:
raise XeomaError('Unable to fetch image: {}'.format(e))
| 925,975
|
Fetch image data from the Xeoma web server
Arguments:
image_name: the name of the image to fetch (i.e. image01)
username: the username to directly access this image
password: the password to directly access this image
|
async def async_fetch_image_data(self, image_name, username, password):
params = {}
cookies = self.get_session_cookie()
if username is not None and password is not None:
params['user'] = self.encode_user(username, password)
else:
params['user'] = ''
async with aiohttp.ClientSession(cookies=cookies) as session:
resp = await session.get(
'{}/{}.jpg'.format(self._base_url, image_name),
params=params
)
if resp.headers['Content-Type'] == 'image/jpeg':
data = await resp.read()
else:
data = None
return data
| 925,976
|
Return the function for the specified subcommand.
Args:
name: The name of a subcommand.
Returns:
The loadable object from the entry point represented by the subcommand.
|
def _get_subcommand(name):
# type: (str) -> config.RcliEntryPoint
_LOGGER.debug('Accessing subcommand "%s".', name)
if name not in settings.subcommands:
raise ValueError(
'"{subcommand}" is not a {command} command. \'{command} help -a\' '
'lists all available subcommands.'.format(
command=settings.command, subcommand=name)
)
return settings.subcommands[name]
| 926,126
|
Extract the command name and arguments to pass to docopt.
Args:
argv: The argument list being used to run the command.
Returns:
A tuple containing the name of the command and the arguments to pass
to docopt.
|
def _get_command_and_argv(argv):
# type: (typing.List[str]) -> typing.Tuple[str, typing.List[str]]
command_name = argv[0]
if not command_name:
argv = argv[1:]
elif command_name == settings.command:
argv.remove(command_name)
return command_name, argv
| 926,128
|
Parse the docstring with docopt.
Args:
command_name: The name of the subcommand to parse.
doc: A docopt-parseable string.
argv: The list of arguments to pass to docopt during parsing.
Returns:
The docopt results dictionary. If the subcommand has the same name as
the primary command, the subcommand value will be added to the
dictionary.
|
def _get_parsed_args(command_name, doc, argv):
# type: (str, str, typing.List[str]) -> typing.Dict[str, typing.Any]
_LOGGER.debug('Parsing docstring %s with arguments %s.', doc, argv)
args = docopt(doc, argv=argv)
if command_name == settings.command:
args[command_name] = True
return args
| 926,129
|
Returns an initialized attribute object based on attr_type
(e.g. :class:`MftAttrStandardInformation`)
Args:
attr_type (uint): Attribute type number (e.g. 0x10 -
$STANDARD_INFORMATION)
data (byte array): Data to initialize attribute object with.
|
def factory(attr_type, data):
constructors = {
MFT_ATTR_STANDARD_INFORMATION: MftAttrStandardInformation,
MFT_ATTR_ATTRIBUTE_LIST: MftAttrAttributeList,
MFT_ATTR_FILENAME: MftAttrFilename,
MFT_ATTR_OBJECT_ID: MftAttrObjectId,
MFT_ATTR_SECURITY_DESCRIPTOR: MftAttrSecurityDescriptor,
MFT_ATTR_VOLUME_NAME: MftAttrVolumeName,
MFT_ATTR_VOLUME_INFO: MftAttrVolumeInfo,
MFT_ATTR_DATA: MftAttrData,
MFT_ATTR_INDEX_ROOT: MftAttrIndexRoot,
MFT_ATTR_INDEX_ALLOCATION: MftAttrIndexAllocation,
MFT_ATTR_BITMAP: MftAttrBitmap,
MFT_ATTR_REPARSE_POINT: MftAttrReparsePoint,
MFT_ATTR_LOGGED_TOOLSTREAM: MftAttrLoggedToolstream,
}
if attr_type not in constructors:
return None
return constructors[attr_type](data)
| 926,393
|
Bulk get-or-create. data_list maps record keys to the data used to create each record.
The query is generated and the record keys are set from the passed-in queryset;
each item in data_list already carries its key, so keys need not be regenerated, which saves a lot of time.
Values are used instead of whole objects, which is much faster.
Args:
data_list: mapping of record keys to record configuration dicts
Returns:
The refreshed record lookup dictionary.
def bulk_get_or_create(self, data_list):
items_to_create = dict()
for record_key, record_config in data_list.items():
if record_key not in items_to_create:
record = self.get_instance(record_key)
if not record:
items_to_create[record_key] = self.model_cls(**record_config)
if items_to_create:
self.model_cls.objects.bulk_create(items_to_create.values())
self.set_record_lookup(True)
return self.record_lookup
| 926,403
|
A timed block to run tasks with titles and success/failure messages.
Args:
msg: The header message to print at the beginning of the timed block.
|
def timed_display(msg):
def print_header(msg, newline=True):
if sys.stdout.isatty():
print('\r', end=Style.BRIGHT + Fore.BLUE)
print(' {} '.format(msg).center(_ncols(), '='),
end='\n{}'.format(Style.RESET_ALL)
if newline else Style.RESET_ALL)
sys.stdout.flush()
def print_message(msg):
if sys.stdout.isatty():
print('\r', end='')
msg = msg.ljust(_ncols())
print(msg, end='')
sys.stdout.flush()
start = time.time()
print_header(msg)
with hidden_cursor():
try:
yield print_message
finally:
delta = time.time() - start
print_header('completed in {:.2f}s'.format(delta), False)
| 926,421
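A usage sketch (in the original source this function is evidently wrapped with contextlib.contextmanager, since it yields):
with timed_display('Deploying') as print_message:
    print_message('uploading artifacts')
    # ... do the work here ...
    print_message('restarting services')
# prints a bright header, streams status lines in place, then a
# 'completed in X.XXs' footer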
|
Run a group of tasks with a header, footer and success/failure messages.
Args:
header: A message to print in the header bar before the tasks are run.
tasks: A list of tuples containing a task title, a task, and a weight.
If the tuple only contains two values, the weight is assumed to be
one.
|
def run_tasks(header, tasks):
tasks = list(tasks)
with timed_display(header) as print_message:
with tqdm(tasks, position=1, desc='Progress', disable=None,
bar_format='{desc}{percentage:3.0f}% |{bar}|',
total=sum(t[2] if len(t) > 2 else 1 for t in tasks),
dynamic_ncols=True) as pbar:
for task in tasks:
print_message(task[0])
with display_status():
try:
task[1]()
finally:
pbar.update(task[2] if len(task) > 2 else 1)
| 926,422
|
Initialize the exception.
Args:
message: A six character status message to display on the terminal.
color: An ANSI color code value to use while displaying the
message.
exc: An exception that caused the non-standard status message. If
exc is supplied, it will be raised after the status message is
displayed.
|
def __init__(self, message, color, exc=None):
super(Status, self).__init__()
self.msg = message
self.color = color
self.exc = exc
| 926,423
|
Constructor
Arguments:
pusher_api_key: the key used to authenticate with Pusher
callback_chan: the channel to use to receive callbacks
|
def __init__(self, pusher_api_key, callback_chan, logger=None):
self.logger = logger or logging.getLogger(__name__)
self.pusher_connected_listeners = []
self.pos_callback_chan = callback_chan
self.pusher = pusherclient.Pusher(pusher_api_key)
self.pusher.connection.logger.setLevel(logging.WARNING)
self.pusher.connection.bind('pusher:connection_established',
self._pusher_connect_handler)
self.pusher.connect()
self.pusherthread_stop = Event()
self.pusherthread = Thread(target=self._runForever,
args=(self.pusherthread_stop,))
| 926,428
|
Runs the main loop
Arguments:
stop_event: threading.Event() as a stop signal
|
def _runForever(self, stop_event):
while not stop_event.is_set():
state = self.pusher.connection.state
# compare strings with !=, not "is not" (an identity check)
if (state != "connecting" and
state != "connected"):
self.logger.warning(
"Pusher seems to be disconnected, trying to reconnect")
self.pusher.connect()
stop_event.wait(0.5)
| 926,430
|
Loads GPT partition table.
Args:
filename (str): path to file or device to open for reading
bs (uint): Block size of the volume, default: 512
Raises:
IOError: If file does not exist or not readable
|
def load(self, filename, bs=512):
with open(filename, 'rb') as f:
f.seek(GPT_HEADER_OFFSET + 0x0C)
header_size = struct.unpack("<I", f.read(4))[0]
f.seek(GPT_HEADER_OFFSET)
header_data = f.read(header_size)
self.header = GPT_HEADER(header_data)
if (self.header.signature != GPT_SIGNATURE):
raise Exception("Invalid GPT signature")
self.__load_partition_entries(f, bs)
| 926,433
|
Loads the list of :class:`GptPartition` partition entries
Args:
bs (uint): Block size of the volume
|
def __load_partition_entries(self, fd, bs):
fd.seek(self.header.part_lba * bs)
for p in range(0, self.header.num_partitions):
data = fd.read(self.header.part_size)
entry = GptPartitionEntry(data)
if entry.type_guid != uuid.UUID(
'{00000000-0000-0000-0000-000000000000}'
):
self.__partition_entries.append(entry)
else:
# stop loading on empty partition entry
break
| 926,434
|
Used in a plugin's registration routine,
to associate its detection method with a given filesystem id
Args:
fs_id: filesystem id that is read from MBR partition entry
plugin: plugin that supports this filesystem
|
def register_mbr_plugin(self, fs_id, plugin):
self.logger.debug('MBR: {}, FS ID: {}'
.format(self.__get_plugin_name(plugin), fs_id))
self.__mbr_plugins[fs_id].append(plugin)
| 926,474
|
Used in a plugin's registration routine,
to associate its detection method with a given filesystem guid
Args:
fs_guid: filesystem guid that is read from GPT partition entry
plugin: plugin that supports this filesystem
|
def register_gpt_plugin(self, fs_guid, plugin):
key = uuid.UUID(fs_guid.lower())
self.logger.debug('GPT: {}, GUID: {}'
.format(self.__get_plugin_name(plugin), fs_guid))
self.__gpt_plugins[key].append(plugin)
| 926,475
|
Used by rawdisk.session.Session to match mbr partitions against
filesystem plugins.
Args:
filename: device or file that it will read in order to detect
the filesystem
fs_id: filesystem id to match (ex. 0x07)
offset: offset for the filesystem that is being matched
Returns:
Volume object supplied by matched plugin.
If there is no match, None is returned
|
def detect_mbr(self, filename, offset, fs_id):
self.logger.debug('Detecting MBR partition type')
if fs_id not in self.__mbr_plugins:
return None
else:
plugins = self.__mbr_plugins.get(fs_id)
for plugin in plugins:
if plugin.detect(filename, offset):
return plugin.get_volume_object()
return None
| 926,477
|
Used by rawdisk.session.Session to match gpt partitions against
filesystem plugins.
Args:
filename: device or file that it will read in order to detect the
filesystem
fs_id: filesystem guid to match
(ex. {EBD0A0A2-B9E5-4433-87C0-68B6B72699C7})
offset: offset for the filesystem that is being matched
Returns:
Volume object supplied by matched plugin.
If there is no match, None is returned
|
def detect_gpt(self, filename, offset, fs_guid):
self.logger.debug('Detecting GPT partition type')
if fs_guid not in self.__gpt_plugins:
return None
else:
plugins = self.__gpt_plugins.get(fs_guid)
for plugin in plugins:
if plugin.detect(filename, offset):
return plugin.get_volume_object()
return None
| 926,478
|
Loads NTFS volume information
Args:
filename (str): Path to file/device to read the volume
information from.
offset (uint): Valid NTFS partition offset from the beginning
of the file/device.
Raises:
IOError: If source file/device does not exist or is not readable
|
def load(self, filename, offset):
self.offset = offset
self.filename = filename
self.bootsector = BootSector(
filename=filename,
length=NTFS_BOOTSECTOR_SIZE,
offset=self.offset)
self.mft_table = MftTable(
mft_entry_size=self.bootsector.mft_record_size,
filename=self.filename,
offset=self.mft_table_offset
)
self.mft_table.preload_entries(NUM_SYSTEM_ENTRIES)
self._load_volume_information()
| 926,670
|
Detects partitioning scheme of the source
Args:
filename (str): path to file or device for detection of
partitioning scheme.
Returns:
SCHEME_MBR, SCHEME_GPT or SCHEME_UNKNOWN
Raises:
IOError: The file doesn't exist or cannot be opened for reading
>>> from rawdisk.scheme.common import *
>>> scheme = detect_scheme('/dev/disk1')
>>> if scheme == PartitionScheme.SCHEME_MBR:
>>> <...>
|
def detect_scheme(filename):
logger = logging.getLogger(__name__)
logger.info('Detecting partitioning scheme')
with open(filename, 'rb') as f:
# Look for MBR signature first
f.seek(mbr.MBR_SIG_OFFSET)
data = f.read(mbr.MBR_SIG_SIZE)
signature = struct.unpack("<H", data)[0]
if signature != mbr.MBR_SIGNATURE:
# Something else
logger.debug('Unknown partitioning scheme')
return PartitionScheme.SCHEME_UNKNOWN
else:
# Could be MBR or GPT, look for GPT header
f.seek(gpt.GPT_HEADER_OFFSET)
data = f.read(gpt.GPT_SIG_SIZE)
signature = struct.unpack("<8s", data)[0]
if signature != gpt.GPT_SIGNATURE:
logger.debug('MBR scheme detected')
return PartitionScheme.SCHEME_MBR
else:
logger.debug('GPT scheme detected')
return PartitionScheme.SCHEME_GPT
| 926,932
|
Returns the particle's age (how long it has been forced) in a variety of units,
rounded to 8 decimal places.
Parameters:
units (optional) = 'days' (default), 'hours', 'minutes', or 'seconds'
|
def get_age(self, **kwargs):
try:
units = kwargs.get('units', None)
if units is None:
return self._age
units = units.lower()
if units == "days":
z = self._age
elif units == "hours":
z = self._age * 24.
elif units == "minutes":
z = self._age * 24. * 60.
elif units == "seconds":
z = self._age * 24. * 60. * 60.
else:
raise ValueError("unknown units: %s" % units)
return round(z,8)
except StandardError:
raise KeyError("Could not return age of particle")
| 927,066
|
Return an error message and write a log file if logging was not enabled.
Args:
exc: The unexpected exception.
Returns:
A message to display to the user concerning the unexpected exception.
|
def handle_unexpected_exception(exc):
# type: (BaseException) -> str
try:
write_logfile()
addendum = 'Please see the log file for more information.'
except IOError:
addendum = 'Unable to write log file.'
try:
message = str(exc)
return '{}{}{}'.format(message, '\n' if message else '', addendum)
except Exception: # pylint: disable=broad-except
return str(exc)
| 927,266
|
Configure the root logger and a logfile handler.
Args:
log_level: The logging level to set the logger handler.
|
def enable_logging(log_level):
# type: (typing.Union[None, int]) -> None
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
logfile_handler = logging.StreamHandler(_LOGFILE_STREAM)
logfile_handler.setLevel(logging.DEBUG)
logfile_handler.setFormatter(logging.Formatter(
'%(levelname)s [%(asctime)s][%(name)s] %(message)s'))
root_logger.addHandler(logfile_handler)
if signal.getsignal(signal.SIGTERM) == signal.SIG_DFL:
signal.signal(signal.SIGTERM, _logfile_sigterm_handler)
if log_level:
handler = logging.StreamHandler()
handler.setFormatter(_LogColorFormatter())
root_logger.setLevel(log_level)
root_logger.addHandler(handler)
| 927,267
|
Get the log level from the CLI arguments.
Removes logging arguments from sys.argv.
Args:
args: The parsed docopt arguments to be used to determine the logging
level.
Returns:
The correct log level based on the three CLI arguments given.
Raises:
ValueError: Raised if the given log level is not in the acceptable
list of values.
|
def get_log_level(args):
# type: (typing.Dict[str, typing.Any]) -> int
index = -1
log_level = None
if '<command>' in args and args['<command>']:
index = sys.argv.index(args['<command>'])
if args.get('--debug'):
log_level = 'DEBUG'
if '--debug' in sys.argv and sys.argv.index('--debug') < index:
sys.argv.remove('--debug')
elif '-d' in sys.argv and sys.argv.index('-d') < index:
sys.argv.remove('-d')
elif args.get('--verbose'):
log_level = 'INFO'
if '--verbose' in sys.argv and sys.argv.index('--verbose') < index:
sys.argv.remove('--verbose')
elif '-v' in sys.argv and sys.argv.index('-v') < index:
sys.argv.remove('-v')
elif args.get('--log-level'):
log_level = args['--log-level']
sys.argv.remove('--log-level')
sys.argv.remove(log_level)
if log_level not in (None, 'DEBUG', 'INFO', 'WARN', 'ERROR'):
raise exceptions.InvalidLogLevelError(log_level)
return getattr(logging, log_level) if log_level else None
| 927,268
|
Format the log record with timestamps and level based colors.
Args:
record: The log record to format.
Returns:
The formatted log record.
|
def format(self, record):
# type: (logging.LogRecord) -> str
if record.levelno >= logging.ERROR:
color = colorama.Fore.RED
elif record.levelno >= logging.WARNING:
color = colorama.Fore.YELLOW
elif record.levelno >= logging.INFO:
color = colorama.Fore.RESET
else:
color = colorama.Fore.CYAN
format_template = (
'{}{}%(levelname)s{} [%(asctime)s][%(name)s]{} %(message)s')
if sys.stdout.isatty():
self._fmt = format_template.format(
colorama.Style.BRIGHT,
color,
colorama.Fore.RESET,
colorama.Style.RESET_ALL
)
else:
self._fmt = format_template.format(*[''] * 4)
if six.PY3:
self._style._fmt = self._fmt # pylint: disable=protected-access
return super(_LogColorFormatter, self).format(record)
| 927,270
|