docstring
stringlengths 52
499
| function
stringlengths 67
35.2k
| __index_level_0__
int64 52.6k
1.16M
|
|---|---|---|
Get movies that match the search query string from the API.
Args:
q (optional): plain text search query; remember to URI encode
page_limit (optional): number of search results to show per page,
default=30
page (optional): results page number, default=1
Returns:
A dict representation of the JSON returned from the API.
|
def search(self, **kwargs):
    """Query the API for movies matching a search string.

    Keyword Args:
        q: plain-text search query (URI-encoded by the caller).
        page_limit: results per page.
        page: results page number.

    Returns:
        The decoded JSON response as a dict.
    """
    response = self._GET(self._get_path('search'), kwargs)
    self._set_attrs_to_values(response)
    return response
| 1,015,625
|
Find the tty device for a given usbserial devices identifiers.
Args:
vendor: (int) something like 0x0000
product: (int) something like 0x0000
Returns:
String, like /dev/ttyACM0 or /dev/tty.usb...
|
def find_usbserial(vendor, product):
    """Find the tty device for a USB-serial device.

    Args:
        vendor: (int) USB vendor id, e.g. 0x0000.
        product: (int) USB product id, e.g. 0x0000.

    Returns:
        Device path string, like /dev/ttyACM0 or /dev/tty.usb...

    Raises:
        NotImplementedError: On platforms other than Linux and Darwin.
    """
    system = platform.system()
    if system == 'Linux':
        # Linux helpers expect zero-padded lowercase hex ids.
        hex_ids = [('%04x' % (value)).strip() for value in (vendor, product)]
        return linux_find_usbserial(*hex_ids)
    if system == 'Darwin':
        return osx_find_usbserial(vendor, product)
    raise NotImplementedError('Cannot find serial ports on %s' % system)
| 1,015,666
|
Dispatcher for the info generators.
Determines which __info_*_gen() should be used based on the supplied
parameters.
Args:
code: The status code for the command response.
message: The status message for the command response.
compressed: Force decompression. Useful for xz* commands.
Returns:
An info generator.
|
def info_gen(self, code, message, compressed=False):
    """Pick the appropriate info generator for a command response.

    Args:
        code: Status code of the command response.
        message: Status message of the command response.
        compressed: Force decompression (used by xz* commands).

    Returns:
        An info generator.
    """
    # Server-announced gzip compression takes precedence.
    if "COMPRESS=GZIP" in message:
        return self.__info_gzip_gen()
    return self.__info_yenczlib_gen() if compressed else self.__info_plain_gen()
| 1,015,867
|
Generator for the LIST ACTIVE command.
Generates a list of active newsgroups that match the specified pattern.
If no pattern is specified then all active groups are generated.
See <http://tools.ietf.org/html/rfc3977#section-7.6.3>
Args:
pattern: Glob matching newsgroups of interest.
Yields:
A tuple containing the name, low water mark, high water mark,
and status for the newsgroup.
|
def list_active_gen(self, pattern=None):
    """Generator for the LIST ACTIVE command.

    Yields active newsgroups matching `pattern`; all of them when no
    pattern is given. See RFC 3977 section 7.6.3.

    Args:
        pattern: Glob matching newsgroups of interest.

    Yields:
        Tuple of (name, low water mark, high water mark, status).

    Raises:
        NNTPReplyError: On an unexpected response code.
    """
    # Plain LIST when no pattern is supplied; LIST ACTIVE otherwise.
    cmd = "LIST" if pattern is None else "LIST ACTIVE"
    code, message = self.command(cmd, pattern)
    if code != 215:
        raise NNTPReplyError(code, message)
    for entry in self.info_gen(code, message):
        yield utils.parse_newsgroup(entry)
| 1,015,879
|
Generator for the LIST NEWSGROUPS command.
Generates a list of newsgroups including the name and a short
description.
See <http://tools.ietf.org/html/rfc3977#section-7.6.6>
Args:
pattern: Glob matching newsgroups of interest.
Yields:
A tuple containing the name, and description for the newsgroup.
|
def list_newsgroups_gen(self, pattern=None):
    """Generator for the LIST NEWSGROUPS command.

    Yields newsgroup names with their short descriptions.
    See RFC 3977 section 7.6.6.

    Args:
        pattern: Glob matching newsgroups of interest.

    Yields:
        Tuple of (name, description) for each newsgroup. The description
        is the empty string when the server provides none.

    Raises:
        NNTPReplyError: On an unexpected response code.
    """
    code, message = self.command("LIST NEWSGROUPS", pattern)
    if code != 215:
        raise NNTPReplyError(code, message)
    for line in self.info_gen(code, message):
        # Bug fix: split at most once so multi-word descriptions are kept
        # intact (previously only the first word was yielded).
        parts = line.strip().split(None, 1)
        name = parts[0]
        description = parts[1] if len(parts) > 1 else ""
        yield name, description
| 1,015,881
|
XZHDR command.
Args:
msgid_range: A message-id as a string, or an article number as an
integer, or a tuple of specifying a range of article numbers in
the form (first, [last]) - if last is omitted then all articles
after first are included. A msgid_range of None (the default)
uses the current article.
|
def xzhdr(self, header, msgid_range=None):
    """XZHDR command (compressed header retrieval).

    Args:
        header: Name of the header to fetch.
        msgid_range: A message-id string, an article number, or an
            (first, [last]) article-number tuple; None uses the current
            article.

    Returns:
        The decompressed info response.

    Raises:
        NNTPReplyError: On an unexpected response code.
    """
    if msgid_range is None:
        args = header
    else:
        args = header + " " + utils.unparse_msgid_range(msgid_range)
    code, message = self.command("XZHDR", args)
    if code != 221:
        raise NNTPReplyError(code, message)
    return self.info(code, message, compressed=True)
| 1,015,893
|
Prepare the transformer to convert data and return the processed table.
Args:
col(pandas.DataFrame): Data to transform.
Returns:
pandas.DataFrame
|
def transform(self, col):
    """Convert each row of `col` via get_val and return the result.

    Args:
        col (pandas.DataFrame): Data to transform.

    Returns:
        pandas.DataFrame with a single transformed column.
    """
    values = col.apply(self.get_val, axis=1)
    if self.subtype == 'int':
        values = values.astype(int)
    result = pd.DataFrame(index=col.index)
    result[self.col_name] = values
    return result
| 1,015,988
|
Converts data back into original format.
Args:
col(pandas.DataFrame): Data to transform.
Returns:
pandas.DataFrame
|
def reverse_transform(self, col):
    """Convert transformed data back to its original numeric format.

    Args:
        col (pandas.DataFrame): Data to transform.

    Returns:
        pandas.DataFrame with the restored column.
    """
    restored = col.apply(self.safe_round, axis=1)
    if self.subtype == 'int':
        restored = restored.astype(int)
    output = pd.DataFrame(index=col.index)
    output[self.col_name] = restored
    return output
| 1,015,989
|
Returns a converter that takes in a value and turns it into an integer, if necessary.
Args:
col_name(str): Name of the column.
subtype(str): Numeric subtype of the values.
Returns:
function
|
def safe_round(self, x):
    """Coerce a row's value to a finite number, rounding for integers.

    Infinities clamp to +/- sys.maxsize and NaN falls back to the
    transformer's default value.

    Args:
        x: Row (mapping) holding the value under self.col_name.

    Returns:
        An int when subtype is 'integer', otherwise the cleaned value.
    """
    value = x[self.col_name]
    if np.isposinf(value):
        value = sys.maxsize
    elif np.isneginf(value):
        value = -sys.maxsize
    if np.isnan(value):
        value = self.default_val
    return int(round(value)) if self.subtype == 'integer' else value
| 1,015,992
|
Gets the top box office earning movies from the API.
Sorted by most recent weekend gross ticket sales.
Args:
limit (optional): limits the number of movies returned, default=10
country (optional): localized data for selected country, default="us"
Returns:
A dict representation of the JSON returned from the API.
|
def movies_box_office(self, **kwargs):
    """Fetch the top box-office earning movies from the API.

    Keyword Args:
        limit: maximum number of movies returned.
        country: localized data for the selected country.

    Returns:
        The decoded JSON response as a dict.
    """
    response = self._GET(self._get_path('movies_box_office'), kwargs)
    self._set_attrs_to_values(response)
    return response
| 1,016,073
|
Gets the movies currently in theaters from the API.
Args:
page_limit (optional): number of movies to show per page, default=16
page (optional): results page number, default=1
country (optional): localized data for selected country, default="us"
Returns:
A dict representation of the JSON returned from the API.
|
def movies_in_theaters(self, **kwargs):
    """Fetch movies currently in theaters from the API.

    Keyword Args:
        page_limit: movies per page.
        page: results page number.
        country: localized data for the selected country.

    Returns:
        The decoded JSON response as a dict.
    """
    response = self._GET(self._get_path('movies_in_theaters'), kwargs)
    self._set_attrs_to_values(response)
    return response
| 1,016,074
|
Gets the current opening movies from the API.
Args:
limit (optional): limits the number of movies returned, default=10
country (optional): localized data for selected country, default="us"
Returns:
A dict representation of the JSON returned from the API.
|
def movies_opening(self, **kwargs):
    """Fetch currently opening movies from the API.

    Keyword Args:
        limit: maximum number of movies returned.
        country: localized data for the selected country.

    Returns:
        The decoded JSON response as a dict.
    """
    response = self._GET(self._get_path('movies_opening'), kwargs)
    self._set_attrs_to_values(response)
    return response
| 1,016,075
|
Gets the upcoming movies from the API.
Args:
page_limit (optional): number of movies to show per page, default=16
page (optional): results page number, default=1
country (optional): localized data for selected country, default="us"
Returns:
A dict representation of the JSON returned from the API.
|
def movies_upcoming(self, **kwargs):
    """Fetch upcoming movies from the API.

    Keyword Args:
        page_limit: movies per page.
        page: results page number.
        country: localized data for the selected country.

    Returns:
        The decoded JSON response as a dict.
    """
    response = self._GET(self._get_path('movies_upcoming'), kwargs)
    self._set_attrs_to_values(response)
    return response
| 1,016,076
|
Gets the top DVD rentals from the API.
Args:
limit (optional): limits the number of movies returned, default=10
country (optional): localized data for selected country, default="us"
Returns:
A dict representation of the JSON returned from the API.
|
def dvds_top_rentals(self, **kwargs):
    """Fetch the top DVD rentals from the API.

    Keyword Args:
        limit: maximum number of movies returned.
        country: localized data for the selected country.

    Returns:
        The decoded JSON response as a dict.
    """
    response = self._GET(self._get_path('dvds_top_rentals'), kwargs)
    self._set_attrs_to_values(response)
    return response
| 1,016,078
|
Gets the current DVD releases from the API.
Args:
page_limit (optional): number of movies to show per page, default=16
page (optional): results page number, default=1
country (optional): localized data for selected country, default="us"
Returns:
A dict representation of the JSON returned from the API.
|
def dvds_current_releases(self, **kwargs):
    """Fetch current DVD releases from the API.

    Keyword Args:
        page_limit: movies per page.
        page: results page number.
        country: localized data for the selected country.

    Returns:
        The decoded JSON response as a dict.
    """
    response = self._GET(self._get_path('dvds_current_releases'), kwargs)
    self._set_attrs_to_values(response)
    return response
| 1,016,079
|
Gets the new DVD releases from the API.
Args:
page_limit (optional): number of movies to show per page, default=16
page (optional): results page number, default=1
country (optional): localized data for selected country, default="us"
Returns:
A dict representation of the JSON returned from the API.
|
def dvds_new_releases(self, **kwargs):
    """Fetch new DVD releases from the API.

    Keyword Args:
        page_limit: movies per page.
        page: results page number.
        country: localized data for the selected country.

    Returns:
        The decoded JSON response as a dict.
    """
    response = self._GET(self._get_path('dvds_new_releases'), kwargs)
    self._set_attrs_to_values(response)
    return response
| 1,016,080
|
Gets the upcoming DVDs from the API.
Args:
page_limit (optional): number of movies to show per page, default=16
page (optional): results page number, default=1
country (optional): localized data for selected country, default="us"
Returns:
A dict representation of the JSON returned from the API.
|
def dvds_upcoming(self, **kwargs):
    """Fetch upcoming DVDs from the API.

    Keyword Args:
        page_limit: movies per page.
        page: results page number.
        country: localized data for the selected country.

    Returns:
        The decoded JSON response as a dict.
    """
    response = self._GET(self._get_path('dvds_upcoming'), kwargs)
    self._set_attrs_to_values(response)
    return response
| 1,016,081
|
Returns the predictions for ``X``.
Under the hood this method simply goes through the outputs of ``iter_predict`` and returns
the final one.
Arguments:
X (array-like or sparse matrix of shape (n_samples, n_features)): The input samples.
Sparse matrices are accepted only if they are supported by the weak model.
Returns:
array of shape (n_samples,) containing the predicted values.
|
def predict(self, X):
    """Return the final prediction produced by ``iter_predict``.

    Arguments:
        X (array-like or sparse matrix of shape (n_samples, n_features)):
            The input samples.

    Returns:
        array of shape (n_samples,) containing the predicted values.
    """
    # A maxlen-1 deque keeps only the last yielded item; pop() raises
    # IndexError when iter_predict yields nothing, as before.
    last_only = collections.deque(self.iter_predict(X), maxlen=1)
    return last_only.pop()
| 1,016,115
|
Returns the predicted probabilities for ``X``.
Arguments:
X (array-like or sparse matrix of shape (n_samples, n_features)): The input samples.
Sparse matrices are accepted only if they are supported by the weak model.
Returns:
array of shape (n_samples, n_classes) containing the predicted probabilities.
|
def predict_proba(self, X):
    """Return the final probabilities produced by ``iter_predict_proba``.

    Arguments:
        X (array-like or sparse matrix of shape (n_samples, n_features)):
            The input samples.

    Returns:
        array of shape (n_samples, n_classes) with predicted probabilities.
    """
    # Keep only the last yielded item; pop() raises IndexError when the
    # generator is empty, matching the original behavior.
    last_only = collections.deque(self.iter_predict_proba(X), maxlen=1)
    return last_only.pop()
| 1,016,122
|
Creates an endpoint and call bindToEndpoint(endpoint). This is a convenience method.
Parameters:
instance -- the object instance to which the getter, setter and changedSignal belong
setter -- the value setter method
valueChangedSignal -- the pyqtSignal that is emitted with the value changes
getter -- the value getter method (default None)
If None, the signal argument(s) are passed to the setter method
|
def bind(self, instance, setter, valueChangedSignal, getter=None):
    """Create a BindingEndpoint and bind to it (convenience wrapper).

    Parameters:
        instance: object owning the getter, setter and changed signal.
        setter: the value setter method.
        valueChangedSignal: pyqtSignal emitted when the value changes.
        getter: the value getter method; when None the signal argument(s)
            are passed to the setter.
    """
    self.bindToEndPoint(
        BindingEndpoint(instance, setter, valueChangedSignal, getter=getter))
| 1,016,315
|
Anonymize in `table_data` the fields in `pii_fields`.
Args:
table_data (pandas.DataFrame): Original dataframe/table.
pii_fields (list[dict]): Metadata for the fields to transform.
Result:
pandas.DataFrame: Anonymized table.
|
def _anonymize_table(cls, table_data, pii_fields):
    """Anonymize the `pii_fields` of `table_data` in place.

    Args:
        table_data (pandas.DataFrame): Original dataframe/table.
        pii_fields (list[dict]): Metadata for the fields to transform.

    Returns:
        pandas.DataFrame: The anonymized table.
    """
    for field_meta in pii_fields:
        transformer = cls.get_class(TRANSFORMERS['categorical'])(field_meta)
        table_data[field_meta['name']] = transformer.anonymize_column(table_data)
    return table_data
| 1,016,319
|
Load the contents of meta_file and the corresponding data.
If fields containing Personally Identifiable Information are detected in the metadata
they are anonymized before being assigned into `table_dict`.
Args:
base_dir(str): Root folder of the dataset files.
Returns:
dict: Mapping str -> tuple(pandas.DataFrame, dict)
|
def _get_tables(self, base_dir):
    """Load every table marked for use, anonymizing PII fields.

    Args:
        base_dir (str): Root folder of the dataset files.

    Returns:
        dict: Mapping str -> tuple(pandas.DataFrame, dict).
    """
    tables = {}
    for table_meta in self.metadata['tables']:
        if not table_meta['use']:
            continue
        csv_path = os.path.join(
            base_dir, self.metadata['path'], table_meta['path'])
        data = pd.read_csv(csv_path)
        pii_fields = self._get_pii_fields(table_meta)
        data = self._anonymize_table(data, pii_fields)
        tables[table_meta['name']] = (data, table_meta)
    return tables
| 1,016,320
|
Apply the stored transformers to `table`.
Args:
table(pandas.DataFrame): Contents of the table to be transformed.
table_meta(dict): Metadata for the given table.
missing(bool): Whether or not to use NullTransformer to handle missing values.
Returns:
pandas.DataFrame: Transformed table.
|
def transform_table(self, table, table_meta, missing=None):
    """Apply the previously fitted transformers to `table`.

    Args:
        table (pandas.DataFrame): Contents of the table to be transformed.
        table_meta (dict): Metadata for the given table.
        missing (bool): Whether to use NullTransformer for missing values;
            when None, falls back to self.missing.

    Returns:
        pandas.DataFrame: Transformed table, columns ordered as built.
    """
    # An explicit `missing` argument overrides and persists into self.missing.
    if missing is None:
        missing = self.missing
    else:
        self.missing = missing
    warnings.warn(DEPRECATION_MESSAGE.format('transform_table'), DeprecationWarning)
    content = {}
    columns = []
    table_name = table_meta['name']
    for field in table_meta['fields']:
        column_name = field['name']
        if missing and table[column_name].isnull().any():
            # Column contains nulls: run NullTransformer first, which fills
            # values and emits a '?'-prefixed null-indicator column.
            null_transformer = transformers.NullTransformer(field)
            clean_column = null_transformer.fit_transform(table[column_name])
            null_name = '?' + column_name
            columns.append(null_name)
            content[null_name] = clean_column[null_name].values
            column = clean_column[column_name]
        else:
            column = table[column_name].to_frame()
        # Transformers were fitted earlier, keyed by (table, column).
        transformer = self.transformers[(table_name, column_name)]
        content[column_name] = transformer.transform(column)[column_name].values
        columns.append(column_name)
    return pd.DataFrame(content, columns=columns)
| 1,016,326
|
Transform a `table` back to its original format.
Args:
table(pandas.DataFrame): Contents of the table to be transformed.
table_meta(dict): Metadata for the given table.
missing(bool): Whether or not to use NullTransformer to handle missing values.
Returns:
pandas.DataFrame: Table in original format.
|
def reverse_transform_table(self, table, table_meta, missing=None):
    """Transform `table` back to its original format.

    Args:
        table (pandas.DataFrame): Contents of the table to be transformed.
        table_meta (dict): Metadata for the given table.
        missing (bool): Whether to use NullTransformer for missing values;
            when None, falls back to self.missing.

    Returns:
        pandas.DataFrame: Table in original format.
    """
    if missing is None:
        missing = self.missing
    else:
        self.missing = missing
    warnings.warn(
        DEPRECATION_MESSAGE.format('reverse_transform_table'), DeprecationWarning)
    reversed_table = pd.DataFrame(index=table.index)
    table_name = table_meta['name']
    for field in table_meta['fields']:
        restored = self._reverse_transform_column(table, field, table_name)
        if restored is not None:
            reversed_table[field['name']] = restored
    return reversed_table
| 1,016,327
|
Encrypt File
Args:
filename: Pass the filename to encrypt.
Returns:
No return.
|
def encrpyt_file(self, filename):
    """Encrypt a file in place with ansible-vault (AES256).

    NOTE(review): the method name misspells 'encrypt'; kept as-is since
    renaming would break existing callers.

    Args:
        filename: Path of the file to encrypt.

    Returns:
        None. Prints a diagnostic and returns early when the file is
        missing, the vault password is unset, or the file is already
        encrypted. (Python 2 print statements.)
    """
    if not os.path.exists(filename):
        print "Invalid filename %s. Does not exist" % filename
        return
    if self.vault_password is None:
        # Password is expected from the PYANSI_VAULT_PASSWORD env var.
        print "ENV Variable PYANSI_VAULT_PASSWORD not set"
        return
    if self.is_file_encrypted(filename):
        # No need to do anything.
        return
    cipher = 'AES256'
    vaulteditor = VaultEditor(cipher, self.vault_password, filename)
    vaulteditor.encrypt_file()
| 1,016,425
|
Decrypt File
Args:
filename: Pass the filename to encrypt.
Returns:
No return.
|
def decrypt_file(self, filename):
    """Decrypt a vault-encrypted file in place (AES256).

    Args:
        filename: Path of the file to decrypt.

    Returns:
        None. Prints a diagnostic and returns early when the file is
        missing, the vault password is unset, or the file is not
        encrypted. (Python 2 print statements.)
    """
    if not os.path.exists(filename):
        print "Invalid filename %s. Does not exist" % filename
        return
    if self.vault_password is None:
        # Password is expected from the PYANSI_VAULT_PASSWORD env var.
        print "ENV Variable PYANSI_VAULT_PASSWORD not set"
        return
    if not self.is_file_encrypted(filename):
        # No need to do anything.
        return
    cipher = 'AES256'
    vaulteditor = VaultEditor(cipher, self.vault_password, filename)
    vaulteditor.decrypt_file()
| 1,016,426
|
Run converter.
Args:
language: (unicode) language code.
|
def convert(self, language, *args, **kwargs):
    """Convert every .po file found for `language` to XLS.

    Args:
        language: (unicode) language code.
    """
    for po_path in find_pos(language):
        converter = PoToXls(src=po_path, **kwargs)
        converter.convert()
| 1,016,458
|
Prepare the transformer to convert data and return the processed table.
Args:
col(pandas.DataFrame): Data to transform.
Returns:
pandas.DataFrame
|
def transform(self, col):
    """Fill nulls and add a null-indicator column.

    Args:
        col (pandas.DataFrame): Data to transform.

    Returns:
        pandas.DataFrame with the filled column plus an indicator column
        (1 where the original value was present, 0 where it was null).
    """
    null_flags = (pd.notnull(col) * 1).astype(int)
    result = pd.DataFrame(index=col.index)
    result[self.col_name] = col.fillna(self.default_value)
    result[self.new_name] = null_flags
    return result
| 1,016,487
|
Converts data back into original format.
Args:
col(pandas.DataFrame): Data to transform.
Returns:
pandas.DataFrame
|
def reverse_transform(self, col):
    """Convert data back into its original format, restoring nulls.

    Bug fix: the previous implementation wrote NaNs into the caller's
    DataFrame in place; the input is now copied before modification.

    Args:
        col (pandas.DataFrame): Data to transform; must contain the value
            column and its '?'-prefixed null-indicator column.

    Returns:
        pandas.DataFrame with NaN wherever the indicator column is 0.
    """
    col = col.copy()
    null_flag = '?' + self.col_name
    col.loc[col[null_flag] == 0, self.col_name] = np.nan
    output = pd.DataFrame()
    output[self.col_name] = col[self.col_name]
    return output
| 1,016,488
|
Init conversion.
Args:
src: (unicode or string) path to ".po" file.
|
def __init__(self, src, *args, **kwargs):
    """Initialize the conversion from a ".po" file.

    Args:
        src: (unicode or string) path to the ".po" file.

    Exits the process with status -1 when `src` does not exist.
    """
    self.quiet = kwargs.pop("quiet", False)
    if not os.path.exists(src):
        message = "ERROR: File '{src}' does not exists.".format(src=src)
        if not self.quiet:
            sys.stderr.write(message)
        self.logger.error(message)
        sys.exit(-1)
    self.src = src
    self.po = polib.pofile(self.src)
    self.result = xlwt.Workbook(encoding="utf-8")
| 1,016,518
|
Write sheet header.
Args:
sheet: (xlwt.Worksheet.Worksheet) instance of xlwt sheet.
name: (unicode) name of sheet.
|
def header(self, sheet, name):
    """Write the configured column titles into the sheet's first row.

    Args:
        sheet: (xlwt.Worksheet.Worksheet) target sheet.
        name: (unicode) sheet name used to look up its header titles.
    """
    first_row = sheet.row(0)
    for index, title in enumerate(self.headers[name]):
        first_row.write(index, title)
| 1,016,519
|
Create a map of the empirical probability for each category.
Args:
col(pandas.DataFrame): Data to transform.
|
def _fit(self, col):
    """Build a map of empirical probability intervals per category.

    Each category is assigned a sub-interval of [0, 1] proportional to its
    frequency, plus the interval's mean and a std of prob/6.

    Args:
        col (pandas.DataFrame): Data to fit.
    """
    # NaNs are mapped to inf for grouping, then renamed back to None.
    column = col[self.col_name].replace({np.nan: np.inf})
    frequencies = column.groupby(column).count().rename({np.inf: None}).to_dict()
    total = len(col)
    start = 0
    for value, count in frequencies.items():
        prob = count / total
        end = start + prob
        interval = (start, end)
        self.probability_map[value] = (interval, np.mean(interval), prob / 6)
        start = end
| 1,016,701
|
Prepare the transformer to convert data and return the processed table.
Args:
col(pandas.DataFrame): Data to transform.
Returns:
pandas.DataFrame
|
def transform(self, col):
    """Map each categorical value to its numeric representation.

    Args:
        col (pandas.DataFrame): Data to transform.

    Returns:
        pandas.DataFrame with the converted column.
    """
    # Normalize every NaN to None so all missing values map identically.
    cleaned = col[self.col_name].replace({np.nan: None})
    result = pd.DataFrame()
    result[self.col_name] = cleaned.apply(self.get_val)
    return result
| 1,016,702
|
Prepare the transformer and return processed data.
Args:
col(pandas.DataFrame): Data to transform.
Returns:
pandas.DataFrame
|
def fit_transform(self, col):
    """Fit the transformer on `col` and return the transformed data.

    Args:
        col (pandas.DataFrame): Data to transform.

    Returns:
        pandas.DataFrame
    """
    data = self.anonymize_column(col) if self.anonymize else col
    self._fit(data)
    return self.transform(data)
| 1,016,703
|
Converts data back into original format.
Args:
col(pandas.DataFrame): Data to transform.
Returns:
pandas.DataFrame
|
def reverse_transform(self, col):
    """Convert numeric values back into their original categories.

    Args:
        col (pandas.DataFrame): Data to transform.

    Returns:
        pandas.DataFrame
    """
    categories = self.get_category(col[self.col_name])
    return pd.DataFrame({self.col_name: categories})
| 1,016,704
|
Returns categories for the specified numeric values
Args:
column(pandas.Series): Values to transform into categories
Returns:
pandas.Series
|
def get_category(self, column):
    """Return the category whose probability interval contains each value.

    Args:
        column (pandas.Series): Values to transform into categories.

    Returns:
        pandas.Series
    """
    result = pd.Series(index=column.index)
    for category, stats in self.probability_map.items():
        low, high = stats[0]
        # strictly-inside comparison, matching how intervals were built
        result[(low < column) & (column < high)] = category
    return result
| 1,016,706
|
Parse timezone to offset in seconds.
Args:
value: A timezone in the '+0000' format. An integer would also work.
Returns:
The timezone offset from GMT in seconds as an integer.
|
def _offset(value):
    """Parse a '+HHMM'-style timezone into an offset in seconds.

    Args:
        value: A timezone like '+0000' (an integer also works).

    Returns:
        The offset from GMT in seconds as an integer.
    """
    raw = int(value)
    if raw == 0:
        return 0
    sign = 1 if raw > 0 else -1
    magnitude = abs(raw)
    # HHMM layout: hundreds digit pairs are hours, the rest minutes.
    hours, minutes = divmod(magnitude, 100)
    return sign * (hours * 3600 + minutes * 60)
| 1,016,716
|
Convert timestamp string to time in seconds since epoch.
Timestamps strings like '18 Jun 2013 12:00:00 GMT' are able to be converted
by this function.
Args:
value: A timestamp string in the format '%d %b %Y %H:%M:%S GMT'.
Returns:
The time in seconds since epoch as an integer.
Raises:
ValueError: If timestamp is invalid.
KeyError: If the abbreviated month is invalid.
Note: The timezone is ignored it is simply assumed to be UTC/GMT.
|
def timestamp_d_b_Y_H_M_S(value):
    """Convert a '%d %b %Y %H:%M:%S GMT' timestamp to epoch seconds.

    The timezone token is ignored and assumed to be UTC/GMT.

    Args:
        value: A timestamp string like '18 Jun 2013 12:00:00 GMT'.

    Returns:
        Seconds since epoch as an integer.

    Raises:
        ValueError: If the timestamp is invalid.
        KeyError: If the abbreviated month is invalid.
    """
    day, month, year, clock, _zone = value.split()
    hour, minute, second = clock.split(":")
    fields = (int(year), _months[month.lower()], int(day),
              int(hour), int(minute), int(second), 0, 0, 0)
    return int(calendar.timegm(fields))
| 1,016,717
|
Convert timestamp string to a datetime object.
Timestamps strings like '18 Jun 2013 12:00:00 GMT' are able to be converted
by this function.
Args:
value: A timestamp string in the format '%d %b %Y %H:%M:%S GMT'.
Returns:
A datetime object.
Raises:
ValueError: If timestamp is invalid.
KeyError: If the abbreviated month is invalid.
Note: The timezone is ignored it is simply assumed to be UTC/GMT.
|
def datetimeobj_d_b_Y_H_M_S(value):
    """Convert a '%d %b %Y %H:%M:%S GMT' timestamp to a datetime.

    The timezone token is ignored and assumed to be UTC/GMT.

    Args:
        value: A timestamp string like '18 Jun 2013 12:00:00 GMT'.

    Returns:
        A timezone-aware datetime object (GMT).

    Raises:
        ValueError: If the timestamp is invalid.
        KeyError: If the abbreviated month is invalid.
    """
    day, month, year, clock, _zone = value.split()
    hour, minute, second = clock.split(":")
    return datetime.datetime(
        int(year), _months[month.lower()], int(day),
        int(hour), int(minute), int(second), tzinfo=TZ_GMT
    )
| 1,016,718
|
Convert timestamp string to time in seconds since epoch.
Timestamps strings like 'Tue, 18 Jun 2013 22:00:00 +1000' are able to be
converted by this function.
Args:
value: A timestamp string in the format '%a, %d %b %Y %H:%M:%S %z'.
Returns:
The time in seconds since epoch as an integer.
Raises:
ValueError: If timestamp is invalid.
KeyError: If the abbreviated month is invalid.
|
def timestamp_a__d_b_Y_H_M_S_z(value):
    """Convert a '%a, %d %b %Y %H:%M:%S %z' timestamp to epoch seconds.

    Args:
        value: A timestamp string like 'Tue, 18 Jun 2013 22:00:00 +1000'.

    Returns:
        Seconds since epoch as an integer.

    Raises:
        ValueError: If the timestamp is invalid.
        KeyError: If the abbreviated month is invalid.
    """
    _weekday, day, month, year, clock, zone = value.split()
    hour, minute, second = clock.split(":")
    fields = (int(year), _months[month.lower()], int(day),
              int(hour), int(minute), int(second), 0, 0, 0)
    # timegm treats the tuple as UTC, so subtract the zone offset.
    return int(calendar.timegm(fields)) - _offset(zone)
| 1,016,719
|
Convert timestamp string to a datetime object.
Timestamps strings like 'Tue, 18 Jun 2013 22:00:00 +1000' are able to be
converted by this function.
Args:
value: A timestamp string in the format '%a, %d %b %Y %H:%M:%S %z'.
Returns:
A datetime object.
Raises:
ValueError: If timestamp is invalid.
KeyError: If the abbreviated month is invalid.
|
def datetimeobj_a__d_b_Y_H_M_S_z(value):
    """Convert a '%a, %d %b %Y %H:%M:%S %z' timestamp to a datetime.

    Args:
        value: A timestamp string like 'Tue, 18 Jun 2013 22:00:00 +1000'.

    Returns:
        A timezone-aware datetime object.

    Raises:
        ValueError: If the timestamp is invalid.
        KeyError: If the abbreviated month is invalid.
    """
    _weekday, day, month, year, clock, zone = value.split()
    hour, minute, second = clock.split(":")
    tzinfo = dateutil.tz.tzoffset(None, _offset(zone))
    return datetime.datetime(
        int(year), _months[month.lower()], int(day),
        int(hour), int(minute), int(second), tzinfo=tzinfo
    )
| 1,016,720
|
Convert timestamp string to time in seconds since epoch.
Timestamps strings like '20130618120000' are able to be converted by this
function.
Args:
value: A timestamp string in the format '%Y%m%d%H%M%S'.
Returns:
The time in seconds since epoch as an integer.
Raises:
ValueError: If timestamp is invalid.
Note: The timezone is assumed to be UTC/GMT.
|
def timestamp_YmdHMS(value):
    """Convert a '%Y%m%d%H%M%S' timestamp to epoch seconds.

    The timezone is assumed to be UTC/GMT.

    Args:
        value: A timestamp string like '20130618120000' (or an integer).

    Returns:
        Seconds since epoch as an integer.

    Raises:
        ValueError: If the timestamp is invalid.
    """
    # Peel two decimal digits per field from the right, arithmetic only.
    rest, second = divmod(int(value), 100)
    rest, minute = divmod(rest, 100)
    rest, hour = divmod(rest, 100)
    rest, day = divmod(rest, 100)
    year, month = divmod(rest, 100)
    return int(calendar.timegm((
        year % 10000, month, day, hour, minute, second, 0, 0, 0)
    ))
| 1,016,721
|
Convert timestamp string to a datetime object.
Timestamps strings like '20130618120000' are able to be converted by this
function.
Args:
value: A timestamp string in the format '%Y%m%d%H%M%S'.
Returns:
A datetime object.
Raises:
ValueError: If timestamp is invalid.
Note: The timezone is assumed to be UTC/GMT.
|
def datetimeobj_YmdHMS(value):
    """Convert a '%Y%m%d%H%M%S' timestamp to a datetime.

    The timezone is assumed to be UTC/GMT.

    Args:
        value: A timestamp string like '20130618120000' (or an integer).

    Returns:
        A timezone-aware datetime object (GMT).

    Raises:
        ValueError: If the timestamp is invalid.
    """
    # Peel two decimal digits per field from the right, arithmetic only.
    rest, second = divmod(int(value), 100)
    rest, minute = divmod(rest, 100)
    rest, hour = divmod(rest, 100)
    rest, day = divmod(rest, 100)
    year, month = divmod(rest, 100)
    return datetime.datetime(
        year % 10000, month, day, hour, minute, second, tzinfo=TZ_GMT
    )
| 1,016,722
|
Convert timestamp string to a datetime object.
Timestamps strings like '1383470155' are able to be converted by this
function.
Args:
value: A timestamp string as seconds since epoch.
Returns:
A datetime object.
Raises:
ValueError: If timestamp is invalid.
|
def datetimeobj_epoch(value):
    """Convert an epoch-seconds string like '1383470155' to a datetime.

    Args:
        value: A timestamp string (seconds since epoch).

    Returns:
        A timezone-aware datetime object (GMT).

    Raises:
        ValueError: If the timestamp is invalid.
    """
    naive_utc = datetime.datetime.utcfromtimestamp(int(value))
    return naive_utc.replace(tzinfo=TZ_GMT)
| 1,016,723
|
Convert timestamp string to time in seconds since epoch.
Wraps the datetime.datetime.strptime(). This is slow use the other
timestamp_*() functions if possible.
Args:
value: A timestamp string.
fmt: A timestamp format string.
Returns:
The time in seconds since epoch as an integer.
|
def timestamp_fmt(value, fmt):
    """Convert a timestamp string to epoch seconds via strptime.

    This wraps datetime.strptime and is slow; prefer the specialized
    timestamp_*() helpers when the format is known.

    Args:
        value: A timestamp string.
        fmt: A strptime format string.

    Returns:
        Seconds since epoch as an integer.
    """
    parsed = datetime.datetime.strptime(value, fmt)
    return int(calendar.timegm(parsed.utctimetuple()))
| 1,016,724
|
Convert timestamp string to time in seconds since epoch.
Most timestamps strings are supported in fact this wraps the
dateutil.parser.parse() method. This is SLOW use the other timestamp_*()
functions if possible.
Args:
value: A timestamp string.
Returns:
The time in seconds since epoch as an integer.
|
def timestamp_any(value):
    """Convert almost any timestamp string to epoch seconds.

    Wraps dateutil.parser.parse and is SLOW; prefer the specialized
    timestamp_*() helpers when the format is known.

    Args:
        value: A timestamp string.

    Returns:
        Seconds since epoch as an integer.
    """
    parsed = dateutil.parser.parse(value)
    return int(calendar.timegm(parsed.utctimetuple()))
| 1,016,725
|
Applies an exponential to values to turn them positive numbers.
Args:
column (pandas.DataFrame): Data to transform.
Returns:
pd.DataFrame
|
def transform(self, column):
    """Exponentiate values so they become positive numbers.

    Args:
        column (pandas.DataFrame): Data to transform.

    Returns:
        pd.DataFrame with exp applied to the configured column.
    """
    self.check_data_type()
    exponentiated = np.exp(column[self.col_name])
    return pd.DataFrame({self.col_name: exponentiated})
| 1,016,768
|
Applies the natural logarithm function to turn positive values into real ranged values.
Args:
column (pandas.DataFrame): Data to transform.
Returns:
pd.DataFrame
|
def reverse_transform(self, column):
    """Apply the natural logarithm to map positive values back to reals.

    Args:
        column (pandas.DataFrame): Data to transform.

    Returns:
        pd.DataFrame with log applied to the configured column.
    """
    self.check_data_type()
    logged = np.log(column[self.col_name])
    return pd.DataFrame({self.col_name: logged})
| 1,016,769
|
Return the contents and metadata of a given table.
Args:
table_name(str): Name of the table.
meta_file(str): Path to the meta.json file.
meta(dict): Contents of meta.json.
Returns:
tuple(pandas.DataFrame, dict)
|
def load_data_table(table_name, meta_file, meta):
    """Return the contents and metadata of a named table.

    Args:
        table_name (str): Name of the table.
        meta_file (str): Path to the meta.json file (used for relative paths).
        meta (dict): Contents of meta.json.

    Returns:
        tuple(pandas.DataFrame, dict), or None when the table is not found
        (implicit, matching prior behavior).
    """
    prefix = os.path.dirname(meta_file)
    for table in meta['tables']:
        if table['name'] != table_name:
            continue
        csv_path = os.path.join(prefix, meta['path'], table['path'])
        return pd.read_csv(csv_path), table
| 1,016,864
|
Return the content and metadata of a given column.
Args:
table_name(str): Name of the table.
col_name(str): Name of the column.
meta_file(str): Path to the meta.json file.
Returns:
tuple(pandas.Series, dict)
|
def get_col_info(table_name, col_name, meta_file):
    """Return the content and metadata of a given column.

    Args:
        table_name (str): Name of the table.
        col_name (str): Name of the column.
        meta_file (str): Path to the meta.json file.

    Returns:
        tuple(pandas.Series, dict)

    Raises:
        ValueError: If the column is not present in the table's metadata.
    """
    with open(meta_file, 'r') as f:
        meta = json.load(f)
    data_table, table = load_data_table(table_name, meta_file, meta)
    # Bug fix: previously a missing column left `col_meta` unbound and
    # raised NameError; raise a meaningful ValueError instead. The last
    # matching field still wins, as before.
    col_meta = None
    for field in table['fields']:
        if field['name'] == col_name:
            col_meta = field
    if col_meta is None:
        raise ValueError(
            "Column '%s' not found in table '%s'" % (col_name, table_name))
    col = data_table[col_name]
    return (col, col_meta)
| 1,016,865
|
Parse a newsgroup info line to python types.
Args:
line: An info response line containing newsgroup info.
Returns:
A tuple of group name, low-water as integer, high-water as integer and
posting status.
Raises:
ValueError: If the newsgroup info cannot be parsed.
Note:
Posting status is a character and is one of (but not limited to):
"y" posting allowed
"n" posting not allowed
"m" posting is moderated
|
def parse_newsgroup(line):
    """Parse a newsgroup info line into python types.

    Args:
        line: An info response line containing newsgroup info.

    Returns:
        Tuple of (group name, low water mark int, high water mark int,
        posting status).

    Raises:
        ValueError: If the newsgroup info cannot be parsed.
    """
    fields = line.split()
    try:
        group, low, high, status = (
            fields[0], int(fields[1]), int(fields[2]), fields[3])
    except (IndexError, ValueError):
        raise ValueError("Invalid newsgroup info")
    return group, low, high, status
| 1,016,882
|
Parse a header line.
Args:
line: A header line as a string.
Returns:
None if end of headers is found. A string giving the continuation line
if a continuation is found. A tuple of name, value when a header line is
found.
Raises:
ValueError: If the line cannot be parsed as a header.
|
def parse_header(line):
    """Parse one header line.

    Args:
        line: A header line as a string.

    Returns:
        None at end of headers; a string for a continuation line; a
        (name, value) tuple for a regular header line.

    Raises:
        ValueError: If the line cannot be parsed as a header.
    """
    # Blank line (or bare CRLF) terminates the header block.
    if not line or line == "\r\n":
        return None
    # Leading whitespace marks a continuation of the previous header.
    if line.startswith((" ", "\t")):
        return line[1:].rstrip()
    name, value = line.split(":", 1)
    return (name.strip(), value.strip())
| 1,016,883
|
Parse a string a iterable object (including file like objects) to a
python dictionary.
Args:
obj: An iterable object including file-like objects.
Returns:
An dictionary of headers. If a header is repeated then the last value
for that header is given.
Raises:
ValueError: If the first line is a continuation line or the headers
cannot be parsed.
|
def parse_headers(obj):
    """Parse a string or iterable (including file-likes) into headers.

    Args:
        obj: An iterable of lines, or a string (Python 2 `basestring`).

    Returns:
        An iodict.IODict of headers; for repeated headers the last value
        wins.

    Raises:
        ValueError: If the first line is a continuation line or a line
            cannot be parsed as a header.
    """
    if isinstance(obj, basestring):
        obj = cStringIO.StringIO(obj)
    headers = []
    for line in obj:
        parsed = parse_header(line)
        if not parsed:
            # End of the header block.
            break
        if isinstance(parsed, basestring):
            # Continuation line: append onto the previous header's value.
            if not headers:
                raise ValueError("First header is a continuation")
            prev_name, prev_value = headers[-1]
            headers[-1] = (prev_name, prev_value + parsed)
            continue
        headers.append(parsed)
    return iodict.IODict(headers)
| 1,016,884
|
Parse a dictionary of headers to a string.
Args:
hdrs: A dictionary of headers.
Returns:
The headers as a string that can be used in an NNTP POST.
|
def unparse_headers(hdrs):
    """Serialize a dictionary of headers to an NNTP POST string.

    Args:
        hdrs: A dictionary of headers.

    Returns:
        The headers as a string terminated by a blank CRLF line.
    """
    lines = [unparse_header(name, value) for name, value in hdrs.items()]
    return "".join(lines) + "\r\n"
| 1,016,885
|
Initialize the build rule.
Args:
**kwargs: Assorted parameters; see subclass implementations for
details.
|
def __init__(self, **kwargs):
    """Initialize the build rule from keyword parameters.

    Validates required and optional parameters (declared by subclasses via
    `required_params` / `optional_params`), registers the node with the
    graph context when present, then runs subclass validation.

    Args:
        **kwargs: Assorted parameters; see subclass implementations for
            details. Must include 'name'.

    Raises:
        error.InvalidRule: If a parameter is missing, has the wrong type,
            or unknown parameters remain.
    """
    self.address = self.name = address.new(kwargs['name'])
    # TODO: eliminate use of .name
    self.subgraph = None
    self.params = {}
    log.debug('New target: %s', self.address)
    try:
        for param_name, param_type in self.required_params:
            self.params[param_name] = kwargs.pop(param_name)
            assert isinstance(self.params[param_name], param_type)
    except AssertionError:
        if isinstance(param_type, tuple):
            # Bug fix: join the *names* of the allowed types; previously
            # this accessed tuple.__name__ (AttributeError) and would have
            # joined the characters of a single name.
            msg = 'one of: %s' % ', '.join(t.__name__ for t in param_type)
        else:
            msg = str(param_type.__name__)
        raise error.InvalidRule(
            'While loading %s: Invalid type for %s. '
            'Expected: %s. Actual: %s.' % (
                self.address, param_name, msg,
                repr(self.params[param_name])))
    except KeyError as err:
        log.error(err)
        # Bug fix: repr() was called with two arguments (TypeError);
        # format both values into the message instead.
        raise error.InvalidRule(
            'While loading %s: Required parameter %s not given.' % (
                self.address, param_name))
    for (param_name, param_type, param_default) in self.optional_params:
        if param_name not in kwargs:
            self.params[param_name] = param_default
        else:
            self.params[param_name] = kwargs.pop(param_name)
            if not isinstance(self.params[param_name], param_type):
                if isinstance(param_type, tuple):
                    # Same join fix as above.
                    msg = 'one of: %s' % ', '.join(
                        t.__name__ for t in param_type)
                else:
                    msg = str(param_type.__name__)
                raise error.InvalidRule(
                    'While loading %s: Invalid type for %s. '
                    'Expected: %s. Actual: %s.' % (
                        self.address, param_name, msg,
                        repr(self.params[param_name])))
    if kwargs:  # There are leftover arguments.
        raise error.InvalidRule(
            '[%s]: Unknown argument(s): %s' % (
                self.address, ', '.join(kwargs.keys())))
    if self.graphcontext is not None:
        self.graphcontext.add_node(self.address, target_obj=self)
    # TODO: process deps here?
    try:
        self.validate_args()
    except AssertionError as err:
        raise error.InvalidRule('Error in %s: %s' % (self.address, err))
| 1,017,070
|
Construct request for the Retsly API
Args:
client (dict): Retsly client
method (string): method
url (string): url
query (list): query
|
def __init__(self, client, method, url, query=None):
    """Construct a request for the Retsly API.

    Args:
        client: Retsly client.
        method (string): HTTP method.
        url (string): request URL.
        query: query parameters; defaults to an empty dict.
    """
    self.client = client
    self.method = method
    self.url = url
    self.query = {} if query is None else query
| 1,017,076
|
Get the contents of a specific subtag from Clusterpoint Storage's response's content tag.
Args:
name -- A name string of the content's subtag to be returned.
Returns:
A dict representing the contents of the specified field or a list of dicts
if there are multiple fields with that tag name. Returns None if no field found.
|
def get_content_field(self, name):
    """Get the contents of a subtag from the response's content tag.

    Args:
        name: Name string of the content subtag to return.

    Returns:
        A dict for a single matching field, a list of dicts when several
        fields share the tag name, or None when no field is found.
    """
    matches = self._content.findall(name)
    if not matches:
        return None
    if len(matches) == 1:
        return etree_to_dict(matches[0])[name]
    return [etree_to_dict(match)[name] for match in matches]
| 1,017,273
|
Run the tests that are loaded by each of the strings provided.
Arguments:
tests (iterable):
the collection of tests (specified as `str` s) to run
reporter (Reporter):
a `Reporter` to use for the run. If unprovided, the default
is to return a `virtue.reporters.Counter` (which produces no
output).
stop_after (int):
a number of non-successful tests to allow before stopping the run.
|
def run(tests=(), reporter=None, stop_after=None):
    """Run the tests named by each of the provided strings.

    Arguments:
        tests (iterable): collection of test names (`str`) to run.
        reporter (Reporter): reporter for the run; defaults to a Counter
            (which produces no output).
        stop_after (int): number of non-successful tests to allow before
            stopping the run.

    Returns:
        The reporter used for the run.
    """
    if reporter is None:
        reporter = Counter()
    if stop_after is not None:
        reporter = _StopAfterWrapper(reporter=reporter, limit=stop_after)
    locator = ObjectLocator()
    suite = unittest.TestSuite(
        case
        for test in tests
        for loader in locator.locate_by_name(name=test)
        for case in loader.load()
    )
    # start/stopTestRun are optional on reporters; no-op when absent.
    getattr(reporter, "startTestRun", lambda: None)()
    suite.run(reporter)
    getattr(reporter, "stopTestRun", lambda: None)()
    return reporter
| 1,017,386
|
Generates the path to a file in the mh cache.
The generated path does not imply the file's existence!
Args:
filename: Filename relative to buildroot
rule: A targets.SomeBuildRule object
metahash: hash object
|
def path_in_cache(self, filename, metahash):
    """Return the path of a cached file, verifying it exists.

    Args:
        filename: Filename relative to buildroot.
        metahash: hash object.

    Returns:
        The cache path when the file exists.

    Raises:
        CacheMiss: When the file is not present in the cache.
    """
    candidate = self._genpath(filename, metahash)
    if not os.path.exists(candidate):
        raise CacheMiss
    return candidate
| 1,017,424
|
Put a file in the cache.
Args:
filepath: Path to file on disk.
buildroot: Path to buildroot
buildrule: The rule that generated this file.
metahash: hash object
|
def putfile(self, filepath, buildroot, metahash):
    """Put a file in the cache.

    The file is hard-linked into a content-addressed object store keyed
    by its own hash, then hard-linked again under a path derived from
    `metahash`.

    Args:
        filepath: Path to file on disk.
        buildroot: Path to buildroot.
        metahash: hash object.
    """
    def gen_obj_path(filename):
        # Content-addressed location sharded by the first two byte pairs
        # of the hex digest. NOTE(review): `filename` is unused; the
        # enclosing `filepath` is hashed instead — confirm intended.
        filehash = util.hash_file(filepath).hexdigest()
        return filehash, os.path.join(self.obj_cachedir, filehash[0:2],
                                      filehash[2:4], filehash)
    filepath_relative = filepath.split(buildroot)[1][1:]  # Strip leading /
    # Path for the metahashed reference:
    incachepath = self._genpath(filepath_relative, metahash)
    filehash, obj_path = gen_obj_path(filepath)
    if not os.path.exists(obj_path):
        obj_dir = os.path.dirname(obj_path)
        if not os.path.exists(obj_dir):
            os.makedirs(obj_dir)
        log.debug('Adding to obj cache: %s -> %s', filepath, obj_path)
        os.link(filepath, obj_path)
    if os.path.exists(incachepath):
        # A file already exists for this metahash; if its content differs,
        # warn loudly and replace it with the new version.
        existingfile_hash = util.hash_file(incachepath).hexdigest()
        if filehash != existingfile_hash:
            log.warn('File found in mh cache, but checksum differs. '
                     'Replacing with this new version. (File: %s)',
                     filepath)
            log.warn('Possible reasons for this:')
            log.warn(' 1. This build is not hermetic, and something '
                     'differs about the build environment compared to the '
                     'previous build.')
            log.warn(' 2. This file has a timestamp or other build-time '
                     'related data encoded into it, which will always '
                     'cause the checksum to differ when built.')
            log.warn(' 3. Everything is terrible and nothing works.')
            os.unlink(incachepath)
    if not os.path.exists(incachepath):
        log.debug('Adding to mh cache: %s -> %s', filepath, incachepath)
        if not os.path.exists(os.path.dirname(incachepath)):
            os.makedirs(os.path.dirname(incachepath))
        os.link(obj_path, incachepath)
| 1,017,426
|
Returns true if object is cached.
Args:
objpath: Filename relative to buildroot.
metahash: hash object
|
def in_cache(self, objpath, metahash):
    """Report whether an object is present in the cache.

    Args:
        objpath: filename relative to buildroot.
        metahash: hash object.

    Returns:
        bool: True if the object is cached, False otherwise.
    """
    try:
        self.path_in_cache(objpath, metahash)
    except CacheMiss:
        return False
    return True
| 1,017,427
|
Get object from cache, write it to dst_path.
Args:
objpath: filename relative to buildroot
(example: mini-boot/blahblah/somefile.bin)
metahash: metahash. See targets/base.py
dst_path: Absolute path where the file should be written.
Raises:
CacheMiss: if the item is not in the cache
|
def get_obj(self, objpath, metahash, dst_path):
    """Get an object from the cache and hardlink it to dst_path.

    Args:
        objpath: filename relative to buildroot
            (example: mini-boot/blahblah/somefile.bin)
        metahash: metahash. See targets/base.py
        dst_path: Absolute path where the file should be written.

    Raises:
        CacheMiss: if the item is not in the cache
    """
    incachepath = self.path_in_cache(objpath, metahash)
    if not os.path.exists(incachepath):
        raise CacheMiss('%s not in cache.' % incachepath)
    log.debug('Cache hit! %s~%s', objpath, metahash.hexdigest())
    dst_dir = os.path.dirname(dst_path)
    if not os.path.exists(dst_dir):
        os.makedirs(dst_dir)
    os.link(incachepath, dst_path)
| 1,017,428
|
Connect given term strings or list(s) of term strings with an AND operator for querying.
Args:
An arbitrary number of either strings or lists of strings representing query terms.
Returns
A query string consisting of argument terms and'ed together.
|
def and_terms(*args):
    """Join term strings (or lists of them) with an AND for querying.

    Args:
        *args: an arbitrary number of strings or lists of strings
            representing query terms.

    Returns:
        str: a query string of the argument terms and'ed together,
        wrapped in parentheses.
    """
    flattened = [' '.join(term) if isinstance(term, list) else term
                 for term in args]
    return '({0})'.format(' '.join(flattened))
| 1,017,930
|
Connect given term strings or list(s) of term strings with a OR operator for querying.
Args:
An arbitrary number of either strings or lists of strings representing query terms.
Returns
A query string consisting of argument terms or'ed together.
|
def or_terms(*args):
    """Join term strings (or lists of them) with an OR for querying.

    Args:
        *args: an arbitrary number of strings or lists of strings
            representing query terms.

    Returns:
        str: a query string of the argument terms or'ed together,
        wrapped in curly braces.
    """
    flattened = [' '.join(term) if isinstance(term, list) else term
                 for term in args]
    return '{{{0}}}'.format(' '.join(flattened))
| 1,017,931
|
Runs iSort (https://pypi.python.org/pypi/isort)
Args:
amend: whether or not to commit results
stage: whether or not to stage changes
|
def sort(amend: bool = False, stage: bool = False):
    """Runs iSort (https://pypi.python.org/pypi/isort).

    Args:
        amend: whether or not to commit results
        stage: whether or not to stage changes
    """
    _sort(amend, stage)
| 1,018,113
|
Pass file as a filename, open file object, or None to return the request bytes
Args:
url (str): URL of file to download
file (Union[str, io, None]): One of the following:
- Filename of output file
- File opened in binary write mode
- None: Return raw bytes instead
Returns:
Union[bytes, None]: Bytes of file if file is None
|
def download(url, file=None):
    """Download *url* to a file, or return the raw bytes.

    Args:
        url (str): URL of file to download
        file (Union[str, io, None]): one of:
            - a filename to write to,
            - a file object opened in binary write mode,
            - None, to return the raw bytes instead.

    Returns:
        Union[bytes, None]: the downloaded bytes when *file* is None
    """
    import shutil
    import urllib.request
    if isinstance(file, str):
        # Caller gave a path; open (and later close) it ourselves.
        file = open(file, 'wb')
    try:
        with urllib.request.urlopen(url) as response:
            if not file:
                return response.read()
            shutil.copyfileobj(response, file)
    finally:
        if file:
            file.close()
| 1,018,402
|
Download and extract the tar at the url to the given folder
Args:
tar_url (str): URL of tar file to download
folder (str): Location of parent directory to extract to. Doesn't have to exist
tar_filename (str): Location to download tar. Default is to a temp file
|
def download_extract_tar(tar_url, folder, tar_filename=''):
    """Download and extract the tar at the url to the given folder.

    Args:
        tar_url (str): URL of tar file to download
        tar (str): Location of parent directory to extract to. Doesn't have
            to exist
        tar_filename (str): Location to download tar. Default is to a temp
            file
    """
    try:
        makedirs(folder)
    except OSError:
        # Directory already existing is fine; re-raise anything else.
        if not isdir(folder):
            raise
    data_file = tar_filename
    if not data_file:
        fd, data_file = mkstemp('.tar.gz')
        download(tar_url, os.fdopen(fd, 'wb'))
    else:
        download(tar_url, data_file)
    # NOTE(review): extractall() without member validation is vulnerable
    # to path traversal if the archive source is untrusted — consider
    # the `filter` argument (Python 3.12+) or manual member checks.
    with tarfile.open(data_file) as tar:
        tar.extractall(path=folder)
| 1,018,403
|
Install or update a tar package that has an md5
Args:
tar_url (str): URL of package to download
folder (str): Location to extract tar. Will be created if doesn't exist
md5_url (str): URL of md5 to use to check for updates
on_download (Callable): Function that gets called when downloading a new update
on_complete (Callable): Function that gets called when a new download is complete
Returns:
bool: Whether the package was updated
|
def install_package(tar_url, folder, md5_url='{tar_url}.md5',
                    on_download=lambda: None, on_complete=lambda: None):
    """Install or update a tar package that has an md5.

    Args:
        tar_url (str): URL of package to download
        folder (str): Location to extract tar. Will be created if doesn't
            exist
        md5_url (str): URL of md5 to use to check for updates; '{tar_url}'
            is interpolated with the actual tar URL
        on_download (Callable): called when a new update starts downloading
        on_complete (Callable): called when a new download is complete

    Returns:
        bool: Whether the package was updated

    Raises:
        ValueError: if the md5 URL is invalid, or the downloaded tar
            doesn't match the advertised md5.
    """
    data_file = join(folder, basename(tar_url))
    md5_url = md5_url.format(tar_url=tar_url)
    try:
        # Remote md5 files are "<hash> <filename>"; keep only the hash.
        remote_md5 = download(md5_url).decode('utf-8').split(' ')[0]
    except (UnicodeDecodeError, URLError):
        raise ValueError('Invalid MD5 url: ' + md5_url)
    if remote_md5 != calc_md5(data_file):
        on_download()
        if isfile(data_file):
            # Best-effort removal of files from the previous version,
            # iterating the old archive in reverse so files are removed
            # before their parent directories.
            try:
                with tarfile.open(data_file) as tar:
                    for i in reversed(list(tar)):
                        try:
                            os.remove(join(folder, i.path))
                        except OSError:
                            pass
            except (OSError, EOFError):
                pass
        download_extract_tar(tar_url, folder, data_file)
        on_complete()
        # Verify the freshly downloaded tar matches the advertised md5.
        if remote_md5 != calc_md5(data_file):
            raise ValueError('MD5 url does not match tar: ' + md5_url)
        return True
    return False
| 1,018,404
|
Return a date created from the last 6 digits of a uuid.
Arguments:
uuid The unique identifier to parse.
century The first 2 digits to assume in the year. Default is '20'.
Examples:
>>> uuid_to_date('e8820616-1462-49b6-9784-e99a32120201')
datetime.date(2012, 2, 1)
>>> uuid_to_date('e8820616-1462-49b6-9784-e99a32120201', '18')
datetime.date(1812, 2, 1)
|
def uuid_to_date(uuid, century='20'):
    """Return a date created from the last 6 digits of a uuid.

    The trailing six digits are read as YYMMDD.

    Arguments:
        uuid: the unique identifier to parse.
        century: the first 2 digits to assume in the year. Default is '20'.

    Examples:
        >>> uuid_to_date('e8820616-1462-49b6-9784-e99a32120201')
        datetime.date(2012, 2, 1)
        >>> uuid_to_date('e8820616-1462-49b6-9784-e99a32120201', '18')
        datetime.date(1812, 2, 1)
    """
    yy, mm, dd = uuid[-6:-4], uuid[-4:-2], uuid[-2:]
    return datetime.date(year=int('%s%s' % (century, yy)),
                         month=int(mm), day=int(dd))
| 1,018,462
|
Adds a line to the .gitignore file of the repo
Args:
line: line to add
|
def add_to_gitignore(line: str):
    """Adds a line to the .gitignore file of the repo.

    The line is appended only if not already present; a trailing
    newline is added if missing.

    Args:
        line: line to add
    """
    if not line.endswith('\n'):
        line = f'{line}\n'
    if GIT_IGNORE.exists():
        # Read once (the original read the file twice) and bail out
        # early if the line is already ignored.
        previous_content = GIT_IGNORE.read_text(encoding='utf8')
        if line in previous_content:
            return
    else:
        previous_content = ''
    GIT_IGNORE.write_text(previous_content + line, encoding='utf8')
| 1,018,466
|
Initialize from a list of options with random weights.
The weights assigned to each object are uniformally random
integers between ``1`` and ``len(options)``
Args:
options (list): The list of options of any type this object
can return with the ``get()`` method.
Returns:
SoftOptions: A newly constructed instance
|
def with_random_weights(cls, options):
    """Initialize from a list of options with random weights.

    Each option receives a uniformly random integer weight between
    ``1`` and ``len(options)``.

    Args:
        options (list): the list of options of any type this object
            can return with the ``get()`` method.

    Returns:
        SoftOptions: a newly constructed instance
    """
    weighted = []
    for value in options:
        weighted.append((value, random.randint(1, len(options))))
    return cls(weighted)
| 1,018,620
|
Convert an ``(r, g, b)`` color tuple to a hexadecimal string.
Alphabetical characters in the output will be capitalized.
Args:
color (tuple): An rgb color tuple of form: (int, int, int)
Returns: string
Example:
>>> SoftColor.rgb_to_hex((0, 0, 0))
'#000000'
>>> SoftColor.rgb_to_hex((255, 255, 255))
'#FFFFFF'
|
def rgb_to_hex(cls, color):
    """Convert an ``(r, g, b)`` color tuple to a hexadecimal string.

    Alphabetical characters in the output are capitalized.

    Args:
        color (tuple): an rgb color tuple of form: (int, int, int)

    Returns: string

    Example:
        >>> SoftColor.rgb_to_hex((0, 0, 0))
        '#000000'
        >>> SoftColor.rgb_to_hex((255, 255, 255))
        '#FFFFFF'
    """
    red = cls._bound_color_value(color[0])
    green = cls._bound_color_value(color[1])
    blue = cls._bound_color_value(color[2])
    return '#{0:02x}{1:02x}{2:02x}'.format(red, green, blue).upper()
| 1,018,629
|
Parse spss header file
Arguments:
path {str} -- path al fichero de cabecera.
leyend_position -- posicion del la leyenda en el header.
|
def parse_spss_headerfile(path, **kwargs):
    """Parse an SPSS header file into a dict of headers.

    Arguments:
        path {str} -- path to the header file.
        **kwargs -- 'encoding' (default 'latin-1') and 'leyend_position'
            (index of the legend block within the header, default 0).

    Returns:
        dict: parsed headers; {} if anything failed.
    """
    headers_clean = {}
    try:
        with codecs.open(path, 'r', kwargs.get('encoding', 'latin-1')) as file_:
            raw_file = file_.read()
            raw_splited = exclude_empty_values(raw_file.split('.\r\n'))
            # suposse that by default spss leyend is in position 0.
            leyend = parse_spss_header_leyend(
                raw_leyend=raw_splited.pop(kwargs.get('leyend_position', 0)),
                leyend=headers_clean)
            if not leyend:
                raise Exception('Empty leyend')
            # only want VARIABLE(S) LABEL(S) & VALUE(S) LABEL(S)
            for label in [x for x in raw_splited if 'label' in x.lower()]:
                # NOTE(review): `values` is never used afterwards; the
                # helper presumably mutates `leyend`/`headers_clean` in
                # place — confirm against parse_spss_header_labels.
                values = parse_spss_header_labels(
                    raw_labels=label,
                    headers=leyend)
    except Exception as ex:
        logger.error('Fail to parse spss headerfile - {}'.format(ex), header_file=path)
        headers_clean = {}
    return headers_clean
| 1,018,655
|
Parse spss data file
Arguments:
path {str} -- path al fichero de cabecera.
**kwargs {[dict]} -- otros argumentos que puedan llegar
|
def parse_spss_datafile(path, **kwargs):
    """Parse an SPSS data file into a list of non-empty lines.

    Arguments:
        path {str} -- path to the data file.
        **kwargs -- may contain 'encoding' (default 'latin-1').

    Returns:
        list: the non-empty CRLF-separated rows of the file.
    """
    with codecs.open(path, 'r', kwargs.get('encoding', 'latin-1')) as file_:
        rows = file_.read().split('\r\n')
    return exclude_empty_values(rows)
| 1,018,656
|
Parse pdf to a list of strings using the pdfminer lib.
Args:
no_laparams=False,
all_texts=None,
detect_vertical=None, word_margin=None, char_margin=None,
line_margin=None, boxes_flow=None, codec='utf-8',
strip_control=False, maxpages=0, page_numbers=None, password="",
scale=1.0, rotation=0, layoutmode='normal', debug=False,
disable_caching=False,
|
def pdf_to_text(pdf_filepath='', **kwargs):
    """Parse a pdf into text using the pdfminer lib.

    Args:
        pdf_filepath (str): path to the PDF file to parse.
        **kwargs: forwarded to ``pdfminer.high_level.extract_text_to_fp``
            (e.g. laparams, codec, output_type, maxpages, page_numbers,
            password, disable_caching, imagewriter, outfp).

    Returns:
        str: the extracted text; '' if parsing failed.
    """
    # Fix: the failure path used to return [] while the success path
    # returned a str — keep a single return type.
    result = ''
    try:
        if not os.path.exists(pdf_filepath):
            raise ValueError("No valid pdf filepath introduced..")
        # Fill in defaults only where the caller supplied nothing.
        kwargs['outfp'] = kwargs.get('outfp', StringIO())
        kwargs['laparams'] = kwargs.get('laparams', pdfminer.layout.LAParams())
        kwargs['imagewriter'] = kwargs.get('imagewriter', None)
        kwargs['output_type'] = kwargs.get('output_type', "text")
        kwargs['codec'] = kwargs.get('codec', 'utf-8')
        kwargs['disable_caching'] = kwargs.get('disable_caching', False)
        with open(pdf_filepath, "rb") as f_pdf:
            pdfminer.high_level.extract_text_to_fp(f_pdf, **kwargs)
        result = kwargs.get('outfp').getvalue()
    except Exception:
        logger.error('fail pdf to text parsing')
    return result
| 1,018,659
|
Function to set a default namespace/include.
It returns a decorator with the namespace/include argument already set.
Arguments:
- include: A custom URL list, previously
set on the module's urls.py;
- namespace: the URL's namespace.
|
def uconf(**kwargs):
    """Set a default namespace/include for URL mapping.

    Returns a decorator (``umap``) with the namespace/include argument
    already applied.

    Arguments:
        - include: A custom URL list, previously set on the
          module's urls.py;
        - namespace: the URL's namespace.

    Raises:
        TypeError: if not exactly one of the two arguments is given,
            or the argument has an unexpected name.
    """
    # Exactly one keyword argument (namespace or include) is required.
    if len(kwargs) != 1:
        raise TypeError(
            'uconf() takes exactly 1 argument. ({} given)'.format(len(kwargs))
        )
    arg_name, = kwargs
    if arg_name not in ('include', 'namespace'):
        raise TypeError(
            'Invalid argument: {}'.format(arg_name)
        )
    # Pre-bind the argument onto the umap decorator.
    return partial(umap, **kwargs)
| 1,018,744
|
Runs the pep8 style checker
Args:
amend: whether or not to commit results
stage: whether or not to stage changes
|
def pep8(amend: bool = False, stage: bool = False):
    """Runs the pep8 style checker.

    Args:
        amend: whether or not to commit results
        stage: whether or not to stage changes
    """
    _pep8(amend, stage)
| 1,018,749
|
It is just stub for simplify setting timeout.
Args:
timeout (int): timeout in milliseconds, after which callback will be called
callback (callable): usually, just a function that will be called each time after timeout
*callback_args (any type): arguments that will be passed to callback function
|
def set_refresh(self, timeout, callback, *callback_args):
    """Schedule `callback` to run repeatedly via the GLib main loop.

    Args:
        timeout (int): timeout in milliseconds, after which callback will
            be called
        callback (callable): usually just a function that will be called
            each time after timeout
        *callback_args (any type): arguments that will be passed to the
            callback function
    """
    GObject.timeout_add(timeout, callback, *callback_args)
| 1,018,919
|
Add mouse right click menu item.
Args:
command (callable): function that will be called after left mouse click on title
title (str): label that will be shown in menu
|
def add_menu_item(self, command, title):
    """Add a mouse right click menu item.

    Args:
        command (callable): function that will be called after left mouse
            click on title
        title (str): label that will be shown in menu
    """
    m_item = Gtk.MenuItem()
    m_item.set_label(title)
    m_item.connect('activate', command)
    self.menu.append(m_item)
    # Show immediately so the new item appears without rebuilding the menu.
    self.menu.show_all()
| 1,018,920
|
Return packages mentioned in the given file.
Args:
requirements_file (str): path to the requirements file to be parsed.
Returns:
(list): 3rd-party package dependencies contained in the file.
|
def requirements(requirements_file):
    """Return packages mentioned in the given file.

    Args:
        requirements_file (str): path to the requirements file to be parsed.

    Returns:
        (list): 3rd-party package dependencies contained in the file.
    """
    # Entries without a concrete requirement (e.g. bare options) are
    # skipped via the `pkg.req is not None` filter.
    return [
        str(pkg.req) for pkg in parse_requirements(
            requirements_file, session=pip_download.PipSession()) if pkg.req is not None]
| 1,019,015
|
Read from fileobj stream, return hash of its contents.
Args:
fileobj: File-like object with read()
hasher: Hash object such as hashlib.sha1(). Defaults to sha1.
blocksize: Read from fileobj this many bytes at a time.
|
def hash_stream(fileobj, hasher=None, blocksize=65536):
    """Read from fileobj stream, return hash of its contents.

    Args:
        fileobj: file-like object with read().
        hasher: hash object such as hashlib.sha1(). Defaults to sha1.
        blocksize: read from fileobj this many bytes at a time.

    Returns:
        The hash object, updated with the stream's contents.
    """
    hasher = hasher or hashlib.sha1()
    while True:
        chunk = fileobj.read(blocksize)
        if not chunk:
            break
        hasher.update(chunk)
    return hasher
| 1,019,110
|
Returns list of paths matching one or more wildcard patterns.
Args:
include_dirs: Include directories in the output
|
def glob(*args):
    """Returns list of file paths matching one or more wildcard patterns.

    Directories are excluded from the result.

    Args:
        *args: glob pattern strings, or a single list of pattern strings.

    Returns:
        list: matching file paths.
    """
    # Fix: `len(args) is 1` relied on CPython small-int identity caching
    # (and warns on 3.8+); use equality.
    if len(args) == 1 and isinstance(args[0], list):
        args = args[0]
    matches = []
    for pattern in args:
        for item in glob2.glob(pattern):
            if not os.path.isdir(item):
                matches.append(item)
    return matches
| 1,019,112
|
Build an audio chunk and progress the oscillator states.
Args:
oscillators (list): A list of oscillator.Oscillator objects
to build chunks from
Returns:
str: a string of audio sample bytes ready to be written to a wave file
|
def build_chunk(oscillators):
    """Build an audio chunk and progress the oscillator states.

    Args:
        oscillators (list): a list of oscillator.Oscillator objects
            to build chunks from

    Returns:
        str: a string of audio sample bytes ready to be written to a
            wave file
    """
    step_random_processes(oscillators)
    subchunks = []
    for osc in oscillators:
        osc.amplitude.step_amp()
        osc_chunk = osc.get_samples(config.CHUNK_SIZE)
        if osc_chunk is not None:
            subchunks.append(osc_chunk)
    if len(subchunks):
        # Mix by summing the per-oscillator sample arrays.
        new_chunk = sum(subchunks)
    else:
        # No oscillator produced samples; emit silence.
        new_chunk = numpy.zeros(config.CHUNK_SIZE)
    # If we exceed the maximum amplitude, handle it gracefully
    chunk_amplitude = amplitude.find_amplitude(new_chunk)
    if chunk_amplitude > config.MAX_AMPLITUDE:
        # Normalize the amplitude chunk to mitigate immediate clipping
        new_chunk = amplitude.normalize_amplitude(new_chunk,
                                                  config.MAX_AMPLITUDE)
        # Pick some of the offending oscillators (and some random others)
        # and lower their drift targets
        avg_amp = (sum(osc.amplitude.value for osc in oscillators) /
                   len(oscillators))
        for osc in oscillators:
            if (osc.amplitude.value > avg_amp and rand.prob_bool(0.1) or
                    rand.prob_bool(0.01)):
                osc.amplitude.drift_target = rand.weighted_rand(
                    [(-5, 1), (0, 10)])
                osc.amplitude.change_rate = rand.weighted_rand(
                    osc.amplitude.change_rate_weights)
    return new_chunk.astype(config.SAMPLE_DATA_TYPE).tostring()
| 1,019,168
|
Read default and user config files.
Args:
all_linters (dict): Names and classes of all available linters.
|
def __init__(self, all_linters):
    """Read default and user config files.

    Args:
        all_linters (dict): Names and classes of all available linters.
    """
    self._all_linters = all_linters
    default_cfg = self._read_default_file()
    user_cfg = self._read_user_files()
    # User settings take precedence over the defaults.
    self._config = self._merge(default_cfg, user_cfg)
    self.user_linters = []  # chosen by the user
    self.linters = {}  # chosen by the user or all of them
    self._set_linters()
| 1,019,225
|
r"""Converts the given class into a static one, by changing all the methods of it into static methods.
Args:
cls (class): The class to be converted.
|
def static(cls):
    """Convert the given class into a static one by changing all of its
    (Python 2 unbound) methods into static methods.

    Args:
        cls (class): The class to be converted.

    Returns:
        class: the same class, mutated in place.
    """
    # Fix: removed a stray `r` expression statement (leftover from a
    # stripped raw docstring) that raised NameError at call time.
    for attr in dir(cls):
        # Python 2 unbound methods expose the plain function as im_func;
        # attributes without im_func (Python 3, non-methods) are skipped.
        im_func = getattr(getattr(cls, attr), 'im_func', None)
        if im_func:
            setattr(cls, attr, staticmethod(im_func))
    return cls
| 1,019,600
|
Generates a two digit string based on a provided string
Args:
rule_name (str): A configured rule name 'pytest_mark3'.
Returns:
str: A two digit code based on the provided string '03'
|
def _generate_mark_code(rule_name):
code = ''.join([i for i in str(rule_name) if i.isdigit()])
code = code.zfill(2)
return code
| 1,019,601
|
Validate filename against a pattern if the filename passes the filter.
Args:
filename (str): The name of the file being parsed by flake8.
rule_name (str): The name of the rule.
rule_conf (dict): The dictionary containing the properties of the rule
class_type (class): The class that this rule was called from
Yields:
tuple: (int, int, str, type) the tuple used by flake8 to construct a violation
|
def rule_n5xx(filename, rule_name, rule_conf, class_type):
    """Validate a filename against a pattern if it passes the filter.

    Args:
        filename (str): The name of the file being parsed by flake8.
        rule_name (str): The name of the rule.
        rule_conf (dict): The dictionary containing the properties of the
            rule.
        class_type (class): The class that this rule was called from.

    Yields:
        tuple: (int, int, str, type) the tuple used by flake8 to construct
            a violation.
    """
    code = _generate_mark_code(rule_name)
    message = "N5{} filename failed regex validation '{}'".format(code, rule_conf['filename_regex'])
    # Strip directory and extension before matching.
    stem = splitext(basename(filename))[0]
    if re.match(rule_conf['filter_regex'], stem) and \
            not re.match(rule_conf['filename_regex'], stem):
        yield (0, 0, message, class_type)
| 1,019,602
|
r"""Gives the details on the args of the given func.
Args:
func (function): The function to get details on.
|
def _getFuncArgs(func):
r
code = func.func_code
Defaults = func.func_defaults
nargs = code.co_argcount
ArgNames = code.co_varnames[:nargs]
Args = OrderedDict()
argCount = len(ArgNames)
defCount = len(Defaults) if Defaults else 0
diff = argCount - defCount
for i in range(0, diff):
Args[ArgNames[i]] = {}
for i in range(diff, argCount):
Args[ArgNames[i]] = {'default': Defaults[i - diff]}
return Args
| 1,019,940
|
Remove a node from ``self.node_list`` and links pointing to it.
If ``node`` is not in the graph, do nothing.
Args:
node (Node): The node to be removed
Returns: None
Example:
>>> from blur.markov.node import Node
>>> node_1 = Node('One')
>>> graph = Graph([node_1])
>>> graph.remove_node(node_1)
>>> len(graph.node_list)
0
|
def remove_node(self, node):
    """Remove a node from ``self.node_list`` and links pointing to it.

    If ``node`` is not in the graph, do nothing.

    Args:
        node (Node): The node to be removed

    Returns: None

    Example:
        >>> from blur.markov.node import Node
        >>> node_1 = Node('One')
        >>> graph = Graph([node_1])
        >>> graph.remove_node(node_1)
        >>> len(graph.node_list)
        0
    """
    if node not in self.node_list:
        return
    self.node_list.remove(node)
    # Prune any links whose target was the removed node.
    for remaining in self.node_list:
        remaining.link_list = [lnk for lnk in remaining.link_list
                               if lnk.target != node]
| 1,020,128
|
Delete all nodes in ``self.node_list`` with the value ``value``.
Args:
value (Any): The value to find and delete owners of.
Returns: None
Example:
>>> from blur.markov.node import Node
>>> node_1 = Node('One')
>>> graph = Graph([node_1])
>>> graph.remove_node_by_value('One')
>>> len(graph.node_list)
0
|
def remove_node_by_value(self, value):
    """Delete all nodes in ``self.node_list`` with the value ``value``.

    Links pointing at the deleted nodes are pruned as well.

    Args:
        value (Any): The value to find and delete owners of.

    Returns: None

    Example:
        >>> from blur.markov.node import Node
        >>> node_1 = Node('One')
        >>> graph = Graph([node_1])
        >>> graph.remove_node_by_value('One')
        >>> len(graph.node_list)
        0
    """
    surviving = []
    for node in self.node_list:
        if node.value != value:
            surviving.append(node)
    self.node_list = surviving
    # Remove links pointing to the deleted node
    for node in self.node_list:
        node.link_list = [lnk for lnk in node.link_list
                          if lnk.target.value != value]
| 1,020,129
|
Whether any node in ``self.node_list`` has the value ``value``.
Args:
value (Any): The value to find in ``self.node_list``
Returns: bool
Example:
>>> from blur.markov.node import Node
>>> node_1 = Node('One')
>>> graph = Graph([node_1])
>>> graph.has_node_with_value('One')
True
>>> graph.has_node_with_value('Foo')
False
|
def has_node_with_value(self, value):
    """Whether any node in ``self.node_list`` has the value ``value``.

    Args:
        value (Any): The value to find in ``self.node_list``

    Returns: bool

    Example:
        >>> from blur.markov.node import Node
        >>> node_1 = Node('One')
        >>> graph = Graph([node_1])
        >>> graph.has_node_with_value('One')
        True
        >>> graph.has_node_with_value('Foo')
        False
    """
    return any(node.value == value for node in self.node_list)
| 1,020,130
|
Initialize the only Config object and assign it to other classes.
Args:
config (Config): Config object.
all_linters (dict): Names and classes of all available linters.
|
def __init__(self, config=None, all_linters=None):
    """Initialize the only Config object and assign it to other classes.

    Args:
        config (Config): Config object.
        all_linters (dict): Names and classes of all available linters.
    """
    self._classes = all_linters or LINTERS
    self._config = config or Config(self._classes)
    # Share the config with the runner via a class-level attribute.
    LinterRunner.config = self._config
| 1,020,503
|
Run linters in parallel and sort all results.
Args:
targets (list): List of files and folders to lint.
|
def lint(self, targets):
    """Run linters in parallel and sort all results.

    Args:
        targets (list): List of files and folders to lint.

    Returns:
        tuple: (sorted list of stdout entries, iterator of stderr entries).
    """
    LinterRunner.targets = targets
    linter_classes = self._config.get_linter_classes()
    # Each linter runs in its own worker process.
    with Pool() as pool:
        raw_results = pool.map(LinterRunner.run, linter_classes)
    pairs = [result for result in raw_results if result is not None]
    stdout, stderr = zip(*pairs)
    return sorted(chain.from_iterable(stdout)), chain.from_iterable(stderr)
| 1,020,504
|
Read arguments, run and print results.
Args:
args (dict): Arguments parsed by docopt.
|
def run_from_cli(self, args):
    """Read arguments, run and print results.

    Args:
        args (dict): Arguments parsed by docopt.
    """
    if args['--dump-config']:
        self._config.print_config()
    else:
        stdout, stderr = self.lint(args['<path>'])
        self.print_results(stdout, stderr)
| 1,020,505
|
Takes a list of files which are assumed to be jinja2 templates and tries to
parse the contents of the files
Args:
file_contents (str): File contents of a jinja file
Raises:
Exception: An exception is raised if the contents of the file cannot be
parsed.
|
def parse(file_contents, file_name):
    """Try to parse the contents of a jinja2 template file.

    Args:
        file_contents (str): File contents of a jinja file
        file_name (str): Name of the file, used in the error report.

    Returns:
        str: empty on success, otherwise a report describing why the
        contents could not be parsed.
    """
    result = ""
    try:
        Environment().parse(file_contents)
    except Exception as exc:
        result += "ERROR: Jinja2 Template File: {0}".format(file_name)
        result += repr(exc) + '\n'
    return result
| 1,020,603
|
r"""Helps to interactively get user input.
Args:
desc (str): The description for input.
type (type / CustomType): The type of the input (defaults to None).
|
def get(type=None, **ArgConfig):
    """Interactively gather user input.

    Args:
        type (type / CustomType): The type of the input (defaults to None).
        **ArgConfig: additional configuration forwarded to ``gatherInput``.

    Note:
        ``type`` shadows the builtin of the same name; kept for backward
        compatibility with existing callers.
    """
    # Fix: removed a stray `r` expression statement (leftover from a
    # stripped raw docstring) that raised NameError at call time.
    ArgConfig.update(type=type)
    return gatherInput(**reconfigArg(ArgConfig))
| 1,020,657
|
Get version info from executable
Args:
path: path to the executable
Returns: VersionInfo
|
def get_product_version(path: typing.Union[str, Path]) -> VersionInfo:
    """Get version info from executable.

    Args:
        path: path to the executable

    Returns: VersionInfo

    Raises:
        RuntimeError: if no version info could be extracted.
    """
    path = Path(path).absolute()
    pe_info = pefile.PE(str(path))
    try:
        for file_info in pe_info.FileInfo:  # pragma: no branch
            # NOTE(review): depending on the pefile version, FileInfo
            # entries may be lists or single structures; the else branch
            # re-parses the whole pe_info.FileInfo on every non-list
            # iteration — confirm that's intended.
            if isinstance(file_info, list):
                result = _parse_file_info(file_info)
                if result:
                    return result
            else:
                result = _parse_file_info(pe_info.FileInfo)
                if result:
                    return result
        raise RuntimeError(f'unable to obtain version from {path}')
    except (KeyError, AttributeError) as exc:
        traceback.print_exc()
        raise RuntimeError(f'unable to obtain version from {path}') from exc
| 1,020,680
|
This is a blocking function call that will wait for the queuing
thread to complete.
parameters:
waiting_func - this function will be called every one second while
waiting for the queuing thread to quit. This allows
for logging timers, status indicators, etc.
|
def wait_for_completion(self, waiting_func=None):
    """Block until the queuing thread completes.

    parameters:
        waiting_func - this function will be called every one second while
                       waiting for the queuing thread to quit. This allows
                       for logging timers, status indicators, etc.
    """
    self.logger.debug("waiting to join queuingThread")
    self._responsive_join(self.queuing_thread, waiting_func)
| 1,020,722
|
Initialize a new thread.
parameters:
config - the configuration from configman
task_queue - a reference to the queue from which to fetch jobs
|
def __init__(self, config, task_queue):
    """Initialize a new thread.

    parameters:
        config - the configuration from configman
        task_queue - a reference to the queue from which to fetch jobs
    """
    super(TaskThread, self).__init__()
    self.task_queue = task_queue
    self.config = config
| 1,020,729
|
Find a value in the cumulative distribution function of a normal curve.
See https://en.wikipedia.org/wiki/Normal_distribution
Args:
x (float): Value to feed into the normal function
mean (float): Mean of the normal function
variance (float): Variance of the normal function
Returns: float
Example:
>>> round(_normal_function(0, 0, 5), 4)
0.1784
|
def _normal_function(x, mean, variance):
e_power = -1 * (((x - mean) ** 2) / (2 * variance))
return (1 / math.sqrt(2 * variance * math.pi)) * (math.e ** e_power)
| 1,020,760
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.