code | docs |
|---|---|
def inverse(self, N):
if N == 0:
return 0
lm, hm = 1, 0
low, high = N % self.P, self.P
while low > 1:
r = high//low
nm, new = hm - lm * r, high - low * r
lm, low, hm, high = nm, new, lm, low
return lm % self.P | Returns the modular inverse of an integer with respect to the field
characteristic, P.
Use the Extended Euclidean Algorithm:
https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm |
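A minimal standalone sketch of the same routine (illustrative names; the `pow(x, -1, p)` cross-check needs Python 3.8+):

```python
def mod_inverse(n, p):
    """Modular inverse of n mod prime p via the extended Euclidean algorithm."""
    if n == 0:
        raise ZeroDivisionError('0 has no modular inverse')
    lm, hm = 1, 0
    low, high = n % p, p
    while low > 1:
        r = high // low
        lm, low, hm, high = hm - lm * r, high - low * r, lm, low
    return lm % p

p = 2**255 - 19  # a well-known prime (the Curve25519 field)
assert mod_inverse(42, p) == pow(42, -1, p)
assert (42 * mod_inverse(42, p)) % p == 1
```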
def is_on_curve(self, point):
X, Y = point.X, point.Y
return (
pow(Y, 2, self.P) - pow(X, 3, self.P) - self.a * X - self.b
) % self.P == 0 | Checks whether a point is on the curve.
Args:
point (AffinePoint): Point to be checked.
Returns:
bool: True if point is on the curve, False otherwise. |
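As a concrete check, secp256k1 (the curve the surrounding snippets appear to target, with a = 0, b = 7) and its published generator point satisfy the same congruence:

```python
# secp256k1 domain parameters (standard published values)
P = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F
A, B = 0, 7
Gx = 0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798
Gy = 0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8

# The curve equation: y^2 = x^3 + a*x + b (mod P)
assert (pow(Gy, 2, P) - pow(Gx, 3, P) - A * Gx - B) % P == 0
```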
def generate_private_key(self):
random_string = base64.b64encode(os.urandom(4096)).decode('utf-8')
binary_data = bytes(random_string, 'utf-8')
hash_object = hashlib.sha256(binary_data)
message_digest_bin = hash_object.digest()
message_digest_hex = binascii.hexlify(messag... | Generates a private key based on the password.
SHA-256 is a member of the SHA-2 cryptographic hash functions designed by
the NSA. SHA stands for Secure Hash Algorithm. The password is converted
to bytes and hashed with SHA-256. The binary output is converted to a hex
representation.
... |
def generate_public_key(self):
private_key = int(self.private_key, 16)
if private_key >= self.N:
raise Exception('Invalid private key.')
G = JacobianPoint(self.Gx, self.Gy, 1)
public_key = G * private_key
x_hex = '{0:0{1}x}'.format(public_key.X, 64)
... | Generates a public key from the hex-encoded private key using elliptic
curve cryptography. The private key is multiplied by a predetermined point
on the elliptic curve called the generator point, G, resulting in the
corresponding public key. The generator point is always the same for all
... |
def generate_address(self):
binary_pubkey = binascii.unhexlify(self.public_key)
binary_digest_sha256 = hashlib.sha256(binary_pubkey).digest()
binary_digest_ripemd160 = hashlib.new('ripemd160', binary_digest_sha256).digest()
binary_version_byte = bytes([0])
binary_with_v... | Creates a Bitcoin address from the public key.
Details of the steps for creating the address are outlined in this link:
https://en.bitcoin.it/wiki/Technical_background_of_version_1_Bitcoin_addresses
The last step is Base58Check encoding, which is similar to Base64 encoding but
... |
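The truncated snippet ends before the Base58Check step; a hedged sketch of that final encoding (standard Bitcoin alphabet; `payload` stands for the version byte plus RIPEMD-160 digest built above):

```python
import hashlib

B58_ALPHABET = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'

def base58check_encode(payload: bytes) -> str:
    # Checksum: first 4 bytes of a double SHA-256 over the payload.
    checksum = hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4]
    data = payload + checksum
    # Treat the bytes as one big integer and repeatedly divide by 58.
    num = int.from_bytes(data, 'big')
    encoded = ''
    while num > 0:
        num, rem = divmod(num, 58)
        encoded = B58_ALPHABET[rem] + encoded
    # Each leading zero byte is conventionally encoded as '1'.
    pad = len(data) - len(data.lstrip(b'\x00'))
    return '1' * pad + encoded
```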
def double(self):
X1, Y1, Z1 = self.X, self.Y, self.Z
if Y1 == 0:
return POINT_AT_INFINITY
S = (4 * X1 * Y1 ** 2) % self.P
M = (3 * X1 ** 2 + self.a * Z1 ** 4) % self.P
X3 = (M ** 2 - 2 * S) % self.P
Y3 = (M * (S - X3) - 8 * Y1 ** 4) % self.P
... | Doubles this point.
Returns:
JacobianPoint: The point corresponding to `2 * self`. |
def to_affine(self):
X, Y, Z = self.X, self.Y, self.inverse(self.Z)
return ((X * Z ** 2) % self.P, (Y * Z ** 3) % self.P) | Converts this point to an affine representation.
Returns:
AffinePoint: The affine representation. |
def double(self):
X1, Y1, a, P = self.X, self.Y, self.a, self.P
if self.infinity:
return self
S = ((3 * X1 ** 2 + a) * self.inverse(2 * Y1)) % P
X2 = (S ** 2 - (2 * X1)) % P
Y2 = (S * (X1 - X2) - Y1) % P
return AffinePoint(X2, Y2) | Doubles this point.
Returns:
AffinePoint: The point corresponding to `2 * self`. |
def slope(self, other):
X1, Y1, X2, Y2 = self.X, self.Y, other.X, other.Y
Y3 = Y1 - Y2
X3 = X1 - X2
return (Y3 * self.inverse(X3)) % self.P | Determines the slope between this point and another point.
Args:
other (AffinePoint): The second point.
Returns:
int: Slope between self and other. |
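The slope is exactly what chord-rule point addition needs; a sketch of a companion `add` under the same class assumptions (`slope`, `double`, `P`, and `AffinePoint` as above), ignoring the point-at-infinity branches:

```python
def add(self, other):
    """Chord-rule addition of two affine points (sketch; infinity cases omitted)."""
    if self.X == other.X:
        return self.double()  # same x-coordinate: assume equal points and double
    S = self.slope(other)                      # (Y1 - Y2) * inverse(X1 - X2) mod P
    X3 = (S ** 2 - self.X - other.X) % self.P
    Y3 = (S * (self.X - X3) - self.Y) % self.P
    return AffinePoint(X3, Y3)
```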
def to_jacobian(self):
if not self:
return JacobianPoint(X=0, Y=0, Z=0)
return JacobianPoint(X=self.X, Y=self.Y, Z=1) | Converts this point to a Jacobian representation.
Returns:
JacobianPoint: The Jacobian representation. |
def import_model(self, name, path="floyd.db.models"):
if name in self._model_cache:
return self._model_cache[name]
try:
model = getattr(__import__(path, None, None, [name]), name)
self._model_cache[name] = model
except ImportError:
return False
return model | Imports a model named `name` from `path`, returning it from the local
model cache if it has been previously loaded, otherwise importing it. |
def parse_md(self):
post_content = _MARKDOWN.convert(self.raw_src)
if hasattr(_MARKDOWN, 'Meta'):
for key in _MARKDOWN.Meta:
print("\t meta: %s: %s (%s)" % (key, _MARKDOWN.Meta[key][0], type(_MARKDOWN.Meta[key][0])))
if key == 'pubdate':... | Takes a post path and returns a dictionary of variables |
def filter(self, **kwargs):
# @TODO refactor with models as dicts
f_field = next(iter(kwargs))  # first (and only) keyword argument
f_value = kwargs[f_field]
_newset = []
for m in self._dataset:
if hasattr(m, f_field):
if getattr(m, f_field) == f_value:
_newset.append(m)
self._dataset = _newset
r... | filter results of dataset eg.
Query('Posts').filter(post_type='post') |
def sort_by(self, sb):
self._dataset.sort(key=lambda x: x.pubdate, reverse=True)  # sort() mutates in place and returns None
return self | Sort results |
def execute_train_task_with_dependencies(self, task_cls, **kwargs):
log.info("Task {0}".format(get_task_name(task_cls)))
#Instantiate the task
task_inst = task_cls()
#Grab arguments from the task instance and set them
for arg in task_inst.args:
if arg not in ... | Run the training, as well as any dependencies of the training
task_cls - class of a task |
def execute_predict_task(self, task_inst, predict_data, **kwargs):
result = task_inst.predict(predict_data, **task_inst.args)
return result | Do a prediction
task_inst - instance of a task |
def train(self, **kwargs):
log.info("Starting to train...")
if not self.setup_run:
self.setup()
self.trained_tasks = []
for task in self.tasks:
data = self.reformatted_input[task.data_format]['data']
target = self.reformatted_input[task.data_f... | Do the workflow training |
def predict(self, **kwargs):
reformatted_predict = self.reformat_predict_data()
results = {}
for task_inst in self.trained_tasks:
predict = reformatted_predict[task_inst.data_format]['predict']
kwargs['predict']=predict
results.update({get_task_name(t... | Do the workflow prediction (done after training, with new data) |
def read_input(self, input_cls, filename, **kwargs):
input_inst = input_cls()
input_inst.read_input(filename)
return input_inst.get_data() | Read in input and do some minimal preformatting
input_cls - the class to use to read the input
filename - input filename |
def reformat_file(self, input_file, input_format, output_format):
#Return none if input_file or input_format do not exist
if input_file is None or input_format is None:
return None
#Find the needed input class and read the input stream
try:
input_cls = se... | Reformat input data files to a format the tasks can use |
def reformat_input(self, **kwargs):
reformatted_input = {}
needed_formats = []
for task_cls in self.tasks:
needed_formats.append(task_cls.data_format)
self.needed_formats = list(set(needed_formats))
for output_format in self.needed_formats:
refor... | Reformat input data |
def _create_modulename(cdef_sources, source, sys_version):
key = '\x00'.join([sys_version[:3], source, cdef_sources])
key = key.encode('utf-8')
k1 = hex(binascii.crc32(key[0::2]) & 0xffffffff)
k1 = k1.lstrip('0x').rstrip('L')
k2 = hex(binascii.crc32(key[1::2]) & 0xffffffff)
k2 = k2.lstrip('... | This is the same as CFFI's create modulename except we don't include the
CFFI version. |
def server_identity_is_verified(self):
# Encrypt a uuid token for the server
server_verify_token = self.gpg.encrypt(self._nonce0,
self.server_fingerprint, always_trust=True)
if not server_verify_token.ok:
raise GPGAuthStage0Exce... | GPGAuth stage0 |
def user_auth_token(self):
# stage0 is a prerequisite
if not self.server_identity_is_verified():
return False
server_login_response = post_log_in(
self,
keyid=self.user_fingerprint
)
if not check_server_login_stage1_response(server_logi... | GPGAuth Stage1 |
def is_authenticated_with_token(self):
""" Send back the token to the server to get auth cookie """
server_login_response = post_log_in(
self,
keyid=self.user_fingerprint,
user_token_result=self.user_auth_token
)
if not check_server_login_st... | GPGAuth Stage 2 |
def publish(self, message, message_type, topic=''):
if message_type == MULTIPART:
raise Exception("Unsupported request type")
super(Publisher, self).send(message, message_type, topic) | Publish the message on the PUB socket with the given topic name.
Args:
- message: the message to publish
- message_type: the type of message being sent
- topic: the topic on which to send the message. Defaults to ''. |
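The `Publisher` presumably sits on a ZeroMQ PUB socket; a plain-pyzmq sketch of the same topic-prefixed publish (an assumption about the transport, not this library's internals):

```python
import zmq

ctx = zmq.Context.instance()
pub = ctx.socket(zmq.PUB)
pub.bind('tcp://127.0.0.1:5556')

# SUB sockets filter on the leading frame, so send the topic first.
pub.send_multipart([b'sensors', b'{"temp": 21.5}'])
```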
def load(self, cls, run_id):
id_code = self.generate_load_identifier(cls, run_id)
inst = self.store.load(id_code)
return inst | Load a workflow
cls - workflow class (to get __name__ from)
run_id - id given to the specific run |
def save(self, obj, run_id):
id_code = self.generate_save_identifier(obj, run_id)
self.store.save(obj, id_code) | Save a workflow
obj - instance of a workflow to save
run_id - unique id to give the run |
def setup_tasks(self, tasks):
task_classes = []
for task in tasks:
category, namespace, name = task.split(".")
try:
cls = find_in_registry(category=category, namespace=namespace, name=name)[0]
except TypeError:
log.error("Could... | Find task classes from category.namespace.name strings
tasks - list of strings |
def initialize_workflow(self, workflow):
self.workflow = workflow()
self.workflow.tasks = self.tasks
self.workflow.input_file = self.input_file
self.workflow.input_format = self.input_format
self.workflow.target_file = self.target_file
self.workflow.target_forma... | Create a workflow
workflow - a workflow class |
def reformat_filepath(self, config_file, filename):
if not filename.startswith("/"):
filename = self.config_file_format.format(config_file, filename)
return filename | Convert relative paths in config file to absolute |
def item_lister(command, _connection, page_size, page_number, sort_by,
sort_order, item_class, result_set, **kwargs):
# pylint: disable=R0913
page = page_number
while True:
item_collection = _connection.get_list(command,
page_size=page_size,
... | A generator function for listing Video and Playlist objects. |
def get_manifest(self, asset_xml):
# pylint: disable=E1101
manifest = '<?xml version="1.0" encoding="utf-8"?>'
manifest += '<publisher-upload-manifest publisher-id="%s" ' % \
self.publisher_id
manifest += 'preparer="%s" ' % self.preparer
if self.report_succes... | Construct and return the xml manifest to deliver along with video file. |
def _send_file(self, filename):
# pylint: disable=E1101
ftp = ftplib.FTP(host=self.host)
ftp.login(user=self.user, passwd=self.password)
ftp.set_pasv(True)
ftp.storbinary("STOR %s" % os.path.basename(filename),
open(filename, 'rb')) | Sends a file via FTP.
def _post(self, data, file_to_upload=None):
# pylint: disable=E1101
params = {"JSONRPC": simplejson.dumps(data)}
req = None
if file_to_upload:
req = http_core.HttpRequest(self.write_url)
req.method = 'POST'
req.add_body_part("JSONRPC", simplej... | Make the POST request. |
def _get_response(self, **kwargs):
# pylint: disable=E1101
url = self.read_url + "?output=JSON&token=%s" % self.read_token
for key in kwargs:
if key and kwargs[key]:
val = kwargs[key]
if isinstance(val, (list, tuple)):
val ... | Make the GET request. |
def get_list(self, command, item_class, page_size, page_number, sort_by,
sort_order, **kwargs):
# pylint: disable=R0913,W0221
data = self._get_response(command=command,
page_size=page_size,
page_number=page_number,
... | Not intended to be called directly, but rather through the
ItemResultSet object iterator. |
def initialize_renderer(extensions=None):
if extensions is None:
extensions = []
if isinstance(extensions, str):
extensions = [extension.strip() for extension in extensions.split(',')]
for extension in getattr(settings, 'MARKYMARK_EXTENSIONS', DEFAULT_MARKYMARK_EXTENSIONS):
ex... | Initializes the renderer by setting up the extensions (taking a comma separated
string or iterable of extensions). These extensions are added alongside with the
configured always-on extensions.
Returns a markdown renderer instance. |
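Stripped of the Django settings lookup, the underlying python-markdown call is roughly (extension names here are just the stock ones that ship with the package):

```python
import markdown

def build_renderer(extensions=None):
    if isinstance(extensions, str):
        extensions = [e.strip() for e in extensions.split(',')]
    return markdown.Markdown(extensions=extensions or [])

md = build_renderer('extra,toc')       # 'extra' and 'toc' are built-in extensions
print(md.convert('**bold** text'))     # <p><strong>bold</strong> text</p>
```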
def setup_formats(self):
methods = self.get_methods()
for m in methods:
#Methods named "from_X" will be assumed to convert from format X to the common format
if m.startswith("from_"):
self.input_formats.append(re.sub("from_", "", m))
#Methods ... | Inspects its methods to see what it can convert from and to |
def read_input(self, input_data, data_format):
if data_format not in self.input_formats:
raise Exception("Input format {0} not available with this class. Available formats are {1}.".format(data_format, self.input_formats))
data_converter = getattr(self, "from_" + data_format)
... | Reads the input data and converts to common format
input_data - the output from one of the input classes (ie CSVInput)
data_format - the format of the data. See utils.input.dataformats |
def get_data(self, data_format):
if data_format not in self.output_formats:
raise Exception("Output format {0} not available with this class. Available formats are {1}.".format(data_format, self.output_formats))
data_converter = getattr(self, "to_" + data_format)
return data... | Reads the common format and converts to output data
data_format - the format of the output data. See utils.input.dataformats |
def from_csv(self, input_data):
reformatted_data = []
for (i,row) in enumerate(input_data):
if i==0:
headers = row
else:
data_row = {}
for (j,h) in enumerate(headers):
data_row.update({h : row[j]})
... | Reads csv format input data and converts to json. |
def to_dataframe(self):
keys = self.data[0].keys()
column_list = []
for k in keys:
key_list = []
for i in range(len(self.data)):
key_list.append(self.data[i][k])
column_list.append(key_list)
df = DataFrame(np.asarray(column_l... | Reads the common format self.data and writes out to a dataframe. |
def check_extensions(extensions: Set[str], allow_multifile: bool = False):
check_var(extensions, var_types=set, var_name='extensions')
# -- check them one by one
for ext in extensions:
check_extension(ext, allow_multifile=allow_multifile) | Utility method to check that all extensions in the provided set are valid
:param extensions:
:param allow_multifile:
:return: |
def check_extension(extension: str, allow_multifile: bool = False):
check_var(extension, var_types=str, var_name='extension')
# Extension should either be 'multifile' or start with EXT_SEPARATOR and contain only one EXT_SEPARATOR
if (extension.startswith(EXT_SEPARATOR) and extension.count(EXT_SEPARATO... | Utility method to check that the provided extension is valid. Extension should either be MULTIFILE_EXT
(='multifile') or start with EXT_SEPARATOR (='.') and contain only one occurrence of EXT_SEPARATOR
:param extension:
:param allow_multifile:
:return: |
def get_parsing_plan_log_str(obj_on_fs_to_parse, desired_type, log_only_last: bool, parser):
loc = obj_on_fs_to_parse.get_pretty_location(blank_parent_part=(log_only_last
and not GLOBAL_CONFIG.full_paths_in_logs),
... | Utility method used by several classes to log a message indicating that a given file object is planned to be parsed
to the given object type with the given parser. It is in particular used in str(ParsingPlan), but not only.
:param obj_on_fs_to_parse:
:param desired_type:
:param log_only_last: a flag to... |
def are_worth_chaining(parser, to_type: Type[S], converter: Converter[S, T]) -> bool:
if not parser.can_chain:
# The base parser prevents chaining
return False
elif not is_any_type(to_type) and is_any_type(converter.to_type):
# we gain the capability to gene... | Utility method to check if it makes sense to chain this parser with the given destination type, and the given
converter to create a parsing chain. Returns True if it brings value to chain them.
To bring value,
* the converter's output should not be a parent class of the parser's output. Otherwi... |
def create_for_caught_error(parser: _BaseParserDeclarationForRegistries, desired_type: Type[T],
obj: PersistedObject, caught: Exception, options: Dict[str, Dict[str, Any]]):
try:
typ = get_pretty_type_str(desired_type)
except:
typ = str(de... | Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725
:param parser:
:param desired_type:
:param obj:
:param caught:
:param options:
:return: |
def create_for_wrong_result_type(parser: _BaseParserDeclarationForRegistries, desired_type: Type[T],
obj: PersistedObject, result: T, options: Dict[str, Dict[str, Any]]):
msg = "Error while parsing {obj} as a {typ} with parser {p} using options=({opts}) - parser ret... | Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725
:param parser:
:param desired_type:
:param obj:
:param result:
:param options:
:return: |
def execute(self, logger: Logger, options: Dict[str, Dict[str, Any]]) -> T:
try:
res = self._execute(logger, options)
except Exception as e:
raise ParsingException.create_for_caught_error(self.parser, self.obj_type, self.obj_on_fs_to_parse, e,
... | Called to parse the object as described in this parsing plan, using the provided arguments for the parser.
* Exceptions are caught and wrapped into ParsingException
* If result does not match expected type, an error is thrown
:param logger: the logger to use during parsing (optional: None is su... |
def _execute(self, logger: Logger, options: Dict[str, Dict[str, Any]]) -> T:
pass | Implementing classes should perform the parsing here, possibly using custom methods of self.parser.
:param logger:
:param options:
:return: |
def _get_applicable_options(self, options: Dict[str, Dict[str, Any]]):
return get_options_for_id(options, self.get_id_for_options()) | Returns the options that are applicable to this particular parser, from the full map of options.
It first uses 'get_id_for_options()' to know the id of this parser, and then simply extracts the contents of
the options corresponding to this id, or returns an empty dict().
:param options: a dicti... |
def create_parsing_plan(self, desired_type: Type[T], filesystem_object: PersistedObject, logger: Logger,
options: Dict[str, Dict[str, Any]]) -> ParsingPlan[T]:
pass | Creates a parsing plan to parse the given filesystem object into the given desired_type.
Implementing classes may wish to support additional parameters.
:param desired_type: the type of object that should be created as the output of parsing plan execution.
:param filesystem_object: the persiste... |
def add(self, f_ipaddr, f_macaddr, f_hostname, f_netbios_name, f_engineer, f_asset_group, f_confirmed):
return self.send.host_add(f_ipaddr, f_macaddr, f_hostname, f_netbios_name, f_engineer,
f_asset_group, f_confirmed) | Add a t_hosts record
:param f_ipaddr: IP address
:param f_macaddr: MAC Address
:param f_hostname: Hostname
:param f_netbios_name: NetBIOS Name
:param f_engineer: Engineer username
:param f_asset_group: Asset group
:param f_confirmed: Confirmed boolean
:re... |
def parse_now_field(s):
if not s.startswith('UTC:'):
return None # Invalid string
s = s[4:]
# isoformat can return strings both with and without microseconds - we
# account for both
try:
dt = datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S.%f')
except ValueError:
... | Return a datetime instance from a string generated by now_field.
IMPORTANT: the datetime will be in UTC |
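The `now_field` counterpart is not shown; assuming it emits `'UTC:' + isoformat()`, a round-trip sketch that mirrors the two-format fallback:

```python
import datetime

def now_field():
    # Hypothetical counterpart to parse_now_field: an ISO timestamp with a 'UTC:' prefix.
    return 'UTC:' + datetime.datetime.utcnow().isoformat()

s = now_field()
assert s.startswith('UTC:')
try:
    dt = datetime.datetime.strptime(s[4:], '%Y-%m-%dT%H:%M:%S.%f')
except ValueError:  # isoformat() omits microseconds when they are zero
    dt = datetime.datetime.strptime(s[4:], '%Y-%m-%dT%H:%M:%S')
```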
def get_ftp(ftp_conf, debug=0):
server = ftp_conf.get('server')
user = ftp_conf.get('user')
password = ftp_conf.get('password')
start_path = ftp_conf.get('start_path')
slog.info("Connecting FTP server %s ......", server)
ftpStr = 'ftp://%s/'%server
if start_path:
ftpStr = ftpStr... | 得到一个 已经打开的FTP 实例,和一个 ftp 路径。
:param dict ftp_conf: ftp配置文件,格式如下:
>>> {
>>> 'server':'127.0.0.1',
>>> 'start_path':None,
>>> 'user':'admin',
>>> 'password':'123456',
>>> }
:returns: ftp, ftpserverstr
:rtype: :class:`ftplib.FTP` , str |
def upload_file(file_path, remote_path, ftp_conf, remove_file=False):
check_ftp_conf(ftp_conf)
ftp, ftpStr = get_ftp(ftp_conf)
lf = open(file_path, 'rb')
slog.info('Uploading "%s" to "%s/%s" ......'%(file_path, ftpStr, remote_path))
ftp.storbinary("STOR %s"%remote_path, lf)
filelist = ftp.... | Uploads the specified file to the FTP server.
:param str file_path: Absolute path of the file to upload.
:param str remote_path: Path of the file on the FTP server, relative to the server's initial path.
:param dict ftp_conf: FTP configuration dict; see :func:`get_ftp`.
:param bool remove_file: Whether to delete the local file after a successful upload.
:returns: The file listing on the FTP server
:rtype: list |
def retrieve_data(self):
#==== Retrieve data ====#
df = self.manager.get_historic_data(self.start.date(), self.end.date())
df.replace(0, np.nan, inplace=True)
return df | Retrieves data as a DataFrame. |
def get_min_risk(self, weights, cov_matrix):
def func(weights):
"""The objective function that minimizes variance."""
return np.matmul(np.matmul(weights.transpose(), cov_matrix), weights)
def func_deriv(weights):
"""The derivative of the objective function.... | Minimizes the variance of a portfolio. |
def get_max_return(self, weights, returns):
def func(weights):
"""The objective function that maximizes returns."""
return np.dot(weights, returns.values) * -1
constraints = ({'type': 'eq', 'fun': lambda weights: (weights.sum() - 1)})
solution = self.solve_mini... | Maximizes the returns of a portfolio. |
def efficient_frontier(
self,
returns,
cov_matrix,
min_return,
max_return,
count
):
columns = [coin for coin in self.SUPPORTED_COINS]
# columns.append('Return')
# columns.append('Risk')
values = pd.DataFrame(columns=columns)
... | Returns a DataFrame of efficient portfolio allocations for `count` risk
indices. |
def solve_minimize(
self,
func,
weights,
constraints,
lower_bound=0.0,
upper_bound=1.0,
func_deriv=False
):
bounds = ((lower_bound, upper_bound), ) * len(self.SUPPORTED_COINS)
return minimize(
fun=func, x0=weights, jac=fun... | Returns the solution to a minimization problem. |
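A self-contained sketch of the same SLSQP setup with a toy two-asset covariance matrix (the fully-invested equality constraint and [0, 1] bounds mirror the snippets above):

```python
import numpy as np
from scipy.optimize import minimize

cov = np.array([[0.04, 0.01], [0.01, 0.09]])   # toy 2-asset covariance
x0 = np.array([0.5, 0.5])                      # initial weights

res = minimize(
    fun=lambda w: w @ cov @ w,                 # portfolio variance
    x0=x0,
    method='SLSQP',
    bounds=((0.0, 1.0),) * len(x0),
    constraints=({'type': 'eq', 'fun': lambda w: w.sum() - 1},),
)
print(res.x)  # minimum-variance weights summing to 1
```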
def allocate(self):
df = self.manager.get_historic_data()[self.SUPPORTED_COINS]
#==== Calculate the daily changes ====#
change_columns = []
for column in df:
if column in self.SUPPORTED_COINS:
change_column = '{}_change'.format(column)
... | Returns an efficient portfolio allocation for the given risk index. |
def handle_default_options(options):
if options.settings:
#Set the percept_settings_module (picked up by settings in conf.base)
os.environ['PERCEPT_SETTINGS_MODULE'] = options.settings
if options.pythonpath:
#Append the pythonpath and the directory one up from the pythonpath to sys.... | Pass in a Values instance from OptionParser. Handle settings and pythonpath
options - Values from OptionParser |
def create_parser(self, prog_name, subcommand):
parser = OptionParser(prog=prog_name,
usage=self.usage(subcommand),
option_list=self.option_list)
return parser | Create an OptionParser
prog_name - Name of a command
subcommand - Name of a subcommand |
def hook(name=None, *args, **kwargs):
def decorator(f):
if not hasattr(f, "hooks"):
f.hooks = []
f.hooks.append((name or f.__name__, args, kwargs))
return f
return decorator | Decorator to register the function as a hook |
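Usage sketch: the decorator only records `(name, args, kwargs)` tuples on the function; `register_hooks` below performs the actual wiring:

```python
@hook('on_save', validate=True)
def audit(record):
    pass

print(audit.hooks)  # [('on_save', (), {'validate': True})]
```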
def register_hooks(func, hooks, obj):
for name, args, kwargs in hooks:
hook = getattr(obj, name)
force_call = kwargs.pop("_force_call", False)
if force_call or len(args) > 0 or len(kwargs) > 0:
hook = hook(*args, **kwargs)
hook(func) | Register func on obj via hooks.
Hooks should be a tuple of (name, args, kwargs) where
name is a method name of obj. If args or kwargs are not empty,
the method will be called first and expect a new function as return. |
def action(*args, **kwargs):
def decorator(f):
return ActionFunction(f, *args, **kwargs)
return decorator | Transforms functions or class methods into actions.
Optionally, you can define a function to be used as the view initializer:
@action()
def my_action():
pass
@my_action.init_view
def my_action_init_view(view, options):
pass |
def with_actions(actions_or_group_name, actions=None):
group = None
if isinstance(actions_or_group_name, str):
group = actions_or_group_name
else:
actions = actions_or_group_name
def decorator(f):
if isinstance(f, WithActionsDecorator):
dec = f
else:
... | Executes the list of actions before/after the function
Actions should be a list where items are action names as
strings or a dict. See frasco.actions.loaders.load_action(). |
def expose(rule, **options):
def decorator(f):
if not hasattr(f, "urls"):
f.urls = []
if isinstance(rule, (list, tuple)):
f.urls.extend(rule)
else:
f.urls.append((rule, options))
return f
return decorator | Decorator to add an url rule to a function |
def _create_unicode_map():
unicode_map = {}
for beta, uni in _map.BETACODE_MAP.items():
# Include decomposed equivalent where necessary.
norm = unicodedata.normalize('NFC', uni)
unicode_map[norm] = beta
unicode_map[uni] = beta
# Add the final sigmas.
final_sigma_no... | Create the inverse map from unicode to betacode.
Returns:
The hash map to convert unicode characters to the beta code representation. |
def _create_conversion_trie(strict):
t = pygtrie.CharTrie()
for beta, uni in _map.BETACODE_MAP.items():
if strict:
t[beta] = uni
else:
# The order of accents is very strict and weak. Allow for many orders of
# accents between asterisk and letter or after... | Create the trie for betacode conversion.
Args:
strict: Flag to allow for flexible diacritic order on input.
Returns:
The trie for conversion. |
def _find_max_beta_token_len():
max_beta_len = -1
for beta, uni in _map.BETACODE_MAP.items():
if len(beta) > max_beta_len:
max_beta_len = len(beta)
return max_beta_len | Finds the maximum length of a single betacode token.
Returns:
The length of the longest key in the betacode map, which corresponds to the
longest single betacode token. |
def beta_to_uni(text, strict=False):
# Check if the requested configuration for conversion already has a trie
# stored otherwise convert it.
param_key = (strict,)
try:
t = _BETA_CONVERSION_TRIES[param_key]
except KeyError:
t = _create_conversion_trie(*param_key)
_BETA_CON... | Converts the given text from betacode to unicode.
Args:
text: The beta code text to convert. All of this text must be betacode.
strict: Flag to allow for flexible diacritic order on input.
Returns:
The converted text. |
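Typical usage (hedged: the exact output depends on the betacode map, but 'lo/gos' is the conventional betacode spelling of λόγος):

```python
print(beta_to_uni('lo/gos'))  # λόγος -- '/' marks the acute accent; trailing sigma becomes final ς
```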
def uni_to_beta(text):
u = _UNICODE_MAP
transform = []
for ch in text:
try:
conv = u[ch]
except KeyError:
conv = ch
transform.append(conv)
converted = ''.join(transform)
return converted | Convert unicode text to a betacode equivalent.
This method can handle tónos or oxeîa characters in the input.
Args:
text: The text to convert to betacode. This text does not have to all be
Greek polytonic text, and only Greek characters will be converted. Note
that in this case, you cannot... |
def __calculate_order(self, node_dict):
if len(node_dict.keys()) != len(set(node_dict.keys())):
raise DependencyTreeException("Duplicate Keys Exist in node dictionary!")
valid_order = [node for node, dependencies in node_dict.items() if len(dependencies) == 0]
remaining_node... | Determine a valid ordering of the nodes in which a node is not called before all of it's dependencies.
Raise an error if there is a cycle, or nodes are missing. |
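The same idea as a compact standalone sketch (repeatedly peel off nodes whose dependencies are all satisfied; anything left over means a cycle or a missing node):

```python
def calculate_order(node_dict):
    remaining = {n: set(deps) for n, deps in node_dict.items()}
    order = []
    while remaining:
        ready = [n for n, deps in remaining.items() if not deps]
        if not ready:
            raise ValueError('Cycle detected or dependency missing!')
        for n in sorted(ready):
            order.append(n)
            del remaining[n]
        for deps in remaining.values():
            deps -= set(ready)
    return order

print(calculate_order({'a': [], 'b': ['a'], 'c': ['a', 'b']}))  # ['a', 'b', 'c']
```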
def read_input(self, filename, has_header=True):
stream = open(filename)
reader = csv.reader(stream)
csv_data = []
for (i, row) in enumerate(reader):
if i==0:
if not has_header:
csv_data.append([str(j) for j in range(len(row))])... | filename is any filename, or something on which open() can be called
for example:
csv_input = CSVInput()
csv_input.read_input("csvfile.csv") |
def pprint_out(dct: Dict):
for name, val in dct.items():
print(name + ':')
pprint(val, indent=4) | Utility methods to pretty-print a dictionary that is typically outputted by parsyfiles (an ordered dict)
:param dct:
:return: |
def warn_import_error(type_of_obj_support: str, caught: ImportError):
msg = StringIO()
msg.writelines('Import Error while trying to add support for ' + type_of_obj_support + '. You may continue but '
"the associated parsers and converters won't be available: \n")
traceback.print_tb(c... | Utility method to print a warning message about failed import of some modules
:param type_of_obj_support:
:param caught:
:return: |
def create_parser_options(lazy_mfcollection_parsing: bool = False) -> Dict[str, Dict[str, Any]]:
return {MultifileCollectionParser.__name__: {'lazy_parsing': lazy_mfcollection_parsing}} | Utility method to create a default options structure with the lazy parsing inside
:param lazy_mfcollection_parsing:
:return: the options structure filled with the lazy-parsing option (for the MultifileCollectionParser) |
def add_parser_options(options: Dict[str, Dict[str, Any]], parser_id: str, parser_options: Dict[str, Dict[str, Any]],
overwrite: bool = False):
if parser_id in options.keys() and not overwrite:
raise ValueError('There are already options in this dictionary for parser id ' + parse... | Utility method to add options for a given parser, to the provided options structure
:param options:
:param parser_id:
:param parser_options:
:param overwrite: True to silently overwrite. Otherwise an error will be thrown
:return: |
def parse_item(location: str, item_type: Type[T], item_name_for_log: str = None,
file_mapping_conf: FileMappingConfiguration = None,
logger: Logger = default_logger, lazy_mfcollection_parsing: bool = False) -> T:
rp = _create_parser_from_default(logger)
opts = create_parser_op... | Creates a RootParser() and calls its parse_item() method
:param location:
:param item_type:
:param item_name_for_log:
:param file_mapping_conf:
:param logger:
:param lazy_mfcollection_parsing:
:return: |
def parse_collection(location: str, base_item_type: Type[T], item_name_for_log: str = None,
file_mapping_conf: FileMappingConfiguration = None, logger: Logger = default_logger,
lazy_mfcollection_parsing: bool = False)\
-> Dict[str, T]:
rp = _create_parser_from_... | Utility method to create a RootParser() with default configuration and call its parse_collection() method
:param location:
:param base_item_type:
:param item_name_for_log:
:param file_mapping_conf:
:param logger:
:param lazy_mfcollection_parsing:
:return: |
def install_basic_multifile_support(self):
if not self.multifile_installed:
self.register_parser(MultifileCollectionParser(self))
self.register_parser(MultifileObjectParser(self, self))
self.multifile_installed = True
else:
raise Exception('Multif... | Utility method for users who created a RootParser with register_default_plugins=False, in order to register only
the multifile support
:return: |
def parse_collection(self, item_file_prefix: str, base_item_type: Type[T], item_name_for_log: str = None,
file_mapping_conf: FileMappingConfiguration = None,
options: Dict[str, Dict[str, Any]] = None) -> Dict[str, T]:
# -- item_name_for_log
item... | Main method to parse a collection of items of type 'base_item_type'.
:param item_file_prefix:
:param base_item_type:
:param item_name_for_log:
:param file_mapping_conf:
:param options:
:return: |
def parse_item(self, location: str, item_type: Type[T], item_name_for_log: str = None,
file_mapping_conf: FileMappingConfiguration = None, options: Dict[str, Dict[str, Any]] = None) -> T:
# -- item_name_for_log
item_name_for_log = item_name_for_log or ''
check_var(it... | Main method to parse an item of type item_type
:param location:
:param item_type:
:param item_name_for_log:
:param file_mapping_conf:
:param options:
:return: |
def _parse__item(self, item_type: Type[T], item_file_prefix: str,
file_mapping_conf: FileMappingConfiguration = None,
options: Dict[str, Dict[str, Any]] = None) -> T:
# for consistency : if options is None, default to the default values of create_parser_option... | Common parsing steps to parse an item
:param item_type:
:param item_file_prefix:
:param file_mapping_conf:
:param options:
:return: |
def findSubCommand(args):
# If the only command we find is the first element of args, we've found the
# driver script itself and re-executing it will cause an infinite loop, so
# don't even look at the first element on its own.
for n in range(len(args) - 1):
command = '-'.join(args[:(len(args) - n)])
... | Given a list ['foo','bar', 'baz'], attempts to create a command name in the
format 'foo-bar-baz'. If that command exists, we run it. If it doesn't, we
check to see if foo-bar exists, in which case we run `foo-bar baz`. We keep
taking chunks off the end of the command name and adding them to the argument
list un... |
def SpamsumDistance(ssA, ssB):
'''
returns the spamsum distance between ssA and ssB
if they use a different block size, assume maximum distance
otherwise returns the LevDistance
'''
mA = re.match(r'^(\d+)[:](.*)$', ssA)
mB = re.match(r'^(\d+)[:](.*)$', ssB)
if mA is None or mB is None:
... | returns the spamsum distance between ssA and ssB
if they use a different block size, assume maximum distance
otherwise returns the LevDistance |
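The `LevDistance` it defers to is classic edit distance; a minimal DP sketch (illustrative, not the module's own implementation):

```python
def lev_distance(a, b):
    """Classic dynamic-programming Levenshtein (edit) distance."""
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        cur = [i]
        for j, cb in enumerate(b, 1):
            cur.append(min(prev[j] + 1,                 # deletion
                           cur[j - 1] + 1,              # insertion
                           prev[j - 1] + (ca != cb)))   # substitution
        prev = cur
    return prev[-1]

assert lev_distance('kitten', 'sitting') == 3
```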
def terms(cls, tags, minimum_match=None):
'''
A query that matches on any (configurable) of the provided terms. This is a simpler syntax query for using a bool query with several term queries in the should clauses. For example:
{
"terms" : {
"tags" : [ "blue", "pill" ]... | A query that match on any (configurable) of the provided terms. This is a simpler syntax query for using a bool query with several term queries in the should clauses. For example:
{
"terms" : {
"tags" : [ "blue", "pill" ],
"minimum_match" : 1
}
} |
def match(cls, field, query, operator=None):
'''
A family of match queries that accept text/numerics/dates, analyzes it, and constructs a query out of it. For example:
{
"match" : {
"message" : "this is a test"
}
}
Note, message is the nam... | A family of match queries that accept text/numerics/dates, analyzes it, and constructs a query out of it. For example:
{
"match" : {
"message" : "this is a test"
}
}
Note, message is the name of a field; you can substitute the name of any field (including ... |
def fuzzy(cls, field, value, boost=None, min_similarity=None, prefix_length=None):
'''
http://www.elasticsearch.org/guide/reference/query-dsl/fuzzy-query.html
A fuzzy based query that uses similarity based on Levenshtein (edit distance) algorithm.
'''
instance = cls(fuzzy={field:... | http://www.elasticsearch.org/guide/reference/query-dsl/fuzzy-query.html
A fuzzy based query that uses similarity based on Levenshtein (edit distance) algorithm. |
def has_child(cls, child_type, query):
'''
http://www.elasticsearch.org/guide/reference/query-dsl/has-child-query.html
The has_child query accepts a query and the child type to run against, and results in parent documents that have child docs matching the query.
> child_query = ElasticQ... | http://www.elasticsearch.org/guide/reference/query-dsl/has-child-query.html
The has_child query accepts a query and the child type to run against, and results in parent documents that have child docs matching the query.
> child_query = ElasticQuery().term(tag='something')
> query = ElasticQuery... |
def queryByPortSensor(portiaConfig, edgeId, port, sensor, last=False, params={ 'from': None, 'to': None, 'order': None, 'precision': 'ms', 'limit': None }):
header = {'Accept': 'text/csv'}
if not last:
endpoint = '/select/device/{0}/port/{1}/sensor/{2}{3}'.format( edgeId, port, sensor, utils... | Returns a pandas data frame with the portia select resultset |
def try_parse_num_and_booleans(num_str):
if isinstance(num_str, str):
# bool
if num_str.lower() == 'true':
return True
elif num_str.lower() == 'false':
return False
# int
if num_str.isdigit():
return int(num_str)
# float
... | Tries to parse the provided string as a number or boolean
:param num_str:
:return: |
def read_dict_from_properties(desired_type: Type[dict], file_object: TextIOBase,
logger: Logger, conversion_finder: ConversionFinder, **kwargs) -> Dict[str, Any]:
# right now jprops relies on a byte stream. So we convert back our nicely decoded Text stream to a unicode
# byte... | Helper method to read a dictionary from a .properties file (java-style) using jprops.
Since jprops does not provide automatic handling for boolean and numbers, this tries to add the feature.
:param file_object:
:return: |
def get_default_jprops_parsers(parser_finder: ParserFinder, conversion_finder: ConversionFinder) -> List[AnyParser]:
return [SingleFileParserFunction(parser_function=read_dict_from_properties,
streaming_mode=True, custom_name='read_dict_from_properties',
... | Utility method to return the default parsers able to parse a dictionary from a properties file.
:return: |
def hook(event=None, dependencies=None):
def wrapper(func):
"""I'm a simple wrapper that manages event hooking"""
func.__deps__ = dependencies
EVENTS.hook(func, event, dependencies)
return func
return wrapper | Hooking decorator. Just `@hook(event, dependencies)` on your function
Kwargs:
event (str): String or Iterable with events to hook
dependencies (str): String or Iterable with modules whose hooks have
to be called before this one for **this** event
Wraps :func:`EventList.hook` |
def load(path):
importpath = path.replace("/", ".").replace("\\", ".")
if importpath[-3:] == ".py":
importpath = importpath[:-3]
try:
importlib.import_module(importpath)
except (ModuleNotFoundError, TypeError):
exec(open(path).read()) | Helper function that tries to load a filepath (or python module notation)
as a python module and on failure `exec` it.
Args:
path (str): Path or module to load
The function tries to import `example.module` when either `example.module`,
`example/module` or `example/module.py` is given. |
def add_image(self, image_path, annotations):
self.image_paths.append(image_path)
self.bounding_boxes.append([bounding_box_from_annotation(**a) for a in annotations]) | Adds an image and its bounding boxes to the current list of files
The bounding boxes are automatically estimated based on the given annotations.
**Parameters:**
``image_path`` : str
The file name of the image, including its full path
``annotations`` : [dict]
A list of annotations, i.e., ... |