id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
283754 | <gh_stars>0
from typing import Any, Set
from sqlalchemy import or_
from sqlalchemy.orm import Query
from rabbitai import security_manager
from rabbitai.views.base import BaseFilter
class DatabaseFilter(BaseFilter):
    """Row-level filter restricting Database records to those the current
    user can reach, either via a direct database permission or via any
    schema-level permission on that database."""

    # TODO(bogdan): consider caching.
    def schema_access_databases(self) -> Set[str]:  # noqa pylint: disable=no-self-use
        """Database names extracted from the user's schema_access perms."""
        perm_names = security_manager.user_view_menu_names("schema_access")
        return {security_manager.unpack_schema_perm(name)[0] for name in perm_names}

    def apply(self, query: Query, value: Any) -> Query:
        """Return `query` narrowed to accessible databases; users with
        all-database access get the query back untouched."""
        if security_manager.can_access_all_databases():
            return query
        database_perms = security_manager.user_view_menu_names("database_access")
        # TODO(bogdan): consider adding datasource access here as well.
        by_schema = self.schema_access_databases()
        accessible = or_(
            self.model.perm.in_(database_perms),
            self.model.database_name.in_(by_schema),
        )
        return query.filter(accessible)
| StarcoderdataPython |
3308440 | <filename>prestans/devel/__init__.py<gh_stars>0
# -*- coding: utf-8 -*-
#
# prestans, A WSGI compliant REST micro-framework
# http://prestans.org
#
# Copyright (c) 2017, Anomaly Software Pty Ltd.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Anomaly Software nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ANOMALY SOFTWARE BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
Contains development tools for prestans.
"""
__all__ = ["ArgParserFactory", "CommandDispatcher"]
import argparse
import os
from prestans.devel import exception
from prestans import __version__
class ArgParserFactory(object):
    """Builds and drives the command line parser for the prestans tooling."""

    def __init__(self):
        self._arg_parser = argparse.ArgumentParser(
            description="command line tools to compliment the prestans framework",
            epilog="pride is distributed by the prestans project <http://github.com/anomaly/prestans/> under the the New BSD license."
        )
        self._subparsers_handle = self._arg_parser.add_subparsers(dest="sub_command")
        self._add_generate_sub_commands()
        self._arg_parser.add_argument(
            '--version',
            action='version',
            version='%(prog)s ' + __version__
        )

    def parse(self):
        """Parse sys.argv and return the populated argument namespace."""
        return self._arg_parser.parse_args()

    def _add_generate_sub_commands(self):
        """Register the `gen` sub-command that emits client-side model
        stubs and filters (Google Closure / ImmutableJS templates)."""
        gen_parser = self._subparsers_handle.add_parser(
            name="gen",
            help="generate client side model stubs, filters"
        )
        # (flags, keyword arguments) pairs; registration order matters for --help output.
        option_specs = [
            (("-t", "--template"), dict(
                choices=['closure.model', 'closure.filter', 'immutable.model', 'immutable.filter'],
                default='closure.model',
                required=True,
                dest="template",
                help="template to use for client side code generation")),
            (("-m", "--model"), dict(
                required=True,
                dest="models_definition",
                help="path to models definition file or package")),
            (("-o", "--output"), dict(
                default=".",
                dest="output",
                help="output path for generated code")),
            (("-n", "--namespace"), dict(
                required=True,
                dest="namespace",
                help="namespace to use with template e.g prestans.data.model")),
            (("-fn", "--filter-namespace"), dict(
                required=False,
                default=None,
                dest="filter_namespace",
                help="filter namespace to use with template e.g prestans.data.filter")),
        ]
        for flags, kwargs in option_specs:
            gen_parser.add_argument(*flags, **kwargs)
class CommandDispatcher(object):
    """Routes a parsed argument namespace (from ArgParserFactory.parse())
    to the matching command handler."""

    def __init__(self, args):
        self._args = args

    def dispatch(self):
        """Start processing the user's commands."""
        if self._args.sub_command == "gen":
            self._dispatch_gen()

    def _dispatch_gen(self):
        """Process the generate subset of commands.

        Raises exception.Base when the output directory does not exist or
        the models definition is neither a file nor an importable package.
        """
        # NOTE(review): this only checks the path is a directory, not that it
        # is actually writable — the message slightly overstates the check.
        if not os.path.isdir(self._args.output):
            raise exception.Base("%s is not a writeable directory" % self._args.output)
        if not os.path.isfile(self._args.models_definition):
            if not self.check_package_exists(self._args.models_definition):
                raise exception.Base("failed to locate package or models definitions file at: %s" % self._args.models_definition)
        from prestans.devel.gen import Preplate
        preplate = Preplate(
            template_type=self._args.template,
            models_definition=self._args.models_definition,
            namespace=self._args.namespace,
            filter_namespace=self._args.filter_namespace,
            output_directory=self._args.output)
        preplate.run()

    @classmethod
    def check_package_exists(cls, package):
        """Return True when `package` names an importable module/package."""
        # `import importlib` alone does not guarantee the `importlib.util`
        # submodule is loaded; import it explicitly before using find_spec.
        import importlib.util
        package_spec = importlib.util.find_spec(name=package)
        return package_spec is not None
| StarcoderdataPython |
12843644 | import pytest
@pytest.fixture
def star_quality_metric(pipeline, analysis_step_run, bam_file):
    """A minimal schema-version-2 STAR quality metric document for upgrade tests."""
    metric = {}
    metric['status'] = "finished"
    metric['pipeline'] = pipeline['uuid']
    metric['step_run'] = analysis_step_run['uuid']
    metric['schema_version'] = '2'
    metric['quality_metric_of'] = [bam_file['uuid']]
    return metric
def test_star_quality_metric_upgrade(registry, star_quality_metric,
                                     bam_file, lab, award):
    """Upgrading v2 -> v3 should attach the submitting lab and award."""
    from snovault import UPGRADER
    upgrader = registry[UPGRADER]
    upgraded = upgrader.upgrade(
        'star_quality_metric', star_quality_metric, registry=registry,
        current_version='2', target_version='3')
    assert upgraded['lab'] == lab['@id']
    assert upgraded['award'] == award['@id']
| StarcoderdataPython |
1719964 | <gh_stars>0
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""=================================================================
@Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3
@File : LC-0118-Pascals-Triangle.py
@Author : [YuweiYin](https://github.com/YuweiYin)
@Date : 2022-02-15
=================================================================="""
import sys
import time
from typing import List
# import collections
"""
LeetCode - 0118 - (Easy) - Pascal's Triangle
https://leetcode.com/problems/pascals-triangle/
Description & Requirement:
Given an integer numRows, return the first numRows of Pascal's triangle.
In Pascal's triangle, each number is the sum of the two numbers directly above it.
Example 1:
Input: numRows = 5
Output: [[1],[1,1],[1,2,1],[1,3,3,1],[1,4,6,4,1]]
Example 2:
Input: numRows = 1
Output: [[1]]
Constraints:
1 <= numRows <= 30
"""
class Solution:
    def generate(self, numRows: int) -> List[List[int]]:
        """Return the first numRows rows of Pascal's triangle."""
        # exception case
        assert isinstance(numRows, int) and numRows > 0
        if numRows == 1:
            return [[1]]
        if numRows == 2:
            return [[1], [1, 1]]
        # main method: (from the ends to the center, 1-Dim Dynamic Programming)
        return self._generate(numRows)

    def _generate(self, numRows: int) -> List[List[int]]:
        """Build rows 3..numRows, filling each row's left half from the
        previous row and mirroring it onto the right half."""
        assert isinstance(numRows, int) and numRows > 2
        triangle = [[1], [1, 1]]
        for _ in range(numRows - 2):
            prev = triangle[-1]
            width = len(prev) + 1
            # Both ends are 1; interior cells are overwritten below.
            row = [1] + [0] * (width - 2) + [1]
            for i in range(1, (width >> 1) + 1):
                row[i] = prev[i - 1] + prev[i]          # sum of the two parents
                row[width - 1 - i] = row[i]             # mirror by symmetry
            triangle.append(row)
        return triangle
def main():
    """Run Solution.generate on a sample input and report the elapsed time."""
    # Example 1: Output: [[1],[1,1],[1,2,1],[1,3,3,1],[1,4,6,4,1]]
    numRows = 5
    # Example 2: Output: [[1]]
    # numRows = 1
    # Example 3: Output:
    # numRows = 30
    # init instance
    solver = Solution()
    # run & time
    t_start = time.process_time()
    ans = solver.generate(numRows)
    t_end = time.process_time()
    # show answer
    print('\nAnswer:')
    print(ans)
    # show time consumption
    print('Running Time: %.5f ms' % ((t_end - t_start) * 1000))


if __name__ == "__main__":
    sys.exit(main())
| StarcoderdataPython |
3212273 | <filename>12 - Esfera.py<gh_stars>0
# Read a radius from stdin and print the sphere volume to three decimals.
R = float(input())
PI = 3.14159
# V = (4/3) * pi * R^3
VOLUME = (4 / 3.0) * PI * (R ** 3)
print(f'VOLUME = {VOLUME:.3f}')
242954 | <filename>lightcone_resample/find_galaxies.py
#!/usr/bin/env python2.7
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as clr
import dtk
import h5py
import sys
import time
from numpy.random import normal
import pandas as pd
def get_hfiles(fname, healpix_pixels):
    """Open the HDF5 catalog file(s) for the requested healpix pixels.

    `fname` may contain '#healpix#' and '#z_range#' placeholders; an empty
    pixel list falls back to a single substitution with ''.
    """
    if len(healpix_pixels) == 0:
        healpix_pixels = ['']
    opened = []
    for pixel in healpix_pixels:
        print(pixel)
        per_pixel = fname.replace('#healpix#', str(pixel))
        if "#z_range#" in fname:
            # One file per redshift shell.
            for z_range in ["0_1", "1_2", "2_3"]:
                opened.append(h5py.File(per_pixel.replace("#z_range#", z_range), 'r'))
        else:
            opened.append(h5py.File(per_pixel, 'r'))
    return opened
def get_val(hfiles, var_name, remove_nan=None):
    """Concatenate 'galaxyProperties/<var_name>' across all open catalog files.

    If `remove_nan` is given, non-finite entries in the result are replaced
    by that value.
    """
    sub_result = []
    for hfile in hfiles:
        # h5py removed `Dataset.value` in 3.0; `dataset[()]` reads the
        # full dataset and is the supported equivalent.
        sub_result.append(hfile['galaxyProperties/' + var_name][()])
    result = np.concatenate(sub_result)
    if remove_nan is not None:
        result[~np.isfinite(result)] = remove_nan
    return result
def get_mag(hfiles, filter_type, frame, band):
    """Load a magnitude column for (filter_type, frame, band).

    filter_type 'model' selects the rest-frame extincted SDSS absolute
    magnitude (NaNs replaced by -14); anything else selects the dust-atlas
    magnitude for the given filter set.
    """
    obs_template = '{filter_type}_filters/magnitude:{filter_type}_{band}:{frame}:dustAtlas'
    model_template = 'baseDC2/restframe_extincted_sdss_abs_mag{band}'
    remove_nan = None
    if frame == 'obs':
        frame = 'observed'
    if filter_type == 'model':
        assert frame == 'rest'
        var_name = model_template.format(band=band)
        remove_nan = -14
    else:
        var_name = obs_template.format(filter_type=filter_type, frame=frame, band=band)
    return get_val(hfiles, var_name, remove_nan=remove_nan)
if __name__ == "__main__":
    # Usage: find_galaxies.py <catalog filename pattern> [healpix pixels...]
    fname = sys.argv[1]
    healpix_pixels = sys.argv[2:]
    hfiles = get_hfiles(fname, healpix_pixels)
    print(len(hfiles))
    # Candidate target positions; the second assignment below overwrites the
    # first, so only the "Tricia" coordinates are actually used.
    #Slack
    target_ra, target_dec = 54.37508357,-32.40874507
    #Tricia
    target_ra, target_dec = 54.37836208,-32.40704645
    # Lensed cluster pos
    # target_ra, target_dec = 54.3827, -32.4219
    tolerance = 0.1  # search box half-width in degrees
    print("\n")
    print("we are trying to find the above")
    print("RA:{} Dec:{}".format(target_ra, target_dec))
    # Toggle between unlensed ("true") and lensed sky positions.
    pos_true = True
    if pos_true:
        ra = get_val(hfiles, 'ra_true')
        dec = get_val(hfiles, 'dec_true')
    else:
        ra = get_val(hfiles, 'ra')
        dec = get_val(hfiles, 'dec')
    redshift = get_val(hfiles, 'redshift')
    halo_id = get_val(hfiles,'uniqueHaloID')
    mass = get_val(hfiles, 'hostHaloMass')
    isCentral = get_val(hfiles, 'isCentral')
    # x = get_val(hfiles, "x")
    # y = get_val(hfiles, "y")
    mag = get_mag(hfiles, "LSST", "obs", "r")
    mag_i = get_mag(hfiles, "LSST", "obs", "i")
    mag_cut = 25
    # Selection masks: position box plus magnitude cut.
    slct1 = np.abs(ra - target_ra) < tolerance
    slct2 = np.abs(dec - target_dec) < tolerance
    slct3 = mag < mag_cut
    # NOTE(review): slct4 and slct_central are computed but never used below;
    # slct_central compares isCentral with itself and is trivially all-True.
    slct4 = halo_id == 106387004279
    slct_central = isCentral == isCentral
    slct = slct1 & slct2 & slct3
    plt.figure()
    #plt.scatter(ra[slct],dec[slct], s = (28-mag), marker='o', alpha = 0.3)
    plt.scatter(ra[slct],dec[slct], marker='o', c=isCentral[slct], alpha = 1.0, label='galaxies')
    cb = plt.colorbar()
    cb.set_label('central')
    # Mark the target position with a red cross.
    plt.plot(target_ra, target_dec, 'rx')
    plt.title("Mag_r < {}".format(mag_cut))
    if pos_true:
        plt.xlabel('Ra True')
        plt.ylabel('Dec True')
    else:
        plt.xlabel('Ra Lesned')
        plt.ylabel('Dec Lensed')
    plt.tight_layout()
    # Second panel: redshift distribution of the selected galaxies.
    plt.figure()
    plt.scatter(ra[slct], redshift[slct], c=isCentral[slct], cmap='coolwarm')
    plt.ylabel('redshift')
    plt.xlabel('ra')
    # plt.figure()
    # plt.scatter(ra[slct], y[slct], alpha=0.3)
    # plt.axvline(x=target_ra,ls='--', c='r')
    # plt.figure()
    # plt.scatter(x[slct], dec[slct], alpha=0.3)
    # plt.axhline(y=target_dec, ls='--', c='r')
    print(halo_id[slct])
    print(mass[slct])
    print("showing..")
    # Dump the selected galaxies to CSV for offline inspection.
    pd_dict = {'redshift':redshift[slct],
               'mag_r': mag[slct],
               'mag_i': mag_i[slct],
               'ra': ra[slct],
               'dec': dec[slct],
               'central': isCentral[slct],
               'halo_id': halo_id[slct],
               }
    df = pd.DataFrame.from_dict(pd_dict,)
    df.to_csv("~/tmp/weird_cluster.csv", index=False)
    plt.show()
| StarcoderdataPython |
4952746 | <gh_stars>1-10
"""
Raw Python version of jupyter notebooks from fast.ai course
"""
__author__ = "<NAME>"
from fastai import *
from fastai.vision import *
def create_own_dataset_from_google():
| StarcoderdataPython |
1998593 | # encoding:utf-8
import sys
sys.path.extend(["../../","../","./"])
import time
import torch.optim.lr_scheduler
import torch.nn as nn
import random
import argparse
from driver.Config import *
from driver.Model import *
from driver.Labeler import *
from data.Dataloader import *
import pickle
import os
import re
from driver.BertTokenHelper import BertTokenHelper
from driver.BertModel import BertExtractor
from driver.language_mlp import LanguageMLP
from driver.modeling import BertModel as AdapterBERTModel
from driver.modeling import BertConfig
from transformers import AdamW, get_linear_schedule_with_warmup
from driver.adapterPGNBERT import AdapterPGNBertModel
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"
def train(data, dev_data, test_data, labeler, vocab, config, bert, language_embedder):
    """Train the SRL labeler together with the language embedder and the
    adapter-PGN BERT encoder, periodically evaluating on dev and test and
    saving the labeler model whenever the dev exact-F1 improves.

    data / dev_data / test_data: datasets consumed by data_iter().
    labeler: wrapper exposing .model, .forward(), .compute_loss().
    config: run configuration (batch sizes, update_every, validate_every, ...).
    """
    # Three optimizers: config-driven Adam for the labeler and the language
    # embedder, and AdamW with a small fixed LR (5e-6) for BERT fine-tuning.
    optimizer = Optimizer(filter(lambda p: p.requires_grad, labeler.model.parameters()), config)
    optimizer_lang = Optimizer(filter(lambda p: p.requires_grad, language_embedder.parameters()), config)
    optimizer_bert = AdamW(filter(lambda p: p.requires_grad, bert.parameters()), lr=5e-6, eps=1e-8)
    batch_num = int(np.ceil(len(data) / float(config.train_batch_size)))
    # scheduler_bert = WarmupLinearSchedule(optimizer_bert, warmup_steps=0, t_total=config.train_iters * batch_num)
    # NOTE(review): scheduler_bert is created but .step() is never called in
    # this function, so the BERT learning rate stays constant — confirm intent.
    scheduler_bert = get_linear_schedule_with_warmup(optimizer_bert, num_warmup_steps=0,
                                                     num_training_steps=config.train_iters * batch_num)
    global_step = 0
    best_score = -1
    # (batch_num is recomputed here with the same value as above.)
    batch_num = int(np.ceil(len(data) / float(config.train_batch_size)))
    for iter in range(config.train_iters):  # NOTE: `iter` shadows the builtin
        total_stats = Statistics()
        print('Iteration: ' + str(iter))
        batch_iter = 0
        for onebatch in data_iter(data, config.train_batch_size, True):
            words, extwords, predicts, inmasks, labels, outmasks, \
                bert_indices_tensor, bert_segments_tensor, bert_pieces_tensor, lang_ids = \
                batch_data_variable(onebatch, vocab)
            labeler.model.train()
            language_embedder.train()
            bert.train()
            if config.use_cuda:
                bert_indices_tensor = bert_indices_tensor.cuda()
                bert_segments_tensor = bert_segments_tensor.cuda()
                bert_pieces_tensor = bert_pieces_tensor.cuda()
            # The language embedding conditions the adapter-PGN BERT encoder.
            lang_embedding = language_embedder(lang_ids)
            bert_hidden = bert(input_ids=bert_indices_tensor, token_type_ids=bert_segments_tensor, bert_pieces=bert_pieces_tensor, lang_embedding=lang_embedding)
            labeler.forward(words, extwords, predicts, inmasks, bert_hidden)
            loss, stat = labeler.compute_loss(labels, outmasks)
            # Scale for gradient accumulation over update_every mini-batches.
            loss = loss / config.update_every
            loss.backward()
            total_stats.update(stat)
            total_stats.print_out(global_step, iter, batch_iter, batch_num)
            batch_iter += 1
            if batch_iter % config.update_every == 0 or batch_iter == batch_num:
                # Only the labeler's gradients are clipped; then all three
                # optimizers step and the accumulated gradients are cleared.
                nn.utils.clip_grad_norm_(filter(lambda p: p.requires_grad, labeler.model.parameters()), \
                                         max_norm=config.clip)
                optimizer.step()
                optimizer_lang.step()
                optimizer_bert.step()
                labeler.model.zero_grad()
                optimizer_lang.zero_grad()
                optimizer_bert.zero_grad()
                global_step += 1
            if batch_iter % config.validate_every == 0 or batch_iter == batch_num:
                # ---- dev evaluation: exact / binary / proportional span
                # counts, each split into overall / agent / target ----
                gold_num, predict_num, correct_num, \
                    gold_agent_num, predict_agent_num, correct_agent_num, \
                    gold_target_num, predict_target_num, correct_target_num, \
                    binary_gold_num, binary_predict_num, binary_gold_correct_num, binary_predict_correct_num, \
                    binary_gold_agent_num, binary_predict_agent_num, binary_gold_correct_agent_num, binary_predict_correct_agent_num, \
                    binary_gold_target_num, binary_predict_target_num, binary_gold_correct_target_num, binary_predict_correct_target_num, \
                    prop_gold_num, prop_predict_num, prop_gold_correct_num, prop_predict_correct_num, \
                    prop_gold_agent_num, prop_predict_agent_num, prop_gold_correct_agent_num, prop_predict_correct_agent_num, \
                    prop_gold_target_num, prop_predict_target_num, prop_gold_correct_target_num, prop_predict_correct_target_num \
                    = evaluate(dev_data, labeler, vocab, config.target_dev_file + '.' + str(global_step))
                # F1 written as 200*c/(g+p) == harmonic mean of P and R.
                dev_score = 200.0 * correct_num / (gold_num + predict_num) if correct_num > 0 else 0.0
                print("Exact Dev: Recall = %d/%d = %.2f, Precision = %d/%d =%.2f, F-measure = %.2f" % \
                      (correct_num, gold_num, 100.0 * correct_num / gold_num if correct_num > 0 else 0.0, \
                       correct_num, predict_num, 100.0 * correct_num / predict_num if correct_num > 0 else 0.0, \
                       dev_score))
                dev_agent_score = 200.0 * correct_agent_num / (
                    gold_agent_num + predict_agent_num) if correct_agent_num > 0 else 0.0
                print("Exact Dev Agent: Recall = %d/%d = %.2f, Precision = %d/%d =%.2f, F-measure = %.2f" % \
                      (correct_agent_num, gold_agent_num,
                       100.0 * correct_agent_num / gold_agent_num if correct_agent_num > 0 else 0.0, \
                       correct_agent_num, predict_agent_num,
                       100.0 * correct_agent_num / predict_agent_num if correct_agent_num > 0 else 0.0, \
                       dev_agent_score))
                dev_target_score = 200.0 * correct_target_num / (
                    gold_target_num + predict_target_num) if correct_target_num > 0 else 0.0
                print("Exact Dev Target: Recall = %d/%d = %.2f, Precision = %d/%d =%.2f, F-measure = %.2f" % \
                      (correct_target_num, gold_target_num,
                       100.0 * correct_target_num / gold_target_num if correct_target_num > 0 else 0.0, \
                       correct_target_num, predict_target_num,
                       100.0 * correct_target_num / predict_target_num if correct_target_num > 0 else 0.0, \
                       dev_target_score))
                print()
                # Binary scores keep separate gold-side / predict-side credit.
                binary_dev_P = binary_predict_correct_num / binary_predict_num if binary_predict_num > 0 else 0.0
                binary_dev_R = binary_gold_correct_num / binary_gold_num if binary_gold_num > 0 else 0.0
                dev_binary_score = 200 * binary_dev_P * binary_dev_R / (
                    binary_dev_P + binary_dev_R) if binary_dev_P + binary_dev_R > 0 else 0.0
                print("Binary Dev: Recall = %d/%d = %.2f, Precision = %d/%d =%.2f, F-measure = %.2f" % \
                      (binary_gold_correct_num, binary_gold_num, 100.0 * binary_dev_R, \
                       binary_predict_correct_num, binary_predict_num, 100.0 * binary_dev_P, \
                       dev_binary_score))
                binary_dev_agent_P = binary_predict_correct_agent_num / binary_predict_agent_num if binary_predict_agent_num > 0 else 0.0
                binary_dev_agent_R = binary_gold_correct_agent_num / binary_gold_agent_num if binary_gold_agent_num > 0 else 0.0
                dev_binary_agent_score = 200 * binary_dev_agent_P * binary_dev_agent_R / (
                    binary_dev_agent_P + binary_dev_agent_R) if binary_dev_agent_P + binary_dev_agent_R > 0 else 0.0
                print("Binary Dev Agent: Recall = %d/%d = %.2f, Precision = %d/%d =%.2f, F-measure = %.2f" % \
                      (binary_gold_correct_agent_num, binary_gold_agent_num, 100.0 * binary_dev_agent_R, \
                       binary_predict_correct_agent_num, binary_predict_agent_num, 100.0 * binary_dev_agent_P, \
                       dev_binary_agent_score))
                binary_dev_target_P = binary_predict_correct_target_num / binary_predict_target_num if binary_predict_target_num > 0 else 0.0
                binary_dev_target_R = binary_gold_correct_target_num / binary_gold_target_num if binary_gold_target_num > 0 else 0.0
                dev_binary_target_score = 200 * binary_dev_target_P * binary_dev_target_R / (
                    binary_dev_target_P + binary_dev_target_R) if binary_dev_target_P + binary_dev_target_R > 0 else 0.0
                print("Binary Dev Target: Recall = %d/%d = %.2f, Precision = %d/%d =%.2f, F-measure = %.2f" % \
                      (binary_gold_correct_target_num, binary_gold_target_num, 100.0 * binary_dev_target_R, \
                       binary_predict_correct_target_num, binary_predict_target_num, 100.0 * binary_dev_target_P, \
                       dev_binary_target_score))
                print()
                prop_dev_P = prop_predict_correct_num / prop_predict_num if prop_predict_num > 0 else 0.0
                prop_dev_R = prop_gold_correct_num / prop_gold_num if prop_gold_num > 0 else 0.0
                dev_prop_score = 200 * prop_dev_P * prop_dev_R / (
                    prop_dev_P + prop_dev_R) if prop_dev_P + prop_dev_R > 0 else 0.0
                print("Prop Dev: Recall = %.2f/%d = %.2f, Precision = %.2f/%d =%.2f, F-measure = %.2f" % \
                      (prop_gold_correct_num, prop_gold_num, 100.0 * prop_dev_R, \
                       prop_predict_correct_num, prop_predict_num, 100.0 * prop_dev_P, \
                       dev_prop_score))
                prop_dev_agent_P = prop_predict_correct_agent_num / prop_predict_agent_num if prop_predict_agent_num > 0 else 0.0
                prop_dev_agent_R = prop_gold_correct_agent_num / prop_gold_agent_num if prop_gold_agent_num > 0 else 0.0
                dev_prop_agent_score = 200 * prop_dev_agent_P * prop_dev_agent_R / (
                    prop_dev_agent_P + prop_dev_agent_R) if prop_dev_agent_P + prop_dev_agent_R > 0 else 0.0
                print("Prop Dev Agent: Recall = %.2f/%d = %.2f, Precision = %.2f/%d =%.2f, F-measure = %.2f" % \
                      (prop_gold_correct_agent_num, prop_gold_agent_num, 100.0 * prop_dev_agent_R, \
                       prop_predict_correct_agent_num, prop_predict_agent_num, 100.0 * prop_dev_agent_P, \
                       dev_prop_agent_score))
                prop_dev_target_P = prop_predict_correct_target_num / prop_predict_target_num if prop_predict_target_num > 0 else 0.0
                prop_dev_target_R = prop_gold_correct_target_num / prop_gold_target_num if prop_gold_target_num > 0 else 0.0
                dev_prop_target_score = 200 * prop_dev_target_P * prop_dev_target_R / (
                    prop_dev_target_P + prop_dev_target_R) if prop_dev_target_P + prop_dev_target_R > 0 else 0.0
                print("Prop Dev Target: Recall = %.2f/%d = %.2f, Precision = %.2f/%d =%.2f, F-measure = %.2f" % \
                      (prop_gold_correct_target_num, prop_gold_target_num, 100.0 * prop_dev_target_R, \
                       prop_predict_correct_target_num, prop_predict_target_num, 100.0 * prop_dev_target_P, \
                       dev_prop_target_score))
                print()
                '''
                Test
                '''
                test_gold_num, test_predict_num, test_correct_num, \
                    test_gold_agent_num, test_predict_agent_num, test_correct_agent_num, \
                    test_gold_target_num, test_predict_target_num, test_correct_target_num, \
                    test_binary_gold_num, test_binary_predict_num, test_binary_gold_correct_num, test_binary_predict_correct_num, \
                    test_binary_gold_agent_num, test_binary_predict_agent_num, test_binary_gold_correct_agent_num, test_binary_predict_correct_agent_num, \
                    test_binary_gold_target_num, test_binary_predict_target_num, test_binary_gold_correct_target_num, test_binary_predict_correct_target_num, \
                    test_prop_gold_num, test_prop_predict_num, test_prop_gold_correct_num, test_prop_predict_correct_num, \
                    test_prop_gold_agent_num, test_prop_predict_agent_num, test_prop_gold_correct_agent_num, test_prop_predict_correct_agent_num, \
                    test_prop_gold_target_num, test_prop_predict_target_num, test_prop_gold_correct_target_num, test_prop_predict_correct_target_num \
                    = evaluate(test_data, labeler, vocab, config.target_test_file + '.' + str(global_step))
                test_score = 200.0 * test_correct_num / (test_gold_num + test_predict_num) \
                    if test_correct_num > 0 else 0.0
                print("Exact Test: Recall = %d/%d = %.2f, Precision = %d/%d =%.2f, F-measure = %.2f" % \
                      (test_correct_num, test_gold_num, \
                       100.0 * test_correct_num / test_gold_num if test_correct_num > 0 else 0.0, \
                       test_correct_num, test_predict_num, \
                       100.0 * test_correct_num / test_predict_num if test_correct_num > 0 else 0.0, \
                       test_score))
                test_agent_score = 200.0 * test_correct_agent_num / (
                    test_gold_agent_num + test_predict_agent_num) if test_correct_agent_num > 0 else 0.0
                print("Exact Test Agent: Recall = %d/%d = %.2f, Precision = %d/%d =%.2f, F-measure = %.2f" % \
                      (test_correct_agent_num, test_gold_agent_num,
                       100.0 * test_correct_agent_num / test_gold_agent_num if test_correct_agent_num > 0 else 0.0, \
                       test_correct_agent_num, test_predict_agent_num,
                       100.0 * test_correct_agent_num / test_predict_agent_num if test_correct_agent_num > 0 else 0.0, \
                       test_agent_score))
                test_target_score = 200.0 * test_correct_target_num / (
                    test_gold_target_num + test_predict_target_num) if test_correct_target_num > 0 else 0.0
                print("Exact Test Target: Recall = %d/%d = %.2f, Precision = %d/%d =%.2f, F-measure = %.2f" % \
                      (test_correct_target_num, test_gold_target_num,
                       100.0 * test_correct_target_num / test_gold_target_num if test_correct_target_num > 0 else 0.0, \
                       test_correct_target_num, test_predict_target_num,
                       100.0 * test_correct_target_num / test_predict_target_num if test_correct_target_num > 0 else 0.0, \
                       test_target_score))
                print()
                binary_test_P = test_binary_predict_correct_num / test_binary_predict_num if test_binary_predict_num > 0 else 0.0
                binary_test_R = test_binary_gold_correct_num / test_binary_gold_num if test_binary_gold_num > 0 else 0.0
                binary_test_score = 200 * binary_test_P * binary_test_R / (
                    binary_test_P + binary_test_R) if binary_test_P + binary_test_R > 0 else 0.0
                print("Binary Test: Recall = %d/%d = %.2f, Precision = %d/%d =%.2f, F-measure = %.2f" % \
                      (test_binary_gold_correct_num, test_binary_gold_num, 100.0 * binary_test_R, \
                       test_binary_predict_correct_num, test_binary_predict_num, 100.0 * binary_test_P, \
                       binary_test_score))
                binary_test_agent_P = test_binary_predict_correct_agent_num / test_binary_predict_agent_num if test_binary_predict_agent_num > 0 else 0.0
                binary_test_agent_R = test_binary_gold_correct_agent_num / test_binary_gold_agent_num if test_binary_gold_agent_num > 0 else 0.0
                binary_test_agent_score = 200 * binary_test_agent_P * binary_test_agent_R / (
                    binary_test_agent_P + binary_test_agent_R) if binary_test_agent_P + binary_test_agent_R > 0 else 0.0
                print("Binary Test Agent: Recall = %d/%d = %.2f, Precision = %d/%d =%.2f, F-measure = %.2f" % \
                      (test_binary_gold_correct_agent_num, test_binary_gold_agent_num, 100.0 * binary_test_agent_R, \
                       test_binary_predict_correct_agent_num, test_binary_predict_agent_num,
                       100.0 * binary_test_agent_P, \
                       binary_test_agent_score))
                binary_test_target_P = test_binary_predict_correct_target_num / test_binary_predict_target_num if test_binary_predict_target_num > 0 else 0.0
                binary_test_target_R = test_binary_gold_correct_target_num / test_binary_gold_target_num if test_binary_gold_target_num > 0 else 0.0
                binary_test_target_score = 200 * binary_test_target_P * binary_test_target_R / (
                    binary_test_target_P + binary_test_target_R) if binary_test_target_P + binary_test_target_R > 0 else 0.0
                print("Binary Test Target: Recall = %d/%d = %.2f, Precision = %d/%d =%.2f, F-measure = %.2f" % \
                      (test_binary_gold_correct_target_num, test_binary_gold_target_num, 100.0 * binary_test_target_R, \
                       test_binary_predict_correct_target_num, test_binary_predict_target_num,
                       100.0 * binary_test_target_P, \
                       binary_test_target_score))
                print()
                prop_test_P = test_prop_predict_correct_num / test_prop_predict_num if test_prop_predict_num > 0 else 0.0
                prop_test_R = test_prop_gold_correct_num / test_prop_gold_num if test_prop_gold_num > 0 else 0.0
                prop_test_score = 200 * prop_test_P * prop_test_R / (
                    prop_test_P + prop_test_R) if prop_test_P + prop_test_R > 0 else 0.0
                print("Prop Test: Recall = %.2f/%d = %.2f, Precision = %.2f/%d =%.2f, F-measure = %.2f" % \
                      (test_prop_gold_correct_num, test_prop_gold_num, 100.0 * prop_test_R, \
                       test_prop_predict_correct_num, test_prop_predict_num, 100.0 * prop_test_P, \
                       prop_test_score))
                prop_test_agent_P = test_prop_predict_correct_agent_num / test_prop_predict_agent_num if test_prop_predict_agent_num > 0 else 0.0
                prop_test_agent_R = test_prop_gold_correct_agent_num / test_prop_gold_agent_num if test_prop_gold_agent_num > 0 else 0.0
                prop_test_agent_score = 200 * prop_test_agent_P * prop_test_agent_R / (
                    prop_test_agent_P + prop_test_agent_R) if prop_test_agent_P + prop_test_agent_R > 0 else 0.0
                print("prop Test Agent: Recall = %.2f/%d = %.2f, Precision = %.2f/%d =%.2f, F-measure = %.2f" % \
                      (test_prop_gold_correct_agent_num, test_prop_gold_agent_num, 100.0 * prop_test_agent_R, \
                       test_prop_predict_correct_agent_num, test_prop_predict_agent_num,
                       100.0 * prop_test_agent_P, \
                       prop_test_agent_score))
                prop_test_target_P = test_prop_predict_correct_target_num / test_prop_predict_target_num if test_prop_predict_target_num > 0 else 0.0
                prop_test_target_R = test_prop_gold_correct_target_num / test_prop_gold_target_num if test_prop_gold_target_num > 0 else 0.0
                prop_test_target_score = 200 * prop_test_target_P * prop_test_target_R / (
                    prop_test_target_P + prop_test_target_R) if prop_test_target_P + prop_test_target_R > 0 else 0.0
                print("Prop Test Target: Recall = %.2f/%d = %.2f, Precision = %.2f/%d =%.2f, F-measure = %.2f" % \
                      (test_prop_gold_correct_target_num, test_prop_gold_target_num,
                       100.0 * prop_test_target_R, \
                       test_prop_predict_correct_target_num, test_prop_predict_target_num,
                       100.0 * prop_test_target_P, \
                       prop_test_target_score))
                # Model selection on dev exact-F1; checkpoint only once the
                # configured warm-up iteration (save_after) has passed.
                if dev_score > best_score:
                    print("Exceed best score: history = %.2f, current = %.2f" %(best_score, dev_score))
                    best_score = dev_score
                    if config.save_after > 0 and iter > config.save_after:
                        torch.save(labeler.model.state_dict(), config.save_model_path)
def evaluate(data, labeler, vocab, outputFile):
    """Run the labeler over `data`, write predicted SRL output to
    `outputFile`, and return the accumulated exact / binary / proportional
    span counts (overall, agent, target) for the caller to turn into P/R/F.

    NOTE(review): `language_embedder`, `bert` and `config` are read from
    module-level globals here (they are parameters of train()) — confirm the
    driver defines them before calling evaluate() standalone.
    NOTE(review): no torch.no_grad() wrapper is used, so gradients are still
    tracked during evaluation — confirm whether that is intended.
    """
    start = time.time()
    labeler.model.eval()
    language_embedder.eval()
    bert.eval()
    output = open(outputFile, 'w', encoding='utf-8')
    # Accumulators: (gold, predicted, correct) counts; the binary and
    # proportional variants track gold-side and predict-side credit separately.
    total_gold_entity_num, total_predict_entity_num, total_correct_entity_num = 0, 0, 0
    total_gold_agent_entity_num, total_predict_agent_entity_num, total_correct_agent_entity_num = 0, 0, 0
    total_gold_target_entity_num, total_predict_target_entity_num, total_correct_target_entity_num = 0, 0, 0
    binary_total_gold_entity_num, binary_total_predict_entity_num, binary_gold_total_correct_entity_num, binary_predict_total_correct_entity_num = 0, 0, 0, 0
    binary_total_gold_agent_entity_num, binary_total_predict_agent_entity_num, binary_gold_total_correct_agent_entity_num, binary_predict_total_correct_agent_entity_num = 0, 0, 0, 0
    binary_total_gold_target_entity_num, binary_total_predict_target_entity_num, binary_gold_total_correct_target_entity_num, binary_predict_total_correct_target_entity_num = 0, 0, 0, 0
    prop_total_gold_entity_num, prop_total_predict_entity_num, prop_gold_total_correct_entity_num, prop_predict_total_correct_entity_num = 0, 0, 0, 0
    prop_total_gold_agent_entity_num, prop_total_predict_agent_entity_num, prop_gold_total_correct_agent_entity_num, prop_predict_total_correct_agent_entity_num = 0, 0, 0, 0
    prop_total_gold_target_entity_num, prop_total_predict_target_entity_num, prop_gold_total_correct_target_entity_num, prop_predict_total_correct_target_entity_num = 0, 0, 0, 0
    for onebatch in data_iter(data, config.test_batch_size, False, False):
        words, extwords, predicts, inmasks, labels, outmasks, \
            bert_indices_tensor, bert_segments_tensor, bert_pieces_tensor, lang_ids = \
            batch_data_variable(onebatch, vocab)
        if config.use_cuda:
            bert_indices_tensor = bert_indices_tensor.cuda()
            bert_segments_tensor = bert_segments_tensor.cuda()
            bert_pieces_tensor = bert_pieces_tensor.cuda()
        # `count` indexes the gold sentence in `onebatch` matching each result.
        count = 0
        lang_embedding = language_embedder(lang_ids)
        bert_hidden = bert(input_ids=bert_indices_tensor, token_type_ids=bert_segments_tensor, bert_pieces=bert_pieces_tensor, lang_embedding=lang_embedding)
        predict_labels = labeler.label(words, extwords, predicts, inmasks, bert_hidden)
        for result in batch_variable_srl(onebatch, predict_labels, vocab):
            # Write the predicted structure, then score it against gold with
            # the exact, binary and proportional matching criteria.
            printSRL(output, result)
            gold_entity_num, predict_entity_num, correct_entity_num, \
                gold_agent_entity_num, predict_agent_entity_num, correct_agent_entity_num, \
                gold_target_entity_num, predict_target_entity_num, correct_target_entity_num = evalSRLExact(onebatch[count],
                                                                                                            result)
            total_gold_entity_num += gold_entity_num
            total_predict_entity_num += predict_entity_num
            total_correct_entity_num += correct_entity_num
            total_gold_agent_entity_num += gold_agent_entity_num
            total_predict_agent_entity_num += predict_agent_entity_num
            total_correct_agent_entity_num += correct_agent_entity_num
            total_gold_target_entity_num += gold_target_entity_num
            total_predict_target_entity_num += predict_target_entity_num
            total_correct_target_entity_num += correct_target_entity_num
            binary_gold_entity_num, binary_predict_entity_num, binary_gold_correct_entity_num, binary_predict_correct_entity_num, \
                binary_gold_agent_entity_num, binary_predict_agent_entity_num, binary_gold_correct_agent_entity_num, binary_predict_correct_agent_entity_num, \
                binary_gold_target_entity_num, binary_predict_target_entity_num, binary_gold_correct_target_entity_num, binary_predict_correct_target_entity_num = evalSRLBinary(
                onebatch[count], result)
            binary_total_gold_entity_num += binary_gold_entity_num
            binary_total_predict_entity_num += binary_predict_entity_num
            binary_gold_total_correct_entity_num += binary_gold_correct_entity_num
            binary_predict_total_correct_entity_num += binary_predict_correct_entity_num
            binary_total_gold_agent_entity_num += binary_gold_agent_entity_num
            binary_total_predict_agent_entity_num += binary_predict_agent_entity_num
            binary_gold_total_correct_agent_entity_num += binary_gold_correct_agent_entity_num
            binary_predict_total_correct_agent_entity_num += binary_predict_correct_agent_entity_num
            binary_total_gold_target_entity_num += binary_gold_target_entity_num
            binary_total_predict_target_entity_num += binary_predict_target_entity_num
            binary_gold_total_correct_target_entity_num += binary_gold_correct_target_entity_num
            binary_predict_total_correct_target_entity_num += binary_predict_correct_target_entity_num
            prop_gold_entity_num, prop_predict_entity_num, prop_gold_correct_entity_num, prop_predict_correct_entity_num, \
                prop_gold_agent_entity_num, prop_predict_agent_entity_num, prop_gold_correct_agent_entity_num, prop_predict_correct_agent_entity_num, \
                prop_gold_target_entity_num, prop_predict_target_entity_num, prop_gold_correct_target_entity_num, prop_predict_correct_target_entity_num = evalSRLProportional(
                onebatch[count], result)
            prop_total_gold_entity_num += prop_gold_entity_num
            prop_total_predict_entity_num += prop_predict_entity_num
            prop_gold_total_correct_entity_num += prop_gold_correct_entity_num
            prop_predict_total_correct_entity_num += prop_predict_correct_entity_num
            prop_total_gold_agent_entity_num += prop_gold_agent_entity_num
            prop_total_predict_agent_entity_num += prop_predict_agent_entity_num
            prop_gold_total_correct_agent_entity_num += prop_gold_correct_agent_entity_num
            prop_predict_total_correct_agent_entity_num += prop_predict_correct_agent_entity_num
            prop_total_gold_target_entity_num += prop_gold_target_entity_num
            prop_total_predict_target_entity_num += prop_predict_target_entity_num
            prop_gold_total_correct_target_entity_num += prop_gold_correct_target_entity_num
            prop_predict_total_correct_target_entity_num += prop_predict_correct_target_entity_num
            count += 1
    output.close()
    #R = np.float64(total_correct_entity_num) * 100.0 / np.float64(total_gold_entity_num)
    #P = np.float64(total_correct_entity_num) * 100.0 / np.float64(total_predict_entity_num)
    #F = np.float64(total_correct_entity_num) * 200.0 / np.float64(total_gold_entity_num + total_predict_entity_num)
    end = time.time()
    during_time = float(end - start)
    print("sentence num: %d, parser time = %.2f " % (len(data), during_time))
    return total_gold_entity_num, total_predict_entity_num, total_correct_entity_num, \
        total_gold_agent_entity_num, total_predict_agent_entity_num, total_correct_agent_entity_num, \
        total_gold_target_entity_num, total_predict_target_entity_num, total_correct_target_entity_num, \
        binary_total_gold_entity_num, binary_total_predict_entity_num, binary_gold_total_correct_entity_num, binary_predict_total_correct_entity_num, \
        binary_total_gold_agent_entity_num, binary_total_predict_agent_entity_num, binary_gold_total_correct_agent_entity_num, binary_predict_total_correct_agent_entity_num, \
        binary_total_gold_target_entity_num, binary_total_predict_target_entity_num, binary_gold_total_correct_target_entity_num, binary_predict_total_correct_target_entity_num, \
        prop_total_gold_entity_num, prop_total_predict_entity_num, prop_gold_total_correct_entity_num, prop_predict_total_correct_entity_num, \
        prop_total_gold_agent_entity_num, prop_total_predict_agent_entity_num, prop_gold_total_correct_agent_entity_num, prop_predict_total_correct_agent_entity_num, \
        prop_total_gold_target_entity_num, prop_total_predict_target_entity_num, prop_gold_total_correct_target_entity_num, prop_predict_total_correct_target_entity_num
class Optimizer:
    """Adam optimizer paired with a step-decay learning-rate scheduler.

    The learning rate starts at ``config.learning_rate`` and is multiplied
    by ``config.decay`` once every ``config.decay_steps`` calls to
    :meth:`step` (via a ``LambdaLR`` schedule).
    """

    def __init__(self, parameter, config):
        self.optim = torch.optim.Adam(
            parameter,
            lr=config.learning_rate,
            betas=(config.beta_1, config.beta_2),
            eps=config.epsilon,
        )
        rate, every = config.decay, config.decay_steps
        # Multiplicative factor applied to the base LR: rate ** (epoch // every).
        self.scheduler = torch.optim.lr_scheduler.LambdaLR(
            self.optim, lr_lambda=lambda epoch: rate ** (epoch // every)
        )

    def step(self):
        """Apply gradients, advance the LR schedule, then clear gradients."""
        self.optim.step()
        self.schedule()
        self.optim.zero_grad()

    def schedule(self):
        """Advance the learning-rate schedule by one epoch."""
        self.scheduler.step()

    def zero_grad(self):
        """Clear accumulated gradients on all managed parameters."""
        self.optim.zero_grad()

    @property
    def lr(self):
        """Current learning rate(s) as reported by the scheduler."""
        return self.scheduler.get_lr()
if __name__ == '__main__':
    # Fix all RNG seeds (python / numpy / torch CPU+CUDA) for reproducibility.
    random.seed(666)
    np.random.seed(666)
    torch.cuda.manual_seed(666)
    torch.manual_seed(666)
    ### gpu
    gpu = torch.cuda.is_available()
    print("GPU available: ", gpu)
    print("CuDNN: \n", torch.backends.cudnn.enabled)
    argparser = argparse.ArgumentParser()
    argparser.add_argument('--config_file', default='expdata/opinion.cfg')
    argparser.add_argument('--thread', default=4, type=int, help='thread num')
    argparser.add_argument('--use-cuda', action='store_true', default=False)
    args, extra_args = argparser.parse_known_args()
    config = Configurable(args.config_file, extra_args)
    # Build the vocabulary from source+target training corpora, load the
    # pretrained embeddings, and persist the vocab for later inference runs.
    vocab = creat_vocab(config.source_train_file, config.target_train_file, config.min_occur_count)
    vec = vocab.load_pretrained_embs(config.pretrained_embeddings_file)
    pickle.dump(vocab, open(config.save_vocab_path, 'wb'))
    # NOTE(review): args/config are parsed a second time here — looks redundant
    # with the parse above; confirm whether this re-parse is intentional.
    args, extra_args = argparser.parse_known_args()
    config = Configurable(args.config_file, extra_args)
    torch.set_num_threads(args.thread)
    config.use_cuda = False
    if gpu and args.use_cuda: config.use_cuda = True
    print("\nGPU using status: ", config.use_cuda)
    # Model components: language-feature embedder, the SRL model named by the
    # config (instantiated via eval on config.model), and an adapter-BERT encoder.
    language_embedder = LanguageMLP(config=config)
    model = eval(config.model)(vocab, config, vec)
    # bert = BertExtractor(config)
    bert_config = BertConfig.from_json_file(config.bert_config_path)
    bert_config.use_adapter = config.use_adapter
    bert_config.use_language_emb = config.use_language_emb
    bert_config.num_adapters = config.num_adapters
    bert_config.adapter_size = config.adapter_size
    bert_config.language_emb_size = config.language_emb_size
    bert_config.num_language_features = config.language_features
    bert_config.nl_project = config.nl_project
    bert = AdapterBERTModel.from_pretrained(config.bert_path, config=bert_config)
    if config.use_cuda:
        torch.backends.cudnn.enabled = True
        model = model.cuda()
        bert = bert.cuda()
        language_embedder = language_embedder.cuda()
    labeler = SRLLabeler(model)
    bert_token = BertTokenHelper(config.bert_path)
    # Languages seen in training ("in") vs. held-out ("out") language lists.
    in_language_list = config.in_langs
    out_language_list = config.out_langs
    lang_dic = {}
    lang_dic['in'] = in_language_list
    lang_dic['out'] = out_language_list
    # Training data mixes the source- and target-language corpora; dev/test
    # come from the target language only.
    source_data = read_corpus(config.source_train_file, bert_token, lang_dic)
    target_data = read_corpus(config.target_train_file, bert_token, lang_dic)
    data = source_data + target_data
    dev_data = read_corpus(config.target_dev_file, bert_token, lang_dic)
    test_data = read_corpus(config.target_test_file, bert_token, lang_dic)
    train(data, dev_data, test_data, labeler, vocab, config, bert, language_embedder)
| StarcoderdataPython |
1816347 | <gh_stars>0
#!/usr/bin/python3
# Cluster-aware wrapper around Proxmox's `pct` CLI: resolves which cluster
# node owns a container and forwards the command there over ssh.
import sys
import re
import json
import socket
import os
import subprocess
from pathlib import Path

executable = Path(sys.argv[0]).name

# Must be root: pct itself and ssh-as-root both require it.
if os.getuid() != 0:
    print(f"{executable} requires root access")
    exit(1)
# /etc/pve is a cluster filesystem mount; without it we cannot see the vmlist.
if "/etc/pve" not in Path("/proc/mounts").read_text():
    print("ERROR: /etc/pve is not mounted")
    exit(1)

# Build node set and vmid -> node mapping from the shared cluster vmlist.
nodes = set()
vms = {}
with open("/etc/pve/.vmlist") as f:
    vmlist = json.load(f)
    for vm, info in vmlist["ids"].items():
        nodes.add(info["node"])
        vms[vm] = info["node"]
if len(nodes) == 0:
    print("ERROR: no nodes present")
    exit(1)
this_node = socket.gethostname()

# Perl snippet executed below to dump every pct subcommand and its
# positional argument names (one `name=arg1,arg2` line per command).
dump_script = """
#!/usr/bin/perl -T
use PVE::CLI::pct;

# Iterate over all defined commands
while (my($k, $v) = each %$PVE::CLI::pct::cmddef) {
    # Skip any aliases
    next if (ref($v) ne 'ARRAY');
    print $k . '=' . join(',', @{$v->[2]}) . "\n";
}
"""

# Gather native pct's capabilities
actions = {}
# Commands that take no vmid and are instead run cluster-wide.
argless = { "list" }
for cmd in subprocess.run("perl", text=True, input=dump_script, stdout=subprocess.PIPE).stdout.rstrip().split("\n"):
    name, args = cmd.split("=")
    args = tuple(arg for arg in args.split(",") if arg);
    # Only actions that have a vmid parameter
    if "vmid" in args or name in argless:
        actions[name] = args
actions = dict(sorted(actions.items()))

def usage():
    """Print the supported commands with their positional arguments."""
    print(f"Usage: {executable} <COMMAND> [ARGS] [OPTIONS]\n")
    for name, args in actions.items():
        print(f"  {executable} {name}", end="")
        for arg in args:
            print(f" <{arg}>", end="")
        print()

if len(sys.argv) < 2:
    print("ERROR: no command specified")
    usage()
    exit(1)
cmd = sys.argv[1]
if cmd not in actions:
    print(f"ERROR: unknown command '{executable} {cmd}'")
    usage()
    exit(1)

if cmd in argless:
    if len(sys.argv) > 2:
        print("ERROR: too many arguments")
        exit(1)
    # Perform argless commands on all nodes, with some formatting
    # Longest node name + 3, at least 11
    node_col_width = max(9, max(map(lambda e: len(e), nodes))) + 3
    # First on our own node, then all other nodes
    for node in [this_node, *nodes.difference({this_node})]:
        argv = ["pct", cmd]
        if node != this_node:
            argv[0:0] = ["ssh", f"root@{node}"]
        res = subprocess.run(argv, text=True, stdout=subprocess.PIPE)
        # Print the header row (from the local node's output) exactly once,
        # then prefix every data row with the node it came from.
        if node == this_node:
            print("Node".ljust(node_col_width), end="")
            print(res.stdout.split("\n")[0])
        for line in res.stdout.rstrip().split("\n")[1:]:
            print(node.ljust(node_col_width), end="")
            print(line)
else:
    # If there's arguments, the first argument will be the vmid. Find this, and execute on the specified node
    # NOTE(review): sys.argv[2] raises IndexError when the command is given
    # without a vmid — consider a usage error instead; confirm desired behavior.
    vmid = sys.argv[2]
    if not vmid.isdigit():
        # Resolve vmid from vm name
        # Check every node's shared config
        for node in nodes:
            conf: Path
            for conf in (Path("/etc/pve/nodes") / node / "lxc").glob("*.conf"):
                # Unknown VM, shouldn't happen
                if conf.stem not in vms:
                    continue
                with conf.open() as c:
                    # Match the container whose `hostname` line equals the
                    # requested name; the for/else + break ladder exits all
                    # three loops on the first match.
                    for line in c:
                        line = line.rstrip()
                        if line.startswith("hostname") and line.split(" ")[1] == vmid:
                            vmid = conf.stem
                            break
                    else:
                        continue
                    break
            else:
                continue
            break
        if not vmid.isdigit():
            print(f"ERROR: unknown vm name: '{vmid}'")
            exit(1)
    # NOTE(review): vms[vmid] raises KeyError for a numeric vmid not in the
    # cluster vmlist — verify whether a friendlier error is wanted.
    target_node = vms[vmid]
    # Forward arguments
    binary = "/usr/sbin/pct"
    argv = ["pct", cmd, vmid, *sys.argv[3:]]
    if target_node != this_node:
        binary = "/usr/bin/ssh"
        argv[0:0] = ["ssh", "-t", "-o", "LogLevel=QUIET", f"root@{target_node}"]
    # Replace this process with pct (local) or ssh-to-pct (remote).
    os.execvp(binary, argv)
| StarcoderdataPython |
3574078 | #!/usr/bin/env python
# encoding: utf-8
# <NAME>, 2006-2010 (ita)
"""
Support for translation tools such as msgfmt and intltool
Usage::
def configure(conf):
conf.load('gnu_dirs intltool')
def build(bld):
# process the .po files into .gmo files, and install them in LOCALEDIR
bld(features='intltool_po', appname='myapp', podir='po', install_path="${LOCALEDIR}")
# process an input file, substituting the translations from the po dir
bld(
features = "intltool_in",
podir = "../po",
flags = ["-d", "-q", "-u", "-c"],
source = 'kupfer.desktop.in',
install_path = "${DATADIR}/applications",
)
Usage of the :py:mod:`waflib.Tools.gnu_dirs` is recommended, but not obligatory.
"""
import os, re
from waflib import Configure, TaskGen, Task, Utils, Runner, Options, Build, Logs
import waflib.Tools.ccroot
from waflib.TaskGen import feature, before_method
from waflib.Logs import error
@before_method('process_source')
@feature('intltool_in')
def apply_intltool_in_f(self):
    """
    Create tasks to translate files by intltool-merge::

        def build(bld):
            bld(
                features = "intltool_in",
                podir    = "../po",
                flags    = ["-d", "-q", "-u", "-c"],
                source   = 'kupfer.desktop.in',
                install_path = "${DATADIR}/applications",
            )

    :param podir: location of the .po files
    :type podir: string
    :param source: source files to process
    :type source: list of string
    :param flags: compilation flags ("-quc" by default)
    :type flags: list of string
    :param install_path: installation path
    :type install_path: string
    """
    # Take over source processing: this feature creates its own tasks.
    try: self.meths.remove('process_source')
    except ValueError: pass
    if not self.env.LOCALEDIR:
        self.env.LOCALEDIR = self.env.PREFIX + '/share/locale'
    for i in self.to_list(self.source):
        node = self.path.find_resource(i)
        podir = getattr(self, 'podir', 'po')
        podirnode = self.path.find_dir(podir)
        if not podirnode:
            # Report and skip this source rather than aborting the build.
            error("could not find the podir %r" % podir)
            continue
        cache = getattr(self, 'intlcache', '.intlcache')
        # Env vars consumed by the `intltool` task's command template below.
        self.env['INTLCACHE'] = os.path.join(self.path.bldpath(), podir, cache)
        self.env['INTLPODIR'] = podirnode.bldpath()
        self.env['INTLFLAGS'] = getattr(self, 'flags', ['-q', '-u', '-c'])
        # foo.desktop.in -> foo.desktop (extension stripped by change_ext('')).
        task = self.create_task('intltool', node, node.change_ext(''))
        inst = getattr(self, 'install_path', '${LOCALEDIR}')
        if inst:
            self.bld.install_files(inst, task.outputs)
@feature('intltool_po')
def apply_intltool_po(self):
    """
    Create tasks to process po files::

        def build(bld):
            bld(features='intltool_po', appname='myapp', podir='po', install_path="${LOCALEDIR}")

    The relevant task generator arguments are:

    :param podir: directory of the .po files
    :type podir: string
    :param appname: name of the application
    :type appname: string
    :param install_path: installation directory
    :type install_path: string

    The file LINGUAS must be present in the directory pointed by *podir* and list the translation files to process.
    """
    # Take over source processing: this feature creates its own tasks.
    try: self.meths.remove('process_source')
    except ValueError: pass
    if not self.env.LOCALEDIR:
        self.env.LOCALEDIR = self.env.PREFIX + '/share/locale'
    appname = getattr(self, 'appname', 'set_your_app_name')
    podir = getattr(self, 'podir', '')
    inst = getattr(self, 'install_path', '${LOCALEDIR}')
    linguas = self.path.find_node(os.path.join(podir, 'LINGUAS'))
    if linguas:
        # scan LINGUAS file for locales to process
        # Fix: use a context manager so the file handle is closed even if
        # parsing raises, and stop shadowing the builtin name `file`.
        langs = []
        with open(linguas.abspath()) as linguas_file:
            for line in linguas_file:
                # ignore lines containing comments
                if not line.startswith('#'):
                    langs += line.split()
        re_linguas = re.compile('[-a-zA-Z_@.]+')
        for lang in langs:
            # Make sure that we only process lines which contain locales
            if re_linguas.match(lang):
                node = self.path.find_resource(os.path.join(podir, re_linguas.match(lang).group() + '.po'))
                task = self.create_task('po', node, node.change_ext('.mo'))
                if inst:
                    # Install as <inst>/<lang>/LC_MESSAGES/<appname>.mo
                    filename = task.outputs[0].name
                    (langname, ext) = os.path.splitext(filename)
                    inst_file = inst + os.sep + langname + os.sep + 'LC_MESSAGES' + os.sep + appname + '.mo'
                    self.bld.install_as(inst_file, task.outputs[0], chmod=getattr(self, 'chmod', Utils.O644), env=task.env)
    else:
        Logs.pprint('RED', "Error no LINGUAS file found in po directory")
class po(Task.Task):
    """
    Compile .po files into .gmo files
    """
    # Command template expanded by waf at execution time.
    run_str = '${MSGFMT} -o ${TGT} ${SRC}'
    # Console color used when displaying this task.
    color = 'BLUE'

class intltool(Task.Task):
    """
    Let intltool-merge translate an input file
    """
    # INTLCACHE/INTLPODIR/INTLFLAGS are set by apply_intltool_in_f above.
    run_str = '${INTLTOOL} ${INTLFLAGS} ${INTLCACHE} ${INTLPODIR} ${SRC} ${TGT}'
    color = 'BLUE'
def configure(conf):
    """
    Detect the program *msgfmt* and set *conf.env.MSGFMT*.
    Detect the program *intltool-merge* and set *conf.env.INTLTOOL*.
    It is possible to set INTLTOOL in the environment, but it must not have spaces in it::

        $ INTLTOOL="/path/to/the program/intltool" waf configure

    If a C/C++ compiler is present, execute a compilation test to find the header *locale.h*.
    """
    conf.find_program('msgfmt', var='MSGFMT')
    conf.find_perl_program('intltool-merge', var='INTLTOOL')
    prefix = conf.env.PREFIX
    datadir = conf.env.DATADIR
    if not datadir:
        # Fall back to the GNU default when gnu_dirs was not loaded.
        datadir = os.path.join(prefix,'share')
    # Backslashes are doubled so the values survive being written into C defines.
    conf.define('LOCALEDIR', os.path.join(datadir, 'locale').replace('\\', '\\\\'))
    conf.define('DATADIR', datadir.replace('\\', '\\\\'))
    if conf.env.CC or conf.env.CXX:
        conf.check(header_name='locale.h')
| StarcoderdataPython |
1725345 | # -*- coding: utf-8 -*-
"""TcEx Runtime App Test Case"""
import os
from six import string_types
from .test_case import TestCase
class TestCaseJob(TestCase):
    """Test-case runner for TcEx Job (runtime) Apps."""

    _output_variables = None
    redis_client = None

    @staticmethod
    def create_shelf_dir(shelf_path):
        """Create the context shelf directory with a DEBUG marker file, if missing."""
        if not os.path.isdir(shelf_path):
            os.makedirs(shelf_path)
            debug_marker = os.path.join(shelf_path, 'DEBUG')
            with open(debug_marker, 'a'):
                os.utime(debug_marker, None)

    def run(self, args):  # pylint: disable=too-many-return-statements
        """Run the Job App and return its exit code.

        Args:
            args (dict): The App CLI args.

        Returns:
            int: The App exit code.
        """
        # Resolve environment-variable references in every string arg value.
        for key, value in list(args.items()):
            if isinstance(value, string_types):
                args[key] = self.resolve_env_args(value)

        self.log_data('run', 'args', args)
        self.app = self.app_init(args)

        # Drive the App lifecycle; stop at the first non-zero exit code.
        for phase in ('start', 'run', 'done'):
            exit_code = self.run_app_method(self.app, phase)
            if exit_code != 0:
                return exit_code

        try:
            # call exit for message_tc output, but don't exit the test runner
            self.app.tcex.playbook.exit(msg=self.app.exit_message)
        except SystemExit:
            pass
        return self._exit(self.app.tcex.exit_code)

    def run_profile(self, profile):
        """Run an App using a profile name (or an already-loaded profile dict)."""
        if isinstance(profile, str):
            profile = self.init_profile(profile)

        profile_args = {'tc_temp_path': os.path.join(self._app_path, 'log', self.context)}
        self.create_shelf_dir(profile_args['tc_temp_path'])

        # Merge the required and optional inputs declared by the profile.
        profile_inputs = profile.get('inputs', {})
        profile_args.update(profile_inputs.get('required', {}))
        profile_args.update(profile_inputs.get('optional', {}))

        # run the App
        return self.run(profile_args)
| StarcoderdataPython |
4909072 | <reponame>Socrats/Axelrod
import random
from axelrod.action import Action
from axelrod.player import Player, obey_axelrod
from axelrod.strategies import TitForTat
from axelrod.strategy_transformers import NiceTransformer
from numpy.random import choice
from ._strategies import all_strategies
from .hunter import (AlternatorHunter, CooperatorHunter, CycleHunter,
DefectorHunter, EventualCycleHunter, MathConstantHunter,
RandomHunter)
# Needs to be computed manually to prevent circular dependency
ordinary_strategies = [s for s in all_strategies if obey_axelrod(s)]
# Short aliases for the two actions, used throughout this module.
C, D = Action.C, Action.D
class MetaPlayer(Player):
    """
    A generic player that has its own team of players.

    Each turn, every team member proposes a move; subclasses combine the
    proposals in :meth:`meta_strategy`.

    Names:

    - Meta Player: Original name by <NAME>
    """

    name = "<NAME>"
    classifier = {
        "memory_depth": float("inf"),  # Long memory
        "stochastic": True,
        "makes_use_of": {"game", "length"},
        "long_run_time": True,
        "inspects_source": False,
        "manipulates_source": False,
        "manipulates_state": False,
    }

    def __init__(self, team=None):
        super().__init__()
        # The default is to use all strategies available, but we need to import
        # the list at runtime, since _strategies import also _this_ module
        # before defining the list.
        if team:
            self.team = team
        else:
            # Needs to be computed manually to prevent circular dependency
            self.team = ordinary_strategies
        # Make sure we don't use any meta players to avoid infinite recursion.
        self.team = [t for t in self.team if not issubclass(t, MetaPlayer)]
        self.nteam = len(self.team)
        # Initiate all the player in our team.
        self.team = [t() for t in self.team]
        # This player inherits the classifiers of its team.
        # Note that memory_depth is not simply the max memory_depth of the team.
        for key in [
            "stochastic",
            "inspects_source",
            "manipulates_source",
            "manipulates_state",
        ]:
            # A property holds for the meta player if it holds for any member.
            self.classifier[key] = any(t.classifier[key] for t in self.team)
        for t in self.team:
            self.classifier["makes_use_of"].update(t.classifier["makes_use_of"])

    def __repr__(self):
        team_size = len(self.team)
        return "{}: {} player{}".format(
            self.name, team_size, "s" if team_size > 1 else ""
        )

    def strategy(self, opponent):
        # Get the results of all our players.
        results = []
        for player in self.team:
            play = player.strategy(opponent)
            # Record each member's proposal in its own history.
            player.history.append(play)
            results.append(play)
        # A subclass should just define a way to choose the result based on
        # team results.
        return self.meta_strategy(results, opponent)

    def meta_strategy(self, results, opponent):
        """Determine the meta result based on results of all players.
        Override this function in child classes."""
        return C
class MetaMajority(MetaPlayer):
    """A player who goes by the majority vote of all other non-meta players.

    Names:

    - Meta Majority: Original name by <NAME>
    """

    name = "Meta Majority"

    def __init__(self, team=None):
        super().__init__(team=team)

    @staticmethod
    def meta_strategy(results, opponent):
        # Defect only when strictly more team members propose D than C;
        # ties fall through to cooperation.
        defections = results.count(D)
        cooperations = results.count(C)
        return D if defections > cooperations else C
class MetaMinority(MetaPlayer):
    """A player who goes by the minority vote of all other non-meta players.

    Names:

    - Meta Minority: Original name by <NAME>
    """

    name = "Meta Minority"

    def __init__(self, team=None):
        super().__init__(team=team)

    @staticmethod
    def meta_strategy(results, opponent):
        # Side with the minority: defect only when strictly fewer members
        # propose D than C; ties fall through to cooperation.
        defections = results.count(D)
        cooperations = results.count(C)
        return D if defections < cooperations else C
class MetaWinner(MetaPlayer):
    """A player who goes by the strategy of the current winner.

    Names:

    - Meta Winner: Original name by <NAME>
    """

    name = "Meta Winner"

    def __init__(self, team=None):
        super().__init__(team=team)
        # For each player, we will keep the history of proposed moves and
        # a running score since the beginning of the game.
        self.scores = [0] * len(self.team)
        self.classifier["long_run_time"] = True

    def _update_scores(self, opponent):
        """Add last round's payoff to each team member's running score."""
        # Update the running score for each player, before determining the
        # next move.
        game = self.match_attributes["game"]
        if len(self.history):
            for i, player in enumerate(self.team):
                last_round = (player.history[-1], opponent.history[-1])
                s = game.scores[last_round][0]
                self.scores[i] += s

    def meta_strategy(self, results, opponent):
        """Play the proposal of the highest-scoring member; ties prefer C."""
        self._update_scores(opponent)
        # Choice an action based on the collection of scores
        bestscore = max(self.scores)
        beststrategies = [
            i for (i, score) in enumerate(self.scores) if score == bestscore
        ]
        bestproposals = [results[i] for i in beststrategies]
        bestresult = C if C in bestproposals else D
        return bestresult

# Variant that always cooperates on the first move.
NiceMetaWinner = NiceTransformer()(MetaWinner)
class MetaWinnerEnsemble(MetaWinner):
    """A variant of MetaWinner that chooses one of the top scoring strategies
    at random against each opponent. Note this strategy is always stochastic
    regardless of the team.

    Names:

    - Meta Winner Ensemble: Original name by <NAME>
    """

    name = "Meta Winner Ensemble"

    def meta_strategy(self, results, opponent):
        """Play the proposal of one of the top ~8% scorers, chosen uniformly."""
        self._update_scores(opponent)
        # Sort by score
        scores = [(score, i) for (i, score) in enumerate(self.scores)]
        # Choose one of the best scorers at random
        scores.sort(reverse=True)
        # Keep at least one candidate even for very small teams.
        prop = max(1, int(len(scores) * 0.08))
        index = choice([i for (s, i) in scores[:prop]])
        return results[index]

# Variant that always cooperates on the first move.
NiceMetaWinnerEnsemble = NiceTransformer()(MetaWinnerEnsemble)
class MetaHunter(MetaPlayer):
    """A player who uses a selection of hunters.

    Names

    - Meta Hunter: Original name by <NAME>
    """

    name = "Meta Hunter"
    classifier = {
        "memory_depth": float("inf"),  # Long memory
        "stochastic": False,
        "makes_use_of": set(),
        "long_run_time": False,
        "inspects_source": False,
        "manipulates_source": False,
        "manipulates_state": False,
    }

    def __init__(self):
        # Notice that we don't include the cooperator hunter, because it leads
        # to excessive defection and therefore bad performance against
        # unforgiving strategies. We will stick to hunters that use defections
        # as cues. However, a really tangible benefit comes from combining
        # Random Hunter and Math Constant Hunter, since together they catch
        # strategies that are lightly randomized but still quite constant
        # (the tricky/suspicious ones).
        team = [
            DefectorHunter,
            AlternatorHunter,
            RandomHunter,
            MathConstantHunter,
            CycleHunter,
            EventualCycleHunter,
        ]
        super().__init__(team=team)

    @staticmethod
    def meta_strategy(results, opponent):
        """Defect if any hunter detected its pattern; otherwise cooperate
        (switching to Tit-for-Tat behavior after 100 rounds)."""
        # If any of the hunters smells prey, then defect!
        if D in results:
            return D
        # Tit-for-tat might seem like a better default choice, but in many
        # cases it complicates the heuristics of hunting and creates
        # false-positives. So go ahead and use it, but only for longer
        # histories.
        if len(opponent.history) > 100:
            return D if opponent.history[-1:] == [D] else C
        else:
            return C
class MetaHunterAggressive(MetaPlayer):
    """A player who uses a selection of hunters.

    More aggressive than MetaHunter: it also hunts unconditional cooperators.

    Names

    - Meta Hunter Aggressive: Original name by <NAME>
    """

    name = "Meta Hunter Aggressive"
    classifier = {
        "memory_depth": float("inf"),  # Long memory
        "stochastic": False,
        "makes_use_of": set(),
        "long_run_time": False,
        "inspects_source": False,
        "manipulates_source": False,
        "manipulates_state": False,
    }

    def __init__(self, team=None):
        # This version uses CooperatorHunter
        if team is None:
            team = [
                DefectorHunter,
                AlternatorHunter,
                RandomHunter,
                MathConstantHunter,
                CycleHunter,
                EventualCycleHunter,
                CooperatorHunter,
            ]
        super().__init__(team=team)

    @staticmethod
    def meta_strategy(results, opponent):
        """Defect if any hunter detected its pattern; otherwise cooperate
        (switching to Tit-for-Tat behavior after 100 rounds)."""
        # If any of the hunters smells prey, then defect!
        if D in results:
            return D
        # Tit-for-tat might seem like a better default choice, but in many
        # cases it complicates the heuristics of hunting and creates
        # false-positives. So go ahead and use it, but only for longer
        # histories.
        if len(opponent.history) > 100:
            return D if opponent.history[-1:] == [D] else C
        else:
            return C
class MetaMajorityMemoryOne(MetaMajority):
    """MetaMajority with the team of Memory One players

    Names

    - Meta Majority Memory One: Original name by <NAME>
    """

    name = "Meta Majority Memory One"

    def __init__(self):
        # Restrict the team to strategies remembering at most one round.
        team = [s for s in ordinary_strategies if s().classifier["memory_depth"] <= 1]
        super().__init__(team=team)
        self.classifier["long_run_time"] = False

class MetaMajorityFiniteMemory(MetaMajority):
    """MetaMajority with the team of Finite Memory Players

    Names

    - Meta Majority Finite Memory: Original name by <NAME>
    """

    name = "Meta Majority Finite Memory"

    def __init__(self):
        # Restrict the team to strategies with bounded memory depth.
        team = [
            s
            for s in ordinary_strategies
            if s().classifier["memory_depth"] < float("inf")
        ]
        super().__init__(team=team)

class MetaMajorityLongMemory(MetaMajority):
    """MetaMajority with the team of Long (infinite) Memory Players

    Names

    - Meta Majority Long Memory: Original name by <NAME>
    """

    name = "Meta Majority Long Memory"

    def __init__(self):
        # Restrict the team to strategies with unbounded memory depth.
        team = [
            s
            for s in ordinary_strategies
            if s().classifier["memory_depth"] == float("inf")
        ]
        super().__init__(team=team)

class MetaWinnerMemoryOne(MetaWinner):
    """MetaWinner with the team of Memory One players

    Names

    - Meta Winner Memory Memory One: Original name by <NAME>
    """

    name = "Meta Winner Memory One"

    def __init__(self):
        # Restrict the team to strategies remembering at most one round.
        team = [s for s in ordinary_strategies if s().classifier["memory_depth"] <= 1]
        super().__init__(team=team)
        self.classifier["long_run_time"] = False

class MetaWinnerFiniteMemory(MetaWinner):
    """MetaWinner with the team of Finite Memory Players

    Names

    - Meta Winner Finite Memory: Original name by <NAME>
    """

    name = "Meta Winner Finite Memory"

    def __init__(self):
        # Restrict the team to strategies with bounded memory depth.
        team = [
            s
            for s in ordinary_strategies
            if s().classifier["memory_depth"] < float("inf")
        ]
        super().__init__(team=team)

class MetaWinnerLongMemory(MetaWinner):
    """MetaWinner with the team of Long (infinite) Memory Players

    Names

    - Meta Winner Long Memory: Original name by <NAME>
    """

    name = "Meta Winner Long Memory"

    def __init__(self):
        # Restrict the team to strategies with unbounded memory depth.
        team = [
            s
            for s in ordinary_strategies
            if s().classifier["memory_depth"] == float("inf")
        ]
        super().__init__(team=team)

class MetaWinnerDeterministic(MetaWinner):
    """Meta Winner with the team of Deterministic Players.

    Names

    - Meta Winner Deterministic: Original name by <NAME>
    """

    name = "Meta Winner Deterministic"

    def __init__(self):
        # Restrict the team to non-stochastic strategies; the meta player
        # itself is then deterministic as well.
        team = [s for s in ordinary_strategies if not s().classifier["stochastic"]]
        super().__init__(team=team)
        self.classifier["stochastic"] = False

class MetaWinnerStochastic(MetaWinner):
    """Meta Winner with the team of Stochastic Players.

    Names

    - Meta Winner Stochastic: Original name by <NAME>
    """

    name = "Meta Winner Stochastic"

    def __init__(self):
        # Restrict the team to stochastic strategies.
        team = [s for s in ordinary_strategies if s().classifier["stochastic"]]
        super().__init__(team=team)
class MetaMixer(MetaPlayer):
    """A player who randomly switches between a team of players.

    If no distribution is passed then the player will uniformly choose between
    sub players.

    In essence this is creating a Mixed strategy.

    Parameters

    team : list of strategy classes, optional
        Team of strategies that are to be randomly played
        If none is passed will select the ordinary strategies.
    distribution : list representing a probability distribution, optional
        This gives the distribution from which to select the players.
        If none is passed will select uniformly.

    Names

    - Meta Mixer: Original name by <NAME>
    """

    name = "Meta Mixer"
    classifier = {
        "memory_depth": float("inf"),  # Long memory
        "stochastic": True,
        "makes_use_of": set(),
        "long_run_time": True,
        "inspects_source": False,
        "manipulates_source": False,
        "manipulates_state": False,
    }

    def __init__(self, team=None, distribution=None):
        # Stored as-is; None means uniform sampling in numpy's choice.
        self.distribution = distribution
        super().__init__(team=team)

    def meta_strategy(self, results, opponent):
        """Using the numpy.random choice function to sample with weights"""
        return choice(results, p=self.distribution)
class NMWEDeterministic(NiceMetaWinnerEnsemble):
    """Nice Meta Winner Ensemble with the team of Deterministic Players.

    Names

    - Nice Meta Winner Ensemble Deterministic: Original name by <NAME>
    """

    name = "NMWE Deterministic"

    def __init__(self):
        # Restrict the team to non-stochastic strategies; the ensemble's own
        # random tie-breaking keeps the meta player stochastic regardless.
        team = [s for s in ordinary_strategies if not s().classifier["stochastic"]]
        super().__init__(team=team)
        self.classifier["stochastic"] = True

class NMWEStochastic(NiceMetaWinnerEnsemble):
    """Nice Meta Winner Ensemble with the team of Stochastic Players.

    Names

    - Nice Meta Winner Ensemble Stochastic: Original name by <NAME>
    """

    name = "NMWE Stochastic"

    def __init__(self):
        # Restrict the team to stochastic strategies.
        team = [s for s in ordinary_strategies if s().classifier["stochastic"]]
        super().__init__(team=team)

class NMWEFiniteMemory(NiceMetaWinnerEnsemble):
    """Nice Meta Winner Ensemble with the team of Finite Memory Players.

    Names

    - Nice Meta Winner Ensemble Finite Memory: Original name by <NAME>
    """

    name = "NMWE Finite Memory"

    def __init__(self):
        # Restrict the team to strategies with bounded memory depth.
        team = [
            s
            for s in ordinary_strategies
            if s().classifier["memory_depth"] < float("inf")
        ]
        super().__init__(team=team)

class NMWELongMemory(NiceMetaWinnerEnsemble):
    """Nice Meta Winner Ensemble with the team of Long Memory Players.

    Names

    - Nice Meta Winner Ensemble Long Memory: Original name by <NAME>
    """

    name = "NMWE Long Memory"

    def __init__(self):
        # Restrict the team to strategies with unbounded memory depth.
        team = [
            s
            for s in ordinary_strategies
            if s().classifier["memory_depth"] == float("inf")
        ]
        super().__init__(team=team)

class NMWEMemoryOne(NiceMetaWinnerEnsemble):
    """Nice Meta Winner Ensemble with the team of Memory One Players.

    Names

    - Nice Meta Winner Ensemble Memory One: Original name by <NAME>
    """

    name = "NMWE Memory One"

    def __init__(self):
        # Restrict the team to strategies remembering at most one round.
        team = [s for s in ordinary_strategies if s().classifier["memory_depth"] <= 1]
        super().__init__(team=team)
        self.classifier["long_run_time"] = False
class MemoryDecay(MetaPlayer):
    """
    A player utilizes the (default) Tit for Tat strategy for the first (default) 15 turns,
    at the same time memorizing the opponent's decisions. After the 15 turns have
    passed, the player calculates a 'net cooperation score' (NCS) for their opponent,
    weighing decisions to Cooperate as (default) 1, and to Defect as (default)
    -2. If the opponent's NCS is below 0, the player defects; otherwise,
    they cooperate.

    The player's memories of the opponent's decisions have a random chance to be
    altered (i.e., a C decision becomes D or vice versa; default probability
    is 0.03) or deleted (default probability is 0.1).

    It is possible to pass a different axelrod player class to change the inital
    player behavior.

    Name: <NAME>
    """

    name = "<NAME>"
    classifier = {
        "memory_depth": float("inf"),
        "long_run_time": False,
        "stochastic": True,
        "makes_use_of": set(),
        "inspects_source": False,
        "manipulates_source": False,
        "manipulates_state": False,
    }

    def __init__(
        self,
        p_memory_delete: float = 0.1,
        p_memory_alter: float = 0.03,
        loss_value: float = -2,
        gain_value: float = 1,
        memory: list = None,
        start_strategy: Player = TitForTat,
        start_strategy_duration: int = 15,
    ):
        super().__init__(team=[start_strategy])
        self.classifier["stochastic"] = True
        self.p_memory_delete = p_memory_delete
        self.p_memory_alter = p_memory_alter
        self.loss_value = loss_value
        self.gain_value = gain_value
        # Fix: identity comparison with None (was `memory == None`).
        self.memory = [] if memory is None else memory
        self.start_strategy_duration = start_strategy_duration

    def __repr__(self):
        return Player.__repr__(self)

    def gain_loss_translate(self):
        """
        Translates the actions (D and C) to numeric values (loss_value and
        gain_value).
        """
        values = {C: self.gain_value, D: self.loss_value}
        self.gloss_values = [values[action] for action in self.memory]

    def memory_alter(self):
        """
        Alters memory entry, i.e. puts C if there's a D and vice versa.
        """
        # Robustness: choice(range(0, 0)) would raise on an empty memory.
        if not self.memory:
            return
        alter = choice(range(0, len(self.memory)))
        self.memory[alter] = self.memory[alter].flip()

    def memory_delete(self):
        """
        Deletes memory entry.
        """
        # Robustness: choice(range(0, 0)) would raise on an empty memory.
        if not self.memory:
            return
        self.memory.pop(choice(range(0, len(self.memory))))

    def strategy(self, opponent):
        # Record the opponent's last move (no-op on the first turn).
        try:
            self.memory.append(opponent.history[-1])
        except IndexError:
            pass
        if len(self.history) < self.start_strategy_duration:
            # Warm-up phase: defer to the configured start strategy.
            play = self.team[0].strategy(opponent)
            self.team[0].history.append(play)
            return play
        else:
            # Randomly corrupt and/or forget one memory entry, then act on
            # the net cooperation score of what remains.
            if random.random() <= self.p_memory_alter:
                self.memory_alter()
            if random.random() <= self.p_memory_delete:
                self.memory_delete()
            self.gain_loss_translate()
            if sum(self.gloss_values) < 0:
                return D
            else:
                return C
| StarcoderdataPython |
1915851 | from __future__ import annotations
from abc import ABCMeta, abstractmethod
from typing import Tuple, Type
from io import BytesIO
class MessageException(Exception):
    """Raised by the messaging layer, e.g. for unknown message types."""
    pass
class Message(metaclass=ABCMeta):
    """
    Abstract wire message with three parts, framed by CRLF-CRLF separators:

    header: information required to e.g. route to correct subsystem
    It is limited to carry only string values.
    body: information that the message is carrying
    attachments: lazy evaluated files that can be transported
    """

    @abstractmethod
    def loads_message(self, header: dict, body: str, attachments: list):
        """Populate this message from decoded header/body/attachments."""
        pass

    @abstractmethod
    def dumps_message(self) -> Tuple[dict, str, list]:
        """Return (header, body, attachments) to be serialized."""
        pass

    @staticmethod
    @abstractmethod
    def get_message_type() -> str:
        """Return the type tag stored in the serialized header."""
        return "NoneType"

    def dumps(self) -> bytes:
        """Serialize this message, stamping its type into the header."""
        header, body, attachments = self.dumps_message()
        # Copy so the caller's dict is not mutated by the type stamp.
        header = header.copy()
        header["type"] = self.get_message_type()
        return self._encode(header, body, attachments)

    @staticmethod
    def process(data: bytes) -> Tuple[str, dict, str, list]:
        """Decode raw bytes and split out the type tag from the header."""
        header, body, attachments = Message._decode(data)
        typename = header.pop("type")
        return typename, header, body, attachments

    def loads(self, data: bytes) -> Type[Message]:
        """Decode raw bytes into this instance and return it (fluent)."""
        processed = Message.process(data)
        self.loads_message(*processed[1:])
        return self

    @staticmethod
    def _encode(header, body, attachments):
        buf = BytesIO()
        Message._encode_header(buf, header)
        Message._encode_body(buf, body)
        Message._encode_attachments(buf, attachments)
        buf.seek(0)
        return buf.read()

    @staticmethod
    def _encode_header(buf, header):
        # One "key: value" line per entry, terminated by a blank line.
        for key, value in header.items():
            buf.write("{}: {}\r\n".format(key, value).encode())
        buf.write(b"\r\n")

    @staticmethod
    def _encode_body(buf, body):
        buf.write(body.encode())
        buf.write(b"\r\n\r\n")

    @staticmethod
    def _encode_attachments(buf, attachments):
        # Attachment transport is not implemented yet.
        pass

    @staticmethod
    def _decode(data):
        # header \r\n\r\n body \r\n\r\n attachment-data
        header_data, body, data = data.split(b"\r\n\r\n", 2)
        header = Message._decode_header(header_data)
        body = body.decode()
        attachments = Message._decode_attachments(data)
        return header, body, attachments

    @staticmethod
    def _decode_header(data) -> dict:
        header = {}
        data = data.decode()
        while data:
            if "\r\n" in data:
                value, data = data.split("\r\n", 1)
            else:
                value, data = data, None
            if ": " in value:
                lvalue, rvalue = value.split(": ", 1)
                header[lvalue] = rvalue
            else:
                # Fix: `value` is already a str here; the original called
                # value.decode(), raising AttributeError on any malformed line.
                print(f"Illformated {value}")
        return header

    @staticmethod
    def _decode_attachments(data) -> list:
        # Attachment transport is not implemented yet.
        return []
class MessageProcessor:
    """Registry that decodes raw bytes into the matching Message subclass."""

    def __init__(self, *message_cls):
        """Create an empty registry and register every class passed in."""
        self.message_types = {}
        for message_class in message_cls:
            self.register(message_class)

    def register(self, message_cl):
        """Make `message_cl` decodable, keyed by its declared type tag."""
        type_tag = message_cl.get_message_type()
        self.message_types[type_tag] = message_cl

    def loads(self, data) -> Type[Message]:
        """Decode `data` into a fresh instance of the registered class."""
        typename, *content = Message.process(data)
        if typename not in self.message_types:
            raise MessageException(f"Unknown message type {typename}")
        message = self.message_types[typename]()
        message.loads_message(*content)
        return message
| StarcoderdataPython |
1659593 | """Gives users direct access to class and functions."""
from shamirs.shamirs import share, shares, interpolate
| StarcoderdataPython |
1758015 | <gh_stars>1-10
import datetime
from sqlalchemy import or_
from lib.util_sqlalchemy import ResourceMixin
from coder.extensions import db
from coder.blueprints.billing.models.credit_card import CreditCard
from coder.blueprints.billing.models.coupon import Coupon
from coder.blueprints.billing.gateways.stripecom import (
Customer as PaymentCustomer,
Charge as PaymentCharge,
Invoice as PaymentInvoice
)
class Invoice(ResourceMixin, db.Model):
    """
    Locally stored invoice record for a user, de-normalized from Stripe
    webhook/API payloads so billing history renders even after a user
    changes cards or cancels their subscription.
    """
    __tablename__ = 'invoices'
    id = db.Column(db.Integer, primary_key=True)
    # Relationships.
    user_id = db.Column(db.Integer, db.ForeignKey('users.id',
                                                  onupdate='CASCADE',
                                                  ondelete='CASCADE'),
                        index=True, nullable=False)
    user = db.relationship('User')
    # Invoice details.
    plan = db.Column(db.String(128), index=True)
    receipt_number = db.Column(db.String(128), index=True)
    description = db.Column(db.String(128))
    period_start_on = db.Column(db.Date)
    period_end_on = db.Column(db.Date)
    currency = db.Column(db.String(8))
    tax = db.Column(db.Integer())
    tax_percent = db.Column(db.Float())
    total = db.Column(db.Integer())
    # De-normalize the card details so we can render a user's history properly
    # even if they have no active subscription or changed cards at some point.
    brand = db.Column(db.String(32))
    last4 = db.Column(db.Integer)
    exp_date = db.Column(db.Date, index=True)
    def __init__(self, **kwargs):
        # Call Flask-SQLAlchemy's constructor.
        super(Invoice, self).__init__(**kwargs)
    @classmethod
    def search(cls, query):
        """
        Search a resource by 1 or more fields.
        :param query: Search query
        :type query: str
        :return: SQLAlchemy filter
        """
        from coder.blueprints.user.models import User
        if not query:
            return ''
        search_query = '%{0}%'.format(query)
        # Match on the invoice owner's email OR username.
        search_chain = (User.email.ilike(search_query),
                        User.username.ilike(search_query))
        return or_(*search_chain)
    @classmethod
    def parse_from_event(cls, payload):
        """
        Parse and return the invoice information that will get saved locally
        from a Stripe webhook event payload.
        :return: dict
        """
        data = payload['data']['object']
        # NOTE(review): assumes the first line item carries the plan and
        # billing period -- confirm against the Stripe event schema.
        plan_info = data['lines']['data'][0]['plan']
        period_start_on = datetime.datetime.utcfromtimestamp(
            data['lines']['data'][0]['period']['start']).date()
        period_end_on = datetime.datetime.utcfromtimestamp(
            data['lines']['data'][0]['period']['end']).date()
        invoice = {
            'payment_id': data['customer'],
            'plan': plan_info['name'],
            'receipt_number': data['receipt_number'],
            'description': plan_info['statement_descriptor'],
            'period_start_on': period_start_on,
            'period_end_on': period_end_on,
            'currency': data['currency'],
            'tax': data['tax'],
            'tax_percent': data['tax_percent'],
            'total': data['total']
        }
        return invoice
    @classmethod
    def parse_from_api(cls, payload):
        """
        Parse and return the invoice information we are interested in
        from a Stripe API (upcoming invoice) response.
        :return: dict
        """
        plan_info = payload['lines']['data'][0]['plan']
        date = datetime.datetime.utcfromtimestamp(payload['date'])
        invoice = {
            'plan': plan_info['name'],
            'description': plan_info['statement_descriptor'],
            'next_bill_on': date,
            'amount_due': payload['amount_due'],
            'interval': plan_info['interval']
        }
        return invoice
    @classmethod
    def prepare_and_save(cls, parsed_event):
        """
        Potentially save the invoice after augmenting the event fields.
        :param parsed_event: Event params to be saved
        :type parsed_event: dict
        :return: User instance
        """
        # Avoid circular imports.
        from coder.blueprints.user.models import User
        # Only save the invoice if the user is valid at this point.
        # NOTE(review): `id` shadows the builtin; harmless here but worth
        # renaming in a behavioral change.
        id = parsed_event.get('payment_id')
        user = User.query.filter((User.payment_id == id)).first()
        if user and user.credit_card:
            parsed_event['user_id'] = user.id
            parsed_event['brand'] = user.credit_card.brand
            parsed_event['last4'] = user.credit_card.last4
            parsed_event['exp_date'] = user.credit_card.exp_date
            # `payment_id` is not an Invoice column, so drop it before
            # constructing the model.
            del parsed_event['payment_id']
            invoice = Invoice(**parsed_event)
            invoice.save()
        return user
    @classmethod
    def upcoming(cls, customer_id):
        """
        Return the upcoming invoice item.
        :param customer_id: Stripe customer id
        :type customer_id: int
        :return: Stripe invoice object
        """
        invoice = PaymentInvoice.upcoming(customer_id)
        return Invoice.parse_from_api(invoice)
    def create(self, user=None, currency=None, amount=None, coins=None,
               coupon=None, token=None):
        """
        Create an invoice item (one-off charge for coins).
        :param user: User to apply the subscription to
        :type user: User instance
        :param currency: Stripe currency
        :type currency: str
        :param amount: Amount in cents
        :type amount: int
        :param coins: Amount of coins
        :type coins: int
        :param coupon: Coupon code to apply
        :type coupon: str
        :param token: Token returned by JavaScript
        :type token: str
        :return: bool
        """
        if token is None:
            return False
        customer = PaymentCustomer.create(token=token, email=user.email)
        if coupon:
            # Coupons are stored upper-cased; discount before charging.
            self.coupon = coupon.upper()
            coupon = Coupon.query.filter(Coupon.code == self.coupon).first()
            amount = coupon.apply_discount_to(amount)
        charge = PaymentCharge.create(customer.id, currency, amount)
        # Redeem the coupon.
        if coupon:
            coupon.redeem()
        # Add the coins to the user.
        user.coins += coins
        # Create the invoice item.
        period_on = datetime.datetime.utcfromtimestamp(charge.get('created'))
        card_params = CreditCard.extract_card_params(customer)
        self.user_id = user.id
        self.plan = '—'
        self.receipt_number = charge.get('receipt_number')
        self.description = charge.get('statement_descriptor')
        self.period_start_on = period_on
        self.period_end_on = period_on
        self.currency = charge.get('currency')
        self.tax = None
        self.tax_percent = None
        self.total = charge.get('amount')
        self.brand = card_params.get('brand')
        self.last4 = card_params.get('last4')
        self.exp_date = card_params.get('exp_date')
        # Persist the user's new coin balance and the invoice atomically.
        db.session.add(user)
        db.session.add(self)
        db.session.commit()
        return True
8167321 | <gh_stars>1-10
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
import numpy as np
from typing import Dict
import torch
from detectron2.layers import ShapeSpec, batched_nms_rotated
from detectron2.structures import Instances, RotatedBoxes, pairwise_iou_rotated
from detectron2.utils.events import get_event_storage
from ..box_regression import Box2BoxTransformRotated
from ..poolers import ROIPooler
from ..proposal_generator.proposal_utils import add_ground_truth_to_proposals
from .box_head import build_box_head
from .fast_rcnn import FastRCNNOutputLayers, FastRCNNOutputs
from .roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads
logger = logging.getLogger(__name__)
"""
Shape shorthand in this module:
N: number of images in the minibatch
R: number of ROIs, combined over all images, in the minibatch
Ri: number of ROIs in image i
   K: number of foreground classes. E.g., there are 80 foreground classes in COCO.
Naming convention:
deltas: refers to the 5-d (dx, dy, dw, dh, da) deltas that parameterize the box2box
transform (see :class:`box_regression.Box2BoxTransformRotated`).
pred_class_logits: predicted class scores in [-inf, +inf]; use
softmax(pred_class_logits) to estimate P(class).
gt_classes: ground-truth classification labels in [0, K], where [0, K) represent
foreground object classes and K represents the background class.
pred_proposal_deltas: predicted rotated box2box transform deltas for transforming proposals
to detection box predictions.
gt_proposal_deltas: ground-truth rotated box2box transform deltas
"""
def fast_rcnn_inference_rotated(
    boxes, scores, image_shapes, score_thresh, nms_thresh, topk_per_image
):
    """
    Run `fast_rcnn_inference_single_image_rotated` for every image in the batch.

    Args:
        boxes (list[Tensor]): per-image predicted rotated boxes, either
            class-specific with shape (Ri, K * 5) or class-agnostic with
            shape (Ri, 5), where Ri is the number of predictions for image i.
            Compatible with the output of :meth:`FastRCNNOutputs.predict_boxes`.
        scores (list[Tensor]): per-image class scores of shape (Ri, K + 1).
            Compatible with the output of :meth:`FastRCNNOutputs.predict_probs`.
        image_shapes (list[tuple]): (width, height) of each image in the batch.
        score_thresh (float): only keep detections whose confidence exceeds
            this threshold.
        nms_thresh (float): threshold for rotated box non-maximum suppression,
            in [0, 1].
        topk_per_image (int): number of top scoring detections to keep per
            image; set < 0 to keep all detections.

    Returns:
        instances (list[Instances]): one `Instances` per image holding its
            top-k most confident detections.
        kept_indices (list[Tensor]): for each image, a 1D tensor of the
            indices in [0, Ri) of the kept boxes/scores from the input.
    """
    kept_instances = []
    kept_indices = []
    for scores_i, boxes_i, shape_i in zip(scores, boxes, image_shapes):
        instances_i, indices_i = fast_rcnn_inference_single_image_rotated(
            boxes_i, scores_i, shape_i, score_thresh, nms_thresh, topk_per_image
        )
        kept_instances.append(instances_i)
        kept_indices.append(indices_i)
    return kept_instances, kept_indices
def fast_rcnn_inference_single_image_rotated(
    boxes, scores, image_shape, score_thresh, nms_thresh, topk_per_image
):
    """
    Single-image inference. Return rotated bounding-box detection results by thresholding
    on scores and applying rotated non-maximum suppression (Rotated NMS).
    Args:
        Same as `fast_rcnn_inference_rotated`, but with rotated boxes, scores, and image shapes
        per image.
    Returns:
        Same as `fast_rcnn_inference_rotated`, but for only one image.
    """
    # Drop predictions containing NaN/Inf in either coordinates or scores.
    valid_mask = torch.isfinite(boxes).all(dim=1) & torch.isfinite(scores).all(dim=1)
    if not valid_mask.all():
        boxes = boxes[valid_mask]
        scores = scores[valid_mask]
    B = 5  # box dimension
    # Drop the background class (per the module docstring, the last of the
    # K + 1 score columns is background).
    scores = scores[:, :-1]
    num_bbox_reg_classes = boxes.shape[1] // B
    # Convert to Boxes to use the `clip` function ...
    boxes = RotatedBoxes(boxes.reshape(-1, B))
    boxes.clip(image_shape)
    boxes = boxes.tensor.view(-1, num_bbox_reg_classes, B)  # R x C x B
    # Filter results based on detection scores
    filter_mask = scores > score_thresh  # R x K
    # R' x 2. First column contains indices of the R predictions;
    # Second column contains indices of classes.
    filter_inds = filter_mask.nonzero()
    if num_bbox_reg_classes == 1:
        # Class-agnostic regression: one box per prediction, shared by classes.
        boxes = boxes[filter_inds[:, 0], 0]
    else:
        boxes = boxes[filter_mask]
    scores = scores[filter_mask]
    # Apply per-class Rotated NMS
    keep = batched_nms_rotated(boxes, scores, filter_inds[:, 1], nms_thresh)
    if topk_per_image >= 0:
        keep = keep[:topk_per_image]
    boxes, scores, filter_inds = boxes[keep], scores[keep], filter_inds[keep]
    result = Instances(image_shape)
    result.pred_boxes = RotatedBoxes(boxes)
    result.scores = scores
    result.pred_classes = filter_inds[:, 1]
    return result, filter_inds[:, 0]
class RotatedFastRCNNOutputs(FastRCNNOutputs):
    """
    A class that stores information about outputs of a Fast R-CNN head with RotatedBoxes.
    """
    def inference(self, score_thresh, nms_thresh, topk_per_image):
        """
        Args:
            score_thresh (float): same as `fast_rcnn_inference_rotated`.
            nms_thresh (float): same as `fast_rcnn_inference_rotated`.
            topk_per_image (int): same as `fast_rcnn_inference_rotated`.
        Returns:
            list[Instances]: same as `fast_rcnn_inference_rotated`.
            list[Tensor]: same as `fast_rcnn_inference_rotated`.
        """
        # Per-image boxes/probabilities come from the FastRCNNOutputs base
        # class; only the post-processing differs for rotated boxes.
        boxes = self.predict_boxes()
        scores = self.predict_probs()
        image_shapes = self.image_shapes
        return fast_rcnn_inference_rotated(
            boxes, scores, image_shapes, score_thresh, nms_thresh, topk_per_image
        )
@ROI_HEADS_REGISTRY.register()
class RROIHeads(StandardROIHeads):
    """
    This class is used by Rotated RPN (RRPN).
    For now, it just supports box head but not mask or keypoints.
    """
    def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
        super().__init__(cfg, input_shape)
        # Rotated boxes use 5-d (dx, dy, dw, dh, da) deltas.
        self.box2box_transform = Box2BoxTransformRotated(
            weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS
        )
        assert (
            not self.mask_on and not self.keypoint_on
        ), "Mask/Keypoints not supported in Rotated ROIHeads."
    def _init_box_head(self, cfg, input_shape):
        """Build the rotated box pooler, head and predictor from config."""
        # fmt: off
        pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION
        pooler_scales     = tuple(1.0 / input_shape[k].stride for k in self.in_features)
        sampling_ratio    = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO
        pooler_type       = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE
        # fmt: on
        # If StandardROIHeads is applied on multiple feature maps (as in FPN),
        # then we share the same predictors and therefore the channel counts must be the same
        in_channels = [input_shape[f].channels for f in self.in_features]
        # Check all channel counts are equal
        assert len(set(in_channels)) == 1, in_channels
        in_channels = in_channels[0]
        # Only the rotated-aware pooler is valid for rotated proposals.
        assert pooler_type in ["ROIAlignRotated"]
        self.box_pooler = ROIPooler(
            output_size=pooler_resolution,
            scales=pooler_scales,
            sampling_ratio=sampling_ratio,
            pooler_type=pooler_type,
        )
        self.box_head = build_box_head(
            cfg, ShapeSpec(channels=in_channels, height=pooler_resolution, width=pooler_resolution)
        )
        self.box_predictor = FastRCNNOutputLayers(
            input_size=self.box_head.output_size,
            num_classes=self.num_classes,
            cls_agnostic_bbox_reg=self.cls_agnostic_bbox_reg,
            box_dim=5,
        )
    @torch.no_grad()
    def label_and_sample_proposals(self, proposals, targets):
        """
        Prepare some proposals to be used to train the RROI heads.
        It performs box matching between `proposals` and `targets`, and assigns
        training labels to the proposals.
        It returns `self.batch_size_per_image` random samples from proposals and groundtruth boxes,
        with a fraction of positives that is no larger than `self.positive_sample_fraction`.
        Args:
            See :meth:`StandardROIHeads.forward`
        Returns:
            list[Instances]: length `N` list of `Instances`s containing the proposals
                sampled for training. Each `Instances` has the following fields:
                - proposal_boxes: the rotated proposal boxes
                - gt_boxes: the ground-truth rotated boxes that the proposal is assigned to
                  (this is only meaningful if the proposal has a label > 0; if label = 0
                  then the ground-truth box is random)
                - gt_classes: the ground-truth classification lable for each proposal
        """
        gt_boxes = [x.gt_boxes for x in targets]
        if self.proposal_append_gt:
            # Adding GT boxes as proposals guarantees positives exist even
            # when the RPN misses an object.
            proposals = add_ground_truth_to_proposals(gt_boxes, proposals)
        proposals_with_gt = []
        num_fg_samples = []
        num_bg_samples = []
        for proposals_per_image, targets_per_image in zip(proposals, targets):
            has_gt = len(targets_per_image) > 0
            # Rotated IoU between all GT boxes and all proposals.
            match_quality_matrix = pairwise_iou_rotated(
                targets_per_image.gt_boxes, proposals_per_image.proposal_boxes
            )
            matched_idxs, matched_labels = self.proposal_matcher(match_quality_matrix)
            sampled_idxs, gt_classes = self._sample_proposals(
                matched_idxs, matched_labels, targets_per_image.gt_classes
            )
            proposals_per_image = proposals_per_image[sampled_idxs]
            proposals_per_image.gt_classes = gt_classes
            if has_gt:
                sampled_targets = matched_idxs[sampled_idxs]
                proposals_per_image.gt_boxes = targets_per_image.gt_boxes[sampled_targets]
            else:
                # No GT in this image: attach all-zero boxes so downstream
                # code still finds the field (labels are all background).
                gt_boxes = RotatedBoxes(
                    targets_per_image.gt_boxes.tensor.new_zeros((len(sampled_idxs), 5))
                )
                proposals_per_image.gt_boxes = gt_boxes
            num_bg_samples.append((gt_classes == self.num_classes).sum().item())
            num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])
            proposals_with_gt.append(proposals_per_image)
        # Log the number of fg/bg samples that are selected for training ROI heads
        storage = get_event_storage()
        storage.put_scalar("roi_head/num_fg_samples", np.mean(num_fg_samples))
        storage.put_scalar("roi_head/num_bg_samples", np.mean(num_bg_samples))
        return proposals_with_gt
    def _forward_box(self, features, proposals):
        """
        Forward logic of the box prediction branch.
        Args:
            features (dict[str, Tensor]): mapping from feature map names to tensor.
                Same as in :meth:`ROIHeads.forward`.
            proposals (list[Instances]): the per-image object proposals with
                their matching ground truth.
                Each has fields "proposal_boxes", and "objectness_logits",
                "gt_classes", "gt_boxes".
        Returns:
            In training, a dict of losses.
            In inference, a list of `Instances`, the predicted instances.
        """
        features = [features[f] for f in self.in_features]
        box_features = self.box_pooler(features, [x.proposal_boxes for x in proposals])
        box_features = self.box_head(box_features)
        pred_class_logits, pred_proposal_deltas = self.box_predictor(box_features)
        # Free the pooled features early; only the predictions are needed.
        del box_features
        outputs = RotatedFastRCNNOutputs(
            self.box2box_transform,
            pred_class_logits,
            pred_proposal_deltas,
            proposals,
            self.smooth_l1_beta,
        )
        if self.training:
            return outputs.losses()
        else:
            pred_instances, _ = outputs.inference(
                self.test_score_thresh, self.test_nms_thresh, self.test_detections_per_img
            )
            return pred_instances
| StarcoderdataPython |
8040051 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from lighttree import TreeBasedObj
from pandagg.tree.mappings import _mappings
from pandagg.interactive._field_agg_factory import field_classes_per_name
from pandagg.utils import DSLMixin
class IMappings(DSLMixin, TreeBasedObj):
    """Interactive wrapper upon mappings tree, allowing field navigation and quick access to single clause aggregations
    computation.
    """
    _REPR_NAME = "Mappings"
    _NODE_PATH_ATTR = "name"
    def __init__(
        self,
        mappings,
        client=None,
        index=None,
        depth=1,
        root_path=None,
        initial_tree=None,
    ):
        """
        :param mappings: raw mappings (dict or tree) to wrap
        :param client: optional Elasticsearch client enabling aggregation shortcuts
        :param index: index name used when executing aggregations
        :param depth: display depth for the interactive tree
        :param root_path: dotted path of this subtree's root field
        :param initial_tree: full mappings tree kept across navigation
        """
        if mappings is None:
            raise ValueError("mappings cannot be None")
        self._client = client
        self._index = index
        super(IMappings, self).__init__(
            tree=_mappings(mappings),
            root_path=root_path,
            depth=depth,
            initial_tree=initial_tree,
        )
        # if we reached a leave, add aggregation capabilities based on reached mappings type
        self._set_agg_property_if_required()
    def _clone(self, nid, root_path, depth):
        # Navigate into the subtree rooted at `nid`, preserving client/index
        # and the full initial tree for later aggregation execution.
        return IMappings(
            self._tree.subtree(nid)[1],
            client=self._client,
            root_path=root_path,
            depth=depth,
            initial_tree=self._initial_tree,
            index=self._index,
        )
    def _set_agg_property_if_required(self):
        # Only leaf fields of an aggregatable type, with a client attached,
        # get the `.a` aggregation helper.
        if self._client is not None and not self._tree.children(self._tree.root):
            _, field_node = self._tree.get(self._tree.root)
            if field_node.KEY in field_classes_per_name:
                search_class = self._get_dsl_type("search")
                self.a = field_classes_per_name[field_node.KEY](
                    search=search_class(
                        using=self._client,
                        index=self._index,
                        mappings=self._initial_tree,
                        repr_auto_execute=True,
                        nested_autocorrect=True,
                    ),
                    field=self._root_path,
                )
    def __call__(self, *args, **kwargs):
        # Pretty-print the mappings subtree as sorted, indented JSON.
        print(
            json.dumps(
                self._tree.to_dict(), indent=2, sort_keys=True, separators=(",", ": ")
            )
        )
| StarcoderdataPython |
9667423 | from myproductivitytool.common.services import *
from myproductivitytool.project.models import *
from myproductivitytool.project.serializers import *
from django.db.models.functions import Concat
from django.db.models import F, Value, CharField
class BaseProjectEntityService(ModelService):
    """Service wiring the BaseProjectEntity model to its serializer."""
    entity = BaseProjectEntity
    entity_name = 'Base Project Entity'
    entity_serializer = BaseProjectEntitySerializer
class TaskService(BaseModelService):
    """Service for project tasks: context for forms and task numbering."""
    entity = Task
    entity_name = 'Project Task'
    entity_serializer = TaskSerializer
    @classmethod
    def get_context(cls, **kwargs):
        """
        Build the context for the task form: the list of non-deleted
        projects (as key/value/text choices) and, when `instance_id` is
        given, the serialized task being edited.
        Returns {'success': bool, ...} like the other service methods.
        """
        try:
            context = dict()
            instance_id = kwargs.get('instance_id', None)
            projects = list(Project.objects.filter(is_deleted=False).annotate(key=F('id'), value=F('id'), text=F('name')).values('key','value', 'text'))
            context.update({
                'projects': projects
            })
            if instance_id:
                instance = cls.entity.objects.get(id=instance_id)
                instance = cls.entity_serializer(instance).data
                context.update({
                    'instance': instance
                })
            # NOTE(review): leftover debug prints below; consider proper
            # logging before shipping.
            print(context)
            return {'success': True, 'context': context}
        except Exception as e:
            print(e)
            return {'success': False, 'message': 'We could not fetch context for {0}'.format(cls.entity_name)}
    @classmethod
    def generate_task_number(cls, **kwargs):
        """
        Return the next task number as count+1.
        NOTE(review): not unique under concurrency or after deletions --
        confirm whether collisions matter for this field.
        """
        try:
            return {'success': True, 'task_number':Task.objects.count()+1}
        except Exception as e:
            print(e)
            return {'success': False, 'message': 'We could not generate task number'}
class ProjectService(BaseModelService):
    """Service for projects; overrides delete to soft-delete and detach tasks."""
    entity = Project
    entity_name = 'Project'
    entity_serializer = ProjectSerializer
    @classmethod
    def delete(cls,**kwargs):
        """
        Soft-delete a project: validate it is deletable, detach its tasks
        (their `project` becomes NULL), then mark it deleted with audit
        fields (deleted_on/deleted_by) instead of removing the row.
        """
        try:
            requestor = kwargs.get('requestor')
            instance_id = kwargs.get('instance_id')
            if not cls.entity.objects.filter(id=instance_id).exists():
                return {'success': False, 'message': 'We could not find the {0} you are trying to delete'.format(cls.entity_name)}
            instance = cls.entity.objects.get(id=instance_id)
            validation_data = cls.is_deletable(**{'instance': instance})
            if not validation_data.get('success'):
                return validation_data
            # remove project from attached tasks
            Task.objects.filter(project=instance).update(project=None)
            instance.is_deleted = True
            instance.deleted_on = timezone.now()
            instance.deleted_by = requestor
            instance.save()
            return {'success': True, 'message': '{0} deleted successfully'.format(cls.entity_name)}
        except Exception as e:
            # NOTE(review): broad except with a debug print; consider
            # logging the traceback instead.
            print(e)
            return {'success': False, 'message': 'We could not delete the {0}'.format(cls.entity_name)}
class TaskCommentService(BaseModelService):
    """Service wiring the TaskComment model to its serializer."""
    entity = TaskComment
    entity_name = 'Task Comment'
    entity_serializer = TaskCommentSerializer
class TaskCommentAttachmentService(BaseModelService):
    """Service wiring the TaskCommentAttachment model to its serializer."""
    entity = TaskCommentAttachment
    entity_name = 'Task Comment Attachment'
    entity_serializer = TaskCommentAttachmentSerializer
| StarcoderdataPython |
8000784 | import os
import itertools
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from helper import *
#Visualize scanpath for all participants based on I-VT fixations:
for h,j in itertools.product(sub_id,img_id):
file='Sub_'+str(h)+'_Image_'+str(j)+'.csv'
events=pd.read_csv(os.path.join(EVENTS_PATH,file),low_memory=False)
x=events['FPOG_X']
y=events['FPOG_Y']
fix_dur=events['FPOG_DUR']
fig, ax = plt.subplots(figsize=(20, 11))
ax.scatter(x,
y,
zorder=1
,marker='o',
s=fix_dur*10000,
color='lime',
alpha=0.5)
ax.plot(x,
y,
'-o',
linewidth=3,
color='blue')
img = plt.imread(IMG_PATH+"\S"+str(j)+".jpg")
plt.imshow(img,
zorder=0,
extent=[-960, 960, -540, 540],
aspect='auto')
for i in range(len(fix_dur)):
ax.annotate(str(i+1),
xy=(fix_dur.iloc[i],
fix_dur.iloc[i]),
xytext=(x.iloc[i],
y.iloc[i]),
fontsize=30,
color='black',
ha='center',
va='center')
plt.xlabel('X coordinates (in pixels)', size=20)
plt.ylabel('Y coordinates (in pixels)', size=20)
plt.title('Scanpath for Subject '+str(h)+' , Image '+str(j), size=30)
#draw a rectangle around the location of the star
target_coords=pd.read_csv(BEHAVIORAL_FILE)
slice=target_coords[(target_coords['Image']==j) &
(target_coords['participant']==h)]
left=int(slice['StarX'])-50 #X coordinate
bottom=int(slice['StarY'])-50 #Y coordinate
width=100
height=100
rect=mpatches.Rectangle((left,bottom),width, height,
fill=False,
color='orange',
linewidth=5)
plt.gca().add_patch(rect)
my_img='Subject_'+str(h)+'_Image_'+str(j)+'.png'
fig.savefig(os.path.join(IVT_SCANPATH,my_img))
#plt.show()
plt.close()
| StarcoderdataPython |
8153109 | <gh_stars>0
class PowerCellGrid:
    """
    Grid of fuel-cell power levels (Advent of Code 2018, day 11).

    ``grid[y][x]`` (0-based) holds the power level of the cell at the
    1-based coordinate ``(x + 1, y + 1)`` for the given serial number.
    """

    def __init__(self, width, height, serial_number):
        """Precompute the power level of every cell in the grid."""
        self.grid = list()
        for y in range(1, height+1):
            self.grid.append(list())
            for x in range(1, width+1):
                self.grid[y-1].append(self.find_power_level(x, y, serial_number))

    def find_power_level(self, x, y, serial_number):
        """Return the power level of the cell at 1-based (x, y)."""
        # Find the fuel cell's rack ID, which is its X coordinate plus 10.
        rack_id = x + 10
        # Begin with a power level of the rack ID times the Y coordinate.
        power_level = rack_id * y
        # Increase the power level by the value of the grid serial number.
        power_level += serial_number
        # Set the power level to itself multiplied by the rack ID.
        power_level *= rack_id
        # Keep only the hundreds digit of the power level
        # (so 12345 becomes 3; numbers with no hundreds digit become 0).
        power_level = (power_level // 100) % 10
        # Subtract 5 from the power level.
        power_level -= 5
        return power_level

    def find_max_3_square(self):
        """Print the power and 1-based top-left corner of the best 3x3 square."""
        max_power_level = 0
        max_location = (0, 0)
        # BUG FIX: the previous ranges stopped at len - 3, skipping the last
        # valid top-left position; a 3x3 square fits at every index up to
        # len - 3 inclusive, i.e. range(len - 2).
        for y in range(len(self.grid) - 2):
            for x in range(len(self.grid[y]) - 2):
                square_power_level = self.power_level_3_square(x, y)
                if square_power_level > max_power_level:
                    max_power_level = square_power_level
                    max_location = (x + 1, y + 1)
        print(f"Max Power Level is {max_power_level}")
        print(f"Max Location is {max_location}")

    def power_level_3_square(self, x, y):
        """Return the total power of the 3x3 square with 0-based top-left (x, y)."""
        return self.power_level_square(x, y, 3)

    def find_max_square(self):
        """Brute-force search over all square sizes (slow; kept for reference)."""
        max_power_level = 0
        max_power_size = 0
        max_location = (0, 0)
        for size in range(1, len(self.grid) + 1):
            for y in range(len(self.grid) - size + 1):
                for x in range(len(self.grid[y]) - size + 1):
                    square_power_level = self.power_level_square(x, y, size)
                    if square_power_level > max_power_level:
                        max_power_level = square_power_level
                        max_power_size = size
                        max_location = (x + 1, y + 1)
            print(f"Calculated max for size {size}")
        print(f"Max Power Level is {max_power_level}")
        print(f"Max Location is {max_location}")
        print(f"Square size is {max_power_size}")

    def find_max_square_optimized(self):
        """
        Find the best square of any size by growing each size-k square sum
        from the size-(k-1) sums (adding one new right column and bottom
        row per step), then print the best power, location and size.
        """
        max_power_level = 0
        max_power_size = 0
        max_location = (0, 0)
        # previous_square_sums[y][x] holds the sum of the (size-1)-square
        # whose top-left corner is (x, y); size 1 is the grid itself.
        previous_square_sums = self.grid.copy()
        for size in range(2, len(self.grid) + 1):
            nested_square_sums = list()
            # float("-inf") replaces the old magic sentinel; it is always
            # overwritten because every size has at least one position.
            local_max_power_level = float("-inf")
            local_max_location = (0, 0)
            for y in range(len(previous_square_sums) - 1):
                nested_square_sums.append(list())
                for x in range(len(previous_square_sums) - 1):
                    square_power_level = self.find_square_sum(x, y, size, previous_square_sums)
                    nested_square_sums[y].append(square_power_level)
                    if square_power_level > local_max_power_level:
                        local_max_power_level = square_power_level
                        local_max_location = (x + 1, y + 1)
                    if square_power_level > max_power_level:
                        max_power_level = square_power_level
                        max_power_size = size
                        max_location = (x + 1, y + 1)
            previous_square_sums = nested_square_sums
            print(f"Calculated size {size}")
            print(f"Local Max Power Level is {local_max_power_level}")
            print(f"Local Max Power Location is {local_max_location}")
        print(f"Best Square size is {max_power_size}")
        print(f"Max Power Level is {max_power_level}")
        print(f"Max Location is {max_location}")

    def find_square_sum(self, x, y, size, previous_square_sums):
        """
        Extend the (size-1)-square sum at (x, y) to a size-square sum by
        adding the new right column and bottom row; their shared corner is
        added twice, so it is subtracted once.
        """
        nested_square = previous_square_sums[y][x]
        square_sum = nested_square
        # Add the new rightmost column.
        for row in range(y, y + size):
            square_sum += self.grid[row][x + size - 1]
        # Add the new bottom row.
        for col in range(x, x + size):
            square_sum += self.grid[y + size - 1][col]
        # The bottom-right corner was counted in both passes above.
        square_sum -= self.grid[y + size - 1][x + size - 1]
        return square_sum

    def power_level_square(self, x, y, size):
        """Return the total power of the size x size square at 0-based (x, y)."""
        return sum(
            self.grid[row][col]
            for row in range(y, y + size)
            for col in range(x, x + size)
        )

    def pretty_print(self):
        """Print the grid one row per line (debug helper)."""
        for i in range(len(self.grid)):
            print(self.grid[i])
# Build the full 300x300 grid for this puzzle input (serial number 4151).
grid = PowerCellGrid(300, 300, 4151)
# Part 1: best 3x3 square.
grid.find_max_3_square()
# Part 2: best square of any size (brute force kept for reference).
# grid.find_max_square()
grid.find_max_square_optimized()
| StarcoderdataPython |
9745942 | #!/bin/python3
import sys
# Read the array length and the space-separated values from stdin.
n = int(input().strip())
a = list(map(int, input().strip().split(' ')))

# Bubble sort, counting the number of element swaps performed.
swapCount = 0
for i in range(n):
    swapped = False
    for j in range(n - 1):
        if a[j] > a[j + 1]:
            # Idiomatic tuple swap replaces the three-line temp shuffle.
            a[j], a[j + 1] = a[j + 1], a[j]
            swapCount += 1
            swapped = True
    if not swapped:
        # Early exit: a full pass with no swaps means the array is sorted.
        # (Does not change swapCount or the printed output.)
        break

print("Array is sorted in " + '%d' % swapCount + " swaps.")
print("First Element: " + '%s' % a[0])
print("Last Element: " + '%s' % a[-1])
| StarcoderdataPython |
372809 | <filename>flask_app/app.py
from pprint import pprint as pp
from pandas.core.indexes.datetimes import date_range
import requests
from flask import Flask, flash, redirect, url_for, Response, request
import os
import pandas as pd
import numpy as np
from query_from_models import predict_json, create_input_for_model
from access_gcp_data import concat_past_and_new_data, concat_past_and_new_prediction, delete_past_data_from_bucket, create_new_file_in_bucket, get_data_from_bucket_as_dataframe
import json
app = Flask(__name__)
# change for your GCP key (service-account JSON used by the GCS/AI Platform clients)
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "flask-app-test-317210-bdec872c665d.json"
PROJECT = "flask-app-test-317210" # change for your GCP project
# change for your GCP region (where your model is hosted)
REGION = "us-central1"
# One deployed model per forecast horizon (1 to 5 hours ahead).
MODEL = ['hour_1', 'hour_2', 'hour_3', 'hour_4', 'hour_5']
# The predict endpoint needs to:
#  - fetch past_data.csv and transform it into model input
#  - build the input from the last 48 hours of data and send the latest
#    5-hour window to the five models
#  - concatenate the 5 predictions into a list
#  - save the 5-hour forecast to current_prediction.csv (overwritten on
#    every call)
#  - append the first (1-hour-ahead) prediction to past_prediction.csv,
#    which accumulates all predictions made since the program started
#  - return the prediction list
@app.route('/predict_five', methods=['GET'])
def predict_five():
    """
    Predict AQI_h for the next five hours from the latest stored data.

    Pulls past_data.csv from the bucket, builds the model input from the
    most recent rows, queries the five per-horizon models, persists both
    the 5-hour forecast (current_prediction.csv) and the rolling
    1-hour-ahead history (past_prediction.csv), and returns the five
    predictions as JSON. Intended to run every hour.
    """
    data_df = get_data_from_bucket_as_dataframe()
    data_df = data_df.tail(50)
    data_df = data_df.astype({'time': 'datetime64[ns]', 'AQI_h': 'float'})
    data_df.set_index(['site_id', 'time'], inplace=True)
    all_result = np.array([])
    all_hour = np.array([])
    predict_data, label = create_input_for_model(
        data_df, timesteps=[5], target_hour=[0])
    # Use only the latest window, reshaped to a single-sample batch.
    tmp_predict_data = predict_data[-1].copy()
    tmp_predict_data = np.reshape(
        tmp_predict_data, (1, predict_data.shape[1], predict_data.shape[2]))
    for target_hour in range(0, 5):
        preds = predict_json(project=PROJECT,
                             region=REGION,
                             model=MODEL[target_hour],
                             instances=tmp_predict_data)
        all_result = np.append(all_result, preds)
        all_hour = np.append(all_hour, MODEL[target_hour])
    # Timestamps for the next five hours after the latest observation.
    daterange = pd.date_range(
        start=data_df.iloc[-1].name[1] + pd.Timedelta(hours=1),
        end=data_df.iloc[-1].name[1] + pd.Timedelta(hours=5),
        freq='H', name="time")
    # BUG FIX: the previous DataFrame([all_hour, all_result], index=...,
    # columns=[...]) call produced a 2x5 data block against a 5-row index
    # and 2-column spec and raised a shape ValueError; build the frame
    # column-wise from a dict instead.
    all_result_df = pd.DataFrame(
        {'hour_model': all_hour, 'AQI_h': all_result}, index=daterange)
    all_result_df.to_csv('current_prediction.csv')
    delete_past_data_from_bucket(delete_file_name="current_prediction.csv")
    create_new_file_in_bucket(upload_file='current_prediction.csv')
    # Keep only the 1-hour-ahead forecast in the long-running history file.
    prediction_file = concat_past_and_new_prediction(all_result_df.head(1))
    prediction_file.to_csv('past_prediction.csv')
    delete_past_data_from_bucket(delete_file_name="past_prediction.csv")
    create_new_file_in_bucket(upload_file='past_prediction.csv')
    return Response(json.dumps(list(all_result)), mimetype='application/json')
# The get-predict-result endpoint needs the recent labels and predictions:
#  - read past_prediction.csv and keep the 30 most recent entries
#  - read past_data.csv and keep the 30 most recent entries
#  - read current_prediction.csv (the live 5-hour forecast)
#  - return all of these predictions together
@app.route('/get_predict_result', methods=['GET'])
def get_predict_result():
    """
    Return the 30 most recent stored predictions, the matching 30 observed
    AQI_h values, and the current 5-hour forecast as one JSON payload
    (used by the front-end chart).
    """
    past_prediction = get_data_from_bucket_as_dataframe(
        filename="past_prediction.csv")
    past_prediction = past_prediction.astype({'AQI_h': 'float'})
    past_prediction = past_prediction.tail(30)
    past_real_data = get_data_from_bucket_as_dataframe(
        filename="past_data.csv")
    past_real_data = past_real_data.astype({'AQI_h': 'float'})
    past_real_data = past_real_data.tail(30)
    current_prediction = get_data_from_bucket_as_dataframe(
        filename="current_prediction.csv")
    # NOTE(review): numpy scalars inside these lists are not serializable
    # by the stdlib json encoder -- confirm the CSV round-trip yields
    # plain Python types before json.dumps.
    json_dict = {
        'past_prediction_time': list(past_prediction['time'].values),
        'past_prediction': list(past_prediction['AQI_h'].values),
        'past_real_data': list(past_real_data['AQI_h'].values),
        'current_prediction_time': list(current_prediction['time'].values),
        'current_prediction': list(current_prediction['AQI_h'].values)}
    return Response(json.dumps(json_dict), mimetype='application/json')
@app.route('/update', methods=['GET'])
def update():
    """update Update past_data.csv file in bucket to store new data gotten from AQINow API, this will be called every one hour

    Returns:
        None
    """
    if request.method == 'GET':
        merged = concat_past_and_new_data()
        merged.to_csv('past_data.csv')
        # Replace the bucket copy with the freshly merged file.
        delete_past_data_from_bucket()
        create_new_file_in_bucket(upload_file='past_data.csv')
        return Response(json.dumps(json.loads(merged.to_json())),
                        mimetype='application/json')
@app.route('/')
def hello():
    # Simple liveness endpoint for the service root.
    return 'Hello World!'
if __name__ == "__main__":
    # Run the Flask development server locally (debug mode enabled).
    app.run(host='127.0.0.1', port=8080, debug=True)
| StarcoderdataPython |
"""Package exports for the utility helpers: StopWatch, DateUtils, Counter."""
from .stopwatch import StopWatch
from .dateutils import DateUtils
from .counter import Counter

# Public API of the package.
__all__ = ['StopWatch', 'DateUtils', 'Counter']
179744 | <reponame>Jmast/kombu-redis-priority<filename>tests/scheduler/test_roundrobin.py<gh_stars>1-10
import unittest
from ddt import ddt, data
from kombu_redis_priority.scheduling.round_robin import RoundRobinQueueScheduler
@ddt
class TestRoundRobinQueueScheduler(unittest.TestCase):
    """Unit tests for RoundRobinQueueScheduler."""

    def test_round_robin_scheduler_gets_queue_at_top_of_list(self):
        # next() should hand out the first queue of the configured list.
        rr = RoundRobinQueueScheduler()
        rr.update(['TimeMachine', 'FluxCapacitor'])
        self.assertEqual('TimeMachine', rr.next())

    def test_round_robin_scheduler_next_with_empty(self):
        # With no queues configured, next() yields None.
        rr = RoundRobinQueueScheduler()
        rr.update([])
        self.assertEqual(None, rr.next())

    def test_round_robin_scheduler_update_sets_internal_list(self):
        rr = RoundRobinQueueScheduler()
        rr.update(['TimeMachine', 'FluxCapacitor'])
        self.assertEqual(['TimeMachine', 'FluxCapacitor'], rr.cycle.items)

    @data(True, False)
    def test_round_robin_scheduler_rotate_rotates_queue_regardless_of_emptiness(self, was_empty):
        # Rotation happens whether or not the consumed queue was empty.
        rr = RoundRobinQueueScheduler()
        rr.update(['TimeMachine', 'FluxCapacitor'])
        rr.rotate('TimeMachine', was_empty)
        self.assertEqual(['FluxCapacitor', 'TimeMachine'], rr.cycle.items)

    def test_round_robin_scheduler_rotate_full_rotation_empty(self):
        rr = RoundRobinQueueScheduler()
        rr.update(['TimeMachine', 'FluxCapacitor'])
        # No full rotation completed yet, so not fully empty.
        self.assertFalse(rr.rotate('TimeMachine', True))
        # A complete round trip with both queues empty.
        self.assertTrue(rr.rotate('FluxCapacitor', True))

    def test_round_robin_scheduler_rotate_full_rotation_state_tracking(self):
        rr = RoundRobinQueueScheduler()
        rr.update(['TimeMachine', 'FluxCapacitor', 'Delorean'])
        # No full rotation completed yet, so not fully empty.
        self.assertFalse(rr.rotate('TimeMachine', True))
        self.assertFalse(rr.rotate('FluxCapacitor', True))
        # Full rotation, but the last queue still held messages.
        self.assertFalse(rr.rotate('Delorean', False))
| StarcoderdataPython |
3512579 | from . import __version__ as app_version
# Basic app metadata consumed by the Frappe framework.
app_name = "slnee"
app_title = "Slnee"
app_publisher = "Weslati Baha Eddine"
app_description = "Custom apps developed by Slnee engineers"
app_icon = "octicon octicon-file-directory"
app_color = "grey"
app_email = "<EMAIL>"
app_license = "MIT"
app_logo_url = "/assets/slnee/images/logo.png"

# Document event hooks: run the colour hook before Company validation.
doc_events = {
    "Company": {
        "before_validate": "slnee.color.change_color"
    }
}

# Methods exposed to the Jinja templating environment (print formats etc.).
jenv = {
    "methods": [
        "getqrcode:slnee.fatoora.getqrcode",
        "test:slnee.test.test",
        "money:slnee.data.money_in_words",
        "encrypt:slnee.utils.print_format.encrypt"
    ]
}

#website_context = {
#    "favicon" : "/assets/slnee/images/favicon.png",
#    "splash_image" : "/assets/slnee/images/logo.png",
#}

# Records exported with `bench export-fixtures` and re-imported on install.
# NOTE(review): "Translation" is listed twice — confirm the duplicate is
# intentional.
fixtures = ["Custom Field",
    #{"dt":"Print Format","filters":[["name","like","VAT E-invoice KSA"]]},
    #{"dt":"Server Script","filters":[["name","like","columns number"]]},
    {"dt":"Font"},
    {"dt":"Translation"},
    {"dt":"Navbar Settings"},
    {"dt":"Website Settings"},
    #{"dt":"File", "filters":[["attached_to_field","like","flag"]]},
    {"dt":"Country"},
    {"dt":"Report", "filters":[['name','in',['Fonts','Sales Analytics']]]},
    {"dt":"Workspace","filters":[['name','in',['Accounting','HR']]]},
    {"dt":"Translation"}
    #{"dt":"Custom Print Format","filters":[["name","like","invoice"]]},
    #{"dt":"div","filters":[["parent","like","invoice"]]},
    #{"dt":"Print Format","filters":[["name","like","invoice"]]}
    ]
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
#app_include_css = "/assets/slnee/css/printview.css"
# app_include_js = "/assets/slnee/js/slnee.js"
# include js, css files in header of web template
# web_include_css = "/assets/slnee/css/slnee.css"
# web_include_js = "/assets/slnee/js/slnee.js"
# include custom scss in every website theme (without file extension ".scss")
# website_theme_scss = "slnee/public/scss/website"
# include js, css files in header of web form
# webform_include_js = {"doctype": "public/js/doctype.js"}
# webform_include_css = {"doctype": "public/css/doctype.css"}
# include js in page
# page_js = {"page" : "public/js/file.js"}
# include js in doctype views
# doctype_js = {"doctype" : "public/js/doctype.js"}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "slnee.install.before_install"
# after_install = "slnee.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "slnee.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# DocType Class
# ---------------
# Override standard doctype classes
# override_doctype_class = {
# "ToDo": "custom_app.overrides.CustomToDo"
# }
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
# Scheduled Tasks
# ---------------
# scheduler_events = {
# "all": [
# "slnee.tasks.all"
# ],
# "daily": [
# "slnee.tasks.daily"
# ],
# "hourly": [
# "slnee.tasks.hourly"
# ],
# "weekly": [
# "slnee.tasks.weekly"
# ]
# "monthly": [
# "slnee.tasks.monthly"
# ]
# }
# Testing
# -------
# before_tests = "slnee.install.before_tests"
# Overriding Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "slnee.event.get_events"
# }
#
# each overriding function accepts a `data` argument;
# generated from the base implementation of the doctype dashboard,
# along with any modifications made in other Frappe apps
# override_doctype_dashboards = {
# "Task": "slnee.task.get_dashboard_data"
# }
# exempt linked doctypes from being automatically cancelled
#
# auto_cancel_exempted_doctypes = ["Auto Repeat"]
# User Data Protection
# --------------------
# GDPR user-data declaration. NOTE(review): the "{doctype_N}" / "{field_N}"
# entries are unfilled template placeholders from the app scaffold — replace
# them with real doctypes before relying on data anonymisation.
user_data_fields = [
    {
        "doctype": "{doctype_1}",
        "filter_by": "{filter_by}",
        "redact_fields": ["{field_1}", "{field_2}"],
        "partial": 1,
    },
    {
        "doctype": "{doctype_2}",
        "filter_by": "{filter_by}",
        "partial": 1,
    },
    {
        "doctype": "{doctype_3}",
        "strict": False,
    },
    {
        "doctype": "{doctype_4}"
    }
]
# Authentication and authorization
# --------------------------------
# auth_hooks = [
# "slnee.auth.validate"
# ]
| StarcoderdataPython |
3460788 | <reponame>yotamitai/AuToPN<gh_stars>0
from itertools import combinations
from tools import *
def get_similar(sim, p0, p1):
    """Record that plans ``p0`` and ``p1`` are similar.

    ``sim`` maps a representative plan to the list of plans in its
    similarity group. If ``p0`` already represents a group, ``p1`` is
    appended to it; if ``p0`` is already a member of some other group,
    nothing is done; otherwise a new group ``[p0, p1]`` is created.
    """
    if p0 in sim.keys():
        sim[p0].append(p1)
    else:
        # Only start a new group if p0 is not already part of one.
        flag = True
        for v in sim.values():
            if p0 in v:
                flag = False
                break
        # BUG FIX: the original re-assigned ``flag = True`` here, which
        # made the membership scan above a no-op and always created a
        # new group.
        if flag:
            sim[p0] = [p0, p1]
def action_diversity(set_plans, plans_list):
    """Pairwise action-set diversity between plans.

    For every pair of plans the distance is
    |symmetric difference of action sets| / (|set1| + |set2|).
    Returns (distance dict keyed by plan pair, dict of groups of plans
    whose action sets are identical).
    """
    a_d = {}
    similar_plans = []
    for plan in combinations(plans_list, 2):
        p1 = set_plans[plan[0]]
        p2 = set_plans[plan[1]]
        sym_diff = len(p1.symmetric_difference(p2))
        accumulated = len(p1) + len(p2)
        distance = float(sym_diff) / accumulated
        a_d[plan] = distance
        if not sym_diff:
            # Identical action sets: record the pair as "similar".
            # NOTE(review): once similar_plans is non-empty, a pair that
            # matches no existing group is silently dropped (the else binds
            # to `if similar_plans`, not to the for loop) — confirm intent.
            if similar_plans:
                for grp in similar_plans:
                    if plan[0] in grp or plan[1] in grp:
                        grp += [plan[0], plan[1]]
            else:
                similar_plans += [[plan[0], plan[1]]]
    # Deduplicate group members (set()) and expose groups by index.
    similar_dict = {}
    for i in range(len(similar_plans)):
        grp = set(tuple(similar_plans[i]))
        similar_dict[i] = tuple(grp)
    return a_d, similar_dict
def landmark_diversity(plans_string, landmark_list, plan_lm_dict):
    """Pairwise landmark-based diversity between plans.

    For every pair of plans, sum over all landmarks the ratio
    |symmetric difference| / |union| of the landmark facts each plan
    achieved, normalised by the number of disjunctive landmarks (those
    with more than one member).  Returns (distance dict, empty dict).
    """
    distances = {}
    # Number of disjunctive landmarks used for normalisation.
    n_disjunctive = sum(1 for lm in landmark_list if len(lm) > 1)
    for first, second in combinations(plans_string, 2):
        total = 0
        for lm in landmark_list:
            idx = landmark_list.index(lm)
            achieved_a = set(plan_lm_dict[first][idx])
            achieved_b = set(plan_lm_dict[second][idx])
            differing = len(achieved_a ^ achieved_b)
            combined = len(achieved_a | achieved_b)
            total += float(differing) / combined
        distances[(first, second)] = 1.0 / n_disjunctive * total
    # Second return value kept for interface symmetry with action_diversity.
    return distances, {}
def maximal_set(dist_mat, plans_list, k, plans, zero_diversity_dict):
    """Select the plan set of maximal diversity.

    When OPTIMIZATIONS['PLANSET_DIV'] is 'Max', every k-subset of the
    non-zero-diversity candidates is scored with Div and one subset with
    the best score is picked (ties broken at random); otherwise the full
    plan list is scored as a whole.  Returns (chosen plan set, its
    diversity score, number of zero-diversity plans beyond the one
    representative allowed per similarity cluster).
    """
    if OPTIMIZATIONS['PLANSET_DIV'] == 'Max':
        candidates = get_non_zero_plans(plans_list, k, zero_diversity_dict, plans)
        scores = {subset: Div(subset, dist_mat)
                  for subset in combinations(candidates, k)}
        max_val = max(scores.values())
        tied = [subset for subset, score in scores.items() if score == max_val]
        """get random max value plan pair:"""
        set_plans = random.choice(tied)
    else:
        max_val = Div(plans_list, dist_mat)
        set_plans = plans_list
    # Collect every member of every zero-diversity cluster.
    zero_members = []
    for cluster in zero_diversity_dict.values():
        zero_members.extend(cluster)
    num_zero_plans = sum(1 for p in set_plans if p in zero_members) - len(zero_diversity_dict)
    return set_plans, max_val, num_zero_plans
def get_non_zero_plans(p_list, k, zero_dict, plans):
    """removes plans that have zero diversity and returns the relevant plans"""
    new_list = []
    zero_list = []
    """add 1 plan from each plan cluster"""
    for z in zero_dict:
        if OPTIMIZATIONS['SHORT-PLANS']:
            """we prefer saving the first plans as they will more often be shorter and better"""
            # Pick the shortest plan of the cluster as its representative.
            s = [len(plans[y]) for y in zero_dict[z]]
            idx = s.index(min(s))
            new_list.append(zero_dict[z][idx])
        else:
            new_list.append(random.choice(zero_dict[z]))
        zero_list += zero_dict[z]
    """add all unique plans"""
    unique_list = [x for x in p_list if x not in zero_list]
    for u in unique_list:
        new_list.append(u)
    """if needed, add more similar plans"""
    # Pad with additional cluster members until k plans are available.
    # NOTE(review): if every zero-diversity plan is already in new_list,
    # random.choice([]) raises IndexError — confirm k can never exceed
    # the total number of distinct plans here.
    for i in range(k-len(new_list)):
        y = [j for j in zero_list if j not in new_list]
        x = random.choice(y)
        new_list.append(x)
    return sorted(new_list)
def Div(plan_set, dist_mat):
    """Div(Pi) - The diversity score of a given plan set Pi:
    the average pairwise distance over all plan pairs of the set."""
    pair_distances = [dist_mat[pair] for pair in combinations(plan_set, 2)]
    return sum(pair_distances) / len(pair_distances)
def get_plan_states():
    """Parse PLAN_OUTPUT_DIR/states.py into {plan index: list of states}.

    Each state is a list of fact strings (semicolon separated in the file);
    a "],[" marker line starts the next plan's state sequence.
    NOTE(review): the slice offsets (x[14:], x[3:]) encode the exact file
    layout produced by the planner — verify against the generator.
    """
    abs_file_path = DIRECTORIES["PLAN_OUTPUT_DIR"] + '/states.py'
    plan_states = {}
    i = 0
    file_object = open(abs_file_path, 'r')
    x = file_object.readline()
    my_list = []
    # First line carries a 14-character prefix before the state facts.
    x = x[14:].replace(' ', '').replace('"', '').replace("\n", "").split(';')
    my_list.append(x)
    x = file_object.readline()
    while x:
        x = x.replace(' ', '').replace('"', '').replace("\n", "").split(';')
        my_list.append(x)
        x = file_object.readline()
        if "],[" in x:
            # Separator reached: store the finished plan and start a new one.
            plan_states[i] = my_list
            i += 1
            my_list = []
            x = x[3:]
    # Store the last plan (dropping the trailing sentinel entry).
    plan_states[i] = my_list[:-1]
    file_object.close()
    return plan_states
def get_landmark_diversity(plans, new_plan_states, n_set_plans, landmark_set):
    """for each plan, find its specific disjunctive landmarks from the state space of the plan"""
    # Map each plan id to {landmark index: achieved landmark facts}.
    plans_lm_dict = {}
    for i in plans:
        plan = new_plan_states[i - 1]
        plan_lm = defaultdict(lambda: [])
        for state in plan:
            for disjunc_lm in landmark_set:
                disj_i = landmark_set.index(disjunc_lm)
                for lm in disjunc_lm:
                    if lm in plan_lm[disj_i]:
                        continue
                    if lm in state:
                        plan_lm[disj_i].append(lm)
        plans_lm_dict[i] = plan_lm
    """create plan "combinations" string"""
    plan_list = sorted(plans_lm_dict.keys())
    """get diversity scores"""
    plan_distances, no_diversity_dict = landmark_diversity(plan_list, landmark_set, plans_lm_dict)
    """find maximal diversity"""
    plan_set, lm_diversity_score, n_zero_div_plans = maximal_set(plan_distances, plan_list, n_set_plans,
                                                                 plans, no_diversity_dict)
    """return diversity"""
    best_diversity_score = lm_diversity_score
    return best_diversity_score, plan_set, plan_distances
def get_action_diversity(plans, n_set_plans):
    """get the set of unique actions in each plan"""
    # Build per-plan action sets; temporal plans store (action, ...) tuples.
    set_plans = {}
    for i in plans:
        if TEST_PARAMS['PROBLEM_TYPE'] == 'Temporal':
            set_plans[i] = set(([x[0] for x in plans[i].actions]))
        else:
            set_plans[i] = set(plans[i])
    plan_list = sorted(set_plans.keys())
    """get diversity scores"""
    plan_distances, no_diversity_dict = action_diversity(set_plans, plan_list)
    """find maximal diversity"""
    plan_set, diversity_of_planset, n_zero_div_plans = maximal_set(plan_distances, plan_list, n_set_plans,
                                                                   plans, no_diversity_dict)
    """return diversity"""
    return diversity_of_planset, plan_set, plan_distances
def get_landmark_set():
    """Parse PLAN_OUTPUT_DIR/landmarks.py into a list of disjunctive
    landmarks (each a list of fact strings).

    NOTE(review): the first line is skipped and each subsequent read drops
    a 2-character prefix ([2:]) — this mirrors the exact file layout
    produced by the landmark generator; verify against it.
    """
    abs_file_path = DIRECTORIES["PLAN_OUTPUT_DIR"] + '/landmarks.py'
    landmarks = []
    file_object = open(abs_file_path, 'r')
    x = file_object.readline()
    x = file_object.readline()
    while x:
        x = x.replace("\n", "").replace(' ', '').replace('"', '').split(';')
        landmarks.append(x)
        x = file_object.readline()[2:]
    file_object.close()
    return landmarks
def get_plan_set(plans, n_set_plans):
    """Retrieve the most diverse plan set using action-set diversity.

    Returns (plan ids as a list, diversity score of the whole set,
    maximum pairwise diversity inside the set).
    """
    # TODO: landmark diversity is not implemented for the temporal setting
    # (see get_landmark_diversity / get_plan_states / get_landmark_set);
    # only action-set diversity is used here.
    planset_div, chosen, pair_dists = get_action_diversity(plans, n_set_plans)
    # Largest diversity between any two plans of the chosen set.
    max_pair_div = max(pair_dists[pair] for pair in combinations(chosen, 2))
    return list(chosen), planset_div, max_pair_div
| StarcoderdataPython |
6473117 | <gh_stars>0
from django.db import models
class Idol(models.Model):
    # Display name; must be unique across idols.
    name = models.CharField(unique=True, max_length=255)
    # Associated Google Calendar identifier, optional.
    # NOTE(review): field name keeps the original spelling "calender".
    google_calender = models.CharField(max_length=255, null=True)
    # Whether this record represents a group rather than an individual.
    is_group = models.BooleanField(default=False)
| StarcoderdataPython |
11262454 | # Databricks notebook source
# MAGIC %md # CCU002_02-D07-covid19
# MAGIC
# MAGIC **Description** This notebook determines the COVID19 infection and hospital outcomes.
# MAGIC
# MAGIC **Author(s)** <NAME>
# COMMAND ----------
# MAGIC %md ## Define functions
# COMMAND ----------
# Define create table function by <NAME>
# Source: Workspaces/dars_nic_391419_j3w9t_collab/DATA_CURATION_wrang000_functions
def create_table(table_name:str, database_name:str='dars_nic_391419_j3w9t_collab', select_sql_script:str=None) -> None:
    """Will save to table from a global_temp view of the same name as the supplied table name (if no SQL script is supplied)
    Otherwise, can supply a SQL script and this will be used to make the table with the specified name, in the specified database."""
    # Allow CREATE TABLE over a non-empty managed-table location (Spark legacy flag).
    spark.conf.set("spark.sql.legacy.allowCreatingManagedTableUsingNonemptyLocation","true")
    if select_sql_script is None:
        # Default: materialise the identically named global temporary view.
        select_sql_script = f"SELECT * FROM global_temp.{table_name}"
    spark.sql(f"""CREATE TABLE {database_name}.{table_name} AS
                {select_sql_script}
              """)
    # Transfer ownership to the database name (presumably a role of the same name).
    spark.sql(f"ALTER TABLE {database_name}.{table_name} OWNER TO {database_name}")
def drop_table(table_name:str, database_name:str='dars_nic_391419_j3w9t_collab', if_exists=True):
    """Drop ``database_name.table_name``; guard with IF EXISTS unless disabled."""
    guard = 'IF EXISTS' if if_exists else ''
    spark.sql(f"DROP TABLE {guard} {database_name}.{table_name}")
# COMMAND ----------
# MAGIC %md ## Define COVID19 events
# COMMAND ----------
# MAGIC %sql -- Create global temporary view containing all confirmed COVID19 diagnoses
# MAGIC CREATE
# MAGIC OR REPLACE GLOBAL TEMPORARY VIEW ccu002_vacc_covid19 AS
# MAGIC -- Events from HES admitted patient care
# MAGIC SELECT PERSON_ID_DEID AS NHS_NUMBER_DEID,
# MAGIC min(EPISTART) AS DATE,
# MAGIC "HES_APC" AS SOURCE,
# MAGIC "confirmed" AS STATUS
# MAGIC FROM dars_nic_391419_j3w9t_collab.ccu001_hes_apc_all_years
# MAGIC WHERE ((DIAG_4_CONCAT RLIKE 'U071') OR (DIAG_4_CONCAT RLIKE 'U07.1'))
# MAGIC GROUP BY NHS_NUMBER_DEID, SOURCE, STATUS
# MAGIC -- Events from HES outpatient care
# MAGIC UNION ALL
# MAGIC SELECT PERSON_ID_DEID AS NHS_NUMBER_DEID,
# MAGIC min(APPTDATE) AS DATE,
# MAGIC "HES_OP" AS SOURCE,
# MAGIC "confirmed" AS STATUS
# MAGIC FROM dars_nic_391419_j3w9t_collab.ccu001_hes_op_all_years
# MAGIC WHERE ((DIAG_4_CONCAT RLIKE 'U071') OR (DIAG_4_CONCAT RLIKE 'U07.1'))
# MAGIC GROUP BY NHS_NUMBER_DEID, SOURCE, STATUS
# MAGIC -- Lab results from primary care
# MAGIC UNION ALL
# MAGIC SELECT NHS_NUMBER_DEID,
# MAGIC min(DATE) AS DATE,
# MAGIC "GDPPR" AS SOURCE,
# MAGIC "confirmed_lab" AS STATUS
# MAGIC FROM dars_nic_391419_j3w9t_collab.ccu001_gdppr_dars_nic_391419_j3w9t
# MAGIC WHERE CODE IN (SELECT code
# MAGIC FROM dars_nic_391419_j3w9t_collab.ccu001_codelists
# MAGIC WHERE codelist = 'covid19_lab_confirmed_incidence')
# MAGIC GROUP BY NHS_NUMBER_DEID, SOURCE, STATUS
# MAGIC -- Events from primary care
# MAGIC UNION ALL
# MAGIC SELECT NHS_NUMBER_DEID,
# MAGIC min(DATE) AS DATE,
# MAGIC "GDPPR" AS SOURCE,
# MAGIC "confirmed" AS STATUS
# MAGIC FROM dars_nic_391419_j3w9t_collab.ccu001_gdppr_dars_nic_391419_j3w9t
# MAGIC WHERE CODE IN (SELECT code
# MAGIC FROM dars_nic_391419_j3w9t_collab.ccu001_codelists
# MAGIC WHERE codelist = 'covid19_clinically_confirmed')
# MAGIC -- Test results from SGSS
# MAGIC UNION ALL
# MAGIC SELECT Person_ID_DEID AS NHS_NUMBER_DEID,
# MAGIC min(specimen_date) AS DATE,
# MAGIC "SGSS" AS SOURCE,
# MAGIC (CASE WHEN REPORTING_LAB_ID = '840' THEN 'confirmed_pillar2' ELSE 'confirmed_pillar1' END) AS STATUS
# MAGIC FROM dars_nic_391419_j3w9t_collab.ccu001_sgss_dars_nic_391419_j3w9t
# MAGIC WHERE Person_ID_DEID IS NOT NULL
# MAGIC GROUP BY NHS_NUMBER_DEID, SOURCE, STATUS
# COMMAND ----------
# MAGIC %md ## Save COVID19 events
# COMMAND ----------
# Replace global temp view of COVID19 cases with table
# (drop any previous copy first so the CREATE cannot collide).
drop_table('ccu002_vacc_covid19')
create_table('ccu002_vacc_covid19')
| StarcoderdataPython |
3358874 | <filename>sd_estimator/estimator.py
from .theoretical_estimates import *
from math import inf, ceil, log2, comb
from prettytable import PrettyTable
from progress.bar import Bar
from scipy.special import binom as binom_sp
from scipy.optimize import fsolve
from warnings import filterwarnings
filterwarnings("ignore", category=RuntimeWarning)
def binom(n, k):
    """Binomial coefficient C(n, k); both arguments are coerced to int."""
    n, k = int(n), int(k)
    return comb(n, k)
def __truncate(x, precision):
    """
    Truncates a float (toward zero)

    INPUT:

    - ``x`` -- value to be truncated
    - ``precision`` -- number of decimal places to after which the ``x`` is truncated

    """
    scale = 10 ** precision
    return float(int(x * scale) / scale)
def __concat_pretty_tables(t1, t2):
    """Glue two equally-tall pretty-table strings together side by side.

    The first character of every line of ``t2`` (its left border) is
    dropped so the two tables share one border column.
    """
    left = t1.split("\n")
    right = t2.split("\n")
    merged = [left[i] + right[i][1:] for i in range(len(left))]
    return "\n".join(merged)
def __round_or_truncate_to_given_precision(T, M, truncate, precision):
    """Format time ``T`` and memory ``M`` to ``precision`` decimal places.

    When ``truncate`` is truthy the values are truncated toward zero,
    otherwise rounded; both are returned as fixed-point strings.
    """
    if truncate:
        T, M = __truncate(T, precision), __truncate(M, precision)
    else:
        T, M = round(T, precision), round(M, precision)
    return '{:.{p}f}'.format(T, p=precision), '{:.{p}f}'.format(M, p=precision)
def __memory_access_cost(mem, memory_access):
    """Additive (log-scale) cost charged per memory access.

    ``memory_access``: 0 constant, 1 logarithmic, 2 square-root,
    3 cube-root, or a callable taking the logarithm of the memory usage.
    """
    if callable(memory_access):
        return memory_access(mem)
    if memory_access == 1:
        return log2(mem)
    if memory_access == 2:
        return mem / 2
    if memory_access == 3:
        return mem / 3
    return 0
def _gaussian_elimination_complexity(n, k, r):
    """
    Complexity estimate of Gaussian elimination routine

    INPUT:

    - ``n`` -- Row additions are performed on ``n`` coordinates
    - ``k`` -- Matrix consists of ``n-k`` rows
    - ``r`` -- Blocksize of method of the four Russians for inversion, default is zero

    [Bar07]_ <NAME>.: Algorithms for solving linear and polynomial systems of equations over finite fields
    with applications to cryptanalysis. Ph.D. thesis (2007)

    [BLP08] <NAME>., <NAME>., <NAME>.: Attacking and defending the mceliece cryptosystem.
    In: International Workshop on Post-Quantum Cryptography. pp. 31–46. Springer (2008)

    EXAMPLES::

        >>> from .estimator import _gaussian_elimination_complexity
        >>> _gaussian_elimination_complexity(n=100,k=20,r=1) # doctest: +SKIP

    """
    # With M4RI blocking: per-block cost times ceil((n + r - 1) / r) blocks.
    if r != 0:
        return (r ** 2 + 2 ** r + (n - k - r)) * int(((n + r - 1) / r))
    # Plain Gaussian elimination on an (n-k)-row system.
    return (n - k) ** 2
def _optimize_m4ri(n, k, mem=inf):
    """
    Find optimal blocksize for Gaussian elimination via M4RI

    INPUT:

    - ``n`` -- Row additions are performed on ``n`` coordinates
    - ``k`` -- Matrix consists of ``n-k`` rows
    - ``mem`` -- memory bound (log2)

    """
    (r, v) = (0, inf)
    for i in range(n - k):
        tmp = log2(_gaussian_elimination_complexity(n, k, i))
        # NOTE(review): the guard compares the *current best* block size r
        # against mem rather than the candidate i's memory footprint —
        # confirm this is the intended memory restriction.
        if v > tmp and r < mem:
            r = i
            v = tmp
    return r
def _mem_matrix(n, k, r):
    """
    Memory usage of parity check matrix in vector space elements

    INPUT:

    - ``n`` -- length of the code
    - ``k`` -- dimension of the code
    - ``r`` -- block size of M4RI procedure (adds 2^r precomputed rows)

    EXAMPLES::

        >>> from .estimator import _mem_matrix
        >>> _mem_matrix(n=100,k=20,r=0) # doctest: +SKIP

    """
    return n - k + 2 ** r
def _list_merge_complexity(L, l, hmap):
    """
    Complexity estimate of merging two lists exact

    INPUT:

    - ``L`` -- size of lists to be merged
    - ``l`` -- amount of bits used for matching
    - ``hmap`` -- indicates if hashmap is being used (Default 0: no hashmap)

    EXAMPLES::

        >>> from .estimator import _list_merge_complexity
        >>> _list_merge_complexity(L=2**16,l=16,hmap=1) # doctest: +SKIP

    """
    if L == 1:
        return 1
    # Expected number of colliding pairs on the l matching bits.
    collisions = L ** 2 // 2 ** l
    if hmap:
        # Hashmap: insert + probe both lists once.
        return 2 * L + collisions
    # Sort-and-match variant.
    return max(1, 2 * int(log2(L)) * L + collisions)
def _indyk_motwani_complexity(L, l, w, hmap):
    """
    Complexity of Indyk-Motwani nearest neighbor search

    INPUT:

    - ``L`` -- size of lists to be matched
    - ``l`` -- amount of bits used for matching
    - ``w`` -- target weight
    - ``hmap`` -- indicates if hashmap is being used (Default 0: no hashmap)

    EXAMPLES::

        >>> from .estimator import _indyk_motwani_complexity
        >>> _indyk_motwani_complexity(L=2**16,l=16,w=2,hmap=1) # doctest: +SKIP

    """
    # Weight 0: exact matching suffices.
    if w == 0:
        return _list_merge_complexity(L, l, hmap)
    # lam: size of the random projection window.
    lam = max(0, int(min(ceil(log2(L)), l - 2 * w)))
    # Number of expected projection repetitions times one merge each.
    return binom(l, lam) // binom(l - w, lam) * _list_merge_complexity(L, lam, hmap)
def _mitm_nn_complexity(L, l, w, hmap):
    """
    Complexity of the meet-in-the-middle nearest neighbor search

    INPUT:

    - ``L`` -- size of lists to be matched
    - ``l`` -- amount of bits used for matching
    - ``w`` -- target weight
    - ``hmap`` -- indicates if hashmap is being used (Default 0: no hashmap)

    EXAMPLES::

        >>> from .estimator import _mitm_nn_complexity
        >>> _mitm_nn_complexity(L=2**16,l=16,w=2,hmap=1) # doctest: +SKIP

    """
    # Weight 0: exact matching suffices.
    if w == 0:
        return _list_merge_complexity(L, l, hmap)
    # Expand each element by all weight-w/2 patterns on half the window.
    L1 = L * binom(l / 2, w / 2)
    return _list_merge_complexity(L1, l, hmap)
def prange_complexity(n, k, w, mem=inf, memory_access=0):
    """
    Complexity estimate of Prange's ISD algorithm

    [Pra62] <NAME>.: The use of information sets in decoding cyclic codes. IRE Transactions
    on Information Theory 8(5), 5–9 (1962)

    expected weight distribution::

        +--------------------------------+-------------------------------+
        | <----------+ n - k +---------> | <----------+ k +------------> |
        |                w               |              0                |
        +--------------------------------+-------------------------------+

    INPUT:

    - ``n`` -- length of the code
    - ``k`` -- dimension of the code
    - ``w`` -- Hamming weight of error vector
    - ``mem`` -- upper bound on the available memory (as log2(bits)), default unlimited
    - ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)

    EXAMPLES::

        >>> from .estimator import prange_complexity
        >>> prange_complexity(n=100,k=50,w=10) # doctest: +SKIP

    """
    # log2 of the expected number of solutions.
    solutions = max(0, log2(binom(n, w)) - (n - k))

    r = _optimize_m4ri(n, k, mem)
    # Tp: log2 of the expected number of permutations until the error
    # avoids the information set; Tg: cost of one Gaussian elimination.
    Tp = max(log2(binom(n, w)) - log2(binom(n - k, w)) - solutions, 0)
    Tg = log2(_gaussian_elimination_complexity(n, k, r))
    time = Tp + Tg
    memory = log2(_mem_matrix(n, k, r))

    time += __memory_access_cost(memory, memory_access)

    params = [r]

    par = {"r": params[0]}
    res = {"time": time, "memory": memory, "parameters": par}
    return res
def stern_complexity(n, k, w, mem=inf, hmap=1, memory_access=0):
    """
    Complexity estimate of Stern's ISD algorithm

    [Ste88]  <NAME>.: A method for finding codewords of small weight. In: International
    Colloquium on Coding Theory and Applications. pp. 106–113. Springer (1988)

    [BLP08] <NAME>., <NAME>., <NAME>.: Attacking and defending the mceliece cryptosystem.
    In: International Workshop on Post-Quantum Cryptography. pp. 31–46. Springer (2008)

    expected weight distribution::

        +-------------------------+---------+-------------+-------------+
        | <----+ n - k - l +----> |<-- l -->|<--+ k/2 +-->|<--+ k/2 +-->|
        |          w - 2p         |    0    |      p      |      p      |
        +-------------------------+---------+-------------+-------------+

    INPUT:

    - ``n`` -- length of the code
    - ``k`` -- dimension of the code
    - ``w`` -- Hamming weight of error vector
    - ``mem`` -- upper bound on the available memory (as log2), default unlimited
    - ``hmap`` -- indicates if hashmap is being used (default: true)
    - ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)

    EXAMPLES::

        >>> from .estimator import stern_complexity
        >>> stern_complexity(n=100,k=50,w=10) # doctest: +SKIP

    """
    # log2 of the expected number of solutions.
    solutions = max(0, log2(binom(n, w)) - (n - k))

    r = _optimize_m4ri(n, k, mem)
    time = inf
    memory = 0
    params = [-1 for i in range(2)]
    # Parameter-search ranges (grown on demand until the optimum lies inside).
    i_val = [20]
    i_val_inc = [10]
    k1 = k // 2
    while True:
        stop = True
        for p in range(min(k1, w // 2, i_val[0])):
            L1 = binom(k1, p)
            l_val = int(log2(L1))
            if log2(L1) > time:
                continue
            # Search l around its natural value log2(|L1|).
            for l in range(max(l_val - i_val_inc[0], 0), l_val + i_val_inc[0]):

                tmp_mem = log2(2 * L1 + _mem_matrix(n, k, r))
                if tmp_mem > mem:
                    continue

                Tp = max(0,
                         log2(binom(n, w)) - log2(binom(n - k, w - 2 * p)) - log2(binom(k1, p) ** 2) - solutions)

                # We use Indyk-Motwani (IM) taking into account the possibility of multiple existing solutions
                # with correct weight distribution, decreasing the amount of necessary projections
                # remaining_sol denotes the number of expected solutions per permutation
                # l_part_iterations is the expected number of projections need by IM to find one of those solutions
                remaining_sol = (binom(n - k, w - 2 * p) * binom(k1, p) ** 2 * binom(n, w) // 2 ** (n - k)) // binom(n,
                                                                                                                    w)
                l_part_iterations = binom(n - k, w - 2 * p) // binom(n - k - l, w - 2 * p)
                if remaining_sol > 0:
                    l_part_iterations //= max(1, remaining_sol)
                    l_part_iterations = max(1, l_part_iterations)

                Tg = _gaussian_elimination_complexity(n, k, r)
                tmp = Tp + log2(Tg + _list_merge_complexity(L1, l, hmap) * l_part_iterations)

                tmp += __memory_access_cost(tmp_mem, memory_access)

                time = min(time, tmp)

                if tmp == time:
                    memory = tmp_mem
                    params = [p, l]

        # Grow any search range whose optimum sat on its boundary.
        for i in range(len(i_val)):
            if params[i] == i_val[i] - 1:
                stop = False
                i_val[i] += i_val_inc[i]

        if stop:
            break

    par = {"l": params[1], "p": params[0]}
    res = {"time": time, "memory": memory, "parameters": par}
    return res
def dumer_complexity(n, k, w, mem=inf, hmap=1, memory_access=0):
    """
    Complexity estimate of Dumer's ISD algorithm

    [Dum91] <NAME>.:  On minimum distance decoding of linear codes. In: Proc. 5th Joint
    Soviet-Swedish Int. Workshop Inform. Theory. pp. 50–52 (1991)

    expected weight distribution::

        +--------------------------+------------------+-------------------+
        | <-----+ n - k - l +----->|<-- (k + l)/2 +-->|<--+ (k + l)/2 +-->|
        |           w - 2p         |        p         |         p         |
        +--------------------------+------------------+-------------------+

    INPUT:

    - ``n`` -- length of the code
    - ``k`` -- dimension of the code
    - ``w`` -- Hamming weight of error vector
    - ``mem`` -- upper bound on the available memory (as log2), default unlimited
    - ``hmap`` -- indicates if hashmap is being used (default: true)
    - ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)

    EXAMPLES::

        >>> from .estimator import dumer_complexity
        >>> dumer_complexity(n=100,k=50,w=10) # doctest: +SKIP

    """
    # log2 of the expected number of solutions.
    solutions = max(0, log2(binom(n, w)) - (n - k))
    time = inf
    memory = 0
    r = _optimize_m4ri(n, k, mem)

    # Parameter-search ranges (grown on demand until the optimum lies inside).
    i_val = [10, 40]
    i_val_inc = [10, 10]
    params = [-1 for _ in range(2)]
    while True:
        stop = True
        for p in range(min(w // 2, i_val[0])):
            for l in range(min(n - k - (w - p), i_val[1])):
                k1 = (k + l) // 2
                L1 = binom(k1, p)
                if log2(L1) > time:
                    continue

                tmp_mem = log2(2 * L1 + _mem_matrix(n, k, r))
                if tmp_mem > mem:
                    continue

                Tp = max(log2(binom(n, w)) - log2(binom(n - k - l, w - 2 * p)) - log2(binom(k1, p) ** 2) - solutions, 0)
                Tg = _gaussian_elimination_complexity(n, k, r)
                tmp = Tp + log2(Tg + _list_merge_complexity(L1, l, hmap))

                tmp += __memory_access_cost(tmp_mem, memory_access)

                time = min(time, tmp)
                if tmp == time:
                    memory = tmp_mem
                    params = [p, l]

        # Grow any search range whose optimum sat on its boundary.
        for i in range(len(i_val)):
            if params[i] == i_val[i] - 1:
                stop = False
                i_val[i] += i_val_inc[i]

        if stop:
            break

    par = {"l": params[1], "p": params[0]}
    res = {"time": time, "memory": memory, "parameters": par}
    return res
def ball_collision_decoding_complexity(n, k, w, mem=inf, hmap=1, memory_access=0):
    """
    Complexity estimate of the ball collision decoding algorithm

    [BLP11] <NAME>., <NAME>., <NAME>.: Smaller decoding exponents: ball-collision decoding.
    In: Annual Cryptology Conference. pp. 743–760. Springer (2011)

    expected weight distribution::

        +------------------+---------+---------+-------------+-------------+
        | <-+ n - k - l +->|<- l/2 ->|<- l/2 ->|<--+ k/2 +-->|<--+ k/2 +-->|
        |   w - 2p - 2pl   |   pl    |   pl    |      p      |      p      |
        +------------------+---------+---------+-------------+-------------+

    INPUT:

    - ``n`` -- length of the code
    - ``k`` -- dimension of the code
    - ``w`` -- Hamming weight of error vector
    - ``mem`` -- upper bound on the available memory (as log2), default unlimited
    - ``hmap`` -- indicates if hashmap is being used (default: true)
    - ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)

    EXAMPLES::

        >>> from .estimator import ball_collision_decoding_complexity
        >>> ball_collision_decoding_complexity(n=100,k=50,w=10) # doctest: +SKIP

    """
    # log2 of the expected number of solutions.
    solutions = max(0, log2(binom(n, w)) - (n - k))
    time = inf
    memory = 0
    r = _optimize_m4ri(n, k, mem)

    # Parameter-search ranges (grown on demand until the optimum lies inside).
    i_val = [10, 80, 4]
    i_val_inc = [10, 10, 10]
    params = [-1 for _ in range(3)]
    k1 = k // 2
    while True:
        stop = True
        for p in range(min(w // 2, i_val[0])):
            for l in range(min(n - k - (w - 2 * p), i_val[1])):
                for pl in range(min(i_val[2], (w - 2 * p) // 2, l // 2 + 1)):
                    L1 = binom(k1, p)
                    L1 *= max(1, binom(l // 2, pl))
                    if log2(L1) > time:
                        continue

                    tmp_mem = log2(2 * L1 + _mem_matrix(n, k, r))
                    if tmp_mem > mem:
                        continue

                    Tp = max(
                        log2(binom(n, w)) - log2(binom(n - k - l, w - 2 * p - 2 * pl)) - 2 * log2(
                            binom(k1, p)) - 2 * log2(
                            binom(l // 2, pl)) - solutions, 0)
                    Tg = _gaussian_elimination_complexity(n, k, r)
                    tmp = Tp + log2(Tg + _list_merge_complexity(L1, l, hmap))

                    tmp += __memory_access_cost(tmp_mem, memory_access)

                    time = min(time, tmp)
                    if tmp == time:
                        memory = tmp_mem
                        params = [p, pl, l]

        # Grow any search range whose optimum sat on its boundary.
        for i in range(len(i_val)):
            if params[i] == i_val[i] - 1:
                stop = False
                i_val[i] += i_val_inc[i]

        if stop:
            break

    par = {"l": params[2], "p": params[0], "pl": params[1]}
    res = {"time": time, "memory": memory, "parameters": par}
    return res
def bjmm_complexity(n, k, w, mem=inf, hmap=1, only_depth_two=0, memory_access=0):
    """
    Complexity estimate of the BJMM algorithm, returning the cheaper of the
    depth-2 and depth-3 instantiations.

    [MMT11] May, A., Meurer, A., Thomae, E.: Decoding random linear codes in 2^(0.054n). In: International Conference
    on the Theory and Application of Cryptology and Information Security. pp. 107-124. Springer (2011)
    [BJMM12] Becker, A., Joux, A., May, A., Meurer, A.: Decoding random binary linear codes in 2^(n/20): How 1+1=0
    improves information set decoding. In: Annual international conference on the theory and applications of
    cryptographic techniques. pp. 520-536. Springer (2012)

    expected weight distribution::

    +--------------------------+-------------------+-------------------+
    | <-----+ n - k - l +----->|<--+ (k + l)/2 +-->|<--+ (k + l)/2 +-->|
    |          w - 2p          |         p         |         p         |
    +--------------------------+-------------------+-------------------+

    INPUT:
    - ``n`` -- length of the code
    - ``k`` -- dimension of the code
    - ``w`` -- Hamming weight of error vector
    - ``mem`` -- upper bound on the available memory (as log2), default unlimited
    - ``hmap`` -- indicates if hashmap is being used (default: true)
    - ``only_depth_two`` -- if set, the depth-2 estimate is returned regardless of depth 3 (default: false)
    - ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)

    EXAMPLES::
    >>> from .estimator import bjmm_complexity
    >>> bjmm_complexity(n=100,k=50,w=10) # doctest: +SKIP
    """
    depth_two = bjmm_depth_2_complexity(n, k, w, mem, hmap, memory_access)
    depth_three = bjmm_depth_3_complexity(n, k, w, mem, hmap, memory_access)
    # Depth 2 wins when it is strictly faster or when the caller forces it.
    if only_depth_two or depth_two["time"] < depth_three["time"]:
        return depth_two
    return depth_three
def bjmm_depth_2_complexity(n, k, w, mem=inf, hmap=1, memory_access=0, mmt=0):
    """
    Complexity estimate of BJMM algorithm in depth 2
    [MMT11] <NAME>., <NAME>., <NAME>.: Decoding random linear codes in 2^(0.054n). In: International Conference
    on the Theory and Application of Cryptology and Information Security. pp. 107–124. Springer (2011)
    [BJMM12] <NAME>., <NAME>., <NAME>., <NAME>.: Decoding random binary linear codes in 2^(n/20): How 1+ 1= 0
    improves information set decoding. In: Annual international conference on the theory and applications of
    cryptographic techniques. pp. 520–536. Springer (2012)
    expected weight distribution::
    +--------------------------+-------------------+-------------------+
    | <-----+ n - k - l +----->|<--+ (k + l)/2 +-->|<--+ (k + l)/2 +-->|
    |          w - 2p          |         p         |         p         |
    +--------------------------+-------------------+-------------------+
    INPUT:
    - ``n`` -- length of the code
    - ``k`` -- dimension of the code
    - ``w`` -- Hamming weight of error vector
    - ``mem`` -- upper bound on the available memory (as log2), default unlimited
    - ``hmap`` -- indicates if hashmap is being used (default: true)
    - ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)
    - ``mmt`` -- restrict optimization to use of MMT algorithm (precisely enforce p1=p/2)
    EXAMPLES::
    >>> from .estimator import bjmm_depth_2_complexity
    >>> bjmm_depth_2_complexity(n=100,k=50,w=10) # doctest: +SKIP
    """
    # log2 of the expected number of solutions; subtracted below so the
    # estimate targets finding a single solution.
    solutions = max(0, log2(binom(n, w)) - (n - k))
    time = inf
    memory = 0
    # m4ri table parameter for fast Gaussian elimination, optimized under the memory bound.
    r = _optimize_m4ri(n, k, mem)
    # Initial upper bounds for the search ranges of (p, l, p1); a range is widened
    # by the matching i_val_inc entry whenever its optimum lies on the border.
    i_val = [35, 500, 35]
    i_val_inc = [10, 10, 10]
    params = [-1 for _ in range(3)]
    while True:  # repeat the grid search until no optimal parameter sits on a range border
        stop = True
        for p in range(max(params[0] - i_val_inc[0] // 2, 0), min(w // 2, i_val[0]), 2):
            for l in range(max(params[1] - i_val_inc[1] // 2, 0), min(n - k - (w - 2 * p), min(i_val[1], n - k))):
                for p1 in range(max(params[2] - i_val_inc[2] // 2, (p + 1) // 2), min(w, i_val[2])):
                    # MMT is the special case p1 = p/2 of BJMM.
                    if mmt and p1 != p // 2:
                        continue
                    k1 = (k + l) // 2
                    # L1: size of the base lists.
                    L1 = binom(k1, p1)
                    if log2(L1) > time:
                        continue
                    if k1 - p < p1 - p / 2:
                        continue
                    # Number of representations of the weight-p target as a sum of two weight-p1 vectors.
                    reps = (binom(p, p / 2) * binom(k1 - p, p1 - p / 2)) ** 2
                    # Merge on l1 bits so that in expectation one representation survives.
                    l1 = int(ceil(log2(reps)))
                    if l1 > l:
                        continue
                    L12 = max(1, L1 ** 2 // 2 ** l1)
                    tmp_mem = log2((2 * L1 + L12) + _mem_matrix(n, k, r))
                    if tmp_mem > mem:
                        continue
                    # Tp: log2 of the expected number of permutations until the
                    # assumed weight distribution is hit.
                    Tp = max(log2(binom(n, w)) - log2(binom(n - k - l, w - 2 * p)) - 2 * log2(
                        binom((k + l) // 2, p)) - solutions, 0)
                    Tg = _gaussian_elimination_complexity(n, k, r)
                    # Cost of constructing and merging the two-level list tree once.
                    T_tree = 2 * _list_merge_complexity(L1, l1, hmap) + _list_merge_complexity(L12,
                                                                                               l - l1,
                                                                                               hmap)
                    T_rep = int(ceil(2 ** (l1 - log2(reps))))
                    tmp = Tp + log2(Tg + T_rep * T_tree)
                    tmp += __memory_access_cost(tmp_mem, memory_access)
                    time = min(tmp, time)
                    # tmp == time exactly when this candidate just became the optimum.
                    if tmp == time:
                        memory = tmp_mem
                        params = [p, l, p1]
        # Widen every range whose optimum sits on its border and search again.
        for i in range(len(i_val)):
            if params[i] == i_val[i] - 1:
                stop = False
                i_val[i] += i_val_inc[i]
        if stop:
            break
    par = {"l": params[1], "p": params[0], "p1": params[2], "depth": 2}
    res = {"time": time, "memory": memory, "parameters": par}
    return res
def bjmm_depth_3_complexity(n, k, w, mem=inf, hmap=1, memory_access=0):
    """
    Complexity estimate of BJMM algorithm in depth 3
    [MMT11] <NAME>., <NAME>., <NAME>.: Decoding random linear codes in 2^(0.054n). In: International Conference
    on the Theory and Application of Cryptology and Information Security. pp. 107–124. Springer (2011)
    [BJMM12] <NAME>., <NAME>., <NAME>., <NAME>.: Decoding random binary linear codes in 2^(n/20): How 1+ 1= 0
    improves information set decoding. In: Annual international conference on the theory and applications of
    cryptographic techniques. pp. 520–536. Springer (2012)
    expected weight distribution::
    +--------------------------+-------------------+-------------------+
    | <-----+ n - k - l +----->|<--+ (k + l)/2 +-->|<--+ (k + l)/2 +-->|
    |          w - 2p          |         p         |         p         |
    +--------------------------+-------------------+-------------------+
    INPUT:
    - ``n`` -- length of the code
    - ``k`` -- dimension of the code
    - ``w`` -- Hamming weight of error vector
    - ``mem`` -- upper bound on the available memory (as log2), default unlimited
    - ``hmap`` -- indicates if hashmap is being used (default: true)
    - ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)
    EXAMPLES::
    >>> from .estimator import bjmm_depth_3_complexity
    >>> bjmm_depth_3_complexity(n=100,k=50,w=10) # doctest: +SKIP
    """
    # log2 of the expected number of solutions of the instance.
    solutions = max(0, log2(binom(n, w)) - (n - k))
    time = inf
    memory = 0
    # m4ri table parameter for fast Gaussian elimination, optimized under the memory bound.
    r = _optimize_m4ri(n, k, mem)
    # Search ranges for (p, l, p2, p1); widened when an optimum lies on a border.
    params = [-1 for _ in range(4)]
    i_val = [25, 400, 20, 10]
    i_val_inc = [10, 10, 10, 10]
    while True:  # repeat the grid search until no optimal parameter sits on a range border
        stop = True
        for p in range(max(params[0] - i_val_inc[0] // 2 + (params[0] - i_val_inc[0] // 2) % 2, 0),
                       min(w // 2, i_val[0]), 2):
            for l in range(max(params[1] - i_val_inc[1] // 2, 0), min(n - k - (w - 2 * p), min(n - k, i_val[1]))):
                k1 = (k + l) // 2
                for p2 in range(max(params[2] - i_val_inc[2] // 2, p // 2 + ((p // 2) % 2)), i_val[2], 2):
                    for p1 in range(max(params[3] - i_val_inc[3] // 2, (p2 + 1) // 2), i_val[3]):
                        # L1: size of the base lists.
                        L1 = binom(k1, p1)
                        if log2(L1) > time:
                            continue
                        # Representations on the lowest level (p2 split into two p1's).
                        reps1 = (binom(p2, p2 / 2) * binom(k1 - p2, p1 - p2 / 2)) ** 2
                        # NOTE(review): plain int() truncation here, while the depth-2
                        # variant uses ceil() — presumably intentional; confirm.
                        l1 = int((log2(reps1))) if reps1 != 1 else 0
                        L12 = max(1, L1 ** 2 // 2 ** l1)
                        # Representations on the middle level (p split into two p2's).
                        reps2 = (binom(p, p / 2) * binom(k1 - p, p2 - p / 2)) ** 2
                        l2 = int(ceil(log2(reps2))) if reps2 != 1 else 0
                        L1234 = max(1, L12 ** 2 // 2 ** (l2 - l1))
                        tmp_mem = log2((2 * L1 + L12 + L1234) + _mem_matrix(n, k, r))
                        if tmp_mem > mem:
                            continue
                        # Expected number of permutations until the weight distribution is hit.
                        Tp = max(log2(binom(n, w)) - log2(binom(n - k - l, w - 2 * p)) - 2 * log2(
                            binom((k + l) // 2, p)) - solutions, 0)
                        Tg = _gaussian_elimination_complexity(n, k, r)
                        # Cost of building and merging the three-level list tree once.
                        T_tree = 4 * _list_merge_complexity(L1, l1, hmap) + 2 * _list_merge_complexity(L12,
                                                                                                       l2 - l1,
                                                                                                       hmap) + _list_merge_complexity(
                            L1234,
                            l - l2,
                            hmap)
                        T_rep = int(ceil(2 ** (3 * max(0, l1 - log2(reps1)) + max(0, l2 - log2(reps2)))))
                        tmp = Tp + log2(Tg + T_rep * T_tree)
                        tmp += __memory_access_cost(tmp_mem, memory_access)
                        if tmp < time:
                            time = tmp
                            memory = tmp_mem
                            params = [p, l, p2, p1]
        # Widen every range whose optimum sits near its border and search again.
        for i in range(len(i_val)):
            if params[i] >= i_val[i] - i_val_inc[i] / 2:
                stop = False
                i_val[i] += i_val_inc[i]
        if stop:
            break
    par = {"l": params[1], "p": params[0], "p1": params[3], "p2": params[2], "depth": 3}
    res = {"time": time, "memory": memory, "parameters": par}
    return res
def bjmm_depth_2_partially_disjoint_weight_complexity(n, k, w, mem=inf, hmap=1, memory_access=0):
    """
    Complexity estimate of BJMM algorithm in depth 2 using partially disjoint weight, applying explicit MitM-NN search on second level
    [MMT11] <NAME>., <NAME>., <NAME>.: Decoding random linear codes in 2^(0.054n). In: International Conference
    on the Theory and Application of Cryptology and Information Security. pp. 107–124. Springer (2011)
    [BJMM12] <NAME>., <NAME>., <NAME>., <NAME>.: Decoding random binary linear codes in 2^(n/20): How 1+ 1= 0
    improves information set decoding. In: Annual international conference on the theory and applications of
    cryptographic techniques. pp. 520–536. Springer (2012)
    [EssBel21] <NAME>. and <NAME>.: Syndrome Decoding Estimator. In: IACR Cryptol. ePrint Arch. 2021 (2021), 1243
    expected weight distribution::
    +--------------------------+--------------------+--------------------+--------+--------+
    | <-+ n - k - l1 - 2 l2 +->|<-+ (k + l1) / 2 +->|<-+ (k + l1) / 2 +->|   l2   |   l2   |
    |     w - 2 p - 2 w2       |         p          |         p          |   w2   |   w2   |
    +--------------------------+--------------------+--------------------+--------+--------+
    INPUT:
    - ``n`` -- length of the code
    - ``k`` -- dimension of the code
    - ``w`` -- Hamming weight of error vector
    - ``mem`` -- upper bound on the available memory (as log2), default unlimited
    - ``hmap`` -- indicates if hashmap is being used (default: true)
    - ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)
    EXAMPLES::
    >>> from .estimator import bjmm_depth_2_partially_disjoint_weight_complexity
    >>> bjmm_depth_2_partially_disjoint_weight_complexity(n=100,k=50,w=10) # doctest: +SKIP
    """
    # log2 of the expected number of solutions of the instance.
    solutions = max(0, log2(binom(n, w)) - (n - k))
    time = inf
    memory = 0
    # m4ri table parameter for fast Gaussian elimination, optimized under the memory bound.
    r = _optimize_m4ri(n, k, mem)
    # Upper bounds for (p, p1, w2); l1 and l2 are seeded numerically below.
    i_val = [30, 25, 5]
    i_val_inc = [10, 10, 10, 10, 10]
    params = [-1 for _ in range(5)]
    while True:
        stop = True
        for p in range(max(params[0] - i_val_inc[0] // 2, 0), min(w // 2, i_val[0]), 2):
            for p1 in range(max(params[1] - i_val_inc[1] // 2, (p + 1) // 2), min(w, i_val[1])):
                for w2 in range(max(params[2] - i_val_inc[2] // 2, 0), min(w - p1, i_val[2])):
                    #############################################################################################
                    ######choose start value for l1 close to the logarithm of the number of representations######
                    #############################################################################################
                    try:
                        f = lambda x: log2((binom(p, p // 2) * binom_sp((k + x) / 2 - p, p1 - p // 2))) * 2 - x
                        l1_val = int(fsolve(f, 0)[0])
                    except:
                        # fsolve failed to converge for this parameter combination; skip it.
                        continue
                    if f(l1_val) < 0 or f(l1_val) > 1:
                        continue
                    #############################################################################################
                    for l1 in range(max(0, l1_val - i_val_inc[3] // 2), l1_val + i_val_inc[3] // 2):
                        k1 = (k + l1) // 2
                        # Number of representations of the weight-p target as two weight-p1 halves.
                        reps = (binom(p, p // 2) * binom(k1 - p, p1 - p // 2)) ** 2
                        L1 = binom(k1, p1)
                        if log2(L1) > time:
                            continue
                        L12 = L1 ** 2 // 2 ** l1
                        L12 = max(L12, 1)
                        tmp_mem = log2((2 * L1 + L12) + _mem_matrix(n, k, r))
                        if tmp_mem > mem:
                            continue
                        #################################################################################
                        #######choose start value for l2 such that resultlist size is close to L12#######
                        #################################################################################
                        try:
                            f = lambda x: log2(int(L12)) + int(2) * log2(binom_sp(x, int(w2))) - int(2) * x
                            l2_val = int(fsolve(f, 0)[0])
                        except:
                            # fsolve failed to converge; skip this l1 candidate.
                            continue
                        if f(l2_val) < 0 or f(l2_val) > 1:
                            continue
                        ################################################################################
                        l2_min = w2
                        l2_max = (n - k - l1 - (w - 2 * p - 2 * w2)) // 2
                        l2_range = [l2_val - i_val_inc[4] // 2, l2_val + i_val_inc[4] // 2]
                        for l2 in range(max(l2_min, l2_range[0]), min(l2_max, l2_range[1])):
                            # Expected number of permutations until the weight distribution is hit.
                            Tp = max(
                                log2(binom(n, w)) - log2(binom(n - k - l1 - 2 * l2, w - 2 * p - 2 * w2)) - 2 * log2(
                                    binom(k1, p)) - 2 * log2(binom(l2, w2)) - solutions, 0)
                            Tg = _gaussian_elimination_complexity(n, k, r)
                            # First level: exact-match merges; second level: MitM nearest-neighbor search.
                            T_tree = 2 * _list_merge_complexity(L1, l1, hmap) + _mitm_nn_complexity(L12, 2 * l2, 2 * w2,
                                                                                                    hmap)
                            T_rep = int(ceil(2 ** max(l1 - log2(reps), 0)))
                            tmp = Tp + log2(Tg + T_rep * T_tree)
                            tmp += __memory_access_cost(tmp_mem, memory_access)
                            time = min(tmp, time)
                            if tmp == time:
                                memory = tmp_mem
                                params = [p, p1, w2, l2, l1]
        for i in range(len(i_val)):
            if params[i] >= i_val[i] - i_val_inc[i] / 2:
                i_val[i] += i_val_inc[i]
                stop = False
        if stop:
            break
        # NOTE(review): this unconditional break limits the search to a single pass,
        # so the range-widening `stop` machinery above never triggers a rerun.
        # Possibly a deliberate cap on optimization time — confirm before changing.
        break
    par = {"l1": params[4], "p": params[0], "p1": params[1], "depth": 2, "l2": params[3], "w2": params[2]}
    res = {"time": time, "memory": memory, "parameters": par}
    return res
def bjmm_depth_2_disjoint_weight_complexity(n, k, w, mem=inf, hmap=1, p_range=(0, 25), memory_access=0):
    """
    Complexity estimate of the BJMM algorithm in depth 2 with disjoint weight
    distribution, using MitM nearest-neighbor search on both levels.

    [MMT11] <NAME>., <NAME>., <NAME>.: Decoding random linear codes in 2^(0.054n). In: International Conference
    on the Theory and Application of Cryptology and Information Security. pp. 107–124. Springer (2011)
    [BJMM12] <NAME>., <NAME>., <NAME>., <NAME>.: Decoding random binary linear codes in 2^(n/20): How 1+ 1= 0
    improves information set decoding. In: Annual international conference on the theory and applications of
    cryptographic techniques. pp. 520–536. Springer (2012)
    [EssBel21] <NAME>. and <NAME>.: Syndrome Decoding Estimator. In: IACR Cryptol. ePrint Arch. 2021 (2021), 1243

    expected weight distribution::

    +---------------------------+-------------+------------+----------+----------+----------+----------+
    |<-+ n - k - 2 l1 - 2 l2 +->|<-+ k / 2 +->|<-+ k / 2 ->|<-+ l1 +->|<-+ l1 +->|<-+ l2 +->|<-+ l2 +->|
    |   w - 2 p - 2 w1 - 2 w2   |      p      |     p      |    w1    |    w1    |    w2    |    w2    |
    +---------------------------+-------------+------------+----------+----------+----------+----------+

    INPUT:
    - ``n`` -- length of the code
    - ``k`` -- dimension of the code
    - ``w`` -- Hamming weight of error vector
    - ``mem`` -- upper bound on the available memory (as log2), default unlimited
    - ``hmap`` -- indicates if hashmap is being used (default: true)
    - ``p_range`` -- interval in which the parameter p is searched (default: (0, 25), helps speeding up computation)
    - ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)

    EXAMPLES::
    >>> from .estimator import bjmm_depth_2_disjoint_weight_complexity
    >>> bjmm_depth_2_disjoint_weight_complexity(n=100,k=50,w=10) # doctest: +SKIP
    """
    # log2 of the expected number of solutions of the instance.
    solutions = max(0, log2(binom(n, w)) - (n - k))
    time = inf
    memory = 0
    # Pass the memory bound so the m4ri parameter is optimized consistently with
    # the other estimators in this module (previously `mem` was dropped here).
    r = _optimize_m4ri(n, k, mem)
    i_val = [p_range[1], 20, 10, 10, 5]
    i_val_inc = [10, 10, 10, 10, 10, 10, 10]
    params = [-1 for _ in range(7)]
    while True:
        stop = True
        for p in range(max(p_range[0], params[0] - i_val_inc[0] // 2, 0), min(w // 2, i_val[0]), 2):
            for p1 in range(max(params[1] - i_val_inc[1] // 2, (p + 1) // 2), min(w, i_val[1])):
                s = max(params[2] - i_val_inc[2] // 2, 0)
                for w1 in range(s - (s % 2), min(w // 2 - p, i_val[2]), 2):
                    for w11 in range(max(params[3] - i_val_inc[3] // 2, (w1 + 1) // 2), min(w, i_val[3])):
                        for w2 in range(max(params[4] - i_val_inc[4] // 2, 0), min(w // 2 - p - w1, i_val[4])):
                            ##################################################################################
                            ######choose start value for l1 such that representations cancel out exactly######
                            ##################################################################################
                            try:
                                f = lambda x: 2 * log2((binom(p, p // 2) * binom(k // 2 - p, p1 - p // 2)) * (
                                        binom_sp(x, w1 // 2) * binom_sp(x - w1, w11 - w1 // 2)) + 1) - 2 * x
                                l1_val = int(
                                    fsolve(f, 2 * log2((binom(p, p // 2) * binom(k // 2 - p, p1 - p // 2))))[0])
                            except Exception:
                                # fsolve failed for this parameter combination; skip it.
                                # (narrowed from a bare `except:` so Ctrl-C is not swallowed)
                                continue
                            if f(l1_val) < 0 or f(l1_val) > 10:
                                continue
                            #################################################################################
                            for l1 in range(max(l1_val - i_val_inc[5], w1, w11), l1_val + i_val_inc[5]):
                                k1 = k // 2
                                # Representations: weight-p split times weight-w1 split on the l1 window.
                                reps = (binom(p, p // 2) * binom(k1 - p, p1 - p // 2)) ** 2 * (
                                        binom(w1, w1 // 2) * binom(l1 - w1, w11 - w1 // 2)) ** 2
                                reps = max(reps, 1)
                                L1 = binom(k1, p1)
                                if log2(L1) > time:
                                    continue
                                L12 = L1 ** 2 * binom(l1, w11) ** 2 // 2 ** (2 * l1)
                                L12 = max(L12, 1)
                                tmp_mem = log2((2 * L1 + L12) + _mem_matrix(n, k, r))
                                if tmp_mem > mem:
                                    continue
                                #################################################################################
                                #######choose start value for l2 such that resultlist size is equal to L12#######
                                #################################################################################
                                try:
                                    f = lambda x: log2(L12) + 2 * log2(binom_sp(x, w2) + 1) - 2 * x
                                    l2_val = int(fsolve(f, 50)[0])
                                except Exception:
                                    # fsolve failed; skip this l1 candidate.
                                    continue
                                if f(l2_val) < 0 or f(l2_val) > 10:
                                    continue
                                ################################################################################
                                l2_max = (n - k - 2 * l1 - (w - 2 * p - 2 * w1 - 2 * w2)) // 2
                                l2_min = w2
                                l2_range = [l2_val - i_val_inc[6] // 2, l2_val + i_val_inc[6] // 2]
                                for l2 in range(max(l2_min, l2_range[0]), min(l2_max, l2_range[1])):
                                    # Expected number of permutations until the weight distribution is hit.
                                    Tp = max(
                                        log2(binom(n, w)) - log2(
                                            binom(n - k - 2 * l1 - 2 * l2, w - 2 * p - 2 * w1 - 2 * w2)) - 2 * log2(
                                            binom(k1, p)) - 2 * log2(binom(l1, w1)) - 2 * log2(
                                            binom(l2, w2)) - solutions, 0)
                                    Tg = _gaussian_elimination_complexity(n, k, r)
                                    # MitM nearest-neighbor search on both levels of the tree.
                                    T_tree = 2 * _mitm_nn_complexity(L1, 2 * l1, 2 * w11, hmap) + _mitm_nn_complexity(
                                        L12, 2 * l2, 2 * w2, hmap)
                                    T_rep = int(ceil(2 ** max(2 * l1 - log2(reps), 0)))
                                    tmp = Tp + log2(Tg + T_rep * T_tree)
                                    tmp += __memory_access_cost(tmp_mem, memory_access)
                                    time = min(tmp, time)
                                    if tmp == time:
                                        memory = tmp_mem
                                        params = [p, p1, w1, w11, w2, l2, l1 + l2]
        for i in range(len(i_val)):
            if params[i] >= i_val[i] - i_val_inc[i] / 2:
                i_val[i] += i_val_inc[i]
                stop = False
        if stop:
            break
        # NOTE(review): unconditional break — the search makes a single pass over the
        # fsolve-seeded ranges; kept as-is since removing it would change runtime/results.
        break
    par = {"l": params[6], "p": params[0], "p1": params[1], "w1": params[2], "w11": params[3], "l2": params[5],
           "w2": params[4], "depth": 2}
    res = {"time": time, "memory": memory, "parameters": par}
    return res
def both_may_depth_2_complexity(n, k, w, mem=inf, hmap=1, memory_access=0):
    """
    Complexity estimate of Both-May algorithm in depth 2 using Indyk-Motwani and MitM for NN search
    [BotMay18] <NAME>., <NAME>.: Decoding linear codes with high error rate and its impact for LPN security. In:
    International Conference on Post-Quantum Cryptography. pp. 25--46. Springer (2018)
    expected weight distribution::
    +-------------------+---------+-------------------+-------------------+
    | <--+ n - k - l+-->|<-+ l +->|<----+ k / 2 +---->|<----+ k / 2 +---->|
    |    w - w2 - 2p    |   w2    |         p         |         p         |
    +-------------------+---------+-------------------+-------------------+
    INPUT:
    - ``n`` -- length of the code
    - ``k`` -- dimension of the code
    - ``w`` -- Hamming weight of error vector
    - ``mem`` -- upper bound on the available memory (as log2), default unlimited
    - ``hmap`` -- indicates if hashmap is being used (default: true)
    - ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)
    EXAMPLES::
    >>> from .estimator import both_may_depth_2_complexity
    >>> both_may_depth_2_complexity(n=100,k=50,w=10) # doctest: +SKIP
    """
    # log2 of the expected number of solutions of the instance.
    solutions = max(0, log2(binom(n, w)) - (n - k))
    time = inf
    memory = 0
    # m4ri table parameter for fast Gaussian elimination, optimized under the memory bound.
    r = _optimize_m4ri(n, k, mem)
    # Search ranges for (p, l, w1, w2, p1); widened when an optimum lies near a border.
    i_val = [20, 160, 5, 4, 15]
    i_val_inc = [10, 10, 10, 6, 6]
    params = [-1 for _ in range(5)]
    while True:
        stop = True
        for p in range(max(params[0] - i_val_inc[0] // 2, 0), min(w // 2, i_val[0]), 2):
            for l in range(max(params[1] - i_val_inc[1] // 2, 0), min(n - k - (w - 2 * p), i_val[1])):
                for w1 in range(max(params[2] - i_val_inc[2] // 2, 0), min(w, l + 1, i_val[2])):
                    for w2 in range(max(params[3] - i_val_inc[3] // 2, 0), min(w - 2 * p, l + 1, i_val[3], 2 * w1), 2):
                        for p1 in range(max(params[4] - i_val_inc[4] // 2, (p + 1) // 2), min(w, i_val[4])):
                            k1 = (k) // 2
                            # Representations of the weight-p / weight-w2 targets on the two windows.
                            reps = (binom(p, p / 2) * binom(k1 - p, p1 - p / 2)) ** 2 * binom(w2, w2 / 2) * binom(
                                l - w2,
                                w1 - w2 / 2)
                            reps = 1 if reps == 0 else reps
                            # L1: size of the base lists.
                            L1 = binom(k1, p1)
                            if log2(L1) > time:
                                continue
                            L12 = max(1, L1 ** 2 * binom(l, w1) // 2 ** l)
                            tmp_mem = log2((2 * L1 + L12) + _mem_matrix(n, k, r))
                            if tmp_mem > mem:
                                continue
                            # Expected number of permutations until the weight distribution is hit.
                            Tp = max(log2(binom(n, w)) - log2(binom(n - k - l, w - w2 - 2 * p)) - 2 * log2(
                                binom(k1, p)) - log2(binom(l, w2)) - solutions, 0)
                            Tg = _gaussian_elimination_complexity(n, k, r)
                            # Indyk-Motwani nearest-neighbor search on both tree levels.
                            first_level_nn = _indyk_motwani_complexity(L1, l, w1, hmap)
                            second_level_nn = _indyk_motwani_complexity(L12, n - k - l, w - 2 * p - w2, hmap)
                            T_tree = 2 * first_level_nn + second_level_nn
                            T_rep = int(ceil(2 ** max(0, l - log2(reps))))
                            tmp = Tp + log2(Tg + T_rep * T_tree)
                            tmp += __memory_access_cost(tmp_mem, memory_access)
                            time = min(tmp, time)
                            if tmp == time:
                                memory = tmp_mem
                                # list sizes (log2) are appended for diagnostics only;
                                # the range-widening loop below reads params[0..4].
                                params = [p, l, w1, w2, p1, log2(L1), log2(L12)]
        # Widen every range whose optimum sits near its border and search again.
        for i in range(len(i_val)):
            if params[i] >= i_val[i] - i_val_inc[i] / 2:
                i_val[i] += i_val_inc[i]
                stop = False
        if stop:
            break
    par = {"l": params[1], "p": params[0], "p1": params[4], "w1": params[2], "w2": params[3], "depth": 2}
    res = {"time": time, "memory": memory, "parameters": par}
    return res
def may_ozerov_complexity(n, k, w, mem=inf, hmap=1, only_depth_two=0, memory_access=0):
    """
    Complexity estimate of the May-Ozerov algorithm (Indyk-Motwani NN search),
    returning the cheaper of the depth-2 and depth-3 instantiations.

    [MayOze15] May, A. and Ozerov, I.: On computing nearest neighbors with applications to decoding of binary linear codes.
    In: Annual International Conference on the Theory and Applications of Cryptographic Techniques. pp. 203--228. Springer (2015)

    expected weight distribution::

    +-------------------------+---------------------+---------------------+
    | <-----+ n - k - l+----->|<--+ (k + l) / 2 +-->|<--+ (k + l) / 2 +-->|
    |          w - 2p         |          p          |          p          |
    +-------------------------+---------------------+---------------------+

    INPUT:
    - ``n`` -- length of the code
    - ``k`` -- dimension of the code
    - ``w`` -- Hamming weight of error vector
    - ``mem`` -- upper bound on the available memory (as log2), default unlimited
    - ``hmap`` -- indicates if hashmap is being used (default: true)
    - ``only_depth_two`` -- if set, the depth-2 estimate is returned regardless of depth 3 (default: false)
    - ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)

    EXAMPLES::
    >>> from .estimator import may_ozerov_complexity
    >>> may_ozerov_complexity(n=100,k=50,w=10) # doctest: +SKIP
    """
    depth_two = may_ozerov_depth_2_complexity(n, k, w, mem, hmap, memory_access)
    depth_three = may_ozerov_depth_3_complexity(n, k, w, mem, hmap, memory_access)
    # Depth 2 wins when it is strictly faster or when the caller forces it.
    if only_depth_two or depth_two["time"] < depth_three["time"]:
        return depth_two
    return depth_three
def may_ozerov_depth_2_complexity(n, k, w, mem=inf, hmap=1, memory_access=0):
    """
    Complexity estimate of May-Ozerov algorithm in depth 2 using Indyk-Motwani for NN search
    [MayOze15] <NAME>. and <NAME>.: On computing nearest neighbors with applications to decoding of binary linear codes.
    In: Annual International Conference on the Theory and Applications of Cryptographic Techniques. pp. 203--228. Springer (2015)
    expected weight distribution::
    +-------------------------+---------------------+---------------------+
    | <-----+ n - k - l+----->|<--+ (k + l) / 2 +-->|<--+ (k + l) / 2 +-->|
    |          w - 2p         |          p          |          p          |
    +-------------------------+---------------------+---------------------+
    INPUT:
    - ``n`` -- length of the code
    - ``k`` -- dimension of the code
    - ``w`` -- Hamming weight of error vector
    - ``mem`` -- upper bound on the available memory (as log2), default unlimited
    - ``hmap`` -- indicates if hashmap is being used (default: true)
    - ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)
    EXAMPLES::
    >>> from .estimator import may_ozerov_depth_2_complexity
    >>> may_ozerov_depth_2_complexity(n=100,k=50,w=10) # doctest: +SKIP
    """
    # log2 of the expected number of solutions of the instance.
    solutions = max(0, log2(binom(n, w)) - (n - k))
    time = inf
    memory = 0
    # m4ri table parameter for fast Gaussian elimination, optimized under the memory bound.
    r = _optimize_m4ri(n, k, mem)
    # Search ranges for (p, l, p1).
    i_val = [30, 300, 25]
    i_val_inc = [10, 10, 10]
    params = [-1 for _ in range(3)]
    while True:
        stop = True
        for p in range(max(params[0] - i_val_inc[0] // 2, 0), min(w // 2, i_val[0]), 2):
            for l in range(max(params[1] - i_val_inc[1] // 2, 0), min(n - k - (w - 2 * p), i_val[1])):
                for p1 in range(max(params[2] - i_val_inc[2] // 2, (p + 1) // 2), min(w, i_val[2])):
                    k1 = (k + l) // 2
                    # Number of representations of the weight-p target as two weight-p1 halves.
                    reps = (binom(p, p // 2) * binom(k1 - p, p1 - p // 2)) ** 2
                    # L1: size of the base lists.
                    L1 = binom(k1, p1)
                    if log2(L1) > time:
                        continue
                    L12 = L1 ** 2 // 2 ** l
                    L12 = max(L12, 1)
                    tmp_mem = log2((2 * L1 + L12) + _mem_matrix(n, k, r))
                    if tmp_mem > mem:
                        continue
                    # Expected number of permutations until the weight distribution is hit.
                    Tp = max(
                        log2(binom(n, w)) - log2(binom(n - k - l, w - 2 * p)) - 2 * log2(binom(k1, p)) - solutions, 0)
                    Tg = _gaussian_elimination_complexity(n, k, r)
                    # Exact merge on the first level, Indyk-Motwani NN search on the last.
                    T_tree = 2 * _list_merge_complexity(L1, l, hmap) + _indyk_motwani_complexity(L12,
                                                                                                 n - k - l,
                                                                                                 w - 2 * p,
                                                                                                 hmap)
                    T_rep = int(ceil(2 ** max(l - log2(reps), 0)))
                    tmp = Tp + log2(Tg + T_rep * T_tree)
                    tmp += __memory_access_cost(tmp_mem, memory_access)
                    time = min(tmp, time)
                    if tmp == time:
                        memory = tmp_mem
                        params = [p, l, p1]
        for i in range(len(i_val)):
            if params[i] >= i_val[i] - i_val_inc[i] / 2:
                i_val[i] += i_val_inc[i]
                stop = False
        if stop:
            break
        # NOTE(review): unconditional break — the search makes a single pass and the
        # range-widening `stop` machinery above never triggers a rerun. Possibly a
        # deliberate cap on optimization time — confirm before changing.
        break
    par = {"l": params[1], "p": params[0], "p1": params[2], "depth": 2}
    res = {"time": time, "memory": memory, "parameters": par}
    return res
def may_ozerov_depth_3_complexity(n, k, w, mem=inf, hmap=1, memory_access=0):
    """
    Complexity estimate of May-Ozerov algorithm in depth 3 using Indyk-Motwani for NN search
    [MayOze15] <NAME>. and <NAME>.: On computing nearest neighbors with applications to decoding of binary linear codes.
    In: Annual International Conference on the Theory and Applications of Cryptographic Techniques. pp. 203--228. Springer (2015)
    expected weight distribution::
    +-------------------------+---------------------+---------------------+
    | <-----+ n - k - l+----->|<--+ (k + l) / 2 +-->|<--+ (k + l) / 2 +-->|
    |          w - 2p         |          p          |          p          |
    +-------------------------+---------------------+---------------------+
    INPUT:
    - ``n`` -- length of the code
    - ``k`` -- dimension of the code
    - ``w`` -- Hamming weight of error vector
    - ``mem`` -- upper bound on the available memory (as log2), default unlimited
    - ``hmap`` -- indicates if hashmap is being used (default: true)
    - ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)
    EXAMPLES::
    >>> from .estimator import may_ozerov_depth_3_complexity
    >>> may_ozerov_depth_3_complexity(n=100,k=50,w=10) # doctest: +SKIP
    """
    # log2 of the expected number of solutions of the instance.
    solutions = max(0, log2(binom(n, w)) - (n - k))
    time = inf
    memory = 0
    # m4ri table parameter for fast Gaussian elimination, optimized under the memory bound.
    r = _optimize_m4ri(n, k, mem)
    # Search ranges for (p, l, p2, p1).
    i_val = [20, 200, 20, 10]
    i_val_inc = [10, 10, 10, 10]
    params = [-1 for _ in range(4)]
    while True:
        stop = True
        for p in range(max(params[0] - i_val_inc[0] // 2, 0), min(w // 2, i_val[0]), 2):
            for l in range(max(params[1] - i_val_inc[1] // 2, 0), min(n - k - (w - 2 * p), i_val[1])):
                k1 = (k + l) // 2
                for p2 in range(max(params[2] - i_val_inc[2] // 2, p // 2 + ((p // 2) % 2)), p + i_val[2], 2):
                    for p1 in range(max(params[3] - i_val_inc[3] // 2, (p2 + 1) // 2),
                                    min(p2 + i_val[3], k1 - p2 // 2)):
                        # L1: size of the base lists.
                        L1 = binom(k1, p1)
                        if log2(L1) > time:
                            continue
                        # Representations on the lowest level (p2 split into two p1's).
                        reps1 = (binom(p2, p2 // 2) * binom(k1 - p2, p1 - p2 // 2)) ** 2
                        l1 = int(ceil(log2(reps1)))
                        if l1 > l:
                            continue
                        L12 = max(1, L1 ** 2 // 2 ** l1)
                        # Representations on the middle level (p split into two p2's).
                        reps2 = (binom(p, p // 2) * binom(k1 - p, p2 - p // 2)) ** 2
                        L1234 = max(1, L12 ** 2 // 2 ** (l - l1))
                        tmp_mem = log2((2 * L1 + L12 + L1234) + _mem_matrix(n, k, r))
                        if tmp_mem > mem:
                            continue
                        # Expected number of permutations until the weight distribution is hit.
                        Tp = max(
                            log2(binom(n, w)) - log2(binom(n - k - l, w - 2 * p)) - 2 * log2(binom(k1, p)) - solutions,
                            0)
                        Tg = _gaussian_elimination_complexity(n, k, r)
                        # Exact merges on the lower levels, Indyk-Motwani NN search on the last.
                        T_tree = 4 * _list_merge_complexity(L1, l1, hmap) + 2 * _list_merge_complexity(L12,
                                                                                                       l - l1,
                                                                                                       hmap) + _indyk_motwani_complexity(
                            L1234,
                            n - k - l,
                            w - 2 * p,
                            hmap)
                        T_rep = int(ceil(2 ** (max(l - log2(reps2), 0) + 3 * max(l1 - log2(reps1), 0))))
                        tmp = Tp + log2(Tg + T_rep * T_tree)
                        tmp += __memory_access_cost(tmp_mem, memory_access)
                        if tmp < time:
                            time = tmp
                            memory = tmp_mem
                            params = [p, l, p2, p1]
        for i in range(len(i_val)):
            if params[i] >= i_val[i] - i_val_inc[i] / 2:
                i_val[i] += i_val_inc[i]
                stop = False
        if stop:
            break
        # NOTE(review): unconditional break — the search makes a single pass and the
        # range-widening `stop` machinery above never triggers a rerun. Possibly a
        # deliberate cap on optimization time — confirm before changing.
        break
    par = {"l": params[1], "p": params[0], "p1": params[3], "p2": params[2], "depth": 3}
    res = {"time": time, "memory": memory, "parameters": par}
    return res
def quantum_prange_complexity(n, k, w, maxdepth=96, matrix_mult_constant=2.5):
    """
    Optimistic complexity estimate of the quantum (Grover-based) version of
    Prange's algorithm.

    [Pra62] Prange, E.: The use of information sets in decoding cyclic codes. IRE Transactions
    on Information Theory 8(5), 5-9 (1962)
    [Ber10] Bernstein, D.: Grover vs. McEliece. In: International Workshop on Post-Quantum Cryptography.
    pp. 73-80. Springer (2010)

    expected weight distribution::

    +--------------------------------+-------------------------------+
    | <----------+ n - k +---------> | <----------+ k +------------> |
    |                w               |              0                |
    +--------------------------------+-------------------------------+

    INPUT:
    - ``n`` -- length of the code
    - ``k`` -- dimension of the code
    - ``w`` -- Hamming weight of error vector
    - ``maxdepth`` -- maximum allowed depth of the quantum circuit (default: 96)
    - ``matrix_mult_constant`` -- used matrix multiplication constant (default: 2.5)

    EXAMPLES::
    >>> from .estimator import quantum_prange_complexity
    >>> quantum_prange_complexity(n=100,k=50,w=10) # doctest: +SKIP
    """
    # log2 cost of one Gaussian elimination on the (n-k) x n parity-check matrix.
    gauss_cost = matrix_mult_constant * log2(n - k)
    if gauss_cost > maxdepth:
        # Even a single Gaussian elimination exceeds the depth budget.
        return 0
    # log2 of the classical number of random permutations; Grover gives a
    # square-root (i.e. halved exponent) speed-up when depth permits.
    grover_iterations = log2(binom(n, w)) - log2(binom(n - k, w))
    full_circuit = gauss_cost + grover_iterations / 2
    if full_circuit < maxdepth:
        return full_circuit
    # Depth-limited regime: the Grover search must be split into shallower
    # runs, which costs the difference to maxdepth in the exponent.
    return grover_iterations + 2 * gauss_cost - maxdepth
def sd_estimate_display(n, k, w, memory_limit=inf, bit_complexities=1, hmap=1, skip=["BJMM-dw"], precision=1,
                        truncate=0,
                        all_parameters=0, theoretical_estimates=0, use_mo=1, workfactor_accuracy=1, limit_depth=0,
                        quantum_estimates=1,
                        maxdepth=96, matrix_mult_constant=2.5, memory_access=0):
    """
    Output estimates of complexity to solve the syndrome decoding problem
    INPUT:
    - ``n`` -- length of the code
    - ``k`` -- dimension of the code
    - ``w`` -- Hamming weight of error vector
    - ``memory_limit`` -- upper bound on the available memory (in log2) (default: unlimited)
    - ``bit_complexities`` -- state security level in number of bitoperations, otherwise field operations (default: true)
    - ``hmap`` -- indicates if hashmap is used for sorting lists (default: true)
    - ``skip`` -- list of algorithms not to consider (default: ["BJMM-dw"] (this variant will take a long time to optimize))
    - ``precision`` -- amount of decimal places displayed for complexity estimates (default: 1)
    - ``truncate`` -- decimal places exceeding ``precision`` are truncated, otherwise rounded (default: false)
    - ``all_parameters`` -- print values of all hyperparameters (default: false)
    - ``theoretical_estimates`` -- compute theoretical workfactors for all algorithms (default: false)
    - ``use_mo`` -- use may-ozerov nearest neighbor search in theoretical workfactor computation (default: true)
    - ``workfactor_accuracy`` -- the higher the more accurate the workfactor computation, can slow down computations significantly, recommended range 0-2 (needs to be larger than 0) (default: 1)
    - ``limit_depth`` -- restricts BJMM and May-Ozerov algorithms to depth two only (default: false)
    - ``quantum_estimates`` -- compute quantum estimates of all algorithms (default: true)
    - ``maxdepth`` -- maximum allowed depth of the quantum circuit (default: 96)
    - ``matrix_mult_constant`` -- used matrix multiplication constant (default: 2.5)
    - ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage)
    EXAMPLES::
    >>> from .estimator import *
    >>> sd_estimate_display(n=600,k=400,w=22)
    =========================================================================
    Complexity estimation to solve the (600,400,22) syndrome decoding problem
    =========================================================================
    The following table states bit complexity estimates of the corresponding algorithms including an approximation of the polynomial factors inherent to the algorithm.
    The quantum estimate gives a very optimistic estimation of the cost for a quantum aided attack with a circuit of limitted depth (should be understood as a lowerbound).
    +----------------+---------------+---------+
    |                |    estimate   | quantum |
    +----------------+------+--------+---------+
    | algorithm      | time | memory |   time  |
    +----------------+------+--------+---------+
    | Prange         | 60.1 |   17.3 |    37.1 |
    | Stern          | 47.0 |   24.5 |       -- |
    | Dumer          | 47.6 |   24.6 |       -- |
    | Ball Collision | 47.7 |   24.5 |       -- |
    | BJMM (MMT)     | 47.6 |   22.7 |       -- |
    | BJMM-pdw       | 47.7 |   21.7 |       -- |
    | May-Ozerov     | 46.5 |   22.6 |       -- |
    | Both-May       | 47.1 |   22.6 |       -- |
    +----------------+------+--------+---------+
    >>> from .estimator import *
    >>> sd_estimate_display(n=1000,k=500,w=100,all_parameters=1,theoretical_estimates=1,precision=2) # long time
    ===========================================================================
    Complexity estimation to solve the (1000,500,100) syndrome decoding problem
    ===========================================================================
    The following table states bit complexity estimates of the corresponding algorithms including an approximation of the polynomial factors inherent to the algorithm.
    The approximation is based on the theoretical workfactor of the respective algorithms, disregarding all polynomial factors and using further approximations that introduce additional polynomial inaccurcies.
    The quantum estimate gives a very optimistic estimation of the cost for a quantum aided attack with a circuit of limitted depth (should be understood as a lowerbound).
    +----------------+-----------------+-----------------+---------+--------------------------------------------------------------------+
    |                |     estimate    |  approximation  | quantum |                             parameters                             |
    +----------------+--------+--------+--------+--------+---------+--------------------------------------------------------------------+
    | algorithm      |   time | memory |   time | memory |   time  |                             classical                              |
    +----------------+--------+--------+--------+--------+---------+--------------------------------------------------------------------+
    | Prange         | 134.46 |  19.26 | 108.03 |   0.00 |   76.39 | r : 7                                                              |
    | Stern          | 117.04 |  38.21 | 104.02 |  31.39 |       -- | l : 27 | p : 4                                                     |
    | Dumer          | 116.82 |  38.53 | 103.76 |  33.68 |       -- | l : 28 | p : 4                                                     |
    | Ball Collision | 117.04 |  38.21 | 103.76 |  32.67 |       -- | l : 27 | p : 4 | pl : 0                                            |
    | BJMM (MMT)     | 112.39 |  73.15 |  90.17 |  67.76 |       -- | l : 120 | p : 16 | p1 : 10 | depth : 2                             |
    | BJMM-pdw       | 113.92 |  52.74 |     -- |     -- |       -- | l1 : 35 | p : 10 | p1 : 6 | depth : 2 | l2 : 21 | w2 : 0           |
    | May-Ozerov     | 111.56 |  70.44 |  89.51 |  51.39 |       -- | l : 69 | p : 14 | p1 : 10 | depth : 2                              |
    | Both-May       | 113.68 |  68.58 |  87.60 |  64.13 |       -- | l : 75 | p : 14 | p1 : 10 | w1 : 2 | w2 : 2 | depth : 2            |
    +----------------+--------+--------+--------+--------+---------+--------------------------------------------------------------------+
    TESTS::
    >>> from .estimator import *
    >>> sd_estimate_display(24646,12323,142,all_parameters=True) # long time
    ==============================================================================
    Complexity estimation to solve the (24646,12323,142) syndrome decoding problem
    ==============================================================================
    The following table states bit complexity estimates of the corresponding algorithms including an approximation of the polynomial factors inherent to the algorithm.
    The quantum estimate gives a very optimistic estimation of the cost for a quantum aided attack with a circuit of limitted depth (should be understood as a lowerbound).
    +----------------+----------------+---------+--------------------------------------------------------------------+
    |                |    estimate    | quantum |                             parameters                             |
    +----------------+-------+--------+---------+--------------------------------------------------------------------+
    | algorithm      |  time | memory |   time  |                             classical                              |
    +----------------+-------+--------+---------+--------------------------------------------------------------------+
    | Prange         | 182.1 |   28.4 |   114.5 | r : 11                                                             |
    | Stern          | 160.6 |   39.8 |       -- | l : 33 | p : 2                                                     |
    | Dumer          | 161.1 |   39.8 |       -- | l : 28 | p : 2                                                     |
    | Ball Collision | 161.1 |   39.8 |       -- | l : 28 | p : 2 | pl : 0                                            |
    | BJMM (MMT)     | 160.9 |   54.2 |       -- | l : 74 | p : 4 | p1 : 3 | depth : 2                                |
    | BJMM-pdw       | 160.9 |   55.0 |       -- | l1 : 30 | p : 4 | p1 : 3 | depth : 2 | l2 : 22 | w2 : 0            |
    | May-Ozerov     | 160.4 |   55.0 |       -- | l : 30 | p : 4 | p1 : 3 | depth : 2                                |
    | Both-May       | 161.1 |   37.8 |       -- | l : 4 | p : 2 | p1 : 1 | w1 : 1 | w2 : 0 | depth : 2               |
    +----------------+-------+--------+---------+--------------------------------------------------------------------+
    >>> from .estimator import *
    >>> sd_estimate_display(300,200,20,all_parameters=True, skip=[])
    =========================================================================
    Complexity estimation to solve the (300,200,20) syndrome decoding problem
    =========================================================================
    The following table states bit complexity estimates of the corresponding algorithms including an approximation of the polynomial factors inherent to the algorithm.
    The quantum estimate gives a very optimistic estimation of the cost for a quantum aided attack with a circuit of limitted depth (should be understood as a lowerbound).
    +----------------+---------------+---------+-------------------------------------------------------------------------------------------+
    |                |    estimate   | quantum |                                         parameters                                        |
    +----------------+------+--------+---------+-------------------------------------------------------------------------------------------+
    | algorithm      | time | memory |   time  |                                         classical                                         |
    +----------------+------+--------+---------+-------------------------------------------------------------------------------------------+
    | Prange         | 52.5 |   15.3 |    33.5 | r : 5                                                                                     |
    | Stern          | 40.7 |   21.5 |       -- | l : 13 | p : 2                                                                            |
    | Dumer          | 41.1 |   26.9 |       -- | l : 18 | p : 3                                                                            |
    | Ball Collision | 41.3 |   21.5 |       -- | l : 12 | p : 2 | pl : 0                                                                   |
    | BJMM (MMT)     | 41.1 |   27.5 |       -- | l : 25 | p : 4 | p1 : 2 | depth : 2                                                       |
    | BJMM-pdw       | 41.3 |   18.9 |       -- | l1 : 3 | p : 2 | p1 : 1 | depth : 2 | l2 : 4 | w2 : 0                                     |
    | BJMM-dw        | 41.3 |   19.7 |       -- | l : 6 | p : 2 | p1 : 1 | w1 : 0 | w11 : 1 | l2 : 5 | w2 : 0 | depth : 2                  |
    | May-Ozerov     | 40.1 |   19.7 |       -- | l : 2 | p : 2 | p1 : 1 | depth : 2                                                        |
    | Both-May       | 40.4 |   19.7 |       -- | l : 2 | p : 2 | p1 : 1 | w1 : 2 | w2 : 0 | depth : 2                                      |
    +----------------+------+--------+---------+-------------------------------------------------------------------------------------------+
    """
    # Run all optimizers first; this dict maps algorithm label -> result dict
    # with keys "time", "memory", "parameters" and optionally "quantum time"
    # and "Workfactor time"/"Workfactor memory".
    complexities = _sd_estimate(n, k, w, theoretical_estimates, memory_limit, bit_complexities, hmap, skip, use_mo,
                                workfactor_accuracy, limit_depth, quantum_estimates, maxdepth, matrix_mult_constant,
                                memory_access)

    # Banner with the problem parameters.
    headline = "Complexity estimation to solve the ({},{},{}) syndrome decoding problem".format(n, k, w)
    print("=" * len(headline))
    print(headline)
    print("=" * len(headline))
    if bit_complexities:
        print(
            "The following table states bit complexity estimates of the corresponding algorithms including an approximation of the polynomial factors inherent to the algorithm.")
    else:
        # Field-operation counting: explain the units used.
        print(
            "The following table states complexity estimates of the corresponding algorithms including an approximation of the polynomial factors inherent to the algorithm.")
        print("The time complexity estimate is measured in the number of additions in (F_2)^n.")
        print("The memory complexity estimate is given in the number of vector space elements that need to be stored.")
    if theoretical_estimates:
        print(
            "The approximation is based on the theoretical workfactor of the respective algorithms, disregarding all polynomial factors and using further approximations that introduce additional polynomial inaccurcies.")
    if quantum_estimates:
        print(
            "The quantum estimate gives a very optimistic estimation of the cost for a quantum aided attack with a circuit of limitted depth (should be understood as a lowerbound).")

    # Build one PrettyTable per column group; they are concatenated side by
    # side at the end via __concat_pretty_tables.
    tables = []

    # Column 1: algorithm names.
    table_fields = ['algorithm']
    tbl_names = PrettyTable(table_fields)
    tbl_names.padding_width = 1
    tbl_names.title = ' '
    for i in complexities.keys():
        tbl_names.add_row([i])
    tbl_names.align["algorithm"] = "l"
    tables.append(tbl_names)

    # Column group 2: optimized time/memory estimates.
    table_fields = ['time', 'memory']
    tbl_estimates = PrettyTable(table_fields)
    tbl_estimates.padding_width = 1
    tbl_estimates.title = 'estimate'
    tbl_estimates.align["time"] = "r"
    tbl_estimates.align["memory"] = "r"
    for i in complexities.keys():
        if complexities[i]["time"] != inf:
            T, M = __round_or_truncate_to_given_precision(complexities[i]["time"], complexities[i]["memory"], truncate,
                                                          precision)
        else:
            # Optimization failed / infeasible under the memory limit.
            T, M = "--", "--"
        tbl_estimates.add_row([T, M])
    tables.append(tbl_estimates)

    # Column group 3 (optional): asymptotic workfactor approximations,
    # rescaled from exponent-per-bit to the concrete length n.
    if theoretical_estimates:
        table_fields = ['time', 'memory']
        tbl_approx = PrettyTable(table_fields)
        tbl_approx.padding_width = 1
        tbl_approx.title = 'approximation'
        tbl_approx.align["time"] = "r"
        tbl_approx.align["memory"] = "r"

        for i in complexities.keys():
            if complexities[i]["Workfactor time"] != 0:
                T, M = __round_or_truncate_to_given_precision(complexities[i]["Workfactor time"] * n,
                                                              complexities[i]["Workfactor memory"] * n, truncate,
                                                              precision)
            else:
                T, M = "--", "--"
            tbl_approx.add_row([T, M])
        tables.append(tbl_approx)

    # Column 4 (optional): quantum time estimates (only present for Prange).
    if quantum_estimates:
        # NOTE(review): the field name ' time' has a leading space, so the
        # align key "time" below does not match it -- confirm intended.
        table_fields = [' time']
        tbl_quantum = PrettyTable(table_fields)
        tbl_quantum.padding_width = 1
        tbl_quantum.title = "quantum"
        tbl_quantum.align["time"] = "r"
        for i in complexities.keys():
            if "quantum time" in complexities[i].keys() and complexities[i]["quantum time"] != 0:
                # M is a dummy here; only the rounded time T is displayed.
                T, M = __round_or_truncate_to_given_precision(complexities[i]["quantum time"], 0, truncate, precision)
            else:
                T = "--"
            tbl_quantum.add_row([T])
        tables.append(tbl_quantum)

    # Column 5 (optional): the optimal hyperparameters of each algorithm,
    # rendered as "name : value | name : value | ...".
    if all_parameters:
        table_fields = ['classical']
        tbl_params = PrettyTable(table_fields)
        tbl_params.padding_width = 1
        tbl_params.title = "parameters"
        tbl_params.align['classical'] = "l"

        for i in complexities.keys():
            row = ""
            for j in complexities[i]["parameters"].keys():
                row += "{:<{align}}".format(j, align=max(2, len(j))) + " : " + '{:3d}'.format(
                    complexities[i]["parameters"][j]) + " | "
            # Drop the trailing " | " separator.
            tbl_params.add_row([row[:-3]])
        tables.append(tbl_params)

    # Glue all column groups into a single table string and print it.
    tbl_join = __concat_pretty_tables(str(tables[0]), str(tables[1]))
    for i in range(2, len(tables)):
        tbl_join = __concat_pretty_tables(tbl_join, str(tables[i]))
    print(tbl_join)
def _add_theoretical_estimates(complexities, n, k, w, memory_limit, skip, use_mo, workfactor_accuracy):
    """Attach asymptotic workfactor estimates to the ``complexities`` dict.

    For every algorithm not listed in ``skip``, stores the optimized
    ``"Workfactor time"`` / ``"Workfactor memory"`` exponents (per code bit)
    in the corresponding entry; entries default to 0 when skipped.
    """
    rate = k / n
    omega = w / n

    # Per-algorithm [grid size, iterations] used by the numerical optimizer.
    grid_std_accuracy = {"prange": [20, 150], "stern": [20, 150], "dumer": [20, 150], "ball_collision": [15, 150],
                         "bjmm": [10, 250], "may-ozerov": [5, 1000], "both-may": [5, 1000]}

    # Scale the optimizer effort by the requested accuracy factor.
    if workfactor_accuracy != 1:
        for name in grid_std_accuracy.keys():
            for idx in range(2):
                grid_std_accuracy[name][idx] = int(ceil(grid_std_accuracy[name][idx] * workfactor_accuracy))

    # Default: no workfactor available.
    for name in complexities.keys():
        complexities[name]["Workfactor time"] = 0
        complexities[name]["Workfactor memory"] = 0

    # Workfactors exist for 7 algorithms; the two BJMM disjoint-weight
    # variants have none, so skipping them must not shrink the bar.
    nr_algorithms = 7 - len(skip)
    nr_algorithms += 1 if "BJMM-dw" in skip else 0
    nr_algorithms += 1 if "BJMM-p-dw" in skip or "BJMM-pdw" in skip else 0
    bar = Bar('Computing theoretical workfactors\t', max=nr_algorithms)

    # (skip aliases, result label, accuracy key, workfactor function, extra args)
    jobs = [
        (("prange",), "Prange", "prange", prange_workfactor, ()),
        (("stern",), "Stern", "stern", stern_workfactor, ()),
        (("dumer",), "Dumer", "dumer", dumer_workfactor, ()),
        (("ball_collision",), "Ball Collision", "ball_collision", ball_collision_workfactor, ()),
        (("BJMM", "MMT"), "BJMM (MMT)", "bjmm", bjmm_workfactor, ()),
        (("MO", "May-Ozerov"), "May-Ozerov", "may-ozerov", may_ozerov_workfactor, (use_mo,)),
        (("BM", "Both-May"), "Both-May", "both-may", both_may_workfactor, (use_mo,)),
    ]
    for aliases, label, key, workfactor, extra in jobs:
        if any(alias in skip for alias in aliases):
            continue
        grid, iterations = grid_std_accuracy[key]
        T, M = workfactor(rate, omega, grid, iterations, memory_limit, *extra)
        complexities[label]["Workfactor time"] = T
        complexities[label]["Workfactor memory"] = M
        bar.next()
    bar.finish()
def _sd_estimate(n, k, w, theoretical_estimates, memory_limit, bit_complexities, hmap, skip, use_mo,
                 workfactor_accuracy, limit_depth, quantum_estimates, maxdepth, matrix_mult_constant, memory_access):
    """
    Estimate complexity to solve syndrome decoding problem
    INPUT:
    - ``n`` -- length of the code
    - ``k`` -- dimension of the code
    - ``w`` -- Hamming weight of error vector
    - ``theoretical_estimates`` -- also compute asymptotic workfactors for all algorithms
    - ``memory_limit`` -- upper bound on the available memory (as log2(bits))
    - ``bit_complexities`` -- convert results from field operations to bit operations
    - ``hmap`` -- indicates if hashmap should be used for sorting lists
    - ``skip`` -- list of algorithms not to consider
    - ``use_mo`` -- use may-ozerov nearest neighbor search in theoretical workfactor computation
    - ``workfactor_accuracy`` -- the higher the more accurate the workfactor computation, can slow down computations significantly, recommended range 0-2 (needs to be larger than 0)
    - ``limit_depth`` -- restrict BJMM and May-Ozerov to depth two
    - ``quantum_estimates`` -- additionally compute the quantum Prange estimate
    - ``maxdepth``, ``matrix_mult_constant`` -- forwarded to quantum_prange_complexity
    - ``memory_access`` -- memory access cost model, forwarded to every estimator

    Returns a dict mapping algorithm label -> result dict (keys "time",
    "memory", "parameters", optionally "quantum time").
    """
    complexities = {}
    if bit_complexities:
        # Estimators work in field operations; reserve the log2(n) conversion
        # factor so the memory limit still holds after the final rescale below.
        memory_limit -= log2(n)

    # NOTE(review): assumes ``skip`` uses one canonical alias per algorithm;
    # using several aliases of the same algorithm would shrink the bar too far.
    nr_algorithms = 9 - len(skip)
    bar = Bar('Computing estimates\t\t\t', max=nr_algorithms)

    if "prange" not in skip:
        complexities["Prange"] = prange_complexity(n, k, w, mem=memory_limit, memory_access=memory_access)
        if quantum_estimates:
            # Prange is the only algorithm with a quantum counterpart here.
            complexities["Prange"]["quantum time"] = quantum_prange_complexity(n, k, w, maxdepth=maxdepth,
                                                                               matrix_mult_constant=matrix_mult_constant)
        bar.next()
    if "stern" not in skip:
        complexities["Stern"] = stern_complexity(n, k, w, mem=memory_limit, hmap=hmap, memory_access=memory_access)
        bar.next()
    if "dumer" not in skip:
        complexities["Dumer"] = dumer_complexity(n, k, w, mem=memory_limit, hmap=hmap, memory_access=memory_access)
        bar.next()
    if "ball_collision" not in skip:
        complexities["Ball Collision"] = ball_collision_decoding_complexity(n, k, w, mem=memory_limit, hmap=hmap,
                                                                            memory_access=memory_access)
        bar.next()
    if "BJMM" not in skip and "MMT" not in skip:
        complexities["BJMM (MMT)"] = bjmm_complexity(n, k, w, mem=memory_limit, hmap=hmap, only_depth_two=limit_depth,
                                                     memory_access=memory_access)
        bar.next()
    if "BJMM-pdw" not in skip and "BJMM-p-dw" not in skip:
        complexities["BJMM-pdw"] = bjmm_depth_2_partially_disjoint_weight_complexity(n, k, w, mem=memory_limit,
                                                                                     hmap=hmap,
                                                                                     memory_access=memory_access)
        bar.next()
    if "BJMM-dw" not in skip:
        complexities["BJMM-dw"] = bjmm_depth_2_disjoint_weight_complexity(n, k, w, mem=memory_limit, hmap=hmap,
                                                                          memory_access=memory_access)
        bar.next()
    if "MO" not in skip and "May-Ozerov" not in skip:
        complexities["May-Ozerov"] = may_ozerov_complexity(n, k, w, mem=memory_limit, hmap=hmap,
                                                           only_depth_two=limit_depth, memory_access=memory_access)
        bar.next()
    if "BM" not in skip and "Both-May" not in skip:
        complexities["Both-May"] = both_may_depth_2_complexity(n, k, w, mem=memory_limit, hmap=hmap,
                                                               memory_access=memory_access)
        bar.next()
    bar.finish()

    if theoretical_estimates:
        _add_theoretical_estimates(complexities, n, k, w, memory_limit, skip, use_mo, workfactor_accuracy)

    if bit_complexities:
        # Convert field operations / field elements to bits: one operation or
        # element costs log2(n) bits, i.e. +log2(log2(n)) ... here approximated
        # by adding log2(n) to the (log-scale) exponents.
        field_op = log2(n)
        for i in complexities.keys():
            complexities[i]["time"] += field_op
            complexities[i]["memory"] += field_op
    return complexities
| StarcoderdataPython |
1885188 |
# used - straight check, strings are cut
def test_contact_fields_on_home_page(app):
    """Contact fields listed on the home page must match the edit page."""
    listed = app.contact.get_contact_list()[0]
    detailed = app.contact.get_contact_info_from_edit_page(0)
    for field in ("firstname", "lastname", "address"):
        assert getattr(listed, field) == getattr(detailed, field)
| StarcoderdataPython |
4812560 | <filename>Day45_46_BST/minimum_difference.py
# code
# For each test case: read a list of ints and print the minimum absolute
# difference between any two elements (the minimum adjacent gap after sorting).
T = int(input())
for _case in range(T):
    N = int(input())
    values = list(map(int, input().split()))
    values.sort()  # O(n log n)
    smallest = 10 ** 20  # sentinel standing in for infinity
    for left, right in zip(values, values[1:]):  # O(n) scan of adjacent pairs
        if right - left < smallest:
            smallest = right - left
    print(smallest)
# Time Complexity - O(n log n) per test case, dominated by the sort
5172608 | <reponame>Tymec/Playground
def count_boomers(lst):
    """Return (patterns, count): every length-3 window of ``lst`` shaped
    x, y, x with y != x, as a list of 3-element slices, plus how many there are.
    """
    matches = [lst[start:start + 3]
               for start in range(len(lst) - 2)
               if lst[start] == lst[start + 2] and lst[start] != lst[start + 1]]
    return matches, len(matches)
# Demo calls: each prints a tuple (list of x-y-x windows, window count).
print(count_boomers([1, 5, 1, 5, 5, 6, 5, 3, -1, -2, 3, 2, 3]))
print(count_boomers([5, 5, 5, 1, 5, 7, 6, 5, 7, 3, 7]))
print(count_boomers([1, 1]))      # too short for any window -> ([], 0)
print(count_boomers([1, 1, 1]))   # x == y, not a boomer -> ([], 0)
print(count_boomers([1, 5, 1]))
8192698 | <filename>setup.py
import setuptools

# Long description shown on PyPI is taken verbatim from the README.
# Fixed: read with an explicit encoding so the build does not depend on the
# platform's default locale encoding.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

# Non-Python payload shipped inside the package.
files = ["web/web.zip", "config/logging.json"]

setuptools.setup(
    name="transposcope",
    version="2.0.0",
    author="<NAME>",
    author_email="<EMAIL>",
    description="A package for visualizing read coverage in areas surrounding novel mobile element insertions.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/FenyoLab/transposcope",
    classifiers=[
        "Programming Language :: Python :: 3.6",
        # Fixed typo: "Licencse :: ..." is not a valid trove classifier.
        "License :: OSI Approved :: MIT License",
        "Operating System :: MacOS",
        "Operating System :: Unix",
    ],
    packages=["transposcope", "transposcope.viewer", "transposcope.parsers"],
    package_data={"transposcope": files},
    entry_points={"console_scripts": ["transposcope = transposcope.cli:main"]},
)
| StarcoderdataPython |
9733900 | import logging
import torch as T
from fairseq.data import encoders
from selsum.utils.posterior_generator import PosteriorGenerator
from selsum.utils.helpers.collators import collate_features
from selsum.utils.helpers.subsampling import sample_from_q
from fairseq.utils import apply_to_sample
from selsum.utils.constants.model import FEATS
logger = logging.getLogger(__name__)
class PosteriorInterface(T.nn.Module):
    """Posterior interface that selects reviews based on features. These features
    are computed based on reviews and product summary.
    """

    def __init__(self, args, task, model):
        super().__init__()
        self.args = args
        self.task = task
        self.model = model
        # BPE encoder/decoder built from the task arguments.
        self.bpe = encoders.build_bpe(args)
        # this is useful for determining the device
        self.register_buffer('_float_tensor', T.tensor([0], dtype=T.float))

    @property
    def device(self):
        # The buffer follows the module across .to()/.cuda() moves, so its
        # device is the module's current device.
        return self._float_tensor.device

    def infer(self, feats, ndocs, **kwargs):
        """Runs the inference network. Returns selected document indices and
        their probabilities under q, one numpy array per input collection
        (padding entries removed).

        :param feats: per-collection feature sequences
        :param ndocs: number of documents to sample per collection
        """
        coll_sizes = [len(f) for f in feats]
        sample = self._build_sample([{FEATS: f} for f in feats])
        doc_indxs, q_probs = sample_from_q(self.model, sample=sample,
                                           sample_size=ndocs, **kwargs)
        # sorting by document indxs as the encoder is order agnostic
        # assumes doc_indxs / q_probs are (batch, ndocs) tensors -- TODO confirm
        bsz = doc_indxs.size(0)
        sort_indxs = T.argsort(doc_indxs, dim=-1)
        doc_indxs = doc_indxs[T.arange(bsz).unsqueeze(-1), sort_indxs]
        # Re-order the probabilities with the same permutation so they stay
        # aligned with their document indices.
        q_probs = q_probs[T.arange(bsz).unsqueeze(-1), sort_indxs]
        form_doc_indxs = self._format_output(doc_indxs, coll_sizes)
        form_q_probs = self._format_output(q_probs, coll_sizes)
        return form_doc_indxs, form_q_probs

    def _format_output(self, entries, coll_sizes):
        """Removes padded entries. Converts to list of numpy arrays."""
        entries = entries.cpu().numpy()
        coll = []
        for _entry, _coll_size in zip(entries, coll_sizes):
            # Keep only the first _coll_size entries; the rest is padding
            # introduced by batching -- presumably; verify against collator.
            _entry = _entry[:_coll_size]
            coll.append(_entry)
        return coll

    def _build_sample(self, feats_sample):
        """Builds a sample for running the posterior network."""
        sample = collate_features(feats_sample, add_dummy=True)
        # Move every tensor in the (nested) sample onto the module's device.
        sample = apply_to_sample(lambda tensor: tensor.to(self.device), sample)
        return sample
| StarcoderdataPython |
1819488 | <filename>Med_Cabinet/data/Leafly_csv_Wrangle.py<gh_stars>0
# Leafly_csv_Wrangle.py
# First wrangle to get unique effects for front end user survey and ML use
# Second wrangle to strip "[]"" from list of Effects in Effects column values
# and replace "," with " " in attempt for better neural networking fit.
# Imports
import pandas as pd
# Import Leafly csv
file_name = r"C:\Users\johnj\OneDrive\Documents\Lambda\BuildWeek3\data-science\Med_Cabinet\data\Leafly.csv"
df = pd.read_csv(file_name)
# Examine the Leafly csv data head
#print(df.head())
# First wrangle for unique effects
# Check type of Effects column data
print(type(df.Effects[1])) # <class 'str'>
# Strip and split the Effects column string data in order to get unique values
df.Effects.str.strip('[]').str.split(',')
stripped_effects = list(set([a for b in df.Effects.str.strip('[]').str.split(',') for a in b]))
# Verify the Effects column data had changed from string to set to list
print(type(stripped_effects))
# Function to get unique values
def unique(effects):
# Insert the list to the set
effects_set = set(stripped_effects)
# Convert the set to the list
unique_list_of_effects = (list(effects_set))
for x in unique_list_of_effects:
print(x)
# Commented out as job is done, and on to second wrangle
#print(unique(stripped_effects))
# 13 Unique Effects
# Dry Mouth - Not desireable, not included in 13
# Euphoric
# Happy
# Relaxed
# Focused
# Energetic
# Sleepy
# Talkative
# Tingly
# Aroused
# Giggly
# Creative
# Hungry
# Uplifted
# Second wrangle
# Make the stripped, split and replace in Effects column persist (uses strip and split from Wrangle 1)
df["Effects"] = df["Effects"].str.replace(","," ").astype(str)
# Check type after strip and split, which is <class 'pandas.core.series.Series'>
print(type(df['Effects']))
# Verify changes with printout to terminal
print(df['Effects'].head())
# Set pandas option to show all columns in final printout verification
pd.set_option('display.max_columns', None)
print(df.head())
# Export csv for testing in neural network baseline
file_name = r"C:\Users\johnj\OneDrive\Documents\Lambda\BuildWeek3\data-science\Med_Cabinet\data\Leafly_nolistcommas.csv"
df.to_csv(file_name, sep='\t', encoding='utf-8')
| StarcoderdataPython |
1873276 | from distutils.core import setup
# Package metadata for the thin Cap IQ API wrapper.
_METADATA = dict(
    name='capiq-python',
    version='0.1',
    packages=['capiq'],
    url='https://github.com/guydmann/capiq-python',
    license='',
    author='guydmann',
    author_email='<EMAIL>',
    description='Thin Api Wrapper for Cap IQ',
)

setup(**_METADATA)
| StarcoderdataPython |
5126169 | <reponame>williamlzw/MicroCls<gh_stars>0
import torch
import torch.nn as nn
from torch.nn.functional import adaptive_avg_pool2d
class ConvBNACT(nn.Module):
    """Convolution -> BatchNorm2d -> GELU, bundled as a single module.

    The convolution carries no bias because the following batch norm
    supplies its own affine shift.
    """

    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, padding=0, groups=1):
        super().__init__()
        self.conv = nn.Conv2d(in_channels=in_channels,
                              out_channels=out_channels,
                              kernel_size=kernel_size,
                              stride=stride,
                              padding=padding,
                              groups=groups,
                              bias=False)
        self.bn = nn.BatchNorm2d(out_channels)
        self.act = nn.GELU()

    def forward(self, x):
        # Apply the three stages in order; no residual connection here.
        return self.act(self.bn(self.conv(x)))
class MicroBlock(nn.Module):
    """Residual depthwise conv followed by a 1x1 channel-mixing conv."""

    def __init__(self, nh, kernel_size):
        super().__init__()
        # Depthwise conv (groups=nh) used inside the residual branch.
        # NOTE(review): padding=1 is hard-coded, which only preserves the
        # spatial size for kernel_size == 3 -- confirm for other sizes.
        self.conv1 = ConvBNACT(nh, nh, kernel_size, groups=nh, padding=1)
        # Pointwise conv mixes channels after the residual add.
        self.conv2 = ConvBNACT(nh, nh, 1)

    def forward(self, x):
        mixed = x + self.conv1(x)
        return self.conv2(mixed)
class MicroCls(nn.Module):
    """Tiny image classifier: 4x4 patchify stem, ``depth`` MicroBlocks,
    global average pooling and a linear head.

    :param nh: hidden width (channel count), must be >= 2
    :param depth: number of MicroBlock stages
    :param nclass: number of output classes
    """

    def __init__(self, nh=64, depth=2, nclass=60):
        super().__init__()
        # Fixed: ``assert`` is stripped under ``python -O``; validate
        # explicitly so invalid widths always fail loudly.
        if nh < 2:
            raise ValueError(f"nh must be >= 2, got {nh!r}")
        # 4x4 stride-4 "patchify" stem maps the 3-channel input to nh channels.
        self.conv = ConvBNACT(3, nh, 4, 4)
        self.blocks = nn.ModuleList(MicroBlock(nh, 3) for _ in range(depth))
        self.flatten = nn.Flatten(start_dim=1, end_dim=-1)
        self.dropout = nn.Dropout(0.1)
        self.fc = nn.Linear(nh, nclass)

    def forward(self, x):
        x = self.conv(x)
        for block in self.blocks:
            x = block(x)
        # Global average pool to 1x1, flatten to (batch, nh), then classify.
        x = adaptive_avg_pool2d(x, 1)
        x = self.flatten(x)
        x = self.dropout(x)
        return self.fc(x)
if __name__ == '__main__':
    import time
    # Smoke test: one forward pass on a CIFAR-sized random input.
    x = torch.randn(1, 3, 32, 32)
    model = MicroCls(1024, depth=2, nclass=10)
    t0 = time.time()
    out = model(x)
    t1 = time.time()
    # Print the output shape and the forward latency in milliseconds.
    print(out.shape, (t1-t0)*1000)
    #torch.save(model, 'micro.pth')
    # Layer-by-layer parameter/shape summary (third-party torchsummaryX).
    from torchsummaryX import summary
    summary(model, x)
| StarcoderdataPython |
5126164 | #!/usr/bin/env python
import numpy as np
from spatialmath import SE3, base
import math
def p_servo(wTe, wTep, gain=2, threshold=0.1):
    '''
    Position-based servoing.

    Returns the end-effector velocity which will cause the robot to approach
    the desired pose.

    :param wTe: The current pose of the end-effector in the base frame.
    :type wTe: SE3
    :param wTep: The desired pose of the end-effector in the base frame.
    :type wTep: SE3
    :param gain: The gain for the controller
    :type gain: float
    :param threshold: The threshold or tolerance of the final error between
        the robot's pose and desired pose
    :type threshold: float

    :returns v: The velocity of the end-effector which will cause the robot
        to approach wTep
    :rtype v: ndarray(6)
    :returns arrived: True if the robot is within the threshold of the final
        pose
    :rtype arrived: bool
    '''

    # Coerce raw 4x4 transforms into SE3 instances.
    if not isinstance(wTe, SE3):
        wTe = SE3(wTe)

    if not isinstance(wTep, SE3):
        wTep = SE3(wTep)

    # Pose of the goal expressed in the current end-effector frame.
    eTep = wTe.inv() * wTep

    # Translational velocity error
    ev = eTep.t

    # Angular velocity error (roll-pitch-yaw, radians)
    ew = eTep.rpy('rad')

    # Form 6-vector error [translation, rotation]
    e = np.r_[ev, ew]

    # Desired end-effector velocity (proportional control)
    v = gain * e

    # Fixed: collapsed the verbose if/else; bool() keeps a native Python bool
    # rather than a numpy bool_.
    arrived = bool(np.sum(np.abs(e)) < threshold)

    return v, arrived
# def _angle_axis(T, Td):
# d = base.transl(Td) - base.transl(T)
# R = base.t2r(Td) @ base.t2r(T).T
# li = np.r_[R[2, 1] - R[1, 2], R[0, 2] - R[2, 0], R[1, 0] - R[0, 1]]
# if base.iszerovec(li):
# # diagonal matrix case
# if np.trace(R) > 0:
# # (1,1,1) case
# a = np.zeros((3,))
# else:
# a = np.pi / 2 * (np.diag(R) + 1)
# else:
# # non-diagonal matrix case
# ln = base.norm(li)
# a = math.atan2(ln, np.trace(R) - 1) * li / ln
# return np.r_[d, a]
| StarcoderdataPython |
5038474 | <reponame>peter88213/PyWOffice
"""HmtlBookDescReader - Class for book summary. file operations and parsing.
Part of the PyWriter project.
Copyright (c) 2020 <NAME>
For further information see https://github.com/peter88213/PyWOffice
Published under the MIT License (https://opensource.org/licenses/mit-license.php)
"""
from html.parser import HTMLParser
from pywriter.html.html_form import *
from pywoffice.model.series import Series
class HtmlBookDesc(HTMLParser):
    """HTML file representation of a book series containing
    a series summary and the series' book summaries
    """
    # Required (lowercase) file extension for input files.
    _FILE_EXTENSION = 'html'

    def __init__(self, filePath):
        HTMLParser.__init__(self)
        self._seriesSummary = ''    # text collected for the series <div>
        self._bookSummary = {}      # book ID (str) -> collected summary text
        self._lines = []            # fragments of the section currently parsed
        self._bkId = None           # ID of the book section being parsed, or None
        self._collectText = False   # True while inside a SERIES/BkID <div>
        self._filePath = None
        self.filePath = filePath

    @property
    def filePath(self):
        return self._filePath

    @filePath.setter
    def filePath(self, filePath):
        """Accept only filenames with the right extension.

        NOTE(review): a path with any other extension is silently ignored,
        leaving ``_filePath`` as None -- confirm this is intended.
        """
        if filePath.lower().endswith(self._FILE_EXTENSION):
            self._filePath = filePath

    def handle_starttag(self, tag, attrs):
        """Recognize the beginning of the series or book section.
        Overwrites HTMLparser.handle_starttag()
        """
        if tag == 'div':
            if attrs[0][0] == 'id':
                if attrs[0][1].startswith('SERIES'):
                    self._collectText = True

                if attrs[0][1].startswith('BkID'):
                    # Extract the numeric book ID from e.g. id="BkID42".
                    self._bkId = re.search('[0-9]+', attrs[0][1]).group()
                    self._collectText = True

    def handle_endtag(self, tag):
        """Recognize the end of the series or book section and save data.
        Overwrites HTMLparser.handle_endtag().
        """
        if tag == 'div' and self._collectText:
            if self._bkId is None:
                # No book ID seen yet: this was the series section.
                self._seriesSummary = ''.join(self._lines)
            else:
                self._bookSummary[self._bkId] = ''.join(self._lines)
            self._lines = []
            self._collectText = False

        elif tag == 'p':
            # NOTE(review): appended even outside a collected section; this is
            # only harmless because _lines is reset on section end -- confirm.
            self._lines.append('\n')

    def handle_data(self, data):
        """Collect data within series and book sections.
        Overwrites HTMLparser.handle_data().
        """
        if self._collectText:
            self._lines.append(data.rstrip().lstrip())

    def read(self, series: Series, collection):
        """Parse the html file located at filePath,
        fetching the Series and book descriptions.
        Return a message beginning with SUCCESS or ERROR.
        """
        result = read_html_file(self._filePath)

        if result[0].startswith('ERROR'):
            return (result[0])

        # Convert back to yW7 markup and drop remaining tags before feeding
        # the text to the HTML parser callbacks above.
        text = strip_markup(to_yw7(result[1]))

        # Invoke HTML parser.
        self.feed(text)
        series.desc = self._seriesSummary

        for bkId in self._bookSummary:
            collection.books[bkId].desc = self._bookSummary[bkId]
        return 'SUCCESS'
| StarcoderdataPython |
6457657 | """
:Author: <NAME> <<EMAIL>>
"""
import inspect
from imagination.decorator.validator import restrict_type
from tori.db.common import PseudoObjectId
from tori.db.exception import LockedIdException
from tori.db.metadata.helper import EntityMetadataHelper
def get_collection_name(cls):
    """Deprecated accessor for a document class's collection name.

    Always raises ``RuntimeError``; the old ``return cls.__collection_name__``
    body was unreachable dead code and has been removed.
    """
    raise RuntimeError('obsolete')
def get_relational_map(cls):
    """Deprecated accessor for a document class's relational map.

    Always raises ``RuntimeError``; the old ``return cls.__relational_map__``
    body was unreachable dead code and has been removed.
    """
    raise RuntimeError('obsolete')
def entity(*args, **kwargs):
    """ Entity decorator, usable bare (``@entity``) or with arguments
    (``@entity('collection_name')``).

    :param collection_name: the name of the collection
    :type collection_name: str
    :return: the decorated object
    :rtype: object
    """
    # Bare usage: ``@entity`` places the class itself as the sole argument.
    used_bare = (
        bool(args)
        and inspect.isclass(args[0])
        and isinstance(args[0], type)
    )

    if used_bare:
        # Decorate the class immediately with default settings.
        return prepare_entity_class(args[0])

    # Parameterized usage: capture the arguments in a closure and return
    # the real decorator.
    def decorator(class_reference):
        return prepare_entity_class(class_reference, *args, **kwargs)

    return decorator
def prepare_entity_class(cls, collection_name=None, indexes=None):
    """ Create a entity class

    :param cls: the document class
    :type cls: object
    :param collection_name: the name of the corresponding collection where the
                            default is the lowercase version of the name of the
                            given class (cls)
    :type collection_name: str
    :param indexes: optional list of indexes for the collection (defaults to
                    an empty list)

    The object decorated with this decorator will be automatically provided with
    a few additional attributes.

    =================== ======== =================== ==== =================================
    Attribute           Access   Description         Read Write
    =================== ======== =================== ==== =================================
    id                  Instance Document Identifier Yes  Yes, ONLY ``id`` is undefined.
    __t3_orm_meta__     Static   Tori 3's Metadata   Yes  ONLY the property of the metadata
    __session__         Instance DB Session          Yes  Yes, but NOT recommended.
    =================== ======== =================== ==== =================================

    The following attributes might stay around but are deprecated as soon as
    the stable Tori 3.0 is released.

    =================== ======== =================== ==== =================================
    Attribute           Access   Description         Read Write
    =================== ======== =================== ==== =================================
    __collection_name__ Static   Collection Name     Yes  Yes, but NOT recommended.
    __relational_map__  Static   Relational Map      Yes  Yes, but NOT recommended.
    __indexes__         Static   Indexing List       Yes  Yes, but NOT recommended.
    =================== ======== =================== ==== =================================

    ``__session__`` is used to resolve the managing rights in case of using
    multiple sessions simutaneously.

    For example,

    .. code-block:: python

        @entity
        class Note(object):
            def __init__(self, content, title=''):
                self.content = content
                self.title   = title

    where the collection name is automatically defined as "note".

    .. versionchanged:: 3.0

        The way Tori stores metadata objects in ``__collection_name__``,
        ``__relational_map__`` and ``__indexes__`` are now ignored by the ORM
        in favour of ``__t3_orm_meta__`` which is an entity metadata object.

        This change is made to allow easier future development.

    .. tip::

        You can define it as "notes" by replacing ``@entity`` with ``@entity('notes')``.
    """
    if not cls:
        raise ValueError('Expecting a valid type')

    # BUGFIX: the default used to be a mutable ``indexes=[]`` shared across
    # every call; a fresh list is now created per call.
    if indexes is None:
        indexes = []

    def get_id(self):
        return self.__dict__['_id'] if '_id' in self.__dict__ else None

    def set_id(self, id):
        """
        Define the document ID if the original ID is not defined.

        :param id: the ID of the document.
        """
        # A real (non-pseudo) ID is immutable once assigned.
        if '_id' in self.__dict__ and self.__dict__['_id']\
                and not isinstance(self.__dict__['_id'], PseudoObjectId):
            raise LockedIdException('The ID is already assigned and cannot be changed.')

        self._id = id

    cls.__session__ = None

    EntityMetadataHelper.imprint(
        cls,
        collection_name or cls.__name__.lower(),
        indexes
    )

    cls.id = property(get_id, set_id)

    return cls
class Entity(object):
    """ Dynamic-attribute Basic Entity

    Every keyword argument passed to the constructor becomes an instance
    attribute of the same name.

    :param attributes: key-value dictionary
    :type attributes: dict

    Here is an example on how to use this class.

    .. code-block:: python

        @entity
        class Note(Entity): pass
    """
    def __init__(self, **attributes):
        for name, value in attributes.items():
            setattr(self, name, value)
class Index(object):
    """ Index

    Describes a collection index as a mapping of field name to index type,
    plus a uniqueness flag.

    :param field_map: the map of field to index type
    :type field_map: dict
    :param unique: the unique flag
    :type unique: bool

    Unless a field is not in the map of fixed orders, the index will
    instruct the repository to ensure all combinations of indexes are
    defined whenever is necessary.
    """
    def __init__(self, field_map, unique=False):
        self._unique = unique
        self._field_map = field_map

    @property
    def field_map(self):
        """The field-name -> index-type mapping."""
        return self._field_map

    @property
    def unique(self):
        """Whether the index enforces uniqueness."""
        return self._unique

    def to_list(self):
        """Return the index as a list of ``(field, index_type)`` pairs."""
        return [(field, index_type)
                for field, index_type in self._field_map.items()]
class BasicAssociation(object):
    """ Basic Association

    A plain value object linking the two endpoints of an association.

    :param origin: The origin of the association
    :type origin: object
    :param destination: The destination (endpoint) of the association
    :type destination: object

    .. note:: This class is used automatically by the association mapper.
    """
    def __init__(self, origin, destination):
        self.destination = destination
        self.origin = origin
6650711 | import requests
from AlertManager.how_to_create_alert import create_alert
from AlertManager.how_to_get_alert_by_id import get_alert
from AlertManager.how_to_get_alert_type_by_id import get_alert_type
from AlertManager.how_to_get_alert_types import get_alert_types
from AlertManager.how_to_get_alerts import get_alerts
from client import Client
from tools import handle_error_response, GrantType
def delete_alert(client, alert):
    """Delete an alert by id; return True when the API answers 204 No Content."""
    url = f'{client.base_url}/api/alert_manager/v1/alerts/{alert["id"]}'
    resp = requests.delete(url, headers=client.auth_header)
    handle_error_response(resp)
    return resp.status_code == 204
def test_create_and_delete_alert():
    """An alert can be created and then deleted again."""
    api_client = Client(grant_type=GrantType.password)
    new_alert = create_alert(api_client, {'type': 'DrStorageHardQuotaExceeds'})
    assert new_alert
    assert delete_alert(api_client, new_alert)
def test_get_alerts():
    """Listing alerts must succeed and return a (possibly empty) collection."""
    api_client = Client(grant_type=GrantType.password)
    alerts = get_alerts(api_client)
    assert alerts is not None
def test_get_alert(alert):
    """Fetching a single alert by id must return the same alert."""
    api_client = Client(grant_type=GrantType.password)
    fetched = get_alert(api_client, alert['id'])
    assert fetched
    assert fetched['id'] == alert['id']
def test_alert_types():
    """Alert types must be listable, and each listed type retrievable by id."""
    api_client = Client(grant_type=GrantType.password)
    types_list = get_alert_types(api_client)
    assert types_list is not None
    if types_list:
        assert get_alert_type(api_client, types_list[0]['id'])
| StarcoderdataPython |
4950602 | from docker_update.dockerUpdate import DockerUpdate | StarcoderdataPython |
197679 | <gh_stars>1-10
#
# Copyright 2017 the original author or authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import arrow
import structlog
from twisted.internet.task import LoopingCall
from twisted.internet.defer import inlineCallbacks, returnValue
from voltha_protos.events_pb2 import Event, EventType, EventCategory, EventSubCategory, DeviceEvent, EventHeader
import six
log = structlog.get_logger()
# TODO: In the device adapter, the following events are still TBD
# (Taken from openolt_events)
# onu_alarm_ind
# onu_startup_failure_indication
# onu_signal_degrade_indication
# onu_drift_of_window_ind
# onu_loss_omci_ind
# onu_signals_fail_ind
# onu_tiwi_ind
# onu_activation_fail_ind
# onu_processing_error_ind
class AdapterEvents:
    """
    Class for managing Events within a given Device Handler instance
    """
    def __init__(self, core_proxy, device_id, logical_device_id, serial_number):
        """
        Adapter event manager initializer

        :param core_proxy: (CoreProxy) Core proxy reference
        :param device_id: (str) Device handler's unique device id
        :param logical_device_id: (str) Logical Device that the device is a member of
        :param serial_number: (str) Serial number of the device(OLT) that created this instance
        """
        # NOTE(review): presumably reserved for a twisted LoopingCall; it is
        # never started anywhere in this class — confirm before relying on it.
        self.lc = None
        # Version stamped into every EventHeader produced by this manager.
        self.type_version = "0.1"
        self.device_id = device_id
        self.core_proxy = core_proxy
        self.serial_number = serial_number
        self.logical_device_id = logical_device_id
        # The Kafka topic the core proxy listens on doubles as the adapter name.
        self.adapter_name = core_proxy.listening_topic
        # Device-scoped logger (separate from the module-level ``log``).
        self.log = structlog.get_logger(device_id=device_id)

    def format_id(self, event):
        """
        Format the Unique Event ID for this event. This is provided in the events
        'id' field

        :param event: (str) The name of the event such as 'Discover' or 'LOS'

        :return: (str) Event ID, shaped as ``voltha.<adapter>.<device_id>.<event>``
        """
        return 'voltha.{}.{}.{}'.format(self.adapter_name,
                                        self.device_id, event)

    def get_event_header(self, _type, category, sub_category, event, raised_ts):
        """
        Build the protobuf header shared by all events sent by this manager.

        :param _type: (EventType) type of the event
        :param category: (EventCategory) functional category
        :param sub_category: (EventSubCategory) functional sub category
        :param event: (str) event name, used to build the unique id
        :param raised_ts: (int) epoch seconds at which the event was raised

        :return: (EventHeader) Event header
        """
        hdr = EventHeader(id=self.format_id(event),
                          category=category,
                          sub_category=sub_category,
                          type=_type,
                          type_version=self.type_version)
        # NOTE(review): the trailing comma makes this line a 1-tuple
        # expression; FromSeconds() still executes, but the comma is almost
        # certainly a typo.
        hdr.raised_ts.FromSeconds(raised_ts),
        hdr.reported_ts.GetCurrentTime()
        return hdr

    @inlineCallbacks
    def send_event(self, event_header, event_body):
        """
        Send the event to the event bus

        :param event_header: (EventHeader) header built by get_event_header()
        :param event_body: (protobuf) body matching event_header.type
        """
        event = None
        try:
            self.log.debug('send_event')
            # Wrap the body in the Event field matching the declared type.
            # Any other type leaves ``event`` as None and the event is
            # silently dropped.
            if event_header.type == EventType.DEVICE_EVENT:
                event = Event(header=event_header, device_event=event_body)
            elif event_header.type == EventType.KPI_EVENT:
                event = Event(header=event_header, kpi_event=event_body)
            elif event_header.type == EventType.KPI_EVENT2:
                event = Event(header=event_header, kpi_event2=event_body)
            elif event_header.type == EventType.CONFIG_EVENT:
                event = Event(header=event_header, config_event=event_body)
            if event is not None:
                yield self.core_proxy.submit_event(event)
        except Exception as e:
            self.log.exception('failed-to-send-event', e=e)
            raise
        log.debug('event-sent-to-kafka', event_type=event_header.type)
class DeviceEventBase(object):
    """Base class for device events"""
    def __init__(self, event_mgr, raised_ts, object_type,
                 event, resource_id=None,
                 category=EventCategory.EQUIPMENT,
                 sub_category=EventSubCategory.PON):
        """
        Initializer for the Event base class

        :param event_mgr: (AdapterEvents) Reference to the device handler's Adapter
                          Event manager
        :param raised_ts: (int) epoch seconds at which the event was raised
        :param object_type: (str) Type of device generating the event such as 'olt' or 'onu'
        :param event: (str) A textual name for the event such as 'HeartBeat' or 'Discovery'
        :param category: (EventCategory) Refers to functional category of
                         the event
        :param sub_category: (EventSubCategory) Refers to functional sub category of
                             the event
        :param resource_id: (str) Identifier of the originating resource of the event
        """
        self.event_mgr = event_mgr
        self._object_type = object_type
        self._event = event
        self._category = category
        self._sub_category = sub_category
        # All subclasses emit DEVICE_EVENT-typed events.
        self._type = EventType.DEVICE_EVENT
        self._resource_id = resource_id
        self.raised_ts = raised_ts

    def format_description(self, _object, device_event, status):
        """
        Format the textual description field of this event

        :param _object: (str) object type, e.g. 'olt' or 'onu'
        :param device_event: (str) The name of the event such as 'Discover' or 'LOS'
        :param status: (bool) If True, the event is active (it is being raised)

        :return: (str) Event description
        """
        return '{} Event - {} - {}'.format(_object.upper(),
                                           device_event.upper(),
                                           'Raised' if status else 'Cleared')

    def get_device_event_data(self, status):
        """
        Get the event specific data and format it into a dictionary. When the event
        is being sent to the event bus, this dictionary provides a majority of the
        fields for the events.

        :param status: (bool) True if the event is active/raised

        :return: (DeviceEvent) Event data
        """
        context_data = self.get_context_data()
        current_context = {}
        # Stringify every context value; non-dict context data is ignored.
        if isinstance(context_data, dict):
            for key, value in six.iteritems(context_data):
                current_context[key] = str(value)
        # Always insert serial number of the OLT, ONU serial number comes in the context
        current_context["serial-number"] = self.event_mgr.serial_number
        # NOTE(review): the event name is always suffixed with "RAISE_EVENT",
        # even when status is False (a clear) — confirm this is intended.
        return DeviceEvent(resource_id=self.event_mgr.device_id,
                           device_event_name="{}_{}".format(self._event, "RAISE_EVENT"),
                           description=self.format_description(self._object_type, self._event, status),
                           context=current_context)

    def get_context_data(self):
        """
        Get event specific context data. If an event has specific data to specify, it is
        included in the context field in the published event

        :return: (dict) Dictionary with event specific context data
        """
        return {}   # NOTE: You should override this if needed

    def send(self, status):
        """
        Called to send a device event to the event bus

        :param status: (bool) True to raise the event, False to clear it
        """
        event_header = self.event_mgr.get_event_header(EventType.DEVICE_EVENT, self._category,
                                                       self._sub_category, self._event, self.raised_ts)
        device_event_data = self.get_device_event_data(status)
        self.event_mgr.send_event(event_header, device_event_data)
11325654 | import models
from django.db.models.base import ModelBase
import serializer
# imports Generic Views from django_template_project base app
from base.views import (
SerializerListView,
SerializerDetailView,
SerializerCreateView,
SerializerUpdateView
)
# Dynamically generate four serializer views (List/Detail/Create/Update) for
# every concrete (non-abstract) Django model defined in ``models``.
for name, cls in models.__dict__.items():
    if isinstance(cls, ModelBase) and cls._meta.abstract is False:
        # At module level, locals() is the module namespace, so these
        # assignments publish each generated class as a module attribute.
        view_name = '{0}SerializerListView'.format(name)
        locals()[view_name] = type(
            view_name,
            (SerializerListView,),
            {
                'model': cls,
                'serializer_name': '{0}Serializer'.format(name),
                'serializer': serializer
            }
        )
        view_name = '{0}SerializerDetailView'.format(name)
        locals()[view_name] = type(
            view_name,
            (SerializerDetailView,),
            {
                'model': cls,
                'serializer_name': '{0}Serializer'.format(name),
                'serializer': serializer
            }
        )
        # NOTE(review): unlike the other three views, the Create view gets no
        # 'model' attribute — confirm whether SerializerCreateView needs one.
        view_name = '{0}SerializerCreateView'.format(name)
        locals()[view_name] = type(
            view_name,
            (SerializerCreateView,),
            {
                'serializer_name': '{0}Serializer'.format(name),
                'serializer': serializer
            }
        )
        view_name = '{0}SerializerUpdateView'.format(name)
        locals()[view_name] = type(
            view_name,
            (SerializerUpdateView,),
            {
                'serializer_name': '{0}Serializer'.format(name),
                'serializer': serializer,
                'model': cls,
            }
        )
1643560 | <filename>day05/day05_puz1.py<gh_stars>1-10
#! /usr/bin/env python
def run_opcode(code_list, programme_input=1):
    """Run the opcode as determined by the values in code_list

    Before you enter the next loop, check to see if the opcode
    (the first number in the sequence) is 99. If it is, then
    you can stop and return the code as it stands.

    Parameters
    ----------
    code_list : list
        The opcode
    programme_input : int
        The input to the programme, default 1

    Returns
    -------
    code_list : list
        The programme memory after execution halts
    output : int or None
        The last value emitted by opcode 4, or None if none was emitted
    """
    # Start reading in the programme at position 0
    opcode_loc = 0
    opcode = None
    output = None
    while opcode != '99':
        # Get and parse the opcode
        code = code_list[opcode_loc]
        opcode, parameter_mode_dict = parse_opcode(code)
        # The following ``if`` blocks are mutually exclusive because
        # ``opcode`` does not change within one loop iteration.
        if opcode == '01':
            # Add the appropriate values together if you have an opcode of 1
            code_list = apply_opcode1(code_list,
                                      opcode_loc,
                                      parameter_mode_dict)
            # Increase the opcode_loc by 4 to keep yourself moving forwards
            # through the code
            opcode_loc += 4
        if opcode == '02':
            # Multiply the appropriate values together if you have an opcode
            # of 2
            code_list = apply_opcode2(code_list,
                                      opcode_loc,
                                      parameter_mode_dict)
            # Increase the opcode_loc by 4 to keep yourself moving forwards
            # through the code
            opcode_loc += 4
        if opcode == '03':
            # Put the input value in the appropriate location if you have an
            # opcode of 3
            code_list = apply_opcode3(code_list,
                                      opcode_loc,
                                      programme_input=programme_input)
            # Increase the opcode_loc by 2 to keep yourself moving forwards
            # through the code
            opcode_loc += 2
        if opcode == '04':
            # Return the output value if you have an opcode of 4
            code_list, output = apply_opcode4(code_list,
                                              opcode_loc,
                                              parameter_mode_dict)
            # Print the output value to screen
            print(f'Output value: {output}')
            # Increase the opcode_loc by 2 to keep yourself moving forwards
            # through the code
            opcode_loc += 2
            # If the output is not 0 then check that it is followed by a 99
            # (a non-zero diagnostic code is only valid right before a halt).
            if output != 0:
                check_next_opcode_99(opcode_loc, code_list)
    return code_list, output
def load_computer_data(fname):
    """Read the comma-separated intcode programme and return it as ints.

    Parameters
    ----------
    fname : string
        File provided by advent of code competition

    Returns
    -------
    list of int
        The programme, one integer per comma-separated token.
    """
    raw_tokens = []
    # Split every line on commas and accumulate the tokens in order.
    with open(fname, 'r') as handle:
        for line in handle:
            raw_tokens.extend(line.split(','))
    # int() tolerates the trailing newline on the last token of each line.
    return [int(token) for token in raw_tokens]
def parse_opcode(code):
    """Split a (up to 5 digit) instruction into opcode and parameter modes.

    The two rightmost digits are the instruction; the remaining three,
    read right to left, give the mode (position '0' or immediate '1') of
    each parameter.

    Parameters
    ----------
    code : int
        instruction as integer that is up to 5 digits long

    Returns
    -------
    opcode : str
        two digit string corresponding to an instruction
    parameter_mode_dict : dict
        dictionary containing the parameter mode for each of the opcode
        parameters
    """
    padded = str(code).zfill(5)
    opcode = padded[-2:]
    parameter_mode_dict = {1: padded[2], 2: padded[1], 3: padded[0]}
    return opcode, parameter_mode_dict
# Define Python user-defined exceptions
# Adapted from https://www.programiz.com/python-programming/user-defined-exception # noqa
class Error(Exception):
    """Base class for the intcode interpreter's exception hierarchy."""


class ForbiddenValueError(Error):
    """Raised when an opcode parameter mode is not permitted."""
def apply_opcode1(code_list, opcode_loc, parameter_mode_dict):
    """Execute opcode 1 (addition) in place.

    Adds the two operands that follow the opcode (resolved through their
    parameter modes) and stores the sum at the address given by the third
    parameter.

    Parameters
    ----------
    code_list : list
        The whole programme
    opcode_loc : int
        The index of the opcode in code_list
    parameter_mode_dict : dict
        Mode ('0' position / '1' immediate) for each of the 3 parameters

    Returns
    -------
    code_list : list
        The whole programme, mutated in place

    Raises
    ------
    ForbiddenValueError
        If the third (destination) parameter is not in position mode.
    """
    _, operand1, operand2, destination = code_list[opcode_loc:opcode_loc + 4]

    # Position mode ('0') means the parameter is an address to dereference.
    if parameter_mode_dict[1] == '0':
        operand1 = code_list[operand1]
    if parameter_mode_dict[2] == '0':
        operand2 = code_list[operand2]

    # The destination parameter must always be a position.
    if parameter_mode_dict[3] != '0':
        print('Something has gone wrong! ' +
              'The 3rd parameter should never be anything other than 0')
        raise ForbiddenValueError

    code_list[destination] = operand1 + operand2
    return code_list
def apply_opcode2(code_list, opcode_loc, parameter_mode_dict):
    """Execute opcode 2 (multiplication) in place.

    Multiplies the two operands that follow the opcode (resolved through
    their parameter modes) and stores the product at the address given by
    the third parameter.

    Parameters
    ----------
    code_list : list
        The opcode
    opcode_loc : int
        The index of the opcode in code_list
    parameter_mode_dict : dict
        Mode ('0' position / '1' immediate) for each of the 3 parameters

    Returns
    -------
    code_list : list
        The whole programme, mutated in place

    Raises
    ------
    ForbiddenValueError
        If the third (destination) parameter is not in position mode.
    """
    _, operand1, operand2, destination = code_list[opcode_loc:opcode_loc + 4]

    # Position mode ('0') means the parameter is an address to dereference.
    if parameter_mode_dict[1] == '0':
        operand1 = code_list[operand1]
    if parameter_mode_dict[2] == '0':
        operand2 = code_list[operand2]

    # The destination parameter must always be a position.
    if parameter_mode_dict[3] != '0':
        print('Something has gone wrong! ' +
              'The 3rd parameter should never be anything other than 0')
        raise ForbiddenValueError

    code_list[destination] = operand1 * operand2
    return code_list
def apply_opcode3(code_list, opcode_loc, programme_input=1):
    """Execute opcode 3: store the programme input at the parameter's address.

    Parameters
    ----------
    code_list : list
        The opcode
    opcode_loc : int
        The index of the opcode in code_list
    programme_input : int
        input value, default 1

    Returns
    -------
    code_list : list
        The whole programme, mutated in place
    """
    destination = code_list[opcode_loc + 1]
    code_list[destination] = programme_input
    return code_list
def apply_opcode4(code_list, opcode_loc, parameter_mode_dict):
    """Execute opcode 4: read a value (via the parameter mode) and output it.

    Parameters
    ----------
    code_list : list
        The opcode
    opcode_loc : int
        The index of the opcode in code_list
    parameter_mode_dict : dict
        Mode ('0' position / '1' immediate) for the single parameter

    Returns
    -------
    code_list : list
        The whole programme (unchanged)
    output : int
        The value in the location determined by the parameter of the opcode
    """
    value = code_list[opcode_loc + 1]
    # Position mode ('0'): dereference the parameter as an address.
    if parameter_mode_dict[1] == '0':
        value = code_list[value]
    return code_list, value
def check_next_opcode_99(opcode_loc, code_list):
    """Verify that the instruction at opcode_loc is the halt opcode (99).

    A non-zero output value should only occur *right* before the programme
    ends, so anything other than 99 here means the programme is faulty.

    Raises
    ------
    ForbiddenValueError
        If the next opcode is not 99.
    """
    next_code, _ = parse_opcode(code_list[opcode_loc])
    if next_code != '99':
        print(f'NEXT OPCODE: {next_code}')
        print('Something has gone wrong! ' +
              'There is a non-zero output that is not followed by a halt :(')
        raise ForbiddenValueError
if __name__ == "__main__":
    """Load in the data, adjust it to the state before the computer caught fire,
    then run the opcode and print the value in position 0 to the screen.
    """
    # NOTE(review): the string above is stale — nothing here adjusts the data
    # or prints position 0; it appears copied from the day 2 puzzle. The code
    # loads the programme and runs it with the diagnostic input 1.
    code_list = load_computer_data('day05/input.txt')
    print('\n---- Day 5, Puzzle 1 ----')
    code_list, output = run_opcode(code_list, programme_input=1)
| StarcoderdataPython |
6489281 | '''
The commands manager is an API to be used to create commands, bind commands to handlers and
activate them.
It's also possible to bind handlers to a given scope so that they're only active when a scope
is active.
# The basic usage is:
commands_manager.register_command('copy', 'Copy')
commands_manager.set_command_handler('copy', copy_to_clipboard)
commands_manager.activate('copy') # activates the copy action
# Then, if there was a special copy on some context,
# one would need to register the scope/handler:
commands_manager.register_scope('custom_scope')
commands_manager.set_command_handler('copy', copy_to_clipboard, 'custom_scope')
# And then active/deactivate such a context when needed:
commands_manager.activate_scope('custom_scope')
commands_manager.activate('copy')
commands_manager.deactivate_scope('custom_scope')
'''
from collections import namedtuple
from pyvmmonitor_core import implements, interface
class ICommandsManager(object):
    '''
    Interface for command managers (see the module docstring for usage).
    Methods here are documentation-only stubs; concrete behaviour lives in
    implementations such as _DefaultCommandsManager.
    '''

    # Sentinel for "the default, always-active scope".
    DEFAULT_SCOPE = None

    CURRENT_SCOPE = []  # Sentinel: any mutable (check with 'is')

    def register_command(self, command_id, command_name, icon=None, status_tip=None):
        '''
        Registers a command and makes it available to be activated (if no handler is available
        after being registered, nothing is done if it's activated).

        :param str command_id:
        :param str command_name:
        :param object icon:
            May be the actual icon or a way to identify it (at core it doesn't make
            a difference, it just stores the value to be consumed later on).
        :param str status_tip:
            A tip for the command (if not given, a default one may be given based on the
            command_name).
        '''

    def get_command_info(self, command_id):
        '''
        :param str command_id:
            The command id for which we want the info.

        :return: a namedtuple with command_id, command_name, icon, status_tip
        '''

    def set_command_handler(self, command_id, command_handler, scope=DEFAULT_SCOPE):
        '''
        Sets a handler to the given command id (optionally with a different scope).

        The command_handler must be a callable -- it may accept arguments (which then will need to
        be passed in #activate).

        It's possible to pass None to set no command handler in the context (also see
        remove_command_handler to remove a registered command handler -- in case it's registered
        and then removed).
        '''

    def remove_command_handler(self, command_id, command_handler, scope=DEFAULT_SCOPE):
        '''
        Removes a registered handler if it's the current handler at a given scope (does nothing
        if it's not the current handler).
        '''

    def activate(self, command_id, __scope__=CURRENT_SCOPE, **kwargs):
        '''
        Activates a given command.

        kwargs are passed on to the handler of the command. Note that only arguments which are
        simple python objects should be passed.

        Namely: int/long/float/complex/str/bytes/bool/tuple/list/set (this restriction is enforced
        so that clients can be sure that they can easily replicate a command invocation).
        '''

    def register_scope(self, scope):
        '''
        :param str scope:
            The scope which can have a different set of handlers for the existing actions.
        '''

    def activate_scope(self, scope):
        '''
        Activates a given scope so that the commands registered in such a scope have precedence
        over the commands in the default scope (or previously activated scopes).
        '''

    def deactivate_scope(self, scope):
        '''
        Deactivates a previously activated scope.
        '''

    def list_command_ids(self):
        '''
        Returns the available command ids.
        '''

    def list_active_scopes(self):
        '''
        Returns the current scope activation list.

        :rtype: list(str)
        '''
def create_default_commands_manager():
    '''
    Create the default ICommandsManager implementation.

    :return: a new, empty commands manager.
    '''
    return _DefaultCommandsManager()
# --- Private API from now on ---
def _default_noop_handler(**kwargs):
pass
class CommandUndefinedEror(Exception):
    '''
    Raised when a command id has not been registered.

    NOTE: the class name misspells "Error"; it is kept as-is because it is
    part of the public API.
    '''
_CommandInfo = namedtuple('_CommandInfo', ('command_id', 'command_name', 'icon', 'status_tip'))
@interface.check_implements(ICommandsManager)
class _DefaultCommandsManager(object):
    '''
    Users should base on ICommandsManager (create_default_commands_manager can be used to create
    a default implementation, this class is not exposed and can be removed -- use aggregation
    to compose a new class if needed).

    @see: create_default_commands_manager()
    '''

    def __init__(self):
        # command_id -> {scope: handler-or-None}
        self._command_id_to_scopes = {}
        # command_id -> _CommandInfo
        self._command_id_to_info = {}
        # Activation *stack*: later entries win over earlier ones in activate().
        self._activated_scopes = [ICommandsManager.DEFAULT_SCOPE]
        self._valid_scopes = {ICommandsManager.DEFAULT_SCOPE}

    @implements(ICommandsManager.list_command_ids)
    def list_command_ids(self):
        from pyvmmonitor_core import compat
        return compat.keys(self._command_id_to_info)

    @implements(ICommandsManager.list_active_scopes)
    def list_active_scopes(self):
        # Return a copy so callers cannot mutate the internal stack.
        return self._activated_scopes[:]

    @implements(ICommandsManager.register_scope)
    def register_scope(self, scope):
        self._valid_scopes.add(scope)

    @implements(ICommandsManager.activate_scope)
    def activate_scope(self, scope):
        if scope not in self._valid_scopes:
            raise ValueError('The passed scope (%s) was not registered.' % (scope,))
        self._activated_scopes.append(scope)
        # A deep stack usually means a scope leak (activations not paired
        # with deactivations) -- warn but keep going.
        if len(self._activated_scopes) > 20:
            import sys
            sys.stderr.write(
                'It seems there is some issue in scopes not being deactivated!\nActivated scopes: %s' %  # @IgnorePep8
                (self._activated_scopes,))

    @implements(ICommandsManager.deactivate_scope)
    def deactivate_scope(self, scope):
        from pyvmmonitor_core.list_utils import remove_last_occurrence
        if scope == ICommandsManager.DEFAULT_SCOPE:
            raise AssertionError('Default scope cannot be deactivated.')
        if not remove_last_occurrence(self._activated_scopes, scope):
            raise RuntimeError(
                'Unable to deactivate scope not activated: %s. Active scopes: %s' %
                (scope, self._activated_scopes))

    @implements(ICommandsManager.register_command)
    def register_command(self, command_id, command_name, icon=None, status_tip=None):
        if command_id in self._command_id_to_info:
            raise RuntimeError('Command: %s already registered' % (command_id,))
        self._command_id_to_info[command_id] = _CommandInfo(
            command_id, command_name, icon, status_tip)
        # Every command starts with a no-op handler in the default scope.
        self._command_id_to_scopes[command_id] = {
            ICommandsManager.DEFAULT_SCOPE: _default_noop_handler
        }

    @implements(ICommandsManager.get_command_info)
    def get_command_info(self, command_id):
        try:
            return self._command_id_to_info[command_id]
        except KeyError:
            raise CommandUndefinedEror('Command with id: %s is not defined.' % (command_id,))

    @implements(ICommandsManager.set_command_handler)
    def set_command_handler(self, command_id, command_handler,
                            scope=ICommandsManager.DEFAULT_SCOPE):
        if scope not in self._valid_scopes:
            raise ValueError('The passed scope (%s) was not registered.' % (scope,))
        try:
            scopes = self._command_id_to_scopes[command_id]
        except KeyError:
            raise CommandUndefinedEror('Command with id: %s is not defined.' % (command_id,))
        else:
            # Returns the previously registered handler for this scope.
            prev_command_handler = scopes.get(scope, _default_noop_handler)
            scopes[scope] = command_handler
            return prev_command_handler

    @implements(ICommandsManager.remove_command_handler)
    def remove_command_handler(self, command_id, command_handler,
                               scope=ICommandsManager.DEFAULT_SCOPE):
        if scope not in self._valid_scopes:
            raise ValueError('The passed scope (%s) was not registered.' % (scope,))
        try:
            scopes = self._command_id_to_scopes[command_id]
        except KeyError:
            raise CommandUndefinedEror('Command with id: %s is not defined.' % (command_id,))
        else:
            prev_command_handler = scopes.get(scope, _default_noop_handler)
            # Only unbind when the caller owns the currently bound handler.
            # None handlers are skipped by activate(), so this effectively
            # disables the command in this scope. Returns True on removal,
            # implicitly None otherwise.
            if prev_command_handler is command_handler:
                scopes[scope] = None
                return True

    @implements(ICommandsManager.activate)
    def activate(self, command_id, __scope__=ICommandsManager.CURRENT_SCOPE, **kwargs):
        try:
            scopes = self._command_id_to_scopes[command_id]
        except KeyError:
            raise CommandUndefinedEror('Command with id: %s is not defined.' % (command_id,))
        else:
            if __scope__ is ICommandsManager.CURRENT_SCOPE:
                # Walk the activation stack from most- to least-recently
                # activated; the first scope with a non-None handler wins.
                for active_scope in reversed(self._activated_scopes):
                    handler = scopes.get(active_scope)
                    if handler is not None:
                        handler(**kwargs)
                        break
            else:
                # Use the passed scope.
                handler = scopes.get(__scope__)
                if handler is not None:
                    handler(**kwargs)
| StarcoderdataPython |
75990 | import shapefile
from shapely.geometry import shape
import csv
import sys
import matplotlib.pyplot as plt
import numpy as np
from random import randint
from subprocess import call
from array import array
from shapely.geometry import Polygon
from shapely.geometry.multipolygon import MultiPolygon
import glob
import math
import visualize_maps
import config
## Reads Congressional district files
def read_districts(state, alt_map):
    """Load Congressional district polygons for a state.

    If alt_map names an alternative shapefile (relative to data/), it is used
    wholesale; otherwise the 2016 Census 115th-Congress districts are filtered
    down to the requested state.
    """
    # If an alternative map is provided
    if len(alt_map) > 0:
        alt_reader = shapefile.Reader('data/' + alt_map)
        return [shape(rec.shape.__geo_interface__)
                for rec in alt_reader.shapeRecords()]

    # Otherwise, default to 2016 Census data for districting
    census_reader = shapefile.Reader('data/cb_2016_us_cd115_500k/cb_2016_us_cd115_500k.shp')
    polygons = []
    for rec in census_reader.shapeRecords():
        # record[0] holds the state FIPS code
        if rec.record[0] == config.state_dict[state]:
            polygons.append(shape(rec.shape.__geo_interface__))
    return polygons
## Finds district containing a tract
def shape_to_district(tract,districts):
ind = -1
shp_geom = shape(tract.shape.__geo_interface__)
centroid = shp_geom.centroid
for i in range(0,len(districts)):
if(districts[i].contains(centroid)):
ind = i
break
return ind
## Gets shapefile for state
def data_for_state(state):
# Can be replaced with 2017 data
return shapefile.Reader('data/cb_2016_' + config.state_dict[state] + '_tract_500k/cb_2016_' + config.state_dict[state] +'_tract_500k.shp')
## Loads all relevant data for a state
def read(state, alt_map):
    """Load tract centroids/populations for a state and write binary temp files.

    Builds GEOID -> centroid and GEOID -> district-index maps, joins them
    against ACS population data, and writes two float64 arrays into
    config.temp_folder for the C district_mbo binary to consume.

    Returns [num_valid_units, num_dists, tot_pop].
    """
    print 'Reading ' + state + ' data...'
    shape_data = data_for_state(state)
    id_to_centroids = {} # GEOID -> centroid dictionary
    id_to_district = {} # GEOID -> district index in state dictionary
    num_dists = 0
    district_keys = {} # district index in US list -> district index in state
    # Load district data for state
    districts = read_districts(state,alt_map)
    # For every tract in shape_data, find containing district:
    for tract in shape_data.shapeRecords():
        aff_geo_id = tract.record[3] # GEOID
        shp_geom = shape(tract.shape.__geo_interface__)
        id_to_centroids[aff_geo_id] = shp_geom.centroid # Store centroid
        ind = shape_to_district(tract,districts) # Index of containing district in US
        if(ind == -1): # If the centroid failed to get a district
            # NOTE(review): unmatched tracts are silently assigned district 0,
            # which slightly inflates that district's population.
            id_to_district[aff_geo_id] = 0
        else:
            if(ind in district_keys): # If district already re-mapped to in-state index
                id_to_district[aff_geo_id] = district_keys[ind]
            else: # Otherwise, create a new rank and add to the dictionary
                district_keys[ind] = num_dists
                id_to_district[aff_geo_id] = district_keys[ind]
                num_dists = num_dists + 1
    # Extract data for census units + containing districts
    unit_data = []
    unit_districts = []
    num_valid_units = 0
    tot_pop = 0
    district_pops = [0] * num_dists
    # American Community Survey (ACS) data from Census
    # Contains population data for entire US.
    with open('data/ACS_16_5YR_DP05_with_ann.csv') as csvDataFile:
        csv_reader = csv.reader(csvDataFile)
        for row in csv_reader:
            key = row[0] # GEOID
            if key in id_to_centroids: # If we have spatial data for this key...
                centroid = id_to_centroids[key] # Get centroid
                pop = int(row[3]) # Get population
                # Append GEOID, centroid, and population to array
                unit_data.append(int(key[config.id_0:]))
                unit_data.append(centroid.x)
                unit_data.append(centroid.y)
                unit_data.append(pop)
                # Append GEOID and district index to file
                unit_districts.append(int(key[config.id_0:]))
                unit_districts.append(id_to_district[key])
                district_pops[id_to_district[key]] += pop
                tot_pop += pop
                num_valid_units = num_valid_units + 1
    # NOTE(review): shell=True with a glob; safe only as long as state/temp_folder
    # come from trusted config.
    call('rm ' + config.temp_folder + state + '*', shell=True)
    # Write unit data to file
    output_file = open(config.temp_folder + state + config.unit_data_suffix, 'wb')
    float_array = array('d', unit_data)
    float_array.tofile(output_file)
    output_file.close()
    # Write containing district data to file
    output_file = open(config.temp_folder + state + config.unit_district_suffix, 'wb')
    float_array = array('d', unit_districts)
    float_array.tofile(output_file)
    output_file.close()
    return [num_valid_units, num_dists, tot_pop]
# Main function without re-loading data
def run_with_data(state,k,max_iter,initial_state,ms_param,stopCrit,lb_frac,temp,annealing,verbose,driving_distance,mode,alt_map,p):
    """Run the district_mbo binary on already-prepared data.

    p is the [num_units, num_districts, total_pop] triple returned by read().
    Returns the number of iteration snapshot files produced.
    """
    print state + ' has ' + str(p[0]) + ' tracts and ' + str(p[1]) + ' districts.'
    print '\nRunning ' + state + '...'
    # Minimum allowed district population, as a fraction of the ideal size.
    lowerBound = round(lb_frac*p[2]/p[1])
    if(verbose):
        print '\nMinimum district population set to ' + str(lowerBound) + '.'
    # Remove old files iteration files
    call('rm ' + config.temp_folder + state + '*step*', shell=True)
    # Call main algorithm
    command = ['./district_mbo', config.temp_folder + state + config.unit_data_suffix, str(p[0]),str(p[1]),str(k),str(max_iter),str(initial_state),str(ms_param),str(stopCrit),str(lowerBound), config.temp_folder + state + config.unit_district_suffix,str(temp),str(annealing),str(verbose),str(driving_distance)]
    call(command)
    # Count number of data files saved
    num_iter = len(glob.glob(config.temp_folder + state + '*step*'))
    print str(num_iter) + ' iterations completed.'
    # Remove old images
    call('rm output/flow*', shell=True)
    if mode != config.MODE_NONE:
        visualize_maps.make_pics(state,data_for_state(state),p[1],num_iter,mode)
    return num_iter
# Main function
def run(state,k,max_iter,initial_state,ms_param,stopCrit,lb_frac,temp,annealing,verbose,driving_distance,mode,alt_map):
    """Load state data, then run the districting algorithm; see run_with_data."""
    p = read(state,alt_map)
    num_iter = run_with_data(state,k,max_iter,initial_state,ms_param,stopCrit,lb_frac,temp,annealing,verbose,driving_distance,mode,alt_map,p)
    return num_iter
# If running straight from the command line: run with default parameters,
# optionally taking the two-letter state code as the first CLI argument.
if __name__ == '__main__':
    if(len(sys.argv) == 1):
        state = 'VA' # Run VA by default
    else:
        state = sys.argv[1]
    ### Parameters for auction dynamics algorithm ###
    k = 150 # Number of nearest neighbors
    max_iter = 300 # Maximum number of iterations to run
    initial_state = config.INIT_CURRENT # INIT_RAND: start from random, INIT_CURRENT: start with 2016 districts, INIT_ALT: alternative map
    ms_param = 1 # Mumford-Shah parameter -- higher values means less likely splits
    stopCrit = 0.00 # Stopping criteria
    lb_frac = 0.985 # Fraction of the ideal district population used as the minimum allowed district size
    temp = .1 # Temperature
    annealing = 0.985 # Multiplicative annealing term
    verbose = 0 # 1: print during ms_mbo call, 0: suppress output
    driving_distance = 0 # Make 1 to use driving distance for states where that data is available
    mode = config.MODE_BEGIN_END # Generate visualization with mode : 0 (first and last), 1 (log sampling), 2 (all)
    alt_map = '' # Alternative mapping file
    run(state,k,max_iter,initial_state,ms_param,stopCrit,lb_frac,temp,annealing,verbose,driving_distance,mode,alt_map)
| StarcoderdataPython |
1766823 | # -*- coding: utf-8 -*-
import grok
from grokui.admin import representation
class ApplicationInformation(grok.ViewletManager):
    """Viewlet manager rendering app info on the grokui admin app view."""
    grok.name('grokui_admin_appinfo')
    grok.context(representation.IApplicationRepresentation)
| StarcoderdataPython |
4824668 | #!/usr/bin/env python3
# Generate and upload changelog
from git import Repo, exc
from github import Github
import os
import sys
# Whether to push the generated changelog to the GitHub release; flipped to
# False below when required environment variables are missing.
upload_changelog = True
# Tracked repositories and their paths
# First entry in each pair is how the repository will appear in the changelog
# Second is the path relative to the script (or an absolute path, if that works)
TRACKED_REPOSITORIES = [
    ('Base repository', '.'),
    ('ROS Charging Station modules (ros_cs)', './ros_cs'),
    ('pymavlink with COEX patches', './pymavlink'),
    ('cmavnode with COEX patches', './cmavnode'),
    ('MAVLink library', './mavlink')
]
# Get changelog and start/end points
def get_repo_changelog(repo_path: str):
    """Return ((start, end), changelog_text) for the repo at repo_path.

    The range is from the previous tag (if any) to HEAD; without tags it
    covers the whole history. The repo is unshallowed first so tags and
    full history are available on CI checkouts.
    """
    print('Opening repository at {}'.format(repo_path))
    repo = Repo(repo_path)
    git = repo.git()
    try:
        print('Unshallowing repository at {}'.format(repo_path))
        git.fetch('--unshallow', '--tags')
    except exc.GitCommandError:
        # fetch --unshallow fails on an already-complete clone; that's fine.
        print('Repository already unshallowed')
    print('Attempting to get previous tag')
    log_args = []
    try:
        # Nearest tag strictly before HEAD.
        base_tag = git.describe('--tags', '--abbrev=0', '{}^'.format('HEAD'))
        print('Base tag set to {}'.format(base_tag))
        history_brackets = (base_tag, 'HEAD')
        log_args += ['{}...{}'.format(history_brackets[0], history_brackets[1])]
    except exc.GitCommandError:
        # No tags: log the whole history (no range argument).
        print('No tags found, ')
        history_brackets = ('initial commit', 'HEAD')
    log_args += ['--pretty=format:* %H %s *(%an)*']
    changelog = git.log(*log_args)
    return history_brackets, changelog
# Resolve CI environment: the release tag, GitHub token, and target repo.
# Any missing piece disables the upload but still prints the changelog.
try:
    travis_tag = os.environ['TRAVIS_TAG']
    if travis_tag == '':
        travis_tag = 'HEAD'
        upload_changelog = False
    print('TRAVIS_TAG is set to {}'.format(travis_tag))
except KeyError:
    print('TRAVIS_TAG not set - not uploading changelog')
    travis_tag = 'HEAD'
    upload_changelog = False
try:
    api_key = os.environ['GITHUB_OAUTH_TOKEN']
except KeyError:
    print('GITHUB_OAUTH_TOKEN not set - not uploading changelog')
    api_key = None
    upload_changelog = False
try:
    target_repo = os.environ['RELEASES_REPO']
except KeyError:
    print('RELEASES_REPO not set - cannot determine remote repository')
    target_repo = ''
    #exit(1)
# Assemble one markdown section per tracked repository.
complete_changelog = ''
for (repo_name, repo_path) in TRACKED_REPOSITORIES:
    brackets, changelog = get_repo_changelog(repo_path)
    print('Changelog for {}:\n{}'.format(repo_name, changelog))
    complete_changelog += '## {}\n\nChanges between {} and {}:\n\n{}\n\n'.format(repo_name,
                                                                                brackets[0],
                                                                                brackets[1],
                                                                                changelog)
repo = Repo()
# Only interact with Github if uploading is enabled
if upload_changelog:
    gh = Github(api_key)
    gh_repo = gh.get_repo(target_repo)
    # Get all releases and find ours by its tag name
    gh_release = None
    for release in gh_repo.get_releases():
        if release.tag_name == travis_tag:
            gh_release = release
    if gh_release is None:
        # We could not find the correct release, so here's our last resort. It will most likely fail.
        gh_release = gh_repo.get_release(travis_tag)
    # Append the generated changelog to whatever body the release already has.
    gh_body = gh_release.body
    if gh_body is None:
        gh_body = ''
    gh_body = '{}\n{}'.format(gh_body, complete_changelog)
    print('New release body: {}'.format(gh_body))
    gh_release.update_release(gh_release.tag_name, gh_body, draft=True, prerelease=True,
                              tag_name=gh_release.tag_name, target_commitish=gh_release.target_commitish)
| StarcoderdataPython |
9605013 | <filename>polybar/scripts/weather/parser.py
import argparse
# CLI definition for the polybar weather module; `args` is consumed by the
# companion script that queries OpenWeatherMap.
USAGE_MESSAGE = """%(prog)s [-c [CITY_NAME]] [-u [UNIT]] [-a [API_KEY]] [-l [LANGUAGE]] [-v]
Some examples:
~$ %(prog)s
::> 275 K
~$ %(prog)s -c london
::> 291 K
~$ %(prog)s -u imperial -v
::> 79ºF, Scattered Clouds
~$ %(prog)s -v -C -u metric
::> 26ºC, Broken Clouds
~$ %(prog)s -c florida -u metric -v
::> 27ºC, Thunderstorm
~$ %(prog)s -c rio de janeiro -u metric -a 439d4b804bc8187953eb36d2a8c26a02 -v -l pt_br
::> 25ºC, Céu Limpo
"""
parser = argparse.ArgumentParser(
    usage=USAGE_MESSAGE,
    description="Display information about the weather.",
)
# City name may contain spaces, hence nargs="+".
parser.add_argument(
    "-c",
    metavar="CITY",
    dest="city",
    type=str,
    nargs="+",
    help="city name",
)
parser.add_argument(
    "-l",
    metavar="LANG",
    dest="lang",
    type=str,
    nargs=1,
    help="language (en, es, fr, ja, pt, pt_br, ru, zh_cn)",
)
# Omitting -u yields Kelvin (OpenWeatherMap's default).
parser.add_argument(
    "-u",
    metavar="metric/imperial",
    choices=("metric", "imperial"),
    dest="unit",
    type=str,
    nargs=1,
    help="unit of temperature (default: kelvin)",
)
parser.add_argument(
    "-a",
    metavar="API_KEY",
    dest="api_key",
    nargs=1,
    help="API Key",
)
parser.add_argument(
    "-v",
    "--verbose",
    action="store_true",
    dest="verbose",
    help="verbose mode",
)
args = parser.parse_args()
| StarcoderdataPython |
3205752 | <gh_stars>0
def amplitude(n):
    """Build the Mathematica sum '(I Sin[x])^k Cos[x]^(n-k)' for k = 1..n-1.

    Returns '' when n <= 1 (no terms).
    """
    terms = (f'(I Sin[x])^{x} Cos[x]^{n-x}' for x in range(1, n))
    return ' + '.join(terms)
# Emit a Mathematica notebook defining normalized amplitude functions a{n},
# printing their expanded forms, and maximizing each over [0, Pi].
with open('cut2.nb', 'w') as f:
    for n in range(4,20,2):
        f.write(f'a{n}[x_] := {n}*Abs[{amplitude(n)}]/Sqrt[2^{n}];\n')
    for n in range(4,20,2):
        f.write(f'Print[{amplitude(n)}//ComplexExpand]\n')
    for n in range(4,20,2):
        f.write(f'NMaximize[{{a{n}[x], 0 <= x <= Pi}}, x]\n') | StarcoderdataPython |
8014756 | class Node:
    # Singly-linked-list node: holds a data payload and a pointer to the
    # next node. Java-style accessors kept for API compatibility; plain
    # attribute access would be the pythonic alternative.
    def __init__(self):
        self.data = None
        self.next = None
    def setData(self, data):
        self.data = data
    def getData(self):
        return self.data
    def setNext(self, next):
        self.next = next
    def getNext(self):
        return self.next
class SinglyLinkedList:
    """Minimal singly linked list supporting append-at-tail."""

    def __init__(self):
        # An empty list has no head node.
        self.head = None

    def setHead(self, head):
        """Replace the head node of the list."""
        self.head = head

    def insertAtEnd(self, data):
        """Append a new node carrying ``data`` after the last node (O(n))."""
        node = Node()
        node.setData(data)
        if self.head is None:
            self.setHead(node)
            return
        # Walk to the tail, then link the new node after it.
        cursor = self.head
        while cursor.getNext() is not None:
            cursor = cursor.getNext()
        cursor.setNext(node)
4923814 | <gh_stars>100-1000
import argparse
import re
"""
Currently, litex outputs XDC constraints in which the create_clock commands
cannot be correctly parsed yet by the XDC yosys plugin.
Example of failing XDC command:
create_clock -name clk100 -period 10.0 [get_nets clk100]
Example of working XDC command:
create_clock -period 10.0 clk100
This script fixes the generated XDC and translates the failing commands
into the working ones.
This script is a temporary workaround and needs to be avoided.
"""
# Matches e.g. "create_clock -name clk100 -period 10.0 [get_nets clk100]";
# groups: (1) command, (2) clock name, (3) "-period", (4) period value.
CREATE_CLOCK_REGEX = re.compile(
    '(create_clock) -name ([a-zA-Z0-9_]+) (-period) ([0-9.]*) .*'
)
def main():
    """Rewrite LiteX-style create_clock lines in an XDC file in place.

    Lines matching CREATE_CLOCK_REGEX (in the clock-constraints section)
    are rewritten from "create_clock -name N -period P [...]" to
    "create_clock -period P N", the form the yosys XDC plugin accepts.
    All other lines pass through unchanged.
    """
    parser = argparse.ArgumentParser(
        description="Fixup script to modify the XDC output of LiteX"
    )
    parser.add_argument("--xdc", required=True)
    args = parser.parse_args()

    lines_to_add = []
    with open(args.xdc, "r") as xdc:
        lines = xdc.readlines()

        # Only start rewriting once the clock-constraints section begins.
        processing = False
        for line in lines:
            if 'Clock constraints' in line:
                processing = True

            if processing:
                if line.startswith('create_clock'):
                    m = CREATE_CLOCK_REGEX.match(line)
                    if m:
                        # Old line: create_clock -name clk100 -period 10.0 [get_nets clk100]
                        # New line: create_clock -period 10.0 clk100
                        new_line = " ".join(
                            (
                                m.group(1), m.group(3), m.group(4), m.group(2),
                                '\n'
                            )
                        )
                        lines_to_add.append(new_line)
            else:
                lines_to_add.append(line)

    # Overwrite the input file with the fixed-up constraints.
    with open(args.xdc, "w") as xdc:
        for line in lines_to_add:
            xdc.write(line)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
53795 | <gh_stars>0
# -*- coding: utf-8 -*-
import random
import copy
from local_searchs.heuristic import Heuristic
class Neighbor(object):
    """Generates a neighboring state for a bin-packing local search.

    State layout: [[V_1, V_2, V_3], [[I, B], [I, B], ...]] where V is a bag
    volume, I an item size, and B the index of the bag holding the item.
    """
    def __init__(self, state):
        self.state = state
        self.bagState = state[0]
        self.itemState = state[1]
    def generateState(self):
        """Return a deep-copied state with some unassigned items allocated.

        Randomly samples items and assigns them to bags when capacity allows.
        """
        #State = [[V_1, V_2, V_3],[[I , B], [I,B] ...] | V = Volume | I = Item | B = Bolsa
        newState = copy.deepcopy(self.state)
        nBagState = newState[0]
        nItemState = newState[1]
        for i in range(len(nBagState)):
            for j in range(len(nItemState)):
                # Pick a random item (j is only a repetition counter).
                index = random.randint(0, (len(nItemState) - 1))
                # If the item is unallocated (bag index == number of bags is
                # presumably the "unassigned" sentinel -- TODO confirm) and it
                # fits in bag i without exceeding its capacity, allocate it.
                if (len(nBagState)) == nItemState[index][1] and \
                    Heuristic(newState).bagWeight(i, nItemState[index][0]):
                    nItemState[index][1] = i
        return newState
| StarcoderdataPython |
9708039 | <gh_stars>1-10
from signals.logging import SignalsError, warn
class Field(object):
    """A schema field parsed from a list of attribute strings.

    Attributes may include a type (one of TYPES, required), plus the
    optional flags "optional", "primarykey" and "array". Unknown
    attributes produce a warning, except "$..." which raises, since it
    indicates a relationship missing its relationship type.
    """
    DATE = "date"
    DATETIME = "datetime"
    INTEGER = "int"
    DECIMAL = "decimal"
    FLOAT = "float"
    STRING = "string"
    TEXT = "text"
    BOOLEAN = "boolean"
    VIDEO = "video"
    IMAGE = "image"
    TYPES = [DATE, DATETIME, INTEGER, DECIMAL, FLOAT, STRING, TEXT, BOOLEAN, VIDEO, IMAGE]

    OPTIONAL = "optional"
    PRIMARY_KEY = "primarykey"
    ARRAY = "array"

    # Class-level defaults; instances override these as attributes are parsed.
    optional = False
    primary_key = False
    array = False
    field_type = None

    def __init__(self, field_name, field_attributes):
        self.name = field_name
        for attribute in field_attributes:
            self.process_attribute(attribute)
        self.validate_field()

    def process_attribute(self, attribute):
        """Apply one attribute string to this field."""
        flag_names = {
            self.OPTIONAL: "optional",
            self.PRIMARY_KEY: "primary_key",
            self.ARRAY: "array",
        }
        if attribute in flag_names:
            setattr(self, flag_names[attribute], True)
            return
        if attribute in self.TYPES:
            self.field_type = attribute
            return
        if attribute.startswith("$"):
            raise SignalsError("Found an unexpected attribute: {} on {}. "
                               "Likely it's missing relationship type.".format(attribute, self.name))
        warn("Found an unexpected attribute: {} on {}.".format(attribute, self.name))

    def validate_field(self):
        """Ensure a field type was supplied; raise SignalsError otherwise."""
        if self.field_type is None:
            raise SignalsError("Didn't find field type for {}, exiting.".format(self.name))
class Relationship(Field):
    """A field that links to another model ("$Model") with a relationship type.

    Attributes handled here: one of TYPES (O2O/M2M/O2M/M2O) sets the
    relationship type, and a "$"-prefixed name sets the related object;
    everything else is delegated to Field.
    """
    ONE_TO_ONE = "O2O"
    MANY_TO_MANY = "M2M"
    ONE_TO_MANY = "O2M"
    MANY_TO_ONE = "M2O"
    TYPES = [ONE_TO_ONE, MANY_TO_MANY, ONE_TO_MANY, MANY_TO_ONE]

    relationship_type = None
    related_object = None

    def process_attribute(self, attribute):
        if attribute in self.TYPES:
            self.relationship_type = attribute
        elif attribute.startswith("$"):
            self.related_object = attribute
        else:
            super(Relationship, self).process_attribute(attribute)

    def validate_field(self):
        """Require a "$..." related object; raise SignalsError otherwise."""
        if self.related_object is None:
            raise SignalsError("Didn't find related object for {}, exiting.".format(self.name))

    @staticmethod
    def is_relationship(field_attributes):
        # If any attribute is a relationship type, this field is a relationship.
        # `any` replaces the previous `reduce` call: `reduce` is not a builtin
        # on Python 3 (it moved to functools and was never imported here), so
        # the old code raised NameError there. `any` is equivalent and works
        # on both Python 2 and 3.
        return any(attribute in Relationship.TYPES for attribute in field_attributes)
| StarcoderdataPython |
4952575 | from django.db import models
from django.conf import settings
from django.utils import timezone
from budgetbuddy.paychecks.choices import deduction_type_choices
class Paycheck(models.Model):
    """A user's recurring paycheck source (employer + salary schedule)."""
    company = models.CharField(max_length=200)
    annual_salary = models.DecimalField(max_digits=10, decimal_places=2)
    # How many paychecks are issued per year (e.g. 26 for biweekly).
    paychecks_per_year = models.IntegerField()
    active = models.BooleanField(default=True)
    creation_date = models.DateField(default=timezone.now, blank=True)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.DO_NOTHING)
    def __str__(self):
        return self.company
class Deduction(models.Model):
    """A recurring deduction (tax, benefit, ...) attached to a paycheck."""
    paycheck = models.ForeignKey(Paycheck, on_delete=models.CASCADE)
    description = models.CharField(max_length=200)
    # Category of deduction; choices defined in paychecks.choices.
    deduction_type = models.CharField(max_length=20,
                                      choices=deduction_type_choices)
    amount = models.DecimalField(max_digits=8, decimal_places=2)
    active = models.BooleanField(default=True)
    creation_date = models.DateTimeField(default=timezone.now, blank=True)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.DO_NOTHING)
    def __str__(self):
        return('{} - {}'.format(self.paycheck.company, self.description))
class Paystub(models.Model):
    """A single pay period's gross pay record for a paycheck."""
    paycheck = models.ForeignKey(Paycheck, on_delete=models.DO_NOTHING)
    gross_pay = models.DecimalField(max_digits=10, decimal_places=2)
    # Pay period covered by this stub.
    start_date = models.DateField()
    end_date = models.DateField()
    creation_date = models.DateTimeField(default=timezone.now, blank=True)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.DO_NOTHING)
    def __str__(self):
        return('{} - {}'.format(self.paycheck.company, self.id))
class PayType(models.Model):
    """Lookup table mapping paychecks-per-year to a human-readable name."""
    paychecks_per_year = models.IntegerField(primary_key=True)
    name = models.CharField(max_length=200)
    def __str__(self):
        return self.name
| StarcoderdataPython |
3335677 | # -*- coding: utf-8 -*-
'''
Created on 17/3/13.
@author: love
'''
import gevent
import gevent.monkey
gevent.monkey.patch_all()
from pymqant.module.app import mqant
from server.chat_module import ChatModule
from server.test_module import TestModule
# Entry point: start the mqant app in debug mode with the chat/test modules.
if __name__ == "__main__":
    app=mqant()
    app.Run(True,ChatModule(),TestModule()) | StarcoderdataPython |
9625854 | <reponame>kubamahnert/panoramic-cli<filename>src/panoramic/cli/__init__.py
from panoramic.cli.cli import cli
from panoramic.cli.utils.logging import configure_logging
# Configure logging once at package import so every CLI entry point shares it.
configure_logging()
__all__ = ['cli']
| StarcoderdataPython |
9776817 | <gh_stars>0
# -*- coding: utf-8 -*-
"""Missing batteries for itertools.
For more batteries for itertools, see also the ``unpythonic.fold`` module.
``flatten`` based on Danny Yoo's version:
http://rightfootin.blogspot.fi/2006/09/more-on-python-flatten.html
``uniqify``, ``uniq``, ``take``, ``drop``, ``partition`` just package
``itertools`` recipes.
"""
__all__ = ["rev", "map", "map_longest",
"rmap", "rzip", "rmap_longest", "rzip_longest",
"mapr", "zipr", "mapr_longest", "zipr_longest",
"flatmap",
"uniqify", "uniq",
"take", "drop", "split_at",
"unpack",
"tail", "butlast", "butlastn",
"first", "second", "nth", "last", "lastn",
"scons", "pad",
"flatten", "flatten1", "flatten_in",
"iterate", "iterate1",
"partition",
"inn", "iindex",
"window", "chunked",
"within", "fixpoint"]
from builtins import map as stdlib_map
from operator import itemgetter
from itertools import tee, islice, zip_longest, starmap, chain, filterfalse, groupby, takewhile
from collections import deque
def rev(iterable):
    """Reverse ``iterable``, returning a ``reversed`` iterator.

    Sequences are reversed directly; any other iterable (e.g. a generator)
    is first forced into a tuple, so the input must be finite.
    """
    try:
        result = reversed(iterable)  # sequence (or anything with __reversed__)
    except TypeError:
        # Not a sequence: materialize fully, then reverse.
        result = reversed(tuple(iterable))
    return result
def map(function, iterable0, *iterables):
    """Curry-friendly map: at least one iterable is mandatory.

    Identical to the builtin ``map``, but the required ``iterable0``
    parameter makes partial application (``curry(map, f)``) meaningful.
    """
    all_inputs = (iterable0,) + iterables
    return stdlib_map(function, *all_inputs)
# When completing an existing set of functions (map, zip, zip_longest),
# consistency wins over curry-friendliness.
def map_longest(func, *iterables, fillvalue=None):
    """Like map, but stop only when the longest input is exhausted.

    Positions of already-exhausted inputs are filled with ``fillvalue``
    (default ``None``) before calling ``func``.
    """
    for padded_args in zip_longest(*iterables, fillvalue=fillvalue):
        yield func(*padded_args)
def rmap(func, *iterables):
    """Like map, but from the right: reverse each input, then map.

    Syncs the **right** ends of inputs of different lengths (``mapr``
    syncs the left ends). ``rev`` is applied to each input, which
    forces any generators; inputs must be finite.
    """
    reversed_inputs = tuple(rev(seq) for seq in iterables)
    yield from map(func, *reversed_inputs)
def rzip(*iterables):
    """Like zip, but from the right: reverse each input, then zip.

    Syncs the **right** ends of inputs of different lengths (``zipr``
    syncs the left ends). ``rev`` is applied to each input, which
    forces any generators; inputs must be finite.
    """
    reversed_inputs = tuple(rev(seq) for seq in iterables)
    yield from zip(*reversed_inputs)
def rmap_longest(func, *iterables, fillvalue=None):
    """Like rmap, but terminate on the longest input."""
    reversed_inputs = tuple(rev(seq) for seq in iterables)
    yield from map_longest(func, *reversed_inputs, fillvalue=fillvalue)
def rzip_longest(*iterables, fillvalue=None):
    """Like rzip, but terminate on the longest input."""
    reversed_inputs = tuple(rev(seq) for seq in iterables)
    yield from zip_longest(*reversed_inputs, fillvalue=fillvalue)
def mapr(proc, *iterables):
    """Like map, but yield results from the right: map first, then reverse.

    Syncs the **left** ends of inputs of different lengths (``rmap``
    syncs the right ends).
    """
    for item in rev(map(proc, *iterables)):
        yield item
def zipr(*iterables):
    """Like zip, but yield results from the right: zip first, then reverse.

    Syncs the **left** ends of inputs of different lengths (``rzip``
    syncs the right ends).
    """
    for item in rev(zip(*iterables)):
        yield item
def mapr_longest(proc, *iterables, fillvalue=None):
    """Like mapr, but terminate on the longest input."""
    for item in rev(map_longest(proc, *iterables, fillvalue=fillvalue)):
        yield item
def zipr_longest(*iterables, fillvalue=None):
    """Like zipr, but terminate on the longest input."""
    for item in rev(zip_longest(*iterables, fillvalue=fillvalue)):
        yield item
# Equivalent recursive process:
#def _mapr(proc, iterable0, *iterables, longest=False, fillvalue=None):
# z = zip if not longest else partial(zip_longest, fillvalue=fillvalue)
# xss = z(iterable0, *iterables)
# def _mapr_recurser():
# try:
# xs = next(xss)
# except StopIteration:
# return
# subgen = _mapr_recurser()
# yield from subgen
# yield proc(*xs)
# return _mapr_recurser()
#
#def _zipr(iterable0, *iterables, longest=False, fillvalue=None):
# def identity(*args): # unpythonic.fun.identity, but dependency loop
# return args
# return _mapr(identity, iterable0, *iterables,
# longest=longest, fillvalue=fillvalue)
def flatmap(f, iterable0, *iterables):
    """Map ``f`` over the inputs, then concatenate the resulting iterables.

    ``f`` takes one argument per input iterable (drawn in lockstep) and
    must return an iterable; the outputs are chained into one flat stream.

    Example::

        assert tuple(flatmap(lambda x: (x, -x), (1, 2))) == (1, -1, 2, -2)
    """
    for chunk in map(f, iterable0, *iterables):
        yield from chunk
def uniqify(iterable, *, key=None):
    """Yield unique items from ``iterable``, preserving original order.

    If ``key`` is given, uniqueness is decided on ``key(elt)`` rather than
    on the element itself. (``unique_everseen`` from the itertools recipes.)
    """
    seen = set()
    remember = seen.add
    for element in iterable:
        marker = element if key is None else key(element)
        if marker not in seen:
            remember(marker)
            yield element
def uniq(iterable, *, key=None):
    """Like uniqify, but drop *consecutive* duplicates only.

    Named after the *nix utility. (``unique_justseen`` from the
    itertools recipes.)
    """
    # groupby collapses each run of equal-keyed items; keep the first of each.
    for _, run in groupby(iterable, key):
        yield next(run)
def take(n, iterable):
    """Return an iterator over at most the first ``n`` items of ``iterable``.

    Stops earlier if the input has fewer than ``n`` items.
    (``take`` from the itertools recipes.)
    """
    if isinstance(n, int):
        if n < 0:
            raise ValueError("expected n >= 0, got {}".format(n))
    else:
        raise TypeError("expected integer n, got {} with value {}".format(type(n), n))
    return islice(iter(iterable), n)
def drop(n, iterable):
    """Skip the first n elements of iterable, then yield the rest.

    If ``n`` is ``None``, consume the iterable until it runs out.

    This is ``consume`` from ``itertools`` recipes.
    """
    # Validate only when n is an actual count; previously the isinstance
    # check ran first and raised TypeError for n=None, making the documented
    # "consume everything" mode unreachable.
    if n is not None:
        if not isinstance(n, int):
            raise TypeError("expected integer n, got {} with value {}".format(type(n), n))
        if n < 0:
            raise ValueError("expected n >= 0, got {}".format(n))
    it = iter(iterable)
    if n is None:
        # Feed the whole iterator into a zero-length deque: consumes at C speed.
        deque(it, maxlen=0)
    else:
        next(islice(it, n, n), None)  # advance it to empty slice starting at n
    return it
def split_at(n, iterable):
    """Split ``iterable`` at position ``n``.

    Returns a pair of iterators ``(first_part, second_part)``; built on
    ``itertools.tee`` plus ``take`` and ``drop``.

    Example::

        a, b = split_at(5, range(10))
        assert tuple(a) == tuple(range(5))
        assert tuple(b) == tuple(range(5, 10))
    """
    if isinstance(n, int):
        if n < 0:
            raise ValueError("expected n >= 0, got {}".format(n))
    else:
        raise TypeError("expected integer n, got {} with value {}".format(type(n), n))
    left, right = tee(iter(iterable))
    return take(n, left), drop(n, right)
def unpack(n, iterable, *, k=None, fillvalue=None):
    """From iterable, return the first n elements, and the kth tail.
    Lazy generalization of sequence unpacking, works also for infinite iterables.
    Default ``k=None`` means ``k = n``, i.e. return the tail that begins
    right after the extracted items. Other values are occasionally useful,
    e.g. to peek into the tail, while not permanently extracting an item.
    The return value is a tuple containing the ``n`` first elements, and as its
    last item, an iterator representing the tail of the iterable from item ``k``
    onwards.
    If there are fewer than ``n`` items in the iterable, the missing items
    are returned as ``fillvalue``. The tail is then a generator that just
    raises ``StopIteration``.
    If ``k < n`` (tail overlaps with the extracted items), the tail
    is formed by calling ``itertools.tee`` at the appropriate point
    during the extraction. (Plan the client code accordingly; see the
    caution in `itertools.tee`. Essentially, the original iterator should
    no longer be used after it has been tee'd; only use the tee'd copy.)
    If ``k == n`` (tail begins right after the extracted items), the tail
    is the original iterator at the end of the extraction.
    If ``k > n`` (skip some items after the first n), then after extraction,
    the tail is formed by fast-forwarding the iterator using ``drop``.
    """
    if not isinstance(n, int):
        raise TypeError("expected integer n, got {} with value {}".format(type(n), n))
    if n < 0:
        raise ValueError("expected n >= 0, got {}".format(n))
    k = k if k is not None else n # not "k or n", since k = 0 is valid
    if not isinstance(k, int):
        raise TypeError("expected integer k, got {} with value {}".format(type(k), k))
    if k < 0:
        raise ValueError("expected k >= 0, got {}".format(k))
    out = []
    tl = None
    it = iter(iterable)
    for j in range(n):
        try:
            # Overlapping tail (k < n): fork the iterator exactly when the
            # extraction reaches position k, before consuming item k.
            if j == k: # tail is desired to overlap with the extracted items
                it, tl = tee(it)
            out.append(next(it))
        except StopIteration: # had fewer than n items remaining
            out += [fillvalue] * (n - len(out))
            def empty_iterable():
                yield from ()
            tl = empty_iterable()
            break
    # tl is still None (falsey) unless tee() fired or the input ran out;
    # tee objects and generators are always truthy, so they are kept.
    if not tl: # avoid replacing empty_iterable()
        if k == n:
            tl = it
        elif k > n:
            tl = drop(k - n, it)
    out.append(tl)
    return tuple(out)
def tail(iterable):
    """Return an iterator over all items of iterable except the first.

    Same as ```drop(1, iterable)```.
    """
    return drop(1, iterable)
def butlast(iterable):
    """Yield all items from iterable, except the last one (if iterable is finite).

    Return a generator. Same as ``butlastn(1, iterable)``.

    Uses intermediate storage - do not use the original iterator after calling
    ``butlast``.
    """
    return butlastn(1, iterable)
def butlastn(n, iterable):
    """Yield all items from iterable, except the last n (if iterable is finite).

    Return a generator. If the input has n or fewer items, nothing is yielded.

    Uses intermediate storage - do not use the original iterator after calling
    ``butlastn``.
    """
    # The previous implementation let next(it)'s StopIteration escape the
    # generator body; under PEP 479 (Python 3.7+) that is converted to
    # RuntimeError, so the old code crashed on every finite input. Catch
    # StopIteration explicitly instead.
    it = iter(iterable)
    q = deque()
    try:
        # Prime a lookahead buffer of n + 1 items.
        for _ in range(n + 1):
            q.append(next(it))
    except StopIteration:
        return  # n or fewer items: nothing to yield
    while True:
        # The buffer always withholds the most recent n + 1 items, so the
        # last n items of the input are never yielded.
        yield q.popleft()
        try:
            q.append(next(it))
        except StopIteration:
            return
def first(iterable, *, default=None):
    """Like nth, but return the first item (or ``default`` if empty)."""
    return nth(0, iterable, default=default)
def second(iterable, *, default=None):
    """Like nth, but return the second item (or ``default`` if too short)."""
    return nth(1, iterable, default=default)
def nth(n, iterable, *, default=None):
    """Return the item at 0-based position ``n`` of ``iterable``.

    ``default`` is returned if there are fewer than ``n + 1`` items.
    """
    if not isinstance(n, int):
        raise TypeError("expected integer n, got {} with value {}".format(type(n), n))
    if n < 0:
        raise ValueError("expected n >= 0, got {}".format(n))
    # Skip the first n items (no skipping needed for n == 0).
    it = iter(iterable) if not n else drop(n, iterable)
    try:
        return next(it)
    except StopIteration:
        return default
def last(iterable, *, default=None):
    """Return the last item of ``iterable``, or ``default`` if it is empty.

    Consumes the whole input; will not terminate for infinite iterables.
    """
    # A maxlen-1 deque keeps only the most recent item, at C speed.
    buf = deque(iterable, maxlen=1)
    if buf:
        return buf.pop()
    return default
def lastn(n, iterable):
    """Yield up to the last ``n`` items of ``iterable``, in original order.

    Consumes the whole input first; will not terminate for infinite
    iterables. Shorter inputs yield all of their items.
    """
    # A bounded deque retains exactly the trailing n items, at C speed.
    buf = deque(iterable, maxlen=n)
    for item in buf:
        yield item
def scons(x, iterable):
    """Prepend one element to the start of an iterable.

    Convenient for stuffing a single item back in front of an existing
    iterator; for several values, use ``itertools.chain`` directly.
    """
    yield x
    yield from iterable
def pad(n, fillvalue, iterable):
    """Pad iterable with copies of fillvalue so its length is at least ``n``.

    All original items are yielded first (lazily), then as many copies of
    ``fillvalue`` as needed to reach ``n`` items total. Inputs that are
    already long enough pass through unchanged.

    Examples::

        assert tuple(pad(5, None, range(3))) == (0, 1, 2, None, None)
        assert tuple(pad(5, None, ())) == (None, None, None, None, None)
        assert tuple(pad(5, None, range(6))) == tuple(range(6))
    """
    count = 0
    for item in iterable:
        count += 1
        yield item
    while count < n:
        count += 1
        yield fillvalue
def flatten(iterable, pred=None):
    """Recursively remove nested structure from iterable.
    Process tuples and lists inside the iterable; pass everything else through
    (including any iterators stored in the iterable).
    Returns a generator that yields the flattened output.
    ``pred`` is an optional predicate for filtering. It should accept a tuple
    (or list), and return ``True`` if that tuple/list should be flattened.
    When ``pred`` returns False, that tuple/list is passed through as-is.
    E.g. to flatten only those items that contain only tuples::
        is_nested = lambda e: all(isinstance(x, (list, tuple)) for x in e)
        data = (((1, 2), (3, 4)), (5, 6))
        assert tuple(flatten(data, is_nested)) == ((1, 2), (3, 4), (5, 6))
    """
    # Delegate to the shared worker; recursive=True descends into nested
    # tuples/lists matching pred at every level.
    return _flatten(iterable, pred, recursive=True)
def flatten1(iterable, pred=None):
    """Like flatten, but process outermost level only."""
    if not pred:
        # No filtering requested; chain.from_iterable does one-level
        # flattening at C speed (itertools recipes).
        return chain.from_iterable(iterable)
    return _flatten(iterable, pred, recursive=False)
def _flatten(iterable, pred=None, recursive=True):
pred = pred or (lambda x: True) # unpythonic.fun.const(True), but dependency loop
it = iter(iterable)
for e in it:
if isinstance(e, (list, tuple)) and pred(e):
items = _flatten(e, pred) if recursive else e
for f in items:
yield f
else:
yield e
def flatten_in(iterable, pred=None):
    """Like flatten, but recurse also into tuples/lists not matching pred.

    Non-matching tuples/lists are not expanded into the output, but their
    contents still get the same flattening applied inside them, and the
    result is rebuilt with the original container type.

    Example::

        is_nested = lambda e: all(isinstance(x, (list, tuple)) for x in e)
        data = (((1, 2), ((3, 4), (5, 6)), 7), ((8, 9), (10, 11)))
        assert tuple(flatten(data, is_nested)) == \\
               (((1, 2), ((3, 4), (5, 6)), 7), (8, 9), (10, 11))
        assert tuple(flatten_in(data, is_nested)) == \\
               (((1, 2), (3, 4), (5, 6), 7), (8, 9), (10, 11))
    """
    pred = pred or (lambda x: True)
    for item in iterable:
        if not isinstance(item, (list, tuple)):
            yield item
        elif pred(item):
            yield from flatten_in(item, pred)
        else:
            # Rebuild with the same container type, flattened inside.
            yield type(item)(flatten_in(item, pred))
def iterate1(f, x):
    """Return an infinite generator yielding x, f(x), f(f(x)), ..."""
    value = x
    while True:
        yield value
        value = f(value)
def iterate(f, *args):
    """Multiple-argument version of iterate1.

    Yields args, f(*args), f(*f(*args)), ... as an infinite generator.
    ``f`` must return a tuple or list with one element per positional
    parameter it takes; that result becomes the argument list of the
    next call.
    """
    state = args
    while True:
        yield state
        state = f(*state)
def partition(pred, iterable):
    """Split ``iterable`` into ``(false-items, true-items)`` by ``pred``.

    Returns two generators; each yields the items for which ``pred`` gives
    the indicated truth value. This is the ``partition`` itertools recipe:
    the input is walked only once, with ``tee`` handling the intermediate
    storage.

    **Caution**: with infinite inputs, reading only one of the outputs may
    buffer an unbounded number of items destined for the other output
    (e.g. partition the naturals and only ever read the evens: every odd
    number accumulates "to be read later").
    """
    left, right = tee(iterable)
    falses = filterfalse(pred, left)
    trues = filter(pred, right)
    return falses, trues
def inn(x, iterable):
    """Contains-check (``x in iterable``) with automatic termination.
    ``iterable`` may be infinite.
    We assume ``iterable`` is **monotonic** and **divergent**. In other words,
    we require ``it[k+1] >= it[k]`` (or ``it[k+1] <= it[k]``), and that the
    sequence has no upper (or respectively lower) bound. If ``iterable``
    does not fulfill these conditions, this function may fail to terminate.
    This is fully duck-typed; we only require that ``x`` and the elements of
    ``iterable`` are comparable by ``==``, ``<=`` and ``>=``.
    Examples::
        from unpythonic import inn, s, imemoize, gmemoize
        from itertools import count, takewhile
        evens = imemoize(s(2, 4, ...))
        assert inn(42, evens())
        assert not inn(41, evens())
        @gmemoize
        def primes():
            yield 2
            for n in count(start=3, step=2):
                if not any(n % p == 0 for p in takewhile(lambda x: x*x <= n, primes())):
                    yield n
        assert inn(31337, primes())
        assert not inn(1337, primes())
    Whether the input is increasing or decreasing is determined automatically
    from the first elements ``it[0]`` and ``it[j]``, for the first ``j > 0``
    such that ``it[j] > it[0]`` or ``it[j] < it[0]``. After the direction has
    been determined, the monotonicity of the input is no longer monitored.
    The actual search is performed by ``itertools.takewhile``, terminating
    (in the worst case) after we can be sure that ``x`` does not appear in
    ``iterable``.
    The name is a weak pun on ``in``. We provide this functionality as a function
    ``inn`` instead of customizing ``unpythonic.mathseq.m.__contains__`` in order
    to keep things explicit. The m-ness of an iterable is silently dropped by any
    function that operates on general iterables, so the other solution could
    easily lead to, by accident, performing a search that will not terminate
    (on an infinite iterable that is not m'd and does not contain ``x``).
    """
    it = iter(iterable)
    try:
        y0 = next(it)
    except StopIteration:
        return False  # an empty iterable cannot contain x
    if y0 == x: return True
    # Skip past any initial run of values equal to y0, to find the first
    # differing element and hence the direction of monotonicity.
    yj = y0
    while yj == y0:
        try:
            yj = next(it)
        except StopIteration:
            return False  # constant sequence that never equals x
        if yj == x: return True
    d = yj - y0
    assert d != 0
    # Increasing input: keep scanning while elements are still <= x;
    # decreasing input: mirror image. Once past x, it cannot appear.
    pred = (lambda elt: elt <= x) if d > 0 else (lambda elt: elt >= x)
    return x in takewhile(pred, it)
def iindex(x, iterable):
    """Like list.index, but for a general iterable.

    Return the zero-based position of the first item equal to ``x``, or
    raise ``ValueError`` if no such item exists.

    Note that just like ``x in iterable``, this will not terminate if the
    iterable is infinite and ``x`` is not in it. The iterable is consumed
    up to (and including) the match, so this mostly makes sense for
    memoized iterables (and even then it may be better to extract the
    desired part as a list and search there).
    """
    position = next((j for j, item in enumerate(iterable) if item == x), None)
    if position is None:
        raise ValueError("{} is not in iterable".format(x))
    return position
def window(iterable, n=2):
    """Sliding length-n window iterator for a general iterable.
    Acts like ``zip(s, s[1:], ..., s[n-1:])`` for a sequence ``s``, but the input
    can be any iterable.
    If there are fewer than ``n`` items in the input iterable, an empty iterator
    is returned.
    Inspired by ``with_next`` discussed in:
    https://opensource.com/article/18/3/loop-better-deeper-look-iteration-python
    """
    if n < 2:
        raise ValueError("expected n >= 2, got {}".format(n))
    it = iter(iterable)
    # Prime the window with the first n items. Note this happens eagerly,
    # at call time, before the returned generator is ever advanced.
    xs = deque()
    for _ in range(n):
        try:
            xs.append(next(it))
        except StopIteration:
            # Fewer than n items: per the contract, return an empty iterator.
            def empty_iterable():
                yield from ()
            return empty_iterable()
    def windowed():
        # Yield a snapshot tuple, then slide the window one step right.
        while True:
            yield tuple(xs)
            xs.popleft()
            xs.append(next(it))  # let StopIteration propagate
    return windowed()
def chunked(n, iterable):
    """Split an iterable into constant-length chunks.
    Conceptually, whereas ``window`` slides its stencil through which the
    original iterable is viewed, ``chunked`` partitions the iterable with
    no overlap between consecutive stencil positions.
    This returns a generator that yields the chunks. Unlike ``window``, to
    remain storage-agnostic, each chunk itself is represented as an iterator
    (so if you want tuples, convert each chunk yourself - see example below).
    No temporary storage is allocated, this is essentially a stream filter
    built on itertools.
    Example::
        chunks = chunked(3, range(9))
        assert [tuple(chunk) for chunk in chunks] == [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
        chunks = chunked(3, range(7))
        assert [tuple(chunk) for chunk in chunks] == [(0, 1, 2), (3, 4, 5), (6,)]
    Based on StackOverflow answers by <NAME> and reclosedev:
    https://stackoverflow.com/questions/8991506/iterate-an-iterator-by-chunks-of-n-in-python
    """
    if n < 2:
        raise ValueError("expected n >= 2, got {}".format(n))
    it = iter(iterable)
    def chunker():
        try:
            while True:
                # islice views the next n items of the shared iterator.
                cit = islice(it, n)
                # we need the next() to see the StopIteration when the first empty slice occurs
                yield scons(next(cit), cit)
        except StopIteration:
            return
    return chunker()
def within(tol, iterable):
    """Yield items from iterable until successive items are close enough.
    Items are yielded until `abs(a - b) <= tol` for successive items
    `a` and `b`.
    If `tol == 0`, one final duplicate value will be yielded. This makes the
    last two yielded values always satisfy the condition, even when `tol == 0`.
    **CAUTION**: Intended for converging mathematical sequences, preferably
    Cauchy sequences. Use on arbitrary input will lead to nasty surprises
    (infinite output, or terminating the output early if a part of it looks
    like a converging sequence; think a local maximum of `cos(x)`).
    """
    # window(n=2) pairs each item with its successor; once a pair is within
    # tol, we emit both members of that pair and stop.
    for a, b in window(iterable, n=2):
        yield a
        if abs(a - b) <= tol:
            yield b
            return
def fixpoint(f, x0, tol=0):
    """Compute the (arithmetic) fixed point of f, starting from the initial guess x0.
    (Not to be confused with the logical fixed point with respect to the
    definedness ordering.)
    The fixed point must be attractive for this to work. See the Banach
    fixed point theorem.
    https://en.wikipedia.org/wiki/Banach_fixed-point_theorem
    If the fixed point is attractive, and the values are represented in
    floating point (hence finite precision), the computation should
    eventually converge down to the last bit (barring roundoff or
    catastrophic cancellation in the final few steps). Hence the default tol
    of zero.
    CAUTION: an arbitrary function from ℝ to ℝ **does not** necessarily
    have a fixed point. Limit cycles and chaotic behavior of `f` will cause
    non-termination. Keep in mind the classic example:
    https://en.wikipedia.org/wiki/Logistic_map
    Examples::
        from math import cos, sqrt
        from unpythonic import fixpoint, ulp
        c = fixpoint(cos, x0=1)
        # Actually "Newton's" algorithm for the square root was already known to the
        # ancient Babylonians, ca. 2000 BCE. (<NAME>: History of mathematics)
        def sqrt_newton(n):
            def sqrt_iter(x):  # has an attractive fixed point at sqrt(n)
                return (x + n / x) / 2
            return fixpoint(sqrt_iter, x0=n / 2)
        assert abs(sqrt_newton(2) - sqrt(2)) <= ulp(1.414)
    """
    # Compose the sibling helpers: iterate1 generates the orbit of f,
    # within truncates it once successive iterates agree to tol, and
    # last extracts the converged value.
    return last(within(tol, iterate1(f, x0)))
| StarcoderdataPython |
5173052 | <filename>backend/app/exceptions.py
# -*- coding: future_fstrings -*-
from flask import jsonify
def template(message='An error has occurred', code=500):
    """Build a plain error descriptor: a dict with a message and HTTP status code."""
    return {
        'message': message,
        'status_code': code,
    }
# Canned error descriptors; each dict is unpacked into InvalidUsage(**...)
# by the corresponding classmethod constructors on InvalidUsage.
USER_NOT_FOUND = template('User not found', code=404)
USER_ALREADY_REGISTERED = template('User already registered', code=422)
SESSION_NOT_FOUND = template('Invalid session', code=422)
UNKNOWN_ERROR = template(code=500)
class InvalidUsage(Exception):
    """API error carrying an HTTP status code and an optional payload dict.

    Raised by request handlers; the Flask error handler converts it to a
    JSON response via :meth:`to_json`.
    """

    # Default status when the constructor is not given an explicit one.
    # (Previously the attribute only existed if status_code was passed,
    # so reading it on a default-constructed instance raised AttributeError.)
    status_code = 500

    def __init__(self, message, status_code=None, payload=None):
        Exception.__init__(self)
        self.message = message
        if status_code is not None:
            self.status_code = status_code
        self.payload = payload

    def to_json(self):
        """Return the error as a Flask JSON response (payload plus message)."""
        rv = dict(self.payload or ())
        rv['message'] = self.message
        return jsonify(rv)

    @classmethod
    def user_not_found(cls):
        """404: no such user."""
        return cls(**USER_NOT_FOUND)

    @classmethod
    def user_already_registered(cls):
        """422: user already exists."""
        return cls(**USER_ALREADY_REGISTERED)

    @classmethod
    def session_not_found(cls):
        """422: invalid or expired session."""
        return cls(**SESSION_NOT_FOUND)

    @classmethod
    def unknown_error(cls):
        """500: catch-all."""
        return cls(**UNKNOWN_ERROR)
| StarcoderdataPython |
3373888 | <filename>tests/test_csv.py
import os
from kgx import PandasTransformer
# Paths used by the tests: inputs live in tests/resources, outputs in tests/target.
cwd = os.path.abspath(os.path.dirname(__file__))
resource_dir = os.path.join(cwd, 'resources')
target_dir = os.path.join(cwd, 'target')
def test_load():
    """
    Test for loading data into PandasTransformer
    """
    t = PandasTransformer()
    os.makedirs(target_dir, exist_ok=True)
    # Nodes are parsed before edges so edge endpoints can resolve.
    # NOTE(review): assumed from the ordering used throughout this file — confirm.
    t.parse(os.path.join(resource_dir, "x1_nodes.csv"), input_format='csv')
    t.parse(os.path.join(resource_dir, "x1_edges.csv"), input_format='csv')
    t.report()
    t.save(os.path.join(target_dir, 'x1copy'))
    # w = GraphMLTransformer(t.graph)
    # w.save(os.path.join(target_dir, "x1n.graphml"))
def test_semmeddb_csv():
    """
    Read nodes and edges from CSV and export the resulting graph as an archive
    """
    t = PandasTransformer()
    nodes_file = os.path.join(resource_dir, "semmed/semmeddb_test_nodes.csv")
    edges_file = os.path.join(resource_dir, "semmed/semmeddb_test_edges.csv")
    output = os.path.join(target_dir, "semmeddb_test_export")
    t.parse(nodes_file)
    t.parse(edges_file)
    # Exercise all three supported compression formats for the same graph.
    # save output as *.tar
    t.save(output, output_format='csv', compression='tar')
    # save output as *.tar.gz
    t.save(output, output_format='csv', compression='tar.gz')
    # save output as *tar.bz2
    t.save(output, output_format='csv', compression='tar.bz2')
def test_semmeddb_csv_to_tsv():
    """
    Read nodes and edges from CSV and export the resulting graph as an archive
    """
    t = PandasTransformer()
    nodes_file = os.path.join(resource_dir, "semmed/semmeddb_test_nodes.csv")
    edges_file = os.path.join(resource_dir, "semmed/semmeddb_test_edges.csv")
    output = os.path.join(target_dir, "semmeddb_test_tsv_export")
    t.parse(nodes_file)
    t.parse(edges_file)
    # Same input as test_semmeddb_csv, but exported as TSV instead of CSV.
    # save output as TSV in a tar archive
    t.save(output, output_format='tsv', compression='tar')
def test_read_achive():
    """
    Test reading of tar, tar.gz and tar.bz2 archives
    """
    # NOTE(review): function name has a typo ("achive"); kept as-is because
    # pytest collects tests by name and external tooling may reference it.
    # These archives are produced by test_semmeddb_csv above, so this test
    # depends on it having run first — presumably relies on file ordering;
    # confirm against the test runner configuration.
    tar_file = os.path.join(target_dir, "semmeddb_test_export.tar")
    tar_gz_file = os.path.join(target_dir, "semmeddb_test_export.tar.gz")
    tar_bz_file = os.path.join(target_dir, "semmeddb_test_export.tar.bz2")
    pt = PandasTransformer()
    pt.parse(tar_file, input_format='csv', compression='tar')
    assert not pt.is_empty()
    pt2 = PandasTransformer()
    pt2.parse(tar_gz_file, input_format='csv', compression='tar.gz')
    assert not pt2.is_empty()
    pt3 = PandasTransformer()
    pt3.parse(tar_bz_file, input_format='csv', compression='tar.bz2')
    assert not pt3.is_empty()
6681556 | <filename>ch04/practice_two_layer_net.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 24 20:27:37 2019
@author: Emma
"""
import sys,os
sys.path.append(os.pardir)
from common.functions import *
from common.gradient import numerical_gradient
import numpy as np
class twoLayerNet:
def __init__(self,input_size,hidden_size,output_size,weight_init_std=0.01):
self.params={}
self.param['W1']=weight_init_std*np.random.randn(input_size,hidden_size)
self.p | StarcoderdataPython |
3452541 | # -*- coding: utf-8 -*-
#
# removeolduploads.py -- remove old and uploaded packages from Debexpo
#
# This file is part of debexpo - https://alioth.debian.org/projects/debexpo/
#
# Copyright © 2011 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
"""
Import RFS comments from debian-mentors
"""
__author__ = '<NAME>'
__copyright__ = 'Copyright © 2011 <NAME>'
__license__ = 'MIT'
from debexpo.cronjobs import BaseCronjob
from debexpo.lib.email import Email
from debexpo.lib.filesystem import CheckFiles
from debexpo.controllers.package import PackageController
from debexpo.controllers.packages import PackagesController
from debexpo.model.users import User
from debexpo.model.data_store import DataStore
from debexpo.model import meta
from debian import deb822
import socket
import re
import apt_pkg
import datetime
# DataStore namespace key under which this cronjob persists per-list state.
__namespace__ = '_remove_uploads_'
class RemoveOldUploads(BaseCronjob):
    """Cronjob that removes packages which were uploaded to Debian proper,
    and packages that found no sponsor for 20 weeks.

    Upload announcements arrive as .changes files posted to mailing lists;
    the last processed message number per list is persisted in the DataStore
    under ``__namespace__``.
    """

    def _remove_package(self, package, version, reason):
        """Mail the uploader, then delete the package, its files and DB row."""
        user = meta.session.query(User).filter_by(id=package.user_id).one()
        if user:
            self.mailer.send([user.email, ],
                package=package.name,
                version=version,
                reason=reason)
        CheckFiles().delete_files_for_package(package)
        meta.session.delete(package)
        meta.session.commit()

    def _process_changes(self, mail):
        """Inspect one list message; if it announces a .changes upload of a
        package version we host, remove that package from Expo."""
        if mail.is_multipart():
            self.log.debug("Changes message is multipart?!")
            return
        changes = mail.get_payload(decode=True)
        try:
            changes = deb822.Changes(changes)
        except Exception:
            # Narrowed from a bare ``except:`` (which also swallowed
            # SystemExit/KeyboardInterrupt); malformed mails are still skipped.
            self.log.error('Could not open changes file; skipping mail "%s"' % (mail['subject']))
            return
        if 'Source' not in changes:
            #self.log.debug('Changes file "%s" seems incomplete' % (mail['subject']))
            return
        package = self.pkg_controller._get_package(changes['Source'], from_controller=False)
        if package is not None:
            for pv in package.package_versions:
                # Same distribution and identical version means this exact
                # upload reached the official archive.
                if pv.distribution == changes['Distribution'] and apt_pkg.version_compare(changes['Version'], pv.version) == 0:
                    self.log.debug("Package %s was was uploaded to Debian - removing it from Expo" % (changes['Source']))
                    self._remove_package(package, pv.version, "Package was uploaded to official Debian repositories")
        else:
            #self.log.debug("Package %s was not uploaded to Expo before - ignoring it" % (changes['Source']))
            pass

    def _remove_uploaded_packages(self):
        """Walk unread mailing-list messages, process each announcement, and
        persist the last processed message number per list."""
        if self.mailer.connection_established():
            lists = meta.session.query(DataStore).filter(DataStore.namespace == __namespace__).all()
            for list_name in lists:
                for message in self.mailer.unread_messages(list_name.code, list_name.value):
                    self._process_changes(message)
                    list_name.value = message['X-Debexpo-Message-Number']
                self.log.debug("Processed all messages up to #%s on %s" % (list_name.value, list_name.code))
                meta.session.merge(list_name)
                meta.session.commit()
            self.mailer.disconnect_from_server()

    def _remove_old_packages(self):
        """Delete packages whose newest version is more than 20 weeks old."""
        now = datetime.datetime.now()
        for package in self.pkgs_controller._get_packages():
            if (now - package.package_versions[-1].uploaded) > datetime.timedelta(weeks=20):
                self.log.debug("Removing package %s - uploaded on %s" % (package.name, package.package_versions[-1].uploaded))
                self._remove_package(package, "all versions", "Your package found no sponsor for 20 weeks")

    def setup(self):
        """One-time initialisation: mailer connection, controllers, apt."""
        self.mailer = Email('upload_removed_from_expo')
        self.mailer.connect_to_server()
        self.pkg_controller = PackageController()
        self.pkgs_controller = PackagesController()
        apt_pkg.init_system()
        # Epoch start forces the first invoke() to run the cruft collection.
        self.last_cruft_run = datetime.datetime(year=1970, month=1, day=1)
        self.log.debug("%s loaded successfully" % (__name__))

    def teardown(self):
        self.mailer.disconnect_from_server()

    def invoke(self):
        """Periodic entry point: process list mail, then (at most once per
        24 hours) purge packages that found no sponsor."""
        try:
            self._remove_uploaded_packages()
        except socket.error as e:
            # better luck next time
            self.log.debug("Socket error %s: skipping removals his time" % (e))
            pass
        # We don't need to run our garbage collection of old cruft that often.
        # It's ok if we purge old packages once a day.
        if (datetime.datetime.now() - self.last_cruft_run) >= datetime.timedelta(hours=24):
            self.last_cruft_run = datetime.datetime.now()
            self._remove_old_packages()
# Module-level hooks consumed by the debexpo cronjob runner:
# the job class to instantiate, and how often to invoke it.
cronjob = RemoveOldUploads
schedule = datetime.timedelta(minutes = 10)
| StarcoderdataPython |
162602 | from opera.parser.tosca.v_1_3.node_filter_definition import (
NodeFilterDefinition,
)
class TestParse:
    """Smoke tests: NodeFilterDefinition.parse accepts full and minimal input.

    ``yaml_ast`` is presumably a pytest fixture that parses a YAML string
    into the AST the parser expects — confirm against conftest.py.
    """
    def test_full(self, yaml_ast):
        # A filter with both a property constraint and a capability list.
        NodeFilterDefinition.parse(yaml_ast(
            """
            properties:
              - num_cpus: { in_range: [ 3, 6 ] }
            capabilities: []
            """
        ))

    def test_minimal(self, yaml_ast):
        # An empty mapping is a valid (no-op) node filter.
        NodeFilterDefinition.parse(yaml_ast("{}"))
| StarcoderdataPython |
9650822 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.6.0-bd605d07 on 2018-12-20.
# 2018, SMART Health IT.
import os
import io
import unittest
import json
from . import valueset
from .fhirdate import FHIRDate
class ValueSetTests(unittest.TestCase):
    def instantiate_from(self, filename):
        """Load a JSON test fixture and instantiate a ValueSet model from it.

        The fixture directory comes from FHIR_UNITTEST_DATADIR (cwd if unset).
        """
        datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
        with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
            js = json.load(handle)
        self.assertEqual("ValueSet", js["resourceType"])
        return valueset.ValueSet(js)
    def testValueSet1(self):
        """Round-trip valueset-example.json: parse, verify, serialize, re-verify."""
        inst = self.instantiate_from("valueset-example.json")
        self.assertIsNotNone(inst, "Must have instantiated a ValueSet instance")
        self.implValueSet1(inst)
        js = inst.as_json()
        self.assertEqual("ValueSet", js["resourceType"])
        inst2 = valueset.ValueSet(js)
        self.implValueSet1(inst2)
    def implValueSet1(self, inst):
        """Field-by-field assertions for the valueset-example.json fixture.

        Auto-generated from the fixture content; do not hand-edit values.
        """
        self.assertTrue(inst.compose.inactive)
        self.assertEqual(inst.compose.include[0].concept[0].code, "14647-2")
        self.assertEqual(inst.compose.include[0].concept[0].display, "Cholesterol [Moles/Volume]")
        self.assertEqual(inst.compose.include[0].concept[1].code, "2093-3")
        self.assertEqual(inst.compose.include[0].concept[1].display, "Cholesterol [Mass/Volume]")
        self.assertEqual(inst.compose.include[0].concept[2].code, "35200-5")
        self.assertEqual(inst.compose.include[0].concept[2].display, "Cholesterol [Mass Or Moles/Volume]")
        self.assertEqual(inst.compose.include[0].concept[3].code, "9342-7")
        self.assertEqual(inst.compose.include[0].concept[3].display, "Cholesterol [Percentile]")
        self.assertEqual(inst.compose.include[0].system, "http://loinc.org")
        self.assertEqual(inst.compose.include[0].version, "2.36")
        self.assertEqual(inst.compose.lockedDate.date, FHIRDate("2012-06-13").date)
        self.assertEqual(inst.compose.lockedDate.as_json(), "2012-06-13")
        self.assertEqual(inst.contact[0].name, "FHIR project team")
        self.assertEqual(inst.contact[0].telecom[0].system, "url")
        self.assertEqual(inst.contact[0].telecom[0].value, "http://hl7.org/fhir")
        self.assertEqual(inst.copyright, "This content from LOINC ® is copyright © 1995 Regenstrief Institute, Inc. and the LOINC Committee, and available at no cost under the license at http://loinc.org/terms-of-use.")
        self.assertEqual(inst.date.date, FHIRDate("2015-06-22").date)
        self.assertEqual(inst.date.as_json(), "2015-06-22")
        self.assertEqual(inst.description, "This is an example value set that includes all the LOINC codes for serum/plasma cholesterol from v2.36.")
        self.assertTrue(inst.experimental)
        self.assertEqual(inst.id, "example-extensional")
        self.assertEqual(inst.identifier[0].system, "http://acme.com/identifiers/valuesets")
        self.assertEqual(inst.identifier[0].value, "loinc-cholesterol-int")
        self.assertEqual(inst.jurisdiction[0].coding[0].code, "US")
        self.assertEqual(inst.jurisdiction[0].coding[0].system, "urn:iso:std:iso:3166")
        self.assertEqual(inst.meta.profile[0], "http://hl7.org/fhir/StructureDefinition/shareablevalueset")
        self.assertEqual(inst.name, "LOINC Codes for Cholesterol in Serum/Plasma")
        self.assertEqual(inst.publisher, "HL7 International")
        self.assertEqual(inst.purpose, "This value set was published by ACME Inc in order to make clear which codes are used for Cholesterol by AcmeClinicals (Adult Ambulatory care support in USA)")
        self.assertEqual(inst.status, "draft")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.url, "http://hl7.org/fhir/ValueSet/example-extensional")
        self.assertEqual(inst.useContext[0].code.code, "age")
        self.assertEqual(inst.useContext[0].code.system, "http://terminology.hl7.org/CodeSystem/usage-context-type")
        self.assertEqual(inst.useContext[0].valueQuantity.code, "a")
        self.assertEqual(inst.useContext[0].valueQuantity.comparator, ">")
        self.assertEqual(inst.useContext[0].valueQuantity.system, "http://unitsofmeasure.org")
        self.assertEqual(inst.useContext[0].valueQuantity.unit, "yrs")
        self.assertEqual(inst.useContext[0].valueQuantity.value, 18)
        self.assertEqual(inst.version, "20150622")
    def testValueSet2(self):
        """Round-trip valueset-example-hierarchical.json: parse, verify, serialize, re-verify."""
        inst = self.instantiate_from("valueset-example-hierarchical.json")
        self.assertIsNotNone(inst, "Must have instantiated a ValueSet instance")
        self.implValueSet2(inst)
        js = inst.as_json()
        self.assertEqual("ValueSet", js["resourceType"])
        inst2 = valueset.ValueSet(js)
        self.implValueSet2(inst2)
    def implValueSet2(self, inst):
        """Field-by-field assertions for the valueset-example-hierarchical.json fixture.

        Auto-generated from the fixture content; do not hand-edit values.
        """
        self.assertEqual(inst.compose.include[0].concept[0].code, "invalid")
        self.assertEqual(inst.compose.include[0].concept[1].code, "structure")
        self.assertEqual(inst.compose.include[0].concept[2].code, "required")
        self.assertEqual(inst.compose.include[0].concept[3].code, "value")
        self.assertEqual(inst.compose.include[0].concept[4].code, "processing")
        self.assertEqual(inst.compose.include[0].concept[5].code, "duplicate")
        self.assertEqual(inst.compose.include[0].concept[6].code, "not-found")
        self.assertEqual(inst.compose.include[0].concept[7].code, "conflict")
        self.assertEqual(inst.compose.include[0].concept[8].code, "lock")
        self.assertEqual(inst.compose.include[0].concept[9].code, "exception")
        self.assertEqual(inst.compose.include[0].extension[0].url, "http://hl7.org/fhir/StructureDefinition/valueset-expand-rules")
        self.assertEqual(inst.compose.include[0].extension[0].valueCode, "groups-only")
        self.assertEqual(inst.compose.include[0].extension[1].extension[0].url, "display")
        self.assertEqual(inst.compose.include[0].extension[1].extension[0].valueString, "(Most common)")
        self.assertEqual(inst.compose.include[0].extension[1].extension[1].url, "member")
        self.assertEqual(inst.compose.include[0].extension[1].extension[1].valueCode, "login")
        self.assertEqual(inst.compose.include[0].extension[1].extension[2].url, "member")
        self.assertEqual(inst.compose.include[0].extension[1].extension[2].valueCode, "conflict")
        self.assertEqual(inst.compose.include[0].extension[1].url, "http://hl7.org/fhir/StructureDefinition/valueset-expand-group")
        self.assertEqual(inst.compose.include[0].extension[2].extension[0].url, "code")
        self.assertEqual(inst.compose.include[0].extension[2].extension[0].valueString, "processing")
        self.assertEqual(inst.compose.include[0].extension[2].extension[1].url, "member")
        self.assertEqual(inst.compose.include[0].extension[2].extension[1].valueCode, "duplicate")
        self.assertEqual(inst.compose.include[0].extension[2].extension[2].url, "member")
        self.assertEqual(inst.compose.include[0].extension[2].extension[2].valueCode, "not-found")
        self.assertEqual(inst.compose.include[0].extension[2].url, "http://hl7.org/fhir/StructureDefinition/valueset-expand-group")
        self.assertEqual(inst.compose.include[0].extension[3].extension[0].url, "code")
        self.assertEqual(inst.compose.include[0].extension[3].extension[0].valueString, "invalid")
        self.assertEqual(inst.compose.include[0].extension[3].extension[1].url, "member")
        self.assertEqual(inst.compose.include[0].extension[3].extension[1].valueCode, "structure")
        self.assertEqual(inst.compose.include[0].extension[3].extension[2].url, "member")
        self.assertEqual(inst.compose.include[0].extension[3].extension[2].valueCode, "required")
        self.assertEqual(inst.compose.include[0].extension[3].extension[3].url, "value")
        self.assertEqual(inst.compose.include[0].extension[3].extension[3].valueCode, "required")
        self.assertEqual(inst.compose.include[0].extension[3].url, "http://hl7.org/fhir/StructureDefinition/valueset-expand-group")
        self.assertEqual(inst.compose.include[0].extension[4].extension[0].url, "code")
        self.assertEqual(inst.compose.include[0].extension[4].extension[0].valueString, "transient")
        self.assertEqual(inst.compose.include[0].extension[4].extension[1].url, "member")
        self.assertEqual(inst.compose.include[0].extension[4].extension[1].valueCode, "lock")
        self.assertEqual(inst.compose.include[0].extension[4].extension[2].url, "member")
        self.assertEqual(inst.compose.include[0].extension[4].extension[2].valueCode, "exception")
        self.assertEqual(inst.compose.include[0].extension[4].extension[3].url, "value")
        self.assertEqual(inst.compose.include[0].extension[4].extension[3].valueCode, "throttled")
        self.assertEqual(inst.compose.include[0].extension[4].url, "http://hl7.org/fhir/StructureDefinition/valueset-expand-group")
        self.assertEqual(inst.compose.include[0].extension[5].extension[0].url, "code")
        self.assertEqual(inst.compose.include[0].extension[5].extension[0].valueString, "security")
        self.assertEqual(inst.compose.include[0].extension[5].extension[1].url, "member")
        self.assertEqual(inst.compose.include[0].extension[5].extension[1].valueCode, "login")
        self.assertEqual(inst.compose.include[0].extension[5].extension[2].url, "member")
        self.assertEqual(inst.compose.include[0].extension[5].extension[2].valueCode, "unknown")
        self.assertEqual(inst.compose.include[0].extension[5].url, "http://hl7.org/fhir/StructureDefinition/valueset-expand-group")
        self.assertEqual(inst.compose.include[0].system, "#hacked")
        self.assertEqual(inst.contact[0].telecom[0].system, "url")
        self.assertEqual(inst.contact[0].telecom[0].value, "http://hl7.org/fhir")
        self.assertEqual(inst.contained[0].id, "hacked")
        self.assertEqual(inst.date.date, FHIRDate("2018-07-20").date)
        self.assertEqual(inst.date.as_json(), "2018-07-20")
        self.assertEqual(inst.description, "Demonstration of extensions that build a hierarchical contains")
        self.assertTrue(inst.expansion.contains[0].abstract)
        self.assertEqual(inst.expansion.contains[0].contains[0].code, "login")
        self.assertEqual(inst.expansion.contains[0].contains[0].display, "Login Required")
        self.assertEqual(inst.expansion.contains[0].contains[0].system, "http://hl7.org/fhir/hacked")
        self.assertEqual(inst.expansion.contains[0].contains[1].code, "conflict")
        self.assertEqual(inst.expansion.contains[0].contains[1].display, "Edit Version Conflict")
        self.assertEqual(inst.expansion.contains[0].contains[1].system, "http://hl7.org/fhir/hacked")
        self.assertEqual(inst.expansion.contains[0].display, "(Most common)")
        self.assertEqual(inst.expansion.contains[1].code, "processing")
        self.assertEqual(inst.expansion.contains[1].contains[0].code, "duplicate")
        self.assertEqual(inst.expansion.contains[1].contains[0].display, "Duplicate")
        self.assertEqual(inst.expansion.contains[1].contains[0].system, "http://hl7.org/fhir/hacked")
        self.assertEqual(inst.expansion.contains[1].contains[1].code, "not-found")
        self.assertEqual(inst.expansion.contains[1].contains[1].display, "Not Found")
        self.assertEqual(inst.expansion.contains[1].contains[1].system, "http://hl7.org/fhir/hacked")
        self.assertEqual(inst.expansion.contains[1].display, "Processing Failure")
        self.assertEqual(inst.expansion.contains[1].system, "http://hl7.org/fhir/hacked")
        self.assertTrue(inst.expansion.contains[2].abstract)
        self.assertEqual(inst.expansion.contains[2].code, "invalid")
        self.assertEqual(inst.expansion.contains[2].contains[0].code, "structure")
        self.assertEqual(inst.expansion.contains[2].contains[0].display, "Structural Issue")
        self.assertEqual(inst.expansion.contains[2].contains[0].system, "http://hl7.org/fhir/hacked")
        self.assertEqual(inst.expansion.contains[2].contains[1].code, "required")
        self.assertEqual(inst.expansion.contains[2].contains[1].display, "Required element missing")
        self.assertEqual(inst.expansion.contains[2].contains[1].system, "http://hl7.org/fhir/hacked")
        self.assertEqual(inst.expansion.contains[2].contains[2].code, "value")
        self.assertEqual(inst.expansion.contains[2].contains[2].display, "Element value invalid")
        self.assertEqual(inst.expansion.contains[2].contains[2].system, "http://hl7.org/fhir/hacked")
        self.assertEqual(inst.expansion.contains[2].display, "Invalid Content")
        self.assertEqual(inst.expansion.contains[2].system, "http://hl7.org/fhir/hacked")
        self.assertTrue(inst.expansion.contains[3].abstract)
        self.assertEqual(inst.expansion.contains[3].code, "transient")
        self.assertEqual(inst.expansion.contains[3].contains[0].code, "lock-error")
        self.assertEqual(inst.expansion.contains[3].contains[0].display, "Lock Error")
        self.assertEqual(inst.expansion.contains[3].contains[0].system, "http://hl7.org/fhir/hacked")
        self.assertEqual(inst.expansion.contains[3].contains[1].code, "exception")
        self.assertEqual(inst.expansion.contains[3].contains[1].display, "Exception")
        self.assertEqual(inst.expansion.contains[3].contains[1].system, "http://hl7.org/fhir/hacked")
        self.assertEqual(inst.expansion.contains[3].contains[2].code, "throttled")
        self.assertEqual(inst.expansion.contains[3].contains[2].display, "Throttled")
        self.assertEqual(inst.expansion.contains[3].contains[2].system, "http://hl7.org/fhir/hacked")
        self.assertEqual(inst.expansion.contains[3].display, "Transient Issue")
        self.assertEqual(inst.expansion.contains[3].system, "http://hl7.org/fhir/hacked")
        self.assertTrue(inst.expansion.contains[4].abstract)
        self.assertEqual(inst.expansion.contains[4].code, "security")
        self.assertEqual(inst.expansion.contains[4].contains[0].code, "login")
        self.assertEqual(inst.expansion.contains[4].contains[0].display, "Login Required")
        self.assertEqual(inst.expansion.contains[4].contains[0].system, "http://hl7.org/fhir/hacked")
        self.assertEqual(inst.expansion.contains[4].contains[1].code, "unknown")
        self.assertEqual(inst.expansion.contains[4].contains[1].display, "Unknown User")
        self.assertEqual(inst.expansion.contains[4].contains[1].system, "http://hl7.org/fhir/hacked")
        self.assertEqual(inst.expansion.contains[4].display, "Security Problem")
        self.assertEqual(inst.expansion.contains[4].system, "http://hl7.org/fhir/hacked")
        self.assertEqual(inst.expansion.identifier, "urn:uuid:42316ff8-2714-4680-9980-f37a6d1a71bc")
        self.assertEqual(inst.expansion.parameter[0].name, "excludeNotForUI")
        self.assertEqual(inst.expansion.parameter[0].valueUri, "false")
        self.assertEqual(inst.expansion.timestamp.date, FHIRDate("2018-07-20T23:14:07+10:00").date)
        self.assertEqual(inst.expansion.timestamp.as_json(), "2018-07-20T23:14:07+10:00")
        self.assertTrue(inst.experimental)
        self.assertEqual(inst.id, "example-hierarchical")
        self.assertEqual(inst.meta.profile[0], "http://hl7.org/fhir/StructureDefinition/shareablevalueset")
        self.assertEqual(inst.name, "Example Hierarchical ValueSet")
        self.assertEqual(inst.publisher, "FHIR Project team")
        self.assertEqual(inst.status, "draft")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.url, "http://hl7.org/fhir/ValueSet/example-hierarchical")
        self.assertEqual(inst.version, "3.6.0")
    def testValueSet3(self):
        """Round-trip valueset-example-expansion.json: parse, check, serialize, re-parse, re-check."""
        inst = self.instantiate_from("valueset-example-expansion.json")
        self.assertIsNotNone(inst, "Must have instantiated a ValueSet instance")
        self.implValueSet3(inst)
        js = inst.as_json()
        self.assertEqual("ValueSet", js["resourceType"])
        inst2 = valueset.ValueSet(js)
        self.implValueSet3(inst2)

    def implValueSet3(self, inst):
        """Assert the expected field values for valueset-example-expansion.json."""
        self.assertEqual(inst.compose.include[0].filter[0].op, "=")
        self.assertEqual(inst.compose.include[0].filter[0].property, "parent")
        self.assertEqual(inst.compose.include[0].filter[0].value, "LP43571-6")
        self.assertEqual(inst.compose.include[0].system, "http://loinc.org")
        self.assertEqual(inst.contact[0].telecom[0].system, "url")
        self.assertEqual(inst.contact[0].telecom[0].value, "http://hl7.org/fhir")
        self.assertEqual(inst.copyright, "This content from LOINC® is copyright © 1995 Regenstrief Institute, Inc. and the LOINC Committee, and available at no cost under the license at http://loinc.org/terms-of-use.")
        self.assertEqual(inst.date.date, FHIRDate("2015-06-22").date)
        self.assertEqual(inst.date.as_json(), "2015-06-22")
        self.assertEqual(inst.description, "This is an example value set that includes all the LOINC codes for serum/plasma cholesterol from v2.36.")
        self.assertEqual(inst.expansion.contains[0].code, "14647-2")
        self.assertEqual(inst.expansion.contains[0].display, "Cholesterol [Moles/volume] in Serum or Plasma")
        self.assertEqual(inst.expansion.contains[0].system, "http://loinc.org")
        self.assertEqual(inst.expansion.contains[0].version, "2.50")
        self.assertTrue(inst.expansion.contains[1].abstract)
        self.assertEqual(inst.expansion.contains[1].contains[0].code, "2093-3")
        self.assertEqual(inst.expansion.contains[1].contains[0].display, "Cholesterol [Mass/volume] in Serum or Plasma")
        self.assertEqual(inst.expansion.contains[1].contains[0].system, "http://loinc.org")
        self.assertEqual(inst.expansion.contains[1].contains[0].version, "2.50")
        self.assertEqual(inst.expansion.contains[1].contains[1].code, "48620-9")
        self.assertEqual(inst.expansion.contains[1].contains[1].display, "Cholesterol [Mass/volume] in Serum or Plasma ultracentrifugate")
        self.assertEqual(inst.expansion.contains[1].contains[1].system, "http://loinc.org")
        self.assertEqual(inst.expansion.contains[1].contains[1].version, "2.50")
        self.assertEqual(inst.expansion.contains[1].contains[2].code, "9342-7")
        self.assertEqual(inst.expansion.contains[1].contains[2].display, "Cholesterol [Percentile]")
        self.assertEqual(inst.expansion.contains[1].contains[2].system, "http://loinc.org")
        self.assertEqual(inst.expansion.contains[1].contains[2].version, "2.50")
        self.assertEqual(inst.expansion.contains[1].display, "Cholesterol codes")
        self.assertTrue(inst.expansion.contains[2].abstract)
        self.assertEqual(inst.expansion.contains[2].contains[0].code, "2096-6")
        self.assertEqual(inst.expansion.contains[2].contains[0].display, "Cholesterol/Triglyceride [Mass Ratio] in Serum or Plasma")
        self.assertEqual(inst.expansion.contains[2].contains[0].system, "http://loinc.org")
        self.assertEqual(inst.expansion.contains[2].contains[0].version, "2.50")
        self.assertEqual(inst.expansion.contains[2].contains[1].code, "35200-5")
        self.assertEqual(inst.expansion.contains[2].contains[1].display, "Cholesterol/Triglyceride [Mass Ratio] in Serum or Plasma")
        self.assertEqual(inst.expansion.contains[2].contains[1].system, "http://loinc.org")
        self.assertEqual(inst.expansion.contains[2].contains[1].version, "2.50")
        self.assertEqual(inst.expansion.contains[2].contains[2].code, "48089-7")
        self.assertEqual(inst.expansion.contains[2].contains[2].display, "Cholesterol/Apolipoprotein B [Molar ratio] in Serum or Plasma")
        self.assertEqual(inst.expansion.contains[2].contains[2].system, "http://loinc.org")
        self.assertEqual(inst.expansion.contains[2].contains[2].version, "2.50")
        self.assertEqual(inst.expansion.contains[2].contains[3].code, "55838-7")
        self.assertEqual(inst.expansion.contains[2].contains[3].display, "Cholesterol/Phospholipid [Molar ratio] in Serum or Plasma")
        self.assertEqual(inst.expansion.contains[2].contains[3].system, "http://loinc.org")
        self.assertEqual(inst.expansion.contains[2].contains[3].version, "2.50")
        self.assertEqual(inst.expansion.contains[2].display, "Cholesterol Ratios")
        self.assertEqual(inst.expansion.extension[0].url, "http://hl7.org/fhir/StructureDefinition/valueset-expansionSource")
        self.assertEqual(inst.expansion.extension[0].valueUri, "http://hl7.org/fhir/ValueSet/example-extensional")
        self.assertEqual(inst.expansion.identifier, "urn:uuid:42316ff8-2714-4680-9980-f37a6d1a71bc")
        self.assertEqual(inst.expansion.offset, 0)
        self.assertEqual(inst.expansion.parameter[0].name, "version")
        self.assertEqual(inst.expansion.parameter[0].valueString, "2.50")
        self.assertEqual(inst.expansion.timestamp.date, FHIRDate("2015-06-22T13:56:07Z").date)
        self.assertEqual(inst.expansion.timestamp.as_json(), "2015-06-22T13:56:07Z")
        self.assertEqual(inst.expansion.total, 8)
        self.assertTrue(inst.experimental)
        self.assertEqual(inst.id, "example-expansion")
        self.assertEqual(inst.meta.profile[0], "http://hl7.org/fhir/StructureDefinition/shareablevalueset")
        self.assertEqual(inst.name, "LOINC Codes for Cholesterol in Serum/Plasma")
        self.assertEqual(inst.publisher, "FHIR Project team")
        self.assertEqual(inst.status, "draft")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.url, "http://hl7.org/fhir/ValueSet/example-expansion")
        self.assertEqual(inst.version, "20150622")
    def testValueSet4(self):
        """Round-trip valueset-example-inactive.json: parse, check, serialize, re-parse, re-check."""
        inst = self.instantiate_from("valueset-example-inactive.json")
        self.assertIsNotNone(inst, "Must have instantiated a ValueSet instance")
        self.implValueSet4(inst)
        js = inst.as_json()
        self.assertEqual("ValueSet", js["resourceType"])
        inst2 = valueset.ValueSet(js)
        self.implValueSet4(inst2)

    def implValueSet4(self, inst):
        """Assert the expected field values for valueset-example-inactive.json."""
        self.assertTrue(inst.compose.inactive)
        self.assertEqual(inst.compose.include[0].filter[0].op, "descendent-of")
        self.assertEqual(inst.compose.include[0].filter[0].property, "concept")
        self.assertEqual(inst.compose.include[0].filter[0].value, "_ActMoodPredicate")
        self.assertEqual(inst.compose.include[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActMood")
        self.assertEqual(inst.description, "HL7 v3 ActMood Predicate codes, including inactive codes")
        self.assertEqual(inst.expansion.contains[0].code, "CRT")
        self.assertEqual(inst.expansion.contains[0].display, "criterion")
        self.assertTrue(inst.expansion.contains[0].inactive)
        self.assertEqual(inst.expansion.contains[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActMood")
        self.assertEqual(inst.expansion.contains[1].code, "EXPEC")
        self.assertEqual(inst.expansion.contains[1].contains[0].code, "GOL")
        self.assertEqual(inst.expansion.contains[1].contains[0].display, "goal")
        self.assertEqual(inst.expansion.contains[1].contains[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActMood")
        self.assertEqual(inst.expansion.contains[1].contains[1].code, "RSK")
        self.assertEqual(inst.expansion.contains[1].contains[1].display, "risk")
        self.assertEqual(inst.expansion.contains[1].contains[1].system, "http://terminology.hl7.org/CodeSystem/v3-ActMood")
        self.assertEqual(inst.expansion.contains[1].display, "expectation")
        self.assertEqual(inst.expansion.contains[1].system, "http://terminology.hl7.org/CodeSystem/v3-ActMood")
        self.assertEqual(inst.expansion.contains[2].code, "OPT")
        self.assertEqual(inst.expansion.contains[2].display, "option")
        self.assertEqual(inst.expansion.contains[2].system, "http://terminology.hl7.org/CodeSystem/v3-ActMood")
        self.assertEqual(inst.expansion.identifier, "urn:uuid:46c00b3f-003a-4f31-9d4b-ea2de58b2a99")
        self.assertEqual(inst.expansion.timestamp.date, FHIRDate("2017-02-26T10:00:00Z").date)
        self.assertEqual(inst.expansion.timestamp.as_json(), "2017-02-26T10:00:00Z")
        self.assertEqual(inst.id, "inactive")
        self.assertEqual(inst.name, "Example-inactive")
        self.assertEqual(inst.status, "draft")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.title, "Example with inactive codes")
        self.assertEqual(inst.url, "http://hl7.org/fhir/ValueSet/inactive")
        self.assertEqual(inst.version, "3.6.0")
    def testValueSet5(self):
        """Round-trip valueset-example-filter.json: parse, check, serialize, re-parse, re-check."""
        inst = self.instantiate_from("valueset-example-filter.json")
        self.assertIsNotNone(inst, "Must have instantiated a ValueSet instance")
        self.implValueSet5(inst)
        js = inst.as_json()
        self.assertEqual("ValueSet", js["resourceType"])
        inst2 = valueset.ValueSet(js)
        self.implValueSet5(inst2)

    def implValueSet5(self, inst):
        """Assert the expected field values for valueset-example-filter.json."""
        self.assertEqual(inst.compose.include[0].filter[0].op, "=")
        self.assertEqual(inst.compose.include[0].filter[0].property, "acme-plasma")
        self.assertEqual(inst.compose.include[0].filter[0].value, "true")
        self.assertEqual(inst.compose.include[0].system, "http://hl7.org/fhir/CodeSystem/example")
        self.assertEqual(inst.contact[0].name, "FHIR project team")
        self.assertEqual(inst.contact[0].telecom[0].system, "url")
        self.assertEqual(inst.contact[0].telecom[0].value, "http://hl7.org/fhir")
        self.assertEqual(inst.date.date, FHIRDate("2018-11-01").date)
        self.assertEqual(inst.date.as_json(), "2018-11-01")
        self.assertEqual(inst.description, "ACME Codes for Cholesterol: Plasma only - demonstrating the use of a filter defined in a CodeSystem")
        self.assertTrue(inst.experimental)
        self.assertEqual(inst.id, "example-filter")
        self.assertEqual(inst.name, "ACMECholCodesPlasma")
        self.assertEqual(inst.publisher, "HL7 International")
        self.assertEqual(inst.status, "draft")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.title, "ACME Codes for Cholesterol: Plasma only")
        self.assertEqual(inst.url, "http://hl7.org/fhir/ValueSet/example-filter")
        self.assertEqual(inst.version, "3.6.0")
    def testValueSet6(self):
        """Round-trip valueset-example-yesnodontknow.json: parse, check, serialize, re-parse, re-check."""
        inst = self.instantiate_from("valueset-example-yesnodontknow.json")
        self.assertIsNotNone(inst, "Must have instantiated a ValueSet instance")
        self.implValueSet6(inst)
        js = inst.as_json()
        self.assertEqual("ValueSet", js["resourceType"])
        inst2 = valueset.ValueSet(js)
        self.implValueSet6(inst2)

    def implValueSet6(self, inst):
        """Assert the expected field values for valueset-example-yesnodontknow.json."""
        self.assertEqual(inst.compose.include[0].valueSet[0], "http://terminology.hl7.org/ValueSet/v2-0136")
        self.assertEqual(inst.compose.include[1].concept[0].code, "asked-unknown")
        self.assertEqual(inst.compose.include[1].concept[0].display, "Don't know")
        self.assertEqual(inst.compose.include[1].system, "http://terminology.hl7.org/CodeSystem/data-absent-reason")
        self.assertEqual(inst.description, "For Capturing simple yes-no-don't know answers")
        self.assertEqual(inst.expansion.contains[0].code, "Y")
        self.assertEqual(inst.expansion.contains[0].display, "Yes")
        self.assertEqual(inst.expansion.contains[0].system, "http://terminology.hl7.org/CodeSystem/v2-0136")
        self.assertEqual(inst.expansion.contains[1].code, "N")
        self.assertEqual(inst.expansion.contains[1].display, "No")
        self.assertEqual(inst.expansion.contains[1].system, "http://terminology.hl7.org/CodeSystem/v2-0136")
        self.assertEqual(inst.expansion.contains[2].code, "asked-unknown")
        self.assertEqual(inst.expansion.contains[2].display, "Don't know")
        self.assertEqual(inst.expansion.contains[2].system, "http://terminology.hl7.org/CodeSystem/data-absent-reason")
        self.assertEqual(inst.expansion.identifier, "urn:uuid:bf99fe50-2c2b-41ad-bd63-bee6919810b4")
        self.assertEqual(inst.expansion.timestamp.date, FHIRDate("2015-07-14T10:00:00Z").date)
        self.assertEqual(inst.expansion.timestamp.as_json(), "2015-07-14T10:00:00Z")
        self.assertEqual(inst.id, "yesnodontknow")
        self.assertEqual(inst.name, "Yes/No/Don't Know")
        self.assertEqual(inst.status, "draft")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.url, "http://hl7.org/fhir/ValueSet/yesnodontknow")
        self.assertEqual(inst.version, "3.6.0")
    def testValueSet7(self):
        """Round-trip valueset-examplescenario-actor-type.json: parse, check, serialize, re-parse, re-check."""
        inst = self.instantiate_from("valueset-examplescenario-actor-type.json")
        self.assertIsNotNone(inst, "Must have instantiated a ValueSet instance")
        self.implValueSet7(inst)
        js = inst.as_json()
        self.assertEqual("ValueSet", js["resourceType"])
        inst2 = valueset.ValueSet(js)
        self.implValueSet7(inst2)

    def implValueSet7(self, inst):
        """Assert the expected field values for valueset-examplescenario-actor-type.json."""
        self.assertEqual(inst.compose.include[0].system, "http://hl7.org/fhir/examplescenario-actor-type")
        self.assertEqual(inst.contact[0].telecom[0].system, "url")
        self.assertEqual(inst.contact[0].telecom[0].value, "http://hl7.org/fhir")
        self.assertEqual(inst.contact[0].telecom[1].system, "email")
        self.assertEqual(inst.contact[0].telecom[1].value, "<EMAIL>")
        self.assertEqual(inst.date.date, FHIRDate("2018-11-02T01:31:49+00:00").date)
        self.assertEqual(inst.date.as_json(), "2018-11-02T01:31:49+00:00")
        self.assertEqual(inst.description, "The type of actor - system or human.")
        self.assertFalse(inst.experimental)
        self.assertEqual(inst.extension[0].url, "http://hl7.org/fhir/StructureDefinition/structuredefinition-wg")
        self.assertEqual(inst.extension[0].valueCode, "fhir")
        self.assertEqual(inst.extension[1].url, "http://hl7.org/fhir/StructureDefinition/structuredefinition-standards-status")
        self.assertEqual(inst.extension[1].valueCode, "trial-use")
        self.assertEqual(inst.extension[2].url, "http://hl7.org/fhir/StructureDefinition/structuredefinition-fmm")
        self.assertEqual(inst.extension[2].valueInteger, 0)
        self.assertEqual(inst.id, "examplescenario-actor-type")
        self.assertEqual(inst.identifier[0].system, "urn:ietf:rfc:3986")
        self.assertEqual(inst.identifier[0].value, "urn:oid:2.16.840.1.113883.4.642.3.858")
        self.assertTrue(inst.immutable)
        self.assertEqual(inst.meta.lastUpdated.date, FHIRDate("2018-11-02T01:31:49.644+00:00").date)
        self.assertEqual(inst.meta.lastUpdated.as_json(), "2018-11-02T01:31:49.644+00:00")
        self.assertEqual(inst.meta.profile[0], "http://hl7.org/fhir/StructureDefinition/shareablevalueset")
        self.assertEqual(inst.name, "ExampleScenarioActorType")
        self.assertEqual(inst.publisher, "HL7 (FHIR Project)")
        self.assertEqual(inst.status, "draft")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.title, "ExampleScenarioActorType")
        self.assertEqual(inst.url, "http://hl7.org/fhir/ValueSet/examplescenario-actor-type")
        self.assertEqual(inst.version, "3.6.0")
    def testValueSet8(self):
        """Round-trip valueset-list-example-codes.json: parse, check, serialize, re-parse, re-check."""
        inst = self.instantiate_from("valueset-list-example-codes.json")
        self.assertIsNotNone(inst, "Must have instantiated a ValueSet instance")
        self.implValueSet8(inst)
        js = inst.as_json()
        self.assertEqual("ValueSet", js["resourceType"])
        inst2 = valueset.ValueSet(js)
        self.implValueSet8(inst2)

    def implValueSet8(self, inst):
        """Assert the expected field values for valueset-list-example-codes.json."""
        self.assertEqual(inst.compose.include[0].system, "http://terminology.hl7.org/CodeSystem/list-example-use-codes")
        self.assertEqual(inst.contact[0].telecom[0].system, "url")
        self.assertEqual(inst.contact[0].telecom[0].value, "http://hl7.org/fhir")
        self.assertEqual(inst.date.date, FHIRDate("2018-11-02T01:31:49+00:00").date)
        self.assertEqual(inst.date.as_json(), "2018-11-02T01:31:49+00:00")
        self.assertEqual(inst.description, "Example use codes for the List resource - typical kinds of use.")
        self.assertTrue(inst.experimental)
        self.assertEqual(inst.extension[0].url, "http://hl7.org/fhir/StructureDefinition/structuredefinition-wg")
        self.assertEqual(inst.extension[0].valueCode, "fhir")
        self.assertEqual(inst.extension[1].url, "http://hl7.org/fhir/StructureDefinition/structuredefinition-standards-status")
        self.assertEqual(inst.extension[1].valueCode, "draft")
        self.assertEqual(inst.extension[2].url, "http://hl7.org/fhir/StructureDefinition/structuredefinition-fmm")
        self.assertEqual(inst.extension[2].valueInteger, 1)
        self.assertEqual(inst.id, "list-example-codes")
        self.assertEqual(inst.identifier[0].system, "urn:ietf:rfc:3986")
        self.assertEqual(inst.identifier[0].value, "urn:oid:2.16.840.1.113883.4.642.3.316")
        self.assertTrue(inst.immutable)
        self.assertEqual(inst.meta.lastUpdated.date, FHIRDate("2018-11-02T01:31:49.644+00:00").date)
        self.assertEqual(inst.meta.lastUpdated.as_json(), "2018-11-02T01:31:49.644+00:00")
        self.assertEqual(inst.meta.profile[0], "http://hl7.org/fhir/StructureDefinition/shareablevalueset")
        self.assertEqual(inst.name, "ExampleUseCodesForList")
        self.assertEqual(inst.publisher, "FHIR Project")
        self.assertEqual(inst.status, "draft")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.title, "Example Use Codes for List")
        self.assertEqual(inst.url, "http://hl7.org/fhir/ValueSet/list-example-codes")
        self.assertEqual(inst.version, "3.6.0")
    def testValueSet9(self):
        """Round-trip valueset-example-intensional.json: parse, check, serialize, re-parse, re-check."""
        inst = self.instantiate_from("valueset-example-intensional.json")
        self.assertIsNotNone(inst, "Must have instantiated a ValueSet instance")
        self.implValueSet9(inst)
        js = inst.as_json()
        self.assertEqual("ValueSet", js["resourceType"])
        inst2 = valueset.ValueSet(js)
        self.implValueSet9(inst2)

    def implValueSet9(self, inst):
        """Assert the expected field values for valueset-example-intensional.json."""
        self.assertEqual(inst.compose.exclude[0].concept[0].code, "5932-9")
        self.assertEqual(inst.compose.exclude[0].concept[0].display, "Cholesterol [Presence] in Blood by Test strip")
        self.assertEqual(inst.compose.exclude[0].system, "http://loinc.org")
        self.assertEqual(inst.compose.include[0].filter[0].op, "=")
        self.assertEqual(inst.compose.include[0].filter[0].property, "parent")
        self.assertEqual(inst.compose.include[0].filter[0].value, "LP43571-6")
        self.assertEqual(inst.compose.include[0].system, "http://loinc.org")
        self.assertEqual(inst.contact[0].name, "FHIR project team")
        self.assertEqual(inst.contact[0].telecom[0].system, "url")
        self.assertEqual(inst.contact[0].telecom[0].value, "http://hl7.org/fhir")
        self.assertEqual(inst.copyright, "This content from LOINC® is copyright © 1995 Regenstrief Institute, Inc. and the LOINC Committee, and available at no cost under the license at http://loinc.org/terms-of-use")
        self.assertEqual(inst.date.date, FHIRDate("2015-06-22").date)
        self.assertEqual(inst.date.as_json(), "2015-06-22")
        self.assertEqual(inst.description, "This is an example value set that includes all the LOINC codes for serum/plasma cholesterol from v2.36.")
        self.assertTrue(inst.experimental)
        self.assertEqual(inst.id, "example-intensional")
        self.assertEqual(inst.identifier[0].system, "http://acme.com/identifiers/valuesets")
        self.assertEqual(inst.identifier[0].value, "loinc-cholesterol-ext")
        self.assertEqual(inst.meta.profile[0], "http://hl7.org/fhir/StructureDefinition/shareablevalueset")
        self.assertEqual(inst.name, "LOINC Codes for Cholesterol in Serum/Plasma")
        self.assertEqual(inst.publisher, "HL7 International")
        self.assertEqual(inst.status, "draft")
        self.assertEqual(inst.text.status, "generated")
        self.assertEqual(inst.url, "http://hl7.org/fhir/ValueSet/example-intensional")
        self.assertEqual(inst.version, "20150622")
| StarcoderdataPython |
from __future__ import absolute_import
from phovea_processing_queue.task_definition import task, getLogger
# Module-level logger shared by the task functions below.
_log = getLogger(__name__)
@task
def add(x, y):
    """Processing-queue task: coerce both operands to float and add them."""
    total = float(x) + float(y)
    return total
@task
def mul(x, y):
    """Processing-queue task: coerce both operands to float and multiply them."""
    product = float(x) * float(y)
    return product
@task
def xsum(numbers):
    """Processing-queue task: return the sum of an iterable of numbers."""
    total = sum(numbers)
    return total
| StarcoderdataPython |
257991 | <reponame>binyoucai/ProxyPool
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Global configuration module consumed by db.py, getter.py, tester.py and
scheduler.py.

@Author  : 北冥神君
@File    : setting.py
@Software: PyCharm
"""
# ----------------------------------------------------------------------------
# db.py globals: REDIS_HOST, REDIS_PORT, REDIS_PASSWORD, REDIS_KEY, MAX_SCORE, MIN_SCORE, INITIAL_SCORE
# Redis connection settings
REDIS_HOST = '127.0.0.1'
REDIS_PORT = 6379
REDIS_PASSWORD = None
REDIS_KEY = 'proxies'
# Proxy score settings (proxies are ranked between MIN_SCORE and MAX_SCORE)
MAX_SCORE = 100
MIN_SCORE = 0
INITIAL_SCORE = 10
# ----------------------------------------------------------------------------
# getter.py globals: POOL_UPPER_THRESHOLD
# Upper bound on the number of proxies kept in the pool
POOL_UPPER_THRESHOLD = 50000
# ----------------------------------------------------------------------------
# tester.py globals: TEST_URL, VALID_STATUS_CODES, BATCH_TEST_SIZE
# Site used to test proxies; ideally set this to the site you intend to scrape
TEST_URL = 'http://www.baidu.com'
# HTTP status codes accepted as a valid response
VALID_STATUS_CODES = [200, 302]
# Maximum number of proxies tested per batch
BATCH_TEST_SIZE = 10
# ----------------------------------------------------------------------------
# scheduler.py globals: TESTER_CYCLE, GETTER_CYCLE, API_HOST, API_PORT, TESTER_ENABLED, GETTER_ENABLED, API_ENABLED
# Tester run interval (seconds)
TESTER_CYCLE = 20
# Getter run interval (seconds)
GETTER_CYCLE = 300
# Web API settings
API_HOST = '0.0.0.0'
API_PORT = 5555
# Enable/disable switches for the tester, getter and API server
TESTER_ENABLED = True
GETTER_ENABLED = True
API_ENABLED = True
# ----------------------------------------------------------------------------
| StarcoderdataPython |
11305787 | <reponame>owencole12/LdsHack
from advent import *
# Create the game instance that owns all locations and state.
game = Game()
# Define the starting location; the triple-quoted string is its description.
entry = game.new_location(
    "Start of Game", """
Something will eventually go here
"""
)
| StarcoderdataPython |
34517 | <reponame>s-ai-kia/nasa_stf
# -*- coding: utf-8 -*-
"""mopitt_data_analysis.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1bb_9kuO0Suo5761xgJioS84TrEdR5ojj
"""
import pandas as pd
df = pd.read_csv('MOP02J-20200101-L2V18.0.3.csv')
df.head()
df
tx = df[0:5000]
tx
import plotly.express as px
fig4 = px.density_mapbox(tx, lat='# Latitude', lon=' Longitude', z=' COTotalColumn', radius=10,
center=dict(lat=0, lon=180), zoom=0,
mapbox_style="stamen-terrain")
fig4.show()
import chart_studio
import chart_studio.plotly as py
import chart_studio.tools as tls
pip install chart_studio
username = 'saikia'
api_key = 'oIIOTBZOlv8hbx8XorKH'
chart_studio.tools.set_credentials_file(username=username, api_key=api_key)
py.plot(fig4, filename = 'csa-mopitt', auto_open=True)
| StarcoderdataPython |
4982243 | <reponame>ImanolGo/IngoLightAndBuilding
from openframeworks import *
from protopixel import Content
from random import randint
import math
print "Blackout"
#a global variable
size = 170
currentTransparency = 0
targetTransparency = 0
content = Content("Blackout")
content.FBO_SIZE = (170,170) #optional: define size of FBO, default=(100,100)
content.add_parameter("On", value=False)
@content.parameter_changed('On')
def parameter_changed(value):
"""
This function is called every time a a_integer is changed.
We get the new value as an argument
"""
global targetTransparency
if value == True:
targetTransparency = 255
print "Global Blackout ON "
else:
targetTransparency = 0
print "Global Blackout OFF"
def setup():
    """
    Called once at startup; disables automatic background clearing so the
    overlay drawn in draw() persists between frames.
    """
    ofSetBackgroundAuto(False)
def update():
    """
    Called every frame before draw(); eases the current transparency
    toward the target (simple exponential easing, 10% per frame).
    """
    global currentTransparency, targetTransparency
    currentTransparency += (targetTransparency - currentTransparency) * 0.1
def draw():
    """
    Draw the blackout overlay: a full-window black rectangle whose alpha
    is the eased transparency value computed in update().
    """
    global currentTransparency
    ofSetColor(0,0,0,int(currentTransparency))
    ofDrawRectangle(0,0,ofGetWidth(),ofGetHeight())
def exit():
    """
    Called before the script is removed; nothing to clean up here.
    """
    pass

def on_enable():
    """
    Called when this content just got enabled; no extra work needed.
    """
    pass

def on_disable():
    """
    Called when this content just got disabled.
    `update` and `draw` are not called while this content is disabled.
    """
    pass
| StarcoderdataPython |
import csv
import os
import random
import numpy as np
import sys
from sklearn import svm
from keras.models import Sequential, model_from_yaml
from keras.layers import Dropout, Dense
from keras.callbacks import EarlyStopping
def open_csv(file_path):
    """Read a feature CSV (one header row + data rows) into numpy arrays.

    Each data row is f_wh, f_wmt, f_posh, f_posmt, f_len, y.

    Args:
        file_path: path to the CSV file.

    Returns:
        (features, tags): float32 array [n_samples, n_features] and an
        int32 label vector [n_samples].

    Raises:
        FileNotFoundError: if *file_path* does not exist (the original
        used a bare `assert`, which is stripped under -O).
    """
    if not os.path.isfile(file_path):
        raise FileNotFoundError(file_path)
    with open(file_path, 'r') as fid:
        raw_data = [row for row in csv.reader(fid)]
    raw_data = raw_data[1:]  # drop the header row
    #random.shuffle(raw_data)
    raw_data = np.array(raw_data).astype('float32')
    features = raw_data[:, :-1]
    tags = raw_data[:, -1].astype('int32')
    return features, tags
def normalize(a):
    """Standardize each row of *a* to zero mean and unit variance."""
    row_mean = a.mean(1, keepdims=True)
    row_std = a.std(1, keepdims=True)
    return (a - row_mean) / row_std
def evaluate_model(tags, predictions):
    """Print and return precision, accuracy and recall for binary labels.

    Args:
        tags: iterable of ground-truth labels (0 or 1).
        predictions: iterable of predicted labels (0 or 1), same length.

    Returns:
        (precision, accuracy, recall) as floats; a metric is 0.0 when its
        denominator would be zero.  Returning the values (the original
        only printed them) keeps callers working and makes the function
        testable; it also matches evaluate_svm_model's float defaults.
    """
    t_p = t_n = f_p = f_n = 0
    for tag, pred in zip(tags, predictions):
        if tag == 1 and pred == 1:
            t_p += 1
        elif tag == 0 and pred == 0:
            t_n += 1
        elif tag == 0 and pred == 1:
            f_p += 1
        else:
            f_n += 1
    precision = float(t_p) / (t_p + f_p) if (t_p + f_p) > 0 else 0.0
    accuracy = float(t_p + t_n) / (t_p + t_n + f_p + f_n) if (t_p + t_n + f_p + f_n) > 0 else 0.0
    recall = float(t_p) / (t_p + f_n) if (t_p + f_n) > 0 else 0.0
    print("Precision: {}".format(precision))
    print("Accuracy: {}".format(accuracy))
    print("Recall: {}".format(recall))
    return precision, accuracy, recall
def evaluate_svm_model(tags, predictions):
    """Print and return precision, accuracy and recall for binary labels.

    Mirrors evaluate_model (the two were copy-paste duplicates); kept as
    a separate entry point so existing callers keep working.

    Args:
        tags: iterable of ground-truth labels (0 or 1).
        predictions: iterable of predicted labels (0 or 1), same length.

    Returns:
        (precision, accuracy, recall) as floats; a metric is 0.0 when its
        denominator would be zero.
    """
    t_p = t_n = f_p = f_n = 0
    for tag, pred in zip(tags, predictions):
        if tag == 1 and pred == 1:
            t_p += 1
        elif tag == 0 and pred == 0:
            t_n += 1
        elif tag == 0 and pred == 1:
            f_p += 1
        else:
            f_n += 1
    precision = float(t_p) / (t_p + f_p) if (t_p + f_p) > 0 else 0.0
    accuracy = float(t_p + t_n) / (t_p + t_n + f_p + f_n) if (t_p + t_n + f_p + f_n) > 0 else 0.0
    recall = float(t_p) / (t_p + f_n) if (t_p + f_n) > 0 else 0.0
    print("Precision: {}".format(precision))
    print("Accuracy: {}".format(accuracy))
    print("Recall: {}".format(recall))
    return precision, accuracy, recall
# PREDICTIONS
def mlp_predict(X, bsize=5):
    '''
    Predict class labels with the pre-trained MLP stored on disk.

    :param X: numpy array [n_samples, n_features] (input features)
    :param bsize: batch size passed to Keras during prediction
    :return: prediction: numpy array with predicted class labels

    NOTE(review): the architecture/weight paths are hard-coded relative to
    the current working directory, and the yaml file handle opened inline
    is never closed explicitly.
    '''
    model = model_from_yaml(open('models/mlp_architecture.yaml').read())
    model.load_weights('models/mlp_model_weights.h5')
    predictions = model.predict_classes(X, batch_size=bsize, verbose=1)
    return predictions
def svm_predict(X):
    """Predict labels with the pickled SVM at models/svm-model.pkl.

    :param X: numpy array [n_samples, n_features]
    :return: numpy array of predicted labels
    """
    # sklearn.externals.joblib was deprecated in scikit-learn 0.21 and
    # removed in 0.23; prefer the standalone joblib package and fall back
    # only for very old installations.
    try:
        import joblib
    except ImportError:
        from sklearn.externals import joblib
    classifier = joblib.load('models/svm-model.pkl')
    predictions = classifier.predict(X)
    return predictions
# Command-line entry: first argument is the path to the feature CSV.
path = sys.argv[1]
features, tags = open_csv(path)
features = normalize(features)

print("Predicting with svm")
predictions = svm_predict(features)
print(predictions)

# Read the reference labels used for evaluation from test_tags.csv
# (one label per row; each row is echoed as it is read).
real_tags = []
with open("test_tags.csv",'r') as f:
    i = 0
    csv_reader = csv.reader(f)
    for line in csv_reader:
        print(line)
        i += 1
        real_tags.append(line)
#print(real_tags)
real_tags = np.array(real_tags).astype('int32')
print(real_tags)
evaluate_svm_model(real_tags, predictions)
| StarcoderdataPython |
#!/usr/bin/python
#coding: utf-8
class Subject(object):
    """Minimal observable: tracks observers and notifies them of changes."""

    def __init__(self):
        self._observers = []

    def attach(self, observer):
        """Register *observer* unless it is already registered."""
        if observer not in self._observers:
            self._observers.append(observer)

    def detach(self, observer):
        """Unregister *observer*; silently ignore one that was never attached."""
        try:
            self._observers.remove(observer)
        except ValueError:
            pass

    def notify(self, modifier=None):
        """Call update(self) on every observer except *modifier*."""
        for obs in self._observers:
            if modifier != obs:
                obs.update(self)
# Example usage
class DataSubject(Subject):
    """Subject carrying a single integer datum; notifies observers on assignment."""

    def __init__(self, name=""):
        super(DataSubject, self).__init__()
        self.name = name
        self._data = 0

    @property
    def data(self):
        """Current value; assigning to it triggers notify()."""
        return self._data

    @data.setter
    def data(self, value):
        self._data = value
        self.notify()
class Observer:
    """Abstract observer; subclasses override update()."""

    def __init__(self):
        pass

    def update(self, subject):
        """Called by a Subject when its state changes; default does nothing."""
        pass
class DataObserver(Observer):
    """Observer that logs a DataSubject's name and value on each change."""

    def update(self, subject):
        # The original spelled this as `print ("...") % (...)`, which only
        # works because this module uses Python 2 print statements and is
        # a TypeError under Python 3.  The parenthesized form below prints
        # identically on Python 2 and works unchanged on Python 3.
        print("DataSubject: %s has data %d" % (subject.name, subject.data))
def test():
    """Exercise attach/detach/notify with two subjects and two observers."""
    d1 = DataSubject("DataSubject 1")
    d2 = DataSubject("DataSubject 2")
    ob1 = DataObserver()
    ob2 = DataObserver()

    # Both observers watch both subjects.
    d1.attach(ob1)
    d1.attach(ob2)
    d2.attach(ob1)
    d2.attach(ob2)

    # Each assignment below triggers notify() on the attached observers.
    # (The original used the Python-2-only `print d1.data` statement;
    # the parenthesized form behaves identically on Python 2 and 3.)
    print("setting DataSubject 1 to 10")
    print(d1.data)
    d1.data = 10
    print("setting DataSubject 2 to 14")
    d2.data = 14

    print("data 1 detach ob2")
    d1.detach(ob2)
    print("setting DataSubject 1 to 20")
    d1.data = 20

    print("data 1 detach ob1")
    d1.detach(ob1)
    print("setting DataSubject 1 to 30")
    d1.data = 30

if __name__ == '__main__':
    test()
| StarcoderdataPython |
3220132 | """
import traceback
HTTP API for airfilter prometheus collector.
"""
import time
from prometheus_client import CONTENT_TYPE_LATEST, Summary, Counter, generate_latest
from werkzeug.routing import Map, Rule
from werkzeug.serving import run_simple
from werkzeug.wrappers import Request, Response
from werkzeug.exceptions import InternalServerError
from app.collector import collect_sensors
class AirfilterExporterApplication(object):
    """
    WSGI application exposing the airfilter collector over HTTP.

    Routes:
        /         -- short usage page
        /metrics  -- prometheus exposition of the exporter's own metrics
        /sensors  -- run a sensor collection and expose the readings
    """
    def __init__(self, duration, errors):
        # Metrics instrumenting the exporter itself.
        self._duration = duration
        self._errors = errors
        self._url_map = Map([
            Rule('/', endpoint='index'),
            Rule('/metrics', endpoint='metrics'),
            Rule('/sensors', endpoint='sensors'),
        ])
        # Query-string arguments each endpoint is allowed to receive.
        self._args = {
            'sensors': ['sds011', 'sleep', 'ccs811']
        }
        # Endpoint-name -> handler dispatch table.
        self._views = {
            'index': self.on_index,
            'metrics': self.on_metrics,
            'sensors': self.on_sensors,
        }
    def on_sensors(self, sds011='/dev/ttyUSB0', sleep=15, ccs811='false'):
        """
        Request handler for the /sensors route: runs a collection and
        records its wall-clock duration in the summary metric.
        """
        start = time.time()
        output = collect_sensors(sds011, sleep, ccs811)
        response = Response(output)
        response.headers['content-type'] = CONTENT_TYPE_LATEST
        self._duration.observe(time.time() - start)
        return response
    def on_metrics(self):
        """
        Request handler for /metrics: exporter-internal prometheus metrics.
        """
        response = Response(generate_latest())
        response.headers['content-type'] = CONTENT_TYPE_LATEST
        return response
    def on_index(self):
        """
        Request handler for the index route (/): static usage page.
        """
        response = Response(
            """<html>
<head><title>Airfilter Exporter</title></head>
<body>
<h1>Airfilter Exporter</h1>
<p>Visit <code>/sensors?sds011="/dev/ttyUSB0"&sleep="15"</code> to use.</p>
</body>
</html>"""
        )
        response.headers['content-type'] = 'text/html'
        return response
    def view(self, endpoint, values, args):
        """
        Dispatch *endpoint* to its handler, merging URL values with the
        whitelisted query-string arguments; any failure is counted in the
        error metric and surfaced as HTTP 500.
        """
        params = dict(values)
        if endpoint in self._args:
            params.update({key: args[key] for key in self._args[endpoint] if key in args})
        try:
            return self._views[endpoint](**params)
        except Exception as error:
            self._errors.inc()
            raise InternalServerError(error)
    @Request.application
    def __call__(self, request):
        urls = self._url_map.bind_to_environ(request.environ)
        # Named closure instead of the original lambda assignment (PEP 8
        # E731); it binds the request's query args into the dispatch call.
        def view_func(endpoint, values):
            return self.view(endpoint, values, request.args)
        return urls.dispatch(view_func, catch_http_exceptions=True)
def start_http_server(port, address=''):
    """
    Start the HTTP API server for the airfilter prometheus collector.

    Args:
        port: TCP port to listen on.
        address: interface to bind to ('' binds all interfaces).
    """
    duration = Summary(
        'airfilter_collection_duration_seconds',
        'Duration of collections by the airfilter exporter',
    )
    errors = Counter(
        'airfilter_request_errors_total',
        'Errors in requests to airfilter exporter',
    )
    # Constructing the Summary/Counter above already registers them with
    # prometheus_client; the original's bare `errors` / `duration`
    # expression statements were no-ops and have been removed.
    app = AirfilterExporterApplication(duration, errors)
    # NOTE(review): use_debugger=True enables werkzeug's interactive
    # debugger and should not be left on for production deployments.
    run_simple(address, port, app, threaded=True, use_debugger=True)
| StarcoderdataPython |
3217104 | <reponame>EgorBolt/studying<filename>tooi/lab7/lab7a.py
import time
# Repeatedly prompt for an integer and sleep that many seconds;
# a non-positive number ends the program.  (Rewritten as a single loop
# to remove the duplicated prompt/read code of the original.)
while True:
    print('Введите число: ')
    i = int(input())
    if i <= 0:
        break
    time.sleep(i)
| StarcoderdataPython |
#!/usr/bin/python3
#
# This contains the main pyFvwm class which handles creation
# and management of the pyFvwm configuration database. In
# addition it handles formatting fvwm2rc files and building
# themes.
#
# Ensure that the home directory below is set to the location
# of pyFvwm's data files.
#
import os
import sys
import glob
import subprocess
import yaml
# Set this to the location of pyfvwm's install directory.
# (The original's module-level `global home` statement was a no-op and has
# been removed: `global` only has an effect inside a function body.)
home = "/usr/local/share/pyfvwm"
version = "0.2"
#
# Main pyFvwm class
#
class pyFvwm:
    def __init__(self):
        """Initialise state, then load the config, fvwm2rc files and themes."""
        # Initialize Variables
        self.home = home          # install directory (module-level constant)
        self.version = version
        # User data directory: $PYFVWMUSERDIR if set, otherwise ~/.pyfvwm
        if 'PYFVWMUSERDIR' in os.environ:
            self.userdir = os.environ['PYFVWMUSERDIR']
        else: self.userdir = "{}/.pyfvwm".format(os.environ['HOME'])
        self.yaml = "config.yaml"    # config file name inside userdir
        self.config = {}             # user configuration loaded from YAML
        self.defaults = {}           # lazily loaded defaults.yaml contents
        self.fvwm2rcsystem = []      # system-wide fvwm2rc files
        self.fvwm2rcuser = []        # per-user fvwm2rc files
        self.themessystem = []       # system-wide themes
        self.themesuser = []         # per-user themes
        self.defaulttheme = ""
        self.currenttheme = {}
        # Initialization Functions
        self.loadconfig()
        self.loadfvwm2rc()
        self.loadthemes()
# Loads configuration from YAML file. Creates a blank file if none is found.
def loadconfig(self):
try:
f = open('{}/{}'.format(self.userdir, self.yaml), 'r')
self.config = yaml.safe_load(f)
f.close()
except:
try:
self.config = {}
self.saveconfig()
except:
print("pyFvwmError: Unable to access configuration file: {}/{}".format(self.userdir, self.yaml))
# Saves the configuration. Errors out if unable to write to file.
def saveconfig(self):
# Check if pyFvwm.userdir exists, if not create it.
if not os.path.exists(self.userdir):
os.makedirs(self.userdir)
# Try to open configuration file for writing.
try:
f = open('{}/{}'.format(self.userdir, self.yaml), 'w')
yaml.dump(self.config, f, default_flow_style=False)
f.close()
except:
print("pyFvwmError: Unable to write to configuration file: {}/{}".format(self.userdir, self.yaml))
sys.exit(1)
# Loads and default configuration file. This file contains
# the definitions needed of all configuration variables.
def loaddefaults(self):
if len(self.defaults) == 0:
f = open('{}/pyFvwm/defaults.yaml'.format(self.home), 'r')
self.defaults = yaml.safe_load(f)
f.close()
# Checks to see if a given configuration group is in the
# configuration file, and then compares the configuration
# group to the defaults.
def checkdefaults(self, group, config):
if not isinstance(config, dict): config = {}
if group not in config: config.update({group:{}})
self.checkgroupdefaults(group, config[group])
return config
# Uses the defaults file to check and/or define any
# variables needed in the configuration group.
def checkgroupdefaults(self, group, config):
self.loaddefaults()
if not group in self.defaults: return
defaults = self.defaults[group]
if group in self.defaults['defaulttests']:
defaulttests = self.defaults['defaulttests'][group]
else: defaulttests = {}
for key, value in defaults.items():
try:
data = config[key]
if key in defaulttests:
test = defaulttests[key]
if test['type'] == "bool":
if not isinstance(data, bool): raise
elif test['type'] == "int":
if not isinstance(data, int): raise
if 'values' in test:
if not (test['values'][0] <= int(data) <= test['values'][1]): raise
elif test['type'] == "inlist" and 'values' in test:
if data not in test['values']: raise
elif test['type'] == "list":
if not isinstance(data, list): raise
except: config.update({key:value})
# Sets viewport width and height.
def updatevpsize(self, width, height):
self.config['vpwidth'] = int(width)
self.config['vpheight'] = int(height)
self.saveconfig()
# Builds a list of both system and user themes available.
def loadthemes(self):
# System Themes
self.themessystem = []
for name in glob.glob("{}/themes/*.yaml".format(self.home)):
start = len(self.home) + 8
self.themessystem.append(name[start:-5])
self.themessystem.sort()
# User Themes
self.themesuser = []
for name in glob.glob("{}/themes/*.yaml".format(self.userdir)):
start = len(self.userdir) + 8
self.themesuser.append(name[start:-5])
self.themesuser.sort()
# Find default theme. If no default theme is found
# self.defaultname will be set to "" and warnings will
# be outputted to stdout.
if os.path.exists("{}/themes/default".format(self.userdir)):
if os.path.islink("{}/themes/default".format(self.userdir)):
lnk = os.readlink("{}/themes/default".format(self.userdir))[:-5]
if '/' in lnk: lnk = lnk[lnk.rfind('/')+1:]
else: print("Warning! {}/themes/default exists and is not a link.".format(self.userdir))
elif os.path.islink("{}/themes/default".format(self.home)):
lnk = os.readlink("{}/themes/default".format(self.home))[:-5]
if '/' in lnk: lnk = lnk[lnk.rfind('/')+1:]
else: print("Warning! No default theme found.")
if lnk in self.themessystem + self.themesuser:
self.defaulttheme = lnk
else: print("Warning! default link points to an invalid theme.")
# Loads a theme and places it at fvwm.currenttheme
def loadtheme(self, theme):
if theme == 'default':
fname = '{}/themes/default'.format(self.userdir)
if os.path.isfile(fname):
f = open(fname, 'r')
self.currenttheme = yaml.safe_load(f)
f.close()
else:
f = open('{}/themes/default'.format(self.home), 'r')
self.currenttheme = yaml.safe_load(f)
f.close()
elif theme in self.themesuser:
f = open('{}/themes/{}.yaml'.format(self.userdir, theme), 'r')
self.currenttheme = yaml.safe_load(f)
f.close()
elif theme in self.themessystem:
f = open('{}/themes/{}.yaml'.format(self.home, theme), 'r')
self.currenttheme = yaml.safe_load(f)
f.close()
# Builds a list of both system and user fvwm2rc files available.
def loadfvwm2rc(self):
# Build list of system fvwm2rc files
self.fvwm2rcsystem = []
for root, dirs, files in os.walk("{}/fvwm2rc/".format(self.home)):
start = len(self.home) + 9
for name in files:
if name.endswith(".fvwm2rc"):
self.fvwm2rcsystem.append(os.path.join(root,name)[start:-8])
self.fvwm2rcsystem.sort()
# Build list of user fvwm2rc files
self.fvwm2rcuser = []
for root, dirs, files, in os.walk("{}/fvwm2rc/".format(self.userdir)):
start = len(self.userdir) + 9
for name in files:
if name.endswith(".fvwm2rc"):
self.fvwm2rcuser.append(os.path.join(root,name)[start:-8])
self.fvwm2rcuser.sort()
# Builds a fvwm configuration file from a theme,
# which is just a list of fvwm2rc files to format.
def buildfvwm2rc(self, theme="default"):
self.loadtheme(theme)
if 'fvwm2rc' in self.currenttheme and len(self.currenttheme['fvwm2rc']) > 0:
fvwmout = "##### pyFvwm: generating theme {} #####\n".format(theme)
for fvwm2rc in self.currenttheme['fvwm2rc']:
fvwmout += self.formatfvwm2rc(fvwm2rc)
# Add Local configuration to end if it exists:
if os.path.isfile("{}/fvwm2rc/Local.fvwm2rc".format(self.userdir)):
fvwmout += self.formatfvwm2rc('Local')
return fvwmout
# Formats an fvwm2rc file by using a Python header which
# defines the function FvwmFormatter that returns the
# dictionary to use to format the remaining file using
# Python string formatting.
def formatfvwm2rc(self, rcfile):
if rcfile in self.fvwm2rcuser:
tmpfname = "{}/fvwm2rc/{}.fvwm2rc".format(self.userdir, rcfile)
elif rcfile in self.fvwm2rcsystem:
tmpfname = "{}/fvwm2rc/{}.fvwm2rc".format(self.home, rcfile)
else: return "\n#####pyFvwmError: Unknown fvwm2rc file: {}\n".format(rcfile)
fvwmout = '\n#####\n# pyFvwm: formatting {}\n#####\n'.format(tmpfname)
header = ''
pyFvwmBlock = False
try: tmpf = open(tmpfname, "r")
except: return "\n#####pyFvwmError: FileNotFound: {}\n".format(tmpfname)
data = tmpf.readlines()
tmpf.close()
# Separate pyFvwmBlock
for line in data:
if line.startswith("#pyFvwmStart"):
pyFvwmBlock = True
continue
if line.startswith("#pyFvwmEnd"):
pyFvwmBlock = False
continue
if pyFvwmBlock: header += line
else: fvwmout += line
# Run pyFvwmBlock code if found
if len(header) > 5:
try:
global fvwm
fvwm = self
exec(header, globals())
formatter = FvwmFormatter(self.config)
except Exception as err:
return "#####pyFvwmBlockError: {}".format(err)
try:
fmt = Fvwm2rcFormatter()
fvwmout = fmt.format(fvwmout, **formatter)
except: return "#####pyFvwmError: FvwmFormatterError"
return fvwmout
# Sends commands to a running fvwm instance.
# Currently only sends commands to fvwm via the shell
# command FvwmCommand using the corresponding fvwm
# module. ToDo: Add the ability to communicate via
# the fvwm module api.
def sendtofvwm(self, fvwm2rc, name="fvwm2rc", method="FvwmCommand"):
if method == "FvwmCommand":
self.FvwmCmd(fvwm2rc, name=name)
# Write a temporary file then tell fvwm to Read
# it via FvwmCommand.
def FvwmCmd(self, fvwm2rc, name="fvwm2rc"):
name = name.replace("/","-")
tmpfname = "{}/.{}.tmp".format(self.userdir, name)
try: tmpf = open(tmpfname, "w")
except: return
tmpf.write(fvwm2rc)
tmpf.write("\n# This message will self destruct in 30 seconds.\n")
tmpf.write("\nTest (f {name}) Schedule 30000 Exec exec rm {name}\n".format(name=tmpfname))
tmpf.close()
cmd = "Read {}".format(tmpfname)
subprocess.Popen(["FvwmCommand", cmd])
# Fvwm2rcFormatter is a custom formatter to ignore key errors
# from unmatched keys in the fvwm2rc file.
#from __future__ import print_function
import string
class Fvwm2rcFormatter(string.Formatter):
    """Formatter that leaves unmatched named fields intact.

    A missing key "foo" formats back to the literal "{foo}" (via the
    `default` template) instead of raising KeyError, so fvwm2rc files
    may contain braces that pyFvwm does not substitute.
    """
    def __init__(self, default='{{{0}}}'):
        # `default` is the template used to re-emit an unmatched key.
        self.default = default
    def get_value(self, key, args, kwds):
        if isinstance(key, str):
            return kwds.get(key, self.default.format(key))
        else:
            # Fix: the original dropped this return, so positional
            # fields like {0} always formatted as "None".
            return string.Formatter.get_value(self, key, args, kwds)
| StarcoderdataPython |
6592601 | from .saveStrategy import SaveStrategy
import sqlite3
class DatabaseSave(SaveStrategy):
    """SaveStrategy that records player scores in a local SQLite database.

    Opens saves/saveDatabase.db on construction and ensures the score
    table exists. Note that save() commits and closes the connection,
    so each instance can perform at most one save.
    """

    def __init__(self):
        """Open the save database and create the score table if needed."""
        self.connection = sqlite3.connect("saves/saveDatabase.db")
        self.cursor = self.connection.cursor()
        self.createTable()

    def load(self):
        """Loading is not implemented for the database strategy."""
        pass

    def save(self, player):
        """Persist the player's name and total score, then close the DB."""
        self.dataEntry(player.name, player.scoreSheet.total())
        self.closeConnection()

    def closeConnection(self):
        """Commit pending writes and release cursor and connection."""
        self.connection.commit()
        self.cursor.close()
        self.connection.close()

    def createTable(self):
        """Create the score table on first use (idempotent)."""
        self.cursor.execute(
            'CREATE TABLE IF NOT EXISTS scoreToAdd(player TEXT, value REAL)')

    def dataEntry(self, name, score):
        """Insert one (player, score) row using a parameterized query."""
        self.cursor.execute(
            'INSERT INTO scoreToAdd(player, value) VALUES (?, ?)',
            (name, score))
| StarcoderdataPython |
4835174 | # number = None
# while (not number) or not (number > 0):
# try_number = input("Please enter a number > 0: ")
# try:
# number = float(try_number)
# print("Got it!")
# except ValueError as err:
# print("Error: ", err)
# try:
# file_handle = open("my_file")
# except IOError as err:
# print("Could not open file! Error: ", err)
# else:
# content = file_handle.read()
# result = analyse(content)
# finally:
# file_handle.close()
# key_list = ["key", "my_key", "bike_key", "transponder"]
# key_to_lock = {
# "my_key": "Finally I can go home again!",
# "bike_key": "This unlocks my Bike!",
# "transponder": "Back to work it is."
# }
# try:
# idx = int(input(f"A number bewteen 0 and {len(key_list)-1} please: "))
# key = key_list[idx]
# print(key_to_lock[key])
# except (IndexError, KeyError, ValueError) as err:
# print("Well this didn't work:", err)
# key_list = ["key", "my_key", "bike_key", "transponder"]
# key_to_lock = {
# "my_key": "Finally I can go home again!",
# "bike_key": "This unlocks my Bike!",
# "transponder": "Back to work it is."
# }
# try:
# idx = int(input(f"A number bewteen 0 and {len(key_list)-1} please: "))
# key = key_list[idx]
# print(key_to_lock[key])
# except IndexError as err:
# print("No, no. This index doesn't work.")
# except KeyError as err:
# print("Seems like that key has no lock. How strange.")
# except ValueError as err:
# print("That's not a number...")
# key_list = ["key", "my_key", "bike_key", "transponder"]
# key_to_lock = {
# "my_key": "Finally I can go home again!",
# "bike_key": "This unlocks my Bike!",
# "transponder": "Back to work it is."
# }
# try:
# idx = int(input(f"A number bewteen 0 and {len(key_list)-1} please: "))
# key = key_list[idx]
# print(key_to_lock[key])
# except (IndexError, ValueError) as err:
# print("That was not a valid index:", err)
# except KeyError:
# print("Oh no! That key has no lock!")
# def sub(a, b):
# return a + b
# assert sub(5, 4) == 1, '5 - 4 != 1'
# assert sub(7, 3) == 4, '7 - 3 != 4'
# def diff(a, b):
# """Returns the absolute difference of a and b"""
# sub = a - b
# return sub if sub >= 0 else -sub
# ##########
# help(diff)
# import turtle
# help(turtle.up)
# def get_number(message):
# number = None
# while number is None:
# try:
# value = input(message)
# number = float(value)
# except ValueError:
# print("That was no number.")
# return number
# def get_idx():
# number = get_number("A positive integer please: ")
# if number < 0 or not (int(number) == number):
# raise ValueError(f"{number} is no positive integer")
# return number
# def add(a, b):
# """Returns the sum of a and b
# Args:
# a : the left operand
# b : the right operand
# Returns:
# The sum of of a and b
# """
# return a + b
# def difficult_function(argument, other_arg=None):
# """Concise description.
# Longer description (if concise is not enough)
# which might need multiple lines.
# Or even some paragraphs.
# Args:
# argument: A description of this argument.
# other_arg: Another description.
# Returns:
# A short summary of what is returned, especially its format.
# Raises:
# ValueError: When does this occur?
# """
# pass
# NOTE(review): resolved an unmerged git conflict that was left in the
# file (the conflict markers made the module unparseable). HEAD's side
# was empty; the incoming branch added this import, so it is kept.
# Confirm against the intended merge result.
import Untitled
| StarcoderdataPython |
1750046 | <gh_stars>0
import pytest
from insertion_sort import insertion_sort
# @pytest.fixture()
# def unsorted_lst():
# lst = [5, 2, 8, 1, 15]
def test_randomly_unsorted_list():
    """An unsorted list returns sorted"""
    assert insertion_sort([5, 2, 8, 1, 15]) == [1, 2, 5, 8, 15]
def test_sorted_list():
    """A sorted list will return the same"""
    already_sorted = [1, 2, 3, 4, 5]
    assert insertion_sort(already_sorted) == [1, 2, 3, 4, 5]
def test_empty_list():
    """Edgecase, empty list"""
    assert insertion_sort([]) == []
def test_single_item():
    """Edgecase: a one-element list is already sorted."""
    assert insertion_sort([55]) == [55]
187349 | <reponame>happyandy2017/LeetCode<gh_stars>0
class Solution:
    def longestCommonPrefix(self, strs):
        """
        Return the longest string that prefixes every element of strs.

        :type strs: List[str]
        :rtype: str
        """
        if not strs:
            return ''
        # Start with the first string and shrink it until it prefixes
        # every other string; an empty prefix ends the search early.
        prefix = strs[0]
        for word in strs[1:]:
            while not word.startswith(prefix):
                prefix = prefix[:-1]
                if not prefix:
                    return ''
        return prefix
6428565 | <filename>api/apis/ArpTable.py
import os
import pymongo
from bson.objectid import ObjectId
from pymongo.collection import ReturnDocument
from flask_restplus import Namespace, Resource, fields
# Module-level Mongo wiring: runs at import time, so importing this module
# requires DB_CONN and DB_NAME to be set in the environment.
# NOTE(review): os.getenv returns None when unset — MongoClient(None) then
# silently falls back to localhost; confirm that is acceptable.
myclient = pymongo.MongoClient(os.getenv("DB_CONN"))
db = myclient[os.getenv("DB_NAME")]
# Collection backing both resource classes below.
arp_table_col = db["arp_tables"]
# REST namespace mounted by the parent Api object.
api = Namespace('arp_table', description='arp table related operations')
# Swagger model describing one ARP-table entry (used by @api.expect).
arp_table = api.model('ARP_TABLE', {
    "interface": fields.String(description='Interface of the device'),
    "mac": fields.String(description='MAC of the device'),
    "ip": fields.String(description='IP of the device'),
    "age": fields.Float(description='Age of the device'),
})
@api.route('/')
@api.response(404, 'arp_table not inserted')
@api.response(500, 'Server Error')
class ArpTables(Resource):
    """Collection endpoint: list every ARP-table record or insert one."""

    @api.doc('list_arp_tables')
    def get(self):
        """Return all documents from the arp_tables collection."""
        cursor = arp_table_col.find()
        return list(cursor)

    @api.doc('post_arp_table')
    @api.expect(arp_table)
    def post(self):
        """Insert the request payload as a new ARP-table document."""
        try:
            inserted_id = arp_table_col.insert_one(api.payload).inserted_id
            if not inserted_id:
                # Funnel the failure into the 404 handler below.
                raise ValueError('arp_table not found')
            return {'msg': 'Inserted'}, 201
        except ValueError as ve:
            print('arp_table exception', ve)
            api.abort(404)
        except Exception as e:
            print('Server Error', e)
            api.abort(500)
@api.route('/<id>')
@api.param('id', 'The arp_table identifier')
@api.response(404, 'arp_table not found')
@api.response(500, 'Server Error')
class ArpTable(Resource):
    """Item endpoint: fetch, update or delete one ARP-table document by id.

    Each handler raises ValueError when the document is missing so the
    `except ValueError` branch can translate it to HTTP 404; any other
    exception becomes HTTP 500.
    NOTE(review): a malformed id makes ObjectId() raise bson's InvalidId,
    which lands in the generic Exception branch (500, not 404) — confirm
    that is the intended status.
    """
    @api.doc('get_arp_table')
    def get(self, id):
        # Fetch a single document; 404 if no match.
        try:
            result = arp_table_col.find_one({'_id': ObjectId(id)})
            if result:
                return result
            raise ValueError('arp_table not found')
        except ValueError as ve:
            print('arp_table exception', ve)
            api.abort(404)
        except Exception as e:
            print('Server Error', e)
            api.abort(500)
    @api.doc('put_arp_table')
    @api.expect(arp_table)
    def put(self, id):
        # Partial update ($set) with the request payload; 404 if no match.
        try:
            doc = api.payload
            result = arp_table_col.find_one_and_update(
                {'_id': ObjectId(id)},
                {'$set': doc},
                return_document=ReturnDocument.AFTER)
            if result:
                return {'msg': 'Updated'}, 200
            raise ValueError('arp_table not found')
        except ValueError as ve:
            print('arp_table exception', ve)
            api.abort(404)
        except Exception as e:
            print('Server Error', e)
            api.abort(500)
    @api.doc('delete_arp_table')
    def delete(self, id):
        # Remove the document; 404 if it did not exist.
        try:
            result = arp_table_col.find_one_and_delete({'_id': ObjectId(id)})
            if result:
                return {'msg': 'Deleted'}, 200
            raise ValueError('arp_table not found')
        except ValueError as ve:
            print('arp_table exception', ve)
            api.abort(404)
        except Exception as e:
            print('Server Error', e)
            api.abort(500)
| StarcoderdataPython |
8065735 | <filename>HRD_201706.py
#coding=utf-8
from lib.device import Camera
from lib.process_new import getHR
import cv2
import numpy as np
import datetime
import serial
import socket
import sys
class getHeartRate(object):
def __init__(self):
self.cameras = []
self.selected_cam = 0
for i in range(0, 4):
camera = Camera(camera=i) # first camera by default
if camera.valid or not len(self.cameras):
self.cameras.append(camera)
else:
break
self.w, self.h = 0, 0
self.pressed = 0
self.processor = getHR(bpm_limits=[50, 160],
data_spike_limit=2500.,
face_detector_smoothness=10.)
# Init parameters for the cardiac data plot
self.bpm_plot = False
self.plot_title = "Data display - raw signal (top) and PSD (bottom)"
def toggle_cam(self):
if len(self.cameras) > 1:
self.processor.find_faces = True
self.bpm_plot = False
destroyWindow(self.plot_title)
self.selected_cam += 1
self.selected_cam = self.selected_cam % len(self.cameras)
# start or end to show heart rate
def toggle_search(self):
state = self.processor.find_faces_toggle()
print "face detection lock =", not state
# control of keys
def key_handler(self):
self.pressed = cv2.waitKey(10) & 255 # wait for keypress for 10 ms
if self.pressed == 27: # exit program on 'esc'
print "Exiting"
for cam in self.cameras:
cam.cam.release()
sys.exit()
if self.pressed == 32:
print "Start Detecting"
self.toggle_search()
def main_loop(self):
"""
Single iteration of the application's main loop.
"""
# Get current image frame from the camera
frame = self.cameras[self.selected_cam].get_frame()
self.h, self.w, _c = frame.shape
# set current image frame to the processor's input
self.processor.frame_in = frame
# process the image frame to perform all needed analysis
self.processor.run()
# collect the output frame for display
output_frame = self.processor.frame_out
# cv2.namedWindow("Heart Rate", 0)
# cv2.resizeWindow("Heart Rate", 800, 500)
# show the processed/annotated output frame
cv2.imshow("Heart Rate", output_frame)
# handle any key presses
self.key_handler()
if __name__ == "__main__":
    # Build the app (probes cameras), create a resizable preview window,
    # then run the capture/process/display loop until Esc exits (the loop
    # only ends via sys.exit() in key_handler).
    hr = getHeartRate()
    cv2.namedWindow("Heart Rate", cv2.WINDOW_NORMAL)
    # cv2.setWindowProperty("Heart Rate", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    while True:
        hr.main_loop()
| StarcoderdataPython |
1797596 | from talon import speech_system, Context
from talon.engines.w2l import W2lEngine
from talon.engines.webspeech import WebSpeechEngine
# Register a wav2letter engine with Talon. The commented lines are
# alternative models kept for quick switching.
# engine = W2lEngine(model="en_US", debug=True)
# engine = W2lEngine(model="en_US-sconv-beta5", debug=True)
engine = W2lEngine(model="en_US-sconv-large-b2", debug=True)
# engine = W2lEngine(model="en_US-sconv-beta6", debug=True)
speech_system.add_engine(engine)

# set the default engine
ctx = Context()
ctx.settings = {
    "speech.engine": "wav2letter",
}
# webspeech = WebSpeechEngine()
# speech_system.add_engine(webspeech)
# open http://localhost:7419 in the browser (chrome, firefox) for this to work, and set in
# something.talon:
# mode: dictation
# -
# settings():
# speech.engine = 'webspeech'
# speech.language = '' # some supported language
| StarcoderdataPython |
8181917 | # Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
# This file is automatically generated by mkgrokdump and should not
# be modified manually.
# List of known V8 instance types.
INSTANCE_TYPES = {
0: "INTERNALIZED_STRING_TYPE",
2: "EXTERNAL_INTERNALIZED_STRING_TYPE",
8: "ONE_BYTE_INTERNALIZED_STRING_TYPE",
10: "EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
18: "EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE",
34: "UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE",
42: "UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
50: "UNCACHED_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE",
64: "STRING_TYPE",
65: "CONS_STRING_TYPE",
66: "EXTERNAL_STRING_TYPE",
67: "SLICED_STRING_TYPE",
69: "THIN_STRING_TYPE",
72: "ONE_BYTE_STRING_TYPE",
73: "CONS_ONE_BYTE_STRING_TYPE",
74: "EXTERNAL_ONE_BYTE_STRING_TYPE",
75: "SLICED_ONE_BYTE_STRING_TYPE",
77: "THIN_ONE_BYTE_STRING_TYPE",
82: "EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE",
98: "UNCACHED_EXTERNAL_STRING_TYPE",
106: "UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE",
114: "UNCACHED_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE",
128: "SYMBOL_TYPE",
129: "HEAP_NUMBER_TYPE",
130: "BIGINT_TYPE",
131: "ODDBALL_TYPE",
132: "MAP_TYPE",
133: "CODE_TYPE",
134: "MUTABLE_HEAP_NUMBER_TYPE",
135: "FOREIGN_TYPE",
136: "BYTE_ARRAY_TYPE",
137: "BYTECODE_ARRAY_TYPE",
138: "FREE_SPACE_TYPE",
139: "FIXED_INT8_ARRAY_TYPE",
140: "FIXED_UINT8_ARRAY_TYPE",
141: "FIXED_INT16_ARRAY_TYPE",
142: "FIXED_UINT16_ARRAY_TYPE",
143: "FIXED_INT32_ARRAY_TYPE",
144: "FIXED_UINT32_ARRAY_TYPE",
145: "FIXED_FLOAT32_ARRAY_TYPE",
146: "FIXED_FLOAT64_ARRAY_TYPE",
147: "FIXED_UINT8_CLAMPED_ARRAY_TYPE",
148: "FIXED_BIGINT64_ARRAY_TYPE",
149: "FIXED_BIGUINT64_ARRAY_TYPE",
150: "FIXED_DOUBLE_ARRAY_TYPE",
151: "FEEDBACK_METADATA_TYPE",
152: "FILLER_TYPE",
153: "ACCESS_CHECK_INFO_TYPE",
154: "ACCESSOR_INFO_TYPE",
155: "ACCESSOR_PAIR_TYPE",
156: "ALIASED_ARGUMENTS_ENTRY_TYPE",
157: "ALLOCATION_MEMENTO_TYPE",
158: "ASYNC_GENERATOR_REQUEST_TYPE",
159: "DEBUG_INFO_TYPE",
160: "FUNCTION_TEMPLATE_INFO_TYPE",
161: "INTERCEPTOR_INFO_TYPE",
162: "INTERPRETER_DATA_TYPE",
163: "MODULE_INFO_ENTRY_TYPE",
164: "MODULE_TYPE",
165: "OBJECT_TEMPLATE_INFO_TYPE",
166: "PROMISE_CAPABILITY_TYPE",
167: "PROMISE_REACTION_TYPE",
168: "PROTOTYPE_INFO_TYPE",
169: "SCRIPT_TYPE",
170: "STACK_FRAME_INFO_TYPE",
171: "TUPLE2_TYPE",
172: "TUPLE3_TYPE",
173: "ARRAY_BOILERPLATE_DESCRIPTION_TYPE",
174: "WASM_DEBUG_INFO_TYPE",
175: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
176: "CALLABLE_TASK_TYPE",
177: "CALLBACK_TASK_TYPE",
178: "PROMISE_FULFILL_REACTION_JOB_TASK_TYPE",
179: "PROMISE_REJECT_REACTION_JOB_TASK_TYPE",
180: "PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE",
181: "WEAK_FACTORY_CLEANUP_JOB_TASK_TYPE",
182: "MICROTASK_QUEUE_TYPE",
183: "ALLOCATION_SITE_TYPE",
184: "FIXED_ARRAY_TYPE",
185: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
186: "HASH_TABLE_TYPE",
187: "ORDERED_HASH_MAP_TYPE",
188: "ORDERED_HASH_SET_TYPE",
189: "ORDERED_NAME_DICTIONARY_TYPE",
190: "NAME_DICTIONARY_TYPE",
191: "GLOBAL_DICTIONARY_TYPE",
192: "NUMBER_DICTIONARY_TYPE",
193: "SIMPLE_NUMBER_DICTIONARY_TYPE",
194: "STRING_TABLE_TYPE",
195: "EPHEMERON_HASH_TABLE_TYPE",
196: "SCOPE_INFO_TYPE",
197: "SCRIPT_CONTEXT_TABLE_TYPE",
198: "AWAIT_CONTEXT_TYPE",
199: "BLOCK_CONTEXT_TYPE",
200: "CATCH_CONTEXT_TYPE",
201: "DEBUG_EVALUATE_CONTEXT_TYPE",
202: "EVAL_CONTEXT_TYPE",
203: "FUNCTION_CONTEXT_TYPE",
204: "MODULE_CONTEXT_TYPE",
205: "NATIVE_CONTEXT_TYPE",
206: "SCRIPT_CONTEXT_TYPE",
207: "WITH_CONTEXT_TYPE",
208: "WEAK_FIXED_ARRAY_TYPE",
209: "DESCRIPTOR_ARRAY_TYPE",
210: "TRANSITION_ARRAY_TYPE",
211: "CALL_HANDLER_INFO_TYPE",
212: "CELL_TYPE",
213: "CODE_DATA_CONTAINER_TYPE",
214: "FEEDBACK_CELL_TYPE",
215: "FEEDBACK_VECTOR_TYPE",
216: "LOAD_HANDLER_TYPE",
217: "PRE_PARSED_SCOPE_DATA_TYPE",
218: "PROPERTY_ARRAY_TYPE",
219: "PROPERTY_CELL_TYPE",
220: "SHARED_FUNCTION_INFO_TYPE",
221: "SMALL_ORDERED_HASH_MAP_TYPE",
222: "SMALL_ORDERED_HASH_SET_TYPE",
223: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
224: "STORE_HANDLER_TYPE",
225: "UNCOMPILED_DATA_WITHOUT_PRE_PARSED_SCOPE_TYPE",
226: "UNCOMPILED_DATA_WITH_PRE_PARSED_SCOPE_TYPE",
227: "WEAK_ARRAY_LIST_TYPE",
1024: "JS_PROXY_TYPE",
1025: "JS_GLOBAL_OBJECT_TYPE",
1026: "JS_GLOBAL_PROXY_TYPE",
1027: "JS_MODULE_NAMESPACE_TYPE",
1040: "JS_SPECIAL_API_OBJECT_TYPE",
1041: "JS_VALUE_TYPE",
1056: "JS_API_OBJECT_TYPE",
1057: "JS_OBJECT_TYPE",
1058: "JS_ARGUMENTS_TYPE",
1059: "JS_ARRAY_BUFFER_TYPE",
1060: "JS_ARRAY_ITERATOR_TYPE",
1061: "JS_ARRAY_TYPE",
1062: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
1063: "JS_ASYNC_FUNCTION_OBJECT_TYPE",
1064: "JS_ASYNC_GENERATOR_OBJECT_TYPE",
1065: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
1066: "JS_DATE_TYPE",
1067: "JS_ERROR_TYPE",
1068: "JS_GENERATOR_OBJECT_TYPE",
1069: "JS_MAP_TYPE",
1070: "JS_MAP_KEY_ITERATOR_TYPE",
1071: "JS_MAP_KEY_VALUE_ITERATOR_TYPE",
1072: "JS_MAP_VALUE_ITERATOR_TYPE",
1073: "JS_MESSAGE_OBJECT_TYPE",
1074: "JS_PROMISE_TYPE",
1075: "JS_REGEXP_TYPE",
1076: "JS_REGEXP_STRING_ITERATOR_TYPE",
1077: "JS_SET_TYPE",
1078: "JS_SET_KEY_VALUE_ITERATOR_TYPE",
1079: "JS_SET_VALUE_ITERATOR_TYPE",
1080: "JS_STRING_ITERATOR_TYPE",
1081: "JS_WEAK_CELL_TYPE",
1082: "JS_WEAK_REF_TYPE",
1083: "JS_WEAK_FACTORY_CLEANUP_ITERATOR_TYPE",
1084: "JS_WEAK_FACTORY_TYPE",
1085: "JS_WEAK_MAP_TYPE",
1086: "JS_WEAK_SET_TYPE",
1087: "JS_TYPED_ARRAY_TYPE",
1088: "JS_DATA_VIEW_TYPE",
1089: "JS_INTL_V8_BREAK_ITERATOR_TYPE",
1090: "JS_INTL_COLLATOR_TYPE",
1091: "JS_INTL_DATE_TIME_FORMAT_TYPE",
1092: "JS_INTL_LIST_FORMAT_TYPE",
1093: "JS_INTL_LOCALE_TYPE",
1094: "JS_INTL_NUMBER_FORMAT_TYPE",
1095: "JS_INTL_PLURAL_RULES_TYPE",
1096: "JS_INTL_RELATIVE_TIME_FORMAT_TYPE",
1097: "JS_INTL_SEGMENT_ITERATOR_TYPE",
1098: "JS_INTL_SEGMENTER_TYPE",
1099: "WASM_EXCEPTION_TYPE",
1100: "WASM_GLOBAL_TYPE",
1101: "WASM_INSTANCE_TYPE",
1102: "WASM_MEMORY_TYPE",
1103: "WASM_MODULE_TYPE",
1104: "WASM_TABLE_TYPE",
1105: "JS_BOUND_FUNCTION_TYPE",
1106: "JS_FUNCTION_TYPE",
}
# List of known V8 maps.
KNOWN_MAPS = {
("RO_SPACE", 0x00139): (138, "FreeSpaceMap"),
("RO_SPACE", 0x00189): (132, "MetaMap"),
("RO_SPACE", 0x00209): (131, "NullMap"),
("RO_SPACE", 0x00279): (209, "DescriptorArrayMap"),
("RO_SPACE", 0x002d9): (208, "WeakFixedArrayMap"),
("RO_SPACE", 0x00329): (152, "OnePointerFillerMap"),
("RO_SPACE", 0x00379): (152, "TwoPointerFillerMap"),
("RO_SPACE", 0x003f9): (131, "UninitializedMap"),
("RO_SPACE", 0x00469): (8, "OneByteInternalizedStringMap"),
("RO_SPACE", 0x00509): (131, "UndefinedMap"),
("RO_SPACE", 0x00569): (129, "HeapNumberMap"),
("RO_SPACE", 0x005e9): (131, "TheHoleMap"),
("RO_SPACE", 0x00691): (131, "BooleanMap"),
("RO_SPACE", 0x00769): (136, "ByteArrayMap"),
("RO_SPACE", 0x007b9): (184, "FixedArrayMap"),
("RO_SPACE", 0x00809): (184, "FixedCOWArrayMap"),
("RO_SPACE", 0x00859): (186, "HashTableMap"),
("RO_SPACE", 0x008a9): (128, "SymbolMap"),
("RO_SPACE", 0x008f9): (72, "OneByteStringMap"),
("RO_SPACE", 0x00949): (196, "ScopeInfoMap"),
("RO_SPACE", 0x00999): (220, "SharedFunctionInfoMap"),
("RO_SPACE", 0x009e9): (133, "CodeMap"),
("RO_SPACE", 0x00a39): (203, "FunctionContextMap"),
("RO_SPACE", 0x00a89): (212, "CellMap"),
("RO_SPACE", 0x00ad9): (219, "GlobalPropertyCellMap"),
("RO_SPACE", 0x00b29): (135, "ForeignMap"),
("RO_SPACE", 0x00b79): (210, "TransitionArrayMap"),
("RO_SPACE", 0x00bc9): (215, "FeedbackVectorMap"),
("RO_SPACE", 0x00c69): (131, "ArgumentsMarkerMap"),
("RO_SPACE", 0x00d09): (131, "ExceptionMap"),
("RO_SPACE", 0x00da9): (131, "TerminationExceptionMap"),
("RO_SPACE", 0x00e51): (131, "OptimizedOutMap"),
("RO_SPACE", 0x00ef1): (131, "StaleRegisterMap"),
("RO_SPACE", 0x00f61): (205, "NativeContextMap"),
("RO_SPACE", 0x00fb1): (204, "ModuleContextMap"),
("RO_SPACE", 0x01001): (202, "EvalContextMap"),
("RO_SPACE", 0x01051): (206, "ScriptContextMap"),
("RO_SPACE", 0x010a1): (198, "AwaitContextMap"),
("RO_SPACE", 0x010f1): (199, "BlockContextMap"),
("RO_SPACE", 0x01141): (200, "CatchContextMap"),
("RO_SPACE", 0x01191): (207, "WithContextMap"),
("RO_SPACE", 0x011e1): (201, "DebugEvaluateContextMap"),
("RO_SPACE", 0x01231): (197, "ScriptContextTableMap"),
("RO_SPACE", 0x01281): (151, "FeedbackMetadataArrayMap"),
("RO_SPACE", 0x012d1): (184, "ArrayListMap"),
("RO_SPACE", 0x01321): (130, "BigIntMap"),
("RO_SPACE", 0x01371): (185, "ObjectBoilerplateDescriptionMap"),
("RO_SPACE", 0x013c1): (137, "BytecodeArrayMap"),
("RO_SPACE", 0x01411): (213, "CodeDataContainerMap"),
("RO_SPACE", 0x01461): (150, "FixedDoubleArrayMap"),
("RO_SPACE", 0x014b1): (191, "GlobalDictionaryMap"),
("RO_SPACE", 0x01501): (214, "ManyClosuresCellMap"),
("RO_SPACE", 0x01551): (184, "ModuleInfoMap"),
("RO_SPACE", 0x015a1): (134, "MutableHeapNumberMap"),
("RO_SPACE", 0x015f1): (190, "NameDictionaryMap"),
("RO_SPACE", 0x01641): (214, "NoClosuresCellMap"),
("RO_SPACE", 0x01691): (192, "NumberDictionaryMap"),
("RO_SPACE", 0x016e1): (214, "OneClosureCellMap"),
("RO_SPACE", 0x01731): (187, "OrderedHashMapMap"),
("RO_SPACE", 0x01781): (188, "OrderedHashSetMap"),
("RO_SPACE", 0x017d1): (189, "OrderedNameDictionaryMap"),
("RO_SPACE", 0x01821): (217, "PreParsedScopeDataMap"),
("RO_SPACE", 0x01871): (218, "PropertyArrayMap"),
("RO_SPACE", 0x018c1): (211, "SideEffectCallHandlerInfoMap"),
("RO_SPACE", 0x01911): (211, "SideEffectFreeCallHandlerInfoMap"),
("RO_SPACE", 0x01961): (211, "NextCallSideEffectFreeCallHandlerInfoMap"),
("RO_SPACE", 0x019b1): (193, "SimpleNumberDictionaryMap"),
("RO_SPACE", 0x01a01): (184, "SloppyArgumentsElementsMap"),
("RO_SPACE", 0x01a51): (221, "SmallOrderedHashMapMap"),
("RO_SPACE", 0x01aa1): (222, "SmallOrderedHashSetMap"),
("RO_SPACE", 0x01af1): (223, "SmallOrderedNameDictionaryMap"),
("RO_SPACE", 0x01b41): (194, "StringTableMap"),
("RO_SPACE", 0x01b91): (225, "UncompiledDataWithoutPreParsedScopeMap"),
("RO_SPACE", 0x01be1): (226, "UncompiledDataWithPreParsedScopeMap"),
("RO_SPACE", 0x01c31): (227, "WeakArrayListMap"),
("RO_SPACE", 0x01c81): (195, "EphemeronHashTableMap"),
("RO_SPACE", 0x01cd1): (106, "NativeSourceStringMap"),
("RO_SPACE", 0x01d21): (64, "StringMap"),
("RO_SPACE", 0x01d71): (73, "ConsOneByteStringMap"),
("RO_SPACE", 0x01dc1): (65, "ConsStringMap"),
("RO_SPACE", 0x01e11): (77, "ThinOneByteStringMap"),
("RO_SPACE", 0x01e61): (69, "ThinStringMap"),
("RO_SPACE", 0x01eb1): (67, "SlicedStringMap"),
("RO_SPACE", 0x01f01): (75, "SlicedOneByteStringMap"),
("RO_SPACE", 0x01f51): (66, "ExternalStringMap"),
("RO_SPACE", 0x01fa1): (82, "ExternalStringWithOneByteDataMap"),
("RO_SPACE", 0x01ff1): (74, "ExternalOneByteStringMap"),
("RO_SPACE", 0x02041): (98, "UncachedExternalStringMap"),
("RO_SPACE", 0x02091): (114, "UncachedExternalStringWithOneByteDataMap"),
("RO_SPACE", 0x020e1): (0, "InternalizedStringMap"),
("RO_SPACE", 0x02131): (2, "ExternalInternalizedStringMap"),
("RO_SPACE", 0x02181): (18, "ExternalInternalizedStringWithOneByteDataMap"),
("RO_SPACE", 0x021d1): (10, "ExternalOneByteInternalizedStringMap"),
("RO_SPACE", 0x02221): (34, "UncachedExternalInternalizedStringMap"),
("RO_SPACE", 0x02271): (50, "UncachedExternalInternalizedStringWithOneByteDataMap"),
("RO_SPACE", 0x022c1): (42, "UncachedExternalOneByteInternalizedStringMap"),
("RO_SPACE", 0x02311): (106, "UncachedExternalOneByteStringMap"),
("RO_SPACE", 0x02361): (140, "FixedUint8ArrayMap"),
("RO_SPACE", 0x023b1): (139, "FixedInt8ArrayMap"),
("RO_SPACE", 0x02401): (142, "FixedUint16ArrayMap"),
("RO_SPACE", 0x02451): (141, "FixedInt16ArrayMap"),
("RO_SPACE", 0x024a1): (144, "FixedUint32ArrayMap"),
("RO_SPACE", 0x024f1): (143, "FixedInt32ArrayMap"),
("RO_SPACE", 0x02541): (145, "FixedFloat32ArrayMap"),
("RO_SPACE", 0x02591): (146, "FixedFloat64ArrayMap"),
("RO_SPACE", 0x025e1): (147, "FixedUint8ClampedArrayMap"),
("RO_SPACE", 0x02631): (149, "FixedBigUint64ArrayMap"),
("RO_SPACE", 0x02681): (148, "FixedBigInt64ArrayMap"),
("RO_SPACE", 0x026d1): (131, "SelfReferenceMarkerMap"),
("RO_SPACE", 0x02739): (171, "Tuple2Map"),
("RO_SPACE", 0x027d9): (173, "ArrayBoilerplateDescriptionMap"),
("RO_SPACE", 0x02b19): (161, "InterceptorInfoMap"),
("RO_SPACE", 0x05039): (153, "AccessCheckInfoMap"),
("RO_SPACE", 0x05089): (154, "AccessorInfoMap"),
("RO_SPACE", 0x050d9): (155, "AccessorPairMap"),
("RO_SPACE", 0x05129): (156, "AliasedArgumentsEntryMap"),
("RO_SPACE", 0x05179): (157, "AllocationMementoMap"),
("RO_SPACE", 0x051c9): (158, "AsyncGeneratorRequestMap"),
("RO_SPACE", 0x05219): (159, "DebugInfoMap"),
("RO_SPACE", 0x05269): (160, "FunctionTemplateInfoMap"),
("RO_SPACE", 0x052b9): (162, "InterpreterDataMap"),
("RO_SPACE", 0x05309): (163, "ModuleInfoEntryMap"),
("RO_SPACE", 0x05359): (164, "ModuleMap"),
("RO_SPACE", 0x053a9): (165, "ObjectTemplateInfoMap"),
("RO_SPACE", 0x053f9): (166, "PromiseCapabilityMap"),
("RO_SPACE", 0x05449): (167, "PromiseReactionMap"),
("RO_SPACE", 0x05499): (168, "PrototypeInfoMap"),
("RO_SPACE", 0x054e9): (169, "ScriptMap"),
("RO_SPACE", 0x05539): (170, "StackFrameInfoMap"),
("RO_SPACE", 0x05589): (172, "Tuple3Map"),
("RO_SPACE", 0x055d9): (174, "WasmDebugInfoMap"),
("RO_SPACE", 0x05629): (175, "WasmExportedFunctionDataMap"),
("RO_SPACE", 0x05679): (176, "CallableTaskMap"),
("RO_SPACE", 0x056c9): (177, "CallbackTaskMap"),
("RO_SPACE", 0x05719): (178, "PromiseFulfillReactionJobTaskMap"),
("RO_SPACE", 0x05769): (179, "PromiseRejectReactionJobTaskMap"),
("RO_SPACE", 0x057b9): (180, "PromiseResolveThenableJobTaskMap"),
("RO_SPACE", 0x05809): (181, "WeakFactoryCleanupJobTaskMap"),
("RO_SPACE", 0x05859): (182, "MicrotaskQueueMap"),
("RO_SPACE", 0x058a9): (183, "AllocationSiteWithWeakNextMap"),
("RO_SPACE", 0x058f9): (183, "AllocationSiteWithoutWeakNextMap"),
("RO_SPACE", 0x05949): (216, "LoadHandler1Map"),
("RO_SPACE", 0x05999): (216, "LoadHandler2Map"),
("RO_SPACE", 0x059e9): (216, "LoadHandler3Map"),
("RO_SPACE", 0x05a39): (224, "StoreHandler0Map"),
("RO_SPACE", 0x05a89): (224, "StoreHandler1Map"),
("RO_SPACE", 0x05ad9): (224, "StoreHandler2Map"),
("RO_SPACE", 0x05b29): (224, "StoreHandler3Map"),
("MAP_SPACE", 0x00139): (1057, "ExternalMap"),
("MAP_SPACE", 0x00189): (1073, "JSMessageObjectMap"),
}
# List of known V8 objects.
KNOWN_OBJECTS = {
("RO_SPACE", 0x001d9): "NullValue",
("RO_SPACE", 0x00259): "EmptyDescriptorArray",
("RO_SPACE", 0x002c9): "EmptyWeakFixedArray",
("RO_SPACE", 0x003c9): "UninitializedValue",
("RO_SPACE", 0x004d9): "UndefinedValue",
("RO_SPACE", 0x00559): "NanValue",
("RO_SPACE", 0x005b9): "TheHoleValue",
("RO_SPACE", 0x00651): "HoleNanValue",
("RO_SPACE", 0x00661): "TrueValue",
("RO_SPACE", 0x00711): "FalseValue",
("RO_SPACE", 0x00759): "empty_string",
("RO_SPACE", 0x00c19): "EmptyScopeInfo",
("RO_SPACE", 0x00c29): "EmptyFixedArray",
("RO_SPACE", 0x00c39): "ArgumentsMarker",
("RO_SPACE", 0x00cd9): "Exception",
("RO_SPACE", 0x00d79): "TerminationException",
("RO_SPACE", 0x00e21): "OptimizedOut",
("RO_SPACE", 0x00ec1): "StaleRegister",
("RO_SPACE", 0x02721): "EmptyEnumCache",
("RO_SPACE", 0x02789): "EmptyPropertyArray",
("RO_SPACE", 0x02799): "EmptyByteArray",
("RO_SPACE", 0x027a9): "EmptyObjectBoilerplateDescription",
("RO_SPACE", 0x027c1): "EmptyArrayBoilerplateDescription",
("RO_SPACE", 0x02829): "EmptyFixedUint8Array",
("RO_SPACE", 0x02849): "EmptyFixedInt8Array",
("RO_SPACE", 0x02869): "EmptyFixedUint16Array",
("RO_SPACE", 0x02889): "EmptyFixedInt16Array",
("RO_SPACE", 0x028a9): "EmptyFixedUint32Array",
("RO_SPACE", 0x028c9): "EmptyFixedInt32Array",
("RO_SPACE", 0x028e9): "EmptyFixedFloat32Array",
("RO_SPACE", 0x02909): "EmptyFixedFloat64Array",
("RO_SPACE", 0x02929): "EmptyFixedUint8ClampedArray",
("RO_SPACE", 0x02949): "EmptyFixedBigUint64Array",
("RO_SPACE", 0x02969): "EmptyFixedBigInt64Array",
("RO_SPACE", 0x02989): "EmptySloppyArgumentsElements",
("RO_SPACE", 0x029a9): "EmptySlowElementDictionary",
("RO_SPACE", 0x029f1): "EmptyOrderedHashMap",
("RO_SPACE", 0x02a19): "EmptyOrderedHashSet",
("RO_SPACE", 0x02a41): "EmptyFeedbackMetadata",
("RO_SPACE", 0x02a51): "EmptyPropertyCell",
("RO_SPACE", 0x02a79): "EmptyPropertyDictionary",
("RO_SPACE", 0x02ac9): "NoOpInterceptorInfo",
("RO_SPACE", 0x02b69): "EmptyWeakArrayList",
("RO_SPACE", 0x02b81): "InfinityValue",
("RO_SPACE", 0x02b91): "MinusZeroValue",
("RO_SPACE", 0x02ba1): "MinusInfinityValue",
("RO_SPACE", 0x02bb1): "SelfReferenceMarker",
("RO_SPACE", 0x02c09): "OffHeapTrampolineRelocationInfo",
("RO_SPACE", 0x02c21): "HashSeed",
("OLD_SPACE", 0x00139): "ArgumentsIteratorAccessor",
("OLD_SPACE", 0x001a9): "ArrayLengthAccessor",
("OLD_SPACE", 0x00219): "BoundFunctionLengthAccessor",
("OLD_SPACE", 0x00289): "BoundFunctionNameAccessor",
("OLD_SPACE", 0x002f9): "ErrorStackAccessor",
("OLD_SPACE", 0x00369): "FunctionArgumentsAccessor",
("OLD_SPACE", 0x003d9): "FunctionCallerAccessor",
("OLD_SPACE", 0x00449): "FunctionNameAccessor",
("OLD_SPACE", 0x004b9): "FunctionLengthAccessor",
("OLD_SPACE", 0x00529): "FunctionPrototypeAccessor",
("OLD_SPACE", 0x00599): "StringLengthAccessor",
("OLD_SPACE", 0x00609): "InvalidPrototypeValidityCell",
("OLD_SPACE", 0x00619): "EmptyScript",
("OLD_SPACE", 0x00699): "ManyClosuresCell",
("OLD_SPACE", 0x006a9): "ArrayConstructorProtector",
("OLD_SPACE", 0x006b9): "NoElementsProtector",
("OLD_SPACE", 0x006e1): "IsConcatSpreadableProtector",
("OLD_SPACE", 0x006f1): "ArraySpeciesProtector",
("OLD_SPACE", 0x00719): "TypedArraySpeciesProtector",
("OLD_SPACE", 0x00741): "PromiseSpeciesProtector",
("OLD_SPACE", 0x00769): "StringLengthProtector",
("OLD_SPACE", 0x00779): "ArrayIteratorProtector",
("OLD_SPACE", 0x007a1): "ArrayBufferNeuteringProtector",
("OLD_SPACE", 0x007c9): "PromiseHookProtector",
("OLD_SPACE", 0x007f1): "PromiseResolveProtector",
("OLD_SPACE", 0x00801): "MapIteratorProtector",
("OLD_SPACE", 0x00829): "PromiseThenProtector",
("OLD_SPACE", 0x00851): "SetIteratorProtector",
("OLD_SPACE", 0x00879): "StringIteratorProtector",
("OLD_SPACE", 0x008a1): "SingleCharacterStringCache",
("OLD_SPACE", 0x010b1): "StringSplitCache",
("OLD_SPACE", 0x018c1): "RegExpMultipleCache",
("OLD_SPACE", 0x020d1): "DefaultMicrotaskQueue",
("OLD_SPACE", 0x020e9): "BuiltinsConstantsTable",
}
# List of known V8 Frame Markers.
FRAME_MARKERS = (
"ENTRY",
"CONSTRUCT_ENTRY",
"EXIT",
"OPTIMIZED",
"WASM_COMPILED",
"WASM_TO_JS",
"JS_TO_WASM",
"WASM_INTERPRETER_ENTRY",
"C_WASM_ENTRY",
"WASM_COMPILE_LAZY",
"INTERPRETED",
"STUB",
"BUILTIN_CONTINUATION",
"JAVA_SCRIPT_BUILTIN_CONTINUATION",
"JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH",
"INTERNAL",
"CONSTRUCT",
"ARGUMENTS_ADAPTOR",
"BUILTIN",
"BUILTIN_EXIT",
"NATIVE",
)
# This set of constants is generated from a shipping build.
| StarcoderdataPython |
8045793 | <filename>stage1/DataSet.py
#
#author: <NAME>
#Project Description: This repository contains source code for semantically segmenting WSIs; however, it could be easily
# adapted for other domains such as natural image segmentation
# File Description: This file is used to create data tuples
#==============================================================================
import cv2
import torch.utils.data
from PIL import Image
class MyDataset(torch.utils.data.Dataset):
    """Dataset pairing images with segmentation label masks.

    Images are loaded with OpenCV as BGR arrays; labels are loaded as
    single-channel grayscale masks.
    """

    def __init__(self, imList, labelList, transform=None):
        """
        Args:
            imList: list of image file paths.
            labelList: list of label-mask file paths, parallel to imList.
            transform: optional callable applied as transform(image, label),
                expected to return the transformed (image, label) pair.
        """
        self.imList = imList
        self.labelList = labelList
        self.transform = transform

    def __len__(self):
        # One sample per image path.
        return len(self.imList)

    def __getitem__(self, idx):
        image_name = self.imList[idx]
        label_name = self.labelList[idx]
        image = cv2.imread(image_name)      # BGR, shape (H, W, 3)
        label = cv2.imread(label_name, 0)   # grayscale mask, shape (H, W)
        # cv2.imread returns None on failure; fail loudly here rather than
        # letting a silent None propagate into the transform/training loop.
        if image is None:
            raise FileNotFoundError("Unable to read image: %s" % image_name)
        if label is None:
            raise FileNotFoundError("Unable to read label: %s" % label_name)
        if self.transform:
            [image, label] = self.transform(image, label)
        return (image, label)
351828 | # Generated by Django 2.2.7 on 2019-11-27 15:22
# Modified by hand to ensure that initial dates of a salary grade change are the start date of the financial year
from datetime import date
from django.db import migrations, models
import django.utils.timezone
def set_initial_salarygradechange_date(apps, schema_editor):
    """Forward data migration: stamp every existing SalaryGradeChange with
    1 August of its salary band's year (the start of the financial year).
    """
    # Use the historical model state, not the live model class.
    SalaryGradeChange = apps.get_model('rse', 'SalaryGradeChange')
    for sgc in SalaryGradeChange.objects.all():
        sgc.date = date(sgc.salary_band.year.year, 8, 1) #sgc.salary_band.year.start_date()
        sgc.save()
def revert_initial_salarygradechange_date(apps, schema_editor):
    """Reverse data migration: reset each change date to today.

    NOTE(review): this does not restore the original pre-migration dates;
    the original values are not recorded anywhere.
    """
    SalaryGradeChange = apps.get_model('rse', 'SalaryGradeChange')
    for sgc in SalaryGradeChange.objects.all():
        sgc.date = django.utils.timezone.now().date()
        sgc.save()
class Migration(migrations.Migration):
    # Adds the 'date' field to SalaryGradeChange, fixes created_date to be
    # non-editable, and backfills dates via the functions above.
    dependencies = [
        ('rse', '0004_auto_20191124_1946'),
    ]
    operations = [
        migrations.AddField(
            model_name='salarygradechange',
            name='date',
            field=models.DateField(default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='rseallocation',
            name='created_date',
            field=models.DateTimeField(default=django.utils.timezone.now, editable=False),
        ),
        migrations.RunPython(set_initial_salarygradechange_date, revert_initial_salarygradechange_date),
    ]
| StarcoderdataPython |
3515408 | <reponame>jacob975/deep_learning<filename>std_code.py
#!/usr/bin/python3
'''
Abstract:
    This is a demo program: it prints a test string and contrasts a plain
    Python variable with a TensorFlow constant/Variable evaluated in a session.
Usage:
    std_code.py [test]
Output:
    1. the test text passed on the command line
    2. the value of y computed with and without TensorFlow
Editor:
Jacob975
##################################
# Python3 #
# This code is made in python3 #
##################################
20180104
####################################
update log
20180104 version alpha 1
The code demo good
20180204 version alpha 2
Move to python3 instead of python2
20180320 version alpha 3
1. add a "This is python3" warning
20180821 version alpha 4
1. Make all sentence with initial upper case letter.
20190121 version alpha 5
1. Add a new standard on 'argv'
20190522 version alpha 6
1. Specify the output.
'''
import tensorflow as tf
import time
import numpy as np
from sys import argv
#--------------------------------------------
# Main code
if __name__ == "__main__":
VERBOSE = 0
# Measure time
start_time = time.time()
#-----------------------------------
# Load argv
if len(argv) != 2:
print ("The number of arguments is wrong.")
print ("Usage: std_code.py [test]")
exit()
test_text = argv[1]
#-----------------------------------
# Print the test text
print (test_text)
# Compare the difference between normal variable and tensorflow node.
x = 1
y = x + 9
print (y)
x = tf.constant(1, name = "x")
y = tf.Variable(x+9, name = "y") # y save the key of the node.
model = tf.global_variables_initializer()
sess = tf.Session()
sess.run(model)
print (sess.run(y))
#-----------------------------------
# Measure time
elapsed_time = time.time() - start_time
print("Exiting Main Program, spending ", elapsed_time, "seconds.")
| StarcoderdataPython |
1925547 | import eventlet
eventlet.monkey_patch()
import requests
from .issue import Issue
def post(url, headers, payload):
    """POST ``payload`` as JSON and return the GraphQL ``data`` field.

    Returns None on a non-200 status, on a null/missing ``data`` field,
    or if the request does not complete within 20 seconds.
    """
    # Timeout(20, False): the False suppresses the timeout exception, so on
    # expiry the with-block is abandoned and we fall through to the timeout
    # message at the bottom.
    with eventlet.Timeout(20, False):
        r = requests.post(url, headers=headers, json=payload)
        if r.status_code != 200:
            return None
        # debug
        print(r.json())
        data = r.json()['data']
        if data is None:
            # should be error
            print('ERROR: {}'.format(r.json()))
            return None
        return data
    # timeout
    print('POST timeout, please try again later...')
    return None
def fetch_issues(token, repo_owner, repo_name):
    """Fetch every open issue (with labels and comments) of a repository
    via the GitHub GraphQL v4 API, following cursor-based pagination.

    Args:
        token: a GitHub bearer token.
        repo_owner: repository owner login.
        repo_name: repository name.

    Returns:
        A list of Issue objects; the list may be partial if a request
        fails mid-pagination.
    """
    issue_list = []
    url = 'https://api.github.com/graphql'
    headers = {'Authorization': 'bearer ' + token}
    # NOTE: the maximum limit of nodes is 500,000.
    # modify the query_str to fetch data you need
    # Double braces escape literal GraphQL braces for str.format; the three
    # format slots are owner, name, and the optional pagination cursor.
    query_str_fmt = '''
    query
    {{
    repository(owner: "{}", name: "{}") {{
    issues(first: 100, states: OPEN{}) {{
    totalCount
    edges {{
    node {{
    url
    title
    author {{
    login
    avatarUrl
    }}
    createdAt
    body
    labels(first: 20) {{
    totalCount
    edges {{
    node {{
    name
    }}
    }}
    }}
    comments(last: 100) {{
    totalCount
    edges {{
    node {{
    author {{
    login
    avatarUrl
    }}
    createdAt
    body
    }}
    }}
    }}
    }}
    cursor
    }}
    pageInfo {{
    endCursor
    hasNextPage
    }}
    }}
    }}
    }}
    '''
    cursor_fmt = ', after:"{}"'
    has_next_page = True
    end_cursor = ''
    index = 1
    # Page through the results 100 issues at a time.
    while has_next_page:
        if len(end_cursor) == 0:
            # First page: no cursor argument.
            query_first = query_str_fmt.format(repo_owner, repo_name, '')
            payload = { 'query': query_first }
        else:
            # Subsequent pages: resume after the last cursor.
            query_n = query_str_fmt.format(repo_owner, repo_name, cursor_fmt.format(end_cursor))
            payload = { 'query': query_n}
        print('>>>>>> fetching issues ... 100 x {}'.format(index))
        index += 1
        data = post(url, headers, payload)
        if data is None:
            # Request failed or timed out; return whatever we have so far.
            print('POST failed...')
            return issue_list
        #print(data)
        repository = data['repository']
        issues = repository['issues']
        total_count = issues['totalCount']
        print('total_count: {}'.format(total_count))
        for edge in issues['edges']:
            node = edge['node']
            issue = Issue(node)
            issue_list.append(issue)
        page_info = issues['pageInfo']
        end_cursor = page_info['endCursor']
        has_next_page = page_info['hasNextPage']
        print('has_next_page: {}'.format(has_next_page))
    return issue_list
| StarcoderdataPython |
5065075 | <reponame>dgarlitt/release_notes_generator
import sys
import mock
from nose.tools import eq_, ok_, assert_raises
import nose.tools
# from main import load_props, sanitize_path, parse_version_number
# class TestMain:
# @mock.patch('main.json')
# @mock.patch('__builtin__.open', spec=open, read_data='some raw json')
# def test_load_props(self, mock_open, mock_json):
# json_result = 'some json'
# mock_json.load.return_value = json_result
# props = load_props()
# ok_(mock_open.called)
# mock_open.assert_called_with('props.json', 'r')
# ok_(mock_json.load.called)
# mock_json.load.assert_called_with(mock_open().__enter__())
# eq_(json_result, props)
# def test_sanitize_path_adds_slash_to_path(sefl):
# path = "../path"
# eq_(path + "/", sanitize_path(path))
# @mock.patch.object(sys, 'argv', ['', '1.2.3.4'])
# def test_parse_version_number_happy_path(self):
# pattern = "^1\.2\.3\.4$"
# actual = parse_version_number(pattern).__dict__
# eq_({'tag_name': '1.2.3.4'}, actual)
| StarcoderdataPython |
1986944 | <filename>mAP_COCO/get_gt_json.py
# -*- coding: utf-8 -*-
# @Time : 2021/9/20 下午3:30
# @Author : DaiPuWei
# @Email : <EMAIL>
# @File : get_gt_json.py
# @Software: PyCharm
"""
这是生成测试数据集每张图像中真实目标及其定位信息json文件的脚本
"""
import os
import cv2
import sys
import json
import argparse
import numpy as np
import xml.etree.ElementTree as ET
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from utils.model_utils import NpEncoder
from utils.model_utils import get_classes
# NOTE(review): this module-level default path is never read below;
# run_main() takes the class list path from the --classes_path argument.
classes_path = os.path.abspath("../model_data/voc_classes.txt")

# Command-line interface for the ground-truth JSON generator.
parser = argparse.ArgumentParser(description='get_gt_json parameters')
parser.add_argument('--dataset_dir', type=str,help="voc dataset dir")
parser.add_argument('--dataset_name', type=str)
parser.add_argument('--ext', type=str,default='.jpg')
parser.add_argument('--classes_path', type=str)
args = parser.parse_args()
def is_contain_object(xml_path):
    """Return True if the VOC annotation XML at ``xml_path`` contains at
    least one ``<object>`` element, False otherwise.

    Args:
        xml_path: path to a VOC-style XML annotation file.
    """
    # Parse the annotation and look for <object> children of the root.
    annotation_root = ET.parse(xml_path).getroot()
    return bool(annotation_root.findall('object'))
def parse_xml(xml_path):
    """Parse a VOC annotation XML file into class names and bounding boxes.

    Args:
        xml_path: path to a VOC-style XML annotation file.

    Returns:
        A list of [name, xmin, ymin, xmax, ymax] entries, one per object,
        with the four coordinates converted to float.
    """
    root = ET.parse(xml_path).getroot()
    parsed = []
    for obj in root.findall('object'):
        name = obj.find('name').text
        box = obj.find('bndbox')
        coords = [float(box.find(tag).text)
                  for tag in ('xmin', 'ymin', 'xmax', 'ymax')]
        parsed.append([name] + coords)
    return parsed
def run_main():
    """Build a COCO-style ground-truth JSON (images / annotations /
    categories) for the VOC-format validation split named on the command
    line, and write it to ./input/<dataset_name>/gt_result.json.
    """
    # Map each class name to an integer category id.
    classes_path = os.path.abspath(args.classes_path)  # fixed: was args.class_path (AttributeError)
    classes = get_classes(classes_path)
    cls2num_dict = dict(zip(classes, np.arange(len(classes))))
    # Output directory for the generated JSON.
    dataset_name = args.dataset_name
    input_dir = os.path.abspath("./input/{0}".format(dataset_name))
    if not os.path.exists(input_dir):
        os.makedirs(input_dir)
    dataset_dir = os.path.abspath(args.dataset_dir)
    ext = args.ext
    gt_result = {}
    gt_result_json_path = os.path.join(input_dir, 'gt_result.json')
    image_array = []
    annotation_array = []
    img_cnt = 0
    anno_cnt = 0
    with open(gt_result_json_path, 'w+') as f:
        # Read the image ids of the validation split.
        test_txt_path = os.path.join(dataset_dir, "ImageSets", "Main", "val.txt")
        image_ids = []
        with open(test_txt_path, 'r') as g:
            for line in g.readlines():
                image_ids.append(line.strip())
        annotation_dir = os.path.join(dataset_dir, "Annotations")
        # Fixed: was `image_dir = os.path` (the module object), which made the
        # os.path.join(image_dir, ...) below crash.  VOC layouts keep images
        # under "JPEGImages" -- TODO confirm this matches the dataset on disk.
        image_dir = os.path.join(dataset_dir, "JPEGImages")
        for image_id in image_ids:
            xml_path = os.path.join(annotation_dir, image_id + ".xml")
            image_path = os.path.join(image_dir, image_id + ext)
            image = cv2.imread(image_path)
            h, w, c = np.shape(image)
            image_array.append({'file_name': image_id + ext, 'id': img_cnt, 'width': w, 'height': h})
            if is_contain_object(xml_path):
                objects = parse_xml(xml_path)
                for obj in objects:
                    cls_name, xmin, ymin, xmax, ymax = obj
                    # COCO bbox format is [x, y, width, height].
                    w = int(xmax) - int(xmin)
                    h = int(ymax) - int(ymin)
                    annotation_array.append({'image_id': img_cnt,
                                             'iscrowd': 0,
                                             'bbox': [int(xmin), int(ymin), w, h],
                                             'area': int(w * h),
                                             "category_id": cls2num_dict[cls_name],
                                             'id': anno_cnt})
                    anno_cnt += 1
            img_cnt += 1
        gt_result['images'] = image_array
        gt_result["annotations"] = annotation_array
        gt_result["categories"] = [{"id": id, "name": cls_name} for cls_name, id in cls2num_dict.items()]
        # NpEncoder converts numpy scalar types to plain Python for json.
        gt_result_json_data = json.dumps(gt_result, indent=4, separators=(',', ': '), cls=NpEncoder)
        print(gt_result_json_data)
        f.write(gt_result_json_data)
    print("Test Dataset GroundTruth Result Conversion Completed!")
if __name__ == '__main__':
run_main() | StarcoderdataPython |
3365821 | """Neural style transfer (https://arxiv.org/abs/1508.06576) in PyTorch."""
from pathlib import Path
srgb_profile = (Path(__file__).resolve().parent / 'sRGB_Profile.icc').read_bytes()
del Path
from .style_transfer import STIterate, StyleTransfer
from .web_interface import WebInterface
from .cli import * | StarcoderdataPython |
3502699 | from collections import Counter
from util import read_puzzle_input
def num_questions_answered_by_group(group_input):
    """Count the distinct questions answered by *anyone* in a group.

    ``group_input`` is one group's block of answers: one line per person,
    each line a string of question letters.
    """
    # Strip separators so only question letters remain; we only need the
    # number of distinct letters, so a set suffices (Counter was overkill).
    questions_only = group_input.replace(" ", "").replace("\n", "")
    return len(set(questions_only))
def sum_questions_answered_by_group(puzzle_input):
    """Total, over all groups, of questions answered by anyone in each group.

    Groups in ``puzzle_input`` are separated by blank lines.
    """
    groups = puzzle_input.split("\n\n")
    return sum(map(num_questions_answered_by_group, groups))
def num_questions_answered_by_whole_group(group_input):
    """Count the questions that *every* member of the group answered.

    Blank lines are skipped.  Note: raises TypeError if the group has no
    non-empty lines (set.intersection needs at least one argument).
    """
    answer_sets = []
    for line in group_input.split("\n"):
        if line != "":
            answer_sets.append(set(line))
    common = set.intersection(*answer_sets)
    return len(common)
def sum_questions_answered_by_whole_group(puzzle_input):
    """Total, over all groups, of questions answered by *everyone* in each
    group.  Groups in ``puzzle_input`` are separated by blank lines.
    """
    return sum(
        num_questions_answered_by_whole_group(group)
        for group in puzzle_input.split("\n\n")
    )
if __name__ == "__main__":
puzzle_input = read_puzzle_input()
print(f"Part 1: {sum_questions_answered_by_group(puzzle_input)}")
print(f"Part 2: {sum_questions_answered_by_whole_group(puzzle_input)}")
| StarcoderdataPython |
3245069 | import re,os
import xml.etree.ElementTree as ET
from .Webby import Webby
from .Common import *
class Harvester(object):
    """Collects candidate web services ("webbies") from scan output.

    Each discovered service is stored in ``self.webbies`` as a tuple
    (ip, hostname, port); either ip or hostname may be the empty string.
    """
    def __init__(self,verbosity):
        # Set of (ip, hostname, port) tuples accumulated across harvests.
        self.webbies = set()
        self.verbosity = verbosity
    def harvest_nessus_dir(self,nessus_dir):
        """Recursively harvest every *.nessus file under ``nessus_dir``."""
        for dirpath,directories,files in os.walk(nessus_dir):
            for filename in [f for f in files if f.endswith('.nessus')]:
                self.harvest_nessus(os.path.join(dirpath,filename))
    def harvest_nessus(self,nessus_file):
        """Harvest web services from a single Nessus XML report."""
        if self.verbosity:
            print_info("Harvesting Nessus file '{fname}'".format(fname=nessus_file))
        tree = ET.parse(nessus_file)
        root = tree.getroot()
        for host in root.iter('ReportHost'):
            # Prefer the FQDN / IP host properties; fall back to the host
            # element's name attribute when the property is missing.
            try:
                hostname = host.find('./HostProperties/*[@name="host-fqdn"]').text
            except AttributeError:
                hostname = ""
            try:
                ip = host.find('./HostProperties/*[@name="host-ip"]').text
            except AttributeError:
                ip = host.attrib['name']
            # Plugin 10335 ("Nessus TCP scanner") items whose service name
            # looks web-related.
            for tcp_item in host.findall('./ReportItem[@pluginID="10335"]'):
                if re.search(r'(www|htt|web)',tcp_item.attrib['svc_name'],re.I):
                    self.webbies.add((ip,hostname,tcp_item.attrib['port']))
            # Also pick up items with explicitly web-ish service names.
            svc_names = ['www','https?','http?','http_proxy','http','https']
            for svc_name in svc_names:
                for www in host.findall('./ReportItem[@svc_name="%s"]' % svc_name):
                    self.webbies.add((ip,hostname,www.attrib['port']))
    def harvest_gnmap_dir(self,gnmap_dir):
        """Recursively harvest every *.gnmap file under ``gnmap_dir``."""
        for dirpath,directories,files in os.walk(gnmap_dir):
            for filename in [f for f in files if f.endswith('.gnmap')]:
                self.harvest_gnmap(os.path.join(dirpath,filename))
    def harvest_gnmap(self,gnmap_file):
        """Harvest web services from an nmap greppable-output file.

        NOTE(review): unlike the other harvest_* methods, this one returns
        self.webbies; callers relying on that return value exist, so the
        inconsistency is documented rather than changed.
        """
        if self.verbosity:
            print_info("Harvesting gnmap file {fname}".format(fname=gnmap_file))
        # Matches "Host: <ip> (<hostname>) Ports: <list>" lines.
        lineRE = re.compile(r'Host:\s+(?P<ip>([0-9]{1,3}\.?){4})\s+\((?P<host>[a-z0-9\._\-]*)\)\s+Ports:\s+(?P<ports>.*?)$',re.I)
        # Matches open TCP ports whose service name contains "http".
        portsRE = re.compile(r'(?P<port>[0-9]+)/+open/+tcp/+[a-z\-0-9]*http[^/]*',re.I)
        for line in filter(None,open(gnmap_file).read().split('\n')):
            x = lineRE.search(line)
            if x:
                openPorts = portsRE.findall(x.group('ports'))
                host = x.group('host') if x.group('host') else ""
                ip= x.group('ip') if x.group('ip') else ""
                if len(openPorts) > 0 and (ip or host):
                    for port in openPorts:
                        self.webbies.add((ip,host,port))
        return self.webbies
    def harvest_IL(self,IL_file):
        """Harvest targets from a generic input list of URLs or host:port
        entries (one per line)."""
        if self.verbosity:
            print_info("Harvesting generic input file {fname}".format(fname=IL_file))
        # Full URL form: proto://host:port
        urlRE =re.compile(r'(?P<proto>.*?)://(?P<host>.*?):(?P<port>[0-9]+)')
        # Bare host:port form.
        ipportRE = re.compile(r'(?P<host>.*?):(?P<port>[0-9]+)')
        for i,line in enumerate(filter(None,open(IL_file).read().split('\n'))):
            x = urlRE.search(line)
            host = ""
            port = ""
            if x:
                host = x.group('host')
                port = x.group('port')
            else:
                x = ipportRE.search(line)
                if x:
                    host = x.group('host')
                    port = x.group('port')
            if host and port:
                # Any letter in the host means it's a hostname, not an IP.
                if re.search('[a-zA-Z]',host):
                    self.webbies.add(("",host,port))
                else:
                    self.webbies.add((host,"",port))
            else:
                # NOTE(review): i is a 0-based index over *non-empty* lines,
                # so it may not match the file's physical line number.
                print_error("Error reading host from line {0}".format(i))
| StarcoderdataPython |
4849776 | <filename>EventDec/event_dec/test/test_model.py
import unittest
from unittest.mock import patch
import numpy as np
import event_dec
from event_dec.model import Model
class ModelTests(unittest.TestCase):
    @patch.object(event_dec.main.process_input, "input", create=True)
    def test_predict(self, input):
        """
        Test return type and edge cases
        """
        # NOTE(review): the second assignment overwrites the first mock
        # return_value, and the patched `input` is never actually consulted
        # because the arrays are passed to predict() directly.
        input.return_value = input_attend = np.array([[0, 1, 4]])
        input.return_value = input_notattend = np.array([[1, 0, 0]])
        model = Model()
        self.assertEqual(np.ndarray, type(model.predict(input=input_attend)), "Returns np ndarray")
        self.assertIsNotNone(model.predict(input=input_notattend), "Returns not None")
        self.assertIn(model.predict(input=input_attend), [0,1], "Returns either 0 or 1")
        # Expected label 1 = attend, 0 = not attend.
        expected_attend = 1
        actual_attend = model.predict(input=input_attend)
        self.assertEqual(expected_attend, actual_attend, "Attend event")
        expected_notattend = 0
        actual_notattend = model.predict(input=input_notattend)
        self.assertEqual(expected_notattend, actual_notattend, "Not attend event")
def main():
    # Discover and run all test cases defined in this module.
    unittest.main()
if __name__ == "__main__":
    main()
| StarcoderdataPython |
5036856 | <reponame>AIJIJI/devtools
#!/bin/python3
from setuptools import setup
from setuptools import find_packages
NAME = "devtools"
PACKAGES = [NAME] + ["%s.%s" % (NAME, i) for i in find_packages(NAME)]
LONG_DESC = '''Some useful helper-funcs for devpers.
Sub-package is the extension of corresponding package with the same name.
'''
setup(
name='AIJIdevtools',
version='1.4.5',
author='AIJI',
author_email='<EMAIL>',
description='Some useful helper-funcs for devpers',
long_description=LONG_DESC,
packages=PACKAGES,
install_requires=[
'sh',
'sqlparse',
'termcolor',
'pyJWT',
'requests',
],
url='https://github.com/AIJIJI/devtools',
classifiers=[
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX :: Linux",
"Development Status :: 3 - Alpha"
]
)
| StarcoderdataPython |
372672 | import argh
import argparse
from lithium.manage.commands.services import new
from lithium.manage.commands.clients import generate
from lithium.manage.commands.users import import_data
parser = argh.ArghParser()
# Register each command under its own sub-command namespace, e.g.
# "service new", "client generate", "user import-data".
parser.add_commands([new], namespace='service', title='Services related commands')
parser.add_commands([generate], namespace='client', title='Clients related commands')
parser.add_commands([import_data], namespace='user', title='Users related commands')
def main():
    # Dispatch to whichever sub-command was given on the command line.
    parser.dispatch()
if __name__ == "__main__":
    main()
| StarcoderdataPython |
6425374 | <reponame>fomartin/GeneticAlgorithm<gh_stars>0
from Habitat import *
from Organism import *
class PowerLawHabitat(Habitat):
    """Habitat whose fitness model is a power law:
    value = g0 * p0**g1 * p1**g2 * ...  for genes g and parameters p.
    """

    def _calculate_for_organism(self, organism, set_of_parameters):
        genes = organism.genes()
        # A power law needs one coefficient gene plus one exponent gene per
        # parameter; anything else is a malformed organism.
        if len(genes) - 1 != len(set_of_parameters):
            print("[ERROR] Power Law formula requires one more gene than parameters available." +
                  "Organism: " + str(organism) +
                  "Number of Genes: " + str(len(organism.genes())) +
                  "Number of Parameters " + str(len(set_of_parameters)))
            return []
        value = genes[0]
        for parameter, exponent in zip(set_of_parameters, genes[1:]):
            value = value * (parameter ** exponent)
        return value
| StarcoderdataPython |
3287239 | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from subprocess import check_output
from contextlib import contextmanager
import re
@contextmanager
def rewrite(fname):
    """Context manager for in-place edits of a text file.

    Reads all lines of *fname*, yields the list for the caller to mutate,
    and writes the (possibly modified) lines back on normal exit.  If the
    caller's block raises, the file is left untouched.
    """
    with open(fname, 'r') as f:
        lines = f.readlines()
    yield lines
    with open(fname, 'w') as f:
        f.write("".join(lines))
# Build pipeline: strip references, run make4ht/biber, post-process the HTML,
# then convert to reStructuredText and clean up pandoc artifacts.
# Clean dir: delete generated files, keeping *.tex sources.
out = check_output(["find", ".", "-type", "f",
                    "!", "-name", "*.tex", "-name", "bib.*", "-delete"])
# Extract refs used by bib.tex from the master BibTeX database.
out = check_output(["reference_strip","bib.tex",
                    "/Users/pataan/Dropbox/DPhil/Refs/references.bib",
                    "localrefs.bib"])
# Gen .bcf file
out = check_output(["make4ht", "-u", "bib.tex"])
# Gen .bbl file
out = check_output(["biber", "bib.bcf"])
# Compile (second make4ht pass now that the bibliography exists).
out = check_output(["make4ht", "-u", "bib.tex"])
# Insert space between journal and number
with rewrite("bib.html") as lines:
    for i, line in enumerate(lines):
        lines[i] = re.sub(r"</span>(\d)", r"</span> \1", line)
# Convert html->rst
out = check_output(["pandoc", "-o", "bib.rst", "bib.html"])
# RST post-processing
with rewrite("bib.rst") as lines:
    for i, line in enumerate(lines):
        # Remove "` <bib.html>`__\ " crap
        line = line.replace(r"` <bib.html>`__\ ", "")
        # Convert opening and closing "
        line = line.replace(r"“", '"')
        line = line.replace(r"”", '"')
        # Convert .. [ref] to [ref]_
        line = re.sub(r"^\s*(\[\w+\])", r".. \1",line)
        # Write
        lines[i] = line
#---------------------------------
| StarcoderdataPython |
337482 | """Methods used for generating the symmetry group. All of these
methods were adapted to python from their original fortran
implementations which can be found at:
https://github.com/msg-byu/symlib
"""
from copy import deepcopy
import numpy
import math
def get_concs_for_size(size,nspecies,res_concs,nB,concs):
    """Enumerate the allowed atom-count vectors for cells of a given size,
    subject to optional concentration restrictions.

    Rewritten from the get_conetration_list subroutine of:
    https://github.com/msg-byu/enumlib/blob/master/src/derivative_structure_generator.f90

    Args:
        size (int): the cell size in integer form.
        nspecies (int): the number of atomic species in the system.
        res_concs (bool): True if the concentrations are restricted.
        nB (int): the number of basis vectors being used.
        concs (list of list of int): per-species [min, max, denominator]
            concentration ranges (only used when res_concs is True).

    Returns:
        c_list (list of list of int): every atom-count vector summing to
        size*nB that satisfies the concentration restrictions.
    """
    eps = 1E-10
    from itertools import product
    if res_concs == True:
        # All species share a common denominator (taken from the first entry).
        denom = concs[0][2]
        # vol_table[i] = [min count, max count, total sites] for species i.
        vol_table = []
        for atom in concs:
            minc = min(atom[0:2])
            maxc = max(atom[0:2])
            vol_table.append([int(math.floor(float(minc)/denom*size*nB)),int(math.ceil(float(maxc)/denom*size*nB)),size*nB])
        n = vol_table[0][2]
        # Odometer-style enumeration: digit[i] is the number of steps for
        # species i, dig_cnt tracks the current step, label holds the values.
        digit = [vol_table[i][1]-vol_table[i][0] for i in range(len(vol_table))]
        dig_cnt = [0*i for i in range(len(vol_table))]
        k = len(vol_table)
        label = []
        minv = []
        maxv = []
        for i in range(k):
            label.append(list(range(vol_table[i][0],vol_table[i][1]+1)))
            minv.append(float(min([concs[i][0],concs[i][1]]))/concs[i][2])
            maxv.append(float(max([concs[i][0],concs[i][1]]))/concs[i][2])
        a = [label[i][0] for i in range(len(label))]
        c_list = []
        done = False
        while done == False:
            # Keep the current counts only if they fill the cell exactly and
            # each species' fraction falls inside its allowed window.
            if sum(a) == n:
                conc = []
                len_a = len(a)
                for i in range(len_a):
                    conc.append(a[i]/float(n))
                if not ((any(conc[i] < (minv[i]-eps) for i in range(len(minv)))) or (any(conc[i] > (maxv[i]+eps) for i in range(len(maxv))))):
                    c_list.append(deepcopy(a))
            # Advance the odometer from the least-significant digit; when a
            # digit rolls over, reset it and carry to the next one left.
            j = k-1
            done2 = False
            while done2 == False:
                if dig_cnt[j] != digit[j]:
                    done2 = True
                    break
                a[j] = label[j][0]
                dig_cnt[j] = 0
                j -= 1
                if j < 0:
                    done2 = True
                    break
            if j < 0:
                # Every digit rolled over: enumeration complete.
                done = True
                break
            dig_cnt[j] += 1
            a[j] = label[j][dig_cnt[j]]
    else:
        # Unrestricted: take every composition of size*nB into nspecies
        # non-negative parts.
        c_list = []
        crange = list(range(0,(size*nB)+1))
        aranges = []
        for i in range(nspecies):
            aranges.append(crange)
        for p in product(*aranges):
            if sum(p) == size*nB:
                c_list.append(list(p))
    return(c_list)
def _does_mapping_exist(v,this_type,atom_pos,atomType,eps):
"""Checks to see if a mapping exists between the vector v and the
position of any of the atoms of type "this_type". If a mapping
exists, then the logical "mapped" is returned .true., otherwise
.false.
Args:
v (list of float): Array of the position to check mapping for
this_type (int): Integer that indicates which type of atom that is
being checked.
atom_pos (array-like): 2D array of the positions of the basis
atoms.
atomType (list of int): Array of integers of the types of atoms in the
basis.
eps (float): Epsilon for checking equivalence.
Returns:
mapped (bool): True if mapping exists.
"""
mapped = False
for i, a_type in enumerate(atomType):
if a_type == this_type:
# if the coordinates are the same,
# their difference will be zero for every component
this_position = atom_pos[i]
if(numpy.allclose(v, this_position, rtol=0,atol=eps)):
mapped = True
break
return mapped
def _get_transformations(par_lat):
"""This routine generates the matrices for converting vectors from
lattice coordinates to cartesion coordinates and vice versa.
Args:
par_lat (array-like): A 2D array that contains the parent
lattice vectors
Returns:
prim_to_cart (numpy ndarray): The matrix that transforms from lattice to
cartesian coordinates.
cart_to_prim (numpy ndarray): The matrix that tranforms form cartesian to
lattice coordinates.
"""
prim_to_cart = numpy.transpose(deepcopy(par_lat))
cart_to_prim = numpy.linalg.inv(numpy.array(prim_to_cart))
return(prim_to_cart,cart_to_prim)
def bring_into_cell(vec,cart_to_latt,latt_to_cart,eps):
    """Translate a Cartesian point back into the home unit cell.

    The point is converted to lattice (fractional) coordinates, whole
    lattice vectors are added/subtracted until every component lies in
    [0, 1), and the result is converted back to Cartesian coordinates.

    Args:
        vec (list): the point's Cartesian position vector.
        cart_to_latt: matrix transforming Cartesian -> lattice coords.
        latt_to_cart: matrix transforming lattice -> Cartesian coords.
        eps (float): finite-precision tolerance.

    Returns:
        The translated point in Cartesian coordinates (list of float).
    """
    from numpy import matmul
    # Work in fractional coordinates.
    frac = matmul(cart_to_latt, vec).tolist()
    # Safety bound on the number of translation sweeps; the original
    # Fortran port added this to catch a suspected compiler bug.
    sweeps = 0
    max_sweeps = max(math.ceil(abs(max(frac))), math.ceil(abs(min(frac)))) * 2
    # Keep sweeping until every component is inside [0, 1) within eps.
    # (Entry test uses strict >, the per-component shift uses >=, matching
    # the original implementation.)
    while any(c > 1.0 - eps for c in frac) or any(c < 0.0 - eps for c in frac):
        sweeps = sweeps + 1
        for i, c in enumerate(frac):
            if c >= 1.0 - eps:
                frac[i] -= 1
            elif c < 0.0 - eps:
                frac[i] += 1
        if (sweeps > max_sweeps): #pragma: no cover
            print("ERROR: loop does not end in bring_into_cell. Probably compiler bug.")
            exit()
    # Back to the Cartesian representation.
    return matmul(latt_to_cart, frac).tolist()
def get_lattice_pointGroup(a_vecs, eps=1E-10):
    """Return the point group of the lattice (not the full space group of
    a crystal structure).

    Args:
        a_vecs (array-like): 2D array containing the parent lattice
            vectors as row vectors.
        eps (float, optional): finite-precision tolerance.

    Returns:
        lattpg_op (list): the point-group operations of the lattice as
        3x3 matrices in Cartesian coordinates.
    """
    inverse_avecs = numpy.linalg.inv(numpy.array(a_vecs))
    # Store the norms of the three lattice vectors
    norm_avecs = []
    for i in range(3):
        norm_avecs.append(numpy.linalg.norm(a_vecs[i]).tolist())
    # Decide how many lattice points to look in each direction to get all the
    # points in a sphere that contains all of the longest _primitive_ vectors
    cell_volume = abs(numpy.dot(a_vecs[0],numpy.cross(a_vecs[1],a_vecs[2])))
    max_norm = max([numpy.linalg.norm(i) for i in a_vecs])
    # n1..n3 bound the search box along each lattice direction.
    n1 = math.ceil(max_norm*numpy.linalg.norm(numpy.cross(a_vecs[1],a_vecs[2])/cell_volume+eps))
    n2 = math.ceil(max_norm*numpy.linalg.norm(numpy.cross(a_vecs[2],a_vecs[0])/cell_volume+eps))
    n3 = math.ceil(max_norm*numpy.linalg.norm(numpy.cross(a_vecs[0],a_vecs[1])/cell_volume+eps))
    r_vecs = []
    r_lengths = []
    a_vecs = numpy.array(a_vecs)
    # Store the R vectors that lie within the sphere
    num_rs = 0
    for i in range(-int(round(n1)), int(round(n1))+1):
        for j in range(-int(round(n2)), int(round(n2))+1):
            for k in range(-int(round(n3)), int(round(n3))+1):
                this_vector = i*a_vecs[0] + j*a_vecs[1] + k*a_vecs[2]
                length = numpy.linalg.norm(this_vector)
                if (length > max_norm + eps):
                    continue # This vector is outside sphere
                num_rs += 1
                r_vecs.append(this_vector.tolist())
                r_lengths.append(length)
    # Try all R vector triplets in the sphere and see which ones are valid
    # rotations of the original basis vectors.
    #
    # The length of all vectors must be preserved under a unitary
    # transformation so skip any trial vectors that aren't the same
    # length as the original. We also skip any set of vectors that
    # have the right lengths but do not form a parallelpiped that has
    # the same volume as the original set. Also, note that the we skip
    # sets of vectors that contain the same vector more than once
    # (i.e. the indices i, j, k must be unique).
    num_ops = 0
    lattpg_op = []
    from itertools import permutations
    for i,j,k in permutations(range(num_rs),3):
        if (abs(r_lengths[i] - norm_avecs[0]) > eps) or (abs(r_lengths[j] - norm_avecs[1]) > eps) or (abs(r_lengths[k] - norm_avecs[2]) > eps) or (abs(cell_volume - abs(numpy.linalg.det([r_vecs[i],r_vecs[j],r_vecs[k]]))) > eps):
            continue
        # Form the new set of "rotated" basis vectors
        new_vectors = [r_vecs[i],r_vecs[j],r_vecs[k]]
        # If the transformation matrix that takes the original set to the new set is
        # an orthogonal matrix then this rotation is a point symmetry of the lattice.
        rotation_matrix = numpy.matmul(inverse_avecs,new_vectors)
        # Check orthogonality of rotation matrix by [R][R]^T = [1]
        test_matrix = numpy.matmul(rotation_matrix,numpy.transpose(rotation_matrix))
        if (numpy.allclose(test_matrix, [[1,0,0],[0,1,0],[0,0,1]], rtol=0,atol=eps)): # Found valid rotation
            num_ops += 1 # Count number of rotations
            lattpg_op.append(rotation_matrix.tolist())
    return(lattpg_op)
def get_spaceGroup(par_lat, atomType, bas_vecs, eps=1E-10, lattcoords=False):
    """Determine the space group of a crystal structure.

    Given the parent lattice vectors and the basis (atom types and
    positions), return every space-group element as a point operation
    paired with a fractional translation.  The structure is assumed to
    already be primitive.

    Args:
        par_lat (array-like): 2D array of parent lattice vectors (row vectors).
        atomType (list of int): Type label for each basis atom.
        bas_vecs (array-like): 2D array of basis-atom positions.
        eps (float, optional): Finite-precision tolerance.
        lattcoords (bool, optional): True when ``bas_vecs`` is given in
            lattice coordinates rather than cartesian.

    Returns:
        (sg_ops, sg_fracts): The rotation/mirror parts and the matching
        translational parts of the space-group elements.
    """
    num_basis = len(atomType)

    # Work on a copy so the caller's positions are never mutated.
    positions = deepcopy(bas_vecs)

    # Transformation matrices between lattice and cartesian coordinates.
    latt_to_cart, cart_to_latt = _get_transformations(par_lat)

    # Convert to cartesian if needed, then wrap every atom into the unit cell.
    if lattcoords:
        positions = [numpy.matmul(latt_to_cart, pos).tolist() for pos in positions]
    positions = [bring_into_cell(pos, cart_to_latt, latt_to_cart, eps)
                 for pos in positions]

    # Point group of the bare lattice; each element is a candidate
    # rotational part of a space-group operation.
    point_ops = get_lattice_pointGroup(par_lat, eps=eps)

    def _is_space_group_element(rot, shift):
        """True when rot followed by shift maps every atom onto one of its own type."""
        for idx in range(num_basis):
            moved = numpy.matmul(rot, positions[idx])
            moved = [moved[d] + shift[d] for d in range(3)]
            moved = bring_into_cell(moved, cart_to_latt, latt_to_cart, eps)
            if not _does_mapping_exist(moved, atomType[idx], positions, atomType, eps):
                return False
        return True

    sg_ops = []
    sg_fracts = []
    for rot in point_ops:
        # Image of the first basis atom under this rotation.
        rotated_first = numpy.matmul(rot, positions[0])
        # Every same-type atom defines a candidate fractional translation.
        for j in range(num_basis):
            if atomType[j] != atomType[0]:
                continue
            shift = [positions[j][d] - rotated_first[d] for d in range(3)]
            shift = bring_into_cell(shift, cart_to_latt, latt_to_cart, eps)
            if _is_space_group_element(rot, shift):
                sg_fracts.append(shift)
                sg_ops.append(rot)
            # Keep scanning the remaining translations: for non-primitive
            # lattices a single rotation can pair with several fractional
            # translations. (GLWH 10/26/2009)
    return (sg_ops, sg_fracts)
| StarcoderdataPython |
6630500 | <filename>examples/hello.py
from concurrence import dispatch
def hello():
    """Print a greeting; run as the root tasklet via concurrence.dispatch."""
    # Parenthesized form works under both Python 2 and 3; the original
    # bare `print` statement is a SyntaxError on Python 3.
    print("Hello World!")
if __name__ == '__main__':
    # Hand control to the concurrence event loop, running `hello` as the
    # initial task; dispatch does not return until the loop finishes.
    dispatch(hello)
| StarcoderdataPython |
4925120 | <filename>ttt_tests/optimal_ai_move_test.py<gh_stars>0
import tictactoe as ttt
import numpy as np
# A 3x3 board filled with distinct single-letter labels. Presumably these
# act as "unoccupied" markers (none of them is 'x' or 'o'), so every
# square reads as open — TODO confirm against tictactoe's conventions.
grid = np.array([['e', 'r', 't'],
                 ['d', 'f', 'g'],
                 ['c', 'v', 'b']])

# Ask the engine which squares are playable, then have it score a move
# for player 'x' and show the resulting metrics.
open_cells = ttt.find_available_moves(grid)
move_scores = ttt.optimal_ai_move(board=grid, x_or_o='x', available=open_cells)
print(move_scores)
# Finally, a clear demonstration that going in the middle wins more often than other locations.
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.