id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
11240007 | """
test_strange_headers.py
Copyright 2012 <NAME>
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import time
import unittest
import w3af.core.data.kb.knowledge_base as kb
from w3af.core.data.url.HTTPResponse import HTTPResponse
from w3af.core.data.request.fuzzable_request import FuzzableRequest
from w3af.core.data.parsers.doc.url import URL
from w3af.core.data.dc.headers import Headers
from w3af.core.controllers.misc.temp_dir import create_temp_dir
from w3af.plugins.grep.strange_headers import strange_headers
class TestStrangeHeaders(unittest.TestCase):
    """Unit tests for the w3af strange_headers grep plugin."""

    def setUp(self):
        # Fresh temp dir, empty knowledge base and a new plugin per test.
        create_temp_dir()
        kb.kb.cleanup()
        self.plugin = strange_headers()

    def tearDown(self):
        self.plugin.end()

    def test_strange_headers_positive(self):
        """An uncommon response header must be reported as one info set."""
        body = 'Hello world'
        url = URL('http://www.w3af.com/')
        headers = Headers([('content-type', 'text/html'),
                           ('hello-world', 'yes!')])
        request = FuzzableRequest(url, method='GET')
        resp_positive = HTTPResponse(200, body, headers, url, url, _id=1)
        self.plugin.grep(request, resp_positive)

        info_sets = kb.kb.get('strange_headers', 'strange_headers')
        # assertEqual instead of the deprecated assertEquals alias.
        self.assertEqual(len(info_sets), 1)

        info = info_sets[0]
        expected_desc = (u'The remote web server sent 1 HTTP responses with'
                         u' the uncommon response header "hello-world", one'
                         u' of the received header values is "yes!". The'
                         u' first ten URLs which sent the uncommon header'
                         u' are:\n - http://www.w3af.com/\n')
        self.assertEqual(info.get_name(), 'Strange header')
        self.assertEqual(info.get_url(), url)
        self.assertEqual(info.get_desc(), expected_desc)

    def test_strange_headers_timing(self):
        """Smoke/profiling test: repeated grep calls must not raise."""
        body = 'Hello world'
        url = URL('http://www.w3af.com/')
        headers = Headers([('content-type', 'text/html'),
                           ('hello-world', 'yes!')])
        request = FuzzableRequest(url, method='GET')
        resp_positive = HTTPResponse(200, body, headers, url, url, _id=1)

        start = time.time()
        for _ in xrange(5):
            self.plugin.grep(request, resp_positive)
        spent = time.time() - start
        # Uncomment when profiling:
        # print('Profiling run in %s seconds' % spent)
        self.assertGreaterEqual(spent, 0.0)

    def test_strange_headers_no_group(self):
        """Different uncommon headers yield separate info sets."""
        body = 'Hello world'

        url_1 = URL('http://www.w3af.com/1')
        headers_1 = Headers([('content-type', 'text/html'),
                             ('hello-world', 'yes!')])
        request_1 = FuzzableRequest(url_1, method='GET')
        resp_1 = HTTPResponse(200, body, headers_1, url_1, url_1, _id=1)
        self.plugin.grep(request_1, resp_1)

        url_2 = URL('http://www.w3af.com/2')
        headers_2 = Headers([('content-type', 'text/html'),
                             ('bye-bye', 'chau')])
        request_2 = FuzzableRequest(url_2, method='GET')
        resp_2 = HTTPResponse(200, body, headers_2, url_2, url_2, _id=2)
        self.plugin.grep(request_2, resp_2)

        info_sets = kb.kb.get('strange_headers', 'strange_headers')
        self.assertEqual(len(info_sets), 2)

    def test_strange_headers_group(self):
        """The same uncommon header on two URLs is grouped into one info set."""
        body = 'Hello world'

        url_1 = URL('http://www.w3af.com/1')
        headers_1 = Headers([('content-type', 'text/html'),
                             ('hello-world', 'yes!')])
        request_1 = FuzzableRequest(url_1, method='GET')
        resp_1 = HTTPResponse(200, body, headers_1, url_1, url_1, _id=1)
        self.plugin.grep(request_1, resp_1)

        url_2 = URL('http://www.w3af.com/2')
        headers_2 = Headers([('content-type', 'text/html'),
                             ('hello-world', 'nope')])
        request_2 = FuzzableRequest(url_2, method='GET')
        resp_2 = HTTPResponse(200, body, headers_2, url_2, url_2, _id=2)
        self.plugin.grep(request_2, resp_2)

        info_sets = kb.kb.get('strange_headers', 'strange_headers')
        self.assertEqual(len(info_sets), 1)

    def test_strange_headers_negative(self):
        """Common headers (x-pad) must not be reported."""
        body = 'Hello world'
        url = URL('http://www.w3af.com/')
        headers = Headers([('content-type', 'text/html'),
                           ('x-pad', 'yes!')])
        request = FuzzableRequest(url, method='GET')
        resp_positive = HTTPResponse(200, body, headers, url, url, _id=1)
        self.plugin.grep(request, resp_positive)

        infos = kb.kb.get('strange_headers', 'strange_headers')
        self.assertEqual(len(infos), 0)
| StarcoderdataPython |
6494978 | # +
import theforce.cl as cline
from theforce.calculator.active import FilterDeltas
from theforce.util.aseutil import init_velocities, make_cell_upper_triangular
from ase.md.npt import NPT
from ase.md.langevin import Langevin
from ase.io import read
from ase import units
import numpy as np
import os
def md(atoms, dynamics='NPT', dt=None, tem=300., picos=100, bulk_modulus=None, stress=0., mask=None, iso=False,
       trajectory='md.traj', loginterval=1, append=False, rattle=0.0, tdamp=25, pdamp=100, friction=1e-3,
       ml_filter=0.8, eps_pos=0.05, eps_cell=0.05):
    """
    Run machine-learning molecular dynamics with an active-learning calculator.

    atoms: ASE atoms
    dynamics: 'NPT'
    dt: time-step in fs
    tem: temperature in Kelvin
    picos: pico-seconds for md
    bulk_modulus: bulk_modulus for NPT simulations. if None, NVT is performed
    stress: external stress (GPa) for NPT
    mask: see ase.npt.NPT
    iso: if True, keep the shape constant
    trajectory: traj file name
    loginterval: for traj file
    append: append to traj file
    rattle: rattle atoms at initial step (recommended ~0.05)
    tdamp: temperature damping time (fs)
    pdamp: pressure damping time (fs)
    friction: for Langevin dynamics
    ml_filter: filters force discontinuities due to ML updates range(0, 1)
    eps_pos: rattle amplitude used by the initial manual (active-learning) steps
    eps_cell: relative cell expansion/shrinkage used by the manual steps
    """
    calc = cline.gen_active_calc()
    atoms.set_calculator(calc)
    # If the calculator is in active-learning mode, explore a few manual
    # steps (rattle + cell scaling) before running the actual dynamics.
    if calc.active:
        manual_steps(atoms, eps_pos, eps_cell, npt=bulk_modulus)
    atoms.rattle(rattle, rng=np.random)
    Ts = get_temperatures(tem)
    if calc.rank == 0:
        print(f'MD temperatures: {Ts}')
    init_velocities(atoms, Ts[0])
    atoms.get_potential_energy()
    if calc.deltas:
        calc.results.clear()
    # Default time step: shorter when hydrogen (atomic number 1) is present.
    if dt is None:
        if (atoms.numbers == 1).any():
            dt = 0.25
        else:
            dt = 1.
    # Optionally wrap atoms to smooth force discontinuities from ML updates.
    if ml_filter:
        md_atoms = FilterDeltas(atoms, shrink=ml_filter)
    else:
        md_atoms = atoms
    # One MD run per requested temperature; later runs append to the traj file.
    for T in Ts:
        if dynamics.upper() == 'NPT':
            dyn = npt_dynamics(md_atoms, dt, T, bulk_modulus, stress, mask, iso,
                               trajectory, loginterval, append, tdamp, pdamp)
        elif dynamics.upper() == 'LANGEVIN':
            dyn = langevin_dynamics(md_atoms, dt, T, friction, trajectory,
                                    loginterval, append)
        if calc.meta is not None:
            dyn.attach(calc.meta.update)
        # picos > 0 -> duration in picoseconds; picos < 0 -> -picos is a raw
        # step count.
        steps = int(picos*1000/dt) if picos > 0 else -picos
        dyn.run(steps)
        append = True
def get_temperatures(tem):
    """Normalize *tem* to a list of temperatures.

    A scalar becomes a one-element list; anything iterable is returned
    unchanged.
    """
    return tem if hasattr(tem, '__iter__') else [tem]
def langevin_dynamics(atoms, dt, tem, friction, trajectory, loginterval, append):
    """Build an ase Langevin integrator for *atoms* at temperature *tem* (K)."""
    integrator = Langevin(
        atoms,
        dt * units.fs,
        temperature_K=tem,
        friction=friction,
        rng=np.random,
        trajectory=trajectory,
        append_trajectory=append,
        loginterval=loginterval,
    )
    return integrator
def npt_dynamics(atoms, dt, tem, bulk_modulus, stress, mask, iso, trajectory, loginterval,
                 append, tdamp, pdamp):
    """Build an ase NPT integrator.

    With a truthy *bulk_modulus* a pressure factor is derived from it and the
    pressure damping time; otherwise pfactor is None (constant-volume NVT).
    If *iso* the cell shape is kept fixed (traceless strain fraction 0).
    """
    ttime = tdamp * units.fs
    ptime = pdamp * units.fs
    pfactor = (ptime ** 2) * bulk_modulus * units.GPa if bulk_modulus else None
    configure_cell(atoms)
    integrator = NPT(atoms, dt * units.fs, temperature_K=tem,
                     externalstress=stress * units.GPa,
                     ttime=ttime, pfactor=pfactor, mask=mask,
                     trajectory=trajectory,
                     append_trajectory=append, loginterval=loginterval)
    if iso:
        integrator.set_fraction_traceless(0.)
    return integrator
def configure_cell(atoms):
    """Ensure the cell is defined, then make it upper triangular.

    A completely zero cell (non-periodic input) is replaced by a padded box
    via atoms.center.
    """
    cell_is_unset = np.allclose(atoms.cell, 0.)
    if cell_is_unset:
        atoms.center(vacuum=6.)
    make_cell_upper_triangular(atoms)
def manual_steps(atoms, eps, eps2, npt=False):
    """Take a few manual exploration steps before MD starts.

    Rattles the atoms by *eps* and (when *npt* is truthy) scales the cell by
    (1 +/- eps2), evaluating the energy after each distortion so the active
    calculator can sample those configurations. Positions and cell are
    restored afterwards.
    """
    calc = atoms.calc
    # Prefix log lines so these pre-MD steps are distinguishable in the log.
    calc._logpref = '#'
    calc.log('manual steps:')
    calc.log(f'rattle: {eps}')
    positions = atoms.positions.copy()
    if eps > 0.:
        atoms.rattle(eps, rng=np.random)
        atoms.get_potential_energy()
    if npt and eps2 > 0.:
        cell = atoms.cell.copy()
        calc.log(f'expand: {(1.+eps2)}*cell')
        atoms.set_cell((1.+eps2)*cell, scale_atoms=True)
        atoms.get_potential_energy()
        calc.log(f'shrink: {(1.-eps2)}*cell')
        atoms.set_cell((1.-eps2)*cell, scale_atoms=True)
        atoms.get_potential_energy()
        # Restore the original cell before the real dynamics.
        atoms.set_cell(cell, scale_atoms=True)
    # Restore original positions; rattling here was only for sampling.
    atoms.positions = positions
    calc._logpref = ''
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(
        description='Machine Learning Molecular Dynamics (MLMD)')
    parser.add_argument('-i', '--input', default='POSCAR', type=str,
                        help='the initial coordinates of atoms, POSCAR, xyz, cif, etc.')
    parser.add_argument('-o', '--output', default='CONTCAR', type=str,
                        help='the final coordinates of atoms')
    args = parser.parse_args()
    # A .traj file may hold a whole trajectory; start MD from its last frame.
    if args.input.endswith('.traj'):
        atoms = read(args.input, -1)
    else:
        atoms = read(args.input)
    kwargs = cline.get_default_args(md)
    cline.update_args(kwargs)
    md(atoms, **kwargs)
    try:
        atoms.write(args.output)
    except Exception:
        # Narrowed from a bare `except:` which would also swallow
        # KeyboardInterrupt/SystemExit. Fall back to a known-writable format.
        import warnings
        alt = 'md.final.xyz'
        msg = f'writing to {args.output} failed -> wrote {alt}'
        warnings.warn(msg)
        atoms.write(alt)
| StarcoderdataPython |
341370 | import doctest
import unittest
import luigi.task
import luigi
from datetime import datetime, timedelta
class DummyTask(luigi.Task):
    # One parameter of every built-in luigi Parameter type, so a single task
    # instance can round-trip all of them through to_str_params()/from_str_params().
    param = luigi.Parameter()
    bool_param = luigi.BooleanParameter()
    int_param = luigi.IntParameter()
    float_param = luigi.FloatParameter()
    date_param = luigi.DateParameter()
    datehour_param = luigi.DateHourParameter()
    timedelta_param = luigi.TimeDeltaParameter()
    list_param = luigi.Parameter(is_list=True)
class TaskTest(unittest.TestCase):
    """Tests for luigi.task doctests and parameter string round-tripping."""

    def test_tasks_doctest(self):
        """Run the doctests embedded in luigi.task and fail if any fail.

        The original code discarded doctest.testmod()'s TestResults, so a
        failing doctest could never fail this test.
        """
        results = doctest.testmod(luigi.task)
        self.assertEqual(results.failed, 0)

    def test_task_to_str_to_task(self):
        """A task serialized to string params must deserialize equal."""
        params = dict(
            param='test',
            bool_param=True,
            int_param=666,
            float_param=123.456,
            date_param=datetime(2014, 9, 13).date(),
            datehour_param=datetime(2014, 9, 13, 9),
            timedelta_param=timedelta(44),  # doesn't support seconds
            list_param=['in', 'flames'])

        original = DummyTask(**params)
        other = DummyTask.from_str_params(original.to_str_params(), {})
        self.assertEqual(original, other)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
# Print a left-aligned triangle of capital letters, one more letter per row.
# ASCII 65..69 are 'A'..'E'.
for row_end in range(65, 70):
    for code in range(65, row_end + 1):
        print(chr(code), end='')
    print()
"""
# pattern
A
AB
ABC
ABCD
ABCDE
"""
8102178 | <gh_stars>0
import sys
# Redirect stdin so input() reads from the sample data file.
sys.stdin = open('11048.txt')
# NOTE(review): deque is imported but unused -- likely a leftover from a BFS
# draft (the unused dx/dy direction arrays were removed for the same reason).
from collections import deque

N, M = map(int, input().split())
# 1-based grid with a padding row and column of zeros so the DP below can
# read [i-1][j] and [i][j-1] without bounds checks.
miro = [[0 for _ in range(M + 1)]] + \
       [[0] + list(map(int, input().split())) for _ in range(N)]
candy = [[0 for _ in range(M + 1)] for _ in range(N + 1)]
# DP over moves coming from above or from the left; candy[i][j] holds the
# best total collectible on a path ending at cell (i, j).
for i in range(1, N + 1):
    for j in range(1, M + 1):
        candy[i][j] = max(candy[i - 1][j] + miro[i][j],
                          candy[i][j - 1] + miro[i][j])
print(candy[N][M])
3481808 | <gh_stars>10-100
import os
import sys
import time
import subprocess
import concurrent.futures
from tempfile import mkstemp
from luastyle.indenter import IndentRule, IndentOptions
class BytecodeException(Exception):
    """Raised when reformatted Lua source compiles to different bytecode."""

    def __init__(self, message):
        # Delegate message storage to the base Exception class.
        super().__init__(message)
class Configuration:
    """Load and generate JSON indenter configuration files."""

    def load(self, filepath):
        """Read *filepath* and return the IndentOptions parsed from its JSON."""
        with open(filepath) as json_data_file:
            content = json_data_file.read()
            options = IndentOptions.from_json(content)
        return options

    def generate_default(self, filepath):
        """Write the default IndentOptions as JSON to *filepath*."""
        with open(filepath, 'w') as json_data_file:
            json_data_file.write(IndentOptions().to_json())
        print('Config. file generated in: ' + os.path.abspath(filepath))
class FilesProcessor:
    """Indent a batch of Lua files in parallel, optionally verifying bytecode."""

    def __init__(self, rewrite, jobs, check_bytecode, indent_options, verbose):
        # rewrite: overwrite files in place instead of printing to stdout.
        # jobs: number of worker processes.
        # check_bytecode: compare luac output before/after reformatting.
        self._rewrite = rewrite
        self._jobs = jobs
        self._check_bytecode = check_bytecode
        self._indent_options = indent_options
        self.verbose = verbose

    def _process_one(self, filepath):
        """Process one file.

        Returns (bytecode_equal, line_count_of_output). The file is only
        rewritten (or printed) when the bytecode check passes or is disabled.
        """
        with open(filepath) as file:
            rule_input = file.read()
        rule_output = IndentRule(self._indent_options).apply(rule_input)
        if self._check_bytecode:
            bytecode_equal = check_lua_bytecode(rule_input, rule_output)
        else:
            bytecode_equal = True
        if bytecode_equal:
            if self._rewrite:
                # Rewrite in place, truncating any leftover tail.
                f = open(filepath, 'r+')
                f.seek(0)
                f.write(rule_output)
                f.truncate()
                f.close()
            else:
                print(rule_output)
        return bytecode_equal, len(rule_output.split('\n'))

    def run(self, files):
        """Process *files* with a process pool, reporting progress and stats."""
        if self.verbose:
            print(str(len(files)) + ' file(s) to process')
        processed = 0
        if self.verbose:
            print('[' + str(processed) + '/' + str(len(files)) + '] file(s) processed')
        # some stats
        start = time.time()
        total_lines = 0
        # We can use a with statement to ensure threads are cleaned up promptly
        with concurrent.futures.ProcessPoolExecutor(max_workers=self._jobs) as executor:
            # Start process operations and mark each future with its filename
            future_to_file = {executor.submit(self._process_one, file): file for file in files}
            for future in concurrent.futures.as_completed(future_to_file):
                file = future_to_file[future]
                try:
                    success, n_lines = future.result()
                    total_lines += n_lines
                    if not success:
                        # Converted to an exception so it is reported through
                        # the same path as worker failures.
                        raise BytecodeException('bytecode differs')
                except Exception as exc:
                    print('%r generated an exception: %s' % (file, exc))
                else:
                    processed += 1
                    if self.verbose:
                        print('[' + str(processed) + '/' + str(len(files)) + '] file(s) processed, last is ' + file)
                    sys.stdout.flush()
        end = time.time()
        if self.verbose:
            print(str(total_lines) + ' source lines processed in ' + str(round(end - start, 2)) + ' s')
def check_lua_bytecode(raw, formatted):
    """Compile *raw* and *formatted* Lua source and compare stripped bytecode.

    Returns True only when both compilations succeed and produce identical
    bytecode. Fixes over the original version:
      * every mkstemp() file descriptor was leaked (the fd was reassigned
        four times and never closed) -- fds are now closed immediately;
      * os.system() return codes were ignored, so when luac failed both
        bytecode files stayed empty and compared equal, wrongly returning
        True -- compilation failure now returns False;
      * shell command strings built by concatenation are replaced with
        subprocess argument lists (no shell involved);
      * the external `diff` call is replaced by a direct byte comparison.
    """
    if os.name == 'nt':
        raise NotImplementedError('check_lua_bytecode not supported on windows')

    # Four scratch files: two sources, two bytecode outputs.
    paths = []
    for _ in range(4):
        fd, path = mkstemp()
        os.close(fd)  # mkstemp opens the file; close the fd so it is not leaked
        paths.append(path)
    raw_path, formatted_path, raw_bytecode, formatted_bytecode = paths

    try:
        with open(raw_path, 'w') as file:
            file.write(raw)
        with open(formatted_path, 'w') as file:
            file.write(formatted)

        # Allow overriding the compiler through the LUAC environment variable
        # (the original relied on shell expansion of $LUAC).
        luac = os.environ.get('LUAC', 'luac')
        # Compile both files with symbol stripping (-s).
        raw_rc = subprocess.call([luac, '-s', '-o', raw_bytecode, raw_path])
        fmt_rc = subprocess.call([luac, '-s', '-o', formatted_bytecode, formatted_path])
        if raw_rc != 0 or fmt_rc != 0:
            return False

        with open(raw_bytecode, 'rb') as file:
            raw_bc = file.read()
        with open(formatted_bytecode, 'rb') as file:
            formatted_bc = file.read()
        return raw_bc == formatted_bc
    finally:
        # cleanup
        for path in paths:
            os.remove(path)
| StarcoderdataPython |
9650851 | <reponame>jia-yi-chen/multimodal-deep-learning
import numpy as np
import random
import torch
import torch.nn as nn
from torch.autograd import Function
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence
from transformers import BertModel, BertConfig
from utils import to_gpu
from utils import ReverseLayerF
def masked_mean(tensor, mask, dim):
    """Mean of *tensor* along *dim*, counting only positions where *mask* is set."""
    selected = tensor * mask
    return selected.sum(dim=dim) / mask.sum(dim=dim)
def masked_max(tensor, mask, dim):
    """Max of *tensor* along *dim*, ignoring positions where *mask* is False.

    Masked-out positions are pushed to -inf so they can never win the max.
    Returns the (values, indices) pair produced by Tensor.max(dim=...).

    Fix: the original used `math.inf` although `math` is never imported in
    this module, raising NameError on first call; `float("-inf")` needs no
    import.
    """
    masked = torch.mul(tensor, mask)
    neg_inf = torch.zeros_like(tensor)
    neg_inf[~mask] = float("-inf")
    return (masked + neg_inf).max(dim=dim)
# let's define a simple model that can deal with multimodal variable length sequence
class MISA(nn.Module):
    """MISA: Modality-Invariant and -Specific representations for multimodal
    sentiment analysis.

    Encodes text / visual / acoustic sequences (BERT or stacked biRNNs),
    projects each modality into a common hidden space, splits each projection
    into a private (modality-specific) and a shared (modality-invariant)
    code, and fuses the six codes with a one-layer transformer encoder
    followed by an MLP head.
    """

    def __init__(self, config):
        super(MISA, self).__init__()

        self.config = config
        self.text_size = config.embedding_size
        self.visual_size = config.visual_size
        self.acoustic_size = config.acoustic_size

        # Sizes ordered as [text, visual, acoustic].
        self.input_sizes = input_sizes = [self.text_size, self.visual_size, self.acoustic_size]
        self.hidden_sizes = hidden_sizes = [int(self.text_size), int(self.visual_size), int(self.acoustic_size)]
        self.output_size = output_size = config.num_classes
        self.dropout_rate = dropout_rate = config.dropout
        self.activation = self.config.activation()
        self.tanh = nn.Tanh()

        rnn = nn.LSTM if self.config.rnncell == "lstm" else nn.GRU

        # defining modules - two layer bidirectional LSTM with layer norm in between
        if self.config.use_bert:
            # Initializing a BERT bert-base-uncased style configuration
            bertconfig = BertConfig.from_pretrained('bert-base-uncased', output_hidden_states=True)
            self.bertmodel = BertModel.from_pretrained('bert-base-uncased', config=bertconfig)
        else:
            self.embed = nn.Embedding(len(config.word2id), input_sizes[0])
            self.trnn1 = rnn(input_sizes[0], hidden_sizes[0], bidirectional=True)
            self.trnn2 = rnn(2*hidden_sizes[0], hidden_sizes[0], bidirectional=True)

        self.vrnn1 = rnn(input_sizes[1], hidden_sizes[1], bidirectional=True)
        self.vrnn2 = rnn(2*hidden_sizes[1], hidden_sizes[1], bidirectional=True)

        self.arnn1 = rnn(input_sizes[2], hidden_sizes[2], bidirectional=True)
        self.arnn2 = rnn(2*hidden_sizes[2], hidden_sizes[2], bidirectional=True)

        ##########################################
        # mapping modalities to same sized space
        ##########################################
        if self.config.use_bert:
            self.project_t = nn.Sequential()
            self.project_t.add_module('project_t', nn.Linear(in_features=768, out_features=config.hidden_size))
            self.project_t.add_module('project_t_activation', self.activation)
            self.project_t.add_module('project_t_layer_norm', nn.LayerNorm(config.hidden_size))
        else:
            self.project_t = nn.Sequential()
            self.project_t.add_module('project_t', nn.Linear(in_features=hidden_sizes[0]*4, out_features=config.hidden_size))
            self.project_t.add_module('project_t_activation', self.activation)
            self.project_t.add_module('project_t_layer_norm', nn.LayerNorm(config.hidden_size))

        self.project_v = nn.Sequential()
        self.project_v.add_module('project_v', nn.Linear(in_features=hidden_sizes[1]*4, out_features=config.hidden_size))
        self.project_v.add_module('project_v_activation', self.activation)
        self.project_v.add_module('project_v_layer_norm', nn.LayerNorm(config.hidden_size))

        self.project_a = nn.Sequential()
        self.project_a.add_module('project_a', nn.Linear(in_features=hidden_sizes[2]*4, out_features=config.hidden_size))
        self.project_a.add_module('project_a_activation', self.activation)
        self.project_a.add_module('project_a_layer_norm', nn.LayerNorm(config.hidden_size))

        ##########################################
        # private encoders (one per modality)
        ##########################################
        self.private_t = nn.Sequential()
        self.private_t.add_module('private_t_1', nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size))
        self.private_t.add_module('private_t_activation_1', nn.Sigmoid())

        self.private_v = nn.Sequential()
        self.private_v.add_module('private_v_1', nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size))
        self.private_v.add_module('private_v_activation_1', nn.Sigmoid())

        self.private_a = nn.Sequential()
        self.private_a.add_module('private_a_3', nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size))
        self.private_a.add_module('private_a_activation_3', nn.Sigmoid())

        ##########################################
        # shared encoder (weights shared across modalities)
        ##########################################
        self.shared = nn.Sequential()
        self.shared.add_module('shared_1', nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size))
        self.shared.add_module('shared_activation_1', nn.Sigmoid())

        ##########################################
        # reconstruct (decode private+shared back to the projected space)
        ##########################################
        self.recon_t = nn.Sequential()
        self.recon_t.add_module('recon_t_1', nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size))
        self.recon_v = nn.Sequential()
        self.recon_v.add_module('recon_v_1', nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size))
        self.recon_a = nn.Sequential()
        self.recon_a.add_module('recon_a_1', nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size))

        ##########################################
        # shared space adversarial discriminator
        # (only used when CMD similarity loss is disabled)
        ##########################################
        if not self.config.use_cmd_sim:
            self.discriminator = nn.Sequential()
            self.discriminator.add_module('discriminator_layer_1', nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size))
            self.discriminator.add_module('discriminator_layer_1_activation', self.activation)
            self.discriminator.add_module('discriminator_layer_1_dropout', nn.Dropout(dropout_rate))
            self.discriminator.add_module('discriminator_layer_2', nn.Linear(in_features=config.hidden_size, out_features=len(hidden_sizes)))

        ##########################################
        # shared-private collaborative discriminator
        ##########################################
        self.sp_discriminator = nn.Sequential()
        self.sp_discriminator.add_module('sp_discriminator_layer_1', nn.Linear(in_features=config.hidden_size, out_features=4))

        # Fusion head over the 6 concatenated codes (3 private + 3 shared).
        self.fusion = nn.Sequential()
        self.fusion.add_module('fusion_layer_1', nn.Linear(in_features=self.config.hidden_size*6, out_features=self.config.hidden_size*3))
        self.fusion.add_module('fusion_layer_1_dropout', nn.Dropout(dropout_rate))
        self.fusion.add_module('fusion_layer_1_activation', self.activation)
        self.fusion.add_module('fusion_layer_3', nn.Linear(in_features=self.config.hidden_size*3, out_features= output_size))

        self.tlayer_norm = nn.LayerNorm((hidden_sizes[0]*2,))
        self.vlayer_norm = nn.LayerNorm((hidden_sizes[1]*2,))
        self.alayer_norm = nn.LayerNorm((hidden_sizes[2]*2,))

        encoder_layer = nn.TransformerEncoderLayer(d_model=self.config.hidden_size, nhead=2)
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=1)

    def extract_features(self, sequence, lengths, rnn1, rnn2, layer_norm):
        """Run two stacked (layer-normed) biRNNs and return both final hidden states."""
        packed_sequence = pack_padded_sequence(sequence, lengths)

        if self.config.rnncell == "lstm":
            packed_h1, (final_h1, _) = rnn1(packed_sequence)
        else:
            packed_h1, final_h1 = rnn1(packed_sequence)

        padded_h1, _ = pad_packed_sequence(packed_h1)
        normed_h1 = layer_norm(padded_h1)
        packed_normed_h1 = pack_padded_sequence(normed_h1, lengths)

        if self.config.rnncell == "lstm":
            _, (final_h2, _) = rnn2(packed_normed_h1)
        else:
            _, final_h2 = rnn2(packed_normed_h1)

        return final_h1, final_h2

    def alignment(self, sentences, visual, acoustic, lengths, bert_sent, bert_sent_type, bert_sent_mask):
        """Encode all three modalities, compute private/shared codes and fuse.

        Also populates the attributes (utt_private_*, utt_shared_*,
        domain_label_*, shared_or_private_*, utt_*_recon) consumed by the
        training losses outside this class.
        """
        batch_size = lengths.size(0)

        if self.config.use_bert:
            bert_output = self.bertmodel(input_ids=bert_sent,
                                         attention_mask=bert_sent_mask,
                                         token_type_ids=bert_sent_type)
            bert_output = bert_output[0]

            # masked mean over the non-padded token embeddings
            masked_output = torch.mul(bert_sent_mask.unsqueeze(2), bert_output)
            mask_len = torch.sum(bert_sent_mask, dim=1, keepdim=True)
            bert_output = torch.sum(masked_output, dim=1, keepdim=False) / mask_len

            utterance_text = bert_output
        else:
            # extract features from text modality
            sentences = self.embed(sentences)
            final_h1t, final_h2t = self.extract_features(sentences, lengths, self.trnn1, self.trnn2, self.tlayer_norm)
            utterance_text = torch.cat((final_h1t, final_h2t), dim=2).permute(1, 0, 2).contiguous().view(batch_size, -1)

        # extract features from visual modality
        final_h1v, final_h2v = self.extract_features(visual, lengths, self.vrnn1, self.vrnn2, self.vlayer_norm)
        utterance_video = torch.cat((final_h1v, final_h2v), dim=2).permute(1, 0, 2).contiguous().view(batch_size, -1)

        # extract features from acoustic modality
        final_h1a, final_h2a = self.extract_features(acoustic, lengths, self.arnn1, self.arnn2, self.alayer_norm)
        utterance_audio = torch.cat((final_h1a, final_h2a), dim=2).permute(1, 0, 2).contiguous().view(batch_size, -1)

        # Shared-private encoders
        self.shared_private(utterance_text, utterance_video, utterance_audio)

        if not self.config.use_cmd_sim:
            # Adversarial modality discriminator on gradient-reversed shared codes.
            reversed_shared_code_t = ReverseLayerF.apply(self.utt_shared_t, self.config.reverse_grad_weight)
            reversed_shared_code_v = ReverseLayerF.apply(self.utt_shared_v, self.config.reverse_grad_weight)
            reversed_shared_code_a = ReverseLayerF.apply(self.utt_shared_a, self.config.reverse_grad_weight)

            self.domain_label_t = self.discriminator(reversed_shared_code_t)
            self.domain_label_v = self.discriminator(reversed_shared_code_v)
            self.domain_label_a = self.discriminator(reversed_shared_code_a)
        else:
            self.domain_label_t = None
            self.domain_label_v = None
            self.domain_label_a = None

        self.shared_or_private_p_t = self.sp_discriminator(self.utt_private_t)
        self.shared_or_private_p_v = self.sp_discriminator(self.utt_private_v)
        self.shared_or_private_p_a = self.sp_discriminator(self.utt_private_a)
        self.shared_or_private_s = self.sp_discriminator( (self.utt_shared_t + self.utt_shared_v + self.utt_shared_a)/3.0 )

        # For reconstruction
        self.reconstruct()

        # 1-LAYER TRANSFORMER FUSION over the 6 codes treated as a sequence
        h = torch.stack((self.utt_private_t, self.utt_private_v, self.utt_private_a, self.utt_shared_t, self.utt_shared_v, self.utt_shared_a), dim=0)
        h = self.transformer_encoder(h)
        h = torch.cat((h[0], h[1], h[2], h[3], h[4], h[5]), dim=1)
        o = self.fusion(h)
        return o

    def reconstruct(self,):
        """Decode private+shared sums back toward the projected utterances."""
        self.utt_t = (self.utt_private_t + self.utt_shared_t)
        self.utt_v = (self.utt_private_v + self.utt_shared_v)
        self.utt_a = (self.utt_private_a + self.utt_shared_a)

        self.utt_t_recon = self.recon_t(self.utt_t)
        self.utt_v_recon = self.recon_v(self.utt_v)
        self.utt_a_recon = self.recon_a(self.utt_a)

    def shared_private(self, utterance_t, utterance_v, utterance_a):
        """Project each modality, then split it into private and shared codes."""
        # Projecting to same sized space
        self.utt_t_orig = utterance_t = self.project_t(utterance_t)
        self.utt_v_orig = utterance_v = self.project_v(utterance_v)
        self.utt_a_orig = utterance_a = self.project_a(utterance_a)

        # Private-shared components
        self.utt_private_t = self.private_t(utterance_t)
        self.utt_private_v = self.private_v(utterance_v)
        self.utt_private_a = self.private_a(utterance_a)

        self.utt_shared_t = self.shared(utterance_t)
        self.utt_shared_v = self.shared(utterance_v)
        self.utt_shared_a = self.shared(utterance_a)

    def forward(self, sentences, video, acoustic, lengths, bert_sent, bert_sent_type, bert_sent_mask):
        """Full forward pass; returns the fusion head output."""
        batch_size = lengths.size(0)
        o = self.alignment(sentences, video, acoustic, lengths, bert_sent, bert_sent_type, bert_sent_mask)
        return o
| StarcoderdataPython |
4813475 | """
Generates a Celery config object from environment variables.
"""
import importlib
import os
import yaml
# Environment variables that carry Celery settings start with this prefix.
_PREFIX = "NEW_CELERY_"
_PLEN = len(_PREFIX)  # prefix length, used to strip it off
class InvalidCeleryConfig(ValueError):
    """Raised when the given input environment variables are ambiguous or wrong."""
def _load_key(environment_key: str):
    """Strip the NEW_CELERY_ prefix from an environment variable name.

    Returns the lowercase Celery setting name, or an empty string when the
    variable does not carry the prefix. Raises InvalidCeleryConfig when the
    remainder after the prefix is not entirely lowercase.
    """
    if not environment_key.startswith(_PREFIX):
        return ""
    setting_name = environment_key[_PLEN:]
    if setting_name != setting_name.lower():
        raise InvalidCeleryConfig(
            "new_celery_config key names should have an uppercase prefix "
            "and a lowercase suffix. For example, the environment variable "
            f"`{_PREFIX}broker_url` sets the `broker_url` Celery "
            f"config setting. You passed the key `{environment_key}` "
            "which is invalid."
        )
    return setting_name
class Config:  # pylint: disable=too-few-public-methods
    """A Celery config object whose settings come from environment variables,
    with each value parsed as YAML."""

    def __init__(self, *, environ=None):
        """Initialize the object from the environment."""
        environ = os.environ if environ is None else environ
        for env_name, raw_value in environ.items():
            setting = _load_key(env_name)
            if not setting:
                continue
            try:
                parsed = yaml.safe_load(raw_value)
            except yaml.YAMLError:
                # Values that are not valid YAML are skipped silently.
                continue
            setattr(self, setting, parsed)
| StarcoderdataPython |
1840141 | <reponame>ksbg/sparklanes<filename>tests/__main__.py
from unittest import TestLoader, TestSuite, TextTestRunner

from .test_lane import TestLane
from .test_spark import TestSpark
from .test_submit import TestSparkSubmit

# Collect every test case class of the package into one suite.
# unittest.makeSuite() is deprecated since Python 3.11 and removed in 3.13,
# so the cases are loaded through TestLoader instead.
loader = TestLoader()
suite = TestSuite()
suite.addTests(loader.loadTestsFromTestCase(TestLane))
suite.addTests(loader.loadTestsFromTestCase(TestSparkSubmit))
suite.addTests(loader.loadTestsFromTestCase(TestSpark))

runner = TextTestRunner(verbosity=2)
runner.run(suite)
| StarcoderdataPython |
3471301 | <reponame>ikikara/TI_Project-Compression_Of_Files
import deflate

# gzip compression level passed to libdeflate (higher = smaller, slower).
level = 6

# COMPRESSION
# NOTE: raw strings (r"...") keep the Windows paths literal and avoid
# invalid-escape-sequence warnings from the backslashes.
with open(r"D:\Pycharm\Codecs\egg.bmp", 'rb') as f:
    data = bytearray(f.read())
dados = deflate.gzip_compress(data, level)
with open(r"D:\Pycharm\Codecs\eggDeflate.dat", 'wb') as f:
    f.write(bytearray(dados))

# DECOMPRESSION (round-trip check: decompress what was just compressed)
descomp = deflate.gzip_decompress(dados)
with open(r"D:\Pycharm\Codecs\eggDesc.bmp", 'wb') as f:
    f.write(bytearray(descomp))
9785897 | '''
MarkDown format generator
'''
class MarkDown:
    """Convert raw text into MarkDown-formatted strings.

    Every public method accepts either a single string or a list of strings
    and returns the converted value(s); MarkDown metacharacters in the input
    are backslash-escaped so they render literally.
    """

    def __init__(self):
        # Characters that must be escaped so they are displayed literally
        # instead of being interpreted as MarkDown syntax. Raw strings keep
        # the backslash values explicit (the original relied on invalid
        # escape sequences such as "\`").
        self.escape_table = {
            "\\": "\\\\",
            "`": r"\`", "*": r"\*", "_": r"\_",
            "{": r"\{", "}": r"\}",
            "[": r"\[", "]": r"\]",
            "(": r"\(", ")": r"\)",
            "#": r"\#", "+": r"\+",
            "-": r"\-", ".": r"\.",
            "|": r"\|",
        }

    def __escape(self, data):
        """Backslash-escape every MarkDown metacharacter in *data*."""
        return "".join(self.escape_table.get(c, c) for c in data)

    def __convert_lines(self, text='', prefix='', suffix='', olist=False):
        """Wrap *text* (str or list of str) with *prefix*/*suffix*.

        With olist=True, number the entries instead ("1. ", "2. ", ...).
        Unlike the original, a NEW list is returned instead of mutating the
        caller's list in place.
        """
        if isinstance(text, str):
            if olist:
                return '1. ' + self.__escape(text)
            return prefix + self.__escape(text) + suffix
        if isinstance(text, list):
            converted = []
            for idx, item in enumerate(text):
                if olist:
                    converted.append(str(idx + 1) + '. ' + self.__escape(item))
                else:
                    converted.append(prefix + self.__escape(item) + suffix)
            return converted
        # Unsupported input types are silently converted to an empty string.
        return ''

    def text(self, text):
        """Plain (escaped) text."""
        return self.__convert_lines(text)

    def error(self, text):
        """Error text; currently rendered like plain text."""
        return self.__convert_lines(text)

    def title(self, text):
        """Level-2 heading."""
        return self.__convert_lines(text, '##')

    def subtitle(self, text):
        """Level-3 heading."""
        return self.__convert_lines(text, '###')

    def ssubtitle(self, text):
        """Level-4 heading."""
        return self.__convert_lines(text, '####')

    def bold(self, text):
        """Bold text."""
        return self.__convert_lines(text, '**', '**')

    def line_breaker(self, count=1):
        """A horizontal rule, or a list of *count* rules when count > 1."""
        if count > 1:
            return ["-------------" for _ in range(count)]
        return "-------------"

    def reference(self, text):
        """Blockquote."""
        return self.__convert_lines(text, '>')

    def ordered_list(self, data):
        """Numbered list items."""
        return self.__convert_lines(data, olist=True)

    def unordered_list(self, data):
        """Bulleted list items."""
        return self.__convert_lines(data, '- ')
3355929 | import pytest
from labfunctions import types
from labfunctions.client.labstate import LabState
from .factories import (
DockerfileImageFactory,
ProjectDataFactory,
WorkflowDataWebFactory,
)
def test_client_labstate_LabState():
    """Exercise the main LabState accessors: project name/file/workflows,
    find_by_id, add_workflow, and the None fallbacks when the project is
    detached."""
    pd = ProjectDataFactory()
    wd = WorkflowDataWebFactory()
    wd2 = WorkflowDataWebFactory()
    lf = LabState(project=pd, workflows={wd.alias: wd})
    name = lf.project_name
    filepath = lf.filepath
    file_ = lf.file
    workflows = lf.workflows
    workflows_dict = lf.listworkflows2dict([wd])
    # A second state with the project detached: id/name lookups return None
    # and projectid becomes read-only.
    lf2 = LabState(project=pd, workflows={wd.alias: wd})
    lf2._project = None
    not_projectid = lf2.projectid
    not_name = lf2.project_name
    with pytest.raises(AttributeError):
        lf2.projectid = "invalid"
    wd_find = lf.find_by_id(wd.wfid)
    not_found = lf.find_by_id("not found")
    lf.add_workflow(wd2)
    assert wd2.alias in lf.workflows.keys()
    assert isinstance(lf, LabState)
    assert lf.project_name == pd.name
    assert wd_find.wfid == wd.wfid
    assert not_found is None
    assert not_projectid is None
    assert not_name is None
    assert name == pd.name
    assert isinstance(file_, types.Labfile)
    assert isinstance(workflows, dict)
    assert isinstance(workflows_dict, dict)
def test_client_labstate_write(tempdir):
    """Round-trip: a LabState written to disk loads back with the same project."""
    wf = LabState.from_file("tests/labfile_test.yaml")
    wf.write(f"{tempdir}/workflows.yaml")
    wf_2 = LabState.from_file(f"{tempdir}/workflows.yaml")
    assert wf_2._project.name == wf._project.name
def test_client_labstate_file():
    """The `file` property exposes the state as a types.Labfile."""
    wf = LabState.from_file("tests/labfile_test.yaml")
    wf2 = wf.file
    assert isinstance(wf2, types.Labfile)
def test_client_labstate_list2dict():
    """listworkflows2dict keys one entry per workflow in the input list."""
    tasks = WorkflowDataWebFactory.create_batch(size=5)
    res = LabState.listworkflows2dict(tasks)
    assert len(res.keys()) == 5
def test_client_labstate_add_workflow():
    """add_workflow registers a new workflow under its alias."""
    pd = ProjectDataFactory()
    tasks = WorkflowDataWebFactory.create_batch(size=2)
    ws = LabState(pd, workflows={tasks[0].alias: tasks[0]})
    ws.add_workflow(tasks[1])
    assert len(ws.workflows.keys()) == 2
    assert tasks[1].alias in ws.workflows.keys()
def test_client_labstate_del_workflow():
    """delete_workflow removes exactly the workflow with the given alias."""
    pd = ProjectDataFactory()
    tasks = WorkflowDataWebFactory.create_batch(size=2)
    tasks_dict = LabState.listworkflows2dict(tasks)
    ws = LabState(pd, workflows=tasks_dict)
    ws.delete_workflow(tasks[1].alias)
    assert len(ws.workflows.keys()) == 1
    assert tasks[1].alias not in ws.workflows.keys()
def test_client_labstate_update_prj():
    """update_project replaces the state's project data."""
    pd = ProjectDataFactory()
    wf = LabState.from_file("tests/labfile_test.yaml")
    wf.update_project(pd)
    assert wf.project.projectid == pd.projectid
| StarcoderdataPython |
6659104 | import hashlib
import lxml.html
import os
import pickle
import requests
import sys
# Character lookup string indexed by byte value (length 127): positions 0-31
# (ASCII control codes) map onto placeholder digits, positions 32-126 map to
# their literal printable ASCII characters.
# NOTE(review): presumably used to render raw bytes as displayable text --
# confirm against callers outside this chunk.
_ascii = ('01234567890123456789012345678901 '
          '!"#$%&\'()*+,-./0123456789:;<=>?@'
          'ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`'
          'abcdefghijklmnopqrstuvwxyz{|}~')
class MoxaHTTP_2_2:
def __init__(self, addr, verbose=True, cookie_file=None):
"""Initialize class for Moxa HTTP 2.2 Communication
addr : str
Address of moxa to communicate with
"""
self._addr = addr
self._cookies = None
self._base_url = 'http://{}'.format(addr)
self._verbose = verbose
self._passwd = '<PASSWORD>'
self._username = ''
if cookie_file is None:
home = os.path.expanduser('~')
self._cookie_file = os.path.join(home, '.moxa')
else:
self._cookie_file = cookie_file
def _print(self, *args, **kwargs):
if self._verbose:
print(*args, file=sys.stderr, **kwargs)
def save_cookiejar(self):
"""Save Cookie Jar to home folder for reuse"""
with open(self._cookie_file, 'wb') as f:
pickle.dump(self._cookies, f)
self._print(
"Saved session cookies to {}"
.format(self._cookie_file)
)
def load_cookiejar(self):
"""Load Cookie Jar from home folder"""
try:
with open(self._cookie_file, 'rb') as f:
self._cookies = pickle.load(f)
except FileNotFoundError:
self._cookies = None
if self._cookies is not None:
self._print(
"Loaded session cookies from '{}'"
.format(self._cookie_file)
)
def login(self, username, password):
""" Login to MOXA Web Interface"""
if username is None:
username = 'admin'
if password is None:
password = ''
self._passwd = password
self._username = username
if self._cookies is None:
# Load cookies if possible
self.load_cookiejar()
r = requests.get(self._base_url, cookies=self._cookies)
if r.status_code != 200:
raise RuntimeError('HTTP Login failed.')
# Did we get a main page? If so we are logged in
if self._is_main_page(r.text):
self._print("Already logged in (valid session cookie)")
return
if self._is_logged_in(r.text):
raise RuntimeError(('MOXA Does not permit login, '
'session is already open'))
fcr = self._get_fake_challenge(r.text)
if fcr is None:
raise RuntimeError("Error getting FakeChallengeResponse")
self._print(
"Logging into MOXA. Username : {} FakeChallenge : {}"
.format(username, fcr)
)
xorpw = self._xor_passwd(fcr, password)
data = {
'Username': username,
'Password': '',
'<PASSWORD>': <PASSWORD>,
'FakeChallenge': fcr,
'Submit.x': 0,
'Submit.y': 0
}
r = requests.post(self._base_url, data=data)
if r.status_code != 200:
raise RuntimeError('HTTP Login failed.')
# If we got no cookies set, we failed login
if len(r.cookies) == 0:
raise RuntimeError(
'Failed to login to Moxa.... check username and password'
)
for cookie in r.cookies:
self._print(cookie)
self._cookies = r.cookies
self._print("Logged in as {}.".format(username))
# Check by asking for main page again
r = requests.get(self._base_url, cookies=self._cookies)
if r.status_code != 200:
raise RuntimeError('HTTP Login failed verification.')
print("Main page requested OK.", file=sys.stderr)
self.save_cookiejar()
def set_ipaddr(self, ipaddr, netmask, gateway):
"""Set the IP Address of the MOXA and restart
ipaddr : str
New IP Address
netmask: str
New NETMASK
gateway: str
New Gateway
"""
set_url = ("/Set.htm?IPConfig=0&IPaddr={}&Netmask={}&"
"Gateway={}&DNS1=&DNS2=&WINSDisable=0&WINSServer="
"&IP6Config=0&IPv6DNS1=&IPv6DNS2=&CONN_PRIORITY=0&"
"LAN1Speed=0&Submit=Submit&setfunc=Basic+Network")
set_url = set_url.format(ipaddr, netmask, gateway)
r = requests.get(self._base_url + set_url, cookies=self._cookies)
if r.status_code != 200:
raise RuntimeError('Failed to set IP Address, invalid response.')
self._print("MOXA Network Settings set to:")
self._print(" IP : {}".format(ipaddr))
self._print(" NETMASK : {}".format(netmask))
self._print(" GATEWAY : {}".format(gateway))
self._save_restart()
def _save_restart(self):
r = requests.get(
self._base_url + '/SaveRestart.htm',
cookies=self._cookies
)
if r.status_code != 200:
raise RuntimeError('Failed to restart MOXA, invalid response.')
self._print("Sent SaveRestart to Moxa .... rebooting ....")
def download_config(self):
"""Download the config of the MOXA and return text"""
r = requests.get(
self._base_url + '/Config.txt',
cookies=self._cookies
)
if r.status_code != 200:
raise RuntimeError('Failed to restart MOXA, invalid response.')
# Check if text file
if r.headers['Content-Type'] != 'text/plain':
raise RuntimeError('MOXA Failed to respond with config data')
self._print("Downloaded config from Moxa")
return r.text
def change_passwd(self, new_passwd):
"""Change the password in the MOXA"""
old_hash = hashlib.md5(bytes(self._passwd, 'ascii')).hexdigest()
data = {
'old_passwd': '',
'passwd': '',
'conf_passwd': '',
'pwd': <PASSWORD>,
'newpwd': self._xor_passwd(self._passwd, new_passwd, True),
'Submit': 'Submit'
}
self._print('Setting password to "{}"'.format(new_passwd))
self._print('Old MD5 = {}'.format(data['pwd']))
self._print('New MD5 = {}'.format(data['newpwd']))
r = requests.post(
self._base_url + '/ChPassword.htm',
data=data,
cookies=self._cookies
)
if r.status_code != 200:
raise RuntimeError('Failed to change password on MOXA')
self._save_restart()
def logout(self):
r = requests.get(
self._base_url + '/LogoutAct.htm',
cookies=self._cookies,
allow_redirects=False
)
# If we logged out, we get a redirect
if r.status_code != 307:
raise RuntimeError('Failed to logout of Moxa.')
self._print("Logged out of Moxa")
def _get_fake_challenge(self, htmlstr):
htmltree = lxml.html.fromstring(htmlstr)
for el in htmltree.xpath('//input[@name="FakeChallenge"]'):
return el.attrib['value']
return None
def _is_main_page(self, htmlstr):
htmltree = lxml.html.fromstring(htmlstr)
if htmltree.xpath('//frame[@name="main"][@src="main.htm"]'):
return True
return False
def _is_logged_in(self, htmlstr):
htmltree = lxml.html.fromstring(htmlstr)
for el in htmltree.xpath('//h4'):
if el.text == 'Already login.':
return True
return False
def _xor_passwd(self, str1, str2, fill=False):
md = hashlib.md5(bytes(str1, 'ascii')).digest()
pw_tbl = [_ascii.rindex(c) for c in str2]
result_tbl = [a ^ b for a, b in zip(md, pw_tbl)]
if fill:
new_result = list(md)
else:
new_result = [None] * len(str2)
for i in range(len(result_tbl)):
new_result[i] = result_tbl[i]
_hex = ''.join('{:02x}'.format(a) for a in new_result)
return _hex
| StarcoderdataPython |
3260365 | # Copyright (c) 2021 <NAME>
import sys
gversum = 0
def decode(s, npkg=-1):
    """Recursively decode BITS packets from the bit-string ``s``.

    Appears to implement the packet format of Advent of Code 2021 day 16
    (unverified).  Every packet's version is accumulated into the module
    global ``gversum``; each packet is printed as it is parsed.

    s : str of '0'/'1' characters
    npkg : max number of packets to parse at this level (-1 = unlimited)

    Returns the number of bits consumed.
    """
    global gversum
    i = 0
    # A packet needs at least a 3-bit version plus a 3-bit type id.
    while len(s) - i >= 6 and npkg != 0:
        version = int(s[i:i+3],2)
        gversum += version
        typeid = int(s[i+3:i+6],2)
        i += 6
        if npkg > 0: npkg -= 1
        #print(version, typeid)
        if typeid == 4: # literal value
            # Literal: 5-bit groups; a leading 0 bit marks the final group.
            literals = ''
            literal = 0
            group = 1
            while group:
                group = int(s[i])
                i += 1
                literals += s[i:i+4]
                i+=4
            literal = int(literals,2)
            print(version, typeid, literal)
        else: # operator
            lengthtype = int(s[i])
            i += 1
            if lengthtype == 0: # total length in bits
                # Sub-packets occupy exactly the next ``sublength`` bits.
                sublength = int(s[i:i+15],2)
                print(version, typeid, lengthtype, sublength)
                i+=15
                decode(s[i:i+sublength])
                i+=sublength
            else:
                # Length type 1: an 11-bit count of immediate sub-packets.
                subpkg = int(s[i:i+11],2)
                print(version, typeid, lengthtype, subpkg)
                i+=11
                i+=decode(s[i:], subpkg)
    return i
# For each hex transmission on stdin: expand it to a bit string, decode the
# single outermost packet (npkg=1) and report the accumulated version sum.
for l in sys.stdin:
    bits =''
    gversum = 0
    for c in l.strip():
        bits += '{:04b}'.format(int(c,16))
    print(l.strip())
    print(bits)
    decode(bits,1)
    print(gversum)
# NOTE(review): dead assignment -- ``i`` is never read after this point.
i=0
| StarcoderdataPython |
1674234 | import graphene
from quiz.models import Category
from quiz.types import CategoryType
# Graphene query mixin exposing ``allCategories`` (all quiz Category rows).
# A class docstring is deliberately avoided: graphene would surface it as the
# schema description, which would change the public API surface.
class GetAllCategories:
    all_categories = graphene.List(CategoryType, id=graphene.ID())
    def resolve_all_categories(root, info):
        # NOTE(review): the ``id`` argument declared above is never used by
        # this resolver -- every category is always returned.
        return Category.objects.all()
| StarcoderdataPython |
4900917 | import sys
import os
import time
import pandas as pd
def get_latest_file(path, sub_dir='raw_data'):
    """Return the most recently modified file inside ``path``'s ``sub_dir``.

    Walks ``path`` recursively, stops at the first directory named
    ``sub_dir`` and returns the full path of its newest file (by mtime).
    Returns ``None`` when no such directory exists or it is empty (the
    original raised IndexError on an empty directory).
    """
    for root, dirs, files in os.walk(path):
        if sub_dir in dirs:
            raw_data_path = os.path.join(root, sub_dir)
            raw_data_files = os.listdir(raw_data_path)
            if not raw_data_files:
                return None
            # max() by mtime replaces sorting the whole list to take [-1];
            # os.path.join replaces the hand-built "{}/{}" path string.
            latest_file = max(
                raw_data_files,
                key=lambda name: os.path.getmtime(
                    os.path.join(raw_data_path, name)))
            return os.path.join(raw_data_path, latest_file)
    return None
def count_id_type(file, count_result_file):
    """Count rows per ``ID_type`` in a (possibly huge) CSV, chunk by chunk.

    Reads ``file`` in one-million-row chunks, aggregates the per-chunk
    counts, writes the final counts to ``count_result_file`` and prints
    them.
    """
    partial_counts = []
    chunk_size = 10 ** 6
    for chunk_number, chunk in enumerate(
            pd.read_csv(file, chunksize=chunk_size, iterator=True), start=1):
        # value_counts() replaces groupby(...).agg({'cnt': 'count'}): the
        # dict-renaming form of .agg() was removed in pandas >= 1.0.
        partial_counts.append(chunk['ID_type'].value_counts().rename('cnt'))
        print('Finish {} chunk!'.format(chunk_number))
    # Sum the per-chunk counts by ID_type value.
    final = pd.concat(partial_counts).groupby(level=0).sum()
    final.index.name = 'ID_type'
    # The original printed the return of to_csv(path), i.e. "None"; write
    # the file and print the counts separately instead.
    final.to_csv(count_result_file)
    print(final)
    print('Done!')
# Interactive driver: ask for a base directory, locate the newest raw-data
# file under it and count its ID_type values into a timestamped CSV.
# NOTE(review): ``file`` shadows the builtin; rename if this grows.
path = input('Please input the path which storeed the current python file:')
file = get_latest_file(path)
print(file)
# Output name carries the run timestamp so repeated runs never collide.
count_result_file = 'count_{}.csv'.format(
    time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime()))
count_id_type(file, count_result_file)
3414724 | <reponame>fmiguelgarcia/conanRecipies
from conans import ConanFile
from conans.tools import download, unzip
import os
import shutil
class BoostQtConan(ConanFile):
    # Conan recipe packaging Boost 1.64.0 as a header-only dependency:
    # source() downloads and strips the release zip, package() ships the
    # header tree under include/boost.
    name = "Boost"
    version = "1.64.0"
    description = "Boost (HEADERS Only) provides free peer-reviewed portable C++ source libraries"
    url = "http://www.boost.org"
    generators = "cmake"
    license = "http://www.boost.org/users/license.html"
    def source(self):
        """Download the official source zip and keep only the headers."""
        # NOTE(review): dl.bintray.com was shut down in 2021 -- this URL
        # presumably no longer resolves; confirm and repoint if needed.
        zip_name = "boost_1_64_0.zip"
        download( "https://dl.bintray.com/boostorg/release/1.64.0/source/boost_1_64_0.zip", zip_name)
        unzip( zip_name)
        # We are going to use only header files
        shutil.move( "boost_1_64_0/boost", ".")
        # Remove unused files
        shutil.rmtree( "boost_1_64_0")
        os.unlink( zip_name)
    def package(self):
        """Copy the boost/ header tree into the package's include dir."""
        self.copy( pattern="*", dst="include/boost",
                src = "boost", keep_path=True)
| StarcoderdataPython |
1875079 | <reponame>Deonstudios/GDM
# -*- coding: utf-8 -*-
from django.contrib import admin
from django.forms.models import BaseInlineFormSet
from django.db import models
from planing_tool.models import *
from django.contrib.gis.db import models as geomodels
from libs.widgets import LatLongWidget
# Geographic reference-data admins (Country -> State -> City hierarchy).
class CountryAdmin(admin.ModelAdmin):
    list_display = ('name', )
    search_fields = ('name', )
class StateAdmin(admin.ModelAdmin):
    list_display = ('name', )
    search_fields = ('name', )
    list_filter = ('country', )
class CityAdmin(admin.ModelAdmin):
    list_display = ('name', 'slug')
    search_fields = ('name', )
    list_filter = ('state__country', )
    # Edit the geographic point as plain lat/long inputs instead of a map.
    formfield_overrides = {
        geomodels.PointField: {'widget': LatLongWidget},
    }
# Thin admins for the simple catalogue models -- default changelist/forms.
# NOTE(review): ``model = X`` on a ModelAdmin is ignored by Django (it is
# an InlineModelAdmin option); harmless, but presumably unintended here.
class AgenciaAdmin(admin.ModelAdmin):
    model = Agencia
class AgenciaInline(admin.StackedInline):
    model = Agencia
class ContactoAdmin(admin.ModelAdmin):
    model = Contacto
class NombreMedioAdmin(admin.ModelAdmin):
    model = NombreMedio
class TypoMedioAdmin(admin.ModelAdmin):
    model = TypoMedio
class ProductoAdmin(admin.ModelAdmin):
    model = Producto
class ProgramaAdmin(admin.ModelAdmin):
    model = Programa
class SoporteAdmin(admin.ModelAdmin):
    model = Soporte
class EmpresaAdmin(admin.ModelAdmin):
    model = Empresa
class PlazaAdmin(admin.ModelAdmin):
    model = Plaza
class CommentAdmin(admin.ModelAdmin):
    model = Comment
# Inline editors and the aggregate admins (Cotizacion, Campania) that embed
# them.  Every inline below originally declared ``num_max = 1``, which is
# not a Django InlineModelAdmin option and was silently ignored; the intent
# was clearly ``max_num = 1`` (cap each inline at one form), fixed here.
class FileAttachmentInline(admin.StackedInline):
    model = FileAttachment

class PautaInline(admin.StackedInline):
    model = Pauta
    classes = ['collapse']
    extra = 1
    max_num = 1  # was ``num_max`` (ignored by Django)

class ObservacionCotizacionesInline(admin.StackedInline):
    model = ObservacionCotizaciones
    extra = 1
    classes = ['collapse']
    max_num = 1  # was ``num_max``

class PautaAdmin(admin.ModelAdmin):
    model = Pauta

class CotizacionAdmin(admin.ModelAdmin):
    model = Cotizacion
    inlines = [
        FileAttachmentInline,
        PautaInline,
        ObservacionCotizacionesInline
    ]

class ObservacionCampaniasInline(admin.TabularInline):
    model = ObservacionCampania
    extra = 1
    classes = ['collapse']
    max_num = 1  # was ``num_max``

class CotizacionesInline(admin.StackedInline):
    model = Cotizacion
    extra = 1
    classes = ['collapse']
    max_num = 1  # was ``num_max``

class PresupuestoInline(admin.StackedInline):
    model = Presupuesto
    extra = 1
    classes = ['collapse']
    max_num = 1  # was ``num_max``

class CampaniaAdmin(admin.ModelAdmin):
    model = Campania
    inlines = [
        PresupuestoInline,
        CotizacionesInline,
        ObservacionCampaniasInline
    ]
# Wire every model to its admin class in the default admin site.
admin.site.register(Comment, CommentAdmin)
admin.site.register(Agencia, AgenciaAdmin)
admin.site.register(Contacto, ContactoAdmin)
admin.site.register(NombreMedio, NombreMedioAdmin)
admin.site.register(TypoMedio, TypoMedioAdmin)
admin.site.register(Producto, ProductoAdmin)
admin.site.register(Programa, ProgramaAdmin)
admin.site.register(Soporte, SoporteAdmin)
admin.site.register(Country, CountryAdmin)
admin.site.register(State, StateAdmin)
admin.site.register(City, CityAdmin)
admin.site.register(Empresa, EmpresaAdmin)
admin.site.register(Plaza, PlazaAdmin)
admin.site.register(Pauta, PautaAdmin)
admin.site.register(Cotizacion, CotizacionAdmin)
admin.site.register(Campania, CampaniaAdmin)
| StarcoderdataPython |
379280 | <gh_stars>0
# -*- coding: utf-8 -*-
"""Prerequisites from the system's package manager."""
import logging
import shutil
from spet.lib.utilities import execute
from spet.lib.utilities import prettify
def zypper(packages):
    """Install zypper packages.

    Args:
        packages (list): Zypper packages to install.

    Returns:
        False when the ``zypper`` binary is unavailable, otherwise None.
    """
    try:
        if not shutil.which("zypper"):
            # Log the failure like the aptitude/apt helpers do, so a missing
            # manager is visible in the log instead of silent.
            logging.error("The zypper package manager could not be found.")
            return False
        logging.info('Installing prerequisites using "zypper".')
        for package in packages:
            cmd = "sudo -E zypper install -l -y --force-resolution " + package
            logging.debug("Zypper install: %s", cmd)
            output = execute.output(cmd)
            logging.debug("Zypper output: %s", output)
    except IOError as err:
        logging.debug(err)
    except ValueError as err:
        logging.debug(err)
    except TypeError as err:
        logging.debug(err)
def yum(packages):
    """Install yum packages.

    Installs the "Development Tools" group first, then each requested
    package (broken packages are skipped).  Silently returns when the
    ``yum`` binary is unavailable.

    Args:
        packages (list): Yum packages to install.
    """
    try:
        if not shutil.which("yum"):
            return
        logging.info('Installing prerequisites using "yum".')
        devtools = "sudo -E yum groupinstall -y --skip-broken " '"Development Tools"'
        logging.debug("Yum install: %s", devtools)
        output = execute.output(devtools)
        logging.debug("Yum output: %s", output)
        for package in packages:
            cmd = "sudo -E yum install -y --skip-broken " + package
            logging.debug("Yum install: %s", cmd)
            output = execute.output(cmd)
            logging.debug("Yum output: %s", output)
    except IOError as err:
        logging.debug(err)
    except ValueError as err:
        logging.debug(err)
    except TypeError as err:
        logging.debug(err)
def apt_get(packages):
    """Install apt-get packages.

    Args:
        packages (list): Apt-get packages to install.
    """
    try:
        if not shutil.which("apt-get"):
            # Consistent with aptitude()/apt(): report the missing manager
            # instead of returning silently.
            logging.error("The apt-get package manager could not be found.")
            return
        logging.info('Installing prerequisites using "apt-get".')
        for package in packages:
            cmd = "sudo -E apt-get install -y --ignore-missing " + package
            logging.debug("Apt-get install: %s", cmd)
            output = execute.output(cmd)
            logging.debug("Apt-get output: %s", output)
    except IOError as err:
        logging.debug(err)
    except ValueError as err:
        logging.debug(err)
    except TypeError as err:
        logging.debug(err)
def aptitude(packages):
    """Install aptitude packages.

    Logs an error and returns when the ``aptitude`` binary is missing.

    Args:
        packages (list): Aptitude packages to install.
    """
    try:
        if not shutil.which("aptitude"):
            logging.error("The aptitude package manager could not be found.")
            return
        logging.info('Installing prerequisites using "aptitude".')
        for package in packages:
            cmd = "sudo -E aptitude install -y --ignore-missing " + package
            logging.debug("Aptitude install: %s", cmd)
            logging.debug("Aptitude output: %s", execute.output(cmd))
    except (IOError, ValueError, TypeError) as err:
        # Same handling for every failure mode: record and move on.
        logging.debug(err)
def apt(packages):
    """Install apt packages.

    Logs an error and returns when the ``apt`` binary is missing.

    Args:
        packages (list): Apt packages to install.
    """
    try:
        if not shutil.which("apt"):
            logging.error("The apt package manager could not be found.")
            return
        logging.info('Installing prerequisites using "apt".')
        for package in packages:
            cmd = "sudo -E apt install -y --ignore-missing " + package
            logging.debug("Apt install: %s", cmd)
            logging.debug("Apt output: %s", execute.output(cmd))
    except (IOError, ValueError, TypeError) as err:
        # Same handling for every failure mode: record and move on.
        logging.debug(err)
def unknown(packages):
    """Report that no supported package manager exists.

    Lists the packages so the user can install them by hand.

    Args:
        packages (list): Package names for user to install.
    """
    logging.warning("Unknown package manager.")
    prettify.error_message(
        "The appropriate package manager for your system could not be found")
    print(
        "Please try manually installing the following and rerun this program:")
    for name in packages:
        print(name)
| StarcoderdataPython |
8157506 | # course_2_assessment_1
"""
The textfile, travel_plans.txt, contains the summer travel plans for someone
with some commentary. Find the total number of characters in the file
and save to the variable num.
"""
# Exercise 1: total number of characters (newlines included) in
# travel_plans.txt.
with open("travel_plans.txt", "r") as plans_file:
    num = sum(len(line) for line in plans_file)
print(num)
print("********************************************************************\n")
"""
We have provided a file called emotion_words.txt that contains lines of words
that describe emotions. Find the total number of words in the file and assign
this value to the variable num_words.
"""
num_words = 0
fileref1 = "emotion_words2.txt"
with open(fileref1, "r") as file:
for line in file:
num_words += len(line.split())
print("number of words : ", num_words)
print("********************************************************************\n")
"""
Assign to the variable num_lines the number of lines in the file
school_prompt.txt.
"""
num_lines = sum(1 for line in open("school_prompt2.txt"))
print(num_lines)
print("********************************************************************\n")
"""
Assign the first 30 characters of school_prompt.txt as a string to
the variable beginning_chars.
"""
f = open("school_prompt2.txt", "r")
beginning_chars = f.read(30)
print(beginning_chars)
print("********************************************************************\n")
# Exercise 5: third word of every line in school_prompt2.txt.
# (The redundant `three = []` pre-initialisation is dropped -- the
# comprehension rebinds the name unconditionally.)
with open("school_prompt2.txt", "r") as prompt_file:
    three = [line.split()[2] for line in prompt_file]
print(three)
print("********************************************************************\n")
"""
6. Challenge: Create a list called emotions that contains the first word of
every line in emotion_words.txt.
"""
fileref = open("emotion_words2.txt", "r")
line = fileref.readlines()
emotions = []
for words in line:
word = words.split()
emotions.append(word[0])
print(emotions)
print("********************************************************************\n")
"""
Assign the first 33 characters from the textfile, travel_plans.txt
to the variable first_chars.
"""
file_arch = open("travel_plans.txt", "r")
first_chars = file_arch.read(33)
print(first_chars)
print("********************************************************************\n")
"""
Challenge: Using the file school_prompt.txt, if the character ‘p’ is in a
word, then add the word to a list called p_words.
"""
fileref = open("school_prompt2.txt", "r")
words = fileref.read().split()
p_words = [words for word in words if 'p' in word]
print(p_words) | StarcoderdataPython |
3413700 | <reponame>Markopolo141/Thesis_code
#Experiment: Bernoulli and Uniform
#---------------------------------
#
# computes the average error for mean estimation in the
# bernoulli and uniform stratified data case using different methods
# with a sample budget between 1 and 20, outputs data to csv file
#do imports
from random import shuffle,random,randint,betavariate
from copy import deepcopy as copy
from methods import *
import sys
try:
from tqdm import tqdm
tqdm_enabled=True
except ImportError:
tqdm_enabled=False
# NOTE(review): this script is Python 2 (print statements below).
# Monte-Carlo comparison of four stratified mean-estimation methods (from
# the project-local ``methods`` module) on two strata: one Uniform(0,1),
# one Bernoulli with 1..20 successes out of 1000.
# parameters
d = 1.0 #data width is one in bernoulli and uniform
m = 300 #sample budget of 300
length = 1000 #strata have 1000 data points
print "Computing Bernoulli and Uniform Experiment"
print "  for bernoulli successes 1 to 20 ---"
with open("Bernoulli_and_Uniform.csv","w") as f:
    f.write("successes,SEBM*,SEBM,Ney,SECM\n");
    for ps in range(1,21): # for bernoulli successes 1 to 20
        print ps
        # structures for strata population data points
        vals = [[],[]]
        # errors achived by the four methods
        error_values = [[],[],[],[]]
        iterator = range(20000)
        if tqdm_enabled:
            iterator = tqdm(iterator)
        for trial in iterator: #iterate a large number of times
            #setup all the data points
            vals[0] = [random() for i in range(length)]
            vals[1] = [0]*length
            for i in range(ps):
                # NOTE(review): collisions are possible here, so fewer than
                # ``ps`` successes may be placed -- presumably acceptable.
                vals[1][randint(0,length-1)]=1
            #calculate true population mean
            collected_vals = sum(vals,[])
            mean = sum(collected_vals)*1.0/len(collected_vals)
            #calculate error achieved using SEBM*
            cvals = copy(vals)
            error_values[0].append(abs(mean-burgess_ideal(cvals,m,d)))
            #calculate error achieved using SEBM
            cvals = copy(vals)
            error_values[1].append(abs(mean-burgess(cvals,m,d)))
            #calculate error achieved using Neyman sampling
            cvals = copy(vals)
            error_values[2].append(abs(mean-super_castro(cvals,m,d)))
            #calculate error achieved using SECM (altered burgess)
            cvals = copy(vals)
            error_values[3].append(abs(mean-altered_burgess(cvals,m,d)))
        #average the errors achieved by each method and output
        error_values = [sum(errors)*1.0/len(errors) for errors in error_values]
        f.write("{},{},{},{},{}\n".format(ps,error_values[0],error_values[1],error_values[2],error_values[3]))
print "Finished Experiment"
| StarcoderdataPython |
356573 | #!/usr/bin/env python
import sys
import os
import logging
# Log everything (DEBUG and up) next to this script.
logging.basicConfig(
    filename=os.path.join(os.path.dirname(__file__), 'example.log'),
    level=logging.DEBUG,
    format='%(asctime)s %(levelname)s %(name)s %(pathname)s:%(lineno)d %(message)s',
)
try:
    import settings # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n(If the file settings.py does indeed exist, it's causing an ImportError somehow.)\n" % __file__)
    sys.exit(1)
# NOTE(review): legacy pre-1.4 Django manage.py -- execute_manager was
# deprecated in Django 1.4 and removed in 1.9; this only runs on old Django.
if __name__ == "__main__":
    from django.core.management import execute_manager
    execute_manager(settings)
| StarcoderdataPython |
1799042 | import multiprocessing
import os
import random
from typing import Any
import numpy as np
import torch
import torch.nn as nn
__all__ = ["loss_fn", "set_seed", "AverageMeter", "optimal_num_of_loader_workers"]
def loss_fn(preds: Any, labels: Any) -> Any:
    """Average cross-entropy over start/end logits (QA span-prediction head).

    Args:
        preds: ``(start_logits, end_logits)`` tensors of shape
            (batch, num_positions) -- assumed, TODO confirm with callers.
        labels: ``(start_labels, end_labels)`` index tensors; positions
            labelled ``-1`` are ignored.

    Returns:
        Scalar tensor: mean of the start and end cross-entropy losses.
    """
    start_preds, end_preds = preds
    start_labels, end_labels = labels
    # One criterion reused for both heads (the original built two
    # identical CrossEntropyLoss instances per call).
    criterion = nn.CrossEntropyLoss(ignore_index=-1)
    start_loss = criterion(start_preds, start_labels)
    end_loss = criterion(end_preds, end_labels)
    return (start_loss + end_loss) / 2
def optimal_num_of_loader_workers() -> int:
    """Pick a sensible DataLoader worker count for this machine.

    With GPUs present: four workers per GPU, capped at the CPU count.
    Without GPUs: leave one CPU free for the main process.
    """
    cpu_count = multiprocessing.cpu_count()
    gpu_count = torch.cuda.device_count()
    if not gpu_count:
        return cpu_count - 1
    return min(cpu_count, gpu_count * 4)
def set_seed(seed: int = 42) -> None:
    """Seed every RNG in use (hash, random, numpy, torch CPU + CUDA)."""
    os.environ["PYTHONHASHSEED"] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # No-ops on CPU-only machines, required for reproducible CUDA runs.
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
class AverageMeter:
    """Running statistics (last/mean/sum/count/max/min) of a streamed metric."""

    def __init__(self):
        self.reset()

    def reset(self) -> None:
        """Zero every accumulator (min starts high so any value beats it)."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0
        self.max = 0
        self.min = 1e5

    def update(self, val: Any, n: int = 1) -> None:
        """Fold in ``val`` observed ``n`` times."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
        self.max = max(self.max, val)
        self.min = min(self.min, val)
| StarcoderdataPython |
6401524 | <gh_stars>0
#!/usr/bin/env python
import rospy
import cv2
from sensor_msgs.msg import CameraInfo
# ROS node that republishes a fixed CameraInfo (Intel RealSense IR1
# intrinsics, judging by the comments below) at 60 Hz on
# /camera_rect/camera_info.  The alternative calibration sets are kept
# commented out for reference.
# NOTE(review): ``q.header.stamp`` is never set, so subscribers see
# zero timestamps -- confirm downstream consumers tolerate that.
rospy.init_node('camers_info', anonymous=True)
pub = rospy.Publisher('/camera_rect/camera_info', CameraInfo, queue_size=10)
rate = rospy.Rate(60)
while not rospy.is_shutdown():
    q=CameraInfo()
    q.header.frame_id='usb_cam'
    q.height=480
    q.width=640
    # message of laptop camera
    # q.D=[-0.011564379996107469, -0.13278346529156865, 0.002871276933386434, 0.012271004247334799, 0.0]
    # q.K=[621.3059884025869, 0.0, 356.0044670915155, 0.0, 613.6311250528399, 228.0540546135174, 0.0, 0.0, 1.0]
    # q.R=[1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
    # q.P=[607.4340209960938, 0.0, 363.77425307081285, 0.0, 0.0, 611.0780029296875, 228.238811434112, 0.0, 0.0, 0.0, 1.0, 0.0]
    # message of camera1
    # q.D=[-1.9819562520245713, 11.24658065837339, -0.16316165042756223, 0.1440911021230497, 0.0]
    # q.K=[1125.718720866548, 0.0, 315.06250520584933, 0.0, 3060.327452265269, 211.01406054383608, 0.0, 0.0, 1.0]
    # q.R=[1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
    # q.P=[1020.1680297851562, 0.0, 359.16432937780337, 0.0, 0.0, 3213.299560546875, 178.4188567831734, 0.0, 0.0, 0.0, 1.0, 0.0]
    # message of IR1 (active calibration: zero distortion, rectified)
    q.D=[0.0, 0.0, 0.0, 0.0, 0.0]
    q.K=[424.0954284667969, 0.0, 423.69146728515625, 0.0, 424.0954284667969, 240.66384887695312, 0.0, 0.0, 1.0]
    q.R=[1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
    q.P=[424.0954284667969, 0.0, 423.69146728515625, 0.0, 0.0, 424.0954284667969, 240.66384887695312, 0.0, 0.0, 0.0, 1.0, 0.0]
    q.binning_x=0
    q.binning_y=0
    q.roi.x_offset=0
    q.roi.y_offset=0
    q.roi.height=0
    q.roi.width=0
    q.roi.do_rectify=False
    pub.publish(q)
    rate.sleep()
| StarcoderdataPython |
1838673 | from stackformation.aws.stacks import (BaseStack, SoloStack)
from troposphere import ec2
from troposphere import ( # noqa
FindInMap, GetAtt, Join,
Parameter, Output, Ref,
Select, Tags, Template,
GetAZs, Export
)
class EIP(object):
    """One Elastic IP resource plus the stack outputs that expose it."""

    def __init__(self, name):
        self.name = name
        # Owning EIPStack; assigned by EIPStack.add_ip().
        self.stack = None

    def _build_ip(self, t):
        """Add the EIP resource and its two outputs to template ``t``."""
        eip = t.add_resource(ec2.EIP("{}EIP".format(self.name)))
        outputs = [
            Output(
                "{}AllocationId".format(self.name),
                Value=GetAtt(eip, "AllocationId"),
                Description="{} Elastic IP".format(self.name)
            ),
            Output(
                "{}EIP".format(self.name),
                Value=Ref(eip),
                Description="{} Elastic IP".format(self.name)
            ),
        ]
        t.add_output(outputs)

    def output_eip(self):
        """Return EIP"""
        return "{}{}EIP".format(self.stack.get_stack_name(), self.name)

    def output_allocation_id(self):
        """Return EIP Allocation ID"""
        return "{}{}AllocationId".format(
            self.stack.get_stack_name(), self.name)
class EIPStack(BaseStack, SoloStack):
    """Stack that provisions a collection of named Elastic IPs."""

    def __init__(self, stack_name=""):
        super(EIPStack, self).__init__("EIP", 0)
        self.stack_name = stack_name
        self.ips = []

    def add_ip(self, name):
        """Create an EIP, attach it to this stack and return it."""
        elastic_ip = EIP(name)
        elastic_ip.stack = self
        self.ips.append(elastic_ip)
        return elastic_ip

    def find_ip(self, name):
        """Return the EIP called ``name``, or None when absent."""
        return next((ip for ip in self.ips if ip.name == name), None)

    def build_template(self):
        """Render every registered EIP into a fresh template."""
        template = self._init_template()
        for ip in self.ips:
            ip._build_ip(template)
        return template
| StarcoderdataPython |
5044164 | import unittest
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
from pydrake.systems.analysis import Simulator
from pydrake.systems.framework import (
Context, DiagramBuilder, PortDataType, VectorSystem)
from pydrake.systems.primitives import SignalLogger
from pydrake.systems.pyplot_visualizer import PyPlotVisualizer
from pydrake.trajectories import PiecewisePolynomial
# TODO(tehbelinda): Augment this test with a Jupyter notebook to make this
# easier to visualize.
class TestVisualizer(PyPlotVisualizer):
    """Minimal 1-DOF visualizer: a rectangle sliding along the x-axis."""
    # Set limits of view port.
    XLIM = (-20., 20.)
    YLIM = (-6., 6.)
    TICK_DIMS = (0.2, 0.8)
    PATCH_WIDTH = 5.
    PATCH_HEIGHT = 1.
    def __init__(self, size):
        # ``size`` is the width of the vector input port; only element 0
        # (the x position) is read in draw().
        PyPlotVisualizer.__init__(self)
        self.DeclareInputPort(PortDataType.kVectorValued, size)
        self.ax.set_xlim(*self.XLIM)
        self.ax.set_ylim(*self.YLIM)
        self.ax.set_aspect('auto')
        self._make_background()
        self.patch = plt.Rectangle((0.0, 0.0),
                                   self.PATCH_WIDTH, self.PATCH_HEIGHT,
                                   fc='#A31F34', ec='k')
        self.patch.set_x(-self.PATCH_WIDTH / 2)  # Center at x.
    def _make_background(self):
        """Draw the static scene: the x-axis and an origin tick mark."""
        # X-axis.
        plt.plot(self.XLIM, np.zeros_like(self.XLIM), 'k')
        # Tick mark centered at the origin.
        tick_pos = -0.5 * np.asarray(self.TICK_DIMS)
        self.ax.add_patch(plt.Rectangle(tick_pos, *self.TICK_DIMS, fc='k'))
    def draw(self, context):
        """Move the patch to the current x; accepts either a Context or a
        plain state vector (the TypeError fallback handles trajectory
        playback, which passes raw values)."""
        try:
            x = self.EvalVectorInput(context, 0).get_value()[0]
        except TypeError:
            x = context[0]
        self.patch.set_x(x - self.PATCH_WIDTH / 2)
class SimpleContinuousTimeSystem(VectorSystem):
    """Scalar continuous-time system xdot = -x + x^3 with output y = x."""
    def __init__(self):
        VectorSystem.__init__(self,
                              0,                    # Zero inputs.
                              1)                    # One output.
        self.DeclareContinuousState(1)              # One state variable.
    # xdot(t) = -x(t) + x^3(t)
    def DoCalcVectorTimeDerivatives(self, context, u, x, xdot):
        xdot[:] = -x + x**3
    # y(t) = x(t)
    def DoCalcVectorOutput(self, context, u, x, y):
        y[:] = x
class TestPyplotVisualizer(unittest.TestCase):
    """Exercises PyPlotVisualizer: animation from a log, from a trajectory,
    and the record/replay API."""
    def test_simple_visualizer(self):
        # Simulate the scalar system, log its output, and animate the log.
        builder = DiagramBuilder()
        system = builder.AddSystem(SimpleContinuousTimeSystem())
        logger = builder.AddSystem(SignalLogger(1))
        builder.Connect(system.get_output_port(0), logger.get_input_port(0))
        visualizer = builder.AddSystem(TestVisualizer(1))
        builder.Connect(system.get_output_port(0),
                        visualizer.get_input_port(0))
        diagram = builder.Build()
        context = diagram.CreateDefaultContext()
        context.SetContinuousState([0.9])
        simulator = Simulator(diagram, context)
        simulator.AdvanceTo(.1)
        ani = visualizer.animate(logger, repeat=True)
        self.assertIsInstance(ani, animation.FuncAnimation)
    def test_trajectory(self):
        # animate() also accepts a trajectory directly (no simulation).
        builder = DiagramBuilder()
        visualizer = builder.AddSystem(TestVisualizer(1))
        ppt = PiecewisePolynomial.FirstOrderHold(
            [0., 1.], [[2., 3.], [2., 1.]])
        ani = visualizer.animate(ppt)
        self.assertIsInstance(ani, animation.FuncAnimation)
    def test_recording(self):
        visualizer = PyPlotVisualizer()
        # Assert that we start with no recordings. This uses private API for
        # testing _recorded_contexts and should not be used publicly.
        self.assertEqual(len(visualizer._recorded_contexts), 0)
        visualizer.start_recording()
        # Artificially produce some specific contexts.
        times = [0.003, 0.2, 1.1, 1.12]
        context = visualizer.AllocateContext()
        for time in times:
            context.SetTime(time)
            visualizer.Publish(context)
        # Check that there are now recorded contexts with matching times.
        visualizer.stop_recording()
        self.assertEqual(len(visualizer._recorded_contexts), len(times))
        for i, time in enumerate(times):
            self.assertEqual(time, visualizer._recorded_contexts[i].get_time())
        ani = visualizer.get_recording_as_animation()
        self.assertIsInstance(ani, animation.FuncAnimation)
        # reset_recording() discards the captured contexts.
        visualizer.reset_recording()
        self.assertEqual(len(visualizer._recorded_contexts), 0)
| StarcoderdataPython |
6666326 | <reponame>LaudateCorpus1/inverse-compositional-STN
import numpy as np
import scipy.linalg
import os,time
import tensorflow as tf
import warp
# load MNIST data
def loadMNIST(fname):
    """Load the preprocessed MNIST archive, creating it on first use.

    Returns ``(trainData, validData, testData)`` -- dicts with float32
    "image" arrays of shape (N, 28, 28) and "label" index arrays.
    """
    if not os.path.exists(fname):
        # Download and preprocess MNIST.  NOTE(review): the TF tutorial
        # loader used here was removed in TensorFlow 2 -- regenerate the
        # .npz with another tool if this branch fails on a modern install.
        from tensorflow.examples.tutorials.mnist import input_data
        mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
        trainData, validData, testData = {}, {}, {}
        trainData["image"] = mnist.train.images.reshape([-1, 28, 28]).astype(np.float32)
        validData["image"] = mnist.validation.images.reshape([-1, 28, 28]).astype(np.float32)
        testData["image"] = mnist.test.images.reshape([-1, 28, 28]).astype(np.float32)
        trainData["label"] = np.argmax(mnist.train.labels.astype(np.float32), axis=1)
        validData["label"] = np.argmax(mnist.validation.labels.astype(np.float32), axis=1)
        testData["label"] = np.argmax(mnist.test.labels.astype(np.float32), axis=1)
        # exist_ok / dirname guard: the original crashed when the target
        # directory already existed or fname had no directory component.
        dirname = os.path.dirname(fname)
        if dirname:
            os.makedirs(dirname, exist_ok=True)
        np.savez(fname, train=trainData, valid=validData, test=testData)
        os.system("rm -rf MNIST_data")
    # The archive stores pickled dicts, so allow_pickle is required
    # (numpy >= 1.16.3 disables pickle loading by default).
    MNIST = np.load(fname, allow_pickle=True)
    trainData = MNIST["train"].item()
    validData = MNIST["valid"].item()
    testData = MNIST["test"].item()
    return trainData, validData, testData
# generate training batch
# generate training batch
def genPerturbations(opt):
    """Build a TF op producing random warp-parameter perturbations.

    Random displacements (Gaussian per-corner noise plus a shared
    translation) are applied to the four canonical corners, then warp
    parameters of the configured type are fitted to those displacements
    (exact solve for homographies, least squares otherwise).  Returns a
    (batchSize, #params) tensor; for homographies the identity is
    subtracted so the result is a perturbation around zero.
    """
    with tf.name_scope("genPerturbations"):
        # Canonical corner coordinates, tiled per batch element.
        X = np.tile(opt.canon4pts[:,0],[opt.batchSize,1])
        Y = np.tile(opt.canon4pts[:,1],[opt.batchSize,1])
        # Per-corner jitter + shared translation for each axis.
        dX = tf.random_normal([opt.batchSize,4])*opt.pertScale \
            +tf.random_normal([opt.batchSize,1])*opt.transScale
        dY = tf.random_normal([opt.batchSize,4])*opt.pertScale \
            +tf.random_normal([opt.batchSize,1])*opt.transScale
        O = np.zeros([opt.batchSize,4],dtype=np.float32)
        I = np.ones([opt.batchSize,4],dtype=np.float32)
        # fit warp parameters to generated displacements
        if opt.warpType=="homography":
            # 8 unknowns, 8 equations (4 point correspondences): exact solve.
            A = tf.concat([tf.stack([X,Y,I,O,O,O,-X*(X+dX),-Y*(X+dX)],axis=-1),
                           tf.stack([O,O,O,X,Y,I,-X*(Y+dY),-Y*(Y+dY)],axis=-1)],1)
            b = tf.expand_dims(tf.concat([X+dX,Y+dY],1),-1)
            pPert = tf.matrix_solve(A,b)[:,:,0]
            pPert -= tf.to_float([[1,0,0,0,1,0,0,0]])
        else:
            # Fewer parameters than equations: build the Jacobian of the
            # warp family and fit by least squares.
            if opt.warpType=="translation":
                J = np.concatenate([np.stack([I,O],axis=-1),
                                    np.stack([O,I],axis=-1)],axis=1)
            if opt.warpType=="similarity":
                J = np.concatenate([np.stack([X,Y,I,O],axis=-1),
                                    np.stack([-Y,X,O,I],axis=-1)],axis=1)
            if opt.warpType=="affine":
                J = np.concatenate([np.stack([X,Y,I,O,O,O],axis=-1),
                                    np.stack([O,O,O,X,Y,I],axis=-1)],axis=1)
            dXY = tf.expand_dims(tf.concat([dX,dY],1),-1)
            pPert = tf.matrix_solve_ls(J,dXY)[:,:,0]
        return pPert
# make training batch
def makeBatch(opt,data,PH):
	"""Sample a random training mini-batch and bind it to the placeholders.

	Draws opt.batchSize indices (with replacement, via np.random.randint)
	from *data* and returns a feed_dict mapping the [image, label]
	placeholder pair in *PH* to the selected rows.
	"""
	sampleN = len(data["image"])
	pickIdx = np.random.randint(sampleN,size=[opt.batchSize])
	imagePH,labelPH = PH
	return {
		imagePH: data["image"][pickIdx],
		labelPH: data["label"][pickIdx],
	}
# evaluation on test set
# evaluation on test set
def evalTest(opt,sess,data,PH,prediction,imagesEval=[]):
	"""Evaluate classification accuracy over the whole test set.

	Runs `prediction` (and optionally `imagesEval` tensors) batch by batch,
	padding the final partial batch with index-0 dummies and discarding the
	dummy outputs.  For STN/IC-STN nets it also collects per-class mean and
	variance of the two warped-image tensors.  Returns (accuracy, mean, var);
	mean/var are None for other net types.

	NOTE: ``imagesEval=[]`` is a mutable default argument; it is only read
	here so the usual aliasing bug cannot trigger, but callers should not
	rely on that.
	"""
	N = len(data["image"])
	# put data in placeholders
	[image,label] = PH
	batchN = int(np.ceil(N/opt.batchSize))
	warped = [{},{}]
	count = 0
	for b in range(batchN):
		# use some dummy data (0) as batch filler if necessary
		if b!=batchN-1:
			realIdx = np.arange(opt.batchSize*b,opt.batchSize*(b+1))
		else:
			realIdx = np.arange(opt.batchSize*b,N)
		idx = np.zeros([opt.batchSize],dtype=int)
		idx[:len(realIdx)] = realIdx
		batch = {
			image: data["image"][idx],
			label: data["label"][idx],
		}
		evalList = sess.run([prediction]+imagesEval,feed_dict=batch)
		pred = evalList[0]
		# Only the first len(realIdx) entries are genuine; the rest are filler.
		count += pred[:len(realIdx)].sum()
		if opt.netType=="STN" or opt.netType=="IC-STN":
			imgs = evalList[1:]
			# Bucket the two warped-image outputs by ground-truth label.
			for i in range(len(realIdx)):
				l = data["label"][idx[i]]
				if l not in warped[0]: warped[0][l] = []
				if l not in warped[1]: warped[1][l] = []
				warped[0][l].append(imgs[0][i])
				warped[1][l].append(imgs[1][i])
	accuracy = float(count)/N
	if opt.netType=="STN" or opt.netType=="IC-STN":
		# Per-class mean/variance images, stacked in dict-iteration order.
		mean = [np.array([np.mean(warped[0][l],axis=0) for l in warped[0]]),
				np.array([np.mean(warped[1][l],axis=0) for l in warped[1]])]
		var = [np.array([np.var(warped[0][l],axis=0) for l in warped[0]]),
			   np.array([np.var(warped[1][l],axis=0) for l in warped[1]])]
	else: mean,var = None,None
	return accuracy,mean,var
| StarcoderdataPython |
12802531 | import setuptools
# Read the long description from the README shipped next to this file.
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="pyzeebe",
    version="2.3.1",
    author="<NAME>",
    author_email="<EMAIL>",
    description="Zeebe client api",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/JonatanMartens/pyzeebe",
    # Test packages are excluded here, in find_packages(); the former
    # top-level ``exclude=`` kwarg is not a valid setup() option (setuptools
    # warns "Unknown distribution option") and has been removed.
    packages=setuptools.find_packages(exclude=("tests",)),
    install_requires=["oauthlib==3.1.0", "requests-oauthlib==1.3.0", "zeebe-grpc==0.26.0.0"],
    keywords="zeebe workflow workflow-engine",
    license="MIT",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.6",
)
| StarcoderdataPython |
6673894 | <gh_stars>0
# Hacked By Ry2uko ;}
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
df = pd.read_csv('medical_examination.csv')
def is_overweight(row):
    """Return 1 when the row's BMI (weight kg / height m squared) exceeds 25, else 0."""
    height_m = row['height'] / 100
    bmi = row['weight'] / height_m ** 2
    return 1 if bmi > 25 else 0
# Derived columns: overweight flag, plus cholesterol/gluc normalized so that
# 0 = normal and 1 = above normal (the raw dataset encodes 1 as normal).
df['overweight'] = df.apply(is_overweight, axis=1)
df['cholesterol'] = np.where(df['cholesterol'] == 1, 0, 1)
df['gluc'] = np.where(df['gluc'] == 1, 0, 1)
def draw_cat_plot():
    """Draw the categorical count plot of risk factors split by cardio outcome.

    Saves the figure to ``catplot.png`` and returns it.
    """
    # Long format: one row per (cardio, feature, value) combination.
    df_cat = pd.melt(df, id_vars=['cardio'], value_vars=['cholesterol', 'gluc', 'smoke', 'alco', 'active', 'overweight'])
    # Dummy column so count() yields a 'total' per group.
    df_cat['total'] = 1
    df_cat = df_cat.groupby(['cardio', 'variable', 'value'], as_index=False).count()
    fig = sns.catplot(x='variable', y='total', data=df_cat, hue='value', kind='bar', col='cardio').fig
    fig.savefig('catplot.png')
    return fig
def draw_heat_map():
    """Draw the lower-triangle correlation heat map of the cleaned data.

    Drops rows with impossible blood pressure (diastolic > systolic) and
    height/weight outside the 2.5th-97.5th percentiles, then saves the
    figure to ``heatmap.png`` and returns it.
    """
    df_heat = df[
        (df['ap_lo'] <= df['ap_hi']) &
        (df['height'] >= df['height'].quantile(0.025)) &
        (df['height'] <= df['height'].quantile(0.975)) &
        (df['weight'] >= df['weight'].quantile(0.025)) &
        (df['weight'] <= df['weight'].quantile(0.975))
    ]
    corr = df_heat.corr(method='pearson')
    # Mask the upper triangle so each correlation is shown once.
    mask = np.triu(corr)
    fig, ax = plt.subplots(figsize=(10, 10))
    sns.heatmap(corr, linewidths=1, annot=True, square=True, mask=mask, fmt='.1f', center=0.08, cbar_kws={'shrink': 0.5})
    fig.savefig('heatmap.png')
    return fig
if __name__ == '__main__':
# Test here
pass | StarcoderdataPython |
265036 | # Generated by Django 3.1.7 on 2021-02-22 11:52
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Bug model.

    Applied migrations should not be edited by hand; create a new
    migration for any schema change.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Bug',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('bug_name', models.CharField(max_length=100, verbose_name='Nome do bug')),
                ('date', models.DateTimeField(auto_now_add=True, verbose_name='data de criação')),
                ('last_update', models.DateTimeField(auto_now=True, verbose_name='Ultima Atualização')),
                ('description', models.TextField(blank=True, verbose_name='Descrição do Bug')),
                ('status', models.CharField(choices=[('OPEN', 'Em Aberto'), ('EVAL', 'Em Avaliação'), ('SOLV', 'Solucionado')], default='OPEN', max_length=20)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bugs', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'Bugs',
                'ordering': ['id'],
            },
        ),
    ]
| StarcoderdataPython |
# Multiplication-table script: read an integer and print its 1..10 times
# table between '=' separator rules (output identical to the original
# single giant format string, including the trailing blank line).
n = int(input('Digite um número inteiro para ver sua tabuada:'))
print('=' * 12)
table = ''.join('{} x {:02} = {}\n'.format(n, factor, n * factor) for factor in range(1, 11))
print(table)
print('=' * 12)
12807817 | from flask_admin.contrib.sqla.fields import QuerySelectField
from flask_ckeditor import CKEditor, CKEditorField
from admin.opencode import MxImageUploadField
from model.models import *
from model.netModels import *
import os.path as op
import os
from jinja2 import Markup
from model.adminModels import Common_Admin as BaseAdmin
from model.netModels import Net_Index_Images
from setting import Aliyun
import flask_login as login
file_path = op.join(op.dirname(__file__), '../static/product') # file upload path
try:
    os.mkdir(file_path)
except OSError:
    # Directory already exists (or cannot be created); proceed either way.
    pass
## Helper for building drop-down choice lists
def getDictForChoice(name):
    """Build (key, value) choice tuples from the DictConfig rows named *name*."""
    rows = DictConfig().query.filter(DictConfig.name == name).all()
    return [(row.key, row.value) for row in rows]
class Common_Admin(BaseAdmin):
    """Base admin view: only the authenticated 'home' account may access views."""
    def is_accessible(self):
        # Flask-Admin hook: gate every derived view behind the 'home' user.
        if login.current_user.is_authenticated:
            if login.current_user.username=='home':
                return True
            return False
        return False
# Home page carousel images
class Index_Images_Admin(Common_Admin):
    """Admin view for the home-page carousel images (Net_Index_Images)."""
    # Column spec triples: [field, label (zh), widget type].
    columnsa=[
        ['name', '图片名称','text'],
        ['url', '跳转链接', 'text'],
        ['yongtu', '图片用途', 'text'],
        ['pic', '图片地址1920*600', 'image']
    ]
    def __init__(self, session, **kwargs):
        # net_init translates the column spec into Flask-Admin settings.
        self.net_init(self.columnsa)
        super(Index_Images_Admin, self).__init__(Net_Index_Images, session, **kwargs)
# Home page image & text content
class Net_Index_Admin(Common_Admin):
    """Admin view for the home-page content blocks (Net_Index)."""
    columns = [
        ['brand', '品牌描述', 'CKEditorField'],
        ['brandImage' , '品牌图片', 'image'],
        ['culture' ,'文化710*660', 'CKEditorField'],
        ['progress' , '历程','CKEditorField'],
        ['footer', '页面底部内容', 'text']
    ]
    def __init__(self, session, **kwargs):
        self.net_init(self.columns)
        super(Net_Index_Admin, self).__init__(Net_Index, session, **kwargs)
class CompanyInfo_Admin(Common_Admin):
    """Admin view for the company-profile page (Net_CompanyInfo)."""
    columns = [
        ['banner','顶部大图1920*600','image'],
        ['introduce','公司简介','CKEditorField'],
        ['culture' ,'企业文化','CKEditorField'],
        ['yongtuCompany' ,'公司图片710*660','image'],
        ['yongtuculture' ,'文化图片960*700','image']
    ]
    def __init__(self, session, **kwargs):
        self.net_init(self.columns)
        super(CompanyInfo_Admin, self).__init__(Net_CompanyInfo, session, **kwargs)
class Honour_Admin(Common_Admin):
    """Admin view for company honours / awards (Net_Honour)."""
    column_list = ['id', 'time','content']
    column_labels = {
        'time': '时间',
        'content' : '荣誉内容'
    }
    def __init__(self, session, **kwargs):
        super(Honour_Admin, self).__init__(Net_Honour, session, **kwargs)
class Contact_Admin(Common_Admin):
    """Admin view for company contact details (Net_Contact)."""
    create_modal = True
    columns = [
        ['banner','顶部大图','image'],
        ['zh_company', '公司中文名','text'],
        ['en_company', '公司英文名','text'],
        ['email','公司邮箱','text'],
        ['phone','公司固定电话','text'],
        ['kf_phone','客服电话','text'],
        ['mobilephone', '移动号码','text'],
        ['logo','企业Logo(500*154)', 'image'],
        ['weixin','微信二维码(124*124)' , 'image'],
        ['province','省','text'],
        ['city', '市','text'],
        ['address', '完整地址','text']
    ]
    def __init__(self, session, **kwargs):
        self.net_init(self.columns)
        super(Contact_Admin, self).__init__(Net_Contact, session, **kwargs)
class MessageBoard_Admin(Common_Admin):
    """Admin view for visitor message-board entries (Net_MessageBoard)."""
    column_list = ['id', 'name', 'mobile', 'email', 'advice']
    column_labels = {
        'name': '姓名',
        'mobile': '手机号',
        'email': '邮箱',
        'advice': '建议'
    }
    def __init__(self, session, **kwargs):
        super(MessageBoard_Admin, self).__init__(Net_MessageBoard, session, **kwargs)
class News_Category_Admin(Common_Admin):
    """Admin view for news categories (Net_News_Category)."""
    columns = [
        [ 'name', '类别名称', 'text'],
        [ 'desc' , '类别描述', 'text']
    ]
    def __init__(self, session, **kwargs):
        self.net_init(self.columns)
        super(News_Category_Admin, self).__init__(Net_News_Category, session, **kwargs)
class News_Admin(Common_Admin):
    """Admin view for news articles (Net_News) with a category drop-down."""
    columns = [
        [ 'title', '新闻标题', 'text'],
        [ 'date' , '日期', 'text'],
        [ 'content' , '新闻内容', 'CKEditorField'],
        ['author' , '新闻作者', 'text'],
        [ 'pic','新闻图片' ,'image'],
        ['introduction', '导读', 'CKEditorField'],
        [ 'category' , '新闻类别','QuerySelectField',[(todo_type.id, todo_type.name) for todo_type in (Net_News_Category().query.all())]],
        ['index', '是否推送首页(首页只能推送3条)', 'text']
    ]
    # List-view formatter: render the stored category id as its name.
    def _list_thumbnail2(view, context, model, name):
        if name == 'category':
            if model.category==None:
                return ""
            for category in (Net_News_Category().all()):
                if model.category==category.id:
                    return category.name
    column_formatters = {
        'category': _list_thumbnail2
    }
    # Plain callables consumed by QuerySelectField below (not bound methods).
    def query_factory():
        return [(category.id) for category in (Net_News_Category().all())]
    def get_label(obj):
        return Net_News_Category().get(obj).name
    def get_pk(obj):
        return obj
    form_extra_fields = {
        # 'tasktype':form.Select2Field('task category', choices =todo_types,coerce=int )
        'category': QuerySelectField(label=u'新闻类别', query_factory=query_factory,get_label=get_label,get_pk=get_pk)
    }
    def __init__(self, session, **kwargs):
        self.net_init(self.columns)
        super(News_Admin, self).__init__(Net_News, session, **kwargs)
class Net_MessageBoard_Admin(Common_Admin):
    """Second admin view over Net_MessageBoard with editable form columns."""
    column_list = [ 'name','mobile','email','advice']
    column_labels = {
        'name': '姓名',
        'mobile' : '联系电话',
        'email' : '邮箱',
        'advice' : '建议'
    }
    form_columns = column_list
    def __init__(self, session, **kwargs):
        super(Net_MessageBoard_Admin, self).__init__(Net_MessageBoard, session, **kwargs)
class Net_Join_Admin(Common_Admin):
    """Admin view for franchise-join applications (Net_Join)."""
    column_list = [ 'name','mobile','email','condition']
    column_labels = {
        'name': '姓名',
        'mobile': '联系电话',
        'email': '邮箱',
        'condition': '条件'
    }
    form_columns = column_list
    def __init__(self, session, **kwargs):
        super(Net_Join_Admin, self).__init__(Net_Join, session, **kwargs)
class Net_Join_Page_Admin(Common_Admin):
    """Admin view for the join/franchise landing-page content blocks (Net_Join_Page)."""
    columns = [
        ['banner', '头部大图1920*550', 'image'],
        ['market_left', '市场分析-左313*239', 'CKEditorField'],
        ['market_rigth', '市场分析-右313*425', 'CKEditorField'],
        ['qiye_left', '企业和荣誉-左313*294', 'CKEditorField'],
        ['qiye_right', '企业和荣誉-右313*431', 'CKEditorField'],
        ['zhaoshang_left', '招商-左(313*224)*2', 'CKEditorField'],
        ['zhaoshang_center', '招商-中', 'CKEditorField'],
        ['zhaoshang_right', '招商-右382*604', 'CKEditorField'],
        ['join_left', '加盟-左313*295', 'CKEditorField'],
        ['join_right', '加盟-右', 'CKEditorField'],
        ['product_left', '产品-左313*359', 'CKEditorField'],
        ['product_right', '产品-右313*359', 'CKEditorField']
    ]
    def __init__(self, session, **kwargs):
        self.net_init(self.columns)
        super(Net_Join_Page_Admin, self).__init__(Net_Join_Page, session, **kwargs)
class Net_Product_Category_Parent_Admin(Common_Admin):
    """Admin view for top-level product categories (Net_Product_Category_Parent)."""
    columns = [
        [ 'name', '类别名称', 'text'],
        [ 'desc' , '类别描述', 'text']
    ]
    def __init__(self, session, **kwargs):
        self.net_init(self.columns)
        super(Net_Product_Category_Parent_Admin, self).__init__(Net_Product_Category_Parent, session, **kwargs)
class Net_Product_Category_Child_Admin(Common_Admin):
    """Admin view for child product categories, linked to a parent category."""
    columns = [
        [ 'name', '类别名称', 'text'],
        [ 'desc' , '类别描述', 'text'],
        ['parentId', '父级类别', 'text']
    ]
    # List-view formatter: render the stored parent id as its name.
    def _list_thumbnail2(view, context, model, name):
        if name == 'parentId':
            if model.parentId==None:
                return ""
            for category in (Net_Product_Category_Parent().all()):
                if model.parentId==category.id:
                    return category.name
    column_formatters = {
        'parentId': _list_thumbnail2
    }
    # Plain callables consumed by QuerySelectField below (not bound methods).
    def query_factory():
        return [(category.id) for category in (Net_Product_Category_Parent().all())]
    def get_label(obj):
        return Net_Product_Category_Parent().get(obj).name
    def get_pk(obj):
        return obj
    form_extra_fields = {
        # 'tasktype':form.Select2Field('task category', choices =todo_types,coerce=int )
        # NOTE(review): label 新闻类别 means "news category" but this field is
        # the parent PRODUCT category - looks like a copy-paste; confirm.
        'parentId': QuerySelectField(label=u'新闻类别', query_factory=query_factory,get_label=get_label,get_pk=get_pk)
    }
    def __init__(self, session, **kwargs):
        self.net_init(self.columns)
        super(Net_Product_Category_Child_Admin, self).__init__(Net_Product_Category_Child, session, **kwargs)
class Net_Product_Admin(Common_Admin):
    """Admin view for products (NetProduct) with a category drop-down."""
    columns = [
        [ 'name', '产品名称', 'text'],
        [ 'title' , '产品短标题', 'text'],
        ['categoryId', '类别', 'text'],
        ['pic', '产品图片313*338', 'image'],
        ['style', '风格', 'text'],
        ['desc', '产品描述', 'CKEditorField']
    ]
    # List-view formatter: render the stored category id as its name.
    def _list_thumbnail2(view, context, model, name):
        if name == 'categoryId':
            if model.categoryId==None:
                return ""
            for category in (Net_Product_Category_Child().all()):
                if model.categoryId==category.id:
                    return category.name
    column_formatters = {
        'categoryId': _list_thumbnail2
    }
    # Plain callables consumed by QuerySelectField below (not bound methods).
    def query_factory():
        return [(category.id) for category in (Net_Product_Category_Child().all())]
    def get_label(obj):
        return Net_Product_Category_Child().get(obj).name
    def get_pk(obj):
        return obj
    form_extra_fields = {
        # 'tasktype':form.Select2Field('task category', choices =todo_types,coerce=int )
        'categoryId': QuerySelectField(label=u'产品类别', query_factory=query_factory,get_label=get_label,get_pk=get_pk)
    }
    def __init__(self, session, **kwargs):
        self.net_init(self.columns)
        super(Net_Product_Admin, self).__init__(NetProduct, session, **kwargs)
class Net_OtherBussiness_Admin(Common_Admin):
    """Admin view for partners / core-business entries (Net_otherBussiness)."""
    columns = [
        [ 'name', '名称', 'text'],
        [ 'title' , '导读', 'text'],
        ['categoryId', '类别', 'Select2Field',[(1, '合作伙伴'), (2, '核心业务')]],
        ['pic', '图片', 'image'],
        ['url', '跳转链接', 'text'],
        ['desc', '描述', 'CKEditorField']
    ]
    def __init__(self, session, **kwargs):
        self.net_init(self.columns)
        super(Net_OtherBussiness_Admin, self).__init__(Net_otherBussiness, session, **kwargs)
| StarcoderdataPython |
12809852 | import logging
import re
import sys
import functools
from django.utils import six
try:
    # Python 2/3 compatibility shim: keep the name ``unicode`` bound (str on Py3).
    unicode = unicode
except NameError:
    unicode = str
logger = logging.getLogger(__name__)
# Query-string matchers: key=value pairs, and bare keys with no value.
URL_PARAM_RE = re.compile('(?P<k>[^(=|&)]+)=(?P<v>[^&]+)(&|$)')
URL_PARAM_NO_VALUE_RE = re.compile('(?P<k>[^(&|?)]+)(&|$)')
def import_statsd():
    '''
    Import only the statsd package by wolph, not the mozilla statsd.
    Returns the module, or None when it is missing or lacks start/stop timing.
    TODO: Move to mozilla statsd which is more widely used
    '''
    try:
        # check to see if the django_statsd we found
        # supports start (stop) timing.
        import django_statsd
        is_wolphs_statsd = hasattr(
            django_statsd, 'start') and hasattr(django_statsd, 'stop')
        if not is_wolphs_statsd:
            django_statsd = None
    except ImportError:
        django_statsd = None
    return django_statsd
django_statsd = import_statsd()
def start_statsd(path):
    '''
    Simple wrapper to save some typing
    '''
    # No-op when the wolph django_statsd module was not importable.
    if django_statsd:
        django_statsd.start(path)
def stop_statsd(path):
    """Stop the statsd timer for *path*; no-op when django_statsd is absent."""
    if django_statsd:
        django_statsd.stop(path)
def base64_url_decode_php_style(inp):
    '''
    Decode a PHP/Facebook-style base64url string and return the raw bytes.

    PHP strips the "=" padding and uses "-" / "_" in place of "+" / "/".
    The stdlib urlsafe decoder already handles that alphabet, so we only
    need to restore the padding before decoding.

    For a full explanation see:
    http://stackoverflow.com/questions/3302946/how-to-base64-url-decode-in-python
    and
    http://sunilarora.org/parsing-signedrequest-parameter-in-python-bas
    '''
    import base64
    padding_factor = (4 - len(inp) % 4) % 4
    return base64.urlsafe_b64decode(inp + "=" * padding_factor)
def encode_params(params_dict):
    '''
    Encode every key and value of *params_dict* to a byte string
    (via smart_str, UTF-8 for unicode input) and return the new dict.
    '''
    return {smart_str(key): smart_str(value)
            for key, value in params_dict.items()}
def smart_str(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Adapted from django, needed for urlencoding
    Returns a bytestring version of 's', encoded as specified in 'encoding'.
    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # BUG FIX: the original checked isinstance(s, (types.NoneType, int)), but
    # types.NoneType does not exist on Python 3, so any strings_only=True call
    # raised AttributeError there.  type(None) works on both 2 and 3.
    if strings_only and isinstance(s, (type(None), int)):
        return s
    elif not isinstance(s, six.string_types):
        try:
            return str(s)
        except UnicodeEncodeError:
            if isinstance(s, Exception):
                # An Exception subclass containing non-ASCII data that doesn't
                # know how to print itself properly. We shouldn't raise a
                # further exception.
                return ' '.join([smart_str(arg, encoding, strings_only,
                                           errors) for arg in s])
            return unicode(s).encode(encoding, errors)
    elif isinstance(s, unicode):
        return s.encode(encoding, errors)
    elif s and encoding != 'utf-8':
        return s.decode('utf-8', errors).encode(encoding, errors)
    else:
        return s
# we are no longer supporting python 2.5
# so we can simply assume import json works
import json
def send_warning(message, request=None, e=None, **extra_data):
    '''
    Uses the logging system to send a message to logging and sentry

    Attaches the current user's username (when authenticated), the stringified
    exception *e*, any extra keyword data, and the active exc_info traceback.
    '''
    username = None
    if request and request.user.is_authenticated():
        username = request.user.username
    error_message = None
    if e:
        error_message = unicode(e)
    data = {
        'username': username,
        'body': error_message,
    }
    data.update(extra_data)
    # NOTE(review): logger.warn is a deprecated alias of logger.warning.
    logger.warn(message,
                exc_info=sys.exc_info(), extra={
                    'request': request,
                    'data': data
                })
def merge_urls(generated_url, human_url):
    '''
    merge the generated_url with the human_url following this rules:
    params introduced by generated_url are kept
    final params order comes from generated_url
    there's an hack to support things like this http://url?param&param=value
    NOTE: the doctest expected values below use Python 2 style u'' reprs.
    >>> gen = "http://mysite.com?p1=a&p2=b&p3=c&p4=d"
    >>> hum = "http://mysite.com?p4=D&p3=C&p2=B"
    >>> merge_urls(gen, hum)
    u'http://mysite.com?p1=a&p2=B&p3=C&p4=D'
    >>> gen = "http://mysite.com?id=a&id_s=b&p_id=d"
    >>> hum = "http://mysite.com?id=A&id_s=B&p_id=D"
    >>> merge_urls(gen, hum)
    u'http://mysite.com?id=A&id_s=B&p_id=D'
    >>> gen = "http://mysite.com?p1=a&p2=b&p3=c&p4=d"
    >>> hum = "http://mysite.com"
    >>> merge_urls(gen, hum)
    u'http://mysite.com'
    >>> gen = "http://ad.zanox.com/ppc/?18595160C2000463397T&zpar4=scrapbook&zpar0=e2494344_c4385641&zpar1=not_authenticated&zpar2=unknown_campaign&zpar3=unknown_ref&ULP=http://www.asos.com/ASOS/ASOS-MARS-Loafer-Shoes/Prod/pgeproduct.aspx?iid=1703516&cid=4172&sh=0&pge=2&pgesize=20&sort=-1&clr=Black&affId=2441"
    >>> hum = "http://ad.zanox.com/ppc/?18595160C2000463397T&zpar3=scrapbook&ULP=http://www.asos.com/ASOS/ASOS-MARS-Loafer-Shoes/Prod/pgeproduct.aspx?iid=1703516&cid=4172&sh=0&pge=2&pgesize=20&sort=-1&clr=Black&affId=2441"
    >>> merge_urls(gen, hum)
    u'http://ad.zanox.com/ppc/?18595160C2000463397T&zpar4=scrapbook&zpar0=e2494344_c4385641&zpar1=not_authenticated&zpar2=unknown_campaign&zpar3=scrapbook&ULP=http://www.asos.com/ASOS/ASOS-MARS-Loafer-Shoes/Prod/pgeproduct.aspx?iid=1703516&cid=4172&sh=0&pge=2&pgesize=20&sort=-1&clr=Black&affId=2441'
    >>> gen = "http://mysite.com?invalidparam&p=2"
    >>> hum = "http://mysite.com?p=1"
    >>> merge_urls(gen, hum)
    u'http://mysite.com?invalidparam&p=1'
    '''
    # No query string in the human URL: keep it verbatim.
    if '?' not in human_url:
        return u'%s' % human_url
    gen_path, gen_args = generated_url.split('?', 1)
    hum_path, hum_args = human_url.split('?', 1)
    # (key, value) pairs, and bare keys carrying no '=value' part.
    get_args = lambda args: [(m.group('k'), m.group('v'))
                             for m in URL_PARAM_RE.finditer(args)]
    get_novalues_args = lambda args: [m.group('k')
                                      for m in URL_PARAM_NO_VALUE_RE.finditer(
                                          args) if "=" not in m.group('k')]
    hum_dict = dict(get_args(hum_args))
    out_args = []
    # prepend crazy param w/o values
    for param in get_novalues_args(gen_args):
        out_args.append(u'%s' % param)
    # replace gen url params
    for k, v in get_args(gen_args):
        out_args.append(u'%s=%s' % (k, hum_dict.get(k, v)))
    return u'%s?%s' % (gen_path, '&'.join(out_args))
class memoized(object):
    '''Decorator. Caches a function's return value each time it is called.
    If called later with the same arguments, the cached value is returned
    (not reevaluated).  Unhashable arguments bypass the cache entirely.
    '''

    def __init__(self, func):
        self.func = func
        self.cache = {}

    def __call__(self, *args):
        try:
            hit = args in self.cache
        except TypeError:
            # Unhashable arguments (e.g. lists) cannot be cached;
            # fall back to calling the wrapped function directly.
            return self.func(*args)
        if not hit:
            self.cache[args] = self.func(*args)
        return self.cache[args]

    def __repr__(self):
        '''Return the function's docstring.'''
        return self.func.__doc__

    def __get__(self, obj, objtype):
        '''Support instance methods.'''
        return functools.partial(self.__call__, obj)
def camel_to_underscore(name):
    '''Convert camelcase style naming to underscore style naming
    e.g. SpamEggs -> spam_eggs '''
    import string
    # Prefix every ASCII uppercase letter with '_', then trim edge
    # underscores and lowercase everything.
    prefixed = ''.join('_%c' % ch if ch in string.ascii_uppercase else ch
                       for ch in name)
    return prefixed.strip('_').lower()
def validate_is_instance(instance, classes):
    '''
    Raise ValueError unless *instance* is an instance of *classes*.

    Usage
    validate_is_instance(10, int)
    validate_is_instance('a', (str, unicode))
    '''
    accepted = classes if isinstance(classes, tuple) else (classes,)
    if isinstance(instance, accepted):
        return
    raise ValueError(
        'Expected instance type %s found %s' % (accepted, type(instance)))
def is_json(content):
    '''
    Unfortunately facebook returns 500s which mean they are down
    Or 500s with a nice error message because you use open graph wrong
    So we have to figure out which is which :)

    Returns True when *content* parses as JSON, False otherwise.
    '''
    try:
        json.loads(content)
    except (ValueError, TypeError):
        # ValueError covers json.JSONDecodeError; TypeError covers
        # non-string input.  The original bare ``except`` also swallowed
        # KeyboardInterrupt/SystemExit, which should propagate instead.
        return False
    return True
| StarcoderdataPython |
9687108 | <filename>car-segment/ensemble.py
from common import *
from submit import *
from dataset.carvana_cars import *
from net.tool import *
def run_vote():
    """Majority-vote ensemble of saved per-pixel probability maps.

    Loads each prediction .npy, counts per-pixel votes (prob >= 128), and
    writes a run-length-encoded submission via prob_to_csv.
    """
    prediction_files=[
        '/root/share/project/kaggle-carvana-cars/results/xx5-UNet512_2/submit/probs.8.npy',
        '/root/share/project/kaggle-carvana-cars/results/xx5-UNet512_2_two-loss/submit/probs.8.npy',
        '/root/share/project/kaggle-carvana-cars/results/xx5-UNet512_2_two-loss-full_1/submit/probs.8.npy',
    ]
    out_dir ='/root/share/project/kaggle-carvana-cars/results/ensemble/xxx'
    log = Logger()
    # NOTE(review): the log file is opened before makedirs creates out_dir;
    # if Logger.open creates the file immediately this fails on a fresh run.
    log.open(out_dir+'/log.vote.txt',mode='a')
    os.makedirs(out_dir, exist_ok=True)
    write_list_to_file(prediction_files, out_dir+'/prediction_files.txt')
    #----------------------------------------------------------
    #read names
    split_file = CARVANA_DIR +'/split/'+ 'test%dx%d_100064'%(CARVANA_H,CARVANA_W)
    with open(split_file) as f:
        names = f.readlines()
    names = [name.strip()for name in names]
    names = [name.split('/')[-1]+'.jpg' for name in names]
    #read probs
    num_test = len(names)
    votes = np.zeros((num_test, CARVANA_H, CARVANA_W), np.uint8)
    num_files = len(prediction_files)
    for n in range(num_files):
        prediction_file = prediction_files[n]
        print(prediction_files[n])
        probs = np.load(prediction_file)
        # One vote per model per pixel whose probability byte is >= 128.
        votes += probs >=128
        probs = None
    #prepare csv file -------------------------------------------------------
    threshold = 1 #/num_files
    probs = votes
    gz_file = out_dir+'/results-ensemble-th%05f.csv.gz'%threshold
    prob_to_csv(gz_file, names, votes, log, threshold)
# main #################################################################
if __name__ == '__main__':
    # Script entry point: run the ensemble vote end to end.
    print( '%s: calling main function ... ' % os.path.basename(__file__))
    run_vote()
    print('\nsucess!')  # (sic) typo kept to preserve runtime output
def apply(df):
    """Placeholder transform hook: log a message and return *df* unchanged."""
    print('Do something here!')
    return df
8063232 | #<NAME>
#Codewars : @Kunalpod
#Problem name: Complete The Pattern #14
#Problem level: 6 kyu
def pattern(*args):
    """Return the numeric diamond pattern of size n (= args[0]) as one string.

    An optional second argument repeats the diamond vertically; repeats
    share the single-digit rows between them.  Digits wrap via ``% 10``
    and every row keeps its trailing spaces.  n < 1 yields "".
    """
    n = args[0]
    repeats = args[1] if len(args) > 1 else 1
    if n < 1:
        return ""
    if repeats <= 1:
        repeats = 1

    def edge_row(j):
        digit = str(j % 10)
        return ' ' * (j - 1) + digit + ' ' * (2 * (n - j) - 1) + digit + ' ' * (j - 1)

    middle_row = ' ' * (n - 1) + str(n % 10) + ' ' * (n - 1)
    rows = []
    for cycle in range(repeats):
        first = 1 if cycle == 0 else 2
        rows.extend(edge_row(j) for j in range(first, n))
        rows.append(middle_row)
        rows.extend(edge_row(j) for j in range(n - 1, 0, -1))
    return '\n'.join(rows)
| StarcoderdataPython |
1667076 | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from unittest import TestCase
from uw_upass import get_upass_url, get_upass_status
from uw_upass.models import UPassStatus
from restclients_core.exceptions import DataFailureException
from uw_upass.util import fdao_upass_override
@fdao_upass_override
class UPassTest(TestCase):
    """Tests for uw_upass status lookup, URL building, and HTML parsing."""

    def test_javerage(self):
        status = get_upass_status("javerage")
        self.assertTrue(status.is_current)
        self.assertTrue(status.is_student)
        self.assertFalse(status.is_employee)
        status_json = status.json_data()
        self.assertIsNotNone(status_json['status_message'])
        self.assertTrue(status_json['is_current'])
        self.assertFalse(status_json['is_employee'])
        self.assertIsNotNone(str(status))
        status = get_upass_status("javeragefac")
        self.assertTrue(status.is_current)
        self.assertTrue(status.is_employee)
        status = get_upass_status("phil")
        self.assertFalse(status.is_current)
        self.assertFalse(status.is_student)
        self.assertRaises(DataFailureException,
                          get_upass_status,
                          "none")
        self.assertRaises(Exception,
                          get_upass_status,
                          "jerror")

    def test_get_url(self):
        # assertEquals is a deprecated unittest alias; use assertEqual.
        self.assertEqual(get_upass_url("javerage"),
                         "/MyUWUpass/MyUWUpass.aspx?id=javerage")

    def test_message_parsing(self):
        fac_message = ("<p><span class='highlight'>Your Faculty/Staff U-PASS"
                       " Membership is current.</span></p><p>It can take 24 to"
                       " 48 hours after purchase or Husky Card replacement"
                       " for your U-PASS to be transmitted to ORCA readers."
                       " You must tap your card on an ORCA reader within 60"
                       " days from purchase or receiving a replacement Husky"
                       " Card. This updates your smart chip and finalizes"
                       " activation of your U-PASS.</p><p><a"
                       " href='http://www.washington.edu/u-pass'>Learn more"
                       "</a> about U-PASS program member benefits, finalizing"
                       " activation, and the U-PASS terms of use.</p>")
        stu_message = ("<p><span class='highlight'>Your Student U-PASS "
                       "Membership is current.</span></p><p>It can take 24 "
                       "to 48 hours after issuance or Husky Card replacement "
                       "for your U-PASS to be transmitted to ORCA readers. "
                       "You must tap your card on an ORCA reader within 60 "
                       "days from U-PASS issuance or receiving a replacement "
                       "Husky Card. This updates your smart chip and "
                       "finalizes activation of your U-PASS.</p><p><a "
                       "href='http://www.washington.edu/u-pass'>Learn more</a>"
                       " about U-PASS program member benefits and finalizing "
                       "activation.</p>")
        not_current = ("<p><span class='highlight'>Your U-PASS is not current."
                       "</span></p><p>"
                       "<a href='http://www.washington.edu/u-pass'>Learn more"
                       "</a> about U-PASS program member benefits.</p>")
        nc_status = UPassStatus.create(not_current)
        self.assertFalse(nc_status.is_current)
        self.assertFalse(nc_status.is_employee)
        self.assertFalse(nc_status.is_student)
        stu_status = UPassStatus.create(stu_message)
        self.assertTrue(stu_status.is_current)
        self.assertFalse(stu_status.is_employee)
        self.assertTrue(stu_status.is_student)
        fac_status = UPassStatus.create(fac_message)
        self.assertTrue(fac_status.is_current)
        self.assertTrue(fac_status.is_employee)
        self.assertFalse(fac_status.is_student)
| StarcoderdataPython |
372265 | <gh_stars>1-10
import os
from pyproj import Transformer
from vyperdatum.pipeline import *
from vyperdatum.core import VyperCore
vc = VyperCore() # run this once so that the path to the grids is added in pyproj
def test_get_regional_pipeline_upperlower():
    """Pipeline lookup is case-insensitive in the from/to datum names."""
    pipe = get_regional_pipeline('Ellipse', 'TSS', 'CAORblan01_8301', r'core\geoid12b\g2012bu0.gtx')
    assert pipe == get_regional_pipeline('ellipse', 'tss', 'CAORblan01_8301', r'core\geoid12b\g2012bu0.gtx')
def test_get_regional_pipeline_nad83_tss():
    """Ellipse->TSS pipeline has one forward and one inverse grid step."""
    pipe = get_regional_pipeline('ellipse', 'TSS', 'CAORblan01_8301', r'core\geoid12b\g2012bu0.gtx')
    assert pipe.count('+step +proj') == 1
    assert pipe.count('+step +inv +proj') == 1
    assert pipe.count('gtx') == 2
    transformer = Transformer.from_pipeline(pipe)
    result = transformer.transform(xx=-124.853, yy=41.227, zz=0)
    assert result == (-124.853, 41.227000000000004, 30.86302107201744)
def test_get_regional_pipeline_tss_nad83():
    """TSS->ellipse is the inverse pipeline: the z offset flips sign."""
    pipe = get_regional_pipeline('tss', 'ellipse', 'CAORblan01_8301', r'core\geoid12b\g2012bu0.gtx')
    assert pipe.count('+step +inv +proj') == 1
    assert pipe.count('+step +proj') == 1
    assert pipe.count('gtx') == 2
    transformer = Transformer.from_pipeline(pipe)
    result = transformer.transform(xx=-124.853, yy=41.227, zz=0)
    assert result == (-124.853, 41.227000000000004, -30.86302107201744)
def test_get_regional_pipeline_mllw():
    """Ellipse->MLLW adds a third grid step for the mllw datum surface."""
    pipe = get_regional_pipeline('ellipse', 'mllw', 'CAORblan01_8301', r'core\geoid12b\g2012bu0.gtx')
    assert pipe.count('+step +proj') == 2
    assert pipe.count('+step +inv +proj') == 1
    assert pipe.count('gtx') == 3
    assert pipe.count('mllw') == 1
    transformer = Transformer.from_pipeline(pipe)
    result = transformer.transform(xx=-124.853, yy=41.227, zz=0)
    assert result == (-124.853, 41.227000000000004, 31.97132104264427)
def test_get_regional_pipeline_mhw():
    """Ellipse->MHW mirrors the MLLW case with the mhw grid instead."""
    pipe = get_regional_pipeline('ellipse', 'mhw', 'CAORblan01_8301', r'core\geoid12b\g2012bu0.gtx')
    assert pipe.count('+step +proj') == 2
    assert pipe.count('+step +inv +proj') == 1
    assert pipe.count('gtx') == 3
    assert pipe.count('mhw') == 1
    transformer = Transformer.from_pipeline(pipe)
    result = transformer.transform(xx=-124.853, yy=41.227, zz=0)
    assert result == (-124.853, 41.227000000000004, 30.11322104560066)
def test_get_regional_pipeline_null():
    """Identical from/to datums yield no pipeline (None)."""
    pipe = get_regional_pipeline('mllw', 'mllw', 'CAORblan01_8301', r'core\geoid12b\g2012bu0.gtx')
    assert pipe is None
if __name__ == '__main__':
    # Allow running this module directly; normally these run under pytest.
    test_get_regional_pipeline_mhw()
    test_get_regional_pipeline_mllw()
    test_get_regional_pipeline_nad83_tss()
    test_get_regional_pipeline_null()
    test_get_regional_pipeline_tss_nad83()
    test_get_regional_pipeline_upperlower()
8068128 | <filename>api/views/story.py
import django_filters
from django.contrib.auth.models import User, Group
from rest_framework import viewsets
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework import filters
from api.pagination import LargeResultsSetPagination
from api.permissions import IsAdminUserOrReadOnly
from api.serializers import StorySerializer
from api.models.gcd.story import GCDStory
class StoryFilter(django_filters.FilterSet):
    """Query-param filter for GCDStory, filterable by parent issue."""
    class Meta:
        model = GCDStory
        fields = ['issue',]
class StoryViewSet(viewsets.ReadOnlyModelViewSet):
    """
    Read-only API endpoint for GCD stories, filterable by issue.
    (The previous docstring said "customers", a copy-paste leftover.)
    """
    queryset = GCDStory.objects.all()
    serializer_class = StorySerializer
    pagination_class = LargeResultsSetPagination
    permission_classes = (IsAdminUserOrReadOnly,)
    filter_backends = (filters.DjangoFilterBackend,)
    filter_class = StoryFilter
90758 | # -*- coding: utf-8 -*-
# Imports
#==================================================
import json, keras, gensim, codecs
import tensorflow as tf
import numpy as np
import keras.preprocessing.text as kpt
from keras.callbacks import Callback
from keras.layers import Dropout, Input, Dense, Embedding, LSTM, Bidirectional
from keras.models import Model, load_model
from keras_contrib.layers import CRF
from keras_contrib.losses import crf_loss
from keras_contrib.metrics import crf_viterbi_accuracy
from sklearn.metrics import classification_report, confusion_matrix, precision_recall_fscore_support
from sklearn.model_selection import train_test_split
# Get negations
#==================================================
def get_negation_instances(dataset):
    """Split CoNLL-style rows into per-sentence word/lemma/POS/label lists.

    Column 2/3/4 are word/lemma/POS; column 5 marks negation cues, where
    '_' and '***' mean "no cue" ([0, 1]) and anything else is a cue
    ([1, 0]).  The module-global ``length`` accumulates the cue count.
    Rows that are too short are silently skipped by the except below.
    """
    global length
    data = dataset
    words = []
    lemmas = []
    pos = []
    labels = []
    for i in range(len(data)):
        w = []
        l = []
        p = []
        c = []
        for i2 in range(len(data[i])):
            try:
                w.append(data[i][i2][2])
                l.append(data[i][i2][3])
                p.append(data[i][i2][4])
                if data[i][i2][5] == '_':
                    c.append([0, 1])
                elif data[i][i2][5] == '***':
                    c.append([0, 1])
                else:
                    c.append([1, 0])
                    length+=1
            except Exception:
                # NOTE(review): this swallows ALL errors for a malformed row,
                # which can leave w/l/p/c with different lengths; confirm the
                # input format guarantees all-or-nothing column access.
                pass
        words.append(w)
        lemmas.append(l)
        pos.append(p)
        labels.append(c)
    return words, lemmas, pos, labels
# ---------------------- more metrics -------------------
# ---------------------- more metrics -------------------
class MoreMetrics(Callback):
    """Keras callback: per-epoch validation P/R/F1 with best-model saving.

    Relies on module-level globals: model, X_valid, X_lemmas_valid,
    X_pos_valid, Y_valid and best_f1.
    """
    def on_train_begin(self, logs={}):
        # NOTE: logs={} mutable default kept to match the Keras callback API.
        self.val_f1s = []
        self.val_recalls = []
        self.val_precisions = []
    def on_epoch_end(self, epoch, logs={}):
        global best_f1
        val_predict = model.predict([X_valid, X_lemmas_valid, X_pos_valid])
        val_targ = Y_valid
        valid_pre, valid_rec, valid_f1 = get_eval_epoch(val_predict,val_targ)
        print ("Precision/recall/F1 score on validation set", valid_pre, valid_rec, valid_f1)
        # Checkpoint only when validation F1 improves.
        if valid_f1 > best_f1:
            best_f1 = valid_f1
            model.save('cue_bilstm-crf.hdf5')
            print ('saved best model')
        else:
            print ('No progress')
        return
def get_eval(predictions, gs):
    """Print a sklearn classification report comparing predictions to gold labels.

    ``predictions`` holds per-token probability rows (argmax -> label);
    ``gs`` holds one-hot gold rows where [1, 0] means label 0 (cue).
    """
    predicted_labels = []
    gold_labels = []
    for batch in predictions:
        for probs in batch:
            predicted_labels.append(list(probs).index(probs.max()))
    for batch in gs:
        for onehot in batch:
            gold_labels.append(0 if list(onehot) == [1, 0] else 1)
    print(classification_report(gold_labels, predicted_labels, digits=4))
def get_eval_epoch(predictions, gs):
    """Return (precision, recall, F1) for the cue class.

    Class index 0 is the cue class because the one-hot gold vector [1, 0]
    maps to label 0.
    """
    predicted = []
    gold = []
    for batch in predictions:
        predicted.extend(list(row).index(row.max()) for row in batch)
    for batch in gs:
        gold.extend(0 if list(row) == [1, 0] else 1 for row in batch)
    precision, recall, f1, _support = precision_recall_fscore_support(gold, predicted)
    return precision[0], recall[0], f1[0]
# ---------------------- Padding features -------------------
def pad_documents(sentences, padding_word='<PAD>'):
    """
    Pad every sentence with *padding_word* up to the length of the longest
    sentence, and return the padded sentences.
    """
    target_length = max(len(sentence) for sentence in sentences)
    return [
        sentence + [padding_word] * (target_length - len(sentence))
        for sentence in sentences
    ]
def pad_labels(sentences, padding_word=None):
    """
    Pad every label sequence up to the length of the longest one and return
    the padded sequences.

    The default padding label is [0, 1], i.e. the one-hot vector for
    "not a cue".  A mutable list is no longer used as the default argument
    (a classic Python pitfall); ``None`` acts as the sentinel instead, which
    is backward-compatible for all existing callers.
    """
    if padding_word is None:
        padding_word = [0, 1]
    sequence_length = max(len(sentence) for sentence in sentences)
    return [
        sentence + [padding_word] * (sequence_length - len(sentence))
        for sentence in sentences
    ]
# ---------------------- storing labeling results -------------------
def store_prediction(lex, dic_inv, pred_dev, gold_dev):
    """Write word/gold/prediction triples to 'cue_best_pred.txt'.

    One tab-separated line per non-padding token, with a blank line between
    sentences.  ``dic_inv`` maps token ids back to word strings.
    """
    print("Storing labelling results for dev or test set...")
    with codecs.open('cue_best_pred.txt', 'wb', 'utf8') as store_pred:
        for sent_ids, sys_rows, gold_rows in zip(lex, pred_dev, gold_dev):
            tokens = [dic_inv.get(token_id) for token_id in sent_ids]
            assert len(tokens) == len(sys_rows) == len(gold_rows)
            for token, probs, gold in zip(tokens, sys_rows, gold_rows):
                predicted = list(probs).index(probs.max())
                gold_label = 0 if list(gold) == [1, 0] else 1
                if token != "<PAD>":
                    store_pred.write("%s\t%s\t%s\n" % (token, gold_label, predicted))
            store_pred.write("\n")
#==================================================
# loading datasets
#==================================================
length = 0
lengths = []
# Corpus format: sentences separated by blank lines, one token per line,
# tab-separated columns (CoNLL style).  The path is a placeholder.
data = open('./data/....txt').read()
data = data.split('\n\n')
data = [item.split('\n') for item in data]
data = [[i.split('\t') for i in item] for item in data]
words, lemmas, pos, labels = get_negation_instances(data)
lengths.append(length)
# Pad every sentence to the corpus-wide maximum length.
words_x = pad_documents(words)
lemmas_x = pad_documents(lemmas)
pos_x = pad_documents(pos)
labels_x = pad_labels(labels)
#Preparing words without pre-trained embeddings
# ==================================================
# create a new Tokenizer
tokenizer = kpt.Tokenizer(lower=False)
# feed our texts to the Tokenizer
tokenizer.fit_on_texts(words_x)
# Tokenizers come with a convenient list of words and IDs
dictionary = tokenizer.word_index
x_words = [[dictionary[word] for word in text] for text in words_x]
vocabulary_size = len(dictionary)
# Inverse mapping (id -> word), needed when writing predictions to disk.
dic_inv = dict(map(reversed, tokenizer.word_index.items()))
#Preparing lemma without pre-trained embeddings
# ==================================================
# create a new Tokenizer
Lemmatokenizer = kpt.Tokenizer(lower=False)
# feed our texts to the Tokenizer
Lemmatokenizer.fit_on_texts(lemmas_x)
# Tokenizers come with a convenient list of words and IDs
Lemmadictionary = Lemmatokenizer.word_index
x_lemmas = [[Lemmadictionary[word] for word in text] for text in lemmas_x]
lemma_vocabulary_size = len(Lemmadictionary)
#==================================================
# Preparing POS embeddings
# ==================================================
# create a new Tokenizer
postokenizer = kpt.Tokenizer(lower=False)
# feed our texts to the Tokenizer
postokenizer.fit_on_texts(pos_x)
# Tokenizers come with a convenient list of words and IDs
posdictionary = postokenizer.word_index
x_pos = [[posdictionary[pos] for pos in text] for text in pos_x]
tag_voc_size = len(posdictionary)
#==================================================
# Splitting data into the original train, validation and test sets
#==================================================
xwords = np.array(x_words, dtype='int32')
xlemmas = np.array(x_lemmas, dtype='int32')
xpos = np.array(x_pos, dtype='int32')
xlabels = np.array(labels_x, dtype='int32')
sequence_length = xwords.shape[1]
# The three feature matrices are split with the same random_state and the
# same targets, so the resulting partitions stay row-aligned across features.
Xtrain, X_test, Ytrain, Y_test = train_test_split(xwords, xlabels, random_state=42, test_size=0.2)
X_lemmas_train, X_lemmas_test, _, _ = train_test_split(xlemmas, xlabels, random_state=42, test_size=0.2)
X_pos_train, X_pos_test, _, _ = train_test_split(xpos, xlabels, random_state=42, test_size=0.2)
X_train, X_valid, Y_train, Y_valid = train_test_split(Xtrain, Ytrain, random_state=42, test_size=0.2)
X_lemmas_train, X_lemmas_valid, _, _ = train_test_split(X_lemmas_train, Ytrain, random_state=42, test_size=0.2)
X_pos_train, X_pos_valid, _, _ = train_test_split(X_pos_train, Ytrain, random_state=42, test_size=0.2)
# ---------------------- Parameters section -------------------
# Model Hyperparameters
embedding_dim = 100
hidden_dims = 400
# ~ # Training parameters
num_epochs = 20
batch_size = 32
best_f1 = 0.0
embeddings_initializer = keras.initializers.RandomUniform(minval=-1.0, maxval=1.0, seed=42)
moremetrics = MoreMetrics()
#==================================================
# ---------------------- training section -------------------
#==================================================
print("Creating BiLSTM Model")
inputs_w = Input(shape=(sequence_length,), dtype='int32')
inputs_l = Input(shape=(sequence_length,), dtype='int32')
inputs_pos = Input(shape=(sequence_length,), dtype='int32')
# Word, lemma and POS embeddings are summed (not concatenated), so all
# three must share the same embedding_dim.  +1 leaves room for padding id 0.
w_emb = Embedding(vocabulary_size+1, embedding_dim, input_length=sequence_length, embeddings_initializer=embeddings_initializer, trainable=True)(inputs_w)
l_emb = Embedding(lemma_vocabulary_size+1, embedding_dim, input_length=sequence_length, embeddings_initializer=embeddings_initializer, trainable=True)(inputs_l)
p_emb = Embedding(tag_voc_size+1, embedding_dim, input_length=sequence_length, embeddings_initializer=embeddings_initializer, trainable=True)(inputs_pos)
summed = keras.layers.add([w_emb, l_emb, p_emb])
dropout_emb = Dropout(0.5)(summed)
BiLSTM = Bidirectional(LSTM(hidden_dims, recurrent_dropout=0.5, return_sequences=True))(dropout_emb)
# CRF output layer: 2 tags per token (cue / not-cue), Viterbi-decoded.
outputs = CRF(2, sparse_target=False)(BiLSTM)
model = Model(inputs=[inputs_w, inputs_l, inputs_pos], outputs=outputs)
model.compile('adam', loss=crf_loss, metrics=[crf_viterbi_accuracy])
model.summary()
# MoreMetrics saves 'cue_bilstm-crf.hdf5' whenever the validation F1 improves.
model.fit([X_train, X_lemmas_train, X_pos_train], Y_train, batch_size=batch_size, epochs=num_epochs, verbose=1, validation_data=([X_valid, X_lemmas_valid, X_pos_valid], Y_valid), callbacks=[moremetrics])
#==================================================
# ---------------------- testing section -------------------
#==================================================
custom_objects = {'CRF': CRF, 'crf_loss': crf_loss, 'crf_viterbi_accuracy': crf_viterbi_accuracy}
# Reload the best checkpoint (not the last epoch) for final evaluation.
model = load_model('cue_bilstm-crf.hdf5', custom_objects)
preds = model.predict([X_test, X_lemmas_test, X_pos_test])
get_eval(preds, Y_test)
store_prediction(X_test, dic_inv, preds, Y_test)
| StarcoderdataPython |
5059564 | <reponame>bsmithgall/cookiecutter-kindergarten<filename>{{ cookiecutter.app_name }}/{{ cookiecutter.app_name }}_backend/{{ cookiecutter.app_name }}/blueprints/web.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from flask import Blueprint, render_template
# Directory of this module, resolved to an absolute path.
__location__ = os.path.realpath(
    os.path.join(os.getcwd(), os.path.dirname(__file__))
)
# Template/static locations are overridable via environment variables and
# default to mock directories shipped next to this package.
TEMPLATE_DIR = os.environ.get('TEMPLATE_DIR', __location__ + '/../mock_templates/')
STATIC_DIR = os.environ.get('STATIC_DIR', __location__ + '/../mock_static/')
web_bp = Blueprint('web', __name__,
                   template_folder=TEMPLATE_DIR,
                   static_folder=STATIC_DIR,
                   static_url_path='/static',
                   )
@web_bp.route('/', defaults={'path': ''})
@web_bp.route('/<path:path>')
def web_root(path):
    """Catch-all route: serve the single-page-app entry point for any path."""
    return render_template('index.html'), 200
5146137 | <reponame>droid4control/python-mbus
from ctypes import Structure, c_ubyte, c_size_t, c_long, POINTER
# TODO: is this correct?
c_time_t = c_long
class MBusDataInformationBlock(Structure):
    """ctypes mirror of libmbus' data information block (DIB)."""
    _fields_ = [
        ('dif', c_ubyte),        # data information field
        ('dife', c_ubyte*10),    # DIF extension bytes (fixed-size buffer of 10)
        ('ndife', c_size_t),     # number of DIFE bytes actually used -- presumably; confirm against libmbus
    ]
class MBusValueInformationBlock(Structure):
    """ctypes mirror of libmbus' value information block (VIB)."""
    _fields_ = [
        ('vif', c_ubyte),            # value information field
        ('vife', c_ubyte*10),        # VIF extension bytes (fixed-size buffer of 10)
        ('nvife', c_size_t),         # number of VIFE bytes actually used -- presumably; confirm against libmbus
        ('custom_vif', c_ubyte*128), # buffer for manufacturer-specific VIF strings
    ]
class MBusDataRecordHeader(Structure):
    """Record header: a DIB plus a VIB, as laid out by libmbus."""
    _fields_ = [
        ('dib', MBusDataInformationBlock),
        ('vib', MBusValueInformationBlock),
    ]
class MBusDataRecord(Structure):
    """One M-Bus data record; a singly-linked list node (see 'next')."""
    pass
# _fields_ is assigned after the class statement because 'next' is a pointer
# to MBusDataRecord itself -- the standard ctypes pattern for
# self-referential structures.
MBusDataRecord._fields_ = [
    ('drh', MBusDataRecordHeader),
    ('data', c_ubyte * 234),   # raw record payload buffer
    ('data_len', c_size_t),    # number of payload bytes actually used
    ('timestamp', c_time_t),
    ('next', POINTER(MBusDataRecord)),
]
| StarcoderdataPython |
9779689 | <filename>datetime.py
# NOTE(review): this file appears to be named datetime.py, which shadows the
# standard-library datetime module -- the commented-out `import datetime`
# below would import this file itself.  Rename the module before re-enabling
# the datetime examples.
# import datetime
# x = datetime.datetime.now()
x = 'sss'
print(x)
# Return the year and name of weekday:
#print(x.year)
#print(x.strftime("%A"))
# Create a date object:
#x = datetime.datetime(2019, 02, 15)
# Display the name of the month:
#print(x.strftime("%B"))
# more detail: https://www.w3schools.com/PYTHON/python_datetime.asp
11329212 | <filename>tests/test_popjwt.py
import json
from Cryptodome.PublicKey import RSA
from jwkest.jwe import JWE
from jwkest.jwk import KEYS
from jwkest.jwk import RSAKey
from oic.extension.popjwt import PJWT
from oic.extension.popjwt import PopJWT
__author__ = "roland"
RSA_PRIVATE_KEY = """-----<KEY>"""
def _eq(l1, l2):
return set(l1) == set(l2)
def test_pop_jwk():
    """PJWT round-trips a proof-of-possession JWT whose cnf carries a raw JWK.

    NOTE(review): the x/y coordinates were redacted to <KEY> placeholders in
    this copy (the unquoted one makes the dict literal a syntax error); the
    original EC P-256 values must be restored before this module imports.
    """
    jwt = {
        "iss": "https://server.example.com",
        "aud": "https://client.example.org",
        "exp": 1361398824,
        "cnf": {
            "jwk": {
                "kty": "EC",
                "use": "sig",
                "crv": "P-256",
                "x": "<KEY>",
                "y": <KEY>",
            }
        },
    }
    pjwt = PJWT(**jwt)
    s = pjwt.to_json()
    assert s
    de_pjwt = PJWT().from_json(s)
    assert _eq(de_pjwt.keys(), ["iss", "aud", "exp", "cnf"])
    assert list(de_pjwt["cnf"].keys()) == ["jwk"]
    assert _eq(de_pjwt["cnf"]["jwk"].keys(), ["kty", "use", "crv", "x", "y"])
# Module-level RSA key shared by the JWE tests below.  RSA_PRIVATE_KEY is a
# redacted placeholder in this copy, so this line fails until it is restored.
rsa = RSA.importKey(RSA_PRIVATE_KEY)
def test_pop_jwe():
    """PJWT round-trips a cnf carrying an encrypted key (JWE) and decrypts it back."""
    # Symmetric key to be wrapped; the 'k' value is a redacted placeholder.
    jwk = {
        "kty": "oct",
        "alg": "HS256",
        "k": "<KEY>",
    }
    encryption_keys = [RSAKey(use="enc", key=rsa, kid="some-key-id")]
    jwe = JWE(json.dumps(jwk), alg="RSA-OAEP", enc="A256CBC-HS512")
    _jwe = jwe.encrypt(keys=encryption_keys, kid="some-key-id")
    jwt = {
        "iss": "https://server.example.com",
        "aud": "https://client.example.org",
        "exp": 1361398824,
        "cnf": {"jwe": _jwe},
    }
    pjwt = PJWT(**jwt)
    s = pjwt.to_json()
    de_pjwt = PJWT().from_json(s)
    assert _eq(de_pjwt.keys(), ["iss", "aud", "exp", "cnf"])
    assert list(de_pjwt["cnf"].keys()) == ["jwe"]
    _jwe = de_pjwt["cnf"]["jwe"]
    # Decrypting the round-tripped JWE must recover the original symmetric key.
    msg = jwe.decrypt(_jwe, encryption_keys)
    assert msg
    assert json.loads(msg.decode("utf8")) == jwk
def test_pop_kid():
    """PJWT round-trips a cnf that references the key only by kid."""
    jwt = {
        "iss": "https://server.example.com",
        "aud": "https://client.example.org",
        "exp": 1361398824,
        "cnf": {"kid": "dfd1aa97-6d8d-4575-a0fe-34b96de2bfad"},
    }
    pjwt = PJWT(**jwt)
    s = pjwt.to_json()
    assert s
    de_pjwt = PJWT().from_json(s)
    assert _eq(de_pjwt.keys(), ["iss", "aud", "exp", "cnf"])
    assert list(de_pjwt["cnf"].keys()) == ["kid"]
    assert de_pjwt["cnf"]["kid"] == jwt["cnf"]["kid"]  # type: ignore
def test_pop_jku():
    """PJWT round-trips a cnf that points to a JWKS URL (jku) plus a kid."""
    jwt = {
        "iss": "https://server.example.com",
        "sub": "17760704",
        "aud": "https://client.example.org",
        "exp": 1440804813,
        "cnf": {"jku": "https://keys.example.net/pop-keys.json", "kid": "2015-08-28"},
    }
    pjwt = PJWT(**jwt)
    s = pjwt.to_json()
    assert s
    de_pjwt = PJWT().from_json(s)
    assert _eq(de_pjwt.keys(), ["iss", "sub", "aud", "exp", "cnf"])
    assert _eq(de_pjwt["cnf"].keys(), ["jku", "kid"])
    assert de_pjwt["cnf"].to_dict() == jwt["cnf"]
def test_pjwt_with_jwk():
    """PopJWT.pack_jwk embeds a JWK and adds iat/sub to the token.

    NOTE(review): the x/y coordinates were redacted to <KEY> placeholders
    (the unquoted one is a syntax error); restore real values before running.
    """
    pj = PopJWT(
        "https://server.example.com", "https://client.example.org", sub="12345678"
    )
    jwk = {
        "kty": "EC",
        "use": "sig",
        "crv": "P-256",
        "x": "<KEY>",
        "y": <KEY>",
    }
    pjwt = pj.pack_jwk(jwk)
    s = pjwt.to_json()
    assert s
    de_pjwt = PJWT().from_json(s)
    assert _eq(de_pjwt.keys(), ["iss", "aud", "exp", "cnf", "sub", "iat"])
    assert list(de_pjwt["cnf"].keys()) == ["jwk"]
    assert _eq(de_pjwt["cnf"]["jwk"].keys(), ["kty", "use", "crv", "x", "y"])
def test_pjwt_with_jwe():
    """PopJWT.pack_jwe accepts a pre-encrypted JWE string and embeds it in cnf."""
    pj = PopJWT(
        "https://server.example.com", "https://client.example.org", sub="12345678"
    )
    # Symmetric key to be wrapped; the 'k' value is a redacted placeholder.
    jwk = {
        "kty": "oct",
        "alg": "HS256",
        "k": "<KEY>",
    }
    encryption_keys = [RSAKey(use="enc", key=rsa, kid="some-key-id")]
    jwe = JWE(json.dumps(jwk), alg="RSA-OAEP", enc="A256CBC-HS512")
    _jwe = jwe.encrypt(keys=encryption_keys, kid="some-key-id")
    pjwt = pj.pack_jwe(jwe=_jwe)
    s = pjwt.to_json()
    de_pjwt = PJWT().from_json(s)
    assert _eq(de_pjwt.keys(), ["iss", "aud", "exp", "cnf", "sub", "iat"])
    assert list(de_pjwt["cnf"].keys()) == ["jwe"]
    _jwe = de_pjwt["cnf"]["jwe"]
    # Decrypting the embedded JWE must recover the original symmetric key.
    msg = jwe.decrypt(_jwe, encryption_keys)
    assert msg
    assert json.loads(msg.decode("utf8")) == jwk
def test_pjwt_with_jwe_jwk():
    """PopJWT constructed with jwe+keys encrypts a plain JWK itself via pack_jwe."""
    keys = KEYS()
    keys.append(RSAKey(use="enc", key=rsa, kid="some-key-id"))
    jwe = JWE(alg="RSA-OAEP", enc="A256CBC-HS512")
    pj = PopJWT(
        "https://server.example.com",
        "https://client.example.org",
        sub="12345678",
        jwe=jwe,
        keys=keys,
    )
    # Symmetric key to be wrapped; the 'k' value is a redacted placeholder.
    jwk = {
        "kty": "oct",
        "alg": "HS256",
        "k": "<KEY>",
    }
    pjwt = pj.pack_jwe(jwk=jwk, kid="some-key-id")
    s = pjwt.to_json()
    de_pjwt = PJWT().from_json(s)
    assert _eq(de_pjwt.keys(), ["iss", "aud", "exp", "cnf", "sub", "iat"])
    assert list(de_pjwt["cnf"].keys()) == ["jwe"]
    _jwe = de_pjwt["cnf"]["jwe"]
    msg = jwe.decrypt(_jwe, keys.keys())
    assert msg
    assert json.loads(msg.decode("utf8")) == jwk
def test_pjwt_with_kid():
    """PopJWT.pack_kid embeds only a key id in cnf."""
    pj = PopJWT(
        "https://server.example.com", "https://client.example.org", sub="12345678"
    )
    pjwt = pj.pack_kid("some-key-id")
    s = pjwt.to_json()
    assert s
    de_pjwt = PJWT().from_json(s)
    assert _eq(de_pjwt.keys(), ["iss", "aud", "exp", "cnf", "sub", "iat"])
    assert list(de_pjwt["cnf"].keys()) == ["kid"]
    assert de_pjwt["cnf"]["kid"] == "some-key-id"
def test_pjwt_unpack_jwk():
    """PopJWT.unpack parses a packed token containing a raw JWK.

    NOTE(review): the x/y coordinates were redacted to <KEY> placeholders
    (the unquoted one is a syntax error); restore real values before running.
    """
    pj = PopJWT(
        "https://server.example.com", "https://client.example.org", sub="12345678"
    )
    jwk = {
        "kty": "EC",
        "use": "sig",
        "crv": "P-256",
        "x": "<KEY>",
        "y": <KEY>",
    }
    pjwt = pj.pack_jwk(jwk)
    s = pjwt.to_json()
    _jwt = PopJWT().unpack(s)
    assert _eq(_jwt.keys(), ["iss", "aud", "exp", "cnf", "sub", "iat"])
    assert list(_jwt["cnf"].keys()) == ["jwk"]
    assert _eq(_jwt["cnf"]["jwk"].keys(), ["kty", "use", "crv", "x", "y"])
def test_pjwt_unpack_jwe():
    """PopJWT.unpack with jwe+keys decrypts cnf['jwe'] into cnf['jwk'] transparently."""
    keys = KEYS()
    keys.append(RSAKey(use="enc", key=rsa, kid="some-key-id"))
    pj = PopJWT(
        "https://server.example.com", "https://client.example.org", sub="12345678"
    )
    # Symmetric key to be wrapped; the 'k' value is a redacted placeholder.
    jwk = {
        "kty": "oct",
        "alg": "HS256",
        "k": "<KEY>",
    }
    jwe = JWE(json.dumps(jwk), alg="RSA-OAEP", enc="A256CBC-HS512")
    _jwe = jwe.encrypt(keys=keys.keys(), kid="some-key-id")
    pjwt = pj.pack_jwe(jwe=_jwe)
    s = pjwt.to_json()
    _jwt = PopJWT(jwe=jwe, keys=keys).unpack(s)
    assert _eq(_jwt.keys(), ["iss", "aud", "exp", "cnf", "sub", "iat"])
    # unpack keeps the original 'jwe' and adds the decrypted 'jwk'.
    assert _eq(_jwt["cnf"].keys(), ["jwk", "jwe"])
    assert _jwt["cnf"]["jwk"] == jwk
| StarcoderdataPython |
1773829 | from app import app as application
# Run manually with: $ flask run (or execute this module directly)
if __name__ == "__main__":
    import os

    # Environment variables are strings; cast to int so the WSGI server gets
    # a numeric port, and fall back to Flask's default 5000 when SERVER_PORT
    # is unset (the original passed the raw string / None straight through).
    application.run(host='0.0.0.0', port=int(os.getenv('SERVER_PORT') or 5000))
| StarcoderdataPython |
1692457 | from .core import pre_compute
from ..dispatch import dispatch
from ..expr import Expr
from odo.backends.json import JSON, JSONLines
from odo import into
from collections import Iterator
from odo.utils import records_to_tuples
@dispatch(Expr, JSON)
def pre_compute(expr, data, **kwargs):
    """Materialise a whole-document JSON source into a list of tuples.

    Eager: the entire file is loaded, then records are flattened to tuples
    matching the leaf expression's dshape.
    """
    seq = into(list, data, **kwargs)
    leaf = expr._leaves()[0]
    return list(records_to_tuples(leaf.dshape, seq))
@dispatch(Expr, JSONLines)
def pre_compute(expr, data, **kwargs):
    """Stream a JSON-lines source as a lazy iterator of tuples.

    Unlike the whole-document JSON variant above, this never materialises
    the full dataset in memory.
    """
    seq = into(Iterator, data, **kwargs)
    leaf = expr._leaves()[0]
    return records_to_tuples(leaf.dshape, seq)
| StarcoderdataPython |
6599088 | <gh_stars>1000+
# coding: utf-8
"""Test url tools. """
from __future__ import unicode_literals
import platform
import unittest
from fs._url_tools import url_quote
class TestBase(unittest.TestCase):
    """Tests for fs._url_tools.url_quote percent-encoding."""

    def test_quote(self):
        """url_quote escapes spaces/non-ASCII and normalises platform separators."""
        test_fixtures = [
            # test_snippet, expected
            ["foo/bar/egg/foofoo", "foo/bar/egg/foofoo"],
            ["foo/bar ha/barz", "foo/bar%20ha/barz"],
            ["example b.txt", "example%20b.txt"],
            ["exampleㄓ.txt", "example%E3%84%93.txt"],
        ]
        # Backslash handling is platform-dependent, so the fixtures diverge.
        if platform.system() == "Windows":
            test_fixtures.extend(
                [
                    ["C:\\My Documents\\test.txt", "C:/My%20Documents/test.txt"],
                    ["C:/My Documents/test.txt", "C:/My%20Documents/test.txt"],
                    # on Windows '\' is regarded as path separator
                    ["test/forward\\slash", "test/forward/slash"],
                ]
            )
        else:
            test_fixtures.extend(
                [
                    # colon:tmp is bad path under Windows
                    ["test/colon:tmp", "test/colon%3Atmp"],
                    # Unix treat \ as %5C
                    ["test/forward\\slash", "test/forward%5Cslash"],
                ]
            )
        for test_snippet, expected in test_fixtures:
            self.assertEqual(url_quote(test_snippet), expected)
| StarcoderdataPython |
6497001 | #!/usr/bin/python
"""
Site24x7 Okta Logs Plugin
"""
from datetime import datetime, timedelta
import json
import os
import sys
import time
import traceback
import glob
import socket
PYTHON_MAJOR_VERSION = sys.version_info[0]
if PYTHON_MAJOR_VERSION == 3:
import urllib
import urllib.request as urlconnection
from urllib.error import URLError, HTTPError
elif PYTHON_MAJOR_VERSION == 2:
import urllib2 as urlconnection
from urllib2 import HTTPError, URLError
OKTA_DOMAIN= 'yourOktaDomain'
OKTA_API_TOKEN= 'apiToken'
#if any impacting changes to this plugin kindly increment the plugin version here.
PLUGIN_VERSION = "1"
#Setting this to true will alert you when there is a communication problem while posting plugin data to server
HEARTBEAT="true"
AGENT_HOME = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0]))))
LOG_FILE_DIR = os.path.join(AGENT_HOME, 'temp', 'scriptout')
LOG_ROTATION_INTERVAL='HOUR'
LOG_API_END_POINT='https://'+OKTA_DOMAIN+'/api/v1/logs'
class LogCollector:
    """Fetches recent Okta system-log events and writes them to rotating
    plugin log files, reporting a status dict back to the Site24x7 agent."""

    def __init__(self):
        pass

    def collect_logs(self):
        """Fetch the last 5 minutes of Okta events and append them, one JSON
        object per line, to the current (hourly or daily) log file.

        Returns the plugin status dict expected by Site24x7 (msg/status plus
        version and heartbeat fields).
        """
        data = {}
        data['plugin_version'] = PLUGIN_VERSION
        data['heartbeat_required'] = HEARTBEAT
        try:
            headers = {'Authorization': 'SSWS ' + OKTA_API_TOKEN}
            # Okta expects an ISO-8601 'since' timestamp with millisecond
            # precision and a trailing 'Z'; isoformat() yields microseconds,
            # so the last three digits are dropped.
            startTime = datetime.utcnow() - timedelta(0, 300)
            request_url = LOG_API_END_POINT + '?since=' + startTime.isoformat()[:-3] + 'Z'
            handle, error = self.readUrl(request_url, headers)
            if error is not None:
                data['msg'] = error
                data['status'] = 0
            else:
                okta_events = json.loads(handle.read().decode('utf-8'))
                if not os.path.exists(LOG_FILE_DIR):
                    os.makedirs(LOG_FILE_DIR)
                file_suffix = datetime.now().strftime("%Y-%m-%d-%H" if LOG_ROTATION_INTERVAL == 'HOUR' else "%Y-%m-%d")
                file_path = os.path.join(LOG_FILE_DIR, 'events-' + file_suffix + '.log')
                self._write_events(file_path, okta_events)
                # Follow the RFC-5988 'next' links for paginated results.
                # Pagination failures are deliberately swallowed: whatever
                # was fetched so far has already been written out.
                try:
                    link_headers = handle.getheader('link').split(',')
                    for link in link_headers:
                        if 'next' in link:
                            # Links look like ' <https://...>; rel="next"'.
                            handle, error = self.readUrl(link[2:link.index('>')], headers=headers)
                            if error is None:
                                okta_events = json.loads(handle.read().decode('utf-8'))
                                self._write_events(file_path, okta_events)
                except Exception:
                    pass
                data['msg'] = 'Success'
                data['status'] = 1
        except Exception as e:
            data['msg'] = 'Failure : ' + str(e)
            data['status'] = 0
            traceback.print_exc()
        return data

    def _write_events(self, file_path, okta_events):
        """Append each event as one JSON line to file_path."""
        with open(file_path, 'a') as _file:
            for okta_event in okta_events:
                _file.write(json.dumps(okta_event))
                _file.write("\n")

    def readUrl(self, url_end_point, headers):
        """Open url_end_point and return (response, None) on success or
        (None, error_message) on failure."""
        error = None
        try:
            req = urlconnection.Request(url_end_point, headers=headers)
            handle = urlconnection.urlopen(req, None)
            return handle, error
        except HTTPError as e:
            if e.code == 401:
                error = "ERROR: Unauthorized user. Does not have permissions. %s" % (e)
            elif e.code == 403:
                error = "ERROR: Forbidden, yours credentials are not correct. %s" % (e)
            else:
                error = "ERROR: The server couldn\'t fulfill the request. %s" % (e)
        except URLError as e:
            error = 'ERROR: We failed to reach a server. Reason: %s' % (e.reason)
        except socket.timeout:
            error = 'ERROR: Timeout error'
        except socket.error:
            # BUGFIX: the original built this message from self.host and
            # self.port, attributes that are never set on this class, so the
            # handler itself raised AttributeError.
            error = "ERROR: Unable to connect with host for " + url_end_point
        except Exception:
            # BUGFIX: the original used a bare `except:` and then passed an
            # undefined name `e` to traceback.print_exc() (which takes a
            # `limit` integer, not an exception).
            traceback.print_exc()
            error = "ERROR: Unexpected error: %s" % (sys.exc_info()[0])
        return None, error

    def cleanup_logs(self):
        """Delete rotated event logs, keeping the newest file.

        A file modified within the last 24h is removed only once the agent's
        statefile records its inode with a matching size (i.e. it has been
        fully consumed); anything older than 24h is removed unconditionally.
        """
        try:
            # Map inode -> last-consumed size, as recorded by the agent.
            inode_size_map = {}
            stat_file_name = os.path.join(AGENT_HOME, 'statefiles', 'local.properties')
            with open(stat_file_name) as _file:
                for line in _file.readlines():
                    if '=' in line:
                        line = line.strip()
                        inode_size_map[line.split('=')[0].strip()] = line.split('=')[1].strip()
            log_files = glob.glob(os.path.join(LOG_FILE_DIR, 'events-*.log'))
            sorted_files = sorted(log_files, key=lambda file: os.path.getmtime(file), reverse=True)
            for log_file in sorted_files[1:]:
                statusObj = os.stat(log_file)
                inode = str(statusObj.st_ino)
                lmtime = datetime.fromtimestamp(statusObj.st_mtime)
                time_delta = datetime.now() - lmtime
                if (24 * time_delta.days + time_delta.seconds / 3600) < 24:
                    file_size = statusObj.st_size
                    if inode in inode_size_map and file_size == int(inode_size_map[inode]):
                        os.remove(log_file)
                else:
                    os.remove(log_file)
        except Exception:
            # BUGFIX: the original called traceback.print_exc(e), passing the
            # exception where a `limit` integer is expected.
            traceback.print_exc()
if __name__ == "__main__":
    # Plugin entry point: fetch events, prune old log files, and print the
    # status dict as JSON for the Site24x7 agent to consume.
    log_collector = LogCollector()
    result = log_collector.collect_logs()
    log_collector.cleanup_logs()
    print(json.dumps(result, indent=4, sort_keys=True))
| StarcoderdataPython |
9655908 | import fplcoin
wallet_prefix = ""
def publicKeyToAddress(compressedPublicKey):
    ''' Derive a wallet address: hash the compressed public key and
    base58-encode it, prefixed with wallet_prefix + "c". '''
    h = fplcoin.hasher(compressedPublicKey).digest()
    return wallet_prefix + "c" + fplcoin.encoder.b58encode(h)
def createNewWallet():
    ''' Generate an ECC keypair, derive its address, and persist all three
    in the wallets table.

    NOTE(review): the private key is stored in plain text -- confirm this is
    acceptable before shipping.
    '''
    privateKey, publicKey = fplcoin.ecc.make_keypair()
    compressedPublicKey = compressPublicKey(publicKey)
    newAddress = publicKeyToAddress(compressedPublicKey)
    fplcoin.db.doQuery('INSERT INTO wallets (privateKey, publicKey, address) VALUES (?, ?, ?)', (str(privateKey), str(compressedPublicKey), newAddress), result='none')
def printBasicInfo():
    ''' Print the unspent balance of every local wallet and the grand total.

    Unspent outputs are those with no matching row in transactions_inputs
    (i.e. never referenced as a previousOutput).  Python 2 print syntax.
    '''
    outputs = fplcoin.db.doQuery("select distinct transactions_outputs.amount, transactions_outputs.address, transactions_outputs.outputHash from transactions_outputs LEFT JOIN transactions_inputs WHERE NOT EXISTS(SELECT * FROM transactions_inputs WHERE transactions_outputs.outputHash = transactions_inputs.previousOutput)", result='all')
    wallets = fplcoin.db.doQuery("select * from wallets", result='all')
    totalMoney = 0
    for wallet in wallets:
        ID, privateKey, publicKey, myAddress = wallet
        print "Wallet address: %s " % myAddress
        walletMoney = 0
        # O(wallets * outputs) scan; fine for small local databases.
        for output in outputs:
            amount, address, outputHash = output
            if address == myAddress:
                walletMoney += amount
        print "Money in wallet: %s\n" % str(walletMoney)
        totalMoney += walletMoney
    print "Total money: %s\n" % str(totalMoney)
def compressPublicKey(publicKey):
    ''' Compress an ECC public key (x, y) into prefix + root marker + base58(x).

    The curve is y^2 = x^3 + 7 mod p (the x*x*x+7 below).  Only x is stored;
    a 'p'/'m' marker records which of the two square roots y is, so the point
    can be reconstructed by decompressPublicKey().
    '''
    compressedPublicKey = wallet_prefix
    p = fplcoin.ecc.p
    x = publicKey[0]
    ysquared = ((x*x*x+7) % p)
    # Square root via pow(., (p+1)/4, p) -- valid when p % 4 == 3.
    # NOTE(review): (p+1)/4 is integer division only on Python 2 (the
    # rstrip('L') below confirms this is Python 2 code).
    y1 = pow(ysquared, (p+1)/4, p)
    y2 = y1 * -1 % p
    if y2 == publicKey[1]:
        compressedPublicKey += "p"
    else:
        compressedPublicKey += "m"
    # Hex-encode x, pad to an even number of digits, then pack to raw bytes.
    a = hex(x)[2:].rstrip('L')
    if len(a)%2 == 1:
        a = '0' + a
    a = a.decode('hex')
    compressedPublicKey += fplcoin.encoder.b58encode(a)
    return compressedPublicKey
def decompressPublicKey(compressedPublicKey):
    ''' Reconstruct the (x, y) public key point from its compressed form.

    Inverse of compressPublicKey(): x is base58-decoded, y is recomputed as
    a square root of x^3 + 7 mod p, and the 'p'/'m' marker selects which of
    the two roots was originally stored.
    '''
    p = fplcoin.ecc.p
    x = int(fplcoin.encoder.b58decode(compressedPublicKey[len(wallet_prefix)+1:]).encode('hex'),16)
    ysquared = ((x*x*x+7) % p)
    y = pow(ysquared, (p+1)/4, p)
    # 'p' marks the negated root (mirrors the choice made in compressPublicKey).
    if compressedPublicKey[len(wallet_prefix)] == 'p':
        y = y * -1 % p
    return (x,y)
| StarcoderdataPython |
1745945 | <reponame>PotatoHD404/hs-log-fireplace
from hsreplay.document import HSReplayDocument
import json
import os
from io import BytesIO
from hslog.export import EntityTreeExporter
def get_file_paths(path):
    """Yield the full path of every file under *path*, recursively."""
    for dirpath, _dirnames, filenames in os.walk(path):
        for name in filenames:
            yield os.path.join(dirpath, name)
# for root, dirs, files in os.walk(path):
# for name in files:
# print
# os.path.join(root, name)
def get_games(games_dir):
    """Yield one dict per replay file under *games_dir*.

    Each JSON file's 'xml' payload is parsed into an HSReplayDocument stored
    under 'game' (the raw 'xml' key is dropped), and the file's basename
    (without extension) becomes the 'id'.
    """
    for filename in get_file_paths(games_dir):
        with open(filename) as f:
            game = json.load(f)
        game['game'] = HSReplayDocument.from_xml_file(BytesIO(game['xml'].encode("utf-8")))
        del game['xml']
        game['id'] = os.path.basename(filename).split('.')[0]
        yield game
def get_games_dict(games_dir):
    """Return a mapping of game id -> game record for every replay under *games_dir*."""
    games_by_id = {}
    for record in get_games(games_dir):
        key = record.pop('id')
        games_by_id[key] = record
    return games_by_id
# def import_data():
# res = list(get_data())
def main():
    """Load the first replay from the hard-coded games directory and print
    the export of its first packet tree."""
    first_game = next(get_games(r'D:\datasets and shit\hs_games'))
    packet_tree = first_game['game'].to_packet_tree()[0]
    print(packet_tree.export())
if __name__ == '__main__':
main()
| StarcoderdataPython |
# Count word frequencies in alice.txt and report how many times "alice"
# appears (case-insensitive, punctuation stripped).
import string

# `with` guarantees the handle is closed even if reading fails; the original
# opened the file without a context manager and leaked it on any exception.
with open('alice.txt') as arq:
    texto = arq.read()

# Normalise so that "Alice," and "alice" count as the same word.
texto = texto.lower()
for c in string.punctuation:
    texto = texto.replace(c, ' ')

# Frequency count via dict.get instead of the if/else membership test.
dic = {}
for p in texto.split():
    dic[p] = dic.get(p, 0) + 1

print('Alice aparece %s vezes' % dic['alice'])
| StarcoderdataPython |
6532098 | <reponame>aidotse/Team-Haste<gh_stars>0
import pandas as pd
import os

# Load the full annotated dataset and drop the CSV's stale index column.
# `axis` must be passed by keyword: positional-axis support was removed from
# DataFrame.drop in pandas 2.0.
data = pd.read_csv("/mnt/hdd1/users/hakan/ai_haste/exp_stats/full_dataset.csv")
data = data.drop("Unnamed: 0", axis=1)

# Hold out a fixed number of rows per magnification as the test split.
# random_state makes the split reproducible (the original sampled without a
# seed, producing a different train/test split on every run).
test_parts = []
for magnification, n_test in (("20x", 3), ("40x", 4), ("60x", 6)):
    sampled = data[data["magnification"] == magnification].sample(n_test, random_state=42)
    data = data.drop(sampled.index)
    test_parts.append(sampled)

# DataFrame.append was removed in pandas 2.0; concatenate the parts instead.
test_data = pd.concat(test_parts).reset_index(drop=True)
data = data.reset_index(drop=True)

data.to_csv("/mnt/hdd1/users/hakan/ai_haste/exp_stats/final_train.csv", index=False)
test_data.to_csv("/mnt/hdd1/users/hakan/ai_haste/exp_stats/final_test.csv", index=False)
| StarcoderdataPython |
1616159 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains custom Dcc callback classes
"""
from __future__ import print_function, division, absolute_import
from tpDcc import dcc
from tpDcc.libs.python import decorators
from tpDcc.abstract import callback as abstract_callback
class _MetaCallback(type):
    """Metaclass turning ``Callback(...)`` into a factory for the DCC-specific callback class."""

    def __call__(cls, *args, **kwargs):
        # Intercepts instantiation: instead of creating an instance it
        # returns the callback *class* for the current DCC (or None when the
        # DCC is unsupported).  The import is deferred so Maya modules are
        # only loaded when actually running inside Maya.
        if dcc.is_maya():
            from tpDcc.dccs.maya.core import callback as maya_callback
            return maya_callback.MayaCallback
        else:
            return None
@decorators.add_metaclass(_MetaCallback)
class Callback(abstract_callback.AbstractCallback):
    """Facade: calling ``Callback()`` yields the DCC-specific callback class
    (or None) via _MetaCallback -- it never produces a Callback instance."""
    pass
| StarcoderdataPython |
6402764 | <gh_stars>0
import unittest
from colony.client import ColonyClient
from colony.sandboxes import SandboxesManager
class TestSandboxes(unittest.TestCase):
    """URL-construction tests for SandboxesManager."""

    def setUp(self) -> None:
        self.client_with_account = ColonyClient(account="my_account", space="my_space")
        self.sandboxes = SandboxesManager(self.client_with_account)

    def test_ui_link_is_properly_generated(self):
        """The UI link embeds account subdomain, space and sandbox id."""
        self.assertEqual(
            self.sandboxes.get_sandbox_ui_link("blah"),
            "https://my_account.cloudshellcolony.com/my_space/sandboxes/blah",
        )

    def test_sandbox_url_properly_generated(self):
        """The REST URL targets /api/spaces/<space>/sandboxes/<id>."""
        self.assertEqual(
            self.sandboxes.get_sandbox_url("blah"),
            "https://my_account.cloudshellcolony.com/api/spaces/my_space/sandboxes/blah",
        )
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
347337 | <filename>vff/field.py
# Copyright 2011 Terena. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY TERENA ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL TERENA OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of Terena.
import uuid
from django.conf import settings
from django.utils.importlib import import_module
from django.db.models.fields.files import FieldFile, FileField
from vff.storage import VersionedStorage
from vff.abcs import VFFBackend
HAS_SOUTH = True
try:
from south.modelsinspector import add_introspection_rules
except ImportError:
HAS_SOUTH = False
class VersionedFieldFile(FieldFile):
    """Django FieldFile whose content revisions live in a VersionedStorage backend."""

    def __init__(self, instance, field, name):
        # The stored name is derived from the model instance, not the upload:
        # unsaved instances get a random placeholder; saved ones ask the
        # version-control backend for their canonical filename.
        if instance.pk is None: # new file
            name = uuid.uuid4().hex
        else:
            name = field.storage.backend.get_filename(instance)
        super(VersionedFieldFile, self).__init__(instance, field, name)

    def save(self, name, content, username='', commit_msg='', save=True):
        """Commit *content* as a new revision attributed to *username*.

        NOTE(review): silently no-ops when username is empty, and the
        caller's *save* flag is forced to False for already-saved instances
        -- confirm both behaviours are intentional before changing them.
        """
        if not username:
            return
        if self.instance.pk is None: # new file
            self.name = uuid.uuid4().hex
        else:
            self.name = self.storage.backend.get_filename(self.instance)
            save = False
        self.storage.save(self.name, content, username, commit_msg, save)
        setattr(self.instance, self.field.name, self.name)
        # Update the filesize cache
        self._size = content.size
        self._committed = True
    save.alters_data = True

    def delete(self, username='', commit_msg='', save=False):
        """Record a deletion revision in the backend; no-op without *username*."""
        if not username:
            return
        # Close and drop any cached file handle before deleting.
        if hasattr(self, '_file'):
            self.close()
            del self.file
        self.storage.delete(self.name, username, commit_msg, save)
    delete.alters_data = True

    def list_revisions(self, count=0, offset=0):
        """Return revision metadata for this file from the backend."""
        return self.storage.backend.list_revisions(self.instance,
                                                   count=count, offset=offset)

    def get_revision(self, rev=None):
        """Return the file content at revision *rev* (latest when None)."""
        return self.storage.backend.get_revision(self.instance, rev=rev)

    def get_diff(self, r1, r2):
        """Return the backend's diff of this file between revisions r1 and r2."""
        return self.storage.backend.get_diff(self.instance, r1, r2)
class VersionedFileField(FileField):
    """FileField whose storage backend is the class named by settings.VFF_BACKEND."""

    attr_class = VersionedFieldFile

    def __init__(self, name=None, verbose_name=None, storage=None, **kwargs):
        # NOTE(review): the *storage* argument is accepted but ignored -- the
        # field always builds its own VersionedStorage from VFF_BACKEND.
        try:
            path = settings.VFF_BACKEND
        except AttributeError:
            raise NameError('When using VersionedField, you have to define'
                            ' VFF_BACKEND in settings.py. Refer'
                            ' to the docs for more info.')
        # VFF_BACKEND is a dotted path "package.module.ClassName".
        mname = '.'.join(path.split('.')[:-1])
        cname = path.split('.')[-1]
        module = import_module(mname)
        backend_class = getattr(module, cname)
        if not issubclass(backend_class, VFFBackend):
            raise ValueError('The class pointed at in VFF_BACKEND'
                             ' has to provide the interface defined by'
                             ' vff.abcs.VFFBackend.')
        vstorage = VersionedStorage(backend_class, name)
        # upload_to is required by FileField but unused by VersionedStorage.
        super(VersionedFileField, self).__init__(verbose_name=verbose_name,
                                                 name=name,
                                                 upload_to='unused',
                                                 storage=vstorage,
                                                 **kwargs)

    def deconstruct(self):
        """Drop the synthetic upload_to so migrations match __init__'s signature."""
        name, path, args, kwargs = super(VersionedFileField, self).deconstruct()
        del kwargs["upload_to"]
        return name, path, args, kwargs
# Register the field with South (legacy pre-Django-1.7 migrations) so it can
# introspect VersionedFileField; skipped entirely when South is absent.
if HAS_SOUTH:
    add_introspection_rules([
        (
            [VersionedFileField],
            [],
            {},
        ),
    ], ["^vff\.field\.VersionedFileField"])
| StarcoderdataPython |
374180 | __author__ = "<NAME> <<EMAIL>>"
__date__ = "$May 18, 2015 16:46:39 EDT$"
# Versioneer boilerplate: resolve the package version at import time.
from ._version import get_versions
__version__ = get_versions()['version']
# Drop the helper so it does not leak into the public namespace.
del get_versions
| StarcoderdataPython |
3488650 | <filename>core/utils/validators.py
from django.core.validators import RegexValidator, ValidationError
from django.utils.translation import ugettext_lazy as _
class DefaultValidator(object):
    """Container for the project's reusable Django field validators."""
    # Letters, hyphens and whitespace only (e.g. for human names).
    char_only = RegexValidator(r'^[a-zA-Z-\s]*$', 'Only alphabetic characters are allowed.')
    def validate_phonenumber(value):
        """Validate phone numbers of the form '+XXX' country code + 9 digits.

        Defined without `self` on purpose: Django validators are plain
        callables referenced as ``DefaultValidator.validate_phonenumber``.
        Raises ValidationError on malformed input.
        """
        no = list(value)
        country_code = no[:4]
        # Guard the empty/short string explicitly: indexing an empty list
        # below raised IndexError instead of a proper ValidationError.
        if not country_code or country_code[0] != '+':
            raise ValidationError(
                # Stray trailing '"' removed from the original message.
                _('%(value)s is not a valid Phone Number, it must begin with a country code'),
                params={'value': value}
            )
        body = no[4:]
        if len(body) != 9:
            raise ValidationError(
                _('%(value)s is not a valid Phone Number!!'),
                params={'value': value}
            )
_validator = DefaultValidator
validators_list = [_validator.char_only, ]
| StarcoderdataPython |
def sum_of_intervals(intervals):
    """Return the total length covered by a list of (start, end) intervals.

    Overlapping regions are counted once; e.g. [(1, 4), (3, 5)] covers 4
    units. The original snippet was truncated to a bare signature; this is
    the standard sort-then-merge implementation (O(n log n)).
    """
    total = 0
    covered_until = None  # right edge of the region counted so far
    for start, end in sorted(intervals):
        if covered_until is None or start > covered_until:
            # Disjoint interval: count it whole.
            total += end - start
            covered_until = end
        elif end > covered_until:
            # Overlaps the covered region: count only the new part.
            total += end - covered_until
            covered_until = end
    return total
| StarcoderdataPython |
1942352 | <reponame>HiroakiMikami/gpt-code-clippy<gh_stars>0
import json
# import torch
# import pandas as pd
# import apps.eval.reident
# from apps_utils.generate_gpt_codes import generate_prompt
# from apps_utils.test_one_solution import eval_and_save_problems
# from datasets import load_dataset, load_metric
from fastcore.script import *
from human_eval.data import write_jsonl, read_problems
from human_eval.evaluation import evaluate_functional_correctness
from pathlib import Path
from tqdm.auto import tqdm
# from metrics.extrinsic_eval import compute_metrics
# from subprocess import check_output
from transformers import (
AutoTokenizer,
# FlaxGPTNeoForCausalLM,
GPTNeoForCausalLM
)
# bleu = load_metric("sacrebleu")
# Hard limits for the model context window and the generated suffix length.
MAX_TOKS = 1024
MAX_NEW_TOKS = 128
def clean_text(generation):
    """Truncate a model completion at the first sign of a new code block.

    The newline-prefixed stop tokens follow OpenAI's "Evaluating Large
    Language Models Trained on Code"; the final bare "def " token is a
    clean-up specific to the code-search-net finetuned model. Each split
    keeps only the text before the first occurrence of the token.
    """
    for stop_token in ("\ndef", "\nclass", "\n#", "\nif", "def "):
        generation = generation.split(stop_token)[0]
    return generation
def generate_text(prompt, n, tokenizer, model):
    """Sample `n` cleaned completions for `prompt` on the CUDA device.

    Each decoded sequence has the echoed prompt stripped, is truncated at
    the stop tokens by clean_text, and is returned with the prompt
    re-attached.
    """
    encoded = tokenizer(prompt, truncation=True, max_length=MAX_TOKS, return_tensors="pt").to("cuda")
    sequences = model.generate(
        input_ids=encoded.input_ids, max_length=MAX_TOKS,
        max_new_tokens=MAX_NEW_TOKS,
        do_sample=True, temperature=0.8,
        num_return_sequences=n
    )
    decoded = tokenizer.batch_decode(sequences, skip_special_tokens=False)
    return [prompt + clean_text(text.replace(prompt, "")) for text in decoded]
# def _eval_concode(path):
# # TODO: format input to model same as App and OpenAI HumanEval datasets are formatted
# data = load_dataset("json", data_files=str(path / "test.json"))["train"]
# predictions = [[]]
# references = []
# for example in data:
# output = generate_text(example["nl"])
# predictions[0].append(output.split(" "))
# references.append(example["code"].split(" "))
# results = compute_metrics(predictions, references)
# print(f"Bleu score for Concode dataset: {results}")
# def _eval_apps(out_path, tokenizer, model):
# gpt_codes = {}
# apps_ds = load_dataset("../data_processing/apps.py")["test"]
# apps_ds = apps_ds.select(range(5_212))
# for idx, example in tqdm(enumerate(apps_ds), total=len(apps_ds)):
# answer = generate_text(example["question"], 5, tokenizer, model)
# gpt_codes[idx] = answer
# with open(out_path / "all_codes.json", "w") as f:
# json.dump(gpt_codes, f)
# eval_and_save_problems(apps_ds, out_path)
def _eval_human_eval(path, out_path, tokenizer, model):
    """Generate completions for every HumanEval problem, then score them.

    Writes the samples to human_eval.jsonl under `out_path` and prints the
    functional-correctness results.
    """
    problems = read_problems(str(path))
    n_per_task = 10
    samples = [
        dict(task_id=task_id, completion=text)
        for task_id in tqdm(list(problems.keys()))
        for text in generate_text(problems[task_id]["prompt"], n_per_task, tokenizer, model)
    ]
    write_jsonl(str(out_path / "human_eval.jsonl"), samples)
    # pass@k for k in {1, 2, 5}, 4 workers, 3 s per-problem timeout.
    results = evaluate_functional_correctness(str(out_path / "human_eval.jsonl"), [1, 2, 5], 4, 3.0, str(path))
    print(results)
@call_parse
def main(
    model_name_or_path: Param("Name or path of model to evaluate", str),
    human_eval_path: Param("Path to the human eval dataset", str),
    out_path: Param("Path to save results", str),
):
    """CLI entry point: generate and score HumanEval completions.

    NOTE(review): requires a CUDA device (`.to("cuda")` below).
    """
    human_eval_path = Path(human_eval_path)
    out_path = Path(out_path)
    # Group results per model under the output directory.
    out_path = out_path / model_name_or_path.split("/")[-1]
    out_path.mkdir(exist_ok=True)
    # Left padding so sampling continues from the end of the prompt.
    tokenizer = AutoTokenizer.from_pretrained(
        model_name_or_path, padding_side="left", pad_token="<|endoftext|>"
    )
    model = GPTNeoForCausalLM.from_pretrained(
        model_name_or_path,
        pad_token_id=50256,
    ).to("cuda")
    _eval_human_eval(human_eval_path, out_path, tokenizer, model)
| StarcoderdataPython |
1689026 | from ..pipeline import pipe, ppipe, p
import ray
# Shared fixture: news-item records. Note the last record has no 'nwords'
# and an extra 'date' key, exercising heterogeneous dicts in the pipeline.
examples = [
    {'headline': 'Web ads for junk food could be banned in the UK', 'source': 'Guardian', 'nwords':11},
    {'headline': 'The Olympics will be delayed', 'source': 'Guardian', 'nwords':5},
    {'headline': 'Wirecard collapses after fraud scandal', 'source': 'Guardian', 'nwords':5},
    {'date': '2020-07-28', 'headline': 'Usability of Footnotes', 'source': 'https://news.ycombinator.com/item?id=23964200'}
]
def test_pipe():
    """Sequential pipeline: each p() maps one field to a (new) field."""
    records = examples[:]
    def uppercase(x):
        return x.upper()
    def cleanup(x, words):
        # Remove every listed word, then trim surrounding whitespace.
        for w in words:
            x = x.replace(w, '')
        return x.strip()
    # pass a list of dicts
    _res = pipe(
        iter(records), # works without the iter() as well
        p('headline', uppercase, 'headline'),
        p('headline', lambda x: x.lower(), 'new_headline'),
        p('headline', cleanup, 'clean_headline', words=['THE'])
    )
    # pipe is lazy; materialise before asserting.
    res = list(_res)
    assert len(res) == len(examples)
    assert res[0]['headline'] == 'WEB ADS FOR JUNK FOOD COULD BE BANNED IN THE UK'
    assert res[1]['clean_headline'] == 'OLYMPICS WILL BE DELAYED'
def test_ppipe():
    """Parallel (ray-backed) pipeline over a larger, repeated record set."""
    def uppercase(x):
        return x.upper()
    def cleanup(x, words):
        # Remove every listed word, then trim surrounding whitespace.
        for w in words:
            x = x.replace(w, '')
        return x.strip()
    many_examples = examples[:] * 50
    # pass a list of dicts
    _res = ppipe(
        many_examples, # works without the iter() as well
        p('headline', uppercase, 'headline'),
        p('headline', cleanup, 'clean_headline', words=['THE']),
        p('clean_headline', lambda x: x.upper(), 'clean_headline'),
        records_in_memory=10
    )
    # ray must be initialised before the lazy result is consumed.
    ray.init()
    res = list(_res)
    ray.shutdown()
    assert len(res) == len(many_examples)
    assert res[-1]['clean_headline'] == 'USABILITY OF FOOTNOTES'
| StarcoderdataPython |
1683567 | <reponame>ekhtiar/Python_for_Informatics_Solutions<filename>Ex_3/Ex_3_2.py
#!/usr/bin/env python
# NOTE(review): Python 2 code (raw_input / print statements); will not run
# under Python 3 without porting.
#adjust your shebang line
#Rewrite your pay program using try and except so that your program
#handles non-numeric input gracefully by printing a message and exiting the
#program. The following shows two executions of the program
#take input from user and convert to float
hours = raw_input("How many hours have you worked this week? ")
#convert to float
try:
    hours = float(hours)
# NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
# `except ValueError:` would be the precise guard here.
except:
    print "Error, please enter numeric input"
    exit()
#take input from user and convert to float
rate = raw_input("How much is your hourly pay? ")
#convert to float
try:
    rate = float(rate)
except:
    print "Error, please enter numeric input"
    exit()
#calculate pay: hours beyond 40 are paid at 1.5x the hourly rate
if hours > 40:
    pay = rate * 40 + rate * 1.5 * (hours - 40)
else:
    pay = rate * hours
#print result
print pay
4923979 | <reponame>skad00sh/gsudmlab-mvtsdata_toolkit
import os
from os import path, makedirs
import pandas as pd
import numpy as np
# Column-name constants shared by the summary builders below.
_summary_keywords: dict = {"params_col": 'Feature-Name',
                           "null_col": "Null-Count",
                           "count_col": "Val-Count",
                           "label_col": "Label",
                           "population": "Population"}
# Despite the name, seven statistics are reported per feature.
_5num_colnames: list = ['mean', 'std', 'min', '25th', '50th', '75th', 'max']
class ExtractedFeaturesAnalysis:
    """
    Basic analytics over the statistical features extracted by
    `features.feature_extractor.py` (or `features.feature_extractor_parallel.py`):
        * a histogram of classes,
        * the counts of the missing values,
        * a five-number (actually seven-statistic) summary per feature.
    The summaries can also be stored in a CSV file.
    """
    def __init__(self, extracted_features_df: pd.DataFrame, exclude: list = None):
        """
        :param extracted_features_df: the extracted features as produced by
               `FeatureExtractor` or `FeatureExtractorParallel`.
        :param exclude: (optional) column names to leave out of the analysis.
               Non-numeric columns are dropped automatically; this argument
               is for numeric columns (e.g. an ID) whose statistics make no
               sense.
        """
        self.df = extracted_features_df
        self.summary = pd.DataFrame()  # filled by compute_summary()
        self.excluded_colnames = exclude
    def compute_summary(self):
        """
        Populate `self.summary` with one row per numeric, non-excluded
        feature and the columns [Feature-Name, Val-Count, Null-Count, mean,
        std, min, 25th, 50th, 75th, max]. Statistics ignore null/nan values.

        Raises ValueError when the dataframe is empty or contains no
        numeric columns.
        """
        df_desc = pd.DataFrame(self.df)
        # drop the columns that were requested to be excluded
        if self.excluded_colnames is not None:
            df_desc.drop(labels=self.excluded_colnames, inplace=True, axis=1)
        # drop any non-numeric column
        if not self.df.empty:
            df_desc = df_desc.describe(include=[np.number])
        else:
            raise ValueError(
                '''
                It seems that the given dataframe is empty. First, run
                `features.feature_extractor.py`.
                '''
            )
        if df_desc.empty:
            raise ValueError(
                '''
                It seems that in the given dataframe, no numeric features are available. First, run
                `features.feature_extractor.py`.
                '''
            )
        # Transpose so each feature becomes a row, then prepend the feature
        # names and insert the null counts (the Series aligns on feature name).
        df_desc = df_desc.T
        df_desc.insert(0, _summary_keywords['params_col'], df_desc.index)
        df_desc.insert(2, _summary_keywords['null_col'], self.df.isnull().sum())
        # New colnames: [Feature-Name, Val-Count, Null-Count, mean, std, min, 25th, 50th, 75th, max]
        df_desc.columns = [_summary_keywords['params_col'],
                           _summary_keywords['count_col'],
                           _summary_keywords['null_col']] + _5num_colnames
        df_desc.reset_index(inplace=True)
        df_desc.drop(labels='index', inplace=True, axis=1)
        self.summary = df_desc
    def get_class_population(self, label: str) -> pd.DataFrame:
        """
        Per-class population of the original dataset.
        :param label: the column name holding the class labels.
        :return: a two-column dataframe: class label and class count.
        """
        population_df = self.df[label].value_counts()
        population_df = population_df.to_frame(_summary_keywords['population'])
        population_df.insert(0, label, population_df.index)
        return population_df.reset_index(drop=True)
    def get_missing_values(self) -> pd.DataFrame:
        """
        Missing-value counts per extracted feature.
        Requires a prior call to `compute_summary`; raises ValueError
        otherwise.
        """
        if self.summary.empty:
            raise ValueError(
                """
                Execute `compute_summary` before getting the missing values.
                """
            )
        count_df = self.summary[[_summary_keywords['params_col'], _summary_keywords['null_col']]]
        return count_df.reset_index(drop=True)
    def get_five_num_summary(self) -> pd.DataFrame:
        """
        Seven-statistic summary (mean, std, min, quartiles, max) per
        feature, taken from the precomputed `self.summary`.
        Requires a prior call to `compute_summary`; raises ValueError
        otherwise.
        """
        if self.summary.empty:
            raise ValueError(
                """
                Execute `compute_summary` before getting the five number summary.
                """
            )
        colname_copy = _5num_colnames.copy()  # copy: we don't want to change `_5num_colnames`
        colname_copy.insert(0, _summary_keywords['params_col'])
        five_num_df = self.summary[colname_copy]
        return five_num_df.reset_index(drop=True)
    def print_summary(self):
        """
        Prints the summary dataframe to the console.
        """
        if self.summary.empty:
            print(
                '''
                The summary is empty. The method `compute_summary` needs to be executed before
                printing the results.
                '''
            )
        else:
            print(self.summary.to_string())
    def summary_to_csv(self, output_path, file_name):
        """
        Store the computed summary on disk.
        :param output_path: directory to store the summary in (created if
               missing).
        :param file_name: name of the csv file; `.csv` is appended when the
               extension is missing.
        NOTE(review): the file is written with a tab separator despite the
        .csv extension -- confirm downstream readers expect that.
        """
        if self.summary.empty:
            raise ValueError(
                '''
                Execute `compute_summary` before storing the results.
                '''
            )
        if not path.exists(output_path):
            makedirs(output_path)
        if not file_name.endswith('.csv'):
            file_name = '{}.csv'.format(file_name)
        out_file = os.path.join(output_path, file_name)
        self.summary.to_csv(out_file, sep='\t', header=True, index=False)
        print('Data Analysis of the extracted features is stored at [{}]'.format(out_file))
| StarcoderdataPython |
1877363 | <gh_stars>10-100
from os import mkdir
from os.path import join, exists
import datatable as dt
from rs_datasets.data_loader import download_dataset
from rs_datasets.generic_dataset import Dataset, safe
class Epinions(Dataset):
    """Epinions ratings + trust-network dataset (trustlet.org).

    Exposes two pandas DataFrames: `ratings` (user_id, item_id, rating)
    and `trust` (source_user_id, target_user_id, trust_value).
    """
    def __init__(self, path: str = None):
        """
        :param path: folder which is used to download dataset to
            if it does not contain dataset files.
            If files are found, load them.
        """
        super().__init__(path)
        folder = join(self.data_folder, 'epinions')
        # Fetch the archives only on first use.
        if not exists(folder):
            self._download(folder)
        # datatable parses the raw txt files quickly, then hand to pandas.
        self.ratings = dt.fread(
            join(folder, 'ratings_data.txt'),
            columns=['user_id', 'item_id', 'rating']
        ).to_pandas()
        self.trust = dt.fread(
            join(folder, 'trust_data.txt'),
            columns=['source_user_id', 'target_user_id', 'trust_value']
        ).to_pandas()
    @safe
    def _download(self, path):
        """Download the two bz2 archives from trustlet.org into `path`."""
        self.logger.info('Downloading Epinions dataset...')
        mkdir(path)
        base_url = 'http://www.trustlet.org/datasets/downloaded_epinions/'
        filepath = join(path, 'ratings_data.txt.bz2')
        download_dataset(
            base_url + 'ratings_data.txt.bz2',
            filepath,
            manage_folder=False
        )
        filepath = join(path, 'trust_data.txt.bz2')
        download_dataset(
            base_url + 'trust_data.txt.bz2',
            filepath,
            manage_folder=False
        )
| StarcoderdataPython |
3269499 | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Copyright (c) 2021
#
# See the LICENSE file for details
# see the AUTHORS file for authors
# ----------------------------------------------------------------------
#--------------------
# System wide imports
# -------------------
import json
import requests
import time
import datetime
# ---------------
# Airflow imports
# ---------------
from airflow.hooks.base import BaseHook
#--------------
# local imports
# -------------
# -----------------------
# Module global variables
# -----------------------
# ----------------
# Module constants
# ----------------
class ActionDatabaseHook(BaseHook):
    """Airflow hook for the ACTION project observations REST API.

    The Airflow connection may carry a JSON `extra` field with "page_size"
    (results per request) and "tps" (requests-per-second throttle).
    """
    DEFAULT_HOST = "api.actionproject.eu"
    API_SLUG = "observations"
    DEFAULT_CONN_TYPE = "https"
    DEFAULT_PORT = 443
    DEFAULT_PAGE_SIZE = 100
    DEFAULT_TPS = 1
    def __init__(self, conn_id):
        """Store the Airflow connection id; the session is created lazily."""
        super().__init__()
        self._conn_id = conn_id
        self._session = None
    def get_conn(self):
        """Build once and return (session, base_url, page_size, delay).

        Reads conn_type/host/port/schema/password from the Airflow
        connection and installs a Bearer-token Authorization header.
        """
        if self._session is None:
            self.log.info(f"getting connection information from {self._conn_id}")
            config = self.get_connection(self._conn_id)
            # Define API base url.
            ctyp = config.conn_type or self.DEFAULT_CONN_TYPE
            host = config.host or self.DEFAULT_HOST
            port = config.port or self.DEFAULT_PORT
            slug = config.schema or self.API_SLUG
            token = config.password
            self._page_size = self.DEFAULT_PAGE_SIZE
            self._delay = 1.0/self.DEFAULT_TPS
            if config.extra:
                try:
                    extra = json.loads(config.extra)
                except json.decoder.JSONDecodeError:
                    # Malformed extras: silently fall back to the defaults.
                    pass
                else:
                    self._page_size = extra.get("page_size", self.DEFAULT_PAGE_SIZE)
                    self._delay = 1.0/extra.get("tps", self.DEFAULT_TPS)
            self._base_url = f"{ctyp}://{host}:{port}/{slug}"
            self._session = requests.Session()
            self._session.headers.update({'Authorization': f"Bearer {token}"})
        return self._session, self._base_url, self._page_size, self._delay
    def _paginated_get_entries(self, session, url, params, page_size, n_entries):
        """Yield API results page by page.

        Stops after an empty page or once at least `n_entries` items have
        been yielded (the last page may overshoot n_entries). Each request
        is throttled by the configured delay.
        """
        page = 1
        total = 0
        premature_exit = False
        if page_size > n_entries:
            page_size = n_entries
        while not premature_exit:
            self.log.debug(f"Requesting page {url}")
            response = session.get(
                url, params={**params, **{"page": page, "limit": page_size}}
            )
            if not response.ok:
                self.log.error(f"{response.text}")
                response.raise_for_status()
            response_json = response.json()
            yield from response_json
            n = len(response_json)
            page += 1
            total += n
            premature_exit = (n == 0) or (total >= n_entries)
            time.sleep(self._delay)
    # ----------
    # Public API
    # ----------
    def __enter__(self):
        '''Support for hook context manager'''
        self.get_conn()
        return self
    def __exit__(self, type, value, traceback):
        '''Support for hook context manager'''
        self.close()
    def upload(self, observations):
        '''
        Uploads observations to the ACTION database, one POST per item.

        Parameters
        ----------
        observations : iterable of dict
            Observation payloads. Each one is stamped with a UTC
            "written_at" timestamp before being sent; any non-2xx response
            raises. Requests are throttled by the configured delay.
        '''
        session, url, page_size, delay = self.get_conn()
        self.log.info(f"Uploading {len(observations)} observations to ACTION Database")
        for observation in observations:
            observation["written_at"] = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
            response = session.post(url, json=observation)
            response.raise_for_status()
            time.sleep(delay)
    def download(self, start_date, end_date, project, obs_type, n_entries):
        '''
        Fetches entries from the ACTION database for a given project
        between start/end date, or up to n_entries, whichever occurs
        sooner.

        Parameters
        ----------
        start_date : str
            Start date to fetch from (inclusive), format YYYY-MM-DD
            (equal to Airflow's ds format).
        end_date : str
            End date to fetch up to (exclusive), format YYYY-MM-DD.
        project : str
            The ACTION project name.
        obs_type : str
            Observation type: either "observation" or "classification".
        n_entries : int
            Maximum number of entries to download. Requests are paged
            internally by the "page_size" connection parameter.
        '''
        session, url, page_size, delay = self.get_conn()
        self.log.info(f"Getting from ACTION Database {n_entries} {obs_type}(s) for {project} from {start_date} to {end_date}")
        params = {
            "begin_date" : start_date,
            "finish_date": end_date,
            "project"    : project,
            "obs_type"   : obs_type,
        }
        yield from self._paginated_get_entries(session, url, params, page_size, n_entries)
    def close(self):
        """Drop the cached session; a new one is built on next use."""
        self.log.info(f"Closing ACTION database hook")
        self._session = None
| StarcoderdataPython |
6640410 | # Generated by Django 3.0.7 on 2020-07-05 00:03
from django.db import migrations, models
# Auto-generated Django migration: alter Person.gender to a choice field.
class Migration(migrations.Migration):
    dependencies = [
        ('app', '0011_person_person'),
    ]
    operations = [
        migrations.AlterField(
            model_name='person',
            name='gender',
            # NOTE(review): default 'U' is not among the declared choices
            # ('male'/'female'), so forms will reject the default -- confirm
            # this is intended before generating follow-up migrations.
            field=models.CharField(choices=[('male', 'Male'), ('female', 'Female')], default='U', max_length=7),
        ),
    ]
| StarcoderdataPython |
6622121 | <reponame>maksonlee/multitest_transport
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A module to handle cluster commands.
Cluster commands use Docker Swarm to manage multiple MTT replica nodes. However,
this feature is currently not working because Docker Swarm does not support
--privilege option when creating a service. See the link below for details:
https://github.com/docker/swarmkit/issues/1030
"""
import copy
from multitest_transport.cli import command_util
from multitest_transport.cli import config
# Per-cluster config file location, keyed by (lowercased) cluster name.
CONFIG_PATH_FORMAT = '~/.config/mtt/clusters/%s.ini'
class ClusterRegistry(object):
  """A class to store cluster configs."""
  def __init__(self):
    # A model cluster config: defines the fields every per-cluster config
    # instance is cloned from.
    self._config = config.Config(filename=None)
    self._config.DefineField('manager_host')
    self._config.DefineField('manager_join_token')
    self._config.DefineField('worker_join_token')
    self._config_map = {}  # cache of loaded configs, keyed by cluster name
  def _GetConfigPath(self, name):
    """Return the config file path for a given cluster name."""
    return CONFIG_PATH_FORMAT % name
  def GetConfig(self, name):
    """Return a cluster config for a given name.

    The config is loaded from disk on first access and cached; cluster
    names are case-insensitive (lowercased).

    Args:
      name: a cluster name.
    Returns:
      a cluster config.
    """
    name = name.lower()
    if name not in self._config_map:
      filename = self._GetConfigPath(name)
      # Deep-copy the model's field map so each cluster gets its own fields.
      field_map = copy.deepcopy(self._config.field_map)
      self._config_map[name] = config.Config(filename, field_map=field_map)
      self._config_map[name].Load()
    return self._config_map[name]
class ClusterCommandHandler(object):
  """A handler for cluster commands.

  Dispatches the 'create', 'add_node' and 'remove_node' sub-commands to
  Docker Swarm operations executed over SSH on the target host.
  """
  def __init__(self):
    # Maps sub-command names to their handler methods.
    self._command_map = {
        'create': self.Create,
        'add_node': self.AddNode,
        'remove_node': self.RemoveNode,
    }
    self._registry = ClusterRegistry()
  def Run(self, args):
    """Dispatch to the handler selected by args.command."""
    self._command_map[args.command](args)
  def AddParser(self, subparsers):
    """Add a command argument parser.

    Args:
      subparsers: an argparse subparsers object.
    """
    parser = subparsers.add_parser(
        'cluster', help='Create and manage MTT clusters.')
    parser.add_argument(
        'command', choices=self._command_map.keys())
    parser.add_argument('--name')
    parser.add_argument('--host', default=None)
    parser.add_argument('--token', default=None)
    parser.add_argument('--ssh_user', default=None)
    parser.set_defaults(func=self.Run)
  def Create(self, args):
    """Creates a cluster.

    This actually creates a Docker swarm and deploys a MTT service on it.
    The manager host is recorded in the cluster config on success.

    Args:
      args: an argparse.ArgumentParser object.
    Raises:
      ValueError: if mtt_control_server_url or host is not set.
    """
    if not config.config.mtt_control_server_url:
      raise ValueError('mtt_control_server_url must be set.')
    if not args.host:
      raise ValueError('--host option must be set')
    context = command_util.CommandContext(host=args.host, user=args.ssh_user)
    docker_context = command_util.DockerContext(context, try_use_gcloud=False)
    cluster_config = self._registry.GetConfig(args.name)
    docker_context.Run(['swarm', 'init'])
    # TODO: get token ID and store it.
    docker_context.Run([
        'service', 'create', '--name', 'mtt', '--env',
        'MTT_CONTROL_SERVER_URL=%s' % config.config.mtt_control_server_url,
        '--mode', 'global', 'gcr.io/android-mtt/mtt'
    ])
    cluster_config.manager_host = args.host
    cluster_config.Save()
  def AddNode(self, args):
    """Adds a node to an existing cluster.

    Joins the host to the swarm managed by the cluster's manager host
    (port 2377) using the provided join token.

    Args:
      args: an argparse.ArgumentParser object.
    Raises:
      ValueError: if a host or a token is missing.
    """
    if not args.host:
      raise ValueError('--host must be provided')
    if not args.token:
      raise ValueError('--token must be provided')
    context = command_util.CommandContext(host=args.host, user=args.ssh_user)
    docker_context = command_util.DockerContext(context, try_use_gcloud=False)
    cluster_config = self._registry.GetConfig(args.name)
    # The manager node cannot re-join its own swarm as a worker.
    if args.host == cluster_config.manager_host:
      raise ValueError(
          '%s is already a manager node for %s cluster' % (
              args.host, args.name))
    docker_context.Run(
        [
            'swarm', 'join',
            '--token', args.token,
            '%s:2377' % cluster_config.manager_host])
  def RemoveNode(self, args):
    """Removes a node from an existing cluster.

    Runs `docker swarm leave --force` on the target host.

    Args:
      args: an argparse.ArgumentParser object.
    Raises:
      ValueError: if a host is missing.
    """
    if not args.host:
      raise ValueError('--host must be provided')
    context = command_util.CommandContext(host=args.host, user=args.ssh_user)
    docker_context = command_util.DockerContext(context, try_use_gcloud=False)
    docker_context.Run(
        ['swarm', 'leave', '--force'])
| StarcoderdataPython |
9688347 | <reponame>lmregus/Portfolio<filename>python/coding_bat/count_code/count_code.py
#########################
# #
# Developer: <NAME> #
# #
#########################
def count_code(str):
    """Count appearances of the pattern 'co?e' (any middle character).

    NOTE: the parameter shadows the builtin `str`; the name is kept for
    interface compatibility with existing callers.
    """
    matches = 0
    for i in range(len(str) - 3):
        # Pattern: 'c', 'o', any character, 'e'.
        if str[i] == 'c' and str[i + 1] == 'o' and str[i + 3] == 'e':
            matches += 1
    return matches
| StarcoderdataPython |
9605770 | <reponame>Peng-zju/R3Net
# coding: utf-8
import os
# Root of the MSD dataset on disk; each split directory holds images and
# masks side by side.
datasets_root = '/usr/data/msd'
# For each dataset, I put images and masks together
msd_train_path = os.path.join(datasets_root, 'train')
msd_test_path = os.path.join(datasets_root, 'test')
cd | StarcoderdataPython |
class ScribblerWrapper(object):
    """Delegating proxy around a scribbler object.

    The wrapped scribbler may opt out of data or MC events via its `data`
    / `mc` attributes (both default to True); unknown attribute access is
    forwarded to the wrapped object.
    """
    def __init__(self, scribbler):
        self.scribbler = scribbler
        self.data = getattr(scribbler, "data", True)
        self.mc = getattr(scribbler, "mc", True)
    def __repr__(self):
        return repr(self.scribbler)
    def __getattr__(self, attr):
        # These three are set in __init__; reaching here means they are
        # missing, which is a setup error rather than a delegation case.
        if attr in ("scribbler", "data", "mc"):
            raise AttributeError("{} should be assigned but isn't".format(attr))
        return getattr(self.scribbler, attr)
    def _skip(self):
        """True when the current dataset kind is disabled for this scribbler."""
        return (self.isdata and not self.data) or (not self.isdata and not self.mc)
    def begin(self, event):
        # Remember the dataset kind for subsequent event() calls.
        self.isdata = event.config.dataset.isdata
        if self._skip():
            return True
        if hasattr(self.scribbler, "begin"):
            return self.scribbler.begin(event)
    def event(self, event):
        if self._skip():
            return True
        if hasattr(self.scribbler, "event"):
            return self.scribbler.event(event)
        return True
    def end(self):
        if hasattr(self.scribbler, "end"):
            return self.scribbler.end()
        return True
8107317 | import socket
from struct import pack, unpack
from threading import Lock
from .Message import Message
from .MessageSerializer import MessageSerializer, JsonSerializer
__all__ = ["SocketManager"]
class SocketManager:
    """Thread-safe, length-prefixed message transport over a socket.

    Frames consist of a 4-byte little-endian unsigned length followed by
    the serialized Message payload. Send and Receive use separate locks so
    one thread may read while another writes.
    """
    def __init__(self, **kwargs):
        """Create a manager from an 'address' (host, port) tuple.

        Optional keys: 'socket' (an already-connected socket) and
        'Serializer' (a MessageSerializer subclass; JsonSerializer by
        default). Raises TypeError when 'address' is missing or the
        serializer is not a MessageSerializer subclass.
        """
        self.socket = kwargs.get("socket", None)
        self.address = kwargs.get("address", None)
        self.Serializer = kwargs.get("Serializer", JsonSerializer)
        self._send_lock = Lock()
        self._receive_lock = Lock()
        if self.address is not None:
            if self.socket is None:
                self.socket = socket.socket()
                self.socket.connect(self.address)
        else:
            raise TypeError("SocketManager constructor should provide an "
                            "'address' key with (host, port) tuple.")
        try:
            isserializer = issubclass(self.Serializer, MessageSerializer)
        except TypeError:
            # issubclass raises TypeError when Serializer is not a class;
            # the original bare except also hid unrelated failures.
            isserializer = False
        if not isserializer:
            raise TypeError("'Serializer' key should contain a subclass of {}."
                            .format(MessageSerializer.__name__))
    def _recv_exact(self, size):
        """Read exactly `size` bytes from the socket.

        socket.recv may legally return fewer bytes than requested, so loop
        until the frame is complete (the original single recv() silently
        truncated large or fragmented messages). Raises socket.error when
        the peer closes the connection mid-frame.
        """
        chunks = []
        remaining = size
        while remaining > 0:
            chunk = self.socket.recv(remaining)
            if not chunk:
                raise socket.error("connection forcibly closed.")
            chunks.append(chunk)
            remaining -= len(chunk)
        return b"".join(chunks)
    def Receive(self):
        """Receive and decode one Message.

        Raises socket.error when the connection is closed.
        """
        with self._receive_lock:
            header = self._recv_exact(4)
            msg_size = unpack("<I", header)[0]
            payload = self._recv_exact(msg_size)
            return Message.Decode(payload, self.Serializer)
    def Send(self, msg):
        """Serialize and send one Message.

        Raises TypeError for non-Message arguments and socket.error on
        transmission failure.
        """
        with self._send_lock:
            if not isinstance(msg, Message):
                # Original message printed Message.__class__ (i.e. `type`)
                # instead of the expected class name.
                raise TypeError("{0} is not instance of {1}"
                                .format(type(msg).__name__, Message.__name__))
            serialized = Message.Encode(msg, self.Serializer)
            data = pack("<I", len(serialized)) + serialized
            try:
                # sendall retries until every byte is written; plain send()
                # may transmit only part of the frame.
                self.socket.sendall(data)
            except OSError:
                raise socket.error("connection forcibly closed.")
    def Disconnect(self):
        """Shut down and close the socket; safe to call on a dead socket."""
        if self.socket is not None:
            try:
                # Try to close the connection if not closed
                self.socket.shutdown(socket.SHUT_RDWR)
            except OSError:
                # Already disconnected; nothing left to shut down.
                pass
            finally:
                self.socket.close()
    def fileno(self):
        '''Return the attached socket file descriptor'''
        return self.socket.fileno()
| StarcoderdataPython |
4961257 | <reponame>lovaulonze/matplotlib-img-scatter
# Example script: scatter-plot sine and cosine samples using image markers
# provided by the img_scatter extension (adds Axes.scatter_img).
import matplotlib
import matplotlib.pyplot as plt
import img_scatter
import numpy
# Three full periods of samples.
a = numpy.linspace(-3.14*3, 3.14*3, 50)
b = numpy.sin(a)
c = numpy.cos(a)
matplotlib.style.use("science")
fig = plt.figure(figsize=(6, 4))
ax = fig.add_subplot(111)
ax.scatter_img(x=a, y=b, s=200, marker="o")
ax.scatter_img(x=a, y=c, s=200, marker="s")
# Export the same figure in three formats.
fig.savefig("one_figure.svg")
fig.savefig("one_figure.png")
fig.savefig("one_figure.pdf")
| StarcoderdataPython |
8165662 | # 2. Write a Python program to calculate sum of first 10 numbers using a while loop.
def sum_of_n_natural_numbers(num):
    """Return the sum 1 + 2 + ... + num via Gauss' closed form.

    Uses integer division so the result is an int -- num * (num + 1) is
    always even. The original returned a float from true division and
    special-cased 0 unnecessarily (the formula already yields 0).
    """
    return num * (num + 1) // 2
# Simple CLI driver: read n from stdin and print the closed-form sum.
number = int(input("Please Enter any Number: "))
total_value = sum_of_n_natural_numbers(number)
print("Sum of Natural Numbers from 1 to {0} = {1}".format(number, total_value))
1720012 | """
This file contains the main functionality of the software and is used to
achieve two primary goals and is divided into two sections
1. Approximate the walker density as a function of time using Monte Carlo
simulations
2. Determine the steady state solution of a random walk on a network using the
methodology developed in our paper
"""
from operator import itemgetter
from scipy.integrate import quad # Use quadrature integration
import numpy as np # General mathematics
import scipy.sparse as sparse # Sparse representation of matrices necessary for larger networks
import scipy.sparse.linalg as linalg # Access to eigenvectors and eigenvalues
"""
Section 1: Monte Carlo simulations
The Monte-Carlo simulations are implemented as follows
1. A large number of random walkers is allowed to make steps on a given network
2. The walker density is averaged among the walkers
The details of the Monte-Carlo simulation are explained in the appendix of our
paper.
"""
def make_step(G, origin, wtd='wtd'):
    """
    Draw a waiting time for every edge leaving `origin` and pick the edge
    that fires first.

    Each edge (origin, neighbour) must carry a data attribute `wtd` that
    is a waiting time distribution exposing `.rvs()`.

    Returns a tuple (`delta`, `neighbour`), where
    - `delta` is the length of time before the step was executed
    - `neighbour` is the neighbour that the step was made to from `origin`

    Raises ValueError (from `min`) if `origin` has no outgoing edges.
    """
    # `.items()` works on plain dict-of-dict graphs and networkx adjacency
    # views alike; the original `.iteritems()` exists only in Python 2.
    options = [(data[wtd].rvs(), neighbour)
               for neighbour, data in G[origin].items()]
    # Select the neighbour with the smallest waiting time.
    # The argument `key = itemgetter(0)` ensures that the waiting times are compared
    return min(options, key=itemgetter(0))
def make_steps(G, origin, maxtime, wtd='wtd'):
    """
    Execute a continuous-time random walk starting at `origin` until the
    elapsed time exceeds `maxtime`. Only the elapsed time is constrained;
    the number of steps is NOT.

    Returns a list of tuples (`time`, `node`): the arrival time of each
    step. The first entry is (0, origin); the last entry is the first step
    whose time exceeds `maxtime`.

    This function is an implementation of Algorithm 1 in Appendix 1 on
    page 9 of our paper.
    """
    time = 0                     # Set the current time
    current = origin             # Start with the origin
    steps = [(time, current)]    # Initialise the list of nodes
    while time <= maxtime:
        delta, node = make_step(G, current, wtd)  # Make one step
        time += delta            # Update the time...
        current = node           # ...and the node
        steps.append((time, current))  # Extend the step list
    return steps                 # Return the list of steps
def steps2probability(steps, delta, bins):
    """
    This function takes a sequence of steps and computes an array representing
    the probability to find a walker on a given node in a range of time
    intervals. The time intervals are uniformly spaced.
    `steps` is the sequence of step tuples obtained from, e.g. calling `make_steps`.
    `delta` is the width of a time step.
    `bins` is the number of bins.
    Hence, the array of probabilities corresponds to a time span
    $[0, `delta` * `bins`]$.
    This function returns a dictionary keyed by node. The values are arrays of
    length `bins` whose $i^{th}$ element represents the probability to find the
    walker on the associated node in the time interval
    $[i * `delta`, (i + 1) * `delta`]$.
    This function is an implementation of the algorithm discussed in appendix
    2 on page 10 of our paper.
    """
    # The ith element of the vector associated with each node shall represent
    # the probability to find the walker at the respective node in the time
    # interval [i, i + 1] * delta
    probabilities = {}  # Declare a dictionary of probabilities
    # Consider all transitions: (t_j, j) is the occupied node and its arrival
    # time, t_i the time at which the walker leaves it again.
    for (t_i, _), (t_j, j) in zip(steps[1:], steps):
        lower = int(t_j / delta)  # Index of the lowest bin involved
        # `make_steps` stops only once the elapsed time exceeds `maxtime`, so
        # the last recorded transition may start at or beyond the observation
        # window [0, bins * delta). Skip such transitions: indexing p[lower]
        # would otherwise raise an IndexError.
        if lower >= bins:
            continue
        p = probabilities.setdefault(j, np.zeros(bins))  # Get a default probability
        upper = int(t_i / delta)  # Index of the highest bin involved
        # Did the step happen in the same bin?
        if lower == upper:
            frac = (t_i - t_j) / delta
            p[lower] += frac
        else:
            # The fractional time spent in the lower bin is given by
            # [(lower + 1) * delta - t_j] / delta and simplifying gives
            lowerfrac = lower + 1 - t_j / delta
            p[lower] += lowerfrac
            # The fractional time spent in the upper bin is given by
            # [t_i - upper * delta] / delta and simplifying gives
            upperfrac = t_i / delta - upper
            if upper < bins:
                p[upper] += upperfrac
            # The number of bins between the lower and upper bins are
            span = upper - lower - 1
            # Fill these with ones if there are bins inbetween
            # (numpy slicing silently clips the slice at the array end)
            if span > 0:
                p[lower + 1 : lower + 1 + span] = 1
    return probabilities
def probability_moments(probability, bins, run=0, moments=None):
    """
    This function calculates the mean and standard deviation of the probability
    to find a walker on a given node. It does so iteratively such that the
    results of a Monte-Carlo simulation can be discarded after each simulation
    is completed.
    `probability` is the dictionary of probabilities obtained from `steps2probability`
    `bins` is the number of bins passed to `steps2probability`
    `run` is the number of iterations (not to be used explicitly).
    `moments` is a dictionary to keep track of moments (not to be used explicitly).
    This function returns a dictionary keyed by node. The value is a tuple of
    arrays. The first element is an array of means $x$ and the second array is the
    mean of $x^2$, NOT the variance.
    """
    z = np.zeros(bins)  # Zero array used for nodes the walker never visited
    probability = dict(probability)  # Copy so `pop` below does not mutate the caller's dict
    # `is None`, not `== None`: identity test avoids surprising results for
    # dict-like arguments defining __eq__, and a fresh dict per call avoids
    # the mutable-default-argument pitfall.
    if moments is None:
        moments = {}
    # Update nodes that are already part of the running mean. `list(...)`
    # snapshots the items so reassigning values while iterating is safe on
    # Python 3 as well (`.iteritems()` was Python-2-only).
    for node, (mean, mean2) in list(moments.items()):
        p = probability.pop(node, z)  # Get the probability (zero if absent this run)
        # Calculate the mean, mean square and update iteratively
        mean = (run * mean + p) / (run + 1)
        mean2 = (run * mean2 + p ** 2) / (run + 1)
        moments[node] = (mean, mean2)
    # Consider nodes that are not part of the mean already: their previous
    # contributions were all zero, so only the current run contributes.
    for node, p in probability.items():
        moments[node] = (p / (run + 1), p ** 2 / (run + 1))
    return moments
def walk(G, origin, bins, delta, runs, wtd='wtd', debug=False):
    """
    This function is a convenience function which calculates probability moments
    for a walker starting on node `origin` of the network `G`. The maximal
    simulation time is determined by `bins`*`delta`. The behaviour of the walker
    is simulated `runs` times.
    This function assumes that each edge of the network has a data attribute
    `wtd` which is an instance of a probability distribution.
    `debug` is a flag which results in the run number being printed if set to True.
    This function returns a dictionary keyed by node. The value is a tuple of
    arrays. The first element is an array of means $x$ and the second array is the
    mean of $x^2$, NOT the variance.
    """
    maxtime = bins * delta  # The maximum time to run the simulation up to
    moments = {}  # The moments of the probability distributions
    # `range` instead of the Python-2-only `xrange`: identical iteration
    # behaviour, and the function becomes runnable on Python 3 too.
    for run in range(runs):
        steps = make_steps(G, origin, maxtime, wtd)  # Make enough steps
        probability = steps2probability(steps, delta, bins)  # Get the pdf
        moments = probability_moments(probability, bins, run, moments)
        if debug:
            # Parenthesised single-argument print: same output on Python 2
            # (where it was a print statement) and Python 3.
            print(run + 1)
    return moments  # Return the moments
"""
Section 2: Steady state solutions
"""
def steady_state(ETM, resting_time, k=1):
    """
    This function calculates at most `k` steady state solutions of
    a random walk on a network with effective transition matrix
    `ETM` and mean resting time `resting_time`.
    Note that multiple distinct steady state solutions can exist
    if the network is not connected.
    `k` is the number of eigenpairs requested from ARPACK (default 1,
    matching the previous hard-coded behaviour); it must satisfy
    `k < N - 1` where N is the matrix dimension.
    This function returns a list of at most `k` tuples of the form
    (`eigenvalue`, `vector`), where `eigenvalue` is
    the eigenvalue associated with `vector`. Steady state solutions
    will have $`eigenvalue`\approx 1$ and will be represented by
    `vector`. Note that the approximate equality is a result of
    numerical errors.
    """
    # Get the eigenvectors as discussed in section III on page 5 of our paper
    evalues, evectors = linalg.eigs(ETM, k=k, which='LM')
    results = []
    for i, evalue in enumerate(evalues):
        evector = evectors[:, i]  # Obtain the ith eigenvector
        # Multiply by the resting times as defined in Eq. (27) on page 6
        p = evector * resting_time
        p = p / sum(p)  # Normalise the solutions
        results.append((evalue, p))  # Add to the list of possible solutions
    # Order the results by largest eigenvalue. `eigs` returns complex
    # eigenvalues, which are not orderable, so sort on the real part
    # (plain `sort(reverse=True)` would raise TypeError for k > 1).
    results.sort(key=lambda pair: pair[0].real, reverse=True)
    return results
def ETM_rest_uniform(G, wtd, max_int=np.inf, cache=None):
    """
    This function does the same as `ETM_rest` but assumes that the
    WTDs of all edges are identical and equal to `wtd`.
    `max_int` optionally caps the upper integration limit (default infinity).
    `cache` is a dictionary that is used to speed up the calculation
    by caching results keyed by the node degree. If cache is `None` the
    functionality is disabled.
    Returns a tuple (effective transition matrix, mean resting times).
    """
    n = G.number_of_nodes()
    ETM = sparse.lil_matrix((n, n))  # Set up a sparse matrix
    resting_time = np.zeros(n)  # Set up the resting times
    for node in G:  # Go over each node
        # `list(...)` so that `len` also works when `G.neighbors` returns an
        # iterator (networkx >= 2); networkx 1.x already returned a list.
        neighbours = list(G.neighbors(node))
        n_neighbours = len(neighbours)  # Get the number of neighbours
        # With identical WTDs each neighbour is equally likely to win the race.
        for neighbour in neighbours:
            ETM[neighbour, node] = 1. / n_neighbours
        # Check the cache: the resting time depends only on the degree.
        if cache is not None and n_neighbours in cache:
            resting_time[node] = cache[n_neighbours]
        else:
            # Define the phi element: probability that none of the
            # `n_neighbours` identically distributed edges has fired by t.
            phi = lambda t: (1 - wtd.cdf(t)) ** n_neighbours
            # Calculate the resting time
            integral = quad(phi, 0, max_int)[0]
            resting_time[node] = integral
            # Save it to the cache if desired
            if cache is not None:
                cache[n_neighbours] = integral
    return ETM, resting_time
def ETM_rest(G, wtd='wtd', max_int=np.inf, debug=False):
    """
    This function calculates the matrix $\mathbb{T}$ and the mean resting times
    for a network `G`. It assumes that each edge of the network has a data
    attribute `wtd` which is a probability distribution.
    In principle, the calculation requires the evaluation of integrals over the
    domain $[0, \infty]$. The parameter `max_int` can be used to set a fixed
    upper limit to the integration domain if desired.
    The flag `debug` indicates whether to print debug information.
    This function returns a tuple ($\mathbb{T}$, mean resting times).
    """
    n = G.number_of_nodes()  # The number of nodes
    ETM = sparse.lil_matrix((n, n))  # Create the effective transition matrix
    resting_time = np.zeros(n)  # Create a vector of resting times
    for node in G:
        edges = list(G[node].items())
        for neighbour, data in edges:
            # Define the T_{neighbour, node} matrix element as in Eq. (2) on
            # page 3 of our paper: pdf of this edge firing at t, times the
            # probability that no competing edge has fired by t.
            # `np.prod` of an empty list is 1.0, matching the previous
            # `reduce(lambda a, b: a * b, [...], 1)` while avoiding `reduce`,
            # which is no longer a builtin on Python 3.
            # `data=data, neighbour=neighbour` bind the loop variables at
            # definition time (defensive against late-binding closures).
            T = lambda t, data=data, neighbour=neighbour: data[wtd].pdf(t) * np.prod(
                [1 - data2[wtd].cdf(t) for neighbour2, data2 in edges if neighbour2 != neighbour])
            # Integrate to get the effective transition matrix as in
            # Eq. (26) on page 5 of our paper
            int1 = ETM[neighbour, node] = quad(T, 0, max_int)[0]
            # Carry out the integral to find resting times as in the second column
            # of page 5
            int2 = quad(lambda t: t * T(t), 0, max_int)[0]
            # Add to the resting time on the node
            resting_time[node] += int2
            if debug:
                # Single pre-formatted string: prints the same on py2 and py3.
                print('%s %s %s' % (node, int1, resting_time[node]))
    return ETM, resting_time
| StarcoderdataPython |
9660360 | <filename>gcp_netblocks/gcp_netblocks.py
#!/usr/bin/env python3
#################################################################
# Google Cloud Netblock resolver - prints all subnets
#
# dependencies:
# pip3 install dnspython
#################################################################
import dns.resolver
def main():
    """Print all IPv4/IPv6 subnets advertised by Google Cloud's SPF records.

    Resolves the top-level ``_cloud-netblocks.googleusercontent.com`` TXT
    record to obtain the per-netblock record names, resolves each of those,
    and collects their subnet entries into one list.

    NOTE(review): the parsing below slices the *string form* of an SPF TXT
    record (e.g. ``"... TXT \"v=spf1 include:_cloud-netblocks1... ?all\""``)
    at fixed word offsets: ``[5:-1]`` skips the rrset preamble and the
    trailing ``?all``, ``rec[8:]`` strips ``include:`` and ``net[4:]``
    strips the ``ip4:``/``ip6:`` prefix. This is fragile against any change
    in the record layout -- confirm against live records.
    NOTE(review): ``dns.resolver.query`` is deprecated in dnspython >= 2.0
    in favour of ``dns.resolver.resolve``.
    """
    # Top-level record listing the per-region netblock record names.
    netblock_response = dns.resolver.query('_cloud-netblocks.googleusercontent.com', 'TXT').rrset
    netblock_names = [rec[8:] for rec in str(netblock_response).split(' ')[5:-1]]
    all_subnets = []
    for name in netblock_names:
        # Each referenced record is itself an SPF TXT record of ip4:/ip6: entries.
        netblock_response = dns.resolver.query(name, 'TXT').rrset
        subnets = [net[4:] for net in str(netblock_response).split(' ')[5:-1]]
        all_subnets = all_subnets + subnets
    print(all_subnets)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
9700392 | <filename>capture_raw_logger/advance_logger.py
from typing import Optional, Dict, Union
from aiologger import Logger
import json
import aiofiles
import collections
import time
import os
import asyncio
import base64
import sys
sys.path.insert(0, "../")
from network_monitor.filters import get_protocol, present_protocols # noqa
from network_monitor.protocols import ( # noqa
AF_Packet,
Packet_802_3,
Packet_802_2
)
from network_monitor import ( # noqa
Interface_Listener,
Packet_Parser
)
# TODO: write to a tmp file and atomically swap it in with os.replace
# (noted intent -- not implemented below).
async def _log_interface_listener_output(queue):
    """Drain raw frames from `queue` and append them to a capture file.

    Each queue item is expected to be ``(_, (raw_bytes, address))`` as
    produced by ``Interface_Listener``; for every item two lines are
    written: the serialized ``AF_Packet`` header as JSON, then the raw
    frame bytes base64-encoded. Runs until cancelled.

    NOTE(review): the file handle is never flushed explicitly; frames
    buffered by aiofiles at cancellation time may be lost -- confirm
    whether that is acceptable for this capture tool.
    """
    start_time = int(time.time())
    log_dir = "./test/data/interface_listener_output/"
    report_interval = 5  # seconds between console progress reports
    fname = f"interface_listener_output_{start_time}.lp"
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    # Counts observed protocol identifiers across all frames seen so far.
    tracker = collections.Counter()
    last_report_time = time.time()
    async with aiofiles.open(os.path.join(log_dir, fname), "w") as fout:
        while True:
            try:
                _, (raw_bytes, address) = await queue.get()
                af_packet = AF_Packet(address)
                # Ethernet protocol numbers > 1500 indicate Ethernet II
                # (802.3) framing; otherwise fall back to 802.2.
                if af_packet.Ethernet_Protocol_Number > 1500:
                    out_packet = Packet_802_3(raw_bytes)
                else:
                    out_packet = Packet_802_2(raw_bytes)
                await fout.write(json.dumps(af_packet.serialize()) + "\n")
                r_raw_bytes = base64.b64encode(raw_bytes).decode("utf-8")
                await fout.write(r_raw_bytes + "\n")
                queue.task_done()
                for identifier in present_protocols(out_packet):
                    tracker[identifier] += 1
                now = time.time()
                # Throttled progress report: queue depth + protocol counts.
                if now - last_report_time > report_interval:
                    __tracker = {k: v for k, v in tracker.items()}
                    last_report_time = now
                    print("queue size: ", queue.qsize())
                    print("Tracker: ", __tracker)
            except asyncio.CancelledError as e:
                # Propagate cancellation so the awaiting task finishes;
                # the `async with` still closes the file on the way out.
                print("log service cancelled", e)
                raise e
async def _log_packet_parser_output_no_filter(queue):
    """Append every parsed-packet mapping from `queue` to a JSON-lines file.

    One JSON document is written per queue item. A protocol counter is
    maintained from the item's keys and reported to the console at most
    once per `report_interval` seconds. Runs until cancelled.
    """
    started = int(time.time())
    log_dir = "./test/data/packet_parser_output/"
    report_interval = 5  # minimum seconds between console reports
    fname = f"packet_parser_output_{started}.lp"

    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    tracker = collections.Counter()
    last_report_time = time.time()

    async with aiofiles.open(os.path.join(log_dir, fname), "w") as fout:
        while True:
            try:
                item = await queue.get()
                await fout.write(json.dumps(item) + "\n")
                queue.task_done()

                # Count each identifier present in this item once
                # (iterating a mapping yields its keys).
                tracker.update(list(item))

                now = time.time()
                if now - last_report_time > report_interval:
                    snapshot = {k: v for k, v in tracker.items()}
                    last_report_time = now
                    print("queue size: ", queue.qsize())
                    print("Tracker: ", snapshot)
            except asyncio.CancelledError as e:
                # Let cancellation propagate; the context manager closes
                # the file on the way out.
                print("log service cancelled", e)
                raise e
async def log_raw_packets(
    interfacename: str,
):
    """Capture raw frames from `interfacename` and log them for 10 minutes.

    Wires an ``Interface_Listener`` producing into a queue to
    ``_log_interface_listener_output`` consuming from it, lets them run for
    600 seconds, then shuts down: listener first, queue drained, then the
    logger after a short grace period.
    """
    raw_queue = asyncio.Queue()
    logger = Logger.with_default_handlers()
    # start network listener
    listener_service = Interface_Listener(interfacename, raw_queue)
    listener_service_task: asyncio.Task = asyncio.create_task(
        listener_service.worker(logger), name="listener-service-task")
    log_task = asyncio.create_task(
        _log_interface_listener_output(raw_queue))
    # Fixed capture window: 10 minutes.
    await asyncio.sleep(600)
    listener_service_task.cancel()
    print("listener task cancelled")
    # Wait until the logger has consumed everything already enqueued.
    await raw_queue.join()
    # Grace period before cancelling the consumer itself.
    await asyncio.sleep(2)
    log_task.cancel()
    print("log task cancelled")
    # return_exceptions=True swallows the CancelledError both tasks re-raise.
    await asyncio.gather(listener_service_task, log_task, return_exceptions=True)
async def log_processed_packets(interfacename: str):
    """Capture, parse and log packets from `interfacename` for 10 minutes.

    Pipeline: ``Interface_Listener`` -> raw queue -> ``Packet_Parser`` ->
    processed queue -> ``_log_packet_parser_output_no_filter``. Shutdown is
    ordered upstream-first so each queue can drain before its consumer is
    cancelled.
    """
    raw_queue = asyncio.Queue()
    processed_queue = asyncio.Queue()
    logger = Logger.with_default_handlers()
    # start network listener
    listener_service = Interface_Listener(interfacename, raw_queue)
    listener_service_task: asyncio.Task = asyncio.create_task(
        listener_service.worker(logger), name="listener-service-task")
    packet_parser = Packet_Parser(raw_queue, processed_queue)
    packet_parser_service_task: asyncio.Task = asyncio.create_task(
        packet_parser.worker(logger), name="packet-service-task"
    )
    log_task = asyncio.create_task(
        _log_packet_parser_output_no_filter(processed_queue))
    # Fixed capture window: 10 minutes.
    await asyncio.sleep(600)
    listener_service_task.cancel()
    print("listener task cancelled")
    # Drain raw frames into the parser, stop the parser, then drain its output.
    await raw_queue.join()
    packet_parser_service_task.cancel()
    await processed_queue.join()
    # Grace period before cancelling the final consumer.
    await asyncio.sleep(2)
    log_task.cancel()
    print("log task cancelled")
    await asyncio.gather(listener_service_task, packet_parser_service_task, log_task, return_exceptions=True)
# Script entry point: capture and log parsed packets from eth0 (the capture
# duration is fixed inside log_processed_packets).
if __name__ == "__main__":
    asyncio.run(log_processed_packets(
        "eth0"
    ))
| StarcoderdataPython |
9776976 | <reponame>murlokito/playground
__title__ = "simulation"
__author__ = "murlux"
__copyright__ = "Copyright 2019, " + __author__
__credits__ = (__author__, )
__license__ = "MIT"
__email__ = "<EMAIL>"
import pandas as pd
import numpy as np
from datetime import datetime as dt
from dateutil.parser import parse
from typing import Callable, List
# Local imorts
from playground import settings
from playground.util import setup_logger
from playground.util_ops import get_delta_callable_for_tf
from playground.simulation.operations import SimulatedOperation
class SimulationEngine():
    """An object representing the Simulation Engine.

    NOTE(review): ``__init__`` assigns no attributes, yet the methods below
    read ``self.account``, ``self.data``, ``self.backdata``, ``self.pair``,
    ``self.tf``, ``self.logic``, ``self.tracker``, ``self.logger``,
    ``self._name``, ``self._tts``, ``self._simple_tts``,
    ``self.__next_analysis`` and ``self.__analysis_throttle``. These are
    presumably populated by a subclass or factory -- confirm before use.
    NOTE(review): ``helpers`` (used in ``print_results``/``_get_results``)
    and ``TwitterNotifier`` are not imported in this module's visible
    imports -- confirm they exist at runtime.
    """

    # Forward references are quoted (PEP 484): ``BacktestEngine`` and
    # ``ForwardtestEngine`` are not imported anywhere in this module, so
    # evaluating them eagerly at class-creation time raised ``NameError``
    # and made the module unimportable. ``SimulatedOperation`` is quoted
    # for consistency.
    operations: "List[SimulatedOperation]"

    backtesting_engine: "BacktestEngine" = None
    forwardtesting_engine: "ForwardtestEngine" = None

    def __init__(self, config, yesterday, initial_capital, pair, tf, logic,):
        """Initate the SimulationEngine.
        param: config: An HLOCV+ pandas dataframe with a datetime index
        type: config: pandas.DataFrame
        """
        # NOTE(review): intentionally (?) empty -- see class docstring.

    def update_datasets(self, dataset):
        """Replace the lookback dataset used by the trading logic.
        :param dataset: An HLOCV+ pandas dataframe with a datetime index
        :type dataset: pandas.DataFrame
        """
        self.backdata = dataset

    def process(self, today):
        """Process one closed candle of a forward-test session.
        :param today: An HLOCV+ pandas dataframe row with the last closed candle
        :type today: pandas.DataFrame
        """
        current_time = dt.now()
        # Throttle: only run the trading logic once per analysis interval.
        if current_time > (self.__next_analysis):
            self.logger.info(
                'Processing... %-4s - %-4s - %-4s ' + '------------'*10,
                self.pair, self.tf, today.datetime,
            )
            self.logger.info(
                'O: %-6.6g - H: %-6.6g - L: %-6.6g - C: %-6.6g - V: %-6.6g - MRFI:' \
                +' %-6.6g - SMRFI: %-6.6g - RSI: %-6.6g - MFI: %-6.6g - EMA50: %-6.6g - EMA100: %-6.6g', \
                today.open, today.high, today.low, today.close, today.volumeto, today.mrfi,
                today.smrfi, today.rsi, today.mfi, today.ema50, today.ema100,
            )
            date = today.get('datetime')
            equity = self.account.total_value(today.close)
            # NOTE(review): DataFrame.append is deprecated in modern pandas
            # (use pd.concat) -- confirm the pinned pandas version.
            self.data = self.data.append(today)
            self.data.sort_index(inplace=True, ascending=False)
            # Handle stop loss: close any position whose stop was hit by
            # this candle's extreme (low for longs, high for shorts).
            for p in self.account.positions:
                if p.type == "long":
                    if p.stop_hit(today.get('low')):
                        self.account.close_position(p, 1.0, today.get('low'))
                if p.type == "short":
                    if p.stop_hit(today.get('high')):
                        self.account.close_position(p, 1.0, today.get('high'))
            self.account.purge_positions()
            # Update account variables
            self.account.date = date
            self.account.equity.append(equity)
            # Equity tracking: benchmark (buy & hold) vs strategy equity.
            self.tracker.append({
                'date': date,
                'benchmark_equity': today.get('close'),
                'strategy_equity': equity,
            })
            self.logger.info('Executing trading logic... LookbackData: {} :: Data: {}'.format(
                self.backdata.shape, self.data.shape
            ))
            # Execute trading logic and allow full lookback
            self.logic(
                name=self._name,
                pair=self.pair,
                timeframe=self.tf,
                account=self.account,
                dataset=self.backdata,
                lookback=self.data,
                logger=self.logger,
                last_candle=today,
                _tts=self._tts,
                _simple_tts=self._simple_tts
            )
            self.__next_candle = (dt.fromtimestamp(today.time) + self.__analysis_throttle)
            self.__next_analysis = (self.__next_analysis + self.__analysis_throttle)
            self.yesterday = today
        # Cleanup empty positions
        # self.account.purge_positions()
        # ------------------------------------------------------------

    def print_results(self):
        """Log a human-readable summary of the simulation results."""
        self.logger.info("-------------- Results ----------------\n")
        being_price = self.data.iloc[0].open
        final_price = self.data.iloc[-1].close
        pc = helpers.percent_change(being_price, final_price)
        tweet_string = "--{}--\n".format(self._name)
        # Bug fix: was "{0} {0}", which printed the begin price twice and
        # ignored final_price.
        tweet_string += "Begin vs end : {0} {1}\n".format(being_price, final_price)
        tweet_string += "Buy and Hold : {0}%\n".format(round(pc*100, 2))
        tweet_string += "Net Profit : {0}\n".format(round(helpers.profit(self.account.initial_capital, pc), 2))
        pc = helpers.percent_change(self.account.initial_capital, self.account.total_value(final_price))
        tweet_string += "Strategy : {0}%\n".format(round(pc*100, 2))
        tweet_string += "Net Profit : {0}\n".format(round(helpers.profit(self.account.initial_capital, pc), 2))
        longs = len([t for t in self.account.opened_trades if t.type == 'long'])
        sells = len([t for t in self.account.closed_trades if t.type == 'long'])
        shorts = len([t for t in self.account.opened_trades if t.type == 'short'])
        covers = len([t for t in self.account.closed_trades if t.type == 'short'])
        tweet_string += "Longs : {0}\n".format(longs)
        tweet_string += "Sells : {0}\n".format(sells)
        tweet_string += "Shorts : {0}\n".format(shorts)
        tweet_string += "Covers : {0}\n".format(covers)
        tweet_string += "--------------------\n"
        tweet_string += "Total Trades : {0}\n".format(longs + sells + shorts + covers)
        tweet_string += "---------------------------------------"
        self.logger.info(tweet_string)
        #tn = TwitterNotifier()
        #tn.post_results_tweet(tweet_string)

    def _get_results(self):
        """
        Return the simulation results as a dict.
        The no-data branch returns the same structure with zeroed metrics.
        # TODO: please.... lol
        # """
        longs = len([t for t in self.account.opened_trades if t.type == 'long'])
        sells = len([t for t in self.account.closed_trades if t.type == 'long'])
        shorts = len([t for t in self.account.opened_trades if t.type == 'short'])
        covers = len([t for t in self.account.closed_trades if t.type == 'short'])
        if len(self.data) != 0:
            begin_price = self.data.iloc[0].open
            final_price = self.data.iloc[-1].close
            buy_hold_pc = helpers.percent_change(begin_price, final_price)
            strategy_pc = helpers.percent_change(self.account.initial_capital, self.account.total_value(final_price))
            return {
                'name': self._name,
                'begin_price': begin_price,
                'final_price': final_price,
                'buy_and_hold': {
                    'rate_on_equity': round(buy_hold_pc*100, 2),
                    'net_profit': round(helpers.profit(self.account.initial_capital, buy_hold_pc), 2),
                },
                'strategy':{
                    'rate_on_equity': round(strategy_pc*100, 2),
                    'net_profit': round(helpers.profit(self.account.initial_capital, strategy_pc), 2),
                    'long_count': longs,
                    'sell_count': sells,
                    'short_count': shorts,
                    'cover_count': covers,
                    'total': longs + sells + shorts + covers,
                },
                'positions': self.account._get_positions(),
            }
        else:
            # No candles processed yet: prices are unavailable, metrics zero.
            begin_price = 'N/A'
            final_price = 'N/A'
            return {
                'name': self._name,
                'begin_price': begin_price,
                'final_price': final_price,
                'buy_and_hold': {
                    'rate_on_equity': 0,
                    'net_profit': 0,
                },
                'strategy':{
                    'rate_on_equity': 0,
                    'net_profit': 0,
                    'long_count': longs,
                    'sell_count': sells,
                    'short_count': shorts,
                    'cover_count': covers,
                    'total': longs + sells + shorts + covers,
                },
                'positions': self.account._get_positions(),
            }

    def _get_operations(self):
        """
        TODO: Get operations from the current simulation engine execution
        (currently returns the engine itself).
        """
        return self
| StarcoderdataPython |
11357344 | <filename>hata/discord/guild/preinstanced.py
__all__ = (
'AuditLogEvent',
'ContentFilterLevel',
'GuildFeature',
'MFA',
'MessageNotificationLevel',
'NsfwLevel',
'VerificationLevel',
'VerificationScreenStepType',
'VoiceRegion',
)
import warnings
from ...backend.export import export
from ...backend.utils import class_property
from ..bases import PreinstancedBase, Preinstance as P
class AuditLogEvent(PreinstancedBase):
    """
    Represents the event type of an ``AuditLogEntry``.
    
    Attributes
    ----------
    name : `str`
        The name of audit log event.
    value : `int`
        The Discord side identifier value of the audit log event.
    
    Class Attributes
    ----------------
    INSTANCES : `dict` of (`int`, ``AuditLogEvent``) items
        Stores the predefined ``AuditLogEvent`` instances. These can be accessed with their `value` as key.
    VALUE_TYPE : `type` = `int`
        The audit log events' values' type.
    DEFAULT_NAME : `str` = `'UNDEFINED'`
        The default name of the audit log events
    
    Every predefined audit log event can be accessed as class attribute as well:
    
    +---------------------------+---------------------------+-------+
    | Class attribute name      | name                      | value |
    +===========================+===========================+=======+
    | guild_update              | guild_update              | 1     |
    +---------------------------+---------------------------+-------+
    | channel_create            | channel_create            | 10    |
    +---------------------------+---------------------------+-------+
    | channel_update            | channel_update            | 11    |
    +---------------------------+---------------------------+-------+
    | channel_delete            | channel_delete            | 12    |
    +---------------------------+---------------------------+-------+
    | channel_overwrite_create  | channel_overwrite_create  | 13    |
    +---------------------------+---------------------------+-------+
    | channel_overwrite_update  | channel_overwrite_update  | 14    |
    +---------------------------+---------------------------+-------+
    | channel_overwrite_delete  | channel_overwrite_delete  | 15    |
    +---------------------------+---------------------------+-------+
    | member_kick               | member_kick               | 20    |
    +---------------------------+---------------------------+-------+
    | member_prune              | member_prune              | 21    |
    +---------------------------+---------------------------+-------+
    | member_ban_add            | member_ban_add            | 22    |
    +---------------------------+---------------------------+-------+
    | member_ban_remove         | member_ban_remove         | 23    |
    +---------------------------+---------------------------+-------+
    | member_update             | member_update             | 24    |
    +---------------------------+---------------------------+-------+
    | member_role_update        | member_role_update        | 25    |
    +---------------------------+---------------------------+-------+
    | member_move               | member_move               | 26    |
    +---------------------------+---------------------------+-------+
    | member_disconnect         | member_disconnect         | 27    |
    +---------------------------+---------------------------+-------+
    | bot_add                   | bot_add                   | 28    |
    +---------------------------+---------------------------+-------+
    | role_create               | role_create               | 30    |
    +---------------------------+---------------------------+-------+
    | role_update               | role_update               | 31    |
    +---------------------------+---------------------------+-------+
    | role_delete               | role_delete               | 32    |
    +---------------------------+---------------------------+-------+
    | invite_create             | invite_create             | 40    |
    +---------------------------+---------------------------+-------+
    | invite_update             | invite_update             | 41    |
    +---------------------------+---------------------------+-------+
    | invite_delete             | invite_delete             | 42    |
    +---------------------------+---------------------------+-------+
    | webhook_create            | webhook_create            | 50    |
    +---------------------------+---------------------------+-------+
    | webhook_update            | webhook_update            | 51    |
    +---------------------------+---------------------------+-------+
    | webhook_delete            | webhook_delete            | 52    |
    +---------------------------+---------------------------+-------+
    | emoji_create              | emoji_create              | 60    |
    +---------------------------+---------------------------+-------+
    | emoji_update              | emoji_update              | 61    |
    +---------------------------+---------------------------+-------+
    | emoji_delete              | emoji_delete              | 62    |
    +---------------------------+---------------------------+-------+
    | message_delete            | message_delete            | 72    |
    +---------------------------+---------------------------+-------+
    | message_bulk_delete       | message_bulk_delete       | 73    |
    +---------------------------+---------------------------+-------+
    | message_pin               | message_pin               | 74    |
    +---------------------------+---------------------------+-------+
    | message_unpin             | message_unpin             | 75    |
    +---------------------------+---------------------------+-------+
    | integration_create        | integration_create        | 80    |
    +---------------------------+---------------------------+-------+
    | integration_update        | integration_update        | 81    |
    +---------------------------+---------------------------+-------+
    | integration_delete        | integration_delete        | 82    |
    +---------------------------+---------------------------+-------+
    | sticker_create            | sticker_create            | 90    |
    +---------------------------+---------------------------+-------+
    | sticker_update            | sticker_update            | 91    |
    +---------------------------+---------------------------+-------+
    | sticker_delete            | sticker_delete            | 92    |
    +---------------------------+---------------------------+-------+
    """
    INSTANCES = {}
    VALUE_TYPE = int
    DEFAULT_NAME = 'UNDEFINED'
    
    __slots__ = ()
    
    # predefined
    guild_update = P(1, 'guild_update')
    
    channel_create = P(10, 'channel_create')
    channel_update = P(11, 'channel_update')
    channel_delete = P(12, 'channel_delete')
    channel_overwrite_create = P(13, 'channel_overwrite_create')
    channel_overwrite_update = P(14, 'channel_overwrite_update')
    channel_overwrite_delete = P(15, 'channel_overwrite_delete')
    
    member_kick = P(20, 'member_kick')
    member_prune = P(21, 'member_prune')
    member_ban_add = P(22, 'member_ban_add')
    member_ban_remove = P(23, 'member_ban_remove')
    member_update = P(24, 'member_update')
    member_role_update = P(25, 'member_role_update')
    member_move = P(26, 'member_move')
    member_disconnect = P(27, 'member_disconnect')
    # Bug fix: was ``P(28, 'member_role_update')`` (copy-paste from the line
    # above); the docstring table documents name ``bot_add`` for value 28.
    bot_add = P(28, 'bot_add')
    
    role_create = P(30, 'role_create')
    role_update = P(31, 'role_update')
    role_delete = P(32, 'role_delete')
    
    invite_create = P(40, 'invite_create')
    invite_update = P(41, 'invite_update')
    invite_delete = P(42, 'invite_delete')
    
    webhook_create = P(50, 'webhook_create')
    webhook_update = P(51, 'webhook_update')
    webhook_delete = P(52, 'webhook_delete')
    
    emoji_create = P(60, 'emoji_create')
    emoji_update = P(61, 'emoji_update')
    emoji_delete = P(62, 'emoji_delete')
    
    message_delete = P(72, 'message_delete')
    message_bulk_delete = P(73, 'message_bulk_delete')
    message_pin = P(74, 'message_pin')
    message_unpin = P(75, 'message_unpin')
    
    integration_create = P(80, 'integration_create')
    integration_update = P(81, 'integration_update')
    integration_delete = P(82, 'integration_delete')
    
    sticker_create = P(90, 'sticker_create')
    sticker_update = P(91, 'sticker_update')
    sticker_delete = P(92, 'sticker_delete')
class VerificationLevel(PreinstancedBase):
    """
    Represents a guild's Discord verification level.
    
    Attributes
    ----------
    name : `str`
        The verification level's name.
    value : `int`
        The Discord side identifier value of the verification level.
    
    Class Attributes
    ----------------
    INSTANCES : `dict` of (`int`, ``VerificationLevel``) items
        The predefined ``VerificationLevel`` instances, keyed by their `value`.
    VALUE_TYPE : `type` = `int`
        The verification levels' values' type.
    
    The predefined instances are exposed as class attributes as well:
    
    +-----------------------+-----------+-------+
    | Class attribute name  | name      | value |
    +=======================+===========+=======+
    | none                  | none      | 0     |
    +-----------------------+-----------+-------+
    | low                   | low       | 1     |
    +-----------------------+-----------+-------+
    | medium                | medium    | 2     |
    +-----------------------+-----------+-------+
    | high                  | high      | 3     |
    +-----------------------+-----------+-------+
    | extreme               | extreme   | 4     |
    +-----------------------+-----------+-------+
    """
    INSTANCES = {}
    VALUE_TYPE = int
    
    __slots__ = ()
    
    # The predefined levels, ordered from least to most restrictive.
    none = P(0, 'none')
    low = P(1, 'low')
    medium = P(2, 'medium')
    high = P(3, 'high')
    extreme = P(4, 'extreme')
@export
class VoiceRegion(PreinstancedBase):
"""
Represents Discord's voice regions.
Attributes
----------
custom : `bool`
Whether the voice region is custom (used for events, etc.).
deprecated : `bool`
Whether the voice region is deprecated.
value : `str`
The unique identifier of the voice region.
name : `str`
The default name of the voice region.
vip : `bool`
Whether the voice region can be used only by guilds with `VIP_REGIONS` feature.
Class Attributes
----------------
INSTANCES : `dict` of (`str`, ``VoiceRegion``) items
Stores the created ``VoiceRegion`` instances.
VALUE_TYPE : `type` = `str`
The voice regions' values' type.
DEFAULT_NAME : `str` = `'Undefined'`
The default name of the voice regions.
Each predefined voice region is also stored as a class attribute:
+-----------------------+---------------+-------------------+---------------+-----------+-----------+
| Class attribute name | value | name | deprecated | vip | custom |
+=======================+===============+===================+===============+===========+===========+
| brazil | brazil | Brazil | False | False | False |
+-----------------------+---------------+-------------------+---------------+-----------+-----------+
| dubai | dubai | Dubai | False | False | False |
+-----------------------+---------------+-------------------+---------------+-----------+-----------+
| eu_central | eu-central | Central Europe | False | False | False |
+-----------------------+---------------+-------------------+---------------+-----------+-----------+
| eu_west | eu-west | Western Europe | False | False | False |
+-----------------------+---------------+-------------------+---------------+-----------+-----------+
| europe | europe | Europe | False | False | False |
+-----------------------+---------------+-------------------+---------------+-----------+-----------+
| hongkong | hongkong | Hong Kong | False | False | False |
+-----------------------+---------------+-------------------+---------------+-----------+-----------+
| india | india | India | False | False | False |
+-----------------------+---------------+-------------------+---------------+-----------+-----------+
| japan | japan | Japan | False | False | False |
+-----------------------+---------------+-------------------+---------------+-----------+-----------+
| russia | russia | Russia | False | False | False |
+-----------------------+---------------+-------------------+---------------+-----------+-----------+
| singapore | singapore | Singapore | False | False | False |
+-----------------------+---------------+-------------------+---------------+-----------+-----------+
| africa_south | southafrica | South Africa | False | False | False |
+-----------------------+---------------+-------------------+---------------+-----------+-----------+
| sydney | sydney | Sydney | False | False | False |
+-----------------------+---------------+-------------------+---------------+-----------+-----------+
| us_central | us-central | US Central | False | False | False |
+-----------------------+---------------+-------------------+---------------+-----------+-----------+
| us_east | us-east | US East | False | False | False |
+-----------------------+---------------+-------------------+---------------+-----------+-----------+
| us_south | us-south | US South | False | False | False |
+-----------------------+---------------+-------------------+---------------+-----------+-----------+
| us_west | us-west | US West | False | False | False |
+-----------------------+---------------+-------------------+---------------+-----------+-----------+
| amsterdam | amsterdam | Amsterdam | True | False | False |
+-----------------------+---------------+-------------------+---------------+-----------+-----------+
| frankfurt | frankfurt | Frankfurt | True | False | False |
+-----------------------+---------------+-------------------+---------------+-----------+-----------+
| london | london | London | True | False | False |
+-----------------------+---------------+-------------------+---------------+-----------+-----------+
| vip_us_east | vip-us-east | VIP US West | False | True | False |
+-----------------------+---------------+-------------------+---------------+-----------+-----------+
| vip_us_west | vip-us-west | VIP US East | False | True | False |
+-----------------------+---------------+-------------------+---------------+-----------+-----------+
| vip_amsterdam | vip-amsterdam | VIP Amsterdam | True | True | False |
+-----------------------+---------------+-------------------+---------------+-----------+-----------+
"""
INSTANCES = {}
VALUE_TYPE = str
__slots__ = (
'custom',
'deprecated',
'vip',
)
@classmethod
def _from_value(cls, value):
"""
Creates a voice region from the given id and stores it at class's `.INSTANCES`.
Called by `.get` when no voice region was found with the given id.
Parameters
----------
id_ : `str`
The identifier of the voice region.
Returns
-------
voice_region : ``VoiceRegion``
"""
name_parts = value.split('-')
for index in range(len(name_parts)):
name_part = name_parts[index]
if len(name_part) < 4:
name_part = name_part.upper()
else:
name_part = name_part.capitalize()
name_parts[index] = name_part
name = ' '.join(name_parts)
self = object.__new__(cls)
self.name = name
self.value = value
self.deprecated = False
self.vip = value.startswith('vip-')
self.custom = True
self.INSTANCES[value] = self
return self
@classmethod
def from_data(cls, data):
"""
Creates a voice region from the given data and stores it at the class's `.INSTANCES`.
If the voice region already exists returns that instead.
Parameters
----------
data : `dict` of (`str`, `Any`) items
Received voice region data.
Returns
-------
self : ``VoiceRegion``
"""
value = data['id']
try:
return cls.INSTANCES[value]
except KeyError:
pass
self = object.__new__(cls)
self.name = data['name']
self.value = value
self.deprecated = data['deprecated']
self.vip = data['vip']
self.custom = data['custom']
self.INSTANCES[value] = self
return self
def __init__(self, value, name, deprecated, vip):
"""
Creates a new voice region with the given parameters and stores it at the class's `.INSTANCES`.
Parameters
----------
value : `str`
The unique identifier of the voice region.
deprecated : `bool`
Whether the voice region is deprecated.
name : `str`
The default name of the voice region.
vip : `bool`
Whether the voice region can be used only by guilds with `VIP_REGIONS` feature.
"""
self.name = name
self.value = value
self.deprecated = deprecated
self.vip = vip
self.custom = False
self.INSTANCES[value] = self
# predefined
# normal
brazil = P('brazil', 'Brazil', False, False)
dubai = P('dubai', 'Dubai', False, False)
eu_central = P('eu-central', 'Central Europe', False, False)
eu_west = P('eu-west', 'Western Europe', False, False)
europe = P('europe', 'Europe', False, False)
hongkong = P('hongkong', 'Hong Kong', False, False)
india = P('india', 'India', False, False)
japan = P('japan', 'Japan', False, False)
russia = P('russia', 'Russia', False, False)
singapore = P('singapore', 'Singapore', False, False)
africa_south = P('southafrica', 'South Africa', False, False)
sydney = P('sydney', 'Sydney', False, False)
us_central = P('us-central', 'US Central', False, False)
us_east = P('us-east', 'US East', False, False)
us_south = P('us-south', 'US South', False, False)
us_west = P('us-west', 'US West', False, False)
# deprecated
amsterdam = P('amsterdam', 'Amsterdam', True, False)
frankfurt = P('frankfurt', 'Frankfurt', True, False)
london = P('london', 'London', True, False)
# vip
vip_us_east = P('vip-us-west', 'VIP US West', False, True)
vip_us_west = P('vip-us-east', 'VIP US East', False, True)
# vip + deprecated
vip_amsterdam = P('vip-amsterdam', 'VIP Amsterdam', True, True)
class ContentFilterLevel(PreinstancedBase):
"""
Represents Discord's content filter level.
Attributes
----------
value : `int`
The Discord side identifier value of the content filter level.
name : `str`
The default name of the content filter level.
Class Attributes
----------------
INSTANCES : `dict` of (`int`, ``ContentFilterLevel``) items
Stores the predefined content filter levels. This container is accessed when translating a Discord side
identifier of a content filter level. The identifier value is used as a key to get it's wrapper side
representation.
VALUE_TYPE : `type` = `int`
The verification filer levels' values' type.
DEFAULT_NAME : `str` = `'Undefined'`
The default name of the content filter levels.
Every predefined content filter level is also stored as a class attribute:
+-----------------------+-----------+-------+
| Class attribute name | name | value |
+=======================+===========+=======+
| disabled | disabled | 0 |
+-----------------------+-----------+-------+
| no_role | no_role | 1 |
+-----------------------+-----------+-------+
| everyone | everyone | 2 |
+-----------------------+-----------+-------+
"""
INSTANCES = {}
VALUE_TYPE = int
__slots__ = ()
# predefined
disabled = P(0, 'disabled')
no_role = P(1, 'no_role')
everyone = P(2, 'everyone')
class GuildFeature(PreinstancedBase):
"""
Represents a ``Guild``'s feature.
Attributes
----------
value : `str`
The Discord side identifier value of the guild feature.
Class Attributes
----------------
INSTANCES : `dict` of (`str`, ``GuildFeature``) items
Stores the predefined ``GuildFeature`` instances.
VALUE_TYPE : `type` = `str`
The guild features' values' type.
DEFAULT_NAME : `str` = `''`
The default name of the guild features. Guild features have the same value as name, so at their case it is not
applicable.
Every predefined guild feature can be accessed as class attribute as well:
+-------------------------------+-----------------------------------+
| Class attribute names | Value |
+===============================+===================================+
| animated_icon | ANIMATED_ICON |
+-------------------------------+-----------------------------------+
| banner | BANNER |
+-------------------------------+-----------------------------------+
| commerce | COMMERCE |
+-------------------------------+-----------------------------------+
| community | COMMUNITY |
+-------------------------------+-----------------------------------+
| discoverable | DISCOVERABLE |
+-------------------------------+-----------------------------------+
| discoverable_disabled | DISCOVERABLE_DISABLED |
+-------------------------------+-----------------------------------+
| discoverable_enabled_before | ENABLED_DISCOVERABLE_BEFORE |
+-------------------------------+-----------------------------------+
| featurable | FEATURABLE |
+-------------------------------+-----------------------------------+
| member_list_disabled | MEMBER_LIST_DISABLED |
+-------------------------------+-----------------------------------+
| more_emoji | MORE_EMOJI |
+-------------------------------+-----------------------------------+
| news | NEWS |
+-------------------------------+-----------------------------------+
| partnered | PARTNERED |
+-------------------------------+-----------------------------------+
| public | PUBLIC |
+-------------------------------+-----------------------------------+
| public_disabled | PUBLIC_DISABLED |
+-------------------------------+-----------------------------------+
| relay_enabled | RELAY_ENABLED |
+-------------------------------+-----------------------------------+
| invite_splash | INVITE_SPLASH |
+-------------------------------+-----------------------------------+
| vanity_invite | VANITY_URL |
+-------------------------------+-----------------------------------+
| verified | VERIFIED |
+-------------------------------+-----------------------------------+
| vip | VIP_REGIONS |
+-------------------------------+-----------------------------------+
| welcome_screen | WELCOME_SCREEN_ENABLED |
+-------------------------------+-----------------------------------+
| verification_screen | MEMBER_VERIFICATION_GATE_ENABLED |
+-------------------------------+-----------------------------------+
| preview_enabled | PREVIEW_ENABLED |
+-------------------------------+-----------------------------------+
| ticket_events_enabled | TICKETED_EVENTS_ENABLED |
+-------------------------------+-----------------------------------+
| monetization_enabled | MONETIZATION_ENABLED |
+-------------------------------+-----------------------------------+
| more_sticker | MORE_STICKERS |
+-------------------------------+-----------------------------------+
| thread_archive_3_day | THREE_DAY_THREAD_ARCHIVE |
+-------------------------------+-----------------------------------+
| thread_archive_7_day | SEVEN_DAY_THREAD_ARCHIVE |
+-------------------------------+-----------------------------------+
| private_threads | PRIVATE_THREADS |
+-------------------------------+-----------------------------------+
"""
INSTANCES = {}
VALUE_TYPE = str
DEFAULT_NAME = ''
__slots__ = ()
@classmethod
def _from_value(cls, value):
"""
Creates a new guild feature with the given value.
Parameters
----------
value : `str`
The guild feature's identifier value.
Returns
-------
self : ``GuildFeature``
The created guild feature.
"""
self = object.__new__(cls)
self.value = value
self.name = value
self.INSTANCES[value] = self
return self
# predefined
animated_icon = P('ANIMATED_ICON', 'animated_icon')
banner = P('BANNER', 'banner')
commerce = P('COMMERCE', 'commerce')
community = P('COMMUNITY', 'community')
discoverable = P('DISCOVERABLE', 'discoverable')
discoverable_disabled = P('DISCOVERABLE_DISABLED', 'discoverable_disabled')
discoverable_enabled_before = P(
'ENABLED_DISCOVERABLE_BEFORE', 'discoverable_enabled_before'
)
featurable = P('FEATURABLE', 'featurable')
member_list_disabled = P('MEMBER_LIST_DISABLED', 'member_list_disabled')
more_emoji = P('MORE_EMOJI', 'more_emoji')
news = P('NEWS', 'news')
partnered = P('PARTNERED', 'partnered')
public = P('PUBLIC', 'public')
public_disabled = P('PUBLIC_DISABLED', 'public_disabled')
relay_enabled = P('RELAY_ENABLED', 'relay_enabled')
invite_splash = P('INVITE_SPLASH', 'invite_splash')
vanity_invite = P('VANITY_URL', 'vanity_invite')
verified = P('VERIFIED', 'verified')
vip = P('VIP_REGIONS', 'vip')
welcome_screen = P('WELCOME_SCREEN_ENABLED', 'welcome_screen')
verification_screen = P('MEMBER_VERIFICATION_GATE_ENABLED', 'verification_screen')
preview_enabled = P('PREVIEW_ENABLED', 'preview_enabled')
ticket_events_enabled = P('TICKETED_EVENTS_ENABLED', 'ticket_events_enabled')
monetization_enabled = P('MONETIZATION_ENABLED', 'monetization_enabled')
more_sticker = P('MORE_STICKERS', 'more_sticker')
thread_archive_3_day = P('THREE_DAY_THREAD_ARCHIVE', 'thread_archive_3_day')
thread_archive_7_day = P('SEVEN_DAY_THREAD_ARCHIVE', 'thread_archive_7_day')
private_threads = P('PRIVATE_THREADS', 'private_threads')
@class_property
def vanity(cls):
"""
``.vanity`` is deprecated, please use ``.vanity_invite`` instead. Will be removed in 2021 September.
"""
warnings.warn(
f'`{cls.__name__}.vanity` is deprecated, and will be removed in 2021 September. '
f'Please use `{cls.__name__}.vanity_invite` instead.',
FutureWarning,
)
return cls.vanity_invite
class NsfwLevel(PreinstancedBase):
    """
    Represents a guild's nsfw level.
    Attributes
    ----------
    name : `str`
        The name of the nsfw filter level.
    value : `int`
        The identifier value the nsfw filter level
    Class Attributes
    ----------------
    INSTANCES : `dict` of (`int`, ``NsfwLevel``) items
        Stores the predefined ``NsfwLevel`` instances. These can be accessed with their `value` as key.
    VALUE_TYPE : `type` = `int`
        The nsfw level' values' type.
    DEFAULT_NAME : `str` = `'UNDEFINED'`
        The default name of the nsfw levels.
    Every predefined nsfw level can be accessed as class attribute as well:
    +-----------------------+-------------------+-------+
    | Class attribute name  | Name              | Value |
    +=======================+===================+=======+
    | none                  | none              | 0     |
    +-----------------------+-------------------+-------+
    | explicit              | explicit          | 1     |
    +-----------------------+-------------------+-------+
    | safe                  | safe              | 2     |
    +-----------------------+-------------------+-------+
    | age_restricted        | age_restricted    | 3     |
    +-----------------------+-------------------+-------+
    """
    # Registry of the predefined levels, keyed by their integer value.
    INSTANCES = {}
    VALUE_TYPE = int
    DEFAULT_NAME = 'UNDEFINED'
    __slots__ = ()
    none = P(0, 'none')
    explicit = P(1, 'explicit')
    safe = P(2, 'safe')
    # Fix: `age_restricted` was created with value `2`, duplicating `safe` and
    # overwriting it inside `INSTANCES`. The docstring table (and Discord's
    # API) define its value as `3`.
    age_restricted = P(3, 'age_restricted')
class MessageNotificationLevel(PreinstancedBase):
"""
Represents the default message notification level of a ``Guild``.
Attributes
----------
value : `int`
The Discord side identifier value of the message notification level.
name : `str`
The default name of the message notification level.
Class Attributes
----------------
INSTANCES : `dict` of (`int`, ``MessageNotificationLevel``) items
Stores the predefined message notification levels. This container is accessed when translating message
notification level's value to it's representation.
VALUE_TYPE : `type` = `int`
The notification levels' values' type.
DEFAULT_NAME : `str` = `'Undefined'`
The default name of the notification levels.
Each predefined message notification level can also be accessed as a class attribute:
+-----------------------+---------------+-------+
| Class attribute name | name | value |
+=======================+===============+=======+
| all_messages | all_messages | 0 |
+-----------------------+---------------+-------+
| only_mentions | only_mentions | 1 |
+-----------------------+---------------+-------+
| no_message | no_messages | 2 |
+-----------------------+---------------+-------+
| null | null | 3 |
+-----------------------+---------------+-------+
"""
INSTANCES = {}
VALUE_TYPE = int
__slots__ = ()
# predefined
all_messages = P(0, 'all_messages')
only_mentions = P(1, 'only_mentions')
no_messages = P(2, 'no_messages')
null = P(3, 'null')
class MFA(PreinstancedBase):
"""
Represents Discord's Multi-Factor Authentication's levels.
Attributes
----------
name : `str`
The default name of the MFA level.
value : `int`
The Discord side identifier value of the MFA level.
Class Attributes
----------------
INSTANCES : `dict` of (`int`, ``MFA``) items
Stores the predefined MFA level. This container is accessed when converting an MFA level's value to
it's wrapper side representation.
VALUE_TYPE : `type` = `int`
The mfa levels' values' type.
DEFAULT_NAME : `str` = `'Undefined'`
The default name of the mfa levels.
Each predefined MFA can also be accessed as class attribute:
+-----------------------+-----------+-------+
| Class attribute name | name | value |
+=======================+===========+=======+
| none | none | 0 |
+-----------------------+-----------+-------+
| elevated | elevated | 1 |
+-----------------------+-----------+-------+
"""
INSTANCES = {}
VALUE_TYPE = int
__slots__ = ()
# Predefined
none = P(0, 'none')
elevated = P(1, 'elevated')
class VerificationScreenStepType(PreinstancedBase):
"""
Represents a type of a ``VerificationScreenStep``.
Attributes
----------
value : `str`
The Discord side identifier value of the verification step types.
Class Attributes
----------------
INSTANCES : `dict` of (`int`, ``VerificationScreenStepType``) items
Stores the predefined ``VerificationScreenStepType`` instances.
VALUE_TYPE : `type` = `str`
The verification screen steps' values' type.
DEFAULT_NAME : `str` = `''`
The default name of the verification screen step types.Verification screen step types have the
same value as name, so at their case it is not applicable.
Every predefined verification screen step type can be accessed as class attribute as well:
+-----------------------+-------+
| Class attribute names | Value |
+=======================+=======+
| rules | TERMS |
+-----------------------+-------+
"""
INSTANCES = {}
VALUE_TYPE = str
DEFAULT_NAME = ''
__slots__ = ()
@classmethod
def _from_value(cls, value):
"""
Creates a new verification screen type with the given value.
Parameters
----------
value : `str`
The verification screen type's identifier value.
Returns
-------
self : ``VerificationScreenStepType``
The verification screen type.
"""
self = object.__new__(cls)
self.value = value
self.name = value
self.INSTANCES[value] = self
return self
def __repr__(self):
"""Returns the representation of the verification screen type."""
return f'{self.__class__.__name__}(value={self.value!r})'
rules = P('TERMS', 'rules')
| StarcoderdataPython |
1855617 | <reponame>sayand0122/Hokage_bot<filename>cogs/music.py
import discord
from discord.ext import commands
from discord import FFmpegPCMAudio
import asyncio
from async_timeout import timeout
import itertools
from youtube_dl import YoutubeDL
from validator_collection import checkers
import pafy
class Audio():
"""Creates an audio object with relevant data."""
def __init__(self, search, requester):
self.requester = requester
self.audio = None
self.title = None
self.search = search
self.embed = None
self.gather_stream()
def gather_stream(self):
"""Gathers audio stream and relevant info."""
ffmpegopts = {
'before_options': '-nostdin',
'options': '-vn'
}
ytdlopts = {
'format': 'bestaudio/best',
'outtmpl': 'downloads/%(extractor)s-%(id)s-%(title)s.%(ext)s',
'restrictfilenames': True,
'noplaylist': True,
'nocheckcertificate': True,
'ignoreerrors': False,
'logtostderr': False,
'quiet': True,
'no_warnings': True,
'default_search': 'auto',
'source_address': '0.0.0.0' # ipv6 addresses cause issues sometimes
}
ytdl = YoutubeDL(ytdlopts)
data = ytdl.extract_info(self.search, download=False)
if not checkers.is_url(self.search):
data = data['entries'][0]
self.audio = FFmpegPCMAudio(data['formats'][0]['url'], **ffmpegopts)
thumbnail_url = data['thumbnail']
webpage_url = data['webpage_url']
self.title = data['title']
uploader = data['uploader']
channel_url = data['channel_url']
# youtube_dl doesnt give accurate view count above 100M (Havent checked for a lesser amount once I discovered this).
video = pafy.new(webpage_url)
views = video.viewcount
duration = video.duration
song_embed = discord.Embed()
song_embed.set_image(url=thumbnail_url)
song_embed.add_field(name='\u200b', value=f'**[{self.title}]({webpage_url})**')
song_embed.add_field(name='\u200b', value=f'**[{uploader}]({channel_url})**', inline=False)
song_embed.add_field(name='Views', value=f'{views}')
song_embed.add_field(name='Duration', value=f'{duration}')
song_embed.add_field(name='Requested by', value=f'{self.requester.mention}')
self.embed = song_embed
class MusicPlayer:
"""Player that is generated for each guild (one channel per guild).
When the bot disconnects from the voice client it's instance will be destroyed.
"""
def __init__(self, ctx):
self.ctx = ctx
self.bot = ctx.bot
self._guild = ctx.guild
self._channel = ctx.channel
self._cog = ctx.cog
self.current = None
self.queue = asyncio.Queue()
self.next = asyncio.Event()
ctx.bot.loop.create_task(self.player_loop(self.ctx))
async def player_loop(self, ctx):
"""Main player loop."""
await self.bot.wait_until_ready()
while not self.bot.is_closed():
self.next.clear()
try:
# Wait for the next song. If we timeout cancel the player and disconnect...
async with timeout(300): # 5 minutes...
source = await self.queue.get()
self.current = source
except asyncio.TimeoutError:
return self.destroy(self._guild)
self._guild.voice_client.play(source.audio, after=lambda _: self.bot.loop.call_soon_threadsafe(self.next.set))
await self.next.wait()
def destroy(self, guild):
"""Disconnect and cleanup the player."""
return self.bot.loop.create_task(self._cog.cleanup(guild))
class Music(commands.Cog):
"""Music related commands."""
def __init__(self, bot):
self.bot = bot
self.players = {}
async def cleanup(self, guild):
try:
await guild.voice_client.disconnect()
except AttributeError:
pass
try:
del self.players[guild.id]
except KeyError:
pass
async def __local_check(self, ctx):
"""A local check which applies to all commands in this cog."""
if not ctx.guild:
raise commands.NoPrivateMessage
return True
async def __error(self, ctx, error):
"""A local error handler for all errors arising from commands in this cog."""
if isinstance(error, commands.NoPrivateMessage):
try:
return await ctx.send("`This command can not be used in Private Messages.`")
except discord.HTTPException:
pass
else:
pass
def get_player(self, ctx):
"""Retrieve the guild player or generate one."""
try:
player = self.players[ctx.guild.id]
except KeyError:
player = MusicPlayer(ctx)
self.players[ctx.guild.id] = player
return player
async def connect(self, ctx):
"""Connect to voice the user is in."""
channel = None
try:
channel = ctx.author.voice.channel
except AttributeError:
print('Connection failed')
vc = ctx.voice_client
if vc:
try:
await vc.move_to(channel)
except asyncio.TimeoutError:
print('Connection failed')
else:
try:
await channel.connect()
except asyncio.TimeoutError:
print('Connection failed')
@commands.command(name='play', aliases=['sing'])
async def play_(self, ctx, *search: str):
"""Requests a song and adds to queue.
Args:
search (str): keywords for querying the song on youtube.
"""
search = ' '.join(search[:])
vc = ctx.voice_client
if not vc:
await self.connect(ctx)
player = self.get_player(ctx)
source = Audio(search, ctx.message.author)
await ctx.send(embed=source.embed)
await player.queue.put(source)
@commands.command(name='pause')
async def pause_(self, ctx):
"""Pause the currently playing song."""
vc = ctx.voice_client
if not vc or not vc.is_playing():
return
elif vc.is_paused():
return
vc.pause()
@commands.command(name='resume')
async def resume_(self, ctx):
"""Resume the currently paused song."""
vc = ctx.voice_client
if not vc or not vc.is_connected():
return
elif not vc.is_paused():
return
vc.resume()
@commands.command(name='skip')
async def skip_(self, ctx):
"""Skip the current song."""
vc = ctx.voice_client
if not vc or not vc.is_connected():
return
if vc.is_paused():
pass
elif not vc.is_playing():
return
vc.stop()
@commands.command(name='queue', aliases=['q', 'playlist'])
async def queue_info(self, ctx):
"""Retrieve a basic queue of upcoming songs."""
vc = ctx.voice_client
if not vc or not vc.is_connected():
return
player = self.get_player(ctx)
if player.queue.empty():
return await ctx.send('There are currently no more queued songs.')
# Queries upto 5 songs in the queue.
upcoming = list(itertools.islice(player.queue._queue, 0, 5))
desc = '\n'.join(f'**`{_.title}`**' for _ in upcoming)
embed = discord.Embed(title='Upcoming - Next', description=desc)
await ctx.send(embed=embed)
@commands.command(name='stop')
async def stop_(self, ctx):
"""Stop the currently playing song and destroy the player."""
vc = ctx.voice_client
if not vc or not vc.is_connected():
return
await self.cleanup(ctx.guild)
@commands.command()
async def current(self, ctx):
"""Displays current track."""
vc = ctx.voice_client
if not vc or not vc.is_connected():
return
player = self.get_player(ctx)
desc = player.current.title
if desc:
embed = discord.Embed(title='Currently Playing', description=desc)
await ctx.send(embed=embed)
def setup(bot):
    """Extension entry point: register the Music cog on the given bot."""
    cog = Music(bot)
    bot.add_cog(cog)
| StarcoderdataPython |
1834581 | # Preppin' Data 2021 Week 07
import pandas as pd

# Preppin' Data 2021 Week 07: flag non-vegan products on a shopping list.

# Load data
shopping_list = pd.read_excel('unprepped_data\\PD 2021 Wk 7 Input - Shopping List and Ingredients.xlsx', engine='openpyxl', sheet_name='Shopping List')
keywords = pd.read_excel('unprepped_data\\PD 2021 Wk 7 Input - Shopping List and Ingredients.xlsx', engine='openpyxl', sheet_name='Keywords')

# Prepare the keyword data:
# - Add an 'E' in front of every E number.
# - Stack Animal Ingredients and E Numbers on top of each other.
# - Lowercase everything so matching against ingredients is case-insensitive.
animal_ingredients = keywords['Animal Ingredients'][0].split(', ')
e_numbers = ['E' + x for x in keywords['E Numbers'][0].split(', ')]
keyword_list = [x.lower() for x in animal_ingredients + e_numbers]

# Lowercase ingredients to match the lowercased keywords.
shopping_list['Ingredients/Allergens'] = shopping_list['Ingredients/Allergens'].str.lower()

# Collect the products matching each non-vegan keyword. DataFrame.append was
# deprecated and removed in pandas 2.0, so gather the matches in a plain list
# and concatenate once at the end.
matched_frames = []
reason_list = []
for ingredient in keyword_list:
    # NOTE: str.contains treats the keyword as a regex by default; the
    # keywords here are plain words/E numbers, so that is equivalent.
    matches = shopping_list[shopping_list['Ingredients/Allergens'].str.contains(ingredient)]
    if len(matches) > 0:
        # One reason entry per matched row, in the same row order as `matches`.
        reason_list.extend([ingredient] * len(matches))
        matched_frames.append(matches)

if matched_frames:
    non_vegan = pd.concat(matched_frames).copy()
else:
    # Empty frame with the same columns/dtypes as the shopping list.
    non_vegan = shopping_list.iloc[0:0].copy()

# Attach the matched keyword, then collapse to one comma-separated list of
# keywords per product, e.g. "whey, milk, egg".
non_vegan['contains'] = reason_list
non_vegan['Contains'] = non_vegan[['Product','contains','Description']].groupby(['Product','Description'])['contains'].transform(lambda x: ', '.join(x))
non_vegan = non_vegan[['Product','Description','Contains']].drop_duplicates()

# Left join to filter out non-vegan products: rows with no match stay vegan.
vegan = shopping_list.merge(non_vegan, on='Product', how='left')
vegan = vegan[vegan['Contains'].isnull()]
vegan = vegan[['Product','Description_x']]
vegan.columns = ['Product','Description']

# Output the data.
vegan.to_csv('prepped_data\\PD 2021 Wk 7 Output - 1 Vegan List.csv', index=False)
non_vegan.to_csv('prepped_data\\PD 2021 Wk 7 Output - 2 Non-Vegan List.csv', index=False)
print("data prepped!")
| StarcoderdataPython |
204873 | <reponame>willdickson/puzzleboxes<gh_stars>0
#!/usr/bin/env python
from __future__ import print_function
import cv2
import rospy
from blob_finder import BlobFinder
from puzzleboxes_base import PuzzleBoxesBase
from puzzleboxes_base import TrackedObject
from puzzleboxes_base import ObjectPosition
from puzzleboxes.msg import PuzzleboxesData
from puzzleboxes.msg import RegionData
class PuzzleBoxes(PuzzleBoxesBase):
def __init__(self):
super(PuzzleBoxes,self).__init__()
self.blob_finder = BlobFinder(
threshold=self.param['tracking']['threshold'],
minArea=self.param['tracking']['min_area'],
maxArea=self.param['tracking']['max_area'],
)
self.data_pub = rospy.Publisher('/puzzleboxes_data', PuzzleboxesData, queue_size=10)
def process_frame(self,frame_data):
ros_time_now = frame_data['ros_time_now']
current_time = frame_data['current_time']
elapsed_time = frame_data['elapsed_time']
image = frame_data['image']
diff_image = frame_data['diff_image']
blob_list, blob_image = self.blob_finder.find(diff_image)
#cv2.imshow('image', image)
#cv2.imshow('bg', self.bg_image)
#cv2.imshow('diff', diff_image)
##cv2.imshow('blob', blob_image)
#cv2.waitKey(1)
## Devel
## -----------------------------------------------------------------------------
#rospy.logwarn('len(blob_list) = {}'.format(len(blob_list)))
## -----------------------------------------------------------------------------
tracked_objects = []
for i, blob in enumerate(blob_list):
obj = TrackedObject() # Replace this with simple class
obj.position.x = blob['centroidX']
obj.position.y = blob['centroidY']
obj.size = blob['area']
#rospy.logwarn(' {}, {}'.format(i, blob['area']))
tracked_objects.append(obj)
self.process_regions(ros_time_now, elapsed_time, tracked_objects)
#bgr_image = cv2.cvtColor(image,cv2.COLOR_GRAY2BGR)
if self.visualizer_on:
visualizer_data = {
'elapsed_time' : elapsed_time,
'bgr_image' : frame_data['bgr_image'],
'trial_scheduler' : self.trial_scheduler,
'tracking_region_list' : self.tracking_region_list,
}
if self.single_threaded:
self.region_visualizer.update(visualizer_data)
else:
self.region_visualizer_queue.put(visualizer_data)
def process_regions(self, ros_time_now, elapsed_time, tracked_objects):
led_enabled = self.trial_scheduler.led_enabled
msg = PuzzleboxesData()
msg.header.stamp = ros_time_now
msg.elapsed_time = elapsed_time
msg.region_data_list = []
msg.led_enabled = led_enabled
msg.queue_overflow = self.queue_overflow
msg.queue_size = self.image_queue.qsize()
for tracking_region in self.tracking_region_list:
region_data = tracking_region.update(elapsed_time, tracked_objects, led_enabled)
msg.region_data_list.append(region_data)
self.data_pub.publish(msg)
# -----------------------------------------------------------------------------
if __name__ == '__main__':
node = PuzzleBoxes()
node.run()
| StarcoderdataPython |
9628206 | from setuptools import setup
setup(
name='vectormatrixlib',
version='0.1.0',
description='Linear Algebra Matrix Tool',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/andrewking1597/vector-matrix',
packages=['vectormatrixlib']
)
| StarcoderdataPython |
3250473 | from pylama.main import check_path, parse_options
def lint():
    """Run pylama over the parent directory and raise if any issues are found.

    Raises
    ------
    RuntimeError
        Carrying the pylama error objects, when any lint error is reported.
    """
    options = parse_options(["../"])
    errors = check_path(options, rootdir=".")
    if errors:
        # Raise a normal Exception subclass instead of BaseException so callers
        # using `except Exception` can handle lint failures; code that caught
        # BaseException before still catches RuntimeError.
        raise RuntimeError(*errors)
| StarcoderdataPython |
6646044 | <gh_stars>10-100
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function, unicode_literals
import optparse
import uuid
from proton import Message
from proton.handlers import MessagingHandler
from proton.reactor import Container, DynamicNodeProperties
import os
class Timer(object):
    """Reactor timer task that forwards each scheduled tick to its parent."""
    def __init__(self, parent):
        # The object whose periodic tick handler we invoke on each fire.
        self.parent = parent
    def on_timer_task(self, event):
        """Proton reactor callback: delegate to the parent's 500 ms tick."""
        self.parent.tick_500ms()
class Client(MessagingHandler):
def __init__(self, url):
super(Client, self).__init__()
self.uuid = str(uuid.uuid4())[0:8]
self.url = url
self.report_address = "multicast.amq-demo-report"
self.stats_address = "multicast.amq-demo-stats"
self.control_address = "amq-demo-control"
self.service_address = "FraudDetection/v1"
self.capacity = 0
self.outstanding = 0
self.samples = []
self.max_samples = 4
self.request_count = 0
self.locations = {}
self.receiver = None
self.service_sender = None
def send(self):
if self.outstanding >= self.capacity:
return
to_send = self.capacity - self.outstanding
if self.service_sender.credit < to_send:
to_send = self.service_sender.credit
for i in range(to_send):
msg = Message(reply_to = self.reply_to, correlation_id = 0, body = "Client Request")
self.service_sender.send(msg)
self.outstanding += 1
self.request_count += 1
def count_received(self, location):
if location not in self.locations:
self.locations[location] = []
for i in range(self.max_samples):
self.locations[location].append(0)
self.locations[location][0] += 1
def send_stats_update(self):
stat_map = {}
for loc,samples in self.locations.items():
stat_map[loc] = sum(samples) / (len(samples) * 0.5)
self.locations[loc].pop()
self.locations[loc].insert(0, 0)
if sum(self.locations[loc]) == 0:
self.locations.pop(loc)
if self.stats_sender.credit > 0:
msg = Message(properties={'api':'amq-demo.server-stats.v1'}, body=stat_map)
dlv = self.stats_sender.send(msg)
dlv.settle()
def tick_500ms(self):
self.timer = self.reactor.schedule(0.5, Timer(self))
self.samples.append(self.request_count)
self.request_count = 0
if len(self.samples) > self.max_samples:
self.samples.pop(0)
rate = sum(self.samples) / (len(self.samples) * 0.5) # average rate/sec over the sample span
report = {u'capacity': self.capacity,
u'outstanding': self.outstanding,
u'rate': int(rate)}
if self.report_sender.credit > 0:
msg = Message(properties={'api': 'amq-demo.client-report.v1'}, body=report)
dlv = self.report_sender.send(msg)
dlv.settle()
self.send_stats_update()
def on_start(self, event):
self.container = event.container
self.reactor = event.reactor
self.conn = event.container.connect(self.url)
def on_connection_opened(self, event):
if self.receiver == None:
self.receiver = event.container.create_receiver(self.conn, None, dynamic=True)
self.control_receiver = event.container.create_receiver(self.conn, self.control_address)
self.outstanding = 0
def on_link_opened(self, event):
    """When the dynamic reply link opens, create the senders and start the timer.

    NOTE(review): indentation was reconstructed -- the sender creation is
    nested under the receiver check on the assumption that ``reply_to``
    must exist before the first request can be sent (opening
    ``service_sender`` triggers ``on_sendable``); confirm against the
    upstream demo source.
    """
    if event.receiver == self.receiver:
        self.reply_to = event.receiver.remote_source.address
        # Idiom fix: ``is None`` instead of ``== None`` (PEP 8).
        if self.service_sender is None:
            self.service_sender = event.container.create_sender(self.conn, self.service_address)
            self.report_sender = event.container.create_sender(self.conn, self.report_address)
            self.stats_sender = event.container.create_sender(self.conn, self.stats_address)
            self.timer = self.reactor.schedule(0.5, Timer(self))
def on_sendable(self, event):
    """A sender gained credit: top up the window of outstanding requests."""
    self.send()
def on_message(self, event):
    """Dispatch an inbound message: control command or service reply.

    Control messages carry an ``opcode`` property (INC_CAPACITY /
    DEC_CAPACITY) with an integer ``value``; service replies carry a
    ``location`` property that is tallied per origin.
    """
    try:
        if event.receiver == self.control_receiver:
            props = event.message.properties
            opcode = props.get('opcode')
            value = int(props.get('value', 0))
            if opcode == 'INC_CAPACITY':
                self.capacity += value
            elif opcode == 'DEC_CAPACITY':  # opcodes are mutually exclusive
                self.capacity -= value
                if self.capacity < 0:
                    self.capacity = 0
        elif event.receiver == self.receiver:
            ap = event.message.properties
            if 'location' in ap:
                self.count_received(ap['location'])
    except Exception:
        # Malformed messages are deliberately ignored (best effort).
        # BUGFIX: was a bare ``except:``, which would also swallow
        # KeyboardInterrupt and SystemExit.
        pass
def on_accepted(self, event):
    """A service request was accepted: free a slot and refill the window."""
    if event.sender != self.service_sender:
        return
    self.outstanding -= 1
    self.send()
def on_rejected(self, event):
    """A service request was rejected: free a slot and refill the window."""
    if event.sender != self.service_sender:
        return
    self.outstanding -= 1
    self.send()
def on_released(self, event):
    """A service request was released: free a slot and refill the window."""
    if event.sender != self.service_sender:
        return
    self.outstanding -= 1
    self.send()
try:
    ##
    ## Try to get the message bus hostname from the openshift environment
    ## Fall back to 127.0.0.1 (loopback)
    ##
    host = os.getenv("MESSAGING_SERVICE_HOST", "127.0.0.1")
    container = Container(Client(host))
    # Use the pod/host name as the AMQP container-id so each client
    # instance is distinguishable on the bus.
    container.container_id = os.getenv("HOSTNAME", "client")
    container.run()
except KeyboardInterrupt:
    # Ctrl-C is a normal way to stop the demo client; exit quietly.
    pass
| StarcoderdataPython |
6496228 | <filename>tests/context.py<gh_stars>0
"""Shared context information for all tests."""
import sys
import os
from typing import ClassVar, Optional
import unittest
import keyper
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
# pylint: disable=wrong-import-position
import libtvdb
# pylint: enable=wrong-import-position
class BaseTVDBTest(unittest.TestCase):
    """Base class for TVDB test cases."""

    _client: ClassVar[Optional[libtvdb.TVDBClient]] = None

    @classmethod
    def setUpClass(cls):
        """Setup the test class."""
        api_key = BaseTVDBTest._read_secret("libtvdb_api_key")
        user_key = BaseTVDBTest._read_secret("libtvdb_user_key")
        user_name = BaseTVDBTest._read_secret("libtvdb_user_name")
        # Validate all three credentials before constructing the client.
        for value, label in ((api_key, "API Key"),
                             (user_key, "user Key"),
                             (user_name, "user name")):
            if value is None:
                raise Exception("Failed to get %s" % label)
        BaseTVDBTest._client = libtvdb.TVDBClient(
            api_key=api_key, user_key=user_key, user_name=user_name
        )

    @classmethod
    def _read_secret(cls, secret_name):
        """Read a secret from the keychain, falling back to the environment."""
        from_keychain = keyper.get_password(label=secret_name.lower())
        if from_keychain is not None:
            return from_keychain
        return os.environ.get(secret_name.upper())

    # pylint: disable=no-self-use
    def client(self) -> libtvdb.TVDBClient:
        """A class reference to the client to clean up the tests."""
        shared = BaseTVDBTest._client
        if shared is None:
            raise Exception("Client was not set")
        return shared
    # pylint: enable=no-self-use
| StarcoderdataPython |
6590708 | '''OpenGL extension ARB.shading_language_100
This module customises the behaviour of the
OpenGL.raw.GL.ARB.shading_language_100 to provide a more
Python-friendly API
Overview (from the spec)
This extension string indicates that the OpenGL Shading Language is
supported. The Shading Language is defined by a separate specification
document which can be downloaded from
http://www.opengl.org/documentation/oglsl.html
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/shading_language_100.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.shading_language_100 import *
from OpenGL.raw.GL.ARB.shading_language_100 import _EXTENSION_NAME
def glInitShadingLanguage100ARB():
    '''Return boolean indicating whether this extension is available'''
    # ``extensions`` is already imported at module level
    # (``from OpenGL import extensions, wrapper``); the generated
    # function-local re-import was redundant and has been removed.
    return extensions.hasGLExtension(_EXTENSION_NAME)
### END AUTOGENERATED SECTION | StarcoderdataPython |
8083754 | # Code generated by `typeddictgen`. DO NOT EDIT.
"""V1NodeStatusDict generated type."""
from typing import TypedDict, Dict, List
from kubernetes_typed.client import (
V1AttachedVolumeDict,
V1ContainerImageDict,
V1NodeAddressDict,
V1NodeConditionDict,
V1NodeConfigStatusDict,
V1NodeDaemonEndpointsDict,
V1NodeSystemInfoDict,
)
# Generated TypedDict mirroring the Kubernetes V1NodeStatus API object.
# ``total=False`` marks every key optional, matching the sparse JSON the
# API server returns. Keys use the API's camelCase wire names, which is
# why the functional (not class-based) TypedDict form is used here.
V1NodeStatusDict = TypedDict(
    "V1NodeStatusDict",
    {
        "addresses": List[V1NodeAddressDict],
        "allocatable": Dict[str, str],
        "capacity": Dict[str, str],
        "conditions": List[V1NodeConditionDict],
        "config": V1NodeConfigStatusDict,
        "daemonEndpoints": V1NodeDaemonEndpointsDict,
        "images": List[V1ContainerImageDict],
        "nodeInfo": V1NodeSystemInfoDict,
        "phase": str,
        "volumesAttached": List[V1AttachedVolumeDict],
        "volumesInUse": List[str],
    },
    total=False,
)
| StarcoderdataPython |
6537388 | <reponame>mcauser/deshipu-micropython-is31fl3731<filename>is31fl3731.py
import math
import time
_MODE_REGISTER = const(0x00)
_FRAME_REGISTER = const(0x01)
_AUTOPLAY1_REGISTER = const(0x02)
_AUTOPLAY2_REGISTER = const(0x03)
_BLINK_REGISTER = const(0x05)
_AUDIOSYNC_REGISTER = const(0x06)
_BREATH1_REGISTER = const(0x08)
_BREATH2_REGISTER = const(0x09)
_SHUTDOWN_REGISTER = const(0x0a)
_GAIN_REGISTER = const(0x0b)
_ADC_REGISTER = const(0x0c)
_CONFIG_BANK = const(0x0b)
_BANK_ADDRESS = const(0xfd)
_PICTURE_MODE = const(0x00)
_AUTOPLAY_MODE = const(0x08)
_AUDIOPLAY_MODE = const(0x18)
_ENABLE_OFFSET = const(0x00)
_BLINK_OFFSET = const(0x12)
_COLOR_OFFSET = const(0x24)
class Matrix:
    """
    Driver for the IS31FL3731 charlieplexed 16x9 LED matrix.

    >>> import is31fl3731
    >>> from machine import I2C, Pin
    >>> i2c = I2C(Pin(5), Pin(4))
    >>> display = is31fl3731.Matrix(i2c)
    >>> display.fill(127)
    """
    width = 16
    height = 9

    def __init__(self, i2c, address=0x74):
        self.i2c = i2c
        self.address = address
        self.reset()
        self.init()

    def _bank(self, bank=None):
        """Select a register bank, or return the currently selected one."""
        if bank is None:
            return self.i2c.readfrom_mem(self.address, _BANK_ADDRESS, 1)[0]
        self.i2c.writeto_mem(self.address, _BANK_ADDRESS, bytearray([bank]))

    def _register(self, bank, register, value=None):
        """Read (``value is None``) or write a register in ``bank``."""
        self._bank(bank)
        if value is None:
            return self.i2c.readfrom_mem(self.address, register, 1)[0]
        self.i2c.writeto_mem(self.address, register, bytearray([value]))

    def _mode(self, mode=None):
        """Read or write the display mode register in the config bank."""
        return self._register(_CONFIG_BANK, _MODE_REGISTER, mode)

    def init(self):
        """Initialize the display."""
        self._mode(_PICTURE_MODE)
        self.frame(0)
        for frame in range(8):
            self.fill(0, False, frame=frame)
            for col in range(18):
                # Enable every LED; per-pixel brightness is set via PWM.
                self._register(frame, _ENABLE_OFFSET + col, 0xff)
        self.audio_sync(False)

    def reset(self):
        """Power-cycle the controller through the shutdown register."""
        self.sleep(True)
        time.sleep_us(10)
        self.sleep(False)

    def sleep(self, value):
        """Enables, disables or gets the sleep mode."""
        return self._register(_CONFIG_BANK, _SHUTDOWN_REGISTER, not value)

    def autoplay(self, delay=0, loops=0, frames=0):
        """
        Enables or disables autoplay.

        If ``delay`` is 0, autoplay is disabled. Otherwise the display will
        switch between ``frames`` frames every ``delay`` milliseconds, and
        repeat the cycle ``loops`` times. If ``loops`` is 0, it will repeat
        indefinitely.
        """
        if delay == 0:
            self._mode(_PICTURE_MODE)
            return
        delay //= 11  # hardware steps are ~11 ms
        if not 0 <= loops <= 7:
            raise ValueError("Loops out of range")
        if not 0 <= frames <= 7:
            raise ValueError("Frames out of range")
        if not 1 <= delay <= 64:
            raise ValueError("Delay out of range")
        self._register(_CONFIG_BANK, _AUTOPLAY1_REGISTER, loops << 4 | frames)
        self._register(_CONFIG_BANK, _AUTOPLAY2_REGISTER, delay % 64)
        self._mode(_AUTOPLAY_MODE | self._frame)

    def fade(self, fade_in=None, fade_out=None, pause=0):
        """
        Disables or enables and configures fading.

        If called without parameters, disables fading. If ``fade_in`` and/or
        ``fade_out`` are specified, it will take that many milliseconds to
        change between frames, with ``pause`` milliseconds of dark between.
        """
        if fade_in is None and fade_out is None:
            self._register(_CONFIG_BANK, _BREATH2_REGISTER, 0)
            # BUGFIX: the original fell through here and crashed on
            # ``int(math.log(None / 26, 2))`` when disabling fading.
            return
        elif fade_in is None:
            fade_in = fade_out
        elif fade_out is None:
            fade_out = fade_in
        fade_in = int(math.log(fade_in / 26, 2))
        fade_out = int(math.log(fade_out / 26, 2))
        pause = int(math.log(pause / 26, 2))
        if not 0 <= fade_in <= 7:
            raise ValueError("Fade in out of range")
        if not 0 <= fade_out <= 7:
            raise ValueError("Fade out out of range")
        if not 0 <= pause <= 7:
            raise ValueError("Pause out of range")
        self._register(_CONFIG_BANK, _BREATH1_REGISTER, fade_out << 4 | fade_in)
        self._register(_CONFIG_BANK, _BREATH2_REGISTER, 1 << 4 | pause)

    def frame(self, frame=None, show=True):
        """
        Change or get active frame.

        If ``frame`` is not specified, returns the active frame, otherwise sets
        it to the value of ``frame``. If ``show`` is ``True``, also shows that
        frame.
        """
        if frame is None:
            return self._frame
        # NOTE(review): the chip has 8 frames (0-7); the historical bound
        # ``<= 8`` is kept for backward compatibility -- confirm against
        # the datasheet before tightening it.
        if not 0 <= frame <= 8:
            raise ValueError("Frame out of range")
        self._frame = frame
        if show:
            self._register(_CONFIG_BANK, _FRAME_REGISTER, frame)

    def audio_sync(self, value=None):
        """Enable, disable or get sync of brightness with audio input."""
        return self._register(_CONFIG_BANK, _AUDIOSYNC_REGISTER, value)

    def audio_play(self, sample_rate, audio_gain=0,
                   agc_enable=False, agc_fast=False):
        """
        Enable or disable frame display according to the audio input.

        The ``sample_rate`` specifies sample rate in microseconds. If it is 0,
        disable the audio play. The ``audio_gain`` specifies amplification
        between 0dB and 21dB.
        """
        if sample_rate == 0:
            self._mode(_PICTURE_MODE)
            return
        sample_rate //= 46  # hardware steps are ~46 us
        if not 1 <= sample_rate <= 256:
            raise ValueError("Sample rate out of range")
        self._register(_CONFIG_BANK, _ADC_REGISTER, sample_rate % 256)
        audio_gain //= 3  # hardware steps are 3 dB
        if not 0 <= audio_gain <= 7:
            raise ValueError("Audio gain out of range")
        self._register(_CONFIG_BANK, _GAIN_REGISTER,
                       bool(agc_enable) << 3 | bool(agc_fast) << 4 | audio_gain)
        self._mode(_AUDIOPLAY_MODE)

    def blink(self, rate=None):
        """Get or set blink rate up to 1890ms in steps of 270ms."""
        if rate is None:
            return (self._register(_CONFIG_BANK, _BLINK_REGISTER) & 0x07) * 270
        elif rate == 0:
            self._register(_CONFIG_BANK, _BLINK_REGISTER, 0x00)
            return
        rate //= 270
        # Bit 3 enables blinking; bits 0-2 hold the rate.
        self._register(_CONFIG_BANK, _BLINK_REGISTER, rate & 0x07 | 0x08)

    def fill(self, color=None, blink=None, frame=None):
        """Fill the display with specified color and/or blink."""
        if frame is None:
            frame = self._frame
        self._bank(frame)
        if color is not None:
            if not 0 <= color <= 255:
                raise ValueError("Color out of range")
            data = bytearray([color] * 24)
            for row in range(6):
                # 144 PWM registers written in 6 chunks of 24 bytes.
                self.i2c.writeto_mem(self.address,
                                     _COLOR_OFFSET + row * 24, data)
        if blink is not None:
            data = bool(blink) * 0xff
            for col in range(18):
                self._register(frame, _BLINK_OFFSET + col, data)

    def _pixel_addr(self, x, y):
        """Map (x, y) to the linear LED register index."""
        return x + y * 16

    def pixel(self, x, y, color=None, blink=None, frame=None):
        """
        Read or write the specified pixel.

        If ``color`` is not specified, returns the current value of the pixel,
        otherwise sets it to the value of ``color``. If ``frame`` is not
        specified, affects the currently active frame. If ``blink`` is
        specified, it enables or disables blinking for that pixel.
        """
        # BUGFIX: bounds were off by one (``<= width`` / ``<= height``
        # admitted x == 16 and y == 9, addressing the wrong register).
        if not 0 <= x < self.width:
            return
        if not 0 <= y < self.height:
            return
        pixel = self._pixel_addr(x, y)
        if color is None and blink is None:
            # BUGFIX: reads must address the PWM registers; without
            # _COLOR_OFFSET this read hit the enable/blink register area.
            return self._register(self._frame, _COLOR_OFFSET + pixel)
        if frame is None:
            frame = self._frame
        if color is not None:
            if not 0 <= color <= 255:
                raise ValueError("Color out of range")
            self._register(frame, _COLOR_OFFSET + pixel, color)
        if blink is not None:
            addr, bit = divmod(pixel, 8)
            bits = self._register(frame, _BLINK_OFFSET + addr)
            if blink:
                bits |= 1 << bit
            else:
                bits &= ~(1 << bit)
            self._register(frame, _BLINK_OFFSET + addr, bits)
class CharlieWing(Matrix):
    """
    Driver for the 15x7 CharlieWing Adafruit FeatherWing.

    >>> import is31fl3731
    >>> from machine import I2C, Pin
    >>> i2c = I2C(Pin(5), Pin(4))
    >>> display = is31fl3731.CharlieWing(i2c)
    >>> display.fill(127)
    """
    width = 15
    height = 7

    def _pixel_addr(self, x, y):
        # The wing is wired as two halves of the 16x9 matrix: columns
        # above 7 are mirrored horizontally and shifted down 8 rows,
        # while the left half is flipped vertically.
        if x > 7:
            return (15 - x) * 16 + (y + 8)
        return x * 16 + (7 - y)
| StarcoderdataPython |
8067558 | from asmdot import * # pylint: disable=W0614
@handle_command_line()
class HaskellEmitter(Emitter):
    """Emitter that generates Haskell sources for an architecture's opcodes."""
    is_first_statement: bool = False

    @property
    def language(self):
        return 'haskell'

    @property
    def filename(self):
        return f'src/Asm/Internal/{self.arch.capitalize()}.hs'

    @property
    def test_filename(self):
        return f'test/Asm/{self.arch.capitalize()}Spec.hs'

    def __init__(self, args: Namespace, arch: str) -> None:
        super().__init__(args, arch)
        self.indent = Indent(' ')

    def get_type_name(self, ty: IrType) -> str:
        """Translate an IR type id into its Haskell type name."""
        return replace_pattern({
            r'bool': r'Bool',
            r'uint(\d+)': r'Word\1',
            r'int(\d+)': r'Int\1',
            r'Reg(\d*)': r'Register\1'
        }, ty.id)

    def get_operator(self, op: Operator) -> str:
        """Translate an IR operator into its Haskell spelling."""
        dic = {
            OP_BITWISE_AND: '.&.',
            OP_BITWISE_OR : '.|.',
            OP_BITWISE_XOR: '`xor`',
            OP_SHL: '`shiftL`',
            OP_SHR: '`shiftR`'
        }
        # Fall back to the operator's own spelling when unmapped.
        return dic.get(op, op.op)

    def get_function_name(self, function: Function) -> str:
        """Return the Haskell name for *function*, suffixing reserved names.

        BUGFIX: the original tested ``fullname in ('div')``; ``('div')``
        is just the string ``'div'``, so any substring ('d', 'iv', ...)
        was also treated as reserved. A one-element tuple needs the
        trailing comma.
        """
        if function.fullname in ('div',):
            return function.fullname + '_'
        else:
            return function.fullname

    def write_header(self):
        """Emit the module header and the imports every module needs."""
        self.write('module Asm.Internal.', self.arch.capitalize(), ' where\n\n')
        self.indent += 1
        self.writei('import Control.Exception (assert)\n')
        self.writei('import Data.Bits\n')
        self.writei('import Data.ByteString.Builder\n')
        self.writei('import Data.Int\n')
        self.writei('import Data.Semigroup (Semigroup((<>)))\n')
        self.writei('import Data.Word\n\n')

    def write_footer(self):
        self.indent -= 1

    def write_expr(self, expr: Expression):
        """Emit a Haskell expression for an IR expression node."""
        if isinstance(expr, Binary):
            self.write('(', expr.l, ' ', expr.op, ' ', expr.r, ')')
        elif isinstance(expr, Unary):
            self.write(expr.op, expr.v)
        elif isinstance(expr, Ternary):
            self.write('(if ', expr.condition, ' then ', expr.consequence, ' else ', expr.alternative, ')')
        elif isinstance(expr, Var):
            self.write(expr.name)
        elif isinstance(expr, Call):
            self.write(expr.builtin, ' ', join_any(' ', expr.args))
        elif isinstance(expr, Literal):
            self.write(expr.value)
        else:
            raise UnsupportedExpression(expr)

    def write_stmt(self, stmt: Statement):
        """Emit a Haskell Builder expression for an IR statement node.

        Statements after the first are chained with the ``<>`` monoid
        operator, hence the is_first_statement bookkeeping.
        """
        deindent = True
        if self.is_first_statement:
            self.is_first_statement = False
            deindent = False
        else:
            self.writelinei('<>')
            self.indent += 1

        if isinstance(stmt, Assign):
            self.writelinei(stmt.variable, ' = ', stmt.value)
        elif isinstance(stmt, Conditional):
            self.writelinei('if ', stmt.condition, ' then')
            with self.indent.further():
                self.is_first_statement = True
                self.write_stmt(stmt.consequence)
                self.is_first_statement = False
            self.writelinei('else')
            with self.indent.further():
                self.is_first_statement = True
                if stmt.alternative:
                    self.write_stmt(stmt.alternative)
                else:
                    # Haskell 'if' requires both branches; emit a no-op.
                    self.writelinei('mempty')
                self.is_first_statement = False
        elif isinstance(stmt, Block):
            self.is_first_statement = True
            for s in stmt.statements:
                self.write_stmt(s)
            self.is_first_statement = False
        elif isinstance(stmt, Set):
            typ = stmt.type.under
            endian = 'BE ' if self.bigendian else 'LE '
            # Single bytes have no endianness; wider values pick the
            # builder matching the target byte order.
            if typ is TYPE_I8: self.writei('int8 ')
            elif typ is TYPE_U8: self.writei('word8 ')
            elif typ.id.startswith('u'): self.writei('word', typ.size * 8, endian)
            else: self.writei('int', typ.size * 8, endian)
            self.writeline(stmt.value)
        elif isinstance(stmt, Define):
            self.writelinei('let ', stmt.name, ' = ', stmt.value, ' in')
        else:
            raise UnsupportedStatement(stmt)

        if deindent:
            self.indent -= 1

    def write_function(self, fun: Function):
        """Emit the type signature and body for one opcode function."""
        self.is_first_statement = True

        self.writei(fun.name, ' :: ')
        for _, typ, _ in fun.params:
            self.write(f'{typ} -> ')
        self.write('Builder\n')
        self.writei(fun.name, ' ', ' '.join([ name for name, _, _ in fun.params ]), ' =\n')
        self.indent += 1

        for name, typ, _ in fun.params:
            # Both distinct and plain types are converted identically;
            # the original branched on ``typ.underlying`` but emitted the
            # same line in both branches, so the branch was removed.
            self.writelinei(f'let {name} = fromIntegral {name} in')
        for condition in fun.conditions:
            self.writei('assert ', condition, '\n')
        for stmt in fun.body:
            self.write_stmt(stmt)

        self.write('\n\n')
        self.indent -= 1

    def write_decl(self, decl: Declaration):
        """Emit a data declaration (enum) or newtype (distinct type)."""
        if isinstance(decl, Enumeration):
            self.writei('-- | ', decl.descr, '\n')
            self.writei('data ', decl.type, ' =\n')
            self.indent += 1
            prefix = '  '

            for _, _, descr, fullname in decl.members + decl.additional_members:
                self.writei(prefix, fullname, ' -- ^ ', descr, '\n')
                if prefix == '  ':
                    prefix = '| '

            self.writei('  deriving (Eq, Show)\n\n')
            self.indent -= 1
            self.writei('instance Enum ', decl.type, ' where\n')

            for _, value, _, fullname in decl.members + decl.additional_members:
                self.writei('  fromEnum ', fullname, ' = ', value, '\n')
            self.write('\n')
            for _, value, _, fullname in decl.members + decl.additional_members:
                self.writei('  toEnum ', value, ' = ', fullname, '\n')
            self.write('\n\n')
        elif isinstance(decl, DistinctType):
            self.writei('-- | ', decl.descr, '\n')
            self.writei('newtype ', decl.type, ' = ', decl.type, ' ', decl.type.underlying, '\n\n')

            if decl.constants:
                self.writei(', '.join([ name for name, _ in decl.constants ]), ' :: ', decl.type, '\n')
            for name, value in decl.constants:
                self.writei(name, ' = ', decl.type, ' ', value, '\n')
            self.write('\n\n')
        else:
            raise UnsupportedDeclaration(decl)

    def write_test_header(self):
        """Emit the Hspec module preamble for the generated test file."""
        self.write(f'import Asm.{self.arch.capitalize()}\nimport Test.Hspec\n\n')
        self.write(f'{self.arch}Spec = do\n')
        self.indent += 1

    def write_test_footer(self):
        self.indent -= 1

    def write_test(self, test: TestCase):
        """Emit a pending Hspec test stub for *test*."""
        self.writei('it "', test.name, '" $\n')
        self.indent += 1
        self.writelinei('pending')
        self.writeline()
        self.indent -= 1
| StarcoderdataPython |
4929800 | <reponame>jimgreen/Viscid<filename>viscid/amr_field.py
"""For fields that consist of a list of fields + an AMRSkeleton
Note:
An AMRField is NOT a subclass of Field, but it is a giant wrapper
around a lot of Field functionality.
"""
from __future__ import print_function
import numpy as np
import viscid
# from viscid.compat import string_types
from viscid.field import Field
try:
from viscid.calculator import cycalc
_HAS_CYCALC = True
except ImportError:
# in case cycalc isn't built
_HAS_CYCALC = False
__all__ = ["is_list_of_fields"]
def is_list_of_fields(lst):
    """Return True if every item of *lst* is a Field (True for empty input)."""
    # Idiomatic form of the original manual loop; all() short-circuits on
    # the first non-Field item exactly like the early ``return False`` did.
    return all(isinstance(item, Field) for item in lst)
class _FieldListCallableAttrWrapper(object):
objs = None
attrname = None
post_func = None
def __init__(self, objs, attrname, post_func=None):
# print(">>> runtime wrapping:", attrname)
for o in objs:
if not hasattr(o, attrname):
raise AttributeError("{0} has no attribute {1}"
"".format(o, attrname))
self.objs = objs
self.attrname = attrname
self.post_func = post_func
def __call__(self, *args, **kwargs):
lst = [getattr(o, self.attrname)(*args, **kwargs) for o in self.objs]
if self.post_func:
return self.post_func(lst)
else:
return lst
class AMRField(object):
    """Field-like

    Contains an AMRSkeleton and a list of Fields. This mimiks a Field,
    but it is NOT a subclass of Field. Many methods of Field are
    wrapped and return a new AMRField.

    If an attribute of Field is not explicitly wrapped, this class will
    try to runtime-wrap that method and return a new AMRField or a list
    containing the result. This will not work for special methods since
    python will not send those through __getattr__ or __getattribute__.
    """
    _TYPE = "amr"
    skeleton = None      # AMRSkeleton describing patch layout
    patches = None       # list of Field objects, one per AMR patch
    nr_patches = None    # cached len(patches)

    def __init__(self, fields, skeleton):
        if not is_list_of_fields(fields):
            raise TypeError("AMRField can only contain Fields:", fields)
        self.skeleton = skeleton
        self.patches = fields
        self.nr_patches = len(fields)

    @property
    def xl(self):
        # global lower corner: elementwise min over all patch corners
        return np.min(self.skeleton.xl, axis=0)

    @property
    def xh(self):
        # global upper corner: elementwise max over all patch corners
        return np.max(self.skeleton.xh, axis=0)

    def get_slice_extent(self, selection):
        extent = self.patches[0]._src_crds.get_slice_extent(selection)
        # NaN entries mean "unbounded in this dimension"; substitute the
        # global bounds of the whole AMR hierarchy.
        for i in range(3):
            if np.isnan(extent[0, i]):
                extent[0, i] = self.xl[i]
            if np.isnan(extent[1, i]):
                extent[1, i] = self.xh[i]
        return extent

    ###########
    ## slicing

    def _prepare_amr_slice(self, selection):
        """ return list of patches that contain selection """
        # FIXME: it's not good to reach in to src_field[0]'s private methods
        # like this, but it's also not good to implement these things twice
        # print("??", len(self.patches))
        if len(self.patches) == 0:
            raise ValueError("AMR field must contain patches to be slicable")
        selection, _ = self.patches[0]._prepare_slice(selection)
        extent = self.patches[0]._src_crds.get_slice_extent(selection)

        inds = []
        # these are patches that look like they contain selection
        # but might not due to finite precision errors when
        # calculating xh
        maybe = []

        for i, fld in enumerate(self.patches):
            # - if xl - atol > the extent of the slice in any direction, then
            #   there's no overlap
            # - if xh <= the lower corner of the slice in any direction, then
            #   there's no overlap
            # the atol and equals are done to match cases where extent overlaps
            # the lower corner, but not the upper corner

            # logic goes this way cause extent has NaNs in
            # dimensions that aren't specified in selection... super-kludge

            # also, temporarily disable warnings on NaNs in numpy
            invalid_err_level = np.geterr()['invalid']
            np.seterr(invalid='ignore')
            atol = 100 * np.finfo(fld.crds.xl_nc.dtype).eps
            if (not np.any(np.logical_or(fld.crds.xl_nc - atol > extent[1],
                                         fld.crds.xh_nc <= extent[0]))):
                if np.any(np.isclose(fld.crds.xh_nc, extent[0], atol=atol)):
                    maybe.append(i)
                else:
                    inds.append(i)
            np.seterr(invalid=invalid_err_level)

        # if we found some maybes, but no real hits, then use the maybes
        if maybe and not inds:
            inds = maybe

        if len(inds) == 0:
            viscid.logger.error("selection {0} not in any patch @ time {1}"
                                "".format(selection, self.patches[0].time))
            if self.skeleton:
                s = ("    skeleton: xl= {0} xh = {1}"
                     "".format(self.skeleton.global_xl,
                               self.skeleton.global_xh))
                viscid.logger.error(s)
            inds = None
            flds = None
        elif len(inds) == 1:
            # single-patch hit: return the patch itself, not a list
            inds = inds[0]
            flds = self.patches[inds]
        else:
            flds = [self.patches[i] for i in inds]

        return flds, inds

    def _finalize_amr_slice(self, fld_lst):  # pylint: disable=no-self-use
        skeleton = None  # FIXME
        for fld in fld_lst:
            if isinstance(fld, (int, float, np.number)):
                m = ("Trying to make an AMRField where 1+ patches "
                     "is just a number... You probably slice_reduced "
                     "a field down to a scalar value")
                viscid.logger.error(m)
        return AMRField(fld_lst, skeleton)

    def patch_indices(self, selection):
        """get the indices of the patches that overlap selection

        Args:
            selection (slice, str): anything that can slice a field

        Returns:
            list of indices
        """
        _, inds = self._prepare_amr_slice(selection)
        return inds

    def slice(self, selection):
        fld_lst, _ = self._prepare_amr_slice(selection)
        # single-patch case: delegate directly to that patch's slice
        if not isinstance(fld_lst, list):
            return fld_lst.slice(selection)
        fld_lst = [fld.slice(selection) for fld in fld_lst]
        return self._finalize_amr_slice(fld_lst)

    def slice_reduce(self, selection):
        fld_lst, _ = self._prepare_amr_slice(selection)
        if not isinstance(fld_lst, list):
            return fld_lst.slice_reduce(selection)
        fld_lst = [fld.slice_reduce(selection) for fld in fld_lst]
        return self._finalize_amr_slice(fld_lst)

    def slice_and_keep(self, selection):
        fld_lst, _ = self._prepare_amr_slice(selection)
        if not isinstance(fld_lst, list):
            return fld_lst.slice_and_keep(selection)
        fld_lst = [fld.slice_and_keep(selection) for fld in fld_lst]
        return self._finalize_amr_slice(fld_lst)

    def interpolated_slice(self, selection):
        fld_lst, _ = self._prepare_amr_slice(selection)
        if not isinstance(fld_lst, list):
            raise RuntimeError("can't interpolate to that slice?")

        ret_lst = [fld.interpolated_slice(selection) for fld in fld_lst]
        return self._finalize_amr_slice(ret_lst)

    ###################
    ## special methods

    def __getitem__(self, item):
        return self.slice(item)

    def __setitem__(self, key, value):
        raise NotImplementedError()

    def __delitem__(self, item):
        raise NotImplementedError()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, value, traceback):
        """clear all caches"""
        for blk in self.patches:
            blk.clear_cache()
        return None

    def wrap_field_method(self, attrname, *args, **kwargs):
        """Wrap methods whose args are Fields and return a Field"""
        # make sure all args have same number of patches as self
        is_field = [None] * len(args)
        for i, arg in enumerate(args):
            try:
                # an arg with exactly 1 patch is broadcast to all patches
                if arg.nr_patches != self.nr_patches and arg.nr_patches != 1:
                    raise ValueError("AMR fields in math operations must "
                                     "have the same number of patches")
                is_field[i] = True
            except AttributeError:
                is_field[i] = False

        lst = [None] * self.nr_patches
        other = [None] * len(args)
        # FIXME: There must be a better way
        for i, patch in enumerate(self.patches):
            for j, arg in enumerate(args):
                if is_field[j]:
                    try:
                        other[j] = arg.patches[i]
                    except IndexError:
                        # single-patch arg: reuse its one patch everywhere
                        other[j] = arg.patches[0]
                else:
                    other[j] = arg
            lst[i] = getattr(patch, attrname)(*other, **kwargs)

        if np.asarray(lst[0]).size == 1:
            # operation reduced to scalar
            arr = np.array(lst)
            return getattr(arr, attrname)(**kwargs)
        else:
            return AMRField(lst, self.skeleton)

    # TODO: as of numpy 1.10, this will be called on ufuncs... this
    #       will help some of the FIXMEs in __array__
    # def __numpy_ufunc__(self, ufunc, method, i, inputs, **kwargs):
    #     pass

    def __array__(self, *args, **kwargs):
        # FIXME: This is heinously inefficient for large arrays because it
        #        makes an copy of all the arrays... but I don't see
        #        a way around this because ufuncs expect a single array
        # FIXME: adding a dimension to the arrays will break cases like
        #        np.sum(fld, axis=-1), cause that -1 will now be the patch
        #        dimension
        patches = [patch.__array__(*args, **kwargs) for patch in self.patches]
        for i, patch in enumerate(patches):
            patches[i] = np.expand_dims(patch, 0)
        # the vstack will copy all the arrays, this is what __numpy_ufunc__
        # will be able to avoid
        arr = np.vstack(patches)
        # roll the patch dimension to the last dimension... this is for ufuncs
        # that take an axis argument... this way axis will only be confused
        # if it's negative, this is the main reason to use __numpy_ufunc__
        # in the future
        arr = np.rollaxis(arr, 0, len(arr.shape))
        return arr

    def __array_wrap__(self, arr, context=None):  # pylint: disable=unused-argument
        # print(">> __array_wrap__", arr.shape, context)
        # last axis is the patch axis (see __array__); re-wrap each slab
        flds = []
        for i in range(arr.shape[-1]):
            patch_arr = arr[..., i]
            fld = self.patches[i].__array_wrap__(patch_arr, context=context)
            flds.append(fld)
        return AMRField(flds, self.skeleton)

    # All arithmetic/comparison dunders below delegate patch-wise through
    # wrap_field_method, which broadcasts scalar / single-patch operands.
    # NOTE(review): defining __eq__ without __hash__ makes AMRField
    # unhashable in Python 3 -- confirm this is intended.
    def __add__(self, other):
        return self.wrap_field_method("__add__", other)
    def __sub__(self, other):
        return self.wrap_field_method("__sub__", other)
    def __mul__(self, other):
        return self.wrap_field_method("__mul__", other)
    def __div__(self, other):
        return self.wrap_field_method("__div__", other)
    def __truediv__(self, other):
        return self.wrap_field_method("__truediv__", other)
    def __floordiv__(self, other):
        return self.wrap_field_method("__floordiv__", other)
    def __mod__(self, other):
        return self.wrap_field_method("__mod__", other)
    def __divmod__(self, other):
        return self.wrap_field_method("__divmod__", other)
    def __pow__(self, other):
        return self.wrap_field_method("__pow__", other)
    def __lshift__(self, other):
        return self.wrap_field_method("__lshift__", other)
    def __rshift__(self, other):
        return self.wrap_field_method("__rshift__", other)
    def __and__(self, other):
        return self.wrap_field_method("__and__", other)
    def __xor__(self, other):
        return self.wrap_field_method("__xor__", other)
    def __or__(self, other):
        return self.wrap_field_method("__or__", other)
    def __radd__(self, other):
        return self.wrap_field_method("__radd__", other)
    def __rsub__(self, other):
        return self.wrap_field_method("__rsub__", other)
    def __rmul__(self, other):
        return self.wrap_field_method("__rmul__", other)
    def __rdiv__(self, other):
        return self.wrap_field_method("__rdiv__", other)
    def __rtruediv__(self, other):
        return self.wrap_field_method("__rtruediv__", other)
    def __rfloordiv__(self, other):
        return self.wrap_field_method("__rfloordiv__", other)
    def __rmod__(self, other):
        return self.wrap_field_method("__rmod__", other)
    def __rdivmod__(self, other):
        return self.wrap_field_method("__rdivmod__", other)
    def __rpow__(self, other):
        return self.wrap_field_method("__rpow__", other)
    def __iadd__(self, other):
        return self.wrap_field_method("__iadd__", other)
    def __isub__(self, other):
        return self.wrap_field_method("__isub__", other)
    def __imul__(self, other):
        return self.wrap_field_method("__imul__", other)
    def __idiv__(self, other):
        return self.wrap_field_method("__idiv__", other)
    def __itruediv__(self, other):
        return self.wrap_field_method("__itruediv__", other)
    def __ifloordiv__(self, other):
        return self.wrap_field_method("__ifloordiv__", other)
    def __imod__(self, other):
        return self.wrap_field_method("__imod__", other)
    def __ipow__(self, other):
        return self.wrap_field_method("__ipow__", other)
    def __neg__(self):
        return self.wrap_field_method("__neg__")
    def __pos__(self):
        return self.wrap_field_method("__pos__")
    def __abs__(self):
        return self.wrap_field_method("__abs__")
    def __invert__(self):
        return self.wrap_field_method("__invert__")
    def __lt__(self, other):
        return self.wrap_field_method("__lt__", other)
    def __le__(self, other):
        return self.wrap_field_method("__le__", other)
    def __eq__(self, other):
        return self.wrap_field_method("__eq__", other)
    def __ne__(self, other):
        return self.wrap_field_method("__ne__", other)
    def __gt__(self, other):
        return self.wrap_field_method("__gt__", other)
    def __ge__(self, other):
        return self.wrap_field_method("__ge__", other)

    # Reductions also delegate patch-wise; when every patch reduces to a
    # scalar, wrap_field_method reduces once more across patches.
    def any(self, **kwargs):
        return self.wrap_field_method("any", **kwargs)
    def all(self, **kwargs):
        return self.wrap_field_method("all", **kwargs)
    def argmax(self, **kwargs):
        return self.wrap_field_method("argmax", **kwargs)
    def argmin(self, **kwargs):
        return self.wrap_field_method("argmin", **kwargs)
    def argpartition(self, **kwargs):
        return self.wrap_field_method("argpartition", **kwargs)
    def argsort(self, **kwargs):
        return self.wrap_field_method("argsort", **kwargs)
    def cumprod(self, **kwargs):
        return self.wrap_field_method("cumprod", **kwargs)
    def cumsum(self, **kwargs):
        return self.wrap_field_method("cumsum", **kwargs)
    def max(self, **kwargs):
        return self.wrap_field_method("max", **kwargs)
    def mean(self, **kwargs):
        return self.wrap_field_method("mean", **kwargs)
    def min(self, **kwargs):
        return self.wrap_field_method("min", **kwargs)
    def partition(self, **kwargs):
        return self.wrap_field_method("partition", **kwargs)
    def prod(self, **kwargs):
        return self.wrap_field_method("prod", **kwargs)
    def std(self, **kwargs):
        return self.wrap_field_method("std", **kwargs)
    def sum(self, **kwargs):
        return self.wrap_field_method("sum", **kwargs)

    def __getattr__(self, name):
        # Runtime fallback for Field attributes not explicitly wrapped:
        # callables fan out over patches; plain attributes must agree on
        # every patch and return that shared value.
        # define a callback to finalize
        # print("!! getting attr::", name)
        if callable(getattr(self.patches[0], name)):
            def _wrap(lst):
                try:
                    return AMRField(lst, self.skeleton)
                except TypeError:
                    return lst
            return _FieldListCallableAttrWrapper(self.patches, name, _wrap)
        else:
            # return [getattr(fld, name) for fld in self.patches]
            ret0 = getattr(self.patches[0], name)
            # Check that all patches have the same value. Maybe this should
            # have a debugging flag attached to it since it will take time.
            try:
                all_same = all(getattr(blk, name) == ret0
                               for blk in self.patches[1:])
            except ValueError:
                # comparison returned an array; collapse with np.all
                all_same = all(np.all(getattr(blk, name) == ret0)
                               for blk in self.patches[1:])
            if not all_same:
                raise ValueError("different patches of the AMRField have "
                                 "different values for attribute: {0}"
                                 "".format(name))
            return ret0
##
## EOF
##
| StarcoderdataPython |
6474856 | <reponame>Worteks/OrangeAssassin
"""
CREATE TABLE `awl` (
`username` varchar(255) NOT NULL DEFAULT '',
`email` varchar(200) NOT NULL DEFAULT '',
`ip` varchar(40) NOT NULL DEFAULT '',
`count` int(11) NOT NULL DEFAULT '0',
`totscore` float NOT NULL DEFAULT '0',
`signedby` varchar(255) NOT NULL DEFAULT '',
PRIMARY KEY (`username`,`email`,`signedby`,`ip`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COMMENT='Used by SpamAssassin for the
auto-whitelist functionality'
"""
from __future__ import absolute_import
from builtins import str
import re
import email
import getpass
import ipaddress
from collections import defaultdict
# SQLAlchemy is an optional dependency; when it is missing the plugin
# falls back to raw pymysql access (see has_sqlalchemy checks below).
try:
    from sqlalchemy import Column
    from sqlalchemy.types import Float
    from sqlalchemy.types import String
    from sqlalchemy.types import Integer
    from sqlalchemy.sql.schema import PrimaryKeyConstraint
    from sqlalchemy.ext.declarative.api import declarative_base

    Base = declarative_base()
    has_sqlalchemy = True
except ImportError:
    # BUGFIX: was a bare ``except:``, which would also swallow
    # KeyboardInterrupt/SystemExit and mask non-import errors.
    has_sqlalchemy = False
import pymysql
import oa.plugins.base
from oa.regex import Regex
IPV4SUFFIXRE = Regex("(\.0){1,3}$")
# DDL used to create the AWL table when it does not exist yet; ``%s`` is
# substituted with the table name.  The original statement was invalid SQL:
# it named the table twice ("CREATE TABLE `awl` IF NOT EXISTS `%s`") and had
# a stray comma in "NOT NULL, DEFAULT ''".
AWL_TABLE = (
    "CREATE TABLE IF NOT EXISTS `%s` (",
    " `username` varchar(255) NOT NULL DEFAULT '',",
    " `email` varchar(255) NOT NULL DEFAULT '',",
    " `ip` varchar(40) NOT NULL DEFAULT '',",
    " `count` int(11) NOT NULL DEFAULT '0',",
    " `totscore` float NOT NULL DEFAULT '0',",
    " `signedby` varchar(255) NOT NULL DEFAULT '',",
    " PRIMARY KEY (`username`, `email`, `signedby`, `ip`)",
    ") ENGINE=MyISAM ",
    " DEFAULT CHARSET=latin1",
    "COMMENT='Used by SpamAssassin "
    "for the auto-whitelist functionality'"
)
if has_sqlalchemy:
    class AWL(Base):
        """Declarative schema for the ``awl`` table.

        One row per (username, email, signedby, ip) key, holding the number
        of messages seen and their accumulated score.
        """
        __tablename__ = 'awl'
        # NOTE(review): email is String(200) here while the raw DDL above
        # declares varchar(255) -- confirm which width is intended.
        username = Column("username", String(255))
        email = Column("email", String(200))
        ip = Column("ip", String(40))
        count = Column("count", Integer)
        totscore = Column("totscore", Float)
        signedby = Column("signedby", String(255))
        __table_args__ = (
            PrimaryKeyConstraint("username", "email", "signedby", "ip"),)
class AutoWhiteListPlugin(oa.plugins.base.BasePlugin):
    """Reimplementation of the awl spamassassin plugin.

    Keeps a running score total per (user, sender address, origin network,
    DKIM signer) key and nudges each new message's score towards the
    historical mean by ``auto_whitelist_factor``.
    """

    has_mysql = False
    engine = None
    eval_rules = ("check_from_in_auto_whitelist",)
    options = {
        "auto_whitelist_factor": ("float", 0.5),
        "auto_whitelist_ipv4_mask_len": ("int", 16),
        "auto_whitelist_ipv6_mask_len": ("int", 48),
        "user_awl_dsn": ("str", ""),
        "user_awl_sql_username": ("str", ""),
        "user_awl_sql_password": ("str", ""),
    }

    @property
    def dsn(self):
        """Configured database DSN for the AWL store."""
        return self['user_awl_dsn']

    @property
    def sql_username(self):
        """Configured database user name."""
        return self['user_awl_sql_username']

    @property
    def sql_password(self):
        """Configured database password."""
        return self['user_awl_sql_password']

    def _get_origin_ip(self, msg):
        """Return the first public IP in the relay chain (oldest relay first),
        or None when every relay is private/unknown."""
        relays = []
        relays.extend(msg.trusted_relays)
        relays.extend(msg.untrusted_relays)
        relays.reverse()
        for relay in relays:
            if "ip" in relay:
                ip = ipaddress.ip_address(str(relay['ip']))
                if not ip.is_private:
                    return ip
        return None

    def _get_signed_by(self, msg):
        """Return the DKIM signing domain (the ``d=`` tag), or ''."""
        dkim = msg.msg.get('DKIM-Signature', "")
        for param in dkim.split(";"):
            if param.strip().startswith("d="):
                return param.split("=", 1)[1].strip(" ;\r\n")
        return ""

    def _get_from(self, msg):
        """Return the first From address, or '' when the header is missing."""
        try:
            return msg.get_addr_header("From")[0]
        except IndexError:
            return ""

    def parsed_metadata(self, msg):
        """Cache sender address, DKIM signer and origin IP on the message."""
        from_addr = self._get_from(msg)
        self.set_local(msg, "from", from_addr)
        signedby = self._get_signed_by(msg)
        self.set_local(msg, "signedby", signedby)
        origin_ip = self._get_origin_ip(msg)
        self.set_local(msg, "originip", origin_ip)

    def ip_to_awl_key(self, ip):
        """Reduce ``ip`` to the masked network key stored in the awl table."""
        if ip.version == 4:
            mask = self["auto_whitelist_ipv4_mask_len"]
        else:
            mask = self["auto_whitelist_ipv6_mask_len"]
        interface = ipaddress.ip_interface("%s/%s" % (ip, mask))
        network = interface.network.network_address
        # Drop trailing ".0" octets, e.g. "10.20.0.0" -> "10.20".
        return IPV4SUFFIXRE.sub("", str(network))

    def get_entry(self, address, ip, signed_by):
        """Fetch or create the AWL record via pymysql or SQLAlchemy."""
        self.engine = self.get_engine()
        if isinstance(self.engine, defaultdict) and not has_sqlalchemy:
            self.has_mysql = True
            return self.get_mysql_entry(address, ip, signed_by)
        else:
            return self.get_sqlalch_entry(address, ip, signed_by)

    def get_mysql_entry(self, address, ip, signed_by):
        """Fetch (or create) the AWL row for this sender key using pymysql.

        Returns either a row tuple (on a successful SELECT) or pymysql's
        affected-row count (on the fallback UPDATE/INSERT paths), matching
        the original behaviour.
        """
        conn = pymysql.connect(host=self.engine["hostname"], port=3306,
                               user=self.engine["user"],
                               passwd=self.engine["password"],
                               db=self.engine["db_name"],
                               )
        cursor = conn.cursor()
        cursor.execute("SELECT * FROM awl WHERE username=%s AND email=%s AND "
                       "signedby=%s AND ip=%s",
                       (self.ctxt.username, address, signed_by, ip))
        try:
            result = cursor.fetchall()
            if result:
                result = result[0]
        except pymysql.DatabaseError:
            # Fall back to an IP-less row recorded for this sender.
            result = cursor.execute(
                "SELECT * FROM awl WHERE username=%s AND email=%s AND "
                "signedby=%s AND ip=%s",
                (self.ctxt.username, address, signed_by, "none"))
            if result:
                # Attach the real network key to that row.  The original
                # statement had no WHERE clause and rewrote the ip of
                # every row in the table.
                result = cursor.execute(
                    "UPDATE awl SET ip=%s WHERE username=%s AND email=%s "
                    "AND signedby=%s AND ip=%s",
                    (ip, self.ctxt.username, address, signed_by, "none"))
                conn.commit()
        if not result:
            result = cursor.execute(
                "INSERT INTO awl VALUES (%s, %s, %s, %s, %s, %s) ",
                (self.ctxt.username, address, ip, 0, 0, signed_by))
            conn.commit()
        cursor.close()
        conn.close()
        return result

    def get_sqlalch_entry(self, address, ip, signed_by):
        """Fetch (or create) the AWL row for this sender key via SQLAlchemy."""
        session = self.get_session()
        result = session.query(AWL).filter(
            AWL.username == self.ctxt.username,
            AWL.email == address,
            AWL.signedby == signed_by,
            AWL.ip == ip).first()
        if not result:
            # Fall back to an IP-less row and attach the real network key.
            result = session.query(AWL).filter(
                AWL.username == self.ctxt.username,
                AWL.email == address,
                AWL.signedby == signed_by,
                AWL.ip == "none").first()
            if result:
                result.ip = ip
            session.close()
        if not result:
            # First sighting: build a fresh, zeroed entry (not yet persisted;
            # plugin_tags_sqlalch merges it into the session).
            result = AWL()
            result.count = 0
            result.totscore = 0
            result.username = self.ctxt.username
            result.email = address
            result.signedby = signed_by
            result.ip = ip
        return result

    def plugin_tags_sqlalch(self, msg, origin_ip, addr, signed_by, score,
                            factor, entry):
        """Adjust the score towards the stored mean and persist via SQLAlchemy."""
        try:
            mean = entry.totscore / entry.count
            log_msg = ("auto-whitelist: AWL active, pre-score: %s, "
                       "mean: %s, IP: %s, address: %s %s")
            self.ctxt.log.debug(log_msg,
                                msg.score,
                                "%.3f" % mean if mean else "undef",
                                origin_ip if origin_ip else "undef",
                                addr,
                                "signed by %s" % signed_by if signed_by
                                else "(not signed)")
        except ZeroDivisionError:
            # First message for this key: no history yet.
            mean = None
        if mean:
            delta = mean - score
            delta *= factor
            msg.plugin_tags["AWL"] = "%2.1f" % delta
            msg.plugin_tags["AWLMEAN"] = "%2.1f" % mean
            msg.plugin_tags["AWLCOUNT"] = "%2.1f" % entry.count
            msg.plugin_tags["AWLPRESCORE"] = "%2.1f" % msg.score
            msg.score += delta
        entry.count += 1
        entry.totscore += score
        session = self.get_session()
        session.merge(entry)
        session.commit()
        session.close()

    def plugin_tags_mysql(self, msg, origin_ip, addr, signed_by, score, factor):
        """Adjust the score towards the stored mean and persist via pymysql."""
        try:
            conn = pymysql.connect(host=self.engine["hostname"], port=3306,
                                   user=self.engine["user"],
                                   passwd=self.engine["password"],
                                   db=self.engine["db_name"],
                                   )
            cursor = conn.cursor()
            # Restrict to this sender's row(s); the original selected the
            # whole table and read an arbitrary first row.
            cursor.execute("SELECT totscore, count FROM awl WHERE "
                           "username=%s AND email=%s AND signedby=%s",
                           (self.ctxt.username, addr, signed_by))
            entry_totscore, entry_count = cursor.fetchall()[0]
        except pymysql.Error:
            self.ctxt.log.error("DB connection failed")
            return
        try:
            mean = entry_totscore / entry_count
            log_msg = ("auto-whitelist: AWL active, pre-score: %s, "
                       "mean: %s, IP: %s, address: %s %s")
            self.ctxt.log.debug(log_msg,
                                msg.score,
                                "%.3f" % mean if mean else "undef",
                                origin_ip if origin_ip else "undef",
                                addr,
                                "signed by %s" % signed_by if signed_by
                                else "(not signed)")
        except ZeroDivisionError:
            mean = None
        if mean:
            delta = mean - score
            delta *= factor
            msg.plugin_tags["AWL"] = "%2.1f" % delta
            msg.plugin_tags["AWLMEAN"] = "%2.1f" % mean
            msg.plugin_tags["AWLCOUNT"] = "%2.1f" % entry_count
            msg.plugin_tags["AWLPRESCORE"] = "%2.1f" % msg.score
            msg.score += delta
        entry_count += 1
        entry_totscore += score
        try:
            # The original UPDATE had no WHERE clause and overwrote every
            # row's counters.
            result = cursor.execute(
                "UPDATE awl SET count=%s, totscore=%s WHERE "
                "username=%s AND email=%s AND signedby=%s",
                (entry_count, entry_totscore,
                 self.ctxt.username, addr, signed_by))
        except pymysql.Error:
            return False
        conn.commit()
        cursor.close()
        conn.close()

    def check_from_in_auto_whitelist(self, msg, target=None):
        """Adjust the message score according to the
        auto whitelist rules.

        :return: False (this eval rule never matches by itself; its effect
            is the score adjustment)
        """
        score = msg.score
        factor = self["auto_whitelist_factor"]
        origin_ip = self.get_local(msg, "originip")
        if origin_ip:
            awl_key_ip = self.ip_to_awl_key(origin_ip)
        else:
            awl_key_ip = "none"
        addr = self.get_local(msg, "from")
        signed_by = self.get_local(msg, "signedby")
        entry = self.get_entry(addr, awl_key_ip, signed_by)
        if self.has_mysql:
            self.plugin_tags_mysql(msg, origin_ip, addr, signed_by, score,
                                   factor)
        else:
            self.plugin_tags_sqlalch(msg, origin_ip, addr, signed_by, score,
                                     factor, entry)
        self.ctxt.log.debug("auto-whitelist: post auto-whitelist score %.3f",
                            msg.score)
        return False
| StarcoderdataPython |
import FWCore.ParameterSet.Config as cms

process = cms.Process("READ")

process.source = cms.Source(
    "PoolSource",
    fileNames=cms.untracked.vstring("file:overlap.root"),
)

# (run, lumi, event) triples expected from the input file, in order.
_EXPECTED_IDS = [
    (1, 0, 0),
    (1, 1, 0), (1, 1, 1), (1, 1, 2), (1, 1, 3), (1, 1, 0),
    (1, 2, 0), (1, 2, 4), (1, 2, 5), (1, 2, 6), (1, 2, 0),
    (1, 3, 0), (1, 3, 7), (1, 3, 8), (1, 3, 9), (1, 3, 0),
    (1, 4, 0), (1, 4, 10), (1, 4, 0),
    (1, 0, 0),
]

process.tst = cms.EDAnalyzer(
    "RunLumiEventChecker",
    eventSequence=cms.untracked.VEventID(
        *[cms.EventID(run, lumi, event) for run, lumi, event in _EXPECTED_IDS]
    ),
    unorderedEvents=cms.untracked.bool(True),
)

process.out = cms.EndPath(process.tst)
| StarcoderdataPython |
3564492 | <gh_stars>0
__author__ = 'bneron'
import os
import sys
import glob
import time
from lxml import etree
from abc import ABCMeta, abstractmethod
class Node(metaclass=ABCMeta):
    """A node of the Mobyle category/interface tree."""

    def __init__(self, name, job=None):
        self.name = name
        self.parent = None
        self.children = {}
        self._job = {} if job is None else job

    def add_child(self, child):
        """Attach *child* below this node, refresh counters, return *child*."""
        child.parent = self
        self.children[child.name] = child
        if child.job:
            self.update_job()
        return child

    @property
    def job(self):
        """Job-counter dict for this node."""
        return self._job

    def __getitem__(self, name):
        return self.children[name]

    @abstractmethod
    def update_job(self):
        """Recompute this node's job counters."""

    def to_html(self):
        """Render this node (and its children) as a collapsible HTML block."""
        parts = ['<div data-role="collapsible">\n']
        parts.append("""<h1>
<span style="color:blue">{name}</span> pasteur:{pasteur:d} | other:{other:d} | total:{all:d}""".format(name=self.name, **self.job))
        if self.parent:
            total = self.parent.job['all']
            parts.append(" ({job_part:.2%} of {cat_name} jobs)".format(
                job_part=self.job['all'] / total if total else 0,
                cat_name=self.parent.name))
        parts.append("</h1>\n")
        for child in sorted(self.children.values()):
            parts.append(child.to_html())
        parts.append('</div>\n')
        return "".join(parts)

    def __gt__(self, other):
        return self.name > other.name
class Category(Node):
    """Inner node: its job counters are the sums over its children."""

    def update_job(self):
        totals = {'pasteur': 0, 'other': 0, 'all': 0}
        for child in self.children.values():
            for key in totals:
                totals[key] += child.job.get(key, 0)
        self._job = totals
        # Bubble the change up to the root.
        self.parent.update_job()

    def __gt__(self, other):
        # Categories sort after the Mobyle root but before interfaces.
        if isinstance(other, Interface):
            return False
        if isinstance(other, Mobyle):
            return True
        return super().__gt__(other)
class Interface(Node):
    """Leaf node describing one Mobyle program interface."""

    def __init__(self, name, job=None, users=None, package=None, authors=None, references=None, homepage=None):
        super().__init__(name, job=job)
        # NOTE(review): starts as a list, but Node.add_child later overwrites
        # it with the parent node; update_job below expects the list form --
        # confirm which is intended.
        self.parent = []
        self.package = package
        self.authors = authors if authors is not None else []
        self.references = references if references is not None else []
        self.homepage = homepage
        self.users = users

    def to_html(self):
        """Render this interface as a collapsible HTML block."""
        s = """<div data-role="collapsible">
<h1>
<a href="http://mobyle.pasteur.fr/cgi-bin/portal.py#forms::{name}" target="mobyle">{name}</a>
jobs: pasteur:{job[pasteur]:d} | other:{job[other]:d} | total:{job[all]:d} ({job_part:.2%} of {cat_name} jobs)
used by {users[all]:d} users ({users[pasteur]:d} pasteuriens)
</h1>
<p>
<ul>""".format(name=self.name,
               job=self.job,
               users=self.users,
               job_part=self.job['all'] / self.parent.job['all'] if self.parent.job['all'] else 0,
               cat_name=self.parent.name)
        if self.homepage:
            s += '<li>homepage: <a href="{homepage}">{homepage}</a></li>\n'.format(homepage=self.homepage)
        if self.package:
            s += "<li>belongs to package: {package}</li>\n".format(package=self.package)
        if self.authors:
            s += "<li>authors: {authors}</li>\n".format(authors=self.authors)
        # Fixed: the original opened a <ul> when references existed but
        # emitted the closing </ul> in the *empty* branch, producing
        # unbalanced HTML in both cases.
        if self.references:
            s += "<li>references: <ul>"
            for ref in self.references:
                s += "<li>{}</li>\n".format(ref)
            s += """</ul>
</li>"""
        else:
            s += "<li>references:</li>"
        s += """</ul></p>
</div>\n"""
        return s

    def update_job(self):
        # Propagate counter updates to every category containing this
        # interface (one interface can live under several categories).
        for one_parent in self.parent:
            one_parent.update_job()

    def __gt__(self, other):
        # Interfaces always sort after categories and the Mobyle root.
        if isinstance(other, (Category, Mobyle)):
            return True
        else:
            return super().__gt__(other)
class Mobyle(Node):
    """Root of the category/interface tree."""

    def __init__(self):
        super().__init__('Mobyle')

    def update_job(self):
        """Job counters are the sums over the top-level categories."""
        pasteur = sum([c.job['pasteur'] if 'pasteur' in c.job else 0 for c in self.children.values()])
        other = sum([c.job['other'] if 'other' in c.job else 0 for c in self.children.values()])
        all_ = sum([c.job['all'] if 'all' in c.job else 0 for c in self.children.values()])
        self._job = {'pasteur': pasteur,
                     'other': other,
                     'all': all_}

    def add_interface(self, name, authors, references, package, homepage, job, users, categories):
        """Create an Interface and attach it under each of its categories,
        building any missing intermediate Category nodes on the way."""
        interface = Interface(name,
                              authors=authors,
                              references=references,
                              package=package,
                              homepage=homepage,
                              job=job,
                              users=users
                              )
        for cat in categories:
            # Category paths are colon-separated, e.g. "alignment:multiple".
            path = cat.split(':')
            node = self
            for elt in path:
                if elt in node.children:
                    node = node.children[elt]
                else:
                    node = node.add_child(Category(elt))
            # One interface can be child of several categories.
            node.add_child(interface)

    def scan_services(self, repository_path, job_counter, user_counter):
        """Parse every program XML in ``repository_path`` and populate the tree.

        ``job_counter`` / ``user_counter`` map program names to counter dicts;
        programs absent from them default to zero counts.
        """
        interfaces = glob.glob(os.path.join(repository_path, '*.xml'))
        parser = etree.XMLParser(no_network=False)
        for interface in interfaces:
            print("-------- process {} --------".format(os.path.basename(interface)[:-4]), file=sys.stdout)
            doc = etree.parse(interface, parser)
            root = doc.getroot()
            head_node = root.find('./head')
            name = head_node.find('./name').text
            categories = [n.text for n in head_node.findall('./category')]
            # Metadata may live under doc/ or, for packaged programs,
            # under package/doc/.
            package = head_node.find('package/name')
            if package is not None:
                package = package.text
            homepage = head_node.find('doc/homepagelink')
            if homepage is None:
                homepage = head_node.find('package/doc/homepagelink')
            if homepage is not None:
                homepage = homepage.text
            authors = head_node.find('doc/authors')
            if authors is None:
                authors = head_node.find('package/doc/authors')
            if authors is not None:
                authors = authors.text
            references = head_node.findall('doc/reference')
            if not references:
                references = head_node.findall('package/doc/reference')
            references = [n.text for n in references]
            job_count = {'pasteur': 0,
                         'other': 0,
                         'all': 0}
            try:
                job_count.update(job_counter[name])
            except KeyError:
                pass
            users = {'pasteur': 0,
                     'other': 0,
                     'all': 0}
            try:
                users.update(user_counter[name])
            except KeyError:
                pass
            self.add_interface(name, authors, references, package, homepage, job_count, users, categories)

    def to_html(self, stat_start, stat_stop):
        """Render the whole tree as a jQuery-Mobile HTML page."""
        s = """<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet" href="http://code.jquery.com/mobile/1.4.5/jquery.mobile-1.4.5.min.css">
<script src="http://code.jquery.com/jquery-1.11.3.min.js"></script>
<script src="http://code.jquery.com/mobile/1.4.5/jquery.mobile-1.4.5.min.js"></script>
</head>
<body>
<div data-role="page" id="pageone">
<div data-role="header">
<h1>Interfaces Mobyle</h1>
</div>
<div data-role="main" class="ui-content">"""
        s += super().to_html()
        # Fixed: the footer previously formatted with the module-level
        # globals ``start``/``stop`` instead of the method's own
        # parameters, which raised NameError outside the __main__ script.
        s += """</div>
<div data-role="footer">
<h3> generated the {date} based on statistics from {start} to {stop}</h3>
</div>
</div>
</body>
</html>""".format(date=time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime()),
                  start=stat_start,
                  stop=stat_stop)
        return s
if __name__ == '__main__':
    from datetime import datetime
    import pymongo
    # Local checkout of the statistics helpers (not an installed package).
    sys.path.insert(0, '/home/bneron/Mobyle/mobyle_statistics')
    from mobyle_statistics import jobs_count_per_service, user_count_per_service

    # Mongo connection: write-acknowledged (w=1) and journaled (j=True).
    client = pymongo.MongoClient('localhost', 27017, w=1, j=True)
    db_name = 'mobyle_1'
    db = client[db_name]
    col = db.logs

    # Statistics window: calendar year 2014.
    start = datetime(2014, 1, 1)
    stop = datetime(2015, 1, 1)

    # Job counts per service: everyone, foreigners only, pasteuriens only.
    jc = jobs_count_per_service(col, start=start, stop=stop, pasteurien=True, foreigner=True)
    jc_fo = jobs_count_per_service(col, start=start, stop=stop, pasteurien=False, foreigner=True)
    jc_pa = jobs_count_per_service(col, start=start, stop=stop, pasteurien=True, foreigner=False)

    # Merge the three slices into {service: {'all':…, 'other':…, 'pasteur':…}}.
    job_counter = {}
    for p in jc:
        job_counter[p['_id']] = {'all': p['count']}
    for p in jc_fo:
        if p['_id'] in job_counter:
            job_counter[p['_id']]['other'] = p['count']
        else:
            job_counter[p['_id']] = {'other': p['count']}
    for p in jc_pa:
        if p['_id'] in job_counter:
            job_counter[p['_id']]['pasteur'] = p['count']
        else:
            job_counter[p['_id']] = {'pasteur': p['count']}

    # Same three slices for distinct users; values are collections whose
    # length is the user count.
    uc = user_count_per_service(col, start=start, stop=stop)
    uc_fo = user_count_per_service(col, start=start, stop=stop, foreigner=True, pasteurien=False)
    uc_pa = user_count_per_service(col, start=start, stop=stop, foreigner=False, pasteurien=True)

    user_counter = {}
    for s_name, count in uc.items():
        user_counter[s_name] = {'all': len(count)}
    for s_name, count in uc_fo.items():
        if s_name in user_counter:
            user_counter[s_name]['other'] = len(count)
        else:
            user_counter[s_name] = {'other': len(count)}
    for s_name, count in uc_pa.items():
        if s_name in user_counter:
            user_counter[s_name]['pasteur'] = len(count)
        else:
            user_counter[s_name] = {'pasteur': len(count)}

    # Build the tree from the program repository and render the report.
    mobyle = Mobyle()
    #repository_path = '/home/bneron/Mobyle/pasteur-programs/trunk/'
    repository_path = os.path.abspath('../data/programs')
    mobyle.scan_services(repository_path, job_counter, user_counter)

    with open('mobyle_statistics.html', 'w') as mob_html:
        mob_html.write(mobyle.to_html(start, stop))
| StarcoderdataPython |
5043317 | """
Segmentation Continuation Graph Components Wrapper Script
- Takes a graph of continuation edges as input
- Makes an id mapping that merges the connected continuations using global ids
"""
import synaptor as s
import argparse
parser = argparse.ArgumentParser()
# Inputs & Outputs
parser.add_argument("proc_url")
parser.add_argument("hashmax", type=int)
parser.add_argument("--timing_tag", default=None)
args = parser.parse_args()
args.proc_url = s.io.parse_proc_url(args.proc_url)
print(vars(args))
s.proc.tasks_w_io.seg_graph_cc_task(**vars(args))
| StarcoderdataPython |
9794737 | <filename>apps/deployment/urls.py
# @Time : 2019/2/27 14:42
# @Author : xufqing
# URL configuration for the deployment app: router-generated CRUD routes
# plus a few hand-wired action endpoints.
from django.urls import path,include
from deployment.views import project, deploy, applog
from rest_framework import routers

# SimpleRouter produces the standard list/detail routes for each viewset.
router = routers.SimpleRouter()
router.register(r'projects', project.ProjectViewSet, basename="projects")
router.register(r'deploy/records', deploy.DeployRecordViewSet, basename="deploy_record")

urlpatterns = [
    path(r'api/', include(router.urls)),
    # Action endpoints outside the router's CRUD conventions.
    path(r'api/deploy/excu/', deploy.DeployView.as_view(), name='deploy'),
    path(r'api/deploy/ver/', deploy.VersionView.as_view(), name='version'),
    path(r'api/deploy/applog/', applog.AppLogView.as_view(), name='applog'),
    path(r'api/project/copy/', project.ProjectCopy.as_view(), name='project_copy')
]
199820 | # -*- coding: utf-8 -*-
"""
Provide authentication using Django Web Framework
:depends: - Django Web Framework
Django authentication depends on the presence of the django framework in the
``PYTHONPATH``, the Django project's ``settings.py`` file being in the
``PYTHONPATH`` and accessible via the ``DJANGO_SETTINGS_MODULE`` environment
variable.
Django auth can be defined like any other eauth module:
.. code-block:: yaml
external_auth:
django:
fred:
- .*
- '@runner'
This will authenticate Fred via Django and allow him to run any execution
module and all runners.
The authorization details can optionally be located inside the Django database.
The relevant entry in the ``models.py`` file would look like this:
.. code-block:: python
class SaltExternalAuthModel(models.Model):
user_fk = models.ForeignKey(User, on_delete=models.CASCADE)
minion_or_fn_matcher = models.CharField(max_length=255)
minion_fn = models.CharField(max_length=255)
The :conf_master:`external_auth` clause in the master config would then look
like this:
.. code-block:: yaml
external_auth:
django:
^model: <fully-qualified reference to model class>
When a user attempts to authenticate via Django, Salt will import the package
indicated via the keyword ``^model``. That model must have the fields
indicated above, though the model DOES NOT have to be named
'SaltExternalAuthModel'.
"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import sys
# Import 3rd-party libs
from salt.ext import six
# pylint: disable=import-error
try:
import django
from django.db import connection # pylint: disable=no-name-in-module
HAS_DJANGO = True
except Exception as exc: # pylint: disable=broad-except
# If Django is installed and is not detected, uncomment
# the following line to display additional information
# log.warning('Could not load Django auth module. Found exception: %s', exc)
HAS_DJANGO = False
# pylint: enable=import-error
DJANGO_AUTH_CLASS = None
log = logging.getLogger(__name__)
__virtualname__ = "django"
def __virtual__():
    """Load this eauth module only when Django could be imported."""
    return __virtualname__ if HAS_DJANGO else False
def is_connection_usable():
    """Return True when the cached Django DB connection still answers pings."""
    try:
        connection.connection.ping()
    except Exception:  # pylint: disable=broad-except
        return False
    return True
def __django_auth_setup():
    """
    Prepare the connection to the Django authentication framework and cache
    the configured external-auth model class in ``DJANGO_AUTH_CLASS``.
    """
    if django.VERSION >= (1, 7):
        django.setup()

    global DJANGO_AUTH_CLASS
    if DJANGO_AUTH_CLASS is not None:
        return

    # Versions 1.7 and later of Django don't pull models until
    # they are needed. When using framework facilities outside the
    # web application container we need to run django.setup() to
    # get the model definitions cached.
    if "^model" in __opts__["external_auth"]["django"]:
        django_model_fullname = __opts__["external_auth"]["django"]["^model"]
        django_model_name = django_model_fullname.split(".")[-1]
        django_module_name = ".".join(django_model_fullname.split(".")[0:-1])
        django_auth_module = __import__(
            django_module_name, globals(), locals(), "SaltExternalAuthModel"
        )
        # Resolve the class by attribute lookup instead of eval(); identical
        # behaviour for a dotted reference, without executing a string built
        # from configuration.
        DJANGO_AUTH_CLASS = getattr(django_auth_module, django_model_name)
def auth(username, password):
    """
    Simple Django auth
    """
    # Make the Django project importable and point at its settings module.
    django_auth_path = __opts__["django_auth_path"]
    if django_auth_path not in sys.path:
        sys.path.append(django_auth_path)
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", __opts__["django_auth_settings"])

    __django_auth_setup()

    # Drop a stale DB connection so Django transparently reconnects.
    if not is_connection_usable():
        connection.close()

    import django.contrib.auth  # pylint: disable=import-error,3rd-party-module-not-gated,no-name-in-module

    user = django.contrib.auth.authenticate(username=username, password=password)
    if user is None:
        log.debug("Django authentication failed.")
        return False
    if user.is_active:
        log.debug("Django authentication successful")
        return True
    log.debug(
        "Django authentication: the password is valid but the account is disabled."
    )
    return False
def acl(username):
    """
    :param username: Username to filter for
    :return: Dictionary that can be slotted into the ``__opts__`` structure for
        eauth that designates the user associated ACL

    Database records such as:

    =========== ==================== =========
    username    minion_or_fn_matcher minion_fn
    =========== ==================== =========
    fred                             test.ping
    fred        server1              network.interfaces
    fred        server1              raid.list
    fred        server2              .*
    guru        .*
    smartadmin  server1              .*
    =========== ==================== =========

    Should result in an eauth config such as:

    .. code-block:: yaml

        fred:
          - test.ping
          - server1:
              - network.interfaces
              - raid.list
          - server2:
              - .*
        guru:
          - .*
        smartadmin:
          - server1:
            - .*
    """
    __django_auth_setup()

    if username is None:
        db_records = DJANGO_AUTH_CLASS.objects.all()
    else:
        db_records = DJANGO_AUTH_CLASS.objects.filter(user_fk__username=username)

    auth_dict = {}
    for a in db_records:
        if a.user_fk.username not in auth_dict:
            auth_dict[a.user_fk.username] = []

        if not a.minion_or_fn_matcher and a.minion_fn:
            # Function-only row: allowed on any minion.
            auth_dict[a.user_fk.username].append(a.minion_fn)
        elif a.minion_or_fn_matcher and not a.minion_fn:
            # Matcher-only row: all functions on matching minions.
            auth_dict[a.user_fk.username].append(a.minion_or_fn_matcher)
        else:
            # Matcher + function: merge into an existing {matcher: [fns]}
            # entry when one exists, otherwise append a new one.
            found = False
            for d in auth_dict[a.user_fk.username]:
                if isinstance(d, dict):
                    if a.minion_or_fn_matcher in six.iterkeys(d):
                        # Fixed: the original indexed the *list*
                        # ``auth_dict[username]`` with the matcher string
                        # here, which raised TypeError at runtime.
                        d[a.minion_or_fn_matcher].append(a.minion_fn)
                        found = True
            if not found:
                auth_dict[a.user_fk.username].append(
                    {a.minion_or_fn_matcher: [a.minion_fn]}
                )

    log.debug("django auth_dict is %s", auth_dict)
    return auth_dict
| StarcoderdataPython |
195223 | # -*- coding: utf-8 -*-
# Copyright (c) 2020-2021 <NAME>.
# All rights reserved.
# Licensed under BSD-3-Clause-Clear. See LICENSE file for details.
from django.conf import settings
from django.urls import path
from . import views
app_name = 'Handleiding'

# Landing page plus one named route per page declared in settings.
urlpatterns = [
    path('',
         views.HandleidingView.as_view(),
         name='begin')
]

urlpatterns.extend(
    path(pagina + '/', views.HandleidingView.as_view(), name=pagina)
    for pagina in settings.HANDLEIDING_PAGINAS
)

# end of file
| StarcoderdataPython |
255624 | # -*- coding: utf-8 -*-
# @author: zhangping
import json
import datetime as dt
from urllib import request
from urllib import parse
from sqlalchemy import create_engine, Column, String
from sqlalchemy.types import VARCHAR, Date, TIMESTAMP, Integer, Float, DECIMAL
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy import Table, MetaData
class HttpUtil:
    """Thin helpers around ``urllib.request`` for POSTing and downloading."""

    @classmethod
    def request_post(cls, url, params, headers=None, raw=False):
        """POST ``params`` to ``url`` and return the raw response bytes.

        With ``raw=True`` the params are sent as a JSON body, otherwise they
        are form-encoded.  ``params=None`` with ``raw=False`` sends no body.
        """
        # Copy so we never mutate the caller's dict (the original used a
        # shared mutable ``headers={}`` default that ``raw=True`` mutated,
        # leaking Content-Type into subsequent calls).
        headers = dict(headers) if headers else {}
        if raw:
            headers['Content-Type'] = 'application/json'
            params = json.dumps(params).encode('utf-8')
        elif params is not None:
            params = parse.urlencode(params).encode('utf-8')
        req = request.Request(url, headers=headers, data=params)  # POST
        with request.urlopen(req) as result:
            return result.read()

    @classmethod
    def request_down(cls, url, path, headers=None, params=None):
        """POST to ``url`` and write the response body to file ``path``."""
        headers = dict(headers) if headers else {}
        data = parse.urlencode(params or {}).encode('utf-8')
        req = request.Request(url, headers=headers, data=data)  # POST
        with request.urlopen(req) as f:
            body = f.read()
        # ``with`` closes the file even if the write fails (the original
        # leaked the handle on exceptions).
        with open(path, "wb") as fhandle:
            fhandle.write(body)
class DBUtil:
    """Small SQLAlchemy helpers: engine creation and DataFrame type mapping."""

    @classmethod
    def get_conn(cls, conn_str, echo=False):
        """Create a SQLAlchemy engine for ``conn_str``."""
        return create_engine(conn_str, echo=echo)

    @classmethod
    def get_df_type_dict(cls, df):
        """Map each DataFrame column to a SQL column type by its dtype."""
        mapping = {}
        for col, dtype in zip(df.columns, df.dtypes):
            kind = str(dtype)
            if "object" in kind:
                mapping[col] = VARCHAR(20)
            elif "float" in kind:
                mapping[col] = DECIMAL(20, 5)
            elif "date" in kind:
                mapping[col] = Date()
        return mapping
class IndexUtil:
    """Read/write helpers for index time-series rows.

    The storage table for each index is looked up in ``dict_index``; ORM
    mapping classes for those tables are generated on demand and memoised
    in ``__pools``.
    """

    @classmethod
    def get(cls, idx, date, value):
        # Canonical plain-dict form of one index observation.
        return {'idx': idx, 'date': date, 'value': value}

    @classmethod
    def set_idx(cls, items, idx):
        # Accepts a single item or a list; stamps every item with ``idx``.
        items = items if type(items) == list else [items]
        for item in items:
            item['idx'] = idx
        return items

    @classmethod
    def get_dict(cls, idx, conn):
        """Return the ``dict_index`` row describing ``idx`` as a dict, or None."""
        # int() cast keeps the interpolated value numeric (no SQL injection).
        sql_table = 'select * from dict_index where id={0}'
        record = conn.execute(sql_table.format(int(idx))).fetchone()
        return dict(record) if record is not None else None

    # Cache of dynamically generated ORM classes, keyed by table name.
    __pools = {}

    @classmethod
    def get_table_cls(cls, table_name):
        """Return (building and caching on first use) the ORM class mapped to
        ``table_name``."""
        def fun__init(self, idx, date, value):
            # __init__ for the generated class; ``id`` is assigned by the DB.
            self.id = None
            self.index_id = idx
            self.index_date = date
            self.index_value = value
            self.update_time = dt.datetime.now()

        def fun__repr(self):
            return "[id:" + str(self.id) + ", index_id:" + str(self.index_id) + ", index_date:" + str(
                self.index_date) + ", index_value=" + str(self.index_value) + "]"

        def fun_get_item(self):
            # Convert back to the plain-dict form used by callers.
            return {'id': self.id, 'idx': self.index_id, 'date': self.index_date, 'value': self.index_value}

        if cls.__pools.get(table_name) is None:
            # Build a declarative class bound to ``table_name`` at runtime.
            tb = type(table_name, (declarative_base(),),
                      {'__table__': Table(table_name, MetaData(),
                                          Column('id', Integer, primary_key=True),
                                          Column('index_id', Integer),
                                          Column('index_date', Date),
                                          Column('index_value', Float),
                                          Column('update_time', TIMESTAMP)
                                          ),
                       '__table_args__': ({'autoload': True},),
                       '__init__': fun__init,
                       '__repr__': fun__repr,
                       'get_item': fun_get_item
                       }
                      )
            tb.metadata = tb.__table__.metadata
            cls.__pools[table_name] = tb
        return cls.__pools.get(table_name)

    @classmethod
    def save_items(cls, items, conn, overwrite=False):
        """Persist items (single dict or list), resolving each index's storage
        table via ``dict_index``; commits once at the end."""
        items = items if type(items) == list or items is None else [items]
        if items is None or len(items) == 0:
            return
        table_dict = {}  # memoises idx -> table_name within this call
        session = sessionmaker(bind=conn)()
        for item in items:
            if table_dict.get(item['idx']) is None:
                _dict = cls.get_dict(item['idx'], conn)
                if _dict is not None and _dict['table_name'] is not None:
                    table_dict[item['idx']] = _dict['table_name']
            cls.__save_item(session, table_dict.get(item['idx']), item, overwrite=overwrite)
        session.commit()
        session.flush()

    @classmethod
    def __save_item(cls, session, table, item, overwrite=False):
        # Insert the row, or update it in place when it already exists and
        # ``overwrite`` is set; otherwise leave the existing row untouched.
        tb = cls.get_table_cls(table)
        result = session.query(tb).filter_by(index_id=item['idx'], index_date=item['date'].date())
        if result.count() == 0:
            session.add(tb(item['idx'], item['date'].date(), item['value']))
        elif overwrite:
            result.update({"index_value": item['value'], "update_time": dt.datetime.now()})

    @classmethod
    def get_last_date(cls, idx_id, conn):
        """Return the most recent ``index_date`` stored for ``idx_id``, or None."""
        sql_table = '''select table_name from dict_index where id={0}'''
        sql_last_date = '''select max(index_date) index_date from {0} where index_id={1}'''
        last_date = None
        result_table = conn.execute(sql_table.format(int(idx_id))).fetchone()
        if result_table is not None and len(result_table) == 1:
            table_name = str(result_table[0]).strip()
            last_date_result = conn.execute(sql_last_date.format(table_name, int(idx_id))).fetchone()
            if last_date_result is not None and len(last_date_result) == 1:
                last_date = last_date_result[0]
        return last_date
| StarcoderdataPython |
3415483 | """
@file
@brief Implements a base class which defines a pair of transforms
applied around a predictor to modify the target as well.
"""
from sklearn.base import TransformerMixin, BaseEstimator
class BaseReciprocalTransformer(BaseEstimator, TransformerMixin):
    """
    Base for transform which transforms the features
    and the targets at the same time. It must also
    return another transform which transforms the target
    back to what it was.

    Subclasses must override :meth:`transform` and :meth:`get_fct_inv`.
    """

    def __init__(self):
        # Initialise both scikit-learn bases explicitly (neither takes
        # arguments); kept explicit rather than via super() to preserve
        # the original initialisation order.
        BaseEstimator.__init__(self)
        TransformerMixin.__init__(self)

    def get_fct_inv(self):
        """
        Returns a trained transform which reverses the target
        after a predictor.
        """
        raise NotImplementedError(
            "This must be overwritten.")  # pragma: no cover

    def transform(self, X, y):
        """
        Transforms *X* and *y*.
        Returns transformed *X* and *y*.
        """
        raise NotImplementedError(
            "This must be overwritten.")  # pragma: no cover
| StarcoderdataPython |
100083 | <filename>reopening-tiers/scrape.py
"""
Download the status of each county according to California's tier-based reopening framework.
Source: https://covid19.ca.gov/safer-economy/
"""
import pytz
import pathlib
import pandas as pd
from datetime import datetime
# Pathing
THIS_DIR = pathlib.Path(__file__).parent.absolute()
DATA_DIR = THIS_DIR / "data"
def main():
    """
    Download the Tableau export as a CSV.
    """
    # Fetch the county status feed.
    source_url = "https://covid19.ca.gov/countystatus.json"
    frame = pd.read_json(source_url)

    # Write the rolling "latest" copy.
    frame.to_csv(DATA_DIR / "latest.csv", index=False)

    # Write a datestamped copy (Pacific time) alongside it.
    pacific = pytz.timezone("America/Los_Angeles")
    stamp = datetime.now(pacific).date()
    frame.to_csv(DATA_DIR / f"{stamp}.csv", index=False)
if __name__ == '__main__':
main()
| StarcoderdataPython |
8117192 | """Module to define qp distributions that inherit from scipy distributions
Notes
-----
In the qp distribtuions the last axis in the
input array shapes is reserved for pdf parameters.
This is because qp deals with numerical representations
of distributions, where some of the input parameters consist
of arrays of values for each pdf.
`scipy.stats` assumes that all input parameters scalars for each pdf.
To ensure that `scipy.stats` based distributions behave the same
as `qp` distributions we are going to insure that the all input
variables have shape either (npdf, 1) or (1)
"""
import numpy as np
from qp.test_data import LOC, SCALE, TEST_XVALS
from qp.factory import stats
# pylint: disable=no-member
# Register canned test configurations on scipy's ``norm`` generator so the
# qp test harness can discover them: a scalar case (with sampling), a
# "shifted" duplicate, and a two-pdf multi-dimensional case.
stats.norm_gen.test_data = dict(norm=dict(gen_func=stats.norm, ctor_data=dict(loc=LOC, scale=SCALE),\
                                          test_xvals=TEST_XVALS, do_samples=True),
                                norm_shifted=dict(gen_func=stats.norm, ctor_data=dict(loc=LOC, scale=SCALE),\
                                                  test_xvals=TEST_XVALS),
                                norm_multi_d=dict(gen_func=stats.norm,\
                                                  ctor_data=dict(loc=np.array([LOC, LOC]),\
                                                                 scale=np.array([SCALE, SCALE])),\
                                                  test_xvals=TEST_XVALS, do_samples=True))
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.