id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
9741590 | <filename>src/genie/libs/parser/iosxe/tests/ShowIpNatTranslations/cli/equal/golden_output_1_expected.py
# Golden expected output for the ShowIpNatTranslations parser unit test.
# Keys mirror the parser schema: vrf -> name -> index -> translation fields.
# NOTE(review): index 2 uses "--" (two dashes) for outside_global while every
# other empty field is "---" -- confirm this matches the device's raw output
# before "fixing" it, since the test compares byte-for-byte.
expected_output = {
    "vrf": {
        "default": {
            "index": {
                1: {
                    "inside_global": "10.1.7.2",
                    "inside_local": "192.168.1.95",
                    "outside_global": "---",
                    "outside_local": "---",
                    "protocol": "---",
                },
                2: {
                    "inside_global": "10.1.7.200",
                    "inside_local": "192.168.1.89",
                    "outside_global": "--",
                    "outside_local": "---",
                    "protocol": "---",
                },
            }
        },
        "number_of_translations": 2,
    }
}
| StarcoderdataPython |
11295014 | # code from Learning Card 08 - Rainbow HAT
# import the rainbowhat and signal modules
import rainbowhat
import signal
# Pressing button A turns the RGB lights red (full red, no green/blue).
@rainbowhat.touch.A.press()
def touch_a(channel):
    rainbowhat.lights.rgb(1, 0, 0)


# Releasing any button turns all the RGB lights off.
@rainbowhat.touch.release()
def release(channel):
    rainbowhat.lights.rgb(0, 0, 0)


# Block the main thread forever so the button callbacks keep firing.
signal.pause()
| StarcoderdataPython |
3260783 | import unittest
import qgate
import numpy as np
# Only define the CUDA memory-store tests when the CUDA runtime is built in.
if hasattr(qgate.simulator, 'cudaruntime') :
    class TestMemstore(unittest.TestCase) :
        """Exercises chunk allocation/reallocation in the CUDA memory store.

        The test drives the runtime through allocate / delete / reallocate
        cycles; the call order matters, so the bodies are left untouched.
        """

        def set_mgpu_preference(self) :
            """Configure a small multi-GPU store (helper, not used by setUp)."""
            # max chunk size, 2 MB.
            max_po2idx_per_chunk = 21
            # device memory per memstore
            memory_store_size = 5 * (1 << 20)
            # device ids.
            device_ids = [0] * 8
            # initialize
            qgate.simulator.cudaruntime.set_preference(device_ids, max_po2idx_per_chunk, memory_store_size)

        def term_module(self) :
            """Tear down the CUDA runtime module state."""
            qgate.simulator.cudaruntime.module_finalize()

        def setUp(self) :
            # using fp64, 16 MB.
            self.n_qregs = 20
            self.term_module()

        def tearDown(self) :
            self.term_module()
            qgate.simulator.cudaruntime.reset_preference()

        def run_sim(self, circuit) :
            """Run a circuit on the CUDA simulator (helper, unused here)."""
            sim = qgate.simulator.cuda(isolate_circuits=False)
            sim.run(circuit)
            return sim

        def test_memstore(self) :
            qgate.simulator.cudaruntime.module_finalize()
            qgate.simulator.cudaruntime.set_preference(device_ids = [ 0 ], max_po2idx_per_chunk = 29, memory_store_size = (1 << 31))
            qstates = qgate.simulator.cudaruntime.create_qubit_states(np.float32)
            proc = qstates.processor
            # internally allocate 4 chunks
            proc.initialize_qubit_states(qstates, 28)
            # delete internal buffer
            qstates.delete()
            qstates = qgate.simulator.cudaruntime.create_qubit_states(np.float32)
            proc = qstates.processor
            # purging cache, and reallocate chunks.
            proc.initialize_qubit_states(qstates, 25)
            qstates.delete()
            qstates = qgate.simulator.cudaruntime.create_qubit_states(np.float32)
            proc = qstates.processor
            # internally allocate 4 chunks
            proc.initialize_qubit_states(qstates, 28)
            # delete internal buffer
            qstates.delete()
            qgate.simulator.cudaruntime.module_finalize()
            # The assertions are implicit: reaching this point without a CUDA
            # error is the pass condition.
            self.assertTrue(True)


if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
6518220 | <filename>program/audio_command.py
#!/usr/bin/env python
from robot_cmd_ros import *
# Python 2 robot voice-command loop (robot_cmd_ros). Recognised Italian
# keywords: avanti=forward, dietro=backward, sinistra=left, destra=right,
# esci=exit. Any other non-empty utterance triggers a "bop" sound.
begin()
bip()
wait()
run = True
while run:
    a = asr();
    if (a!=''):
        print a
    if ('avanti' in a):
        forward();
    elif ('dietro' in a):
        backward();
    elif ('sinistra' in a):
        left();
    elif ('destra' in a):
        right();
    elif ('esci' in a):
        run = False;
    elif (a!=''):
        # Unrecognised speech: acknowledge with a different beep.
        bop()
    wait()
end()
| StarcoderdataPython |
5090565 | <reponame>prachetos/goibibo-hackathon2016
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
    """Auto-generated migration creating the CheckInPhotoDB model.

    NOTE(review): DateCreated's default is a datetime frozen at generation
    time (2016-10-14) -- an artefact of how makemigrations captures dynamic
    defaults.  Applied migrations must not be edited.
    """

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='CheckInPhotoDB',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('Userid', models.IntegerField()),
                ('AWSPhotoUrl', models.CharField(max_length=300)),
                ('Email', models.CharField(max_length=60)),
                ('Mobile', models.CharField(max_length=20)),
                ('DateCreated', models.DateTimeField(default=datetime.datetime(2016, 10, 14, 16, 10, 22, 722109), null=True, blank=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| StarcoderdataPython |
4829881 | <reponame>KyleKing/dash_charts
"""Example Bulma layout.
See documentation on Bulma layouts: https://bulma.io/documentation/layout/tiles/
"""
import dash_html_components as html
import plotly.express as px
from implements import implements
from dash_charts.utils_app import STATIC_URLS, AppBase, AppInterface
from dash_charts.utils_fig import min_graph
from dash_charts.utils_helpers import parse_dash_cli_args
@implements(AppInterface)
class BulmaStylingDemo(AppBase):
    """Demo laying out a 3 column grid with Bulma where.

    - the first column has three tiles
    - the middle column is half the full screen width
    - the tiles will wrap on smaller screens
    """

    name = 'Example Bulma Styling Demo'
    """Application name"""

    external_stylesheets = [STATIC_URLS['bulmaswatch-flatly']]
    """List of external stylesheets. Default is minimal Dash CSS. Only applies if app argument not provided."""

    def initialization(self) -> None:
        """Initialize ids with `self.register_uniq_ids([...])` and other one-time actions."""
        super().initialization()
        # No real components need ids; a placeholder id satisfies the base class.
        self.register_uniq_ids(['---'])

    def create_elements(self) -> None:
        """Initialize the charts, tables, and other Dash elements."""
        ...

    def return_layout(self) -> dict:
        """Return Dash application layout.

        Returns:
            dict: Dash HTML object

        """
        # Bulma tile hierarchy: ancestor > parent (vertical, 3/12 wide) > children.
        return html.Div(
            className='section', children=[
                html.Div(
                    className='tile is-ancestor', children=[
                        html.Div(
                            className='tile is-parent is-vertical is-3', children=[
                                html.Article(
                                    className='tile is-child notification', children=[
                                        html.P(className='title', children='Top Vertical Tile'),
                                        html.P(className='subtitle', children='Notification class for grey background'),
                                        html.P(
                                            className='subtitle',
                                            children='Could also add is-info, is-warning, etc.',
                                        ),
                                    ],
                                ),
                                html.Article(
                                    className='tile is-child', children=[
                                        html.P(className='title', children='Vertical...'),
                                        html.P(className='subtitle', children='(Top tile)'),
                                        min_graph(
                                            figure=px.scatter(
                                                px.data.iris(), x='sepal_width', y='sepal_length', height=200,
                                            ),
                                        ),
                                    ],
                                ),
                                html.Article(
                                    className='tile is-child', children=[
                                        html.P(className='title', children='...tiles'),
                                        html.P(className='subtitle', children='(Bottom tile)'),
                                        min_graph(
                                            figure=px.scatter(
                                                px.data.iris(), x='sepal_width', y='sepal_length', height=200,
                                            ),
                                        ),
                                    ],
                                ),
                            ],
                        ),
                        # Middle column: empty chart spanning half the width.
                        min_graph(
                            className='tile is-child is-6 is-block-desktop',
                            figure={},
                        ),
                        html.Article(
                            className='tile is-child is-3 is-block-desktop', children=[
                                html.P(className='title', children='A Small Chart'),
                                min_graph(
                                    figure=px.scatter(
                                        px.data.iris(),
                                        x='sepal_width',
                                        y='sepal_length',
                                        height=350,
                                    ),
                                ),
                                html.P(className='subtitle', children='An Image'),
                                html.Img(src='https://media.giphy.com/media/JGQe5mxayVF04/giphy.gif'),
                            ],
                        ),
                    ],
                ),
            ],
        )

    def create_callbacks(self) -> None:
        """Create Dash callbacks."""
        ...  # No callbacks necessary for this simple example
# Module-level bootstrap: build the app so either `python thisfile.py`
# (dev server) or a WSGI server importing FLASK_HANDLE can serve it.
instance = BulmaStylingDemo
app = instance()
app.create()
if __name__ == '__main__':
    app.run(**parse_dash_cli_args())
else:
    FLASK_HANDLE = app.get_server()
| StarcoderdataPython |
# Using isinstance() and issubclass(), check the result for instances of
# Pracownik (Employee) and Menadzer (Manager) against the classes
# Osoba (Person), Pracownik and Menadzer.
class Osoba:
    """A person identified by a first name (imie) and last name (nazwisko)."""

    def __init__(self, imie, nazwisko):
        self.imie = imie
        self.nazwisko = nazwisko

    def przedstaw_sie(self):
        """Return the introduction string '<first> <last>'."""
        return f"{self.imie} {self.nazwisko}"
class Pracownik(Osoba):
    """An employee: a person with a salary (pensja)."""

    def __init__(self, imie, nazwisko, pensja):
        # Delegate name initialisation to the base class.
        super().__init__(imie, nazwisko)
        self.pensja = pensja

    def przedstaw_sie(self):
        """Return '<first> <last> i zarabiam <salary>'."""
        return f"{self.imie} {self.nazwisko} i zarabiam {self.pensja}"
class Menadzer(Pracownik):
    """A manager: same data as an employee, manager-specific introduction."""

    def przedstaw_sie(self):
        return f"{self.imie} {self.nazwisko}, jestem menadżerem i zarabiam {self.pensja}"
# Demonstration: create an employee and a manager, then probe the class
# hierarchy.  isinstance() is True for the object's class and all ancestors;
# issubclass() is directional (subclass -> superclass only).
jozek = Pracownik("Józek", "Bajka", 2000)
adrian = Menadzer("Adrian", "Mikulski", 12000)
print(jozek.przedstaw_sie())
print(adrian.przedstaw_sie())
print()
print('isinstance(jozek, Osoba):', isinstance(jozek, Osoba))
print('isinstance(jozek, Pracownik):', isinstance(jozek, Pracownik))
print('isinstance(jozek, Menadzer):', isinstance(jozek, Menadzer))
print()
print('isinstance(adrian, Osoba):', isinstance(adrian, Osoba))
print('isinstance(adrian, Pracownik):', isinstance(adrian, Pracownik))
print('isinstance(adrian, Menadzer):', isinstance(adrian, Menadzer))
print()
print('issubclass(Pracownik, Osoba):', issubclass(Pracownik, Osoba))
print('issubclass(Menadzer, Osoba):', issubclass(Menadzer, Osoba))
print('issubclass(Osoba, Pracownik):', issubclass(Osoba, Pracownik))
print('issubclass(Osoba, Menadzer):', issubclass(Osoba, Menadzer))
print('issubclass(Menadzer, Pracownik):', issubclass(Menadzer, Pracownik))
print('issubclass(Pracownik, Menadzer):', issubclass(Pracownik, Menadzer))
| StarcoderdataPython |
3348795 | # -*- coding: utf-8 -*-
import math
# Prime-number check.
def is_prime(n):
    """Return True if ``n`` is prime.

    Trial division by 2..sqrt(n).  Fixes the original script, which ran an
    O(n) loop and an O(sqrt(n)) loop back to back (printing the verdict
    twice) and reported 0 and 1 as prime because the for/else `else` branch
    runs when the range is empty.
    """
    if n < 2:
        return False
    for i in range(2, int(math.sqrt(n)) + 1):
        if n % i == 0:
            return False
    return True


if __name__ == "__main__":
    num = int(input("Enter the number "))
    if is_prime(num):
        print("The number is a prime")
    else:
        print("The number is not a prime")
| StarcoderdataPython |
186005 | import json
from pathlib import Path
import pytest
from cb_backend.eye.models import EventSessionStatus
@pytest.fixture(scope="module")
def load_schemas():
    """Load the JSON schema fixtures once per test module.

    Returns:
        list[dict]: parsed schemas in fixture-file order.
    """
    schemas = []
    for file_name in ["fixtures/schema.json", "fixtures/schema_2.json"]:
        fixture_path = Path(__file__).parent / file_name
        # read_text() closes the file; the original open() leaked the handle.
        schemas.append(json.loads(fixture_path.read_text()))
    return schemas
@pytest.mark.django_db
def test_validate_event_session(create_event_with_session, load_schemas):
    """A payload matching the first schema validates and is marked VALIDATED."""
    payload = {
        "host": "www.consumeraffairs.com",
        "path": "/",
        "element": "chat bubble",
    }
    event_session = create_event_with_session(payload, load_schemas[0])

    assert event_session.validate_event() is True
    assert event_session.status == EventSessionStatus.VALIDATED
@pytest.mark.django_db
def test_fail_invalid_data_event_session(create_event_with_session, load_schemas):
    """A payload with an unexpected key fails validation and is REJECTED."""
    payload = {
        "host": "www.consumeraffairs.com",
        "path": "/",
        "element_1": "chat bubble",
    }
    event_session = create_event_with_session(payload, load_schemas[0])

    assert event_session.validate_event() is False
    assert event_session.status == EventSessionStatus.REJECTED
@pytest.mark.django_db
def test_validate_event_session_multiple_schema(create_event_with_session, load_schemas):
    """With several schemas supplied, a payload matching any of them validates."""
    payload = {
        "host": "www.consumeraffairs.com",
        "path": "/",
        "form": {
            "first_name": "John",
            "last_name": "Doe",
        },
    }
    event_session = create_event_with_session(payload, load_schemas)

    assert event_session.validate_event() is True
    assert event_session.status == EventSessionStatus.VALIDATED
    assert event_session.payload_error is None
| StarcoderdataPython |
3483121 | <reponame>jehboyes/finance_manager
"""Luminate commercial income table"""
from finance_manager.database.replaceable import ReplaceableObject as o
sql = f"""
SELECT c.directorate_id, s.acad_year, s.set_cat_id, c.costc + ' ' + x.description as description, x.amount
FROM
(
--Courses
SELECT set_id, course_name as description, total as amount FROM v_input_inc_courses
WHERE total <> 0
UNION ALL
--Other
SELECT set_id, i.description, SUM(amount) as amount
FROM v_input_inc_other i INNER JOIN fs_account a on a.account = i.account
WHERE a.summary_code = 104
GROUP BY set_id, i.description
Having SUM(amount) <> 0
) x
INNER JOIN f_set s ON x.set_id = s.set_id
INNER JOIN fs_cost_centre c ON c.costc = s.costc
WHERE s.surpress = 0
"""
def _view():
return o("v_luminate_commercial", sql)
| StarcoderdataPython |
4909650 | <reponame>michaelberks/madym_python
'''
Module for working with the active uptake and efflux model (AUEM). This has
a bi-exponential IRF, and uses the dibem model to compute a forward model.
All times are assumed to be in minutes.
The AIF must be a QbiPy AIF object (see dce_aif). However if you have a set of AIF values (Ca_t)
and associated dynamic times (t), it is trivial to create an AIF object:
aif = dce_aif.Aif(times = t, base_aif=Ca_t, aif_type=ARRAY)
The remaining model parameters can either be input as scalars, or 1D numpy arrays. The two forms
can be mixed, but any paramaters set as arrays must be the same length.
Code for converting AUEM parameters to DIBEM form is defined below.
---------------------- AUEM conversions ----------------------------------
Concentration model equation
Cl_t = F_p.(E_i.exp(-t/Ti) / (1 - T_e/T_i) + (1 - E_i/(1 - T_e / T_i)).exp(-t/Te)) * Cp_t
Where
Cp_t = (f_a.Ca_t + f_v.Cv_t) / (1 - Hct)
F_p - flow plasma rate
T_e = v_ecs / (F_p + k_i) - extracellular mean transit time
T_i = vi / kef - intracellular mean transit time
E_i = ki / (Fp + ki) - the hepatic uptake fraction
f_a - the arterial fraction
f_v = 1 - fa - estimate of hepatic portal venous fraction
v_i = 1 - v_ecs - estimate of intracellular volume
See paper: Invest Radiol. 2017 Feb52(2):111-119. doi: 10.1097/RLI.0000000000000316.
"Quantitative Assessment of Liver Function Using Gadoxetate-Enhanced Magnetic Resonance Imaging:
Monitoring Transporter-Mediated Processes in Healthy Volunteers"
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>.
'''
import warnings
import numpy as np
from QbiPy.dce_models import dce_aif, dibem
from QbiPy import helpers
#
#-------------------------------------------------------------------------------
def params_to_DIBEM(F_p, v_ecs, k_i, k_ef, using_Fp=False):
    '''compute the derived parameters for the AUEM given input physiological parameters
    [K_pos, K_neg, F_pos, F_neg] = active_params_phys_to_model(F_p, v_e, k_i, k_ef)

    Inputs:
        F_p - flow plasma rate

        v_ecs - extra-cellular space (v_i = 1 - v_ecs)

        k_i - active-uptake rate

        k_ef - efflux rate

        using_Fp - if True, return (F_p, E_pos) instead of the scaled
            fractions (F_p*E_pos, F_p*(1-E_pos)) as the first two outputs

    Outputs:
        F_pos, F_neg - scalars in model IRF

        K_pos, K_neg - exponents in model IRF

    Note: scalar inputs and 1D-array inputs may be mixed; shapes are
    reconciled by helpers.check_param_shape (assumed to broadcast -- confirm).
    '''
    _, F_p, v_ecs, k_i, k_ef = helpers.check_param_shape(
        F_p=F_p, v_ecs=v_ecs, k_i=k_i, k_ef=k_ef
    )

    #Compute derived parameters from input parameters
    T_e = v_ecs / (F_p + k_i) # extracellular mean transit time
    v_i = 1 - v_ecs # - etsimate of intracellular volume
    T_i = v_i / k_ef # intracellular mean transit time
    E_i = k_i / (F_p + k_i) # the hepatic uptake fraction

    #This can also be precomputed
    E_pos = E_i / (1 - T_e/T_i)

    K_neg = 1 / T_e
    K_pos = 1 / T_i

    if using_Fp:
        F_pos = F_p
        F_neg = E_pos
    else:
        F_pos = F_p*E_pos
        F_neg = F_p*(1 - E_pos)

    return F_pos, F_neg, K_pos, K_neg,
#
#-------------------------------------------------------------------------------
def params_from_DIBEM(F_pos, F_neg, K_pos, K_neg,
                      using_Fp=False, warn_mode='warn'):
    '''
    Starting with the derived parameters fitted in the IRF-3 model, convert
    to the physiological parameters F_p, v_ecs, k_i and k_ef.

    [F_p, v_ecs, k_i, k_ef] = active_params_model_to_phys(K_pos, K_neg, F_pos, F_neg)

    Inputs:
        F_pos, F_neg - scalars in 2CXM model IRF

        K_pos, K_neg - exponents in 2CXM model IRF

        using_Fp - if True, F_pos is F_p itself and F_neg is E_pos

        warn_mode - 'warn', 'quiet' or 'error': behaviour when voxels with
            K_pos > K_neg must be switched (see below)

    Outputs:
        F_p - flow plasma rate

        v_ecs - extra-cellular space (v_i = 1 - v_ecs)

        k_i - active-uptake rate

        k_ef - efflux rate

    See paper: Invest Radiol. 2017 Feb52(2):111-119. doi: 10.1097/RLI.0000000000000316.
    "Quantitative Assessment of Liver Function Using Gadoxetate-Enhanced Magnetic Resonance Imaging"

    NOTE(review): the K_pos/K_neg switch below mutates the arrays returned by
    helpers.check_param_shape in place; if that helper can return its inputs
    unchanged, the caller's arrays are modified -- confirm.
    '''
    _, F_pos, F_neg, K_pos, K_neg = helpers.check_param_shape(
        F_pos=F_pos, F_neg=F_neg, K_pos=K_pos, K_neg=K_neg
    )

    # First get F_p from F_pos and F_neg
    if not using_Fp:
        F_p = F_pos + F_neg
        E_pos = F_pos / F_p
    else:
        F_p = F_pos
        E_pos = F_neg

    # Derivation is only valid for K_pos < K_neg. If not, swapping
    # F_pos, K_pos for F_neg, K_neg will generate valid active parameters (and
    # an identical concentration time series due to symmetry of the
    # bi-exponential). User defines whether to swap with warning, quietly, or
    # force an error if invalid voxels are found.
    swap_idx = K_pos > K_neg
    if np.any(swap_idx):
        if warn_mode == 'warn':
            warnings.warn(
                f'K_pos > K_neg for {np.sum(swap_idx)} of {swap_idx.size} voxels. Switching these voxels')
        elif warn_mode == 'error':
            # BUG FIX: the original message's doubled single-quotes rendered
            # as "warn_mode = quiet or warn to..." with no quotes at all.
            raise RuntimeError(
                f'K_pos > K_neg for {np.sum(swap_idx)} of {swap_idx.size} voxels. '
                "Run with warn_mode = 'quiet' or 'warn' to switch these voxels.")
        elif warn_mode == 'quiet':
            # do nothing
            pass
        else:
            # BUG FIX: the original string lacked the f-prefix, so
            # "{warn_mode}" was printed literally instead of the value.
            raise ValueError(
                f"Warn mode {warn_mode} not recognised. Must be 'warn', 'quiet' or 'error'")

    if not using_Fp:
        # F_p doesn't change; it is the sum of F_pos and F_neg.
        # E_pos needs remaking from F_neg for the swapped indices.
        E_pos[swap_idx] = F_neg[swap_idx] / F_p[swap_idx]
    else:
        # F_p doesn't change, E_pos needs negating
        E_pos[swap_idx] = 1 - E_pos[swap_idx]

    # K_pos and K_neg are just a straight swap
    K_pos_swap = K_pos[swap_idx]
    K_pos[swap_idx] = K_neg[swap_idx]
    K_neg[swap_idx] = K_pos_swap

    # Now derive Te, Ti and Ei
    Te = 1 / K_neg
    Ti = 1 / K_pos
    Ei = E_pos * (1 - Te / Ti)

    # Can solve for k_i in terms of F_p and Ei
    k_i = Ei * F_p / (1 - Ei)

    # Solve for v_ecs in terms of Te, F_p and k_i
    v_ecs = Te * (F_p + k_i)

    # Finally solve for k_ef in terms of v_ecs and Ti
    k_ef = (1 - v_ecs) / Ti

    return F_p, v_ecs, k_i, k_ef
#
#---------------------------------------------------------------------------------
def concentration_from_model(aif:dce_aif.Aif,
    Fp: np.array, PS: np.array, Ve: np.array, Vp: np.array,
    f_a:np.array, tau_a: np.array, tau_v:np.array)->np.array:
    '''
    Compute concentration time-series of the AUEM from input
    parameters. Note instead of re-implementing a bi-exponential
    model here, we call the DIBEM module to convert the AUEM
    params to the bi-exponential parameters, and then call
    DIBEM's concentration_from_model.

    NOTE(review): the parameter names PS, Ve, Vp appear copy-pasted from the
    2CXM module -- they are forwarded to params_to_DIBEM as v_ecs, k_i, k_ef
    respectively (see that function's signature). Renaming would break
    keyword callers, so only documented here; confirm intended semantics.

    Parameters:
        aif (Aif object, n_t): object to store and resample arterial input function values (1 for each time point)

        Fp: np.array (1D n_samples)
            flow plasma rate

        PS: np.array (1D n_samples)
            passed as v_ecs, extra-cellular volume fraction

        Ve: np.array (1D n_samples)
            passed as k_i, uptake rate constant

        Vp: np.array (1D n_samples)
            passed as k_ef, efflux rate constant

        f_a: np.array (1D n_samples)
            Arterial mixing fraction, final plasma input is Cp(t) = f_a*Ca(t) + (1-f_a)*Cv(t)

        tau_a: np.array (1D n_samples)
            offset times of arrival for concentration for Ca_t

        tau_v: np.array (1D n_samples)
            offset times of arrival for concentration for Cv_t

    Returns:
       C_model (2D numpy array, n_t x n_vox) - Model concentrations at each time point for each
       voxel computed from model parameters
    '''
    #We derive the params in a standalone function now, this takes care of
    #checks on FP, PS to choose the best form of derived parameters
    F_pos, F_neg, K_pos, K_neg = params_to_DIBEM(
        Fp, PS, Ve, Vp)

    C_t = dibem.concentration_from_model(
        aif, F_pos, F_neg, K_pos, K_neg, f_a, tau_a, tau_v)

    return C_t
#
#---------------------------------------------------------------------------
def construct_LLS_matrix(Ctis_t:np.array, aif:dce_aif.Aif, f_a:float, tau_a:float, tau_v:float):
    '''
    Make a matrix for linear least-squares (LLS) solving for a single tissue time-series.
    Thin wrapper: the AUEM shares the generic bi-exponential form, so this
    delegates directly to dibem.construct_LLS_matrix.

    Inputs:
        Ctis_t: np.array (num_times)
            time-series of signal derived CA concentration

        aif (Aif object):
            object to store and resample arterial input function values (1 for each time point)

        f_a: float
            Arterial mixing fraction, final plasma input is Cp(t) = f_a*Ca(t) + (1-f_a)*Cv(t)

        tau_a: float
            offset times of arrival for concentration for Ca_t

        tau_v: float
            offset times of arrival for concentration for Cv_t

    Outputs:
        A_:np.array (num_times x 3)
            Matrix for LLS solver collapsed column major to a single data vector
    '''
    return dibem.construct_LLS_matrix(Ctis_t, aif, f_a, tau_a, tau_v)
#
#---------------------------------------------------------------------------
def solve_LLS(Ctis_t:np.array, aif:dce_aif.Aif, f_a:float, tau_a:float, tau_v:float):
    '''
    Solve model parameters for a single tissue time-series using LLS.

    WARNING: incomplete -- k_i and k_ef are hard-coded to 0 (see TODO below);
    only F_p and v_ecs are actually derived from the linear coefficients.

    Inputs:
        Ctis_t: np.array (num_times)
            time-series of signal derived CA concentration

        aif (Aif object, num_times):
            object to store and resample arterial input function values (1 for each time point)

        f_a: float
            Arterial mixing fraction, final plasma input is Cp(t) = f_a*Ca(t) + (1-f_a)*Cv(t)

        tau_a: float
            offset times of arrival for concentration for Ca_t

        tau_v: float
            offset times of arrival for concentration for Cv_t

    Outputs:
        F_p, v_ecs, k_i, k_ef : float
            AUEM model parameters (k_i, k_ef currently always 0)
    '''
    A_ = construct_LLS_matrix(Ctis_t, aif, f_a, tau_a, tau_v)
    C_ = Ctis_t
    # Least-squares solve for the linearised coefficient vector B_.
    B_ = np.linalg.lstsq(A_, C_, rcond=None)[0]
    F_p = B_[3]
    # Back out time constants from the linear coefficients.
    T = B_[2] / (B_[0]*F_p)
    T_e = B_[1] / B_[0] - T
    T_p = 1 / (B_[0]*T_e)
    v_ecs = T_p * F_p
    #TODO: derive k_i and k_ef from the remaining coefficients
    k_i = 0
    k_ef = 0
    return F_p, v_ecs, k_i, k_ef
1759367 | __copyright__ = """\
(c). Copyright 2008-2020, Vyper Logix Corp., All Rights Reserved.
Published under Creative Commons License
(http://creativecommons.org/licenses/by-nc/3.0/)
restricted to non-commercial educational use only.,
http://www.VyperLogix.com for details
THE AUTHOR VYPER LOGIX CORP DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
USE AT YOUR OWN RISK.
"""
import os,sys
from vyperlogix import misc
from vyperlogix.misc import _utils
from vyperlogix.process import Popen
from vyperlogix.hash.lists import HashedFuzzyLists2
from vyperlogix.classes import CooperativeClass
class Shell(CooperativeClass.Cooperative):
    """Python 2 helper: runs a shell command via vyperlogix Popen, streaming
    output lines to an optional callback.

    NOTE(review): ``self.__callback__ = callback`` in __init__ shadows the
    ``__callback__`` *method* defined below on every instance (names with two
    trailing underscores are not name-mangled), so the method body -- with its
    debug print and exception guard -- appears to be dead code. Confirm intent
    before refactoring.
    """
    def __init__(self,command,callback=None,isWait=False,isExit=True,isDebugging=False,onExit=None):
        self.__command__ = command
        self.__callback__ = callback  # shadows the method of the same name (see class note)
        self.__onExit__ = onExit
        self.__isDebugging__ = isDebugging if (misc.isBoolean(isDebugging)) else False
        print 'DEBUG: %s' % (self.__command__)
        # Run immediately on construction.
        self.__shell__(self.__command__,isWait=isWait,isExit=isExit)

    def __callback__(self,data):
        # NOTE(review): unreachable via instances once __init__ has run (see class note).
        if (self.__isDebugging__):
            print '<<%s>>' % (data)
        if (callable(self.__callback__)):
            try:
                self.__callback__(data)
            except Exception as ex:
                info_string = _utils.formattedException(details=ex)
                print >> sys.stderr, info_string

    def __shell__(self,cmd,isExit=True,isWait=False,isVerbose=True):
        # Prime the callback with None before any output arrives.
        if (callable(self.__callback__)):
            try:
                self.__callback__(None)
            except Exception as ex:
                info_string = _utils.formattedException(details=ex)
                print >> sys.stderr, info_string
        _isExit=isExit
        _isWait=isWait
        if (self.__isDebugging__):
            print '%s.1 --> cmd=%s, isExit=%s, isWait=%s, isVerbose=%s' % (misc.funcName(),cmd,_isExit,_isWait,isVerbose)
        s = Popen.Shell(cmd, shell=None, env=None, isExit=_isExit, isWait=_isWait, isVerbose=isVerbose, fOut=self.__callback__, onExit=self.__onExit__)
        return

    # Read-only properties built with the old locals()-dict property idiom.
    def command():
        doc = "get the command."
        def fget(self):
            return self.__command__
        return locals()
    command = property(**command())

    def callback():
        doc = "get the callback."
        def fget(self):
            return self.__callback__
        return locals()
    callback = property(**callback())
################################################################################################

if (__name__ == "__main__"):
    # Smoke test with debug output enabled.
    # NOTE(review): Shell.__init__ requires a positional `command` argument,
    # so this call as written raises TypeError -- confirm before use.
    s = Shell(isDebugging=True)
    print 'The command is "%s".' % (s.command)
399239 | import chainer.functions.pooling as P
import numpy as np
from helpers import calculate_cost
def test_max_pooling():
    """MaxPooling2D 2x2/stride-2 over 100x100: check flops, memory and params."""
    x = np.random.randn(1, 3, 100, 100).astype(np.float32)
    f = P.max_pooling_2d.MaxPooling2D(np.int64(2), np.int64(2),
                                      np.int64(0), cover_all=True)
    flops, mread, mwrite, params = calculate_cost(f, [x])

    # flops is (output size) * (inside window operation)
    # when window size is 2x2, max operation is applied 2x2-1 times.
    assert flops == (3 * 50 * 50) * (2 * 2 - 1)
    assert mread == x.size
    assert mwrite == (3 * 50 * 50)
    assert params == {'k': 2, 's': 2, 'p': 0}
    # np.int64 inputs must be reported back as plain python ints.
    assert type(params['k']) is int
    assert type(params['s']) is int
    assert type(params['p']) is int
def test_average_pooling():
    """AveragePooling2D 2x2/stride-2 over 100x100: check flops, memory and params."""
    x = np.random.randn(1, 3, 100, 100).astype(np.float32)
    f = P.average_pooling_2d.AveragePooling2D(np.int64(2), np.int64(2),
                                              np.int64(0), cover_all=True)
    flops, mread, mwrite, params = calculate_cost(f, [x])

    # flops is (output size) * (inside window operation)
    # for a 2x2 window: (2*2 - 1) additions plus 1 division per output element
    # (the original comment said "max operation" -- copy-paste from the max test).
    assert flops == (3 * 50 * 50) * ((2 * 2 - 1) + 1)
    assert mread == x.size
    assert mwrite == (3 * 50 * 50)
    assert params == {'k': 2, 's': 2, 'p': 0}
    assert type(params['k']) is int
    assert type(params['s']) is int
    assert type(params['p']) is int
def test_unpooling_2d():
    """Unpooling2D with explicit outsize: zero flops, write size follows outsize."""
    x = np.random.randn(1, 3, 10, 10).astype(np.float32)
    f = P.unpooling_2d.Unpooling2D(
        ksize=np.int64(3), stride=np.int64(3), outsize=(30, 30))
    flops, mread, mwrite, params = calculate_cost(f, [x])
    # Unpooling only copies values, so no arithmetic is counted.
    assert flops == 0
    assert mread == 1 * 3 * 10 * 10
    assert mwrite == 3 * 30 * 30
    assert params == {
        'k': 3, 's': 3, 'p': 0, 'outsize': (30, 30), 'cover_all': True
    }
    assert type(params['k']) is int
    assert type(params['s']) is int
    assert type(params['p']) is int
def test_unpooling_2d_no_outsize():
    """Unpooling2D without outsize: the inferred outsize (28, 28) is reported."""
    x = np.random.randn(1, 3, 10, 10).astype(np.float32)
    f = P.unpooling_2d.Unpooling2D(ksize=np.int64(3), stride=np.int64(3))
    flops, mread, mwrite, params = calculate_cost(f, [x])
    assert flops == 0
    assert mread == 1 * 3 * 10 * 10
    assert mwrite == 3 * 28 * 28
    assert params == {
        'k': 3, 's': 3, 'p': 0, 'outsize': (28, 28), 'cover_all': True
    }
    assert type(params['k']) is int
    assert type(params['s']) is int
    assert type(params['p']) is int
def test_upsampling_2d():
    """Upsampling2D with indices and explicit outsize: reads data + indices."""
    x = np.random.randn(1, 3, 10, 10).astype(np.float32)
    indices = np.random.randint(0, 9, (1, 3, 10, 10)).astype(np.int32)
    f = P.upsampling_2d.Upsampling2D(indices, ksize=np.int64(3),
                                     stride=np.int64(3), outsize=(30, 30))
    flops, mread, mwrite, params = calculate_cost(f, [x])
    assert flops == 0
    # mread counts both the input tensor and the pooling-index tensor.
    assert mread == 2 * 3 * 10 * 10
    assert mwrite == 3 * 30 * 30
    assert params == {
        'k': 3, 's': 3, 'p': 0, 'outsize': (30, 30), 'cover_all': True
    }
    assert type(params['k']) is int
    assert type(params['s']) is int
    assert type(params['p']) is int
def test_upsampling_2d_no_outsize():
    """Upsampling2D without outsize: the inferred outsize (28, 28) is reported."""
    x = np.random.randn(1, 3, 10, 10).astype(np.float32)
    indices = np.random.randint(0, 9, (1, 3, 10, 10)).astype(np.int32)
    f = P.upsampling_2d.Upsampling2D(indices, ksize=np.int64(3),
                                     stride=np.int64(3))
    flops, mread, mwrite, params = calculate_cost(f, [x])
    assert flops == 0
    # mread counts both the input tensor and the pooling-index tensor.
    assert mread == 2 * 3 * 10 * 10
    assert mwrite == 3 * 28 * 28
    assert params == {
        'k': 3, 's': 3, 'p': 0, 'outsize': (28, 28), 'cover_all': True
    }
    assert type(params['k']) is int
    assert type(params['s']) is int
    assert type(params['p']) is int
| StarcoderdataPython |
1645113 | <reponame>crzdg/acconeer-python-exploration<filename>src/acconeer/exptool/clients/base.py
import abc
import logging
from distutils.version import StrictVersion
from acconeer.exptool import SDK_VERSION, modes
from acconeer.exptool.structs import configbase
log = logging.getLogger(__name__)
class BaseClient(abc.ABC):
    """Abstract base for sensor clients.

    Drives a fixed state machine -- connect -> setup_session -> start_session
    -> get_next* -> stop_session -> disconnect -- and guards each transition
    with ClientError.  Subclasses implement the underscore-prefixed hooks.
    """

    @abc.abstractmethod
    def __init__(self, **kwargs):
        # `squeeze` is the only recognised kwarg; anything left over is an error.
        self.squeeze = kwargs.pop("squeeze", True)

        if kwargs:
            a_key = next(iter(kwargs.keys()))
            raise TypeError("Got unexpected keyword argument ({})".format(a_key))

        # State flags for the connect/setup/stream lifecycle.
        self._connected = False
        self._session_setup_done = False
        self._streaming_started = False

        self.supported_modes = None

    def connect(self):
        """Connect to the server; return its info dict (may be empty)."""
        if self._connected:
            raise ClientError("already connected")

        info = self._connect()
        self._connected = True

        if info is None:
            info = {}

        # Version sanity check is skipped for mocked servers.
        if not info.get("mock"):
            try:
                log.info("reported version: {}".format(info["version_str"]))

                if info["strict_version"] < StrictVersion(SDK_VERSION):
                    log.warning("old server version - please upgrade server")
                elif info["strict_version"] > StrictVersion(SDK_VERSION):
                    log.warning("new server version - please upgrade client")
            except KeyError:
                log.warning("could not read software version (might be too old)")

        self.supported_modes = self._get_supported_modes()

        return info

    def setup_session(self, config, check_config=True):
        """Set up a session from `config`; connects first if needed."""
        if check_config:
            self._check_config(config)

        if self._streaming_started:
            raise ClientError("can't setup session while streaming")

        if not self._connected:
            self.connect()

        if check_config and config.mode not in self.supported_modes:
            raise ClientError("Unsupported mode")

        session_info = self._setup_session(config)
        self._session_setup_done = True

        return session_info

    def start_session(self, config=None, check_config=True):
        """Start streaming; optionally (re)setup the session from `config`."""
        if self._streaming_started:
            raise ClientError("already streaming")

        if config is None:
            ret = None
        else:
            ret = self.setup_session(config, check_config=check_config)

        if not self._session_setup_done:
            raise ClientError("session needs to be set up before starting stream")

        self._start_session()
        self._streaming_started = True
        return ret

    def get_next(self):
        """Return the next data frame; only valid while streaming."""
        if not self._streaming_started:
            raise ClientError("must be streaming to get next")

        return self._get_next()

    def stop_session(self):
        """Stop the active stream."""
        if not self._streaming_started:
            raise ClientError("not streaming")

        self._stop_session()
        self._streaming_started = False

    def disconnect(self):
        """Disconnect from the server, stopping any active stream first."""
        if not self._connected:
            raise ClientError("not connected")

        if self._streaming_started:
            self.stop_session()

        self._disconnect()
        self._connected = False
        self.supported_modes = None

    def _check_config(self, config):
        # Configs without a check() method (plain dicts etc.) are accepted as-is.
        try:
            alerts = config.check()
        except AttributeError:
            return

        # Only ERROR-severity alerts abort; warnings are tolerated here.
        try:
            error_alert = next(a for a in alerts if a.severity == configbase.Severity.ERROR)
        except StopIteration:
            return

        msg = "error in config: {}: {}".format(error_alert.param, error_alert.msg)
        raise IllegalConfigError(msg)

    def _get_supported_modes(self):
        # Default: every mode; subclasses may query the server instead.
        return set(modes.Mode)

    @abc.abstractmethod
    def _connect(self):
        pass

    @abc.abstractmethod
    def _setup_session(self, config):
        pass

    @abc.abstractmethod
    def _start_session(self):
        pass

    @abc.abstractmethod
    def _get_next(self):
        pass

    @abc.abstractmethod
    def _stop_session(self):
        pass

    @abc.abstractmethod
    def _disconnect(self):
        pass
class ClientError(Exception):
    """Base error for all client usage/state violations."""
    pass


class IllegalConfigError(ClientError):
    """Raised when a config fails validation before session setup."""
    pass


class SessionSetupError(ClientError):
    """Raised when session setup fails on the server side."""
    pass
def decode_version_str(version: str) -> dict:
    """Split a reported version string into raw and strictly-parsed forms.

    A trailing "-<suffix>" (e.g. "2.5.1-rc1") is dropped before strict
    parsing; splitting is a no-op when no dash is present.
    """
    base = version.split("-")[0]
    return {
        "version_str": version,
        "strict_version": StrictVersion(base),
    }
| StarcoderdataPython |
5131476 | <gh_stars>10-100
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration adding acceptance-requirement flags to Domain.

    Applied migrations must not be edited.
    """

    dependencies = [
        ('powerdns', '0033_auto_20161114_1442'),
    ]

    operations = [
        migrations.AddField(
            model_name='domain',
            name='require_sec_acceptance',
            field=models.BooleanField(help_text='Do new A records require security acceptance', default=False),
        ),
        migrations.AddField(
            model_name='domain',
            name='require_seo_acceptance',
            field=models.BooleanField(help_text='Does deleting A records require SEO acceptance', default=False),
        ),
    ]
| StarcoderdataPython |
3486739 | import json, boto3, base64
def getpath(p, env=None):
    """Normalise an S3-style key into a list of path components.

    Strips surrounding '/' and '?' characters, drops a leading
    env['data_root'] prefix (when configured) and a trailing '.json'
    suffix, then splits the remainder on '/'.
    """
    key = p.strip('/?')
    root = env.get('data_root') if env else None
    if root and key.startswith(root):
        key = key[len(root):]
    if key.endswith('.json'):
        key = key[:-len('.json')]
    return key.strip('/').split('/')
def get_env_context(event, context):
    """Resolve the env dict and encode it as a base64 Lambda client context.

    The env comes from the invocation's client context when present,
    otherwise from the event's '_env' key; 'path' is derived from the
    event's S3 key.
    """
    cc = context.client_context
    env = cc.env if cc and cc.env else event.get('_env', {})
    env['path'] = getpath(event['key'], env)
    payload = json.dumps({'env': env})
    client_context = base64.b64encode(bytes(payload, 'utf-8')).decode('utf-8')
    return env, client_context
def getprocessor(env, name, source='core', scope=None):
    """Build a fully-qualified Lambda function name.

    Names already containing ':' are treated as ARNs and returned
    unchanged; otherwise the result is
    '<lambda_namespace>-<source>-[<scope>-]<name>'.
    """
    if ':' in name:
        return name
    qualified = f'{scope}-{name}' if scope else name
    return f"{env['lambda_namespace']}-{source}-{qualified}"
def main(event, context):
    '''
    - triggered by writes at _/feed/{class_name}/{query_id}/{connection_id}/*
    - trigger view for each view configuration in feed->view

    Returns the number of view invocations dispatched (0 or 1).
    '''
    counter = 0
    if event.get('key'):
        # fix: removed unused `s3 = boto3.resource('s3')` (never referenced)
        s3_client = boto3.client('s3')
        lambda_client = boto3.client('lambda')
        # NOTE(review): client_context is computed (and env mutated with 'path')
        # but the encoded context is not passed to invoke() below — confirm
        # whether it should be supplied as ClientContext.
        env, client_context = get_env_context(event, context)
        # path layout: _/feed/{class_name}/{query_id}/{connection_id}/...
        class_name, query_id, connection_id = env['path'][1:4]
        view = json.loads(s3_client.get_object(Bucket=env['bucket'], Key=event['key'])['Body'].read().decode('utf-8'))
        lambda_client.invoke(FunctionName=getprocessor(env, 'view'), InvocationType='Event', Payload=bytes(json.dumps({
            'class_name': class_name,
            'entity_type': 'query',
            'entity_id': query_id,
            'view': view,
            '_env': {**env, 'connection_type': view.get('connection_type', 'connection'), 'connection_id': connection_id}
        }), 'utf-8'))
        counter = counter + 1
    return counter
4870679 | from .injection import inject_db
from .markers import DBSessionInTransactionMarker
# Public API of this package, re-exported for `from package import *`.
__all__ = [
    'inject_db',
    'DBSessionInTransactionMarker'
]
| StarcoderdataPython |
1865350 | <filename>test.py<gh_stars>1-10
from util import get_args, detect_with_thresholding, mask_to_detections
from network import *
from util import *
from datasets import VideoDataset
from torchvision import transforms
import torch.backends.cudnn as cudnn
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
def nms(proposals, thresh):
    """Greedy 1-D non-maximum suppression.

    Each proposal row is [_, start, end, score]; rows are kept highest
    score first, and any remaining row whose temporal IoU with a kept row
    reaches *thresh* is discarded. Returns the surviving rows as lists.
    """
    boxes = np.array(proposals)
    starts = boxes[:, 1]
    ends = boxes[:, 2]
    scores = boxes[:, 3]
    lengths = ends - starts + 1
    order = scores.argsort()[::-1]  # indices sorted by descending score
    kept = []
    while order.size > 0:
        best = order[0]
        kept.append(boxes[best].tolist())
        overlap_start = np.maximum(starts[best], starts[order[1:]])
        overlap_end = np.minimum(ends[best], ends[order[1:]])
        overlap = np.maximum(0.0, overlap_end - overlap_start + 1)
        iou = overlap / (lengths[best] + lengths[order[1:]] - overlap)
        survivors = np.where(iou < thresh)[0]
        order = order[survivors + 1]  # +1: indices were relative to order[1:]
    return kept
def smooth(x):
    """3-point moving average along axis 0 of a 2-D array.

    Interior rows become the mean of themselves and both neighbours; the
    first and last rows average over the two rows available to them.
    """
    avg = np.array(x)  # work on a copy so x is left untouched
    avg[1:, :] = avg[1:, :] + x[:-1, :]    # add the previous row
    avg[:-1, :] = avg[:-1, :] + x[1:, :]   # add the next row
    avg[1:-1, :] /= 3                       # interior rows saw 3 terms
    avg[0, :] /= 2                          # edge rows saw only 2 terms
    avg[-1, :] /= 2
    return avg
def main():
    """Build the test-set data loader and run detection/evaluation.

    Fix: removed the unused local `best_pec1 = 0` (never read).
    """
    args = get_args()
    torch.backends.cudnn.enabled = False
    cudnn.benchmark = False
    torch.multiprocessing.set_sharing_strategy('file_system')
    video_val_loader = torch.utils.data.DataLoader(
        VideoDataset(args=args, transform=transforms.Compose([
            transforms.CenterCrop((224, 224)),
            # NOTE(review): random horizontal flip on the *test* set makes
            # evaluation non-deterministic — confirm this is intended.
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor()]), test_mode=True),
        batch_size=args.batch_size, shuffle=True, num_workers=8)
    print("start validate")
    validate(video_val_loader, args)
def validate(video_val_loader, args):
    """Run temporal action localisation over the test loader and score it.

    For each video: score every class, build an attention-weighted class
    activation signal, threshold it into detection segments, NMS them,
    then evaluate the accumulated detections at IoU 0.1-0.9 and save the
    per-IoU APs/mAP to 'final_localization.npz'.
    """
    model = GANModel(args).cuda()
    model.load_state_dict(torch.load('models/best.pth'))
    thrh = args.thrh
    pro = args.pro
    weight_global = args.weight_global
    sample_offset = args.sample_offset
    fps = args.fps
    pred_file = 'thumos-I3D-pred.txt' # for thumos dataset
    anno_dir = 'thumos14-test-annotations'
    with torch.no_grad():
        for i, (video, video_label, video_cnt, video_name) in enumerate(video_val_loader):
            test_input = {'video': video, 'video_label': video_label}
            model.test_set_input(test_input)
            Attention, video_middle_class_result, video_class_result, predict_label, real_label = model.test_forward()
            softmax = nn.Softmax(dim=-1)
            video_cnt = video_cnt.item()
            duration = video_cnt/fps  # video length in seconds
            video_name = video_name[0]
            # per-snippet class scores and per-snippet attention weights
            video_middle_class_result = softmax(torch.squeeze(video_middle_class_result, 0)).cpu()
            Attention = torch.squeeze(Attention).cpu()
            #print(video_class_result)
            video_class_result = softmax(torch.squeeze(video_class_result)).cpu()
            #####detection#####
            out_detections = []
            for class_id in range(args.class_num):
                # skip classes the video-level classifier scores too low
                if video_class_result[class_id] <= args.global_score_thrh: # threshold for 0.1
                    #if class_id != predict_label:
                    continue
                _score = video_middle_class_result[:, class_id] # 0.3664
                metric = Attention * _score
                # print(torch.gt(metric, (_score/Attention.size(-1))).nonzero())
                metric = smooth(metric)
                metric = normalize(metric).detach().numpy()
                # att_filtering_value = 1 / Attention.shape[0]
                # assert (att_filtering_value is not None)
                #
                # metric = video_class_result[class_id]
                # metric = smooth(metric)
                # metric = normalize(metric)
                # metric[Attention < att_filtering_value] = 0
                # metric = normalize(metric).detach().numpy()
                # map the feature to the original video frame
                t_cam = interpolate(metric, frame_cnt=video_cnt, sample_rate=16, snippet_size=16, kind='linear')
                t_cam = np.expand_dims(t_cam, axis=1)
                mask = detect_with_thresholding(t_cam, thrh, pro) # mask calculation
                temp_out = mask_to_detections(mask, t_cam) # [start, end, None, detection_score]#
                for entry in temp_out:
                    entry[2] = class_id
                    entry[3] += video_class_result[class_id].item() * weight_global # each class confidence
                    # convert frame indices to seconds and clamp to [0, duration]
                    entry[0] = (entry[0] + sample_offset) / fps
                    entry[1] = (entry[1] + sample_offset) / fps
                    entry[0] = max(0, entry[0])
                    entry[1] = max(0, entry[1])
                    entry[0] = min(duration, entry[0])
                    entry[1] = min(duration, entry[1])
                #########################################
                for entry_id in range(len(temp_out)):
                    temp_out[entry_id].insert(0, video_name)
                temp_out = nms(temp_out, 0.7)
                print(temp_out)
                out_detections += temp_out #to obtain the different category detections of videos
            output_detections_thumos14(out_detections, pred_file)
            summary_file = 'final_localization.npz'
            all_test_map = np.zeros((9, 1))
            all_test_aps = np.zeros((9, args.class_num))
            for IoU_idx, IoU in enumerate([.1, .2, .3, .4, .5, .6, .7, .8, .9]):
                if len(out_detections) != 0:
                    temp_aps, temp_map = eval_thumos_detect(pred_file,anno_dir,'test',IoU)
                    all_test_aps[IoU_idx, :] = temp_aps
                    all_test_map[IoU_idx, 0] = temp_map
                    print('{}'.format(IoU_idx))
            np.savez(summary_file, all_test_aps=all_test_aps, all_test_map=all_test_map)
if __name__ == '__main__':
    # parse the arguments
    # NOTE(review): this result is unused — main() calls get_args() itself;
    # the early call only serves to fail fast on bad CLI arguments. Confirm.
    args = get_args()
    main()
| StarcoderdataPython |
11259896 | <filename>problems/statistics10binomialdistribution2/submissions/accepted/stefan.py<gh_stars>1-10
#!/usr/bin/env python3
#Author: <NAME>
from math import factorial
def choose(n, k):
    """Return the binomial coefficient C(n, k) as a float."""
    n_fact, k_fact, rest_fact = factorial(n), factorial(k), factorial(n - k)
    return n_fact / k_fact / rest_fact
def binom(n, k, p):
    """Return P[X = k] for X ~ Binomial(n, p)."""
    success = p ** k
    failure = (1 - p) ** (n - k)
    return choose(n, k) * success * failure
if __name__ == '__main__':
    # Input "p n": p is the defect probability in percent, n the batch size.
    p,n = map(int, input().split())
    # P(at most 2 defects) and P(at least 2 defects), rounded to 3 places.
    print(round(sum([binom(n, k, p/100) for k in range(3)]), 3))
    print(round(sum([binom(n, k, p/100) for k in range(2, n+1)]), 3))
| StarcoderdataPython |
3527636 | <filename>extra_apps/DjangoUeditor/urls.py
# coding:utf-8
from django import VERSION
from .widgets import UEditorWidget, AdminUEditorWidget
from .views import get_ueditor_controller
from django.urls import path
# All UEditor AJAX actions (config, uploads, etc.) go through one controller view.
urlpatterns = [
    path('controller/', get_ueditor_controller),
]
| StarcoderdataPython |
1707528 | import os
from flask import Flask
from flask_mail import Mail
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_migrate import Migrate
from flask_wtf.csrf import CSRFProtect
from flask_cors import CORS
from flask_jwt_extended import JWTManager
app = Flask(__name__)
csrf = CSRFProtect(app)
# NOTE(review): '<KEY>' placeholders for SECRET_KEY / JWT_SECRET_KEY are
# hardcoded — load them from the environment like the mail credentials below.
app.config['SECRET_KEY'] = '<KEY>'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db'
# Mail is sent through Gmail SMTP with credentials taken from the environment.
app.config['MAIL_SERVER'] = 'smtp.googlemail.com'
app.config['MAIL_PORT'] = 587
app.config['MAIL_USE_TLS'] = True
app.config['MAIL_USERNAME'] = os.environ.get('EMAIL_USER')
app.config['MAIL_PASSWORD'] = os.environ.get('EMAIL_PASS')
app.config['CORS_HEADERS'] = 'Content-Type' # f0r react
app.config["JWT_SECRET_KEY"] = '<KEY>'
db = SQLAlchemy(app) # database instance
bcrypt = Bcrypt(app)
migrate = Migrate(app, db)
mail = Mail(app)
jwt = JWTManager(app)
# cors = CORS(app, resources={r"/foo": {"origins": "http://127.0.0.1:81"}}) # for react
cors = CORS(app)
from anico_application import routes # has to be imported at the bottom to prevent 'circular' import
| StarcoderdataPython |
6536799 | <filename>test/issue_94_111_154.py
import time
import RPi.GPIO as GPIO
LED_PIN = 12
def issue_154():
    """Repeatedly start/stop PWM on the LED pin (repro for RPi.GPIO issue #154).

    Runs forever; the reported failure is the LED staying off at around
    iteration 400. Fix: `print count` was Python-2-only syntax — now
    `print(count)` so the script runs under Python 3 as well.
    """
    count = 0
    pinRef = GPIO.PWM(LED_PIN, 50)  # new PWM instance at 50 Hz
    while True:
        pinRef.start(10)  # 10% duty cycle
        time.sleep(0.05)
        pinRef.stop()
        GPIO.output(LED_PIN, 0)
        time.sleep(0.05)
        count = count + 1
        print(count)
def issue_94(cycles):
    """Flash the LED *cycles* times (repro for RPi.GPIO issue #94).

    The reported bug: the LED stays off at around cycle 400.
    Fix: `xrange` is Python-2-only — replaced with `range`, which behaves
    identically here on both Python 2 and 3.
    """
    pwm = GPIO.PWM(LED_PIN, 1)
    for i in range(cycles):
        print(i)
        pwm.ChangeFrequency(25)
        pwm.start(50)
        time.sleep(1)
    pwm.stop()
if __name__ == '__main__':
    GPIO.setmode(GPIO.BOARD)       # physical pin numbering
    GPIO.setup(LED_PIN, GPIO.OUT)
    try:
        # issue_94(1000)
        issue_154()                # runs forever; interrupt to stop
    finally:
        GPIO.cleanup()             # always release the GPIO pins
| StarcoderdataPython |
3446847 | <reponame>redst4r/arboreto
'''
File created to address the reviewer's comment: how many trees were used
'''
import pandas as pd
import time
import sys
from arboreto.utils import load_tf_names
from arboreto.algo import *
from distributed import Client
if __name__ == '__main__':
    # CLI: expression-matrix TSV, TF list, network output path, meta output path.
    ex_path = sys.argv[1]
    tf_path = sys.argv[2]
    net_out_path = sys.argv[3]
    meta_out_path = sys.argv[4]

    start_time = time.time()

    expression_matrix = pd.read_csv(ex_path, sep='\t')
    tf_names = load_tf_names(tf_path)
    gene_names = expression_matrix.columns

    client = Client(LocalCluster())
    print(client._repr_html_())

    # Fix: DataFrame.as_matrix() was deprecated in pandas 0.23 and removed
    # in 1.0; .values returns the same ndarray.
    network_graph, meta_graph = create_graph(expression_matrix.values,
                                             gene_names,
                                             tf_names,
                                             "GBM",
                                             SGBM_KWARGS,
                                             client=client, # broadcast!
                                             early_stop_window_length=25,
                                             include_meta=True)

    # Good!
    a, b = client.persist([network_graph, meta_graph])
    network_df = a.compute(sync=True)
    meta_df = b.compute(sync=True)

    # Bad!
    # network_df, meta_df = client.compute([network_graph, meta_graph], sync=True)

    if client:
        client.close()

    network_df.to_csv(net_out_path, sep='\t', index=False)
    meta_df.to_csv(meta_out_path, sep='\t', index=False)

    end_time = time.time()
    print('wall time: {} seconds'.format(end_time - start_time))
6634125 | <reponame>e2jk/syncboom
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Running the tests:
# $ python3 -m unittest discover --start-directory ./tests/
# Checking the coverage of the tests:
# $ coverage run --include=./*.py --omit=tests/* -m unittest discover && \
# rm -rf html_dev/coverage && coverage html --directory=html_dev/coverage \
# --title="Code test coverage for SyncBoom"
import unittest
import sys
from unittest.mock import patch
import inspect
# Make the project root importable, then load the module under test.
sys.path.append('.')
target = __import__("syncboom")
class TestProcessMasterCard(unittest.TestCase):
    """Tests for syncboom.process_master_card.

    Each test injects a config into the target module, feeds in a master
    card dict, and asserts on the returned (labels, lists, created) tuple
    and the exact log output. Trello API calls are mocked via
    syncboom.perform_request where needed.

    Fix: removed a stray no-op `target.config` expression statement that
    trailed the last test method.
    """

    def test_process_master_card_0(self):
        """
        Test processing a new master card without labels or attachments
        """
        target.config = {"key": "ghi", "token": "jkl", "destination_lists": []}
        master_card = {"id": "1a2b3c", "desc": "abc", "name": "Card name",
            "labels": [], "badges": {"attachments": 0}}
        with self.assertLogs(level='DEBUG') as cm:
            output = target.process_master_card(master_card)
        self.assertEqual(output, (0, 0, 0))
        self.assertEqual(cm.output, ['DEBUG:root:================================================================',
            "DEBUG:root:Process master card 'Card name'",
            'DEBUG:root:Master card is to be synced on 0 destination lists',
            'INFO:root:This master card has no slave cards'])

    def test_process_master_card_unknown_label(self):
        """
        Test processing a new master card with one label that is not in the config
        """
        target.config = {"key": "ghi", "token": "jkl",
            "destination_lists": {
                "Label One": ["a1a1a1a1a1a1a1a1a1a1a1a1"],
                "Label Two": ["ddd"],
                "All Teams": [
                    "a1a1a1a1a1a1a1a1a1a1a1a1",
                    "ddd"
                ]
            }}
        master_card = {"id": "1a2b3c", "desc": "abc", "name": "Card name",
            "labels": [{"name": "Unknown label"}], "badges": {"attachments": 0}}
        with self.assertLogs(level='DEBUG') as cm:
            output = target.process_master_card(master_card)
        self.assertEqual(output, (0, 0, 0))
        self.assertEqual(cm.output, ['DEBUG:root:================================================================',
            "DEBUG:root:Process master card 'Card name'",
            'DEBUG:root:Master card is to be synced on 0 destination lists',
            'INFO:root:This master card has no slave cards'])

    @patch("syncboom.perform_request")
    def test_process_master_card_one_label(self, t_pr):
        """
        Test processing a new master card with one recognized label
        """
        target.args = type(inspect.stack()[0][3], (object,), {"dry_run": True})()
        target.config = {"key": "ghi", "token": "jkl",
            "destination_lists": {
                "Label One": ["a1a1a1a1a1a1a1a1a1a1a1a1"],
                "Label Two": ["ddd"],
                "All Teams": [
                    "a1a1a1a1a1a1a1a1a1a1a1a1",
                    "ddd"
                ]
            }}
        master_card = {"id": "t"*24, "desc": "abc", "name": "Card name",
            "labels": [{"name": "Label One"}], "badges": {"attachments": 0},
            "shortUrl": "https://trello.com/c/eoK0Rngb",
            "url": "https://trello.com/c/eoK0Rngb/blablabla"}
        t_pr.side_effect = [{"id": "b"*24, "name": "Slave card One",
            "idBoard": "k"*24, "idList": "l"*24,
            "url": "https://trello.com/c/abcd1234/blablabla2"},
            {"name": "Board name"},
            {"name": "List name"},
            {},
            {},
            {}]
        with self.assertLogs(level='DEBUG') as cm:
            output = target.process_master_card(master_card)
        self.assertEqual(output, (1, 1, 1))
        expected = ['DEBUG:root:================================================================',
            "DEBUG:root:Process master card 'Card name'",
            'DEBUG:root:Master card is to be synced on 1 destination lists',
            'DEBUG:root:Creating new slave card',
            'DEBUG:root:New slave card ID: bbbbbbbbbbbbbbbbbbbbbbbb',
            "DEBUG:root:New master card metadata: \n- 'Slave card One' on list '**Board name|List name**'",
            'INFO:root:This master card has 1 slave cards (1 newly created)',
            'DEBUG:root:Updating master card metadata',
            "DEBUG:root:abc\n\n--------------------------------\n*== DO NOT EDIT BELOW THIS LINE ==*\n\n- 'Slave card One' on list '**Board name|List name**'",
            'DEBUG:root:Attaching master card tttttttttttttttttttttttt to slave card bbbbbbbbbbbbbbbbbbbbbbbb',
            'DEBUG:root:Attaching slave card bbbbbbbbbbbbbbbbbbbbbbbb to master card tttttttttttttttttttttttt']
        self.assertEqual(cm.output, expected)
        target.args = None

    def test_process_master_card_label_multiple(self):
        """
        Test processing a new master card with one label that maps to multiple lists
        """
        target.args = type(inspect.stack()[0][3], (object,), {"dry_run": True})()
        target.config = {"key": "ghi", "token": "jkl",
            "destination_lists": {
                "Label One": ["a1a1a1a1a1a1a1a1a1a1a1a1"],
                "Label Two": ["ddd"],
                "All Teams": [
                    "a1a1a1a1a1a1a1a1a1a1a1a1",
                    "ddd"
                ]
            }}
        master_card = {"id": "1a2b3c", "desc": "abc", "name": "Card name",
            "labels": [{"name": "All Teams"}], "badges": {"attachments": 0},
            "shortUrl": "https://trello.com/c/eoK0Rngb"}
        with self.assertLogs(level='DEBUG') as cm:
            output = target.process_master_card(master_card)
        self.assertEqual(output, (1, 2, 2))
        expected = ['DEBUG:root:================================================================',
            "DEBUG:root:Process master card 'Card name'",
            'DEBUG:root:Master card is to be synced on 2 destination lists',
            'DEBUG:root:Creating new slave card',
            "DEBUG:root:Skipping POST call to 'https://api.trello.com/1/cards' due to --dry-run parameter",
            'DEBUG:root:Creating new slave card',
            "DEBUG:root:Skipping POST call to 'https://api.trello.com/1/cards' due to --dry-run parameter",
            'INFO:root:This master card has 2 slave cards (2 newly created)']
        self.assertEqual(cm.output, expected)
        target.args = None

    def test_process_master_card_label_multiple_and_duplicate_single(self):
        """
        Test processing a new master card with one label that maps to multiple lists and another single label that was already in the multiple list
        """
        target.args = type(inspect.stack()[0][3], (object,), {"dry_run": True})()
        target.config = {"key": "ghi", "token": "jkl",
            "destination_lists": {
                "Label One": ["a1a1a1a1a1a1a1a1a1a1a1a1"],
                "Label Two": ["ddd"],
                "All Teams": [
                    "a1a1a1a1a1a1a1a1a1a1a1a1",
                    "ddd"
                ]
            }}
        master_card = {"id": "1a2b3c", "desc": "abc", "name": "Card name",
            "labels": [{"name": "All Teams"}, {"name": "Label One"}], "badges": {"attachments": 0},
            "shortUrl": "https://trello.com/c/eoK0Rngb"}
        with self.assertLogs(level='DEBUG') as cm:
            output = target.process_master_card(master_card)
        self.assertEqual(output, (1, 2, 2))
        expected = ['DEBUG:root:================================================================',
            "DEBUG:root:Process master card 'Card name'",
            'DEBUG:root:Master card is to be synced on 2 destination lists',
            'DEBUG:root:Creating new slave card',
            "DEBUG:root:Skipping POST call to 'https://api.trello.com/1/cards' due to --dry-run parameter",
            'DEBUG:root:Creating new slave card',
            "DEBUG:root:Skipping POST call to 'https://api.trello.com/1/cards' due to --dry-run parameter",
            'INFO:root:This master card has 2 slave cards (2 newly created)']
        self.assertEqual(cm.output, expected)
        target.args = None

    @patch("syncboom.perform_request")
    def test_process_master_card_dummy_attachment(self, t_pr):
        """
        Test processing a new master card with one non-Trello attachment
        """
        target.args = type(inspect.stack()[0][3], (object,), {"dry_run": True})()
        target.config = {"key": "ghi", "token": "jkl",
            "destination_lists": {
                "Label One": ["a1a1a1a1a1a1a1a1a1a1a1a1"],
                "Label Two": ["ddd"],
                "All Teams": [
                    "a1a1a1a1a1a1a1a1a1a1a1a1",
                    "ddd"
                ]
            }}
        master_card = {"id": "t"*24, "desc": "abc", "name": "Card name",
            "labels": [{"name": "Label One"}], "badges": {"attachments": 1},
            "shortUrl": "https://trello.com/c/eoK0Rngb",
            "url": "https://trello.com/c/eoK0Rngb/blablabla"}
        t_pr.side_effect = [[{"id": "rrr", "url": "https://monip.org"}],
            {"id": "b"*24, "name": "Slave card One",
            "idBoard": "k"*24, "idList": "l"*24,
            "url": "https://trello.com/c/abcd1234/blablabla2"},
            {"name": "Board name"},
            {"name": "List name"},
            {},
            {},
            {}]
        with self.assertLogs(level='DEBUG') as cm:
            output = target.process_master_card(master_card)
        self.assertEqual(output, (1, 1, 1))
        expected = ['DEBUG:root:================================================================',
            "DEBUG:root:Process master card 'Card name'",
            'DEBUG:root:Master card is to be synced on 1 destination lists',
            'DEBUG:root:Getting 1 attachments on master card tttttttttttttttttttttttt',
            'DEBUG:root:Creating new slave card',
            'DEBUG:root:New slave card ID: bbbbbbbbbbbbbbbbbbbbbbbb',
            "DEBUG:root:New master card metadata: \n- 'Slave card One' on list '**Board name|List name**'",
            'INFO:root:This master card has 1 slave cards (1 newly created)',
            'DEBUG:root:Updating master card metadata',
            "DEBUG:root:abc\n\n--------------------------------\n*== DO NOT EDIT BELOW THIS LINE ==*\n\n- 'Slave card One' on list '**Board name|List name**'",
            'DEBUG:root:Attaching master card tttttttttttttttttttttttt to slave card bbbbbbbbbbbbbbbbbbbbbbbb',
            'DEBUG:root:Attaching slave card bbbbbbbbbbbbbbbbbbbbbbbb to master card tttttttttttttttttttttttt']
        self.assertEqual(cm.output, expected)
        target.args = None

    @patch("syncboom.perform_request")
    def test_process_master_card_attachment(self, t_pr):
        """
        Test processing a new master card with one Trello attachment
        """
        target.args = type(inspect.stack()[0][3], (object,), {"dry_run": True})()
        target.config = {"key": "ghi", "token": "jkl",
            "destination_lists": {
                "Label One": ["aaa"],
                "Label Two": ["ddd"],
                "All Teams": [
                    "aaa",
                    "ddd"
                ]
            }}
        master_card = {"id": "t"*24, "desc": "abc", "name": "Card name",
            "labels": [{"name": "Label One"}], "badges": {"attachments": 1},
            "shortUrl": "https://trello.com/c/eoK0Rngb",
            "url": "https://trello.com/c/eoK0Rngb/blablabla"}
        t_pr.side_effect = [[{"id": "rrr", "url": "https://trello.com/c/abcd1234/blablabla4"}],
            {"id": "q"*24, "name": "Slave card One",
            "idBoard": "k"*24, "idList": "aaa"},
            {"name": "Board name"},
            {"name": "List name"},
            {}]
        with self.assertLogs(level='DEBUG') as cm:
            output = target.process_master_card(master_card)
        self.assertEqual(output, (1, 1, 0))
        expected = ['DEBUG:root:================================================================',
            "DEBUG:root:Process master card 'Card name'",
            'DEBUG:root:Master card is to be synced on 1 destination lists',
            'DEBUG:root:Getting 1 attachments on master card tttttttttttttttttttttttt',
            "DEBUG:root:Slave card qqqqqqqqqqqqqqqqqqqqqqqq already exists on list aaa",
            "DEBUG:root:{'id': 'qqqqqqqqqqqqqqqqqqqqqqqq', 'name': 'Slave card One', 'idBoard': 'kkkkkkkkkkkkkkkkkkkkkkkk', 'idList': 'aaa'}",
            "DEBUG:root:New master card metadata: \n- 'Slave card One' on list '**Board name|List name**'",
            'INFO:root:This master card has 1 slave cards (0 newly created)',
            'DEBUG:root:Updating master card metadata',
            "DEBUG:root:abc\n\n--------------------------------\n*== DO NOT EDIT BELOW THIS LINE ==*\n\n- 'Slave card One' on list '**Board name|List name**'"]
        self.assertEqual(cm.output, expected)
        target.args = None

    @patch("syncboom.perform_request")
    def test_process_master_card_attachment_no_label(self, t_pr):
        """
        Test processing a new master card with one Trello attachment but no label
        """
        target.args = type(inspect.stack()[0][3], (object,), {"dry_run": True})()
        target.config = {"key": "ghi", "token": "jkl",
            "destination_lists": {
                "Label One": ["a1a1a1a1a1a1a1a1a1a1a1a1"],
                "Label Two": ["ddd"],
                "All Teams": [
                    "a1a1a1a1a1a1a1a1a1a1a1a1",
                    "ddd"
                ]
            }}
        master_card = {"id": "t"*24, "desc": "abc", "name": "Card name",
            "labels": [], "badges": {"attachments": 1},
            "shortUrl": "https://trello.com/c/eoK0Rngb",
            "url": "https://trello.com/c/eoK0Rngb/blablabla"}
        t_pr.side_effect = [[{"id": "rrr", "url": "https://trello.com/c/abcd1234/blablabla4"}],
            {"id": "q"*24, "name": "Slave card One",
            "idBoard": "k"*24, "idList": "aaa"},
            {"name": "Board name"},
            {"name": "List name"},
            {}]
        with self.assertLogs(level='DEBUG') as cm:
            output = target.process_master_card(master_card)
        self.assertEqual(output, (0, 0, 0))
        expected = ['DEBUG:root:================================================================',
            "DEBUG:root:Process master card 'Card name'",
            'DEBUG:root:Master card is to be synced on 0 destination lists',
            'DEBUG:root:Getting 1 attachments on master card tttttttttttttttttttttttt',
            "DEBUG:root:Master card has been unlinked from slave cards",
            "INFO:root:This master card has no slave cards"]
        self.assertEqual(cm.output, expected)
        target.args = None

    @patch("syncboom.perform_request")
    def test_process_master_card_one_label_wet_run_no_checklist(self, t_pr):
        """
        Test processing a new master card with one recognized label, no dry_run, without a checklist
        """
        target.args = type(inspect.stack()[0][3], (object,), {"dry_run": False})()
        target.config = {"key": "ghi", "token": "jkl",
            "destination_lists": {
                "Label One": ["a1a1a1a1a1a1a1a1a1a1a1a1"],
                "Label Two": ["ddd"],
                "All Teams": [
                    "a1a1a1a1a1a1a1a1a1a1a1a1",
                    "ddd"
                ]
            },
            "friendly_names": {
                "Label Two": "Nicer Label"
            }}
        master_card = {"id": "t"*24, "desc": "abc", "name": "Card name",
            "labels": [{"name": "Label One"}], "badges": {"attachments": 0},
            "shortUrl": "https://trello.com/c/eoK0Rngb",
            "url": "https://trello.com/c/eoK0Rngb/blablabla"}
        t_pr.side_effect = [{"id": "b"*24, "name": "Slave card One",
            "idBoard": "k"*24, "idList": "l"*24,
            "url": "https://trello.com/c/abcd1234/blablabla2"},
            {"name": "Board name"},
            {"name": "List name"},
            {},
            [],
            {"id": "w"*24, "name": "New checklist"},
            {"idBoard": "hhh"},
            {"name": "Destination board name"},
            {"name": "New checklist item"},
            {},
            {}]
        with self.assertLogs(level='DEBUG') as cm:
            output = target.process_master_card(master_card)
        self.assertEqual(output, (1, 1, 1))
        expected = "\n".join(["DEBUG:root:Retrieving checklists from card tttttttttttttttttttttttt",
            "DEBUG:root:Creating new checklist",
            "DEBUG:root:{'id': 'wwwwwwwwwwwwwwwwwwwwwwww', 'name': 'New checklist'}",
            "DEBUG:root:Adding new checklistitem 'Destination board name' to checklist wwwwwwwwwwwwwwwwwwwwwwww",
            "DEBUG:root:{'name': 'New checklist item'}"])
        self.assertTrue(expected in "\n".join(cm.output))
        target.args = None

    @patch("syncboom.perform_request")
    def test_process_master_card_one_label_wet_run_unrelated_checklist(self, t_pr):
        """
        Test processing a new master card with one recognized label, no dry_run, with one unrelated checklist
        """
        target.args = type(inspect.stack()[0][3], (object,), {"dry_run": False})()
        target.config = {"key": "ghi", "token": "jkl",
            "destination_lists": {
                "Label One": ["aaa"],
                "Label Two": ["ddd"],
                "All Teams": [
                    "aaa",
                    "ddd"
                ]
            },
            "friendly_names": {
                "Label Two": "Nicer Label"
            }}
        master_card = {"id": "t"*24, "desc": "abc", "name": "Card name",
            "labels": [{"name": "Label One"}], "badges": {"attachments": 0},
            "shortUrl": "https://trello.com/c/eoK0Rngb",
            "url": "https://trello.com/c/eoK0Rngb/blablabla"}
        t_pr.side_effect = [{"id": "b"*24, "name": "Slave card One",
            "idBoard": "k"*24, "idList": "l"*24,
            "url": "https://trello.com/c/abcd1234/blablabla2"},
            {"name": "Board name"},
            {"name": "List name"},
            {},
            [{"name": "Unrelated checklist"}],
            {"id": "w"*24, "name": "New checklist"},
            {"idBoard": "hhh"},
            {"name": "Destination board name"},
            {"name": "New checklist item"},
            {},
            {}]
        with self.assertLogs(level='DEBUG') as cm:
            output = target.process_master_card(master_card)
        self.assertEqual(output, (1, 1, 1))
        expected = "\n".join(["DEBUG:root:Retrieving checklists from card tttttttttttttttttttttttt",
            "DEBUG:root:Already 1 checklists on this master card: Unrelated checklist",
            "DEBUG:root:Creating new checklist",
            "DEBUG:root:{'id': 'wwwwwwwwwwwwwwwwwwwwwwww', 'name': 'New checklist'}",
            "DEBUG:root:Adding new checklistitem 'Destination board name' to checklist wwwwwwwwwwwwwwwwwwwwwwww",
            "DEBUG:root:{'name': 'New checklist item'}"])
        self.assertTrue(expected in "\n".join(cm.output))
        target.args = None

    @patch("syncboom.perform_request")
    def test_process_master_card_one_label_wet_run_friendly_name_checklist(self, t_pr):
        """
        Test processing a new master card with one recognized label, no dry_run,
        without a checklist and using a friendly name as checklist item instead of the board's name
        """
        target.args = type(inspect.stack()[0][3], (object,), {"dry_run": False})()
        target.config = {"key": "ghi", "token": "jkl",
            "destination_lists": {
                "Label One": ["a1a1a1a1a1a1a1a1a1a1a1a1"],
                "Label Two": ["ddd"],
                "All Teams": [
                    "a1a1a1a1a1a1a1a1a1a1a1a1",
                    "ddd"
                ]
            },
            "friendly_names": {
                "Destination board name": "Nicer Label"
            }}
        master_card = {"id": "t"*24, "desc": "abc", "name": "Card name",
            "labels": [{"name": "Label One"}], "badges": {"attachments": 0},
            "shortUrl": "https://trello.com/c/eoK0Rngb",
            "url": "https://trello.com/c/eoK0Rngb/blablabla"}
        t_pr.side_effect = [{"id": "b"*24, "name": "Slave card One",
            "idBoard": "k"*24, "idList": "l"*24,
            "url": "https://trello.com/c/abcd1234/blablabla2"},
            {"name": "Board name"},
            {"name": "List name"},
            {},
            [],
            {"id": "w"*24, "name": "New checklist"},
            {"idBoard": "hhh"},
            {"name": "Destination board name"},
            {"name": "New checklist item"},
            {},
            {}]
        with self.assertLogs(level='DEBUG') as cm:
            output = target.process_master_card(master_card)
        self.assertEqual(output, (1, 1, 1))
        expected = "\n".join(["DEBUG:root:Retrieving checklists from card tttttttttttttttttttttttt",
            "DEBUG:root:Creating new checklist",
            "DEBUG:root:{'id': 'wwwwwwwwwwwwwwwwwwwwwwww', 'name': 'New checklist'}",
            "DEBUG:root:Adding new checklistitem 'Nicer Label' to checklist wwwwwwwwwwwwwwwwwwwwwwww",
            "DEBUG:root:{'name': 'New checklist item'}"])
        self.assertTrue(expected in "\n".join(cm.output))
        target.args = None

    @patch("syncboom.perform_request")
    def test_process_master_card_one_label_wet_run_related_checklist(self, t_pr):
        """
        Test processing a new master card with one recognized label, no dry_run, with already the related checklist
        """
        target.args = type(inspect.stack()[0][3], (object,), {"dry_run": False})()
        target.config = {"key": "ghi", "token": "jkl",
            "destination_lists": {
                "Label One": ["a1a1a1a1a1a1a1a1a1a1a1a1"],
                "Label Two": ["ddd"],
                "All Teams": [
                    "a1a1a1a1a1a1a1a1a1a1a1a1",
                    "ddd"
                ]
            }}
        master_card = {"id": "t"*24, "desc": "abc", "name": "Card name",
            "labels": [{"name": "Label One"}], "badges": {"attachments": 0},
            "shortUrl": "https://trello.com/c/eoK0Rngb",
            "url": "https://trello.com/c/eoK0Rngb/blablabla"}
        t_pr.side_effect = [{"id": "b"*24, "name": "Slave card One",
            "idBoard": "k"*24, "idList": "l"*24,
            "url": "https://trello.com/c/abcd1234/blablabla2"},
            {"name": "Board name"},
            {"name": "List name"},
            {},
            [{"name": "Involved Teams"}],
            {},
            {}]
        with self.assertLogs(level='DEBUG') as cm:
            output = target.process_master_card(master_card)
        self.assertEqual(output, (1, 1, 1))
        expected = "\n".join(["DEBUG:root:Retrieving checklists from card tttttttttttttttttttttttt",
            "DEBUG:root:Already 1 checklists on this master card: Involved Teams"])
        self.assertTrue(expected in "\n".join(cm.output))
        target.args = None
if __name__ == '__main__':
    # Run the test suite when this file is executed directly.
    unittest.main()
| StarcoderdataPython |
6567341 | <filename>main.py
from flask import Flask, jsonify, request, render_template
app = Flask(__name__)
# In-memory store mapping void_id -> message; not persisted, lost on restart,
# and not shared across multiple worker processes.
messages = dict()
@app.route("/")
def index():
return render_template("index.html")
@app.route('/message', methods=['POST'])
def update_message():
    """Create/replace the message for the void_id in the JSON body; echo it back."""
    content = request.get_json()
    print(content)  # debug output
    messages[content['void_id']] = content['msg']
    return jsonify(content)
@app.route('/message', methods=['GET'])
def get_all_message():
    """Return every stored message keyed by void_id."""
    print(messages)  # debug output
    return jsonify(messages)
@app.route('/message', methods=['DELETE'])
def delete_all_message():
    """Clear the whole message store and return the (now empty) mapping."""
    messages.clear()
    print(messages)  # debug output
    return jsonify(messages)
@app.route('/message/<void_id>', methods=['GET'])
def get_message(void_id):
    """Return the message for *void_id* (JSON null when the id is unknown)."""
    msg = messages.get(void_id)
    return jsonify(msg)
if __name__ == '__main__':
    # Listen on all interfaces, port 80 (binding to 80 needs elevated privileges).
    app.run(host='0.0.0.0', port=80)
10418 | StarcoderdataPython |
3382748 | <gh_stars>1-10
import re
import requests
from bs4 import BeautifulSoup
class Tracker:
def __init__( self ):
    # Live worldometers coronavirus page; all methods below scrape this URL.
    self.url = 'https://www.worldometers.info/coronavirus/?utm_campaign=homeAdvegas1%3F'
def maincounter(self):
    """Fetch the tracker page and return parsed soup, or False on a non-200 response."""
    response = requests.get(self.url)
    if response.status_code != 200:
        return False
    return BeautifulSoup(response.text, 'html.parser')
def total_cases(self):
    """Scrape the worldwide total case count; None when the page fetch failed."""
    soup = self.maincounter()
    if not soup:
        return None
    counters = soup.find_all('div', attrs={'class', 'maincounter-number'})
    spans = [re.findall("<span.*>(.+?)</span>", str(block)) for block in counters]
    # first maincounter block = total cases
    raw = str(spans[0][0]).replace(',', '')
    return int(raw.strip())
def total_deaths(self):
    """Scrape the worldwide death count; None when the page fetch failed."""
    soup = self.maincounter()
    if not soup:
        return None
    counters = soup.find_all('div', attrs={'class', 'maincounter-number'})
    spans = [re.findall("<span.*>(.+?)</span>", str(block)) for block in counters]
    # second maincounter block = deaths
    raw = str(spans[1][0]).replace(',', '')
    return int(raw.strip())
def total_recoveries(self):
    """Scrape the worldwide recovery count; None when the page fetch failed."""
    soup = self.maincounter()
    if not soup:
        return None
    counters = soup.find_all('div', attrs={'class', 'maincounter-number'})
    spans = [re.findall("<span.*>(.+?)</span>", str(block)) for block in counters]
    # third maincounter block = recoveries
    raw = str(spans[2][0]).replace(',', '')
    return int(raw.strip())
def data_grab(self):
    """Scrape the 'panel_front' widgets into a fixed-order list of ints.

    Interleaves up to 4 'number-table-main' values with up to 4
    'number-table' values, strips thousands separators, and returns the
    ints; returns False on any failure (network, parse, empty result).
    NOTE(review): the bare excepts silently swallow all errors — consider
    narrowing them.
    """
    try:
        r = requests.get(self.url)
        r = r.text
        soup = BeautifulSoup(r, 'html.parser')
        data = soup.find_all('div', attrs = { 'class': 'panel_front'})
        regex_pattern = r">(.+?)<"
        main_numbers = list()
        # pass 1: the big headline numbers inside each panel
        for i in data:
            soup = BeautifulSoup(str(i), 'html.parser')
            item = soup.find_all('div', attrs = {'class': 'number-table-main'})
            if item:
                for j in item:
                    val = re.findall(regex_pattern, str(j))
                    if val:
                        for k in val:
                            main_numbers.append(k)
        regex_pattern = r">\s*(.+?)<"
        secondary_numbers = list()
        # pass 2: the smaller secondary numbers in each panel
        for i in data:
            soup = BeautifulSoup(str(i), 'html.parser')
            item = soup.find_all('span', attrs = {'class': 'number-table'})
            if item:
                for j in item:
                    val = re.findall(regex_pattern, str(j))
                    if val:
                        for k in val:
                            secondary_numbers.append(k)
        # interleave main/secondary; IndexErrors just skip missing slots
        combined_List = list()
        for i in range(4):
            try:
                combined_List.append(main_numbers[i])
            except:
                pass
            try:
                combined_List.append(secondary_numbers[i])
            except:
                pass
        data = list()
        for i in combined_List:
            data.append(int(i.replace(',','').strip()))
        if data:
            return data
        else:
            return False
    except:
        return False
def active_cases(self):
    """Return a dict describing currently-active cases, or False on scrape failure."""
    numbers = self.data_grab()
    if not numbers:
        return False
    return {
        'currently infected patients': numbers[0],
        'patients in mild conditions': numbers[1],
        'serious/critical conditions': numbers[3],
    }
def closed_cases(self):
    """Return a dict describing closed cases, or False on scrape failure."""
    numbers = self.data_grab()
    if not numbers:
        return False
    return {
        'outcomes': numbers[2],
        'recovered/discharged': numbers[4],
        'deaths': numbers[5],
    }
    def country_info( self ):
        """Fetch the 'main_table_countries_today' table as raw string rows.

        Each row is the list of <td> cell texts of one <tr>, after dropping 8
        leading header/summary rows. The remaining rows presumably start with
        the 'World' aggregate and end with 8 trailer rows -- callers slice
        with [1:-8]; verify against the live page.

        Returns:
            list[list[str]] on success, False on any failure.
        """
        try:
            r = requests.get(self.url)
            r = r.text
            soup = BeautifulSoup(r, 'html.parser')
            data = soup.find_all('table', attrs = {'id' : 'main_table_countries_today'})
            data = str(data)
            soup = BeautifulSoup(data, 'html.parser')
            data = soup.find_all('tr')
            # Drop the 8 leading header rows.
            for i in range(8):
                data.pop(0)
            regex_pattern = r">(.*?)<"
            rows = list()
            for i in data:
                soup = BeautifulSoup(str(i), 'html.parser')
                Data = soup.find_all('td')
                Data = str(Data)
                item = re.findall(regex_pattern, Data)
                # Remove the ', ' separators that str([...]) inserted between cells.
                # NOTE(review): the loop variable shadows the outer 'i'; harmless here.
                for i in range(item.count(', ')):
                    item.pop(item.index(', '))
                try:
                    rows.append(item)
                except:
                    pass
            if rows:
                return rows
            else:
                return False
        except:
            # NOTE(review): bare except -- all errors collapse to False.
            return False
def countries( self ):
data = self.country_info()
if data:
data = data[1:-8]
names = list()
for i in data:
info = {
'id': int(i[0].replace(',','').strip()),
'name': i[2].strip(),
'continent': i[19].strip()
}
names.append(info)
if names:
return names
else:
return False
else:
return False
    def country_info_by_name( self, name ):
        """Return a dict of detailed statistics for one country, matched by name.

        Args:
            name: country name, case-insensitive.

        Returns:
            dict of stats on success; False when name is None, the table
            could not be fetched, or no country matched.
        """
        if name is not None:
            data = self.country_info()
            if data:
                # Drop the 'World' row and the trailing summary rows.
                data = data[1:-8]
                name = name.upper()
                # NOTE(review): shadows the builtin 'bool' inside this method.
                bool = False
                for i in data:
                    # Column 2 holds the country name.
                    if i[2].upper() == name:
                        bool = True
                        break
                if bool:
                    # Parse each raw cell to int; non-numeric cells become 'N/A'.
                    n_list = list()
                    for j in range(len(i)):
                        item = i[j].replace(',','')
                        item = item.replace('+','')
                        # NOTE(review): strip() result is discarded; harmless since
                        # int() ignores surrounding whitespace anyway.
                        item.strip()
                        try:
                            item = int(item)
                        except:
                            item = 'N/A'
                        n_list.append(item)
                    # j keeps the raw string row, i becomes the parsed row.
                    j = i
                    i = n_list
                    info = {
                        'id': int(j[0].strip()),
                        'name': j[2],
                        'total cases': i[4],
                        'new cases': i[5],
                        'total deaths': i[6],
                        'new deaths': i[7],
                        'total recoveries': i[8],
                        'new recoveries': i[9],
                        'active cases': i[10],
                        'critical cases': i[11],
                        'total cases/1M pop': i[12],
                        'deaths/1M pop': i[13],
                        'total tests/1M pop': i[14],
                        'tests/1M pop': i[15],
                        'population': i[17],
                        'continent': j[19],
                        '1 case every X ppl': i[20],
                        '1 death every X ppl': i[21],
                        '1 test every X ppl': i[22],
                    }
                    return info
                else:
                    return bool
            else:
                return False
        else:
            return False
    def country_info_by_id( self, id ):
        """Return a dict of detailed statistics for one country, matched by table id.

        Args:
            id: integer position id from the first table column (see countries()).

        Returns:
            dict of stats on success; False when id is None, the table could
            not be fetched, or no row matched.
        """
        if id is not None:
            data = self.country_info()
            if data:
                # Drop the 'World' row and the trailing summary rows.
                data = data[1:-8]
                # NOTE(review): shadows the builtin 'bool' inside this method.
                bool = False
                for i in data:
                    # Column 0 holds the row id.
                    if int(i[0].strip()) == id:
                        bool = True
                        break
                if bool:
                    # Parse each raw cell to int; non-numeric cells become 'N/A'.
                    n_list = list()
                    for j in range(len(i)):
                        item = i[j].replace(',','')
                        item = item.replace('+','')
                        # NOTE(review): strip() result is discarded; harmless since
                        # int() ignores surrounding whitespace anyway.
                        item.strip()
                        try:
                            item = int(item)
                        except:
                            item = 'N/A'
                        n_list.append(item)
                    # j keeps the raw string row, i becomes the parsed row.
                    j = i
                    i = n_list
                    info = {
                        'id': int(j[0].strip()),
                        'name': j[2],
                        'total cases': i[4],
                        'new cases': i[5],
                        'total deaths': i[6],
                        'new deaths': i[7],
                        'total recoveries': i[8],
                        'new recoveries': i[9],
                        'active cases': i[10],
                        'critical cases': i[11],
                        'total cases/1M pop': i[12],
                        'deaths/1M pop': i[13],
                        'total tests/1M pop': i[14],
                        'tests/1M pop': i[15],
                        'population': i[17],
                        'continent': j[19],
                        '1 case every X ppl': i[20],
                        '1 death every X ppl': i[21],
                        '1 test every X ppl': i[22],
                    }
                    return info
                else:
                    return bool
            else:
                return False
        else:
            return False
    def cont_info( self ):
        """Fetch the per-continent summary rows (rows 1..7) of the main table.

        Returns:
            list[list[str]] of raw cell texts, one list per continent row,
            or False on any failure.
        """
        try:
            r = requests.get(self.url)
            r = r.text
            soup = BeautifulSoup(r, 'html.parser')
            data = soup.find_all('table', attrs = {'id' : 'main_table_countries_today'})
            data = str(data)
            soup = BeautifulSoup(data, 'html.parser')
            data = soup.find_all('tr')
            # Keep only the 7 continent summary rows near the top of the table.
            data = data[1:8]
            regex_pattern = r">(.*?)<"
            rows = list()
            for i in data:
                soup = BeautifulSoup(str(i), 'html.parser')
                Data = soup.find_all('td')
                Data = str(Data)
                item = re.findall(regex_pattern, Data)
                # Remove the ', ' separators that str([...]) inserted between cells.
                # NOTE(review): the loop variable shadows the outer 'i'; harmless here.
                for i in range(item.count(', ')):
                    item.pop(item.index(', '))
                try:
                    rows.append(item)
                except:
                    pass
            if rows:
                return rows
            else:
                return False
        except:
            # NOTE(review): bare except -- all errors collapse to False.
            return False
    def continent_info( self, name ):
        """Return aggregated statistics for one continent, matched by name.

        Args:
            name: continent name, case-insensitive; 'Oceania' also matches the
                  site's 'Australia/Oceania' label.

        Returns:
            dict of stats on success; False when name is None, the rows could
            not be fetched, or no continent matched.
        """
        if name is not None:
            data = self.cont_info()
            if data:
                name = name.upper()
                # NOTE(review): shadows the builtin 'bool' inside this method.
                bool = False
                for i in data:
                    # Column 1 holds the continent name.
                    if i[1].upper().replace('AUSTRALIA/OCEANIA','OCEANIA') == name:
                        bool = True
                        break
                if bool:
                    # Parse each raw cell to int; non-numeric cells become 'N/A'.
                    n_list = list()
                    for j in range(len(i)):
                        item = i[j].replace(',','')
                        item = item.replace('+','')
                        # NOTE(review): strip() result is discarded; harmless since
                        # int() ignores surrounding whitespace anyway.
                        item.strip()
                        try:
                            item = int(item)
                        except:
                            item = 'N/A'
                        n_list.append(item)
                    # j keeps the raw string row, i becomes the parsed row.
                    j = i
                    i = n_list
                    info = {
                        'name': j[1],
                        'total cases': i[2],
                        'new cases': i[3],
                        'total deaths': i[4],
                        'new deaths': i[5],
                        'total recoveries': i[6],
                        'new recoveries': i[7],
                        'active cases': i[8],
                        'critical cases': i[9],
                    }
                    return info
                else:
                    return bool
            else:
                return False
        else:
            return False
def countries_info_by_continent( self, name ):
if name is not None:
data = self.country_info()
if data:
data = data[1:-8]
name = name.upper()
countries = list()
bool = False
for i in data:
if i[19].upper().replace('AUSTRALIA/OCEANIA','OCEANIA') == name:
bool = True
countries.append(i)
if bool:
n_list = list()
info = list()
for i in countries:
for j in range(len(i)):
item = i[j].replace(',','')
item = item.replace('+','')
item.strip()
try:
item = int(item)
except:
item = 'N/A'
n_list.append(item)
j = i
i = n_list
info.append({
'id': int(j[0].strip()),
'name': j[2],
'total cases': i[4],
'new cases': i[5],
'total deaths': i[6],
'new deaths': i[7],
'total recoveries': i[8],
'new recoveries': i[9],
'active cases': i[10],
'critical cases': i[11],
'total cases/1M pop': i[12],
'deaths/1M pop': i[13],
'total tests/1M pop': i[14],
'tests/1M pop': i[15],
'population': i[17],
'continent': j[19],
'1 case every X ppl': i[20],
'1 death every X ppl': i[21],
'1 test every X ppl': i[22],
})
return info
else:
return bool
else:
return False
else:
return False
class covid:
    """Static informational texts (symptoms/preventions) fetched from GitHub."""

    def __init__( self):
        # Base URL of the raw-content folder that hosts the text resources.
        self.url = r'https://raw.githubusercontent.com/Ajay2810-hub/covid19-tracker/master/src/'

    def _fetch_text(self, filename):
        # Shared download helper: body text on HTTP 200, False otherwise.
        response = requests.get(r'{}{}'.format(self.url, filename))
        if response.status_code != 200:
            return False
        return str(response.text)

    def symptoms(self):
        """Return the symptoms text, or False when the download fails."""
        return self._fetch_text('symptoms.txt')

    def preventions(self):
        """Return the preventions text, or False when the download fails."""
        return self._fetch_text('preventions.txt')
| StarcoderdataPython |
345408 | from django.db import models
from django.db.models import Q
from rest_framework import serializers
from iaso.api.common import TimestampField
from iaso.models import OrgUnit, OrgUnitType, Group
class TimestampSerializerMixin:
    """Serialize ``DateTimeField`` values as Unix timestamps.

    Overrides DRF's default field mapping (which renders RFC 3339 strings)
    with ``TimestampField`` so the output stays compatible with older API
    clients.
    """

    # Copy DRF's stock mapping, then swap in TimestampField for datetimes.
    serializer_field_mapping = serializers.ModelSerializer.serializer_field_mapping.copy()
    serializer_field_mapping[models.DateTimeField] = TimestampField
class GroupSerializer(TimestampSerializerMixin, serializers.ModelSerializer):
    """Serializer for iaso ``Group``; datetimes rendered as timestamps via the mixin."""

    class Meta:
        model = Group
        fields = ["id", "name", "source_ref", "source_version", "created_at", "updated_at"]
class OrgUnitTypeSerializer(TimestampSerializerMixin, serializers.ModelSerializer):
    """Serializer for ``OrgUnitType``; datetimes rendered as timestamps via the mixin."""

    class Meta:
        model = OrgUnitType
        fields = ["id", "name", "short_name", "created_at", "updated_at", "depth"]
# noinspection PyMethodMayBeStatic
class OrgUnitSerializer(TimestampSerializerMixin, serializers.ModelSerializer):
    """Canonical OrgUnit serializer.

    All OrgUnit conversion logic is concentrated here; subclass and trim or
    extend the field list to serialize less or more (see
    OrgUnitSearchParentSerializer for a reduced variant).
    """

    org_unit_type = OrgUnitTypeSerializer()
    groups = GroupSerializer(many=True)
    parent_name = serializers.SerializerMethodField()
    source = serializers.SerializerMethodField()
    org_unit_type_name = serializers.SerializerMethodField()
    search_index = serializers.SerializerMethodField()
    source_id = serializers.SerializerMethodField()
    has_geo_json = serializers.SerializerMethodField()
    latitude = serializers.SerializerMethodField()
    longitude = serializers.SerializerMethodField()
    altitude = serializers.SerializerMethodField()
    # Resolved through the class so a subclass serializes parents with its own serializer.
    parent = serializers.SerializerMethodField()

    @classmethod
    def get_parent(cls, org_unit):
        # Uses cls, not OrgUnitSerializer, so subclasses recurse with themselves.
        if not org_unit.parent:
            return None
        return cls(org_unit.parent).data

    def get_parent_name(self, org_unit):
        if not org_unit.parent:
            return None
        return org_unit.parent.name

    def get_source(self, org_unit):
        version = org_unit.version
        return version.data_source.name if version else None

    def get_org_unit_type_name(self, org_unit):
        unit_type = org_unit.org_unit_type
        return unit_type.name if unit_type else None

    def get_search_index(self, org_unit):
        # Present only when the queryset annotated it.
        return getattr(org_unit, "search_index", None)

    def get_source_id(self, org_unit):
        version = org_unit.version
        return version.data_source.id if version else None

    def get_has_geo_json(self, org_unit):
        return bool(org_unit.simplified_geom)

    def get_latitude(self, org_unit):
        location = org_unit.location
        return location.y if location else None

    def get_longitude(self, org_unit):
        location = org_unit.location
        return location.x if location else None

    def get_altitude(self, org_unit):
        location = org_unit.location
        return location.z if location else None

    class Meta:
        model = OrgUnit
        fields = [
            "id",
            "name",
            "aliases",
            "parent_id",
            "validation_status",
            "parent_name",
            "source",
            "source_ref",
            "sub_source",
            "org_unit_type_name",
            "parent",
            "latitude",
            "longitude",
            "altitude",
            "has_geo_json",
            "search_index",
            "created_at",
            "org_unit_type_id",
        ]
class OrgUnitSmallSearchSerializer(OrgUnitSerializer):
    """Reduced OrgUnit serializer for lightweight search responses."""

    class Meta:
        model = OrgUnit
        fields = [
            "id",
            "name",
            "parent_id",
            "validation_status",
            "parent_name",
            "source",
            "source_ref",
            "org_unit_type_name",
            "search_index",
            "parent",
        ]
class OrgUnitSearchParentSerializer(OrgUnitSerializer):
    """Minimal parent representation; recursive via the inherited get_parent."""

    class Meta:
        model = OrgUnit
        fields = ["id", "name", "parent"]
# noinspection PyMethodMayBeStatic
class OrgUnitSearchSerializer(OrgUnitSerializer):
    """Full-detail OrgUnit serializer for search results, plus a submission count."""

    parent = OrgUnitSearchParentSerializer()
    instances_count = serializers.SerializerMethodField()

    def get_instances_count(self, org_unit):
        # in some case instances_count is prefilled by an annotation
        if hasattr(org_unit, "instances_count"):
            return org_unit.instances_count
        else:
            # Count only genuine submissions: must have a file, must not come
            # from a test device, must not be soft-deleted.
            return org_unit.instance_set.filter(~Q(file="") & ~Q(device__test_device=True) & ~Q(deleted=True)).count()

    class Meta:
        model = OrgUnit
        fields = [
            "id",
            "name",
            "aliases",
            "parent_id",
            "validation_status",
            "parent_name",
            "source",
            "source_ref",
            "sub_source",
            "org_unit_type_name",
            "parent",
            "latitude",
            "longitude",
            "altitude",
            "has_geo_json",
            "search_index",
            "created_at",
            "source_id",
            "org_unit_type",
            "org_unit_type_id",
            "instances_count",
            "updated_at",
            "groups",
        ]
class OrgUnitTreeSearchSerializer(OrgUnitSerializer):
    """Slim OrgUnit serializer for tree navigation; adds a has_children flag."""

    has_children = serializers.SerializerMethodField()

    def get_has_children(self, org_unit):
        # Only units placed in the tree (non-empty path) can have children.
        # NOTE(review): issues one EXISTS query per serialized unit --
        # probably a candidate for optimization.
        if not org_unit.path:
            return False
        return org_unit.children().exists()

    class Meta:
        model = OrgUnit
        fields = ["id", "name", "parent", "has_children", "validation_status", "org_unit_type_id"]
11243309 | <reponame>kanihal/CS631_pg_semantic_search
from gensim.utils import smart_open, simple_preprocess
from gensim.corpora.wikicorpus import _extract_pages, filter_wiki
from gensim.parsing.preprocessing import STOPWORDS
import gensim
import pandas as pd
import numpy as np
def tokenize(text):
    """Lower-case and tokenize *text*, dropping gensim stopwords.

    Returns an empty list when *text* cannot be tokenized (e.g. float NaN
    from pandas for a missing article) instead of propagating the error.
    """
    try:
        return [token for token in simple_preprocess(text) if token not in STOPWORDS]
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; non-string input still yields [].
        return []
# --- Load the three "All the News" CSV shards --------------------------------
df = pd.read_csv("/mnt/a99/d0/jagadeesha/db_ss/articles1.csv")
df2 = pd.read_csv("/mnt/a99/d0/jagadeesha/db_ss/articles2.csv")
df3 = pd.read_csv("/mnt/a99/d0/jagadeesha/db_ss/articles3.csv")

# Tokenize title + body for every article in each shard.
for frame in (df, df2, df3):
    frame["text"] = (frame["title"] + " " + frame["content"]).apply(tokenize)

# Collect every tokenized document once, in shard order.
# BUG FIX: the original looped over df, df2, df2 when building the dictionary
# (df3 was never included) and over df, df3, df3 when building the corpus
# (df2 was never included and df3 was counted twice). Both now use df, df2, df3.
texts = []
for frame in (df, df2, df3):
    texts.extend(frame["text"].tolist())

# Build and prune the vocabulary: drop tokens in <10 docs or in >10% of docs.
id2word_news = gensim.corpora.Dictionary(texts)
id2word_news.filter_extremes(no_below=10, no_above=0.1)
print(id2word_news)
# NOTE(review): gensim does not expand '~' in paths -- these writes create a
# literal './~...' file unless the path is pre-expanded; verify on the target host.
id2word_news.save("~/news.dict")

# Bag-of-words corpus over the same documents, serialized in Matrix Market form.
corpus = [id2word_news.doc2bow(doc) for doc in texts]
gensim.corpora.MmCorpus.serialize('~/corpus.mm', corpus)
mm_corpus = gensim.corpora.MmCorpus('~/corpus.mm')
print(mm_corpus)

# TF-IDF weighting, then a 100-topic LSI projection on top of it.
tfidf_model = gensim.models.TfidfModel(mm_corpus, id2word=id2word_news)
lsi_model = gensim.models.LsiModel(tfidf_model[mm_corpus], id2word=id2word_news, num_topics=100)
gensim.corpora.MmCorpus.serialize('~/news_tfidf.mm', tfidf_model[mm_corpus])
gensim.corpora.MmCorpus.serialize('~/news_lsa.mm', lsi_model[tfidf_model[mm_corpus]])
tfidf_corpus = gensim.corpora.MmCorpus('~/news_tfidf.mm')
lsi_corpus = gensim.corpora.MmCorpus('~/news_lsa.mm')

# Smoke test: project an unseen sentence into LSI space.
text = "A blood cell, also called a hematocyte, is a cell produced by hematopoiesis and normally found in blood."
bow_vector = id2word_news.doc2bow(tokenize(text))
lsi_vector = lsi_model[tfidf_model[bow_vector]]
print(lsi_vector)

# Persist the models/dictionary and verify the LSI model round-trips from disk.
lsi_model.save('~/lsi_news.model')
tfidf_model.save('~/tfidf_news.model')
id2word_news.save('~/news.dictionary')
lsi_model = gensim.models.LsiModel.load('~/lsi_news.model')
txt = "A blood cell, also called a hematocyte, is a cell produced by hematopoiesis and normally found in blood."
words = [token for token in simple_preprocess(txt) if token not in STOPWORDS]
bow = lsi_model.id2word.doc2bow(words)
vec = lsi_model[bow]
print(vec)
| StarcoderdataPython |
11262867 | <reponame>Common-Tool/flare-fakenet-ng
# Diverter for Windows implemented using WinDivert library
import logging
from pydivert.windivert import *
from pydivert.enum import Direction, Defaults
import socket
import os
import dpkt
import time
import threading
import platform
from winutil import *
import subprocess
class Diverter(WinUtilMixin):
    def __init__(self, diverter_config, listeners_config, logging_level = logging.INFO):
        """Load configuration, run network sanity checks and open the WinDivert handle.

        Args:
            diverter_config: dict of [Diverter] options parsed from the INI config.
            listeners_config: dict mapping listener name -> its config dict.
            logging_level: verbosity for this component's logger.

        Side effects:
            Calls sys.exit(1) when the WinDivert DLL cannot be located or a
            handle cannot be opened (e.g. not running as Administrator).
            May open a pcap file for traffic capture when 'dumppackets' is set.
        """
        self.logger = logging.getLogger('Diverter')
        self.logger.setLevel(logging_level)

        self.diverter_config = diverter_config
        self.listeners_config = listeners_config

        # Local IP address
        self.external_ip = socket.gethostbyname(socket.gethostname())
        self.loopback_ip = socket.gethostbyname('localhost')

        # Sessions cache
        # NOTE: A dictionary of source ports mapped to destination address, port tuples
        self.sessions = dict()

        #######################################################################
        # Listener specific configuration
        # NOTE: All of these definitions have protocol as the first key
        # followed by a list or another nested dict with the actual definitions

        # Diverted ports
        self.diverted_ports = dict()

        # Listener Port Process filtering
        # TODO: Allow PIDs
        self.port_process_whitelist = dict()
        self.port_process_blacklist = dict()

        # Listener Port Host filtering
        # TODO: Allow domain name resolution
        self.port_host_whitelist = dict()
        self.port_host_blacklist = dict()

        # Execute command list
        self.port_execute = dict()

        # Parse listener configurations
        self.parse_listeners_config(listeners_config)

        #######################################################################
        # Diverter settings and filters

        # Intercept filter
        # NOTE: All relevant connections are recorded as outbound by WinDivert
        # so additional filtering based on destination port will need to be
        # performed in order to determine the correct traffic direction.
        self.filter = None

        # Default TCP/UDP listeners
        self.default_listener_tcp_port = None
        self.default_listener_udp_port = None

        # Global TCP/UDP port blacklist
        self.blacklist_ports_tcp = []
        self.blacklist_ports_udp = []

        # Global process blacklist
        # TODO: Allow PIDs
        self.blacklist_processes = []

        # Global host blacklist
        # TODO: Allow domain resolution
        self.blacklist_hosts = []

        # Parse diverter config
        self.parse_diverter_config()

        #######################################################################
        # Network verification

        # Check active interfaces
        if not self.check_active_ethernet_adapters():
            self.logger.warning('WARNING: No active ethernet interfaces detected!')
            self.logger.warning('         Please enable a network interface.')

        # Check configured gateways
        if not self.check_gateways():
            self.logger.warning('WARNING: No gateways configured!')
            self.logger.warning('         Please configure a default gateway or route in order to intercept external traffic.')

        # Check configured DNS servers
        if not self.check_dns_servers():
            self.logger.warning('WARNING: No DNS servers configured!')
            self.logger.warning('         Please configure a DNS server in order to allow network resolution.')

        #######################################################################
        # Initialize WinDivert

        # Locate the WinDivert driver
        # NOTE: This is necessary to work in scenarios where the applications is
        # executed as a python script, installed as an egg or with the pyinstaller
        dll_arch = "64" if platform.machine() == 'AMD64' else "32"
        dll_path = os.path.join('lib', dll_arch, 'WinDivert.dll')

        # Fall back to a path relative to this module when the CWD copy is absent.
        if not os.path.exists(dll_path):
            dll_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'lib', dll_arch, 'WinDivert.dll')

        if not os.path.exists(dll_path):
            self.logger.error('Could not open bundled WinDivert.dll')
            sys.exit(1)

        # Divert handle
        driver = None
        driver = WinDivert(dll_path = dll_path)

        try:
            self.handle = Handle(driver, filter=self.filter)
            self.handle.open()
        except WindowsError, e:

            # winerror 5 = access denied, 3 = path not found; anything else is fatal too.
            if e.winerror == 5:
                self.logger.error('ERROR: Insufficient privileges to run windows diverter.')
                self.logger.error('       Please restart with Administrator privileges.')
                sys.exit(1)
            elif e.winerror == 3:
                self.logger.error('ERROR: Could not locate WinDivert DLL or one of its components.')
                self.logger.error('       Please make sure you have copied FakeNet-NG to the C: drive.')
                sys.exit(1)
            else:
                self.logger.error('ERROR: Failed to open a handle to the WinDivert driver: %s', e)
                sys.exit(1)

        # Capture packets configuration
        self.capture_flag = False
        self.dump_packets_file_prefix = "packets"
        self.pcap = None

        if self.diverter_config.get('dumppackets') and self.diverter_config['dumppackets'].lower() == 'yes':
            self.capture_flag = True
            pcap_filename = "%s_%s.pcap" % (diverter_config.get('dumppacketsfileprefix', 'packets'), time.strftime("%Y%m%d_%H%M%S"))
            self.logger.info('Capturing traffic to %s', pcap_filename)
            self.pcap = dpkt.pcap.Writer(open(pcap_filename, 'wb'), linktype=dpkt.pcap.DLT_RAW)
###########################################################################
# Parse listener specific settings and filters
    def parse_listeners_config(self, listeners_config):
        """Populate per-listener port, process/host filter and execute-cmd tables.

        Fills self.diverted_ports, self.port_process_whitelist/blacklist,
        self.port_host_whitelist/blacklist and self.port_execute, each keyed by
        protocol ('TCP'/'UDP') then port. Exits via sys.exit(1) on invalid
        configuration (missing/unknown protocol, whitelist AND blacklist).

        NOTE: uses dict.iteritems() -- this module targets Python 2.
        """
        #######################################################################
        # Populate diverter ports and process filters from the configuration
        for listener_name, listener_config in listeners_config.iteritems():

            if 'port' in listener_config:

                port = int(listener_config['port'])

                if not 'protocol' in listener_config:
                    self.logger.error('ERROR: Protocol not defined for listener %s', listener_name)
                    sys.exit(1)

                protocol = listener_config['protocol'].upper()

                if not protocol in ['TCP', 'UDP']:
                    self.logger.error('ERROR: Invalid protocol %s for listener %s', protocol, listener_name)
                    sys.exit(1)

                if not protocol in self.diverted_ports:
                    self.diverted_ports[protocol] = list()

                self.diverted_ports[protocol].append(port)

                ###############################################################
                # Process filtering configuration
                if 'processwhitelist' in listener_config and 'processblacklist' in listener_config:
                    self.logger.error('ERROR: Listener can\'t have both process whitelist and blacklist.')
                    sys.exit(1)

                elif 'processwhitelist' in listener_config:

                    self.logger.debug('Process whitelist:')

                    if not protocol in self.port_process_whitelist:
                        self.port_process_whitelist[protocol] = dict()

                    self.port_process_whitelist[protocol][port] = [process.strip() for process in listener_config['processwhitelist'].split(',')]

                    for port in self.port_process_whitelist[protocol]:
                        self.logger.debug(' Port: %d (%s) Processes: %s', port, protocol, ', '.join(self.port_process_whitelist[protocol][port]))

                elif 'processblacklist' in listener_config:

                    self.logger.debug('Process blacklist:')

                    if not protocol in self.port_process_blacklist:
                        self.port_process_blacklist[protocol] = dict()

                    self.port_process_blacklist[protocol][port] = [process.strip() for process in listener_config['processblacklist'].split(',')]

                    for port in self.port_process_blacklist[protocol]:
                        self.logger.debug(' Port: %d (%s) Processes: %s', port, protocol, ', '.join(self.port_process_blacklist[protocol][port]))

                ###############################################################
                # Host filtering configuration
                if 'hostwhitelist' in listener_config and 'hostblacklist' in listener_config:
                    self.logger.error('ERROR: Listener can\'t have both host whitelist and blacklist.')
                    sys.exit(1)

                elif 'hostwhitelist' in listener_config:

                    self.logger.debug('Host whitelist:')

                    if not protocol in self.port_host_whitelist:
                        self.port_host_whitelist[protocol] = dict()

                    self.port_host_whitelist[protocol][port] = [host.strip() for host in listener_config['hostwhitelist'].split(',')]

                    for port in self.port_host_whitelist[protocol]:
                        self.logger.debug(' Port: %d (%s) Hosts: %s', port, protocol, ', '.join(self.port_host_whitelist[protocol][port]))

                elif 'hostblacklist' in listener_config:

                    self.logger.debug('Host blacklist:')

                    if not protocol in self.port_host_blacklist:
                        self.port_host_blacklist[protocol] = dict()

                    self.port_host_blacklist[protocol][port] = [host.strip() for host in listener_config['hostblacklist'].split(',')]

                    for port in self.port_host_blacklist[protocol]:
                        self.logger.debug(' Port: %d (%s) Hosts: %s', port, protocol, ', '.join(self.port_host_blacklist[protocol][port]))

                ###############################################################
                # Execute command configuration
                if 'executecmd' in listener_config:

                    if not protocol in self.port_execute:
                        self.port_execute[protocol] = dict()

                    self.port_execute[protocol][port] = listener_config['executecmd'].strip()
                    self.logger.debug('Port %d (%s) ExecuteCmd: %s', port, protocol, self.port_execute[protocol][port] )
###########################################################################
# Parse diverter settings and filters
    def parse_diverter_config(self):
        """Build the WinDivert capture filter and global blacklists from config.

        When 'redirectalltraffic' is enabled, self.filter captures all outbound
        icmp/tcp/udp and the configured default TCP/UDP listener ports are
        resolved (exiting via sys.exit(1) when missing/invalid). Otherwise the
        filter is limited to the ports the listeners actually use.
        """
        # Do not redirect blacklisted processes
        if self.diverter_config.get('processblacklist') != None:
            self.blacklist_processes = [process.strip() for process in self.diverter_config.get('processblacklist').split(',')]
            self.logger.debug('Blacklisted processes: %s', ', '.join([str(p) for p in self.blacklist_processes]))

        # Do not redirect blacklisted hosts
        if self.diverter_config.get('hostblacklist') != None:
            self.blacklist_hosts = [host.strip() for host in self.diverter_config.get('hostblacklist').split(',')]
            self.logger.debug('Blacklisted hosts: %s', ', '.join([str(p) for p in self.blacklist_hosts]))

        # Redirect all traffic
        if self.diverter_config.get('redirectalltraffic') and self.diverter_config['redirectalltraffic'].lower() == 'yes':
            self.filter = "outbound and ip and (icmp or tcp or udp)"

            if self.diverter_config.get('defaulttcplistener') == None:
                self.logger.error('ERROR: No default TCP listener specified in the configuration.')
                sys.exit(1)

            elif self.diverter_config.get('defaultudplistener') == None:
                self.logger.error('ERROR: No default UDP listener specified in the configuration.')
                sys.exit(1)

            elif not self.diverter_config.get('defaulttcplistener') in self.listeners_config:
                self.logger.error('ERROR: No configuration exists for default TCP listener %s', self.diverter_config.get('defaulttcplistener'))
                sys.exit(1)

            elif not self.diverter_config.get('defaultudplistener') in self.listeners_config:
                self.logger.error('ERROR: No configuration exists for default UDP listener %s', self.diverter_config.get('defaultudplistener'))
                sys.exit(1)

            else:
                # NOTE(review): these informational messages are logged at
                # ERROR level -- presumably should be info(); confirm intent.
                self.default_listener_tcp_port = int( self.listeners_config[ self.diverter_config['defaulttcplistener'] ]['port'] )
                self.logger.error('Using default listener %s on port %d', self.diverter_config['defaulttcplistener'], self.default_listener_tcp_port)

                self.default_listener_udp_port = int( self.listeners_config[ self.diverter_config['defaultudplistener'] ]['port'] )
                self.logger.error('Using default listener %s on port %d', self.diverter_config['defaultudplistener'], self.default_listener_udp_port)

            # Do not redirect blacklisted TCP ports
            if self.diverter_config.get('blacklistportstcp') != None:
                self.blacklist_ports_tcp = [int(port.strip()) for port in self.diverter_config.get('blacklistportstcp').split(',')]
                self.logger.debug('Blacklisted TCP ports: %s', ', '.join([str(p) for p in self.blacklist_ports_tcp]))

            # Do not redirect blacklisted UDP ports
            if self.diverter_config.get('blacklistportsudp') != None:
                self.blacklist_ports_udp = [int(port.strip()) for port in self.diverter_config.get('blacklistportsudp').split(',')]
                self.logger.debug('Blacklisted UDP ports: %s', ', '.join([str(p) for p in self.blacklist_ports_udp]))

        # Redirect only specific traffic, build the filter dynamically
        else:

            filter_diverted_ports = list()

            if self.diverted_ports.get('TCP') != None:
                for tcp_port in self.diverted_ports.get('TCP'):
                    filter_diverted_ports.append("tcp.DstPort == %s" % tcp_port)
                    filter_diverted_ports.append("tcp.SrcPort == %s" % tcp_port)

            if self.diverted_ports.get('UDP') != None:
                for udp_port in self.diverted_ports.get('UDP'):
                    filter_diverted_ports.append("udp.DstPort == %s" % udp_port)
                    filter_diverted_ports.append("udp.SrcPort == %s" % udp_port)

            if len(filter_diverted_ports) > 0:
                self.filter = "outbound and ip and (icmp or %s)" % " or ".join(filter_diverted_ports)
            else:
                self.filter = "outbound and ip"
###########################################################################
# Diverter controller functions
    def start(self):
        """Apply DNS tweaks per configuration and launch the diversion thread.

        Optionally points the local DNS server at loopback and stops the
        Windows DNS cache service, then starts divert_thread as a daemon so it
        does not block interpreter shutdown.
        """
        self.logger.info('Starting...')

        # Set local DNS server IP address
        if self.diverter_config.get('modifylocaldns') and self.diverter_config['modifylocaldns'].lower() == 'yes':
            self.set_dns_server(self.loopback_ip)

        # Stop DNS service
        if self.diverter_config.get('stopdnsservice') and self.diverter_config['stopdnsservice'].lower() == 'yes':
            self.stop_service_helper('Dnscache')

        self.logger.info('Diverting ports: ')
        if self.diverted_ports.get('TCP'): self.logger.info('TCP: %s', ', '.join("%d" % port for port in self.diverted_ports['TCP']))
        if self.diverted_ports.get('UDP'): self.logger.info('UDP: %s', ', '.join("%d" % port for port in self.diverted_ports['UDP']))

        self.flush_dns()

        self.diverter_thread = threading.Thread(target=self.divert_thread)
        self.diverter_thread.daemon = True

        self.diverter_thread.start()
    def divert_thread(self):
        """Receive packets from the WinDivert handle forever and process each one.

        Runs as a daemon thread; returns quietly when the handle is closed
        during shutdown, re-raises any other Windows error.
        """
        try:
            while True:
                packet = self.handle.receive()
                self.handle_packet(packet)

        # Handle errors related to shutdown process.
        # (winerror codes 4/6/995 are presumably the handle-closed/aborted
        # conditions seen when stop() closes the handle -- confirm.)
        except WindowsError as e:
            if e.winerror in [4,6,995]:
                return
            else:
                raise
    def stop(self):
        """Close the capture file and WinDivert handle, then undo DNS changes.

        Closing the handle causes divert_thread to exit (see its shutdown
        error handling). DNS server/service are restored only when the
        corresponding config options enabled the changes in start().
        """
        self.logger.info('Stopping...')
        if self.pcap:
            self.pcap.close()
        self.handle.close()

        # Restore DNS server
        if self.diverter_config.get('modifylocaldns') and self.diverter_config['modifylocaldns'].lower() == 'yes':
            self.restore_dns_server()

        # Restart DNS service
        if self.diverter_config.get('stopdnsservice') and self.diverter_config['stopdnsservice'].lower() == 'yes':
            self.start_service_helper('Dnscache')

        self.flush_dns()
    def handle_icmp_packet(self, packet):
        """Retarget an outgoing ICMP packet at the local host and return it.

        Packets already addressed local-to-local (loopback->loopback or
        external->external) are passed through unchanged; everything else has
        its destination rewritten to the matching local interface IP so the
        local Windows stack answers the echo.
        """
        # Modify outgoing ICMP packet to target local Windows host which will reply to the ICMP messages.
        # HACK: Can't intercept inbound ICMP server, but still works for now.
        if not ((packet.meta.is_loopback() and packet.src_addr == self.loopback_ip and packet.dst_addr == self.loopback_ip) or \
               (packet.src_addr == self.external_ip and packet.dst_addr == self.external_ip)):

            self.logger.info('Modifying %s ICMP packet:', 'loopback' if packet.meta.is_loopback() else 'external')
            self.logger.info('  from: %s -> %s', packet.src_addr, packet.dst_addr)

            # Direct packet to the right interface IP address to avoid routing issues
            packet.dst_addr = self.loopback_ip if packet.meta.is_loopback() else self.external_ip

            self.logger.info('  to: %s -> %s', packet.src_addr, packet.dst_addr)

        return packet
def handle_tcp_udp_packet(self, packet, protocol, default_listener_port, blacklist_ports):
    """Divert or restore a single TCP/UDP packet.

    Outbound requests that pass the port/process/host filters are redirected to
    a local listener (the matching diverted port, or the default listener);
    responses coming back from a local listener get their original destination
    restored from the per-source-port session cache (``self.sessions``).

    :param packet: intercepted packet object (WinDivert-style; has addrs/ports/meta)
    :param protocol: 'TCP' or 'UDP'; selects the protocol-specific filter maps
    :param default_listener_port: catch-all local listener port, or None
    :param blacklist_ports: ports that must never be diverted
    :return: the (possibly modified) packet
    """
    # Meta strings
    interface_string = 'loopback' if packet.meta.is_loopback() else 'external'
    direction_string = 'inbound' if packet.meta.is_inbound() else 'outbound'
    # Protocol specific filters
    diverted_ports = self.diverted_ports.get(protocol)
    port_process_whitelist = self.port_process_whitelist.get(protocol)
    port_process_blacklist = self.port_process_blacklist.get(protocol)
    port_host_whitelist = self.port_host_whitelist.get(protocol)
    port_host_blacklist = self.port_host_blacklist.get(protocol)
    port_execute = self.port_execute.get(protocol)
    if packet.src_port in blacklist_ports or packet.dst_port in blacklist_ports:
        self.logger.debug('Forwarding blacklisted port %s %s %s packet:', direction_string, interface_string, protocol)
        self.logger.debug(' %s:%d -> %s:%d', packet.src_addr, packet.src_port, packet.dst_addr, packet.dst_port)
    # Check if a packet must be diverted to a local listener
    # Rules:
    # 1) Divert outbound packets only
    # 2) Make sure we are not diverting response packet based on the source port
    # 3) Make sure the destination port is a known diverted port or we have a default listener port defined
    elif diverted_ports and (packet.dst_port in diverted_ports or default_listener_port != None) and not packet.src_port in diverted_ports:
        # Find which process ID is sending the request
        conn_pid = self.get_pid_port_tcp(packet.src_port) if type(packet.headers[1].hdr) == TcpHeader else self.get_pid_port_udp(packet.src_port)
        process_name = self.get_process_image_filename(conn_pid) if conn_pid else None
        # Check process blacklist
        if process_name and process_name in self.blacklist_processes:
            self.logger.debug('Ignoring %s %s %s request packet from process %s in the process blacklist.', direction_string, interface_string, protocol, process_name)
            self.logger.debug(' %s:%d -> %s:%d', packet.src_addr, packet.src_port, packet.dst_addr, packet.dst_port)
        # Check host blacklist
        # NOTE(review): this starts a NEW if-chain (not elif), so a packet whose
        # process was "ignored" above still falls through here and may be
        # modified in the else branch below -- confirm whether elif was intended.
        if packet.dst_addr in self.blacklist_hosts:
            self.logger.debug('Ignoring %s %s %s request packet to %s in the host blacklist.', direction_string, interface_string, protocol, packet.dst_addr)
            self.logger.debug(' %s:%d -> %s:%d', packet.src_addr, packet.src_port, packet.dst_addr, packet.dst_port)
        # Check the port process whitelist
        elif process_name and port_process_whitelist and \
                ((packet.dst_port in port_process_whitelist and not process_name in port_process_whitelist[packet.dst_port]) or\
                (default_listener_port and default_listener_port in port_process_whitelist and not process_name in port_process_whitelist[default_listener_port])) :
            self.logger.debug('Ignoring %s %s %s request packet from process %s not in the listener process whitelist.', direction_string, interface_string, protocol, process_name)
            self.logger.debug(' %s:%d -> %s:%d', packet.src_addr, packet.src_port, packet.dst_addr, packet.dst_port)
        # Check the port process blacklist
        elif process_name and port_process_blacklist and \
                ((packet.dst_port in port_process_blacklist and process_name in port_process_blacklist[packet.dst_port]) or\
                (default_listener_port and default_listener_port in port_process_blacklist and process_name in port_process_blacklist[default_listener_port])) :
            self.logger.debug('Ignoring %s %s %s request packet from process %s in the listener process blacklist.', direction_string, interface_string, protocol, process_name)
            self.logger.debug(' %s:%d -> %s:%d', packet.src_addr, packet.src_port, packet.dst_addr, packet.dst_port)
        # Check the port host whitelist
        elif packet.dst_addr and port_host_whitelist and \
                ((packet.dst_port in port_host_whitelist and not packet.dst_addr in port_host_whitelist[packet.dst_port]) or\
                (default_listener_port and default_listener_port in port_host_whitelist and not packet.dst_addr in port_host_whitelist[default_listener_port])) :
            self.logger.debug('Ignoring %s %s %s request packet to %s not in the listener host whitelist.', direction_string, interface_string, protocol, packet.dst_addr)
            self.logger.debug(' %s:%d -> %s:%d', packet.src_addr, packet.src_port, packet.dst_addr, packet.dst_port)
        # Check the port host blacklist
        elif packet.dst_addr and port_host_blacklist and \
                ((packet.dst_port in port_host_blacklist and packet.dst_addr in port_host_blacklist[packet.dst_port]) or\
                (default_listener_port and default_listener_port in port_host_blacklist and packet.dst_addr in port_host_blacklist[default_listener_port])) :
            self.logger.debug('Ignoring %s %s %s request packet to %s in the listener host blacklist.', direction_string, interface_string, protocol, packet.dst_addr)
            self.logger.debug(' %s:%d -> %s:%d', packet.src_addr, packet.src_port, packet.dst_addr, packet.dst_port)
        # Make sure you are not intercepting packets from one of the FakeNet listeners
        elif conn_pid and os.getpid() == conn_pid:
            self.logger.debug('Skipping %s %s %s listener packet:', direction_string, interface_string, protocol)
            self.logger.debug(' %s:%d -> %s:%d', packet.src_addr, packet.src_port, packet.dst_addr, packet.dst_port)
        # Modify the packet
        else:
            # Adjustable log level output. Used to display info level logs for first packets of the session and
            # debug level for the rest of the communication in order to reduce log output.
            logger_level = self.logger.debug
            # First packet in a new session
            if not (packet.src_port in self.sessions and self.sessions[packet.src_port] == (packet.dst_addr, packet.dst_port)):
                # Cache original target IP address based on source port
                self.sessions[packet.src_port] = (packet.dst_addr, packet.dst_port)
                # Override log level to display all information on info level
                logger_level = self.logger.info
                # Execute command
                if conn_pid and port_execute and (packet.dst_port in port_execute or (default_listener_port and default_listener_port in port_execute)):
                    execute_cmd = port_execute[packet.dst_port if packet.dst_port in diverted_ports else default_listener_port].format(pid = conn_pid,
                                              procname = process_name,
                                              src_addr = packet.src_addr,
                                              src_port = packet.src_port,
                                              dst_addr = packet.dst_addr,
                                              dst_port = packet.dst_port)
                    logger_level('Executing command: %s', execute_cmd)
                    self.execute_detached(execute_cmd)
            logger_level('Modifying %s %s %s request packet:', direction_string, interface_string, protocol)
            logger_level(' from: %s:%d -> %s:%d', packet.src_addr, packet.src_port, packet.dst_addr, packet.dst_port)
            # Direct packet to the right interface IP address to avoid routing issues
            packet.dst_addr = self.loopback_ip if packet.meta.is_loopback() else self.external_ip
            # Direct packet to an existing or a default listener
            packet.dst_port = packet.dst_port if packet.dst_port in diverted_ports else default_listener_port
            logger_level(' to: %s:%d -> %s:%d', packet.src_addr, packet.src_port, packet.dst_addr, packet.dst_port)
            if conn_pid:
                logger_level(' pid: %d name: %s', conn_pid, process_name if process_name else 'Unknown')
    # Restore diverted response from a local listener
    # NOTE: The response can come from a legitimate request
    elif diverted_ports and packet.src_port in diverted_ports:
        # Find which process ID is sending the request
        conn_pid = self.get_pid_port_tcp(packet.dst_port) if type(packet.headers[1].hdr) == TcpHeader else self.get_pid_port_udp(packet.dst_port)
        process_name = self.get_process_image_filename(conn_pid)
        if not packet.dst_port in self.sessions:
            self.logger.debug('Unknown %s %s %s response packet:', direction_string, interface_string, protocol)
            self.logger.debug(' %s:%d -> %s:%d', packet.src_addr, packet.src_port, packet.dst_addr, packet.dst_port)
        # Restore original target IP address from the cache
        else:
            self.logger.debug('Modifying %s %s %s response packet:', direction_string, interface_string, protocol)
            self.logger.debug(' from: %s:%d -> %s:%d', packet.src_addr, packet.src_port, packet.dst_addr, packet.dst_port)
            # Restore original target IP address based on destination port
            packet.src_addr, packet.src_port = self.sessions[packet.dst_port]
            self.logger.debug(' to: %s:%d -> %s:%d', packet.src_addr, packet.src_port, packet.dst_addr, packet.dst_port)
            if conn_pid:
                self.logger.debug(' pid: %d name: %s', conn_pid, process_name if process_name else 'Unknown')
    else:
        self.logger.debug('Forwarding %s %s %s packet:', direction_string, interface_string, protocol)
        self.logger.debug(' %s:%d -> %s:%d', packet.src_addr, packet.src_port, packet.dst_addr, packet.dst_port)
    return packet
def handle_packet(self, packet):
    """Top-level callback for each intercepted packet.

    Captures the raw packet to the PCAP (if enabled), dispatches it to the
    ICMP or TCP/UDP handler based on its transport header, re-captures it if
    it was modified, and finally re-injects it via the divert handle.

    :param packet: intercepted packet object, or None
    """
    if packet is None:
        self.logger.error('ERROR: Can\'t handle packet.')
        return
    # Preserve destination address to detect packet being diverted
    dst_addr = packet.dst_addr
    #######################################################################
    # Capture packet and store raw packet in the PCAP
    if self.capture_flag:
        self.pcap.writepkt(packet._raw_packet)
    ###########################################################################
    # Verify the IP packet has an additional header
    if len(packet.headers) > 1 and packet.headers[1] and packet.headers[1].hdr:
        #######################################################################
        # Handle ICMP Packets
        if type(packet.headers[1].hdr) in [IcmpHeader, Icmpv6Header]:
            packet = self.handle_icmp_packet(packet)
        #######################################################################
        # Handle TCP/UDP Packets
        elif type(packet.headers[1].hdr) == TcpHeader:
            protocol = 'TCP'
            packet = self.handle_tcp_udp_packet(packet,
                                                protocol,
                                                self.default_listener_tcp_port,
                                                self.blacklist_ports_tcp)
        elif type(packet.headers[1].hdr) == UdpHeader:
            protocol = 'UDP'
            packet = self.handle_tcp_udp_packet(packet,
                                                protocol,
                                                self.default_listener_udp_port,
                                                self.blacklist_ports_udp)
        else:
            self.logger.error('ERROR: Unknown packet header type.')
    #######################################################################
    # Capture modified packet and store raw packet in the PCAP
    # NOTE: While this results in potentially duplicate traffic capture, this is necessary
    # to properly restore TLS/SSL sessions.
    # TODO: Develop logic to record traffic before modification for both requests and
    # responses to reduce duplicate captures.
    if self.capture_flag and (dst_addr != packet.dst_addr):
        self.pcap.writepkt(packet._raw_packet)
    #######################################################################
    # Attempt to send the processed packet
    try:
        self.handle.send(packet)
    # BUGFIX: was Python 2-only "except Exception, e" syntax.
    except Exception as e:
        protocol = 'Unknown'
        # BUGFIX: guard the header access; the original indexed
        # packet.headers[1] unconditionally here, which would raise IndexError
        # for packets that never had a transport header.
        if len(packet.headers) > 1 and packet.headers[1] and packet.headers[1].hdr:
            if type(packet.headers[1].hdr) == TcpHeader:
                protocol = 'TCP'
            elif type(packet.headers[1].hdr) == UdpHeader:
                protocol = 'UDP'
            elif type(packet.headers[1].hdr) in [IcmpHeader, Icmpv6Header]:
                protocol = 'ICMP'
        interface_string = 'loopback' if packet.meta.is_loopback() else 'external'
        direction_string = 'inbound' if packet.meta.is_inbound() else 'outbound'
        self.logger.error('ERROR: Failed to send %s %s %s packet', direction_string, interface_string, protocol)
        if packet.src_port and packet.dst_port:
            self.logger.error(' %s:%d -> %s:%d', packet.src_addr, packet.src_port, packet.dst_addr, packet.dst_port)
        else:
            self.logger.error(' %s -> %s', packet.src_addr, packet.dst_addr)
        self.logger.error(' %s', e)
def main():
    """Smoke-test entry point: run the diverter with one default listener until Ctrl-C."""
    # BUGFIX: this dict was previously assigned to ``self.diverter_config``
    # inside a module-level function, raising NameError on ``self``; the
    # Diverter(...) call below then referenced an undefined name.
    diverter_config = {'redirectalltraffic': 'no', 'defaultlistener': 'DefaultListener', 'dumppackets': 'no'}
    listeners_config = {'DefaultListener': {'port': '1337'}}
    diverter = Diverter(diverter_config, listeners_config)
    diverter.start()
    ###########################################################################
    # Run processing until interrupted, then shut the diverter down cleanly.
    import time
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        diverter.stop()


if __name__ == '__main__':
    main()
5112276 | import pytest
import os
import glob
from subprocess import call
from snips.parser import parse
from snips.ast import Snippet, parse_snippet_body
snippets = 'https://github.com/honza/vim-snippets.git'
@pytest.fixture(scope='module')
def snippets_dir(current_dir):
    """Clone the vim-snippets repo on first use and return its UltiSnips directory."""
    checkout = os.path.join(current_dir, 'data', 'vim-snippets')
    if not os.path.exists(checkout):
        # Existence check only: a previous checkout is reused as-is.
        if call(['git', 'clone', snippets, checkout]) != 0:
            raise Exception('clone vim-snippets failed')
    return os.path.join(checkout, 'UltiSnips')
def test_parse(snippets_dir):
    """Every *.snippets file in the corpus must parse without raising."""
    for path in glob.glob(os.path.join(snippets_dir, '*')):
        if os.path.splitext(path)[1] != '.snippets':
            continue
        with open(path) as fh:
            source = fh.read()
        parse(source, filename=path)
def test_parse_snippet():
    """Exercise parse_snippet_body on literals, interpolations and escaped backticks."""
    # Plain text parses to a single literal token.
    body = 'Indent is'
    assert parse_snippet_body(body)[0][0].literal == 'Indent is'
    # A backtick section becomes a separate token holding the inner command.
    body = 'Indent is: `!v indent(".")`.'
    assert parse_snippet_body(body)[0][1].literal == '!v indent(".")'
    # Escaped backticks inside a section are unescaped in the parsed literal.
    body = r'`!p snip.rv = \`aaa\``'
    assert parse_snippet_body(body)[0][1].literal == r'!p snip.rv = `aaa`'
    # Multi-line snippet mixing tab stops with several python interpolations;
    # the interpolation tokens land at fixed positions in the token stream.
    body = r'''def ${1:function}(`!p
if snip.indent:
    snip.rv = 'self' + (", " if len(t[2]) else "")`${2:arg1}):
    `!p snip.rv = triple_quotes(snip)`${4:TODO: Docstring for $1.}`!p
write_function_docstring(t, snip) `
    ${5:${VISUAL:pass}}
'''
    d = parse_snippet_body(body)[0]
    assert d[3].type == 'interp'
    assert d[7].type == 'interp'
    assert d[11].type == 'interp'
    # Inline interpolation sandwiched between tab stops.
    body = 'def ${1:fname}(`!p snip.rv = "self, " if snip.indent else ""`$2):\n\t$0'  # noqa
    d = parse_snippet_body(body)[0]
    assert d[3].literal == '!p snip.rv = "self, " if snip.indent else ""'
| StarcoderdataPython |
3340155 | import io
from CommonServerPython import *
import CortexXDRCloudProviderWidget
import pytest
def util_load_json(path):
    """Read a UTF-8 JSON fixture file and return the parsed object."""
    with io.open(path, mode='r', encoding='utf-8') as fixture:
        raw = fixture.read()
    return json.loads(raw)
@pytest.mark.parametrize('incident_data, expected_result', [
    # Single-provider incident: only AWS alerts present.
    (util_load_json('test_data/incident_data.json'), {'AWS'}),
    # Mixed incident spanning all three supported providers.
    (util_load_json('test_data/multi_clouds_incident_data.json'), {'AWS', 'GCP', 'Azure'})
])
def test_cloud_provider(mocker, incident_data, expected_result):
    """get_cloud_providers returns the set of cloud providers named in the incident."""
    mocker.patch.object(demisto, 'incident', return_value=incident_data)
    results = CortexXDRCloudProviderWidget.get_cloud_providers()
    assert results == expected_result
def test_cloud_provider_other_provider(mocker):
    """Providers outside {AWS, GCP, Azure} are rendered with the default black color."""
    mocker.patch.object(CortexXDRCloudProviderWidget, 'get_cloud_providers', return_value={'IBM'})
    results = CortexXDRCloudProviderWidget.get_cloudprovider_html_result()
    assert '000000' in results.get('Contents')  # if not GCP, AWS or Azure should be in black
| StarcoderdataPython |
9632875 | <gh_stars>0
"""List of common fit functions."""
import numpy as np
from eddington.exceptions import FitFunctionLoadError
from eddington.fit_function_class import fit_function
@fit_function(
    n=2,
    syntax="a[0] + a[1] * x",
    x_derivative=lambda a, x: np.full(shape=x.shape, fill_value=a[1]),
    a_derivative=lambda a, x: np.stack((np.ones(shape=x.shape), x)),
) # pylint: disable=C0103
def linear(a, x):
    """
    Simple linear fit function: a[0] + a[1] * x.

    :param a: Coefficients array of length 2
    :param x: free parameter (numpy array; the derivative lambdas use ``x.shape``)
    :return: a[0] + a[1] * x, evaluated element-wise (an array when x is an array)
    """
    return a[0] + a[1] * x
@fit_function(
    n=1,
    syntax="a[0]",
    x_derivative=lambda a, x: np.zeros(shape=x.shape),
    a_derivative=lambda a, x: np.stack([np.ones(shape=x.shape)]),
) # pylint: disable=C0103
def constant(a, x):
    """
    Constant fit function: a[0] for every x.

    :param a: Coefficients array of length 1
    :param x: free parameter (numpy array; only its shape is used)
    :return: array of x's shape filled with a[0]
    """
    return np.full(fill_value=a[0], shape=x.shape)
@fit_function(
    n=3,
    syntax="a[0] + a[1] * x + a[2] * x ^ 2",
    x_derivative=lambda a, x: a[1] + 2 * a[2] * x,
    a_derivative=lambda a, x: np.stack([np.ones(shape=x.shape), x, x ** 2]),
) # pylint: disable=C0103
def parabolic(a, x):
    """
    Parabolic fit function: a[0] + a[1] * x + a[2] * x^2.

    :param a: Coefficients array of length 3
    :param x: free parameter (numpy array)
    :return: the quadratic evaluated element-wise (an array when x is an array)
    """
    return a[0] + a[1] * x + a[2] * x ** 2
@fit_function(
    n=4,
    name="straight_power",
    x_derivative=lambda a, x: a[2] * a[0] * (x + a[1]) ** (a[2] - 1),
    a_derivative=lambda a, x: np.stack(
        [
            (x + a[1]) ** a[2],
            a[2] * a[0] * (x + a[1]) ** (a[2] - 1),
            # d/da[2] uses the identity d(u^p)/dp = ln(u) * u^p.
            a[0] * np.log(x + a[1]) * (x + a[1]) ** a[2],
            np.ones(shape=x.shape),
        ]
    ),
) # pylint: disable=C0103
def straight_power(a, x): # pylint: disable=C0103
    """
    Power-law fit: a[0] * (x + a[1]) ^ a[2] + a[3] (models y ~ x^n).

    :param a: Coefficients array of length 4
    :param x: free parameter (numpy array; requires x + a[1] > 0 for the log derivative)
    :return: the power law evaluated element-wise (an array when x is an array)
    """
    return a[0] * (x + a[1]) ** a[2] + a[3]
@fit_function(
    n=4,
    name="inverse_power",
    x_derivative=lambda a, x: -a[2] * a[0] / (x + a[1]) ** (a[2] + 1),
    a_derivative=lambda a, x: np.stack(
        [
            1 / (x + a[1]) ** a[2],
            -a[2] * a[0] / (x + a[1]) ** (a[2] + 1),
            # d/da[2] of u^(-p) is -ln(u) * u^(-p); written via u^p in numerator form.
            -a[0] * np.log(x + a[1]) * (x + a[1]) ** a[2],
            np.ones(shape=x.shape),
        ]
    ),
) # pylint: disable=C0103
def inverse_power(a, x): # pylint: disable=C0103
    """
    Inverse power-law fit: a[0] / (x + a[1]) ^ a[2] + a[3] (models y ~ x^(-n)).

    :param a: Coefficients array of length 4
    :param x: free parameter (numpy array; requires x + a[1] > 0 for the log derivative)
    :return: the inverse power law evaluated element-wise (an array when x is an array)
    """
    return a[0] / (x + a[1]) ** a[2] + a[3]
def polynom(n): # pylint: disable=C0103
    """
    Creates a polynomial fit function of degree n with parameters as coefficients.

    :param n: Degree of the polynom (positive integer; coerced with int()).
    :return: :class:`FitFunction` with n + 1 coefficients
    :raises FitFunctionLoadError: if n <= 0
    """
    n = int(n)
    if n <= 0:
        raise FitFunctionLoadError(f"n must be positive, got {n}")
    # Degree 1 is exactly the predefined linear fit function.
    if n == 1:
        return linear
    arange = np.arange(1, n + 1)
    syntax = "a[0] + a[1] * x + " + " + ".join(
        [f"a[{i}] * x ^ {i}" for i in arange[1:]]
    )

    @fit_function(
        n=n + 1,
        name=f"polynom_{n}",
        syntax=syntax,
        # The x-derivative of a degree-n polynomial is the degree-(n-1)
        # polynomial with coefficients i * a[i]; reuse polynom recursively.
        x_derivative=lambda a, x: polynom(n - 1)(arange * a[1:], x),
        a_derivative=lambda a, x: np.stack([x ** i for i in range(n + 1)]),
        save=False,
    ) # pylint: disable=C0103
    def func(a, x):
        return sum([a[i] * x ** i for i in range(n + 1)])

    return func
@fit_function(
    n=3,
    syntax="a[0] / (x + a[1]) + a[2]",
    x_derivative=lambda a, x: -a[0] / ((x + a[1]) ** 2),
    a_derivative=lambda a, x: np.stack(
        [1 / (x + a[1]), -a[0] / ((x + a[1]) ** 2), np.ones(shape=x.shape)]
    ),
) # pylint: disable=C0103
def hyperbolic(a, x):
    """
    Hyperbolic fit function: a[0] / (x + a[1]) + a[2].

    :param a: Coefficients array of length 3
    :param x: free parameter (numpy array; undefined where x == -a[1])
    :return: the hyperbola evaluated element-wise (an array when x is an array)
    """
    return a[0] / (x + a[1]) + a[2]
@fit_function(
    n=3,
    syntax="a[0] * exp(a[1] * x) + a[2]",
    x_derivative=lambda a, x: a[0] * a[1] * np.exp(a[1] * x),
    a_derivative=lambda a, x: np.stack(
        [np.exp(a[1] * x), a[0] * x * np.exp(a[1] * x), np.ones(x.shape)]
    ),
) # pylint: disable=C0103
def exponential(a, x):
    """
    Exponential fit function: a[0] * exp(a[1] * x) + a[2].

    :param a: Coefficients array of length 3
    :param x: free parameter (numpy array)
    :return: the exponential evaluated element-wise (an array when x is an array)
    """
    return a[0] * np.exp(a[1] * x) + a[2]
@fit_function(
    n=4,
    syntax="a[0] * cos(a[1] * x + a[2]) + a[3]",
    x_derivative=lambda a, x: -a[0] * a[1] * np.sin(a[1] * x + a[2]),
    a_derivative=lambda a, x: np.stack(
        [
            np.cos(a[1] * x + a[2]),
            -a[0] * x * np.sin(a[1] * x + a[2]),
            -a[0] * np.sin(a[1] * x + a[2]),
            np.ones(shape=x.shape),
        ]
    ),
) # pylint: disable=C0103
def cos(a, x):
    """
    Cosine fit function: a[0] * cos(a[1] * x + a[2]) + a[3]
    (amplitude, angular frequency, phase, offset).

    :param a: Coefficients array of length 4
    :param x: free parameter (numpy array)
    :return: the cosine evaluated element-wise (an array when x is an array)
    """
    return a[0] * np.cos(a[1] * x + a[2]) + a[3]
@fit_function(
    n=4,
    syntax="a[0] * sin(a[1] * x + a[2]) + a[3]",
    x_derivative=lambda a, x: a[0] * a[1] * np.cos(a[1] * x + a[2]),
    a_derivative=lambda a, x: np.stack(
        [
            np.sin(a[1] * x + a[2]),
            a[0] * x * np.cos(a[1] * x + a[2]),
            a[0] * np.cos(a[1] * x + a[2]),
            np.ones(shape=x.shape),
        ]
    ),
) # pylint: disable=C0103
def sin(a, x):
    """
    Sine fit function: a[0] * sin(a[1] * x + a[2]) + a[3]
    (amplitude, angular frequency, phase, offset).

    :param a: Coefficients array of length 4
    :param x: free parameter (numpy array)
    :return: the sine evaluated element-wise (an array when x is an array)
    """
    return a[0] * np.sin(a[1] * x + a[2]) + a[3]
| StarcoderdataPython |
3526709 | <reponame>ithaaswin/TeachersPetBot<gh_stars>0
import sqlite3
from sqlite3 import Error
import os
CON = None
def connect():
    ''' connect program to database file db.sqlite '''
    global CON
    # Resolve db.sqlite relative to this file so the working directory doesn't matter.
    db_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'db.sqlite')
    print(db_path)  # NOTE(review): debug print left in; consider logging instead
    try:
        CON = sqlite3.connect(db_path)
        print("Connection to SQLite DB successful")
    except Error as err:
        # On failure CON stays None; later query helpers will raise AttributeError.
        print(f"The error '{err}' occurred when trying to connect to SQLite database")
def select_query(sql, args=()):
    """Execute a parameterized SELECT on the module connection; return the cursor."""
    # Requires connect() to have been called first (CON is None otherwise).
    return CON.cursor().execute(sql, args)
def mutation_query(sql, args=()):
    """Execute a parameterized data-modifying statement and commit it."""
    # Requires connect() to have been called first (CON is None otherwise).
    CON.cursor().execute(sql, args)
    CON.commit()
| StarcoderdataPython |
6542789 | <filename>DbUtil.py
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import mysql.connector
from mysql.connector import errorcode
def close_db(cursor, cnx):
    """Release a database cursor, then its connection (cursor first)."""
    cursor.close()
    cnx.close()
def open_db():
    """Open a connection to the local MySQL test database.

    :return: a mysql.connector connection on success; implicitly None on
        failure (errors are only printed), so callers must check for None.
    """
    config = {
        'user': 'root',
        'password': '<PASSWORD>',  # placeholder credential from the dataset
        'host': '127.0.0.1',
        'database': 'pythontest',
        'raise_on_warnings': True
    }
    try:
        return mysql.connector.connect(**config)
    except mysql.connector.Error as err:
        # Translate the two most common error codes into friendlier messages.
        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
            print("Something is wrong with your user name or password")
        elif err.errno == errorcode.ER_BAD_DB_ERROR:
            print("Database does not exist")
        else:
            print(err)
| StarcoderdataPython |
4926524 | <gh_stars>10-100
#!/usr/bin/env python
# encoding: utf-8
import mock
from unittest import TestCase
from ycyc.frameworks.events import base
class TestEvent(TestCase):
    """Behavioral tests for base.Event registration, notification and errors."""
    def test_event(self):
        event = base.Event()
        # Listener 1 always raises; listener 2 returns 0. register() hands back
        # the callback object used for later unregister()/identity checks.
        mock_callback1 = event.register(mock.MagicMock(side_effect=ValueError))
        mock_callback2 = event.register(mock.MagicMock(return_value=0))
        # notify() propagates the first listener's exception, so listener 2
        # is never invoked.
        with self.assertRaises(ValueError):
            event.notify(self)
        self.assertEqual(mock_callback1.call_count, 1)
        self.assertEqual(mock_callback2.call_count, 0)
        # notify_all() keeps going past failures and returns one entry per
        # listener carrying (result, exception, callback).
        result = event.notify_all(self)
        self.assertEqual(mock_callback1.call_count, 2)
        self.assertEqual(mock_callback2.call_count, 1)
        self.assertIsNone(result[0].result)
        self.assertIsInstance(result[0].exception, ValueError)
        self.assertIs(result[0].callback, mock_callback1)
        self.assertEqual(result[1].result, 0)
        self.assertIsNone(result[1].exception)
        self.assertIs(result[1].callback, mock_callback2)
        # After unregistering the failing listener, notify() reaches listener 2.
        event.unregister(mock_callback1)
        event.notify(self)
        self.assertEqual(mock_callback1.call_count, 2)
        self.assertEqual(mock_callback2.call_count, 2)
        # Unregistering twice and registering a duplicate are both rejected.
        with self.assertRaises(base.ListenerNoExistedError):
            event.unregister(mock_callback1)
        with self.assertRaises(base.ListenerDuplicatedError):
            event.register(mock_callback2)
| StarcoderdataPython |
48160 | <gh_stars>1-10
import json
from temapi.commons.paths import OUTPUTS_DIR
class Loader:
    """Base class that loads a JSON document from OUTPUTS_DIR and hands it to setup().

    Subclasses assign ``file`` (a filename inside OUTPUTS_DIR) and override
    ``setup`` to consume the parsed data.
    """
    # Relative filename inside OUTPUTS_DIR; must be set by subclasses.
    file = None

    def __init__(self):
        assert self.file is not None
        path = OUTPUTS_DIR / self.file
        with path.open() as handle:
            payload = json.load(handle)
        self.setup(payload)

    def setup(self, data):
        """Hook for subclasses to consume the loaded JSON; default does nothing."""
        pass
| StarcoderdataPython |
4805748 | """
Problem Statement
We are given an array containing ‘n’ objects. Each object, when created, was assigned a unique number from 1 to ‘n’
based on their creation sequence. This means that the object with sequence number ‘3’ was created just before the
object with sequence number ‘4’.
Write a function to sort the objects in-place on their creation sequence number in O(n) and without any extra space.
For simplicity, let’s assume we are passed an integer array containing only the sequence numbers, though each number
is actually an object.
Example 1:
Input: [3, 1, 5, 4, 2]
Output: [1, 2, 3, 4, 5]
Example 2:
Input: [2, 6, 4, 3, 1, 5]
Output: [1, 2, 3, 4, 5, 6]
"""
sorted() | StarcoderdataPython |
1940542 | <gh_stars>0
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Ground truth values for `german_credit_numeric_with_test_sparse_logistic_regression`."""
import numpy as np
PARAMS_MEAN: np.ndarray = np.array([
-1.1838851267416666,
1.0059914494179805,
-1.0380936008221013,
0.42841185011358596,
-0.9674014534941406,
-0.6549982166162993,
-0.2107040125304899,
-0.07739429755818186,
0.7565319305257808,
-0.28419896987171034,
-0.742617803871896,
0.46553108544053223,
-0.0015668160573959259,
-0.6223828083288044,
-0.8336673737094527,
0.7953585919647085,
-0.8658881317229088,
0.6010881604881895,
0.7516534603918409,
0.21155297753118213,
-0.36974589585691925,
-0.10978624983135435,
-0.0007779317310475013,
0.0355824067624817,
-1.3336557175080002,
2.4439010026026664,
1.674765021480318,
1.7932340821347843,
0.6136909648053737,
1.51467537955159,
0.8161300923770367,
0.3904371941611651,
0.36680198109322104,
1.0088795651500453,
0.4515115026180969,
0.9280353201286664,
0.6029283115574513,
0.3468666388809409,
0.7783922276226264,
1.182414764875162,
1.0399347275051336,
1.241251855731462,
0.8263362337619607,
1.0223875563837959,
0.44737122679785035,
0.5440297031154755,
0.3647276043157606,
0.37259531531480305,
0.36676433072893144,
3.2286390193640004,
0.36594427819919995,
]).reshape((51,))
PARAMS_MEAN_STANDARD_ERROR: np.ndarray = np.array([
0.0005981660464235878,
0.0006135876540999674,
0.0006132548865209968,
0.000803377297419766,
0.0006049782223892016,
0.000771292967698475,
0.0007774783903198308,
0.0007659490351722464,
0.0008049962237919129,
0.0007893216669665765,
0.0007721116046629342,
0.0008030001846045975,
0.0007530940897997456,
0.0007837046272220854,
0.0007537488478658359,
0.0007551929529850969,
0.0007275810106867986,
0.0009107156036158675,
0.0008380421358340607,
0.0008027590248072173,
0.000846842591322889,
0.0007770780743274671,
0.0007431850802852932,
0.000757876544618884,
0.0006592016543744323,
0.0016674727697632262,
0.0013373333345408193,
0.0014172343648693945,
0.0009208409611009669,
0.0012925597320408856,
0.0010240743950809543,
0.000665833848910506,
0.0005727478123486804,
0.0011809240640211491,
0.0006774109626239607,
0.0011085220063862288,
0.0008927776683871067,
0.0005607264239990413,
0.0010691700039338601,
0.0012393408295048253,
0.0012373207240113956,
0.0012589868491428302,
0.001250630244548729,
0.0012793885048060923,
0.0006897384909408552,
0.0007810885576205727,
0.0005958773232696626,
0.0005699488539133139,
0.0005909674993432928,
0.0022468031264735837,
0.0002711192882221147,
]).reshape((51,))
PARAMS_STANDARD_DEVIATION: np.ndarray = np.array([
0.5692117195860952,
0.5677421529175252,
0.562277138614212,
0.7892247581272893,
0.5660226933295356,
0.691689221495052,
0.8071941992835272,
0.823920491277778,
0.6567992672766995,
0.8035247310142861,
0.6409207318443784,
0.7669103963270938,
0.8209748967688372,
0.7079062958936378,
0.6293368138798237,
0.6233261155952887,
0.6068794880781312,
0.7424100127193938,
0.6681241435870447,
0.8244839092591019,
0.7982633705938547,
0.8186456335220941,
0.8280317028703763,
0.8250445765335408,
0.5765291148513438,
1.5065700228426764,
1.2708158068948712,
1.2953232321587766,
0.8677959268703033,
1.209475774208417,
0.959992910035797,
0.6680939493819217,
0.6426845158867424,
1.0589135045514155,
0.7245789915824581,
0.993230463704737,
0.8432012939457578,
0.6203093523335816,
0.943026246915948,
1.1330109879905408,
1.0468797941904935,
1.14060323931888,
1.0014950042286426,
1.0722476046007519,
0.7213365756284413,
0.8029465429365654,
0.6419600178748756,
0.6487860091686608,
0.6424489834913355,
1.7568431706964294,
0.1579495547414705,
]).reshape((51,))
PER_EXAMPLE_TEST_NLL_MEAN: np.ndarray = np.array([
0.8932802985806665,
0.8805982137320001,
0.07281155224717999,
0.9696056991333333,
1.6555208475533334,
0.23406506287579995,
0.11815078074993335,
0.04570794983766,
0.6878347782614667,
0.4600854188124,
0.929148332632,
0.11039605532619998,
0.5900256371900001,
0.05600385689016,
0.11208472671766664,
0.2830690590789334,
0.040564828436726,
1.0367364725576667,
0.26402510842353333,
0.13792483890593332,
0.722906202488,
0.11671178047146669,
0.9805809884439334,
1.6982768760573337,
0.6280893774479999,
0.1121140445752,
1.0448789837586667,
0.49211185157,
0.21612400629286665,
0.5936031824406667,
1.1891306144279998,
0.21368964659513331,
0.10598263353479997,
0.35968327909579995,
0.05922757560342,
0.21453071531793336,
0.8167858978053333,
0.9226470367146667,
1.6739274616213333,
0.2059964318222,
0.061943855831933334,
1.4196761859882667,
0.27310091894846666,
1.9685428938179999,
0.7486841133025333,
0.32431026917820005,
0.15231322856,
0.2810245611262667,
0.2262218080458,
0.2320797038295333,
0.24237465992033336,
0.147699391710502,
0.03580297003770001,
0.04784709383689333,
2.257388003131333,
0.2823087150972,
0.15069805655253332,
0.4023085103093334,
0.24900819939513336,
0.03616694009950334,
0.2061633290075333,
0.11161810852027332,
1.1207777654920001,
1.545352750278,
0.18583755919926664,
0.8644224076466667,
0.18665400533333334,
0.15555939496553334,
0.13014888101053335,
0.03664557280752666,
0.6494967742393334,
0.2941871563751334,
0.2889536361571333,
0.24962913162800002,
0.49207019825173326,
1.1510740997213333,
0.8329377725120001,
0.19200160236973335,
0.14537038947793332,
0.11181803228066664,
0.3127595554272,
0.03239767553237133,
0.30491776777580004,
0.8088852840606666,
0.7558027646053334,
0.057796918749126666,
0.5764735361079999,
0.2501331716055334,
1.7948774935226666,
0.6947927857393333,
0.8983110609686668,
1.567359699042,
1.4142068950033333,
0.730860043442,
1.7710474526253335,
0.6017339474286667,
0.749811021938,
0.1118560518586,
0.5682557513679999,
0.38729974644399995,
0.2816426459196,
0.05467074141433333,
0.7718351100593333,
0.1667896605224,
0.3119883308179999,
0.13536394779188668,
0.9178761668906663,
0.44131320456519996,
0.06658872596986667,
1.2863488526326665,
0.04631893589452667,
0.19453692593639998,
0.5028246541526668,
0.028845926386493324,
0.46592164511173334,
0.5076860763915334,
0.8270957236898001,
0.07363693203990666,
1.4723151542153332,
0.12480194739273334,
1.4880357725780666,
0.01962631420628133,
0.5645879731626666,
0.06788400987932666,
0.7549323627757332,
0.12088377891893336,
0.3967343325802667,
0.3011679270792667,
1.8095277259,
0.23377042774520002,
0.09137190347906665,
0.6514547235018666,
1.5451398924420001,
1.2845736072493998,
0.4688204046366667,
0.46958583356479994,
0.13690902913453334,
0.60335870604,
0.3253230410302,
0.625201563154,
1.9678167017879997,
0.027051042997486667,
0.34268634162173334,
0.2876715895887333,
0.19654652381566667,
0.3368272875571333,
0.20043682701610868,
0.02639687867358,
0.07038683047575334,
1.0039479645593332,
0.11704837096753336,
0.038846639525936,
1.2944749141606668,
0.200117590195,
0.5280947220835334,
0.10819592967340667,
0.08957297113539332,
0.19915713105326666,
0.2508756714848,
0.6654825840224666,
0.18839285406499998,
0.26009153379333333,
0.7107080698952,
0.06831037274867333,
0.22442168287003997,
0.16220565458480002,
0.3574284081358,
0.27277224430086666,
2.1643369750226666,
1.5271146576286667,
3.39686751388,
0.09534177339573333,
0.7024778671526667,
0.2742506722181334,
0.1242365225118,
0.29555073333079995,
1.1505656520653331,
0.1303518809298933,
0.5711314973851334,
0.5650289145466668,
0.7564385054113333,
0.04193295254096667,
0.5362276295793333,
0.12413039833002666,
0.596463325482,
2.6180225795533336,
1.300850718916,
0.125580339638,
2.1985649611913334,
0.11614611674899997,
0.4298701917603333,
0.1617380555714,
0.8716859400560001,
0.5417966828789333,
1.0740642201193336,
0.09689275426793334,
0.8125607004706668,
0.18467196175659334,
0.06233246876997334,
0.055702146692313326,
0.23313085222080004,
1.0521178133963331,
0.27470644321119997,
0.6188661788486667,
0.04665166994681666,
0.6237205510606667,
0.3962529994002667,
0.696233716036,
0.10006141391909332,
0.15355236612273332,
0.2295005324601333,
1.9570544718693335,
0.5270949862324666,
2.15984109137,
1.3971375662206666,
0.060433736697293336,
0.951622933616,
0.5389614065448,
0.2442092646370667,
0.2953145316329334,
0.38661033270833334,
0.22969068909986662,
0.7347364076233334,
0.28590243673646665,
0.5616320499584,
0.16576861787206665,
0.18062649420206664,
0.22918376737266666,
0.3864906409583333,
0.23430458785193334,
0.15875909530953333,
0.6971458376559333,
0.09057607382153332,
0.08572732540012667,
0.5429912861110667,
1.3043550583725334,
0.20225930718653334,
1.1258259671913335,
0.47547891463806663,
0.4234302535124,
0.18131325537266665,
0.1891564493292,
0.14857409147016,
0.1719800655262,
0.05115110293182,
0.23237854730566668,
0.15236638079620002,
0.02799980400684,
1.254673685934667,
0.040121037181924,
]).reshape((250,))
PER_EXAMPLE_TEST_NLL_MEAN_STANDARD_ERROR: np.ndarray = np.array([
0.00032153712196378206,
0.00023987491920897367,
2.9056972772856642e-05,
0.00021166578233862928,
0.00043286735427594677,
9.474655265972705e-05,
3.6769971555736415e-05,
2.22790724281724e-05,
0.00034757760926201157,
0.0001459733449800984,
0.00027568474103784854,
3.9717712692023445e-05,
0.00017102139654367904,
2.8998965590756203e-05,
3.534369220060247e-05,
0.00010969792072407062,
4.9465862687622044e-05,
0.000497900287430413,
0.00012016042562926001,
3.556406055866058e-05,
0.0002100447023359982,
3.369774407451265e-05,
0.00041235201732991945,
0.00045150835143756247,
0.00015957255930384458,
4.291421452093546e-05,
0.00031217327310758576,
0.0001340016008333503,
8.040957621534812e-05,
0.00018643402374624683,
0.00026816261372477376,
9.136803189307127e-05,
4.700313966486291e-05,
0.00012949625147407127,
2.8826699808503546e-05,
0.00013252988171583857,
0.00025625911646822076,
0.0002850216055911534,
0.00041410518418894617,
9.184207067944421e-05,
3.03933388399952e-05,
0.0005554639477388119,
0.00016596127561907965,
0.0006557803120425634,
0.0002654605779504038,
0.00015384544699978154,
7.419344722973179e-05,
6.998155213847191e-05,
0.00010415274709450757,
0.00010972086803592591,
0.00018098690159083975,
0.0001578116214151066,
2.0750929712384972e-05,
2.1151313324986292e-05,
0.00042567280979356476,
0.0001081758655083595,
4.916957476257293e-05,
0.00013003969123315395,
8.584358742796228e-05,
4.975657824560633e-05,
9.530194046452203e-05,
8.151421958960496e-05,
0.0002288372526354027,
0.0002384287219728366,
0.00010953419199016353,
0.00032105858409531746,
8.742810825880636e-05,
5.039293622766902e-05,
5.390853654712829e-05,
2.3442820198379243e-05,
0.00012335258128747694,
0.0001523505347439892,
0.0001759768093202779,
0.00010161254101374626,
0.00022097449441655658,
0.00020583926408299132,
0.00015585091573371853,
7.693969182352437e-05,
4.6398276418383935e-05,
5.314551551227712e-05,
0.00012298472668576045,
2.2133134265128136e-05,
0.00011886588280429521,
0.00015598073357127003,
0.00017081863058799953,
3.064514213766753e-05,
0.00020484644532236276,
7.584550162732319e-05,
0.0003990340395200468,
0.0002052111534213109,
0.0002778123409407707,
0.0003901784836145747,
0.0003551264268598599,
0.00023809726726717093,
0.00039583365414470705,
0.000175770043894884,
0.0002469729935741937,
3.4068457267907234e-05,
0.0001857752537767532,
0.00023276911935070368,
9.676012056608388e-05,
2.4574360954064275e-05,
0.00024108618293458304,
5.360904767100146e-05,
0.00011168609490095625,
8.839929397645976e-05,
0.0003252014286279444,
0.0003469966836000381,
2.062502993905412e-05,
0.0004844893081670843,
2.3621884246583598e-05,
0.00010410044486011985,
0.00011066478748694188,
1.9195262126370067e-05,
0.00017961628230170025,
0.00021736222737213,
0.0004032331462548495,
4.290553529876965e-05,
0.000408269282913353,
5.240574151915042e-05,
0.0007801616684809686,
1.3000696714067651e-05,
0.0001769712836205073,
3.948675865930506e-05,
0.0002924712429698084,
4.994857839539926e-05,
0.00017969868888541312,
9.443455997663064e-05,
0.0003629153109308881,
0.00010333089822221198,
3.9827197498076646e-05,
0.00025940704021357625,
0.00032417917681847253,
0.0005493148184189602,
0.0001224682108357506,
0.0001450591539849271,
5.033920443961946e-05,
0.00018814297962652638,
0.00011490494407704568,
0.00023046371713432656,
0.0003918038855148469,
1.3416454041465919e-05,
0.00011616689700913684,
8.678529436974346e-05,
6.0908987028661074e-05,
7.572097416087637e-05,
0.00023830793709131456,
1.1944204971171449e-05,
4.3503911130069016e-05,
0.00017726403868561054,
3.579977892831471e-05,
3.4551969721394e-05,
0.0003460133138490385,
4.8068410377227915e-05,
0.00023373050862508218,
5.4372061147928335e-05,
9.144495759228718e-05,
0.00010997006691395534,
7.909131135256938e-05,
0.0002700860243563609,
0.00011594640500683878,
0.00011044213886367434,
0.0003295419703947446,
2.9710761417678753e-05,
0.0001466843454372131,
6.741972049143248e-05,
0.0001128600791970681,
8.472937726513761e-05,
0.0007336297460907198,
0.000544958686805171,
0.0005650413777817236,
2.6236226766601777e-05,
0.0001412604475692239,
0.00011517438650125967,
4.127009696264485e-05,
0.00011654236679867573,
0.0006248258703969294,
9.077588227080383e-05,
0.000304977940964981,
0.0002477401379619092,
0.0002285837385799114,
1.8422580299896414e-05,
0.0001529871110784206,
7.210337453730762e-05,
0.00017938448740229278,
0.0006070594823202606,
0.00037804892480219947,
4.3479931953811176e-05,
0.00047759954736740476,
4.0973389923993534e-05,
0.00015223423059360924,
6.814082902388355e-05,
0.00020674150810314311,
0.0001852605573179122,
0.00025594312192626033,
3.814489428680866e-05,
0.00031417815278787624,
0.00012189063454042425,
2.3606224017910556e-05,
3.468171843799626e-05,
0.0001230462495298638,
0.0003943005087404004,
9.28050680822266e-05,
0.00016763756824874365,
6.257037365733747e-05,
0.00022115186920846147,
0.00015781584715620332,
0.00012721766852535148,
4.8587934947068584e-05,
6.606508615603971e-05,
9.318169782027268e-05,
0.00036506615387339644,
0.00018105688558136954,
0.00040738379428702215,
0.00026417956174610224,
2.5832121679318452e-05,
0.0002525800553239043,
0.00022304003698103006,
0.00011417532664578275,
0.00013378995878979302,
0.00016986083408308326,
9.107894958869919e-05,
0.00016782775124419373,
0.00010888453166183272,
0.00022322614261667225,
8.120066736622856e-05,
8.17154685210108e-05,
0.00022439578193871545,
0.0002324824393590016,
0.00015101111591184555,
4.9074503238359104e-05,
0.00040182055800629004,
3.066480963829134e-05,
5.632657901197814e-05,
0.00021451828428282717,
0.000597946126757927,
7.668893390682197e-05,
0.00023854946620907374,
0.00020898795823861847,
0.00013923696647332034,
5.0201128326308424e-05,
8.396976091974454e-05,
0.00010108566712236049,
4.77951740702592e-05,
2.7079409244519088e-05,
7.186940369354517e-05,
6.054115596412238e-05,
2.0033849466202603e-05,
0.0003309812625048176,
5.486107033374318e-05,
]).reshape((250,))
PER_EXAMPLE_TEST_NLL_STANDARD_DEVIATION: np.ndarray = np.array([
0.26870590581007175,
0.2217099414120239,
0.028994763913104814,
0.1999899644680198,
0.3486194966246353,
0.0882674294673305,
0.035806440122558826,
0.019782012312970602,
0.2732964334600797,
0.12866777854504527,
0.24723123198875507,
0.041463456203576,
0.15440953761741166,
0.025449356696734517,
0.034439140897084435,
0.09568109419435025,
0.03228638118348737,
0.35493375591640897,
0.1090449013422922,
0.0331399801182846,
0.17481850313469502,
0.033505632228384555,
0.3506596768938177,
0.3377623166327418,
0.15027609441319847,
0.03688291973145708,
0.27472770386128964,
0.11535162069644271,
0.07188915885513883,
0.1686171722435377,
0.23594136851152725,
0.07516589740138449,
0.04112877709986131,
0.11747513623886345,
0.02511997886408359,
0.12140725090542837,
0.21932672290437974,
0.25251746560936983,
0.3216304432907333,
0.08404016877963255,
0.027289303460650775,
0.535515639181839,
0.11159969658621757,
0.5405717749540634,
0.2414805556820765,
0.12175910203198075,
0.06643262574499945,
0.06524762991037394,
0.09590604257529335,
0.09429595046207012,
0.1370962910819887,
0.10705263571600035,
0.017968146290713034,
0.01858973640086307,
0.3842366559941347,
0.09806633942876164,
0.05044543429564461,
0.11419149056518363,
0.08334854276850098,
0.030221760976944068,
0.08618651095243166,
0.0595762654316076,
0.20194423951961055,
0.24208162475303469,
0.09099225574121131,
0.2529255976015715,
0.0709658932540574,
0.05173611684280135,
0.04410837802766944,
0.01891668973541581,
0.1212535657741028,
0.1470547242195373,
0.12236960123323515,
0.0852063318377774,
0.1857070663477703,
0.1767921623226157,
0.1418863499876215,
0.06823285600225194,
0.04660994800992922,
0.04242989951961966,
0.1048066515085139,
0.017025869574626006,
0.09986619247806754,
0.15582010911993338,
0.16103410254059386,
0.024981096320957873,
0.17457996452373445,
0.07450800259037783,
0.3363511543192469,
0.16283688404583618,
0.21723566535385436,
0.35484835035475865,
0.29290743148381077,
0.2103343106088003,
0.34103254397135807,
0.1498201003933424,
0.22158985455393937,
0.032715121609988906,
0.1514649345744654,
0.18592524953898643,
0.09209051453181301,
0.022870626159565578,
0.20515713341635747,
0.05184542906546079,
0.09861245662163572,
0.06539251351574366,
0.29488988060939747,
0.23889518197867815,
0.020453018375294556,
0.3932176084063769,
0.019882295343682306,
0.0976949509966712,
0.10787145704382368,
0.016009909734227064,
0.15101422072258966,
0.17379105603704603,
0.274322218904628,
0.03657354188965388,
0.3350592088787028,
0.044291069054555945,
0.6572681233124966,
0.010627006744534219,
0.16112830431509956,
0.03128091864501645,
0.27903396882623577,
0.04338389564722415,
0.15860918464805612,
0.08544691416417534,
0.3070331829855171,
0.09395302698072064,
0.03592085410987787,
0.22288929932687246,
0.30818302277207194,
0.4164513877514838,
0.11515218877628915,
0.13629912010979156,
0.04138763303783175,
0.1622715987615615,
0.09898923505587526,
0.19529996262513385,
0.360578775645853,
0.012002038711402319,
0.09054402586263995,
0.0845145451699156,
0.05617260274414722,
0.07179360123488963,
0.15020935895248905,
0.011088884595306908,
0.03322367607762828,
0.16135880197974697,
0.03420337477427561,
0.023891949710504363,
0.28182998981590146,
0.05031576483060035,
0.20965439723235607,
0.04860488081184116,
0.06964023965267858,
0.08809483159464274,
0.07403454057109365,
0.2288814286120875,
0.10154106758703926,
0.10057474819084714,
0.32266699275810556,
0.027446473561892597,
0.09962166190881608,
0.058195843212894616,
0.10919409384773555,
0.07542358601626517,
0.6204737800635651,
0.46789373027072073,
0.4895211627903162,
0.02675587932238126,
0.12949666498288093,
0.10028249755305937,
0.03595281205921115,
0.10850017647207592,
0.44771603671335913,
0.07916454904961388,
0.262412763946681,
0.1904060700247172,
0.18859680317143404,
0.015798223449583235,
0.13310467279868995,
0.061668809425952945,
0.14916093397708888,
0.5332644900134329,
0.3246135500062896,
0.0399701338038215,
0.3864178405040727,
0.03766581823967911,
0.1402896874819159,
0.05823300248888654,
0.1939218158250446,
0.1651622843555703,
0.22700036941296883,
0.033475707164925976,
0.26124785735974376,
0.08500662266629593,
0.022315758852807496,
0.030563817640353312,
0.09903661281843554,
0.3740792631036485,
0.07651404131019925,
0.15913123239034582,
0.0422504320638421,
0.1764359381351995,
0.13499747955947156,
0.13360075866437296,
0.04416903629767875,
0.057756627190915136,
0.08187299041338668,
0.3147691284101902,
0.1600162423680936,
0.3589557338592094,
0.24172633110227984,
0.023036657474523194,
0.2353486037790884,
0.18239232150673318,
0.09429001991135842,
0.10281453124009195,
0.13415168173493047,
0.07406423617878902,
0.16603692213954888,
0.09304832878807569,
0.2017087450397049,
0.06763377267666501,
0.07215818427800608,
0.15753018382355996,
0.2161197246962323,
0.1406374931676345,
0.04988528973046572,
0.2938798989221163,
0.029855304358490837,
0.0417605777397679,
0.191639232903419,
0.5407719433901764,
0.06716596659602878,
0.20782344642986664,
0.18486552924330144,
0.11970451222145784,
0.054787703291929536,
0.07470159174648763,
0.06990358142403602,
0.04649309164118321,
0.024435158132194035,
0.06619042731876243,
0.05140282178085058,
0.015066065924307795,
0.3015167642555913,
0.03288881522272262,
]).reshape((250,))
# Scalar (0-d array) reference value: mean test negative log-likelihood
# over the evaluation set — presumably a regression-test golden value;
# confirm against the run that generated it.
TEST_NLL_MEAN: np.ndarray = np.array([
    132.86829305266664,
]).reshape(())
# Scalar (0-d array): standard error of the mean test NLL above.
TEST_NLL_MEAN_STANDARD_ERROR: np.ndarray = np.array([
    0.00315842068751233,
]).reshape(())
# Scalar (0-d array): standard deviation of the per-example test NLLs.
TEST_NLL_STANDARD_DEVIATION: np.ndarray = np.array([
    2.9687648183742548,
]).reshape(())
| StarcoderdataPython |
6584956 | <gh_stars>1-10
"""
Pylibui test suite.
"""
from pylibui.controls import ProgressBar
from tests.utils import WindowTestCase
class ProgressBarTest(WindowTestCase):

    def setUp(self):
        super().setUp()
        self.progressbar = ProgressBar()

    def test_value_initial_value(self):
        """A freshly created progressbar reports a value of zero."""
        self.assertEqual(self.progressbar.value, 0)

    def test_value_can_be_changed(self):
        """Assigning to `value` is reflected when reading it back."""
        new_value = 30
        self.progressbar.value = new_value
        self.assertEqual(self.progressbar.value, new_value)

        # TODO: should we check for variable type to avoid app crashes?
        # NOTE: weirdly enough, the sliders don't crash like this; this may
        # be a bug in libui.
        # with self.assertRaises(ValueError):
        #     self.progressbar.set_value('hello')
| StarcoderdataPython |
11210462 | <gh_stars>0
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score
from nltk import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.stem.porter import *
import matplotlib.pyplot as plt
from sklearn.metrics import *
import time
import sys
# Go to the last two lines of this program to have an idea from start (bottom-up functional approach)
# reads the two files as command line argument
# Example: LFDassignment2.py <trainset> <testset>
def read_files():
    """Read the train and test corpora named on the command line.

    Usage: LFDassignment2.py <trainset> <testset>.  Returns two lists of
    raw lines (newlines preserved), one per file.
    """
    def _read_lines(path):
        with open(path, 'r', encoding='utf-8') as handle:
            return handle.readlines()

    return _read_lines(sys.argv[1]), _read_lines(sys.argv[2])
# we are using NLTK stemmer to stem multiple words into root
def apply_stemmer(doc):
    """Reduce every token of *doc* to its root with NLTK's Porter stemmer."""
    porter = PorterStemmer()
    return [porter.stem(token) for token in doc]
# Tokenize and Append the text in documents array.
# Append one of the first two tokens (either sentiment type (true)/topics type (false)) in labels array depending on use_sentiment.
def modify_corpus(data, use_sentiment):
    """Split each corpus line into a stemmed token list and a class label.

    Each line looks like: <topic> <sentiment> <id> <text ...>.  The text
    starts at token index 3.  When use_sentiment is True the label is the
    2-class sentiment (tokens[1], pos/neg); otherwise it is the 6-class
    topic (tokens[0]: books, camera, dvd, health, music, software).
    """
    label_index = 1 if use_sentiment else 0
    documents = []
    labels = []
    for line in data:
        tokens = line.strip().split()
        documents.append(tokens[3:])
        labels.append(tokens[label_index])
    # Stem every document before returning, as the original pipeline did.
    stemmed_documents = [apply_stemmer(doc) for doc in documents]
    return stemmed_documents, labels
# Show Distribution of Data
def distribution(trainClass, testClass):
    """Print the per-topic class counts of the training and testing splits.

    Parameters
    ----------
    trainClass, testClass : list of str
        Class labels of the training and testing documents.

    The original version counted with nested loops (one full pass over the
    data per label); a single Counter pass per split gives the same counts
    and the same printed output in O(n).
    """
    from collections import Counter

    labels = ["books", "camera", "dvd", "health", "music", "software"]
    train_counter = Counter(trainClass)
    test_counter = Counter(testClass)
    count_training = [train_counter[label] for label in labels]
    count_testing = [test_counter[label] for label in labels]

    print("Distribution of classes in Training Set:")
    print(labels)
    print(count_training)

    print("\nDistribution of classes in Testing Set:")
    print(labels)
    print(count_testing)
# a dummy function that just returns its input
def identity(x):
    """Return the input unchanged.

    Used as a no-op tokenizer/preprocessor for the vectorizers, since the
    corpus is already tokenized.
    """
    return x
# Using NLTK lemmatizer
class LemmaTokenizer(object):
    """Callable tokenizer: word-tokenize a string, then lemmatize each token
    with NLTK's WordNetLemmatizer."""

    def __init__(self):
        self.wnl = WordNetLemmatizer()

    def __call__(self, doc):
        lemmatize = self.wnl.lemmatize
        return [lemmatize(token) for token in word_tokenize(doc)]
# decide on TF-IDF vectorization for feature
# based on the value of tfidf (True/False)
def tf_idf_func(tfidf):
    """Build the feature vectorizer for the classification pipelines.

    Parameters
    ----------
    tfidf : bool
        True  -> TfidfVectorizer (tf-idf weighted features),
        False -> CountVectorizer (raw token counts).

    Both drop English stop words and use `identity` as tokenizer and
    preprocessor, since the corpus is already preprocessed and tokenized.
    The original duplicated the keyword arguments across two constructor
    calls (plus a dead, commented-out lemmatizer variant); selecting the
    class once removes that duplication without changing behavior.
    """
    vectorizer_cls = TfidfVectorizer if tfidf else CountVectorizer
    return vectorizer_cls(stop_words='english',
                          preprocessor=identity,
                          tokenizer=identity)
# Naive Bayes classifier: the value of boolean arg - use_sentiment decides on binary (True - sentiment) vs multi-class (False - Topic) classification
def NB_classifier(trainDoc, trainClass, testDoc, testClass, tfIdf, use_sentiment):
    """Train and evaluate a Multinomial Naive Bayes text classifier.

    Parameters
    ----------
    trainDoc, trainClass : training documents (token lists) and labels.
    testDoc, testClass : test documents and their gold labels.
    tfIdf : bool
        True -> TfidfVectorizer features, False -> CountVectorizer counts.
    use_sentiment : bool
        Only affects the printed header (sentiment vs. topic task).

    Prints accuracy, a classification report, 10-fold cross-validation
    scores, timing, and class probabilities; returns None.
    """
    # decides on TfidfVectorizer(True) or CountVectorizer(False)
    vec = tf_idf_func(tfIdf)
    # combine the vectorizer with a Naive Bayes classifier
    classifier = Pipeline( [('vec', vec),
                    ('cls', MultinomialNB())] )

    t0 = time.time()
    # Fit/Train Multinomial Naive Bayes classifier according to trainDoc, trainClass
    # Here trainDoc are the documents from training set and trainClass is the class labels for those documents
    classifier.fit(trainDoc, trainClass)
    train_time = time.time() - t0

    t1 = time.time()
    # Use the classifier to predict the class for all the documents in the test set testDoc
    # Save those output class labels in testGuess
    testGuess = classifier.predict(testDoc)
    test_time = time.time() - t1

    # Just to know the output type
    classType = "Topic Class"
    if use_sentiment:
        classType = "Sentiment Class"
    # Just to know which version of Tfidf is being used
    tfIDF_type = "TfidfVectorizer" if(tfIdf) else "CountVectorizer" # This is ternary conditional operator in python

    print("\n########### Naive Bayes Classifier For ", classType, " (", tfIDF_type, ") ###########")
    # Call to function(s) to do the jobs ^_^
    calculate_measures(classifier, testClass, testGuess)
    # Showing 10 fold cross validation score cv = no. of folds
    print("Cross Validation:\n", cross_val_score(classifier, testDoc, testClass, cv=10))
    print()
    print("Training Time: ", train_time)
    print("Testing Time: ", test_time)
    # NOTE(review): the label lists (not documents) are passed here — see
    # the note on calculate_probabilities; verify this is intentional.
    calculate_probabilities(classifier, testClass, trainClass)
# Exercise 2.1.2 – Decision Tree
# Decision Trees classifier: the value of boolean arg - use_sentiment decides on binary (True - sentiment) vs multi-class (False - Topic) classification
def Decision_Trees(trainDoc, trainClass, testDoc, testClass, tfIdf, use_sentiment):
    """Train and evaluate a Decision Tree classifier on the given split.

    tfIdf selects the vectorizer (True -> tf-idf, False -> counts);
    use_sentiment only changes the printed header.  Prints accuracy,
    a classification report, cross-validation scores and timings.
    """
    # Feature pipeline: chosen vectorizer followed by a decision tree.
    pipeline = Pipeline([('vec', tf_idf_func(tfIdf)),
                         ('cls', DecisionTreeClassifier())])
    # Tuning knobs left at defaults, as in the original experiments:
    # DecisionTreeClassifier(min_samples_split=3, min_samples_leaf=2, max_depth=10, max_features=1000)

    fit_start = time.time()
    pipeline.fit(trainDoc, trainClass)
    train_time = time.time() - fit_start

    predict_start = time.time()
    predictions = pipeline.predict(testDoc)
    test_time = time.time() - predict_start

    # Header labels for the printed report.
    classType = "Sentiment Class" if use_sentiment else "Topic Class"
    tfIDF_type = "TfidfVectorizer" if tfIdf else "CountVectorizer"

    print("\n########### Decision Trees Classifier For "+classType+" (", tfIDF_type, ") ###########")
    calculate_measures(pipeline, testClass, predictions)
    print("Cross Validation:\n", cross_val_score(pipeline, testDoc, testClass, cv=10))
    print()
    print("Training Time: ", train_time)
    print("Testing Time: ", test_time)
# Exercise 2.2 – K-Nearest Neighbor
# K-Nearest Neighbor classifier: the value of boolean arg - use_sentiment decides on binary (True - sentiment) vs multi-class (False - Topic) classification
def KNN_classifier(trainDoc, trainClass, testDoc, testClass, tfIdf, use_sentiment):
    """Train and evaluate a K-Nearest-Neighbor classifier (fixed K=15).

    tfIdf selects the vectorizer (True -> tf-idf, False -> counts);
    use_sentiment only changes the printed header.  Prints accuracy,
    a classification report, cross-validation scores and timings.
    """
    pipeline = Pipeline([('vec', tf_idf_func(tfIdf)),
                         ('cls', KNeighborsClassifier(n_neighbors=15))])

    fit_start = time.time()
    pipeline.fit(trainDoc, trainClass)
    train_time = time.time() - fit_start

    predict_start = time.time()
    predictions = pipeline.predict(testDoc)
    test_time = time.time() - predict_start

    # Header labels for the printed report.
    classType = "Sentiment Class" if use_sentiment else "Topic Class"
    tfIDF_type = "TfidfVectorizer" if tfIdf else "CountVectorizer"

    print("\n########### K-Nearest Neighbor Classifier For "+classType+" (", tfIDF_type, ") ###########")
    calculate_measures(pipeline, testClass, predictions)
    print("Cross Validation:\n", cross_val_score(pipeline, testDoc, testClass, cv=10))
    print()
    print("Training Time: ", train_time)
    print("Testing Time: ", test_time)
# Exercise 2.2.1 – K-Nearest Neighbor (for different accuracy and f1-scores)
# K-Nearest Neighbor classifiers results for different values of K
def KNN_loop(trainDoc, trainClass, testDoc, testClass, tfIdf, use_sentiment):
    """Evaluate K-NN for K = 1..30 and report accuracy / macro-F1 per K.

    Returns
    -------
    k_val, accu, f1 : lists
        The K values and the corresponding accuracy and macro-F1 scores,
        suitable for plotting with draw_plots().

    Fixes two defects of the original: the header claimed K ran 1-20 while
    the loop actually runs 1-30, and the summary loop iterated
    `range(1, 30)`, silently dropping the K=1 result from the printout.
    """
    vec = tf_idf_func(tfIdf)
    k_val = []
    accu = []
    f1 = []
    print("\n##### Output of K-NN classifier for different values of K (1-30) #####\n")
    for k in range(1, 31):
        classifier = Pipeline([('vec', vec),
                               ('cls', KNeighborsClassifier(n_neighbors=k))])
        classifier.fit(trainDoc, trainClass)
        testGuess = classifier.predict(testDoc)
        k_val.append(k)
        accu.append(accuracy_score(testClass, testGuess))
        f1.append(f1_score(testClass, testGuess, average='macro'))
    print()
    # Print every collected result, including K=1.
    for k, acc, score in zip(k_val, accu, f1):
        print("K=", k, " Accuracy=", round(acc, 3), " F1-score=", round(score, 3))
    return k_val, accu, f1
# for calculating different scores
def calculate_measures(classifier, testClass, testGuess):
    """Print accuracy and a per-class precision/recall/F1 report.

    Compares predicted labels (testGuess) against gold labels (testClass)
    for the fitted classifier's known classes.
    """
    # Overall accuracy of the predictions.
    print(f"Accuracy = {accuracy_score(testClass, testGuess)}")
    # Per-class precision, recall and F1 (3 decimal places).
    print(classification_report(testClass, testGuess, labels=classifier.classes_,
                                target_names=None, sample_weight=None, digits=3))
    # Confusion-matrix output, kept for reference (disabled in the original):
    # print("Confusion Matrix:")
    # cm = confusion_matrix(testClass, testGuess, labels=classifier.classes_)
    # print(classifier.classes_)
    # print(cm)
    # print()
# Probabilities
def calculate_probabilities(classifier, testClass, trainClass):
    """Print posterior probabilities and the final class prior probabilities.

    NOTE(review): the callers pass *label* lists (testClass / trainClass)
    rather than documents, so the pipeline vectorizes the label strings
    themselves before calling predict_proba.  Looks deliberate in this
    assignment, but verify against the caller (NB_classifier).
    """
    # Posterior probabilities for every documents ()
    print("\nPosterior probabilities:")
    print(classifier.classes_)
    print(classifier.predict_proba(testClass)) # Posterior probability depends on the documents in Test Set(Xtest)

    # Prior Probability for classes
    prior = classifier.predict_proba(trainClass) # Prior probability depends on the occurrence of class in Training Set(Ytrain)
    finalPrior = prior[len(prior)-1:] # Last row in the array is the final prior probability (as it builds up gradually: N(class i)/N(doc))
    print("\nPrior Probability(Probability of Class):")
    print(classifier.classes_)
    print(finalPrior)
def draw_plots(k_val, accu, f1):
    """Plot accuracy and macro-F1 against the K values from the K-NN sweep."""
    # Two curves on one figure, same order and colors as the original.
    for series, colour, label in ((accu, 'red', 'Accuracy'),
                                  (f1, 'yellow', 'F1-score')):
        plt.plot(k_val, series, color=colour, label=label)
    plt.xlabel('Values of K')
    plt.legend()
    plt.show()
# This function runs Naive Bayes, Decision Tree and K-NN classifiers
def run_all_classifiers(trainDoc, trainClass, testDoc, testClass):
    """Run every classifier/vectorizer combination, then the K-NN sweep plot.

    Covers Naive Bayes (counts only; the tf-idf run is commented out),
    Decision Trees and K-NN, each with both vectorizers, all on the
    6-topic task (use_sentiment=False).
    """
    # Test the Naive Bayes (False for Topic Class) with Tf-Idf vectorizer
    #NB_classifier(trainDoc, trainClass, testDoc, testClass, True, False)
    # Test the Naive Bayes (False for Topic Class) with CountVectorizer
    NB_classifier(trainDoc, trainClass, testDoc, testClass, False, False)
    # Test the Decision_Trees (False for Topic Class) with Tf-Idf vectorizer
    Decision_Trees(trainDoc, trainClass, testDoc, testClass, True, False)
    # Test the Decision_Trees (False for Topic Class) with CountVectorizer
    Decision_Trees(trainDoc, trainClass, testDoc, testClass, False, False)
    # Test the KNN classfier (False for Topic Class) with Tf-Idf vectorizer
    KNN_classifier(trainDoc, trainClass, testDoc, testClass, True, False)
    # Test the KNN classfier (False for Topic Class) with CountVectorizer
    KNN_classifier(trainDoc, trainClass, testDoc, testClass, False, False)
    # Collect the accuracy/F1-vs-K data and draw the curves
    k_val, accu, f1 = KNN_loop(trainDoc, trainClass, testDoc, testClass, True, False)
    draw_plots(k_val, accu, f1)
# This function runs Naive Bayes with Tf-Idf Vectorizers and some Pre-preprocessing
def run_best_model(trainDoc, trainClass, testDoc, testClass):
    """Run only the best model found: Naive Bayes with Tf-Idf features."""
    # Test the Naive Bayes (False for Topic Class) with Tf-Idf vectorizer
    NB_classifier(trainDoc, trainClass, testDoc, testClass, True, False)
# this is the main function but you can name it anyway you want
def main():
    """Entry point: read the corpora, run the best model, optionally run all.

    Fixes vs. the original: drops the redundant str() around input()
    (input already returns str), replaces the `c == 'Y' or c == 'y'`
    chain with a membership test, and corrects the 'Decsision' typo in
    the user-facing prompt.
    """
    print("Wait for it... Don't panic (Porter's Stemmer is taking time...)\n")
    # reads files <trainSet> <testSet> as command line argument
    trainSet, testSet = read_files()
    # divides the files into tokenized documents and class labels
    # (False selects the 6-topic classification task)
    trainDoc, trainClass = modify_corpus(trainSet, False)
    testDoc, testClass = modify_corpus(testSet, False)
    # show the distribution of classes in training and testing set
    # distribution(trainClass, testClass)
    # Run the best model of the three (see run_all_classifiers for the rest)
    run_best_model(trainDoc, trainClass, testDoc, testClass)
    print("\n\n Do you want to See the Output of other classifiers(Decision Tree/K-NN) too?:")
    choice = input("[Y/N]:")
    if choice in ('Y', 'y'):
        # run all the 3 classifiers
        run_all_classifiers(trainDoc, trainClass, testDoc, testClass)
# program starts from here
if __name__ == '__main__':
    # Run the pipeline only when executed as a script, not on import.
    main()
| StarcoderdataPython |
3459569 | <filename>pensa/dimensionality/pca.py
import numpy as np
import pyemma
from pyemma.util.contexts import settings
import MDAnalysis as mda
import matplotlib.pyplot as plt
# --- METHODS FOR PRINCIPAL COMPONENT ANALYSIS ---
def calculate_pca(data):
    """
    Performs a PyEMMA PCA on the provided data.

    Parameters
    ----------
    data : float array
        Trajectory data [frames,frame_data].

    Returns
    -------
    pca : PCA obj
        Principal components information.
    """
    return pyemma.coordinates.pca(data)
def pca_eigenvalues_plot(pca, num=12, plot_file=None):
    """
    Plots the highest eigenvalues over the number of the principal components.

    Parameters
    ----------
    pca : PCA obj
        Principal components information.
    num : int, optional
        Number of eigenvalues to plot. Defaults to 12.
    plot_file : str, optional
        Path and name of the file to save the plot.

    Returns
    -------
    component_numbers : int array
        1-based component indices.
    eigenvalues : float array
        The first num eigenvalues.
    """
    component_numbers = np.arange(num) + 1
    eigenvalues = pca.eigenvalues[:num]
    # Single panel: eigenvalue magnitude vs. component index.
    fig, ax = plt.subplots(1, 1, figsize=[4, 3], dpi=300)
    ax.plot(component_numbers, eigenvalues, 'o')
    ax.set_xlabel('component number')
    ax.set_ylabel('eigenvalue')
    fig.tight_layout()
    # Save the figure to a file if requested
    if plot_file:
        fig.savefig(plot_file, dpi=300)
    return component_numbers, eigenvalues
def pca_features(pca, features, num, threshold, plot_file=None):
    """
    Prints relevant features and plots feature correlations.

    Parameters
    ----------
    pca : PCA obj
        The PCA of which to plot the features.
    features : list of str
        Features for which the PCA was performed
        (obtained from features object via .describe()).
    num : int
        Number of feature correlations to plot.
    threshold : float
        Features with a correlation above this will be printed.
    plot_file : str, optional
        Path and name of the file to save the plot.

    Returns
    -------
    test_graph : list of float array
        Correlation of every feature with each of the first num PCs.
    test_corr : list of float
        Correlations whose magnitude exceeds the threshold, in print order.
    """
    # Plot the highest PC correlations and print relevant features
    test_graph = []
    test_corr = []
    fig,ax = plt.subplots(num,1,figsize=[4,num*3],dpi=300,sharex=True)
    # BUGFIX: for num == 1, plt.subplots returns a bare Axes object rather
    # than an array, so ax[i] below would raise; atleast_1d normalizes it.
    ax = np.atleast_1d(ax)
    for i in range(num):
        relevant = pca.feature_PC_correlation[:,i]**2 > threshold**2
        print("Features with abs. corr. above a threshold of %3.1f for PC %i:"%(threshold, i+1))
        for j, ft in enumerate(features):
            if relevant[j]:
                print(ft, "%6.3f"%(pca.feature_PC_correlation[j,i]))
                test_corr.append(pca.feature_PC_correlation[j,i])
        ax[i].plot(pca.feature_PC_correlation[:,i])
        ax[i].set_xlabel('feature index')
        ax[i].set_ylabel('correlation with PC%i'%(i+1))
        test_graph.append(pca.feature_PC_correlation[:,i])
    fig.tight_layout()
    # Save the figure to a file
    if plot_file: fig.savefig(plot_file,dpi=300)
    return test_graph, test_corr
def project_on_pc(data, ev_idx, pca=None):
    """
    Projects a trajectory onto an eigenvector of its PCA.

    Parameters
    ----------
    data : float array
        Trajectory data [frames,frame_data].
    ev_idx : int
        Index of the eigenvector to project on (starts with zero).
    pca : PCA obj, optional
        Information of pre-calculated PCA. Defaults to None.
        Must be calculated for the same features (but not necessarily the same trajectory).

    Returns
    -------
    projection : float array
        Value along the PC for each frame.
    """
    # Perform PCA if none is provided
    if pca is None:
        pca = pyemma.coordinates.pca(data)
    # One matrix-vector product replaces the original per-frame Python
    # loop of np.dot calls — identical result, single vectorized call.
    projection = np.dot(data, pca.eigenvectors[:, ev_idx])
    return projection
def get_components_pca(data, num, pca=None, prefix=''):
    """
    Projects a trajectory onto the first num eigenvectors of its PCA.

    Parameters
    ----------
    data : float array
        Trajectory data [frames,frame_data].
    num : int
        Number of eigenvectors to project on.
    pca : PCA obj, optional
        Information of pre-calculated PCA. Defaults to None.
        Must be calculated for the same features (but not necessarily the same trajectory).
    prefix : str, optional
        Prefix prepended to each component name. Defaults to ''.

    Returns
    -------
    comp_names : list
        Names/numbers of the components ('PC1', 'PC2', ...).
    components : float array
        Component data [frames,components]
    """
    # Perform PCA if none is provided
    if pca is None:
        pca = pyemma.coordinates.pca(data)
    # One matrix product projects every frame on the first num eigenvectors
    # at once, replacing the original per-frame/per-component Python loops;
    # the resulting shape [frames, num] matches np.array(list).T.
    components = np.dot(data, pca.eigenvectors[:, :num])
    comp_names = [prefix + 'PC' + str(i + 1) for i in range(num)]
    return comp_names, components
def sort_traj_along_pc(data, pca, start_frame, top, trj, out_name, num_pc=3):
    """
    Sort a trajectory along given principal components.

    Writes one .xtc trajectory per PC (out_name + "_pc<i>.xtc") with the
    frames reordered by their projection on that PC.

    Parameters
    ----------
    data : float array
        Trajectory data [frames,frame_data].
    pca : PCA obj
        Principal components information.
    start_frame : int
        Offset of the data with respect to the trajectories (defined below).
    top : str
        File name of the reference topology for the trajectory.
    trj : str
        File name of the trajetory from which the frames are picked.
        Should be the same as data was from.
    out_name : str
        Core part of the name of the output files.
    num_pc : int, optional
        Sort along the first num_pc principal components. Defaults to 3.

    Returns
    -------
    return_str : list
        One entry per PC; note every entry is the same 'all' AtomGroup.
    all_proj : list of float array
        Projection of each frame onto each of the first num_pc PCs.
    """
    # Remember the index in the simulation (taking into account cutoff)
    oidx = np.arange(len(data))+start_frame
    # Define the MDAnalysis trajectories from where the frames come
    u = mda.Universe(top,trj)
    a = u.select_atoms('all')
    return_str = []
    all_proj = []
    # Loop through the principal components
    for evi in range(num_pc):
        # Project the combined data on the principal component
        proj = project_on_pc(data,evi,pca=pca)
        all_proj.append(proj)
        # Sort everything along the projection onto the PC
        sort_idx = np.argsort(proj)
        proj_sort = proj[sort_idx]  # sorted projection values (currently unused)
        oidx_sort = oidx[sort_idx]
        # Write the trajectory, ordered along the PC
        with mda.Writer(out_name+"_pc"+str(evi+1)+".xtc", a.n_atoms) as W:
            for i in range(data.shape[0]):
                # Advancing the reader updates the coordinates that W.write(a) emits
                ts = u.trajectory[oidx_sort[i]]
                W.write(a)
        return_str.append(a)
    return return_str, all_proj
def sort_trajs_along_common_pc(data_a, data_b, start_frame, top_a, top_b, trj_a, trj_b, out_name, num_pc=3):
    """
    Sort two trajectories along their most important common principal components.

    A joint PCA is computed over the concatenated data; each output file
    <out_name>_pc<k>.xtc contains the frames of BOTH trajectories ordered
    by their projection onto component k.

    Parameters
    ----------
    data_a : float array
        Trajectory data [frames,frame_data].
    data_b : float array
        Trajectory data [frames,frame_data].
    start_frame : int
        Offset of the data with respect to the trajectories (defined below).
    top_a : str
        Reference topology for the first trajectory.
    top_b : str
        Reference topology for the second trajectory.
    trj_a : str
        First of the trajetories from which the frames are picked.
        Should be the same as data_a was from.
    trj_b : str
        Second of the trajetories from which the frames are picked.
        Should be the same as data_b was from.
    out_name : str
        Core part of the name of the output files.
    num_pc : int
        Number of principal components to sort along.
    """
    # Combine the input data
    data = np.concatenate([data_a, data_b], 0)
    # Remember which simulation each frame came from (1 = a, 0 = b)
    cond = np.concatenate([np.ones(len(data_a)), np.zeros(len(data_b))])
    # Remember the index in the respective simulation (taking into account cutoff)
    oidx = np.concatenate([np.arange(len(data_a)) + start_frame,
                           np.arange(len(data_b)) + start_frame])
    # Calculate the principal components.
    # Bug fix: the dimension used to be hard-coded to 3, so calling this
    # function with num_pc > 3 failed; compute exactly num_pc components.
    pca = pyemma.coordinates.pca(data, dim=num_pc)
    # Define the MDAnalysis trajectories from where the frames come
    ua = mda.Universe(top_a, trj_a)
    ub = mda.Universe(top_b, trj_b)
    # ... and select all atoms
    aa = ua.select_atoms('all')
    ab = ub.select_atoms('all')
    return_str = []
    # Loop over principal components.
    for evi in range(num_pc):
        # Project the combined data on the principal component
        proj = project_on_pc(data, evi, pca=pca)
        # Sort everything along the projection on the resp. PC
        sort_idx = np.argsort(proj)
        cond_sort = cond[sort_idx]
        oidx_sort = oidx[sort_idx]
        # Write the trajectory, ordered along the PC.
        # NOTE(review): the writer is sized from aa; this assumes both
        # trajectories have the same atom count -- confirm.
        with mda.Writer(out_name + "_pc" + str(evi + 1) + ".xtc", aa.n_atoms) as W:
            for i in range(data.shape[0]):
                if cond_sort[i] == 1:  # frame from trajectory a
                    ua.trajectory[oidx_sort[i]]
                    W.write(aa)
                    # NOTE(review): appends once per frame here, while
                    # sort_traj_along_pc appends once per PC -- confirm intent.
                    return_str.append(aa)
                elif cond_sort[i] == 0:  # frame from trajectory b
                    ub.trajectory[oidx_sort[i]]
                    W.write(ab)
                    return_str.append(ab)
    return return_str
def sort_mult_trajs_along_common_pc(data, start_frame, top, trj, out_name, num_pc=3):
    """
    Sort multiple trajectories along their most important common principal components.

    A joint PCA is computed over the concatenated data; each output file
    <out_name>_pc<k>.xtc contains the frames of ALL trajectories ordered
    by their projection onto component k.

    Parameters
    ----------
    data : list of float arrays
        List of trajectory data arrays, each [frames,frame_data].
    start_frame : int
        Offset of the data with respect to the trajectories (defined below).
    top : list of str
        Reference topology files.
    trj : list of str
        Trajetories from which the frames are picked.
        trj[i] should be the same as data[i] was from.
    out_name : str
        Core part of the name of the output files.
    num_pc : int
        Number of principal components to sort along.
    """
    num_frames = [len(d) for d in data]
    num_traj = len(data)
    # Combine the input data
    data = np.concatenate(data, 0)
    # Remember which simulation each frame came from
    cond = np.concatenate([i * np.ones(num_frames[i], dtype=int) for i in range(num_traj)])
    # Remember the index in the respective simulation (taking into account cutoff)
    oidx = np.concatenate([np.arange(num_frames[i]) + start_frame for i in range(num_traj)])
    # Calculate the principal components.
    # Bug fix: the dimension used to be hard-coded to 3, so calling this
    # function with num_pc > 3 failed; compute exactly num_pc components.
    pca = pyemma.coordinates.pca(data, dim=num_pc)
    # Define the MDAnalysis trajectories from where the frames come
    univs = []
    atoms = []
    for j in range(num_traj):
        u = mda.Universe(top[j], trj[j])
        print('Length of trajectory', len(u.trajectory))
        univs.append(u)
        atoms.append(u.select_atoms('all'))
    # Loop over principal components.
    for evi in range(num_pc):
        # Project the combined data on the principal component
        proj = project_on_pc(data, evi, pca=pca)
        # Sort everything along the projection on the resp. PC
        sort_idx = np.argsort(proj)
        cond_sort = cond[sort_idx]
        oidx_sort = oidx[sort_idx]
        # Write the trajectory, ordered along the PC.
        # NOTE(review): the writer is sized from atoms[0]; this assumes all
        # trajectories share the same atom count -- confirm.
        with mda.Writer(out_name + "_pc" + str(evi + 1) + ".xtc", atoms[0].n_atoms) as W:
            for i in range(data.shape[0]):
                j = cond_sort[i]
                univs[j].trajectory[oidx_sort[i]]
                W.write(atoms[j])
    return
def compare_projections(data_a, data_b, pca, num=3, saveas=None, label_a=None, label_b=None):
    """
    Compare two datasets along a given principal component.

    For each of the first num components, plots the per-frame projection
    time series (left column) and its histogram (right column) for both
    datasets in a num-by-2 grid of panels.

    Parameters
    ----------
    data_a : float array
        Trajectory data [frames,frame_data]
    data_b : float array
        Trajectory data [frames,frame_data]
    pca : PCA object
        Principal components information.
    num : int
        Number of principal components to plot.
    saveas : str, optional
        Name of the output file.
    label_a : str, optional
        Label for the first dataset.
    label_b : str, optional
        Label for the second dataset.

    Returns
    -------
    val : list
        Per component, the pair [projection_a, projection_b].
    """
    fig, ax = plt.subplots(num, 2, figsize=[8, 3 * num], dpi=300)
    val = []
    for pc_idx in range(num):
        # Projection of each dataset onto this component.
        projections = [project_on_pc(d, pc_idx, pca=pca) for d in (data_a, data_b)]
        ts_ax = ax[pc_idx, 0]
        hist_ax = ax[pc_idx, 1]
        for proj, lbl in zip(projections, (label_a, label_b)):
            ts_ax.plot(proj, alpha=0.5, label=lbl)
            hist_ax.hist(proj, bins=30, alpha=0.5, density=True, label=lbl)
        ts_ax.set_xlabel('frame number')
        ts_ax.set_ylabel('PC %i' % (pc_idx + 1))
        hist_ax.set_xlabel('PC %i' % (pc_idx + 1))
        hist_ax.set_ylabel('frequency')
        # Only draw legends when both datasets are labeled.
        if label_a and label_b:
            ts_ax.legend()
            hist_ax.legend()
        val.append(projections)
    fig.tight_layout()
    if saveas is not None:
        fig.savefig(saveas, dpi=300)
    return val
def compare_mult_projections(data, pca, num=3, saveas=None, labels=None, colors=None):
    """
    Compare multiple datasets along the top principal components.

    For each of the first num components, plots the per-frame projection
    time series (left column) and its histogram (right column) for every
    dataset in a num-by-2 grid of panels.

    Parameters
    ----------
    data : list of float arrays
        Data from multiple trajectories [frames,frame_data]
    pca : PCA object
        Principal components information.
    num : int
        Number of principal components to plot.
    saveas : str, optional
        Name of the output file.
    labels : list of str, optional
        Labels for the datasets. If provided, it must have the same length as data.
    colors : list, optional
        Matplotlib colors for the datasets. If provided, it must have the
        same length as data; defaults to the standard color cycle.
    """
    if labels is not None:
        assert len(labels) == len(data)
    else:
        labels = [None for _ in range(len(data))]
    if colors is not None:
        assert len(colors) == len(data)
    else:
        # Bug fix: the comprehension variable used to be named `num`,
        # shadowing (and under Python 2 clobbering) the num parameter.
        colors = ['C%i' % i for i in range(len(data))]
    # Start the figure
    fig, ax = plt.subplots(num, 2, figsize=[9, 3 * num], dpi=300)
    # Loop over PCs
    for evi in range(num):
        for j, d in enumerate(data):
            # Calculate values along PC for each frame
            proj = project_on_pc(d, evi, pca=pca)
            # Plot the time series in the left panel
            ax[evi, 0].plot(proj, alpha=0.5,
                            label=labels[j], color=colors[j])
            # Plot the histograms in the right panel
            ax[evi, 1].hist(proj, bins=30, alpha=0.5, density=True,
                            label=labels[j], color=colors[j])
        # Axis labels
        ax[evi, 0].set_xlabel('frame number')
        ax[evi, 0].set_ylabel('PC %i' % (evi + 1))
        ax[evi, 1].set_xlabel('PC %i' % (evi + 1))
        ax[evi, 1].set_ylabel('frequency')
        # Legend (only if labels were supplied)
        if labels[0] is not None:
            ax[evi, 0].legend()
            ax[evi, 1].legend()
    fig.tight_layout()
    # Save the figure
    if saveas is not None:
        fig.savefig(saveas, dpi=300)
    return
| StarcoderdataPython |
1876780 | <filename>scripts/python/backend_server/wsgi/alerts.py
#!/usr/bin/env python
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
# ** Copyright UCAR (c) 1992 - 2015
# ** University Corporation for Atmospheric Research(UCAR)
# ** National Center for Atmospheric Research(NCAR)
# ** Research Applications Laboratory(RAL)
# ** P.O.Box 3000, Boulder, Colorado, 80307-3000, USA
# ** See LICENCE.TXT if applicable for licence details
# ** 2015/04/02 23:53:46
# *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
import datetime
import time
import os
import numpy
from numpy import ma
from netCDF4 import Dataset
import StringIO
import base64
import string
import sites
import log_msg
# TrueType font path used for any rendered/imaging output elsewhere.
font_file = "/usr/share/fonts/truetype/freefont/FreeSerif.ttf"

# Sentinel for missing numeric data throughout this module.
MISSING_VALUE = -9999

# Alert severity levels, ordered least to most severe.
(MISSING, CLEAR, WARN, ALERT) = range(4)

# Indices into per-site alert result tuples.
(ALERTS, CODES, GROUPS) = range(3)

# Single-letter codes used in compact alert-group strings.
WARN_LETTER = "w"
ALERT_LETTER = "a"
CLEAR_LETTER = "c"
MISSING_LETTER = "m"

# RGBA display colors for each alert level.
WARN_COLOR = (255,255,0,255)
ALERT_COLOR = (255,0,0,255)
CLEAR_COLOR = (0,128,0,255)
MISSING_COLOR = (190,190,190,255)

# String codes used in serialized alert output.
WARN_CODE = "warning"
ALERT_CODE = "alert"
CLEAR_CODE = "clear"
MISSING_CODE = "missing"

# RWIS road-state sensor value -> human-readable description.
road_state_1_map = {
    0: "No report",
    1: "Dry",
    2: "Moist",
    3: "Moist and chemically treated",
    4: "Wet",
    5: "Wet and chemically treated",
    6: "Ice",
    7: "Frost",
    8: "Snow",
    9: "Snow/Ice Watch",
    10: "Snow/Ice Warning",
    11: "Wet Above Freezing",
    12: "Wet Below Freezing",
    13: "Absorption",
    14: "Absorption at Dewpoint",
    15: "Dew",
    16: "Black Ice Warning",
    17: "Other",
    18: "Slush",
    MISSING_VALUE : MISSING_CODE
}

def max_code(code1, code2):
    """Return the more severe of two alert code strings.

    Severity order is alert > warning > clear; any pair containing only
    unrecognized codes degrades to the missing code.
    """
    for level in (ALERT_CODE, WARN_CODE, CLEAR_CODE):
        if level in (code1, code2):
            return level
    return MISSING_CODE

# Lookup tables from numeric alert level to letter / code string / color.
alert_to_letter = {
    WARN : WARN_LETTER,
    ALERT : ALERT_LETTER,
    CLEAR : CLEAR_LETTER,
    MISSING : MISSING_LETTER
}
alert_to_code = {
    CLEAR : CLEAR_CODE,
    WARN : WARN_CODE,
    ALERT : ALERT_CODE,
    MISSING : MISSING_CODE
}
alert_to_color = {
    CLEAR : CLEAR_COLOR,
    WARN : WARN_COLOR,
    ALERT : ALERT_COLOR,
    MISSING : MISSING_COLOR
}
def get_fpath(ptime, in_dir, basename, suffix):
    """Locate the newest forecast file in the hour ending at ptime + 60 s.

    Scans backwards minute by minute from ptime + 60 down to ptime - 3480,
    looking for <in_dir>/<YYYYMMDD>/<basename>.<YYYYMMDD>.<HHMM>.<suffix>.
    Day-directory listings are cached so each directory is read only once.

    Returns (file_path, epoch_seconds) for the first match found, or
    (None, None) when no file exists in the window.
    """
    listing_cache = {}
    for minutes_back in range(60):
        etime = ptime + 60 - 60 * minutes_back
        stamp = time.strftime("%Y%m%d%H%M", time.gmtime(etime))
        day = stamp[0:8]
        hhmm = stamp[8:12]
        day_dir = os.path.join(in_dir, day)
        if not os.path.exists(day_dir):
            continue
        if day_dir not in listing_cache:
            listing_cache[day_dir] = set(os.listdir(day_dir))
        fname = "%s.%s.%s.%s" % (basename, day, hhmm, suffix)
        if fname in listing_cache[day_dir]:
            return (os.path.join(in_dir, day, fname), etime)
    return (None, None)
class Alerts:
    """Base class for forecast-derived alerts at a set of sites.

    Loads a netCDF forecast file (sites x days x forecast hours) and
    aggregates per-hour alert levels into 0/6/24/72-hour groups, mixing in
    current weather-station and RWIS observations. Written for Python 2
    (uses dict.has_key and list-returning map).
    """
    def __init__(self, fpath, cf, ptime, logg):
        # fpath may be None when no forecast file was found; get_time_alerts
        # then iterates over an empty site list.
        self.nc = None
        self.nc_sites = []
        self.num_days = 0
        self.num_fc_times = 0
        if fpath != None:
            self.nc = Dataset(fpath, "r")
            self.nc_sites = self.nc.variables["site_list"][:]
            self.num_days = len(self.nc.dimensions["days"])
            #self.forecast_time = self.nc.variables["forc_time"][:]
            #self.alert_time = self.forecast_time
            self.num_fc_times = len(self.nc.dimensions["fc_times_per_day"])
        else:
            # Error string only; callers are not forced to check it.
            self.error = "problem loading netcdf file: %s" % fpath
        # Site maps used to decide which observation source applies per site.
        self.obs_sites = sites.get_wx_obs_sites(cf)
        self.rwis_sites = sites.get_rwis_sites(cf)
        self.obs_alerts = ObsAlerts(cf, ptime, logg)
        self.rwis_alerts = RwisAlerts(cf, ptime, logg)
    def get_alert(self, i, d, h):
        """Alert level for site index i, day d, hour h. Overridden by subclasses."""
        return MISSING
    def get_time_alerts(self, start_hour):
        """Return (alerts, alert_codes, alert_groups) dicts keyed by site number.

        alerts: hourly alert levels over the 72 h window starting at start_hour.
        alert_codes / alert_groups: [now, 0-6 h, 6-24 h, 24-72 h] maxima as
        code strings / single letters, where "now" comes from observations.
        """
        alerts = {}
        alert_codes = {}
        alert_groups = {}
        for i in range(len(self.nc_sites)):
            site_num = self.nc_sites[i]
            alerts[site_num] = []
            groups = [[],[],[]]
            for d in range(self.num_days):
                for h in range(self.num_fc_times):
                    hr = d * 24 + h
                    # Only keep the 72-hour window starting at start_hour.
                    if hr < start_hour or hr >= 72 + start_hour:
                        continue
                    alert = self.get_alert(i, d, h)
                    alerts[site_num].append(alert)
                    if hr >= start_hour and hr < start_hour + 6:
                        groups[0].append(alert)
                    if hr >= start_hour + 6 and hr < start_hour + 24:
                        groups[1].append(alert)
                    if hr >= start_hour + 24 and hr < start_hour + 72:
                        groups[2].append(alert)
            # Worst (numerically largest) alert level within each group.
            max_alert = map(lambda x: max(x), groups)
            (a6, a24, a72) = max_alert
            # "Now" alert from observations, if this site has any.
            ob = MISSING
            if self.obs_sites.has_key(site_num):
                obs = self.obs_alerts.get_obs(site_num)
                ob = self.obs_alerts.get_obs_alert(obs)
            if self.rwis_sites.has_key(site_num):
                # Note that rwis takes precedence over observation if
                # it exists
                obs = self.rwis_alerts.get_obs(site_num)
                ob = self.rwis_alerts.get_rwis_alert(obs)
            alert_group0 = alert_to_letter[ob]
            alert_group6 = alert_to_letter[a6]
            alert_group24 = alert_to_letter[a24]
            alert_group72 = alert_to_letter[a72]
            alert_groups[site_num] = [alert_group0, alert_group6, alert_group24, alert_group72]
            alert_codes[site_num] = [alert_to_code[ob], alert_to_code[a6], alert_to_code[a24], alert_to_code[a72]]
        return (alerts, alert_codes, alert_groups)
class RdwxAlerts(Alerts):
    """Road-weather forecast alerts derived from the precip_type field."""

    def __init__(self, cf, ptime):
        """Load the latest road-weather forecast file at or before ptime."""
        self.cf = cf
        (self.file_name, self.file_path_time) = get_fpath(ptime, cf.rdwx_dir, "rdwx_fcst", "nc")
        # NOTE(review): Alerts.__init__ above is declared as
        # (self, fpath, cf, ptime, logg); this call omits logg -- confirm
        # which signature is current.
        Alerts.__init__(self, self.file_name, cf, ptime)
        # Truncate to the containing hour.
        self.alert_time = ptime - ptime % 3600
        self.precip_types = None
        if self.nc is not None:
            self.precip_types = self.nc.variables["precip_type"][:]

    def get_alert(self, i, d, h):
        """Map the forecast precip type at (site i, day d, hour h) to an alert level.

        precip type 1 -> WARN, 2 or 5 -> ALERT, anything else -> CLEAR.
        """
        # Bug fix: the original tested `self.precip_types == None`, which is
        # an elementwise comparison for numpy arrays (ambiguous truth value
        # under modern numpy); use an identity check instead.
        if self.precip_types is None:
            return MISSING
        precip_type = self.precip_types[i][d][h]
        if precip_type == 1:
            return WARN
        if precip_type == 2 or precip_type == 5:
            return ALERT
        return CLEAR

    def get_alert_time(self):
        """Epoch seconds of the hour this alert set is valid for."""
        return self.alert_time

    def get_file_name(self):
        """Path of the forecast file used (None if none was found)."""
        return self.file_name
class TmtAlerts(Alerts):
    """Treatment-forecast alerts (plowing / chemical application)."""

    def __init__(self, cf, ptime):
        """Load the latest treatment forecast file at or before ptime."""
        self.cf = cf
        (self.file_name, self.file_path_time) = get_fpath(ptime, cf.tmt_dir, cf.tmt_base_name, "nc")
        # NOTE(review): Alerts.__init__ above is declared as
        # (self, fpath, cf, ptime, logg); this call omits logg -- confirm
        # which signature is current.
        Alerts.__init__(self, self.file_name, cf, ptime)
        # Truncate to the containing hour.
        self.alert_time = ptime - ptime % 3600
        self.chems = None
        self.plows = None
        self.road_temps = None
        if self.nc is not None:
            self.chems = self.nc.variables["apply_chem"][:]
            self.plows = self.nc.variables["do_plowing"][:]
            self.road_temps = self.nc.variables["road_TempF"][:]

    def get_alert(self, i, d, h):
        """Alert level for (site i, day d, hour h).

        ALERT: treatment recommended and road temp <= 15 F.
        WARN:  treatment recommended and road temp <= 32 F.
        CLEAR: otherwise; MISSING when no forecast data is loaded.
        """
        # Bug fix: the original tested `self.chems != None` etc., which is an
        # elementwise comparison for numpy arrays (ambiguous truth value under
        # modern numpy); use identity checks instead.
        if self.chems is not None and self.plows is not None and self.road_temps is not None:
            chem = self.chems[i][d][h]
            plow = self.plows[i][d][h]
            road_temp = self.road_temps[i][d][h]
            if road_temp <= 15 and (chem > 0 or plow > 0):
                return ALERT
            if road_temp <= 32 and (chem > 0 or plow > 0):
                return WARN
            # Notes kept from the original implementation:
            # if road_temp <= 15 and fabs(air_temp - dewpoint_temp) < 3:
            #     return ALERT
            # if 15 < road_temp and road_temp <= 32 and fabs(air_temp - dewpoint_temp) < 3:
            #     return WARN
            # For treatments chemical is indicated only if > 0, and should be Chemical,
            # For treatments plow is indicated only if > 0, and should be Plow,
            # Then include road temperature
            # OPTIONAL: Up arrow for road temperature + 6 hours is greater than now
            # Change from missing to treatments not configured for this site
            return CLEAR
        else:
            return MISSING

    def get_treatments(self, start_hour):
        """Return (plow, chem, road_temp) per-site dicts over the 72 h window.

        Each dict maps site number to an hourly list covering
        [start_hour, start_hour + 72).
        """
        plow_dict = {}
        chem_dict = {}
        road_temps_dict = {}
        for i in range(len(self.nc_sites)):
            site_num = self.nc_sites[i]
            plow_dict[site_num] = []
            chem_dict[site_num] = []
            road_temps_dict[site_num] = []
            for d in range(self.num_days):
                for h in range(self.num_fc_times):
                    hr = d * 24 + h
                    if hr < start_hour or hr >= 72 + start_hour:
                        continue
                    chem_dict[site_num].append(self.chems[i][d][h])
                    plow_dict[site_num].append(self.plows[i][d][h])
                    road_temp = self.road_temps[i][d][h]
                    # Temperatures above 200 F are sentinel/garbage values.
                    if road_temp > 200:
                        road_temp = MISSING_VALUE
                    road_temps_dict[site_num].append(road_temp)
        return (plow_dict, chem_dict, road_temps_dict)

    def get_alert_time(self):
        """Epoch seconds of the hour this alert set is valid for."""
        return self.alert_time

    def get_file_name(self):
        """Path of the treatment forecast file used (None if none was found)."""
        return self.file_name
class ObsAlerts(Alerts):
    """Current-conditions alerts from surface weather observation files.

    Scans the last hour of observation netCDF files, caches their variables
    in memory, and derives a simple temperature-based alert. Python 2 code
    (string.join, list-returning map).
    """
    def __init__(self, cf, ptime, logg):
        self.cf = cf
        self.nc_var_names = self.get_nc_var_names()
        self.nc_files = []
        self.nc_data = {}
        self.ptime = ptime
        self.alert_time = 0
        # Scan every minute in the hour [ptime - 3600, ptime] for files.
        btime = ptime
        etime = btime - 3600
        fnames = []
        while btime > etime:
            time_tup = time.gmtime(btime)
            day = time.strftime("%Y%m%d", time_tup)
            hhmm = time.strftime("%H%M", time_tup)
            fname = self.get_fname(day, hhmm)
            fname = os.path.join(self.get_in_dir(), day, fname)
            if not fname in fnames and os.path.exists(fname):
                if self.alert_time == 0:
                    # NOTE(review): etime is fixed at ptime - 3600 here; it
                    # looks like btime (the matching minute) may have been
                    # intended -- confirm.
                    self.alert_time = etime
                fnames.append(fname)
            btime -= 60
        for fname in fnames:
            #print "obs fname: %s" % fname
            logg.write_time("Reading obs file: %s\n" % fname)
            nc = Dataset(fname, "r")
            # Skip files with an empty record dimension.
            dimension = nc.dimensions.get("recNum", None)
            if dimension != None:
                if len(dimension) == 0:
                    continue
            self.nc_files.append(nc)
            self.nc_data[nc] = {}
            for k in self.nc_var_names:
                if k == "stationId":
                    # Station ids are char arrays; join them into strings.
                    self.nc_data[nc][k] = numpy.ma.getdata(nc.variables[k][:])
                    self.nc_data[nc][k] = map(lambda x: string.join(x,""), self.nc_data[nc][k])
                else:
                    # Replace masked entries with the module missing sentinel.
                    self.nc_data[nc][k] = numpy.ma.filled(nc.variables[k][:], MISSING_VALUE)
        if self.alert_time == 0:
            self.alert_time = ptime
    def get_alert_time(self):
        """Epoch seconds the observation alerts are valid for."""
        return self.alert_time
    def get_fname(self, day, hhmm):
        """Observation file name for the given day (hhmm unused here)."""
        return "int_obs.%s.nc" % day
    def get_in_dir(self):
        """Input directory holding day subdirectories of obs files."""
        return self.cf.wx_obs_dir
    def get_nc_var_names(self):
        """netCDF variable names to cache for each file."""
        return [
            "site_list",
            "time_nominal",
            self.cf.met_vars.wx_temp_var,
            self.cf.met_vars.wx_dewp_var,
            self.cf.met_vars.wx_wind_spd_var
            ]
    def get_obs(self, site):
        """Return the observation closest in time to ptime for *site*.

        Returns a dict with numeric *_val fields plus formatted strings
        (converted to English units), or {} if the site is not found.
        """
        for nc in self.nc_files:
            nc_data = self.nc_data[nc]
            site_i = numpy.where(nc_data["site_list"] == site)
            obs_times = nc_data["time_nominal"]
            if len(site_i[0]) == 0:
                continue
            # Index of the observation time nearest to ptime.
            time_i = min(range(len(obs_times)), key=lambda i: abs(obs_times[i] - self.ptime))
            temp = nc_data[self.cf.met_vars.wx_temp_var][site_i][0][time_i]
            dewp = nc_data[self.cf.met_vars.wx_dewp_var][site_i][0][time_i]
            wind_spd = nc_data[self.cf.met_vars.wx_wind_spd_var][site_i][0][time_i]
            obs_time = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(obs_times[time_i]))
            temp_units = nc.variables[self.cf.met_vars.wx_temp_var].units
            dewp_units = nc.variables[self.cf.met_vars.wx_dewp_var].units
            wind_spd_units = nc.variables[self.cf.met_vars.wx_wind_spd_var].units
            # convert the temp and wind units to English units
            if temp_units=='degrees Celsius' and temp > MISSING_VALUE:
                temp=(9.0/5.0)*(temp)+32
                temp_units='deg F'
            if dewp_units=='degrees Celsius' and dewp > MISSING_VALUE:
                dewp=(9.0/5.0)*(dewp)+32
                dewp_units='deg F'
            if wind_spd_units=='meters per second' and wind_spd > MISSING_VALUE:
                wind_spd=2.23694*wind_spd
                wind_spd_units='mph'
            # NOTE(review): the "dewp" string below checks temp, not dewp,
            # against MISSING_VALUE -- looks like a copy/paste slip; confirm.
            return {
                "temp_val":temp,
                "dewp_val":dewp,
                "wind_spd":wind_spd,
                "temp": "missing" if MISSING_VALUE == temp else "%.2f %s" % (temp, temp_units),
                "dewp": "missing" if MISSING_VALUE == temp else "%.2f %s" % (dewp, dewp_units),
                "wind_spd": "missing" if MISSING_VALUE == wind_spd else "%.2f %s" % (wind_spd, wind_spd_units),
                "obstime": obs_time
                }
        return {}
    def get_obs_alert(self, obs):
        """Alert level from an observation dict: temp <= 0 F -> WARN, else CLEAR."""
        if obs == None:
            return MISSING
        if not obs.has_key("temp_val"):
            return MISSING
        temp = obs["temp_val"]
        if temp == MISSING_VALUE:
            return MISSING
        if temp <= 0:
            return WARN
        return CLEAR
class RwisAlerts(ObsAlerts):
    """Alerts from RWIS (roadside weather station) mesonet observations.

    Extends ObsAlerts with per-site netCDF variable mappings, unit
    conversion, and quality control for road temperatures.
    """
    def __init__(self, cf, ptime, logg):
        """Initialize rwis alerts"""
        ObsAlerts.__init__(self, cf, ptime, logg)
        self.cf = cf
        # Index of record positions per station id, per open netCDF file.
        self.site_idxs = {}
        for nc in self.nc_files:
            stn_ids = self.nc_data[nc]["stationId"]
            for i in range(len(stn_ids)):
                stn_id = stn_ids[i]
                if not self.site_idxs.has_key(nc):
                    self.site_idxs[nc] = {}
                if not self.site_idxs[nc].has_key(stn_id):
                    self.site_idxs[nc][stn_id] = []
                self.site_idxs[nc][stn_id].append(i)
    def get_fname(self, day, hhmm):
        """Get mesonet file name"""
        return "mesonet.%s.%s00.nc" % (day, hhmm[:2])
    def get_in_dir(self):
        """Get input mesonet directory"""
        return self.cf.mesonet_dir
    def get_nc_var_names(self):
        """Get the netcdf variable names from ascii sites variable file.

        Each line of the file maps a site to the per-site netCDF variable
        names (semicolon-separated); the union of all names is returned.
        """
        f = open(self.cf.rwis_sites_var_file, "r")
        self.site_vars = {}
        all_vars = set()
        for l in f.readlines():
            (site_num, site_id, road_state_1, sub_surface_2, sub_surface_1, road_temp_2, road_temp_1, wind_dir, wind_spd, rel_hum, temp, temp_qcr) = l.strip().split(';')
            site_num = int(site_num)
            self.site_vars[site_num] = (site_num, site_id, road_state_1, sub_surface_2, sub_surface_1, road_temp_2, road_temp_1, wind_dir, wind_spd, rel_hum, temp, temp_qcr)
            all_vars.add(sub_surface_1)
            all_vars.add(sub_surface_2)
            all_vars.add(road_temp_2)
            all_vars.add(road_temp_1)
            all_vars.add(wind_dir)
            all_vars.add(wind_spd)
            all_vars.add(rel_hum)
            all_vars.add(temp)
            all_vars.add(road_state_1)
            all_vars.add(temp_qcr)
        f.close()
        all_vars = list(all_vars)
        all_vars.append("observationTime")
        all_vars.append("stationId")
        return all_vars
    def get_obs(self, site):
        """Get observations from input forecast system netcdf file.

        Returns a dict of QC'd, English-unit observation values and
        formatted strings for the record nearest ptime, or {} if the site
        has no data.
        """
        if not site in self.site_vars:
            return {}
        (site_num,site_id,road_state_1_var_name, sub_surface_2_var_name,sub_surface_1_var_name,road_temp_2_var_name,road_temp_1_var_name,wind_dir_var_name,wind_spd_var_name,rel_hum_var_name,temp_var_name,temp_qcr_var_name) = self.site_vars[site]
        for nc in self.nc_files:
            nc_data = self.nc_data[nc]
            obs_times = nc_data["observationTime"]
            if not self.site_idxs[nc].has_key(site_id):
                continue
            stn_i = self.site_idxs[nc][site_id]
            stn_i = (numpy.array(stn_i),)
            if len(stn_i[0]) == 0:
                continue
            # netCDF variable objects (needed for their units attributes).
            road_state_1_var = nc.variables[road_state_1_var_name]
            sub_surface_2_var = nc.variables[sub_surface_2_var_name]
            sub_surface_1_var = nc.variables[sub_surface_1_var_name]
            road_temp_2_var = nc.variables[road_temp_2_var_name]
            road_temp_1_var = nc.variables[road_temp_1_var_name]
            wind_dir_var = nc.variables[wind_dir_var_name]
            wind_spd_var = nc.variables[wind_spd_var_name]
            rel_hum_var = nc.variables[rel_hum_var_name]
            temp_var = nc.variables[temp_var_name]
            temp_qcr_var = nc.variables[temp_qcr_var_name]
            # Restrict all arrays to this station's records.
            obs_times = obs_times[stn_i]
            road_state_1_data = nc_data[road_state_1_var_name][stn_i]
            sub_surface_2_data = nc_data[sub_surface_2_var_name][stn_i]
            sub_surface_1_data = nc_data[sub_surface_1_var_name][stn_i]
            road_temp_2_data = nc_data[road_temp_2_var_name][stn_i]
            road_temp_1_data = nc_data[road_temp_1_var_name][stn_i]
            wind_dir_data = nc_data[wind_dir_var_name][stn_i]
            wind_spd_data = nc_data[wind_spd_var_name][stn_i]
            rel_hum_data = nc_data[rel_hum_var_name][stn_i]
            temp_data = nc_data[temp_var_name][stn_i]
            temp_qcr_data = nc_data[temp_qcr_var_name][stn_i]
            # Record nearest in time to ptime.
            time_i = min(range(len(obs_times)), key=lambda i: abs(obs_times[i] - self.ptime))
            local_obs_datetime = datetime.datetime.fromtimestamp(obs_times[time_i], tz=self.cf.timezone) # only plot 3 days
            local_obs_time_string = local_obs_datetime.strftime("%Y-%m-%d %H:%M:%S")
            obs_time = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(obs_times[time_i]))
            road_state_1 = road_state_1_data[time_i]
            sub_surface_2 = sub_surface_2_data[time_i]
            sub_surface_1 = sub_surface_1_data[time_i]
            road_temp_2 = road_temp_2_data[time_i]
            road_temp_1 = road_temp_1_data[time_i]
            wind_dir = wind_dir_data[time_i]
            wind_spd = wind_spd_data[time_i]
            rel_hum = rel_hum_data[time_i]
            temp = temp_data[time_i]
            temp_qcr = temp_qcr_data[time_i]
            # convert to English units
            temp_units=temp_var.units
            road_temp_1_units=road_temp_1_var.units
            road_temp_2_units=road_temp_2_var.units
            sub_surface_1_units=sub_surface_1_var.units
            sub_surface_2_units=sub_surface_2_var.units
            wind_spd_units=wind_spd_var.units
            # NOTE(review): the temp guard uses > -9000 while the others use
            # > MISSING_VALUE (-9999) -- confirm whether that is deliberate.
            if temp_var.units=='kelvin' and temp>(-9000):
                temp=(9.0/5.0)*(temp-273.15)+32
                temp_units='deg F'
            if road_temp_1_var.units=='kelvin' and road_temp_1 > MISSING_VALUE:
                road_temp_1=(9.0/5.0)*(road_temp_1-273.15)+32
                road_temp_1_units='deg F'
            if road_temp_2_var.units=='kelvin' and road_temp_2 > MISSING_VALUE:
                road_temp_2=(9.0/5.0)*(road_temp_2-273.15)+32
                road_temp_2_units='deg F'
            if sub_surface_1_var.units=='kelvin' and sub_surface_1 > MISSING_VALUE:
                sub_surface_1=(9.0/5.0)*(sub_surface_1-273.15)+32
                sub_surface_1_units='deg F'
            if sub_surface_2_var.units=='kelvin' and sub_surface_2 > MISSING_VALUE:
                sub_surface_2=(9.0/5.0)*(sub_surface_2-273.15)+32
                sub_surface_2_units='deg F'
            if wind_spd_var.units=='meter/sec' and wind_spd > MISSING_VALUE:
                wind_spd=2.23694*wind_spd
                wind_spd_units='mph'
            # ADDING qc for the road temperatures
            # If we have air temp and it passed the qcr test, take it as truth and compare the road temps to it
            if temp_qcr == 0 and temp != MISSING_VALUE:
                # if the road temp is more than 20 belore or 50 above the air temp, set it to missing
                if (road_temp_1 < temp) and ((temp - road_temp_1) > 20):
                    road_temp_1 = MISSING_VALUE
                elif (road_temp_1 > temp) and ((road_temp_1 - temp) > 50):
                    road_temp_1 = MISSING_VALUE
                if (road_temp_2 < temp) and ((temp - road_temp_2) > 20):
                    road_temp_2 = MISSING_VALUE
                elif (road_temp_2 > temp) and ((road_temp_2 - temp) > 50):
                    road_temp_2 = MISSING_VALUE
            # If temp failed a qc check or it is missing
            else:
                temp = MISSING_VALUE
            # Do a global min/max bound check on road temp
            if (road_temp_1 > 140) or (road_temp_1 < -60):
                road_temp_1 = MISSING_VALUE
            if (road_temp_2 > 140) or (road_temp_2 < -60):
                road_temp_2 = MISSING_VALUE
            return {
                "road_state_1": road_state_1_map[road_state_1],
                "road_temp_val":road_temp_1,
                "temp_val":temp,
                "sub_surface_2":"missing" if MISSING_VALUE == sub_surface_2 else "%.2f %s" % (sub_surface_2, sub_surface_2_units),
                "sub_surface_1":"missing" if MISSING_VALUE == sub_surface_1 else "%.2f %s" % (sub_surface_1, sub_surface_1_units),
                "road_temp_2":"missing" if MISSING_VALUE == road_temp_2 else "%.2f %s" % (road_temp_2, road_temp_2_units),
                "road_temp_1":"missing" if MISSING_VALUE == road_temp_1 else "%.2f %s" % (road_temp_1, road_temp_1_units),
                "wind_dir":"missing" if MISSING_VALUE == wind_dir else "%.2f %s" % (wind_dir, wind_dir_var.units),
                "wind_spd":"missing" if MISSING_VALUE == wind_spd else "%.2f %s" % (wind_spd, wind_spd_units),
                "rel_hum":"missing" if MISSING_VALUE == rel_hum else "%.2f %s" % (rel_hum, rel_hum_var.units),
                "temp":"missing" if MISSING_VALUE == temp else "%.2f %s" % (temp, temp_units),
                "temp_qcr":temp_qcr,
                "obstime":local_obs_time_string
                }
        return {}
    def get_rwis_alert(self, obs):
        """Derive rwis station alerts from temperature and/or road temperature"""
        if obs == {}:
            return MISSING
        temp = MISSING_VALUE
        road_temp = MISSING_VALUE
        if obs.has_key("temp_val"):
            temp = obs["temp_val"]
        if obs.has_key("road_temp_val"):
            road_temp = obs["road_temp_val"]
        # NOTE(review): get_obs already converts to deg F, yet this applies a
        # Kelvin->Fahrenheit conversion again -- confirm the expected units
        # of the dict passed in here.
        if temp != MISSING_VALUE:
            temp = temp * (9.0/5.0) - 459.67
        if road_temp != MISSING_VALUE:
            road_temp = road_temp * (9.0/5.0) - 459.67
        if road_temp != MISSING_VALUE and road_temp <= 32:
            return ALERT
        if temp != MISSING_VALUE and temp <= 32:
            return WARN
        return MISSING
if "__main__" == __name__:
    # Ad-hoc smoke test (Python 2): read RWIS obs for one site at a fixed
    # time and probe the treatment-forecast file path logic.
    ptime = 1467734400
    time_tup = time.gmtime(ptime)
    print time.strftime("%Y%m%d.%H%M", time_tup)
    import backend_sys_path_20160705 as backend_sys_path
    cf = backend_sys_path.State_dictionary["alaska"]
    logg = log_msg.LogMessage("")
    r = RwisAlerts(cf, ptime, logg)
    print r.get_obs(70275013)
    print get_fpath(time.time() - 7200, cf.tmt_dir, cf.tmt_base_name, "nc")
| StarcoderdataPython |
8141504 | import os
import win32com.client as wincl
class redeem:
    """Handlers for Twitch channel-point redemptions and chat events.

    Each handler speaks its response through the Windows SAPI text-to-speech
    engine (win32com "SAPI.SpVoice" dispatch). The raw IRC tag string is
    parsed with the two small helpers below, replacing the copy-pasted
    name/message extraction loops of the original implementation.
    """

    # Channel-point rewards that always speak the same fixed phrase.
    _CANNED_REWARDS = (
        ("custom-reward-id=3c6182d9-2f87-43ad-bc5b-9764317ca104",
         "Drop your weapon. Drop. Drop it! Drop your weapons. Now!"),
        ("custom-reward-id=a3e7db64-a64b-40d6-bcdd-4c59934c17db",
         "Sad. We have to end the stream now. Boo Hoo. Get ready for the raid boys!"),
        ("custom-reward-id=159a44a2-2ba5-4e12-bac8-1cd551bc48d1",
         "Stop. Wait a minute. Now you have to play another game after this."),
        ("custom-reward-id=c76d57c2-aa9c-4137-9bbe-46d4219393c5",
         "Wow. You suck at playing this guy, You better change your hero to this one right here!"),
        ("custom-reward-id=d2240303-53ff-47c2-a741-b7ac31302e94",
         "DRINK! DRINK! DRINK! DRINK! DRINK!"),
    )

    @staticmethod
    def _speaker():
        """Create a fresh SAPI voice object (one per call, as originally)."""
        return wincl.Dispatch("SAPI.SpVoice")

    @staticmethod
    def _display_name(data):
        """Extract the sender's display name from the raw IRC tag string."""
        start = data.index("display-name=") + 13
        return data[start:data.index(";", start)]

    @staticmethod
    def _message_text(data):
        """Extract the chat message body from a PRIVMSG line."""
        return data[data.index("PRIVMSG #ingeniousartist :") + 26:]

    def banhammer(self, name):
        """Announce that *name* was banned for spamming."""
        self._speaker().Speak(name + " has been banned for spamming. Goodbye.")
        return

    def voicecomm(self, keyword):
        """Speak a canned announcement selected by *keyword*."""
        speak = self._speaker()
        if keyword == "guccigang":
            speak.Speak("GUCCIGANG GUCCIGANG GUCCIGANG GUCCIGANG GUCCIGANG GUCCIGANG GUCCIGANG GUCCIGANG.")
            return
        elif keyword == "announcement":
            speak.Speak("Hey everyone we have an announcement to make!")
            return

    def points(self, data):
        """Dispatch a raw IRC message *data* to the matching spoken response."""
        speak = self._speaker()
        # Highlighted message: read it aloud with the sender's name.
        if "msg-id=highlighted-message" in data:
            speak.Speak(self._display_name(data) + " says: " + self._message_text(data))
            return
        # (Re)subscription: thank the subscriber.
        elif "msg-id=resub" in data:
            speak.Speak(self._display_name(data) + " has just subscribed to the channel! Arigatow Go<NAME>.")
            return
        # Fixed-phrase channel-point rewards.
        for reward_id, phrase in self._CANNED_REWARDS:
            if reward_id in data:
                speak.Speak(phrase)
                return
        # Timeout reward: message body names the user being timed out.
        if "custom-reward-id=60785c5c-2e61-4525-a458-888242be5767" in data:
            speak.Speak(self._display_name(data) + " has timed out " + self._message_text(data) + " for 5 minutes")
            return
        # Song request reward.
        elif "custom-reward-id=dc1dc3fc-4c06-4062-8c2e-f53305076913" in data:
            speak.Speak(self._display_name(data) + " has requested a song")
            return
| StarcoderdataPython |
3413360 | <gh_stars>0
# Time: O(logn * log(logn))
# Space: O(1)
import math
class Solution(object):
    def smallestGoodBase(self, n):
        """
        :type n: str
        :rtype: str

        Return (as a string) the smallest base k >= 2 in which the integer
        n is written using only 1's, i.e. n = 1 + k + k^2 + ... + k^m.

        Longer all-ones representations correspond to smaller bases, so we
        try representation lengths from longest to shortest and return the
        first base whose geometric series sums to n. Base n-1 (the two-digit
        representation "11") always works as a fallback.
        """
        num = int(n)
        max_len = int(math.log(num, 2))
        for length in range(max_len, 1, -1):
            base = int(num ** (length ** -1))
            # Bug fix: the float root can round just below the true base,
            # so test both the floor estimate and its successor.
            for cand in (base, base + 1):
                if cand >= 2 and (cand ** (length + 1) - 1) // (cand - 1) == num:
                    return str(cand)
        return str(num - 1)
| StarcoderdataPython |
3545176 | <reponame>cj-mills/OpenCV-Notes<filename>streamlit-demo-color-spaces.py
import streamlit as st
import cv2 as cv
import numpy as np
# Streamlit demo: show one image converted through several OpenCV color spaces.
st.title("Color Spaces")
st.header("RGB")
# OpenCV loads images in BGR channel order; convert to RGB for display.
img_bgr = cv.imread("images/flower.jpg")
st.image(cv.cvtColor(img_bgr, cv.COLOR_BGR2RGB), caption="Input")
st.header("BGR")
# Displaying the raw BGR array swaps the red/blue channels visually.
st.image(img_bgr, "BGR")
st.header("Grayscale")
gray = cv.cvtColor(img_bgr, cv.COLOR_BGR2GRAY)
st.image(gray, caption="Grayscale")
st.header("BGR to HSV (Hue Saturation and Value)")
hsv = cv.cvtColor(img_bgr, cv.COLOR_BGR2HSV)
st.image(hsv, "HSV")
st.header("BGR to LAB")
lab = cv.cvtColor(img_bgr, cv.COLOR_BGR2LAB)
st.image(lab, "LAB")
st.header("Grayscale to HSV")
# Round-trip: expand grayscale back to 3 channels, then convert to HSV.
gray_bgr = cv.cvtColor(gray, cv.COLOR_GRAY2BGR)
bgr_hsv = cv.cvtColor(gray_bgr, cv.COLOR_BGR2HSV)
st.image(bgr_hsv, "Gray to HSV")
| StarcoderdataPython |
1797685 | <reponame>Cobaltians-Samples/Samples-SideMenu-Web
# WARNING :
# install handlebars first (same version as used in js file)
# sudo npm install handlebars@2.0.0 -g
# (you will need nmp (node) to be installed first
#
# use this script like this :
# python compile.py
#
# it will build every file ending with .handlebars in the templates directory.
# if a directory is found, the templates inside it will be compiled and concatenated into a file named after the folder
#
import os
from os.path import basename
# Compiled output goes to tpl/comp; template sources live in tpl/src.
destPath = os.path.join(os.path.normpath(os.path.abspath(os.path.dirname(__file__))),"tpl","comp")
templates_folder = os.path.join(os.path.normpath(os.path.abspath(os.path.dirname(__file__))),"tpl","src")
os.chdir(templates_folder)
print "starting"
# NOTE(review): file/folder names are interpolated unquoted into os.system
# shell commands below -- names containing spaces or shell metacharacters
# will break or inject commands.
for file in os.listdir(templates_folder):
    if os.path.isdir(file):
        # Directory: compile every template inside and append all of them
        # into a single <folder>.js in the output directory.
        folder = file
        print "\ncompiling folder %s templates" % folder
        print "---creating new file."
        os.system('echo "" > {destPath}/{folder}.js'.format(folder=folder, destPath=destPath))
        os.chdir(folder)
        for subfile in os.listdir(os.path.join(templates_folder,folder)):
            if subfile.endswith(".handlebars"):
                print "---Compiling file %s and adding it into created file" % subfile
                # NOTE(review): destPath is already absolute, so the "../"
                # prefix looks redundant -- confirm the intended output path.
                os.system('handlebars {file} >> ../{destPath}/{folder}.js'.format(file=subfile, folder=folder, destPath=destPath))
        os.chdir(os.pardir)
        print "---All folder templates compiled."
    else :
        # Single template: compile to <name>.js in the output directory.
        if file.endswith(".handlebars"):
            print "--compiling file %s" % file
            os.system('handlebars {file} > {destPath}/{destfile}.js'.format(file=file, destfile=file.replace(".handlebars",""), destPath=destPath))
print "end of script."
6608352 | <gh_stars>0
import sqlalchemy
from datetime import datetime
from ml_buff.database import DeclarativeBase
from ml_buff.models import feature_value
from sqlalchemy.orm import relationship
class Feature(DeclarativeBase):
    """ORM model for a named feature; rows are referenced by FeatureValue."""
    __tablename__ = 'features'
    # Surrogate primary key.
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    # Human-readable feature name.
    name = sqlalchemy.Column(sqlalchemy.String)
    # One-to-many link to FeatureValue (back_populates="feature").
    # NOTE(review): cascade="expunge" alone is an unusual cascade setting
    # (no save-update/merge) -- confirm the intended session behavior.
    feature_values = relationship("FeatureValue", back_populates="feature", cascade="expunge")
    # Creation / last-update timestamps maintained by column defaults.
    created_at = sqlalchemy.Column(sqlalchemy.DateTime, default=datetime.now)
    updated_at = sqlalchemy.Column(sqlalchemy.DateTime, default=datetime.now, onupdate=datetime.now)
    def __init__(self, name):
        """Create a feature with the given name."""
        self.name = name
| StarcoderdataPython |
3496693 | import requests
from bs4 import BeautifulSoup
import re
import time
import sys
import urllib.request
import xlwt
from lxml import etree
from multiprocessing import Pool
def getHTMLText(url, cookies):
    """Fetch *url* and return the decoded page text, or None on failure.

    ``cookies`` is forwarded to requests as the ``cookies`` keyword
    argument; the original code passed it positionally, where it was
    silently consumed as the ``params`` argument instead and the cookies
    were never sent.
    """
    try:
        r = requests.get(url, cookies=cookies)
        r.raise_for_status()
        # Use the detected encoding so non-UTF-8 pages decode correctly.
        r.encoding = r.apparent_encoding
        return r.text
    except Exception:
        # Best-effort fetch: report and fall through, returning None.
        print("Failed!")
def getVideoInfo(html):
    """Parse one listing page and return a flat list of video fields.

    For each video entry the following 9 values are appended in order:
    url, name, duration, upload date, uploader name, uploader page url,
    view count, favorite count, comment count.

    NOTE(review): the xpath indices below (4+i*17, 6+i*17, ...) assume
    every '.listchannel' block contributes exactly 17 text nodes — this
    is tied to the exact page markup; confirm before reuse.
    """
    soup=BeautifulSoup(html,"html.parser")
    videoContentList=soup.find('div',attrs={'id':'videobox'})
    #print(videoContentList)  # can be printed for debugging
    videoInfoList=[]
    i=0
    selector=etree.HTML(html)
    for videoLi in videoContentList.find_all('div',attrs={'class':'listchannel'}):
        videoName=videoLi.find('img',attrs={'width':'120'}).get('title')
        videoUrl=videoLi.find('a',attrs={'target':'blank'}).get('href')
        timetext=selector.xpath('//div[@class="listchannel"]/text()')[4+i*17].strip()
        addtimetext=selector.xpath('//div[@class="listchannel"]/text()')[6+i*17].strip()
        try:
            videoAuthorContent=videoLi.find('a',attrs={'target':'_parent'}).getText()
        except AttributeError:
            # Uploader link missing for this entry.
            videoAuthorContent="None"
        #print(videoUrl+str(i))
        try:
            videoAuthorUrl=videoLi.find('a',attrs={'target':'_parent'}).get('href')
        except AttributeError:
            videoAuthorUrl="None"
        viewNumber=selector.xpath('//div[@class="listchannel"]/text()')[10+i*17].strip()
        likeNumber=selector.xpath('//div[@class="listchannel"]/text()')[11+i*17].strip()
        commentNumber=selector.xpath('//div[@class="listchannel"]/text()')[13+i*17].strip()
        videoInfoList.append(videoUrl)  # link
        videoInfoList.append(videoName)  # video name
        videoInfoList.append(timetext)  # video duration
        videoInfoList.append(addtimetext)  # upload date
        videoInfoList.append(videoAuthorContent)  # uploader id
        videoInfoList.append(videoAuthorUrl)  # uploader home page
        videoInfoList.append(viewNumber)  # view count
        videoInfoList.append(likeNumber)  # favorite count
        videoInfoList.append(commentNumber)  # comment count
        i+=1
        #print(videoUrl)
    return videoInfoList
def saveToExcel(videoInfoList):
    """Write the flat 9-fields-per-video list to an .xls spreadsheet.

    ``videoInfoList`` is a flat list laid out as consecutive groups of 9
    values (url, name, duration, upload date, uploader, uploader url,
    views, favorites, comments); each group becomes one spreadsheet row.
    """
    workbook = xlwt.Workbook()
    sheet1 = workbook.add_sheet('sheet1', cell_overwrite_ok=True)
    k = 0
    # Derive the row count from the data instead of a hard-coded 10000:
    # the original indexed past the end of the list whenever fewer than
    # 90000 values had been scraped (IndexError).
    for i in range(len(videoInfoList) // 9):
        for j in range(9):
            print('正在写入的行和列是', i, j)
            sheet1.write(i, j, videoInfoList[k])
            k += 1
    workbook.save('E:\\MyFile\\PythonSpider\\91Best\\top78000.xls')
def main():
    """Scrape listing pages 1..504 and write the collected rows to Excel."""
    cookies=''  # use your own cookies here
    top10000List=[]
    # NOTE(review): `url` is never defined anywhere in this module, so
    # this loop raises NameError at runtime — the base listing URL must
    # be defined (or passed in) before main() can work.
    for page in range(1,505):  # pages 1..500, padded by 5 to avoid running short
        FvUrl=url+str(page)
        print('正在保存的页面为第'+str(page)+'页')
        top10000List+=getVideoInfo(getHTMLText(FvUrl,cookies))
    saveToExcel(top10000List)
| StarcoderdataPython |
8191736 | <reponame>immunIT/octowire-framework
# -*- coding: utf-8 -*-
# Octowire Framework
# Copyright (c) ImmunIT - <NAME> / <NAME>
# License: Apache 2.0
# <NAME> / Eresse <<EMAIL>>
# <NAME> / Ghecko <<EMAIL>>
import inspect
import os
import pathlib
import pkg_resources
import pkgutil
import platform
import subprocess
import sys
import tempfile
from importlib import import_module
from octowire.utils.Logger import Logger
from octowire_framework.core.utils.removal_script import script
from octowire_framework.module.AModule import AModule
class OWFRemove:
    """Uninstalls installed Octowire modules and, optionally, the framework."""

    def __init__(self):
        self.logger = Logger()
        # Names of packages that could not be removed, reported at the end.
        self.not_removed = []

    def _get_installed_modules(self):
        """
        Return a dict of currently installed module(s).
        :return: A dict of currently installed module(s) {'module_name': 'version', ...}.
        """
        module_name = "owfmodules"
        installed_modules = {}
        try:
            package = import_module(module_name)
        except ImportError:
            # The owfmodules namespace package is absent: nothing installed.
            return installed_modules
        for loader, module, is_pkg in pkgutil.walk_packages(package.__path__, prefix=package.__name__ + '.'):
            try:
                imported_module = import_module(module)
                # A package counts as a module if it defines a concrete
                # AModule subclass.
                for x in dir(imported_module):
                    obj = getattr(imported_module, x)
                    if inspect.isclass(obj) and issubclass(obj, AModule) and obj is not AModule:
                        installed_modules[module] = pkg_resources.get_distribution(module).version
            except ImportError:
                self.logger.handle('Error while dynamically importing package "{}"... Unable to removed it'
                                   .format(module), Logger.ERROR)
                self.not_removed.append(module)
        return installed_modules

    @staticmethod
    def _create_uninstall_script():
        """
        Create the uninstall script that will be executed in a subprocess.
        :return: str: the path of the created script file.
        """
        # delete=False: the file must outlive this process so the detached
        # subprocess can execute it.
        file = tempfile.NamedTemporaryFile(mode="w+", suffix=".py", delete=False)
        file.write(script)
        file.close()
        return file.name

    def _manage_uninstall(self, package_name):
        """
        Remove the specified package (module or framework).
        :param package_name: The name of the package to remove (module or framework).
        :return: Bool: True if successfully removed (or launched), False otherwise.
        """
        python_path = sys.executable
        current_dir = pathlib.Path().absolute()
        if package_name != "octowire-framework":
            pipes = subprocess.Popen([python_path, '-m', 'pip', 'uninstall', '-y', package_name],
                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout, stderr = pipes.communicate()
            if pipes.returncode != 0:
                self.logger.handle("Error while removing the '{}' package: {}".format(package_name, stderr.strip()),
                                   Logger.ERROR)
                return False
            else:
                self.logger.handle("'{}' successfully removed".format(package_name), Logger.SUCCESS)
                return True
        else:
            # Removing the framework must happen out-of-process: running the
            # removal script in a detached subprocess releases the owfremove
            # executable so pip can delete it.
            script_name = self._create_uninstall_script()
            log_file = current_dir / "framework_remove.log"
            if platform.system() == "Windows":
                subprocess.Popen([python_path, script_name, '-p', str(os.getpid()), '-f', str(log_file)],
                                 creationflags=subprocess.DETACHED_PROCESS)
            else:
                subprocess.Popen([python_path, script_name, '-p', str(os.getpid()), '-f', str(log_file)])
            self.logger.handle("The remove of the framework was launched in background... check the following "
                               "file to see if it was successfully removed: {}".format(str(log_file)),
                               self.logger.WARNING)
            return True

    def remove(self, remove_framework=None):
        """
        Remove every installed Octowire module, then optionally the framework.
        :param remove_framework: If True, also remove the Octowire framework.
        :return: Nothing
        """
        installed_modules = self._get_installed_modules()
        if not installed_modules:
            self.logger.handle("No module seems installed", Logger.WARNING)
        for module_name, _ in installed_modules.items():
            self.logger.handle(f"Removing module '{module_name}'..", Logger.INFO)
            if not self._manage_uninstall(module_name):
                self.not_removed.append(module_name)
        if len(self.not_removed) > 0:
            self.logger.handle("Unable to remove the following package(s):", Logger.ERROR)
            for module in self.not_removed:
                print(" - {}".format(module))
            self.logger.handle("Please try to uninstall it manually with the following command: "
                               "'pip3 uninstall owfmodules.<category>.<module_name>'", Logger.ERROR)
        if remove_framework:
            self._manage_uninstall("octowire-framework")
        self.logger.handle("User configuration files need to be manually removed; these are present in '~/.owf' "
                           "directory for any user which has run the framework at least once.",
                           self.logger.USER_INTERACT)
| StarcoderdataPython |
4958402 | <filename>web_app/app/game_models/Game.py
"""
Game
====
"""
import random
from .Player import Player
from .GameSettings import GameSettings
from trivia_generator.web_scraper.WebScraper import get_page_by_random
from trivia_generator.web_scraper.WebScraper import get_page_by_category
from trivia_generator.web_scraper.WebScraper import get_page_by_location_zip
from trivia_generator.NLPPreProcessor import create_TUnits
from question_generator.NLPQuestionGeneratorSpacy import nlp_question_generation
class Game:
    """A running instance of a trivia game session; contains all game logic.

    :param game_code: the code used by players to connect to this game.
    :param game_settings: a *GameSettings* object with the game's settings.
    :param host_id: identifier of the hosting client.

    Notable attributes: ``players`` (active ``Player`` objects),
    ``round_number``, ``current_state`` (starts in ``"LOBBY"``),
    ``current_trivia`` / ``current_answer`` (question currently in play),
    and the ``number_of_responses`` / ``number_of_lies`` counters used to
    detect when every player has answered.
    """
    # game_states: list
    def __init__(self, game_code: str,
                 game_settings: GameSettings,
                 host_id: str):
        self.players = []
        self.num_players = 0
        self.game_code = game_code
        self.game_settings = game_settings
        self.host_id = host_id
        self.round_number = 0
        self.current_state = "LOBBY"
        self.game_started = False
        # Question currently in play and its correct answer.
        self.current_trivia = ""
        self.number_of_responses = 0
        self.number_of_lies = 0
        self.current_answer = ""

    def add_player_to_lobby(self, player: Player) -> bool:
        """Adds a player to the current game lobby.
        :param player: the player to be added to the game lobby
        :type player: Player
        :returns: True if the player was added; False once the game has started
        """
        if not self.game_started:
            self.players.append(player)
            self.num_players += 1
            return True
        else:
            return False

    def remove_player_from_lobby(self, player: Player) -> bool:
        """Removes a player from the current game lobby.
        :param player: the player to be removed from the game lobby
        :type player: Player
        :returns: True if the player was successfully removed from the lobby
        """
        self.players.remove(player)
        self.num_players -= 1
        return True

    def start_game(self) -> bool:
        """Finalizes the lobby and begins a game session.
        :returns: True if the game session was successfully started
        """
        self.game_started = True
        self.round_number = 1
        return True

    def get_round_number(self) -> int:
        """Returns the current game round.
        :returns: the current game round number as an integer
        """
        return self.round_number

    def get_score(self) -> dict:
        """Creates and returns a dictionary with the name and score of each player.
        :returns: a dictionary containing the score of each player
        """
        data = dict()
        data['players'] = []
        # Sort the players (in place) from highest to lowest score.
        self.players.sort(key=lambda p: p.current_score, reverse=True)
        for player in self.players:
            player_entry = dict()
            player_entry['name'] = player.name
            player_entry['score'] = player.current_score
            data['players'].append(player_entry)
        return data

    def get_next_trivia(self) -> str:
        """Fetches a trivia question for the upcoming round, based on the GameSettings.
        :returns: a trivia question
        """
        quest_ans_pairs = []
        # Keep sampling articles until one yields at least one Q/A pair.
        while not quest_ans_pairs:
            if self.game_settings.game_mode == 'category':
                print("getting article by category")
                trivia_article = get_page_by_category(self.game_settings.category)
            elif self.game_settings.game_mode == 'location':
                print("getting article by location")
                trivia_article = get_page_by_location_zip(self.game_settings.zip_code)
            else:
                print("getting article by random")
                trivia_article = get_page_by_random()
            tunit_list = create_TUnits(trivia_article)
            if len(tunit_list) > 0:
                tunit = random.choice(tunit_list)
                quest_ans_pairs = nlp_question_generation(tunit.sentence)
        trivia_question, trivia_answer = random.choice(quest_ans_pairs)
        print('found trivia!')
        self.current_trivia = trivia_question
        self.current_answer = trivia_answer
        return trivia_question

    def submit_answer(self, data: dict) -> list:
        """Records a player's answer to the current trivia question.
        :returns: A list: the first value is True if the answer was
            successfully submitted, the second is True if there are no
            players left to answer.
        """
        print("Game submission:", data)
        player = self.get_player_by_sid(data['sid'])
        if player is None:
            return [False, False]
        else:
            player.current_answer = data['answer']
            self.number_of_responses += 1
        print('number of responses:', self.number_of_responses)
        print('number of players:', self.num_players)
        if self.number_of_responses == self.num_players:
            return [True, True]
        return [True, False]

    def submit_lie(self, data: dict) -> list:
        """Records a lie submitted by a player in a fibbage game.
        :returns: A list: the first value reflects whether the lie was
            submitted, the second whether all players have now submitted.
        """
        player = self.get_player_by_sid(data['sid'])
        if player is None:
            return [False, False]
        player.current_lie = data['lie']
        print("submitted lie:", data['lie'])
        self.number_of_lies += 1
        print("number of lies:", self.number_of_lies)
        print('number of players:', self.num_players)
        if self.number_of_lies == self.num_players:
            return [True, True]
        return [True, False]

    def get_trivia_answer_and_responses(self) -> dict:
        """Returns the answer to the current trivia and each player's response.
        :returns: a dictionary containing the trivia answer and player answers
        """
        data = dict()
        data['answer'] = self.current_answer
        self.players.sort(key=lambda p: p.name)
        data['player_answers'] = dict()
        for player in self.players:
            data['player_answers'][player.name] = dict()
            data['player_answers'][player.name]['answer'] = player.current_answer
            is_correct = (player.current_answer == self.current_answer)
            data['player_answers'][player.name]['correct'] = is_correct
            # Reset per-round player state for the next round.
            player.current_answer = ""
        self.round_number += 1
        self.update_scores(data)
        self.number_of_responses = 0
        return data

    def get_fibbage_answer_and_responses(self) -> dict:
        """Returns the fibbage answer plus each player's lie and answer.
        :returns: a dictionary containing the trivia answer, and the lie and answer of each player
        """
        data = dict()
        data['answer'] = self.current_answer
        data['players'] = []
        for player in self.players:
            player_info = dict()
            player_info['name'] = player.name
            player_info['answer'] = player.current_answer
            is_correct = (player.current_answer == self.current_answer)
            player_info['correct'] = is_correct
            player_info['lie'] = player.current_lie
            # How many players picked this player's lie as their answer.
            num_fooled = len([p.current_answer
                              for p in self.players
                              if p.current_answer == player.current_lie])
            player_info['fooled'] = num_fooled
            player.number_fooled = num_fooled
            data['players'].append(player_info)
        self.round_number += 1
        self.number_of_responses = 0
        self.update_fibbage_scores(data)
        return data

    def get_fibbage_lies_and_answer(self) -> dict:
        """Returns all user-submitted lies for the current fibbage trivia, and the real answer.
        :returns: a dictionary containing the trivia answer and the players' lies
        """
        data = dict()
        data['answer'] = self.current_answer
        data['lies'] = []
        for player in self.players:
            lie = player.current_lie
            if lie != "":
                data['lies'].append(lie)
            # player.current_lie = ""
        # self.numer_of_lies = 0
        return data

    def update_fibbage_scores(self, data):
        """Updates each player's score from their answer and how many players their lie fooled."""
        for player in self.players:
            if data['answer'] == player.current_answer:
                player.update_score(1)
            player.update_score(player.number_fooled)
            # Reset per-round fibbage state.
            player.number_fooled = 0
            player.current_lie = ""
            player.current_answer = ""
        self.number_of_lies = 0

    def update_scores(self, data):
        """Updates the scores of each player based on the data of each player."""
        for player in self.players:
            if data['player_answers'][player.name]['correct']:
                # TODO determine how many points they should get
                player.update_score(1)

    def submit_trivia_rank(self, rank):
        # TODO
        # 1. find current trivia TUnit
        # 2. update TUnit in DB based on rank
        print("trivia recieved rank", rank)

    def display_category_options(self) -> bool:
        """If applicable (depending on game mode), send selectable categories to the front end.
        :returns: True if categories were fetched and sent to the front end, False otherwise
        """
        pass

    def determine_winners_of_round(self):
        """Based on the current trivia and the received answers, determine who won the round."""
        pass

    def prompt_for_lie(self) -> bool:
        """If applicable (depending on game mode), tell the front end to prompt players for a fake answer.
        :returns: True if info was successfully sent to the front end, False otherwise
        """
        pass

    def finish_game(self) -> bool:
        """After all rounds, send "credits" info to the front end and update user statistics.
        :returns: True if info was sent and user statistics were updated, False otherwise
        """
        pass

    def get_player_by_sid(self, sid: str) -> Player:
        """Returns the player in this game with the given SID, or None if not found."""
        for player in self.players:
            if sid == player.ID:
                return player
        return None
| StarcoderdataPython |
9777549 | import ctypes as ct
import numpy as np
class BeeDNN:
    """Thin ctypes wrapper around the BeeDNN shared library."""
    c_float_p = ct.POINTER(ct.c_float)
    # .dll is appended automatically under Windows, .so under Linux.
    lib = ct.cdll.LoadLibrary("./BeeDNNLib")
    # Declare C signatures so ctypes marshals arguments correctly.
    lib.create.argtypes=[ct.c_int32]
    lib.create.restype=ct.c_void_p
    lib.add_layer.argtypes = [ct.c_void_p,ct.c_char_p]
    lib.set_classification_mode.argtypes = [ct.c_void_p,ct.c_int32]
    lib.predict.argtypes=[ct.c_void_p,c_float_p,c_float_p,ct.c_int32]

    def __init__(self,inputSize):
        # Opaque handle to the native network object.
        self.net = ct.c_void_p(self.lib.create(inputSize))
        self.inputSize=inputSize

    def add_layer(self,layer_name):
        """Append a layer to the native network by its (string) type name."""
        cstr = ct.c_char_p(layer_name.encode('utf-8'))
        self.lib.add_layer(self.net,cstr)

    def set_classification_mode(self,bClassificationMode):
        """Set classification mode on the native net (truthy -> 1, falsy -> 0)."""
        self.lib.set_classification_mode(self.net,ct.c_int32(bClassificationMode))

    def predict(self,mIn,mOut):
        """Run inference on *mIn*, writing the results into *mOut* in place.

        NOTE(review): *mOut* must be a preallocated, C-contiguous float32
        array (the commented-out line suggests shape (n_samples, 1));
        passing any other dtype silently corrupts memory — confirm.
        """
        data_in = mIn.astype(np.float32)
        nbSamples=ct.c_int32(mIn.shape[0])
        # mOut=np.zeros((mIn.shape[0],1),dtype=np.float32) #todo
        data_p_in = data_in.ctypes.data_as(self.c_float_p)
        data_p_out = mOut.ctypes.data_as(self.c_float_p)
        self.lib.predict(self.net,data_p_in,data_p_out,nbSamples)
| StarcoderdataPython |
8154908 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-01-23 17:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for easyrequest_hay_app.

    Drops the denormalized item_* and patron_* columns from ItemRequest
    and adds ``modified_datetime`` plus a free-form ``patron_info`` field.
    """
    dependencies = [
        ('easyrequest_hay_app', '0003_auto_20180118_1216'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='itemrequest',
            name='item_author',
        ),
        migrations.RemoveField(
            model_name='itemrequest',
            name='item_barcode',
        ),
        migrations.RemoveField(
            model_name='itemrequest',
            name='item_bib',
        ),
        migrations.RemoveField(
            model_name='itemrequest',
            name='item_callnumber',
        ),
        migrations.RemoveField(
            model_name='itemrequest',
            name='item_digital_version_url',
        ),
        migrations.RemoveField(
            model_name='itemrequest',
            name='item_id',
        ),
        migrations.RemoveField(
            model_name='itemrequest',
            name='item_publish_info',
        ),
        migrations.RemoveField(
            model_name='itemrequest',
            name='patron_barcode',
        ),
        migrations.RemoveField(
            model_name='itemrequest',
            name='patron_email',
        ),
        migrations.RemoveField(
            model_name='itemrequest',
            name='patron_name',
        ),
        migrations.AddField(
            model_name='itemrequest',
            name='modified_datetime',
            field=models.DateTimeField(auto_now=True, null=True),
        ),
        migrations.AddField(
            model_name='itemrequest',
            name='patron_info',
            field=models.TextField(blank=True, null=True),
        ),
    ]
| StarcoderdataPython |
6512692 | <reponame>ariafyy/R2Base
import requests
import os
host_url = "http://localhost:8000"
def delete_index(index_id):
    """Delete index *index_id* on the r2base server; raise on HTTP error."""
    endpoint = os.path.join(host_url, 'r2base/v1/index/{}'.format(index_id))
    response = requests.delete(url=endpoint)
    if response.status_code > 300:
        raise Exception(response.json())
def make_index(index_id, mapping):
    """Create index *index_id* with the given field mappings.

    Returns the decoded server reply; raises on any status above 300.
    """
    endpoint = os.path.join(host_url, 'r2base/v1/index/{}'.format(index_id))
    response = requests.post(url=endpoint, json={'mappings': mapping})
    if response.status_code > 300:
        raise Exception(response.json())
    return response.json()
def add_docs(index_id, docs):
    """Index *docs* into *index_id*; return the decoded server reply.

    Raises Exception with the decoded error body on any status above 300.
    """
    # Fix: the payload key was misspelled 'batch_siz'; the server
    # presumably expects 'batch_size' — confirm against the r2base API.
    res = requests.post(url=os.path.join(host_url, 'r2base/v1/index/{}/docs'.format(index_id)),
                        json={'docs': docs, 'batch_size': 100})
    if res.status_code > 300:
        raise Exception(res.json())
    return res.json()
def search(index_id, query):
    """Run *query* against *index_id* and return the decoded response.

    Raises Exception with the decoded error body on any status above 300.
    """
    endpoint = os.path.join(host_url, 'r2base/v1/search/{}/query'.format(index_id))
    response = requests.post(url=endpoint, json={'query': query})
    if response.status_code > 300:
        raise Exception(response.json())
    return response.json()
if __name__ == "__main__":
mapping = {
'doc_id': {'type': 'keyword'},
'v': {'type': 'vector', 'num_dim': 3},
'v2': {'type': 'vector', 'num_dim': 3}
}
index = 'v-test'
docs = []
docs.append({'doc_id': '1', 'v': [1, 2, 3], 'v2': [1, 2, 3]})
docs.append({'doc_id': '2', 'v': [-1, -2, -3], 'v2': [-1, -2, -3]})
docs.append({'doc_id': '3', 'v': [7, 8, 9], 'v2': [7, 8, 9]})
delete_index(index)
make_index(index, mapping)
add_docs(index, docs)
import time
time.sleep(2)
print(search(index, {'match': {'v': [1, 2, 3]}}))
print(search(index, {'match': {'v': {'value': [1,2,3], "threshold": 0.8},
'v2': {'value': [-2,2,-3], "threshold": 0.0}}}))
| StarcoderdataPython |
5194055 | <filename>apps/breakfast/tools/Life/tools/cx/messages/CxRecordRequestMsg.py
#
# This class is automatically generated by mig. DO NOT EDIT THIS FILE.
# This class implements a Python interface to the 'CxRecordRequestMsg'
# message type.
#
import tinyos.message.Message
# The default size of this message type in bytes.
DEFAULT_MESSAGE_SIZE = 8

# The Active Message type associated with this message.
AM_TYPE = 240

class CxRecordRequestMsg(tinyos.message.Message.Message):
    """Python accessor class for the CxRecordRequestMsg TinyOS message.

    Auto-generated by mig — regenerate rather than hand-edit. Fields:
    node_id (uint16 at bit 0), cookie (uint32 at bit 16) and length
    (uint16 at bit 48).

    NOTE(review): written for Python 2; under Python 3 the offset_* and
    size_* helpers return floats because ``/`` is true division.
    """
    # Create a new CxRecordRequestMsg of size 8.
    def __init__(self, data="", addr=None, gid=None, base_offset=0, data_length=8):
        tinyos.message.Message.Message.__init__(self, data, addr, gid, base_offset, data_length)
        self.amTypeSet(AM_TYPE)

    # Get AM_TYPE
    def get_amType(cls):
        return AM_TYPE

    get_amType = classmethod(get_amType)

    #
    # Return a String representation of this message. Includes the
    # message type name and the non-indexed field values.
    #
    def __str__(self):
        s = "Message <CxRecordRequestMsg> \n"
        # Each field is appended best-effort; a failing accessor is skipped.
        try:
            s += " [node_id=0x%x]\n" % (self.get_node_id())
        except:
            pass
        try:
            s += " [cookie=0x%x]\n" % (self.get_cookie())
        except:
            pass
        try:
            s += " [length=0x%x]\n" % (self.get_length())
        except:
            pass
        return s

    # Message-type-specific access methods appear below.

    #
    # Accessor methods for field: node_id
    # Field type: int
    # Offset (bits): 0
    # Size (bits): 16
    #

    #
    # Return whether the field 'node_id' is signed (False).
    #
    def isSigned_node_id(self):
        return False

    #
    # Return whether the field 'node_id' is an array (False).
    #
    def isArray_node_id(self):
        return False

    #
    # Return the offset (in bytes) of the field 'node_id'
    #
    def offset_node_id(self):
        return (0 / 8)

    #
    # Return the offset (in bits) of the field 'node_id'
    #
    def offsetBits_node_id(self):
        return 0

    #
    # Return the value (as a int) of the field 'node_id'
    #
    def get_node_id(self):
        return self.getUIntElement(self.offsetBits_node_id(), 16, 1)

    #
    # Set the value of the field 'node_id'
    #
    def set_node_id(self, value):
        self.setUIntElement(self.offsetBits_node_id(), 16, value, 1)

    #
    # Return the size, in bytes, of the field 'node_id'
    #
    def size_node_id(self):
        return (16 / 8)

    #
    # Return the size, in bits, of the field 'node_id'
    #
    def sizeBits_node_id(self):
        return 16

    #
    # Accessor methods for field: cookie
    # Field type: long
    # Offset (bits): 16
    # Size (bits): 32
    #

    #
    # Return whether the field 'cookie' is signed (False).
    #
    def isSigned_cookie(self):
        return False

    #
    # Return whether the field 'cookie' is an array (False).
    #
    def isArray_cookie(self):
        return False

    #
    # Return the offset (in bytes) of the field 'cookie'
    #
    def offset_cookie(self):
        return (16 / 8)

    #
    # Return the offset (in bits) of the field 'cookie'
    #
    def offsetBits_cookie(self):
        return 16

    #
    # Return the value (as a long) of the field 'cookie'
    #
    def get_cookie(self):
        return self.getUIntElement(self.offsetBits_cookie(), 32, 1)

    #
    # Set the value of the field 'cookie'
    #
    def set_cookie(self, value):
        self.setUIntElement(self.offsetBits_cookie(), 32, value, 1)

    #
    # Return the size, in bytes, of the field 'cookie'
    #
    def size_cookie(self):
        return (32 / 8)

    #
    # Return the size, in bits, of the field 'cookie'
    #
    def sizeBits_cookie(self):
        return 32

    #
    # Accessor methods for field: length
    # Field type: int
    # Offset (bits): 48
    # Size (bits): 16
    #

    #
    # Return whether the field 'length' is signed (False).
    #
    def isSigned_length(self):
        return False

    #
    # Return whether the field 'length' is an array (False).
    #
    def isArray_length(self):
        return False

    #
    # Return the offset (in bytes) of the field 'length'
    #
    def offset_length(self):
        return (48 / 8)

    #
    # Return the offset (in bits) of the field 'length'
    #
    def offsetBits_length(self):
        return 48

    #
    # Return the value (as a int) of the field 'length'
    #
    def get_length(self):
        return self.getUIntElement(self.offsetBits_length(), 16, 1)

    #
    # Set the value of the field 'length'
    #
    def set_length(self, value):
        self.setUIntElement(self.offsetBits_length(), 16, value, 1)

    #
    # Return the size, in bytes, of the field 'length'
    #
    def size_length(self):
        return (16 / 8)

    #
    # Return the size, in bits, of the field 'length'
    #
    def sizeBits_length(self):
        return 16
| StarcoderdataPython |
11386380 | <filename>fliswarm/tools.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: <NAME> (<EMAIL>)
# @Date: 2020-11-01
# @Filename: tools.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
import asyncio
from typing import Any, Dict, List, Optional, Set, Union
import fliswarm.node
__all__ = ["select_nodes", "FakeCommand", "IDPool", "subprocess_run_async"]
def select_nodes(
    nodes: Dict[str, Any],
    category: Optional[str] = None,
    names: Optional[Union[str, List[str]]] = None,
) -> Set["fliswarm.node.Node"]:
    """Return the enabled nodes that match the given filters.

    Parameters
    ----------
    nodes
        Mapping of node name to `.Node` instance to be filtered.
    category
        Optional category filter.
    names
        Node names to match, as a list or a comma-separated string.

    Returns
    -------
    :
        The enabled `.Node` instances matching ``names`` or ``category``.
        When neither filter is given, all enabled nodes are returned.
    """
    if names and isinstance(names, str):
        names = [part.strip() for part in names.split(",")]

    candidates = set()
    all_nodes = nodes.values()

    if names:
        candidates.update(node for node in all_nodes if node.name in names)
    if category:
        candidates.update(node for node in all_nodes if node.category in category)
    if not names and not category:
        candidates.update(all_nodes)

    return {node for node in candidates if node.enabled}
class FakeCommand:
    """A stand-in for `~clu.command.Command` whose every method is a no-op."""

    def __getattr__(self, item):
        # Any attribute access yields a callable that accepts anything
        # and does nothing.
        def _noop(*args, **kwargs):
            return None

        return _noop
class IDPool:
    """Pool of integer IDs that recycles values returned to it."""

    def __init__(self):
        # IDs ever handed out, and IDs given back and awaiting reuse.
        self.emitted: Set[int] = set()
        self.returned: Set[int] = set()

    def get(self):
        """Return the lowest recycled ID, or a fresh one past the maximum."""
        if self.returned:
            recycled = min(self.returned)
            self.returned.discard(recycled)
            return recycled
        fresh = 1 if not self.emitted else max(self.emitted) + 1
        self.emitted.add(fresh)
        return fresh

    def put(self, id: int):
        """Hand *id* back to the pool so it can be reused."""
        self.returned.add(id)
async def subprocess_run_async(*args, shell=False):
    """Runs a command asynchronously.

    If ``shell=True`` the command will be executed through the shell; in
    that case the argument must be a single string with the full command.
    Otherwise it must receive a list of program arguments.

    Returns the finished process object. Note: ``communicate()`` is
    awaited so the process has exited (``cmd.returncode`` is set), but
    the captured stdout/stderr are discarded — the original docstring's
    claim that stdout is returned was incorrect.
    """
    if shell:
        cmd = await asyncio.create_subprocess_shell(
            args[0],
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
    else:
        cmd = await asyncio.create_subprocess_exec(
            *args,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
    await cmd.communicate()
    return cmd
| StarcoderdataPython |
372112 | from flask import Flask
import requests
APP = Flask(__name__)


@APP.route("/")
def home():
    """Root endpoint: return a static greeting."""
    # Plain string literal: the original used an f-string with no
    # placeholders (flake8 F541) — the rendered value is unchanged.
    return "Hello World"
12805627 | from portal_gun.fabric.operations import *
| StarcoderdataPython |
3271068 | #-*- coding: utf-8 -*-
import ask_util
import json
import os, sys
import re
import requests
import time
class ParkReview:
    """Fetches short reviews for an Interpark book product."""

    def get(self,prdNo):
        """Return a list of review dicts for product *prdNo*.

        Each dict has keys: ``star`` (average score), ``reg_nm``
        (reviewer name, partially masked), ``reg_dt`` (registration
        date) and ``comment`` (sanitized review title).
        """
        url = 'http://mbook.interpark.com/api/my/review/shortReviewList?sc.prdNo=%s&sc.page=1&sc.row=20' % prdNo
        res = requests.get(url)
        #print(res.text)
        jsonData = res.json()
        #print(jsonData)
        review_list = []
        try:
            for item in jsonData['resData']['list']:
                # Skip reviews whose title is too short to be useful.
                if len(item['usedTitle']) < 3:
                    continue
                star = item['avgScoreTot']
                reg_nm = item['usedMemNm']
                # Mask the reviewer name: keep the first (and, for
                # 3-character names, the last) character; hide the middle.
                if len(reg_nm) ==3:
                    reg_nm = reg_nm[:1]+"-"+reg_nm[2:]
                else:
                    reg_nm = reg_nm[:1]+"-"
                reg_dt = item['regDts']
                # Sanitize the title via the project's SQL/exception helpers.
                comment = ask_util.repl_excp(ask_util.getSqlReplace(item['usedTitle']))
                review_list.append({"star":star,"reg_nm":reg_nm,"reg_dt":reg_dt,"comment":comment})
                # print("start %s, reg_nm %s, reg_dt %s, comment %s" %(star, reg_nm,reg_dt,comment))
        except Exception as e:
            # Best-effort: a malformed payload aborts parsing but still
            # returns whatever was collected so far.
            print(e)
        return review_list
# if __name__ == '__main__':
# inter = ParkReview()
# print(inter.get("348910874"))
# review_list = inter.get("348910874")
# print(len(review_list))
| StarcoderdataPython |
4897909 | # -*- coding: utf-8 -*-
# Swish integration
from __future__ import absolute_import
# Copyright 2019 Open End AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
# Flag: True when running under Python 3. Used throughout this module to
# choose import locations and str/bytes handling.
if (sys.version_info >=(3, 0)):
    PYT3 = True
else:
    PYT3 = False
import decimal
import flask
import os
import re
import tempfile
import time
import OpenSSL.crypto
import dateutil.parser
from bson.objectid import ObjectId
if PYT3:
import json
from io import StringIO
from http import client as httplib
from urllib.request import Request
from urllib import request as urllib2
else:
import json
from StringIO import StringIO
import httplib
import urllib2
import pytransact.commit
import pytransact.context
import accounting.config
import blm.accounting
# Module logger and the Flask blueprint exposing the Swish endpoints.
log = accounting.config.getLogger('swish')
swish_api = flask.Blueprint('swish_api', __name__)
def itercerts(chain):
    """Yield each PEM certificate found in *chain*.

    *chain* may be a file-like object or a string containing one or more
    concatenated PEM certificates. Each yielded value contains the lines
    from ``-----BEGIN CERTIFICATE-----`` through
    ``-----END CERTIFICATE-----`` inclusive, original line content kept.
    """
    try:
        chain.read
    except AttributeError:
        f = StringIO(chain)
    else:
        f = chain
    cert = StringIO()
    for line in f:
        # Compare on the stripped line so files using CRLF line endings,
        # or lacking a trailing newline on the last line, are still
        # recognized (the original compared against the exact
        # '\n'-terminated marker and silently dropped such certs).
        marker = line.strip()
        if marker == '-----BEGIN CERTIFICATE-----':
            cert = StringIO()
        cert.write(line)
        if marker == '-----END CERTIFICATE-----':
            yield cert.getvalue()
def find_root_cert(cert):
    """Classify a PEM chain by its Swish issuing CA.

    Returns ``('live', pem)`` or ``('test', pem)`` for the first
    certificate in *cert* issued by the Swish production or test member
    CA.

    Raises ValueError if the chain contains no Swish-issued certificate
    (the second tuple element is the last issuer seen, or None when the
    input contained no certificates at all — previously the empty-input
    case raised UnboundLocalError instead).
    """
    issuer = None
    for pem in itercerts(cert):
        # Rename from the original's `cert` to avoid shadowing the parameter.
        parsed = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, pem)
        issuer = dict(parsed.get_issuer().get_components())
        if PYT3:
            # pyOpenSSL returns bytes components on Python 3; normalize to str.
            issuer = {k.decode(): v.decode() for (k, v) in issuer.items()}
        # Use .get() so a certificate missing O/OU/CN is skipped instead
        # of aborting the scan with a KeyError mid-chain.
        if (issuer.get('O') == 'Getswish AB' and
                issuer.get('OU') == 'Swish Member CA' and
                issuer.get('CN') == 'Swish Root CA v1'):
            return 'live', pem
        if (issuer.get('O') == 'Getswish AB' and
                issuer.get('OU') == 'Swish Member CA' and
                issuer.get('CN') == 'Swish Root CA v2 Test'):
            return 'test', pem
    raise ValueError('Bad certificate', issuer)
class HTTPSClientAuthHandler(urllib2.HTTPSHandler):
    """urllib2 HTTPS handler that authenticates with a client certificate.

    *cert* and *key* are filesystem paths to the PEM certificate and
    private key used for TLS client authentication.
    """
    def __init__(self, cert, key):
        urllib2.HTTPSHandler.__init__(self)
        self.key = key
        self.cert = cert

    def https_open(self, req):
        # Rather than pass in a reference to a connection class, we pass in
        # a reference to a function which, for all intents and purposes,
        # will behave as a constructor
        return self.do_open(self.getConnection, req)

    def getConnection(self, host, timeout=300):
        # NOTE(review): `timeout` is accepted (urllib2 passes it) but not
        # forwarded to HTTPSConnection — connections use the default.
        return httplib.HTTPSConnection(host, key_file=self.key,
                                       cert_file=self.cert)
class Client(object):
    def __init__(self, merchant, cert, pkey, test=False):
        # Swish merchant alias (payee number) plus PEM cert/key material
        # kept in memory; temp files are created lazily in __enter__.
        self.merchant = merchant
        self.cert_data = cert
        self.pkey_data = pkey
        # When True, requests go to the Swish merchant test environment.
        self.test = test
    def __enter__(self):
        """Materialize the in-memory cert/key as temp files for urllib2.

        The files live for the duration of the ``with`` block and are
        deleted when ``__exit__`` closes them.
        """
        self.cert_file = tempfile.NamedTemporaryFile()
        self.pkey_file = tempfile.NamedTemporaryFile()
        if PYT3:
            # Python 3: the temp file is opened in binary mode, so the
            # PEM text must be encoded before writing.
            self.cert_file.write(self.cert_data.encode())
        else:
            self.cert_file.write(self.cert_data)
        self.cert_file.flush()
        if PYT3:
            self.pkey_file.write(self.pkey_data.encode())
        else:
            self.pkey_file.write(self.pkey_data)
        self.pkey_file.flush()
        return self
    def __exit__(self, exception_type, exception_value, traceback):
        # Closing the NamedTemporaryFiles also deletes them from disk.
        self.cert_file.close()
        self.pkey_file.close()
    @classmethod
    def from_toi(cls, toi):
        # Build a Client from a provider TOI object; each attribute is
        # stored as a one-element list on the TOI.
        return cls(toi.swish_id[0], toi.cert[0], toi.pkey[0], toi.is_test[0])
    @property
    def cert(self):
        # (cert_path, key_path) tuple for HTTPSClientAuthHandler; only
        # valid inside the ``with`` block (files are made in __enter__).
        return self.cert_file.name, self.pkey_file.name
    @property
    def callback_root(self):
        """Base URL for Swish callbacks, derived from configuration.

        The FAKE_HTTPS_ROOT environment variable overrides the
        configured base URL (useful for development/testing).
        """
        try:
            baseurl = os.environ['FAKE_HTTPS_ROOT']
        except KeyError:
            baseurl = accounting.config.config.get('accounting', 'baseurl')
        return baseurl + 'providers/swish/webhook/'
@property
def url(self):
if self.test:
return 'https://mss.cpc.getswish.net/swish-cpcapi/api/v1/'
else:
return 'https://cpc.getswish.net/swish-cpcapi/api/v1/'
def _get_url(self, endpoint):
if endpoint.startswith(self.url):
return endpoint
return self.url + endpoint
def get(self, endpoint):
opener = urllib2.build_opener(HTTPSClientAuthHandler(*self.cert))
url = self._get_url(endpoint)
request = urllib2.Request(url)
return opener.open(request)
def post(self, endpoint, payload):
opener = urllib2.build_opener(HTTPSClientAuthHandler(*self.cert))
data = json.dumps(payload)
url = self._get_url(endpoint)
log.info('Posting: %s, %s', url, data)
if PYT3:
request = Request(url, data)
else:
request = urllib2.Request(url, data)
request.add_header('Content-Type', 'application/json')
if PYT3:
request.data = request.data.encode()
return opener.open(request)
def create_payment(self, provider, purchase, **kw):
callback = self.callback_root + 'charge/%s/%s' % (provider, purchase)
kw.setdefault('callbackUrl', callback)
kw.setdefault('payeeAlias', self.merchant)
try:
response = self.post('paymentrequests', payload=kw)
except urllib2.HTTPError as exc:
data = json.load(exc.fp)
return Payment.from_error(data)
return Payment.from_location(response.headers['Location'])
def retrieve(self, refid):
response = self.get('paymentrequests/%s' % refid)
return Payment.from_json(response)
def refund(self, id, **kw):
callback = self.callback_root + 'refund/%s' % id
kw.setdefault('callbackUrl', callback)
kw.setdefault('payerAlias', self.merchant)
try:
response = self.post('refunds', payload=kw)
except urllib2.HTTPError as exc:
print(json.load(exc.fp))
raise
location = response.headers['Location']
# xxx we need an asynchronous api for refunds...
for x in range(20):
response = self.get(location)
payment = Payment.from_json(response)
if payment.status == 'PAID':
return payment
time.sleep(1)
def request_callback(self, payment):
return self.get(payment.location)
class Payment(object):
    """In-memory representation of a Swish payment request/response."""

    def __init__(self, id=None, status='CREATED', currency='SEK',
                 payerAlias=None, paymentReference=None, amount=None,
                 datePaid=None, **kw):
        self.id = id
        self.status = status
        self.currency = currency
        self.payerAlias = payerAlias
        self.paymentReference = paymentReference
        self.amount = self._parse_amount(amount)
        self.datePaid = self._parse_date_paid(datePaid)
        if self.status == 'ERROR':
            self.errors = [{'errorCode': kw['errorCode'],
                            'errorMessage': kw['errorMessage']}]
        else:
            self.errors = None

    @staticmethod
    def _parse_amount(amount):
        # Decimal amount, or None when the value is absent/not convertible.
        try:
            return decimal.Decimal(amount)
        except TypeError:
            return None

    @staticmethod
    def _parse_date_paid(datePaid):
        # Unix timestamp of the paid date, or None when absent/unparsable.
        try:
            return int(dateutil.parser.parse(datePaid).strftime('%s'))
        except (AttributeError, TypeError):
            return None

    @property
    def http_result(self):
        """(json_body, http_status) pair for replying to the frontend."""
        if self.errors:
            return json.dumps(self.errors), 422
        return json.dumps({'id': self.id, 'status': self.status}), 200

    @classmethod
    def from_location(cls, location):
        """Build a Payment from a Location header; id is the last path part."""
        payment = cls(location.split('/')[-1])
        payment.location = location
        return payment

    @classmethod
    def from_error(cls, errors):
        """Build a failed Payment from a Swish error payload (list of dicts)."""
        payment = cls()
        payment.errors = errors
        payment.error = errors[0]['errorCode']
        payment.errorMessage = errors[0]['errorMessage']
        return payment

    @classmethod
    def from_json(cls, stream):
        """Build a Payment from a file-like object containing JSON."""
        return cls.from_dict(json.load(stream))

    @classmethod
    def from_dict(cls, data):
        """Build a Payment from an already-parsed dict."""
        log.debug('Payment from data: %s', data)
        return cls(**data)
def _filter_message(message):
message = re.sub(u'[^a-zA-z0-9åäöÅÄÖ:;.,\\?!\\(\\)]', ' ', message)
message = re.sub(u'[ ]+', ' ', message)
return message[:50]
@swish_api.route('/charge/<objectid:provider>/<objectid:purchase>',
                 methods=['GET', 'POST'])
def charge(provider, purchase):
    """Create a Swish payment request for *purchase* via *provider*.

    Expects a JSON body with at least 'phone' (payer alias). Returns the
    Payment.http_result pair: (json_body, http_status).
    """
    data = flask.request.get_json()
    phone = data['phone']
    # Read everything needed from the DB inside a read-only context.
    with pytransact.context.ReadonlyContext(flask.g.database):
        provider, = blm.accounting.SwishProvider._query(id=provider).run()
        purchase, = blm.members.BasePurchase._query(id=purchase).run()
        amount = purchase.total[0]
        currency = provider.currency[0]
        swish_id = provider.swish_id[0]
        cert = provider.cert[0]
        pkey = provider.pkey[0]
        is_test = provider.is_test[0]
        with Client(swish_id, cert, pkey, test=is_test) as client:
            message = provider.org[0].name[0]
            if is_test:
                # Sandbox mode: let the caller inject a simulator code
                message = data.get('code', message)
            message = _filter_message(message)
            payment = client.create_payment(
                provider=provider.id[0],
                purchase=purchase.id[0],
                payeePaymentReference=purchase.ocr[0],
                payerAlias=phone,
                amount=str(amount.quantize(decimal.Decimal('1.00'))),
                currency=currency,
                message=message,
            )
    return payment.http_result
@swish_api.route('/poll/<objectid:provider>/<refid>', methods=['GET', 'POST'])
def poll(provider, refid):
    """Poll Swish for the current status of payment request *refid*."""
    with pytransact.context.ReadonlyContext(flask.g.database):
        q = blm.accounting.SwishProvider._query(id=provider)
        # Only the credentials are needed from the provider
        q.attrList = ['swish_id', 'cert', 'pkey']
        provider, = q.run()
        swish_id = provider.swish_id[0]
        cert = provider.cert[0]
        pkey = provider.pkey[0]
        is_test = provider.is_test[0]
    with Client(swish_id, cert, pkey, test=is_test) as client:
        payment = client.retrieve(refid)
        result = payment.http_result
    return result
@swish_api.route('/webhook/refund/<objectid:payment>', methods=['GET', 'POST'])
def webhook_refund(payment):
    """Swish refund callback: log the payload and acknowledge with ''."""
    payload = flask.request.get_json()
    log.info('WEBHOOK REFUND: %s', payload)
    return ''
@swish_api.route('/webhook/charge/<objectid:provider>/<objectid:purchase>',
                 methods=['GET', 'POST'])
def webhook_charge(provider, purchase):
    """Swish payment callback: record a PAID payment and email confirmation."""
    data = flask.request.get_json()
    log.info('WEBHOOK CHARGE: %s', data)
    if data['status'] != 'PAID':
        # Only successful payments are recorded; everything else is ignored.
        return ''
    paymentReference = data['paymentReference']
    # Unique commit-interest token so wait_for_commit sees *this* commit.
    interested = 'swish-%s-%s' % (paymentReference, ObjectId())
    with pytransact.commit.CommitContext(flask.g.database) as ctx:
        provider = blm.accounting.SwishProvider._query(id=provider).run()
        purchase = blm.members.BasePurchase._query(id=purchase).run()
        if not provider + purchase:
            # Both query result lists empty -- nothing to record.
            return ''
        op = pytransact.commit.CallBlm('members', 'handleSwishPayment',
                                       [provider, purchase, [data]])
        ctx.runCommit([op], interested)
    result, error = pytransact.commit.wait_for_commit(flask.g.database,
                                                      interested)
    if error:
        raise error
    paymentId = result[0][0]
    # Second commit: fire the confirmation email for the created payment.
    interested = 'send-swish-payment-confirmation-%s' % ObjectId()
    with pytransact.commit.CommitContext(flask.g.database) as ctx:
        op = pytransact.commit.CallToi(paymentId, 'sendConfirmationEmail', [])
        commit = ctx.runCommit([op], interested=interested)
    result, error = pytransact.commit.wait_for_commit(flask.g.database,
                                                      interested=interested)
    if error:
        raise error
    return ''
| StarcoderdataPython |
6502848 | # Django
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
# Models
from coeadmin.user.models import User, Profile
class CustomUserAdmin(UserAdmin):
    """ User model admin. """
    list_display = ('email','username','first_name','phone_number','is_staff','is_pollster', 'is_admin', 'is_verified')
    list_filter = ('is_admin','is_pollster', 'is_staff','created','modified')
    actions = ['is_pollster','is_not_pollster']
    # NOTE(review): these action names read inverted -- the action called
    # 'is_pollster' *clears* the flag and 'is_not_pollster' *sets* it. The
    # short_description strings are correct; consider renaming the methods.
    def is_pollster(self, request, queryset):
        '''Bulk action: clear the is_pollster flag on the selected users.'''
        queryset.update(is_pollster=False)
    is_pollster.short_description = 'Make selected user is not pollster'
    def is_not_pollster(self, request, queryset):
        '''Bulk action: set the is_pollster flag on the selected users.'''
        queryset.update(is_pollster=True)
    is_not_pollster.short_description = 'Make selected user is pollster'
@admin.register(Profile)
class ProfileAdmin(admin.ModelAdmin):
    """ Profile model admin ."""
    list_display = ('user','created','polls')
    # Bug fix: 'user_username' (single underscore) is not a valid field
    # lookup and makes admin search fail; related fields need the
    # double-underscore traversal, matching the other entries.
    search_fields = ('user__username','user__email','user__first_name','user__last_name')
admin.site.register(User, CustomUserAdmin) | StarcoderdataPython |
4821194 | <gh_stars>0
#!/usr/bin/python3
"""
/c/<text>: display “C ”, followed by the value of the text
variable (replace underscore _ symbols with a space )
"""
from flask import Flask
app = Flask(__name__)
@app.route('/', strict_slashes=False)
def hello_hbnb():
    """Root route: return a static greeting."""
    return 'Hello HBNB!'
@app.route('/hbnb', strict_slashes=False)
def hbnb():
    """/hbnb route: return the project name."""
    return 'HBNB!'
@app.route('/c/<text>', strict_slashes=False)
def c_text(text):
    """Display 'C ' followed by *text*, with underscores shown as spaces."""
    return 'C ' + text.replace('_', ' ')
if __name__ == "__main__":
    # Listen on all interfaces so the host/container network can reach it
    app.run(host='0.0.0.0', port=5000, debug=True)
| StarcoderdataPython |
4983180 | import numpy as np
from PIL import Image, ImageTk
import matplotlib.pyplot as plt
import cv2
from scipy.integrate import simps
import os
import tkinter
from scipy.signal import find_peaks
import math
import re
from matplotlib.ticker import (AutoMinorLocator)
from tkinter import Text, Radiobutton, Frame, Button, filedialog, messagebox, Scale, Canvas, PhotoImage, Label, Scale, Entry, StringVar
from shutil import rmtree
import xlsxwriter
#make GUI
root = tkinter.Tk()
root.title("Intensity Grapher")
# module-level state shared between the slider/button callbacks
smooth_val = 0
h_shift_val = 0
v_shift_val = 0
bounds = []
#ratio is 3:2
plot_disp_size = (int(430*1.5), 430)
#creates resource folder in the current directory
if 'temp_resources' not in os.listdir('./'):
    os.mkdir('./temp_resources')
if 'cropped' not in os.listdir('./temp_resources'):
    os.mkdir('./temp_resources/cropped')
#for exiting the program
def on_closing():
    """Window-close handler: confirm, then tear down the UI and temp files."""
    if not messagebox.askokcancel("Quit", "Are you sure you want to quit (unsaved data will be discarded)?"):
        return
    print("[Exited]")
    root.quit()
    root.destroy()
    rmtree('./temp_resources')
#widget for creating help window
class CustomText(Text):
    """tkinter Text widget with regexp-based tag highlighting."""
    def __init__(self, *args, **kwargs):
        Text.__init__(self, *args, **kwargs)
    def HighlightPattern(self, pattern, tag, start="1.0", end="end", regexp=True):
        """Apply *tag* to every match of *pattern* between *start* and *end*."""
        start = self.index(start)
        end = self.index(end)
        # Marks track the current search window as matches are consumed
        self.mark_set("matchStart",start)
        self.mark_set("matchEnd",end)
        self.mark_set("searchLimit", end)
        # Tcl writes the length of each match into this IntVar
        count = tkinter.IntVar()
        while True:
            index = self.search(pattern, "matchEnd","searchLimit",count=count, regexp=regexp)
            if index == "": break
            self.mark_set("matchStart", index)
            # "%s+%sc" advances matchEnd by the match length in characters
            self.mark_set("matchEnd", "%s+%sc" % (index,count.get()))
            self.tag_add(tag, "matchStart","matchEnd")
#presents a help window with documentation on how to use our program, will make it read from the README.md file later
def help_window():
    """Pop up a read-only window showing the contents of DIRECTIONS.txt."""
    window = tkinter.Toplevel(root)
    window.title("Help")
    window.geometry("800x600")
    f = open("DIRECTIONS.txt", 'r')
    text = f.readlines()
    f.close()
    t = CustomText(window, wrap="word", width=100, height=10, borderwidth=2)
    # Bug fix: the option was misspelled `sid="top"`, which raises a
    # TclError (bad option "-sid") when the Help window opens.
    t.pack(side="top", fill="both", expand=True)
    t.insert("1.0","".join(text))
    t.config(state='disable')
    t.tag_configure("blue", foreground="blue")
    # NOTE(review): this pattern is JS-style (/.../g); Tcl's regexp engine
    # treats the slashes and trailing 'g' literally, so it likely never
    # matches -- confirm the intended pattern.
    t.HighlightPattern("/\D{1,}[^:]:/g", "blue")
    Button(window, text="OK", command=window.destroy).pack()
#opens dialog to select image
def select_file():
    """Ask the user for an image file and show it (resized) on the canvas."""
    root.filename = filedialog.askopenfilename(initialdir="../", title="Select image file", filetypes=(("Image files (.jpg, .jpeg, .png)", "*.jpg *.jpeg *.png"), ("all files","*.*")))
    try:
        img_path = root.filename
    except:
        # NOTE(review): a plain attribute read doesn't raise; a canceled
        # dialog yields '' instead -- this guard presumably never fires.
        print("Root Filename not compatible with image path")
        return
    global im
    imtemp = Image.open(img_path).resize(plot_disp_size)
    # keep a module-level reference so tkinter doesn't GC the PhotoImage
    im = ImageTk.PhotoImage(imtemp)
    image_canvas.itemconfigure(imload, image=im)
#threshold slider
def update_thresh(val):
    """Threshold Scale callback: remember the value and re-crop the image."""
    global thresh_val
    thresh_val = val
    thresh_and_crop()
#image processing
def thresh_and_crop():
    """Threshold the selected image and crop it to the two largest regions.

    Zeroes pixels below thresh_val percent of full intensity, keeps only
    the two largest contours (the two strips), crops to their combined
    bounding box, saves the result under temp_resources/cropped/ and shows
    it on the canvas. Also resets the module-global init_vals.
    """
    global init_vals
    init_vals = []
    try:
        img_path = root.filename
    except:
        print("Root Filename not compatible with image path")
        return
    #thresholding
    img = cv2.imread(img_path)
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # TOZERO: keep values above the threshold, zero everything below
    _, img_thresh = cv2.threshold(img_gray, 255*(float(thresh_val)/100), 255, cv2.THRESH_TOZERO)
    #cropping
    cnt, hierarchy = cv2.findContours(img_thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    cnt_sort = sorted(cnt, key=cv2.contourArea)
    # erase every contour except the two largest (fill with black)
    cv2.drawContours(img_thresh, cnt_sort[:-2], -1, 0, -1)
    cnt_sort = cnt_sort[-2:]
    # seed the bounding box with the first point of the largest contour
    xmin = cnt_sort[-1][0][0][0]
    xmax = 0
    ymin = cnt_sort[-1][0][0][1]
    ymax = 0
    #finding lowest x val and highest x val
    # brute-force bounding box over all points of both remaining contours
    for i in range(len(cnt_sort)):
        for j in range(len(cnt_sort[i])):
            for z in range(len(cnt_sort[i][j])):
                f = cnt_sort[i][j]
                if f[z][0] < xmin:
                    xmin = f[z][0]
                if f[z][0] > xmax:
                    xmax = f[z][0]
                if f[z][1] < ymin:
                    ymin = f[z][1]
                if f[z][1] > ymax:
                    ymax = f[z][1]
    img_crop = img_thresh[ymin:ymax, xmin:xmax]
    #saves cropped image in cropped folder
    cv2.imwrite('./temp_resources/cropped/' + os.path.split(img_path)[1], img_crop)
    global im
    imtemp = Image.open('./temp_resources/cropped/' + os.path.split(img_path)[1]).resize(plot_disp_size)
    im = ImageTk.PhotoImage(imtemp)
    image_canvas.itemconfigure(imload, image=im)
#finding regions of interest
def find_roi():
    """Let the user draw two ROIs (top and bottom strip) on the cropped image.

    Saves the selections as temp_resources/topstrip.jpeg and
    temp_resources/bottomstrip.jpeg for later analysis.
    """
    try:
        global img_path
        img_path = './temp_resources/cropped/' + os.path.split(root.filename)[1]
    except:
        print("Image path not defined")
        return
    if os.path.exists(img_path) == False:
        print("Must threshold image first")
        return
    img_raw = cv2.imread(img_path)
    img_raw = cv2.resize(img_raw, (1032, 688))
    #select ROI function 1 (top strip)
    roi = cv2.selectROI(img_raw)
    # roi is (x, y, w, h); slice rows then columns
    roi_cropped1 = img_raw[int(roi[1]):int(roi[1]+roi[3]), int(roi[0]):int(roi[0]+roi[2])]
    #select ROI function 2 (bottom strip)
    roi = cv2.selectROI(img_raw)
    roi_cropped2 = img_raw[int(roi[1]):int(roi[1]+roi[3]), int(roi[0]):int(roi[0]+roi[2])]
    try:
        cv2.imwrite("./temp_resources/topstrip.jpeg", roi_cropped1)
        cv2.imwrite('./temp_resources/bottomstrip.jpeg', roi_cropped2)
    except:
        # imwrite fails on an empty selection
        print("No ROI selected")
    cv2.destroyAllWindows()
#smoothing filter slider
def update_smooth(val):
    """Smoothing Scale callback: re-render the graph with the new window."""
    global smooth_val
    smooth_val = val
    make_graph()
    # discard the intermediate render file created by make_graph
    os.remove('./temp_resources/temp.png')
#curve smoothing
def smooth(interval, window_size):
    """Moving-average filter: convolve with a normalized box kernel.

    Uses 'valid' mode, so the output is len(interval) - window_size + 1
    samples long.
    """
    size = int(window_size)
    kernel = np.full(size, 1.0 / float(window_size))
    return np.convolve(interval, kernel, mode='valid')
#updates after baseline selection
def update_baseline():
    """Radiobutton callback: record the baseline mode and enable Preview."""
    preview_button["state"] = "normal"
    global baseline_grabbed
    baseline_grabbed = baseline_choice.get()
#updates after selecting number of peak bounds
def update_peaks():
    """Radiobutton callback: record peak count and enable bound selection."""
    bounds_button['state'] = 'normal'
    global peaks_num_grabbed
    peaks_num_grabbed = peak_num_choice.get()
#choosing peak bounds for integration step
def choose_peak_bounds():
    """Run interactive bound picking and return the module-global bounds."""
    global bounds
    make_graph(bounds = True)
    return bounds
#horizontal shift slider
def update_h_shift(val):
    """Horizontal-shift Scale callback: re-render with the new offset."""
    global h_shift_val
    h_shift_val = val
    make_graph()
    # discard the intermediate render file created by make_graph
    os.remove('./temp_resources/temp.png')
#vertical shift slider
def update_v_shift(val):
    """Vertical-shift Scale callback: re-render with the new offset."""
    global v_shift_val
    v_shift_val = val
    make_graph()
    # discard the intermediate render file created by make_graph
    os.remove('./temp_resources/temp.png')
#preview button
def preview_graph():
    """Render the current graph, then unlock the adjustment sliders."""
    make_graph()
    try:
        os.remove('./temp_resources/temp.png')
    except:
        # make_graph bailed out (no ROI yet) -- keep the sliders locked
        return
    for slider in (curve_smoothing_slider, horizontal_shift_slider,
                   vertical_shift_slider):
        slider['state'] = 'normal'
#displays graph
def make_graph(bounds = False):
    """Build and display the intensity plot for the two selected strips.

    Reads temp_resources/{top,bottom}strip.jpeg, averages each column's
    intensity, normalizes to percent of maximum, auto-aligns the main
    peaks, applies the user's smoothing/shift settings, and renders into
    the canvas. When bounds=True, additionally prompts for peak bounds
    via plt.ginput and shades/integrates the selected peaks.
    Side effects: fills the module-global `vals` (and `init_vals` on the
    first run) and writes temp_resources/temp.png.
    """
    global vals
    vals = []
    #in case matplotlib crashes
    plt.clf()
    try:
        top_line = Image.open('./temp_resources/topstrip.jpeg').convert("L")
        bottom_line = Image.open('./temp_resources/bottomstrip.jpeg').convert("L")
    except:
        print("No ROI selected")
        return
    #special treatment for this disaster
    export_button['state'] = 'normal'
    #convert to numpy array
    np_top = np.array(top_line)
    top_line_array = []
    # keep only rows that contain some signal
    for elem in np_top:
        if elem.sum() != 0:
            top_line_array.append(elem)
    np_bottom = np.array(bottom_line)
    bottom_line_array = []
    for elem in np_bottom:
        if elem.sum() != 0:
            bottom_line_array.append(elem)
    # column-wise mean intensity of the non-blank rows
    x1 = [float(sum(l))/len(l) for l in zip(*top_line_array)]
    x2 = [float(sum(l))/len(l) for l in zip(*bottom_line_array)]
    #initial vals
    if len(init_vals) == 0:
        t1 = np.arange(len(x1))
        t2 = np.arange(len(x2))
        init_vals.extend([t1, x1, t2, x2])
    #smoothing
    if int(smooth_val) > 0:
        x1 = smooth(x1, int(smooth_val))
        x2 = smooth(x2, int(smooth_val))
        # drop the edge samples distorted by the moving average
        x1 = x1[1:(len(x1) - 1)]
        x2 = x2[1:(len(x2) - 1)]
    #baseline adjustment
    if baseline_grabbed == 101: #midpoint
        x1_mid = x1[int(len(x1)/2)]
        x2_mid = x2[int(len(x2)/2)]
        x1 = [i - x1_mid for i in x1]
        x2 = [i - x2_mid for i in x2]
    #low val (shifts all to y=0 for standard axis)
    low_val = min(list(np.append(x1, x2)))
    x1 = [i-low_val for i in x1]
    x2 = [i-low_val for i in x2]
    #converts values to percentages of max intensity to nearest hundredth (to make uniform across pictures)
    highest_intensity = max(list(np.append(x1, x2)))
    for i in range(len(x1)):
        x1[i] = round((float(x1[i]) / float(highest_intensity)) * 100.00000, 2)
    for i in range(len(x2)):
        x2[i] = round((float(x2[i]) / float(highest_intensity)) * 100.00000, 2)
    #new auto peak detector for initial horizontal adjustment
    x1_peaks, _ = find_peaks(np.array(x1), height=15, distance=10, width=10)
    x2_peaks, _ = find_peaks(np.array(x2), height=15, distance=10, width=10)
    # index of the tallest detected peak on each strip
    x1_peak = 0
    x2_peak = 0
    for i in x1_peaks:
        if x1[i] > x1[x1_peak]:
            x1_peak = i
    for i in x2_peaks:
        if x2[i] > x2[x2_peak]:
            x2_peak = i
    t1 = np.arange(len(x1))
    t2 = np.arange(len(x2))
    # shift the strip whose peak comes first so the main peaks line up
    if x1_peak < x2_peak:
        t1 = [i+x2_peak-x1_peak for i in t1]
    if x2_peak < x1_peak:
        t2 = [i+x1_peak-x2_peak for i in t2]
    #manual h and v shift
    t1 = [i+int(h_shift_val) for i in t1]
    x1 = [i+int(v_shift_val) for i in x1]
    #bounds selection
    if bounds == True:
        plt.clf()
        plt.figure(figsize=(9,5.5))
        plt.title("Select LEFT and RIGHT BOUNDS of CONTROL PEAK (right)")
        plt.plot(t1, x1)
        plt.plot(t2, x2)
        clicked = plt.ginput(2)
        plt.close()
        # parse the two clicked x coordinates out of ginput's repr string
        control_peak = [math.floor(float(str(clicked).split(', ')[0][2:])), math.ceil(float(str(clicked).split(', ')[2][1:]))]
        # snap the clicks to the nearest sample indices on t1
        left_point = min(range(len(t1)), key=lambda i: abs(t1[i]-control_peak[0]))
        right_point = min(range(len(t1)), key=lambda i: abs(t1[i]-control_peak[1]))
        points_right_peak = [left_point + t1[0], right_point + t1[0]]
        plt.clf()
        if peaks_num_grabbed == 102:
            plt.clf()
            plt.figure(figsize=(9,5.5))
            plt.title("Select LEFT and RIGHT BOUNDS of TEST PEAK (left)")
            plt.plot(t1, x1)
            plt.plot(t2, x2)
            clicked = plt.ginput(2)
            plt.close()
            test_peak = [math.floor(float(str(clicked).split(', ')[0][2:])), math.ceil(float(str(clicked).split(', ')[2][1:]))]
            left_point = min(range(len(t1)), key=lambda i: abs(t1[i]-test_peak[0]))
            right_point = min(range(len(t1)), key=lambda i: abs(t1[i]-test_peak[1]))
            points_left_peak = [left_point + t1[0], right_point + t1[0]]
            plt.clf()
    #matplot plotting
    hfont = {'fontname': 'Arial', 'weight': 'bold', 'size': 45}
    ax = plt.subplot(111)
    plt.plot(t1, x1, linewidth=2)
    plt.plot(t2, x2, linewidth=2)
    ax.tick_params(width=1)
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    ax.yaxis.set_ticks_position('left')
    ax.xaxis.set_ticks_position('bottom')
    ax.xaxis.set_minor_locator(AutoMinorLocator(2))
    ax.yaxis.set_minor_locator(AutoMinorLocator(2))
    plt.setp(ax.spines.values(), linewidth=1.5)
    ax.tick_params(which='minor', width=1, length=5, labelsize=14)
    ax.tick_params(which='major', width=1.5, length=15, labelsize=32)
    plt.title(str(img_path).split('cropped/')[1], loc = 'right')
    plt.ylabel('Rel. Int. (% max)', **hfont)
    plt.xlabel('Pixel distance', **hfont)
    plt.setp(ax.get_yticklabels(), fontweight="bold", fontname="Arial")
    plt.setp(ax.get_xticklabels(), fontweight="bold", fontname="Arial")
    vals.extend([t1, x1, t2, x2])
    plt.legend(['Top Strip', 'Bottom Strip'], frameon=False, prop={'family': 'Arial', 'weight': 'bold', 'size': 32})
    #resizing
    figure = plt.gcf()
    figure.set_size_inches(15, 10)
    #shading of area under curve
    if bounds == True:
        # NOTE(review): this only converts t2 when converting t1 *failed*;
        # presumably fine because the manual h-shift above always turns t1
        # into a list -- confirm before changing.
        try:
            t1 = t1.tolist()
        except:
            try:
                t2 = t2.tolist()
            except:
                pass
        print("Shading...")
        try:
            plt.fill_between(t1, x1, 0, where = (t1 > points_right_peak[0]) & (t1 <= points_right_peak[1]), color = (0, 0, 1, 0.2))
            plt.fill_between(t2, x2, 0, where = (t2 > points_right_peak[0]) & (t2 <= points_right_peak[1]), color = (0, 0, 1, 0.2))
            # Simpson's-rule areas, peak maxima, and the chosen bounds
            vals.extend([simps(x1[t1.index(points_right_peak[0]):t1.index(points_right_peak[1])], np.linspace(points_right_peak[0], points_right_peak[1], num=len(x1[t1.index(points_right_peak[0]):t1.index(points_right_peak[1])])), dx=0.01)])
            vals.extend([simps(x2[t2.index(points_right_peak[0]):t2.index(points_right_peak[1])], np.linspace(points_right_peak[0], points_right_peak[1], num=len(x2[t2.index(points_right_peak[0]):t2.index(points_right_peak[1])])), dx=0.01)])
            vals.extend([max(x1[t1.index(points_right_peak[0]):t1.index(points_right_peak[1])]), max(x2[t2.index(points_right_peak[0]):t2.index(points_right_peak[1])]), points_right_peak[0], points_right_peak[1]])
        except:
            print("Invalid bounds on control peak")
        if peaks_num_grabbed == 102:
            try:
                plt.fill_between(t1, x1, 0, where = (t1 > points_left_peak[0]) & (t1 <= points_left_peak[1]), color = (1, 0, 0, 0.2))
                plt.fill_between(t2, x2, 0, where = (t2 > points_left_peak[0]) & (t2 <= points_left_peak[1]), color = (1, 0, 0, 0.2))
                vals.extend([simps(x1[t1.index(points_left_peak[0]):t1.index(points_left_peak[1])], np.linspace(points_left_peak[0], points_left_peak[1], num=len(x1[t1.index(points_left_peak[0]):t1.index(points_left_peak[1])])), dx=0.01)])
                vals.extend([simps(x2[t2.index(points_left_peak[0]):t2.index(points_left_peak[1])], np.linspace(points_left_peak[0], points_left_peak[1], num=len(x2[t2.index(points_left_peak[0]):t2.index(points_left_peak[1])])), dx=0.01)])
                vals.extend([max(x1[t1.index(points_left_peak[0]):t1.index(points_left_peak[1])]), max(x2[t2.index(points_left_peak[0]):t2.index(points_left_peak[1])]), points_left_peak[0], points_left_peak[1]])
            except:
                print("Invalid bounds on test peak")
    global im
    plt.savefig('./temp_resources/temp.png', bbox_inches='tight')
    im = ImageTk.PhotoImage(Image.open('./temp_resources/temp.png').resize(plot_disp_size))
    image_canvas.itemconfigure(imload, image=im)
#saves graph
def save_graph():
    """Export the current graph (.png) and its data (.xlsx) to a directory.

    The workbook contains the initial and adjusted curves for both strips,
    peak areas/maxima/bounds when bounds were selected, plus the rendered
    plot and the cropped source image. A canceled dialog ('' or None) is
    a no-op.
    """
    f = filedialog.askdirectory(initialdir='../', title='Choose Location to Save Data')
    if f:
        # file names are the sanitized source image name
        plt.savefig(f+'/'+re.sub(r'\W','',os.path.split(root.filename)[1].split('.jpg')[0]) + '.png', bbox_inches='tight')
        workbook = xlsxwriter.Workbook(f+'/'+re.sub(r'\W','',os.path.split(root.filename)[1].split('.jpg')[0]) + '_DATA.xlsx')
        worksheet = workbook.add_worksheet()
        #adds a bold format to use to highlight cells
        bold = workbook.add_format({'bold': True})
        #initialize the top row labels, all with bold text
        worksheet.write('A1', 'Top Strip X-values (initial)', bold)
        worksheet.write('B1', 'Top Strip Y-values (initial)', bold)
        worksheet.write('C1', 'Bottom Strip X-values (initial)', bold)
        worksheet.write('D1', 'Bottom Strip Y-Values (initial)', bold)
        worksheet.write('E1', 'Top Strip X-values (adjusted)', bold)
        worksheet.write('F1', 'Top Strip Y-values (adjusted)', bold)
        worksheet.write('G1', 'Bottom Strip X-values (adjusted)', bold)
        worksheet.write('H1', 'Bottom Strip Y-Values (adjusted)', bold)
        worksheet.write('I1', 'Area of control (right) peak - Top Strip', bold)
        worksheet.write('J1', 'Area of control (right) peak - Bottom Strip', bold)
        worksheet.write('K1', 'Area of test (left) peak - Top Strip', bold)
        worksheet.write('L1', 'Area of test (left) peak - Bottom Strip', bold)
        worksheet.write('I3', 'Max of control (right) peak - Top Strip', bold)
        worksheet.write('J3', 'Max of control (right) peak - Bottom Strip', bold)
        worksheet.write('K3', 'Max of test (left) peak - Top Strip', bold)
        worksheet.write('L3', 'Max of test (left) peak - Bottom Strip', bold)
        worksheet.write('I5', 'Left bound of control (right) peak', bold)
        worksheet.write('J5', 'Right bound of control (right) peak', bold)
        worksheet.write('K5', 'Left bound of test (left) peak', bold)
        worksheet.write('L5', 'Right bound of test (left) peak', bold)
        worksheet.set_column('A:A', 22) #these are widths of columns in cm of excel, just to make it more readable
        worksheet.set_column('B:B', 22)
        worksheet.set_column('C:C', 25)
        worksheet.set_column('D:D', 25)
        worksheet.set_column('E:E', 25)
        worksheet.set_column('F:F', 25)
        worksheet.set_column('G:G', 28)
        worksheet.set_column('H:H', 28)
        worksheet.set_column('I:I', 32)
        worksheet.set_column('J:J', 36)
        worksheet.set_column('K:K', 30)
        worksheet.set_column('L:L', 34)
        # raw (pre-adjustment) curves: columns A-D
        for i in range(len(init_vals[0])):
            worksheet.write('A'+str(i+2), init_vals[0][i])
            worksheet.write('B'+str(i+2), init_vals[1][i])
        for i in range(len(init_vals[2])):
            worksheet.write('C'+str(i+2), init_vals[2][i])
            worksheet.write('D'+str(i+2), init_vals[3][i])
        # adjusted curves: columns E-H
        for i in range(len(vals[0])):
            worksheet.write('E'+str(i+2), vals[0][i])
            worksheet.write('F'+str(i+2), vals[1][i])
        for i in range(len(vals[2])):
            worksheet.write('G'+str(i+2), vals[2][i])
            worksheet.write('H'+str(i+2), vals[3][i])
        # vals[4:] are only present after bound selection (see make_graph)
        if len(vals) >= 6:
            worksheet.write('I2', vals[4])
            worksheet.write('J2', vals[5])
            worksheet.write('I4', vals[6])
            worksheet.write('J4', vals[7])
            worksheet.write('I6', vals[8])
            worksheet.write('J6', vals[9])
        if len(vals) >= 12:
            worksheet.write('K2', vals[10])
            worksheet.write('L2', vals[11])
            worksheet.write('K4', vals[12])
            worksheet.write('L4', vals[13])
            worksheet.write('K6', vals[14])
            worksheet.write('L6', vals[15])
        #inserts cropped ROI image
        worksheet.insert_image('J8', f+'/'+re.sub(r'\W','',os.path.split(root.filename)[1].split('.jpg')[0]) + '.png', {'x_scale': 0.40, 'y_scale': 0.40})
        worksheet.insert_image('J27', img_path, {'x_scale': 0.40, 'y_scale': 0.40})
        workbook.close()
        print("Data for " + os.path.split(root.filename)[1].split('.jpg')[0] + " successfully exported")
    elif f is None:
        return
#initializes tkinter GUI
def init():
    """Build the tkinter GUI: controls on the left, plot canvas on the right."""
    #setting variables to global scope that need to be accessed outside of init()
    global curve_smoothing_slider, horizontal_shift_slider, vertical_shift_slider, image_canvas, bounds_button, preview_button, export_button, baseline_choice, im, imload, peak_num_choice
    left_frame = Frame(root)
    left_frame.pack(side="left")
    middle_frame = Frame(root)
    middle_frame.pack(side="right")
    right_frame = Frame(root)
    right_frame.pack(side="right")
    sub_middle_frame = Frame(middle_frame)
    sub_middle_frame.pack(side="bottom", pady=(0,10))
    #LEFT SIDE
    #help button
    Button(left_frame, text="Help", command=help_window).pack(anchor='nw', padx=(10,0),pady=(10,10))
    #button for selecting image file to analyze
    Button(left_frame, text="Select a file", command=select_file).pack(anchor= 'n',pady=(0,15))
    #slider for scaling the cropped image
    Label(left_frame, text="Threshold and Crop", justify="center").pack()
    threshold_slider = Scale(left_frame, orient="horizontal", length=200, from_=1.0, to=30.0, command=update_thresh)
    threshold_slider.pack(padx=20, pady=(0,10))
    #button for selecting the region of interest (ROI), this ROI is then analyzed for the graph
    Button(left_frame, text="Select a ROI", command=find_roi).pack(pady=(0,15))
    #slider for determining how much the curve is smoothed out (typically has very many oscillations and spikes)
    Label(left_frame, text="Curve Smoothing", justify="center", padx=20).pack()
    curve_smoothing_slider = Scale(left_frame, orient="horizontal", length=200, from_=0.0, to=30.0, command=update_smooth)
    curve_smoothing_slider.pack(padx=20, pady=(0,20))
    curve_smoothing_slider['state'] = 'disable'
    #determines whether the baselining will happen from the lowest value (from both curves lowest val is zeroed) or midpoint (average value of both is zeroed and then lowest value brought to zero)
    baseline_choice = tkinter.IntVar()
    baseline_choice.set(1)
    # value 101 = midpoint, 102 = lowest value (see make_graph)
    modes = [("Midpoint", 101), ("Lowest Value", 102)]
    Label(left_frame, text="Baseline from:", justify="left", padx=20).pack()
    for mode, val in modes:
        Radiobutton(left_frame, text=mode, indicatoron=1, command=update_baseline, justify="left", padx=20, variable=baseline_choice, value=val).pack(anchor='w')
    #a multiple choice field for how many peaks you want analyzed at the current moment
    peak_num_choice = tkinter.IntVar()
    peak_num_choice.set(1)
    # value 101 = one peak, 102 = two peaks (see make_graph)
    modes = [("One Peak", 101), ("Two Peaks", 102)]
    Label(left_frame, text="How many peaks to compare:", justify="left", padx=20).pack(pady=(20,0))
    for mode, val in modes:
        Radiobutton(left_frame, text=mode, indicatoron=1, command=update_peaks, justify="left", padx=20, variable=peak_num_choice, value=val).pack(anchor='w')
    #building the bounds button, for selecting left and right bounds of target peaks
    bounds_button = Button(left_frame, text="Choose Bounds", command=choose_peak_bounds)
    bounds_button.pack(side="left", padx=(15,10), pady=(30,10))
    bounds_button["state"] = "disable"
    #building the preview button, used to look at the current strip being analyzed
    preview_button = Button(left_frame, text="Preview", command=preview_graph)
    preview_button.pack(side="left", padx=(10,10), pady=(30,10))
    preview_button["state"] = "disable"
    #building the export button, disabled at first until you have data to export
    export_button = Button(left_frame, text="Export", command=save_graph)
    export_button.pack(side="left", padx=(10,0), pady=(30,10))
    export_button["state"] = "disable"
    #RIGHT SIDE
    #building the horizontal shift slider (used to shift one line left and right)
    Label(sub_middle_frame, text="Horizontal Shift").grid(column=0, row=1, padx=(0,20))
    horizontal_shift_slider = Scale(sub_middle_frame, orient="horizontal", length=300, from_=-10.0, to=10.0, command=update_h_shift)
    horizontal_shift_slider.grid(column=0, row=0, padx=(0,20))
    horizontal_shift_slider['state'] = 'disable'
    #building the vertical shift slider (shifts one line up and down)
    Label(sub_middle_frame, text="Vertical Shift").grid(column=1, row=1)
    vertical_shift_slider = Scale(sub_middle_frame, orient="horizontal", length=300, from_=-10.0, to=10.0, command=update_v_shift)
    vertical_shift_slider.grid(column=1, row=0)
    vertical_shift_slider['state'] = 'disable'
    #right side graph
    width, height = plot_disp_size
    image_canvas = Canvas(middle_frame, width=width, height=height)
    image_canvas.pack(padx=(20,0), pady=(0,0))
    #blanks canvas with a white frame, image_canvas is modified to add the graph onto screen each time
    im = ImageTk.PhotoImage(Image.new("RGB", plot_disp_size, (255, 255, 255))) #PIL solution
    imload = image_canvas.create_image(0, 0, image=im, anchor='nw')
if __name__ == '__main__':
    init() #builds all the buttons and frames
    root.protocol("WM_DELETE_WINDOW", on_closing) #when the "x" is hit to close the window, tkinter needs to handle it in a special way
    root.mainloop() #starts the instance of tkinter (the GUI framework)
# Read numbers until the user declines, then report count, mean, max, min.
n = cont = soma = media = maior = menor = 0
escolha = 'S'
# Bug fix: the original test `escolha in 'SIMsim'` was a *substring* check,
# so the empty string (just pressing Enter) and fragments like 'im' or 'Ms'
# also continued the loop. Use an explicit whitelist instead; .upper()
# still accepts s/S/sim/Sim/SIM.
while escolha.strip().upper() in ('S', 'SIM'):
    n = float(input('Digite um número: '))
    if cont == 0:
        # the first number seeds both running extremes
        maior = menor = n
    else:
        if n > maior:
            maior = n
        if n < menor:
            menor = n
    soma += n
    cont += 1
    escolha = input('Quer continuar? [S/N] ').strip()
media = soma / cont
print('A média entre os {} valores foi {:.2f}'.format(cont, media))
print('O maior valor foi {:.1f} e o menor foi {:.1f}.'.format(maior, menor))
| StarcoderdataPython |
1765828 | from setuptools import setup, find_packages
# Package metadata for dash_data_viewer (src/ layout).
setup(
    name='dash_data_viewer',
    python_requires='>=3.10',
    version='1.0',
    # packages live under src/ and are importable without the prefix
    packages=find_packages('src'),
    package_dir={'': 'src'},
    url='https://github.com/TimChild/dash_data_viewer',
    license='MIT',
    author='<NAME>',
    author_email='<EMAIL>',
    description='Dash Viewer for Dats (Folk lab UBC)',
    install_requires=[
        'dat_analysis',
        'dash>=2.0',
        'plotly',
        'pandas',
        'dash-extensions',
        'dash-labs',
        'dash-bootstrap-components',
        'dacite',
        'kaleido',
        'filelock',
    ]
)
| StarcoderdataPython |
5007433 | import pandas
from ccxt.base.exchange import Exchange
from ccxt.base.errors import BadRequest, InvalidOrder, OrderNotFound
from collections import defaultdict
from copy import deepcopy
from decimal import Decimal
from btrccts.check_dataframe import _check_dataframe
from btrccts.convert_float import _convert_float_or_raise, _convert_float
from btrccts.balance import Balance
DECIMAL_ONE = Decimal('1')
class ExchangeAccount:
    def __init__(self, timeframe, balances={}, ohlcvs={}):
        """Create a simulated exchange account.

        timeframe: time source driving order-fill simulation.
        balances: mapping currency -> starting balance (wrapped in Balance).
        ohlcvs: mapping symbol -> OHLCV dataframe, validated per timeframe.

        NOTE(review): the mutable default arguments are shared across
        calls; safe only as long as they are never mutated -- confirm
        before changing.
        """
        self._timeframe = timeframe
        self._start_balances = defaultdict(Balance)
        for key in balances:
            self._start_balances[key] = Balance(balances[key])
        # copy() of a defaultdict keeps the default factory
        self._balances = self._start_balances.copy()
        self._ohlcvs = {}
        for key in ohlcvs:
            self._ohlcvs[key] = _check_dataframe(ohlcvs[key], timeframe)
        self._last_order_id = 0
        self._open_orders = {}
        self._closed_orders = {}
        # order id -> private bookkeeping (price, side, fee, fillable_date, ...)
        self._private_order_info = {}
        # cached open order with the earliest fillable_date (or None)
        self._next_private_order_to_update = None
def _move_to_closed_orders(self, id):
self._closed_orders[id] = self._open_orders[id]
del self._open_orders[id]
del self._private_order_info[id]
def _update_next_private_order_to_update(self):
try:
self._next_private_order_to_update = min(
filter(lambda x: x['fillable_date'] is not None,
self._private_order_info.values()),
key=lambda x: x['fillable_date'])
except ValueError:
self._next_private_order_to_update = None
def _update_orders(self):
curr_date = self._timeframe.date()
while True:
private_order = self._next_private_order_to_update
if private_order is None:
return
fillable_date = private_order['fillable_date']
if fillable_date > curr_date:
return
order_id = private_order['id']
timestamp = int(fillable_date.value / 10e5)
order = self._open_orders[order_id]
amount = order['amount']
price = private_order['price']
base = private_order['base']
quote = private_order['quote']
buy = private_order['buy']
fee_percentage = private_order['fee_percentage']
self._remove_used_balance(price, amount, base, quote, buy)
self._update_balance(price, amount, base, quote, buy,
fee_percentage)
self._fill_order(order, buy, price, timestamp, fee_percentage)
self._move_to_closed_orders(order_id)
self._update_next_private_order_to_update()
def _return_decimal_to_float(self, result):
for key in result.keys():
value_type = type(result[key])
if value_type == Decimal:
result[key] = float(str(result[key]))
elif value_type == dict:
result[key] = self._return_decimal_to_float(result[key])
return result
def cancel_order(self, id, symbol=None):
self._update_orders()
closed_order = self._closed_orders.get(id)
if closed_order is not None:
raise BadRequest('ExchangeAccount: cannot cancel {} order {}'
.format(closed_order['status'], id))
open_order = self._open_orders.get(id)
if open_order is None:
raise OrderNotFound('ExchangeAccount: order {} does not exist'
.format(id))
else:
open_order.update({
'status': 'canceled',
})
private = self._private_order_info[id]
self._remove_used_balance(amount=open_order['amount'],
price=private['price'],
base=private['base'],
quote=private['quote'],
buy=private['buy'])
self._move_to_closed_orders(id)
if private == self._next_private_order_to_update:
self._update_next_private_order_to_update()
return {'id': id,
'info': {}}
def create_order(self, market, type, price, side, amount):
self._update_orders()
type_market = False
type_limit = False
if type == 'market':
if price is not None:
raise InvalidOrder(
'ExchangeAccount: market order has no price')
type_market = True
elif type == 'limit':
price = _convert_float_or_raise(price, 'ExchangeAccount: price')
type_limit = True
if price <= 0:
raise BadRequest('ExchangeAccount: price needs to be positive')
else:
raise InvalidOrder(
'ExchangeAccount: only market and limit order supported')
if market is None:
raise InvalidOrder('ExchangeAccount: market is None')
symbol = market.get('symbol')
ohlcv = self._ohlcvs.get(symbol)
if ohlcv is None:
raise InvalidOrder('ExchangeAccount: no prices available for {}'
.format(symbol))
if side not in ['buy', 'sell']:
raise InvalidOrder('ExchangeAccount: side {} not supported'
.format(side))
buy = side == 'buy'
amount = _convert_float_or_raise(amount, 'ExchangeAccount: amount')
if amount <= 0:
raise BadRequest('ExchangeAccount: amount needs to be positive')
base = market.get('base')
quote = market.get('quote')
if base is None:
raise BadRequest('ExchangeAccount: market has no base')
if quote is None:
raise BadRequest('ExchangeAccount: market has no quote')
self._last_order_id += 1
order_id = str(self._last_order_id)
date = self._timeframe.date()
timestamp = int(date.value / 10e5)
order = {
'info': {},
'id': order_id,
'timestamp': timestamp,
'datetime': Exchange.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'side': side,
'price': None,
'amount': amount,
'cost': None,
'average': None,
'filled': 0,
'remaining': amount,
'status': 'open',
'fee': {'currency': base if buy else quote,
'cost': None,
'rate': None},
'trades': None,
}
if type_market:
# Determinie the price of the market order
# We could use the next low/high to fill the order, but then we
# need to wait for the next date to fill the order, otherwise we
# would introduce a possibility to see the future price
# (Look-Ahead Bias)
# If we wait for the next date, we would return a market order that
# is pending, but this should never happen in reality
# Maybe the factor should depend on the volume
factor = Decimal('0.0015')
if buy:
price = (1 + factor) * _convert_float(ohlcv['high'][date])
else:
price = (1 - factor) * _convert_float(ohlcv['low'][date])
fee_percentage = market.get('taker', 0)
fee_percentage = _convert_float_or_raise(fee_percentage,
'ExchangeAccount: fee')
self._update_balance(price, amount, base, quote, buy,
fee_percentage)
self._fill_order(order, buy, price, timestamp, fee_percentage)
self._closed_orders[order_id] = order
if type_limit:
# TODO Probably use taker fee, if the order can be filled now
fee_percentage = market.get('maker', 0)
fee_percentage = _convert_float_or_raise(fee_percentage,
'ExchangeAccount: fee')
if buy:
self._balances[quote].change_used(price * amount)
else:
self._balances[base].change_used(amount)
self._open_orders[order_id] = order
self._private_order_info[order_id] = {
'id': order_id,
'base': base,
'quote': quote,
'price': price,
'buy': buy,
'fee_percentage': fee_percentage,
'fillable_date': self._limit_order_fillable_date(
symbol, buy, price),
}
self._update_next_private_order_to_update()
return {'id': order_id,
'info': {}}
def _limit_order_fillable_date(self, symbol, buy, price):
ohlcv = self._ohlcvs[symbol]
date = self._timeframe.date()
if ohlcv.index[0] != date:
ohlcv = ohlcv[date:]
# save reduced dataframe for better performance
self._ohlcvs[symbol] = ohlcv
# only look at the future
ohlcv = ohlcv[date + pandas.Timedelta(1, unit='ns'):]
if buy:
low = ohlcv.low
use = low[low <= price]
else:
high = ohlcv.high
use = high[high >= price]
if use is not None and len(use.index) > 0:
return use.index[0]
else:
return None
def _update_balance(self, price, amount, base, quote, buy, fee_percentage):
# First decrease balance, then increase, so
# decrease can throw and increase wont be affected
multiplier = DECIMAL_ONE - fee_percentage
if buy:
self._balances[quote].change_total(- price * amount)
self._balances[base].change_total(amount * multiplier)
else:
self._balances[base].change_total(- amount)
self._balances[quote].change_total(price * amount * multiplier)
def _remove_used_balance(self, price, amount, base, quote, buy):
if buy:
self._balances[quote].change_used(- price * amount)
else:
self._balances[base].change_used(- amount)
def _fill_order(self, order, buy, price, timestamp, fee_percentage):
amount = order['amount']
amount_price = amount * price
order.update({
'average': price,
'cost': amount_price,
'filled': amount,
'lastTradeTimestamp': timestamp,
'price': price,
'remaining': 0,
'status': 'closed',
})
order['fee'].update({
'rate': fee_percentage,
'cost': fee_percentage * (amount if buy else amount_price),
})
def fetch_balance(self):
self._update_orders()
result = {}
for key, balance in self._balances.items():
result[key] = self._return_decimal_to_float(balance.to_dict())
return result
def fetch_order(self, id, symbol=None):
self._update_orders()
order = self._closed_orders.get(id)
if order is None:
order = self._open_orders.get(id)
if order is None:
raise OrderNotFound('ExchangeAccount: order {} does not exist'
.format(id))
return self._return_decimal_to_float(deepcopy(order))
def _filter_sort_orders(
self, orders, since, limit, symbol, since_get, filter_non_zero):
usable_orders = [order for _, order in orders.items()
if ((symbol is None or order['symbol'] == symbol) and
(filter_non_zero is None or
order[filter_non_zero] != 0) and
(since is None or order[since_get] > since))]
usable_orders = sorted(usable_orders, key=lambda x: x[since_get])
return usable_orders[:limit]
def fetch_closed_orders(self, symbol=None, since=None, limit=None):
self._update_orders()
orders = self._filter_sort_orders(orders=self._closed_orders,
symbol=symbol, limit=limit,
since=since,
filter_non_zero='filled',
since_get='lastTradeTimestamp')
return [self._return_decimal_to_float(deepcopy(o)) for o in orders]
def fetch_open_orders(self, symbol=None, since=None, limit=None):
self._update_orders()
orders = self._filter_sort_orders(orders=self._open_orders,
symbol=symbol, limit=limit,
since=since,
filter_non_zero=None,
since_get='timestamp')
return [self._return_decimal_to_float(deepcopy(o)) for o in orders]
| StarcoderdataPython |
6666558 | import tempfile
import time
import logging
from collections import OrderedDict
from .exceptions import RateLimitError
try:
import fiona # try importing fiona directly, because otherwise geopandas defers errors to later on when it actually needs to use it
import geopandas
GEOPANDAS_AVAILABLE = True
except ImportError:
GEOPANDAS_AVAILABLE = False
logging.warning("Can't load fiona or geopandas - will not be able to undertake spatial operations")
import pandas
# Maximum number of OpenET field IDs sent per timeseries request batch.
MAX_FEATURE_IDS_LIST_LENGTH = 40
RATE_LIMIT = 5000  # ms between successive API requests
# Accepted values for the feature_type parameter of get_et_for_features.
FEATURE_TYPE_GEOPANDAS = "geopandas"
FEATURE_TYPE_GEOJSON = "geojson"
FEATURE_TYPE_ARCPY = "arcpy"
# NOTE(review): constant name is misspelled ("SHAPEPLY") but kept as-is
# because external code may reference it; the value is correct.
FEATURE_TYPE_SHAPEPLY = "shapely"
def get_coords_shapely(geometry):
    """Return the (x, y) coordinates of a shapely geometry's centroid.

    Bug fix: the previous version read ``.x``/``.y`` off
    ``geometry.centroid.coords`` — a shapely CoordinateSequence, which has
    no ``x``/``y`` attributes and raised AttributeError.  ``x`` and ``y``
    live on the centroid Point itself.
    """
    centroid = geometry.centroid
    return (centroid.x, centroid.y)
def get_coords_arcpy(geometry):
    """Return the (X, Y) coordinates of an arcpy geometry's centroid,
    reprojected to WGS84 (EPSG:4326)."""
    wgs84_centroid = geometry.centroid.projectAs(4326)
    return (wgs84_centroid.X, wgs84_centroid.Y)
class Geodatabase(object):
    """Wrapper around the OpenET feature/geodatabase endpoints.

    Maps input geometries to OpenET field IDs (via their centroids),
    batches ET timeseries requests with rate limiting, and joins the
    results back onto the caller's data.
    """
    def __init__(self, client):
        # client: the OpenET API client; must provide send_request() and a
        # .cache with check_gdb_cache()/cache_gdb_item().
        self.client = client
    def get_et_for_features(self,
                            params,
                            features,
                            feature_type,
                            output_field=None,
                            geometry_field="geometry",
                            endpoint="timeseries/features/stats/annual",
                            wait_time=RATE_LIMIT,
                            batch_size=MAX_FEATURE_IDS_LIST_LENGTH,
                            return_type="joined",
                            join_type="outer"):
        """
        Takes one of multiple data formats (user specified, we're not inspecting it - options are
        geopandas, geojson) and gets its
        coordinate values, then gets the field IDs in OpenET for the coordinate pair, retrieves the ET data
        and returns it as a geopandas data frame with the results in the specified output_field
        :param params:
        :param features:
        :param endpoint: which features endpoint should it use?
        :param return_type: How should we return the data? Options are "raw" to return just the JSON from OpenET,
                            "list" to return a list of dictionaries with the OpenET data, "pandas" to return a pandas
                            data frame of the results, or "joined" to return the
                            data joined back to the input data. "joined" is the default.
        :param join_type: When merging results back in, what type of join should we use? Defaults to "outer" so that
                        records are retained even if no results come back for them. This is also useful behavior when
                        we have multiple timeseries records, such as for monthly results, but it can duplicate input
                        records (not always desirable). To change the behavior, change this to any value supported
                        by pandas.merge or change the return_type so no join occurs.
        :return:
        """
        if GEOPANDAS_AVAILABLE is False:
            # we'll check it this way because that way we can let people who don't want to get a working fiona/geopandas environment
            # use the application without it confusingly failing on them at runtime.
            raise EnvironmentError("Fiona or Geopandas is unavailable - check that Fiona and Geopandas are both installed and that importing Fiona works - cannot proceed without a working installation with fiona and geopandas")
        if endpoint.startswith("timeseries/"):  # strip it off the front if they included it
            # NOTE(review): str.replace returns a new string and this result
            # is discarded, so the prefix is never actually stripped — the
            # full endpoint (including "timeseries/") is what gets sent.
            # Confirm intent before "fixing"; requests evidently work as-is.
            endpoint.replace("timeseries/", "")
        if output_field is None and return_type == "joined":
            raise ValueError("Must specify value for output_field when return_type is 'joined'")
        if return_type not in ("joined", "pandas", "list", "raw"):
            raise ValueError("return_type must be one of ('joined', 'list', 'raw', 'pandas')")
        if feature_type not in (FEATURE_TYPE_GEOPANDAS, FEATURE_TYPE_GEOJSON):
            raise ValueError(f"Feature type must be in ({FEATURE_TYPE_GEOPANDAS}, {FEATURE_TYPE_GEOJSON}) to get geometries and retrieve ET. CHeck that the feature_type parameter is specified correctly")
        if feature_type == FEATURE_TYPE_GEOJSON:
            features = geopandas.GeoDataFrame.from_features(features)
        # Reproject to WGS84 so centroids are in lon/lat for the API.
        features_wgs = features.to_crs(4326)
        features_wgs.loc[:, "centroid_geom"] = features_wgs[geometry_field].centroid
        def set_centroid(row):
            """
            There's a better way to do this, but my Pandas-fu is failing me right now.
            Make a function to set the centroid as text elementwise
            :param row:
            :return:
            """
            # get the values as a string, but truncate it to 7 places for precision so that we can more reliably cache it
            row["centroid"] = f'{round(row["centroid_geom"].x, 7)} {round(row["centroid_geom"].y, 7)}'
            return row
        features_wgs = features_wgs.apply(set_centroid, axis=1)
        features_wgs = features_wgs.drop(columns=["centroid_geom"])  # drop it so it doesn't create output problems later
        # we're going to have to get the feature IDs one by one if we want a reliable mapping of polygons to openET features
        # which isn't ideal and we'll want to rate limit it to make sure we don't abuse the API too heavily
        # we'll probably also want to do some form of caching or saving the feature IDs to the geopandas dfs so that
        # we don't have to go back and get it again if we already got it.
        # only get the feature IDs if they aren't already there to save time and
        # avoid a column naming conflict if they run the same data through multiple times
        if not "openet_feature_id" in list(features_wgs.columns):
            openet_feature_ids = self.get_feature_ids(features_wgs, field="centroid")
            #temp_feature_outputs = tempfile.mktemp(suffix=".csv", prefix="openet_client")
            #openet_feature_ids.to_csv(temp_feature_outputs)
            features_wgs = features_wgs.merge(openet_feature_ids, on="centroid")
        feature_ids = features_wgs["openet_feature_id"].tolist()
        df_length = len(feature_ids)
        start = 0
        end = min(batch_size, df_length)
        results = []
        # On a server error we drop to batch_size 1 ("slow batch mode") to
        # isolate bad records, then restore the original size afterwards.
        original_batch_size = batch_size
        slow_batch_count = 0
        while start < df_length:
            partial_list = [feat for feat in feature_ids[start:end] if feat is not None]  # remove the null values and filter to the batch size
            params["field_ids"] = str(partial_list).replace(" ", "").replace("\'", '"')  # what's weird is we basically have to send this as a python list, so we need to stringify it first so requests doesn't process it
            try:
                response = self.client.send_request(endpoint, method="post", disable_encoding=True, **params)
            except RateLimitError as e:
                # if it gets interrupted save the data we currently have to the exception then raise it up
                raise RateLimitError(str(e) + ". The retrieved data is available as an attribute '.data' on this exception, but is incomplete.", data=self._process_results(results, return_type, output_field, features_wgs, join_type))
            if not response.status_code == 500:
                results.extend(response.json())
            else:
                logging.warning(f"Error retrieving ET for one or more fields. Request sent was {response.url}. Got response {response.text}")
                if batch_size == original_batch_size:  # if we're not already there, switch to slow batch mode so we go through it one by one now
                    batch_size = 1
                    end = start + 1
                    continue  # go back through the last batch one by one so we make sure we get as many as possible
                # if we are already in slow batch mode, then basically, this record gets skipped
            time.sleep(wait_time / 1000)
            start += batch_size
            if batch_size != original_batch_size:  # if we're in slow batch mode
                slow_batch_count += 1  # count how many we've done
                if slow_batch_count == original_batch_size:  # until we get back to where we would have been in the first place
                    batch_size = original_batch_size  # then increase the batch size again to *try* to get a larger set for the next group
            end += batch_size
            end = min(end, df_length)  # we'll only check end because we won't enter the next iteration if start < df_length
        return self._process_results(results, return_type, output_field, features_wgs, join_type)
    def _process_results(self, results, return_type, output_field, features_wgs, join_type):
        """Reshape raw OpenET JSON results per ``return_type``.

        Renames the API's value key(s) to ``output_field`` (suffixed with the
        stat name when several are present) and ``feature_unique_id`` to
        ``openet_feature_id``; optionally converts to a DataFrame or merges
        back onto ``features_wgs``.
        """
        if return_type == "raw":
            return results
        # openet_output_field_name = "data_value" if "aggregation" not in params else params["aggregation"]
        # figure out which keys are there using the first result - there should only be one, but this lets us make sure we get anything
        output_field_keys = list(set(results[0].keys()).intersection(set(["data_value", "sum", "mean", "min", "max", "median"])))
        results_reformed = results
        for item in results:
            if output_field:
                if len(output_field_keys) == 1:  # there should only be one
                    item[output_field] = item[output_field_keys[0]]
                    del item[output_field_keys[0]]
                else:
                    for key in output_field_keys:
                        item[output_field + "_" + key] = item[key]
                        del item[key]
            item["openet_feature_id"] = item["feature_unique_id"]
            del item["feature_unique_id"]
        # we had used a list comprehension, but we wanted to keep all the other keys in the dict. Preserving for now, but can remove later
        #results_reformed = [{output_field: item[openet_output_field_name], "openet_feature_id": item["feature_unique_id", ]}
        #                    for item in results]
        if return_type == "list":
            return results_reformed
        results_df = pandas.DataFrame(results_reformed)
        if return_type == "pandas":
            return results_df
        final = features_wgs.merge(results_df, on="openet_feature_id", how=join_type)
        return final
    def get_feature_ids(self, features, field=None, wait_time=RATE_LIMIT):
        """
        An internal method used to get a list of coordinate pairs and return the feature ID. Values come back as a dictionary
        where the input item in the list (coordinate pair shown as DD Longitude space DD latitude)
        is a dictionary key and the value is the OpenET featureID
        :param features:
        :param field: when field is defined, features will be a pandas data frame with a field that has the coordinate values to use.
                    In that case, results will be joined back to the data frame as the field openet_feature_id.
        :param wait_time: how long in ms should we wait between subsequent requests?
        :return:
        """
        if field and not isinstance(features, pandas.DataFrame):
            raise ValueError("A field name was provided, but `features` are not a Pandas DataFrame. Must be a DataFrame to proceed, or a field name should not be provided")
        if field:
            inputs = features[field]
        else:
            inputs = features
        outputs = OrderedDict()
        for item in inputs:
            # check the cache first - we might not need an API request for their field ID
            cached_value = self.client.cache.check_gdb_cache(key=item)
            if cached_value is False:  # False indicates no records, None indicates it's there and Null
                params = {"coordinates": item, "spatial_join_type": "intersect", "override": "False"}
                results = self.feature_ids_list(params)
                results_dict = results.json()
                if "feature_unique_ids" in results_dict:
                    ids = results_dict["feature_unique_ids"]
                else:
                    logging.error(f"Unable to retrieve field ID. Server returned {results_dict}")
                    raise ValueError(f"Unable to retrieve field ID. Server returned {results_dict}")
                if len(ids) > 0:
                    outputs[item] = ids[0]
                else:
                    outputs[item] = None
                # save the returned value in our cache so we don't make another roundtrip if we run these
                # same values through in the future
                self.client.cache.cache_gdb_item(key=item, value=outputs[item])
                time.sleep(wait_time / 1000)
            else:
                outputs[item] = cached_value
                # no need to sleep when we check out own cache!
        if field:
            out_df = pandas.DataFrame({field: outputs.keys(), "openet_feature_id": outputs.values()})
            out_df.set_index(keys=field)
            return out_df
        else:
            return outputs
    def feature_ids_list(self, params=None):
        """
        The base OpenET Method - sends the supplied params to metadata/openet/region_of_interest/feature_ids_list
        and returns the requests.Response object
        :param params:
        :return:
        """
        endpoint = "metadata/openet/region_of_interest/feature_ids_list"
        if params is None:
            params = {}
        results = self.client.send_request(endpoint, method="post", **params)
        return results
| StarcoderdataPython |
9651609 | import re, itertools
import cfgescape as config
import random
class Factor(object):
    """A discrete (tabular) or continuous (bounded) state variable.

    Each instance gets a monotonically increasing class-wide ``idx``.
    NOTE(review): ``__cmp__`` and the ``cmp`` builtin below are Python 2
    only, and ``len(self.bounds) / 2`` would be float division on Python 3
    — this module appears to target Python 2; confirm before porting.
    """
    __idx = 0
    def __init__(self, name=None, abbv=None, label=None, values=None, id=None, tabular=True, bounds=None, visualize=True, default=0):
        if tabular:
            if values:
                self.values = values
                self.binary = False
            else:
                # No explicit values: treat as a boolean (binary) factor.
                self.values = [False, True]
                self.binary = True
            self.dim = 1
        else:
            # Continuous factor: bounds holds (min, max) pairs, flattened.
            self.bounds = bounds
            self.binary = False
            if config.current.tabular_discretization_factor:
                # Optionally discretize the continuous range into integer bins.
                self.values = range(self.bounds[0],int(self.bounds[1]/config.current.tabular_discretization_factor+1))
            self.dim = len(self.bounds) / 2
        self.tabular = tabular
        self.name = name
        self.id = self.abbv = abbv or name
        self.label = label or name
        if id is not None: self.id = id
        self.visualize = visualize
        self.default = default
        self.idx = Factor.__idx
        Factor.__idx += 1
    def random_value(self):
        """Return a uniformly random value (tabular) or point (continuous)."""
        if self.tabular:
            return random.choice(self.values)
        else:
            point = [random.randint(self.bounds[2*i],self.bounds[2*i+1]) for i in range(self.dim)]
            return point
    def __eq__(self, other):
        # Equality is by id only; other attributes are ignored.
        if other == None: return False
        if not isinstance(other, Factor): return False
        return self.id == other.id
    def __cmp__(self, other):
        # Python 2 ordering hook; None sorts before any Factor.
        if other == None: return 1
        return cmp(self.id, other.id)
    @staticmethod
    def map(factor):
        """Return the (name, id) pair for ``factor``."""
        return (factor.name, factor.id)
    @staticmethod
    def fromName(name, factors):
        """Return the first factor in ``factors`` with the given name (or None)."""
        for f in factors:
            if f.name == name:
                return f
    def __repr__(self):
        return 'F:%s' % self.id
class Action(object):
    """An agent action identified by id, with optional value/scale/type.

    Instances get a monotonically increasing class-wide ``idx`` starting
    at 1.  NOTE(review): ``__cmp__``/``cmp`` are Python 2 only.
    """
    __idx = 1
    def __init__(self, name=None, abbv=None, id=None, value=None, scale=None, type=None):
        self.name = name
        self.id = self.abbv = abbv or name
        if id is not None: self.id = id
        self.value = value
        self.idx = Action.__idx
        self.scale = scale
        self.type = type
        Action.__idx += 1
    @staticmethod
    def map(action):
        """Return the (name, id) pair for ``action``."""
        return (action.name, action.id)
    @staticmethod
    def fromName(name, actions):
        """Return the first action in ``actions`` with the given name (or None)."""
        for a in actions:
            if a.name == name:
                return a
    def __cmp__(self, other):
        # Python 2 ordering hook; None sorts before any Action.
        if other == None: return 1
        return cmp(self.id, other.id)
    def __eq__(self, other):
        # Equality is by id only.
        if other == None: return False
        if not isinstance(other, Action): return False
        return self.id == other.id
    def __repr__(self):
        if self.scale == 1:
            return self.id
        return '%s, scale:%s' % (self.id,self.scale)
    def copy(self):
        """Return a copy sharing this action's idx.

        NOTE(review): ``type`` is not forwarded to the new instance, and the
        constructor call still bumps the class-wide counter even though
        ``idx`` is overwritten — confirm both are intentional.
        """
        a = Action(name=self.name,abbv=self.abbv,id=self.id,value=self.value,scale=self.scale)
        a.idx = self.idx
        return a
class FactorState(object):
    """A dict-like mapping of factor id -> value with a ``reset`` flag."""
    def __init__(self, values):
        # values: mapping of factor id -> value; keys()/items() are exposed
        # directly as bound methods of the underlying mapping.
        self.values = values
        self.keys = self.values.keys
        self.items = self.values.items
        self.reset = False
    def __setitem__(self, i, v):
        self.values[i] = v
    def __getitem__(self, i):
        return self.values[i]
    def __contains__(self, i):
        return i in self.values
    def __iter__(self):
        return iter(self.values)
    def next(self):
        # NOTE(review): next() on a plain dict raises TypeError; this only
        # works if self.values is an iterator-like object — confirm callers.
        return next(self.values)
    def __len__(self):
        return len(self.values)
    def copy(self):
        """Return a shallow copy (values dict copied, reset flag preserved)."""
        state = self.__class__(self.values.copy())
        state.reset = self.reset
        return state
    def convert(self, values):
        """Build a new state of this class from a copy of ``values``."""
        return self.__class__(values.copy())
    def discretize(self, factors):
        """Map continuous factor values into their configured integer bins."""
        for f in factors:
            if not f.tabular and config.current.tabular_discretization_factor:
                self.values[f.id] = round(self.values[f.id] / config.current.tabular_discretization_factor)
class GeneratedFactorState(FactorState):
    """FactorState variant carrying a ``generator`` attribute.

    NOTE(review): ``self.generator`` is assumed to be set externally (no
    __init__ override is visible here) — the subclass constructor must
    accept (values, generator).
    """
    def copy(self):
        state = self.__class__(self.values.copy(), self.generator)
        state.reset = self.reset
        return state
class FactorWorld(object):
    """Base class for worlds defined by a set of actions and factors.

    On construction it attaches ``Action`` and ``Factor`` namespace classes
    (name -> id attributes) to the *subclass* via ``type(self)``, so they
    are shared by all instances of that subclass.
    """
    def __init__(self, actions, factors):
        class Action: pass
        for a in actions:
            setattr(Action, a.name, a.id)
        type(self).Action = Action
        class Factor: pass
        for f in factors:
            setattr(Factor, f.name, f.id)
        type(self).Factor = Factor
        self.name = self.__class__.__name__
        self.actions = actions
        self.factors = factors
        self.layout = None
        self.agents = []
        # id -> object lookup tables for fast resolution.
        self.alookup = {a.id:a for a in actions}
        self.flookup = {f.id:f for f in factors}
    def takeAction(self, state, action): pass  # subclass hook: mutate state
    def likelyNextStates(self, state):
        """Yield one successor state per action, each from a fresh copy."""
        for action in self.actions:
            s = state.copy()
            self.takeAction(s, action)
            yield s
    def allNextStates(self):
        """Yield every combination of factor values as a state.

        NOTE(review): the same state object is yielded each time with its
        ``values`` rebound, so consumers must not hold references across
        iterations.  Relies on a ``createState`` method defined by
        subclasses (not visible here).
        """
        values, ids = [], []
        for f in self.factors:
            values.append(f.values)
            ids.append(f.id)
        s = self.createState()
        for combination in itertools.product(*values):
            s.values = dict(zip(ids, combination))
            yield s
class ActionSequence(list):
    """A list of Action objects parsed from a text file.

    File format: one action id per line, either ``id:count`` (append the
    action ``count`` times) or a bare ``id`` (append once).  After a
    successful parse the sequence is reversed, so the file's first line
    ends up last.  A missing or malformed file yields an empty (or
    partially filled, unreversed) sequence — parsing is best-effort.
    """
    def __init__(self, actions, path):
        self.path = path
        # id -> Action lookup used while parsing.
        self.actions = {a.id:a for a in actions}
        try:
            self.read()
            self.reverse()
        except Exception:
            # Best-effort: narrowed from a bare `except:` so that
            # KeyboardInterrupt/SystemExit still propagate; any parse or
            # I/O failure simply leaves whatever was read so far.
            pass
    def read(self):
        """Parse the file at ``self.path``, appending actions in file order."""
        with open(self.path, 'r') as fh:
            while True:
                line = fh.readline()
                if line == '': break
                m = re.match(r'(\w+):(\d+)', line)
                if m:
                    aid = m.group(1)
                    iters = int(m.group(2))
                    for i in range(iters):
                        self.append(self.actions[aid])
                    continue
                m = re.match(r'(\w+)', line)
                if m:
                    aid = m.group(1)
                    self.append(self.actions[aid])
| StarcoderdataPython |
3214505 | from typing import Tuple
from sqlalchemy import select, String
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import selectinload, aliased
from sqlalchemy import func
from . import models, schemas
async def get_user(async_session: AsyncSession, user_id: int):
    """Fetch one user by primary key, posts eagerly loaded; None if absent."""
    stmt = (
        select(models.User)
        .where(models.User.user_id == user_id)
        .options(selectinload(models.User.posts))
    )
    return (await async_session.execute(stmt)).scalars().first()
async def get_user_by_email(async_session: AsyncSession, email: str):
    """Fetch one user by e-mail address, posts eagerly loaded; None if absent."""
    stmt = (
        select(models.User)
        .where(models.User.email == email)
        .options(selectinload(models.User.posts))
    )
    return (await async_session.execute(stmt)).scalars().first()
async def get_users(
    async_session: AsyncSession, skip: int = 0, limit: int = 100
):
    """Return a page of users ordered by id, each with posts eagerly loaded."""
    stmt = (
        select(models.User)
        .options(selectinload(models.User.posts))
        .order_by(models.User.user_id)
        .offset(skip)
        .limit(limit)
    )
    return (await async_session.execute(stmt)).scalars().fetchall()
async def create_user(async_session: AsyncSession, user: schemas.UserCreate):
    """Persist a new user and return the committed ORM object."""
    # posts=[] pre-populates the relationship so pydantic serialization
    # will not trigger a lazy load on the async session.
    db_user = models.User(
        email=user.email,
        first_name=user.first_name,
        last_name=user.last_name,
        posts=[],
    )
    async_session.add(db_user)
    await async_session.commit()
    return db_user
async def get_posts(
    async_session: AsyncSession, skip: int = 0, limit: int = 100
):
    """Return a page of posts ordered by id, each with its author eagerly loaded."""
    stmt = (
        select(models.Post)
        .options(selectinload(models.Post.user))
        .order_by(models.Post.post_id)
        .offset(skip)
        .limit(limit)
    )
    return (await async_session.execute(stmt)).scalars().fetchall()
async def create_post(async_session: AsyncSession, post: schemas.PostCreate):
    """Persist a new post, assigning it the next global sort key."""
    db_post = models.Post(**post.dict())
    db_post.sort_key = await _get_next_sort_key(async_session)
    async_session.add(db_post)
    await async_session.commit()
    return db_post
async def create_user_post(
    async_session: AsyncSession,
    post: schemas.PostCreate,
    user_id: int
):
    """Persist a new post owned by ``user_id``, assigning the next sort key."""
    db_post = models.Post(**post.dict(), user_id=user_id)
    db_post.sort_key = await _get_next_sort_key(async_session)
    async_session.add(db_post)
    await async_session.commit()
    return db_post
async def get_post(async_session: AsyncSession, post_id: int):
    """Fetch a single post by id; None if it does not exist."""
    stmt = select(models.Post).where(models.Post.post_id == post_id)
    return (await async_session.execute(stmt)).scalars().first()
async def get_topics(
    async_session: AsyncSession,
    categories: Tuple[str, ...],
    topic_id: int = None
):
    """Return posts of the given categories in threaded (depth-first) order.

    Builds a recursive CTE rooted at top-level posts (parent_id == 0), or at
    the single root post ``topic_id`` if given.  Each row carries a
    ``sorting_key``: the space-separated chain of ``sort_key`` values from
    the root down to the row, so ordering by it yields thread order.  The
    returned ``level`` is the thread depth (1 for roots), derived from the
    number of components in ``sorting_key``.
    """
    if topic_id is not None:
        # Anchor: the single requested root post.
        hierarchy = (
            select(
                models.Post,
                func.cast(models.Post.sort_key, String).label("sorting_key")
            )
            .where(models.Post.post_id == topic_id)
            .where(models.Post.parent_id == 0)
            .cte(name="hierarchy", recursive=True)
        )
    else:
        # Anchor: every top-level post.
        hierarchy = (
            select(
                models.Post,
                func.cast(models.Post.sort_key, String).label("sorting_key")
            )
            .where(models.Post.parent_id == 0)
            .cte(name="hierarchy", recursive=True)
        )
    children = aliased(models.Post, name="c")
    # Recursive member: append each child's sort_key to its parent's path.
    hierarchy = (
        hierarchy.union_all(
            select(
                children,
                (hierarchy.c.sorting_key + " " + func.cast(children.sort_key, String)).label("sorting_key")
            )
            .where(children.parent_id == hierarchy.c.post_id)
        )
    )
    stmt = (
        select(hierarchy.c)
        .where(hierarchy.c.type.in_(categories))
        .group_by(hierarchy.c.sorting_key)
        .order_by(hierarchy.c.sorting_key)
    )
    result = await async_session.execute(stmt)
    posts = result.fetchall()
    return [
        {
            "post": post,
            "level": len(post.sorting_key.split(" "))
        }
        for post in posts
    ]
async def _get_next_sort_key(async_session: AsyncSession) -> int:
    """Return max(sort_key) + 1 across all posts, or 0 for an empty table."""
    stmt = select(func.ifnull(func.max(models.Post.sort_key) + 1, 0))
    row = (await async_session.execute(stmt)).one_or_none()
    if row is None:
        raise RuntimeError("Failed to get new value for sort_key")
    return row[0]
# build the recursive CTE query
# v = 1
# hierarchy = (
# sync_session
# .query(models.Post, models.Post.sort_key.label("sorting_key"))
# .cte(name='hierarchy', recursive=True)
# )
# children = aliased(models.Post, name="c")
# hierarchy = hierarchy.union_all(
# sync_session
# .query(
# children,
# (hierarchy.c.sorting_key + " " + children.sort_key).label("sorting_key")
# )
# .filter(children.parent_id == hierarchy.c.post_id)
# )
# # query the hierarchy for the post and it's comments
# retval = (
# sync_session
# .query(models.Post, hierarchy.c.sorting_key)
# .select_entity_from(hierarchy)
# .order_by(hierarchy.c.sorting_key)
# .all()
# )
# return retval
| StarcoderdataPython |
5157286 | from flask import Flask
from threading import Thread
app = Flask(__name__)


@app.route('/')
def home():
    """Health-check endpoint so uptime pingers get a 200 response."""
    return "Hello, I am alive!"


def runWebServer():
    """Run the Flask app, listening on all interfaces on port 8080 (blocking)."""
    print('Running WebServer...')
    app.run('0.0.0.0', 8080)


def keep_alive():
    """Start the web server in a background thread and return immediately."""
    Thread(target=runWebServer).start()
232534 | '''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def associate_customer_gateway(CustomerGatewayArn=None, GlobalNetworkId=None, DeviceId=None, LinkId=None):
    """
    Associates a customer gateway with a device and, optionally, with a link.
    If you specify a link, it must be associated with the specified device.

    You can only associate customer gateways that are connected to a VPN
    attachment on a transit gateway that is registered in your global network.
    To list customer gateways connected to a transit gateway, use the EC2
    DescribeVpnConnections API filtered by transit-gateway-id.  A customer
    gateway cannot be associated with more than one device and link.

    See also: AWS API Documentation

    :type CustomerGatewayArn: string
    :param CustomerGatewayArn: [REQUIRED] The Amazon Resource Name (ARN) of
        the customer gateway.

    :type GlobalNetworkId: string
    :param GlobalNetworkId: [REQUIRED] The ID of the global network.

    :type DeviceId: string
    :param DeviceId: [REQUIRED] The ID of the device.

    :type LinkId: string
    :param LinkId: The ID of the link.

    :rtype: dict
    :return: {
        'CustomerGatewayAssociation': {
            'CustomerGatewayArn': 'string',
            'GlobalNetworkId': 'string',
            'DeviceId': 'string',
            'LinkId': 'string',
            'State': 'PENDING'|'AVAILABLE'|'DELETING'|'DELETED'
        }
    }

    Exceptions:
        NetworkManager.Client.exceptions.ValidationException
        NetworkManager.Client.exceptions.ServiceQuotaExceededException
        NetworkManager.Client.exceptions.AccessDeniedException
        NetworkManager.Client.exceptions.ResourceNotFoundException
        NetworkManager.Client.exceptions.ConflictException
        NetworkManager.Client.exceptions.ThrottlingException
        NetworkManager.Client.exceptions.InternalServerException
    """
    # Auto-generated documentation stub: no client call is made here.
    pass
def associate_link(GlobalNetworkId=None, DeviceId=None, LinkId=None):
    """Associate a link with a device.

    A device can be associated with multiple links and a link with multiple
    devices; the device and link must be in the same global network and the
    same site.

    See also: AWS API Documentation.

    :type GlobalNetworkId: string
    :param GlobalNetworkId: [REQUIRED] The ID of the global network.
    :type DeviceId: string
    :param DeviceId: [REQUIRED] The ID of the device.
    :type LinkId: string
    :param LinkId: [REQUIRED] The ID of the link.
    :rtype: dict
    :return: {
        'LinkAssociation': {
            'GlobalNetworkId': 'string',
            'DeviceId': 'string',
            'LinkId': 'string',
            'LinkAssociationState': 'PENDING'|'AVAILABLE'|'DELETING'|'DELETED'
        }
    }

    Service exceptions: ValidationException, ServiceQuotaExceededException,
    AccessDeniedException, ResourceNotFoundException, ConflictException,
    ThrottlingException, InternalServerException.
    """
    pass
def can_paginate(operation_name=None):
    """Check whether an operation can be paginated.

    :type operation_name: string
    :param operation_name: The operation name, identical to the method name
        on the client. For example, if the method name is ``create_foo``
        (normally invoked as ``client.create_foo(**kwargs)``) and that
        operation supports pagination, you can use
        ``client.get_paginator('create_foo')``.
    """
    pass
def create_device(GlobalNetworkId=None, Description=None, Type=None, Vendor=None, Model=None, SerialNumber=None, Location=None, SiteId=None, Tags=None):
    """Create a new device in a global network.

    If both a site ID and a location are specified, the site's location is
    used for visualization in the Network Manager console.

    See also: AWS API Documentation.

    :type GlobalNetworkId: string
    :param GlobalNetworkId: [REQUIRED] The ID of the global network.
    :type Description: string
    :param Description: A description of the device (max 256 characters).
    :type Type: string
    :param Type: The type of the device.
    :type Vendor: string
    :param Vendor: The vendor of the device (max 128 characters).
    :type Model: string
    :param Model: The model of the device (max 128 characters).
    :type SerialNumber: string
    :param SerialNumber: The serial number of the device (max 128 characters).
    :type Location: dict
    :param Location: The location of the device:
        ``{'Address': 'string', 'Latitude': 'string', 'Longitude': 'string'}``.
    :type SiteId: string
    :param SiteId: The ID of the site.
    :type Tags: list
    :param Tags: Tags to apply at creation, each
        ``{'Key': 'string', 'Value': 'string'}`` (key max 128 characters,
        value max 256 characters).
    :rtype: dict
    :return: {
        'Device': {
            'DeviceId': 'string',
            'DeviceArn': 'string',
            'GlobalNetworkId': 'string',
            'Description': 'string',
            'Type': 'string',
            'Vendor': 'string',
            'Model': 'string',
            'SerialNumber': 'string',
            'Location': {
                'Address': 'string',
                'Latitude': 'string',
                'Longitude': 'string'
            },
            'SiteId': 'string',
            'CreatedAt': datetime(2015, 1, 1),
            'State': 'PENDING'|'AVAILABLE'|'DELETING'|'UPDATING',
            'Tags': [
                {
                    'Key': 'string',
                    'Value': 'string'
                },
            ]
        }
    }

    Service exceptions: ValidationException, ServiceQuotaExceededException,
    AccessDeniedException, ResourceNotFoundException, ConflictException,
    ThrottlingException, InternalServerException.
    """
    pass
def create_global_network(Description=None, Tags=None):
    """Create a new, empty global network.

    See also: AWS API Documentation.

    :type Description: string
    :param Description: A description of the global network (max 256
        characters).
    :type Tags: list
    :param Tags: Tags to apply at creation, each
        ``{'Key': 'string', 'Value': 'string'}`` (key max 128 characters,
        value max 256 characters).
    :rtype: dict
    :return: {
        'GlobalNetwork': {
            'GlobalNetworkId': 'string',
            'GlobalNetworkArn': 'string',
            'Description': 'string',
            'CreatedAt': datetime(2015, 1, 1),
            'State': 'PENDING'|'AVAILABLE'|'DELETING'|'UPDATING',
            'Tags': [
                {
                    'Key': 'string',
                    'Value': 'string'
                },
            ]
        }
    }

    Service exceptions: ValidationException, ServiceQuotaExceededException,
    AccessDeniedException, ConflictException, ThrottlingException,
    InternalServerException.
    """
    pass
def create_link(GlobalNetworkId=None, Description=None, Type=None, Bandwidth=None, Provider=None, SiteId=None, Tags=None):
    """Create a new link for a specified site.

    See also: AWS API Documentation.

    :type GlobalNetworkId: string
    :param GlobalNetworkId: [REQUIRED] The ID of the global network.
    :type Description: string
    :param Description: A description of the link (max 256 characters).
    :type Type: string
    :param Type: The type of the link (max 128 characters; cannot include
        the characters | ^).
    :type Bandwidth: dict
    :param Bandwidth: [REQUIRED] The upload and download speed in Mbps:
        ``{'UploadSpeed': 123, 'DownloadSpeed': 123}``.
    :type Provider: string
    :param Provider: The provider of the link (max 128 characters; cannot
        include the characters | ^).
    :type SiteId: string
    :param SiteId: [REQUIRED] The ID of the site.
    :type Tags: list
    :param Tags: Tags to apply at creation, each
        ``{'Key': 'string', 'Value': 'string'}`` (key max 128 characters,
        value max 256 characters).
    :rtype: dict
    :return: {
        'Link': {
            'LinkId': 'string',
            'LinkArn': 'string',
            'GlobalNetworkId': 'string',
            'SiteId': 'string',
            'Description': 'string',
            'Type': 'string',
            'Bandwidth': {
                'UploadSpeed': 123,
                'DownloadSpeed': 123
            },
            'Provider': 'string',
            'CreatedAt': datetime(2015, 1, 1),
            'State': 'PENDING'|'AVAILABLE'|'DELETING'|'UPDATING',
            'Tags': [
                {
                    'Key': 'string',
                    'Value': 'string'
                },
            ]
        }
    }

    Service exceptions: ValidationException, ServiceQuotaExceededException,
    AccessDeniedException, ResourceNotFoundException, ConflictException,
    ThrottlingException, InternalServerException.
    """
    pass
def create_site(GlobalNetworkId=None, Description=None, Location=None, Tags=None):
    """Create a new site in a global network.

    See also: AWS API Documentation.

    :type GlobalNetworkId: string
    :param GlobalNetworkId: [REQUIRED] The ID of the global network.
    :type Description: string
    :param Description: A description of the site (max 256 characters).
    :type Location: dict
    :param Location: The site location, used for visualization in the
        Network Manager console. If the address is specified, the latitude
        and longitude are automatically calculated:
        ``{'Address': 'string', 'Latitude': 'string', 'Longitude': 'string'}``.
    :type Tags: list
    :param Tags: Tags to apply at creation, each
        ``{'Key': 'string', 'Value': 'string'}`` (key max 128 characters,
        value max 256 characters).
    :rtype: dict
    :return: {
        'Site': {
            'SiteId': 'string',
            'SiteArn': 'string',
            'GlobalNetworkId': 'string',
            'Description': 'string',
            'Location': {
                'Address': 'string',
                'Latitude': 'string',
                'Longitude': 'string'
            },
            'CreatedAt': datetime(2015, 1, 1),
            'State': 'PENDING'|'AVAILABLE'|'DELETING'|'UPDATING',
            'Tags': [
                {
                    'Key': 'string',
                    'Value': 'string'
                },
            ]
        }
    }

    Service exceptions: ValidationException, ServiceQuotaExceededException,
    AccessDeniedException, ResourceNotFoundException, ConflictException,
    ThrottlingException, InternalServerException.
    """
    pass
def delete_device(GlobalNetworkId=None, DeviceId=None):
    """Delete an existing device.

    The device must first be disassociated from any links and customer
    gateways.

    See also: AWS API Documentation.

    :type GlobalNetworkId: string
    :param GlobalNetworkId: [REQUIRED] The ID of the global network.
    :type DeviceId: string
    :param DeviceId: [REQUIRED] The ID of the device.
    :rtype: dict
    :return: {
        'Device': {
            'DeviceId': 'string',
            'DeviceArn': 'string',
            'GlobalNetworkId': 'string',
            'Description': 'string',
            'Type': 'string',
            'Vendor': 'string',
            'Model': 'string',
            'SerialNumber': 'string',
            'Location': {
                'Address': 'string',
                'Latitude': 'string',
                'Longitude': 'string'
            },
            'SiteId': 'string',
            'CreatedAt': datetime(2015, 1, 1),
            'State': 'PENDING'|'AVAILABLE'|'DELETING'|'UPDATING',
            'Tags': [
                {
                    'Key': 'string',
                    'Value': 'string'
                },
            ]
        }
    }

    Service exceptions: ValidationException, AccessDeniedException,
    ResourceNotFoundException, ConflictException, ThrottlingException,
    InternalServerException.
    """
    pass
def delete_global_network(GlobalNetworkId=None):
    """Delete an existing global network.

    All global network objects (devices, links, and sites) must be deleted
    first, and all transit gateways must be deregistered.

    See also: AWS API Documentation.

    :type GlobalNetworkId: string
    :param GlobalNetworkId: [REQUIRED] The ID of the global network.
    :rtype: dict
    :return: {
        'GlobalNetwork': {
            'GlobalNetworkId': 'string',
            'GlobalNetworkArn': 'string',
            'Description': 'string',
            'CreatedAt': datetime(2015, 1, 1),
            'State': 'PENDING'|'AVAILABLE'|'DELETING'|'UPDATING',
            'Tags': [
                {
                    'Key': 'string',
                    'Value': 'string'
                },
            ]
        }
    }

    Service exceptions: ValidationException, AccessDeniedException,
    ResourceNotFoundException, ConflictException, ThrottlingException,
    InternalServerException.
    """
    pass
def delete_link(GlobalNetworkId=None, LinkId=None):
    """Delete an existing link.

    The link must first be disassociated from any devices and customer
    gateways.

    See also: AWS API Documentation.

    :type GlobalNetworkId: string
    :param GlobalNetworkId: [REQUIRED] The ID of the global network.
    :type LinkId: string
    :param LinkId: [REQUIRED] The ID of the link.
    :rtype: dict
    :return: {
        'Link': {
            'LinkId': 'string',
            'LinkArn': 'string',
            'GlobalNetworkId': 'string',
            'SiteId': 'string',
            'Description': 'string',
            'Type': 'string',
            'Bandwidth': {
                'UploadSpeed': 123,
                'DownloadSpeed': 123
            },
            'Provider': 'string',
            'CreatedAt': datetime(2015, 1, 1),
            'State': 'PENDING'|'AVAILABLE'|'DELETING'|'UPDATING',
            'Tags': [
                {
                    'Key': 'string',
                    'Value': 'string'
                },
            ]
        }
    }

    Service exceptions: ValidationException, AccessDeniedException,
    ResourceNotFoundException, ConflictException, ThrottlingException,
    InternalServerException.
    """
    pass
def delete_site(GlobalNetworkId=None, SiteId=None):
    """Delete an existing site.

    The site cannot be associated with any device or link.

    See also: AWS API Documentation.

    :type GlobalNetworkId: string
    :param GlobalNetworkId: [REQUIRED] The ID of the global network.
    :type SiteId: string
    :param SiteId: [REQUIRED] The ID of the site.
    :rtype: dict
    :return: {
        'Site': {
            'SiteId': 'string',
            'SiteArn': 'string',
            'GlobalNetworkId': 'string',
            'Description': 'string',
            'Location': {
                'Address': 'string',
                'Latitude': 'string',
                'Longitude': 'string'
            },
            'CreatedAt': datetime(2015, 1, 1),
            'State': 'PENDING'|'AVAILABLE'|'DELETING'|'UPDATING',
            'Tags': [
                {
                    'Key': 'string',
                    'Value': 'string'
                },
            ]
        }
    }

    Service exceptions: ValidationException, AccessDeniedException,
    ResourceNotFoundException, ConflictException, ThrottlingException,
    InternalServerException.
    """
    pass
def deregister_transit_gateway(GlobalNetworkId=None, TransitGatewayArn=None):
    """Deregister a transit gateway from a global network.

    This action does not delete the transit gateway or modify any of its
    attachments, but it removes any customer gateway associations.

    See also: AWS API Documentation.

    :type GlobalNetworkId: string
    :param GlobalNetworkId: [REQUIRED] The ID of the global network.
    :type TransitGatewayArn: string
    :param TransitGatewayArn: [REQUIRED] The Amazon Resource Name (ARN) of
        the transit gateway.
    :rtype: dict
    :return: {
        'TransitGatewayRegistration': {
            'GlobalNetworkId': 'string',
            'TransitGatewayArn': 'string',
            'State': {
                'Code': 'PENDING'|'AVAILABLE'|'DELETING'|'DELETED'|'FAILED',
                'Message': 'string'
            }
        }
    }

    Service exceptions: ValidationException, AccessDeniedException,
    ResourceNotFoundException, ConflictException, ThrottlingException,
    InternalServerException.
    """
    pass
def describe_global_networks(GlobalNetworkIds=None, MaxResults=None, NextToken=None):
    """Describe one or more global networks.

    By default, all global networks are described. To describe the objects
    in a global network, use the appropriate ``Get*`` action; for example,
    to list the transit gateways in a global network, use
    ``GetTransitGatewayRegistrations``.

    See also: AWS API Documentation.

    :type GlobalNetworkIds: list
    :param GlobalNetworkIds: The IDs of one or more global networks
        (maximum of 10).
    :type MaxResults: integer
    :param MaxResults: The maximum number of results to return.
    :type NextToken: string
    :param NextToken: The token for the next page of results.
    :rtype: dict
    :return: {
        'GlobalNetworks': [
            {
                'GlobalNetworkId': 'string',
                'GlobalNetworkArn': 'string',
                'Description': 'string',
                'CreatedAt': datetime(2015, 1, 1),
                'State': 'PENDING'|'AVAILABLE'|'DELETING'|'UPDATING',
                'Tags': [
                    {
                        'Key': 'string',
                        'Value': 'string'
                    },
                ]
            },
        ],
        'NextToken': 'string'
    }

    Service exceptions: ValidationException, AccessDeniedException,
    ResourceNotFoundException, ThrottlingException, InternalServerException.
    """
    pass
def disassociate_customer_gateway(GlobalNetworkId=None, CustomerGatewayArn=None):
    """
    Disassociate a customer gateway from a device and a link.

    Documentation stub for ``NetworkManager.Client.disassociate_customer_gateway``.
    See the AWS API documentation for full details.

    :type GlobalNetworkId: string
    :param GlobalNetworkId: [REQUIRED] The ID of the global network.
    :type CustomerGatewayArn: string
    :param CustomerGatewayArn: [REQUIRED] The Amazon Resource Name (ARN) of
        the customer gateway. For more information, see Resources Defined by
        Amazon EC2.
    :rtype: dict
    :return: ``{'CustomerGatewayAssociation': {'CustomerGatewayArn',
        'GlobalNetworkId', 'DeviceId', 'LinkId', 'State'}}`` where State is
        one of ``'PENDING' | 'AVAILABLE' | 'DELETING' | 'DELETED'``.

    May raise NetworkManager.Client exceptions: ValidationException,
    AccessDeniedException, ResourceNotFoundException, ConflictException,
    ThrottlingException, InternalServerException.
    """
    # Stub body: the real implementation lives in botocore.
    return None
def disassociate_link(GlobalNetworkId=None, DeviceId=None, LinkId=None):
    """
    Disassociate an existing device from a link.

    You must first disassociate any customer gateways that are associated
    with the link. Documentation stub for
    ``NetworkManager.Client.disassociate_link``; see the AWS API
    documentation for full details.

    :type GlobalNetworkId: string
    :param GlobalNetworkId: [REQUIRED] The ID of the global network.
    :type DeviceId: string
    :param DeviceId: [REQUIRED] The ID of the device.
    :type LinkId: string
    :param LinkId: [REQUIRED] The ID of the link.
    :rtype: dict
    :return: ``{'LinkAssociation': {'GlobalNetworkId', 'DeviceId', 'LinkId',
        'LinkAssociationState'}}`` where LinkAssociationState is one of
        ``'PENDING' | 'AVAILABLE' | 'DELETING' | 'DELETED'``.

    May raise NetworkManager.Client exceptions: ValidationException,
    AccessDeniedException, ResourceNotFoundException, ConflictException,
    ThrottlingException, InternalServerException.
    """
    # Stub body: the real implementation lives in botocore.
    return None
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
    """
    Generate a presigned URL given a client, its method, and arguments.

    Documentation stub for the standard botocore client helper.

    :type ClientMethod: string
    :param ClientMethod: The client method to presign for.
    :type Params: dict
    :param Params: The parameters normally passed to ClientMethod.
    :type ExpiresIn: int
    :param ExpiresIn: Number of seconds the presigned URL is valid for.
        Defaults to one hour (3600 seconds).
    :type HttpMethod: string
    :param HttpMethod: The HTTP method to use on the generated URL. By
        default, the HTTP method is whatever is used in the method's model.
    """
    # Stub body: the real implementation lives in botocore.
    return None
def get_customer_gateway_associations(GlobalNetworkId=None, CustomerGatewayArns=None, MaxResults=None, NextToken=None):
    """
    Get the association information for customer gateways that are
    associated with devices and links in your global network.

    Documentation stub for
    ``NetworkManager.Client.get_customer_gateway_associations``; see the AWS
    API documentation for full details.

    :type GlobalNetworkId: string
    :param GlobalNetworkId: [REQUIRED] The ID of the global network.
    :type CustomerGatewayArns: list
    :param CustomerGatewayArns: One or more customer gateway Amazon Resource
        Names (ARNs). The maximum is 10.
    :type MaxResults: integer
    :param MaxResults: The maximum number of results to return.
    :type NextToken: string
    :param NextToken: The token for the next page of results.
    :rtype: dict
    :return: ``{'CustomerGatewayAssociations': [{'CustomerGatewayArn',
        'GlobalNetworkId', 'DeviceId', 'LinkId', 'State'}], 'NextToken'}``
        where State is one of
        ``'PENDING' | 'AVAILABLE' | 'DELETING' | 'DELETED'``.

    May raise NetworkManager.Client exceptions: ValidationException,
    AccessDeniedException, ResourceNotFoundException, ConflictException,
    ThrottlingException, InternalServerException.
    """
    # Stub body: the real implementation lives in botocore.
    return None
def get_devices(GlobalNetworkId=None, DeviceIds=None, SiteId=None, MaxResults=None, NextToken=None):
    """
    Get information about one or more of your devices in a global network.

    Documentation stub for ``NetworkManager.Client.get_devices``; see the
    AWS API documentation for full details.

    :type GlobalNetworkId: string
    :param GlobalNetworkId: [REQUIRED] The ID of the global network.
    :type DeviceIds: list
    :param DeviceIds: One or more device IDs. The maximum is 10.
    :type SiteId: string
    :param SiteId: The ID of the site.
    :type MaxResults: integer
    :param MaxResults: The maximum number of results to return.
    :type NextToken: string
    :param NextToken: The token for the next page of results.
    :rtype: dict
    :return: ``{'Devices': [{'DeviceId', 'DeviceArn', 'GlobalNetworkId',
        'Description', 'Type', 'Vendor', 'Model', 'SerialNumber',
        'Location': {'Address', 'Latitude', 'Longitude'}, 'SiteId',
        'CreatedAt', 'State', 'Tags': [{'Key', 'Value'}]}], 'NextToken'}``
        where State is one of
        ``'PENDING' | 'AVAILABLE' | 'DELETING' | 'UPDATING'``.

    May raise NetworkManager.Client exceptions: ValidationException,
    AccessDeniedException, ResourceNotFoundException, ThrottlingException,
    InternalServerException.
    """
    # Stub body: the real implementation lives in botocore.
    return None
def get_link_associations(GlobalNetworkId=None, DeviceId=None, LinkId=None, MaxResults=None, NextToken=None):
    """
    Get the link associations for a device or a link.

    Either the device ID or the link ID must be specified. Documentation
    stub for ``NetworkManager.Client.get_link_associations``; see the AWS
    API documentation for full details.

    :type GlobalNetworkId: string
    :param GlobalNetworkId: [REQUIRED] The ID of the global network.
    :type DeviceId: string
    :param DeviceId: The ID of the device.
    :type LinkId: string
    :param LinkId: The ID of the link.
    :type MaxResults: integer
    :param MaxResults: The maximum number of results to return.
    :type NextToken: string
    :param NextToken: The token for the next page of results.
    :rtype: dict
    :return: ``{'LinkAssociations': [{'GlobalNetworkId', 'DeviceId',
        'LinkId', 'LinkAssociationState'}], 'NextToken'}`` where
        LinkAssociationState is one of
        ``'PENDING' | 'AVAILABLE' | 'DELETING' | 'DELETED'``.

    May raise NetworkManager.Client exceptions: ValidationException,
    AccessDeniedException, ResourceNotFoundException, ThrottlingException,
    InternalServerException.
    """
    # Stub body: the real implementation lives in botocore.
    return None
def get_links(GlobalNetworkId=None, LinkIds=None, SiteId=None, Type=None, Provider=None, MaxResults=None, NextToken=None):
    """
    Get information about one or more links in a specified global network.

    If you specify the site ID, you cannot specify the type or provider in
    the same request. You can specify the type and provider in the same
    request. Documentation stub for ``NetworkManager.Client.get_links``;
    see the AWS API documentation for full details.

    :type GlobalNetworkId: string
    :param GlobalNetworkId: [REQUIRED] The ID of the global network.
    :type LinkIds: list
    :param LinkIds: One or more link IDs. The maximum is 10.
    :type SiteId: string
    :param SiteId: The ID of the site.
    :type Type: string
    :param Type: The link type.
    :type Provider: string
    :param Provider: The link provider.
    :type MaxResults: integer
    :param MaxResults: The maximum number of results to return.
    :type NextToken: string
    :param NextToken: The token for the next page of results.
    :rtype: dict
    :return: ``{'Links': [{'LinkId', 'LinkArn', 'GlobalNetworkId', 'SiteId',
        'Description', 'Type', 'Bandwidth': {'UploadSpeed',
        'DownloadSpeed'}, 'Provider', 'CreatedAt', 'State',
        'Tags': [{'Key', 'Value'}]}], 'NextToken'}`` where State is one of
        ``'PENDING' | 'AVAILABLE' | 'DELETING' | 'UPDATING'`` and bandwidth
        speeds are in Mbps.

    May raise NetworkManager.Client exceptions: ValidationException,
    AccessDeniedException, ResourceNotFoundException, ThrottlingException,
    InternalServerException.
    """
    # Stub body: the real implementation lives in botocore.
    return None
def get_paginator(operation_name=None):
    """
    Create a paginator for an operation.

    Documentation stub for the standard botocore client helper.

    :type operation_name: string
    :param operation_name: The operation name. This is the same name as the
        method name on the client. For example, if the method name is
        ``create_foo``, and you'd normally invoke the operation as
        ``client.create_foo(**kwargs)``, if the ``create_foo`` operation can
        be paginated, you can use the call
        ``client.get_paginator('create_foo')``.
    :rtype: L{botocore.paginate.Paginator}
    :return: A paginator object.
    """
    # Stub body: the real implementation lives in botocore.
    return None
def get_sites(GlobalNetworkId=None, SiteIds=None, MaxResults=None, NextToken=None):
    """
    Get information about one or more of your sites in a global network.

    Documentation stub for ``NetworkManager.Client.get_sites``; see the AWS
    API documentation for full details.

    :type GlobalNetworkId: string
    :param GlobalNetworkId: [REQUIRED] The ID of the global network.
    :type SiteIds: list
    :param SiteIds: One or more site IDs. The maximum is 10.
    :type MaxResults: integer
    :param MaxResults: The maximum number of results to return.
    :type NextToken: string
    :param NextToken: The token for the next page of results.
    :rtype: dict
    :return: ``{'Sites': [{'SiteId', 'SiteArn', 'GlobalNetworkId',
        'Description', 'Location': {'Address', 'Latitude', 'Longitude'},
        'CreatedAt', 'State', 'Tags': [{'Key', 'Value'}]}], 'NextToken'}``
        where State is one of
        ``'PENDING' | 'AVAILABLE' | 'DELETING' | 'UPDATING'``.

    May raise NetworkManager.Client exceptions: ValidationException,
    AccessDeniedException, ResourceNotFoundException, ThrottlingException,
    InternalServerException.
    """
    # Stub body: the real implementation lives in botocore.
    return None
def get_transit_gateway_registrations(GlobalNetworkId=None, TransitGatewayArns=None, MaxResults=None, NextToken=None):
    """
    Get information about the transit gateway registrations in a specified
    global network.

    Documentation stub for
    ``NetworkManager.Client.get_transit_gateway_registrations``; see the
    AWS API documentation for full details.

    :type GlobalNetworkId: string
    :param GlobalNetworkId: [REQUIRED] The ID of the global network.
    :type TransitGatewayArns: list
    :param TransitGatewayArns: The Amazon Resource Names (ARNs) of one or
        more transit gateways. The maximum is 10.
    :type MaxResults: integer
    :param MaxResults: The maximum number of results to return.
    :type NextToken: string
    :param NextToken: The token for the next page of results.
    :rtype: dict
    :return: ``{'TransitGatewayRegistrations': [{'GlobalNetworkId',
        'TransitGatewayArn', 'State': {'Code', 'Message'}}], 'NextToken'}``
        where Code is one of
        ``'PENDING' | 'AVAILABLE' | 'DELETING' | 'DELETED' | 'FAILED'``.

    May raise NetworkManager.Client exceptions: ValidationException,
    AccessDeniedException, ResourceNotFoundException, ThrottlingException,
    InternalServerException.
    """
    # Stub body: the real implementation lives in botocore.
    return None
def get_waiter(waiter_name=None):
    """
    Return an object that can wait for some condition.

    Documentation stub for the standard botocore client helper.

    :type waiter_name: str
    :param waiter_name: The name of the waiter to get. See the waiters
        section of the service docs for a list of available waiters.
    :rtype: botocore.waiter.Waiter
    """
    # Stub body: the real implementation lives in botocore.
    return None
def list_tags_for_resource(ResourceArn=None):
    """
    List the tags for a specified resource.

    Documentation stub for ``NetworkManager.Client.list_tags_for_resource``;
    see the AWS API documentation for full details.

    :type ResourceArn: string
    :param ResourceArn: [REQUIRED] The Amazon Resource Name (ARN) of the
        resource.
    :rtype: dict
    :return: ``{'TagList': [{'Key': 'string', 'Value': 'string'}]}``.
        Tag keys have a maximum length of 128 characters; tag values have a
        maximum length of 256 characters.

    May raise NetworkManager.Client exceptions: ValidationException,
    AccessDeniedException, ResourceNotFoundException, ThrottlingException,
    InternalServerException.
    """
    # Stub body: the real implementation lives in botocore.
    return None
def register_transit_gateway(GlobalNetworkId=None, TransitGatewayArn=None):
    """
    Register a transit gateway in your global network.

    The transit gateway can be in any AWS Region, but it must be owned by
    the same AWS account that owns the global network. You cannot register
    a transit gateway in more than one global network. Documentation stub
    for ``NetworkManager.Client.register_transit_gateway``; see the AWS API
    documentation for full details.

    :type GlobalNetworkId: string
    :param GlobalNetworkId: [REQUIRED] The ID of the global network.
    :type TransitGatewayArn: string
    :param TransitGatewayArn: [REQUIRED] The Amazon Resource Name (ARN) of
        the transit gateway. For more information, see Resources Defined by
        Amazon EC2.
    :rtype: dict
    :return: ``{'TransitGatewayRegistration': {'GlobalNetworkId',
        'TransitGatewayArn', 'State': {'Code', 'Message'}}}`` where Code is
        one of ``'PENDING' | 'AVAILABLE' | 'DELETING' | 'DELETED' |
        'FAILED'``.

    May raise NetworkManager.Client exceptions: ValidationException,
    AccessDeniedException, ResourceNotFoundException, ConflictException,
    ThrottlingException, InternalServerException.
    """
    # Stub body: the real implementation lives in botocore.
    return None
def tag_resource(ResourceArn=None, Tags=None):
    """
    Tag a specified resource.

    Documentation stub for ``NetworkManager.Client.tag_resource``; see the
    AWS API documentation for full details.

    :type ResourceArn: string
    :param ResourceArn: [REQUIRED] The Amazon Resource Name (ARN) of the
        resource.
    :type Tags: list
    :param Tags: [REQUIRED] The tags to apply to the specified resource, as
        a list of ``{'Key': 'string', 'Value': 'string'}`` dicts. Tag keys
        have a maximum length of 128 characters; tag values have a maximum
        length of 256 characters.
    :rtype: dict
    :return: ``{}`` (an empty response on success).

    May raise NetworkManager.Client exceptions: ValidationException,
    ServiceQuotaExceededException, AccessDeniedException,
    ResourceNotFoundException, ConflictException, ThrottlingException,
    InternalServerException.
    """
    # Stub body: the real implementation lives in botocore.
    return None
def untag_resource(ResourceArn=None, TagKeys=None):
    """
    Remove tags from a specified resource.

    Documentation stub for ``NetworkManager.Client.untag_resource``; see
    the AWS API documentation for full details.

    :type ResourceArn: string
    :param ResourceArn: [REQUIRED] The Amazon Resource Name (ARN) of the
        resource.
    :type TagKeys: list
    :param TagKeys: [REQUIRED] The tag keys (strings) to remove from the
        specified resource.
    :rtype: dict
    :return: ``{}`` (an empty response on success).

    May raise NetworkManager.Client exceptions: ValidationException,
    AccessDeniedException, ResourceNotFoundException, ConflictException,
    ThrottlingException, InternalServerException.
    """
    # Stub body: the real implementation lives in botocore.
    return None
def update_device(GlobalNetworkId=None, DeviceId=None, Description=None, Type=None, Vendor=None, Model=None, SerialNumber=None, Location=None, SiteId=None):
    """
    Update the details for an existing device.

    To remove information for any of the parameters, specify an empty
    string. Documentation stub for ``NetworkManager.Client.update_device``;
    see the AWS API documentation for full details.

    :type GlobalNetworkId: string
    :param GlobalNetworkId: [REQUIRED] The ID of the global network.
    :type DeviceId: string
    :param DeviceId: [REQUIRED] The ID of the device.
    :type Description: string
    :param Description: A description of the device. Maximum length 256
        characters.
    :type Type: string
    :param Type: The type of the device.
    :type Vendor: string
    :param Vendor: The vendor of the device. Maximum length 128 characters.
    :type Model: string
    :param Model: The model of the device. Maximum length 128 characters.
    :type SerialNumber: string
    :param SerialNumber: The serial number of the device. Maximum length
        128 characters.
    :type Location: dict
    :param Location: The location, as a dict with ``'Address'``,
        ``'Latitude'`` and ``'Longitude'`` string entries.
    :type SiteId: string
    :param SiteId: The ID of the site.
    :rtype: dict
    :return: ``{'Device': {'DeviceId', 'DeviceArn', 'GlobalNetworkId',
        'Description', 'Type', 'Vendor', 'Model', 'SerialNumber',
        'Location': {'Address', 'Latitude', 'Longitude'}, 'SiteId',
        'CreatedAt', 'State', 'Tags': [{'Key', 'Value'}]}}`` where State is
        one of ``'PENDING' | 'AVAILABLE' | 'DELETING' | 'UPDATING'``.

    May raise NetworkManager.Client exceptions: ValidationException,
    AccessDeniedException, ResourceNotFoundException, ConflictException,
    ThrottlingException, InternalServerException.
    """
    # Stub body: the real implementation lives in botocore.
    return None
def update_global_network(GlobalNetworkId=None, Description=None):
    """
    Updates an existing global network. To remove information for any of the
    parameters, specify an empty string.

    See also: AWS API Documentation

    :type GlobalNetworkId: string
    :param GlobalNetworkId: [REQUIRED] The ID of your global network.

    :type Description: string
    :param Description: A description of the global network.
        Length Constraints: Maximum length of 256 characters.

    :rtype: dict
    :return: ``{'GlobalNetwork': {...}}`` describing the updated network:
        ``GlobalNetworkId``, ``GlobalNetworkArn``, ``Description``,
        ``CreatedAt`` (datetime), ``State``
        (``'PENDING'|'AVAILABLE'|'DELETING'|'UPDATING'``) and ``Tags``
        (list of ``{'Key': str, 'Value': str}``).

    :raises: NetworkManager.Client.exceptions.ValidationException,
        AccessDeniedException, ResourceNotFoundException, ConflictException,
        ThrottlingException, InternalServerException
    """
    pass
def update_link(GlobalNetworkId=None, LinkId=None, Description=None, Type=None, Bandwidth=None, Provider=None):
    """
    Updates the details for an existing link. To remove information for any of
    the parameters, specify an empty string.

    See also: AWS API Documentation

    :type GlobalNetworkId: string
    :param GlobalNetworkId: [REQUIRED] The ID of the global network.

    :type LinkId: string
    :param LinkId: [REQUIRED] The ID of the link.

    :type Description: string
    :param Description: A description of the link (max 256 characters).

    :type Type: string
    :param Type: The type of the link (max 128 characters).

    :type Bandwidth: dict
    :param Bandwidth: The upload and download speed in Mbps:
        ``{'UploadSpeed': int, 'DownloadSpeed': int}``.

    :type Provider: string
    :param Provider: The provider of the link (max 128 characters).

    :rtype: dict
    :return: ``{'Link': {...}}`` describing the updated link: ``LinkId``,
        ``LinkArn``, ``GlobalNetworkId``, ``SiteId``, ``Description``,
        ``Type``, ``Bandwidth``, ``Provider``, ``CreatedAt`` (datetime),
        ``State`` (``'PENDING'|'AVAILABLE'|'DELETING'|'UPDATING'``) and
        ``Tags`` (list of ``{'Key': str, 'Value': str}``).

    :raises: NetworkManager.Client.exceptions.ValidationException,
        ServiceQuotaExceededException, AccessDeniedException,
        ResourceNotFoundException, ConflictException, ThrottlingException,
        InternalServerException
    """
    pass
def update_site(GlobalNetworkId=None, SiteId=None, Description=None, Location=None):
    """
    Updates the information for an existing site. To remove information for
    any of the parameters, specify an empty string.

    See also: AWS API Documentation

    :type GlobalNetworkId: string
    :param GlobalNetworkId: [REQUIRED] The ID of the global network.

    :type SiteId: string
    :param SiteId: [REQUIRED] The ID of your site.

    :type Description: string
    :param Description: A description of your site (max 256 characters).

    :type Location: dict
    :param Location: The site location:
        ``{'Address': str, 'Latitude': str, 'Longitude': str}``.

    :rtype: dict
    :return: ``{'Site': {...}}`` describing the updated site: ``SiteId``,
        ``SiteArn``, ``GlobalNetworkId``, ``Description``, ``Location``,
        ``CreatedAt`` (datetime), ``State``
        (``'PENDING'|'AVAILABLE'|'DELETING'|'UPDATING'``) and ``Tags``
        (list of ``{'Key': str, 'Value': str}``).

    :raises: NetworkManager.Client.exceptions.ValidationException,
        AccessDeniedException, ResourceNotFoundException, ConflictException,
        ThrottlingException, InternalServerException
    """
    pass
| StarcoderdataPython |
1972126 | import numpy
import pytest
import sympy
from mpmath import mp
import quadpy
mp.dps = 50
# Quadrature test problems as (integrand, a, b, exact_value) tuples.
# "Bailey example N" follows the numbering in Bailey's tanh-sinh test-problem
# papers. Integrands are sympy expressions (differentiated symbolically in the
# tests below); bounds/exact values deliberately mix exact sympy constants and
# mpmath floats.
test_cases = [
    (lambda t: 1, -1, +1, 2),
    (lambda t: 1, 0, +5, 5),
    (lambda t: t, -0, +1, sympy.Rational(1, 2)),
    (lambda t: t ** 2, -1, +1, sympy.Rational(2, 3)),
    # Bailey example 1:
    (lambda t: t * sympy.log(1 + t), 0, 1, sympy.Rational(1, 4)),
    # Bailey example 2:
    (lambda t: t ** 2 * sympy.atan(t), 0, 1, (sympy.pi - 2 + 2 * sympy.log(2)) / 12),
    # Bailey example 3:
    (
        lambda t: sympy.exp(t) * sympy.cos(t),
        0,
        mp.pi / 2,
        (sympy.exp(sympy.pi / 2) - 1) / 2,
    ),
    # Bailey example 4:
    (
        lambda t: sympy.atan(sympy.sqrt(2 + t ** 2))
        / (1 + t ** 2)
        / sympy.sqrt(2 + t ** 2),
        0,
        1,
        sympy.pi ** 2 * sympy.Rational(5, 96),
    ),
    # Bailey example 5:
    (lambda t: sympy.sqrt(t) * sympy.log(t), 0, 1, -sympy.Rational(4, 9)),
    # Bailey example 6 with singularity moved to 0.
    (lambda t: sympy.sqrt(2 * t - t ** 2), 0, 1, sympy.pi / 4),
    # Bailey example 8:
    (lambda t: sympy.log(t) ** 2, 0, 1, 2),
    # Bailey example 9:
    (lambda t: sympy.log(sympy.sin(t)), 0, mp.pi / 2, -mp.pi * mp.log(2) / 2),
    # Bailey example 11:
    (lambda s: 1 / (1 - 2 * s + 2 * s ** 2), 0, 1, mp.pi / 2),
    # Bailey example 13:
    (lambda s: sympy.exp(-((1 / s - 1) ** 2) / 2) / s ** 2, 0, 1, mp.sqrt(mp.pi / 2)),
    # Bailey example 14:
    (
        lambda s: sympy.exp(1 - 1 / s) * sympy.cos(1 / s - 1) / s ** 2,
        0,
        1,
        sympy.Rational(1, 2),
    ),
]
@pytest.mark.parametrize("f, a, b, exact", test_cases)
def test_tanh_sinh(f, a, b, exact):
    """mpmath-mode tanh-sinh quadrature matches the exact value, both with
    user-supplied first/second derivatives (fine error estimate) and without
    (crude estimate)."""
    # test fine error estimate
    mp.dps = 50
    tol = 10 ** (-mp.dps)
    # Accept one digit less than the requested tolerance.
    tol2 = 10 ** (-mp.dps + 1)
    t = sympy.Symbol("t")
    # Differentiate symbolically, then compile to mpmath callables.
    f_derivatives = {
        1: sympy.lambdify(t, sympy.diff(f(t), t, 1), modules=["mpmath"]),
        2: sympy.lambdify(t, sympy.diff(f(t), t, 2), modules=["mpmath"]),
    }
    value, _ = quadpy.tanh_sinh(
        f, a, b, tol, f_derivatives=f_derivatives, mode="mpmath"
    )
    assert abs(value - exact) < tol2
    # test with crude estimate
    value, _ = quadpy.tanh_sinh(f, a, b, tol, mode="mpmath")
    assert abs(value - exact) < tol2
    return
@pytest.mark.parametrize("f, a, b, exact", test_cases)
def test_tanh_sinh_numpy(f, a, b, exact):
    """Same cases as test_tanh_sinh, but compiled to numpy callables and run
    in default (float) mode at double-precision tolerances."""
    # test fine error estimate
    tol = 1.0e-14
    tol2 = 1.0e-13
    t = sympy.Symbol("t")
    f_derivatives = {
        1: sympy.lambdify(t, sympy.diff(f(t), t, 1), modules=["numpy"]),
        2: sympy.lambdify(t, sympy.diff(f(t), t, 2), modules=["numpy"]),
    }
    f = sympy.lambdify(t, f(t), modules=["numpy"])
    # Bounds may be exact sympy numbers; coerce to float for numpy mode.
    a = float(a)
    b = float(b)
    value, _ = quadpy.tanh_sinh(f, a, b, tol, f_derivatives=f_derivatives)
    assert abs(value - exact) < tol2
    # test with crude estimate
    value, _ = quadpy.tanh_sinh(f, a, b, tol)
    assert abs(value - exact) < tol2
    return
def test_tanh_sinh_numpy_example():
    """Documentation example: integrate exp(x)*cos(x) over [0, pi/2] with
    plain numpy callables and no user-supplied derivatives."""
    tol = 1.0e-14
    val, error_estimate = quadpy.tanh_sinh(
        lambda x: numpy.exp(x) * numpy.cos(x),
        0,
        numpy.pi / 2,
        tol,
        # Optional derivatives (kept here as reference for the fine estimate):
        # f_derivatives={
        #     1: lambda x: numpy.exp(x) * (numpy.cos(x) - numpy.sin(x)),
        #     2: lambda x: -2 * numpy.exp(x) * numpy.sin(x),
        # },
    )
    exact = (numpy.exp(numpy.pi / 2) - 1) / 2
    assert abs(val - exact) < tol
    return
# Test functions with singularities at both ends.
@pytest.mark.parametrize(
    "f_left, f_right, b, exact",
    # Bailey example 7 (f only has one singularity, but derivatives have two):
    [
        (
            lambda t: sympy.sqrt((1 - t) / (2 * t - t ** 2)),
            lambda t: sympy.sqrt(t / (1 - t ** 2)),
            1,
            (
                2
                * sympy.sqrt(sympy.pi)
                * sympy.gamma(sympy.Rational(3, 4))
                / sympy.gamma(sympy.Rational(1, 4))
            ),
        )
    ]
    # Bailey example 10:
    # singularity on the right, derivative singularities at both ends
    + [
        (
            lambda t: sympy.sqrt(sympy.tan(t)),
            lambda t: 1 / sympy.sqrt(sympy.tan(t)),
            mp.pi / 2,
            mp.pi / mp.sqrt(2),
        )
    ]
    # Bailey example 12:
    + [
        (
            lambda s: sympy.exp(1 - 1 / s) / sympy.sqrt(s ** 3 - s ** 4),
            lambda s: sympy.exp(s / (s - 1))
            / sympy.sqrt(s * (s * ((3 - s) * s - 3) + 1)),
            1,
            mp.sqrt(mp.pi),
        )
    ],
)
def test_singularities_at_both_ends(f_left, f_right, b, exact):
    """tanh_sinh_lr takes two representations of the integrand — f_left as a
    function of the distance from the left endpoint and f_right from the
    right endpoint — so integrands singular at both ends stay evaluable."""
    # test fine error estimate
    tol = 10 ** (-mp.dps)
    t = sympy.Symbol("t")
    # Dict maps derivative order -> callable; order 0 is the integrand itself.
    fl = {
        0: f_left,
        1: sympy.lambdify(t, sympy.diff(f_left(t), t, 1), modules=["mpmath"]),
        2: sympy.lambdify(t, sympy.diff(f_left(t), t, 2), modules=["mpmath"]),
    }
    fr = {
        0: f_right,
        1: sympy.lambdify(t, sympy.diff(f_right(t), t, 1), modules=["mpmath"]),
        2: sympy.lambdify(t, sympy.diff(f_right(t), t, 2), modules=["mpmath"]),
    }
    value, _ = quadpy.tanh_sinh_lr(fl, fr, b, tol, mode="mpmath")
    tol2 = 10 ** (-mp.dps + 1)
    assert abs(value - exact) < tol2
    # # test with crude estimate
    # fl = {0: f_left}
    # fr = {0: f_right}
    # value, _ = quadpy.tanh_sinh_lr(fl, fr, b, tol)
    # tol2 = 10**(-mp.dps + 2)
    # assert abs(value - exact) < tol2
    return
@pytest.mark.parametrize(
    "f, a, b, exact", [(lambda t: t ** 2, -1, +1, sympy.Rational(2, 3))]
)
def test_low_precision(f, a, b, exact):
    """Sanity check at low working precision (10 digits) with a loose
    tolerance."""
    # NOTE(review): this mutates the global mp.dps (set to 50 at module load)
    # and never restores it, so tests executed afterwards run at 10 digits —
    # confirm this ordering dependence is intended.
    mp.dps = 10
    t = sympy.Symbol("t")
    f_derivatives = {
        1: sympy.lambdify(t, sympy.diff(f(t), t, 1), modules=["mpmath"]),
        2: sympy.lambdify(t, sympy.diff(f(t), t, 2), modules=["mpmath"]),
    }
    tol = 1.0e-2
    value, _ = quadpy.tanh_sinh(
        f, a, b, tol, f_derivatives=f_derivatives, mode="mpmath"
    )
    assert abs(value - exact) < tol
    return
if __name__ == "__main__":
# test_tanh_sinh(
# lambda t: 1, 0, 1, 1
# )
# test_singularities_at_both_ends(
# lambda s: sympy.exp(1 - 1 / s) / sympy.sqrt(s ** 3 - s ** 4),
# lambda s: sympy.exp(s / (s - 1)) / sympy.sqrt(s * (s * ((3 - s) * s - 3) + 1)),
# 1,
# mp.sqrt(mp.pi),
# )
test_tanh_sinh_numpy_example()
| StarcoderdataPython |
11308390 | from aws_cdk import aws_ec2 as ec2
from aws_cdk import core
from aws_emr_launch.constructs.security_groups.emr import EMRSecurityGroups
def test_emr_security_groups():
    """The EMRSecurityGroups construct exposes all three security groups."""
    # Build a throwaway CDK app/stack with a VPC to host the construct.
    app = core.App()
    stack = core.Stack(app, 'test-stack')
    vpc = ec2.Vpc(stack, 'test-vpc')

    groups = EMRSecurityGroups(stack, 'test-security-groups', vpc=vpc)

    for group in (groups.service_group, groups.master_group, groups.workers_group):
        assert group
| StarcoderdataPython |
1887697 | import logging
import socket
import pickle
from select import select
from gen import generate_code_str
import time
import os
import numpy
import scipy
from net import *
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # Connect back to the coordinating bot server; hostname suggests a
    # docker-compose service name on the internal network.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(15)
    s.connect(('discord-img-bot', 4571))
    logging.info('Connected to server')
    # send container ID to server
    container_id = os.environ['HOSTNAME']
    logging.info(f'Sending container ID: {container_id}')
    send_bytes(s, container_id.encode('utf-8'))
    # No timeout while waiting for a job, then restore it for the reply phase.
    s.settimeout(None)
    raw_bytes = receive_bytes(s, None)
    s.settimeout(15)
    # SECURITY: pickle.loads + exec of server-supplied code. This worker is
    # clearly intended as a disposable sandbox container (it exits after one
    # job), so the server must be fully trusted; never expose this port.
    data = pickle.loads(raw_bytes)
    ret_dict = {}
    try:
        logging.info('Generating code')
        # Wrap the user-submitted body into executable source (see gen.py).
        user_code = generate_code_str(data['body'], data['argname'], data['global_name'])
        logging.info(f'Code generated:\n{user_code}')
        # The input array is exposed under the caller-chosen global name;
        # generated code is expected to leave its result in loc['output'].
        loc = {data['global_name']: data['array']}
        exec(user_code, {'np': numpy, 'sp': scipy}, loc)
        result = loc['output']
        logging.info(f'eval success')
        ret_dict = {'status': 'success', 'result': result}
    except Exception as e:
        # Report any failure (including bad user code) back to the server.
        logging.info(f'Error: {e}')
        ret_dict = {'status': 'error', 'error': str(e)}
    logging.info(f'Sending result to server: {str(ret_dict)}')
    ret_bytes = pickle.dumps(ret_dict)
    logging.info(f'Sending {len(ret_bytes)} bytes')
    send_bytes(s, ret_bytes)
    # Half-close so the server sees EOF, give it time to read, then exit;
    # the container is one-shot.
    s.shutdown(socket.SHUT_WR)
    time.sleep(5)
    exit(0)
| StarcoderdataPython |
16014 | import sys
from util.Timer import Timer
from util.FileOpener import FileOpener
from util.Logger import Logger
from util.PathExtractor import PathExtractor
from util.PathValidator import PathValidator
from service import SpacyModel
def lemmatize_text(file_path: str, timer: Timer):
    """Lemmatize every line of *file_path*, appending results to
    ``wiki.en.lemmatized.txt``.

    Each input line is tokenized by the English spaCy model and replaced with
    the space-joined lemmas of its tokens.

    :param file_path: path to the filtered wiki text file (one document per line)
    :param timer: running timer used for periodic progress logging
    """
    logger = Logger()
    output_file = FileOpener().get_new_file("wiki.en.lemmatized.txt", "a")
    try:
        with open(file_path, "r") as file:
            for line in file:
                # NOTE(review): " ".join drops the trailing newline unless
                # spaCy keeps it as a token — confirm the output line format.
                lemmatized_list = [word.lemma_ for word in SpacyModel.instance.get_en_spacy_line(line)]
                lemmatized_line = " ".join(lemmatized_list)
                output_file.write(lemmatized_line)
                logger.every_n_wiki_status(10, timer.get_duration())
            logger.every_n_wiki_status(1)
    finally:
        # Fix: the output handle was never closed before, risking lost
        # buffered output if the process died mid-run.
        output_file.close()
def main():
    """CLI entry point: expects exactly one argument, the filtered wiki file."""
    script_name: str = PathExtractor().get_file_name(sys.argv[0])
    # Guard clauses: bail out early on bad usage or an invalid path.
    if len(sys.argv) != 2:
        Logger().usage(f"python {script_name} <wiki.en.filtered.txt>")
        return
    file_path = sys.argv[1]
    if not PathValidator().is_valid_files([file_path]):
        return
    Logger().info(f'Input file: "{file_path}"')
    Logger().info("Starting to lemmatize text")
    timer = Timer()
    lemmatize_text(file_path, timer)
    Logger().finish_script(timer.get_duration(), script_name)
if __name__ == '__main__':
main()
| StarcoderdataPython |
4819557 | <filename>check_process.py<gh_stars>0
#!/usr/bin/env python
'''Checks processes'''
#===============================================================================
# Import modules
#===============================================================================
# Standard Library
import os
import subprocess
import logging
# Third party modules
# Application modules
#===============================================================================
# Check script is running
#===============================================================================
def is_running(script_name):
    '''Return True if another process whose command line contains
    *script_name* appears in the process table.

    Filters the `ps -ef` listing in Python (instead of the old chain of five
    grep subprocesses, which used Python-2-only `except Exception, e` syntax
    and leaked pipe handles). Lines containing our own PID, our parent's PID,
    or "grep" are ignored, matching the original pipeline's behaviour.
    On any failure the function errs on the safe side and returns True so the
    caller refuses to start a duplicate instance.'''
    logger = logging.getLogger('root')
    try:
        listing = subprocess.check_output(['ps', '-ef']).decode('utf-8', 'replace')
        own_pids = (str(os.getpid()), str(os.getppid()))
        matches = [line for line in listing.splitlines()
                   if script_name in line
                   and 'grep' not in line
                   and not any(pid in line for pid in own_pids)]
        if matches:
            logger.info('Script already runnning. Exiting...')
            logger.info('\n'.join(matches))
            return True
        return False
    except Exception as e:
        logger.error('System check failed ({error_v}). Exiting...'.format(
                                                                error_v=e))
        return True
| StarcoderdataPython |
11380501 | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/augment_PIL-img_filters.ipynb (unless otherwise specified).
__all__ = ['is_3dlut_row', 'read_lut', 'ApplyPILFilter']
# Cell
try:
from fastai.vision.all import *
except:
from fastai2.vision.all import *
from PIL import ImageFilter
from typing import List, Tuple, Callable, Union, Optional, Any
# Cell
def is_3dlut_row(row: list) -> bool:
    '''Return True if *row* contains exactly 3 float-parseable values.

    Used to separate LUT table lines (RGB triples) from headers/comments in a
    raw LUT file.
    '''
    row_values = []
    for val in row:
        try:
            row_values.append(float(val))
        # Fix: was a bare `except`, which would also swallow KeyboardInterrupt
        # and SystemExit; only parsing failures should be skipped.
        except (TypeError, ValueError):
            continue
    return len(row_values) == 3
def read_lut(path_lut: Union[str, Path], num_channels: int = 3):
    'Read a LUT from a raw file where each table line holds one RGB triple.'
    with open(path_lut) as handle:
        raw_lines = handle.read().splitlines()

    # The cube size N satisfies N**3 table entries; headers are assumed to be
    # few enough that rounding the cube root of the total line count works.
    size = round(len(raw_lines) ** (1 / 3))

    table = []
    for raw in raw_lines:
        tokens = raw.split(' ')
        if is_3dlut_row(tokens):
            table.append(tuple(float(tok) for tok in tokens))

    return ImageFilter.Color3DLUT(size, table, num_channels)
# Cell
class ApplyPILFilter(RandTransform):
    "Apply a `PIL.ImageFilter` and return as a PILImage"
    order = 0 # Apply before `ToTensor`
    def __init__(self, filters, p=1.):
        # `filters` may be a single PIL filter or a tuple/list/L of them.
        super().__init__(p=p)
        self.filter = filters
    def select_filter(self, o):
        'If multiple `filters` are given, select and apply one'
        if isinstance(self.filter, (tuple,list,L)):
            # Pick a random filter per call; this is the transform's randomness.
            rand_idx = np.random.randint(0, len(self.filter))
            return o.filter(self.filter[rand_idx])
        else: return o.filter(self.filter)
    # Earlier tensor-returning variant kept for reference:
    #def _encodes(self, o:(PILImage,TensorImage,str,Path)): return TensorImage(self.select_filter(o)).permute(2,0,1)
    def _encodes(self, o): return PILImage(self.select_filter(o))
    # NOTE(review): the two `encodes` definitions rely on fastai's
    # annotation-based TypeDispatch so both overloads are kept — confirm this
    # against the fastai version in use.
    def encodes(self, o:PILImage): return self._encodes(o)
    def encodes(self, o:(TensorImage,str,Path)): return self._encodes(PILImage.create(o))
270292 | <reponame>aplneto/Algoritmos-IF969
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 16:13:59 2019
@author: apln2
"""
class _No:
    '''
    Helper class (linked-list node).

    Used inside the implementation of the linear-structure classes. The list
    implemented below is singly linked, i.e. each node holds only one
    reference, to the following node.
    '''
    def __init__(self, valor, seguinte = None):
        self.valor = valor
        self.seguinte = seguinte
    def __str__(self):
        '''
        Method used when printing the object with the print function.
        :return str: this method must return a string
        '''
        return str(self.valor)
class Lista:
    '''
    Singly linked list: a chain of nodes hanging off a structural head node,
    often called the sentinel.
    '''
    def __init__(self):
        '''
        The list starts empty, so the only node present is the sentinel, a
        structural node sitting between the start and the end of the list.
        '''
        self.sentinela = _No(None)

    def anexar(self, valor):
        '''
        Append *valor* to the list (insertion at the end): walk to the last
        node and hang a new node after it.
        '''
        ultimo = self.sentinela
        while ultimo.seguinte is not None:
            ultimo = ultimo.seguinte
        ultimo.seguinte = _No(valor)

    def __str__(self):
        '''Render the list as "[a, b, c]".'''
        partes = []
        no = self.sentinela.seguinte
        while no is not None:
            partes.append(no.valor.__str__())
            no = no.seguinte
        return '[{}]'.format(', '.join(partes))
class Musica:
    '''A song: a title plus its duration.'''
    def __init__(self, titulo, dur):
        self.titulo = titulo
        self.tempo = dur

    def __str__(self):
        return f"{self.titulo} - {self.tempo}"
class Playlist(Lista):
    '''
    Playlist subclass of Lista, created to make reading the command stream in
    the main program easier.

    A cursor (ponteiro) marks the current position; commands are executed by
    moving and/or reading the cursor. See each method for details.
    '''
    def __init__(self):
        '''The cursor starts on the sentinel (a structural node holding no
        value), i.e. before the first song.'''
        Lista.__init__(self)
        self.ponteiro = self.sentinela

    def reproduzir(self):
        '''Play the next song: advance the cursor one node and return the
        value stored there (a Musica object in this implementation).'''
        self.ponteiro = self.ponteiro.seguinte
        return self.ponteiro.valor

    def repetir(self):
        '''Play the current song again: the cursor does not move, the value
        already played once is simply returned.'''
        return self.ponteiro.valor

    def pular(self):
        '''Skip the upcoming song: the cursor advances but nothing is played.'''
        self.ponteiro = self.ponteiro.seguinte

    def finalizar(self):
        '''End the playlist: place the cursor past the last song (here, None).'''
        self.ponteiro = None
if __name__ == "__main__":
T = int(input())
for t in range(T):
p = Playlist()
M = int(input())
for m in range (M):
titulo = input()
dur = float(input())
p.anexar(Musica(titulo, dur))
comandos = input()
tempo = 0
reproduzidas = ''
for c in comandos:
if c == 'r':
musica = p.reproduzir()
tempo += musica.tempo
if reproduzidas: reproduzidas += ', '
reproduzidas += musica.titulo
elif c == 'v':
musica = p.repetir()
tempo += musica.tempo
if reproduzidas: reproduzidas += ', '
reproduzidas += musica.titulo
elif c == 'p':
musica = p.pular()
else:
p.finalizar()
break
print("Viagem {}: {}".format(t+1, tempo))
print(reproduzidas) | StarcoderdataPython |
1992570 | from __future__ import absolute_import, unicode_literals
# Base endpoint for Facebook Graph API requests.
GRAPH_URL = 'https://graph.facebook.com'
# Graph API version path segment (e.g. 'v2.12'); empty presumably selects the
# unversioned default endpoint — confirm against the consuming client.
API_VERSION = ''
# App credentials; expected to be set by the application before use.
APP_SECRET = None
APP_TOKEN = None
# Debug switches.
DEBUG = False
DEBUG_REQUESTS = DEBUG  # copies DEBUG's value at import time only
DEBUG_HEADERS = False
TESTING = False
# Client behaviour knobs; exact semantics are defined by the consuming client.
ETAGS = True
CACHE = None
DEDUP = True
MIGRATIONS = {}
RELATIVE_URL_HOOK = None
SUMMARY_INFO = True
6470990 | <filename>sdk/python/lib/pulumi/_utils.py
# Copyright 2016-2020, Pulumi Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
# Empty-body reference functions. These must stay exactly as written: the
# detection below compares compiled bytecode (co_code) and constants against
# them, so adding any statement — or even a docstring to `_empty` /
# `_empty_lambda` — would change their bytecode/constants and break it.
def _empty():
    ...
def _empty_doc():
    """Empty function docstring."""
    ...
_empty_lambda = lambda: None
_empty_lambda_doc = lambda: None
_empty_lambda_doc.__doc__ = """Empty lambda docstring."""
def _consts(fn: typing.Callable) -> tuple:
    """
    Returns a tuple of the function's constants excluding the docstring.
    """
    return tuple(x for x in fn.__code__.co_consts if x != fn.__doc__)
# Precompute constants for each of the empty functions.
_consts_empty = _consts(_empty)
_consts_empty_doc = _consts(_empty_doc)
_consts_empty_lambda = _consts(_empty_lambda)
_consts_empty_lambda_doc = _consts(_empty_lambda_doc)
def is_empty_function(fn: typing.Callable) -> bool:
    """
    Returns true if the function is empty.

    A function counts as empty when its bytecode and non-docstring constants
    match one of the four templates above: a plain empty def, an empty def
    with a docstring, an empty lambda, or an empty lambda with a docstring.
    """
    consts = _consts(fn)
    return (
        (fn.__code__.co_code == _empty.__code__.co_code and consts == _consts_empty) or
        (fn.__code__.co_code == _empty_doc.__code__.co_code and consts == _consts_empty_doc) or
        (fn.__code__.co_code == _empty_lambda.__code__.co_code and consts == _consts_empty_lambda) or
        (fn.__code__.co_code == _empty_lambda_doc.__code__.co_code and consts == _consts_empty_lambda_doc)
    )
| StarcoderdataPython |
3336614 | <gh_stars>1-10
from dataclasses import InitVar, dataclass, field
import numpy as np # type: ignore
from survival_evaluation.types import NumericArrayLike
def to_array(array_like: NumericArrayLike, to_boolean: bool = False) -> np.array:
    """Coerce *array_like* to a validated 1-d array of non-negative values.

    With ``to_boolean=True`` the values are additionally validated as 0/1
    event indicators and cast to bool.
    """
    values = np.asarray(array_like)
    shape = np.shape(values)
    if len(shape) > 1:
        raise ValueError(
            f"Input should be a 1-d array. Got a shape of {shape} instead."
        )
    if np.any(values < 0):
        raise ValueError("All event times must be greater than or equal to zero.")
    if not to_boolean:
        return values
    check_indicators(values)
    return values.astype(bool)
def check_indicators(indicators: np.array) -> None:
    """Raise ValueError unless every entry of *indicators* is 0 or 1."""
    is_valid = np.logical_or(indicators == 0, indicators == 1)
    if not all(is_valid):
        raise ValueError(
            "Event indicators must be 0 or 1 where 0 indicates censorship and 1 is an event."
        )
def validate_size(
    event_times: NumericArrayLike,
    event_indicators: NumericArrayLike,
    predictions: NumericArrayLike,
):
    """Raise ValueError unless all three inputs have identical shapes."""
    shapes = {
        np.shape(event_times),
        np.shape(event_indicators),
        np.shape(predictions),
    }
    if len(shapes) != 1:
        raise ValueError("All three inputs must be of the same shape.")
@dataclass
class KaplanMeier:
    """Kaplan-Meier (product-limit) estimator of the survival function.

    Built from right-censored data: ``event_times`` are observed times and
    ``event_indicators`` mark events (truthy) vs. censoring. After
    construction, ``survival_times`` holds the sorted unique observed times
    and ``survival_probabilities`` the estimated survival at each.
    """
    event_times: InitVar[np.array]
    event_indicators: InitVar[np.array]
    survival_times: np.array = field(init=False)
    survival_probabilities: np.array = field(init=False)
    def __post_init__(self, event_times, event_indicators):
        # Sort primarily by time; indicator is the secondary lexsort key.
        index = np.lexsort((event_indicators, event_times))
        unique_times = np.unique(event_times[index], return_counts=True)
        self.survival_times = unique_times[0]
        # Number still at risk just before each unique time (reverse cumsum
        # of per-time counts).
        population_count = np.flip(np.flip(unique_times[1]).cumsum())
        # Start offset of each unique-time bucket within the sorted arrays.
        event_counter = np.append(0, unique_times[1].cumsum()[:-1])
        # Build (start, end) index pairs for reduceat so each bucket's event
        # indicators are summed; the [::2] below keeps only the bucket sums.
        event_ind = list()
        for i in range(np.size(event_counter[:-1])):
            event_ind.append(event_counter[i])
            event_ind.append(event_counter[i + 1])
        event_ind.append(event_counter[-1])
        event_ind.append(len(event_indicators))
        events = np.add.reduceat(np.append(event_indicators[index], 0), event_ind)[::2]
        # Product-limit recursion: S(t_k) = S(t_{k-1}) * (1 - d_k / n_k).
        self.survival_probabilities = np.empty(population_count.size)
        survival_probability = 1
        counter = 0
        for population, event_num in zip(population_count, events):
            survival_probability *= 1 - event_num / population
            self.survival_probabilities[counter] = survival_probability
            counter += 1
    def predict(self, prediction_times: np.array):
        """Return the estimated survival probability at each prediction time."""
        probability_index = np.digitize(prediction_times, self.survival_times)
        # NOTE(review): np.digitize returns indices in [0, size], never
        # size + 1, so this clamp looks unreachable — confirm the intent.
        probability_index = np.where(
            probability_index == self.survival_times.size + 1,
            probability_index - 1,
            probability_index,
        )
        # Prepend S=1 for times before the first observed time.
        probabilities = np.append(1, self.survival_probabilities)[probability_index]
        return probabilities
@dataclass
class KaplanMeierArea(KaplanMeier):
    """Kaplan-Meier estimator extended with the area under the survival curve,
    used to compute best-guess (conditional expected) event times for
    censored subjects."""
    area_times: np.array = field(init=False)
    area_probabilities: np.array = field(init=False)
    area: np.array = field(init=False)
    def __post_init__(self, event_times, event_indicators):
        super().__post_init__(event_times, event_indicators)
        area_probabilities = np.append(1, self.survival_probabilities)
        area_times = np.append(0, self.survival_times)
        if self.survival_probabilities[-1] != 0:
            # Curve does not reach zero: extend it linearly (from (0, 1)
            # through the last point) down to S = 0.
            slope = (area_probabilities[-1] - 1) / area_times[-1]
            zero_survival = -1 / slope
            area_times = np.append(area_times, zero_survival)
            area_probabilities = np.append(area_probabilities, 0)
        # Remaining area under the step curve from each time to the end
        # (reverse cumulative sum of rectangle areas).
        area_diff = np.diff(area_times, 1)
        area = np.flip(np.flip(area_diff * area_probabilities[0:-1]).cumsum())
        self.area_times = np.append(area_times, np.inf)
        self.area_probabilities = area_probabilities
        self.area = np.append(area, 0)
    def best_guess(self, censor_times: np.array):
        """Return the expected event time for subjects censored at
        *censor_times*: censor time plus remaining area under the curve
        divided by the survival probability at censoring."""
        surv_prob = self.predict(censor_times)
        censor_indexes = np.digitize(censor_times, self.area_times)
        # NOTE(review): same apparently-unreachable clamp as in predict().
        censor_indexes = np.where(
            censor_indexes == self.area_times.size + 1,
            censor_indexes - 1,
            censor_indexes,
        )
        # Partial rectangle from the censor time to the next curve knot ...
        censor_area = (
            self.area_times[censor_indexes] - censor_times
        ) * self.area_probabilities[censor_indexes - 1]
        # ... plus the precomputed tail area beyond that knot.
        censor_area += self.area[censor_indexes]
        return censor_times + censor_area / surv_prob
| StarcoderdataPython |
6514161 | import_batch['contacts'] = {k: v for (k, v) in contacts.items(
) if k in fields or k in CONFIG['departments'][dept]}
for k, v in contacts.items():
contact = {}
contact['mobileNumber'] = v['sis']['Mobile']
contact['uniqueCampusId'] = k
contact['firstName'] = v['sis']['FirstName']
contact['lastName'] = v['sis']['LastName']
contact['optedOut'] = v['opt_newstate']
contact['customFields'] = v[ns]
contact['allowMobileUpdate'] = False
# Add remote state dict inside existing contacts dict
# Fetch each local contact from Cadence and add to dict with PCID as key
# {'P000000000': {'remote': {'foo':'bar'}, 'sis': {'foo':'bar'}, ...}}
for k, v in contacts.items():
if 'lss' in v and 'MobileNumber' in v['lss']:
mobile = v['lss']['mobileNumber']
elif 'sis' in v:
# Might as well see if any SIS contacts were manually added at remote end
mobile = v['sis']['mobileNumber']
if mobile:
remote = cadence_get_contact(mobile)
if remote:
contacts[k]['remote'] = remote
# Update opt-in/out status for each user that exists on remote.
# Example: ((False, True), (False, False), (False, True))
# ...user opted out in Cadence. SIS and LSS need to be changed.
if 'remote' in v:
optin_local = pc_get_sms(k, dept)
# Cadence considers True = Opt Out. We will use PowerCampus method, True = Opt In.
opt_newstate = eval_sync_state(
optin_local, not v['remote']['optedOut'], not v['lss']['optedOut'])
# Store new state
contacts[k]['ns']['optedOut'] = opt_newstate[0][0]
# Update PowerCampus if necessary.
if opt_newstate[0][1]:
pc_update_opt()
def eval_sync_state(local, remote, sync):
    """Decide how to synchronise a flag that can be changed on either end.

    Intended for the Opt-In flag, which can be toggled both locally and
    remotely. Given the current local state, remote state, and the state
    recorded at the last sync, the truth table below determines what each
    side should become:

    Local   Remote  LSS     Action
    0       0       0       None
    1       0       0       Remote and LSS = 1
    0       1       0       Local and LSS = 1
    0       0       1       LSS = 0
    1       0       1       Local and LSS = 0
    0       1       1       Remote and LSS = 0
    1       1       0       LSS = 1
    1       1       1       None

    Keyword arguments:
    local -- state of the local database
    remote -- state of the remote database
    sync -- last sync state

    Returns a tuple of (target_state, changed) boolean pairs, one each
    for local, remote and sync.
    """
    transitions = {
        (0, 0, 0): ((0, 0), (0, 0), (0, 0)),
        (1, 0, 0): ((1, 0), (1, 1), (1, 1)),
        (0, 1, 0): ((1, 1), (1, 0), (1, 1)),
        (0, 0, 1): ((0, 0), (0, 0), (0, 1)),
        (1, 0, 1): ((0, 1), (0, 0), (0, 1)),
        (0, 1, 1): ((0, 0), (0, 1), (0, 1)),
        (1, 1, 0): ((1, 0), (1, 0), (1, 1)),
        (1, 1, 1): ((1, 0), (1, 0), (1, 0))
    }
    raw = transitions.get((local, remote, sync))
    # Convert the 0/1 table entries into booleans before returning.
    return tuple(tuple(map(bool, pair)) for pair in raw)
def pc_get_sms(pcid, dept):
    """Look up the SMS opt-in flag in PowerCampus Telecommunications.

    Returns True/False for an 'A'ctive/'I'nactive status row, or None
    when no Telecommunications row exists for the person/department.
    """
    CURSOR.execute(
        '''select [STATUS] from [CAMPUS6].[DBO].[TELECOMMUNICATIONS]
        where [PEOPLE_ORG_CODE_ID] = ? AND [COM_TYPE] = ?''', pcid, 'SMS' + dept)
    record = CURSOR.fetchone()
    if record is None:
        return None
    # 'A' (active) means opted in; 'I' (inactive) means opted out.
    return {'A': True, 'I': False}[record.STATUS]
    # TEMP: Delete contacts from Cadence who shouldn't have been uploaded to begin with
    # NOTE(review): this fragment is unreachable — both branches above
    # return — and it references ``contact``/``dept`` from an unrelated
    # scope; it appears to be a misplaced leftover and should be removed
    # or relocated.
    if contact['optedOut'] is None:
        r = HTTP_SESSION.delete(api_url+'/v2/contacts/' +
                                dept + '/' + contact['mobileNumber'])
        r.raise_for_status()
def cadence_get_contact(mobile):
    """Fetch a single contact from the Cadence API by mobile number.

    Returns the decoded JSON payload, or None when the API answers 404
    (contact does not exist remotely). Any other HTTP error is re-raised.
    """
    try:
        response = HTTP_SESSION.get(api_url + '/v2/contacts/SS/' + mobile)
        response.raise_for_status()
    except requests.HTTPError:
        # A 404 simply means the contact is absent on the remote side.
        if response.status_code != 404:
            raise
        return None
    return json.loads(response.text)
| StarcoderdataPython |
3580877 | # Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random as py_random
import numpy as np
import nnabla as nn
from nnabla.logger import logger
def set_global_seed(seed: int) -> None:
    """Seed NumPy, Python's ``random`` and NNabla RNGs for reproducible runs."""
    np.random.seed(seed=seed)
    py_random.seed(seed)
    nn.seed(seed)
    logger.info("Set seed to {}".format(seed))
def save_snapshot(save_dir: str) -> None:
    """Save the current NNabla parameters to ``save_dir/pointnet_classification.h5``.

    Creates ``save_dir`` if it does not exist.
    """
    logger.info("Save network parameters")
    os.makedirs(save_dir, exist_ok=True)
    model_file_path = os.path.join(save_dir, "pointnet_classification.h5")
    nn.save_parameters(path=model_file_path)
def load_snapshot(load_dir: str, file_name: str = "pointnet_classification.h5") -> None:
    """Load NNabla parameters from ``load_dir/file_name`` into the global scope."""
    logger.info("Load network parameters")
    model_file_path = os.path.join(load_dir, file_name)
    nn.load_parameters(path=model_file_path)
def categorical_accuracy(pred: np.ndarray, label: np.ndarray) -> np.ndarray:
    """Fraction of rows whose arg-max class equals the (flattened) label."""
    predicted_classes = pred.argmax(axis=1)
    return np.mean(predicted_classes == label.flatten())
def get_decayed_learning_rate(
    num_epoch: int, learning_rate: float, decay_step: int = 20, decay_rate: float = 0.7
) -> float:
    """Return the learning rate, multiplied by ``decay_rate`` whenever
    ``num_epoch`` is an exact multiple of ``decay_step``; otherwise the
    rate is returned unchanged."""
    if num_epoch % decay_step:
        return learning_rate
    return learning_rate * decay_rate
| StarcoderdataPython |
11224022 | import os
import csv
def match(matched_dir,
          matching_dir,
          output_dir,
          matched_key_column=0,
          matching_key_column=0,
          matched_column=-1,
          matching_column=-1,
          matched_header=True,
          matching_header=True,
          output_header=True,
          insert=True,
          delimiter=','):
    """ Match two files.

    Match two files with the key (a sorted-merge inner join). The keys in
    both files must be in ascending order, since each file is walked
    exactly once; matched rows without a partner are dropped. Keys are
    compared as strings (lexicographically).

    If the matched_dir is directed to the csv file as follows:
    idx,attr1
    E1,RRR
    E2,OOO
    E3,MMM
    and the matching_dir is directed to the csv file as follows:
    idx,attr2
    E1,LLLLLLLLLLLLL
    E3,MMMMMMMMMMMMM
    E7,AAAAAAAAAAAAA
    then match(matched_dir, matching_dir, output_dir) will creates a csv file
    as follows:
    idx,attr1,attr2
    E1,RRR,LLLLLLLLLLLLL
    E3,MMM,MMMMMMMMMMMMM

    Args:
        matched_dir: directory of matched file. The matching column will be
            added to this file according to the key.
        matching_dir: directory of matching file.
        output_dir: path of the output file receiving the merged rows.
        matched_key_column: column index of the matched key.
        matching_key_column: column index of the matching key.
        matched_column: column to be inserted or overwritten.
        matching_column: column to be added to the matched file.
        matched_header: whether the matched csv file contains a header.
        matching_header: whether the matching csv file contains a header.
        output_header: whether to write a header row to the output file.
        insert: insertion if true; else overwriting.
        delimiter: field delimiter used for both reading and writing.
    """
    with open(matched_dir, 'r') as matched, \
            open(matching_dir, 'r') as matching, \
            open(output_dir, 'w', newline='') as output:
        # Bug fix: the writer previously hard-coded commas; honor the
        # caller-supplied delimiter on output as well as input.
        csv_writer = csv.writer(output, delimiter=delimiter)
        if matched_header is True:
            matched_headers = \
                matched.readline().split('\n')[0].split(delimiter)
        if matching_header is True:
            matching_headers = \
                matching.readline().split('\n')[0].split(delimiter)
        if output_header is True:
            if matched_header is not True or matching_header is not True:
                print('Header information not found.')
            else:
                if insert is True:
                    # ``% (len + 1)`` normalizes a negative index against
                    # the row as it will be after insertion.
                    matched_headers.insert(
                        matched_column % (len(matched_headers) + 1),
                        matching_headers[matching_column]
                    )
                else:
                    matched_headers[matched_column] = \
                        matching_headers[matching_column]
                csv_writer.writerow(matched_headers)
        matching_line = matching.readline()
        while True:
            matched_line = matched.readline()
            if not matched_line:
                break
            matched_entry = matched_line.split('\n')[0].split(delimiter)
            output_entry = matched_entry.copy()
            # Advance through the matching file until its key catches up
            # with the current matched key.
            while True:
                if not matching_line:
                    break
                matching_entry = matching_line.split('\n')[0].split(delimiter)
                if matching_entry[matching_key_column] < \
                        matched_entry[matched_key_column]:
                    matching_line = matching.readline()
                    continue
                elif matching_entry[matching_key_column] == \
                        matched_entry[matched_key_column]:
                    if insert is True:
                        output_entry.insert(
                            matched_column % (len(matched_entry) + 1),
                            matching_entry[matching_column]
                        )
                    else:
                        output_entry[matched_column] = \
                            matching_entry[matching_column]
                    csv_writer.writerow(output_entry)
                    break
                else:
                    # Matching key overtook the matched key: no partner
                    # exists, so this matched row is skipped.
                    break
if __name__ == "__main__":
    # Example invocation: join FILE2's last column into FILE1 on the first
    # column, writing the result to FILE3 (hard-coded, machine-specific paths).
    match(
        'C:\\Users\\white\\Desktop\\FILE1.CSV',
        'C:\\Users\\white\\Desktop\\FILE2.CSV',
        'C:\\Users\\white\\Desktop\\FILE3.CSV'
    )
| StarcoderdataPython |
56178 | <reponame>yudame/prakti-api
from django.test import TestCase
from ..test_behaviors import TimestampableTest
from ...models import Address
class AddressTest(TimestampableTest, TestCase):
    """Run the shared Timestampable behavior tests against the Address model."""
    # Model under test — presumably consumed by the TimestampableTest
    # mixin's generic assertions; confirm against the mixin definition.
    model = Address
| StarcoderdataPython |
9757683 |
import tensorflow as tf
from tensorflow.python.compiler.tensorrt import trt_convert as trt
# NOTE: TF1-style session API combined with TF-TRT; this will not run
# under TF2 eager mode without a compatibility shim.
with tf.Session() as sess:
    # First deserialize your frozen graph.
    # Bug fix: the original used typographic quotes (“ ” ‘ ’), which are
    # a Python syntax error; replaced with ASCII quotes.
    with tf.gfile.GFile("/path/to/your/frozen/graph.pb", 'rb') as f:
        frozen_graph = tf.GraphDef()
        frozen_graph.ParseFromString(f.read())
    # Now you can create a TensorRT inference graph from your
    # frozen graph:
    converter = trt.TrtGraphConverter(
        input_graph_def=frozen_graph,
        nodes_blacklist=['logits', 'classes'])  # output nodes
    trt_graph = converter.convert()
    # Import the TensorRT graph into a new graph and run:
    output_node = tf.import_graph_def(
        trt_graph,
        return_elements=['logits', 'classes'])
    sess.run(output_node)
| StarcoderdataPython |
1885696 | try:
from libs.layers import *
from libs.utils_ft import *
except:
from layers import *
from utils_ft import *
import copy
import os
import sys
from collections import defaultdict
from typing import Optional
import torch
import torch.nn as nn
from torch import Tensor
from torch.nn import MultiheadAttention, TransformerEncoderLayer
from torch.nn.init import constant_, xavier_uniform_
from torchinfo import summary
# Make the package root importable when this module is run as a script.
current_path = os.path.dirname(os.path.abspath(__file__))
SRC_ROOT = os.path.dirname(current_path)
sys.path.append(SRC_ROOT)
# Attribute names that SimpleTransformer/FourierTransformer2D copy onto
# the model instance (via setattr in _get_setting) in addition to the
# keys of the caller-supplied config dict; absent keys default to None.
ADDITIONAL_ATTR = ['normalizer', 'raw_laplacian', 'return_latent',
                   'residual_type', 'norm_type', 'norm_eps', 'boundary_condition',
                   'upscaler_size', 'downscaler_size', 'spacial_dim', 'spacial_fc',
                   'regressor_activation', 'attn_activation',
                   'downscaler_activation', 'upscaler_activation',
                   'encoder_dropout', 'decoder_dropout', 'ffn_dropout']
class SimpleTransformerEncoderLayer(nn.Module):
    """Encoder layer pairing SimpleAttention (fourier/galerkin/linear/softmax
    variants) with a position-wise FeedForward block, with configurable
    residual sign, layer/attention normalization and dropout."""
    def __init__(self,
                 d_model=96,
                 pos_dim=1,
                 n_head=2,
                 dim_feedforward=512,
                 attention_type='fourier',
                 pos_emb=False,
                 layer_norm=True,
                 attn_norm=None,
                 norm_type='layer',
                 norm_eps=None,
                 batch_norm=False,
                 attn_weight=False,
                 xavier_init: float=1e-2,
                 diagonal_weight: float=1e-2,
                 symmetric_init=False,
                 residual_type='add',
                 activation_type='relu',
                 dropout=0.1,
                 ffn_dropout=None,
                 debug=False,
                 ):
        super(SimpleTransformerEncoderLayer, self).__init__()
        dropout = default(dropout, 0.05)
        # Vanilla linear/softmax attention gets a fixed, larger dropout.
        if attention_type in ['linear', 'softmax']:
            dropout = 0.1
        ffn_dropout = default(ffn_dropout, dropout)
        norm_eps = default(norm_eps, 1e-5)
        # Attention-side normalization defaults to the complement of
        # layer norm; at least one of the two is always enabled.
        attn_norm = default(attn_norm, not layer_norm)
        if (not layer_norm) and (not attn_norm):
            attn_norm = True
        norm_type = default(norm_type, 'layer')
        self.attn = SimpleAttention(n_head=n_head,
                                    d_model=d_model,
                                    attention_type=attention_type,
                                    diagonal_weight=diagonal_weight,
                                    xavier_init=xavier_init,
                                    symmetric_init=symmetric_init,
                                    pos_dim=pos_dim,
                                    norm=attn_norm,
                                    norm_type=norm_type,
                                    eps=norm_eps,
                                    dropout=dropout)
        self.d_model = d_model
        self.n_head = n_head
        self.pos_dim = pos_dim
        self.add_layer_norm = layer_norm
        if layer_norm:
            self.layer_norm1 = nn.LayerNorm(d_model, eps=norm_eps)
            self.layer_norm2 = nn.LayerNorm(d_model, eps=norm_eps)
        dim_feedforward = default(dim_feedforward, 2*d_model)
        self.ff = FeedForward(in_dim=d_model,
                              dim_feedforward=dim_feedforward,
                              batch_norm=batch_norm,
                              activation=activation_type,
                              dropout=ffn_dropout,
                              )
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.residual_type = residual_type  # 'add'/'plus' or subtractive
        self.add_pos_emb = pos_emb
        if self.add_pos_emb:
            self.pos_emb = PositionalEncoding(d_model)
        self.debug = debug
        self.attn_weight = attn_weight
        self.__name__ = attention_type.capitalize() + 'TransformerEncoderLayer'
    def forward(self, x, pos=None, weight=None):
        '''
        - x: node feature, (batch_size, seq_len, n_feats)
        - pos: position coords, needed in every head
        Remark:
            - for n_head=1, no need to encode positional
            information if coords are in features
        '''
        if self.add_pos_emb:
            # PositionalEncoding expects (seq_len, batch, feats).
            x = x.permute((1, 0, 2))
            x = self.pos_emb(x)
            x = x.permute((1, 0, 2))
        if pos is not None and self.pos_dim > 0:
            att_output, attn_weight = self.attn(
                x, x, x, pos=pos, weight=weight)  # encoder no mask
        else:
            att_output, attn_weight = self.attn(x, x, x, weight=weight)
        # Residual connection: additive by default, subtractive otherwise.
        if self.residual_type in ['add', 'plus'] or self.residual_type is None:
            x = x + self.dropout1(att_output)
        else:
            x = x - self.dropout1(att_output)
        if self.add_layer_norm:
            x = self.layer_norm1(x)
        x1 = self.ff(x)
        x = x + self.dropout2(x1)
        if self.add_layer_norm:
            x = self.layer_norm2(x)
        if self.attn_weight:
            return x, attn_weight
        else:
            return x
class GalerkinTransformerDecoderLayer(nn.Module):
    r"""
    A lite implementation of the decoder layer based on linear causal attention
    adapted from the TransformerDecoderLayer in PyTorch
    https://github.com/pytorch/pytorch/blob/afc1d1b3d6dad5f9f56b1a4cb335de109adb6018/torch/nn/modules/transformer.py#L359
    """
    def __init__(self, d_model,
                 nhead,
                 pos_dim = 1,
                 dim_feedforward=512,
                 attention_type='galerkin',
                 layer_norm=True,
                 attn_norm=None,
                 norm_type='layer',
                 norm_eps=1e-5,
                 xavier_init: float=1e-2,
                 diagonal_weight: float = 1e-2,
                 dropout=0.05,
                 ffn_dropout=None,
                 activation_type='relu',
                 device=None,
                 dtype=None,
                 debug=False,) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype, }
        super(GalerkinTransformerDecoderLayer, self).__init__()
        ffn_dropout = default(ffn_dropout, dropout)
        self.debug = debug
        # Self-attention uses the configured (e.g. galerkin) variant...
        self.self_attn = SimpleAttention(nhead, d_model,
                                         attention_type=attention_type,
                                         pos_dim=pos_dim,
                                         norm=attn_norm,
                                         eps=norm_eps,
                                         norm_type=norm_type,
                                         diagonal_weight=diagonal_weight,
                                         xavier_init=xavier_init,
                                         dropout=dropout,)
        # ...while cross-attention over the encoder memory is causal.
        self.multihead_attn = SimpleAttention(nhead, d_model,
                                              attention_type='causal',
                                              pos_dim=pos_dim,
                                              norm=attn_norm,
                                              eps=norm_eps,
                                              norm_type=norm_type,
                                              diagonal_weight=diagonal_weight,
                                              xavier_init=xavier_init,
                                              dropout=dropout,)
        dim_feedforward = default(dim_feedforward, 2*d_model)
        self.ff = FeedForward(in_dim=d_model,
                              dim_feedforward=dim_feedforward,
                              activation=activation_type,
                              dropout=ffn_dropout,
                              )
        self.dropout = nn.Dropout(ffn_dropout)
        # NOTE(review): ``linear2`` and ``activation`` (set below) are not
        # referenced in forward — likely leftovers from the PyTorch
        # template this was adapted from; confirm before removing.
        self.linear2 = nn.Linear(dim_feedforward, d_model, **factory_kwargs)
        self.add_layer_norm = layer_norm
        if self.add_layer_norm:
            self.norm1 = nn.LayerNorm(d_model, eps=norm_eps, **factory_kwargs)
            self.norm2 = nn.LayerNorm(d_model, eps=norm_eps, **factory_kwargs)
            self.norm3 = nn.LayerNorm(d_model, eps=norm_eps, **factory_kwargs)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.activation = F.relu
    def forward(self, x: Tensor, memory: Tensor,
                tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,) -> Tensor:
        r"""Pass the inputs (and mask) through the decoder layer.
        Args:
            tgt: the sequence to the decoder layer (required).
            memory: the sequence from the last layer of the encoder (required).
            tgt_mask: the mask for the tgt sequence (optional).
            memory_mask: the mask for the memory sequence (optional).
        Shape:
            see the docs in Transformer class.
        """
        if self.add_layer_norm:
            # Post-norm arrangement: norm applied after each residual add.
            x = self.norm1(x + self._sa_block(x, tgt_mask))
            x = self.norm2(x + self._mha_block(x, memory, memory_mask))
            x = self.norm3(x + self._ff_block(x))
        else:
            x = x + self._sa_block(x, tgt_mask)
            x = x + self._mha_block(x, memory, memory_mask)
            x = x + self._ff_block(x)
        return x
    # self-attention block
    # NOTE(review): passes ``attn_mask=`` here but ``mask=`` in
    # _mha_block — confirm which keyword SimpleAttention expects.
    def _sa_block(self, x: Tensor, attn_mask: Optional[Tensor]) -> Tensor:
        x = self.self_attn(x, x, x, attn_mask=attn_mask,)[0]
        return self.dropout1(x)
    # multihead attention block
    def _mha_block(self, x: Tensor, mem: Tensor, attn_mask: Optional[Tensor]) -> Tensor:
        x = self.multihead_attn(x, mem, mem, mask=attn_mask,)[0]
        return self.dropout2(x)
    # feed forward block
    def _ff_block(self, x: Tensor) -> Tensor:
        x = self.ff(x)
        return self.dropout(x)
class _TransformerEncoderLayer(nn.Module):
    r"""
    Taken from official torch implementation:
        https://pytorch.org/docs/stable/_modules/torch/nn/modules/transformer.html#TransformerEncoderLayer
        - add a layer norm switch
        - add an attn_weight output switch
        - batch first
    batch_first has been added in PyTorch 1.9.0
    https://github.com/pytorch/pytorch/pull/55285
    """
    def __init__(self, d_model, nhead,
                 dim_feedforward=2048,
                 dropout=0.1,
                 layer_norm=True,
                 attn_weight=False,
                 ):
        super(_TransformerEncoderLayer, self).__init__()
        self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.add_layer_norm = layer_norm
        self.attn_weight = attn_weight
        self.activation = nn.ReLU()
    def __setstate__(self, state):
        # Backward compatibility when unpickling older checkpoints that
        # predate the ``activation`` attribute.
        if 'activation' not in state:
            state['activation'] = F.relu
        super(_TransformerEncoderLayer, self).__setstate__(state)
    def forward(self, src: Tensor,
                pos: Optional[Tensor] = None,
                weight: Optional[Tensor] = None,
                src_mask: Optional[Tensor] = None,
                src_key_padding_mask: Optional[Tensor] = None) -> Tensor:
        r"""Pass the input through the encoder layer.
        Args (modified from torch):
            src: the sequence to the encoder layer (required): (batch_size, seq_len, d_model)
            src_mask: the mask for the src sequence (optional).
            src_key_padding_mask: the mask for the src keys per batch (optional).
        Shape:
            see the docs in Transformer class.
        Remark:
            PyTorch official implementation: (seq_len, n_batch, d_model) as input
            here we permute the first two dims as input
            so in the first line the dim needs to be permuted then permuted back
        """
        # Positional coordinates, when supplied, are concatenated as
        # extra feature channels.
        if pos is not None:
            src = torch.cat([pos, src], dim=-1)
        src = src.permute(1, 0, 2)
        # NOTE(review): masks are only applied when BOTH src_mask and
        # src_key_padding_mask are given; supplying just one silently
        # ignores it — confirm this is intended.
        if (src_mask is None) or (src_key_padding_mask is None):
            src2, attn_weight = self.self_attn(src, src, src)
        else:
            src2, attn_weight = self.self_attn(src, src, src, attn_mask=src_mask,
                                               key_padding_mask=src_key_padding_mask)
        src = src + self.dropout1(src2)
        if self.add_layer_norm:
            src = self.norm1(src)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
        src = src + self.dropout2(src2)
        if self.add_layer_norm:
            src = self.norm2(src)
        src = src.permute(1, 0, 2)
        if self.attn_weight:
            return src, attn_weight
        else:
            return src
class TransformerEncoderWrapper(nn.Module):
    r"""TransformerEncoder is a stack of N encoder layers
    Modified from pytorch official implementation
    TransformerEncoder's input and output shapes follow
    those of the encoder_layer fed into as this is essentially a wrapper
    Args:
        encoder_layer: an instance of the TransformerEncoderLayer() class (required).
        num_layers: the number of sub-encoder-layers in the encoder (required).
        norm: the layer normalization component (optional).
    Examples::
        >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
        >>> transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6)
        >>> src = torch.rand(10, 32, 512)
        >>> out = transformer_encoder(src)
    """
    __constants__ = ['norm']
    def __init__(self, encoder_layer, num_layers,
                 norm=None,):
        super(TransformerEncoderWrapper, self).__init__()
        # Each stacked layer is an independent deep copy of the template.
        self.layers = nn.ModuleList(
            [copy.deepcopy(encoder_layer) for i in range(num_layers)])
        self.num_layers = num_layers
        self.norm = norm
    def forward(self, src: Tensor,
                mask: Optional[Tensor] = None,
                src_key_padding_mask: Optional[Tensor] = None) -> Tensor:
        r"""Pass the input through the encoder layers in turn.
        Args:
            src: the sequence to the encoder (required).
            mask: the mask for the src sequence (optional).
            src_key_padding_mask: the mask for the src keys per batch (optional).
        Shape:
            see the docs in Transformer class.
        """
        output = src
        for mod in self.layers:
            output = mod(output, src_mask=mask,
                         src_key_padding_mask=src_key_padding_mask)
        # Optional final normalization over the stack's output.
        if self.norm is not None:
            output = self.norm(output)
        return output
class GCN(nn.Module):
    """Simple GCN feature extractor with a learnable edge encoder;
    wraps GraphConvolution layers (Kipf & Welling style)."""
    def __init__(self,
                 node_feats=4,
                 out_features=96,
                 num_gcn_layers=2,
                 edge_feats=6,
                 activation=True,
                 raw_laplacian=False,
                 dropout=0.1,
                 debug=False):
        super(GCN, self).__init__()
        '''
        A simple GCN, a wrapper for Kipf and Weiling's code
        learnable edge features similar to
        Graph Transformer https://arxiv.org/abs/1911.06455
        but using neighbor agg
        '''
        self.edge_learner = EdgeEncoder(out_dim=out_features,
                                        edge_feats=edge_feats,
                                        raw_laplacian=raw_laplacian
                                        )
        self.gcn_layer0 = GraphConvolution(in_features=node_feats,  # hard coded
                                           out_features=out_features,
                                           debug=debug,
                                           )
        self.gcn_layers = nn.ModuleList([copy.deepcopy(GraphConvolution(
            in_features=out_features,  # hard coded
            out_features=out_features,
            debug=debug
        )) for _ in range(1, num_gcn_layers)])
        self.activation = activation
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)
        self.edge_feats = edge_feats
        self.debug = debug
    def forward(self, x, edge):
        # Move features/edges to channel-first layout for the conv layers.
        x = x.permute(0, 2, 1).contiguous()
        edge = edge.permute([0, 3, 1, 2]).contiguous()
        assert edge.size(1) == self.edge_feats
        edge = self.edge_learner(edge)
        out = self.gcn_layer0(x, edge)
        for gc in self.gcn_layers[:-1]:
            out = gc(out, edge)
            if self.activation:
                out = self.relu(out)
        # last layer no activation
        out = self.gcn_layers[-1](out, edge)
        # Back to (batch, seq_len, features).
        return out.permute(0, 2, 1)
class GAT(nn.Module):
    """Simple stacked graph-attention feature extractor (GAT-style),
    operating on the adjacency slice of the edge tensor."""
    def __init__(self,
                 node_feats=4,
                 out_features=96,
                 num_gcn_layers=2,
                 edge_feats=None,
                 activation=False,
                 debug=False):
        super(GAT, self).__init__()
        '''
        A simple GAT: modified from the official implementation
        '''
        self.gat_layer0 = GraphAttention(in_features=node_feats,
                                         out_features=out_features,
                                         )
        self.gat_layers = nn.ModuleList([copy.deepcopy(GraphAttention(
            in_features=out_features,
            out_features=out_features,
        )) for _ in range(1, num_gcn_layers)])
        self.activation = activation
        self.relu = nn.ReLU()
        self.debug = debug
    def forward(self, x, edge):
        '''
        input: node feats (-1, seq_len, n_feats)
               edge only takes adj (-1, seq_len, seq_len)
               edge matrix first one in the last dim is graph Lap.
        '''
        # Only the first slice of the last dim (graph Laplacian/adjacency)
        # is consumed; other edge features are ignored here.
        edge = edge[..., 0].contiguous()
        out = self.gat_layer0(x, edge)
        for layer in self.gat_layers[:-1]:
            out = layer(out, edge)
            if self.activation:
                out = self.relu(out)
        # last layer no activation
        return self.gat_layers[-1](out, edge)
class PointwiseRegressor(nn.Module):
    """Pointwise MLP decoder: optional spatial-coordinate fusion, a stack
    of Linear+activation layers, dropout, then a linear output head."""
    def __init__(self, in_dim,  # input dimension
                 n_hidden,
                 out_dim,  # number of target dim
                 num_layers: int = 2,
                 spacial_fc: bool = False,
                 spacial_dim=1,
                 dropout=0.1,
                 activation='silu',
                 return_latent=False,
                 debug=False):
        super(PointwiseRegressor, self).__init__()
        '''
        A wrapper for a simple pointwise linear layers
        '''
        dropout = default(dropout, 0.1)
        self.spacial_fc = spacial_fc
        activ = nn.SiLU() if activation == 'silu' else nn.ReLU()
        if self.spacial_fc:
            # Grid coordinates are concatenated before the first Linear.
            in_dim = in_dim + spacial_dim
            self.fc = nn.Linear(in_dim, n_hidden)
        self.ff = nn.ModuleList([nn.Sequential(
                                nn.Linear(n_hidden, n_hidden),
                                activ,
                                )])
        for _ in range(num_layers - 1):
            self.ff.append(nn.Sequential(
                nn.Linear(n_hidden, n_hidden),
                activ,
            ))
        self.dropout = nn.Dropout(dropout)
        self.out = nn.Linear(n_hidden, out_dim)
        self.return_latent = return_latent
        self.debug = debug
    def forward(self, x, grid=None):
        '''
        2D:
            Input: (-1, n, n, in_features)
            Output: (-1, n, n, n_targets)
        1D:
            Input: (-1, n, in_features)
            Output: (-1, n, n_targets)
        '''
        if self.spacial_fc:
            x = torch.cat([x, grid], dim=-1)
            x = self.fc(x)
        for layer in self.ff:
            x = layer(x)
            x = self.dropout(x)
        x = self.out(x)
        if self.return_latent:
            # Latent slot kept for interface parity with SpectralRegressor.
            return x, None
        else:
            return x
class SpectralRegressor(nn.Module):
    """FNO-style spectral decoder: a stack of SpectralConv1d/2d layers
    followed by a two-layer pointwise MLP head."""
    def __init__(self, in_dim,
                 n_hidden,
                 freq_dim,
                 out_dim,
                 modes: int,
                 num_spectral_layers: int = 2,
                 n_grid=None,
                 dim_feedforward=None,
                 spacial_fc=False,
                 spacial_dim=2,
                 return_freq=False,
                 return_latent=False,
                 normalizer=None,
                 activation='silu',
                 last_activation=True,
                 dropout=0.1,
                 debug=False):
        super(SpectralRegressor, self).__init__()
        '''
        A wrapper for both SpectralConv1d and SpectralConv2d
        Ref: Li et 2020 FNO paper
        https://github.com/zongyi-li/fourier_neural_operator/blob/master/fourier_2d.py
        A new implementation incoporating all spacial-based FNO
        in_dim: input dimension, (either n_hidden or spacial dim)
        n_hidden: number of hidden features out from attention to the fourier conv
        '''
        if spacial_dim == 2:  # 2d, function + (x,y)
            spectral_conv = SpectralConv2d
        elif spacial_dim == 1:  # 1d, function + x
            spectral_conv = SpectralConv1d
        else:
            raise NotImplementedError("3D not implemented.")
        activation = default(activation, 'silu')
        self.activation = nn.SiLU() if activation == 'silu' else nn.ReLU()
        dropout = default(dropout, 0.1)
        self.spacial_fc = spacial_fc  # False in Transformer
        if self.spacial_fc:
            self.fc = nn.Linear(in_dim + spacial_dim, n_hidden)
        # First spectral layer maps n_hidden -> freq_dim; the rest keep freq_dim.
        self.spectral_conv = nn.ModuleList([spectral_conv(in_dim=n_hidden,
                                                          out_dim=freq_dim,
                                                          n_grid=n_grid,
                                                          modes=modes,
                                                          dropout=dropout,
                                                          activation=activation,
                                                          return_freq=return_freq,
                                                          debug=debug)])
        for _ in range(num_spectral_layers - 1):
            self.spectral_conv.append(spectral_conv(in_dim=freq_dim,
                                                    out_dim=freq_dim,
                                                    n_grid=n_grid,
                                                    modes=modes,
                                                    dropout=dropout,
                                                    activation=activation,
                                                    return_freq=return_freq,
                                                    debug=debug))
        if not last_activation:
            self.spectral_conv[-1].activation = Identity()
        self.n_grid = n_grid  # dummy for debug
        self.dim_feedforward = default(dim_feedforward, 2*spacial_dim*freq_dim)
        self.regressor = nn.Sequential(
            nn.Linear(freq_dim, self.dim_feedforward),
            self.activation,
            nn.Linear(self.dim_feedforward, out_dim),
        )
        self.normalizer = normalizer
        self.return_freq = return_freq
        self.return_latent = return_latent
        self.debug = debug
    def forward(self, x, edge=None, pos=None, grid=None):
        '''
        2D:
            Input: (-1, n, n, in_features)
            Output: (-1, n, n, n_targets)
        1D:
            Input: (-1, n, in_features)
            Output: (-1, n, n_targets)
        '''
        x_latent = []
        x_fts = []
        if self.spacial_fc:
            x = torch.cat([x, grid], dim=-1)
            x = self.fc(x)
        for layer in self.spectral_conv:
            if self.return_freq:
                x, x_ft = layer(x)
                x_fts.append(x_ft.contiguous())
            else:
                x = layer(x)
            if self.return_latent:
                x_latent.append(x.contiguous())
        x = self.regressor(x)
        if self.normalizer:
            # Undo the target normalization applied during training.
            x = self.normalizer.inverse_transform(x)
        if self.return_freq or self.return_latent:
            return x, dict(preds_freq=x_fts, preds_latent=x_latent)
        else:
            return x
class DownScaler(nn.Module):
    """Downscales a 2D field from channel-last to a coarser grid using
    either stacked Conv2d encoders or an interpolation-based encoder."""
    def __init__(self, in_dim,
                 out_dim,
                 dropout=0.1,
                 padding=5,
                 downsample_mode='conv',
                 activation_type='silu',
                 interp_size=None,
                 debug=False):
        super(DownScaler, self).__init__()
        '''
        A wrapper for conv2d/interp downscaler
        '''
        if downsample_mode == 'conv':
            self.downsample = nn.Sequential(Conv2dEncoder(in_dim=in_dim,
                                                          out_dim=out_dim,
                                                          activation_type=activation_type,
                                                          debug=debug),
                                            Conv2dEncoder(in_dim=out_dim,
                                                          out_dim=out_dim,
                                                          padding=padding,
                                                          activation_type=activation_type,
                                                          debug=debug))
        elif downsample_mode == 'interp':
            self.downsample = Interp2dEncoder(in_dim=in_dim,
                                              out_dim=out_dim,
                                              interp_size=interp_size,
                                              activation_type=activation_type,
                                              dropout=dropout,
                                              debug=debug)
        else:
            raise NotImplementedError("downsample mode not implemented.")
        self.in_dim = in_dim
        self.out_dim = out_dim
    def forward(self, x):
        '''
        2D:
            Input: (-1, n, n, in_dim)
            Output: (-1, n_s, n_s, out_dim)
        '''
        n_grid = x.size(1)
        bsz = x.size(0)
        x = x.view(bsz, n_grid, n_grid, self.in_dim)
        # Channel-last -> channel-first for the conv stack, then back.
        x = x.permute(0, 3, 1, 2)
        x = self.downsample(x)
        x = x.permute(0, 2, 3, 1)
        return x
class UpScaler(nn.Module):
    """Upscales a 2D field from channel-last to a finer grid using either
    stacked transposed-conv blocks or interpolation."""
    def __init__(self, in_dim: int,
                 out_dim: int,
                 hidden_dim=None,
                 padding=2,
                 output_padding=0,
                 dropout=0.1,
                 upsample_mode='conv',
                 activation_type='silu',
                 interp_mode='bilinear',
                 interp_size=None,
                 debug=False):
        super(UpScaler, self).__init__()
        '''
        A wrapper for DeConv2d upscaler or interpolation upscaler
        Deconv: Conv1dTranspose
        Interp: interp->conv->interp
        '''
        hidden_dim = default(hidden_dim, in_dim)
        if upsample_mode in ['conv', 'deconv']:
            # NOTE(review): the second DeConv2dBlock takes in_dim=in_dim,
            # not the first block's out_dim — the two only chain when
            # in_dim == out_dim; confirm this is intended.
            self.upsample = nn.Sequential(
                DeConv2dBlock(in_dim=in_dim,
                              out_dim=out_dim,
                              hidden_dim=hidden_dim,
                              padding=padding,
                              output_padding=output_padding,
                              dropout=dropout,
                              activation_type=activation_type,
                              debug=debug),
                DeConv2dBlock(in_dim=in_dim,
                              out_dim=out_dim,
                              hidden_dim=hidden_dim,
                              padding=padding*2,
                              output_padding=output_padding,
                              dropout=dropout,
                              activation_type=activation_type,
                              debug=debug))
        elif upsample_mode == 'interp':
            self.upsample = Interp2dUpsample(in_dim=in_dim,
                                             out_dim=out_dim,
                                             interp_mode=interp_mode,
                                             interp_size=interp_size,
                                             dropout=dropout,
                                             activation_type=activation_type,
                                             debug=debug)
        else:
            raise NotImplementedError("upsample mode not implemented.")
        self.in_dim = in_dim
        self.out_dim = out_dim
    def forward(self, x):
        '''
        2D:
            Input: (-1, n_s, n_s, in_dim)
            Output: (-1, n, n, out_dim)
        '''
        # Channel-last -> channel-first for the upsampler, then back.
        x = x.permute(0, 3, 1, 2)
        x = self.upsample(x)
        x = x.permute(0, 2, 3, 1)
        return x
class SimpleTransformer(nn.Module):
def __init__(self, **kwargs):
super(SimpleTransformer, self).__init__()
self.config = defaultdict(lambda: None, **kwargs)
self._get_setting()
self._initialize()
self.__name__ = self.attention_type.capitalize() + 'Transformer'
def forward(self, node, edge, pos, grid=None, weight=None):
'''
seq_len: n, number of grid points
node_feats: number of features of the inputs
edge_feats: number of Laplacian matrices (including learned)
pos_dim: dimension of the Euclidean space
- node: (batch_size, seq_len, node_feats)
- pos: (batch_size, seq_len, pos_dim)
- edge: (batch_size, seq_len, seq_len, edge_feats)
- weight: (batch_size, seq_len, seq_len): mass matrix prefered
or (batch_size, seq_len) when mass matrices are not provided
Remark:
for classic Transformer: pos_dim = n_hidden = 512
pos encodings is added to the latent representation
'''
x_latent = []
attn_weights = []
x = self.feat_extract(node, edge)
if self.spacial_residual or self.return_latent:
res = x.contiguous()
x_latent.append(res)
for encoder in self.encoder_layers:
if self.return_attn_weight:
x, attn_weight = encoder(x, pos, weight)
attn_weights.append(attn_weight)
else:
x = encoder(x, pos, weight)
if self.return_latent:
x_latent.append(x.contiguous())
if self.spacial_residual:
x = res + x
x_freq = self.freq_regressor(
x)[:, :self.pred_len, :] if self.n_freq_targets > 0 else None
x = self.dpo(x)
x = self.regressor(x, grid=grid)
return dict(preds=x,
preds_freq=x_freq,
preds_latent=x_latent,
attn_weights=attn_weights)
def _initialize(self):
self._get_feature()
self._get_encoder()
if self.n_freq_targets > 0:
self._get_freq_regressor()
self._get_regressor()
if self.decoder_type in ['pointwise', 'convolution']:
self._initialize_layer(self.regressor)
self.config = dict(self.config)
@staticmethod
def _initialize_layer(layer, gain=1e-2):
for param in layer.parameters():
if param.ndim > 1:
xavier_uniform_(param, gain=gain)
else:
constant_(param, 0)
def _get_setting(self):
all_attr = list(self.config.keys()) + ADDITIONAL_ATTR
for key in all_attr:
setattr(self, key, self.config[key])
self.dim_feedforward = default(self.dim_feedforward, 2*self.n_hidden)
self.spacial_dim = default(self.spacial_dim, self.pos_dim)
self.spacial_fc = default(self.spacial_fc, False)
self.dropout = default(self.dropout, 0.05)
self.dpo = nn.Dropout(self.dropout)
if self.decoder_type == 'attention':
self.num_encoder_layers += 1
self.attention_types = ['fourier', 'integral',
'cosine', 'galerkin', 'linear', 'softmax']
def _get_feature(self):
if self.num_feat_layers > 0 and self.feat_extract_type == 'gcn':
self.feat_extract = GCN(node_feats=self.node_feats,
edge_feats=self.edge_feats,
num_gcn_layers=self.num_feat_layers,
out_features=self.n_hidden,
activation=self.graph_activation,
raw_laplacian=self.raw_laplacian,
debug=self.debug,
)
elif self.num_feat_layers > 0 and self.feat_extract_type == 'gat':
self.feat_extract = GAT(node_feats=self.node_feats,
out_features=self.n_hidden,
num_gcn_layers=self.num_feat_layers,
activation=self.graph_activation,
debug=self.debug,
)
else:
self.feat_extract = Identity(in_features=self.node_feats,
out_features=self.n_hidden)
def _get_encoder(self):
if self.attention_type in self.attention_types:
encoder_layer = SimpleTransformerEncoderLayer(d_model=self.n_hidden,
n_head=self.n_head,
attention_type=self.attention_type,
dim_feedforward=self.dim_feedforward,
layer_norm=self.layer_norm,
attn_norm=self.attn_norm,
norm_type=self.norm_type,
batch_norm=self.batch_norm,
pos_dim=self.pos_dim,
xavier_init=self.xavier_init,
diagonal_weight=self.diagonal_weight,
symmetric_init=self.symmetric_init,
attn_weight=self.return_attn_weight,
residual_type=self.residual_type,
activation_type=self.attn_activation,
dropout=self.encoder_dropout,
ffn_dropout=self.ffn_dropout,
debug=self.debug)
else:
encoder_layer = _TransformerEncoderLayer(d_model=self.n_hidden,
nhead=self.n_head,
dim_feedforward=self.dim_feedforward,
layer_norm=self.layer_norm,
attn_weight=self.return_attn_weight,
dropout=self.encoder_dropout
)
self.encoder_layers = nn.ModuleList(
[copy.deepcopy(encoder_layer) for _ in range(self.num_encoder_layers)])
def _get_freq_regressor(self):
if self.bulk_regression:
self.freq_regressor = BulkRegressor(in_dim=self.seq_len,
n_feats=self.n_hidden,
n_targets=self.n_freq_targets,
pred_len=self.pred_len)
else:
self.freq_regressor = nn.Sequential(
nn.Linear(self.n_hidden, self.n_hidden),
nn.ReLU(),
nn.Linear(self.n_hidden, self.n_freq_targets),
)
def _get_regressor(self):
if self.decoder_type == 'pointwise':
self.regressor = PointwiseRegressor(in_dim=self.n_hidden,
n_hidden=self.n_hidden,
out_dim=self.n_targets,
spacial_fc=self.spacial_fc,
spacial_dim=self.spacial_dim,
activation=self.regressor_activation,
dropout=self.decoder_dropout,
debug=self.debug)
elif self.decoder_type == 'ifft':
self.regressor = SpectralRegressor(in_dim=self.n_hidden,
n_hidden=self.n_hidden,
freq_dim=self.freq_dim,
out_dim=self.n_targets,
num_spectral_layers=self.num_regressor_layers,
modes=self.fourier_modes,
spacial_dim=self.spacial_dim,
spacial_fc=self.spacial_fc,
dim_feedforward=self.freq_dim,
activation=self.regressor_activation,
dropout=self.decoder_dropout,
)
else:
raise NotImplementedError("Decoder type not implemented")
    def get_graph(self):
        # Accessor for the graph feature extractor module.
        # NOTE(review): the attribute is spelled ``self.gragh`` — presumably a
        # typo for ``self.graph`` carried over from wherever it is assigned
        # (not visible here); confirm the assignment site before renaming.
        return self.gragh
    def get_encoder(self):
        # Accessor for the stacked encoder layers (an ``nn.ModuleList``).
        return self.encoder_layers
class FourierTransformer2D(nn.Module):
    """2-D Fourier/Galerkin Transformer.

    Pipeline: (optional) downscale -> graph feature extraction -> stacked
    attention encoder layers -> upscale -> regressor head, with optional
    de-normalization and Dirichlet boundary handling.  All hyperparameters
    arrive through ``**kwargs`` and are materialized as attributes by
    ``_get_setting``.
    """
    def __init__(self, **kwargs):
        super(FourierTransformer2D, self).__init__()
        # Missing config keys default to None via the defaultdict.
        self.config = defaultdict(lambda: None, **kwargs)
        self._get_setting()
        self._initialize()
        self.__name__ = self.attention_type.capitalize() + 'Transformer2D'

    def forward(self, node, edge, pos, grid, weight=None, boundary_value=None):
        '''
        - node: (batch_size, n, n, node_feats)
        - pos: (batch_size, n_s*n_s, pos_dim)
        - edge: (batch_size, n_s*n_s, n_s*n_s, edge_feats)
        - weight: (batch_size, n_s*n_s, n_s*n_s): mass matrix preferred
            or (batch_size, n_s*n_s) when mass matrices are not provided (lumped mass)
        - grid: (batch_size, n-2, n-2, 2) excluding boundary
        '''
        bsz = node.size(0)
        # pos holds a flattened n_s x n_s grid, so its side length is sqrt(len).
        n_s = int(pos.size(1)**(0.5))
        x_latent = []
        attn_weights = []
        # Without a learned downscaler, append the positions as extra
        # input channels before the (identity) downscaler.
        if not self.downscaler_size:
            node = torch.cat(
                [node, pos.contiguous().view(bsz, n_s, n_s, -1)], dim=-1)
        x = self.downscaler(node)
        x = x.view(bsz, -1, self.n_hidden)
        x = self.feat_extract(x, edge)
        x = self.dpo(x)
        for encoder in self.encoder_layers:
            if self.return_attn_weight and self.attention_type != 'official':
                x, attn_weight = encoder(x, pos, weight)
                attn_weights.append(attn_weight)
            elif self.attention_type != 'official':
                x = encoder(x, pos, weight)
            else:
                # PyTorch's stock TransformerEncoderLayer has no positional
                # input, so the positions are concatenated per head instead.
                out_dim = self.n_head*self.pos_dim + self.n_hidden
                x = x.view(bsz, -1, self.n_head, self.n_hidden//self.n_head).transpose(1, 2)
                x = torch.cat([pos.repeat([1, self.n_head, 1, 1]), x], dim=-1)
                x = x.transpose(1, 2).contiguous().view(bsz, -1, out_dim)
                x = encoder(x)
            if self.return_latent:
                x_latent.append(x.contiguous())
        x = x.view(bsz, n_s, n_s, self.n_hidden)
        x = self.upscaler(x)
        if self.return_latent:
            x_latent.append(x.contiguous())
        x = self.dpo(x)
        if self.return_latent:
            x, xr_latent = self.regressor(x, grid=grid)
            x_latent.append(xr_latent)
        else:
            x = self.regressor(x, grid=grid)
        if self.normalizer:
            x = self.normalizer.inverse_transform(x)
        if self.boundary_condition == 'dirichlet':
            # Zero out the boundary ring, then (optionally) impose the
            # prescribed boundary values.
            x = x[:, 1:-1, 1:-1].contiguous()
            x = F.pad(x, (0, 0, 1, 1, 1, 1), "constant", 0)
            if boundary_value is not None:
                assert x.size() == boundary_value.size()
                x += boundary_value
        return dict(preds=x,
                    preds_latent=x_latent,
                    attn_weights=attn_weights)

    def _initialize(self):
        # Build all submodules, then freeze the config back to a plain dict.
        self._get_feature()
        self._get_scaler()
        self._get_encoder()
        self._get_regressor()
        self.config = dict(self.config)

    def cuda(self, device=None):
        # The normalizer is not a registered submodule, so move it explicitly.
        self = super().cuda(device)
        if self.normalizer:
            self.normalizer = self.normalizer.cuda(device)
        return self

    def cpu(self):
        # See ``cuda``: the normalizer must be moved by hand.
        self = super().cpu()
        if self.normalizer:
            self.normalizer = self.normalizer.cpu()
        return self

    def to(self, *args, **kwargs):
        # See ``cuda``: the normalizer must be moved by hand.
        self = super().to(*args, **kwargs)
        if self.normalizer:
            self.normalizer = self.normalizer.to(*args, **kwargs)
        return self

    def print_config(self):
        # Dump every public config entry and its current attribute value.
        for a in self.config.keys():
            if not a.startswith('__'):
                print(f"{a}: \t", getattr(self, a))

    @staticmethod
    def _initialize_layer(layer, gain=1e-2):
        # Xavier-init weight matrices, zero-init biases/vectors.
        for param in layer.parameters():
            if param.ndim > 1:
                xavier_uniform_(param, gain=gain)
            else:
                constant_(param, 0)

    @staticmethod
    def _get_pos(pos, downsample):
        '''
        get the downscaled position in 2d
        '''
        bsz = pos.size(0)
        n_grid = pos.size(1)
        x, y = pos[..., 0], pos[..., 1]
        x = x.view(bsz, n_grid, n_grid)
        y = y.view(bsz, n_grid, n_grid)
        # Strided slicing keeps every ``downsample``-th grid point.
        x = x[:, ::downsample, ::downsample].contiguous()
        y = y[:, ::downsample, ::downsample].contiguous()
        return torch.stack([x, y], dim=-1)

    def _get_setting(self):
        # Promote every config key (plus the known extras) to an attribute.
        all_attr = list(self.config.keys()) + ADDITIONAL_ATTR
        for key in all_attr:
            setattr(self, key, self.config[key])
        self.dim_feedforward = default(self.dim_feedforward, 2*self.n_hidden)
        self.dropout = default(self.dropout, 0.05)
        self.dpo = nn.Dropout(self.dropout)
        # An attention decoder consumes one extra encoder layer.
        if self.decoder_type == 'attention':
            self.num_encoder_layers += 1
        self.attention_types = ['fourier', 'integral', 'local', 'global',
                                'cosine', 'galerkin', 'linear', 'softmax']

    def _get_feature(self):
        # Graph-based feature extractor (GCN/GAT) or a pass-through.
        if self.feat_extract_type == 'gcn' and self.num_feat_layers > 0:
            self.feat_extract = GCN(node_feats=self.n_hidden,
                                    edge_feats=self.edge_feats,
                                    num_gcn_layers=self.num_feat_layers,
                                    out_features=self.n_hidden,
                                    activation=self.graph_activation,
                                    raw_laplacian=self.raw_laplacian,
                                    debug=self.debug,
                                    )
        elif self.feat_extract_type == 'gat' and self.num_feat_layers > 0:
            self.feat_extract = GAT(node_feats=self.n_hidden,
                                    out_features=self.n_hidden,
                                    num_gcn_layers=self.num_feat_layers,
                                    activation=self.graph_activation,
                                    debug=self.debug,
                                    )
        else:
            self.feat_extract = Identity()

    def _get_scaler(self):
        # Learned interpolation scalers, or identity mappings when no
        # interp size is configured.
        if self.downscaler_size:
            self.downscaler = DownScaler(in_dim=self.node_feats,
                                         out_dim=self.n_hidden,
                                         downsample_mode=self.downsample_mode,
                                         interp_size=self.downscaler_size,
                                         dropout=self.downscaler_dropout,
                                         activation_type=self.downscaler_activation)
        else:
            self.downscaler = Identity(in_features=self.node_feats+self.spacial_dim,
                                       out_features=self.n_hidden)
        if self.upscaler_size:
            self.upscaler = UpScaler(in_dim=self.n_hidden,
                                     out_dim=self.n_hidden,
                                     upsample_mode=self.upsample_mode,
                                     interp_size=self.upscaler_size,
                                     dropout=self.upscaler_dropout,
                                     activation_type=self.upscaler_activation)
        else:
            self.upscaler = Identity()

    def _get_encoder(self):
        # Custom attention flavors use SimpleTransformerEncoderLayer;
        # 'official' falls back to torch.nn.TransformerEncoderLayer.
        if self.attention_type in self.attention_types:
            encoder_layer = SimpleTransformerEncoderLayer(d_model=self.n_hidden,
                                                          n_head=self.n_head,
                                                          attention_type=self.attention_type,
                                                          dim_feedforward=self.dim_feedforward,
                                                          layer_norm=self.layer_norm,
                                                          attn_norm=self.attn_norm,
                                                          batch_norm=self.batch_norm,
                                                          pos_dim=self.pos_dim,
                                                          xavier_init=self.xavier_init,
                                                          diagonal_weight=self.diagonal_weight,
                                                          symmetric_init=self.symmetric_init,
                                                          attn_weight=self.return_attn_weight,
                                                          dropout=self.encoder_dropout,
                                                          ffn_dropout=self.ffn_dropout,
                                                          norm_eps=self.norm_eps,
                                                          debug=self.debug)
        elif self.attention_type == 'official':
            encoder_layer = TransformerEncoderLayer(d_model=self.n_hidden+self.pos_dim*self.n_head,
                                                    nhead=self.n_head,
                                                    dim_feedforward=self.dim_feedforward,
                                                    dropout=self.encoder_dropout,
                                                    batch_first=True,
                                                    layer_norm_eps=self.norm_eps,
                                                    )
        else:
            raise NotImplementedError("encoder type not implemented.")
        self.encoder_layers = nn.ModuleList(
            [copy.deepcopy(encoder_layer) for _ in range(self.num_encoder_layers)])

    def _get_regressor(self):
        # Decoder head: pointwise MLP or 2-D spectral regressor.
        if self.decoder_type == 'pointwise':
            self.regressor = PointwiseRegressor(in_dim=self.n_hidden,
                                                n_hidden=self.n_hidden,
                                                out_dim=self.n_targets,
                                                num_layers=self.num_regressor_layers,
                                                spacial_fc=self.spacial_fc,
                                                spacial_dim=self.spacial_dim,
                                                activation=self.regressor_activation,
                                                dropout=self.decoder_dropout,
                                                return_latent=self.return_latent,
                                                debug=self.debug)
        elif self.decoder_type == 'ifft2':
            self.regressor = SpectralRegressor(in_dim=self.n_hidden,
                                               n_hidden=self.freq_dim,
                                               freq_dim=self.freq_dim,
                                               out_dim=self.n_targets,
                                               num_spectral_layers=self.num_regressor_layers,
                                               modes=self.fourier_modes,
                                               spacial_dim=self.spacial_dim,
                                               spacial_fc=self.spacial_fc,
                                               activation=self.regressor_activation,
                                               last_activation=self.last_activation,
                                               dropout=self.decoder_dropout,
                                               return_latent=self.return_latent,
                                               debug=self.debug
                                               )
        else:
            raise NotImplementedError("Decoder type not implemented")
class FourierTransformer2DLite(nn.Module):
    '''
    A lite model of the Fourier/Galerkin Transformer

    Identity feature map -> stacked attention encoder layers ->
    spectral regressor head; no scalers, normalizer or boundary handling.
    '''
    def __init__(self, **kwargs):
        super(FourierTransformer2DLite, self).__init__()
        # Missing config keys default to None via the defaultdict.
        self.config = defaultdict(lambda: None, **kwargs)
        self._get_setting()
        self._initialize()

    def forward(self, node, edge, pos, grid=None):
        '''
        seq_len: n, number of grid points
        node_feats: number of features of the inputs
        pos_dim: dimension of the Euclidean space
        - node: (batch_size, n*n, node_feats)
        - pos: (batch_size, n*n, pos_dim)
        - grid: (batch_size, n, n, pos_dim)
        Remark:
        for classic Transformer: pos_dim = n_hidden = 512
        pos encodings is added to the latent representation
        '''
        bsz = node.size(0)
        input_dim = node.size(-1)
        n_grid = grid.size(1)
        # Concatenate positions as extra input channels.
        node = torch.cat([node.view(bsz, -1, input_dim), pos],
                         dim=-1)
        x = self.feat_extract(node, edge)
        for encoder in self.encoder_layers:
            x = encoder(x, pos)
        x = self.dpo(x)
        # Restore the 2-D grid layout before the spectral regressor.
        x = x.view(bsz, n_grid, n_grid, -1)
        x = self.regressor(x, grid=grid)
        return dict(preds=x,
                    preds_freq=None,
                    preds_latent=None,
                    attn_weights=None)

    def _initialize(self):
        # Build all submodules, then freeze the config back to a plain dict.
        self._get_feature()
        self._get_encoder()
        self._get_regressor()
        self.config = dict(self.config)

    def _get_setting(self):
        # Promote every config key (plus the known extras) to an attribute.
        all_attr = list(self.config.keys()) + ADDITIONAL_ATTR
        for key in all_attr:
            setattr(self, key, self.config[key])
        self.dim_feedforward = default(self.dim_feedforward, 2*self.n_hidden)
        self.spacial_dim = default(self.spacial_dim, self.pos_dim)
        self.spacial_fc = default(self.spacial_fc, False)
        self.dropout = default(self.dropout, 0.05)
        self.dpo = nn.Dropout(self.dropout)
        # An attention decoder consumes one extra encoder layer.
        if self.decoder_type == 'attention':
            self.num_encoder_layers += 1
        self.attention_types = ['fourier', 'integral',
                                'cosine', 'galerkin', 'linear', 'softmax']

    def _get_feature(self):
        # Lite model: a linear identity map instead of a graph extractor.
        self.feat_extract = Identity(in_features=self.node_feats,
                                     out_features=self.n_hidden)

    def _get_encoder(self):
        encoder_layer = SimpleTransformerEncoderLayer(d_model=self.n_hidden,
                                                      n_head=self.n_head,
                                                      dim_feedforward=self.dim_feedforward,
                                                      layer_norm=self.layer_norm,
                                                      attention_type=self.attention_type,
                                                      attn_norm=self.attn_norm,
                                                      norm_type=self.norm_type,
                                                      xavier_init=self.xavier_init,
                                                      diagonal_weight=self.diagonal_weight,
                                                      dropout=self.encoder_dropout,
                                                      ffn_dropout=self.ffn_dropout,
                                                      pos_dim=self.pos_dim,
                                                      debug=self.debug)
        # Each layer is an independent deep copy of the prototype above.
        self.encoder_layers = nn.ModuleList(
            [copy.deepcopy(encoder_layer) for _ in range(self.num_encoder_layers)])

    def _get_regressor(self):
        self.regressor = SpectralRegressor(in_dim=self.n_hidden,
                                           n_hidden=self.n_hidden,
                                           freq_dim=self.freq_dim,
                                           out_dim=self.n_targets,
                                           num_spectral_layers=self.num_regressor_layers,
                                           modes=self.fourier_modes,
                                           spacial_dim=self.spacial_dim,
                                           spacial_fc=self.spacial_fc,
                                           dim_feedforward=self.freq_dim,
                                           activation=self.regressor_activation,
                                           dropout=self.decoder_dropout,
                                           )
if __name__ == '__main__':
    # Smoke test: build a SimpleTransformer with each graph feature
    # extractor and print its layer summary.
    for graph in ['gcn', 'gat']:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        config = defaultdict(lambda: None,
                             node_feats=1,
                             edge_feats=5,
                             pos_dim=1,
                             n_targets=1,
                             n_hidden=96,
                             num_feat_layers=2,
                             num_encoder_layers=2,
                             n_head=2,
                             pred_len=0,
                             n_freq_targets=0,
                             dim_feedforward=96*2,
                             feat_extract_type=graph,
                             graph_activation=True,
                             raw_laplacian=True,
                             attention_type='fourier',  # no softmax
                             xavier_init=1e-4,
                             diagonal_weight=1e-2,
                             symmetric_init=False,
                             layer_norm=True,
                             attn_norm=False,
                             batch_norm=False,
                             spacial_residual=False,
                             return_attn_weight=True,
                             seq_len=None,
                             bulk_regression=False,
                             decoder_type='ifft',
                             freq_dim=64,
                             num_regressor_layers=2,
                             fourier_modes=16,
                             spacial_dim=1,
                             spacial_fc=True,
                             dropout=0.1,
                             debug=False,
                             )
        ft = SimpleTransformer(**config)
        ft.to(device)
        batch_size, seq_len = 8, 512
        # Input sizes mirror SimpleTransformer.forward(node, edge, pos, grid).
        summary(ft, input_size=[(batch_size, seq_len, 1),
                                (batch_size, seq_len, seq_len, 5),
                                (batch_size, seq_len, 1),
                                (batch_size, seq_len, 1)], device=device)
    # Instantiate a vanilla PyTorch encoder layer as a reference point.
    layer = TransformerEncoderLayer(d_model=128, nhead=4)
    print(layer.__class__)
| StarcoderdataPython |
1936962 | import datasets
from typing import List, Optional, Union
def get_code_search_net_dataset(split: Optional[Union[str, List[str]]] = None, lang: str = 'all'):
    """Load the CodeSearchNet corpus via HuggingFace ``datasets``.

    ``split`` selects one or more dataset splits (``None`` loads all splits)
    and ``lang`` picks the language configuration ('all' by default).
    """
    return datasets.load_dataset('code_search_net', split=split, name=lang)
| StarcoderdataPython |
316992 | import numpy as np
from ..numpy_functions import np_func
from ..signatures import NUMPY_MA as NP_MA
# Wrap every masked-array routine listed in the NUMPY_MA signature table
# with ``np_func`` so it can be looked up by name.
np_ma = dict(
    (name, np_func(getattr(np.ma, name), "ma." + name, sigs))
    for name, sigs in NP_MA.items()
)
| StarcoderdataPython |
8195148 | <gh_stars>10-100
import unittest
import tempfile
from shutil import rmtree
from os import path
from quikey.directories import AppDirectories
class AppDirectoriesTestCase(unittest.TestCase):
    """Checks that AppDirectories appends the app folder to each base dir."""

    def setUp(self):
        # One throwaway base directory per XDG category.
        self.data, self.config, self.cache = (
            tempfile.mkdtemp() for _ in range(3))
        self.appDirs = AppDirectories(self.data, self.config, self.cache)

    def tearDown(self):
        for directory in (self.data, self.config, self.cache):
            rmtree(directory)

    def testAppDirectories(self):
        for base, actual in ((self.data, self.appDirs.data),
                             (self.config, self.appDirs.config),
                             (self.cache, self.appDirs.cache)):
            self.assertEqual(path.join(base, "quikey/"), actual)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
3385666 | <gh_stars>0
# valueIterationAgents.py
# -----------------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by <NAME>
# (<EMAIL>) and <NAME> (<EMAIL>).
# Student side autograding was added by <NAME>, <NAME>, and
# <NAME> (<EMAIL>).
import mdp, util
from learningAgents import ValueEstimationAgent
class ValueIterationAgent(ValueEstimationAgent):
    """
        * Please read learningAgents.py before reading this.*

        A ValueIterationAgent takes a Markov decision process
        (see mdp.py) on initialization and runs value iteration
        for a given number of iterations using the supplied
        discount factor.
    """
    def __init__(self, mdp, discount = 0.9, iterations = 100):
        """
          Your value iteration agent should take an mdp on
          construction, run the indicated number of iterations
          and then act according to the resulting policy.

          Some useful mdp methods you will use:
              mdp.getStates()
              mdp.getPossibleActions(state)
              mdp.getTransitionStatesAndProbs(state, action)
              mdp.getReward(state, action, nextState)
              mdp.isTerminal(state)
        """
        self.mdp = mdp
        self.discount = discount
        self.iterations = iterations
        self.values = util.Counter()  # state -> value estimate, defaults to 0

        # Batch value iteration: each sweep computes all updated values from
        # the previous sweep's values before replacing them.
        for _ in range(iterations):
            updated = util.Counter()
            for state in mdp.getStates():
                # Terminal states keep value 0; use the MDP API rather than
                # comparing against the magic string 'TERMINAL_STATE'.
                if not mdp.isTerminal(state):
                    best_action = self.computeActionFromValues(state)
                    updated[state] = self.computeQValueFromValues(state, best_action)
            self.values = updated

    def getValue(self, state):
        """
          Return the value of the state (computed in __init__).
        """
        return self.values[state]

    def computeQValueFromValues(self, state, action):
        """
          Compute the Q-value of action in state from the
          value function stored in self.values.
        """
        # Full Bellman backup: sum over successors of
        #   P(s'|s,a) * (R(s,a,s') + gamma * V(s')).
        # The original special-cased terminal successors and dropped the
        # reward term for non-terminal ones; this form is equivalent for
        # gridworld (rewards are earned only when exiting into the terminal
        # state, whose value stays 0) and is also correct for general MDPs.
        q_value = 0
        for next_state, prob in self.mdp.getTransitionStatesAndProbs(state, action):
            reward = self.mdp.getReward(state, action, next_state)
            q_value += prob * (reward + self.discount * self.getValue(next_state))
        return q_value

    def computeActionFromValues(self, state):
        """
          The policy is the best action in the given state
          according to the values currently stored in self.values.

          You may break ties any way you see fit.  Note that if
          there are no legal actions, which is the case at the
          terminal state, you should return None.
        """
        if self.mdp.isTerminal(state):
            return None
        # Ties are broken by max()'s first-seen rule, which the docstring
        # explicitly allows.
        return max(self.mdp.getPossibleActions(state),
                   key=lambda action: self.computeQValueFromValues(state, action))

    def getPolicy(self, state):
        return self.computeActionFromValues(state)

    def getAction(self, state):
        "Returns the policy at the state (no exploration)."
        return self.computeActionFromValues(state)

    def getQValue(self, state, action):
        return self.computeQValueFromValues(state, action)
6497649 | import unittest
from sidemash_sdk.sum import sum
class TestSum(unittest.TestCase):
    """Unit tests for sidemash_sdk.sum."""

    def test_list_int(self):
        # ``sum`` here is the SDK helper imported above, which shadows
        # the builtin of the same name.
        self.assertEqual(sum([1, 2, 3]), 6)
# Fixed syntax: the original read ``if __name__ = '__main__'`` (assignment
# instead of comparison, and a missing colon), which is a SyntaxError.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
3319532 | <gh_stars>0
import random
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
# Module-level estimator shared by the animation; re-fitted in place
# every few frames as points accumulate.
REGRESSION = LinearRegression()
def linear_reg_anim(x_values, y_values, time):
    """
    Animate a linear regression over a randomly growing dataset.

    Params:
        x_values -> list:
            Empty list where we add random items to x-plane.
        y_values -> list:
            Empty list where we add random items to y-plane.
        time:
            Pause between animation frames, in seconds.

    Return:
        Show animated linear regression with randomly created dataset.
    """
    grid = np.arange(100).reshape(-1, 1)  # x positions of the fitted line
    for frame in range(1000):
        plt.clf()
        x_values.append(random.randint(0, 100))
        y_values.append(random.randint(0, 100))
        xs = np.array(x_values).reshape(-1, 1)
        ys = np.array(y_values).reshape(-1, 1)
        # Re-fit only every fifth frame to keep the animation cheap.
        if frame % 5 == 0:
            REGRESSION.fit(xs, ys)
        plt.xlim(0, 100)
        plt.ylim(0, 100)
        plt.scatter(x_values, y_values, color='black')
        plt.plot(list(range(100)), REGRESSION.predict(grid))
        plt.pause(time)
    return plt.show()
if __name__ == '__main__':
    # Frame delays (seconds) for each animation speed setting.
    speed = {
        'fast': 0.0000000001,
        'normal': 0.0001,
        'slow': 0.1,
    }
    # NOTE(review): an unrecognised answer raises KeyError below — consider
    # validating the input or falling back to 'normal'.
    speed_input = str(input('Please, choose speed of the animation:\n'
                            'fast | normal | slow\n'))
    x_data = []
    y_data = []
    linear_reg_anim(x_data, y_data, speed[speed_input])
| StarcoderdataPython |
8190829 | from dvc.command.base import CmdBase
from dvc.exceptions import DvcException
class CmdDestroy(CmdBase):
    """`dvc destroy`: wipe all DVC pipeline metadata, data files and cache."""

    def run_cmd(self):
        # Destroying is irreversible, so require either an explicit
        # confirmation from the user or the --force/-f flag.
        try:
            msg = u'This will destroy all information about your pipelines, ' \
                  u'all data files, as well as cache in .dvc/cache.\n' \
                  u'Are you sure you want to continue?'

            if not self.args.force \
               and not self.project.prompt.prompt(msg, False):
                msg = u'Cannot destroy without a confirmation from the ' \
                      u'user. Use \'-f\' to force.'
                raise DvcException(msg)

            self.project.destroy()
        except Exception as exc:
            # Broad catch is intentional for a CLI entry point: report the
            # failure and return a non-zero exit code instead of crashing.
            self.project.logger.error('Failed to destroy DVC', exc)
            return 1
        return 0
| StarcoderdataPython |
9722563 | <filename>gwtarget/DESI_mainInjector/Main-Injector-master/python/insideDesFootprint.py
import numpy as np
import matplotlib.path
def insideFootprint(ra, dec):
    """Return a boolean mask: True where (ra, dec) lies inside the DES footprint.

    ra, dec: array-like coordinates in degrees.  RA values above 180 are
    wrapped into (-180, 180] to match the footprint polygon's convention.
    """
    ra = np.asarray(ra, dtype=float)
    dec = np.asarray(dec, dtype=float)
    # Wrap on a copy; the original mutated the caller's ``ra`` array in place.
    ra = np.where(ra > 180., ra - 360., ra)
    footprint = getFootprint()
    # contains_points needs an (N, 2) array of points; a bare zip() iterator
    # is rejected on Python 3, so stack the coordinates explicitly.
    ix = footprint.contains_points(np.column_stack((ra, dec)))
    return ix
def getFootprint():
    """Build the DES footprint polygon as a matplotlib Path."""
    return desPath(*getFootprintRaDec())
def getFootprintRaDec() :
    """Load the DES footprint polygon vertices (ra, dec arrays, degrees).

    Reads ``round19_v0.txt`` from the location named by the
    DESGW_DATA_DIR environment variable.
    """
    import os
    gw_data_dir = os.environ["DESGW_DATA_DIR"]
    # NOTE(review): plain concatenation assumes DESGW_DATA_DIR ends with a
    # path separator — confirm, otherwise this builds the wrong filename.
    footFile = gw_data_dir + "round19_v0.txt"
    #ra,dec = np.genfromtxt(footFile,unpack=True,skiprows=30)
    ra,dec = np.genfromtxt(footFile,unpack=True,comments="#")
    return ra,dec
def desPath(raDes, decDes):
    """Wrap footprint vertex arrays into a matplotlib Path polygon."""
    # Path() requires an (N, 2) sequence of vertices; on Python 3 ``zip``
    # returns a lazy iterator, so it must be materialized first.
    footprint = matplotlib.path.Path(list(zip(raDes, decDes)))
    return footprint
| StarcoderdataPython |
11304580 | import sys
sys.path.insert(0, '../')
import unittest
import lib.base as sinon
from lib.spy import SinonSpy
from lib.stub import SinonStub
from lib.sandbox import sinontest
"""
======================================================
FOR TEST ONLY START
======================================================
"""
# build-in module
import os
# customized class
# customized class
class A_object(object):
    """Fixture class for the sandbox tests; its one method returns a marker."""

    # customized function
    def A_func(self):
        # Marker value asserted by the spy/stub tests.
        marker = "test_global_A_func"
        return marker
# global function
def B_func(x=None):
    """Return a marker string, with ``x`` appended when it is truthy."""
    suffix = str(x) if x else ""
    return "test_local_B_func" + suffix
def C_func(a="a", b="b", c="c"):
    """Return a fixed marker; the parameters exist only so spies can
    observe call arguments."""
    result = "test_local_C_func"
    return result
def D_func(err=False):
    """Return a marker string, or raise ``err`` when a truthy value is given.

    ``err`` is expected to be an exception class/instance when truthy;
    raising a truthy non-exception would itself fail with TypeError.
    """
    if not err:
        return "test_local_D_func"
    raise err
from TestClass import ForTestOnly
"""
======================================================
FOR TEST ONLY END
======================================================
"""
class TestSinonSandbox(unittest.TestCase):
    """Verifies that @sinontest restores spies/stubs created inside it."""

    def setUp(self):
        # Bind the module globals so sinon can patch names in this namespace.
        sinon.init(globals())

    @classmethod
    @sinontest
    def _spy_in_sinontest(self):
        # Spies created here should be torn down when @sinontest exits.
        base1 = SinonSpy(ForTestOnly)
        base2 = SinonSpy(D_func)
        base3 = SinonSpy(A_object)

    @classmethod
    @sinontest
    def _stub_in_sinontest(self):
        # Stubs created here should be torn down when @sinontest exits.
        base1 = SinonStub(ForTestOnly)
        base2 = SinonStub(D_func)
        base3 = SinonStub(A_object)

    def test001_test_spy_in_sinontest(self):
        base = SinonSpy()
        self.assertEqual(len(base._queue), 1)
        # Running the sandboxed helper must not leak extra queue entries.
        TestSinonSandbox._spy_in_sinontest()
        self.assertEqual(len(base._queue), 1)
        base.restore()

    def test002_test_stub_in_sinontest(self):
        base = SinonStub()
        self.assertEqual(len(base._queue), 1)
        # Running the sandboxed helper must not leak extra queue entries.
        TestSinonSandbox._stub_in_sinontest()
        self.assertEqual(len(base._queue), 1)
        base.restore()
| StarcoderdataPython |
3394129 | <gh_stars>0
from __future__ import absolute_import
import json
import six
import tempfile
from datetime import timedelta
from django.core import mail
from django.core.urlresolvers import reverse
from django.utils import timezone
from sentry.data_export.base import ExportQueryType, ExportStatus, DEFAULT_EXPIRATION
from sentry.data_export.models import ExportedData
from sentry.models import File
from sentry.testutils import TestCase
from sentry.utils.http import absolute_uri
from sentry.utils.compat.mock import patch
class ExportedDataTest(TestCase):
    """Tests for the ExportedData model: status/payload properties, file
    lifecycle (upload/delete) and success/failure notification emails.

    NOTE(review): this file imports ``six`` and targets Python 2; several
    spots below (dict.keys() + list, writing str to a binary tempfile,
    ``six.binary_type`` expectations) would need changes under Python 3.
    """
    # Payload written into export files by the upload tests.
    TEST_STRING = "A bunch of test data..."

    def setUp(self):
        super(ExportedDataTest, self).setUp()
        self.user = self.create_user()
        self.organization = self.create_organization()
        self.data_export = ExportedData.objects.create(
            user=self.user, organization=self.organization, query_type=0, query_info={"env": "test"}
        )
        # Two File rows so tests can exercise replacing one upload with another.
        self.file1 = File.objects.create(
            name="tempfile-data-export", type="export.csv", headers={"Content-Type": "text/csv"}
        )
        self.file2 = File.objects.create(
            name="tempfile-data-export", type="export.csv", headers={"Content-Type": "text/csv"}
        )

    def test_status_property(self):
        # Early -> Valid -> Expired as the finished/expired timestamps move.
        assert self.data_export.status == ExportStatus.Early
        self.data_export.update(
            date_expired=timezone.now() + timedelta(weeks=2),
            date_finished=timezone.now() - timedelta(weeks=2),
        )
        assert self.data_export.status == ExportStatus.Valid
        self.data_export.update(date_expired=timezone.now() - timedelta(weeks=1))
        assert self.data_export.status == ExportStatus.Expired

    def test_payload_property(self):
        assert isinstance(self.data_export.payload, dict)
        # NOTE(review): dict.keys() + list is Python 2 only; Python 3 needs
        # list(self.data_export.query_info.keys()) + ["export_type"].
        keys = self.data_export.query_info.keys() + ["export_type"]
        assert sorted(self.data_export.payload.keys()) == sorted(keys)

    def test_file_name_property(self):
        assert isinstance(self.data_export.file_name, six.string_types)
        file_name = self.data_export.file_name
        assert file_name.startswith(ExportQueryType.as_str(self.data_export.query_type))
        assert file_name.endswith(six.text_type(self.data_export.id) + ".csv")

    def test_format_date(self):
        # None-in/None-out; otherwise a formatted (binary on py2) string.
        assert ExportedData.format_date(self.data_export.date_finished) is None
        assert isinstance(ExportedData.format_date(self.data_export.date_added), six.binary_type)

    def test_delete_file(self):
        # Empty call should have no effect
        assert self.data_export.file is None
        self.data_export.delete_file()
        assert self.data_export.file is None
        # Real call should delete the file
        assert File.objects.filter(id=self.file1.id).exists()
        self.data_export.update(file=self.file1)
        assert isinstance(self.data_export.file, File)
        self.data_export.delete_file()
        assert not File.objects.filter(id=self.file1.id).exists()
        # The ExportedData should be unaffected
        assert ExportedData.objects.filter(id=self.data_export.id).exists()
        assert ExportedData.objects.get(id=self.data_export.id).file is None

    def test_delete(self):
        # Deleting the export row cascades to its attached File.
        self.data_export.finalize_upload(file=self.file1)
        assert ExportedData.objects.filter(id=self.data_export.id).exists()
        assert File.objects.filter(id=self.file1.id).exists()
        self.data_export.delete()
        assert not ExportedData.objects.filter(id=self.data_export.id).exists()
        assert not File.objects.filter(id=self.file1.id).exists()

    def test_finalize_upload(self):
        # With default expiration
        # NOTE(review): TemporaryFile defaults to binary mode, so writing a
        # str here only works on Python 2; Python 3 needs bytes.
        with tempfile.TemporaryFile() as tf:
            tf.write(self.TEST_STRING)
            tf.seek(0)
            self.file1.putfile(tf)
            self.data_export.finalize_upload(file=self.file1)
            assert self.data_export.file.getfile().read() == self.TEST_STRING
            assert self.data_export.date_finished is not None
            assert self.data_export.date_expired is not None
            assert self.data_export.date_expired == self.data_export.date_finished + DEFAULT_EXPIRATION
        # With custom expiration
        with tempfile.TemporaryFile() as tf:
            tf.write(self.TEST_STRING + self.TEST_STRING)
            tf.seek(0)
            self.file2.putfile(tf)
            self.data_export.finalize_upload(file=self.file2, expiration=timedelta(weeks=2))
            assert self.data_export.file.getfile().read() == self.TEST_STRING + self.TEST_STRING
            # Ensure the first file is deleted
            assert not File.objects.filter(id=self.file1.id).exists()
            assert self.data_export.date_expired == self.data_export.date_finished + timedelta(weeks=2)

    def test_email_success(self):
        # Shouldn't send if ExportedData is incomplete
        with self.tasks():
            self.data_export.email_success()
        assert len(mail.outbox) == 0
        # Should send one email if complete
        self.data_export.finalize_upload(file=self.file1)
        with self.tasks():
            self.data_export.email_success()
        assert len(mail.outbox) == 1

    @patch("sentry.utils.email.MessageBuilder")
    def test_email_success_content(self, builder):
        # Verify the success email's template, subject and context payload.
        self.data_export.finalize_upload(file=self.file1)
        with self.tasks():
            self.data_export.email_success()
        expected_url = absolute_uri(
            reverse(
                "sentry-data-export-details", args=[self.organization.slug, self.data_export.id]
            )
        )
        expected_email_args = {
            "subject": "Your data is ready.",
            "context": {
                "url": expected_url,
                "expiration": ExportedData.format_date(date=self.data_export.date_expired),
            },
            "type": "organization.export-data",
            "template": "sentry/emails/data-export-success.txt",
            "html_template": "sentry/emails/data-export-success.html",
        }
        builder.assert_called_with(**expected_email_args)

    def test_email_failure(self):
        # A failure email is sent and the export row is removed.
        with self.tasks():
            self.data_export.email_failure(self.TEST_STRING)
        assert len(mail.outbox) == 1
        assert not ExportedData.objects.filter(id=self.data_export.id).exists()

    @patch("sentry.utils.email.MessageBuilder")
    def test_email_failure_content(self, builder):
        # Verify the failure email's template, subject and context payload.
        with self.tasks():
            self.data_export.email_failure(self.TEST_STRING)
        expected_email_args = {
            "subject": "We couldn't export your data.",
            "context": {
                "creation": ExportedData.format_date(date=self.data_export.date_added),
                "error_message": self.TEST_STRING,
                "payload": json.dumps(self.data_export.payload, indent=2, sort_keys=True),
            },
            "type": "organization.export-data",
            "template": "sentry/emails/data-export-failure.txt",
            "html_template": "sentry/emails/data-export-failure.html",
        }
        builder.assert_called_with(**expected_email_args)
| StarcoderdataPython |
1840355 | import os
from django.conf import settings
from django.core.management.base import BaseCommand
from oldp.apps.cases.processing.case_processor import CaseProcessor, CaseInputHandlerFS, CaseInputHandlerDB
class Command(BaseCommand):
    """Django management command: run case-processing steps over cases read
    either from the file system or from the database."""

    help = 'Processes cases from FS or DB with different processing steps (extract refs, ...)'
    # Shared processor instance; configured per-invocation in handle().
    indexer = CaseProcessor()

    def add_arguments(self, parser):
        # The processor and the DB handler register their own options here.
        # NOTE(review): options such as 'limit', 'start', 'step', 'filter',
        # 'exclude', 'order_by' and 'per_page' used in handle() are
        # presumably added by these two calls — confirm; they are not
        # defined in this method.
        self.indexer.set_parser_arguments(parser)
        CaseInputHandlerDB.set_parser_arguments(parser)

        parser.add_argument('--input', nargs='+', type=str, default=os.path.join(settings.BASE_DIR, 'workingdir', 'cases'))
        parser.add_argument('--input-handler', type=str, default='db',
                            help='Read input from this source (file system or database)', choices=['db', 'fs'])
        parser.add_argument('--max-lines', type=int, default=-1)
        parser.add_argument('--source', type=str, default='serializer',
                            help='When reading from FS process files differently (serializer)')
        parser.add_argument('--empty', action='store_true', default=False, help='Empty existing index')

    def handle(self, *args, **options):
        self.indexer.set_options(options)

        # Define input
        if options['input_handler'] == 'fs':
            if options['source'] == 'serializer':
                handler = CaseInputHandlerFS(limit=options['limit'], start=options['start'], selector=options['input'])
            else:
                raise ValueError('Mode not supported. Use openjur or serializer.')
        elif options['input_handler'] == 'db':
            handler = CaseInputHandlerDB(
                limit=options['limit'],
                start=options['start'],
                filter_qs=options['filter'],
                exclude_qs=options['exclude'],
                order_by=options['order_by'],
                per_page=options['per_page'],
            )
        else:
            raise ValueError('Unsupported input handler: %s' % options['input_handler'])

        self.indexer.set_input_handler(handler)

        # Prepare processing steps
        self.indexer.set_processing_steps(options['step'])

        if options['empty']:
            self.indexer.empty_content()

        # Do processing
        self.indexer.process()
        self.indexer.log_stats()
| StarcoderdataPython |
6506316 | #!/usr/bin/env python
# encoding: utf-8
# ----------------------------------------------------------------------------
from django.conf import settings as django_settings
from django.core import mail
from django_mailer import models, constants, queue_email_message
from base import MailerTestCase
class TestBackend(MailerTestCase):
    """
    Backend tests for the django_mailer app.

    For Django versions less than 1.2, these tests are still run but they just
    use the queue_email_message function rather than directly sending messages.
    """
    def setUp(self):
        super(TestBackend, self).setUp()
        # Swap in the queueing backend, remembering the previous setting so
        # tearDown can restore it exactly.
        if constants.EMAIL_BACKEND_SUPPORT:
            if hasattr(django_settings, 'EMAIL_BACKEND'):
                self.old_email_backend = django_settings.EMAIL_BACKEND
            else:
                self.old_email_backend = None
            django_settings.EMAIL_BACKEND = 'django_mailer.smtp_queue.'\
                                            'EmailBackend'

    def tearDown(self):
        super(TestBackend, self).tearDown()
        # Restore (or remove) the EMAIL_BACKEND setting changed in setUp.
        if constants.EMAIL_BACKEND_SUPPORT:
            if self.old_email_backend:
                django_settings.EMAIL_BACKEND = self.old_email_backend
            else:
                delattr(django_settings, 'EMAIL_BACKEND')

    def send_message(self, msg):
        # Send via the backend when supported, else queue directly.
        if constants.EMAIL_BACKEND_SUPPORT:
            msg.send()
        else:
            queue_email_message(msg)

    def testQueuedMessagePriorities(self):
        # The X-Mail-Queue-Priority header maps onto queue priorities;
        # omitting it should behave like 'normal'.
        # high priority message
        msg = mail.EmailMessage(subject='subject', body='body',
                                from_email='<EMAIL>', to=['<EMAIL>'],
                                headers={'X-Mail-Queue-Priority': 'high'})
        self.send_message(msg)
        # low priority message
        msg = mail.EmailMessage(subject='subject', body='body',
                                from_email='<EMAIL>', to=['<EMAIL>'],
                                headers={'X-Mail-Queue-Priority': 'low'})
        self.send_message(msg)
        # normal priority message
        msg = mail.EmailMessage(subject='subject', body='body',
                                from_email='<EMAIL>', to=['<EMAIL>'],
                                headers={'X-Mail-Queue-Priority': 'normal'})
        self.send_message(msg)
        # normal priority message (no explicit priority header)
        msg = mail.EmailMessage(subject='subject', body='body',
                                from_email='<EMAIL>', to=['<EMAIL>'])
        self.send_message(msg)
        qs = models.QueuedMessage.objects.high_priority()
        self.assertEqual(qs.count(), 1)
        queued_message = qs[0]
        self.assertEqual(queued_message.priority, constants.PRIORITY_HIGH)
        qs = models.QueuedMessage.objects.low_priority()
        self.assertEqual(qs.count(), 1)
        queued_message = qs[0]
        self.assertEqual(queued_message.priority, constants.PRIORITY_LOW)
        qs = models.QueuedMessage.objects.normal_priority()
        self.assertEqual(qs.count(), 2)
        for queued_message in qs:
            self.assertEqual(queued_message.priority,
                             constants.PRIORITY_NORMAL)

    def testUnicodeQueuedMessage(self):
        """
        Checks that we capture unicode errors on mail
        """
        from django.core.management import call_command
        msg = mail.EmailMessage(subject='subject', body='body',
                                from_email=u'<EMAIL>', to=['<EMAIL>'],
                                headers={'X-Mail-Queue-Priority': 'normal'})
        self.send_message(msg)
        queued_messages = models.QueuedMessage.objects.all()
        self.assertEqual(queued_messages.count(), 1)
        call_command('send_mail', verbosity='0')
        # The unicode failure should be recorded as a failed-result log entry.
        num_errors = models.Log.objects.filter(result=constants.RESULT_FAILED).count()
        self.assertEqual(num_errors, 1)

    def testUnicodePriorityMessage(self):
        """
        Checks that we capture unicode errors on mail on priority.
        It's hard to check as by definition priority email does not log its
        contents.
        """
        from django.core.management import call_command
        msg = mail.EmailMessage(subject=u'á subject', body='body',
                                from_email=u'<EMAIL>', to=[u'<EMAIL>'],
                                headers={'X-Mail-Queue-Priority': 'now'})
        self.send_message(msg)
        # 'now' priority messages are sent immediately, never queued.
        queued_messages = models.QueuedMessage.objects.all()
        self.assertEqual(queued_messages.count(), 0)
        call_command('send_mail', verbosity='0')
        num_errors = models.Log.objects.filter(result=constants.RESULT_FAILED).count()
        self.assertEqual(num_errors, 0)

    def testSendMessageNowPriority(self):
        # NOW priority message
        msg = mail.EmailMessage(subject='subject', body='body',
                                from_email='<EMAIL>', to=['<EMAIL>'],
                                headers={'X-Mail-Queue-Priority': 'now'})
        self.send_message(msg)
        # Sent immediately, so nothing should remain in the queue.
        queued_messages = models.QueuedMessage.objects.all()
        self.assertEqual(queued_messages.count(), 0)
| StarcoderdataPython |
294287 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import beanmachine.ppl as bm
import gpytorch.likelihoods as likelihoods
import torch
from beanmachine.ppl.model.rv_identifier import RVIdentifier
class GpytorchMixin(torch.nn.Module):
    """
    Wrapper that registers the ``forward()`` call of GPyTorch likelihoods
    with Bean Machine. Priors for likelihood parameters can be registered
    similarly, as with kernels::

        from bm.experimental.likelihoods import GaussianLikelihood

        @bm.random_variable
        def noise_prior():
            return dist.Uniform(torch.tensor(0.01), torch.tensor(1.))

        likelihood = GaussianLikelihood(noise_prior=noise_prior)
        gp = SimpleGP(...)
        gp_prior = partial(gp, train_x)  # bind gp prior to train data
        obs = {likelihood(gp_prior): train_y}
        samples = nuts.infer([noise_prior()], obs, num_samples=100)
    """

    def _validate_args(self, prior):
        """Assert that *prior* is a Bean Machine random variable.

        :param prior: callable expected to be ``@bm.random_variable``-wrapped,
            i.e. calling it yields an :class:`RVIdentifier`.
        :raises AssertionError: if ``prior()`` is not an ``RVIdentifier``.
        """
        # BUG FIX: the message must be the second operand of the assert.
        # Previously it was a bare string statement on the following line,
        # so it was evaluated, discarded, and never shown on failure.
        assert isinstance(
            prior(), RVIdentifier
        ), "Prior should be None or a random variable but was: {}".format(type(prior))

    def __init__(self, *args, **kwargs):
        """Collect ``*_prior`` keyword arguments into ``self.priors``.

        Every keyword whose name contains ``"prior"`` is validated, stored,
        and stripped from ``kwargs`` before delegating to the GPyTorch
        constructor, which does not understand Bean Machine priors.
        """
        self.priors = {}
        # Iterate over a copy so the dict can be mutated while looping.
        for k, v in kwargs.copy().items():
            if "prior" not in k:
                continue
            self._validate_args(v)
            self.priors[k] = v
            # remove the prior for GPytorch
            kwargs.pop(k)
        super().__init__(*args, **kwargs)

    def __call__(self, prior_sample, *args, **kwargs):
        """
        In train mode, returns a sample from the likelihood given a `bm.random_variable`
        wrapped function. In eval mode, generates a marginal predictive sample.
        See `~gpytorch.likelihoods.Likelihood`.

        :param prior_sample: In train mode, a BM random variable. In eval mode, a
            `~gpytorch.distributions.MultivariateNormal` object.
        """
        if self.training:
            return self._bm_forward(prior_sample)
        return super().__call__(prior_sample, *args, **kwargs)

    @bm.random_variable
    def _bm_forward(self, prior_sample):
        # Sample the prior random variable and feed it through the
        # underlying GPyTorch likelihood.
        return super().__call__(prior_sample())

    def train(self, mode=True):
        """
        In `train()` mode, parameters and the forward method are
        lifted to BM random variables.
        In `eval()` mode, this acts as a Gpytorch likelihood, ie
        all methods conform to the parent class's function signatures.
        """
        if mode:
            self._strict(True)
            # Restore priors that were stashed by a previous eval().
            if hasattr(self, "_priors"):
                self.priors = self._priors
            super().train()
        else:
            self._strict(False)
            # Stash priors so the parameter properties fall through to
            # the plain GPyTorch attributes while in eval mode.
            self._priors = copy.deepcopy(self.priors)
            self.priors = {}
            super().train(False)

    @property
    def noise(self):
        # In train mode, sampling the prior; otherwise the GPyTorch value.
        if "noise_prior" in self.priors:
            return self.priors["noise_prior"]()
        return super().noise

    @noise.setter
    def noise(self, val):
        self.noise_covar.initialize(noise=val)

    @property
    def mixing_weights(self):
        if "mixing_weights_prior" in self.priors:
            return self.priors["mixing_weights_prior"]()
        return super().mixing_weights

    @property
    def scale(self):
        if "scale_prior" in self.priors:
            return self.priors["scale_prior"]()
        return super().scale

    @scale.setter
    def scale(self, value):
        self.initialize(raw_scale=self.raw_scale_constraint.inverse_transform(value))

    @property
    def task_noise_covar_factor(self):
        if "task_prior" in self.priors:
            return self.priors["task_prior"]()
        return super().task_noise_covar_factor

    @property
    def deg_free(self):
        if "deg_free_prior" in self.priors:
            return self.priors["deg_free_prior"]()
        return super().deg_free

    @deg_free.setter
    def deg_free(self, value):
        self._set_deg_free(value)
all_likelihoods = []

# Wrap every concrete Likelihood subclass exported by GPyTorch so each
# picks up the Bean Machine integration from GpytorchMixin, and expose the
# wrapped class under the same name in this module.
for _name, _cls in likelihoods.__dict__.items():
    if not (isinstance(_cls, type) and issubclass(_cls, likelihoods.Likelihood)):
        continue
    all_likelihoods.append(_name)
    _wrapped = type(_name, (GpytorchMixin, _cls), {})
    _wrapped.__module__ = __name__
    locals()[_name] = _wrapped
| StarcoderdataPython |
335 | <reponame>nirdslab/streaminghub
#!/usr/bin/env python3
import glob
import os
import pandas as pd
import dfs
# Directory holding the raw ADHD-SIN eye-tracker exports, and the directory
# the normalized recordings are written to (both under the shared data dir
# resolved by the project's `dfs` module).
SRC_DIR = f"{dfs.get_data_dir()}/adhd_sin_orig"
OUT_DIR = f"{dfs.get_data_dir()}/adhd_sin"
if __name__ == '__main__':
files = glob.glob(f"{SRC_DIR}/*.csv")
file_names = list(map(os.path.basename, files))
for file_name in file_names:
df: pd.DataFrame = pd.read_csv(f'{SRC_DIR}/{file_name}').set_index('EyeTrackerTimestamp').sort_index()[
['GazePointX (ADCSpx)', 'GazePointY (ADCSpx)', 'PupilLeft', 'PupilRight']].reset_index()
df.columns = ['t', 'x', 'y', 'dl', 'dr']
# fill blanks (order=interpolate(inter)->bfill+ffill(edges))->zerofill
df = df.apply(lambda x: x.interpolate().fillna(method="bfill").fillna(method="ffill")).fillna(0)
df['x'] = df['x'] / 1920
df['y'] = df['y'] / 1080
df['d'] = (df['dl'] + df['dr']) / 2
# start with t=0, and set unit to ms
df['t'] = (df['t'] - df['t'].min()) / 1000
df = df[['t', 'x', 'y', 'd']].round(6).set_index('t')
df.to_csv(f'{OUT_DIR}/{file_name}')
print(f'Processed: {file_name}')
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.