id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
6533912 | <reponame>EchoRickZz/DeepST<filename>deepst/utils/viewRetFromPkl.py
#!/usr/bin/env python
# encoding: utf-8
import sys

# cPickle existed only on Python 2; fall back to the stdlib pickle module
# (which is C-accelerated on Python 3 anyway).
try:
    import cPickle as pickle
except ImportError:
    import pickle
def view(fname):
    """Pretty-print every key/value pair of a pickled dictionary.

    Args:
        fname: Path to a pickle file whose top-level object is a dict.
    """
    # Context manager guarantees the file handle is closed (the original
    # left it open).
    with open(fname, 'rb') as f:
        pkl = pickle.load(f)
    for ke in pkl:  # iterating a dict yields its keys
        print('=' * 10)
        print(ke)
        print(pkl[ke])


if __name__ == '__main__':
    # Guard keeps the module importable without the import-time side effect
    # (and without an IndexError when no argument is given on import).
    view(sys.argv[1])
| StarcoderdataPython |
23932 | <reponame>JakubBatel/Attendance-Recorder<filename>src/attendance/card_reader.py
from .resources.config import config
from .utils import reverse_endianness
from abc import ABC
from abc import abstractmethod
from logging import getLogger
from logging import Logger
from time import sleep
from typing import Final
import re
import serial
class ICardReader(ABC):
    """Class representation of card reader.

    Abstract interface implemented by concrete readers (see CardReader
    below for a serial-port implementation).
    """

    @abstractmethod
    def read_card(self, raise_if_no_data: bool = False) -> str:
        """Read one card and returns the data as a hex string.

        Args:
            raise_if_no_data: If True the NoDataException is raised if no data are present.

        Returns:
            Hex string representation of the data.

        Raises:
            NoDataException: If raise_if_no_data is set to True and no data was read.
        """
        pass
class InvalidDataException(Exception):
    """Raised when the data read from a card are corrupted or malformed."""

    def __init__(self, message):
        """Create the exception with a human readable description.

        Args:
            message: Error message.
        """
        super().__init__(message)
class NoDataException(Exception):
    """Raised when a read attempt produced no card data at all."""

    def __init__(self, message):
        """Create the exception with a human readable description.

        Args:
            message: Error message.
        """
        super().__init__(message)
class CardReader(ICardReader):
    """Class representation of physical card reader.

    It reads data from physical card reader using serial communication.
    It is configured using config file (config.ini in resources folder).
    """

    # Every card transmission starts with this marker byte.
    INIT_BYTE: Final = b'\x02'
    # Number of ASCII characters that make up one card payload.
    CARD_SIZE: Final = 10
    # Serial line parameters, all taken from the [CardReader] config section
    # at class-creation (import) time.
    PORT: Final = config['CardReader']['devPath']
    BAUDRATE: Final = int(config['CardReader']['baudrate'])
    PARITY: Final = getattr(serial, config['CardReader']['parity'])
    STOPBITS: Final = getattr(serial, config['CardReader']['stopbits'])
    BYTESIZE: Final = getattr(serial, config['CardReader']['bytesize'])
    TIMEOUT: Final = float(config['CardReader']['timeout'])
    # A valid card is exactly 10 lowercase hexadecimal digits.
    CARD_REGEX: Final = re.compile('^[0-9a-f]{10}$')

    def __init__(self):
        """Init logger and create new Serial object for serial communication based on configuration."""
        self.logger: Logger = getLogger(__name__)
        self._port = serial.Serial(
            port=CardReader.PORT,
            baudrate=CardReader.BAUDRATE,
            parity=CardReader.PARITY,
            stopbits=CardReader.STOPBITS,
            bytesize=CardReader.BYTESIZE,
            timeout=CardReader.TIMEOUT
        )

    def read_card(self, raise_if_no_data: bool = False) -> str:
        """Read one card using serial communication.

        This method ends only when some data are read or until it times out.
        If no card data are present the operation is retried 0.5 second later.

        Args:
            raise_if_no_data: If true the NoDataException is raised if no data are present.

        Returns:
            Hex string representation of card data.

        Raises:
            NoDataException: If raise_if_no_data is set to True and no data was read.
            InvalidDataException: If card data are corrupted.
        """
        while True:
            byte = self._port.read()
            if byte == b'':
                # Serial read timed out with nothing to deliver.
                self.logger.debug('No card data.')
                if raise_if_no_data:
                    raise NoDataException('No card data was read.')
                else:
                    sleep(0.5)
                    continue
            if byte != CardReader.INIT_BYTE:
                # Stream is desynchronised; keep dropping bytes until the
                # next start marker arrives.
                self.logger.debug('Invalid initial sequence.')
                continue
            data = self._port.read(CardReader.CARD_SIZE)
            # reverse_endianness reorders the received characters — presumably
            # the reader transmits them in reversed byte order; confirm
            # against the utils implementation.
            card: str = reverse_endianness(data.decode('ascii'))
            if not CardReader.CARD_REGEX.match(card):
                self.logger.debug('Incomplete or corrupted data.')
                raise InvalidDataException(
                    'Card data are invalid - incomplete or corrupted data.')
            self.logger.info(card + ' was read')
            while self._port.read() != b'':
                continue  # consume all residual data
            return card
| StarcoderdataPython |
229097 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2019 <NAME> <<EMAIL>>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
NUM_OUTPUT_CHANNELS = 8
# TODO: Other I²C functions: general call / reset address, device ID address.
class Decoder(srd.Decoder):
    # libsigrokdecode protocol-decoder metadata (API version 3).
    api_version = 3
    id = 'pca9571'
    name = 'PCA9571'
    longname = 'NXP PCA9571'
    desc = 'NXP PCA9571 8-bit I²C output expander.'
    license = 'gplv2+'
    inputs = ['i2c']
    outputs = []
    tags = ['Embedded/industrial', 'IC']
    # Annotation classes: indices are referenced by putx() calls below.
    annotations = (
        ('register', 'Register type'),
        ('value', 'Register value'),
        ('warning', 'Warning'),
    )
    annotation_rows = (
        ('regs', 'Registers', (0, 1)),
        ('warnings', 'Warnings', (2,)),
    )

    def __init__(self):
        self.reset()

    def reset(self):
        # Decoder state machine starts idle.
        self.state = 'IDLE'
        self.last_write = 0xFF  # Chip port default state is high.

    def start(self):
        # Register the annotation output with the sigrok core.
        self.out_ann = self.register(srd.OUTPUT_ANN)

    def putx(self, data):
        # Emit an annotation covering the current sample range [ss, es].
        self.put(self.ss, self.es, self.out_ann, data)

    def handle_io(self, b):
        # Annotate a data byte as either a read-back or an output write.
        if self.state == 'READ DATA':
            operation = ['Outputs read', 'R']
            # Reading the chip should reflect the last written value;
            # warn if the bus says otherwise.
            if b != self.last_write:
                self.putx([2, ['Warning: read value and last write value '
                               '(%02X) are different' % self.last_write]])
        else:
            operation = ['Outputs set', 'W']
            self.last_write = b
        self.putx([1, [operation[0] + ': %02X' % b,
                       operation[1] + ': %02X' % b]])

    def check_correct_chip(self, addr):
        # The PCA9571 responds on the fixed I²C slave address 0x25.
        if addr != 0x25:
            self.putx([2, ['Warning: I²C slave 0x%02X not a PCA9571 '
                           'compatible chip.' % addr]])
            return False
        return True

    def decode(self, ss, es, data):
        cmd, databyte = data
        self.ss, self.es = ss, es

        # State machine.
        if cmd in ('ACK', 'BITS'):  # Discard 'ACK' and 'BITS'.
            pass
        elif cmd in ('START', 'START REPEAT'):  # Start a communication.
            self.state = 'GET SLAVE ADDR'
        elif cmd in ('NACK', 'STOP'):  # Reset the state machine.
            self.state = 'IDLE'
        elif cmd in ('ADDRESS READ', 'ADDRESS WRITE'):
            # Only proceed when we just saw a START and the address matches.
            if ((self.state == 'GET SLAVE ADDR') and
                    self.check_correct_chip(databyte)):
                if cmd == 'ADDRESS READ':
                    self.state = 'READ DATA'
                else:
                    self.state = 'WRITE DATA'
            else:
                self.state = 'IDLE'
        elif cmd in ('DATA READ', 'DATA WRITE'):
            if self.state in ('READ DATA', 'WRITE DATA'):
                self.handle_io(databyte)
            else:
                self.state = 'IDLE'
| StarcoderdataPython |
8198804 | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 7 10:31:00 2016
@author: roel
"""
from opengrid_dev import ureg
import pandas as pd
from dateutil import rrule
import datetime as dt
from itertools import groupby, count
import pytz
def parse_date(d):
    """
    Return a pandas.Timestamp if possible.

    Parameters
    ----------
    d : Datetime, float, int, string or pandas Timestamp
        Anything that can be parsed into a pandas.Timestamp.
        Numeric input is interpreted as a POSIX timestamp IN SECONDS.

    Returns
    -------
    pts : pandas.Timestamp

    Raises
    ------
    ValueError if it was not possible to create a pandas.Timestamp
    """
    if isinstance(d, (float, int)):
        # we have a POSIX timestamp IN SECONDS.
        return pd.Timestamp(d, unit='s')
    try:
        return pd.Timestamp(d)
    except (ValueError, TypeError) as exc:
        # Only conversion failures are translated; the original bare
        # ``except:`` also swallowed KeyboardInterrupt/SystemExit.
        raise ValueError(
            "{} cannot be parsed into a pandas.Timestamp".format(d)) from exc
def time_to_timedelta(t):
    """
    Return a pandas Timedelta from a time object

    Parameters
    ----------
    t : datetime.time

    Returns
    -------
    pd.Timedelta

    Notes
    ------
    The timezone of t (if present) is ignored.
    """
    # Bug fix: the original computed ``t.microsecond * 1e-3`` (milliseconds)
    # but passed it as *seconds*, inflating the sub-second part 1000x.
    # Passing the components to Timedelta directly keeps everything exact.
    return pd.Timedelta(hours=t.hour, minutes=t.minute,
                        seconds=t.second, microseconds=t.microsecond)
def split_by_day(df, starttime=dt.time.min, endtime=dt.time.max):
    """
    Split a DataFrame with a DatetimeIndex into one DataFrame per day.

    Parameters
    ----------
    df : pandas DataFrame with datetimeindex
    starttime, endtime : datetime.time objects
        For each day, only rows with a time in [starttime, endtime) are kept.

    Returns
    -------
    list with one DataFrame per day, or None when df is empty.
    """
    if df.empty:
        return None
    # Keep only the requested time-of-day window, then group what is left
    # by calendar date.
    in_window = (df.index.time >= starttime) & (df.index.time < endtime)
    windowed = df[in_window]
    return [day_frame for _, day_frame in windowed.groupby(windowed.index.date)]
def unit_conversion_factor(source, target):
    """
    Shorthand function to get a conversion factor for unit conversion.

    Parameters
    ----------
    source, target : str
        Unit as string, should be parsable by pint

    Returns
    -------
    cf : float
        Conversion factor. Multiply the source value with this factor to
        get the target value. Works only for factorial conversion!
    """
    # Missing unit information or identical units: nothing to convert.
    if not source or not target or source == target:
        return 1
    return 1 * ureg(source).to(target).magnitude
def dayset(start, end):
    """
    Take a start and end date and return all days between them (inclusive).

    Despite the historical name, a *sorted list* of unique dates is
    returned (chronological order), matching the original behaviour.

    Parameters
    ----------
    start : datetime-like object
    end : datetime-like object

    Returns
    -------
    sorted list of datetime.date, empty when end < start
    """
    # Plain date arithmetic replaces the dateutil.rrule dependency while
    # reproducing its semantics: dates are promoted to midnight datetimes,
    # then we step one day at a time while still at or before `end`.
    if not isinstance(start, dt.datetime):
        start = dt.datetime.combine(start, dt.time.min)
    if not isinstance(end, dt.datetime):
        end = dt.datetime.combine(end, dt.time.min)
    res = []
    current = start
    while current <= end:
        res.append(current.date())
        current += dt.timedelta(days=1)
    return sorted(set(res))
def split_irregular_date_list(date_list):
    """
    Group a list of dates into blocks of consecutive days and return the
    begin and end of each block.

    e.g. a list with continuous dates from January to March and September
    to October is split into two (begin, end) tuples.

    Parameters
    ----------
    date_list : list of datetime.date

    Returns
    -------
    list of tuples of datetime.date
    """
    blocks = []
    for day in sorted(date_list):
        if blocks and day - blocks[-1][1] == dt.timedelta(days=1):
            # Exactly one day after the current block's end: extend it.
            blocks[-1][1] = day
        else:
            # Gap (or duplicate date): start a fresh block.
            blocks.append([day, day])
    return [(begin, end) for begin, end in blocks]
def calculate_temperature_equivalent(temperatures):
    """
    Calculate the temperature equivalent from a series of average daily
    temperatures: 0.6 * T(day) + 0.3 * T(day-1) + 0.1 * T(day-2)

    Parameters
    ----------
    temperatures : Pandas Series

    Returns
    -------
    Pandas Series (first two entries are NaN because of the lags)
    """
    weighted_lags = ((0.6, 0), (0.3, 1), (0.1, 2))
    result = sum(weight * temperatures.shift(lag) for weight, lag in weighted_lags)
    result.name = 'temp_equivalent'
    return result
def calculate_degree_days(temperature_equivalent, base_temperature, cooling=False):
    """
    Calculate degree days from a series of temperature equivalent values.

    Parameters
    ----------
    temperature_equivalent : Pandas Series
    base_temperature : float
    cooling : bool
        Set True for cooling degree days instead of heating degree days.

    Returns
    -------
    Pandas Series
    """
    if cooling:
        degree_days = temperature_equivalent - base_temperature
    else:
        degree_days = base_temperature - temperature_equivalent
    # Degree days cannot be negative; clip() preserves NaN entries exactly
    # like the original boolean-mask assignment did.
    degree_days = degree_days.clip(lower=0)
    prefix = 'cooling' if cooling else 'heating'
    degree_days.name = '{}_degree_days_{}'.format(prefix, base_temperature)
    return degree_days
def last_midnight(timezone):
    """
    Return the timestamp of the most recent midnight in a given timezone.

    Parameters
    ----------
    timezone: str
        pytz timezone name

    Returns
    -------
    datetime (timezone-aware)
    """
    local_now = dt.datetime.now(tz=pytz.timezone(timezone))
    # Truncating the local "now" to 00:00:00 yields the last midnight.
    return local_now.replace(hour=0, minute=0, second=0, microsecond=0)
3302419 | # pylint:disable=invalid-name,missing-docstring,too-few-public-methods,redefined-builtin
from __future__ import print_function
from linthints import adds_arguments, sets_arguments
import sys
def bogus_dec(func):
    """No-op decorator: forwards every call to *func* unchanged."""
    def passthrough(*args, **kwargs):
        return func(*args, **kwargs)
    return passthrough
@adds_arguments(file=None)
@sets_arguments(c=1)
def simple_function_dec(func):
    """Lint fixture: decorator that pops a ``file`` kwarg (defaulting to
    stdout), logs the call, then forces ``c=1`` before delegating to *func*
    — exactly the contract declared by the linthints annotations above.
    """
    def inner(*args, **kwargs):
        file = kwargs.pop('file', None) or sys.stdout
        print('{}(*{}, **{})'.format(func.__name__, args, kwargs), file=file)
        kwargs['c'] = 1
        return func(*args, **kwargs)
    return inner
@adds_arguments(file=None)
@sets_arguments(c=1)
def complex_function_dec(*oargs, **okwargs):
    """Lint fixture: *parameterised* decorator factory mirroring
    simple_function_dec, but logging the factory's own arguments instead of
    the wrapped call.
    """
    def wrapper(func):
        def inner(*args, **kwargs):
            file = kwargs.pop('file', None) or sys.stdout
            print('Outer args={}, kwargs={}'.format(oargs, okwargs), file=file)
            kwargs['c'] = 1
            return func(*args, **kwargs)
        return inner
    return wrapper
@adds_arguments(file=None)
@sets_arguments(c=1)
class SimpleClassDec(object):
    """Lint fixture: class-based decorator variant.

    NOTE(review): ``__init__`` captures the decorated function as
    ``self.func``, but ``__call__`` takes *another* ``func`` parameter and
    never uses ``self.func`` — so when applied directly (``@SimpleClassDec``),
    calling the decorated name returns ``inner`` instead of executing it.
    Possibly deliberate for the lint-test fixture; confirm before changing.
    """
    def __init__(self, func):
        self.func = func

    def __call__(self, func):
        def inner(*args, **kwargs):
            file = kwargs.pop('file', None) or sys.stdout
            print('{}(*{}, **{})'.format(func.__name__, args, kwargs), file=file)
            kwargs['c'] = 1
            return func(*args, **kwargs)
        return inner
@adds_arguments(file=None)
@sets_arguments(c=1)
class ComplexClassDec(object):
    """Lint fixture: parameterised class-based decorator. Instantiated with
    arbitrary arguments, then called with the function to wrap.
    """
    def __init__(self, *args, **kwargs):
        # Factory arguments, echoed on every wrapped call.
        self.args = args
        self.kwargs = kwargs

    def __call__(self, func):
        def inner(*args, **kwargs):
            file = kwargs.pop('file', None) or sys.stdout
            print('Class args={}, kwargs={}'.format(self.args, self.kwargs), file=file)
            kwargs['c'] = 1
            return func(*args, **kwargs)
        return inner
@bogus_dec
@simple_function_dec
@bogus_dec
def simple_function_add(a, b, c=0):
    # Fixture: wrapped by simple_function_dec (adds `file`, forces c=1),
    # sandwiched between no-op decorators.
    return a + b + c
@complex_function_dec('hello', world='world')
@bogus_dec
def complex_function_add(a, b, c=0):
    # Fixture: wrapped by the parameterised function decorator.
    return a + b + c
@SimpleClassDec
def simple_class_add(a, b, c=0):
    # Fixture: decorated directly by SimpleClassDec (see the NOTE on that
    # class about its __init__/__call__ mismatch).
    return a + b + c
@bogus_dec
@ComplexClassDec('hello', world='world')
def complex_class_add(a, b, c=0):
    # Fixture: wrapped by the parameterised class decorator plus a no-op.
    return a + b + c
def undecorated_add(a, b, c=0):
    """Return a + b + c (left-to-right); control fixture with no decorator."""
    partial_sum = a + b
    return partial_sum + c
@bogus_dec
def noarg_decorated_add(a, b, c=0):
    # Fixture: wrapped only by the no-op decorator.
    return a + b + c
| StarcoderdataPython |
1638442 | # Generated by Django 2.0.8 on 2018-08-12 21:47
from django.db import migrations
import i18nfield.fields
class Migration(migrations.Migration):
    # Auto-generated by Django 2.0.8: switches Question.question to an
    # I18nCharField with max_length=800. Do not edit by hand beyond what
    # Django's migration framework expects.

    dependencies = [
        ("submission", "0026_auto_20180811_1827"),
    ]

    operations = [
        migrations.AlterField(
            model_name="question",
            name="question",
            field=i18nfield.fields.I18nCharField(max_length=800),
        ),
    ]
| StarcoderdataPython |
60599 | <gh_stars>0
import os
import numpy as np
class ForceField(object):
def __init__(self):
    # One list per GROMACS parameter table; each is filled with tuples by
    # load_from_file() as the corresponding [ section ] is parsed.
    self.atomtypes = []
    self.pairtypes = []
    self.bondtypes = []
    self.constrainttypes = []
    self.angletypes = []
    self.dihedraltypes = []
    self.implicit_genborn_params = []
    self.cmaptypes = []
    self.nonbond_params = []
@classmethod
def load_gromacs_ff(cls, ffdir):
self=cls()
self.load_from_file(os.path.join(ffdir,'forcefield.itp'))
return self
def load_from_file(self, filename, mode=None, defs=None, ifdefs=None):
    """Parse a GROMACS topology include file, appending entries to self.

    Handles preprocessor-style directives (#define/#undef/#ifdef/#ifndef/
    #else/#endif/#include), ';' comments and '\\' line continuations.
    ``mode`` tracks the current ``[ section ]``; ``defs``/``ifdefs`` carry
    preprocessor state across recursive #include calls.

    Returns:
        (mode, defs, ifdefs) so an including file can continue parsing
        with the same state.
    """
    mode = mode  # no-op; parallels the defs/ifdefs default handling below
    if defs is None:
        defs = []
    if ifdefs is None:
        ifdefs = []
    inactive_because_ifdef = False  # NOTE(review): assigned but never used
    previous_line = ''
    with open(filename, 'rt') as f:
        for l in f:
            # Drop ';' comments, then glue continuation lines together.
            if ';' in l:
                l = l.split(';')[0]
            l = previous_line + l
            previous_line = ''
            l = l.strip()
            if not l:
                continue
            if l.endswith('\\'):
                previous_line = l[:-1] + ' '
                continue
            if l.startswith('#'):
                # preprocessor directive
                target = l.split(None, 1)[-1]
                if l.startswith('#define '):
                    defs.append(target)
                elif l.startswith('#undef '):
                    defs = [d for d in defs if d != target]
                elif l.startswith('#ifdef '):
                    # Stack of (symbol, condition-satisfied) pairs.
                    ifdefs.append((target, target in defs))
                elif l.startswith('#ifndef '):
                    ifdefs.append((target, target not in defs))
                elif l.startswith('#else '):
                    ifdefs[-1] = (ifdefs[-1][0], not ifdefs[-1][1])
                elif l.startswith('#endif'):
                    ifdefs = ifdefs[:-1]
                elif l.startswith('#include'):
                    # print('#Including {}'.format(target))
                    # Recurse into the included file, resolved relative to
                    # the current file's directory; target is quoted.
                    mode, defs, ifdefs = self.load_from_file(os.path.join(os.path.split(filename)[0], target[1:-1]), mode, defs, ifdefs)
                continue
            if ifdefs and (not ifdefs[-1][1]):
                # Innermost conditional is unsatisfied: skip this line.
                # print('Skipping line in mode [ {} ] because of an unsatisfied preprocessor conditional ({}): {}'.format(mode, ifdefs[-1][0],l))
                continue
            if l.startswith('[') and l.endswith(']'):
                # Section header, e.g. "[ bondtypes ]".
                mode = l[1:-1].strip()
                continue
            # Per-section parsing: split whitespace-separated fields and
            # convert to the table's tuple layout.
            if mode == 'defaults':
                nbfunc, combrule, genpairs, fudgeLJ, fudgeQQ = l.split()
                self.nbfunc = int(nbfunc)
                self.combrule = int(combrule)
                self.genpairs = genpairs.lower() == 'yes'
                self.fudgeLJ = float(fudgeLJ)
                self.fudgeQQ = float(fudgeQQ)
            elif mode == 'atomtypes':
                at, atnum, mass, charge, ptype, sigma, epsilon = l.split()
                self.atomtypes.append((at, int(atnum), float(mass), float(charge), ptype, float(sigma), float(epsilon)))
            elif mode == 'pairtypes':
                at1, at2, func, sigma, epsilon = l.split()
                self.pairtypes.append((at1, at2, int(func), float(sigma), float(epsilon)))
            elif mode == 'bondtypes':
                # Variable number of trailing float parameters.
                lis = l.split()
                self.bondtypes.append((lis[0], lis[1], int(lis[2])) + tuple(float(x) for x in lis[3:]))
            elif mode == 'constrainttypes':
                at1, at2, func, value = l.split()
                self.constrainttypes.append((at1, at2, int(func), float(value)))
            elif mode == 'angletypes':
                lis = l.split()
                self.angletypes.append(tuple(lis[:3]) + (int(lis[3]),) + tuple([float(x) for x in lis[4:]]))
            elif mode == 'dihedraltypes':
                lis = l.split()
                self.dihedraltypes.append(tuple(lis[:4]) + (int(lis[4]),) + tuple([float(x) for x in lis[5:]]))
            elif mode == 'implicit_genborn_params':
                at, sar, st, pi, gbr, hct = l.split()
                self.implicit_genborn_params.append((at, float(sar), float(st), float(pi), float(gbr), float(hct)))
            elif mode == 'cmaptypes':
                # The trailing field is an nx*ny grid of floats.
                at1, at2, at3, at4, at5, func, nx, ny, data = l.split(None, 8)
                data = np.array([float(d) for d in data.split()]).reshape(int(nx), int(ny))
                self.cmaptypes.append((at1, at2, at3, at4, at5, int(func), int(nx), int(ny), data))
            elif mode == 'nonbond_params':
                at1, at2, func, sigma, epsilon = l.split()
                self.nonbond_params.append((at1, at2, int(func), float(sigma), float(epsilon)))
            else:
                print('Unknown mode: {}'.format(mode))
    return mode, defs, ifdefs
def get_atomtype(self, at1):
return [a for a in self.atomtypes if a[0] == at1]
def get_bond(self, at1, at2):
return [b for b in self.bondtypes if (b[0]==at1 and b[1]==at2) or (b[1] == at1 and b[0] == at2)]
def get_angle(self, at1, at2, at3):
return [a for a in self.angletypes if (a[0]==at1 and a[1] == at2 and a[2]==at3) or (a[0]==at3 and a[1]==at2 and a[2]==at1)]
def get_dihedral(self, at1, at2, at3, at4):
return [d for d in self.dihedraltypes if
(d[0] in (at1,'X') and d[1]==at2 and d[2]==at3 and d[3] in (at4, 'X')) or
(d[0] in (at4,'X') and d[1]==at3 and d[2]==at2 and d[3] in (at1, 'X'))]
def get_pair(self, at1, at2):
return [p for p in self.pairtypes if (p[0]==at1 and p[1]==at2) or (p[0]==at2 and p[1]==at1) ]
def get_nonbond_param(self, at1, at2):
return [p for p in self.nonbond_params if (p[0]==at1 and p[1]==at2) or (p[0]==at2 and p[1]==at1) ]
def new_atomtype(self, newname, basedon):
    """Create a new atom type by copying another."""
    def replaceif(newname, basedon, tup, length):
        # Recursively walk the first ``length`` fields of ``tup``; for every
        # field equal to ``basedon`` yield variants both with and without the
        # replacement. The net effect is all substitution combinations, so
        # original entries are kept and new-type copies are added.
        if length > 0:
            for t in replaceif(newname, basedon, tup[1:], length - 1):
                if tup[0] == basedon:
                    yield (newname,) + t
                yield (tup[0],) + t
        else:
            yield tup
            return
    # (length, attr): how many leading fields of each parameter table hold
    # atom-type names eligible for substitution.
    for length, attr in [(1, 'atomtypes'),
                         (2, 'bondtypes'),
                         (3, 'angletypes'),
                         (4, 'dihedraltypes'),
                         (2, 'constrainttypes'),
                         (2, 'pairtypes'),
                         (1, 'implicit_genborn_params'),
                         (5, 'cmaptypes'),
                         (2, 'nonbond_params')]:
        lis = getattr(self, attr)
        newlis = []
        for row in lis:
            newlis.extend(list(replaceif(newname, basedon, row, length)))
        setattr(self, attr, newlis)
def write_forcefield(self):
    # NOTE(review): this method appears truncated in this chunk; it sorts
    # the parameter tables before (presumably) serialising them.
    self.atomtypes = sorted(self.atomtypes, key=lambda at: at[0])
    self.bondtypes = sorted(self.bondtypes, key=lambda bt: bt[:2])
    # BUG(review): sorted() is called with no iterable — this raises
    # TypeError at runtime; likely meant sorted(self.angletypes, key=...).
    self.angletypes = sorted()
| StarcoderdataPython |
12802076 | from multiprocessing import Process, Queue
from datetime import datetime as dt
import time
import os
class Worker(Process):
    # Busy-loop process used to load a CPU core: spins until anything
    # appears on the shared queue, then exits.
    def __init__(self, q):
        Process.__init__(self)
        self.q: Queue = q

    def run(self):
        while True:
            if not self.q.empty():
                break
            # The comparison just burns CPU cycles without sleeping;
            # its result is irrelevant.
            if dt.now() == dt.now():
                pass
class Temp(Process):
    # Prints the SoC temperature roughly every 5 seconds until anything
    # appears on the shared queue, then exits.
    def __init__(self, q):
        Process.__init__(self)
        self.q: Queue = q

    def run(self):
        n = 0
        while True:
            n += 1
            if not self.q.empty():
                break
            if n == 5:
                # NOTE(review): hard-coded path assumes a Raspberry Pi with
                # the VideoCore tools installed — confirm on target hardware.
                print(os.popen("/opt/vc/bin/vcgencmd measure_temp").read())
                n = 0
            time.sleep(1)
if __name__ == '__main__':
    # The guard is required for multiprocessing: start methods that
    # re-import this module in child processes ('spawn' on Windows/macOS)
    # would otherwise re-execute this block recursively.
    q = Queue()
    # Four CPU-burning workers plus one temperature reporter.
    for _ in range(4):
        w = Worker(q)
        w.start()
    t = Temp(q)
    t.start()
    # Any line of input stops everything: each process exits once the
    # queue becomes non-empty.
    q.put(input())
| StarcoderdataPython |
5102627 | <reponame>kgresearch/models
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
from smoke.ops import gather_op
class SMOKECoder(paddle.nn.Layer):
    """SMOKE Coder class

    Decodes the network's regressed offsets (depth, 2D center, dimensions,
    orientation) into 3D boxes, delegating depth/dimension decoding to the
    DepthDecoder/DimensionDecoder sub-layers.
    """
    def __init__(self, depth_ref, dim_ref):
        super().__init__()
        # self.depth_ref = paddle.to_tensor(depth_ref)
        # self.dim_ref = paddle.to_tensor(dim_ref)
        self.depth_decoder = DepthDecoder(depth_ref)
        self.dimension_decoder = DimensionDecoder(dim_ref)

    @staticmethod
    def rad_to_matrix(rotys, N):
        """decode rotys to R_matrix
        Args:
            rotys (Tensor): roty of objects
            N (int): num of batch
        Returns:
            Tensor: R matrix with shape (N, 3, 3)
            R = [[cos(r), 0, sin(r)], [0, 1, 0], [-cos(r), 0, sin(r)]]
        """
        cos, sin = rotys.cos(), rotys.sin()
        # NOTE(review): i_temp is left over from the commented slice-based
        # implementation below and is unused by the concat path.
        i_temp = paddle.to_tensor([[1, 0, 1], [0, 1, 0], [-1, 0, 1]], dtype="float32")
        # ry = paddle.reshape(i_temp.tile([N, 1]), (N, -1, 3))
        # ry[:, 0, 0] *= cos
        # ry[:, 0, 2] *= sin
        # ry[:, 2, 0] *= sin
        # ry[:, 2, 2] *= cos
        # slice bug, so use concat
        # Build the nine matrix entries column-wise, then concatenate and
        # reshape into (N, 3, 3) rotation matrices about the Y axis.
        pos1 = (paddle.ones([N], dtype="float32") * cos).unsqueeze(-1)
        pos2 = (paddle.zeros([N], dtype="float32")).unsqueeze(-1)
        pos3 = (paddle.ones([N], dtype="float32") * sin).unsqueeze(-1)
        pos4 = (paddle.zeros([N], dtype="float32")).unsqueeze(-1)
        pos5 = (paddle.ones([N], dtype="float32")).unsqueeze(-1)
        pos6 = (paddle.zeros([N], dtype="float32")).unsqueeze(-1)
        pos7 = (paddle.ones([N], dtype="float32") * (-sin)).unsqueeze(-1)
        pos8 = (paddle.zeros([N], dtype="float32")).unsqueeze(-1)
        pos9 = (paddle.ones([N], dtype="float32") * cos).unsqueeze(-1)
        ry = paddle.concat([pos1, pos2, pos3, pos4, pos5, pos6, pos7, pos8, pos9], axis=1)
        ry = paddle.reshape(ry, [N, 3, 3])
        return ry

    def encode_box3d(self, rotys, dims, locs):
        """
        construct 3d bounding box for each object.
        Args:
            rotys: rotation in shape N
            dims: dimensions of objects
            locs: locations of objects
        Returns:
            Tensor of the 8 rotated-and-translated box corners per object.
        """
        # Normalise inputs to flat / (N, 3) shapes.
        if len(rotys.shape) == 2:
            rotys = rotys.flatten()
        if len(dims.shape) == 3:
            dims = paddle.reshape(dims, (-1, 3))
        if len(locs.shape) == 3:
            locs = paddle.reshape(locs, (-1, 3))

        N = rotys.shape[0]
        ry = self.rad_to_matrix(rotys, N)
        # if test:
        #     dims.register_hook(lambda grad: print('dims grad', grad.sum()))
        # dims = paddle.reshape(dims, (-1, 1)).tile([1, 8])
        # dims[::3, :4] = 0.5 * dims[::3, :4]
        # dims[1::3, :4] = 0.
        # dims[2::3, :4] = 0.5 * dims[2::3, :4]
        # dims[::3, 4:] = -0.5 * dims[::3, 4:]
        # dims[1::3, 4:] = -dims[1::3, 4:]
        # dims[2::3, 4:] = -0.5 * dims[2::3, 4:]
        # Concat-based replacement for the in-place slice assignment above:
        # build half-extent offsets for the "left" (+) and "right" (-) corner
        # groups of each box.
        dim_left_1 = (0.5 * dims[:, 0]).unsqueeze(-1)
        dim_left_2 = paddle.zeros([dims.shape[0], 1]).astype("float32") #(paddle.zeros_like(dims[:, 1])).unsqueeze(-1)
        dim_left_3 = (0.5 * dims[:, 2]).unsqueeze(-1)
        dim_left = paddle.concat([dim_left_1, dim_left_2, dim_left_3], axis=1)
        dim_left = paddle.reshape(dim_left, (-1, 1)).tile([1, 4])
        dim_right_1 = (-0.5 * dims[:, 0]).unsqueeze(-1)
        dim_right_2 = (-dims[:, 1]).unsqueeze(-1)
        dim_right_3 = (-0.5 * dims[:, 2]).unsqueeze(-1)
        dim_right = paddle.concat([dim_right_1, dim_right_2, dim_right_3], axis=1)
        dim_right = paddle.reshape(dim_right, (-1, 1)).tile([1, 4])
        dims = paddle.concat([dim_left, dim_right], axis=1)
        # Fixed corner-ordering lookup used to gather the 8 box vertices.
        index = paddle.to_tensor([[4, 0, 1, 2, 3, 5, 6, 7],
                                  [4, 5, 0, 1, 6, 7, 2, 3],
                                  [4, 5, 6, 0, 1, 2, 3, 7]]).tile([N, 1])
        box_3d_object = gather_op(dims, 1, index)
        # Rotate the local corners, then translate by the object location.
        box_3d = paddle.matmul(ry, paddle.reshape(box_3d_object, (N, 3, -1)))
        # box_3d += locs.unsqueeze(-1).repeat(1, 1, 8)
        box_3d += locs.unsqueeze(-1).tile((1, 1, 8))
        return box_3d

    def decode_depth(self, depths_offset):
        """
        Transform depth offset to depth
        """
        #depth = depths_offset * self.depth_ref[1] + self.depth_ref[0]
        #return depth
        return self.depth_decoder(depths_offset)

    def decode_location(self,
                        points,
                        points_offset,
                        depths,
                        Ks,
                        trans_mats):
        """
        retrieve objects location in camera coordinate based on projected points
        Args:
            points: projected points on feature map in (x, y)
            points_offset: project points offset in (delata_x, delta_y)
            depths: object depth z
            Ks: camera intrinsic matrix, shape = [N, 3, 3]
            trans_mats: transformation matrix from image to feature map, shape = [N, 3, 3]
        Returns:
            locations: objects location, shape = [N, 3]
        """
        # number of points
        N = points_offset.shape[0]
        # batch size
        N_batch = Ks.shape[0]
        batch_id = paddle.arange(N_batch).unsqueeze(1)
        # obj_id = batch_id.repeat(1, N // N_batch).flatten()
        # Map every object to its batch entry (objects are evenly split
        # across the batch).
        obj_id = batch_id.tile([1, N // N_batch]).flatten()
        # trans_mats_inv = trans_mats.inverse()[obj_id]
        # Ks_inv = Ks.inverse()[obj_id]
        # Per-object gather of the inverse matrices (fancy-index workaround).
        inv = trans_mats.inverse()
        trans_mats_inv = paddle.concat([inv[int(obj_id[i])].unsqueeze(0) for i in range(len(obj_id))])
        inv = Ks.inverse()
        Ks_inv = paddle.concat([inv[int(obj_id[i])].unsqueeze(0) for i in range(len(obj_id))])

        points = paddle.reshape(points, (-1, 2))
        assert points.shape[0] == N
        # int + float -> int, but float + int -> float
        # proj_points = points + points_offset
        proj_points = points_offset + points
        # transform project points in homogeneous form.
        proj_points_extend = paddle.concat(
            (proj_points.astype("float32"), paddle.ones((N, 1))), axis=1)
        # expand project points as [N, 3, 1]
        proj_points_extend = proj_points_extend.unsqueeze(-1)
        # transform project points back on image
        proj_points_img = paddle.matmul(trans_mats_inv, proj_points_extend)
        # with depth
        proj_points_img = proj_points_img * paddle.reshape(depths, (N, -1, 1))
        # transform image coordinates back to object locations
        locations = paddle.matmul(Ks_inv, proj_points_img)
        return locations.squeeze(2)

    def decode_location_without_transmat(self,
                                         points, points_offset,
                                         depths, Ks, down_ratios=None):
        """
        retrieve objects location in camera coordinate based on projected points
        Args:
            points: projected points on feature map in (x, y)
            points_offset: project points offset in (delata_x, delta_y)
            depths: object depth z
            Ks: camera intrinsic matrix, shape = [N, 3, 3]
            down_ratios: (x, y) scale from feature map back to the original
                image; defaults to (1, 1).
        Returns:
            locations: objects location, shape = [N, 3]
        """
        if down_ratios is None:
            down_ratios = [(1, 1)]
        # number of points
        N = points_offset.shape[0]
        # batch size
        N_batch = Ks.shape[0]
        batch_id = paddle.arange(N_batch).reshape((N_batch, 1))
        # obj_id = batch_id.repeat(1, N // N_batch).flatten()
        obj_id = batch_id.tile([1, N // N_batch]).flatten()
        # Ks_inv = Ks[obj_id] pytorch
        # Ks_inv = paddle.concat([Ks[int(obj_id[i])].unsqueeze(0) for i in range(len(obj_id))])
        # Per-object gather of the intrinsics (fancy-index workaround).
        length = int(obj_id.shape[0])
        ks_v = []
        for i in range(length):
            ks_v.append(Ks[int(obj_id[i])].unsqueeze(0))
        Ks_inv = paddle.concat(ks_v)
        down_ratio = down_ratios[0]
        points = paddle.reshape(points, (numel_t(points)//2, 2))
        proj_points = points + points_offset
        # trans point from heatmap to ori image, down_sample * resize_scale
        proj_points[:, 0] = down_ratio[0] * proj_points[:, 0]
        proj_points[:, 1] = down_ratio[1] * proj_points[:, 1]
        # transform project points in homogeneous form.
        proj_points_extend = paddle.concat(
            [proj_points, paddle.ones((N, 1))], axis=1)
        # expand project points as [N, 3, 1]
        proj_points_extend = proj_points_extend.unsqueeze(-1)
        # with depth
        proj_points_img = proj_points_extend * paddle.reshape(depths, (N, numel_t(depths)//N, 1))
        # transform image coordinates back to object locations
        locations = paddle.matmul(Ks_inv, proj_points_img)
        return locations.squeeze(2)

    def decode_bbox_2d_without_transmat(self, points, bbox_size, down_ratios=None):
        """get bbox 2d
        Args:
            points (paddle.Tensor, (50, 2)): 2d center
            bbox_size (paddle.Tensor, (50, 2)): 2d bbox height and width
            down_ratios: (x, y) scale from feature map back to the original
                image; defaults to (1, 1).
        """
        if down_ratios is None:
            down_ratios = [(1, 1)]
        # number of points
        N = bbox_size.shape[0]
        points = paddle.reshape(points, (-1, 2))
        assert points.shape[0] == N
        box2d = paddle.zeros((N, 4))
        down_ratio = down_ratios[0]
        # (x1, y1, x2, y2) from center and size, then rescale to image space.
        box2d[:, 0] = (points[:, 0] - bbox_size[:, 0] / 2)
        box2d[:, 1] = (points[:, 1] - bbox_size[:, 1] / 2)
        box2d[:, 2] = (points[:, 0] + bbox_size[:, 0] / 2)
        box2d[:, 3] = (points[:, 1] + bbox_size[:, 1] / 2)
        box2d[:, 0] = down_ratio[0] * box2d[:, 0]
        box2d[:, 1] = down_ratio[1] * box2d[:, 1]
        box2d[:, 2] = down_ratio[0] * box2d[:, 2]
        box2d[:, 3] = down_ratio[1] * box2d[:, 3]
        return box2d

    def decode_dimension(self, cls_id, dims_offset):
        """
        retrieve object dimensions
        Args:
            cls_id: each object id
            dims_offset: dimension offsets, shape = (N, 3)
        Returns:
            absolute dimensions from the DimensionDecoder sub-layer.
        """
        # cls_id = cls_id.flatten().long()
        # dims_select = self.dim_ref[cls_id, :]
        # cls_id = cls_id.flatten()
        # dims_select = paddle.concat([self.dim_ref[int(cls_id[i])].unsqueeze(0) for i in range(len(cls_id))])
        # dimensions = dims_offset.exp() * dims_select
        # return dimensions
        return self.dimension_decoder(cls_id, dims_offset)

    def decode_orientation(self, vector_ori, locations, flip_mask=None):
        """
        retrieve object orientation
        Args:
            vector_ori: local orientation in [sin, cos] format
            locations: object location
        Returns: for training we only need roty
                 for testing we need both alpha and roty
        """
        locations = paddle.reshape(locations, (-1, 3))
        # Viewing-ray angle of each object from its x/z location.
        rays = paddle.atan(locations[:, 0] / (locations[:, 2] + 1e-7))
        alphas = paddle.atan(vector_ori[:, 0] / (vector_ori[:, 1] + 1e-7))

        # get cosine value positive and negtive index.
        cos_pos_idx = (vector_ori[:, 1] >= 0).nonzero()
        cos_neg_idx = (vector_ori[:, 1] < 0).nonzero()
        PI = 3.14159
        # Shift alpha into the correct half-plane depending on cos sign
        # (element-wise loop: boolean-index assignment workaround).
        for i in range(cos_pos_idx.shape[0]):
            ind = int(cos_pos_idx[i,0])
            alphas[ind] = alphas[ind] - PI / 2
        for i in range(cos_neg_idx.shape[0]):
            ind = int(cos_neg_idx[i,0])
            alphas[ind] = alphas[ind] + PI / 2
        # alphas[cos_pos_idx] -= PI / 2
        # alphas[cos_neg_idx] += PI / 2

        # retrieve object rotation y angle.
        rotys = alphas + rays

        # in training time, it does not matter if angle lies in [-PI, PI]
        # it matters at inference time? todo: does it really matter if it exceeds.
        # Wrap rotys back into [-PI, PI].
        larger_idx = (rotys > PI).nonzero()
        small_idx = (rotys < -PI).nonzero()
        if len(larger_idx) != 0:
            for i in range(larger_idx.shape[0]):
                ind = int(larger_idx[i,0])
                rotys[ind] -= 2 * PI
        if len(small_idx) != 0:
            for i in range(small_idx.shape[0]):
                ind = int(small_idx[i,0])
                rotys[ind] += 2 * PI

        if flip_mask is not None:
            # For horizontally flipped samples, mirror the angle as well.
            fm = flip_mask.astype("float32").flatten()
            rotys_flip = fm * rotys
            # rotys_flip_pos_idx = rotys_flip > 0
            # rotys_flip_neg_idx = rotys_flip < 0
            # rotys_flip[rotys_flip_pos_idx] -= PI
            # rotys_flip[rotys_flip_neg_idx] += PI
            rotys_flip_pos_idx = (rotys_flip > 0).nonzero()
            rotys_flip_neg_idx = (rotys_flip < 0).nonzero()
            for i in range(rotys_flip_pos_idx.shape[0]):
                ind = int(rotys_flip_pos_idx[i, 0])
                rotys_flip[ind] -= PI
            for i in range(rotys_flip_neg_idx.shape[0]):
                ind = int(rotys_flip_neg_idx[i, 0])
                rotys_flip[ind] += PI
            # Blend: flipped angle where fm==1, original angle where fm==0.
            rotys_all = fm * rotys_flip + (1 - fm) * rotys
            return rotys_all
        else:
            return rotys, alphas

    def decode_bbox_2d(self, points, bbox_size, trans_mats, img_size):
        """get bbox 2d
        Args:
            points (paddle.Tensor, (50, 2)): 2d center
            bbox_size (paddle.Tensor, (50, 2)): 2d bbox height and width
            trans_mats (paddle.Tensor, (1, 3, 3)): transformation coord from img to feature map
            img_size: (width, height) used to clip the result to the image.
        """
        img_size = img_size.flatten()
        # number of points
        N = bbox_size.shape[0]
        # batch size
        N_batch = trans_mats.shape[0]
        batch_id = paddle.arange(N_batch).unsqueeze(1)
        # obj_id = batch_id.repeat(1, N // N_batch).flatten()
        obj_id = batch_id.tile([1, N // N_batch]).flatten()
        # Per-object gather of the inverse transforms (fancy-index workaround).
        inv = trans_mats.inverse()
        trans_mats_inv = paddle.concat([inv[int(obj_id[i])].unsqueeze(0) for i in range(len(obj_id))])
        #trans_mats_inv = trans_mats.inverse()[obj_id]
        points = paddle.reshape(points, (-1, 2))
        assert points.shape[0] == N
        box2d = paddle.zeros([N, 4])
        # (x1, y1, x2, y2) in feature-map coordinates.
        box2d[:, 0] = (points[:, 0] - bbox_size[:, 0] / 2)
        box2d[:, 1] = (points[:, 1] - bbox_size[:, 1] / 2)
        box2d[:, 2] = (points[:, 0] + bbox_size[:, 0] / 2)
        box2d[:, 3] = (points[:, 1] + bbox_size[:, 1] / 2)
        # transform project points in homogeneous form.
        proj_points_extend_top = paddle.concat(
            (box2d[:, :2], paddle.ones([N, 1])), axis=1)
        proj_points_extend_bot = paddle.concat(
            (box2d[:, 2:], paddle.ones([N, 1])), axis=1)
        # expand project points as [N, 3, 1]
        proj_points_extend_top = proj_points_extend_top.unsqueeze(-1)
        proj_points_extend_bot = proj_points_extend_bot.unsqueeze(-1)
        # transform project points back on image
        proj_points_img_top = paddle.matmul(trans_mats_inv, proj_points_extend_top)
        proj_points_img_bot = paddle.matmul(trans_mats_inv, proj_points_extend_bot)
        box2d[:, :2] = proj_points_img_top.squeeze(2)[:, :2]
        box2d[:, 2:] = proj_points_img_bot.squeeze(2)[:, :2]
        # Clip x coordinates to [0, width] and y coordinates to [0, height].
        box2d[:, ::2] = box2d[:, ::2].clip(0, img_size[0])
        box2d[:, 1::2] = box2d[:, 1::2].clip(0, img_size[1])
        return box2d
class DepthDecoder(paddle.nn.Layer):
    """Maps regressed depth offsets to depth via an affine reference.

    Computes ``offset * depth_ref[1] + depth_ref[0]``.
    """

    def __init__(self, depth_ref):
        super().__init__()
        # (shift, scale) pair used to de-normalize the network output.
        self.depth_ref = paddle.to_tensor(depth_ref)

    def forward(self, depths_offset):
        """Transform depth offset to depth."""
        shift = self.depth_ref[0]
        scale = self.depth_ref[1]
        return depths_offset * scale + shift
class DimensionDecoder(paddle.nn.Layer):
    """Recovers 3d object dimensions from per-class reference sizes."""

    def __init__(self, dim_ref):
        super().__init__()
        # Per-class reference dimensions, indexed by class id.
        self.dim_ref = paddle.to_tensor(dim_ref)

    def forward(self, cls_id, dims_offset):
        """
        retrieve object dimensions

        Args:
            cls_id: each object id
            dims_offset: dimension offsets, shape = (N, 3)

        Returns:
            dimensions = exp(dims_offset) * per-class reference, shape (N, 3)
        """
        cls_id = cls_id.flatten()
        # Gather each object's reference row; a comprehension replaces the
        # previous manual append loop (same result, more idiomatic).
        dims_select = paddle.concat(
            [self.dim_ref[int(cls_id[i])].unsqueeze(0)
             for i in range(int(cls_id.shape[0]))])
        dimensions = dims_offset.exp() * dims_select
        return dimensions
def numel_t(var):
    """Return the total number of elements implied by ``var.shape``.

    Args:
        var: any object with a fully static ``shape`` attribute
            (e.g. a paddle/numpy tensor).

    Returns:
        int: product of all dimensions (1 for an empty shape).

    Raises:
        AssertionError: if any dimension is -1 (dynamic/unknown shape).
    """
    # math.prod avoids the numpy dependency the original pulled in here.
    from math import prod
    assert -1 not in var.shape
    return prod(var.shape)
395161 | """
Copyright 2020 The Johns Hopkins University Applied Physics Laboratory LLC
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#Approved for public release, 20-563
import argparse
import os
from utilities.ml_utils import train,test,metrics
if __name__ == "__main__":
    # CLI entry point: parse options, then dispatch to train / test /
    # metrics (any combination may be requested in a single run).
    parser = argparse.ArgumentParser()
    parser.add_argument("--augmentation", action="store_true", help="train with rotations (modifies ground truth orientation)")
    parser.add_argument("--test-rotations", action="store_true", help="apply test-time rotations (more test instances)")
    parser.add_argument("--add-height", action="store_true", help="train with height regression")
    parser.add_argument("--train", action="store_true", help="train")
    parser.add_argument("--test", action="store_true", help="generate test predictions")
    parser.add_argument("--metrics", action="store_true", help="evaluate predictions against ground truth")
    parser.add_argument("--multiprocessing", action="store_true", help="use multiprocessing for metrics")
    parser.add_argument("--gpus", type=str, help="gpu indices (comma separated)", default='0')
    # Fixed: the two help texts below were copy-pasted from --gpus.
    parser.add_argument("--num-epochs", type=int, help="number of training epochs", default=100)
    parser.add_argument("--save-period", type=int, help="save a checkpoint every this many epochs", default=5)
    parser.add_argument("--batch-size", type=int, help="batch size", default=2)
    parser.add_argument("--continue-training-file", type=str, help="file to continue training from", default=None)
    parser.add_argument("--test-model-file", type=str, help="test checkpoint if not running default selection", default=None)
    parser.add_argument("--checkpoint-dir", type=str, help="where to store and load checkpoints from", default="./checkpoints")
    parser.add_argument("--tensorboard-dir", type=str, help="tensorboard log directory", default="./tensorboard")
    parser.add_argument("--predictions-dir", type=str, help="where to store predictions", default="./predictions")
    parser.add_argument("--dataset-dir", type=str, help="dataset directory", default="./dataset")
    parser.add_argument("--train-sub-dir", type=str, help="train folder within dataset-dir", default="train")
    parser.add_argument("--test-sub-dir", type=str, help="test folder within dataset-dir", default="test")
    parser.add_argument("--image-size", type=int, nargs="+", help="image size", default=(2048, 2048))
    parser.add_argument("--backbone", type=str, help="unet backbone", default="resnet34")
    args = parser.parse_args()
    # Restrict visible GPUs before any framework initialization.
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
    if args.train:
        train(args)
    if args.test:
        test(args)
    if args.metrics:
        metrics(args)
| StarcoderdataPython |
12864409 | <reponame>erogleva/core
"""eafm fixtures."""
import pytest
from tests.async_mock import patch
@pytest.fixture()
def mock_get_stations():
    """Mock aioeafm.get_stations as imported by the eafm config flow."""
    target = "homeassistant.components.eafm.config_flow.get_stations"
    with patch(target) as patched:
        yield patched
@pytest.fixture()
def mock_get_station():
    """Mock aioeafm.get_station as imported by the eafm sensor platform."""
    target = "homeassistant.components.eafm.sensor.get_station"
    with patch(target) as patched:
        yield patched
| StarcoderdataPython |
11343678 | import tempfile
from threatexchange.cli.tests.e2e_test_helper import ThreatExchangeCLIE2eTest
from threatexchange.signal_type.md5 import VideoMD5Signal
class MatchCommandTest(ThreatExchangeCLIE2eTest):
    """End-to-end tests for the `match` CLI command."""

    COMMON_CALL_ARGS = ("match",)

    def test_file_noexst(self):
        """A missing input file is a usage error."""
        self.assert_cli_usage_error(("text", "doesnt_exist.txt"))

    def test_match_file(self):
        """An empty video file still hashes and matches the sample signals."""
        with tempfile.NamedTemporaryFile() as fp:
            # Empty file
            self.assert_cli_output(
                ("video", fp.name), "video_md5 - (Sample Signals) WORTH_INVESTIGATING"
            )

    def test_hash(self):
        """Passing a known sample hash directly via -H matches."""
        # Renamed from `hash`, which shadowed the builtin.
        sample_hash = VideoMD5Signal.get_examples()[0]
        self.assert_cli_output(
            ("-H", "video", "--", sample_hash),
            "video_md5 - (Sample Signals) WORTH_INVESTIGATING",
        )

    def test_invalid_hash(self):
        """A malformed MD5 is rejected with a usage error."""
        not_hash = "this is not an md5"
        self.assert_cli_usage_error(
            ("-H", "video", "--", not_hash),
            f"{not_hash!r} from .* is not a valid hash for video_md5",
        )
| StarcoderdataPython |
1886137 | # views.py — accounts views
#
# This file is part of debexpo -
# https://salsa.debian.org/mentors.debian.net-team/debexpo
#
# Copyright © 2008 <NAME> <<EMAIL>>
# Copyright © 2010 <NAME> <<EMAIL>>
# Copyright © 2019 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import logging
from datetime import datetime
from django.conf import settings
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.tokens import default_token_generator
from django.urls import reverse
from django.utils.translation import gettext as _
from django.shortcuts import render
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from .forms import RegistrationForm, AccountForm, ProfileForm, GPGForm
from .models import Profile, User, UserStatus
from debexpo.keyring.models import Key
from debexpo.tools.email import Email
log = logging.getLogger(__name__)
def _send_activate_email(request, uid, token, recipient):
    """
    Sends an activation email to the potential new user.

    ``uid``
        URL-safe base64-encoded primary key of the new user.

    ``token``
        Password-reset token used to confirm the address.

    ``recipient``
        Email address to send to.
    """
    log.debug('Sending activation email')
    email = Email('email-password-creation.html')
    # Build an absolute URL to Django's password_reset_confirm view so the
    # user sets their password as the activation step.
    activate_url = request.scheme + '://' + request.site.domain + \
        reverse('password_reset_confirm', kwargs={
            'uidb64': uid, 'token': token
        })
    email.send(_('Next step: Confirm your email address'), [recipient],
               activate_url=activate_url, settings=settings)
def _register_submit(request, info):
    """
    Handles the form submission for a maintainer account registration.

    Creates the user (the email doubles as the username), attaches a
    Profile with the requested account type, then emails an activation
    link built from a password-reset token.
    """
    log.info('Creating new user {} <{}> as {}'.format(
        info.get('name'), info.get('email'),
        UserStatus(int(info.get('account_type'))).label
    ))
    # Debexpo use the email field as the username
    user = User.objects.create_user(info.get('email'), info.get('name'))
    user.save()
    profile = Profile(user=user, status=info.get('account_type'))
    profile.save()
    # uid/token pair consumed by the password_reset_confirm view.
    uid = urlsafe_base64_encode(force_bytes(user.pk))
    token = default_token_generator.make_token(user)
    _send_activate_email(request, uid, token, user.email)
    log.debug('New user saved')
    return render(request, 'activate.html', {
        'settings': settings
    })
def _update_account(request, info):
    """Persist the submitted name and email onto the logged-in user."""
    user = request.user
    user.name = info.get('name')
    user.email = info.get('email')
    user.save()
def _update_key(request, gpg_form):
    """Persist the validated GPG key form for the current user.

    Copies size/fingerprint/algorithm from the parsed key data
    (``gpg_form.key`` -- presumably populated during form validation;
    verify against GPGForm) onto the model before saving, then refreshes
    the stored subkeys.
    """
    data = gpg_form.key
    key = gpg_form.save(commit=False)
    key.user = request.user
    key.size = data.size
    key.fingerprint = data.fingerprint
    key.algorithm = data.algorithm
    key.save()
    key.update_subkeys()
def _format_fingerprint(fingerprint):
    """Group a GPG fingerprint for display.

    Inserts a space after every 4 characters and an extra space after
    every 20, matching the original index-based implementation exactly
    (including trailing space(s) when the length is a multiple of 4/20).
    """
    # enumerate + join replaces range(len()) indexing and quadratic +=.
    parts = []
    for position, char in enumerate(fingerprint, start=1):
        parts.append(char)
        if position % 4 == 0:
            parts.append(' ')
        if position % 20 == 0:
            parts.append(' ')
    return ''.join(parts)
def register(request):
    """
    Provides the form for a maintainer account registration.

    GET renders an empty form and stores a timestamp in the session;
    POST validates the submission using the stored timestamp and client
    IP (presumably anti-spam inputs -- see RegistrationForm) and, when
    valid, creates the account via _register_submit.
    """
    # Has the form been submitted?
    if request.method == 'POST':
        log.debug('Maintainer form submitted')
        form = RegistrationForm(None, request.POST,
                                elapsed=request.session.get('timestamp', None),
                                ip=request.META['REMOTE_ADDR'])
        if form.is_valid():
            return _register_submit(request, form.cleaned_data)
    else:
        form = RegistrationForm(None)
        # Remember when the empty form was served; fed back as `elapsed`.
        request.session['timestamp'] = str(datetime.now())
        log.debug('Maintainer form requested')
    return render(request, 'register.html', {
        'settings': settings,
        'form': form
    })
@login_required
def profile(request):
    """
    Render and process the user profile page.

    The page hosts four independent forms (account, password, profile,
    GPG key); which one was submitted is detected via the ``commit_*`` /
    ``delete_gpg`` keys in the POST data. Forms that were not submitted
    are re-rendered with their current values.
    """
    account_initial = {
        'name': request.user.name,
        'email': request.user.email
    }
    account_form = AccountForm(None, initial=account_initial)
    password_form = PasswordChangeForm(user=request.user)
    profile_form = ProfileForm(request.user, instance=request.user.profile)
    gpg_fingerprint = None
    # The user may not have uploaded a GPG key yet.
    try:
        gpg_form = GPGForm(request.user, instance=request.user.key)
        gpg_fingerprint = _format_fingerprint(request.user.key.fingerprint)
    except Key.DoesNotExist:
        gpg_form = GPGForm(request.user)
    if request.method == 'POST':
        if 'commit_account' in request.POST:
            account_form = AccountForm(request.user, request.POST)
            if account_form.is_valid():
                log.debug('Updating user account for '
                          '{}'.format(request.user.email))
                _update_account(request, account_form.cleaned_data)
        if 'commit_password' in request.POST:
            password_form = PasswordChangeForm(user=request.user,
                                               data=request.POST)
            if password_form.is_valid():
                log.debug('Changing password for account '
                          '{}'.format(request.user.email))
                password_form.save()
                # Keep the user logged in after the password change.
                update_session_auth_hash(request, password_form.user)
        if 'commit_profile' in request.POST:
            profile_form = ProfileForm(request.user, request.POST,
                                       instance=request.user.profile)
            if profile_form.is_valid():
                profile = profile_form.save(commit=False)
                profile.user = request.user
                profile.save()
        if 'commit_gpg' in request.POST:
            # Accessing request.user.key may raise if no key exists yet.
            try:
                gpg_form = GPGForm(request.user, request.POST,
                                   instance=request.user.key)
            except Key.DoesNotExist:
                gpg_form = GPGForm(request.user, request.POST)
            if gpg_form.is_valid():
                _update_key(request, gpg_form)
                gpg_fingerprint = _format_fingerprint(
                    request.user.key.fingerprint
                )
        if 'delete_gpg' in request.POST:
            try:
                key = request.user.key
            except Key.DoesNotExist:
                pass
            else:
                key.delete()
                gpg_form = GPGForm(request.user)
                gpg_fingerprint = None
    return render(request, 'profile.html', {
        'settings': settings,
        'account_form': account_form,
        'password_form': password_form,
        'profile_form': profile_form,
        'gpg_form': gpg_form,
        'gpg_fingerprint': gpg_fingerprint,
    })
| StarcoderdataPython |
5122935 | #!/usr/bin/python
#
# Adapted from: https://github.com/mcordts/cityscapesScripts
#
# The evaluation script for pixel-level semantic labeling.
# We use this script to evaluate your approach on the test set.
# You can use the script to evaluate on the validation set.
#
# usage: evalPixelLevelSemanticLabeling.py --gt_path [gtPath] --pred_path [predictionPath]
#
# Note that the script is a lot faster, if you enable cython support.
# WARNING: Cython only tested for Ubuntu 64bit OS.
# To enable cython, run
# setup.py build_ext --inplace
#
# To run this script, make sure that your results are images,
# where pixels encode the class IDs as defined in labels.py.
# Note that the regular ID is used, not the train ID.
# Further note that many classes are ignored from evaluation.
# Thus, authors are not expected to predict these classes and all
# pixels with a ground truth label that is ignored are ignored in
# evaluation.
# python imports
import os, sys, argparse
import math
import platform
import fnmatch
try:
import numpy as np
except:
print("Failed to import numpy package.")
sys.exit(-1)
try:
from PIL import Image
except:
print("Please install the module 'Pillow' for image processing, e.g.")
print("pip install pillow")
sys.exit(-1)
try:
from itertools import izip
except ImportError:
izip = zip
# C Support
# Enable the cython support for faster evaluation
# Only tested for Ubuntu 64bit OS
CSUPPORT = True
# Check if C-Support is available for better performance
if CSUPPORT:
try:
import addToConfusionMatrix
except:
CSUPPORT = False
# Command line interface: ground truth and prediction directories plus an
# optional output file (defaults to <pred_path>/semantic_label.txt).
parser = argparse.ArgumentParser()
parser.add_argument('--gt_path', required=True, help='path to gt files')
parser.add_argument('--pred_path', required=True, help='path to result files')
# Fixed: the help text was missing its closing parenthesis.
parser.add_argument('--output_file', default='', help='output file (default pred_path/semantic_label.txt)')
opt = parser.parse_args()
if not opt.output_file:
    opt.output_file = os.path.join(opt.pred_path, 'semantic_label.txt')
# Evaluated label names/ids (looks like the ScanNet/NYU40 benchmark subset
# -- confirm). Ids present in ALL_CLASS_IDS but absent from VALID_CLASS_IDS
# are ignored during evaluation.
CLASS_LABELS = ['wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window', 'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator', 'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture']
VALID_CLASS_IDS = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
ALL_CLASS_IDS = np.arange(40 + 1)
#########################
# Methods
#########################
# Print an error message and quit
def printError(message, user_fault=False):
    """Print *message* prefixed with ERROR and terminate the process.

    Exit code 2 flags a user error, -1 an internal one.
    """
    print('ERROR: ' + str(message))
    sys.exit(2 if user_fault else -1)
# Generate empty confusion matrix and create list of relevant labels
def generateMatrix():
    """Allocate an empty confusion matrix covering every class id.

    The matrix is sized by the largest id in ALL_CLASS_IDS, regardless of
    whether an id is ignored during evaluation.
    """
    size = np.max(ALL_CLASS_IDS) + 1
    return np.zeros((size, size), dtype=np.ulonglong)
# Get absolute or normalized value from field in confusion matrix.
def getMatrixFieldValue(confMatrix, i, j, normalized=True):
    """Return confMatrix[i][j], optionally normalized by row i's total.

    Returns NaN when normalizing an empty row.
    """
    if not normalized:
        return confMatrix[i][j]
    total = confMatrix[i].sum()
    if total == 0:
        return float('nan')
    return float(confMatrix[i][j]) / total
# Calculate and return IOU score for a particular label
def getIouScoreForLabel(label, confMatrix):
    """IoU = TP / (TP + FP + FN) for one class id; NaN if not evaluated."""
    if label not in VALID_CLASS_IDS:
        return float('nan')
    # True positives: the diagonal entry for this label.
    tp = np.longlong(confMatrix[label, label])
    # False negatives: the rest of the label's row.
    fn = np.longlong(confMatrix[label, :].sum()) - tp
    # False positives: same column, restricted to the other evaluated
    # classes (pixels whose ground truth is ignored do not count).
    other_labels = [l for l in VALID_CLASS_IDS if l != label]
    fp = np.longlong(confMatrix[other_labels, label].sum())
    denom = tp + fp + fn
    if denom == 0:
        return float('nan')
    return float(tp) / denom
# Calculate prior for a particular class id.
def getPrior(label, confMatrix):
    """Fraction of all counted pixels whose ground-truth class is *label*."""
    row_total = float(confMatrix[label, :].sum())
    return row_total / confMatrix.sum()
# Get average of scores.
# Only computes the average over valid entries.
def getScoreAverage(scoreList):
    """Average of the non-NaN values of *scoreList* (a name -> score dict).

    Returns NaN when no valid entries exist.
    """
    valid = [value for value in scoreList.values() if not math.isnan(value)]
    if not valid:
        return float('nan')
    return sum(valid) / len(valid)
# Print intersection-over-union scores for all classes.
def printClassScores(scoreList):
    """Print a per-class IoU table (scoreList maps class name -> IoU)."""
    print 'classes IoU'
    print '----------------------------'
    for i in range(len(VALID_CLASS_IDS)):
        label = VALID_CLASS_IDS[i]
        labelName = CLASS_LABELS[i]
        iouStr = "{0:>5.3f}".format(scoreList[labelName])
        print ("{0:<14s}: ".format(labelName) + iouStr)
# Save results.
def write_result_file(conf, scores, filename):
    """Save per-class IoU scores and the raw confusion matrix to *filename*."""
    _SPLITTER = ','
    with open(filename, 'w') as f:
        f.write('iou scores\n')
        for i in range(len(VALID_CLASS_IDS)):
            label = VALID_CLASS_IDS[i]
            label_name = CLASS_LABELS[i]
            iou = scores[label_name]
            f.write('{0:<14s}({1:<2d}): {2:>5.3f}\n'.format(label_name, label, iou))
        f.write('\nconfusion matrix\n')
        # Header row of class names, then one row of counts per class.
        # NOTE(review): no newline is written after the header row, so the
        # first data row's name follows it directly -- confirm intended.
        for i in range(len(VALID_CLASS_IDS)):
            f.write('\t{0:<14s}({1:<2d})'.format(CLASS_LABELS[i], VALID_CLASS_IDS[i]))
        for r in range(len(VALID_CLASS_IDS)):
            f.write('{0:<14s}({1:<2d})'.format(CLASS_LABELS[r], VALID_CLASS_IDS[r]))
            for c in range(len(VALID_CLASS_IDS)):
                f.write('\t{0:>5.3f}'.format(conf[r,c]))
            f.write('\n')
    print 'wrote results to', filename
# Evaluate image lists pairwise.
def evaluateImgLists(predictionImgList, groundTruthImgList, outputFile):
    """Evaluate prediction/ground-truth image pairs and report IoU scores.

    Accumulates one global confusion matrix over all pairs, prints
    per-class IoU plus the class average, and writes results to
    *outputFile*.
    """
    if len(predictionImgList) != len(groundTruthImgList):
        printError("List of images for prediction and groundtruth are not of equal size.", user_fault=True)
    confMatrix = generateMatrix()
    perImageStats = {}
    nbPixels = 0
    print 'Evaluating', len(predictionImgList), 'pairs of images...'
    # Evaluate all pairs of images and save them into a matrix
    for i in range(len(predictionImgList)):
        predictionImgFileName = predictionImgList[i]
        groundTruthImgFileName = groundTruthImgList[i]
        #print "Evaluate ", predictionImgFileName, "<>", groundTruthImgFileName
        nbPixels += evaluatePair(predictionImgFileName, groundTruthImgFileName, confMatrix, perImageStats)
        # sanity check
        # NOTE(review): this per-iteration check duplicates the one after
        # the loop (only the message text differs).
        if confMatrix.sum() != nbPixels:
            printError('Number of analyzed pixels and entries in confusion matrix disagree: confMatrix {}, pixels {}'.format(confMatrix.sum(),nbPixels))
        sys.stdout.write("\rImages Processed: {}".format(i+1))
        sys.stdout.flush()
    print ""
    # sanity check
    if confMatrix.sum() != nbPixels:
        printError('Number of analyzed pixels and entries in confusion matrix disagree: contMatrix {}, pixels {}'.format(confMatrix.sum(),nbPixels))
    # Calculate IOU scores on class level from matrix
    classScoreList = {}
    for i in range(len(VALID_CLASS_IDS)):
        labelName = CLASS_LABELS[i]
        label = VALID_CLASS_IDS[i]
        classScoreList[labelName] = getIouScoreForLabel(label, confMatrix)
    # Print IOU scores
    printClassScores(classScoreList)
    iouAvgStr = "{avg:5.3f}".format(avg=getScoreAverage(classScoreList))
    print "--------------------------------"
    print "Score Average : " + iouAvgStr
    print "--------------------------------"
    print ""
    # write result file
    write_result_file(confMatrix, classScoreList, outputFile)
# Main evaluation method. Evaluates pairs of prediction and ground truth
# images which are passed as arguments.
def evaluatePair(predictionImgFileName, groundTruthImgFileName, confMatrix, perImageStats):
    """Accumulate one prediction/ground-truth pair into *confMatrix*.

    Both images are resized to 640x480 (nearest neighbour) before the
    pixel-wise counting. Returns the number of pixels evaluated.
    """
    # Loading all resources for evaluation.
    try:
        predictionImg = Image.open(predictionImgFileName)
        predictionNp = np.array(predictionImg)
    except Exception, e:
        printError("Unable to load " + predictionImgFileName + ": " + str(e))
    try:
        groundTruthImg = Image.open(groundTruthImgFileName)
        groundTruthNp = np.array(groundTruthImg)
    except Exception, e:
        printError("Unable to load " + groundTruthImgFileName + ": " + str(e))
    # Check for equal image sizes
    # NOTE(review): due to operator precedence this reads as
    # (widths equal) OR (prediction is exactly 640x480) -- confirm intent.
    if not (predictionImg.size[0] == groundTruthImg.size[0] or predictionImg.size[0] == 640 and predictionImg.size[1] == 480):
        printError("Invalid image size for " + predictionImgFileName, user_fault=True)
    if ( len(predictionNp.shape) != 2 ):
        printError("Predicted image has multiple channels.", user_fault=True)
    # resize for evaluation
    predictionImg = predictionImg.resize((640, 480), Image.NEAREST)
    predictionNp = np.array(predictionImg)
    groundTruthImg = groundTruthImg.resize((640, 480), Image.NEAREST)
    groundTruthNp = np.array(groundTruthImg)
    imgWidth = predictionImg.size[0]
    imgHeight = predictionImg.size[1]
    nbPixels = imgWidth*imgHeight
    # Evaluate images
    if (CSUPPORT):
        # using cython
        confMatrix = addToConfusionMatrix.cEvaluatePair(predictionNp, groundTruthNp, confMatrix, VALID_CLASS_IDS.tolist())
    else:
        # the slower python way
        for (groundTruthImgPixel,predictionImgPixel) in izip(groundTruthImg.getdata(),predictionImg.getdata()):
            if (not groundTruthImgPixel in VALID_CLASS_IDS):
                printError("Unknown label with id {:}".format(groundTruthImgPixel))
            confMatrix[groundTruthImgPixel][predictionImgPixel] += 1
    return nbPixels
# The main method
def main():
    """Collect matching prediction/ground-truth file pairs and evaluate."""
    prediction_names = os.listdir(opt.pred_path)
    if not prediction_names:
        printError("No result files found.", user_fault=True)
    pred_files = []
    gt_files = []
    for name in prediction_names:
        gt_file = os.path.join(opt.gt_path, name)
        if not os.path.isfile(gt_file):
            printError("Result file {} does not match any gt file".format(name), user_fault=True)
        gt_files.append(gt_file)
        pred_files.append(os.path.join(opt.pred_path, name))
    # evaluate
    evaluateImgLists(pred_files, gt_files, opt.output_file)
    return

# call the main method
if __name__ == "__main__":
    main()
| StarcoderdataPython |
57225 | import os
import pytest
from io import StringIO
# Enable pytest assertion rewriting for the shared helpers in tests.common.
pytest.register_assert_rewrite('tests.common')
@pytest.fixture
def content():
    """Fixture: a callable returning a file's full text."""
    def _read(filename):
        with open(filename) as handle:
            return handle.read()
    return _read
@pytest.fixture
def expected(request):
    """Fixture: contents of ``<module>.<test name>.exp`` beside the test module."""
    base = os.path.splitext(request.module.__file__)[0]
    exp_path = base + '.' + request.function.__name__ + '.exp'
    with open(exp_path) as handle:
        return handle.read()
@pytest.fixture
def rpath(request):
    """Fixture: resolve a name next to the test module, relative to repo root."""
    module_dir = os.path.dirname(request.module.__file__)
    repo_root = os.path.join(os.path.dirname(__file__), '..')

    def _resolve(filename):
        return os.path.relpath(os.path.join(module_dir, filename), repo_root)

    return _resolve
@pytest.fixture
def stringio():
    """Fixture: a fresh in-memory text stream (io.StringIO)."""
    return StringIO()
class _StringIOTTY(StringIO):
    """In-memory text stream that reports itself as an interactive terminal."""

    def isatty(self):
        return True
@pytest.fixture
def stringio_tty():
    """Fixture: an in-memory text stream that claims to be a TTY."""
    return _StringIOTTY()
| StarcoderdataPython |
11381425 | <reponame>gaybro8777/osf.io
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-03-06 16:48
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
import osf.utils.datetime_aware_jsonfield
class Migration(migrations.Migration):
    """Auto-generated migration (Django 1.11.15): adds the ChronosJournal and
    ChronosSubmission models plus OSFUser.chronos_user_id. Do not hand-edit
    applied migrations."""

    dependencies = [
        ('osf', '0155_merge_20190115_1437'),
    ]

    operations = [
        migrations.CreateModel(
            name='ChronosJournal',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
                ('name', models.TextField()),
                ('title', models.TextField()),
                ('journal_id', models.TextField(unique=True)),
                ('raw_response', osf.utils.datetime_aware_jsonfield.DateTimeAwareJSONField(encoder=osf.utils.datetime_aware_jsonfield.DateTimeAwareJSONEncoder)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='ChronosSubmission',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
                ('publication_id', models.TextField(unique=True)),
                ('status', models.IntegerField(blank=True, choices=[(1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5')], default=None, null=True)),
                ('raw_response', osf.utils.datetime_aware_jsonfield.DateTimeAwareJSONField(encoder=osf.utils.datetime_aware_jsonfield.DateTimeAwareJSONEncoder)),
                ('submission_url', models.TextField()),
                ('journal', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='osf.ChronosJournal')),
                ('preprint', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='osf.Preprint')),
            ],
        ),
        migrations.AddField(
            model_name='osfuser',
            name='chronos_user_id',
            field=models.TextField(blank=True, db_index=True, null=True),
        ),
        migrations.AddField(
            model_name='chronossubmission',
            name='submitter',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterUniqueTogether(
            name='chronossubmission',
            unique_together=set([('preprint', 'journal')]),
        ),
    ]
| StarcoderdataPython |
373535 | from shaape.overlayparser import OverlayParser
from shaape.node import Node
from shaape.opengraph import OpenGraph
from shaape.polygon import Polygon
import nose
import unittest
from nose.tools import *
class TestOverlayParser(unittest.TestCase):
    """Tests for OverlayParser parsing ASCII art into drawable objects."""

    def test_init(self):
        parser = OverlayParser()
        # `is not None` instead of `!= None` (PEP 8 identity comparison).
        assert parser is not None

    def test_cycle_len(self):
        parser = OverlayParser()
        # 4x2 axis-aligned rectangle -> perimeter 12.
        cycle = [Node(0, 0), Node(4, 0), Node(4, 2), Node(0, 2), Node(0, 0)]
        assert parser.cycle_len(cycle) == 12

    def test_run(self):
        parser = OverlayParser()
        parser.run("", [])
        # A single dash yields one open graph.
        parser.run("-", [])
        assert len(parser.drawable_objects()) == 1, parser.drawable_objects()
        assert type(parser.drawable_objects()[0]) == OpenGraph
        # Two separated dashes yield two open graphs.
        parser.run("- -", [])
        assert len(parser.drawable_objects()) == 2
        assert type(parser.drawable_objects()[0]) == OpenGraph
        assert type(parser.drawable_objects()[1]) == OpenGraph
        # A closed square of '+' corners yields exactly one polygon.
        parser.run(["++", "++"], [])
        assert len(parser.drawable_objects()) == 1, "got " + str(len(parser.drawable_objects())) + " drawable objects " + str(parser.drawable_objects())
        assert len([o for o in parser.drawable_objects() if type(o) == Polygon]) == 1
        # A rectangle with a nested square yields two polygons.
        parser.run(["+--+", "| ++", "| ++", "+--+"], [])
        assert len(parser.drawable_objects()) == 2, "got " + str(len(parser.drawable_objects())) + " drawable objects "
        assert len([o for o in parser.drawable_objects() if type(o) == Polygon]) == 2
| StarcoderdataPython |
9635754 | # Copyright (c) IBM Corp. 2018. All Rights Reserved.
# Project name: Constrained Exploration and Recovery from Experience Shaping
# This project is licensed under the MIT License, see LICENSE
from baselines import logger
import baselines.common.tf_util as U
import numpy as np
import time
from mpi4py import MPI
import gym
def update_constraint_activation_probability(env, extra_args, logger, is_direct_policy, do_train_cnet,
                                             activation_probability_before, activation_probability_after):
    '''
    Update environment constraint activation probability using constraint accuracy
    before or after training. Recovery agents may be forced to 0 when
    unconstrained recovery is requested; None disables the update entirely.
    '''
    probability = extra_args.constant_constraint_activation
    if extra_args.adaptive_constraint_activation:
        use_prior = 'prior' in extra_args.adaptive_constraint_activation
        if use_prior or not do_train_cnet:
            probability = activation_probability_before
        else:
            probability = activation_probability_after
    if extra_args.unconstrained_recovery and not is_direct_policy:
        probability = 0.
    if probability is not None:
        logger.log('Set constraint activation probability to {0:.1f} %'.format(probability * 100.))
        env.unwrapped.set_constraint_activation_probability(probability)
def check_time_between_backups(extra_args, last_backup_time=None):
    '''
    Only write backups every min_time_between_backups seconds.
    Returns (do_save_backup, last_backup_time); the timestamp is refreshed
    whenever a backup is due.
    '''
    now = time.time()
    if last_backup_time is None:
        do_save_backup = True
    else:
        do_save_backup = (now - last_backup_time) > extra_args.min_time_between_backups
    if do_save_backup:
        last_backup_time = now
    return do_save_backup, last_backup_time
def build_policy_observation_filter(extra_args, ob_space):
    '''
    If extra_args.policy_observation_filter is a string of the form "1:3:6",
    only provide the policy with observations number 1, 3 and 6; an empty
    spec passes observations (and the space) through untouched.
    '''
    spec = extra_args.policy_observation_filter
    if not spec:
        return ob_space, lambda ob: ob
    indices = [int(token) for token in spec.split(':')]

    def observation_filter(ob):
        return np.array([ob[i] for i in indices], dtype=ob.dtype)

    low = observation_filter(ob_space.low)
    high = observation_filter(ob_space.high)
    ob_space_filtered = gym.spaces.Box(low=low, high=high, dtype=ob_space.dtype)
    return ob_space_filtered, observation_filter
def build_mpi_vars(extra_args):
    '''
    Initialize process indices across direct and recovery agents.

    Ranks [0, n_direct) run direct policies; ranks
    [n_direct, n_direct + n_recovery) run recovery policies. Each group has
    its own root rank. Also builds the round-robin pairing tables used to
    exchange CNet data between the two groups (None when n_recovery == 0).
    '''
    mpi_comm = MPI.COMM_WORLD
    mpi_rank = mpi_comm.Get_rank()
    is_direct_policy = mpi_rank < extra_args.n_direct
    mpi_root_direct = 0
    mpi_group_direct = list(range(extra_args.n_direct))
    mpi_root_recovery = extra_args.n_direct
    mpi_group_recovery = list(range(extra_args.n_direct, extra_args.n_direct + extra_args.n_recovery))
    if is_direct_policy:
        mpi_root = mpi_root_direct
        mpi_group = mpi_group_direct
    else:
        mpi_root = mpi_root_recovery
        mpi_group = mpi_group_recovery
    # Non-root members of this rank's own group.
    mpi_destinations = [_e for _e in mpi_group if _e != mpi_root]
    mpi_n_processes = extra_args.n_direct + extra_args.n_recovery
    is_root = mpi_rank == mpi_root
    if extra_args.n_recovery > 0:
        # Correspondences between direct and recovery agents for CNet data exchange
        cnet_exchange_ids = {_i: [] for _i in mpi_group_direct + mpi_group_recovery}
        # Pair ranks round-robin so every process appears in at least one pair.
        for _i in range(max(len(mpi_group_direct), len(mpi_group_recovery))):
            _i_direct = mpi_group_direct[_i % len(mpi_group_direct)]
            _i_recovery = mpi_group_recovery[_i % len(mpi_group_recovery)]
            if not (_i_recovery in cnet_exchange_ids[_i_direct]):
                cnet_exchange_ids[_i_direct].append(_i_recovery)
            if not (_i_direct in cnet_exchange_ids[_i_recovery]):
                cnet_exchange_ids[_i_recovery].append(_i_direct)
        # Also get the index of each recovery process within those associated to the corresponding direct process (re-read this several times)
        cnet_recovery_id_in_direct_exchange_ids = {_i: {} for _i in mpi_group_recovery}
        for _i_recovery in mpi_group_recovery:
            for _i_direct in cnet_exchange_ids[_i_recovery]:
                cnet_recovery_id_in_direct_exchange_ids[_i_recovery][_i_direct] = cnet_exchange_ids[_i_direct].index(_i_recovery)
        n_exchange_processes = len(cnet_exchange_ids[mpi_rank])
    else:
        cnet_exchange_ids = None
        cnet_recovery_id_in_direct_exchange_ids = None
        n_exchange_processes = None
    return mpi_comm, mpi_rank, is_direct_policy, mpi_root, mpi_group, mpi_destinations, mpi_n_processes, is_root, cnet_recovery_id_in_direct_exchange_ids, cnet_exchange_ids, n_exchange_processes
def save_models_and_data(extra_args, iters_so_far, end_training, last_backup_time,
                         is_root, mpi_rank, pi, cnet, constraint_demonstration_buffer):
    '''
    Save policy network, constraint network and constraint demonstration buffer.

    A backup happens only when backups are enabled (backup_frequency > 0),
    the iteration matches the frequency (or training just ended), and
    min_time_between_backups has elapsed. Returns the (possibly refreshed)
    last_backup_time.
    '''
    do_save_at_all = extra_args.backup_frequency > 0
    # Guard the modulo: backup_frequency may be 0 (backups disabled), which
    # previously raised ZeroDivisionError here before do_save_at_all was
    # even consulted.
    do_save_this_iter = end_training or (
        do_save_at_all and ((iters_so_far - 1) % extra_args.backup_frequency) == 0)
    do_save_this_time, last_backup_time = check_time_between_backups(extra_args, last_backup_time)
    # Respect the single-objective training modes.
    do_save_policy = not extra_args.only_train_constraints
    do_save_constraints = not extra_args.only_train_policy
    do_save_buffer = not (extra_args.only_train_policy or extra_args.only_train_constraints)
    if do_save_at_all and do_save_this_iter and do_save_this_time:
        if do_save_policy and is_root:
            # save direct and recovery policies separatery
            pi.save_model(global_step=(iters_so_far-1), verbose=True)
        if do_save_constraints and (mpi_rank == 0):
            # same CNet for all agents
            cnet.save_model(global_step=(iters_so_far-1), verbose=True)
        if do_save_buffer:
            # different buffers for all agents
            constraint_demonstration_buffer.write(verbose=is_root)
    return last_backup_time
| StarcoderdataPython |
11247792 | <reponame>medunigraz/outpost.django.attendance
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-07-14 19:59
from __future__ import unicode_literals
from django.db import migrations, models
from ...base.fields import ChoiceArrayField
class Migration(migrations.Migration):
    """Add the multi-valued ``behaviour`` field to the ``Terminal`` model.

    Auto-generated by Django 1.11; do not edit the operations by hand —
    create a follow-up migration instead.
    """

    dependencies = [("attendance", "0005_auto_20171002_1450")]

    operations = [
        migrations.AddField(
            model_name="terminal",
            name="behaviour",
            # ChoiceArrayField stores a list of CharField values per terminal.
            # default=list (a callable) gives every existing row its own empty
            # list rather than one shared mutable default.
            field=ChoiceArrayField(
                base_field=models.CharField(max_length=256), default=list, size=None
            ),
        )
    ]
| StarcoderdataPython |
# Tags used to label answer options in multiple-choice data.
CORRECT_OPTION_TAG = "correct_option"
CORRECT_OPTION_GOLD_TAG = "gold"
INCORRECT_OPTION_TAG = "incorrect_option"

# Tags that mark an option as correct (plain or gold-annotated).
CORRECT_OPTION_TAG_LIST = [CORRECT_OPTION_TAG, CORRECT_OPTION_GOLD_TAG]

# Every recognized option tag: the correct ones followed by the incorrect one.
ALL_OPTION_TAG_LIST = CORRECT_OPTION_TAG_LIST + [INCORRECT_OPTION_TAG]
| StarcoderdataPython |
1795679 | """
seqgra evaluator hierarchy - gradient-based feature importance evaluators (FIE)
Classes:
- :class:`~seqgra.evaluator.gradientbased.gradientbasedevaluator.GradientBasedEvaluator`: abstract class for all gradient-based feature importance evaluators
- :class:`~seqgra.evaluator.gradientbased.abstractdifferencegradientevaluator.AbstractDifferenceGradientEvaluator`: abstract class for difference-based gradient-based feature importance evaluators
- :class:`~seqgra.evaluator.gradientbased.abstractgradientevaluator.AbstractGradientEvaluator`: abstract class for baseline-based gradient-based feature importance evaluators
- :class:`~seqgra.evaluator.gradientbased.contrastiveexcitationbackpropevaluator.ContrastiveExcitationBackpropEvaluator`: contrastive excitation backprop FIE
- :class:`~seqgra.evaluator.gradientbased.deconvevaluator.DeconvEvaluator`: deconvolution FIE
- :class:`~seqgra.evaluator.gradientbased.deepliftevaluator.DeepLiftEvaluator`: DeepLIFT FIE
- :class:`~seqgra.evaluator.gradientbased.differencegradientevaluator.DifferenceGradientEvaluator`: difference gradient FIE
- :class:`~seqgra.evaluator.gradientbased.excitationbackpropevaluator.ExcitationBackpropEvaluator`: excitation backprop FIE
- :class:`~seqgra.evaluator.gradientbased.feedbackevaluator.FeedbackEvaluator`: feedback FIE
- :class:`~seqgra.evaluator.gradientbased.gradcamgradientevaluator.GradCamGradientEvaluator`: GradCAM FIE
    - :class:`~seqgra.evaluator.gradientbased.gradientevaluator.GradientEvaluator`: gradient FIE
- :class:`~seqgra.evaluator.gradientbased.gradientxinputevaluator.GradientxInputEvaluator`: gradient times input FIE
- :class:`~seqgra.evaluator.gradientbased.guidedbackpropevaluator.GuidedBackpropEvaluator`: guided backprop FIE
- :class:`~seqgra.evaluator.gradientbased.integratedgradientevaluator.IntegratedGradientEvaluator`: Integrated Gradients FIE
- :class:`~seqgra.evaluator.gradientbased.nonlinearintegratedgradientevaluator.NonlinearIntegratedGradientEvaluator`: nonlinear Integrated Gradients FIE
- :class:`~seqgra.evaluator.gradientbased.saliencyevaluator.SaliencyEvaluator`: absolute gradient (saliency) FIE
- :class:`~seqgra.evaluator.gradientbased.smoothgradevaluator.SmoothGradEvaluator`: smooth grad FIE
"""
from seqgra.evaluator.gradientbased.gradientbasedevaluator import GradientBasedEvaluator
from seqgra.evaluator.gradientbased.abstractdifferencegradientevaluator import AbstractDifferenceGradientEvaluator
from seqgra.evaluator.gradientbased.abstractgradientevaluator import AbstractGradientEvaluator
from seqgra.evaluator.gradientbased.contrastiveexcitationbackpropevaluator import ContrastiveExcitationBackpropEvaluator
from seqgra.evaluator.gradientbased.deconvevaluator import DeconvEvaluator
from seqgra.evaluator.gradientbased.deepliftevaluator import DeepLiftEvaluator
from seqgra.evaluator.gradientbased.differencegradientevaluator import DifferenceGradientEvaluator
from seqgra.evaluator.gradientbased.excitationbackpropevaluator import ExcitationBackpropEvaluator
from seqgra.evaluator.gradientbased.feedbackevaluator import FeedbackEvaluator
from seqgra.evaluator.gradientbased.gradcamgradientevaluator import GradCamGradientEvaluator
from seqgra.evaluator.gradientbased.gradientevaluator import GradientEvaluator
from seqgra.evaluator.gradientbased.gradientxinputevaluator import GradientxInputEvaluator
from seqgra.evaluator.gradientbased.guidedbackpropevaluator import GuidedBackpropEvaluator
from seqgra.evaluator.gradientbased.integratedgradientevaluator import IntegratedGradientEvaluator
from seqgra.evaluator.gradientbased.nonlinearintegratedgradientevaluator import NonlinearIntegratedGradientEvaluator
from seqgra.evaluator.gradientbased.saliencyevaluator import SaliencyEvaluator
from seqgra.evaluator.gradientbased.smoothgradevaluator import SmoothGradEvaluator
| StarcoderdataPython |
3417097 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-01-21 13:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Repoint the GDPR-consent help texts of talks and workshops at the 2019
    privacy-policy and code-of-conduct pages.

    Auto-generated by Django 1.11; do not edit the operations by hand —
    create a follow-up migration instead.
    """

    dependencies = [
        ('proposals', '0003_auto_20190121_1307'),
    ]

    operations = [
        migrations.AlterField(
            model_name='talk',
            name='gdpr_consent',
            field=models.BooleanField(default=False, help_text='<a href="2019/about/privacy/">Privacy policy</a> and <a href="2019/about/code/">Code of Conduct</a>', verbose_name='I have read and agree to the PyCon CZ Privacy Policy and Code of Conduct'),
        ),
        migrations.AlterField(
            model_name='workshop',
            name='gdpr_consent',
            field=models.BooleanField(default=False, help_text='<a href="2019/about/privacy/">Privacy policy</a> and <a href="2019/about/code/">Code of Conduct</a>', verbose_name='I have read and agree to the PyCon CZ Privacy Policy and Code of Conduct'),
        ),
    ]
| StarcoderdataPython |
4870868 | # -*- coding: utf-8 -*-
import os
import time
import unittest
from configparser import ConfigParser
import inspect
import copy
import requests as _requests
from unittest.mock import patch
from AbstractHandle.AbstractHandleImpl import AbstractHandle
from AbstractHandle.AbstractHandleServer import MethodContext
from AbstractHandle.authclient import KBaseAuth as _KBaseAuth
from AbstractHandle.Utils.MongoUtil import MongoUtil
from AbstractHandle.Utils.Handler import Handler
from installed_clients.WorkspaceClient import Workspace
from mongo_util import MongoHelper
class handle_serviceTest(unittest.TestCase):
    """Integration tests for the AbstractHandle service implementation.

    These tests talk to live KBase auth, Workspace, Shock and MongoDB backends
    configured through KB_DEPLOYMENT_CONFIG, so they are deployment tests
    rather than isolated unit tests. Shock nodes created during a run are
    tracked in ``shock_ids_to_delete`` and removed in ``tearDownClass``.
    """

    @classmethod
    def setUpClass(cls):
        """Read deploy config, authenticate, and build service/DB clients once."""
        cls.token = os.environ.get('KB_AUTH_TOKEN', None)
        config_file = os.environ.get('KB_DEPLOYMENT_CONFIG', None)
        cls.cfg = {}
        config = ConfigParser()
        config.read(config_file)
        for nameval in config.items('AbstractHandle'):
            cls.cfg[nameval[0]] = nameval[1]
        # The test token doubles as the admin token for the service under test.
        cls.cfg['admin-token'] = cls.token
        # Getting username from Auth profile for token
        authServiceUrl = cls.cfg['auth-service-url']
        auth_client = _KBaseAuth(authServiceUrl)
        cls.user_id = auth_client.get_user(cls.token)
        cls.shock_url = cls.cfg['shock-url']
        # WARNING: don't call any logging methods on the context object,
        # it'll result in a NoneType error
        cls.ctx = MethodContext(None)
        cls.ctx.update({'token': cls.token,
                        'user_id': cls.user_id,
                        'provenance': [
                            {'service': 'AbstractHandle',
                             'method': 'please_never_use_it_in_production',
                             'method_params': []
                             }],
                        'authenticated': 1})
        cls.wsURL = cls.cfg['workspace-url']
        cls.wsClient = Workspace(cls.wsURL)
        cls.serviceImpl = AbstractHandle(cls.cfg)
        cls.scratch = cls.cfg['scratch']
        cls.callback_url = os.environ['SDK_CALLBACK_URL']
        # Seed a dedicated Mongo test database/collection so runs are isolated.
        cls.mongo_helper = MongoHelper()
        cls.my_client = cls.mongo_helper.create_test_db(db=cls.cfg['mongo-database'],
                                                        col=cls.cfg['mongo-collection'])
        cls.mongo_util = MongoUtil(cls.cfg)
        # Shock node ids created by tests; cleaned up in tearDownClass.
        cls.shock_ids_to_delete = list()

    @classmethod
    def tearDownClass(cls):
        """Delete the test workspace (if created) and any leftover Shock nodes."""
        if hasattr(cls, 'wsName'):
            cls.wsClient.delete_workspace({'workspace': cls.wsName})
            print('Test workspace was deleted')
        if hasattr(cls, 'shock_ids_to_delete'):
            print('Nodes to delete: {}'.format(cls.shock_ids_to_delete))
            cls.deleteShockID(cls.shock_ids_to_delete)

    @classmethod
    def deleteShockID(cls, shock_ids):
        """Best-effort deletion of the given Shock nodes; failures are logged only."""
        headers = {'Authorization': 'OAuth {}'.format(cls.token)}
        for shock_id in shock_ids:
            end_point = os.path.join(cls.shock_url, 'node', shock_id)
            resp = _requests.delete(end_point, headers=headers, allow_redirects=True)
            if resp.status_code != 200:
                print('Cannot detele shock node ' + shock_id)
            else:
                print('Deleted shock node ' + shock_id)

    def getWsClient(self):
        """Return the shared Workspace client."""
        return self.__class__.wsClient

    def getWsName(self):
        """Lazily create (once) and return the name of a throwaway test workspace."""
        if hasattr(self.__class__, 'wsName'):
            return self.__class__.wsName
        suffix = int(time.time() * 1000)
        wsName = "test_AbstractHandle_" + str(suffix)
        ret = self.getWsClient().create_workspace({'workspace': wsName})  # noqa
        self.__class__.wsName = wsName
        return wsName

    def getImpl(self):
        """Return the AbstractHandle implementation under test."""
        return self.__class__.serviceImpl

    def getContext(self):
        """Return the shared method-call context."""
        return self.__class__.ctx

    def createTestNode(self):
        """Create an empty Shock node owned by the test user and return its id.

        The id is also queued for deletion in tearDownClass.
        """
        headers = {'Authorization': 'OAuth {}'.format(self.token)}
        end_point = os.path.join(self.shock_url, 'node')
        resp = _requests.post(end_point, headers=headers)
        if resp.status_code != 200:
            raise ValueError('Grant user readable access failed.\nError Code: {}\n{}\n'
                             .format(resp.status_code, resp.text))
        else:
            shock_id = resp.json().get('data').get('id')
            self.shock_ids_to_delete.append(shock_id)
        return shock_id

    def start_test(self):
        """Log the name of the calling test method (taken from the call stack)."""
        testname = inspect.stack()[1][3]
        print('\n*** starting test: ' + testname + ' **')

    def test_fetch_handles_by_ok(self):
        """fetch_handles_by should filter on 'hid' and 'id' fields and hide '_id'."""
        self.start_test()
        handler = self.getImpl()

        # test query 'hid' field
        # NOTE(review): hids 68021/68022 and the uuid below are assumed to be
        # pre-seeded by MongoHelper.create_test_db — verify against its fixture.
        elements = [68021, 68022]
        field_name = 'hid'
        handles = handler.fetch_handles_by(self.ctx, {'elements': elements, 'field_name': field_name})[0]
        self.assertEqual(len(handles), 2)
        self.assertCountEqual(elements, [h.get('hid') for h in handles])

        # test query 'hid' field with empty data
        elements = [0]
        field_name = 'hid'
        handles = handler.fetch_handles_by(self.ctx, {'elements': elements, 'field_name': field_name})[0]
        self.assertEqual(len(handles), 0)

        # test query 'id' field
        elements = ['b753774f-0bbd-4b96-9202-89b0c70bf31c']
        field_name = 'id'
        handles = handler.fetch_handles_by(self.ctx, {'elements': elements, 'field_name': field_name})[0]
        self.assertEqual(len(handles), 1)
        handle = handles[0]
        # Mongo's internal '_id' must not leak out of the service layer.
        self.assertFalse('_id' in handle)
        self.assertEqual(handle.get('hid'), 67712)

    def test_ids_to_handles_ok(self):
        """ids_to_handles should resolve external ids to full handle records."""
        self.start_test()
        handler = self.getImpl()

        ids = ['b753774f-0bbd-4b96-9202-89b0c70bf31c']
        handles = handler.ids_to_handles(self.ctx, ids)[0]
        self.assertEqual(len(handles), 1)
        handle = handles[0]
        self.assertFalse('_id' in handle)
        self.assertEqual(handle.get('hid'), 67712)

    def test_hids_to_handles_ok(self):
        """hids_to_handles should resolve handle ids to full handle records."""
        self.start_test()
        handler = self.getImpl()

        hids = [68021, 68022]
        handles = handler.hids_to_handles(self.ctx, hids)[0]
        self.assertEqual(len(handles), 2)
        self.assertCountEqual(hids, [h.get('hid') for h in handles])

    def test_persist_handle_ok(self):
        """persist_handle should insert a new handle and update an existing one in place."""
        self.start_test()
        handler = self.getImpl()

        handle = {'id': 'id',
                  'file_name': 'file_name',
                  'type': 'shock',
                  'url': 'http://ci.kbase.us:7044/'}

        # testing persist_handle with non-existing handle (inserting a handle)
        hid = handler.persist_handle(self.ctx, handle)[0]
        handles = handler.fetch_handles_by(self.ctx, {'elements': [hid], 'field_name': 'hid'})[0]
        self.assertEqual(len(handles), 1)
        handle = handles[0]
        self.assertEqual(handle.get('hid'), hid)
        self.assertEqual(handle.get('id'), 'id')
        self.assertEqual(handle.get('file_name'), 'file_name')
        self.assertEqual(handle.get('created_by'), self.user_id)

        # testing persist_handle with existing handle (updating a handle)
        new_handle = copy.deepcopy(handle)
        new_file_name = 'new_file_name'
        new_id = 'new_id'
        new_handle['file_name'] = new_file_name
        new_handle['id'] = new_id
        new_hid = handler.persist_handle(self.ctx, new_handle)[0]
        handles = handler.fetch_handles_by(self.ctx, {'elements': [new_hid], 'field_name': 'hid'})[0]
        self.assertEqual(len(handles), 1)
        handle = handles[0]
        self.assertEqual(handle.get('hid'), new_hid)
        self.assertEqual(handle.get('id'), new_id)
        self.assertEqual(handle.get('file_name'), new_file_name)
        self.assertEqual(handle.get('created_by'), self.user_id)
        # an update must keep the same handle id
        self.assertEqual(new_hid, hid)

        self.mongo_util.delete_one(handle)

    def test_delete_handles_ok(self):
        """delete_handles should remove exactly the handles it is given."""
        self.start_test()
        handler = self.getImpl()

        handles = [{'id': 'id',
                    'file_name': 'file_name',
                    'type': 'shock',
                    'url': 'http://ci.kbase.us:7044/'}] * 2

        hids_to_delete = list()
        for handle in handles:
            hid = handler.persist_handle(self.ctx, handle)[0]
            hids_to_delete.append(hid)

        handles_to_delete = handler.fetch_handles_by(self.ctx, {'elements': hids_to_delete, 'field_name': 'hid'})[0]
        delete_count = handler.delete_handles(self.ctx, handles_to_delete)[0]
        self.assertEqual(delete_count, len(hids_to_delete))

    def test_is_owner_ok(self):
        """is_owner should be true for handles whose Shock nodes the caller created."""
        self.start_test()
        handler = self.getImpl()
        hids = list()

        node_id = self.createTestNode()
        handle = {'id': node_id,
                  'file_name': 'file_name',
                  'type': 'shock',
                  'url': 'https://ci.kbase.us/services/shock-api'}
        hid = handler.persist_handle(self.ctx, handle)[0]
        hids.append(hid)

        node_id2 = self.createTestNode()
        handle = {'id': node_id2,
                  'file_name': 'file_name',
                  'type': 'shock',
                  'url': 'https://ci.kbase.us/services/shock-api'}
        hid = handler.persist_handle(self.ctx, handle)[0]
        hids.append(hid)

        is_owner = handler.is_owner(self.ctx, hids)[0]
        self.assertTrue(is_owner)

        # clean up the handles we just created
        new_handles = handler.fetch_handles_by(self.ctx, {'elements': hids, 'field_name': 'hid'})[0]
        for handle in new_handles:
            self.mongo_util.delete_one(handle)

    def test_are_is_readable_ok(self):
        """are_readable / is_readable should be true for the creator's own nodes."""
        self.start_test()
        handler = self.getImpl()
        hids = list()

        node_id = self.createTestNode()
        handle = {'id': node_id,
                  'file_name': 'file_name',
                  'type': 'shock',
                  'url': 'https://ci.kbase.us/services/shock-api'}
        hid = handler.persist_handle(self.ctx, handle)[0]
        hids.append(hid)

        node_id = self.createTestNode()
        handle = {'id': node_id,
                  'file_name': 'file_name',
                  'type': 'shock',
                  'url': 'https://ci.kbase.us/services/shock-api'}
        hid = handler.persist_handle(self.ctx, handle)[0]
        hids.append(hid)

        are_readable = handler.are_readable(self.ctx, hids)[0]
        self.assertTrue(are_readable)

        is_readable = handler.is_readable(self.ctx, hids[0])[0]
        self.assertTrue(is_readable)

        # clean up the handles we just created
        new_handles = handler.fetch_handles_by(self.ctx, {'elements': hids, 'field_name': 'hid'})[0]
        for handle in new_handles:
            self.mongo_util.delete_one(handle)

    @patch.object(Handler, "_is_admin_user", return_value=True)
    def test_add_read_acl_ok(self, _is_admin_user):
        """set_public_read and add_read_acl should update the node's Shock ACLs.

        Admin checks are patched out so the test token can modify ACLs.
        """
        self.start_test()
        handler = self.getImpl()

        node_id = self.createTestNode()
        hids = list()
        handle = {'id': node_id,
                  'file_name': 'file_name',
                  'type': 'shock',
                  'url': 'https://ci.kbase.us/services/shock-api'}
        hid = handler.persist_handle(self.ctx, handle)[0]
        hids.append(hid)

        headers = {'Authorization': 'OAuth {}'.format(self.token)}
        end_point = os.path.join(self.shock_url, 'node', node_id, 'acl/?verbosity=full')
        resp = _requests.get(end_point, headers=headers)
        data = resp.json()
        # no public access at the beginning
        self.assertFalse(data.get('data').get('public').get('read'))
        # only token user has read access
        users = [user.get('username') for user in data.get('data').get('read')]
        self.assertCountEqual(users, [self.user_id])

        # grant public read access
        succeed = handler.set_public_read(self.ctx, hids)[0]
        self.assertTrue(succeed)
        resp = _requests.get(end_point, headers=headers)
        data = resp.json()
        self.assertTrue(data.get('data').get('public').get('read'))

        # should work for already publicly accessable ndoes
        succeed = handler.set_public_read(self.ctx, hids)[0]
        self.assertTrue(succeed)
        resp = _requests.get(end_point, headers=headers)
        data = resp.json()
        self.assertTrue(data.get('data').get('public').get('read'))

        # test grant access to user who already has read access
        succeed = handler.add_read_acl(self.ctx, hids, username=self.user_id)[0]
        self.assertTrue(succeed)
        resp = _requests.get(end_point, headers=headers)
        data = resp.json()
        new_users = [user.get('username') for user in data.get('data').get('read')]
        self.assertCountEqual(new_users, [self.user_id])

        # grant access to tgu3
        new_user = 'tgu3'
        succeed = handler.add_read_acl(self.ctx, hids, username=new_user)[0]
        self.assertTrue(succeed)
        resp = _requests.get(end_point, headers=headers)
        data = resp.json()
        new_users = [user.get('username') for user in data.get('data').get('read')]
        self.assertCountEqual(new_users, [self.user_id, new_user])

        handles_to_delete = handler.fetch_handles_by(self.ctx, {'elements': hids, 'field_name': 'hid'})[0]
        delete_count = handler.delete_handles(self.ctx, handles_to_delete)[0]
        self.assertEqual(delete_count, len(hids))
| StarcoderdataPython |
8088706 | <filename>beanie/odm/settings/document.py<gh_stars>0
import warnings
from typing import Optional, Type, List
from motor.motor_asyncio import AsyncIOMotorDatabase
from pydantic import Field
from pymongo import IndexModel
from beanie.exceptions import MongoDBVersionError
from beanie.odm.settings.base import ItemSettings
from beanie.odm.settings.timeseries import TimeSeriesConfig
class IndexModelField(IndexModel):
    """Pydantic-aware subclass of :class:`pymongo.IndexModel`.

    Lets model fields declare MongoDB indexes either as ready-made
    ``IndexModel`` instances or as anything ``IndexModel`` itself accepts
    (e.g. a key name or a list of key/direction pairs).
    """

    @classmethod
    def __get_validators__(cls):
        # Pydantic custom-type hook: the single coercion step below.
        yield cls.validate

    @classmethod
    def validate(cls, value):
        """Pass through existing IndexModel instances; coerce everything else."""
        return value if isinstance(value, IndexModel) else IndexModel(value)
class DocumentSettings(ItemSettings):
    """Settings collected from a Document model's inner ``Settings`` class.

    Holds state-management, validation, revision, index and timeseries
    configuration, and performs the one-time collection/index setup in
    :meth:`init`.
    """

    # Track per-instance change state for partial saves; if the replace flag is
    # set, whole documents are replaced instead.
    use_state_management: bool = False
    state_management_replace_objects: bool = False
    validate_on_save: bool = False
    use_revision: bool = False
    indexes: List[IndexModelField] = Field(default_factory=list)
    timeseries: Optional[TimeSeriesConfig] = None

    @classmethod
    async def init(
        cls,
        database: AsyncIOMotorDatabase,
        document_model: Type,
        allow_index_dropping: bool,
    ) -> "DocumentSettings":
        """Build settings for ``document_model`` and prepare its collection.

        Reads the model's inner ``Settings`` (or deprecated ``Collection``)
        class, resolves the collection name, enforces the MongoDB >= 5
        requirement for timeseries, creates the motor collection, and
        creates/drops indexes as configured.

        :param database: motor database the model lives in.
        :param document_model: the Document subclass being initialized.
        :param allow_index_dropping: if True, indexes present in Mongo but no
            longer declared on the model are dropped.
        :raises MongoDBVersionError: when timeseries are configured on a
            MongoDB server older than version 5.
        """
        settings_class = getattr(document_model, "Settings", None)
        settings_vars = (
            {} if settings_class is None else dict(vars(settings_class))
        )

        # deprecated Collection class support
        collection_class = getattr(document_model, "Collection", None)
        if collection_class is not None:
            warnings.warn(
                "Collection inner class is deprecated, use Settings instead",
                DeprecationWarning,
            )
        # Collection values override Settings values during the deprecation period.
        collection_vars = (
            {} if collection_class is None else dict(vars(collection_class))
        )
        settings_vars.update(collection_vars)
        # ------------------------------------ #

        document_settings = DocumentSettings.parse_obj(settings_vars)

        document_settings.motor_db = database

        # register in the Union Doc
        if document_settings.union_doc is not None:
            document_settings.name = document_settings.union_doc.register_doc(
                document_model
            )

        # set a name: fall back to the model's class name
        if not document_settings.name:
            document_settings.name = document_model.__name__

        # check mongodb version
        build_info = await database.command({"buildInfo": 1})
        mongo_version = build_info["version"]
        major_version = int(mongo_version.split(".")[0])
        if document_settings.timeseries is not None and major_version < 5:
            raise MongoDBVersionError(
                "Timeseries are supported by MongoDB version 5 and higher"
            )

        # create motor collection (timeseries collections must be created
        # explicitly; regular collections are created lazily by Mongo)
        if (
            document_settings.timeseries is not None
            and document_settings.name
            not in await database.list_collection_names()
        ):
            collection = await database.create_collection(
                **document_settings.timeseries.build_query(
                    document_settings.name
                )
            )
        else:
            collection = database[document_settings.name]

        document_settings.motor_collection = collection

        # indexes
        old_indexes = (await collection.index_information()).keys()
        new_indexes = ["_id_"]

        # Indexed field wrapped with Indexed()
        found_indexes = [
            IndexModel(
                [
                    (
                        fvalue.alias,
                        fvalue.type_._indexed[0],
                    )
                ],
                **fvalue.type_._indexed[1]
            )
            for _, fvalue in document_model.__fields__.items()
            if hasattr(fvalue.type_, "_indexed") and fvalue.type_._indexed
        ]

        # get indexes from the Collection class
        if document_settings.indexes:
            found_indexes += document_settings.indexes

        # create indices
        if found_indexes:
            new_indexes += await collection.create_indexes(found_indexes)

        # delete indexes
        # Only drop indexes if the user specifically allows for it
        if allow_index_dropping:
            for index in set(old_indexes) - set(new_indexes):
                await collection.drop_index(index)

        return document_settings

    class Config:
        # Needed so pydantic accepts motor/pymongo objects as field values.
        arbitrary_types_allowed = True
| StarcoderdataPython |
5036366 | <reponame>dmcinerney/Summarization
import torch
from torch import nn
from torch.nn import functional as F
from models.beam_search import beam_search
from models.submodules import LSTMTextEncoder, CombineContext, LSTMSummaryDecoder, ContextVectorNN, VocabularyDistributionNN, ProbabilityNN
from models.model_helpers import GeneratedSummary, GeneratedSummaryHypothesis, PointerInfo, trim_text
import parameters as p
# Outline:
# a) Summarizer
# b) Encoder
# c) Decoder
class Summarizer(nn.Module):
    """End-to-end abstractive summarizer.

    Encodes the source text and decodes a summary, optionally with a coverage
    penalty and a pointer-generator copy mechanism.
    """

    def __init__(self, vectorizer, start_index, end_index, num_hidden=None, attn_hidden=None, with_coverage=False, gamma=1, with_pointer=False, encoder_base=LSTMTextEncoder, decoder_base=LSTMSummaryDecoder, decoder_parallel_base=None):
        super(Summarizer, self).__init__()
        # Vocabulary/embedding machinery and special-token indices.
        self.vectorizer = vectorizer
        self.start_index = start_index
        self.end_index = end_index
        # Hidden size defaults to half the embedding width.
        self.num_hidden = num_hidden if num_hidden is not None else vectorizer.vector_size // 2
        self.attn_hidden = attn_hidden
        # Coverage / pointer configuration.
        self.with_coverage = with_coverage
        self.gamma = gamma
        self.with_pointer = with_pointer
        # Encoder/decoder implementations to instantiate below.
        self.encoder_base = encoder_base
        self.decoder_base = decoder_base
        self.decoder_parallel_base = decoder_parallel_base
        self.init_submodules()

    def init_submodules(self):
        """Build the encoder and the (pointer-generator or plain) decoder."""
        self.encoder = Encoder(self.vectorizer, self.num_hidden, encoder_base=self.encoder_base)
        decoder_class = PointerGenDecoder if self.with_pointer else Decoder
        self.decoder = decoder_class(
            self.vectorizer, self.start_index, self.end_index, self.num_hidden,
            attn_hidden=self.attn_hidden, with_coverage=self.with_coverage,
            gamma=self.gamma, decoder_base=self.decoder_base,
            decoder_parallel_base=self.decoder_parallel_base)

    def forward(self, text, text_length, text_oov_indices=None, summary=None, summary_length=None, beam_size=1, store=None):
        """Encode `text`, then either score `summary` (training) or generate one."""
        # Clip overly long inputs before encoding/decoding.
        text, text_length = trim_text(text, text_length, p.MAX_TEXT_LENGTH)
        if summary is not None:
            summary, summary_length = trim_text(summary, summary_length, p.MAX_SUMMARY_LENGTH)
        encoder_states, final_state = self.encoder(text, text_length, store=store)
        # The pointer decoder needs the raw source tokens available for copying.
        if self.with_pointer:
            self.decoder.set_pointer_info(PointerInfo(text, text_oov_indices))
        return self.decoder(encoder_states, text_length, final_state,
                            summary=summary, summary_length=summary_length,
                            beam_size=beam_size)
class Encoder(nn.Module):
    """Embeds token indices and runs them through a recurrent text encoder."""

    def __init__(self, vectorizer, num_hidden, encoder_base=LSTMTextEncoder):
        super(Encoder, self).__init__()
        self.vectorizer = vectorizer
        # The text encoder emits states of width 2*num_hidden.
        self.text_encoder = encoder_base(self.vectorizer.vector_size, num_hidden * 2)

    def forward(self, text, text_length, store=None):
        """Return (per-token encoder states, final encoder state) for a batch."""
        # Turn index batches into embedding vectors, then encode.
        embedded = self.vectorizer(text, text_length)
        return self.text_encoder(embedded, text_length, store=store)
class Decoder(nn.Module):
def __init__(self, vectorizer, start_index, end_index, num_hidden, attn_hidden=None, with_coverage=False, gamma=1, decoder_base=LSTMSummaryDecoder, decoder_parallel_base=None):
super(Decoder, self).__init__()
self.vectorizer = vectorizer
self.start_index = start_index
self.end_index = end_index
self.num_hidden = num_hidden
self.attn_hidden = num_hidden//2 if attn_hidden is None else attn_hidden
self.with_coverage = with_coverage
self.gamma = gamma
self.num_features = self.vectorizer.vector_size
self.num_vocab = self.vectorizer.vocab_size
self.decoder_base = decoder_base
self.decoder_parallel_base = decoder_parallel_base
self.init_submodules()
def init_submodules(self):
self.combine_context = CombineContext(self.num_features, self.num_hidden*2)
self.summary_decoder = self.decoder_base(self.num_features, self.num_hidden*2)
if self.decoder_parallel_base is not None:
# self.summary_decoder_parallel = self.decoder_parallel_base(self.summary_decoder)
raise NotImplementedError("Parallel base optimized mode is still under construction!")
self.context_nn = ContextVectorNN(self.num_hidden*4+1, self.attn_hidden)
self.vocab_nn = VocabularyDistributionNN(self.num_hidden*4, self.num_hidden, self.num_vocab+1)
def forward(self, text_states, text_length, state, summary=None, summary_length=None, beam_size=1):
if summary is None:
return self.decode_generate(text_states, text_length, state, beam_size=beam_size)
else:
return self.decode_train(text_states, text_length, state, summary, summary_length)
# return self.decode_train_optimized(text_states, text_length, state, summary, summary_length)
def decode_generate(self, text_states, text_length, state, beam_size=1):
# initialize
batch_length = text_states.size(0)
device = text_states.device
generated_summary = GeneratedSummary(batch_length, device, self.start_index, self.end_index)
coverage = torch.zeros((batch_length, text_states.size(1)), device=device)
context_vector = torch.zeros((batch_length, text_states.size(2)), device=device)
hypothesis = GeneratedSummaryHypothesis(self, generated_summary, text_states, text_length, state, coverage, context_vector)
summary_hyps = beam_search(hypothesis.next_hypotheses(beam_size), beam_size)
results = [summary_hyp.generated_summary.return_info() for summary_hyp in summary_hyps]
for r in results:
indices = r[0]
self.map_generated_indices_(indices)
return results
# implements the forward pass of the decoder for training
# this uses teacher forcing, but conceivably one could try
# and should add other algorithms for training
def decode_train(self, text_states, text_length, state, summary, summary_length):
# initialize
batch_length = text_states.size(0)
device = text_states.device
context_vector = torch.zeros((batch_length, text_states.size(2)), device=device)
coverage = torch.zeros((batch_length, text_states.size(1)), device=device)
loss_unnormalized = torch.zeros(batch_length, device=device)
summary_tp1 = summary[:,0]
for t in range(summary.size(1)-1):
# set timestep words
summary_t = summary_tp1
# get indices of instances that are not finished
valid_indices = torch.nonzero((summary_length-t-1) > 0)[:,0]
# take a time step
vocab_dist, state, attention, context_vector = self.timestep_wrapper(valid_indices, summary_t, text_states, text_length, state, coverage, context_vector)
# get next time step words
summary_tp1 = summary[:,t+1]
# calculate log prob, calculate covloss if aplicable, update coverage if aplicable
summary_tp1_valid = summary_tp1[valid_indices]
self.map_input_indices_(summary_tp1_valid)
log_prob = torch.zeros(batch_length, device=device)
log_prob[valid_indices] = self.calculate_log_prob(vocab_dist, summary_tp1_valid)
if self.with_coverage:
covloss = torch.zeros(batch_length, device=device)
covloss[valid_indices] = self.calculate_covloss(coverage[valid_indices], attention[valid_indices])
coverage += attention
# update unnormalized loss
loss_unnormalized += -log_prob + self.gamma*(covloss if self.with_coverage else 0)
return dict(loss=(loss_unnormalized/(summary_length.float()-1)))
# def decode_train_optimized(self, text_states, text_length, state, summary, summary_length):
# outputs, context_vectors, attentions, coverages = self.parallelized_pass(text_states, text_length, state, summary[:,:-1])
# targets = summary[:,1:]
# b, s_d = targets.size()
# losses = []
# for t in range(s_d):
# vocab_dist = self.vocab_nn(context_vectors[:,t], outputs[:,t])
# target = targets[:,t]
# self.map_input_indices_(target)
# log_prob = self.calculate_log_prob(vocab_dist, target)
# loss = -log_prob + self.gamma*(self.calculate_covloss(coverages[:,t], attentions[:,t]) if self.with_coverage else 0)
# losses.append(loss.unsqueeze(1))
# losses = torch.cat(losses, 1)
# mask = torch.arange(s_d, device=summary_length.device, dtype=torch.long).unsqueeze(0) < (summary_length.unsqueeze(1)-1)
# loss_unnormalized = (losses*mask.float()).sum(1)
# return dict(loss=(loss_unnormalized/(summary_length.float()-1)))
# def parallelized_pass(self, text_states, text_length, state, summary):
# # pass through the vectorizer
# s_e = text_states.size(1)
# b, s_d = summary.size()
# summary = self.vectorizer(summary, torch.ones(b, dtype=torch.long)*s_d)
# # pass through the decoder base (can only be parallelized when given a decoder parallel base)
# if self.decoder_parallel_base is not None:
# outputs, state = self.summary_decoder_parallel(summary, state)
# else:
# outputs = []
# for t in range(s_d):
# output, state = self.summary_decoder(summary[:,t], state)
# outputs.append(output.unsqueeze(1))
# outputs = torch.cat(outputs, 1)
# # pass through the attention (can only be parallelized when not with coverage)
# coverage = torch.zeros((b, s_e), device=summary.device)
# if not self.with_coverage:
# context_vectors, attentions = self.context_nn(text_states, text_length, outputs, coverage)
# coverages = coverage.unsqueeze(1).expand(b, s_d, s_e)
# else:
# context_vectors, attentions, coverages = [], [], []
# for t in range(s_d):
# context_vector, attention = self.context_nn(text_states, text_length, outputs[:,t].unsqueeze(1), coverage)
# context_vectors.append(context_vector)
# attentions.append(attention)
# coverages.append(coverage.unsqueeze(1))
# coverage = coverage + attention.squeeze(1)
# context_vectors, attentions, coverages = torch.cat(context_vectors, 1), torch.cat(attentions, 1), torch.cat(coverages, 1)
# return outputs, context_vectors, attentions, coverages
# this timestep calls timestep forward and converts the inputs to and from just the valid batch examples
# of those inputs at that time step
def timestep_wrapper(self, valid_indices, summary_t, text_states, text_length, prev_state, coverage, prev_context_vector):
# create tensors for returned values that need to have first dim of size batch_size
attention = torch.zeros_like(coverage, device=coverage.device)
context_vector = torch.zeros_like(prev_context_vector, device=prev_context_vector.device)
# NOTE: vocab_dist is returned with a first dim size of valid_indices.size(0)
# because we never need it to be of the full batch size
# do forward pass
vocab_dist, _, state_temp, attention[valid_indices], context_vector[valid_indices] = self.timestep(summary_t[valid_indices], text_states[valid_indices], text_length[valid_indices], prev_state[valid_indices], coverage[valid_indices], prev_context_vector[valid_indices])
# create new state of full batch size (need to do this afterwards because it could be variable length
# so need to get new sizes from state_temp)
state = torch.zeros((prev_state.size(0), *state_temp.shape[1:]), device=state_temp.device)
state[valid_indices] = state_temp
# vocab_dist = vocab_dist + p.EPSILON
# vocab_dist = vocab_dist/vocab_dist.sum(1, keepdim=True)
return vocab_dist, state, attention, context_vector
# runs the inputs for a time step through the neural nets to get the vocab distribution for that timestep
# and other necessary information: inputs to the next hidden state in the decoder, attention, and the context vector
# (the context vector is only needed in the subclass of this so kinda bad style but whatever)
def timestep(self, summary_t, text_states, text_length, prev_state, coverage, prev_context_vector):
# summary_vec_t = self.vectorizer.get_text_matrix(summary_t, len(summary_t))[0]
summary_vec_t = self.vectorizer(summary_t.view(1,-1), torch.tensor([len(summary_t)], device=summary_t.device))[0]
summary_vec_t_mod = self.combine_context(summary_vec_t, prev_context_vector)
output, state = self.summary_decoder(summary_vec_t_mod, prev_state)
context_vector, attention = self.context_nn(text_states, text_length, output.unsqueeze(1), coverage)
context_vector, attention = context_vector[:,0], attention[:,0]
vocab_dist = self.vocab_nn(context_vector, output)
return vocab_dist, output, state, attention, context_vector
# calulates the log probability of the summary at a time step given the vocab distribution for that time step
def calculate_log_prob(self, vocab_dist, summary_tp1):
return torch.log(vocab_dist[torch.arange(summary_tp1.size(0)).long(),summary_tp1.long()])
# calculates the coverage loss for each batch example at a time step
def calculate_covloss(self, coverage, attention):
return torch.min(torch.cat((coverage.unsqueeze(0), attention.unsqueeze(0)), 0), 0)[0].sum(1)
# map oov indices maps the indices of oov words to a specific index corresponding to the position in the vocab distribution
# that represents an oov word
def map_input_indices_(self, indices):
    """In-place: remap the OOV marker (-1) to the single OOV slot at index ``vocab_size``."""
    indices[indices == -1] = self.vectorizer.vocab_size
def map_generated_indices_(self, indices):
    """In-place inverse of ``map_input_indices_``: OOV slot index back to the -1 marker."""
    indices[indices == self.vectorizer.vocab_size] = -1
# this adds any extra information you may want to add to a summary
def get_extras(self):
    """Hook for subclasses: extra per-summary information to attach (none by default)."""
    return tuple()
# This model subclasses the generator model so that on each forward timestep, it averages the generator vocab distribution
# with a pointer distribution obtained from the attention distribution and these are weighted by p_gen and 1-p_gen respectively
# where p_gen is the probability of generating vs copying
class PointerGenDecoder(Decoder):
    """Pointer-generator decoder (See et al., 2017 style).

    Subclasses the generator :class:`Decoder` so that on each forward timestep the
    generator vocab distribution is averaged with a pointer (copy) distribution
    obtained from the attention weights.  The two are weighted by ``p_gen`` and
    ``1 - p_gen`` respectively, where ``p_gen`` is the probability of generating
    vs copying.
    """
    def __init__(self, *args, **kwargs):
        super(PointerGenDecoder, self).__init__(*args, **kwargs)
        # Batch-specific pointer bookkeeping (source text, OOV words, p_gen).
        # Must be set via set_pointer_info() before every forward() call.
        self.pointer_info = None
    def init_submodules(self):
        super(PointerGenDecoder, self).init_submodules()
        # Predicts p_gen from the concatenated context vector and decoder output.
        self.probability_layer = ProbabilityNN(self.num_hidden*4)
    def set_pointer_info(self, pointer_info):
        """Attach the per-batch pointer bookkeeping object used during decoding."""
        self.pointer_info = pointer_info
    # this is a little bit of a hacky solution, setting the pointer info as an object
    # attribute temporarily; it is cleared again once the forward pass is done
    def forward(self, *args, **kwargs):
        return_values = super(PointerGenDecoder, self).forward(*args, **kwargs)
        self.pointer_info = None
        return return_values
    def timestep_wrapper(self, valid_indices, summary_t, text_states, text_length, prev_state, coverage, prev_context_vector):
        # Keep the pointer bookkeeping in sync with the batch rows still being decoded.
        self.pointer_info.update_valid_indices(valid_indices)
        return super(PointerGenDecoder, self).timestep_wrapper(valid_indices, summary_t, text_states, text_length, prev_state, coverage, prev_context_vector)
    def timestep(self, summary_t, text_states, text_length, prev_state, coverage, prev_context_vector):
        if self.pointer_info is None:
            # BUG FIX: raise an informative error (RuntimeError is still an Exception,
            # so existing broad handlers keep working) instead of a bare ``Exception``.
            raise RuntimeError(
                "PointerGenDecoder.timestep() called without pointer info; "
                "call set_pointer_info() before decoding"
            )
        # execute the normal timestep function, then mix in the copy distribution
        vocab_dist, output, state, attention, context_vector = super(PointerGenDecoder, self).timestep(summary_t, text_states, text_length, prev_state, coverage, prev_context_vector)
        final_vocab_dist = self.timestep_addon(vocab_dist, output, attention, context_vector)
        return final_vocab_dist, output, state, attention, context_vector
    def timestep_addon(self, vocab_dist, output, attention, context_vector):
        """Mix the generator distribution with the attention-based copy distribution.

        Returns a distribution over the extended vocabulary
        (static vocab + up to ``max_num_oov`` per-batch OOV slots).
        """
        # get probability of generating vs copying
        if p.P_GEN is None:
            p_gen = self.probability_layer(context_vector, output)
        else:
            # fixed p_gen override from the hyperparameter module
            p_gen = torch.zeros((context_vector.size(0),1), device=context_vector.device) + p.P_GEN
        self.pointer_info.update_p_gen(p_gen)
        # get text
        # get unique word indices
        # and the maximum number of oov words in the batch
        text = self.pointer_info.get_text()
        word_indices = self.pointer_info.word_indices
        max_num_oov = self.pointer_info.max_num_oov
        new_vocab_size = (vocab_dist.size(0),vocab_dist.size(1)+max_num_oov)
        # create distribution over vocab using attention
        # NOTE: for the same words in the text, the attention probability is summed from those indices
        # indicator of size (# of unique words, batch size, seq length) such that
        # element (i, j, k) indicates whether unique word i is in batch example j at sequence position k
        indicator = text.expand(word_indices.size(0),*text.size()) == word_indices.view(-1,1,1).expand(-1,*text.size())
        # attention_indicator of size (# of unique words, batch size, seq length) such that
        # element (i, j, k) is the attention of batch example j at sequence position k if that word is unique word i else 0
        attention_indicator = torch.zeros(indicator.size(), device=indicator.device)
        attention_indicator[indicator] = attention.unsqueeze(0).expand(*indicator.size())[indicator]
        # sums up attention along each batch for each unique word
        # resulting in a matrix of size (batch size, # of unique words) where
        # element (i, j) expresses the probability mass in batch example i on unique word j
        # Note that the attention on a word after the sequence ends is 0 so we do not need to worry when we sum
        word_probabilities = torch.transpose(attention_indicator.sum(-1), 0, 1)
        add_at_indices = word_indices.expand(text.size(0),word_indices.size(0)).long()
        # negative indices are OOV markers; shift them into the extended-vocab tail
        add_at_indices[add_at_indices < 0] += new_vocab_size[1].long()
        # attain mixture of the distributions according to p_gen
        # pad vocab distribution with zero-probability slots for the batch OOV words
        vocab_dist_probs = F.pad(vocab_dist, (0,max_num_oov))
        # scatter-add the copy mass at the (possibly shifted) word indices
        final_vocab_dist = (p_gen*vocab_dist_probs).scatter_add(1, add_at_indices, (1-p_gen)*word_probabilities)
        return final_vocab_dist
    # this changes it so that only words that don't appear in the text and the static vocab are mapped to the oov index
    # used to get indices for computing loss
    # Note: DEPENDENT ON VALID INDICES IN POINTER_INFO
    def map_input_indices_(self, indices):
        """In-place: map only out-of-text, out-of-vocab words to the OOV index."""
        # set oov not in text to oov index
        indices[indices < -self.pointer_info.max_num_oov] = self.vectorizer.vocab_size
        oov_places = torch.nonzero(indices < 0)
        if oov_places.dim() > 1:
            batch_indices, oov_indices = oov_places[:,0], -1-indices[oov_places[:,0]]
            holes = self.pointer_info.get_oov_holes()
            indices[batch_indices[holes[batch_indices, oov_indices.long()].byte()]] = self.vectorizer.vocab_size
    def map_generated_indices_(self, indices):
        """In-place: fold extended-vocab (copied OOV) indices back to negative markers."""
        indices[indices >= self.vectorizer.vocab_size] -= (self.vectorizer.vocab_size+1+self.pointer_info.max_num_oov).cpu().numpy()
    # used to get indices to return after generating summary
    # Note: DEPENDENT ON CURRENT_P_GEN IN POINTER_INFO
    def get_extras(self):
        """Attach the last predicted p_gen to the generated summary."""
        return (self.pointer_info.current_p_gen,)
| StarcoderdataPython |
11229620 | <filename>api/exceptions.py<gh_stars>0
# TODO
class DatasourceBaseError(Exception):
    """Root of the datasource exception hierarchy; catch this for any datasource failure."""
class DatasourceBadObjectError(DatasourceBaseError):
    """Error for a bad/unsupported datasource object."""
    pass
class DatasourceObjectParseError(DatasourceBaseError):
    """Error raised while parsing a datasource object."""
    pass
class DatasourceBadParameterTypeError(DatasourceBaseError):
    """Raised when a parameter or field is set with improper type."""
| StarcoderdataPython |
152847 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'RouterStatusResult',
'AwaitableRouterStatusResult',
'router_status',
'router_status_output',
]
@pulumi.output_type
class RouterStatusResult:
    """
    A collection of values returned by RouterStatus.
    """
    def __init__(__self__, best_routes=None, best_routes_for_routers=None, id=None, name=None, network=None, project=None, region=None):
        # tfgen-style defensive checks: each field must match the schema type
        # before being stored on the output object via pulumi.set().
        if best_routes and not isinstance(best_routes, list):
            raise TypeError("Expected argument 'best_routes' to be a list")
        pulumi.set(__self__, "best_routes", best_routes)
        if best_routes_for_routers and not isinstance(best_routes_for_routers, list):
            raise TypeError("Expected argument 'best_routes_for_routers' to be a list")
        pulumi.set(__self__, "best_routes_for_routers", best_routes_for_routers)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if network and not isinstance(network, str):
            raise TypeError("Expected argument 'network' to be a str")
        pulumi.set(__self__, "network", network)
        if project and not isinstance(project, str):
            raise TypeError("Expected argument 'project' to be a str")
        pulumi.set(__self__, "project", project)
        if region and not isinstance(region, str):
            raise TypeError("Expected argument 'region' to be a str")
        pulumi.set(__self__, "region", region)
    @property
    @pulumi.getter(name="bestRoutes")
    def best_routes(self) -> Sequence['outputs.RouterStatusBestRouteResult']:
        """
        Best routes for this router's network (routes learned via BGP peers).
        """
        return pulumi.get(self, "best_routes")
    @property
    @pulumi.getter(name="bestRoutesForRouters")
    def best_routes_for_routers(self) -> Sequence['outputs.RouterStatusBestRoutesForRouterResult']:
        """
        Best routes learned by this particular router.
        """
        return pulumi.get(self, "best_routes_for_routers")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the router.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def network(self) -> str:
        """
        The network name or resource link to the parent
        network of this subnetwork.
        """
        return pulumi.get(self, "network")
    @property
    @pulumi.getter
    def project(self) -> Optional[str]:
        return pulumi.get(self, "project")
    @property
    @pulumi.getter
    def region(self) -> str:
        return pulumi.get(self, "region")
class AwaitableRouterStatusResult(RouterStatusResult):
    """Awaitable wrapper around RouterStatusResult; ``await`` resolves immediately."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes this a generator function (as the await
        # protocol requires) without ever suspending; the result is returned at once.
        if False:
            yield self
        return RouterStatusResult(
            best_routes=self.best_routes,
            best_routes_for_routers=self.best_routes_for_routers,
            id=self.id,
            name=self.name,
            network=self.network,
            project=self.project,
            region=self.region)
def router_status(name: Optional[str] = None,
                  project: Optional[str] = None,
                  region: Optional[str] = None,
                  opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableRouterStatusResult:
    """
    Get a Cloud Router's status within GCE from its name and region. This data source exposes the
    routes learned by a Cloud Router via BGP peers.

    For more information see [the official documentation](https://cloud.google.com/network-connectivity/docs/router/how-to/viewing-router-details)
    and
    [API](https://cloud.google.com/compute/docs/reference/rest/v1/routers/getRouterStatus).

    ## Example Usage

    ```python
    import pulumi
    import pulumi_gcp as gcp

    my_router = gcp.compute.router_status(name="myrouter")
    ```

    :param str name: The name of the router.
    :param str project: The ID of the project in which the resource
           belongs. If it is not provided, the provider project is used.
    :param str region: The region this router has been created in. If
           unspecified, this defaults to the region configured in the provider.
    """
    # Marshal the arguments into the shape the invoke protocol expects.
    __args__ = dict()
    __args__['name'] = name
    __args__['project'] = project
    __args__['region'] = region
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous invoke of the registered data source; typ= maps the raw
    # provider result onto RouterStatusResult.
    __ret__ = pulumi.runtime.invoke('gcp:compute/routerStatus:RouterStatus', __args__, opts=opts, typ=RouterStatusResult).value
    return AwaitableRouterStatusResult(
        best_routes=__ret__.best_routes,
        best_routes_for_routers=__ret__.best_routes_for_routers,
        id=__ret__.id,
        name=__ret__.name,
        network=__ret__.network,
        project=__ret__.project,
        region=__ret__.region)
@_utilities.lift_output_func(router_status)
def router_status_output(name: Optional[pulumi.Input[str]] = None,
                         project: Optional[pulumi.Input[Optional[str]]] = None,
                         region: Optional[pulumi.Input[Optional[str]]] = None,
                         opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[RouterStatusResult]:
    """
    Get a Cloud Router's status within GCE from its name and region. This data source exposes the
    routes learned by a Cloud Router via BGP peers.

    For more information see [the official documentation](https://cloud.google.com/network-connectivity/docs/router/how-to/viewing-router-details)
    and
    [API](https://cloud.google.com/compute/docs/reference/rest/v1/routers/getRouterStatus).

    ## Example Usage

    ```python
    import pulumi
    import pulumi_gcp as gcp

    my_router = gcp.compute.router_status(name="myrouter")
    ```

    :param str name: The name of the router.
    :param str project: The ID of the project in which the resource
           belongs. If it is not provided, the provider project is used.
    :param str region: The region this router has been created in. If
           unspecified, this defaults to the region configured in the provider.
    """
    # Body intentionally empty: lift_output_func wraps router_status() so this
    # variant accepts pulumi.Input values and returns a pulumi.Output.
    ...
| StarcoderdataPython |
11212155 |
# Sub-commands this module handles and their one-line help strings.
COMMANDS = ['version']
HELP = {
    'version': 'Print the framework version'
}
def execute(**kargs):
    """Print the framework version, unless the startup logo (which shows it) was printed."""
    environment = kargs.get("env")
    logo_was_shown = kargs.get("showLogo")
    # If we've shown the logo, then the version has already been printed.
    if logo_was_shown:
        return
    print(environment["version"])
| StarcoderdataPython |
1635552 | <reponame>MichalKyjovsky/NPRG065_Programing_in_Python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import collections
class OrderedAttributes(type):
    """Metaclass that records, on each created class, the definition order of its attributes.

    The order is exposed as a tuple of names in ``_order`` (dunder names excluded).
    """

    @classmethod
    def __prepare__(metacls, name, bases, **kwds):
        # Runs before the class body executes and supplies the namespace object
        # into which the body's definitions are inserted, in source order.
        return collections.OrderedDict()

    def __new__(cls, name, bases, namespace, **kwds):
        new_class = super().__new__(cls, name, bases, namespace)
        new_class._order = tuple(
            attr for attr in namespace if not attr.startswith('__')
        )
        return new_class
class Something(metaclass=OrderedAttributes):
    # Definition order below is exactly what _order should report:
    # ('this', 'z', 'b', 'a') -- methods are recorded too.
    this = 'text'
    def z(self):
        """Dummy method; present only to show methods appear in _order."""
        return False
    b = 'order is preserved'
    a = 'more text'
# Expect: ('this', 'z', 'b', 'a')
print(Something._order)
# We want to convert between different length units (inches, feet, meters,...).
# A possible simple solution is to define a "standard unit" and then conversion between
# the standard unit and all other units (another much more complex solution would be to
# a full matrix of conversions between all combinations of units).
# Thus, we create the Unit class, that has a reference to the standard unit and factor, how to
# convert between the unit and the std one.
class Unit:
    """Full name for the unit."""
    # NOTE: by convention the class docstring holds the unit's full name
    # (subclasses set it to e.g. "Inches").
    factor = 1.0       # multiplier from the standard unit into this unit
    standard = None    # reference to the appropriate StandardUnit
    name = ""          # abbreviation of the unit's name

    @classmethod
    def value(cls, value):
        """Convert *value* expressed in this unit into standard units (None passes through)."""
        return None if value is None else value / cls.factor

    @classmethod
    def convert(cls, value):
        """Convert *value* expressed in standard units into this unit (None passes through)."""
        return None if value is None else value * cls.factor
# Then we will inherit all units from the unit.
# The issue here is that in the standard unit we need to refer to
# itself, but in the definition it is not possible, i.e.,
#
# class INCH(Unit):
# standard = INCH
#
# is incorrect definition.
#
# We can overcome it as follows but it is ugly.
#
# class INCH:
# pass
# INCH.standard = INCH
#
# Nicer solution is via metaclass where we can make the reference
# Note that we have to use the metaclass. It is not possible to use __new__ in the class StandardUnit because
# that one is called only when the class is instantiated and affects only the instance, but we don't instantiate
# INCH anywhere in the code; instead we require the class INCH to already have the "standard" attribute set to INCH
class UnitMeta(type):
    """Metaclass that points each newly created class's ``standard`` attribute at itself."""

    def __new__(cls, name, bases, namespace):
        klass = super().__new__(cls, name, bases, namespace)
        klass.standard = klass
        return klass
class StandardUnit(Unit, metaclass=UnitMeta):
    """Base for standard units; UnitMeta makes ``standard`` refer to the class itself."""
    pass
class INCH(StandardUnit):
    """Inches"""
    # INCH is the standard unit: factor stays 1.0 and standard points to INCH itself.
    name = "in"
class FOOT(Unit):
    """Feet"""
    name = "ft"
    standard = INCH
    factor = 1/12  # feet per inch
class CENTIMETER(Unit):
    """Centimeters"""
    name = "cm"
    standard = INCH
    factor = 2.54  # centimeters per inch
class METER(Unit):
    """Meters"""
    name = "m"
    standard = INCH
    factor = .0254  # meters per inch
# Demo: convert one inch into the other units.
x = INCH.value(1)
print(FOOT.convert(x))        # 1/12 ~= 0.0833
print(CENTIMETER.convert(x))  # 2.54
print(METER.convert(x))       # 0.0254
print(INCH.convert(x))        # 1.0
# And one meter back into inches.
one_meter = METER.value(1)
print(INCH.convert(one_meter))  # ~39.37
print(INCH.standard.__name__)   # INCH
print(FOOT.standard.__name__)   # INCH
| StarcoderdataPython |
3297118 | from pythonds.basic import Stack
def postfixEval(postfixExpr):
    """Evaluate a whitespace-separated postfix (RPN) arithmetic expression.

    Operands are non-negative integers (multi-digit supported); operators are
    + - * /.  Example: postfixEval("17 10 + 3 * 9 /") -> 9.0

    Args:
        postfixExpr: the expression string, tokens separated by whitespace.

    Returns:
        int or float: the value of the expression (float once "/" is used).

    Raises:
        IndexError: if the expression is malformed (operator without operands).
        KeyError: if an unknown operator token is encountered.
    """
    # Plain list used as a stack (top = end); also removes the third-party
    # pythonds dependency the original needed.
    operations = {
        "+": lambda a, b: a + b,
        "-": lambda a, b: a - b,
        "*": lambda a, b: a * b,
        "/": lambda a, b: a / b,
    }
    stack = []
    for token in postfixExpr.split():
        # BUG FIX: the original test `token in "0123456789"` is a *substring*
        # check, so multi-digit numbers such as "27" were not recognized as
        # operands; str.isdigit() handles any non-negative integer token.
        if token.isdigit():
            stack.append(int(token))
        else:
            right = stack.pop()
            left = stack.pop()
            stack.append(operations[token](left, right))
    return stack.pop()
def doMath(op, op1, op2):
    """Apply binary operator *op* ("*", "/", "+") to op1 and op2; any other op subtracts."""
    dispatch = {
        "*": lambda a, b: a * b,
        "/": lambda a, b: a / b,
        "+": lambda a, b: a + b,
    }
    # unknown operators fall through to subtraction, matching the original else-branch
    return dispatch.get(op, lambda a, b: a - b)(op1, op2)
| StarcoderdataPython |
4968717 | <filename>reeds/tests/pipeline_test/PNMT_WATER_PIPELINE/c_job_eoff_estm.py
#!/usr/bin/env python3
import os, sys, glob
from reeds.modules import do_RE_EDS_eoffEstimation as eoffEstm
sys.path.append(os.getcwd())
from global_definitions import fM, bash
from global_definitions import name, root_dir
from global_definitions import gromosXX_bin, gromosPP_bin, ene_ana_lib
from global_definitions import in_top_file, in_pert_file, in_disres_file, in_template_reeds_imd
#STEP specifics
out_eoff_dir = root_dir+"/TEST_c_eoff_TEST"          # output dir for this pipeline step
next_lowerBound_dir = root_dir+"/input/1_next_eoff"  # coordinates from the previous step
in_name = name+"_energy_offsets"
##make folder
out_eoff_dir = bash.make_folder(out_eoff_dir)
#In- Files
# Topology bundles the GROMOS topology, distance-restraint and perturbation files.
topology = fM.Topology(top_path=in_top_file, disres_path=in_disres_file, pertubation_path=in_pert_file)
coords =glob.glob(next_lowerBound_dir+"/*.cnf")
system = fM.System(coordinates=coords, name=in_name, top=topology)
print(system)
# Submit the RE-EDS energy-offset estimation jobs (4h wall time per job).
last_jobID = eoffEstm.do(out_root_dir=out_eoff_dir, in_simSystem=system,
                         in_template_imd_path=in_template_reeds_imd, in_ene_ana_lib=ene_ana_lib, duration_per_job="04:00")
| StarcoderdataPython |
11227325 | # Generated by Django 2.1.1 on 2018-10-16 11:05
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the `recomendacao` model and promotes
    # `idClick` to the primary key of `post` (dropping the auto `id` column).

    dependencies = [
        ('webapp', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='recomendacao',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nome', models.CharField(max_length=50)),
            ],
        ),
        # Remove the old surrogate key before re-declaring idClick as the PK.
        migrations.RemoveField(
            model_name='post',
            name='id',
        ),
        migrations.AlterField(
            model_name='post',
            name='idClick',
            field=models.CharField(max_length=250, primary_key=True, serialize=False),
        ),
    ]
| StarcoderdataPython |
4937053 | <gh_stars>10-100
from abc import ABC, abstractmethod
from argparse import Namespace
from typing import Any, Dict, List, Optional, Tuple, Union, cast
import pytorch_lightning as pl
import torch
import torch.nn as nn
from loguru import logger
from omegaconf import DictConfig
from slp.config.omegaconf import OmegaConf
from slp.util.pytorch import pad_mask, subsequent_mask
from slp.util.system import print_separator
from slp.util.types import Configuration, LossType
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler
class _Predictor(ABC):
    """Base predictor class

    Define an interface that can be used to extend the lightning module to new tasks and models

    * parse_batch: Parse input batch and extract necessary masks etc.
    * get_predictions_and_targets: Perform a forward pass through the model to get the logits and return logits and targets
    """

    @abstractmethod
    def parse_batch(self, batch: Tuple[torch.Tensor, ...]) -> Tuple[torch.Tensor, ...]:
        """Abstract parse_batch method to be implemented by child class

        Args:
            batch (Tuple[torch.Tensor, ...]): A tuple of tensors that contains inputs to the model and targets

        Returns:
            Tuple[torch.Tensor, ...]: The processed inputs (and any masks), ready to provide to the model
        """
        raise NotImplementedError

    @abstractmethod
    def get_predictions_and_targets(
        self, model: nn.Module, batch: Tuple[torch.Tensor, ...]
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Abstract get_predictions_and_targets method to be implemented by child class

        This method gets exposed to the PLModule classes

        Args:
            model (nn.Module): model to use for forward pass
            batch (Tuple[torch.Tensor, torch.Tensor]): A tuple of tensors that contains inputs to the model and targets

        **Note**: Maybe it should be useful to move loss calculation here. Then multitask learning and auxiliary losses should be easier

        Returns:
            Tuple[torch.Tensor, torch.Tensor]: (logits, ground_truths), ready to be passed to the loss function
        """
        raise NotImplementedError
class _Classification(_Predictor):
    """Classification task"""

    def parse_batch(self, batch: Tuple[torch.Tensor, ...]) -> Tuple[torch.Tensor, ...]:
        """Parse incoming batch

        Input batch just contains inputs and targets

        Args:
            batch (Tuple[torch.Tensor, torch.Tensor]): (inputs, labels)

        Returns:
            Tuple[torch.Tensor, torch.Tensor]: (inputs, labels)
        """
        inputs = batch[0]
        targets = batch[1]

        return inputs, targets

    def get_predictions_and_targets(
        self, model: nn.Module, batch: Tuple[torch.Tensor, ...]
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return logits and ground truths to be passed in loss function

        Args:
            model (nn.Module): Model to use for prediction
            batch (Tuple[torch.Tensor, torch.Tensor]): (inputs, labels)

        Returns:
            Tuple[torch.Tensor, torch.Tensor]: (logits, labels), both squeezed
        """
        inputs, targets = self.parse_batch(batch)
        y_pred = model(inputs)

        return y_pred.squeeze(), targets.squeeze()
class _AutoEncoder(_Predictor):
    """Autoencoder task"""

    def parse_batch(self, batch: Tuple[torch.Tensor, ...]) -> Tuple[torch.Tensor, ...]:
        """Parse incoming batch

        Input batch just contains inputs. Targets are the same as inputs, because we are doing reconstruction.

        Args:
            batch (Tuple[torch.Tensor]): (inputs,)

        Returns:
            Tuple[torch.Tensor, torch.Tensor]: (inputs, inputs)
        """
        inputs = batch[0]

        return inputs, inputs

    def get_predictions_and_targets(
        self, model: nn.Module, batch: Tuple[torch.Tensor, ...]
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return logits and ground truths to be passed in loss function

        Args:
            model (nn.Module): Model to use for prediction
            batch (Tuple[torch.Tensor]): (inputs,)

        Returns:
            Tuple[torch.Tensor, torch.Tensor]: (reconstruction, inputs), both flattened to (batch, -1)
        """
        inputs, targets = self.parse_batch(batch)
        y_pred = model(inputs)

        return y_pred.view(y_pred.size(0), -1), targets.view(targets.size(0), -1)
class _RnnClassification(_Predictor):
    """RNN classification task"""

    def parse_batch(self, batch: Tuple[torch.Tensor, ...]) -> Tuple[torch.Tensor, ...]:
        """Parse incoming batch

        Input batch contains inputs, targets and sequence lengths.
        Comes from slp.data.collators.SequentialCollator.

        Args:
            batch (Tuple[torch.Tensor, torch.Tensor, torch.Tensor]): (inputs, targets, lengths)

        Returns:
            Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: (inputs, targets, lengths)
        """
        inputs = batch[0]
        targets = batch[1]
        lengths = batch[2]

        return inputs, targets, lengths

    def get_predictions_and_targets(
        self, model: nn.Module, batch: Tuple[torch.Tensor, ...]
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return logits and ground truths to be passed in loss function

        Args:
            model (nn.Module): Model to use for prediction
            batch (Tuple[torch.Tensor, torch.Tensor, torch.Tensor]): (inputs, targets, lengths)

        Returns:
            Tuple[torch.Tensor, torch.Tensor]: (logits, targets), both squeezed
        """
        inputs, targets, lengths = self.parse_batch(batch)
        y_pred = model(inputs, lengths)

        return y_pred.squeeze(), targets.squeeze()
class _TransformerClassification(_Predictor):
    """Transformer classification task"""

    def parse_batch(self, batch: Tuple[torch.Tensor, ...]) -> Tuple[torch.Tensor, ...]:
        """Parse incoming batch

        Input batch contains inputs, targets and sequence lengths.
        Comes from slp.data.collators.SequentialCollator.
        Creates pad masks to be passed to transformer attention.

        Args:
            batch (Tuple[torch.Tensor, torch.Tensor, torch.Tensor]): (inputs, targets, lengths)

        Returns:
            Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: (inputs, targets, attention_mask)
        """
        inputs = batch[0]
        targets = batch[1]
        lengths = batch[2]
        attention_mask = pad_mask(lengths, max_length=inputs.size(1))

        return inputs, targets, attention_mask

    def get_predictions_and_targets(
        self, model: nn.Module, batch: Tuple[torch.Tensor, ...]
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return logits and ground truths to be passed in loss function

        Args:
            model (nn.Module): Model to use for prediction
            batch (Tuple[torch.Tensor, torch.Tensor, torch.Tensor]): (inputs, targets, lengths)

        Returns:
            Tuple[torch.Tensor, torch.Tensor]: (logits, targets), both squeezed
        """
        inputs, targets, attention_mask = self.parse_batch(batch)
        y_pred = model(inputs, attention_mask=attention_mask)

        return y_pred.squeeze(), targets.squeeze()
class _MultimodalTransformerClassification(_Predictor):
    """Multimodal transformer classification task"""

    def parse_batch(self, batch):
        """Parse incoming batch

        Input batch contains per-modality inputs and lengths, plus targets.
        Comes from slp.data.collators.SequentialCollator.
        Creates one pad mask per modality to be passed to transformer attention.

        Args:
            batch (Tuple[Dict[str, torch.Tensor], torch.Tensor, Dict[str, torch.Tensor]]): (inputs, targets, lengths)

        Returns:
            Tuple[Dict[str, torch.Tensor], torch.Tensor, Dict[str, torch.Tensor]]: (inputs, targets, attention_masks)
        """
        inputs = batch[0]
        targets = batch[1]
        lengths = batch[2]
        # one pad mask per modality, keyed like the inputs/lengths dicts
        attention_masks = {
            m: pad_mask(lengths[m], max_length=inputs[m].size(1))
            for m in lengths.keys()
        }

        return inputs, targets, attention_masks

    def get_predictions_and_targets(
        self, model: nn.Module, batch: Tuple[torch.Tensor, ...]
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return logits and ground truths to be passed in loss function

        Args:
            model (nn.Module): Model to use for prediction
            batch (Tuple[Dict[str, torch.Tensor], torch.Tensor, Dict[str, torch.Tensor]]): (inputs, targets, lengths)

        Returns:
            Tuple[torch.Tensor, torch.Tensor]: (logits, targets), both squeezed
        """
        inputs, targets, attention_masks = self.parse_batch(batch)
        y_pred = model(inputs, attention_masks=attention_masks)

        return y_pred.squeeze(), targets.squeeze()
class _Transformer(_Predictor):
    """Generic transformer seq2seq task"""

    def parse_batch(self, batch: Tuple[torch.Tensor, ...]) -> Tuple[torch.Tensor, ...]:
        """Parse incoming batch

        Input batch contains inputs, targets and their lengths.
        Comes from slp.data.collators.SequentialCollator.
        Creates pad masks for the source and a combined pad + subsequent
        (causal) mask for the target, to be passed to transformer attention.

        Args:
            batch (Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]): (inputs, targets, lengths_inputs, lengths_targets)

        Returns:
            Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: (inputs, targets, pad_inputs, pad_targets)
        """
        inputs = batch[0]
        targets = batch[1]
        lengths_inputs = batch[2]
        lengths_targets = batch[3]

        pad_inputs = pad_mask(
            lengths_inputs,
            max_length=inputs.size(1),
        )
        pad_targets = pad_mask(
            lengths_targets,
            max_length=targets.size(1),
        )
        # combine the target pad mask with the causal (subsequent) mask so a
        # position can only attend to non-pad positions at or before itself
        sub_m = subsequent_mask(targets.size(1))  # type: ignore
        pad_targets = pad_targets.unsqueeze(-2) * sub_m.to(pad_targets.device)

        return inputs, targets, pad_inputs, pad_targets

    def get_predictions_and_targets(
        self, model: nn.Module, batch: Tuple[torch.Tensor, ...]
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return logits and ground truths to be passed in loss function

        Args:
            model (nn.Module): Model to use for prediction
            batch (Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]): (inputs, targets, lengths_inputs, lengths_targets)

        Returns:
            Tuple[torch.Tensor, torch.Tensor]: (logits, targets), flattened to
            (batch * seq, vocab) and (batch * seq,) for the loss
        """
        inputs, targets, source_mask, target_mask = self.parse_batch(batch)
        y_pred = model(
            inputs, targets, source_mask=source_mask, target_mask=target_mask
        )

        y_pred = y_pred.view(-1, y_pred.size(-1))
        targets = targets.view(-1)

        return y_pred, targets
class _BertSequenceClassification(_Predictor):
    """Bert Classification task"""

    def parse_batch(self, batch: Tuple[torch.Tensor, ...]) -> Tuple[torch.Tensor, ...]:
        """Parse incoming batch

        Input batch contains inputs, targets and sequence lengths.
        Comes from slp.data.collators.SequentialCollator.
        Creates pad masks to be passed to BERT attention.

        Args:
            batch (Tuple[torch.Tensor, torch.Tensor, torch.Tensor]): (inputs, targets, lengths)

        Returns:
            Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: (inputs, targets, attention_mask)
        """
        inputs = batch[0]
        targets = batch[1]
        lengths = batch[2]
        attention_mask = pad_mask(lengths, inputs.size(1))

        return inputs, targets, attention_mask

    def get_predictions_and_targets(
        self, model: nn.Module, batch: Tuple[torch.Tensor, ...]
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return logits and ground truths to be passed in loss function

        Args:
            model (nn.Module): Model to use for prediction (huggingface-style:
                returns a tuple whose first element is the logits)
            batch (Tuple[torch.Tensor, torch.Tensor, torch.Tensor]): (inputs, targets, lengths)

        Returns:
            Tuple[torch.Tensor, torch.Tensor]: (logits, targets), flattened and squeezed
        """
        inputs, targets, attention_mask = self.parse_batch(batch)
        out = model(
            input_ids=inputs,
            attention_mask=attention_mask,
            labels=None,
            return_dict=False,
        )
        y_pred = out[0].view(-1, out[0].size(-1))
        targets = targets.view(-1)

        return y_pred.squeeze(), targets.squeeze()
class SimplePLModule(pl.LightningModule):
def __init__(
    self,
    model: nn.Module,
    optimizer: Union[Optimizer, List[Optimizer]],
    criterion: LossType,
    lr_scheduler: Union[_LRScheduler, List[_LRScheduler]] = None,
    hparams: Configuration = None,
    metrics: Optional[Dict[str, pl.metrics.Metric]] = None,
    predictor_cls=_Classification,
    calculate_perplexity: bool = False,  # for LM. Dirty but much more efficient
):
    """Wraps a (model, optimizer, criterion, lr_scheduler) tuple in a LightningModule

    Handles the boilerplate for metrics calculation and logging and defines the train_step / val_step / test_step
    with use of the predictor helper classes (e.g. _Classification, _RnnClassification)

    Args:
        model (nn.Module): Module to use for prediction
        optimizer (Union[Optimizer, List[Optimizer]]): Optimizers to use for training
        criterion (LossType): Task loss
        lr_scheduler (Union[_LRScheduler, List[_LRScheduler]], optional): Learning rate scheduler. Defaults to None.
        hparams (Configuration, optional): Hyperparameter values. This ensures they are logged with trainer.loggers. Defaults to None.
        metrics (Optional[Dict[str, pl.metrics.Metric]], optional): Metrics to track. Defaults to None.
        predictor_cls ([type], optional): Class that defines a parse_batch and a
            get_predictions_and_targets method. Defaults to _Classification.
        calculate_perplexity (bool, optional): Whether to calculate perplexity.
            Would be cleaner as a metric, but this is more efficient. Defaults to False.
    """
    super(SimplePLModule, self).__init__()
    self.calculate_perplexity = calculate_perplexity
    self.model = model
    self.optimizer = optimizer
    self.lr_scheduler = lr_scheduler
    self.criterion = criterion

    if metrics is not None:
        # Clone the metrics for each phase so train/val/test keep independent
        # internal state (stateful metrics must not be shared across phases).
        self.train_metrics = nn.ModuleDict(metrics)
        self.val_metrics = nn.ModuleDict({k: v.clone() for k, v in metrics.items()})
        self.test_metrics = nn.ModuleDict(
            {k: v.clone() for k, v in metrics.items()}
        )
    else:
        self.train_metrics = nn.ModuleDict(modules=None)
        self.val_metrics = nn.ModuleDict(modules=None)
        self.test_metrics = nn.ModuleDict(modules=None)

    self.predictor = predictor_cls()

    if hparams is not None:
        # Normalize Namespace / DictConfig into a plain dict so Lightning can
        # serialize and log the hyperparameters.
        if isinstance(hparams, Namespace):
            dict_params = vars(hparams)
        elif isinstance(hparams, DictConfig):
            dict_params = cast(Dict[str, Any], OmegaConf.to_container(hparams))
        else:
            dict_params = hparams
        # self.hparams = dict_params
        self.save_hyperparameters(dict_params)
def configure_optimizers(self):
    """Return optimizers and learning rate schedulers

    Returns:
        Tuple[List[Optimizer], List[_LRScheduler]]: (optimizers, lr_schedulers),
        or just the optimizer(s) when no scheduler was configured
    """
    if self.lr_scheduler is not None:
        # Step the scheduler once per epoch, driven by the validation loss
        # (e.g. for ReduceLROnPlateau-style schedulers).
        scheduler = {
            "scheduler": self.lr_scheduler,
            "interval": "epoch",
            "monitor": "val_loss",
        }

        return [self.optimizer], [scheduler]

    return self.optimizer
def forward(self, *args, **kwargs):
    """Call wrapped module forward (thin pass-through to ``self.model``)."""
    return self.model(*args, **kwargs)
def _compute_metrics(self, metrics, loss, y_hat, targets, mode="train"):
    """Compute all metrics and aggregate in a dict

    Args:
        metrics (Dict[str, pl.metrics.Metric]): metrics to compute
        loss (torch.Tensor): Computed loss
        y_hat (torch.Tensor): Logits
        targets (torch.Tensor): Ground Truths
        mode (str, optional): "train", "val" or "test". Defaults to "train".

    Returns:
        Dict[str, torch.Tensor]: mode-prefixed metric values, plus the loss
        (under the bare "loss" key in training, as Lightning expects) and,
        when enabled, perplexity as exp(loss).
    """

    def fmt(name):
        """Format metric name"""
        return f"{mode}_{name}"

    # CONSISTENCY: use the fmt() helper everywhere instead of inlining the
    # f-string; also avoid shadowing the `metrics` parameter.
    computed = {fmt(k): v(y_hat, targets) for k, v in metrics.items()}

    # Lightning expects the training loss under the bare "loss" key.
    computed["loss" if mode == "train" else fmt("loss")] = loss

    if self.calculate_perplexity:
        computed[fmt("ppl")] = torch.exp(loss)

    return computed
def log_to_console(self, metrics, mode="Training"):
    """Log metrics to console

    Args:
        metrics (Dict[str, torch.Tensor]): Computed metrics
        mode (str, optional): "Training", "Validation" or "Testing". Defaults to "Training".
    """
    logger.info("Epoch {} {} results".format(self.current_epoch + 1, mode))
    print_separator(symbol="-", n=50, print_fn=logger.info)

    for name, value in metrics.items():
        # the epoch counter is already in the header line above
        if name == "epoch":
            continue
        logger.info("{:<15} {:<15}".format(name, value))

    print_separator(symbol="%", n=50, print_fn=logger.info)
def aggregate_epoch_metrics(self, outputs, mode="Training"):
    """Average step-level metrics over a whole epoch and log the result.

    Args:
        outputs: list of per-step metric dicts, all sharing the same keys
        mode: "Training", "Validation" or "Testing"

    Returns:
        Dict of epoch-mean metrics plus the 1-based epoch number.
    """
    def rename(key):
        # The bare "loss" key from training steps is reported as "train_loss".
        return "train_loss" if key == "loss" else key
    aggregated = {}
    for key in outputs[0].keys():
        stacked = torch.stack([step[key] for step in outputs])
        aggregated[rename(key)] = stacked.mean()
    aggregated["epoch"] = self.current_epoch + 1
    self.log_dict(aggregated, logger=True, prog_bar=False, on_epoch=True)
    return aggregated
def training_step(self, batch, batch_idx):
    """Run one training step: forward pass, loss, and metric logging.

    Args:
        batch: input batch
        batch_idx: index of the batch within the epoch

    Returns:
        Dict of computed metrics including the "loss" key that PyTorch
        Lightning uses for backpropagation.
    """
    y_hat, targets = self.predictor.get_predictions_and_targets(self.model, batch)
    loss = self.criterion(y_hat, targets)
    step_metrics = self._compute_metrics(
        self.train_metrics, loss, y_hat, targets, mode="train"
    )
    # Step-level logging only; epoch aggregation happens in training_epoch_end.
    self.log_dict(
        step_metrics,
        on_step=True,
        on_epoch=False,
        logger=True,
        prog_bar=False,
    )
    step_metrics["loss"] = loss
    return step_metrics
def training_epoch_end(self, outputs):
    """Average training-step metrics over the epoch and print them.

    Args:
        outputs: aggregated per-step outputs from training_step
    """
    epoch_metrics = self.aggregate_epoch_metrics(outputs, mode="Training")
    self.log_to_console(epoch_metrics, mode="Training")
def validation_step(self, batch, batch_idx):
    """Compute loss and metrics for one validation batch.

    NOTE(review): unlike training_step, this passes ``self`` (the whole
    LightningModule) rather than ``self.model`` to the predictor —
    presumably so prediction goes through ``forward``; confirm intent.

    Args:
        batch: input batch
        batch_idx: index of the batch within the epoch

    Returns:
        Dict of computed metrics plus the early-stopping best score.
    """
    y_hat, targets = self.predictor.get_predictions_and_targets(self, batch)
    loss = self.criterion(y_hat, targets)
    step_metrics = self._compute_metrics(
        self.val_metrics, loss, y_hat, targets, mode="val"
    )
    best = self.trainer.early_stopping_callback.best_score.detach().cpu()
    step_metrics["best_score"] = best
    return step_metrics
def validation_epoch_end(self, outputs):
    """Aggregate validation metrics over the epoch, guard against a
    diverged loss, track the early-stopping best score, and print.

    Args:
        outputs: aggregated per-step outputs from validation_step

    Bug fix: the NaN/Inf sentinel used to be the bare int ``1000000``,
    which crashed with AttributeError on the ``.detach()`` call below
    whenever the early-stopping monitor is "val_loss". The sentinel is
    now a tensor so the tensor API calls remain valid.
    """
    outputs = self.aggregate_epoch_metrics(outputs, mode="Validation")
    # Replace a diverged (NaN/Inf) loss with a large finite sentinel so
    # early stopping and checkpoint selection keep working.
    if torch.isnan(outputs["val_loss"]) or torch.isinf(outputs["val_loss"]):
        outputs["val_loss"] = torch.tensor(1000000.0)
    outputs["best_score"] = min(
        outputs[self.trainer.early_stopping_callback.monitor].detach().cpu(),
        self.trainer.early_stopping_callback.best_score.detach().cpu(),
    )
    self.log_to_console(outputs, mode="Validation")
def test_step(self, batch, batch_idx):
"""Compute loss for a single test step and log metrics to loggers
Args:
batch (Tuple[torch.Tensor, ...]): Input batch
batch_idx (int): Index of batch
Returns:
Dict[str, torch.Tensor]: computed metrics
"""
y_hat, targets = self.predictor.get_predictions_and_targets(self, batch)
loss = self.criterion(y_hat, targets)
metrics = self._compute_metrics(
self.test_metrics, loss, y_hat, targets, mode="test"
)
return metrics
def test_epoch_end(self, outputs):
"""Aggregate metrics of a test epoch
Args:
outputs (List[Dict[str, torch.Tensor]]): Aggregated outputs from test_step
"""
outputs = self.aggregate_epoch_metrics(outputs, mode="Test")
self.log_to_console(outputs, mode="Test")
class PLModule(SimplePLModule):
    """Lightning wrapper preconfigured with the plain classification predictor."""

    def __init__(
        self,
        model: nn.Module,
        optimizer: Union[Optimizer, List[Optimizer]],
        criterion: LossType,
        lr_scheduler: Union[_LRScheduler, List[_LRScheduler]] = None,
        hparams: Configuration = None,
        metrics: Optional[Dict[str, pl.metrics.Metric]] = None,
        calculate_perplexity=False,
    ):
        """Forward every argument to SimplePLModule with a fixed predictor."""
        super().__init__(
            model,
            optimizer,
            criterion,
            predictor_cls=_Classification,
            lr_scheduler=lr_scheduler,
            hparams=hparams,
            metrics=metrics,
            calculate_perplexity=calculate_perplexity,
        )
class AutoEncoderPLModule(SimplePLModule):
    """Lightning wrapper preconfigured with the autoencoder predictor."""

    def __init__(
        self,
        model: nn.Module,
        optimizer: Union[Optimizer, List[Optimizer]],
        criterion: LossType,
        lr_scheduler: Union[_LRScheduler, List[_LRScheduler]] = None,
        hparams: Configuration = None,
        metrics: Optional[Dict[str, pl.metrics.Metric]] = None,
        calculate_perplexity=False,
    ):
        """Forward every argument to SimplePLModule with a fixed predictor."""
        super().__init__(
            model,
            optimizer,
            criterion,
            predictor_cls=_AutoEncoder,
            lr_scheduler=lr_scheduler,
            hparams=hparams,
            metrics=metrics,
            calculate_perplexity=calculate_perplexity,
        )
class RnnPLModule(SimplePLModule):
    """Lightning wrapper preconfigured with the RNN classification predictor."""

    def __init__(
        self,
        model: nn.Module,
        optimizer: Union[Optimizer, List[Optimizer]],
        criterion: LossType,
        lr_scheduler: Union[_LRScheduler, List[_LRScheduler]] = None,
        hparams: Configuration = None,
        metrics: Optional[Dict[str, pl.metrics.Metric]] = None,
        calculate_perplexity=False,
    ):
        """Forward every argument to SimplePLModule with a fixed predictor."""
        super().__init__(
            model,
            optimizer,
            criterion,
            predictor_cls=_RnnClassification,
            lr_scheduler=lr_scheduler,
            hparams=hparams,
            metrics=metrics,
            calculate_perplexity=calculate_perplexity,
        )
class TransformerClassificationPLModule(SimplePLModule):
    """Lightning wrapper preconfigured with the transformer classification predictor."""

    def __init__(
        self,
        model: nn.Module,
        optimizer: Union[Optimizer, List[Optimizer]],
        criterion: LossType,
        lr_scheduler: Union[_LRScheduler, List[_LRScheduler]] = None,
        hparams: Configuration = None,
        metrics: Optional[Dict[str, pl.metrics.Metric]] = None,
        calculate_perplexity=False,
    ):
        """Forward every argument to SimplePLModule with a fixed predictor."""
        super().__init__(
            model,
            optimizer,
            criterion,
            predictor_cls=_TransformerClassification,
            lr_scheduler=lr_scheduler,
            hparams=hparams,
            metrics=metrics,
            calculate_perplexity=calculate_perplexity,
        )
class TransformerPLModule(SimplePLModule):
    """Lightning wrapper preconfigured with the sequence-to-sequence transformer predictor."""

    def __init__(
        self,
        model: nn.Module,
        optimizer: Union[Optimizer, List[Optimizer]],
        criterion: LossType,
        lr_scheduler: Union[_LRScheduler, List[_LRScheduler]] = None,
        hparams: Configuration = None,
        metrics: Optional[Dict[str, pl.metrics.Metric]] = None,
        calculate_perplexity=False,
    ):
        """Forward every argument to SimplePLModule with a fixed predictor."""
        super().__init__(
            model,
            optimizer,
            criterion,
            predictor_cls=_Transformer,
            lr_scheduler=lr_scheduler,
            hparams=hparams,
            metrics=metrics,
            calculate_perplexity=calculate_perplexity,
        )
class BertPLModule(SimplePLModule):
    """Lightning wrapper preconfigured with the BERT sequence classification predictor."""

    def __init__(
        self,
        model: nn.Module,
        optimizer: Union[Optimizer, List[Optimizer]],
        criterion: LossType,
        lr_scheduler: Union[_LRScheduler, List[_LRScheduler]] = None,
        hparams: Configuration = None,
        metrics: Optional[Dict[str, pl.metrics.Metric]] = None,
        calculate_perplexity=False,
    ):
        """Forward every argument to SimplePLModule with a fixed predictor."""
        super().__init__(
            model,
            optimizer,
            criterion,
            predictor_cls=_BertSequenceClassification,
            lr_scheduler=lr_scheduler,
            hparams=hparams,
            metrics=metrics,
            calculate_perplexity=calculate_perplexity,
        )
class MultimodalTransformerClassificationPLModule(SimplePLModule):
    """Lightning wrapper preconfigured with the multimodal transformer classification predictor."""

    def __init__(
        self,
        model: nn.Module,
        optimizer: Union[Optimizer, List[Optimizer]],
        criterion: LossType,
        lr_scheduler: Union[_LRScheduler, List[_LRScheduler]] = None,
        hparams: Configuration = None,
        metrics: Optional[Dict[str, pl.metrics.Metric]] = None,
        calculate_perplexity=False,
    ):
        """Forward every argument to SimplePLModule with a fixed predictor."""
        super().__init__(
            model,
            optimizer,
            criterion,
            predictor_cls=_MultimodalTransformerClassification,
            lr_scheduler=lr_scheduler,
            hparams=hparams,
            metrics=metrics,
            calculate_perplexity=calculate_perplexity,
        )
| StarcoderdataPython |
258345 | <reponame>maneeshd/braintree_python
import braintree
import warnings
from decimal import Decimal
from braintree.add_on import AddOn
from braintree.apple_pay_card import ApplePayCard
from braintree.authorization_adjustment import AuthorizationAdjustment
from braintree.coinbase_account import CoinbaseAccount
from braintree.android_pay_card import AndroidPayCard
from braintree.amex_express_checkout_card import AmexExpressCheckoutCard
from braintree.venmo_account import VenmoAccount
from braintree.disbursement_detail import DisbursementDetail
from braintree.dispute import Dispute
from braintree.discount import Discount
from braintree.successful_result import SuccessfulResult
from braintree.status_event import StatusEvent
from braintree.error_result import ErrorResult
from braintree.resource import Resource
from braintree.address import Address
from braintree.configuration import Configuration
from braintree.credit_card import CreditCard
from braintree.customer import Customer
from braintree.paypal_account import PayPalAccount
from braintree.paypal_here import PayPalHere
from braintree.europe_bank_account import EuropeBankAccount
from braintree.subscription_details import SubscriptionDetails
from braintree.resource_collection import ResourceCollection
from braintree.transparent_redirect import TransparentRedirect
from braintree.exceptions.not_found_error import NotFoundError
from braintree.descriptor import Descriptor
from braintree.risk_data import RiskData
from braintree.three_d_secure_info import ThreeDSecureInfo
from braintree.transaction_line_item import TransactionLineItem
from braintree.us_bank_account import UsBankAccount
# NEXT_MAJOR_VERSION Remove this class as legacy Ideal has been removed/disabled in the Braintree Gateway
# DEPRECATED If you're looking to accept iDEAL as a payment method contact <EMAIL> for a solution.
from braintree.ideal_payment import IdealPayment
from braintree.local_payment import LocalPayment
from braintree.visa_checkout_card import VisaCheckoutCard
from braintree.masterpass_card import MasterpassCard
from braintree.facilitated_details import FacilitatedDetails
from braintree.facilitator_details import FacilitatorDetails
from braintree.payment_instrument_type import PaymentInstrumentType
from braintree.samsung_pay_card import SamsungPayCard
class Transaction(Resource):
"""
A class representing Braintree Transaction objects.
An example of creating a sale transaction with all available fields::
result = Transaction.sale({
"amount": "100.00",
"order_id": "123",
"channel": "MyShoppingCartProvider",
"credit_card": {
"number": "5105105105105100",
"expiration_date": "05/2011",
"cvv": "123"
},
"customer": {
"first_name": "Dan",
"last_name": "Smith",
"company": "Braintree",
"email": "<EMAIL>",
"phone": "419-555-1234",
"fax": "419-555-1235",
"website": "https://www.braintreepayments.com"
},
"billing": {
"first_name": "Carl",
"last_name": "Jones",
"company": "Braintree",
"street_address": "123 E Main St",
"extended_address": "Suite 403",
"locality": "Chicago",
"region": "IL",
"postal_code": "60622",
"country_name": "United States of America"
},
"shipping": {
"first_name": "Andrew",
"last_name": "Mason",
"company": "Braintree",
"street_address": "456 W Main St",
"extended_address": "Apt 2F",
"locality": "Bartlett",
"region": "IL",
"postal_code": "60103",
"country_name": "United States of America"
}
})
print(result.transaction.amount)
print(result.transaction.order_id)
For more information on Transactions, see https://developers.braintreepayments.com/reference/request/transaction/sale/python
"""
def __repr__(self):
    """Developer-facing repr listing the transaction's main attributes."""
    # Attribute names rendered by Resource.__repr__; order determines output order.
    detail_list = [
        "id",
        "graphql_id",
        "additional_processor_response",
        "amount",
        "authorization_adjustments",
        "authorization_expires_at",
        "avs_error_response_code",
        "avs_postal_code_response_code",
        "avs_street_address_response_code",
        "channel",
        "created_at",
        "credit_card_details",
        "currency_iso_code",
        "customer_id",
        "cvv_response_code",
        "discount_amount",
        "disputes",
        "escrow_status",
        "gateway_rejection_reason",
        "master_merchant_account_id",
        "merchant_account_id",
        "network_response_code",
        "network_response_text",
        "network_transaction_id",
        "order_id",
        "payment_instrument_type",
        "payment_method_token",
        "plan_id",
        "processor_authorization_code",
        "processor_response_code",
        "processor_response_text",
        "processor_settlement_response_code",
        "processor_settlement_response_text",
        "purchase_order_number",
        "recurring",
        "refund_id",
        "refunded_transaction_id",
        "service_fee_amount",
        "settlement_batch_id",
        "shipping_amount",
        "ships_from_postal_code",
        "status",
        "status_history",
        "sub_merchant_account_id",
        "subscription_id",
        "tax_amount",
        "tax_exempt",
        "type",
        "updated_at",
        "voice_referral_number",
    ]
    return super(Transaction, self).__repr__(detail_list)
class CreatedUsing(object):
    """
    Constants representing how the transaction was created. Available types are:

    * braintree.Transaction.CreatedUsing.FullInformation
    * braintree.Transaction.CreatedUsing.Token
    """
    FullInformation = "full_information"
    Token = "token"
    # Fallback for values the gateway may return that this client does not know.
    Unrecognized = "unrecognized"
class GatewayRejectionReason(object):
    """
    Constants representing gateway rejection reasons. Available types are:

    * braintree.Transaction.GatewayRejectionReason.ApplicationIncomplete
    * braintree.Transaction.GatewayRejectionReason.Avs
    * braintree.Transaction.GatewayRejectionReason.AvsAndCvv
    * braintree.Transaction.GatewayRejectionReason.Cvv
    * braintree.Transaction.GatewayRejectionReason.Duplicate
    * braintree.Transaction.GatewayRejectionReason.Fraud
    * braintree.Transaction.GatewayRejectionReason.ThreeDSecure
    * braintree.Transaction.GatewayRejectionReason.TokenIssuance
    """
    ApplicationIncomplete = "application_incomplete"
    Avs = "avs"
    AvsAndCvv = "avs_and_cvv"
    Cvv = "cvv"
    Duplicate = "duplicate"
    Fraud = "fraud"
    ThreeDSecure = "three_d_secure"
    TokenIssuance = "token_issuance"
    # Fallback for values the gateway may return that this client does not know.
    Unrecognized = "unrecognized"
class Source(object):
    """
    Constants representing where the transaction originated. Available types are:

    * braintree.Transaction.Source.Api
    * braintree.Transaction.Source.ControlPanel
    * braintree.Transaction.Source.Recurring
    """
    Api = "api"
    ControlPanel = "control_panel"
    Recurring = "recurring"
    # Fallback for values the gateway may return that this client does not know.
    Unrecognized = "unrecognized"
class EscrowStatus(object):
    """
    Constants representing transaction escrow statuses. Available statuses are:

    * braintree.Transaction.EscrowStatus.HoldPending
    * braintree.Transaction.EscrowStatus.Held
    * braintree.Transaction.EscrowStatus.ReleasePending
    * braintree.Transaction.EscrowStatus.Released
    * braintree.Transaction.EscrowStatus.Refunded
    """
    HoldPending = "hold_pending"
    Held = "held"
    ReleasePending = "release_pending"
    Released = "released"
    Refunded = "refunded"
    # Fallback for values the gateway may return that this client does not know.
    Unrecognized = "unrecognized"
class Status(object):
    """
    Constants representing transaction statuses. Available statuses are:

    * braintree.Transaction.Status.AuthorizationExpired
    * braintree.Transaction.Status.Authorized
    * braintree.Transaction.Status.Authorizing
    * braintree.Transaction.Status.Failed
    * braintree.Transaction.Status.GatewayRejected
    * braintree.Transaction.Status.ProcessorDeclined
    * braintree.Transaction.Status.Settled
    * braintree.Transaction.Status.SettlementConfirmed
    * braintree.Transaction.Status.SettlementDeclined
    * braintree.Transaction.Status.SettlementFailed
    * braintree.Transaction.Status.SettlementPending
    * braintree.Transaction.Status.Settling
    * braintree.Transaction.Status.SubmittedForSettlement
    * braintree.Transaction.Status.Voided
    """
    AuthorizationExpired = "authorization_expired"
    Authorized = "authorized"
    Authorizing = "authorizing"
    Failed = "failed"
    GatewayRejected = "gateway_rejected"
    ProcessorDeclined = "processor_declined"
    Settled = "settled"
    SettlementConfirmed = "settlement_confirmed"
    SettlementDeclined = "settlement_declined"
    SettlementFailed = "settlement_failed"
    SettlementPending = "settlement_pending"
    Settling = "settling"
    SubmittedForSettlement = "submitted_for_settlement"
    Voided = "voided"
    # NEXT_MAJOR_VERSION this is never used and should be removed
    Unrecognized = "unrecognized"
class Type(object):
    """
    Constants representing transaction types. Available types are:

    * braintree.Transaction.Type.Credit
    * braintree.Transaction.Type.Sale
    """
    Credit = "credit"
    Sale = "sale"
class IndustryType(object):
    """
    Constants identifying the industry for industry-specific transaction data.

    * braintree.Transaction.IndustryType.Lodging
    * braintree.Transaction.IndustryType.TravelAndCruise
    * braintree.Transaction.IndustryType.TravelAndFlight
    """
    Lodging = "lodging"
    TravelAndCruise = "travel_cruise"
    TravelAndFlight = "travel_flight"
class AdditionalCharge(object):
    """
    Constants for the kinds of additional charges in industry (lodging) data.
    """
    Restaurant = "restaurant"
    GiftShop = "gift_shop"
    MiniBar = "mini_bar"
    Telephone = "telephone"
    Laundry = "laundry"
    Other = "other"
@staticmethod
def clone_transaction(transaction_id, params):
    """Clone an existing transaction, applying the given params."""
    gateway = Configuration.gateway()
    return gateway.transaction.clone_transaction(transaction_id, params)
@staticmethod
def cancel_release(transaction_id):
    """Cancel a pending release from escrow for a transaction.

    Requires the transaction id::

        result = braintree.Transaction.cancel_release("my_transaction_id")
    """
    gateway = Configuration.gateway()
    return gateway.transaction.cancel_release(transaction_id)
@staticmethod
def confirm_transparent_redirect(query_string):
    """Confirm a transparent redirect request (deprecated).

    Expects the query string from the redirect request, without the
    leading "?" character::

        result = braintree.Transaction.confirm_transparent_redirect_request("foo=bar&id=12345")
    """
    warnings.warn("Please use TransparentRedirect.confirm instead", DeprecationWarning)
    gateway = Configuration.gateway()
    return gateway.transaction.confirm_transparent_redirect(query_string)
@staticmethod
def credit(params=None):
    """Create a transaction of type Credit.

    ``amount`` is required, along with one of a credit card,
    ``customer_id`` or ``payment_method_token``::

        result = braintree.Transaction.credit({
            "amount": "100.00",
            "payment_method_token": "my_token"
        })

        result = braintree.Transaction.credit({
            "amount": "100.00",
            "credit_card": {
                "number": "4111111111111111",
                "expiration_date": "12/2012"
            }
        })

        result = braintree.Transaction.credit({
            "amount": "100.00",
            "customer_id": "my_customer_id"
        })
    """
    params = params if params is not None else {}
    params["type"] = Transaction.Type.Credit
    return Transaction.create(params)
@staticmethod
def find(transaction_id):
    """Look up a single transaction by id (not wrapped in a result object).

    Raises :class:`NotFoundError <braintree.exceptions.not_found_error.NotFoundError>`
    if no transaction with the given id exists::

        transaction = braintree.Transaction.find("my_transaction_id")
    """
    gateway = Configuration.gateway()
    return gateway.transaction.find(transaction_id)
@staticmethod
def line_items(transaction_id):
    """Return the line items of a transaction, given its id.

    Raises :class:`NotFoundError <braintree.exceptions.not_found_error.NotFoundError>`
    if the provided transaction_id is not found.
    """
    gateway = Configuration.gateway()
    return gateway.transaction_line_item.find_all(transaction_id)
@staticmethod
def hold_in_escrow(transaction_id):
    """Hold an existing submerchant transaction for escrow.

    Expects a transaction_id::

        result = braintree.Transaction.hold_in_escrow("my_transaction_id")
    """
    gateway = Configuration.gateway()
    return gateway.transaction.hold_in_escrow(transaction_id)
@staticmethod
def refund(transaction_id, amount_or_options=None):
    """Refund an existing transaction, optionally partially or with options.

    Expects a transaction_id::

        result = braintree.Transaction.refund("my_transaction_id")
    """
    gateway = Configuration.gateway()
    return gateway.transaction.refund(transaction_id, amount_or_options)
@staticmethod
def sale(params=None):
    """Create a transaction of type Sale.

    ``amount`` is required, along with one of a credit card,
    ``customer_id`` or ``payment_method_token``::

        result = braintree.Transaction.sale({
            "amount": "100.00",
            "payment_method_token": "my_token"
        })

        result = braintree.Transaction.sale({
            "amount": "100.00",
            "credit_card": {
                "number": "4111111111111111",
                "expiration_date": "12/2012"
            }
        })

        result = braintree.Transaction.sale({
            "amount": "100.00",
            "customer_id": "my_customer_id"
        })
    """
    params = params if params is not None else {}
    params["type"] = Transaction.Type.Sale
    return Transaction.create(params)
@staticmethod
def search(*query):
    """Run a transaction search with the given query criteria."""
    gateway = Configuration.gateway()
    return gateway.transaction.search(*query)
@staticmethod
def release_from_escrow(transaction_id):
    """Submit an escrowed transaction for release.

    Requires the transaction id::

        result = braintree.Transaction.release_from_escrow("my_transaction_id")
    """
    gateway = Configuration.gateway()
    return gateway.transaction.release_from_escrow(transaction_id)
@staticmethod
def submit_for_settlement(transaction_id, amount=None, params=None):
    """Submit an authorized transaction for settlement.

    Requires the transaction id; ``amount`` and extra ``params`` are optional::

        result = braintree.Transaction.submit_for_settlement("my_transaction_id")
    """
    if params is None:
        params = {}
    gateway = Configuration.gateway()
    return gateway.transaction.submit_for_settlement(transaction_id, amount, params)
@staticmethod
def update_details(transaction_id, params=None):
    """Update existing details of a transaction that has been submitted for settlement.

    Requires the transaction id::

        result = braintree.Transaction.update_details("my_transaction_id", {
            "amount": "100.00",
            "order_id": "123",
            "descriptor": {
                "name": "123*123456789012345678",
                "phone": "3334445555",
                "url": "url.com"
            }
        })
    """
    if params is None:
        params = {}
    gateway = Configuration.gateway()
    return gateway.transaction.update_details(transaction_id, params)
@staticmethod
def tr_data_for_credit(tr_data, redirect_url):
    """Build tr_data for a Transaction of type Credit."""
    gateway = Configuration.gateway()
    return gateway.transaction.tr_data_for_credit(tr_data, redirect_url)
@staticmethod
def tr_data_for_sale(tr_data, redirect_url):
    """Build tr_data for a Transaction of type Sale."""
    gateway = Configuration.gateway()
    return gateway.transaction.tr_data_for_sale(tr_data, redirect_url)
@staticmethod
def transparent_redirect_create_url():
    """Return the url for creating Transactions through transparent redirect (deprecated)."""
    warnings.warn("Please use TransparentRedirect.url instead", DeprecationWarning)
    gateway = Configuration.gateway()
    return gateway.transaction.transparent_redirect_create_url()
@staticmethod
def void(transaction_id):
    """Void an existing transaction.

    Expects a transaction_id::

        result = braintree.Transaction.void("my_transaction_id")
    """
    gateway = Configuration.gateway()
    return gateway.transaction.void(transaction_id)
@staticmethod
def create(params):
    """Create a transaction. ``amount`` and ``type`` are required, along
    with one of a credit card, ``customer_id`` or ``payment_method_token``::

        result = braintree.Transaction.create({
            "type": braintree.Transaction.Type.Sale,
            "amount": "100.00",
            "payment_method_token": "my_token"
        })

        result = braintree.Transaction.create({
            "type": braintree.Transaction.Type.Sale,
            "amount": "100.00",
            "credit_card": {
                "number": "4111111111111111",
                "expiration_date": "12/2012"
            }
        })

        result = braintree.Transaction.create({
            "type": braintree.Transaction.Type.Sale,
            "amount": "100.00",
            "customer_id": "my_customer_id"
        })
    """
    gateway = Configuration.gateway()
    return gateway.transaction.create(params)
@staticmethod
def clone_signature():
    """Signature of allowed params for Transaction.clone_transaction."""
    options = {"options": ["submit_for_settlement"]}
    return ["amount", "channel", options]
@staticmethod
def create_signature():
    """Signature of allowed params for Transaction.create.

    Nested dicts describe sub-structures; "__any_key__" allows arbitrary keys.
    """
    return [
        "amount", "customer_id", "device_session_id", "fraud_merchant_id", "merchant_account_id", "order_id", "channel",
        "payment_method_token", "purchase_order_number", "recurring", "transaction_source", "shipping_address_id",
        "device_data", "billing_address_id", "payment_method_nonce", "tax_amount",
        "shared_payment_method_token", "shared_customer_id", "shared_billing_address_id", "shared_shipping_address_id", "shared_payment_method_nonce",
        "discount_amount", "shipping_amount", "ships_from_postal_code",
        "tax_exempt", "three_d_secure_token", "type", "venmo_sdk_payment_method_code", "service_fee_amount",
        {
            "risk_data": [
                "customer_browser", "customer_ip"
            ]
        },
        {
            "credit_card": [
                "token", "cardholder_name", "cvv", "expiration_date", "expiration_month", "expiration_year", "number"
            ]
        },
        {
            "customer": [
                "id", "company", "email", "fax", "first_name", "last_name", "phone", "website"
            ]
        },
        {
            "billing": [
                "first_name", "last_name", "company", "country_code_alpha2", "country_code_alpha3",
                "country_code_numeric", "country_name", "extended_address", "locality",
                "postal_code", "region", "street_address"
            ]
        },
        {
            "shipping": [
                "first_name", "last_name", "company", "country_code_alpha2", "country_code_alpha3",
                "country_code_numeric", "country_name", "extended_address", "locality",
                "postal_code", "region", "street_address"
            ]
        },
        {
            "three_d_secure_pass_thru": [
                "eci_flag",
                "cavv",
                "xid",
                "authentication_response",
                "directory_response",
                "cavv_algorithm",
                "ds_transaction_id",
                "three_d_secure_version"
            ]
        },
        {
            "options": [
                "add_billing_address_to_payment_method",
                "hold_in_escrow",
                "store_in_vault",
                "store_in_vault_on_success",
                "store_shipping_address_in_vault",
                "submit_for_settlement",
                "venmo_sdk_session",
                "payee_id",
                "payee_email",
                "skip_advanced_fraud_checking",
                "skip_avs",
                "skip_cvv",
                {
                    "credit_card": [
                        "account_type"
                    ],
                    "paypal": [
                        "payee_id",
                        "payee_email",
                        "custom_field",
                        "description",
                        {"supplementary_data": ["__any_key__"]}
                    ],
                    "three_d_secure": [
                        "required"
                    ],
                    "amex_rewards": [
                        "request_id",
                        "points",
                        "currency_amount",
                        "currency_iso_code"
                    ],
                    "venmo_merchant_data": [
                        "venmo_merchant_public_id",
                        "originating_transaction_id",
                        "originating_merchant_id",
                        "originating_merchant_kind"
                    ],
                    "venmo": [
                        "profile_id"
                    ],
                },
                {
                    "adyen": [
                        "overwrite_brand",
                        "selected_brand"
                    ]
                }
            ]
        },
        {"custom_fields": ["__any_key__"]},
        {"external_vault": ["status", "previous_network_transaction_id"]},
        {"descriptor": ["name", "phone", "url"]},
        {"paypal_account": ["payee_id", "payee_email", "payer_id", "payment_id"]},
        {"industry":
            [
                "industry_type",
                {
                    "data": [
                        "folio_number", "check_in_date", "check_out_date", "departure_date", "lodging_check_in_date", "lodging_check_out_date", "travel_package", "lodging_name", "room_rate",
                        "passenger_first_name", "passenger_last_name", "passenger_middle_initial", "passenger_title", "issued_date", "travel_agency_name", "travel_agency_code", "ticket_number",
                        "issuing_carrier_code", "customer_code", "fare_amount", "fee_amount", "room_tax", "tax_amount", "restricted_ticket", "no_show", "advanced_deposit", "fire_safe", "property_phone",
                        {
                            "legs": [
                                "conjunction_ticket", "exchange_ticket", "coupon_number", "service_class", "carrier_code", "fare_basis_code", "flight_number", "departure_date", "departure_airport_code", "departure_time",
                                "arrival_airport_code", "arrival_time", "stopover_permitted", "fare_amount", "fee_amount", "tax_amount", "endorsement_or_restrictions"
                            ]
                        },
                        {
                            "additional_charges": [
                                "kind", "amount"
                            ],
                        }
                    ]
                }
            ]
        },
        {"line_items":
            [
                "quantity", "name", "description", "kind", "unit_amount", "unit_tax_amount", "total_amount", "discount_amount", "tax_amount", "unit_of_measure", "product_code", "commodity_code", "url",
            ]
        },
    ]
@staticmethod
def submit_for_settlement_signature():
    """Signature of allowed params for Transaction.submit_for_settlement."""
    return [
        "order_id",
        {"descriptor": ["name", "phone", "url"]},
        "purchase_order_number",
        "tax_amount",
        "tax_exempt",
        "discount_amount",
        "shipping_amount",
        "ships_from_postal_code",
        {"line_items":
            [
                "quantity", "name", "description", "kind", "unit_amount", "unit_tax_amount", "total_amount", "discount_amount", "tax_amount", "unit_of_measure", "product_code", "commodity_code", "url",
            ]
        },
    ]
@staticmethod
def update_details_signature():
    """Signature of allowed params for Transaction.update_details."""
    descriptor = {"descriptor": ["name", "phone", "url"]}
    return ["amount", "order_id", descriptor]
@staticmethod
def refund_signature():
    """Signature of allowed params for Transaction.refund."""
    return [
        "amount",
        "order_id",
    ]
@staticmethod
def submit_for_partial_settlement(transaction_id, amount, params=None):
    """Create a partial settlement transaction for an authorized transaction.

    Requires the authorized transaction's id and an amount::

        result = braintree.Transaction.submit_for_partial_settlement("my_transaction_id", "20.00")
    """
    if params is None:
        params = {}
    gateway = Configuration.gateway()
    return gateway.transaction.submit_for_partial_settlement(transaction_id, amount, params)
def __init__(self, gateway, attributes):
    """Build a Transaction from a gateway response dict.

    Known nested structures are popped out of ``attributes`` and wrapped
    in their typed helper classes; numeric amounts are converted to
    ``Decimal``. Remaining keys are assigned by ``Resource.__init__``.
    """
    # "refund_id" is deprecated in favor of refund_ids; stash it privately
    # so the deprecated property can still serve it.
    if "refund_id" in attributes:
        self._refund_id = attributes["refund_id"]
        del(attributes["refund_id"])
    else:
        self._refund_id = None
    Resource.__init__(self, gateway, attributes)
    # Monetary fields come back as strings; normalize to Decimal.
    self.amount = Decimal(self.amount)
    if "tax_amount" in attributes and self.tax_amount:
        self.tax_amount = Decimal(self.tax_amount)
    if "discount_amount" in attributes and self.discount_amount:
        self.discount_amount = Decimal(self.discount_amount)
    if "shipping_amount" in attributes and self.shipping_amount:
        self.shipping_amount = Decimal(self.shipping_amount)
    # Wrap each nested response structure in its typed helper class.
    if "billing" in attributes:
        self.billing_details = Address(gateway, attributes.pop("billing"))
    if "credit_card" in attributes:
        self.credit_card_details = CreditCard(gateway, attributes.pop("credit_card"))
    if "paypal" in attributes:
        self.paypal_details = PayPalAccount(gateway, attributes.pop("paypal"))
    if "paypal_here" in attributes:
        self.paypal_here_details = PayPalHere(gateway, attributes.pop("paypal_here"))
    if "local_payment" in attributes:
        self.local_payment_details = LocalPayment(gateway, attributes.pop("local_payment"))
    if "europe_bank_account" in attributes:
        self.europe_bank_account_details = EuropeBankAccount(gateway, attributes.pop("europe_bank_account"))
    if "us_bank_account" in attributes:
        self.us_bank_account = UsBankAccount(gateway, attributes.pop("us_bank_account"))
    # NEXT_MAJOR_VERSION Remove this class as legacy Ideal has been removed/disabled in the Braintree Gateway
    # DEPRECATED If you're looking to accept iDEAL as a payment method contact <EMAIL> for a solution.
    if "ideal_payment" in attributes:
        self.ideal_payment_details = IdealPayment(gateway, attributes.pop("ideal_payment"))
    if "apple_pay" in attributes:
        self.apple_pay_details = ApplePayCard(gateway, attributes.pop("apple_pay"))
    if "coinbase_account" in attributes:
        self.coinbase_details = CoinbaseAccount(gateway, attributes.pop("coinbase_account"))
    if "android_pay_card" in attributes:
        self.android_pay_card_details = AndroidPayCard(gateway, attributes.pop("android_pay_card"))
    if "amex_express_checkout_card" in attributes:
        self.amex_express_checkout_card_details = AmexExpressCheckoutCard(gateway, attributes.pop("amex_express_checkout_card"))
    if "venmo_account" in attributes:
        self.venmo_account_details = VenmoAccount(gateway, attributes.pop("venmo_account"))
    if "visa_checkout_card" in attributes:
        self.visa_checkout_card_details = VisaCheckoutCard(gateway, attributes.pop("visa_checkout_card"))
    if "masterpass_card" in attributes:
        self.masterpass_card_details = MasterpassCard(gateway, attributes.pop("masterpass_card"))
    if "samsung_pay_card" in attributes:
        self.samsung_pay_card_details = SamsungPayCard(gateway, attributes.pop("samsung_pay_card"))
    if "customer" in attributes:
        self.customer_details = Customer(gateway, attributes.pop("customer"))
    if "shipping" in attributes:
        self.shipping_details = Address(gateway, attributes.pop("shipping"))
    # Convert list-valued fields element-by-element into typed objects.
    if "add_ons" in attributes:
        self.add_ons = [AddOn(gateway, add_on) for add_on in self.add_ons]
    if "discounts" in attributes:
        self.discounts = [Discount(gateway, discount) for discount in self.discounts]
    if "status_history" in attributes:
        self.status_history = [StatusEvent(gateway, status_event) for status_event in self.status_history]
    if "subscription" in attributes:
        self.subscription_details = SubscriptionDetails(attributes.pop("subscription"))
    if "descriptor" in attributes:
        self.descriptor = Descriptor(gateway, attributes.pop("descriptor"))
    if "disbursement_details" in attributes:
        self.disbursement_details = DisbursementDetail(attributes.pop("disbursement_details"))
    if "disputes" in attributes:
        self.disputes = [Dispute(dispute) for dispute in self.disputes]
    if "authorization_adjustments" in attributes:
        self.authorization_adjustments = [AuthorizationAdjustment(authorization_adjustment) for authorization_adjustment in self.authorization_adjustments]
    if "payment_instrument_type" in attributes:
        self.payment_instrument_type = attributes["payment_instrument_type"]
    # Optional structures default to None when absent from the response.
    if "risk_data" in attributes:
        self.risk_data = RiskData(attributes["risk_data"])
    else:
        self.risk_data = None
    if "three_d_secure_info" in attributes and not attributes["three_d_secure_info"] is None:
        self.three_d_secure_info = ThreeDSecureInfo(attributes["three_d_secure_info"])
    else:
        self.three_d_secure_info = None
    if "facilitated_details" in attributes:
        self.facilitated_details = FacilitatedDetails(attributes.pop("facilitated_details"))
    if "facilitator_details" in attributes:
        self.facilitator_details = FacilitatorDetails(attributes.pop("facilitator_details"))
    if "network_transaction_id" in attributes:
        self.network_transaction_id = attributes["network_transaction_id"]
@property
def refund_id(self):
    # Deprecated: a transaction can have multiple refunds; prefer refund_ids.
    warnings.warn("Please use Transaction.refund_ids instead", DeprecationWarning)
    return self._refund_id
@property
def vault_billing_address(self):
    """The vault billing address associated with this transaction."""
    customer_id = self.customer_details.id
    address_id = self.billing_details.id
    return self.gateway.address.find(customer_id, address_id)
@property
def vault_credit_card(self):
    """The vault credit card associated with this transaction, or None
    when the card was never vaulted (no token present)."""
    token = self.credit_card_details.token
    if token is None:
        return None
    return self.gateway.credit_card.find(token)
@property
def vault_customer(self):
    """The vault customer associated with this transaction, or None when
    no customer id is present."""
    customer_id = self.customer_details.id
    if customer_id is None:
        return None
    return self.gateway.customer.find(customer_id)
    @property
    def is_disbursed(self):
        # A transaction counts as disbursed when its disbursement details
        # carry real data (delegates to DisbursementDetail.is_valid).
        return self.disbursement_details.is_valid
    @property
    def line_items(self):
        """
        The line items associated with this transaction.

        Fetched from the gateway on every access (not cached).
        """
        return self.gateway.transaction_line_item.find_all(self.id)
| StarcoderdataPython |
#!python

import string

# Hint: Use these string constants to encode/decode hexadecimal digits and more
# string.digits is '0123456789'
# string.hexdigits is '0123456789abcdefABCDEF'
# string.ascii_lowercase is 'abcdefghijklmnopqrstuvwxyz'
# string.ascii_uppercase is 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
# string.ascii_letters is ascii_lowercase + ascii_uppercase
# string.printable is digits + ascii_letters + punctuation + whitespace

# Digit alphabet for bases up to 36: '0'-'9' then 'a'-'z'.
all_chars = string.digits + string.ascii_lowercase
# Reverse lookup: character -> numeric value (e.g. 'f' -> 15).
chars = dict(zip(all_chars, range(len(all_chars))))


def decode(digits: str, base: int, is_fraction: bool = False) -> "int | float":
    """Decode given digits in given base to a number in base 10.

    digits: str -- string representation of the number (in the given base);
        may contain a single '.' separating a fractional part.
    base: int -- base of the given number (2..36).
    is_fraction: bool -- internal flag; True when *digits* is the part after
        the decimal point, so place values start at base**-1.
    return: int when there is no fractional part, float otherwise.
        (Fixed annotation: the previous ``int or float`` expression
        evaluated to plain ``int``.)
    """
    # Handle up to base 36 [0-9a-z]
    assert 2 <= base <= 36, "base is out of range: {}".format(base)

    result_num = 0
    decimal_value = 0
    copied_digits = digits

    # Split off a fractional part (if any) and decode it recursively.
    if "." in digits:
        broken_up_digits = copied_digits.split(".")
        copied_digits = broken_up_digits[0]
        decimal_digits = broken_up_digits[1]
        decimal_value = decode(decimal_digits, base, is_fraction=True)

    # Fractional digits are read left-to-right with powers -1, -2, ...;
    # whole-number digits are reversed so the least significant digit comes
    # first, with powers 0, 1, 2, ...
    if is_fraction:
        step = -1
        power = -1
    else:
        copied_digits = copied_digits[::-1]
        step = 1
        power = 0

    for curr_digit in copied_digits:
        # Accept upper-case digits by normalising before the table lookup.
        lowercased_digit = curr_digit.lower()
        result_num += chars[lowercased_digit] * (base ** power)
        power += step

    return result_num + decimal_value
def encode(number: "int | float", base: int) -> str:
    """Encode the base-10 *number* as digits in *base*.

    number: int or float -- non-negative number (in base 10).
    base: int -- base to convert to (2..36).
    return: str -- string representation of the number in the given base;
        contains a '.' when *number* has a fractional part.

    Fixes over the previous version: the leftover debug ``print(remainder)``
    is gone, ``encode(0, b)`` now returns "0" instead of "", and the
    fractional loop is capped so binary-float noise cannot spin forever.
    """
    # Handle up to base 36 [0-9a-z]
    assert 2 <= base <= 36, "base is out of range: {}".format(number)
    # Handle unsigned numbers only for now
    assert number >= 0, "number is negative: {}".format(number)

    # Local copy of the digit alphabet so the function is self-contained.
    alphabet = string.digits + string.ascii_lowercase

    whole_num = int(number // 1)
    decimal_value = number - whole_num

    # Special case: 0 has no nonzero digits but must still render as "0"
    # (the division loop below would otherwise produce an empty string).
    if whole_num == 0 and not decimal_value:
        return "0"

    result_list = []
    # Repeated division: collect whole-number digits least-significant first.
    while whole_num != 0:
        whole_num, remainder = divmod(whole_num, base)
        result_list.append(alphabet[remainder])

    if not decimal_value:
        return "".join(reversed(result_list))

    # There is a fractional part: put the whole digits into reading order
    # and append the point before encoding the fraction.
    result_list = result_list[::-1]
    result_list.append(".")

    # Repeated multiplication: each step peels off one fractional digit.
    # The cap only triggers for values that would otherwise (nearly) never
    # reach exactly zero in floating point.
    max_fraction_digits = 64
    while decimal_value != 0 and max_fraction_digits > 0:
        decimal_value = decimal_value * base
        # Obtain the whole number from our floating point value
        whole_num = int(decimal_value // 1)
        # Obtain just the decimal
        decimal_value = decimal_value - whole_num
        result_list.append(alphabet[whole_num])
        max_fraction_digits -= 1

    return "".join(result_list)
def convert(digits: str, base1: int, base2: int) -> str:
    """Convert *digits* written in base1 into the equivalent digits in base2.

    digits: str -- string representation of the number (in base1).
    base1: int -- base of the given number (2..36).
    base2: int -- base to convert to (2..36).
    return: str -- string representation of the number (in base2).
    """
    # Handle up to base 36 [0-9a-z]
    assert 2 <= base1 <= 36, "base1 is out of range: {}".format(base1)
    assert 2 <= base2 <= 36, "base2 is out of range: {}".format(base2)
    # Round-trip through base 10: parse in base1, then render in base2.
    value = decode(digits, base1)
    return encode(value, base2)
def convert_negative_binary_nums(digits: str, base: int) -> str:
    """
    Convert negative binary numbers to a specific base.

    Interprets *digits* as a two's-complement bit string: a leading '1'
    marks a negative value, whose magnitude is recovered with the
    scan-from-LSB trick (copy bits up to and including the first '1',
    then invert every remaining bit).

    Args:
        digits - The binary digits we'd like to convert
        base - The base we'd like to convert to
    Return:
        A string containing the negative binary digits encoded to
        a new base
    """
    bits = []
    is_negative = False
    # Check if the number is negative according to twos complement
    if digits[0] == "1":
        is_negative = True
        flip = False
        # Flip necessary bits: scanning from the least significant end,
        # bits are copied verbatim until the first '1' has been seen,
        # after which every bit is inverted (two's-complement negation).
        for char in reversed(digits):
            if flip:
                bits.append("0" if char == "1" else "1")
            else:
                bits.append(char)
            if char == "1":
                flip = True
        bits = bits[::-1]
    # For non-negative input `bits` stays empty, so `bits or digits` falls
    # back to the original string. NOTE(review): when negative, `bits` is a
    # list of single characters; decode() happens to iterate a list the same
    # way it iterates a string, so this works for pure bit strings.
    resulting_value = encode(decode(bits or digits, 2), base)
    if is_negative:
        return "-" + resulting_value
    return resulting_value
def main():
    """Read command-line arguments and convert given digits between bases."""
    import sys

    args = sys.argv[1:]  # Ignore script file name
    if len(args) != 3:
        # Wrong argument count: show usage and bail out.
        print("Usage: {} digits base1 base2".format(sys.argv[0]))
        print("Converts digits from base1 to base2")
        return
    digits, base1, base2 = args[0], int(args[1]), int(args[2])
    # Convert given digits between bases
    result = convert(digits, base1, base2)
    print("{} in base {} is {} in base {}".format(digits, base1, result, base2))


if __name__ == "__main__":
    main()
| StarcoderdataPython |
1729309 | <reponame>jjandnn/ROMP
from os import remove
from sklearn.model_selection import PredefinedSplit
import torch
import sys
def remove_prefix(state_dict, prefix='module.', remove_keys=['_result_parser', '_calc_loss']):
    """Strip a wrapper prefix from checkpoint keys and drop unwanted entries.

    Mutates *state_dict* in place and also returns it:
      * any key containing one of *remove_keys* is deleted;
      * any remaining key containing *prefix* is re-inserted with every
        occurrence of *prefix* removed (DataParallel's 'module.' wrapper).

    NOTE: the mutable default for *remove_keys* is kept for interface
    compatibility; the list is never mutated here.
    """
    keys = list(state_dict.keys())
    print('original keys:', keys)
    for key in keys:
        exist_flag = True
        for rkey in remove_keys:
            if rkey in key:
                del state_dict[key]
                exist_flag = False
                # Stop after the first match: deleting the same key twice
                # (when several remove_keys occur in one key) raised KeyError.
                break
        if not exist_flag:
            continue
        if prefix in key:
            state_dict[key.replace(prefix, '')] = state_dict[key]
            del state_dict[key]
    keys = list(state_dict.keys())
    print('new keys:', keys)
    return state_dict
if __name__ == '__main__':
    # CLI: python <script> <input_checkpoint> <output_path>
    model_path = sys.argv[1]
    save_path = sys.argv[2]
    # Load the raw checkpoint, strip the 'module.' wrapper prefix, and
    # write the cleaned state dict to the requested path.
    state_dict = remove_prefix(torch.load(model_path), prefix='module.')
    torch.save(state_dict, save_path)
| StarcoderdataPython |
1909849 | # get CPU/GPU RAM usage reports like:
# Gen RAM Free: 11.6 GB | Proc size: 666.0 MB
# GPU RAM Free: 566MB | Used: 10873MB | Util 95% | Total 11439MB
# needed on google colab
!ln -sf /opt/bin/nvidia-smi /usr/bin/nvidia-smi
# memory footprint support libraries/code
!pip install gputil
!pip install psutil
!pip install humanize
import psutil
import humanize
import os
import GPUtil as GPU
GPUs = GPU.getGPUs()
# XXX: only one GPU on Colab and isn’t guaranteed
gpu = GPUs[0]


def printm():
    # Report host RAM (free + this process' RSS via psutil/humanize) and the
    # first GPU's memory stats; intended for quick checks in a Colab notebook.
    process = psutil.Process(os.getpid())
    print("Gen RAM Free: " + humanize.naturalsize( psutil.virtual_memory().available ), " I Proc size: " + humanize.naturalsize( process.memory_info().rss))
    print("GPU RAM Free: {0:.0f}MB | Used: {1:.0f}MB | Util {2:3.0f}% | Total {3:.0f}MB".format(gpu.memoryFree, gpu.memoryUsed, gpu.memoryUtil*100, gpu.memoryTotal))


printm()
| StarcoderdataPython |
168977 | <reponame>CaptainDario/ETLCDB_data_reader
import sys
import os
import time
sys.path.append(os.path.abspath(os.getcwd()))
from etldr.etl_data_reader import ETLDataReader
def load_one_data_set_file(reader : ETLDataReader):
    """The first example of the README: read one file (part 2) of ETL7,
    keeping only katakana and numeral samples.

    Args:
        reader : ETLDataReader instance to load the data set part.
    """
    from etldr.etl_data_names import ETLDataNames
    from etldr.etl_character_groups import ETLCharacterGroups

    include = [ETLCharacterGroups.katakana, ETLCharacterGroups.number]
    imgs, labels = reader.read_dataset_file(2, ETLDataNames.ETL7, include)
def load_one_data_set_part(reader : ETLDataReader):
    """The second example of the README: read every file of ETL2, keeping
    only kanji and hiragana samples.

    Args:
        reader : ETLDataReader instance to load the data set part.
    """
    from etldr.etl_data_names import ETLDataNames
    from etldr.etl_character_groups import ETLCharacterGroups

    include = [ETLCharacterGroups.kanji, ETLCharacterGroups.hiragana]
    imgs, labels = reader.read_dataset_part(ETLDataNames.ETL2, include)
def load_one_data_set_part_parallel(reader : ETLDataReader):
    """Parallel variant of the second README example: read ETL2 (kanji and
    hiragana only) using 16 worker processes.

    Args:
        reader : ETLDataReader instance to load the data set part.
    """
    from etldr.etl_data_names import ETLDataNames
    from etldr.etl_character_groups import ETLCharacterGroups

    include = [ETLCharacterGroups.kanji, ETLCharacterGroups.hiragana]
    imgs, labels = reader.read_dataset_part(ETLDataNames.ETL2, include, 16)
def load_the_whole_data_set(reader : ETLDataReader):
    """The third example of the README: read every data set part, keeping
    only roman letters and symbols.

    Args:
        reader : ETLDataReader instance to load the data set part.
    """
    from etldr.etl_character_groups import ETLCharacterGroups

    include = [ETLCharacterGroups.roman, ETLCharacterGroups.symbols]
    imgs, labels = reader.read_dataset_whole(include)
def load_the_whole_data_set_parallel(reader : ETLDataReader):
    """Parallel variant of the third README example: read the whole data set
    (roman letters and symbols only) using 16 worker processes.

    Args:
        reader : ETLDataReader instance to load the data set part.
    """
    from etldr.etl_character_groups import ETLCharacterGroups

    include = [ETLCharacterGroups.roman, ETLCharacterGroups.symbols]
    imgs, labels = reader.read_dataset_whole(include, 16)
if __name__ == "__main__":
    # Adjust this path to wherever the ETLCDB data set lives on disk.
    path_to_data_set = r"F:\data_sets\ETL_kanji"

    reader = ETLDataReader(path_to_data_set)

    # uncomment one of these examples
    #load_one_data_set_file(reader)
    #load_one_data_set_part(reader)
    #load_the_whole_data_set(reader)
    #load_one_data_set_part_parallel(reader)
    #load_the_whole_data_set_parallel(reader)
| StarcoderdataPython |
5092093 | <reponame>mvalente/hub2tweet
from google.appengine.ext import db
"""Data models for hub2tweet."""
# Note on OAuthConfig:
#
# The configuration is stored in datastore as we don't want to check our
# private keys into an open source project. The first config in the
# datastore will be used.
#
# To help make this easier to set, a simple admin config page was added.
# On your server, visit: http://<server>/admin/oauth_config
#
# And add a config to the datastore manually (copy paste this code, and fill in
# the right key and secret). You can verify and change with the Datastore
# Viewer.
class OAuthConfig(db.Model):
    """OAuth configuration (consumer key/secret). Only the first entity in
    the datastore is used -- see the setup note above."""
    consumer_key = db.StringProperty(required=True)
    consumer_secret = db.StringProperty(required=True)


class OAuthToken(db.Model):
    """Temporary OAuth request token; the datastore key name is the token."""
    # key name is token
    secret = db.StringProperty(required=True)


class TwitterUser(db.Model):
    """An authorised Twitter account; the datastore key name is the token."""
    # key name is token
    secret = db.StringProperty(required=True)
    user_id = db.IntegerProperty(required=True)
    screen_name = db.StringProperty(required=True)


class TopicSubscription(db.Model):
    """A user's hub subscription to a feed topic, with its verification state."""
    user_id = db.IntegerProperty(required=True)
    topic = db.StringProperty(required=True)
    verify_token = db.StringProperty(required=True)
    verified = db.BooleanProperty(default=False)
| StarcoderdataPython |
import array
import itertools


def make_bytearray(n, undefined_value):
    """Return an array.array('B') of length *n* filled with *undefined_value*."""
    return array.array('B', itertools.repeat(undefined_value, n))


class SparseBytes():
    """ A page-based sparse array. The content is backed by a dict() (of array.array('B') by default)"""

    def __init__(self, pagesize=4096, undefined_value=0,
                 array_constructor=None):
        """
        pagesize -- number of bytes per backing page.
        undefined_value -- byte value (0..255) used to fill new pages.
        array_constructor -- callable (n, fill) -> mutable byte sequence;
            defaults to make_bytearray.
        """
        assert isinstance(pagesize, int), "pagesize must be integer"
        # Fixed range check: the previous `undefined_value % 255 ==
        # undefined_value` wrongly rejected the valid byte value 255.
        assert isinstance(
            undefined_value, int) and 0 <= undefined_value <= 255, "undefined_value must be a byte"
        if array_constructor:
            self.array_constructor = array_constructor
        else:
            # set default constructor to array.array('B')
            self.array_constructor = make_bytearray
        self.pages = dict()
        self.pagesize = pagesize
        self.undefined_value = undefined_value

    def __getitem__(self, index):
        # Raises KeyError when the page holding *index* was never written.
        assert isinstance(index, int), "indices must be integers"
        dict_index = index // self.pagesize
        array_index = index % self.pagesize
        bytes_ = self.pages[dict_index]
        return bytes_[array_index]

    def __setitem__(self, index, item):
        assert isinstance(index, int), "indices must be integers"
        dict_index = index // self.pagesize
        array_index = index % self.pagesize
        try:
            bytes_ = self.pages[dict_index]
        except KeyError:
            # First write to this page: allocate it, pre-filled with the
            # undefined value.
            bytes_ = self.__makePage(dict_index)
        bytes_[array_index] = item

    def bytes_at(self, index, length):
        """ Return a bytes object of the content starting at index
        and ending at index + length - 1.

        When the whole range lives on one page the slice is taken directly;
        otherwise bytes are gathered one by one, stopping early at the first
        unallocated page.
        """
        ps = self.pagesize
        if (index + length) // ps == index // ps:
            p = self.pages[index // ps]
            return bytes(p[index % ps: (index + length) % ps])
        l = bytearray()
        # Fixed off-by-one: the loop previously stopped at
        # index + length - 2 and returned one byte too few.
        for i in range(index, index + length):
            try:
                l.append(self[i])
            except KeyError:
                break
        return bytes(l)

    def __makePage(self, index):
        """ Fills a page bytes """
        bytes_ = self.array_constructor(self.pagesize, self.undefined_value)
        self.pages[index] = bytes_
        return bytes_
# Ex050: develop a program that reads six integers and shows the sum of
# only the even ones; odd values are ignored.
# (Original statement, Portuguese: "Desenvolva um programa que leia seis
# números inteiros e mostre a soma apenas daqueles que forem pares.")
par = 0;
for c in range(1, 7):
    num = int(input('Digite um número: '))
    if num % 2 == 0:
        # Accumulate only even inputs.
        par = par + num
print(f'A soma dos números pares que você digitou foi {par}')
| StarcoderdataPython |
5185122 | <gh_stars>1-10
from absl.testing import absltest
from matplotlib import pyplot
import networkx as nx
from oblique import extmodule
from oblique import nxutils
class ApiTests(absltest.TestCase):
    """Smoke test for the oblique -> networkx conversion layer."""

    def test_convert_to_nx(self):
        # Parse a one-line task database, convert it, and check the result
        # is a directed networkx graph.
        db = extmodule.parse_string("""\
Conquer the world.
""")
        g = nxutils.convert_to_nx(db)
        self.assertIsInstance(g, nx.DiGraph)


if __name__ == '__main__':
    absltest.main()
| StarcoderdataPython |
4993549 | # MIT License
#
# Copyright (c) 2021 TrigonDev
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import annotations
import asyncio
from .task_manager import TaskManager
class IpcBase:
    """Base class for IpcClient and IpcServer: shared shutdown and
    readiness plumbing built on asyncio futures."""

    stop_future: asyncio.Future[None] | None
    ready_future: asyncio.Future[None] | None
    tasks: TaskManager

    def stop(self) -> None:
        """Tell the client/server to stop."""
        stop = self.stop_future
        ready = self.ready_future
        # Resolve the stop future (signals join()) and cancel the ready
        # future so waiters in wait_until_ready() are released.
        if stop is not None and not stop.done():
            stop.set_result(None)
        if ready is not None and not ready.done():
            ready.cancel()

    async def close(self) -> None:
        """Disconnect and close the client/server."""
        self.tasks.cancel_all()
        await self.tasks.wait_for_all()

    async def wait_until_ready(self) -> None:
        """Wait until the client/server is either ready or shutting down.

        Raises:
            asyncio.CancelledError: The client/server shut down before it
                was ready.
        """
        assert self.ready_future is not None
        await self.ready_future

    async def join(self) -> None:
        """Wait until the client/server is shutting down."""
        assert self.stop_future is not None
        await self.stop_future
| StarcoderdataPython |
3494993 | # Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rclpy
from rclpy.node import Node
import test_msgs.msg
class PubSubNode(Node):
    """Minimal ROS 2 node with one publisher ('~/pub') and one subscription
    ('~/sub'), both carrying test_msgs/Strings; incoming messages are
    discarded."""

    def __init__(self, name='pub_sub_node'):
        super().__init__(name)
        self.publisher = self.create_publisher(
            test_msgs.msg.Strings, '~/pub', 1
        )
        self.subscription = self.create_subscription(
            test_msgs.msg.Strings, '~/sub', lambda msg: None, 1
        )

    def destroy_node(self):
        # Explicitly destroy both endpoints before tearing down the node.
        self.publisher.destroy()
        self.subscription.destroy()
        super().destroy_node()
def main(args=None):
    """Spin the node until interrupted, then shut rclpy down cleanly."""
    rclpy.init(args=args)
    node = PubSubNode()
    try:
        rclpy.spin(node)
    except KeyboardInterrupt:
        print('node stopped cleanly')
    finally:
        # Always release node resources and the rclpy context, even when
        # spin() raised.
        node.destroy_node()
        rclpy.shutdown()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
11232364 | <reponame>kiteco/kiteco-public
import json
import string
def main():
    """Render README.md by filling report.txt's $coefficients placeholder
    with a markdown table built from params.json."""
    with open("report.txt", "r") as fp:
        template = string.Template(fp.read())
    with open("params.json", "r") as fp:
        params = json.load(fp)

    # Markdown table: header row, separator row, then one row per parameter.
    rows = ["parameter|value", "-|-"]
    for param, value in params.items():
        rows.append(f"{param}|{value}")
    table = "\n".join(rows)

    report = template.substitute(coefficients=table)
    with open("README.md", "w") as fp:
        fp.write(report)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
6572736 | <reponame>nkenealy/shopify-product-load<filename>app/api_1_0/variants.py
from flask import jsonify, request, g, url_for, current_app
from .. import db
from ..models import Post, Permission, Product, Variant, Image
from . import api
from .decorators import permission_required
import logging
@api.route('/variants/')
def get_variants():
    """Return one page of variants, newest first, as JSON with prev/next
    pagination links."""
    page = request.args.get('page', 1, type=int)
    # Fixed NameError: the model class was misspelled 'Vriant'.
    pagination = Variant.query.order_by(Variant.timestamp.desc()).paginate(
        # TODO: need to get new static variables for FLASKY_VARIANTS_PER_PAGE and FLASKY_PRODUCTS_PER_PAGE
        page, per_page=current_app.config['FLASKY_COMMENTS_PER_PAGE'],
        error_out=False)
    variants = pagination.items
    prev = None
    if pagination.has_prev:
        prev = url_for('api.get_variants', page=page-1, _external=True)
    next = None
    if pagination.has_next:
        next = url_for('api.get_variants', page=page+1, _external=True)
    # NOTE(review): the payload key is 'products' although it carries
    # variants; kept as-is because clients may depend on it.
    return jsonify({
        'products': [variant.to_json() for variant in variants],
        'prev': prev,
        'next': next,
        'count': pagination.total
    })
@api.route('/variants/', methods=['POST'])
#permission_required(Permission.WRITE_ARTICLES)
def new_variant():
    """Create a variant from the JSON request body; respond 201 with the
    created resource and a Location header."""
    variant = Variant.from_json(request.json)
    # Fixed: 'print variant' was Python 2 print-statement syntax -- a
    # SyntaxError under Python 3, which this codebase otherwise targets.
    print(variant)
    db.session.add(variant)
    db.session.commit()
    return jsonify(variant.to_json()), 201, \
        {'Location': url_for('api.get_variant', id=variant.id, _external=True)}
@api.route('/variants/<int:id>')
def get_variant(id):
    # Return a single variant as JSON; responds 404 for an unknown id.
    variant = Variant.query.get_or_404(id)
    return jsonify(variant.to_json())
@api.route('/products/<int:id>/variants/')
def get_product_variants(id):
    """Return one page of the given product's variants (oldest first)."""
    product = Product.query.get_or_404(id)
    page = request.args.get('page', 1, type=int)
    pagination = product.variants.order_by(Variant.timestamp.asc()).paginate(
        #TODO: replace static
        page, per_page=current_app.config['FLASKY_COMMENTS_PER_PAGE'],
        error_out=False)
    variants = pagination.items
    prev = None
    if pagination.has_prev:
        # NOTE(review): the links point at 'api.get_variants' (the global
        # listing) rather than this per-product endpoint -- confirm intent.
        prev = url_for('api.get_variants', page=page-1, _external=True)
    next = None
    if pagination.has_next:
        next = url_for('api.get_variants', page=page+1, _external=True)
    return jsonify({
        'products': [variant.to_json() for variant in variants],
        'prev': prev,
        'next': next,
        'count': pagination.total
    })
@api.route('/products/variants/', methods=['POST'])
#TODO: put permission back on and make it product or variant specific
#@permission_required(Permission.COMMENT)
def new_product_variant():
    """Create a variant and attach it to the product whose pos_product_id
    matches the posted payload."""
    variant = Variant.from_json(request.json)
    product = Product.query.filter_by(pos_product_id=variant.pos_product_id).first()
    # NOTE(review): basicConfig inside a request handler only takes effect
    # on the first call; configure logging at app startup instead.
    logging.basicConfig(filename='sep10.log',level=logging.DEBUG)
    logging.debug('This %s message should go to the log file',product)
    # NOTE(review): .first() presumably returns None for an unknown
    # pos_product_id, silently creating an orphaned variant -- confirm.
    variant.product = product
    db.session.add(variant)
    db.session.commit()
    return jsonify(variant.to_json()), 201, \
        {'Location': url_for('api.get_variant', id=variant.id,
                             _external=True)}
@api.route('/products/images/', methods=['POST'])
#TODO: put permission back on and make it product or variant specific
#@permission_required(Permission.COMMENT)
def new_product_image():
image = Image.from_json(request.json)
product = Product.query.filter_by(pos_product_id=image.pos_product_id).first()
logging.basicConfig(filename='sep10.log',level=logging.DEBUG)
logging.debug('This %s message should go to the log file',product)
image.product = product
db.session.add(image)
db.session.commit()
return jsonify(image.to_json()) | StarcoderdataPython |
from django.urls import path

from rango import views

# URL namespace used by {% url 'rango:...' %} and reverse().
app_name = 'rango'

urlpatterns = [
    path('', views.index, name='index'),
    path('about/', views.about, name='about'),
    # Category pages are addressed by slug, e.g. category/python/.
    path('category/<slug:category_name_slug>/',
         views.show_category, name='show_category'),
    path('add_category/', views.add_category, name='add_category'),
    path('category/<slug:category_name_slug>/add_page/',
         views.add_page, name='add_page'),
    # Authentication flows.
    path('register/', views.register, name='register'),
    path('login/', views.user_login, name='login'),
    path('restricted/', views.restricted, name='restricted'),
    path('logout/', views.user_logout, name='logout')
]
| StarcoderdataPython |
8024552 | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: DeepSeaVectorDraw
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class Color(object):
    """Generated accessor for the 4-byte DeepSeaVectorDraw.Color struct
    (RGBA, one unsigned byte per channel). Generated by flatc -- do not
    edit by hand; regenerate from the schema instead."""
    __slots__ = ['_tab']

    @classmethod
    def SizeOf(cls):
        return 4

    # Color
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # Color
    def R(self): return self._tab.Get(flatbuffers.number_types.Uint8Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(0))
    # Color
    def G(self): return self._tab.Get(flatbuffers.number_types.Uint8Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(1))
    # Color
    def B(self): return self._tab.Get(flatbuffers.number_types.Uint8Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(2))
    # Color
    def A(self): return self._tab.Get(flatbuffers.number_types.Uint8Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(3))


def CreateColor(builder, r, g, b, a):
    # Structs are serialized inline, last field first (hence a, b, g, r).
    builder.Prep(1, 4)
    builder.PrependUint8(a)
    builder.PrependUint8(b)
    builder.PrependUint8(g)
    builder.PrependUint8(r)
    return builder.Offset()
| StarcoderdataPython |
5075603 | <reponame>Philipnah/DailyCodingProblem<filename>Problem4[Medium]/main.py
def find_matches(query, candidates):
    """Return the candidates whose first len(query) characters equal
    *query* (a simple prefix search), preserving input order."""
    return [word for word in candidates if word.startswith(query)]


if __name__ == "__main__":
    searchQuery = input("Query: ")
    stringArray = ["dog", "deer", "deal"]
    # Previously the script built a parallel list of truncated strings and
    # compared element-wise; startswith() expresses the same prefix test,
    # and the __main__ guard keeps input() from running on import.
    response = find_matches(searchQuery, stringArray)
    print(response)
4889322 | <reponame>Qiwen-Yu/cool_ssg_generator<filename>cool_ssg/utils/config_util.py<gh_stars>0
from pathlib import Path
import json
import sys
def get_config(config_path, options):
    """Merge settings from a JSON config file into *options* and return it.

    'input' and 'stylesheets' values are wrapped in single-element lists
    (callers expect lists); 'lang' and 'output' are copied verbatim.
    Prints an error and exits with status 1 when the path is missing or
    not a .json file.
    """
    if Path(config_path).exists() and config_path.endswith(".json"):
        # Context manager so the handle is closed promptly (the previous
        # bare open() was never closed).
        with open(config_path, "r") as config_file:
            loadedConfig = json.load(config_file)
        if "input" in loadedConfig:
            options["input"] = [loadedConfig["input"]]
        if "stylesheets" in loadedConfig:
            options["stylesheets"] = [loadedConfig["stylesheets"]]
        if "lang" in loadedConfig:
            options["lang"] = loadedConfig["lang"]
        if "output" in loadedConfig:
            options["output"] = loadedConfig["output"]
        return options
    print("ERROR: Could not find config file")
    sys.exit(1)
def get_sidebar_config(config_path, options):
    """Load the 'sidebar' section from a JSON config file into *options*.

    A string 'items' value is normalised to a single-element list. For
    consistency with get_config, the merged *options* dict is now returned
    (previously None) and a missing/non-.json path reports an error and
    exits with status 1 (previously it fell through silently).
    """
    if Path(config_path).exists() and config_path.endswith(".json"):
        # Close the handle promptly instead of leaking it.
        with open(config_path, "r") as config_file:
            loadedConfig = json.load(config_file)
        options["sidebar"] = loadedConfig["sidebar"]
        options["sidebar"]["items"] = (
            [options["sidebar"]["items"]]
            if isinstance(options["sidebar"]["items"], str)
            else options["sidebar"]["items"]
        )
        return options
    print("ERROR: Could not find config file")
    sys.exit(1)
| StarcoderdataPython |
5184903 | <gh_stars>0
## import the necessary packages
from imutils.perspective import four_point_transform
from imutils import contours
import imutils
import cv2
import numpy as np
def measure():
    """Read a seven-segment LCD from the global `image` and return the
    displayed number as a string like "12.3" (all digits but the last,
    then a decimal point, then the final digit).

    NOTE(review): the cv2.imread/crop lines below are commented out, so
    `image` must be assigned at module level before calling this --
    otherwise the cvtColor line raises NameError.
    """
    # define the dictionary of digit segments so we can identify
    # each digit on the thermostat; tuple order is
    # (top, top-left, top-right, center, bottom-left, bottom-right, bottom)
    DIGITS_LOOKUP = {
        (1, 1, 1, 0, 1, 1, 1): 0,
        (0, 0, 1, 0, 0, 1, 0): 1,
        (1, 0, 1, 1, 1, 1, 0): 2,
        (1, 0, 1, 1, 0, 1, 1): 3,
        (0, 1, 1, 1, 0, 1, 0): 4,
        (1, 1, 0, 1, 0, 1, 1): 5,
        (1, 1, 0, 1, 1, 1, 1): 6,
        (1, 0, 1, 0, 0, 1, 0): 7,
        (1, 1, 1, 1, 1, 1, 1): 8,
        (1, 1, 1, 1, 0, 1, 1): 9
    }

    #image = cv2.imread("rangeMeasure.jpg")
    # crop THIS IS SPECIFIC TO THIS IMAGE!!!
    #image = image[2300:2700, 1400:1800] #y:y+h x:x+w

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Close small gaps in the segments before thresholding.
    kernel = np.ones((5,5), np.uint8)
    gray = cv2.dilate(gray, kernel, iterations=2)
    gray = cv2.erode(gray, kernel, iterations=2)
    # Otsu picks the binarisation threshold automatically.
    thresh = cv2.threshold(gray, 0, 255,
        cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (1, 5))
    thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)

    # find contours in the thresholded image, then initialize the
    # digit contours lists
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    # OpenCV 2 vs 3 return findContours results at different tuple indices.
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
    digitCnts = []

    # loop over the digit area candidates
    for c in cnts:
        # compute the bounding box of the contour
        (x, y, w, h) = cv2.boundingRect(c)
        # if the contour is sufficiently large, it must be a digit
        if w >= 20 and (h >= 40 and h <= 250): #this eliminates "." and anomolies
            digitCnts.append(c)

    #cv2.imshow("image", thresh)
    #cv2.waitKey(0)

    # sort the contours from left-to-right, then initialize the
    # actual digits themselves
    digitCnts = contours.sort_contours(digitCnts,
        method="left-to-right")[0]
    digits = []

    # loop over each of the digits
    for c in digitCnts:
        # extract the digit ROI
        (x, y, w, h) = cv2.boundingRect(c)
        # A narrow box is a "1": widen it so the segment grid still fits.
        if w <= 40: #account for "1" being small
            x = x-60
            w = 80
        roi = thresh[y:y + h, x:x + w]
        #cv2.imshow("roi", roi)
        #cv2.waitKey(0)

        # compute the width and height of each of the 7 segments
        # we are going to examine
        (roiH, roiW) = roi.shape
        (dW, dH) = (int(roiW * 0.25), int(roiH * 0.15))
        dHC = int(roiH * 0.05)

        # define the set of 7 segments
        segments = [
            ((0, 0), (w, dH)),  # top
            ((0, 0), (dW, h // 2)),  # top-left
            ((w - dW, 0), (w, h // 2)),  # top-right
            ((0, (h // 2) - dHC) , (w, (h // 2) + dHC)),  # center
            ((0, h // 2), (dW, h)),  # bottom-left
            ((w - dW, h // 2), (w, h)),  # bottom-right
            ((0, h - dH), (w, h))  # bottom
        ]
        on = [0] * len(segments)

        # loop over the segments
        for (i, ((xA, yA), (xB, yB))) in enumerate(segments):
            # extract the segment ROI, count the total number of
            # thresholded pixels in the segment, and then compute
            # the area of the segment
            segROI = roi[yA:yB, xA:xB]
            total = cv2.countNonZero(segROI)
            area = (xB - xA) * (yB - yA)
            # if the total number of non-zero pixels is greater than
            # 50% of the area, mark the segment as "on"
            if total / float(area) > 0.5:
                on[i]= 1

        # lookup the digit and draw it on the image
        digit = DIGITS_LOOKUP[tuple(on)]
        digits.append(digit)
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 1)
        cv2.putText(image, str(digit), (x - 10, y - 10),
            cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 255, 0), 2)

    # display the digits
    dist = "".join([str(digits[x]) for x in range(0, len(digits) - 1)]) + "." + str(digits[-1])
    print("Range: {}".format(dist))
    return dist

    # NOTE(review): everything below follows the return and is therefore
    # unreachable; kept as in the original.
    # print(u"{}{}.{} m".format(*digits))
    cv2.imshow("Output", image)
    cv2.waitKey(0)
| StarcoderdataPython |
41954 | <reponame>ventris/tateru<gh_stars>0
#!/usr/bin/env python3
# TODO: describe
# Copyright 2020 Tateru Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Module documentation consumed by ansible-doc. Fixed the "mopdule" typo
# in the short description. NOTE(review): `module: boot` looks like a
# copy/paste from another module -- confirm against the collection layout.
DOCUMENTATION = r'''
---
module: boot
short_description: Tateru installer address finder module
version_added: "0.0.1"
description: Tateru installer address module is used to find the address to the Tateru installer instance running on a given machine
options:
    machine:
        description: The machine name lookup.
        required: true
        type: str

extends_documentation_fragment:
    - tateru.deploy.installer_address

author:
    - Tateru Authors
'''

EXAMPLES = r'''
# Find address of the installer running at test1
- name: Wait for installer address for test1
  tateru.deploy.installer_address:
    machine: test1
  register: installer_address
'''

RETURN = r'''
address:
    description: The ephemeral address the installer is reachable by.
    type: str
    returned: always
    sample: '2001:0db8:85a3::8a2e:0370:7334'
port:
    description: The port to use to reach the installer.
    type: int
    returned: always
    sample: 22
'''
from ansible.module_utils.basic import AnsibleModule
import time
def run_module():
    """Ansible module body: validate arguments and return the (currently
    hard-coded) installer address/port for the requested machine."""
    module_args = dict(
        machine=dict(type='str', required=True),
    )

    result = dict(
        changed=False,
        address='',
        port=22,
    )

    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True
    )

    # In check mode, report the defaults without doing any lookup.
    if module.check_mode:
        module.exit_json(**result)

    # Placeholder values until real installer discovery is implemented.
    result['address'] = 'localhost'
    result['port'] = 5555

    # TODO: Fake wait to demo flow
    time.sleep(3)

    module.exit_json(**result)
def main():
    # Thin entry point so the module can be executed directly by Ansible.
    run_module()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
11392057 | from .virtual_gateway import VirtualGateway
| StarcoderdataPython |
3249986 | # -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import thinkplot
import thinkstats2
def read_statistics(year):
    """Load one year of EuroMillions draws (ball columns 1-5) and return
    them oldest-first as a DataFrame with columns '1'..'5'."""
    filename = ('./euromillions-past-draws-archive/'
                'euromillions-past-draws-archive{}.txt'.format(year))
    draws = pd.read_csv(filename, skiprows=2, usecols=[1, 2, 3, 4, 5],
                        sep='\t', names=['1', '2', '3', '4', '5'])
    # The archive lists newest draws first; reverse to chronological order.
    return draws.iloc[::-1]
#df = read_statistics(2016);
# Pool the draws from several years into one frame.
df = pd.concat([read_statistics(y) for y in [2012, 2013, 2014, 2015, 2016, 2017]])

# KDE of the first (lowest) ball, plus a fitted normal for the fifth ball.
sample_pdf = thinkstats2.EstimatedPdf(df['1'])
thinkplot.Pdf(sample_pdf, label='sample KDE')

mean1 = df['5'].mean()
std1 = df['5'].std()
normal1 = thinkstats2.NormalPdf(mean1, std1)
thinkplot.Pdf(normal1, label="normal1", color="red")
print(mean1)
print(std1)

"""
Correlation
"""
print( 'Spearman Correlation (%d,%d)->%f '% (1,2,df['1'].corr(df['2'], 'spearman') ) )
print( 'Spearman Correlation (%d,%d)->%f '% (1,3,df['1'].corr(df['3'], 'spearman') ) )
print( 'Spearman Correlation (%d,%d)->%f '% (1,4,df['1'].corr(df['4'], 'spearman') ) )
print( 'Spearman Correlation (%d,%d)->%f '% (1,5,df['1'].corr(df['5'], 'spearman') ) )
# NOTE(review): labelled Spearman but uses the default (Pearson) method --
# confirm which was intended.
print( 'Spearman Correlation (%d,%d)->%f '% (4,5,df['4'].corr(df['5']) ) )

# Jitter to de-quantise the integer ball values before scatter plots.
df['1'] = thinkstats2.Jitter(df['1'], 0.5)
df['2'] = thinkstats2.Jitter(df['2'], 0.5)
#thinkplot.Scatter(df['1'], df['2'], alpha=0.2)
#thinkplot.Show(xlabel='1', ylabel='2', axis=[1,50,1,50])
#thinkplot.HexBin(df['4'].values, df['5'].values)

"""
Correlation
"""
sample_pdf = thinkstats2.EstimatedPdf(df['2'])
thinkplot.Pdf(sample_pdf, label='sample KDE')
sample_pdf = thinkstats2.EstimatedPdf(df['3'])
thinkplot.Pdf(sample_pdf, label='sample KDE')
sample_pdf = thinkstats2.EstimatedPdf(df['4'])
thinkplot.Pdf(sample_pdf, label='sample KDE')
sample_pdf = thinkstats2.EstimatedPdf(df['5'])
thinkplot.Pdf(sample_pdf, label='sample KDE')

# Pool all five ball columns into one series for aggregate views.
df_tot = pd.concat([df[i] for i in '12345'])
sample_pdf = thinkstats2.EstimatedPdf(df_tot)
thinkplot.Pdf(sample_pdf, label='sample KDE')
pmf = thinkstats2.Pmf(df_tot);
thinkplot.Hist(pmf)
cdf = thinkstats2.Cdf(df_tot, label='actual')
#thinkplot.Cdf(cdf)
| StarcoderdataPython |
6410274 | <reponame>jjwatts/gigantum-client
# Copyright (c) 2017 FlashX, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import (Any, List, Dict, Optional)
from collections import OrderedDict
import json
from natsort import natsorted
from distutils.version import StrictVersion
from distutils.version import LooseVersion
from gtmcore.environment.packagemanager import PackageManager, PackageResult
from gtmcore.container.container import ContainerOperations
from gtmcore.container.exceptions import ContainerException
from gtmcore.labbook import LabBook
from gtmcore.logging import LMLogger
logger = LMLogger.get_logger()
class CondaPackageManagerBase(PackageManager):
"""Class to implement the conda package manager
"""
def __init__(self):
# String to be set in child classes indicating which python version you are checking. Typically should be either
# python 3.6* or python 2.7*
self.python_depends_str = None
# String of the name of the conda environment (e.g. py36 or py27, as created via container build)
self.python_env = None
    def search(self, search_str: str, labbook: LabBook, username: str) -> List[str]:
        """Method to search a package manager for packages based on a string. The string can be a partial string.
        Args:
            search_str: The string to search on
            labbook: Subject LabBook
            username: username of current user
        Returns:
            list(str): The list of package names that match the search string
        """
        # Add wildcard for search
        if search_str[-1] != '*':
            search_str = search_str + '*'
        try:
            result = ContainerOperations.run_command(f'conda search --json "{search_str}"',
                                                     labbook=labbook, username=username,
                                                     fallback_image=self.fallback_image(labbook))
        except ContainerException as e:
            # Container failures are logged and treated as "no results" rather
            # than surfaced to the caller.
            logger.error(e)
            return list()
        data = json.loads(result.decode())
        if 'exception_name' in data:
            if data.get('exception_name') in ['PackagesNotFoundError', 'PackageNotFoundError']:
                # This means you entered an invalid package name that didn't resolve to anything
                return list()
            else:
                raise Exception(f"An error occurred while searching for packages: {data.get('exception_name')}")
        if data:
            # conda returns {package_name: [build records...]}; only names are needed.
            return list(data.keys())
        else:
            return list()
    def list_versions(self, package_name: str, labbook: LabBook, username: str) -> List[str]:
        """Method to list all available versions of a package based on the package name
        Args:
            package_name: Name of the package to query
            labbook: Subject LabBook
            username: username of current user
        Returns:
            list(str): Version strings, newest first
        Raises:
            ValueError: if the package cannot be found
        """
        try:
            result = ContainerOperations.run_command(f"conda info --json {package_name}", labbook, username,
                                                     fallback_image=self.fallback_image(labbook))
            data = json.loads(result.decode())
        except ContainerException as e:
            logger.error(e)
            data = {}
        # TODO: Conda does not seem to throw this anymore. Remove once confirmed
        if 'exception_name' in data:
            raise ValueError(f"An error occurred while getting package versions: {data.get('exception_name')}")
        if len(data.keys()) == 0 or len(data.get(package_name)) == 0:
            raise ValueError(f"Package {package_name} not found")
        # Check to see if this is a python package. If so, filter based on the current version of python (set in child)
        if any([True for x in data.get(package_name) if self.python_depends_str in x.get('depends')]):
            versions = [x.get('version') for x in data.get(package_name) if self.python_depends_str in x.get('depends')]
        else:
            versions = [x.get('version') for x in data.get(package_name)]
        # Deduplicate while preserving order.
        versions = list(OrderedDict.fromkeys(versions))
        # Sort strictly when possible, falling back to progressively looser
        # schemes for non-PEP-ish version strings (e.g. "1.0rc1").
        try:
            versions.sort(key=StrictVersion)
        except ValueError as e:
            if 'invalid version number' in str(e):
                try:
                    versions.sort(key=LooseVersion)
                except Exception:
                    versions = natsorted(versions, key=lambda x: x.replace('.', '~') + 'z')
            else:
                raise e
        versions.reverse()
        return versions
    def latest_version(self, package_name: str, labbook: LabBook, username: str) -> str:
        """Method to get the latest version string for a package
        Uses a dry-run `conda install` so conda resolves the newest compatible
        version without changing the environment.
        Args:
            package_name: Name of the package to query
            labbook: Subject LabBook
            username: username of current user
        Returns:
            str: latest version string
        Raises:
            ValueError: if no version can be resolved for the package
        """
        result = ContainerOperations.run_command(f"conda install --dry-run --no-deps --json {package_name}",
                                                 labbook, username, override_image_tag=self.fallback_image(labbook))
        data = json.loads(result.decode().strip())
        if data.get('message') == 'All requested packages already installed.':
            # We enter this block if the given package_name is already installed to the latest version.
            # Then we have to retrieve the latest version using conda list
            result = ContainerOperations.run_command("conda list --json", labbook, username,
                                                     override_image_tag=self.fallback_image(labbook))
            data = json.loads(result.decode().strip())
            for pkg in data:
                if pkg.get('name') == package_name:
                    return pkg.get('version')
        else:
            if isinstance(data.get('actions'), dict) is True:
                # New method - added when bases updated to conda 4.5.1
                for p in data.get('actions').get('LINK'):
                    if p.get('name') == package_name:
                        return p.get("version")
            else:
                # legacy methods to handle older bases built on conda 4.3.31
                # (older conda returns a list of action dicts; the inner shape
                # of 'LINK' also differs between minor versions, hence the
                # nested fallback)
                try:
                    for p in [x.get('LINK')[0] for x in data.get('actions') if x]:
                        if p.get('name') == package_name:
                            return p.get("version")
                except Exception:
                    for p in [x.get('LINK') for x in data.get('actions') if x]:
                        if p.get('name') == package_name:
                            return p.get("version")
        # if you get here, failed to find the package in the result from conda
        raise ValueError(f"Could not retrieve version list for provided package name: {package_name}")
    def latest_versions(self, package_names: List[str], labbook: LabBook, username: str) -> List[str]:
        """Method to get the latest version string for a list of packages
        A single dry-run `conda install` resolves the whole list at once; any
        already-installed-and-latest packages are filled in afterwards from
        `conda list`.
        Args:
            package_names: list of names of the packages to query
            labbook: Subject LabBook
            username: username of current user
        Returns:
            list: latest version strings, in the same order as *package_names*
        Raises:
            ValueError: if any package name cannot be resolved
        """
        cmd = ['conda', 'install', '--dry-run', '--no-deps', '--json', *package_names]
        try:
            result = ContainerOperations.run_command(
                ' '.join(cmd), labbook, username, override_image_tag=self.fallback_image(labbook)
            ).decode().strip()
        except Exception as e:
            logger.error(e)
            pkgs = ", ".join(package_names)
            raise ValueError(f"Could not retrieve latest versions due to invalid package name in list: {pkgs}")
        # Empty string means "version not yet resolved" for that package.
        versions = {pn: "" for pn in package_names}
        if result:
            data = json.loads(result)
            if data.get('exception_name') == "PackagesNotFoundError":
                # Conda failed because of invalid packages. indicate failure
                err_pkgs = [x for x in data.get('packages')]
                raise ValueError(f"Could not retrieve latest versions due to invalid package name in list: {err_pkgs}")
            if data.get('actions') is not None:
                for package_name in package_names:
                    if isinstance(data.get('actions'), dict) is True:
                        # New method - added when bases updated to conda 4.5.1
                        for p in data.get('actions').get('LINK'):
                            if p.get('name') == package_name:
                                versions[package_name] = p.get("version")
                    else:
                        # legacy methods to handle older bases built on conda 4.3.31
                        try:
                            for p in [x.get('LINK')[0] for x in data.get('actions') if x]:
                                if p.get('name') == package_name:
                                    versions[package_name] = p.get("version")
                        except Exception as e:
                            for p in [x.get('LINK') for x in data.get('actions') if x]:
                                if p.get('name') == package_name:
                                    versions[package_name] = p.get("version")
        # For any packages whose versions could not be found (because they are installed and latest)
        # just look up the installed versions
        missing_keys = [k for k in versions.keys() if versions[k] == ""]
        if missing_keys:
            cmd = ['conda', 'list', '--no-pip', '--json']
            result = ContainerOperations.run_command(
                ' '.join(cmd), labbook, username, override_image_tag=self.fallback_image(labbook)).decode().strip()
            installed_info = json.loads(result)
            installed_versions = {pkg['name']: pkg['version'] for pkg in installed_info}
            for pn in missing_keys:
                versions[pn] = installed_versions[pn]
        # Reformat into list and return
        output_versions = [versions[p] for p in package_names]
        return output_versions
    def list_installed_packages(self, labbook: LabBook, username: str) -> List[Dict[str, str]]:
        """Method to get a list of all packages that are currently installed
        Note, this will return results for the computer/container in which it is executed. To get the properties of
        a LabBook container, a docker exec command would be needed from the Gigantum application container.
        return format is a list of dicts with the format (name: <package name>, version: <version string>)
        Returns:
            list
        """
        # --no-pip restricts the listing to conda-managed packages only.
        result = ContainerOperations.run_command(f"conda list --no-pip --json", labbook, username)
        data = json.loads(result.decode().strip())
        if data:
            return [{"name": x['name'], 'version': x['version']} for x in data]
        else:
            return []
    def list_available_updates(self, labbook: LabBook, username: str) -> List[Dict[str, str]]:
        """Method to get a list of all installed packages that could be updated and the new version string
        Note, this will return results for the computer/container in which it is executed. To get the properties of
        a LabBook container, a docker exec command would be needed from the Gigantum application container.
        return format is a list of dicts with the format
        {name: <package name>, version: <currently installed version string>, latest_version: <latest version string>}
        Returns:
            list
        """
        # This may never need to be used and is not currently used by the API.
        # Intentionally a stub: always returns an empty list. The commented
        # code below shows the intended implementation if it is ever needed.
        return []
        # res = ContainerOperations.run_command("conda search --json --outdated", labbook, username)
        # data = json.loads(res.decode().strip())
        # packages = [x for x in data if data.get(x)]
        # return packages
    def validate_packages(self, package_list: List[Dict[str, str]], labbook: LabBook, username: str) \
            -> List[PackageResult]:
        """Method to validate a list of packages, and if needed fill in any missing versions
        Should check both the provided package name and version. If the version is omitted, it should be generated
        from the latest version.
        Args:
            package_list(list): A list of dictionaries of packages to validate
            labbook(str): The labbook instance
            username(str): The username for the logged in user
        Returns:
            namedtuple: namedtuple indicating if the package and version are valid
        """
        # Build install string
        pkgs = list()
        for p in package_list:
            if p['version']:
                # Pin explicitly requested versions as name=version specs.
                pkgs.append(f"{p['package']}={p['version']}")
            else:
                pkgs.append(p['package'])
        # Dry-run install validates the whole batch in one conda invocation.
        cmd = ['conda', 'install', '--dry-run', '--no-deps', '--json', *pkgs]
        try:
            cmd_result = ContainerOperations.run_command(' '.join(cmd), labbook, username,
                                                         override_image_tag=self.fallback_image(labbook))
            container_result = cmd_result.decode().strip()
        except Exception as e:
            logger.error(e)
            raise ValueError(f"An error occured while validating packages")
        if not container_result:
            raise ValueError(f"Failed to get response from Docker while querying for package info")
        # Good to process
        data = json.loads(container_result)
        if data.get('exception_name') == "PackagesNotFoundError":
            # Conda failed because of invalid packages. indicate failures.
            # Mark exactly the specs conda rejected; the rest are still valid.
            result = list()
            for pkg_str, pkg_data in zip(pkgs, package_list):
                if pkg_str in data.get('packages'):
                    result.append(PackageResult(package=pkg_data['package'],
                                                version=pkg_data['version'],
                                                error=True))
                else:
                    result.append(PackageResult(package=pkg_data['package'],
                                                version=pkg_data['version'],
                                                error=False))
            return result
        # All packages are valid, collect data
        # Map package name -> resolved version from the dry-run 'LINK' actions.
        conda_data = dict()
        if isinstance(data.get('actions'), dict) is True:
            # New method - added when bases updated to conda 4.5.1
            for p in data.get('actions').get('LINK'):
                conda_data[p.get('name')] = p.get('version')
        else:
            # legacy methods to handle older bases built on conda 4.3.31
            try:
                for p in [x.get('LINK')[0] for x in data.get('actions') if x]:
                    conda_data[p.get('name')] = p.get('version')
            except Exception:
                for p in [x.get('LINK') for x in data.get('actions') if x]:
                    conda_data[p.get('name')] = p.get('version')
        # Return properly formatted data
        return [PackageResult(package=x['package'],
                              version=conda_data[x['package']],
                              error=False) for x in package_list]
def generate_docker_install_snippet(self, packages: List[Dict[str, str]], single_line: bool = False) -> List[str]:
"""Method to generate a docker snippet to install 1 or more packages
Note: Because conda be so slow to solve environments with conda-forge included, always single line it.
Args:
packages(list(dict)): A list of package names and versions to install
single_line(bool): If true, collapse
Returns:
list
"""
package_strings = [f"{x['name']}={x['version']}" for x in packages]
if single_line:
return [f"RUN conda install -yq {' '.join(package_strings)}"]
else:
return [f"RUN conda install -yq {' '.join(package_strings)}"]
class Conda3PackageManager(CondaPackageManagerBase):
    """Class to implement the conda3 package manager
    Specializes the base to the Python 3.6 environment (``py36``).
    """
    def __init__(self):
        super().__init__()
        # Used to filter conda build records down to py3.6-compatible ones.
        self.python_depends_str = 'python 3.6*'
        self.python_env = 'py36'
class Conda2PackageManager(CondaPackageManagerBase):
    """Class to implement the conda2 package manager
    Specializes the base to the Python 2.7 environment (``py27``).
    """
    def __init__(self):
        super().__init__()
        # Used to filter conda build records down to py2.7-compatible ones.
        self.python_depends_str = 'python 2.7*'
        self.python_env = 'py27'
| StarcoderdataPython |
6643228 | from pynput import keyboard
import RPi.GPIO as GPIO
import time
import cv2
def on_press(key):
    """Keyboard handler: drive the motor pins while an arrow key is held.

    Pins are driven active-LOW: setup pulls 31/32/33/35 HIGH (all stopped),
    then the pressed direction's pin is pulled LOW.
    Mapping (BOARD numbering): 32=forward, 33=backward, 31=left, 35=right.
    NOTE(review): GPIO.setmode/setup runs on every key press; it could be
    done once at startup -- confirm no side effect is relied upon.
    """
    try: k = key.char # single-char keys
    except: k = key.name # other keys
    if key == keyboard.Key.esc: return False # stop listener
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup(32,GPIO.OUT)
    GPIO.setup(31,GPIO.OUT)
    GPIO.setup(35,GPIO.OUT)
    GPIO.setup(33,GPIO.OUT)
    # Default every direction pin to HIGH (inactive) before asserting one.
    GPIO.output(32,GPIO.HIGH)
    GPIO.output(31,GPIO.HIGH)
    GPIO.output(33,GPIO.HIGH)
    GPIO.output(35,GPIO.HIGH)
    if k in ['up']: # keys interested
        # self.keys.append(k) # store it in global-like variable
        print('Key pressed: ' + k, ' going straight')
        #GPIO.output(36,GPIO.HIGH)
        GPIO.output(32,GPIO.LOW)
    if k in ['down']: # keys interested
        # self.keys.append(k) # store it in global-like variable
        print('Key pressed: ' + k, ' going back')
        #GPIO.output(32,GPIO.HIGH)
        GPIO.output(33,GPIO.LOW)
    if k in ['left']: # keys interested
        # self.keys.append(k) # store it in global-like variable
        print('Key pressed: ' + k, ' going left')
        #GPIO.output(35,GPIO.HIGH)
        GPIO.output(31,GPIO.LOW)
    if k in ['right']: # keys interested
        # self.keys.append(k) # store it in global-like variable
        print('Key pressed: ' + k, ' going right')
        #GPIO.output(37,GPIO.HIGH)
        GPIO.output(35,GPIO.LOW)
    if k == 'q':
        # 'q' releases all GPIO resources (robot must be re-initialized after).
        print('cleaning')
        GPIO.cleanup()
def on_release(key):
    """Keyboard handler: stop the motor for the released arrow key.

    Pulls the corresponding direction pin back HIGH (inactive).
    NOTE(review): this function sets up pins 32/36/35/37 but drives
    32/33/31/35 -- pins 33 and 31 are only configured in on_press, and 36/37
    are never driven. This works at runtime because on_press configured them
    first, but the setup list looks stale; confirm against the wiring.
    """
    try: k = key.char # single-char keys
    except: k = key.name # other keys
    if key == keyboard.Key.esc: return False # stop listener
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup(32,GPIO.OUT)
    GPIO.setup(36,GPIO.OUT)
    GPIO.setup(35,GPIO.OUT)
    GPIO.setup(37,GPIO.OUT)
    if k in ['up']: # keys interested
        # self.keys.append(k) # store it in global-like variable
        print('Key release: ' + k, ' stop going straight')
        GPIO.output(32,GPIO.HIGH)
    if k in ['down']: # keys interested
        # self.keys.append(k) # store it in global-like variable
        print('Key release: ' + k, ' stop going back')
        GPIO.output(33,GPIO.HIGH)
    if k in ['left']: # keys interested
        # self.keys.append(k) # store it in global-like variable
        print('Key release: ' + k, ' stop going left')
        GPIO.output(31,GPIO.HIGH)
    if k in ['right']: # keys interested
        # self.keys.append(k) # store it in global-like variable
        print('Key release: ' + k, ' stop going right')
        GPIO.output(35,GPIO.HIGH)
    #return False # remove this if want more keys
#GPIO.setmode(GPIO.BOARD)
#GPIO.setup(32,GPIO.OUT)
#GPIO.output(32,GPIO.LOW)
#time.sleep(3)
#GPIO.output(32,GPIO.HIGH)
# Run the keyboard listener on a background thread and block until Esc
# causes on_press/on_release to return False, which stops the listener.
lis = keyboard.Listener(on_press=on_press, on_release=on_release)
lis.start() # start to listen on a separate thread
lis.join()
5181877 | <reponame>qais-yousef/lisa
# Copyright 2015-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import print_function
from builtins import str
import os
import re
import matplotlib, tempfile
import trappy
from test_thermal import BaseTestThermal
class TestTrappy(BaseTestThermal):
    """Smoke tests for trappy's summary_plots()/compare_runs() plotting API.

    Plot output cannot be validated automatically, so most tests only assert
    that plotting does not raise for a variety of trace contents.
    """
    def __init__(self, *args, **kwargs):
        super(TestTrappy, self).__init__(*args, **kwargs)
        # cpumask string -> human-readable cluster name, matching the test trace
        self.map_label = {"00000000,00000039": "A53", "00000000,00000006": "A57"}
        self.actor_order = ["GPU", "A57", "A53"]
    def test_summary_plots(self):
        """Test summary_plots()
        Can't check that the graphs are ok, so just see that the method doesn't blow up"""
        trappy.summary_plots(self.actor_order, self.map_label)
        matplotlib.pyplot.close('all')
        trappy.summary_plots(self.actor_order, self.map_label, width=14,
                             title="Foo")
        matplotlib.pyplot.close('all')
    def test_summary_plots_bad_parameters(self):
        """When summary_plots() receives bad parameters, it offers an understandable error"""
        # Arguments deliberately swapped: the error message must name the
        # offending parameter.
        self.assertRaises(TypeError, trappy.summary_plots,
                          (self.map_label, self.actor_order))
        try:
            trappy.summary_plots(self.map_label, self.actor_order)
        except TypeError as exception:
            self.assertTrue("actor_order" in str(exception))
        else:
            self.fail()
        try:
            trappy.summary_plots(self.actor_order, self.actor_order)
        except TypeError as exception:
            self.assertTrue("map_label" in str(exception))
        else:
            self.fail()
    def test_summary_other_dir(self):
        """Test summary_plots() with another directory"""
        other_random_dir = tempfile.mkdtemp()
        os.chdir(other_random_dir)
        trappy.summary_plots(self.actor_order, self.map_label, path=self.out_dir)
        matplotlib.pyplot.close('all')
        # Sanity check that the test actually ran from another directory
        self.assertEqual(os.getcwd(), other_random_dir)
    def test_summary_plots_only_power_allocator_trace(self):
        """Test that summary_plots() work if there is only power allocator
        trace"""
        # Strip out "thermal_temperature" from the trace
        trace_out = ""
        with open("trace.txt") as fin:
            for line in fin:
                if not re.search("thermal_temperature:", line):
                    trace_out += line
        with open("trace.txt", "w") as fout:
            fout.write(trace_out)
        trappy.summary_plots(self.actor_order, self.map_label)
        matplotlib.pyplot.close('all')
    def test_summary_plots_no_gpu(self):
        """summary_plots() works if there is no GPU trace"""
        # Strip out devfreq traces
        trace_out = ""
        with open("trace.txt") as fin:
            for line in fin:
                if ("thermal_power_devfreq_get_power:" not in line) and \
                   ("thermal_power_devfreq_limit:" not in line):
                    trace_out += line
        with open("trace.txt", "w") as fout:
            fout.write(trace_out)
        trappy.summary_plots(self.actor_order, self.map_label)
        matplotlib.pyplot.close('all')
    def test_summary_plots_one_actor(self):
        """summary_plots() works if there is only one actor"""
        # Strip out devfreq and little traces
        trace_out = ""
        with open("trace.txt") as fin:
            for line in fin:
                if ("thermal_power_devfreq_get_power:" not in line) and \
                   ("thermal_power_devfreq_limit:" not in line) and \
                   ("thermal_power_cpu_get_power: cpus=00000000,00000039" not in line) and \
                   ("thermal_power_cpu_limit: cpus=00000000,00000039" not in line):
                    trace_out += line
        with open("trace.txt", "w") as fout:
            fout.write(trace_out)
        # Only the A57 cluster remains in the trace after filtering.
        map_label = {"00000000,00000006": "A57"}
        trappy.summary_plots(self.actor_order, map_label)
        matplotlib.pyplot.close('all')
    def test_compare_runs(self):
        """Basic compare_runs() functionality"""
        trappy.compare_runs(self.actor_order, self.map_label,
                            runs=[("new", "."), ("old", self.out_dir)])
        matplotlib.pyplot.close('all')
# NOTE(review): this fragment depends on names defined elsewhere in the
# project (_State, State, coord, pathfind, r, napgold, pump, sleep).
aktivirao = _State(0)  # activation flag ("aktivirao" is Serbian for "activated")
weight = 8
def run():
    """Drive to the goldenium dispenser, grab it, then back away.

    Returns False (abort) if goldenium has not been activated yet or if
    path-finding to the approach point fails; on success sets
    State.goldenium_picked.
    """
    if not State.goldenium_activated.val:
        return False
    x,y=coord('goldenium')
    #r.goto(*coord('goldenium'))
    # Approach point is offset from the goldenium coordinates.
    if not pathfind(x-16,y+8,-1):
        return False
    r.absrot(-90)
    napgold(2)
    sleep(0.3)
    pump(2,1)
    # TODO: implement a stack instead of this (original: "Implementirati stak umesto ovoga")
    r.speed(60)
    r.forward(96)
    sleep(0.3)
    r.speed(140)
    r.forward(-86)
    sleep(0.2)
    napgold(0)
    State.goldenium_picked.val = 1
| StarcoderdataPython |
8149937 | import json
import pandas as pd
import degiroapi
from degiroapi.product import Product
from degiroapi.order import Order
from degiroapi.utils import pretty_json
# Load DEGIRO credentials from the local config file.
# FIX: raw string so the backslashes in the Windows path are not treated
# as (invalid) escape sequences.
with open(r'c:\git\degiro\config.json', 'r') as file:
    data = json.load(file)

user = data['degiro_user']
# NOTE(review): the original read data['<PASSWORD>'] -- an anonymisation
# artifact that cannot match any real key. 'degiro_password' follows the
# 'degiro_user' key convention; confirm against config.json.
password = data['degiro_password']

degiro = degiroapi.DeGiro()
degiro.login(user, password)

# Fetch the portfolio and enrich each position with its product metadata.
portfolio = pd.DataFrame(degiro.getdata(degiroapi.Data.Type.PORTFOLIO, False))
tickerinfo = portfolio.apply(lambda row: degiro.product_info(row['id']), axis=1, result_type='expand')
portfolio[tickerinfo.columns] = tickerinfo
del(tickerinfo)

degiro.logout()
5029078 | # Copyright 2018-Present Shanghai Yitu Technology Co., Ltd.
# Licensed under the Apache License, Version 2.0 | StarcoderdataPython |
1826725 | <reponame>cjoakim/azure-datagen<filename>python/main.py
"""
Usage:
python main.py gen_customers <count>
python main.py gen_products <count>
python main.py gen_aus_online_txn 2021-02-25 2022-02-25 100 > data\online_txn.json
python main.py gen_aus_flybuy_txn 2021-02-25 2022-02-25 100 > data\flybuy_txn.json
Options:
-h --help Show this screen.
--version Show version.
"""
__author__ = '<NAME>'
__email__ = "<EMAIL>"
__license__ = "MIT"
__version__ = "February 2022"
import csv
import json
import os
import random
import sys
import uuid
import arrow
from docopt import docopt
from faker import Faker # https://faker.readthedocs.io/en/master/index.html
from pysrc.env import Env
from pysrc.excel import Excel
from pysrc.fs import FS
from pysrc.template import Template
def gen_customers(count):
    """Generate *count* fake customer records and write them to
    data/customers.json, one JSON object per line."""
    print('gen_customers, {}'.format(count))
    faker = Faker()
    out_lines = list()
    for _ in range(count):
        given = faker.first_name().replace(',', ' ')
        surname = faker.last_name().replace(',', ' ')
        # Commas are stripped from free-text fields so the output stays
        # friendly to downstream CSV-ish tooling.
        record = {
            'customer_id': str(uuid.uuid4()),
            'first_name': given,
            'last_name': surname,
            'full_name': '{} {}'.format(given, surname),
            'address': faker.street_address().replace(',', ' '),
            'city': faker.city(),
            'state': faker.state_abbr(),
        }
        out_lines.append(json.dumps(record))
    write_lines('data/customers.json', out_lines)
def gen_products(count):
    """Generate *count* fake product records (unique UPC, description, price)
    and write them to data/products.json, one JSON object per line."""
    print('gen_products, {}'.format(count))
    fake = Faker()
    json_lines = list()
    upc_dict = dict()   # tracks issued UPCs so every product gets a unique one
    for idx in range(count):
        price = random_price(fake)
        obj = dict()
        obj['seq_num'] = idx + 1
        # BUG FIX: the original called random_upc() twice per product and
        # discarded the first result, needlessly burning an entry in upc_dict.
        obj['upc'] = random_upc(upc_dict, fake)
        obj['desc'] = ' '.join(fake.words(nb=5)).strip()
        obj['price'] = float('{:.2f}'.format(price))
        json_lines.append(json.dumps(obj))
    write_lines('data/products.json', json_lines)
def gen_aus_online_txn(start_date, end_date, avg_per_day):
    """Print (to stdout) one JSON online transaction per line for every
    calendar day in [start_date, end_date].

    The per-day count is drawn uniformly from 75%..125% of *avg_per_day*.
    Requires data/calendar.csv, data/customers.json and data/products.json
    to exist (see gen_customers/gen_products).
    """
    calendar_days = read_csv('data/calendar.csv')
    customers = read_json_objects('data/customers.json')
    products = read_json_objects('data/products.json')
    # Jitter daily volume within +/-25% of the requested average.
    min = int(float(avg_per_day * 0.75))
    max = int(float(avg_per_day * 1.25))
    for date_row in calendar_days:
        date = date_row[1]  # column 1 of calendar.csv holds the ISO date string
        if date >= start_date:
            if date <= end_date:
                count = random.randint(min, max)
                for i in range(count):
                    now = arrow.utcnow()  # NOTE(review): unused
                    customer = random_list_element(customers)
                    product = random_list_element(products)
                    id_pk = str(uuid.uuid4())
                    txn = dict()
                    txn['id'] = id_pk
                    txn['pk'] = id_pk  # Cosmos-style doc: id doubles as partition key
                    txn['ccpID'] = customer['customer_id']
                    txn['productID'] = product['upc']
                    txn['productDesc'] = product['desc']
                    txn['productQty'] = random_int(1, 5)
                    txn['transactionDate'] = random_utc_time(date)
                    print(json.dumps(txn))
def gen_aus_flybuy_txn(start_date, end_date, avg_per_day):
    """Print (to stdout) one JSON flybuy transaction per line for every
    calendar day in [start_date, end_date].

    NOTE(review): identical to gen_aus_online_txn except the customer field
    is named 'flybuyID' instead of 'ccpID' -- a shared helper taking the
    field name would remove the duplication.
    """
    calendar_days = read_csv('data/calendar.csv')
    customers = read_json_objects('data/customers.json')
    products = read_json_objects('data/products.json')
    # Jitter daily volume within +/-25% of the requested average.
    min = int(float(avg_per_day * 0.75))
    max = int(float(avg_per_day * 1.25))
    for date_row in calendar_days:
        date = date_row[1]  # column 1 of calendar.csv holds the ISO date string
        if date >= start_date:
            if date <= end_date:
                count = random.randint(min, max)
                for i in range(count):
                    now = arrow.utcnow()  # NOTE(review): unused
                    customer = random_list_element(customers)
                    product = random_list_element(products)
                    id_pk = str(uuid.uuid4())
                    txn = dict()
                    txn['id'] = id_pk
                    txn['pk'] = id_pk  # Cosmos-style doc: id doubles as partition key
                    txn['flybuyID'] = customer['customer_id']
                    txn['productID'] = product['upc']
                    txn['productDesc'] = product['desc']
                    txn['productQty'] = random_int(1, 5)
                    txn['transactionDate'] = random_utc_time(date)
                    print(json.dumps(txn))
def random_int(min, max):
    """Return a pseudo-random integer N with min <= N <= max (inclusive)."""
    # randrange over the half-open interval [min, max+1) is exactly
    # random.randint(min, max) and consumes the RNG stream identically.
    return random.randrange(min, max + 1)
def random_list_element(elements):
    """Return a uniformly random element of *elements* (must be non-empty)."""
    # IDIOM FIX: random.choice replaces the hand-rolled randint-index lookup;
    # it draws the index the same way randint(0, len-1) did, so the RNG
    # stream is unchanged.
    return random.choice(elements)
def random_utc_time(date):
    """Return an ISO-8601 UTC timestamp whose date part is *date* and whose
    time-of-day comes from a random epoch second.

    NOTE(review): 1645824823 is roughly 2022-02-25; only the time portion of
    the random instant survives, because the date token is overwritten below.
    """
    epoch = random_int(0, 1645824823)
    # arrow renders the instant as 'YYYY-MM-DDTHH:MM:SS...'; swap the date part.
    tokens = str(arrow.get(epoch)).split('T')
    tokens[0] = date
    return 'T'.join(tokens)
def random_zero_padded_int(min, max):
    """Return a random integer in [min, max] as a string left-padded with
    zeros to at least two digits (e.g. 7 -> '07', 12 -> '12')."""
    # IDIOM FIX: format-spec padding replaces the manual "'0' + str(i)"
    # branch; it also renders negative values sensibly ('-5' instead of
    # the old '0-5').
    return '{:02d}'.format(random.randint(min, max))
def random_utc_seconds():
    """NOTE(review): dead/unfinished code -- draws a random float and
    discards it (implicitly returns None). Nothing in this module calls it;
    either finish the implementation or delete the function."""
    f = random.uniform(1.5, 1.9)
def random_upc(upc_dict, fake):
    """Return a fake EAN-13 code not previously issued, recording it in
    *upc_dict* so subsequent calls never repeat it."""
    while True:
        candidate = fake.localized_ean13()
        if candidate not in upc_dict:
            # Unseen code: claim it and stop retrying.
            upc_dict[candidate] = candidate
            return candidate
def random_price(fake):
    """Return a random positive price (float) between 1 and 1500."""
    # pyfloat(left_digits=None, right_digits=None, positive=False, min_value=None, max_value=None)
    return fake.pyfloat(positive=True, min_value=1, max_value=1500)
def read_json_objects(infile):
    """Read a JSON-lines file and return the parsed objects as a list.

    Blank lines are skipped.
    BUG FIX: the original skipped any line of 3 characters or fewer, which
    silently dropped tiny-but-valid documents such as '{}', '[]' or '[1]';
    now only genuinely empty lines are ignored.
    """
    objects = list()
    with open(infile, 'rt') as f:
        for line in f:
            stripped = line.strip()
            if stripped:
                objects.append(json.loads(stripped))
    return objects
def text_file_iterator(infile):
    """Lazily yield each line of *infile*, stripped of surrounding whitespace."""
    # Generator keeps memory flat regardless of file size; the file handle
    # stays open until the generator is exhausted or garbage-collected.
    with open(infile, 'rt') as handle:
        yield from map(str.strip, handle)
def write_lines(outfile, lines):
    """Write every entry of *lines* (whitespace-stripped) to *outfile*,
    one per line, then announce the path on stdout."""
    with open(outfile, 'wt') as handle:
        handle.writelines('{}\n'.format(entry.strip()) for entry in lines)
    print('file_written: {}'.format(outfile))
def read_csv(infile, reader='default', delim=',', dialect='excel', skip=0):
    """Read *infile* and return its rows.

    reader='dict' returns dict-like rows via csv.DictReader; anything else
    returns plain lists via csv.reader, skipping the first *skip* rows.

    NOTE(review): *skip* is ignored in dict mode and *dialect* is ignored in
    default mode -- confirm whether that asymmetry is intentional.
    """
    rows = list()
    if reader == 'dict':
        with open(infile, 'rt') as csvfile:
            rdr = csv.DictReader(csvfile, dialect=dialect, delimiter=delim)
            for row in rdr:
                rows.append(row)
    else:
        with open(infile) as csvfile:
            rdr = csv.reader(csvfile, delimiter=delim)
            for idx, row in enumerate(rdr):
                if idx >= skip:
                    rows.append(row)
    return rows
def read_json(infile):
    """Parse *infile* as JSON and return the resulting object."""
    # json.load reads straight from the handle instead of read()+loads.
    with open(infile, 'rt') as handle:
        return json.load(handle)
def write_obj_as_json_file(outfile, obj):
    """Serialize *obj* as 2-space-indented JSON into *outfile* and announce
    the path on stdout."""
    with open(outfile, 'wt') as handle:
        handle.write(json.dumps(obj, sort_keys=False, indent=2))
    print("file written: " + outfile)
def print_options(msg):
    """Print *msg*, then re-parse and print the docopt argument map so the
    user sees the usage text alongside the error."""
    print(msg)
    arguments = docopt(__doc__, version=__version__)
    print(arguments)
if __name__ == "__main__":
    # CLI dispatch: first positional arg selects the generator function,
    # remaining args are function-specific (see module docstring for usage).
    func = sys.argv[1].lower()
    if func == 'gen_customers':
        count = int(sys.argv[2])
        gen_customers(count)
    elif func == 'gen_products':
        count = int(sys.argv[2])
        gen_products(count)
    elif func == 'gen_aus_online_txn':
        start_date = sys.argv[2]
        end_date = sys.argv[3]
        avg_per_day = int(sys.argv[4])
        gen_aus_online_txn(start_date, end_date, avg_per_day)
    elif func == 'gen_aus_flybuy_txn':
        start_date = sys.argv[2]
        end_date = sys.argv[3]
        avg_per_day = int(sys.argv[4])
        gen_aus_flybuy_txn(start_date, end_date, avg_per_day)
    else:
        print_options('Error: invalid function: {}'.format(func))
| StarcoderdataPython |
1944361 | <gh_stars>100-1000
#! /usr/bin/env python3
"""Downloads project source checkouts for integration as samples"""
from __future__ import absolute_import
from __future__ import print_function
import os, subprocess
SAMPLE_DIRECTORY = '.samples'
class BaseSource(object):
    """Common behaviour for a sample-project checkout.

    Subclasses supply ``checkout_command`` and ``update_command`` properties,
    one per version-control system.
    """

    def __init__(self, root, project=None, dirname=None):
        self.root = root
        self.project = project
        # Default the working-directory name to the project name.
        self.dirname = project if dirname is None else dirname

    def checkout(self):
        """Perform a fresh checkout into self.dirname."""
        cmd = self.checkout_command
        print(cmd)
        subprocess.check_output(cmd, shell=True)

    def update(self):
        """Update an existing checkout, falling back to a fresh checkout."""
        if not os.path.exists(self.dirname):
            self.checkout()
            return
        previous = os.getcwd()
        os.chdir(self.dirname)
        try:
            cmd = self.update_command
            print(cmd)
            subprocess.check_output(cmd, shell=True)
        finally:
            # Always restore the working directory for the next project.
            os.chdir(previous)
class CVSSource( BaseSource ):
    """Checkout/update a project from a CVS pserver repository."""
    @property
    def checkout_command( self ):
        assert self.project
        assert self.root
        assert self.dirname
        return 'cvs -d%s co -d%s %s'%(
            self.root, self.dirname, self.project,
        )
    @property
    def update_command( self ):
        # -C discards local modifications in favour of the repository copy.
        return 'cvs up -C'
class SVNSource( BaseSource ):
    """Checkout/update a project from a Subversion repository URL."""
    @property
    def checkout_command( self ):
        assert self.root
        assert self.dirname
        return 'svn co %s %s'%(
            self.root, self.dirname,
        )
    @property
    def update_command( self ):
        return 'svn up'
class BZRSource( BaseSource ):
    """Checkout/update a project from a Bazaar branch."""
    @property
    def checkout_command( self ):
        assert self.root
        assert self.dirname
        # Lightweight checkout: working tree only, no local branch history.
        return 'bzr checkout --lightweight %s %s'%(
            self.root, self.dirname,
        )
    @property
    def update_command( self ):
        return 'bzr update'
class HgSource( BaseSource ):
    """Checkout/update a project from a Mercurial repository."""
    @property
    def checkout_command( self ):
        assert self.root
        assert self.dirname
        return 'hg clone %s %s'%(
            self.root, self.dirname,
        )
    @property
    def update_command( self ):
        """Note: requires enabling the fetch extension (sigh)"""
        return 'hg pull && hg update'
class GITSource( BaseSource ):
    """Checkout/update a project from a Git repository."""
    @property
    def checkout_command( self ):
        assert self.root
        assert self.dirname
        return 'git clone %s %s'%(
            self.root, self.dirname,
        )
    @property
    def update_command( self ):
        return 'git pull'
checkouts = [
GITSource(
'https://github.com/mcfletch/openglcontext.git',
'OpenGLContext',
),
GITSource(
'https://github.com/mcfletch/pyopengl-demo.git',
'PyOpenGL-Demo',
),
# CVSSource(
# ':pserver:anonymous@glinter.cvs.sourceforge.net:/cvsroot/glinter',
# 'Glinter',
# ),
# CVSSource(
# ':pserver:<EMAIL>@py<EMAIL>:/cvsroot/pybzedit',
# 'pybzedit',
# ),
# CVSSource(
# ':pserver:anonymous@pyui.cvs.sourceforge.net:/cvsroot/pyui',
# 'PyUIcvs',
# 'pyui',
# ),
GITSource(
'https://github.com/Ripsnorta/pyui2.git',
'pyui2',
),
# SVNSource(
# 'https://svn.code.sf.net/p/pymmlib/code/trunk',
# 'pymmlib',
# ),
GITSource(
'https://github.com/masci/mmLib.git',
dirname = 'mmlib',
),
# SVNSource(
# 'http://visionegg.org/svn/trunk/visionegg',
# dirname = 'visionegg',
# ),
GITSource(
'git://github.com/visionegg/visionegg.git',
dirname = 'visionegg',
),
GITSource(
'git://github.com/tito/pymt.git',
dirname = 'pymt',
),
GITSource(
'git://github.com/rossant/galry.git',
dirname = 'galry',
),
# SVNSource(
# 'http://svn.gnome.org/svn/gnome-games/trunk/glchess',
# dirname = 'glchess',
# ),
GITSource(
'https://github.com/sparkslabs/kamaelia.git',
dirname = 'kamaelia',
),
GITSource(
'https://github.com/philippTheCat/pyggel.git',
dirname = 'pyggel',
),
GITSource(
'https://github.com/RyanHope/PyGL2D.git',
dirname = 'pygl2d',
),
BZRSource(
'https://code.launchpad.net/~bebraw/scocca/devel',
dirname = 'scocca',
),
GITSource(
'https://github.com/tartley/gltutpy.git',
dirname = 'gltutpy',
),
GITSource(
'https://github.com/tartley/algorithmic-generation-of-opengl-geometry.git',
dirname = 'agog',
),
GITSource(
'https://github.com/tartley/gloopy.git',
dirname = 'gloopy',
),
GITSource(
'https://github.com/almarklein/visvis',
dirname = 'visvis',
),
HgSource(
'https://bitbucket.org/rndblnch/opengl-programmable/',
dirname = 'programmable',
),
GITSource(
'https://github.com/mmatl/pyrender.git',
dirname='pyrender',
),
# pymol # not pyopengl AFAICS
# {LGPL} mirra # no online view of code AFAICS
# soccerbots http://soccerbots.googlecode.com/svn/
# enough http://enough.googlecode.com/svn/ trunk/
# flyback http://flyback.googlecode.com/svn/ trunk/
# threeDS
# pyODE (examples)
# Dice3DS (BSD)
# KeyJnote (GPL)
# PyGauntlet (GPL) http://pygauntlet.googlecode.com/svn
# LPhoto (GPL) lphoto_2.0.42-0.0.0.45.lindows0.1.tar.gz
# http://crystaltowers.googlecode.com/svn/ trunk/
### beryl-mesa-0.1.4.tar.bz2
### Mesa source distribution mesa/glapi
]
if __name__ == "__main__":
if not os.path.exists(SAMPLE_DIRECTORY):
os.makedirs(SAMPLE_DIRECTORY)
os.chdir( '.samples' )
for checkout in checkouts:
print(('Project:', checkout.dirname))
checkout.update()
| StarcoderdataPython |
359356 | <reponame>ADGiuliano/DevAndCompForRecSys2016
"""
@author: <NAME>
@contact: <EMAIL>
@organization: University of Padua
"""
import cPickle as Pickle
from Functions import Check_File as cf
# File containing the class responsible for saving/loading the various dictionaries to/from .pik files
class Pickle_operator:
    """Persist and restore the recommender's dictionaries/lists as .pik files.

    Every data structure has a fixed path under ``Files_pik/`` and a trio of
    ``check_*`` / ``save_*`` / ``load_*`` methods.  All public method names are
    preserved for existing callers; the actual pickling is centralised in the
    private ``_save`` / ``_load`` helpers, which also guarantee the file is
    closed even if (un)pickling raises.
    """
    def __init__(self):
        # Fixed on-disk locations for every pickled structure.
        self.users_d_path = 'Files_pik/users_d_lite.pik'
        self.int_l_path = 'Files_pik/int_l_lite.pik'
        self.int_d_path = 'Files_pik/int_d_lite.pik'
        self.item_imp_l_path = 'Files_pik/item_imp_l_lite.pik'
        self.imp_l_path = 'Files_pik/imp_l_lite.pik'
        self.imp_d_path = 'Files_pik/imp_d_lite.pik'
        self.item_score_d_path = 'Files_pik/item_score_d_lite.pik'
        self.item_list_path = 'Files_pik/item_list_lite.pik'
        self.users_list_path = 'Files_pik/users_list_lite.pik'
        self.filtered_pik_path = 'Files_pik/filtered.pik'
    def _save(self, path, obj):
        # Pickle *obj* to *path*; 'with' closes the file even on error.
        with open(path, 'wb') as fileobj:
            Pickle.dump(obj, fileobj)
    def _load(self, path):
        # Return the object pickled at *path*.
        with open(path, 'rb') as pickle_file:
            return Pickle.load(pickle_file)
    def check_users_dict_file(self):
        return cf.check(self.users_d_path)
    def save_users_dict(self, obj):
        self._save(self.users_d_path, obj)
    def load_users_dict(self):
        return self._load(self.users_d_path)
    def check_int_list_file(self):
        return cf.check(self.int_l_path)
    def save_int_list(self, obj):
        self._save(self.int_l_path, obj)
    def load_int_list(self):
        return self._load(self.int_l_path)
    def check_int_dist_file(self):
        return cf.check(self.int_d_path)
    def save_int_dist(self, obj):
        self._save(self.int_d_path, obj)
    def load_int_dist(self):
        return self._load(self.int_d_path)
    def check_item_imp_list_file(self):
        return cf.check(self.item_imp_l_path)
    def save_item_imp_list(self, obj):
        self._save(self.item_imp_l_path, obj)
    def load_item_imp_list(self):
        return self._load(self.item_imp_l_path)
    def check_imp_list_file(self):
        return cf.check(self.imp_l_path)
    def save_imp_list(self, obj):
        self._save(self.imp_l_path, obj)
    def load_imp_list(self):
        return self._load(self.imp_l_path)
    def check_imp_dict_file(self):
        return cf.check(self.imp_d_path)
    def save_imp_dict(self, obj):
        self._save(self.imp_d_path, obj)
    def load_imp_dict(self):
        return self._load(self.imp_d_path)
    def check_item_score_dict_file(self):
        return cf.check(self.item_score_d_path)
    def save_item_score_dict(self, obj):
        self._save(self.item_score_d_path, obj)
    def load_item_score_dict(self):
        return self._load(self.item_score_d_path)
    def check_item_list_file(self):
        return cf.check(self.item_list_path)
    def save_item_list(self, obj):
        self._save(self.item_list_path, obj)
    def load_item_list(self):
        return self._load(self.item_list_path)
    def check_users_list_file(self):
        return cf.check(self.users_list_path)
    def save_users_list(self, obj):
        self._save(self.users_list_path, obj)
    def load_users_list(self):
        return self._load(self.users_list_path)
    def check_filtered_file(self):
        return cf.check(self.filtered_pik_path)
    def save_filtered_file(self, obj):
        self._save(self.filtered_pik_path, obj)
    def load_filtered_file(self):
        return self._load(self.filtered_pik_path)
8136204 | import sqlite3
def create_splits(symbol, date, from_factor, to_factor):
    """Insert a stock-split row into the ``splits`` table.

    Args:
        symbol: Ticker symbol the split applies to.
        date: Date of the split.
        from_factor: Numerator of the split ratio.
        to_factor: Denominator of the split ratio.

    Returns:
        int: rowid of the inserted row (``cursor.lastrowid``).
    """
    conn = sqlite3.connect('robinhood.db')
    try:
        # Parameterized query instead of string formatting: prevents SQL
        # injection and handles quoting of the values correctly.
        cursor = conn.execute(
            "INSERT INTO splits (symbol, date, from_factor, to_factor)"
            " VALUES (?, ?, ?, ?);",
            (symbol, date, from_factor, to_factor),
        )
        conn.commit()
        return cursor.lastrowid
    finally:
        # Close the connection even if the INSERT raises.
        conn.close()
def get_splits(symbol):
    """Return all split rows recorded for *symbol*.

    Args:
        symbol: Ticker symbol to look up.

    Returns:
        list[tuple]: ``(symbol, date, from_factor, to_factor)`` tuples;
        empty list when no splits exist for the symbol.
    """
    conn = sqlite3.connect('robinhood.db')
    try:
        # Parameterized query instead of string formatting (SQL injection fix).
        cursor = conn.execute(
            "SELECT symbol, date, from_factor, to_factor FROM splits"
            " WHERE symbol = ?;",
            (symbol,),
        )
        # Note: no commit needed — this is a read-only query.
        return cursor.fetchall()
    finally:
        conn.close()
| StarcoderdataPython |
3299963 | <reponame>rootless4real/cozmo-python-sdk<filename>src/cozmo/event.py
# Copyright (c) 2016 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Event dispatch system.
The SDK is based around the dispatch and observation of events.
Objects inheriting from the :class:`Dispatcher` generate and
dispatch events as the state of the robot and its world are updated.
For example the :class:`cozmo.objects.LightCube` class generates an
:class:`~cozmo.objects.EvtObjectTapped` event anytime the cube the object
represents is tapped.
The event can be observed in a number of different ways:
#. By calling the :meth:`~Dispatcher.wait_for` method on the object to observe.
This will wait until the specific event has been sent to that object and
return the generated event.
#. By calling :meth:`~Dispatcher.add_event_handler` on the object
to observe, which will cause the supplied function to be called every time
the specified event occurs (use the :func:`oneshot` decorator
to only have the handler called once)
#. By sub-classing a type and implementing a receiver method.
For example, subclass the :class:`cozmo.objects.LightCube` type and implement `evt_object_tapped`.
Note that the factory attribute would need to be updated on the
generating class for your type to be used by the SDK.
For example, :attr:`~cozmo.world.World.light_cube_factory` in this example.
#. By subclassing a type and implementing a default receiver method.
Events not dispatched to an explicit receiver method are dispatched to
`recv_default_handler`.
Events are dispatched to a target object (by calling :meth:`dispatch_event`
on the receiving object). In line with the above, upon receiving an event,
the object will:
#. Dispatch the event to any handlers which have explicitly registered interest
in the event (or a superclass of the event) via
:meth:`~Dispatcher.add_event_handler` or via :meth:`Dispatcher.wait_for`
#. Dispatch the event to any "children" of the object (see below)
#. Dispatch the event to method handlers on the receiving object, or the
`recv_default_handler` if it has no matching handler
#. Dispatch the event to the parent of the object (if any), and in turn onto
the parent's parents.
Any handler may raise a :class:`~cozmo.exceptions.StopPropogation` exception
to prevent the event reaching any subsequent handlers (but generally should
have no need to do so).
Child objects receive all events that are sent to the originating object
(which may have multiple children).
Originating objects may have one parent object, which receives all events sent
to its child.
For example, :class:`cozmo.robot.Robot` creates a :class:`cozmo.world.World`
object and sets itself as a parent and the World as the child; both receive
events sent to the other.
The World class creates individual :class:`cozmo.objects.ObservableObject` objects
as they are discovered and makes itself a parent, so as to receive all events
sent to the child. However, it does not make those ObservableObject objects children
for the sake of message dispatch as they only need to receive a small subset
of messages the World object receives.
'''
# __all__ should order by constants, event classes, other classes, functions.
__all__ = ['Event', 'Dispatcher', 'Filter', 'Handler',
'oneshot', 'filter_handler', 'wait_for_first']
import asyncio
import collections
import inspect
import re
import weakref
from . import base
from . import exceptions
from . import logger
# from https://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-snake-case
_first_cap_re = re.compile('(.)([A-Z][a-z]+)')
_all_cap_re = re.compile('([a-z0-9])([A-Z])')
def _uncamelcase(name):
s1 = _first_cap_re.sub(r'\1_\2', name)
return _all_cap_re.sub(r'\1_\2', s1).lower()
# Global registry mapping event class name -> event class;
# populated automatically by the _AutoRegister metaclass below.
registered_events = {}
# Weak set of every live Dispatcher instance; used by _abort_futures()
# to fan a connection failure out to all pending Future handlers.
active_dispatchers = weakref.WeakSet()
class _rprop:
def __init__(self, value):
self._value = value
def __get__(self, instance, owner):
return self._value
# A str subclass whose __doc__ is its own content, so event parameter
# descriptions show up in help().
class docstr(str):
    @property
    def __doc__(self):
        return str(self)
class _AutoRegister(type):
    '''helper to automatically register event classes wherever they're defined
    without requiring a class decorator'''
    def __new__(mcs, name, bases, attrs, **kw):
        # The abstract root "Event" class is exempt from naming/docstring rules.
        if name in ('Event',):
            return super().__new__(mcs, name, bases, attrs, **kw)
        if not (name.startswith('Evt') or name.startswith('_Evt') or name.startswith('_Msg')):
            raise ValueError('Event class names must begin with "Evt (%s)"' % name)
        if '__doc__' not in attrs:
            raise ValueError('Event classes must have a docstring')
        # Inherit the parameter-name set from all base event classes.
        props = set()
        for base in bases:
            if hasattr(base, '_props'):
                props.update(base._props)
        newattrs = {'_internal': False}
        for k, v in attrs.items():
            # Dunder/private attributes pass through untouched; every other
            # class attribute is treated as an event parameter whose value is
            # its description string.
            if k[0] == '_':
                newattrs[k] = v
                continue
            if k in props:
                raise ValueError("Event class %s duplicates property %s defined in superclass" % (mcs, k))
            props.add(k)
            # Wrap the description in docstr so help() shows it as documentation.
            newattrs[k] = docstr(v)
        newattrs['_props'] = props
        newattrs['_props_sorted'] = sorted(props)
        # Leading underscore marks SDK-internal events; it is stripped from the
        # public class name and recorded via the _internal flag.
        if name[0] == '_':
            newattrs['_internal'] = True
            name = name[1:]
        # create a read only property for the event name
        newattrs['event_name'] = _rprop(name)
        return super().__new__(mcs, name, bases, newattrs, **kw)
    def __init__(cls, name, bases, attrs, **kw):
        # Register every event class globally by its (de-underscored) name and
        # reject collisions across modules.
        if name in registered_events:
            raise ValueError("Duplicate event name %s (%s duplicated by %s)"
                % (name, _full_qual_name(cls), _full_qual_name(registered_events[name])))
        registered_events[name] = cls
        super().__init__(name, bases, attrs, **kw)
def _full_qual_name(obj):
return obj.__module__ + '.' + obj.__qualname__
class Event(metaclass=_AutoRegister):
    '''An event representing an action that has occurred.
    Instances of an Event have attributes set to values passed to the event.
    For example, :class:`cozmo.objects.EvtObjectTapped` defines obj and tap_count
    parameters which can be accessed as ``evt.obj`` and ``evt.tap_count``.
    '''
    #_first_raised_by = "The object that generated the event"
    #_last_raised_by = "The object that last relayed the event to the dispatched handler"
    #pylint: disable=no-member
    # Event Metaclass raises "no-member" pylint errors in pylint within this scope.
    def __init__(self, **kwargs):
        # _props is populated by the _AutoRegister metaclass; every declared
        # parameter not supplied here defaults to None.
        unset = self._props.copy()
        for k, v in kwargs.items():
            if k not in self._props:
                raise ValueError("Event %s has no parameter called %s" % (self.event_name, k))
            setattr(self, k, v)
            unset.remove(k)
        for k in unset:
            setattr(self, k, None)
        # ids of dispatchers this event has already been delivered to;
        # prevents duplicate delivery when parent/child relay the same event.
        self._delivered_to = set()
    def __repr__(self):
        kvs = {'name': self.event_name}
        for k in self._props_sorted:
            kvs[k] = getattr(self, k)
        return '<%s %s>' % (self.__class__.__name__, ' '.join(['%s=%s' % kv for kv in kvs.items()]),)
    def _params(self):
        # Map of parameter name -> current value; used as kwargs for handlers.
        return {k: getattr(self, k) for k in self._props}
    @classmethod
    def _handler_method_name(cls):
        # e.g. EvtObjectTapped -> "recv_evt_object_tapped"; internal events get
        # a leading underscore.
        name = 'recv_' + _uncamelcase(cls.event_name)
        if cls._internal:
            name = '_' + name
        return name
    def _dispatch_to_func(self, f):
        return f(self, **self._params())
    def _dispatch_to_obj(self, obj, fallback_to_default=True):
        # Dispatch to the most specific recv_* method defined on obj, walking
        # up the event class hierarchy; fall back to recv_default_handler.
        for cls in self._parent_event_classes():
            f = getattr(obj, cls._handler_method_name(), None)
            if f and not self._is_filtered(f):
                return self._dispatch_to_func(f)
        if fallback_to_default:
            name = 'recv_default_handler'
            if self._internal:
                name = '_' + name
            f = getattr(obj, name, None)
            if f and not self._is_filtered(f):
                return f(self, **self._params())
    def _dispatch_to_future(self, fut):
        # Guard against completed/cancelled futures (oneshot handlers).
        if not fut.done():
            fut.set_result(self)
    def _is_filtered(self, f):
        # A handler decorated with filter_handler is skipped unless at least
        # one of its attached filters matches this event.
        filters = getattr(f, '_handler_filters', None)
        if filters is None:
            return False
        for filter in filters:
            if filter(self):
                return False
        return True
    def _parent_event_classes(self):
        # Yield this event's class and every Event superclass (most specific
        # first, per the MRO), excluding the abstract Event root.
        for cls in self.__class__.__mro__:
            if cls != Event and issubclass(cls, Event):
                yield cls
def _register_dynamic_event_type(event_name, attrs):
    """Create (and thereby auto-register via the metaclass) an Event subclass at runtime."""
    new_cls = type(event_name, (Event,), attrs)
    return new_cls
# Fields: obj = the Dispatcher the handler was registered on,
#         evt = the Event subclass being listened for,
#         f   = the callable or asyncio.Future to invoke.
class Handler(collections.namedtuple('Handler', 'obj evt f')):
    '''A Handler is returned by :meth:`Dispatcher.add_event_handler`
    The handler can be disabled at any time by calling its :meth:`disable`
    method.
    '''
    __slots__ = ()
    def disable(self):
        '''Removes the handler from the object it was originally registered with.'''
        return self.obj.remove_event_handler(self.evt, self.f)
    @property
    def oneshot(self):
        '''bool: True if the wrapped handler function will only be called once.'''
        # The flag is stamped onto the callable by the oneshot() decorator.
        return getattr(self.f, '_oneshot_handler', False)
# Returned by add_event_handler() after the dispatcher has been stopped:
# the handler was never stored, so disabling it is a no-op.
class NullHandler(Handler):
    def disable(self):
        pass
class Dispatcher(base.Base):
    '''Mixin to provide event dispatch handling.'''
    def __init__(self, *a, dispatch_parent=None, loop=None, **kw):
        super().__init__(**kw)
        # Track all live dispatchers so _abort_futures() can reach them.
        active_dispatchers.add(self)
        self._dispatch_parent = dispatch_parent
        self._dispatch_children = []
        # Maps event_name -> list of Handler instances.
        self._dispatch_handlers = collections.defaultdict(list)
        if not loop:
            raise ValueError("Loop was not supplied to "+self.__class__.__name__)
        # NOTE(review): the "or" fallback is unreachable — a falsy loop already
        # raised above.
        self._loop = loop or asyncio.get_event_loop()
        self._dispatcher_running = True
    def _set_parent_dispatcher(self, parent):
        self._dispatch_parent = parent
    def _add_child_dispatcher(self, child):
        self._dispatch_children.append(child)
    def _stop_dispatcher(self):
        """Stop dispatching events - call before closing the connection to prevent stray dispatched events"""
        self._dispatcher_running = False
    def add_event_handler(self, event, f):
        """Register an event handler to be notified when this object receives a type of Event.
        Expects a subclass of Event as the first argument. If the class has
        subclasses then the handler will be notified for events of that subclass too.
        For example, adding a handler for :class:`~cozmo.action.EvtActionCompleted`
        will cause the handler to also be notified for
        :class:`~cozmo.anim.EvtAnimationCompleted` as it's a subclass.
        Callable handlers (e.g. functions) are called with a first argument
        containing an Event instance and the remaining keyword arguments set as
        the event parameters.
        For example, ``def my_ontap_handler(evt, *, obj, tap_count, **kwargs)``
        or ``def my_ontap_handler(evt, obj=None, tap_count=None, **kwargs)``
        It's recommended that a ``**kwargs`` parameter be included in the
        definition so that future expansion of event parameters do not cause
        the handler to fail.
        Callable handlers may raise an events.StopPropogation exception to prevent
        other handlers listening to the same event from being triggered.
        :class:`asyncio.Future` handlers are called with a result set to the event.
        Args:
            event (:class:`Event`): A subclass of :class:`Event` (not an instance of that class)
            f (callable): A callable or :class:`asyncio.Future` to execute when the event is received
        Raises:
            :class:`TypeError`: An invalid event type was supplied
        """
        if not issubclass(event, Event):
            raise TypeError("event must be a subclass of Event (not an instance)")
        # After _stop_dispatcher() we hand back an inert handler rather than
        # registering anything.
        if not self._dispatcher_running:
            return NullHandler(self, event, f)
        if isinstance(f, asyncio.Future):
            # futures can only be called once.
            f = oneshot(f)
        handler = Handler(self, event, f)
        self._dispatch_handlers[event.event_name].append(handler)
        return handler
    def remove_event_handler(self, event, f):
        """Remove an event handler for this object.
        Args:
            event (:class:`Event`): The event class, or an instance thereof,
                used with register_event_handler.
            f (callable or :class:`Handler`): The callable object that was
                passed as a handler to :meth:`add_event_handler`, or a
                :class:`Handler` instance that was returned by
                :meth:`add_event_handler`.
        Raises:
            :class:`ValueError`: No matching handler found.
        """
        if not (isinstance(event, Event) or (isinstance(event, type) and issubclass(event, Event))):
            raise TypeError("event must be a subclasss or instance of Event")
        # Match either the full Handler tuple or just the wrapped callable.
        if isinstance(f, Handler):
            for i, h in enumerate(self._dispatch_handlers[event.event_name]):
                if h == f:
                    del self._dispatch_handlers[event.event_name][i]
                    return
        else:
            for i, h in enumerate(self._dispatch_handlers[event.event_name]):
                if h.f == f:
                    del self._dispatch_handlers[event.event_name][i]
                    return
        raise ValueError("No matching handler found for %s (%s)" % (event.event_name, f) )
    def dispatch_event(self, event, **kw):
        '''Dispatches a single event to registered handlers.
        Not generally called from user-facing code.
        Args:
            event (:class:`Event`): A class or instance of :class:`Event`
            kw (dict): If a class is passed to event, then the remaining keywords
                are passed to it to create an instance of the event.
        Returns:
            A :class:`asyncio.Task` or :class:`asyncio.Future` that will
                complete once all event handlers have been called.
        Raises:
            :class:`TypeError` if an invalid event is supplied.
        '''
        if not self._dispatcher_running:
            return
        event_cls = event
        if not isinstance(event, Event):
            if not isinstance(event, type) or not issubclass(event, Event):
                raise TypeError("events must be a subclass or instance of Event")
            # create an instance of the event if passed a class
            event = event(**kw)
        else:
            event_cls = event.__class__
        # Each dispatcher receives a given event instance at most once, even
        # when parent/child relationships would relay it back.
        if id(self) in event._delivered_to:
            return
        event._delivered_to.add(id(self))
        # Collect handlers registered for this event class or any of its
        # Event superclasses, honoring per-handler filters.
        handlers = set()
        for cls in event._parent_event_classes():
            for handler in self._dispatch_handlers[cls.event_name]:
                if event._is_filtered(handler.f):
                    continue
                if getattr(handler.f, '_oneshot_handler', False):
                    # Disable oneshot events prior to actual dispatch
                    handler.disable()
                handlers.add(handler)
        return asyncio.ensure_future(self._dispatch_event(event, handlers), loop=self._loop)
    async def _dispatch_event(self, event, handlers):
        # iterate through events from child->parent
        # update the dispatched_to set for each event so each handler
        # only receives the most specific event if they are monitoring for both.
        try:
            # dispatch to local handlers
            for handler in handlers:
                if isinstance(handler.f, asyncio.Future):
                    event._dispatch_to_future(handler.f)
                else:
                    result = event._dispatch_to_func(handler.f)
                    if asyncio.iscoroutine(result):
                        await result
            # dispatch to children
            for child in self._dispatch_children:
                child.dispatch_event(event)
            # dispatch to self methods
            result = event._dispatch_to_obj(self)
            if asyncio.iscoroutine(result):
                await result
            # dispatch to parent dispatcher
            if self._dispatch_parent:
                self._dispatch_parent.dispatch_event(event)
        except exceptions.StopPropogation:
            # A handler explicitly halted further delivery of this event.
            pass
    def _abort_event_futures(self, exc):
        '''Sets an exception on all pending Future handlers
        This prevents coroutines awaiting a Future from blocking forever
        should a hard failure occur with the connection.
        '''
        handlers = set()
        for evh in self._dispatch_handlers.values():
            for h in evh:
                handlers.add(h)
        for handler in handlers:
            if isinstance(handler.f, asyncio.Future):
                if not handler.f.done():
                    handler.f.set_exception(exc)
                handler.disable()
    async def wait_for(self, event_or_filter, timeout=30):
        '''Waits for the specified event to be sent to the current object.
        Args:
            event_or_filter (:class:`Event`): Either a :class:`Event` class
                or a :class:`Filter` instance to wait to trigger
            timeout: Maximum time to wait for the event. Pass None to wait indefinitely.
        Returns:
            The :class:`Event` instance that was dispatched
        Raises:
            :class:`asyncio.TimeoutError`
        '''
        f = asyncio.Future(loop=self._loop)  # replace with loop.create_future in 3.5.2
        # TODO: add a timer that logs every 5 seconds that the event is still being
        # waited on.  Will help novice programmers realize why their program is hanging.
        f = oneshot(f)
        if isinstance(event_or_filter, Filter):
            # Wrap the future so it only fires when the filter matches.
            f = filter_handler(event_or_filter)(f)
            event = event_or_filter._event
        else:
            event = event_or_filter
        self.add_event_handler(event, f)
        # NOTE(review): any falsy timeout (including 0) waits indefinitely.
        if timeout:
            return await asyncio.wait_for(f, timeout, loop=self._loop)
        return await f
def oneshot(f):
    '''Event handler decorator; causes the handler to only be dispatched to once.'''
    # Stamp the callable (or Future) so dispatch_event() disables its Handler
    # before the first delivery.
    setattr(f, '_oneshot_handler', True)
    return f
def filter_handler(event, **filters):
    '''Decorator restricting a handler (or Future) to events matching a filter.

    Accepts either an :class:`Event` subclass plus keyword filters, or an
    already-constructed :class:`Filter` instance as the sole argument.
    Filter values may be plain values (exact match) or callables that
    receive the event parameter value and return a bool.  Applying the
    decorator several times attaches several independent filters; the
    handler is called if any one of them matches.
    '''
    if isinstance(event, Filter):
        # A pre-built Filter already carries its criteria; extra keyword
        # filters would be ambiguous.
        if filters:
            raise ValueError("Cannot supply filter values when passing a Filter as the first argument")
        event_filter = event
    else:
        event_filter = Filter(event, **filters)
    def attach(f):
        existing = getattr(f, '_handler_filters', None)
        if existing is None:
            f._handler_filters = [event_filter]
        else:
            existing.append(event_filter)
        return f
    return attach
class Filter:
    """Provides fine-grain filtering of events for dispatch.

    See the :func:`filter_handler` function for further details.
    Filter values may be set as attributes (``flt.obj = ...``) and may be
    literals (exact match) or callables receiving the parameter value.
    """
    def __init__(self, event, **filters):
        if not issubclass(event, Event):
            raise TypeError("event must be a subclass of Event (not an instance)")
        self._event = event
        self._filters = filters
        for key in self._filters.keys():
            if not hasattr(event, key):
                # Fixed: the message was previously passed logging-style
                # ("msg", args) and never interpolated.
                raise AttributeError("Event %s does not define property %s"
                                     % (event.__name__, key))
    def __setattr__(self, key, val):
        # Underscore-prefixed names are real instance attributes; anything else
        # is treated as a filter on an event parameter.
        if key[0] == '_':
            return super().__setattr__(key, val)
        if not hasattr(self._event, key):
            raise AttributeError("Event %s does not define property %s"
                                 % (self._event.__name__, key))
        self._filters[key] = val
    def __call__(self, evt):
        # Return True only if every configured criterion matches the event.
        for prop, criterion in self._filters.items():
            val = getattr(evt, prop)
            if callable(criterion):
                if not criterion(val):
                    return False
            elif val != criterion:
                return False
        return True
async def wait_for_first(*futures, discard_remaining=True, loop=None):
    '''Wait the first of a set of futures to complete.
    Eg::
        event = cozmo.event.wait_for_first(
            coz.world.wait_for_new_cube(),
            playing_anim.wait_for(cozmo.anim.EvtAnimationCompleted)
        )
    If more than one completes during a single event loop run, then
    if any of those results are not exception, one of them will be selected
    (at random, as determined by ``set.pop``) to be returned, else one
    of the result exceptions will be raised instead.
    Args:
        futures (list of :class:`asyncio.Future`): The futures or coroutines to wait on.
        discard_remaining (bool): Cancel or discard the results of the futures
            that did not return first.
        loop (:class:`asyncio.BaseEventLoop`): The event loop to wait on.
    Returns:
        The first result, or raised exception
    '''
    done, pending = await asyncio.wait(futures, loop=loop, return_when=asyncio.FIRST_COMPLETED)
    # collect the results from all "done" futures; only one will be returned
    # Successful results take precedence over exceptions.
    # NOTE(review): uses None as the "no result yet" sentinel, so a future
    # that legitimately returns None can be displaced by a later exception —
    # confirm whether callers ever return None here.
    result = None
    for fut in done:
        try:
            fut_result = fut.result()
            if result is None or isinstance(result, BaseException):
                result = fut_result
        except Exception as exc:
            if result is None:
                result = exc
    if discard_remaining:
        # cancel the pending futures
        for fut in pending:
            fut.cancel()
    if isinstance(result, BaseException):
        raise result
    return result
def _abort_futures(exc):
    '''Trigger the exception handler for all pending Future handlers.'''
    # Fan the failure out to every live dispatcher so coroutines awaiting a
    # wait_for() future unblock instead of hanging forever.
    for obj in active_dispatchers:
        obj._abort_event_futures(exc)
| StarcoderdataPython |
9629559 | from aws_xray_sdk.core import xray_recorder
from moto import mock_kinesis
from resources.events import event_service
from test.util import create_event_stream
# Open an X-Ray segment at import time so instrumented code called by the
# tests below has an active segment to attach subsegments to.
xray_recorder.begin_segment("Test")
def test_stream_name(event_streams_table):
    # With no stream-config item stored, the service targets the "incoming"
    # stream for the given dataset/version/stage.
    stream_name = event_service()._stream_name("foo", "1", "green")
    assert stream_name == "dp.green.foo.incoming.1.json"
def test_stream_name_raw(event_streams_table):
    # A stored config with create_raw=True switches the target to the "raw" stream.
    event_streams_table.put_item(
        Item={"id": "foo/1", "config_version": 2, "create_raw": True}
    )
    stream_name = event_service()._stream_name("foo", "1", "green")
    assert stream_name == "dp.green.foo.raw.1.json"
def test_event_records():
    # Each event body dict becomes one Kinesis record: JSON-encoded with a
    # trailing newline.
    event_body = [
        {"key00": "value00", "key01": "value01"},
        {"key10": "value10", "key11": "value11"},
        {"key20": "value20", "key21": "value21"},
        {"key30": "value30", "key31": "value31"},
    ]
    expected = [
        {"PartitionKey": "aa-bb", "Data": '{"key00": "value00", "key01": "value01"}\n'},
        {"PartitionKey": "aa-bb", "Data": '{"key10": "value10", "key11": "value11"}\n'},
        {"PartitionKey": "aa-bb", "Data": '{"key20": "value20", "key21": "value21"}\n'},
        {"PartitionKey": "aa-bb", "Data": '{"key30": "value30", "key31": "value31"}\n'},
    ]
    # NOTE(review): only the "Data" fields are compared, and zip() truncates
    # silently if the lengths differ — consider also asserting
    # len(records) == len(expected) and the PartitionKey values.
    records = event_service()._event_records(event_body)
    assert all(x["Data"] == y["Data"] for x, y in zip(records, expected))
@mock_kinesis
def test_put_records_to_kinesis():
    # Against a moto-mocked stream, a 100-record batch should be accepted
    # without any failed records.
    create_event_stream("foo")
    record_list = [
        {
            "PartitionKey": "aa-bb",
            "Data": '{"data": {"key30": "value30", "key31": "value31"}, "datasetId": "d123", "version": "1"}',
        }
    ] * 100
    response = event_service()._put_records_to_kinesis(record_list, "foo")
    assert response["FailedRecordCount"] == 0
def test_failed_records():
    # Kinesis PutRecords responses are positional: response["Records"][i]
    # corresponds to record_list[i].  Entries carrying an ErrorCode mark the
    # matching input record as failed, so _failed_records should return
    # record_list[1] and record_list[3] here.
    put_records_response = {
        "FailedRecordCount": 2,
        "Records": [
            {
                "SequenceNumber": "21269319989900637946712965403778482371",
                "ShardId": "shardId-000000000001",
            },
            {
                "ErrorCode": "ProvisionedThroughputExceededException",
                "ErrorMessage": "Rate exceeded for shard shardId...",
            },
            {
                "SequenceNumber": "21269319989900637946712965403778482371",
                "ShardId": "shardId-000000000001",
            },
            {
                "ErrorCode": "ProvisionedThroughputExceededException",
                "ErrorMessage": "Rate exceeded for shard shardId...",
            },
        ],
    }
    record_list = [
        {"PartitionKey": "aa-bb", "Data": '{"key00": "value00", "key01": "value01"}'},
        {"PartitionKey": "aa-bb", "Data": '{"key10": "value10", "key11": "value11"}'},
        {"PartitionKey": "aa-bb", "Data": '{"key20": "value20", "key21": "value21"}'},
        {"PartitionKey": "aa-bb", "Data": '{"key30": "value30", "key31": "value31"}'},
    ]
    expected = [
        {"PartitionKey": "aa-bb", "Data": '{"key10": "value10", "key11": "value11"}'},
        {"PartitionKey": "aa-bb", "Data": '{"key30": "value30", "key31": "value31"}'},
    ]
    failed_records_list = event_service()._failed_records(
        record_list, put_records_response["Records"]
    )
    assert failed_records_list == expected
| StarcoderdataPython |
11257059 | """
Contain Task to set a scheduler task for todofehrist application
================================================================
Task is responsible to send emails to systems users who have pending
tasks with due_datetime on that particular day. Task/Method will be
invoked at 12 AM everyday (UTC Standard)
"""
from __future__ import absolute_import, unicode_literals
import logging
import os
from datetime import date
from django.db.models import Count
# Celery imports
from celery import Celery
from celery.schedules import crontab
# Project Settings import
from django.conf import settings
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'emumbaproject.settings')
# Create the Celery application and read broker/result configuration from the
# Django settings module.
app = Celery('emumbaproject')
app.config_from_object('django.conf:settings')
# Load task modules from all registered Django app configs.
app.autodiscover_tasks(settings.INSTALLED_APPS)
@app.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
    """
    This method uses Celery's crontab functionality to register
    a task in queue at a specific time to be processed by active worker
    """
    # Schedule Task Every Day at 12:00 AM UTC Time
    sender.add_periodic_task(
        crontab(hour=0, minute=0),
        to_do_fehrist_tasks_reminder.s(),
    )
# Reference add_periodic_table call method via s method
# https://docs.celeryproject.org/en/stable/userguide/periodic-tasks.html
# Setting these up from within the on_after_configure handler means that
# we’ll not evaluate the app at module level when using test.s()
@app.task
def to_do_fehrist_tasks_reminder():
    """
    This method will send reminder to every user
    by email who have some pending tasks in to-do
    list
    """
    # Imported inside the task so Django's app registry is fully loaded
    # before models are touched (avoids AppRegistryNotReady at import time).
    from todofehrist.models import Task, User
    from todofehrist.utility import send_email
    # Group today's incomplete tasks by user, with a per-user count.
    # NOTE(review): the module docstring mentions "due_datetime", but the
    # filter uses completion_datetime — confirm which field stores the due date.
    result = Task.objects.filter(
        completion_status=0, completion_datetime__date=date.today()).values("user").annotate(
        count=Count("user"))
    for user_tasks_entry in result:
        user_tasks_entry = user_tasks_entry  # each entry: {"user": <pk>, "count": <int>}
        email_address = User.objects.get(pk=user_tasks_entry["user"]).email
        send_email("ToDoFehrist - Pending Tasks Reminder",
                   f"You have {user_tasks_entry['count']} pending tasks due today.",
                   email_address)
        logging.debug(f"Reminder Email sent to user with email address {email_address}")
| StarcoderdataPython |
1737119 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.contrib.gis.db import models
from django.contrib.gis.geos import GEOSGeometry
# Create your models here.
class firewalls(models.Model):
    """GeoDjango model for firewall line features stored in web-mercator (EPSG:3857)."""
    # NOTE(review): class name breaks PascalCase, and "type" shadows the
    # builtin; renaming either would require a migration and caller updates.
    geom = models.MultiLineStringField(srid=3857)
    type = models.IntegerField()  # NOTE(review): meaning of the integer codes not evident here
    descript = models.CharField(max_length=70)
3252198 | <gh_stars>0
special.jacobi(n, alpha, beta[, monic]) | StarcoderdataPython |
25749 | from campy.graphics.gwindow import GWindow
from campy.graphics.gobjects import GOval, GRect
from campy.gui.events.mouse import onmouseclicked
import random
# Window and play-zone geometry, in pixels.
WINDOW_WIDTH = 600
WINDOW_HEIGHT = 400
ZONE_WIDTH = 100
ZONE_HEIGHT = 100
BALL_RADIUS = 15
# Velocity bounds, in pixels per animation step.
MAX_SPEED = 6
MIN_Y_SPEED = 2
class ZoneGraphics:
    """Window, target zone and bouncing ball for the Zone game.

    The ball is spawned at a random position outside the central zone with a
    random velocity; clicking the ball respawns it.
    """
    def __init__(self, window_width=WINDOW_WIDTH, window_height=WINDOW_HEIGHT,
                 zone_width=ZONE_WIDTH, zone_height=ZONE_HEIGHT, ball_radius=BALL_RADIUS):
        # Create window
        self.window = GWindow(window_width, window_height, title='Zone Game')
        # Create zone
        self.zone = GRect(zone_width, zone_height, x=(window_width - zone_width) / 2,
                          y=(window_height - zone_height) / 2)
        self.zone.color = 'blue'
        self.window.add(self.zone)
        # Create ball and initialize velocity/position
        self.ball = GOval(2 * ball_radius, 2 * ball_radius)
        self.ball.filled = True
        self.ball.fill_color = 'salmon'
        self.dx = 0
        self.dy = 0
        self.reset_ball()
        # Initialize mouse listeners
        onmouseclicked(self.handle_click)
    # Set ball position at random inside the window
    def set_ball_position(self):
        self.ball.x = random.randint(0, self.window.width - self.ball.width)
        self.ball.y = random.randint(0, self.window.height - self.ball.height)
    def set_ball_velocity(self):
        # Horizontal speed may be 0 (purely vertical motion); vertical speed
        # always has at least MIN_Y_SPEED magnitude.  Sign of each is random.
        self.dx = random.randint(0, MAX_SPEED)
        if random.random() > 0.5:
            self.dx = -self.dx
        self.dy = random.randint(MIN_Y_SPEED, MAX_SPEED)
        if random.random() > 0.5:
            self.dy = -self.dy
    def reset_ball(self):
        # Re-roll the position until the ball spawns outside the zone.
        self.set_ball_position()
        while self.ball_in_zone():
            self.set_ball_position()
        self.set_ball_velocity()
        # NOTE(review): add() is called on every reset — presumably campy
        # ignores re-adding an object already in the window; confirm.
        self.window.add(self.ball)
    def move_ball(self):
        self.ball.move(self.dx, self.dy)
    def handle_wall_collisions(self):
        # Reverse direction when the ball touches a window edge.
        if self.ball.x + self.ball.width >= self.window.width or self.ball.x <= 0:
            self.dx = -self.dx
        if self.ball.y + self.ball.height >= self.window.height or self.ball.y <= 0:
            self.dy = -self.dy
    def ball_in_zone(self):
        # True when the ball's bounding box lies entirely inside the zone.
        zone_left_side = self.zone.x
        zone_right_side = self.zone.x + self.zone.width
        ball_x_in_zone = zone_left_side <= self.ball.x <= zone_right_side - self.ball.width
        zone_top_side = self.zone.y
        zone_bottom_side = self.zone.y + self.zone.height
        ball_y_in_zone = zone_top_side <= self.ball.y <= zone_bottom_side - self.ball.height
        return ball_x_in_zone and ball_y_in_zone
    def handle_click(self, event):
        # Respawn the ball when it is clicked directly.
        obj = self.window.get_object_at(event.x, event.y)
        if self.ball == obj:
            self.reset_ball()
| StarcoderdataPython |
1977 | <reponame>mstoelzle/raisimLib
import os
import numpy as np
import raisimpy as raisim
import math
import time
# Point raisim at the license file shipped in the repository's rsc directory.
raisim.World.setLicenseFile(os.path.dirname(os.path.abspath(__file__)) + "/../../rsc/activation.raisim")
world = raisim.World()
ground = world.addGround()
world.setTimeStep(0.001)  # 1 ms physics step
# Contact properties for steel-on-steel pairs (presumably friction/restitution
# parameters — confirm against the raisim API docs).
world.setMaterialPairProp("steel", "steel", 0.1, 1.0, 0.0)
# Static "pin" spheres the wires below are anchored to.
# Appearance strings are "r,g,b,alpha"; translucent so the wires stand out.
pin1 = world.addSphere(0.1, 0.8)
pin1.setAppearance("1,0,0,0.3")
pin1.setPosition(0.0, 0.0, 3.0)
pin1.setBodyType(raisim.BodyType.STATIC)
pin2 = world.addSphere(0.1, 0.8)
pin2.setAppearance("0,1,0,0.3")
pin2.setPosition(0.3, 0.0, 3.0)
pin2.setBodyType(raisim.BodyType.STATIC)
pin3 = world.addSphere(0.1, 0.8)
pin3.setAppearance("0,0,1,0.3")
pin3.setPosition(0.6, 0.0, 3.0)
pin3.setBodyType(raisim.BodyType.STATIC)
pin4 = world.addSphere(0.1, 0.8)
pin4.setAppearance("1,0,0,0.3")
pin4.setPosition(0.9, 0.0, 3.0)
pin4.setBodyType(raisim.BodyType.STATIC)
pin5 = world.addSphere(0.1, 0.8)
pin5.setPosition(0.9, 0.0, 6.0)
pin5.setBodyType(raisim.BodyType.STATIC)
pin6 = world.addSphere(0.1, 0.8)
pin6.setPosition(-3., 0.0, 7.0)
pin6.setBodyType(raisim.BodyType.STATIC)
pin7 = world.addSphere(0.1, 0.8)
pin7.setPosition(-4., 0.0, 7.0)
pin7.setBodyType(raisim.BodyType.STATIC)
# Two ANYmal quadrupeds (B and C variants) loaded from URDF.
anymalB_urdf_file = os.path.dirname(os.path.abspath(__file__)) + "/../../rsc/anymal/urdf/anymal.urdf"
anymalC_urdf_file = os.path.dirname(os.path.abspath(__file__)) + "/../../rsc/anymal_c/urdf/anymal.urdf"
anymalC = world.addArticulatedSystem(anymalC_urdf_file)
anymalB = world.addArticulatedSystem(anymalB_urdf_file)
# Nominal pose: base position/orientation (7 values) followed by 12 joint angles.
jointNominalConfig = np.array([-3, 0, 4.54, 1.0, 0.0, 0.0, 0.0, 0.03, 0.4, -0.8, -0.03, 0.4, -0.8, 0.03, -0.4, 0.8, -0.03, -0.4, 0.8])
jointVelocityTarget = np.zeros([anymalC.getDOF()])
jointPgain = np.ones(anymalC.getDOF()) * 100.0
jointDgain = np.ones(anymalC.getDOF()) * 1.0
anymalC.setGeneralizedCoordinate(jointNominalConfig)
anymalC.setPdGains(jointPgain, jointDgain)
anymalC.setPdTarget(jointNominalConfig, jointVelocityTarget)
anymalC.setName("anymalC")
# Re-use the same config for anymalB, shifted 1 m along x.
jointNominalConfig[0] = -4
anymalB.setGeneralizedCoordinate(jointNominalConfig)
anymalB.setPdGains(jointPgain, jointDgain)
anymalB.setPdTarget(jointNominalConfig, jointVelocityTarget)
anymalB.setName("anymalB")
# Dynamic steel balls that hang from the pins via stiff wires.
ball1 = world.addSphere(0.1498, 0.8, "steel")
ball1.setPosition(0, 0.0, 1.0)
ball2 = world.addSphere(0.1499, 0.8, "steel")
ball2.setPosition(0.3, 0.0, 1.0)
ball3 = world.addSphere(0.1499, 0.8, "steel")
ball3.setPosition(0.6, 0.0, 1.0)
ball4 = world.addSphere(0.1499, 0.8, "steel")
ball4.setPosition(2.9, 0.0, 3.0)
box = world.addBox(.1, .1, .1, 1)
box.setPosition(0.9, 0.0, 4.2)
# Stiff wires (fixed length 2 m) make a Newton's-cradle-like arrangement.
world.addStiffWire(pin1, 0, np.zeros(3), ball1, 0, np.zeros(3), 2.0)
world.addStiffWire(pin2, 0, np.zeros(3), ball2, 0, np.zeros(3), 2.0)
world.addStiffWire(pin3, 0, np.zeros(3), ball3, 0, np.zeros(3), 2.0)
world.addStiffWire(pin4, 0, np.zeros(3), ball4, 0, np.zeros(3), 2.0)
# Compliant wires with stiffness 200 / 1000; custom wire with fixed tension.
wire5 = world.addCompliantWire(pin5, 0, np.zeros(3), box, 0, np.zeros(3), 2.0, 200)
wire5.setStretchType(raisim.StretchType.BOTH)
wire6 = world.addCompliantWire(pin6, 0, np.zeros(3), anymalC, 0, np.zeros(3), 2.0, 1000)
wire6.setStretchType(raisim.StretchType.BOTH)
wire7 = world.addCustomWire(pin7, 0, np.zeros(3), anymalB, 0, np.zeros(3), 2.0)
wire7.setTension(310)
# Visualization server; connect with the raisim client on port 8080.
server = raisim.RaisimServer(world)
server.launchServer(8080)
for i in range(500000):
    time.sleep(0.001)
    server.integrateWorldThreadSafe()
    # Cut the custom wire once, 5 seconds into the simulation.
    if i == 5000:
        world.removeObject(wire7)
server.killServer()
| StarcoderdataPython |
3390971 |
import fboxlib

# Initialize the BoxLib runtime before any boxarray/layout calls.
fboxlib.open()

# One 101x101 coarse grid with 3 components and no ghost cells.
ba = fboxlib.boxarray([[(0,0), (100,100)]])
la = fboxlib.layout(ba)
mf = fboxlib.multifab(la, nc=3, ng=0)

# BUG FIX: the original used Python-2 print *statements*, which are a
# SyntaxError under Python 3; converted to print() calls (valid in both).
print("#" * 80)
print("# before regridding")
la.echo()

# Seed two square patches with a higher value; tag_boxes below tags
# exactly these cells for refinement.
fab = mf.fab(1)
fab.array[...] = 1.0
fab.array[10:20, 10:20] = 2.0
fab.array[50:60, 50:60] = 2.0
def tag_boxes(mf, tb, dx, lev):
    """Refinement-tagging callback passed to fboxlib.regrid.

    Rebuilds Python wrappers around the raw pointers (the ``cptr=`` kwarg)
    and sets the tag fab to 1 wherever the first data component exceeds
    the 1.0 background value. Levels above 1 are left untagged.

    :param mf: raw multifab pointer holding the solution data.
    :param tb: raw logical-multifab pointer receiving the tags.
    :param dx: cell size (unused here).
    :param lev: current refinement level.
    """
    if lev > 1:
        return
    mf = fboxlib.multifab(cptr=mf)
    tb = fboxlib.lmultifab(cptr=tb)
    mfab = mf.fab(1)
    tfab = tb.fab(1)
    # Boolean-mask assignment: tag every cell above the background value.
    tfab.array[mfab.array[:,:,0] > 1.0] = 1
# Regrid with one refinement pass (ratio entry 0.5) driven by tag_boxes.
mfs = fboxlib.regrid([la], [mf], [0.5], tag_boxes)

# BUG FIX: converted Python-2 print statements (SyntaxError on Python 3)
# into print() calls, which are valid on both Python 2 and 3.
print("#" * 80)
print("# after regridding")
for mf in mfs:
    mf.layout.echo()
| StarcoderdataPython |
11272033 | # -*- coding: UTF-8 -*-
"""
Tushare数据接口封装
====================================================================
"""
import requests
import os
import time
from datetime import datetime
import pandas as pd
import tushare as ts
from tma import DATA_PATH
# HTTP endpoint of the tushare pro API.
TS_PRO_API = "http://api.tushare.pro"
# Local file where the user's API token is persisted (see set_token()).
FILE_TOKEN = os.path.join(DATA_PATH, "tushare_pro.token")
def set_token(token):
    """Persist the tushare pro API token to the local token file.

    :param token: str, token issued by tushare pro.
    """
    # Explicit encoding keeps the file readable regardless of the
    # platform's default locale encoding.
    with open(FILE_TOKEN, 'w', encoding='utf-8') as f:
        f.write(token)
def query_pro(api_name, fields='', **kwargs):
    """Fetch data through the tushare pro HTTP API.

    :param api_name: str, name of the pro API endpoint.
    :param fields: str, comma-separated field names ('' means all fields).
    :param kwargs: extra query parameters forwarded to the endpoint.
    :return: pd.DataFrame
    :raises EnvironmentError: if no token file has been configured.
    :raises Exception: if the API responds with a non-zero error code.
    """
    if not os.path.exists(FILE_TOKEN):
        raise EnvironmentError("%s 文件不存在,请先调用"
                               "set_token()配置token" % FILE_TOKEN)
    with open(FILE_TOKEN, 'r') as f:
        # strip() guards against a trailing newline in a hand-edited token file.
        token = f.readline().strip()
    req_params = {
        'api_name': api_name,
        'token': token,
        'params': kwargs,
        'fields': fields
    }
    # A finite timeout keeps a dead endpoint from hanging the caller forever.
    result = requests.post(TS_PRO_API, json=req_params, timeout=30).json()
    if result['code'] != 0:
        raise Exception(result['msg'])
    data = result['data']
    return pd.DataFrame(data['items'], columns=data['fields'])
# --------------------------------------------------------------------
def get_market_basic(cache=True, use_cache=False):
    """Fetch basic info for every listed A-share stock.

    With ``use_cache=True`` a cached CSV younger than 12 hours is reused;
    with ``cache=True`` a freshly fetched table is written back to disk.
    """
    cache_file = os.path.join(DATA_PATH, "market_basic.csv")
    if os.path.exists(cache_file):
        age = time.time() - os.path.getmtime(cache_file)
        if use_cache and age < 3600 * 12:
            # Keep stock codes as strings so leading zeros survive.
            return pd.read_csv(cache_file, dtype={"code": str})
    frame = ts.get_stock_basics()
    frame.reset_index(inplace=True)
    frame['code'] = frame['code'].astype(str)
    if cache:
        frame.to_csv(cache_file, encoding='utf-8', index=False)
    return frame
def get_all_codes():
    """Return the stock codes of every listed A-share as a list of str."""
    frame = get_market_basic(cache=True, use_cache=True)
    return frame['code'].tolist()
# --------------------------------------------------------------------
def get_indices():
    """Return current quotes for the major market indices (tushare passthrough)."""
    return ts.get_index()
def get_price(code):
    """Return the latest traded price of one stock.

    :param code: str, stock code such as "600122".
    :return: float
    """
    quote = ts.get_realtime_quotes(code)
    return float(quote['price'][0])
def get_ticks(code, source="spider", date=None, cons=None):
    """Return tick-by-tick trade data for one stock on a given date.

    :param code: str: stock code, e.g. "603655".
    :param source: str: "spider" uses tushare's web-scraping endpoints;
        any other value goes through the tushare api connection.
    :param date: str: date such as "2018-03-15"; defaults to today.
    :param cons: tushare api connection; created on demand when missing.
    :return: pd.DataFrame of ticks.
        NOTE(review): the api branch returns tushare's raw frame, not the
        unified (datetime, price, vol, type) layout produced by the spider
        branches — confirm callers handle both formats.
    """
    if not date:
        date = datetime.now().date().__str__()
    TODAY = datetime.now().date().__str__()
    # Normalize spider output to columns (datetime, price, vol, type).
    def _unify_out(ticks, date):
        ticks = ticks[['time', 'price', 'volume', 'type']]
        ticks['datetime'] = ticks['time'].apply(lambda x: datetime.strptime(date + " " + x, "%Y-%m-%d %H:%M:%S"))
        ticks['vol'] = ticks['volume']
        # Map tushare's Chinese trade-side labels onto integer codes:
        # 0 = buy, 1 = sell, 2 = neutral/unknown.
        type_convert = {
            "买盘": 0,
            "卖盘": 1,
            "中性盘": 2,
            "0": 2
        }
        # A KeyError here would mean an unexpected label from tushare — TODO confirm.
        ticks['type'] = ticks["type"].apply(lambda x: type_convert[str(x)])
        ticks.drop(['time', 'volume'], axis=1, inplace=True)
        ticks.sort_values('datetime', inplace=True)
        ticks.reset_index(drop=True, inplace=True)
        return ticks[['datetime', 'price', 'vol', 'type']]
    if source == "spider" and date == TODAY:
        ticks = ts.get_today_ticks(code=code)
        ticks = _unify_out(ticks, date=TODAY)
    elif source == "spider" and date != TODAY:
        ticks = ts.get_tick_data(code=code, date=date)
        ticks = _unify_out(ticks, date=date)
    else:
        if not cons:
            cons = ts.get_apis()
        ticks = ts.tick(code=code, conn=cons, date=date)
    return ticks
# Short alias kept for backward compatibility.
ticks = get_ticks
def get_bars(codes):
    """Return real-time quote snapshots for the given code(s) via tushare."""
    return ts.get_realtime_quotes(codes)
# Short alias kept for backward compatibility.
bars = get_bars
# K线
# --------------------------------------------------------------------
def get_klines(code, freq="D", start_date=None):
    """Fetch candlestick (K-line) data for one stock.

    :param code: str, stock code.
    :param freq: str, bar period — D=daily, W=weekly, M=monthly, or
        5/15/30/60 for intraday minutes. Defaults to "D".
    :param start_date: str or None; None fetches the full history.
    :return: pd.DataFrame
    """
    kline_args = {"code": code, "ktype": freq}
    if start_date is not None:
        kline_args["start"] = start_date
    return ts.get_k_data(**kline_args)
# Short alias kept for backward compatibility.
klines = get_klines
# 全市场行情
# --------------------------------------------------------------------
def filter_tp(tm):
    """Drop suspended (zero-volume) stocks.

    :param tm: DataFrame returned by :func:`get_today_market`; must contain
        a ``volume`` column.
    :return: filtered DataFrame with a fresh 0..n-1 index.
    """
    # NaN volumes compare unequal to 0.0 and therefore survive this filter —
    # presumably intended, but verify against upstream data. TODO confirm.
    return tm[tm['volume'] != 0.0].reset_index(drop=True)
def filter_st(tm):
    """Drop ST (special-treatment) stocks.

    :param tm: DataFrame returned by :func:`get_today_market`; must contain
        a ``name`` column of str.
    :return: filtered DataFrame with a fresh 0..n-1 index.
    """
    # Substring match: any name containing "ST" (including "*ST") is dropped.
    keep = tm['name'].apply(lambda name: "ST" not in name)
    return tm[keep].reset_index(drop=True)
def get_today_market(filters=None, save=True,
                     use_latest=False, interval=600):
    """Return the latest trading day's quotes for all stocks.

    :param filters: list, default ['tp']
        Filter rules to apply: "tp" drops suspended stocks, "st" drops
        ST stocks.
    :param save: bool, default True
        Whether to cache the (filtered) result under DATA_PATH.
    :param use_latest: bool, default False
        Whether a recent enough cached copy may be returned instead of
        refetching.
    :param interval: int, default 600
        Maximum cache age in seconds for the ``use_latest`` path.
        NOTE(review): the cached file holds already-filtered data, so the
        ``use_latest`` path ignores the current ``filters`` argument —
        confirm this is acceptable to callers.
    :return: pd.DataFrame with the latest market quotes.
    """
    if filters is None:
        filters = ['tp']
    tm_csv = os.path.join(DATA_PATH, 'latest_market.csv')
    if use_latest and os.path.exists(tm_csv) \
            and time.time() - os.path.getmtime(tm_csv) < interval:
        tm = pd.read_csv(tm_csv, encoding='utf-8')
        return tm
    tm = ts.get_today_all()
    # BUG FIX: the original re-checked `if filters is None: return tm` here,
    # which was unreachable because `filters` is defaulted to ['tp'] above;
    # the dead branch has been removed.
    filters = [x.lower() for x in filters]
    if "tp" in filters:
        tm = filter_tp(tm)
    if "st" in filters:
        tm = filter_st(tm)
    if save:
        tm.to_csv(tm_csv, index=False, encoding='utf-8')
    return tm
# Short alias kept for backward compatibility.
today_market = get_today_market
def get_hist_market(date):
    """Return full-market daily quotes for a historical date.

    :param date: str:
        target date, e.g. "2018-03-19"
    :return: pd.DataFrame with a ``date`` column appended for traceability.
    """
    hm = ts.get_day_all(date)
    hm['date'] = date
    return hm
# Short alias kept for backward compatibility.
hist_market = get_hist_market
# 融资融券
# --------------------------------------------------------------------
# tushare接口: sh_margins | sh_margin_details
# sz_margins | sz_margin_details
| StarcoderdataPython |
170603 | """
Copyright (C) 2021 NVIDIA Corporation. All rights reserved.
Licensed under the NVIDIA Source Code License. See LICENSE at the main github page.
Authors: <NAME>, <NAME>, <NAME>, <NAME>
"""
import torch
from torch import nn
from torch.nn import functional as F
from simulator_model import layers
import functools
import sys
sys.path.append('..')  # make the parent package importable when run as a script
class convLinearSPADE(nn.Module):
    """SPADE-style conditional normalization: instance-normalize a feature
    map, then modulate it with a per-sample scale/shift regressed from a
    flat conditioning vector, followed by LeakyReLU."""

    def __init__(self, channel, h, w, linear_input_channel, opts):
        super().__init__()
        self.h = h
        self.w = w
        # Parameter-free normalization; all modulation comes from the MLPs below.
        self.param_free_norm = nn.InstanceNorm2d(channel, affine=False)
        self.mlp_gamma = nn.Linear(linear_input_channel, channel)
        self.mlp_beta = nn.Linear(linear_input_channel, channel)
        self.activation = nn.LeakyReLU(0.2)

    def forward(self, x, y, resize=True):
        # Optionally fold a flat feature vector into a (B, C, h, w) map.
        if resize:
            x = x.view(x.size(0), -1, self.h, self.w)
        normalized = self.param_free_norm(x)
        cond = y.view(y.size(0), -1)
        scale = self.mlp_gamma(cond).view(cond.size(0), -1, 1, 1)
        shift = self.mlp_beta(cond).view(cond.size(0), -1, 1, 1)
        modulated = normalized * (1 + scale) + shift
        return self.activation(modulated)
class View(nn.Module):
    """nn.Module wrapper around Tensor.view so a reshape can sit inside
    an nn.Sequential pipeline."""

    def __init__(self, size):
        super().__init__()
        # Target shape, handed straight to Tensor.view in forward().
        self.size = size

    def forward(self, tensor):
        """Reshape *tensor* to the stored size (no data copy)."""
        return tensor.view(self.size)
def choose_netG_encoder(input_dim=512, basechannel=512, opts=None):
    """Build the generator's MLP encoder: four Linear layers, each followed
    by LeakyReLU(0.2), mapping input_dim -> basechannel. `opts` is accepted
    for interface compatibility but unused."""
    dims = [input_dim] + [basechannel] * 4
    stages = []
    for d_in, d_out in zip(dims[:-1], dims[1:]):
        stages.append(nn.Linear(d_in, d_out))
        stages.append(nn.LeakyReLU(0.2))
    return nn.Sequential(*stages)
def choose_netD_temporal(opts, conv3d_dim, window=0):
    """Build the multi-scale temporal discriminator heads.

    BUG FIX: the original default was ``window=[]``, but ``window`` is
    compared against integers below (``window >= 12``), so calling with the
    default always raised TypeError under Python 3. An int default of 0
    keeps every call that passed an explicit int behaving exactly as before.

    :param opts: option namespace providing ``nfilterD``.
    :param conv3d_dim: base channel count for the temporal conv stacks.
    :param window: int temporal window length; larger windows add extra
        (extractor, head) pairs at coarser temporal scales.
    :return: (extractors, finals) — parallel lists of nn.Sequential modules.
    """
    # Input feature width: discriminator filters x16, doubled for the pair input.
    in_dim = opts.nfilterD * 16
    in_dim = in_dim * 2
    extractors, finals = [], []
    # Spectral-norm conv factory shared by every scale.
    which_conv = functools.partial(layers.SNConv2d,
                                   kernel_size=3, padding=0,
                                   num_svs=1, num_itrs=1,
                                   eps=1e-12)
    # Base temporal scale: always present.
    net1 = nn.Sequential(
        which_conv(in_dim, conv3d_dim // 4, kernel_size=(3, 1), stride=(2, 1)),
        nn.LeakyReLU(0.2)
    )
    head1 = nn.Sequential(
        which_conv(conv3d_dim // 4, 1, kernel_size=(2, 1), stride=(1, 1)),
    )
    extractors.append(net1)
    finals.append(head1)
    if window >= 12:
        net2 = nn.Sequential(
            which_conv(conv3d_dim // 4, conv3d_dim // 2, kernel_size=(3, 1), stride=(1, 1)),
            nn.LeakyReLU(0.2),
        )
        head2 = nn.Sequential(
            which_conv(conv3d_dim // 2, 1, kernel_size=(3, 1)),
        )
        extractors.append(net2)
        finals.append(head2)
    if window >= 18:
        net3 = nn.Sequential(
            which_conv(conv3d_dim // 2, conv3d_dim, kernel_size=(2, 1), stride=(2, 1)),
            nn.LeakyReLU(0.2),
        )
        head3 = nn.Sequential(
            which_conv(conv3d_dim, 1, kernel_size=(3, 1)),
        )
        extractors.append(net3)
        finals.append(head3)
    if window >= 36:
        net4 = nn.Sequential(
            which_conv(conv3d_dim, conv3d_dim, kernel_size=(2, 1), stride=(2, 1)),
            nn.LeakyReLU(0.2),
        )
        head4 = nn.Sequential(
            which_conv(conv3d_dim, 1, kernel_size=(3, 1)),
        )
        extractors.append(net4)
        finals.append(head4)
    return extractors, finals
| StarcoderdataPython |
9603744 | from .commands import plot_kraken
| StarcoderdataPython |
327025 | <gh_stars>1-10
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from typing import Any, Callable, List, Optional, Type, Union
import torch
import torch.nn as nn
from monai.networks.layers.factories import Conv, Norm, Pool
from monai.networks.layers.utils import get_pool_layer
from monai.utils.module import look_up_option
# Names exported by `from <module> import *`.
__all__ = ["ResNet", "resnet10", "resnet18", "resnet34", "resnet50", "resnet101", "resnet152", "resnet200"]
from monai.utils import deprecated_arg
def get_inplanes():
    """Base channel widths for the four ResNet stages (64, 128, 256, 512)."""
    return [64 * 2 ** stage for stage in range(4)]
def get_avgpool():
    """Adaptive-average-pool output sizes indexed by spatial_dims (index 0 unused)."""
    pool_sizes = [0, 1, (1, 1), (1, 1, 1)]
    return pool_sizes
def get_conv1(conv1_t_size: int, conv1_t_stride: int):
    """Kernel/stride/padding options for the stem conv, indexed by spatial_dims.

    Index 0 is unused; indices 1-3 give the 1D/2D/3D variants. The first
    (temporal) axis uses conv1_t_size / conv1_t_stride while the remaining
    axes are fixed at kernel 7, stride 2, padding 3.
    """
    half = conv1_t_size // 2
    kernels = [0, conv1_t_size, (conv1_t_size, 7), (conv1_t_size, 7, 7)]
    strides = [0, conv1_t_stride, (conv1_t_stride, 2), (conv1_t_stride, 2, 2)]
    paddings = [0, half, (half, 3), (half, 3, 3)]
    return kernels, strides, paddings
class ResNetBlock(nn.Module):
    # Basic (two-conv) residual block; output channel count equals `planes`.
    expansion = 1

    def __init__(
        self,
        in_planes: int,
        planes: int,
        spatial_dims: int = 3,
        stride: int = 1,
        downsample: Union[nn.Module, partial, None] = None,
    ) -> None:
        """
        Args:
            in_planes: number of input channels.
            planes: number of output channels.
            spatial_dims: number of spatial dimensions of the input image.
            stride: stride to use for first conv layer.
            downsample: which downsample layer to use.
        """
        super().__init__()

        # Dimension-generic layer factories (1D/2D/3D picked by spatial_dims).
        conv_type: Callable = Conv[Conv.CONV, spatial_dims]
        norm_type: Callable = Norm[Norm.BATCH, spatial_dims]

        self.conv1 = conv_type(in_planes, planes, kernel_size=3, padding=1, stride=stride, bias=False)
        self.bn1 = norm_type(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv_type(planes, planes, kernel_size=3, padding=1, bias=False)
        self.bn2 = norm_type(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """conv-bn-relu-conv-bn plus the (optionally downsampled) identity."""
        residual = x

        out: torch.Tensor = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            # Match the shortcut's shape/channels to `out` before adding.
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out
class ResNetBottleneck(nn.Module):
    # Bottleneck block: 1x1 reduce -> 3x3 (strided) -> 1x1 expand (x4 channels).
    expansion = 4

    def __init__(
        self,
        in_planes: int,
        planes: int,
        spatial_dims: int = 3,
        stride: int = 1,
        downsample: Union[nn.Module, partial, None] = None,
    ) -> None:
        """
        Args:
            in_planes: number of input channels.
            planes: number of output channels (taking expansion into account).
            spatial_dims: number of spatial dimensions of the input image.
            stride: stride to use for second conv layer.
            downsample: which downsample layer to use.
        """
        super().__init__()

        # Dimension-generic layer factories (1D/2D/3D picked by spatial_dims).
        conv_type: Callable = Conv[Conv.CONV, spatial_dims]
        norm_type: Callable = Norm[Norm.BATCH, spatial_dims]

        self.conv1 = conv_type(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = norm_type(planes)
        self.conv2 = conv_type(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = norm_type(planes)
        self.conv3 = conv_type(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = norm_type(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Three conv-bn stages plus the (optionally downsampled) identity."""
        residual = x

        out: torch.Tensor = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            # Match the shortcut's shape/channels to `out` before adding.
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out
class ResNet(nn.Module):
    """
    ResNet based on: `Deep Residual Learning for Image Recognition <https://arxiv.org/pdf/1512.03385.pdf>`_
    and `Can Spatiotemporal 3D CNNs Retrace the History of 2D CNNs and ImageNet? <https://arxiv.org/pdf/1711.09577.pdf>`_.
    Adapted from `<https://github.com/kenshohara/3D-ResNets-PyTorch/tree/master/models>`_.

    Args:
        block: which ResNet block to use, either Basic or Bottleneck.
        layers: how many layers to use.
        block_inplanes: determine the size of planes at each step. Also tunable with widen_factor.
        spatial_dims: number of spatial dimensions of the input image.
        n_input_channels: number of input channels for first convolutional layer.
        conv1_t_size: size of first convolution layer, determines kernel and padding.
        conv1_t_stride: stride of first convolution layer.
        no_max_pool: bool argument to determine if to use maxpool layer.
        shortcut_type: which downsample block to use. Options are 'A', 'B', default to 'B'.
            - 'A': using `self._downsample_basic_block`.
            - 'B': kernel_size 1 conv + norm.
        widen_factor: widen output for each layer.
        num_classes: number of output (classifications).
        feed_forward: whether to add the FC layer for the output, default to `True`.

    .. deprecated:: 0.6.0
        ``n_classes`` is deprecated, use ``num_classes`` instead.
    """

    @deprecated_arg("n_classes", since="0.6")
    def __init__(
        self,
        block: Type[Union[ResNetBlock, ResNetBottleneck]],
        layers: List[int],
        block_inplanes: List[int],
        spatial_dims: int = 3,
        n_input_channels: int = 3,
        conv1_t_size: int = 7,
        conv1_t_stride: int = 1,
        no_max_pool: bool = False,
        shortcut_type: str = "B",
        widen_factor: float = 1.0,
        num_classes: int = 400,
        feed_forward: bool = True,
        n_classes: Optional[int] = None,
    ) -> None:
        super().__init__()

        # in case the new num_classes is default but you still call deprecated n_classes
        if n_classes is not None and num_classes == 400:
            num_classes = n_classes

        # Dimension-generic layer factories (1D/2D/3D picked by spatial_dims).
        conv_type: Type[Union[nn.Conv1d, nn.Conv2d, nn.Conv3d]] = Conv[Conv.CONV, spatial_dims]
        norm_type: Type[Union[nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d]] = Norm[Norm.BATCH, spatial_dims]
        pool_type: Type[Union[nn.MaxPool1d, nn.MaxPool2d, nn.MaxPool3d]] = Pool[Pool.MAX, spatial_dims]
        avgp_type: Type[Union[nn.AdaptiveAvgPool1d, nn.AdaptiveAvgPool2d, nn.AdaptiveAvgPool3d]] = Pool[
            Pool.ADAPTIVEAVG, spatial_dims
        ]

        block_avgpool = get_avgpool()
        conv1_kernel, conv1_stride, conv1_padding = get_conv1(conv1_t_size, conv1_t_stride)
        block_inplanes = [int(x * widen_factor) for x in block_inplanes]

        self.in_planes = block_inplanes[0]
        self.no_max_pool = no_max_pool

        # Stem: large-kernel conv whose temporal axis is configurable.
        self.conv1 = conv_type(
            n_input_channels,
            self.in_planes,
            kernel_size=conv1_kernel[spatial_dims],
            stride=conv1_stride[spatial_dims],
            padding=conv1_padding[spatial_dims],
            bias=False,
        )
        self.bn1 = norm_type(self.in_planes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = pool_type(kernel_size=3, stride=2, padding=1)
        # Four residual stages; stages 2-4 halve the spatial resolution.
        self.layer1 = self._make_layer(block, block_inplanes[0], layers[0], spatial_dims, shortcut_type)
        self.layer2 = self._make_layer(block, block_inplanes[1], layers[1], spatial_dims, shortcut_type, stride=2)
        self.layer3 = self._make_layer(block, block_inplanes[2], layers[2], spatial_dims, shortcut_type, stride=2)
        self.layer4 = self._make_layer(block, block_inplanes[3], layers[3], spatial_dims, shortcut_type, stride=2)
        self.avgpool = avgp_type(block_avgpool[spatial_dims])
        self.fc = nn.Linear(block_inplanes[3] * block.expansion, num_classes) if feed_forward else None

        # Kaiming init for convs, unit scale for norms, zero bias for Linear.
        for m in self.modules():
            if isinstance(m, conv_type):
                nn.init.kaiming_normal_(torch.as_tensor(m.weight), mode="fan_out", nonlinearity="relu")
            elif isinstance(m, norm_type):
                nn.init.constant_(torch.as_tensor(m.weight), 1)
                nn.init.constant_(torch.as_tensor(m.bias), 0)
            elif isinstance(m, nn.Linear):
                nn.init.constant_(torch.as_tensor(m.bias), 0)

    def _downsample_basic_block(self, x: torch.Tensor, planes: int, stride: int, spatial_dims: int = 3) -> torch.Tensor:
        """Shortcut type 'A': strided average pool, then zero-pad channels to `planes`."""
        out: torch.Tensor = get_pool_layer(("avg", {"kernel_size": 1, "stride": stride}), spatial_dims=spatial_dims)(x)
        zero_pads = torch.zeros(out.size(0), planes - out.size(1), *out.shape[2:], dtype=out.dtype, device=out.device)
        out = torch.cat([out.data, zero_pads], dim=1)
        return out

    def _make_layer(
        self,
        block: Type[Union[ResNetBlock, ResNetBottleneck]],
        planes: int,
        blocks: int,
        spatial_dims: int,
        shortcut_type: str,
        stride: int = 1,
    ) -> nn.Sequential:
        """Stack `blocks` residual blocks for one stage, inserting a shortcut
        (type 'A' pooling or type 'B' 1x1 conv) when the shape changes."""
        conv_type: Callable = Conv[Conv.CONV, spatial_dims]
        norm_type: Callable = Norm[Norm.BATCH, spatial_dims]

        downsample: Union[nn.Module, partial, None] = None
        if stride != 1 or self.in_planes != planes * block.expansion:
            if look_up_option(shortcut_type, {"A", "B"}) == "A":
                downsample = partial(
                    self._downsample_basic_block,
                    planes=planes * block.expansion,
                    stride=stride,
                    spatial_dims=spatial_dims,
                )
            else:
                downsample = nn.Sequential(
                    conv_type(self.in_planes, planes * block.expansion, kernel_size=1, stride=stride),
                    norm_type(planes * block.expansion),
                )

        layers = [
            block(
                in_planes=self.in_planes, planes=planes, spatial_dims=spatial_dims, stride=stride, downsample=downsample
            )
        ]

        # Remaining blocks in the stage keep the resolution (stride 1).
        self.in_planes = planes * block.expansion
        for _i in range(1, blocks):
            layers.append(block(self.in_planes, planes, spatial_dims=spatial_dims))

        return nn.Sequential(*layers)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Stem conv -> 4 residual stages -> adaptive avg-pool -> flatten -> optional FC."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        if not self.no_max_pool:
            x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)

        x = x.view(x.size(0), -1)
        if self.fc is not None:
            x = self.fc(x)

        return x
def _resnet(
    arch: str,
    block: Type[Union[ResNetBlock, ResNetBottleneck]],
    layers: List[int],
    block_inplanes: List[int],
    pretrained: bool,
    progress: bool,
    **kwargs: Any,
) -> ResNet:
    """Instantiate a ResNet; pretrained-weight loading is not implemented.

    Args:
        arch: architecture name (informational only).
        block: residual block class (basic or bottleneck).
        layers: number of blocks per stage.
        block_inplanes: base channel widths per stage.
        pretrained: must be False; requesting pretrained weights raises.
        progress: download-progress flag (unused while pretrained is unsupported).
    """
    model: ResNet = ResNet(block, layers, block_inplanes, **kwargs)
    if pretrained:
        # Author of paper zipped the state_dict on googledrive,
        # so would need to download, unzip and read (2.8gb file for a ~150mb state dict).
        # Would like to load dict from url but need somewhere to save the state dicts.
        raise NotImplementedError(
            "Currently not implemented. You need to manually download weights provided by the paper's author"
            " and load then to the model with `state_dict`. See https://github.com/Tencent/MedicalNet"
        )
    return model
def resnet10(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-10 with optional pretrained support when `spatial_dims` is 3.

    Pretraining from `Med3D: Transfer Learning for 3D Medical Image Analysis <https://arxiv.org/pdf/1904.00625.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on 23 medical datasets
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Basic blocks, 1 per stage: (1+1+1+1)*2 convs + stem + fc = 10 weighted layers.
    return _resnet("resnet10", ResNetBlock, [1, 1, 1, 1], get_inplanes(), pretrained, progress, **kwargs)
def resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-18 with optional pretrained support when `spatial_dims` is 3.

    Pretraining from `Med3D: Transfer Learning for 3D Medical Image Analysis <https://arxiv.org/pdf/1904.00625.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on 23 medical datasets
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Basic blocks, 2 per stage.
    return _resnet("resnet18", ResNetBlock, [2, 2, 2, 2], get_inplanes(), pretrained, progress, **kwargs)
def resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-34 with optional pretrained support when `spatial_dims` is 3.

    Pretraining from `Med3D: Transfer Learning for 3D Medical Image Analysis <https://arxiv.org/pdf/1904.00625.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on 23 medical datasets
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Basic blocks with the classic 3-4-6-3 stage layout.
    return _resnet("resnet34", ResNetBlock, [3, 4, 6, 3], get_inplanes(), pretrained, progress, **kwargs)
def resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-50 with optional pretrained support when `spatial_dims` is 3.

    Pretraining from `Med3D: Transfer Learning for 3D Medical Image Analysis <https://arxiv.org/pdf/1904.00625.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on 23 medical datasets
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Bottleneck blocks with the 3-4-6-3 stage layout.
    return _resnet("resnet50", ResNetBottleneck, [3, 4, 6, 3], get_inplanes(), pretrained, progress, **kwargs)
def resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-101 with optional pretrained support when `spatial_dims` is 3.

    Pretraining from `Med3D: Transfer Learning for 3D Medical Image Analysis <https://arxiv.org/pdf/1904.00625.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on 8 medical datasets
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Bottleneck blocks; the deep third stage (23 blocks) gives 101 layers.
    return _resnet("resnet101", ResNetBottleneck, [3, 4, 23, 3], get_inplanes(), pretrained, progress, **kwargs)
def resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-152 with optional pretrained support when `spatial_dims` is 3.

    Pretraining from `Med3D: Transfer Learning for 3D Medical Image Analysis <https://arxiv.org/pdf/1904.00625.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on 8 medical datasets
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Bottleneck blocks with the 3-8-36-3 stage layout.
    return _resnet("resnet152", ResNetBottleneck, [3, 8, 36, 3], get_inplanes(), pretrained, progress, **kwargs)
def resnet200(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
    """ResNet-200 with optional pretrained support when `spatial_dims` is 3.

    Pretraining from `Med3D: Transfer Learning for 3D Medical Image Analysis <https://arxiv.org/pdf/1904.00625.pdf>`_.

    Args:
        pretrained (bool): If True, returns a model pre-trained on 8 medical datasets
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Bottleneck blocks with the 3-24-36-3 stage layout.
    return _resnet("resnet200", ResNetBottleneck, [3, 24, 36, 3], get_inplanes(), pretrained, progress, **kwargs)
| StarcoderdataPython |
9775178 | <filename>src/models/architectures/tools/interp_utils.py
import torch
import torch.nn as nn
import numpy as np
from scipy.interpolate import interp1d
# from lib.utils.geometry import rotation_matrix_to_angle_axis, rot6d_to_rotmat
# from lib.models.smpl import SMPL, SMPL_MODEL_DIR, H36M_TO_J14, SMPL_MEAN_PARAMS
from .interp1d import Interp1d
def projection(pred_joints, pred_camera):
    """Project 3D joints to normalized 2D image coordinates using the
    network-predicted camera (scale, tx, ty).

    :param pred_joints: (bs, N, 3) 3D joint positions.
    :param pred_camera: (bs, 3) per-sample camera as (scale, tx, ty).
    :return: (bs, N, 2) keypoints normalized to [-1, 1] for a 224px crop.
    """
    # Convert the predicted scale into a depth translation assuming a fixed
    # focal length of 5000 and a 224px image; 1e-9 avoids division by zero.
    pred_cam_t = torch.stack([pred_camera[:, 1],
                              pred_camera[:, 2],
                              2 * 5000. / (224. * pred_camera[:, 0] + 1e-9)], dim=-1)
    batch_size = pred_joints.shape[0]
    # NOTE(review): camera_center is created on CPU while pred_joints may live
    # on GPU — relies on cross-device assignment inside perspective_projection;
    # confirm on CUDA.
    camera_center = torch.zeros(batch_size, 2)
    pred_keypoints_2d = perspective_projection(pred_joints,
                                               rotation=torch.eye(3).unsqueeze(0).expand(batch_size, -1, -1).to(pred_joints.device),
                                               translation=pred_cam_t,
                                               focal_length=5000.,
                                               camera_center=camera_center)
    # Normalize keypoints to [-1,1]
    pred_keypoints_2d = pred_keypoints_2d / (224. / 2.)
    return pred_keypoints_2d
def perspective_projection(points, rotation, translation,
                           focal_length, camera_center):
    """Apply a full perspective camera to a batch of 3D points.

    :param points: (bs, N, 3) 3D points.
    :param rotation: (bs, 3, 3) camera rotation matrices.
    :param translation: (bs, 3) camera translations.
    :param focal_length: scalar or (bs,) focal length(s).
    :param camera_center: (bs, 2) principal points.
    :return: (bs, N, 2) projected 2D points.
    """
    bs = points.shape[0]
    # Assemble the intrinsic matrix K for every sample in the batch.
    intrinsics = torch.zeros([bs, 3, 3], device=points.device)
    intrinsics[:, 0, 0] = focal_length
    intrinsics[:, 1, 1] = focal_length
    intrinsics[:, 2, 2] = 1.
    intrinsics[:, :-1, -1] = camera_center
    # Rigid transform into the camera frame: R @ p + t.
    cam_points = torch.einsum('bij,bkj->bki', rotation, points)
    cam_points = cam_points + translation.unsqueeze(1)
    # Perspective divide by depth.
    cam_points = cam_points / cam_points[:, :, -1].unsqueeze(-1)
    # Map to pixel coordinates and drop the homogeneous coordinate.
    pixels = torch.einsum('bij,bkj->bki', intrinsics, cam_points)
    return pixels[:, :, :-1]
class Dilator(nn.Module):
    """Temporally subsample a dict of (batch, time, ...) tensors by keeping
    every ``dilation_rate``-th frame; the final frame is always kept."""
    def __init__(
        self,
        dilation_rate=1,
        temporal_axis=1,
    ):
        super(Dilator, self).__init__()
        assert (type(dilation_rate) == int)
        self.dilation_rate = dilation_rate
        if temporal_axis != 1:
            raise ValueError('currently supporting only temporal_axis=1 (got {})'.format(temporal_axis))
        # Rate 1 means pass-through (no dilation).
        self.is_dilated = self.dilation_rate > 1
        if self.is_dilated:
            print('Dilator will perform dilation with rate [{}]'.format(self.dilation_rate))
        else:
            print('No dilation!')
        self.temporal_axis = temporal_axis
    def forward(self, inp):
        """Subsample every tensor in *inp* along the time axis.

        NOTE(review): the two paths return different arities — the undilated
        path returns just ``inp`` while the dilated path returns
        ``(out, sample_timeline)``; confirm callers handle both.
        """
        if not self.is_dilated:
            return inp
        # Sequence length read from an arbitrary entry; all entries must
        # share it (asserted per-tensor below).
        seqlen = list(inp.values())[0].shape[self.temporal_axis]
        timeline = torch.arange(seqlen).to(list(inp.values())[0].device)
        sample_timeline = timeline[0::self.dilation_rate]
        # Always keep the last frame so the sampled timeline spans the sequence.
        if sample_timeline[-1] != timeline[-1]:
            # sample_timeline = np.append(sample_timeline, timeline[-1])
            sample_timeline = torch.cat((sample_timeline, timeline[-1].view(1,)))
        out = {}
        for k, v in inp.items():
            if hasattr(v, 'shape'):
                assert v.shape[self.temporal_axis] == seqlen
                out[k] = v[:, sample_timeline, ...]  # temporal_axis=1
            else:
                print('WARNING: [{}] has no attribute shape, dilation was not operated.'.format(k))
        return out, sample_timeline
class Interpolator(nn.Module):
    """Upsample a dict of (batch, time, ...) tensors from a sparse timeline
    back onto the dense timeline 0..last via scipy's interp1d (CPU/numpy)."""

    def __init__(
        self,
        interp_type='linear',
        temporal_axis=1,
    ):
        super().__init__()
        if temporal_axis != 1:
            raise ValueError('currently supporting only temporal_axis=1 (got {})'.format(temporal_axis))
        self.interp_type = interp_type
        self.temporal_axis = temporal_axis
        print('Interpolator - running [{}]'.format(self.interp_type))

    def forward(self, inp, inp_timeline):
        """Interpolate every tensor in *inp* from *inp_timeline* onto the
        dense timeline; returns *inp* unchanged when already dense."""
        # TODO - implement with torch (scipy round-trips through numpy).
        reference = list(inp.values())[0]
        sparse_len = reference.shape[self.temporal_axis]
        # The timeline is assumed to include the final time step.
        dense_len = int(inp_timeline[-1].item()) + 1
        assert len(inp_timeline) == sparse_len
        if sparse_len == dense_len:
            print('WARNING - Interpolator: interpolation was not operated.')
            return inp
        dense_timeline = np.arange(dense_len)
        sparse_np = inp_timeline.cpu().numpy()
        result = {}
        for key, tensor in inp.items():
            fn = interp1d(sparse_np, tensor.cpu().numpy(),
                          axis=self.temporal_axis, kind=self.interp_type)
            dense = fn(dense_timeline)
            expected = list(tensor.shape)
            expected[self.temporal_axis] = dense_len
            assert list(dense.shape) == expected
            result[key] = torch.tensor(dense, device=tensor.device, dtype=torch.float32)
        return result
class DiffInterpolator(nn.Module):
    """Differentiable linear interpolation along the temporal axis (axis 1).

    Based on torchinterp1d's ``Interp1d``; for differentiability the sample
    points (``inp_timeline``) are assumed to be sorted.
    """

    def __init__(
        self,
        interp_type='linear',
        sample_type='non_adaptive',
        temporal_axis=1,
    ):
        super(DiffInterpolator, self).__init__()
        if temporal_axis != 1:
            raise ValueError('currently supporting only temporal_axis=1 (got {})'.format(temporal_axis))
        if interp_type not in ['linear']:
            raise ValueError('Unsupported interp_type [{}]'.format(interp_type))
        # BUGFIX: the default sample_type is 'non_adaptive' (underscore) but the
        # old check only accepted 'non-adaptive' (hyphen), so constructing with
        # defaults always raised. Accept both spellings, and report the correct
        # parameter name in the error message.
        if sample_type not in ['non_adaptive', 'non-adaptive']:
            raise ValueError('Unsupported sample_type [{}]'.format(sample_type))
        self.interp_type = interp_type
        self.sample_type = sample_type
        self.temporal_axis = temporal_axis
        self.interpolator = Interp1d()
        print('Diff Interpolator - running [{}, {}]'.format(self.interp_type, self.sample_type))

    def forward(self, inp, inp_timeline):
        """Resample every tensor in `inp` (dict) from `inp_timeline` onto the
        dense integer timeline [0, inp_timeline[-1]].

        Returns the input dict unchanged when the timeline is already dense.
        """
        orig_seqlen = list(inp.values())[0].shape[self.temporal_axis]
        out_seqlen = inp_timeline[-1] + 1 # assuming timeline must include the last time step
        assert len(inp_timeline) == orig_seqlen
        out_timeline = torch.arange(out_seqlen).to(list(inp.values())[0].device)
        if orig_seqlen == out_seqlen:
            print('WARNING - Interpolator: interpolation was not operated.')
            return inp
        interped = {}
        for k in inp.keys():
            interped[k] = self.interp(inp_timeline, inp[k], out_timeline)
            # Only the temporal axis may change length.
            expected_shape = list(inp[k].shape)
            expected_shape[self.temporal_axis] = out_seqlen
            assert interped[k].shape == torch.Size(expected_shape)
        return interped

    def interp(self, x, y, xnew):
        """Wrapper around Interp1d.

        Flattens `y` to rows of length in_seqlen (temporal axis last),
        interpolates every row at the shared query points `xnew`, then
        restores the original layout with the new temporal length.
        """
        orig_shape = y.shape  # e.g. [bs, in_seqlen, f]
        in_seqlen = orig_shape[self.temporal_axis]
        out_seqlen = xnew.shape[0]
        out_shape = list(orig_shape)
        out_shape[self.temporal_axis] = out_seqlen  # [bs, out_seqlen, [f]]
        out_shape = torch.Size(out_shape)
        # Same shape but with the temporal axis swapped to the end, which is
        # how the flattened interpolation result comes back.
        out_shape_before_swap = list(out_shape)  # [bs, [f], out_seqlen]
        out_shape_before_swap[self.temporal_axis] = out_shape[-1]
        out_shape_before_swap[-1] = out_shape[self.temporal_axis]
        out_shape_before_swap = torch.Size(out_shape_before_swap)
        _y = torch.transpose(y, self.temporal_axis, -1)
        _y = _y.reshape(-1, in_seqlen)
        _x = torch.tile(x.view(1, -1), (_y.shape[0], 1))
        # xnew is passed 1-D on purpose: Interp1d broadcasts a single query
        # timeline across all rows, so tiling it (as _x is) is unnecessary.
        ynew = self.interpolator(_x, _y, xnew).view(out_shape_before_swap)
        return torch.transpose(ynew, -1, self.temporal_axis)
# class GeometricProcess(nn.Module):
# def __init__(
# self
# ):
# super(GeometricProcess, self).__init__()
#
# self.smpl = SMPL(
# SMPL_MODEL_DIR,
# batch_size=64,
# create_transl=False
# )
#
# def forward(self, pred, J_regressor=None):
#
# bs, seqlen, _ = pred['cam'].shape
# flat_bs = bs * seqlen
#
# # flatten
# flat = {}
# flat['pose'] = pred['pose'].reshape(flat_bs, -1, 6)
# flat['shape'] = pred['shape'].reshape(flat_bs, 10)
# flat['cam'] = pred['cam'].reshape(flat_bs, 3)
#
# pred_rotmat = rot6d_to_rotmat(flat['pose']).view(flat_bs, 24, 3, 3)
# # print(pred_rotmat.device)
# # for k, v in pred.items():
# # print('{}: {}, {}'.format(k, v.shape, v.device))
# pred_output = self.smpl(
# betas=flat['shape'],
# body_pose=pred_rotmat[:, 1:],
# global_orient=pred_rotmat[:, 0].unsqueeze(1),
# pose2rot=False
# )
#
# pred_vertices = pred_output.vertices
# pred_joints = pred_output.joints
#
# if J_regressor is not None:
# J_regressor_batch = J_regressor[None, :].expand(pred_vertices.shape[0], -1, -1).to(pred_vertices.device)
# pred_joints = torch.matmul(J_regressor_batch, pred_vertices)
# pred_joints = pred_joints[:, H36M_TO_J14, :]
#
# pred_keypoints_2d = projection(pred_joints, flat['cam'])
#
# pose = rotation_matrix_to_angle_axis(pred_rotmat.reshape(-1, 3, 3)).reshape(-1, 72)
#
# smpl_output = {
# 'theta' : torch.cat([flat['cam'], pose, flat['shape']], dim=1),
# 'verts' : pred_vertices,
# 'kp_2d' : pred_keypoints_2d,
# 'kp_3d' : pred_joints,
# 'rotmat' : pred_rotmat
# }
#
# smpl_output['theta'] = smpl_output['theta'].reshape(bs, seqlen, -1)
# smpl_output['verts'] = smpl_output['verts'].reshape(bs, seqlen, -1, 3)
# smpl_output['kp_2d'] = smpl_output['kp_2d'].reshape(bs, seqlen, -1, 2)
# smpl_output['kp_3d'] = smpl_output['kp_3d'].reshape(bs, seqlen, -1, 3)
# smpl_output['rotmat'] = smpl_output['rotmat'].reshape(bs, seqlen, -1, 3, 3)
#
# return smpl_output
class Smoother(nn.Module):
    """Temporal moving-average smoother.

    Applies a box filter of length ``filter_size`` along the temporal axis
    (axis 1) of every tensor in the input dict, independently per feature
    channel. Output shapes match input shapes ('same' padding).
    """

    def __init__(
        self,
        filter_size,
        temporal_axis=1,
    ):
        super(Smoother, self).__init__()
        self.filter_size = filter_size
        if temporal_axis != 1:
            raise ValueError('currently supporting only temporal_axis=1 (got {})'.format(temporal_axis))
        self.temporal_axis = temporal_axis
        # Averaging kernel, laid out as [out_ch, in_ch, filter_h, filter_w].
        self.filter = torch.ones([1, 1, 1, self.filter_size]) / self.filter_size

    def forward(self, inp):
        """Smooth every tensor in `inp` (dict of [bs, seqlen, ...] tensors)."""
        smoothed = {}
        for k in inp.keys():
            orig_shape = inp[k].shape
            smoothed[k] = inp[k]
            # Flatten trailing feature dims so the tensor is [bs, seqlen, n_features].
            if len(orig_shape) > 3:
                smoothed[k] = smoothed[k].view(orig_shape[0], orig_shape[1], -1)
            smoothed[k] = torch.swapaxes(smoothed[k], self.temporal_axis, -1)
            smoothed[k] = smoothed[k].unsqueeze(1)  # [bs, in_ch(1), n_features, seqlen]
            # conv over the temporal (width) dimension only
            smoothed[k] = torch.nn.functional.conv2d(smoothed[k], self.filter.to(smoothed[k].device), padding='same')
            # BUGFIX: squeeze only the channel dim we added. A bare .squeeze()
            # also collapsed bs==1 or n_features==1 dims, after which the
            # swapaxes/view below silently reassembled the data in the wrong
            # order.
            smoothed[k] = smoothed[k].squeeze(1)
            smoothed[k] = torch.swapaxes(smoothed[k], self.temporal_axis, -1)
            smoothed[k] = smoothed[k].view(orig_shape)
            assert smoothed[k].shape == inp[k].shape
        return smoothed
class DataObject:
    """Represents one scraped document as a plain Python object."""

    def __init__(self, content, title, source, date):
        """Initialize the data object.

        @param content: body text of the scraped data.
        @param title: title of the scraped data.
        @param source: source the data was scraped from.
        @param date: publication date of the data.
        """
        self.content = content
        self.title = title
        self.source = source
        self.date = date

    def __json__(self):
        """Return a JSON-serializable dict of this object.

        NOTE(review): `date` is not included here — presumably intentional,
        but confirm with consumers of this serialization.
        """
        return {
            "content": self.content,
            "title": self.title,
            "source": self.source
        }
| StarcoderdataPython |
# In order to compute the collision percentage,
# just uncomment from createRT.c the 2 printfs
# to the create_chain function. Then compile
# again, and run "./rt.out 5 > passes.csv".
# Finally run "python collision_detector.py".
#
# NOTE: modernized from Python 2 `print` statements (Python 2 is EOL);
# the file is now opened via a context manager so the handle is closed.
with open('passes.csv') as f:
    words = set(f.read().split())
unique = len(words)
print("Unique ", unique, "/ 10000 words.")
print((1 - (unique / float(10000))) * 100, "% collisions")
# setup.py -- packaging metadata for the MapR-DB Python client.
from setuptools import setup, find_packages

setup(name='maprdb_python_client',
      version='1.1.4',
      description='MapR-DB Python Client',
      url='https://github.com/mapr/maprdb-python-client/',
      author='MapR, Inc.',
      keywords='ojai python client mapr maprdb',
      packages=find_packages(exclude=['test*', 'docs*', 'examples*']),
      install_requires=['aenum>=2.0.10', 'grpcio>=1.9.1', 'grpcio-tools>=1.9.1', 'ojai-python-api>=1.1',
                        'python-dateutil>=2.6.1', 'retrying>=1.3.3', 'future>=0.16.0'],
      # BUGFIX: '>=2.7.*' is not a valid PEP 440 specifier (the '.*' suffix is
      # only allowed with == / !=); modern setuptools rejects it.
      python_requires='>=2.7',
      long_description='A simple, lightweight library that provides access to MapR-DB.'
                       ' The client library supports all existing OJAI functionality'
                       ' and is absolutely compatible with Java OJAI connector,'
                       ' that runs under the MapR Data Access Gateway.'
      )
| StarcoderdataPython |
280196 | <gh_stars>10-100
# Copyright 2021 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pathlib
import pytest
from rosidl_cli.command.helpers import interface_path_as_tuple
from rosidl_cli.command.helpers import legacy_generator_arguments_file
def test_interface_path_as_tuple():
    """interface_path_as_tuple splits 'prefix:path' specs; absolute prefixes
    are resolved, relative ones are anchored at the cwd, and a bare path gets
    the cwd itself as prefix."""
    cwd = pathlib.Path.cwd()
    expected_path = pathlib.Path('msg/Empty.idl')
    cases = [
        ('/tmp:msg/Empty.idl', pathlib.Path(os.path.abspath('/tmp'))),
        ('tmp:msg/Empty.idl', cwd / 'tmp'),
        ('msg/Empty.idl', cwd),
    ]
    for spec, expected_prefix in cases:
        prefix, path = interface_path_as_tuple(spec)
        assert expected_path == path
        assert expected_prefix == prefix
@pytest.fixture
def current_path(request):
    """Chdir into the requesting test module's directory for the duration of
    the test, yielding that directory and restoring the previous cwd after."""
    module_dir = pathlib.Path(request.module.__file__).resolve().parent
    original_cwd = pathlib.Path.cwd()
    os.chdir(str(module_dir))
    try:
        yield module_dir
    finally:
        os.chdir(str(original_cwd))
def test_legacy_generator_arguments_file(current_path):
    """Check that the generated legacy arguments JSON resolves all paths
    relative to the test module directory (provided by the current_path
    fixture) and is deleted when the context manager exits."""
    with legacy_generator_arguments_file(
        package_name='foo',
        interface_files=['msg/Foo.idl'],
        include_paths=['test_files/bar'],
        templates_path='templates',
        output_path='tmp',
    ) as path:
        with open(path, 'r') as fd:
            args = json.load(fd)
        # All relative inputs must have been absolutized against the cwd.
        assert args['package_name'] == 'foo'
        assert args['output_dir'] == str(current_path / 'tmp')
        assert args['template_dir'] == str(current_path / 'templates')
        assert args['idl_tuples'] == [f'{current_path}:msg/Foo.idl']
        path_to_dep = pathlib.Path('test_files/bar/msg/Bar.idl')
        assert args['ros_interface_dependencies'] == [
            'bar:' + str(current_path / path_to_dep)
        ]
    # The arguments file is temporary and must be gone after the with-block.
    assert not pathlib.Path(path).exists()
| StarcoderdataPython |
189439 | <reponame>PatchyVideo/PatchyVideo
from .init import routes, init_funcs
import os
import sys
import time
import asyncio
import traceback
import PIL
import copy
from aiohttp import web
from aiohttp import ClientSession
from bson.json_util import dumps, loads
from init import rdb
from utils.jsontools import *
from utils.dbtools import makeUserMeta, MongoTransaction
from utils.crypto import random_bytes_str
from utils.http import clear_url
from utils.rwlock_async import modifyingResourceAsync, usingResourceAsync
from utils.lock_async import RedisLockAsync
from utils.exceptions import UserError
from .video import dispatch
from db import tagdb, db, client, playlist_db
from bson import ObjectId
from services.playlist import addVideoToPlaylist, addVideoToPlaylistLockFree, insertIntoPlaylist, insertIntoPlaylistLockFree
from services.tcb import filterOperation
from services.autotag import inferTagsFromVideo
from config import VideoConfig
from PIL import Image, ImageSequence
from utils.logger import log_e, setEventUserAndID, setEventOp
from config import PlaylistConfig
from datetime import datetime
import io
import json
_COVER_PATH = os.getenv('IMAGE_PATH', "/images") + "/covers/"
def _gif_thumbnails(frames):
    """Yield 320x200-bounded thumbnail copies of each GIF frame.

    Each frame is copied before `thumbnail` so the source sequence is not
    mutated.
    """
    for frame in frames:
        thumbnail = frame.copy()
        # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
        thumbnail.thumbnail((320, 200), Image.LANCZOS)
        yield thumbnail
# TODO: maybe make save image async?
def _cleanUtags(utags) :
utags = [utag.replace(' ', '') for utag in utags]
return list(set(utags))
_download_sem = asyncio.Semaphore(10)
async def notify_video_update(vid) :
    """POST the video id to the related-video-finder service so it re-indexes
    the video. The response body is ignored; network errors propagate."""
    async with ClientSession() as session:
        async with session.post("http://patchyvideo-related-video-finder:5010/insert", json = {'vid': {'$oid': str(vid)}}) as resp:
            return
async def _download_thumbnail(url, user, event_id) :
    """Download a cover image, thumbnail it to <=320x200, and save it under
    _COVER_PATH with a random name.

    Retries up to 3 times; GIFs keep all frames (animated thumbnail), other
    formats are saved as PNG. Returns the saved filename, or "" when url is
    falsy or every attempt failed. Concurrency is bounded by _download_sem.
    """
    filename = ""
    if url :
        for attemp in range(3) :
            try :
                async with _download_sem :
                    async with ClientSession() as session:
                        async with session.get(url) as resp:
                            if resp.status == 200 :
                                img = Image.open(io.BytesIO(await resp.read()))
                                if isinstance(img, PIL.GifImagePlugin.GifImageFile) :
                                    # Animated GIF: thumbnail every frame and re-assemble.
                                    filename = random_bytes_str(24) + ".gif"
                                    frames = ImageSequence.Iterator(img)
                                    frames = _gif_thumbnails(frames)
                                    om = next(frames) # Handle first frame separately
                                    om.info = img.info # Copy sequence info
                                    om.save(_COVER_PATH + filename, save_all = True, append_images = list(frames), loop = 0)
                                else :
                                    filename = random_bytes_str(24) + ".png"
                                    img.thumbnail((320, 200), Image.ANTIALIAS)
                                    img.save(_COVER_PATH + filename)
                                log_e(event_id, user, 'download_cover', obj = {'filename': filename})
                                break
                            else :
                                # Non-200: log and fall through to the next attempt.
                                log_e(event_id, user, 'download_cover', 'WARN', {'status_code': resp.status, 'attemp': attemp})
            except Exception as ex :
                # Network/decoding error: log and retry.
                log_e(event_id, user, 'download_cover', 'WARN', {'ex': str(ex), 'attemp': attemp})
                continue
    return filename
async def _make_video_data(data, copies, playlists, url, user, event_id) :
    """Build the item document for a NEW video from scraper output `data`.

    Downloads the cover (unless data['cover_image_override'] is set), fills in
    copies/playlists links, and copies any scraper-specific fields from
    data['extra'] verbatim. `views`/`rating` start at -1 (not yet fetched).
    """
    if 'cover_image_override' in data and data['cover_image_override'] :
        filename = data['cover_image_override']
    else :
        filename = await _download_thumbnail(data['thumbnailURL'], user, event_id)
    ret = {
        "url": (data['url_overwrite'] if 'url_overwrite' in data else url),
        "title": data['title'],
        "desc": data['desc'],
        "thumbnail_url": data['thumbnailURL'],
        "cover_image": filename,
        'site': data['site'],
        "unique_id": data['unique_id'],
        'series': playlists,
        'copies': copies,
        'upload_time': data['uploadDate'],
        'repost_type': data['repost_type'],
        'views': -1,
        'rating': -1.0,
        "utags": _cleanUtags(data['utags']) if 'utags' in data else [],
        "user_space_urls": data['user_space_urls'] if 'user_space_urls' in data else [],
        "placeholder": data["placeholder"] if 'placeholder' in data else False
    }
    if 'extra' in data :
        for k, v in data['extra'].items() :
            ret[k] = v
    return ret
async def _make_video_data_update(data, url, user, event_id, thumbnail_url = None) :
    """Build the partial item document used to UPDATE an existing video.

    Unlike _make_video_data this omits series/copies (links are untouched on
    update) and only sets 'cover_image' when a new cover was actually saved
    (thumbnail_url given and download succeeded, or an explicit override).
    """
    if 'cover_image_override' in data and data['cover_image_override'] :
        filename = data['cover_image_override']
    else :
        filename = await _download_thumbnail(thumbnail_url, user, event_id)
    ret = {
        "url": (data['url_overwrite'] if 'url_overwrite' in data else url),
        "title": data['title'],
        "desc": data['desc'],
        "thumbnail_url": data['thumbnailURL'],
        'site': data['site'],
        "unique_id": data['unique_id'],
        'upload_time': data['uploadDate'],
        'repost_type': data['repost_type'],
        'views': -1,
        'rating': -1.0,
        "utags": _cleanUtags(data['utags']) if 'utags' in data else [],
        "user_space_urls": data['user_space_urls'] if 'user_space_urls' in data else [],
        "placeholder": data["placeholder"] if 'placeholder' in data else False
    }
    if 'extra' in data :
        for k, v in data['extra'].items() :
            ret[k] = v
    if filename :
        # Only overwrite the stored cover when a new image exists.
        ret['cover_image'] = filename
    return ret
def _getAllCopies(vid, session, use_unique_id = False) :
    """Return the copy-group of a video as a deduplicated list of ObjectIds,
    including the video itself.

    `vid` is either an ObjectId/str id or, when use_unique_id is True, the
    site-unique id. Returns [] when vid is falsy or the video is not found.
    NOTE: result order is arbitrary (set-based dedup).
    """
    if not vid :
        return []
    if use_unique_id :
        this_video = tagdb.retrive_item({"item.unique_id": vid}, session = session)
    else :
        this_video = tagdb.retrive_item({"_id": ObjectId(vid)}, session = session)
    if this_video is None :
        return []
    copies = this_video['item']['copies']
    # add self
    copies.append(ObjectId(this_video['_id']))
    # use set to remove duplicated items
    return list(set(copies))
def _addThiscopy(dst_vid, this_vid, user, session):
    """Merge `this_vid` (one id or a list of ids) into dst_vid's copy list.

    The destination video itself is excluded from its own copy list and the
    list is deduplicated. Silently does nothing when this_vid is None or
    dst_vid does not exist.
    """
    if this_vid is None :
        return
    dst_video = tagdb.retrive_item({"_id": ObjectId(dst_vid)}, session = session)
    if dst_video is None :
        return
    dst_copies = dst_video['item']['copies']
    if isinstance(this_vid, list) :
        dst_copies.extend(this_vid)
    else :
        dst_copies.append(ObjectId(this_vid))
    # dedup and never list a video as a copy of itself
    dst_copies = list(set(dst_copies) - set([ObjectId(dst_vid)]))
    tagdb.update_item_query(ObjectId(dst_vid), {"$set": {"item.copies": dst_copies}}, user = user, session = session)
class _PlaylistReorederHelper() :
    """Collects the per-video results of a batch playlist post and, once every
    video in the batch has either succeeded or failed, inserts the successful
    ones into the playlist in the order given by `playlist_ordered`.

    State is kept per destination playlist in self.playlist_map:
      'all'     - the ordered list of unique_ids expected for the batch,
      'succeed' - unique_id -> (video_id, unique_id, user),
      'failed'  - unique_id -> unique_id,
      'rank'    - the smallest requested insertion rank seen (-1 = append).
    """
    def __init__(self) :
        self.playlist_map = {}

    async def _add_to_playlist(self, dst_playlist, event_id, user_global) :
        """Flush one finished batch into the playlist, then clear its state.

        First tries a single transactional ("fast") pass under the playlist
        lock; on any non-UserError failure it falls back to inserting videos
        one by one, ignoring individual errors. Always signals completion via
        the 'playlist-batch-post-event-<pid>' redis key.
        """
        if self.playlist_map[dst_playlist] :
            dst_rank = self.playlist_map[dst_playlist]['rank']
            playlist_ordered = self.playlist_map[dst_playlist]['all']
            try :
                # fast method: one transaction for the whole batch
                async with RedisLockAsync(rdb, "playlistEdit:" + dst_playlist), MongoTransaction(client) as s :
                    cur_rank = 0
                    playlist = playlist_db.retrive_item(dst_playlist, session = s())
                    if playlist is None :
                        raise UserError('PLAYLIST_NOT_EXIST')
                    if playlist["item"]["videos"] + len(self.playlist_map[dst_playlist]['succeed']) > PlaylistConfig.MAX_VIDEO_PER_PLAYLIST :
                        raise UserError('VIDEO_LIMIT_EXCEEDED')
                    playlist_videos = playlist["item"]['videos']
                    # walk the batch in its original submission order
                    for unique_id in playlist_ordered :
                        if unique_id in self.playlist_map[dst_playlist]['succeed'] :
                            (video_id, _, user) = self.playlist_map[dst_playlist]['succeed'][unique_id]
                            if dst_rank == -1 :
                                # append at the end
                                if filterOperation('editPlaylist', user, playlist, False) :
                                    if addVideoToPlaylistLockFree(dst_playlist, video_id, user, playlist_videos, session = s()) :
                                        playlist_videos += 1
                            else :
                                # insert at the requested position
                                if filterOperation('editPlaylist', user, playlist, False) :
                                    if insertIntoPlaylistLockFree(dst_playlist, video_id, dst_rank + cur_rank, user, session = s()) :
                                        cur_rank += 1
                    s.mark_succeed()
            except UserError as ue :
                # UserError, rereaise to upper level
                log_e(event_id, user_global, '_add_to_playlist', 'ERR', {'ex': str(ex), 'tb': traceback.format_exc()})
                del self.playlist_map[dst_playlist]
                rdb.set(f'playlist-batch-post-event-{dst_playlist}', b'done')
                raise ue
            except Exception as ex :
                # if anything goes wrong, fallback to slow method
                log_e(event_id, user_global, '_add_to_playlist', 'ERR', {'ex': str(ex), 'tb': traceback.format_exc()})
                cur_rank = 0
                for unique_id in playlist_ordered :
                    if unique_id in self.playlist_map[dst_playlist]['succeed'] :
                        (video_id, _, user) = self.playlist_map[dst_playlist]['succeed'][unique_id]
                        # ignore error, add next video
                        try :
                            if dst_rank == -1 :
                                addVideoToPlaylist(dst_playlist, video_id, user)
                            else :
                                insertIntoPlaylist(dst_playlist, video_id, dst_rank + cur_rank, user)
                            cur_rank += 1
                        except :
                            pass
        log_e(event_id, user_global, '_add_to_playlist', 'MSG', {'succedd': len(self.playlist_map[dst_playlist]['succeed']), 'all': len(self.playlist_map[dst_playlist]['all']), 'pid': dst_playlist})
        del self.playlist_map[dst_playlist]
        rdb.set(f'playlist-batch-post-event-{dst_playlist}', b'done')

    async def post_video_succeed(self, video_id, unique_id, dst_playlist, playlist_ordered, dst_rank, user, event_id) :
        """Record one successfully-posted video of a batch; flush the batch
        when every expected unique_id has reported in."""
        if video_id and unique_id and dst_playlist and playlist_ordered :
            if dst_playlist not in self.playlist_map :
                self.playlist_map[dst_playlist] = {}
                self.playlist_map[dst_playlist]['succeed'] = {}
                self.playlist_map[dst_playlist]['failed'] = {}
                self.playlist_map[dst_playlist]['rank'] = dst_rank
                self.playlist_map[dst_playlist]['all'] = playlist_ordered
            self.playlist_map[dst_playlist]['rank'] = min(dst_rank, self.playlist_map[dst_playlist]['rank'])
            self.playlist_map[dst_playlist]['succeed'][unique_id] = (video_id, unique_id, user)
            # a later success overrides an earlier failure for the same id
            if unique_id in self.playlist_map[dst_playlist]['failed'] :
                del self.playlist_map[dst_playlist]['failed'][unique_id]
            if len(self.playlist_map[dst_playlist]['succeed']) + len(self.playlist_map[dst_playlist]['failed']) >= len(self.playlist_map[dst_playlist]['all']) :
                await self._add_to_playlist(dst_playlist, event_id, user)

    async def post_video_failed(self, unique_id, dst_playlist, playlist_ordered, dst_rank, user, event_id) :
        """Record one failed video of a batch; flush the batch when every
        expected unique_id has reported in."""
        if unique_id and dst_playlist and playlist_ordered :
            if dst_playlist not in self.playlist_map :
                self.playlist_map[dst_playlist] = {}
                self.playlist_map[dst_playlist]['succeed'] = {}
                self.playlist_map[dst_playlist]['failed'] = {}
                self.playlist_map[dst_playlist]['rank'] = dst_rank
                self.playlist_map[dst_playlist]['all'] = playlist_ordered
            self.playlist_map[dst_playlist]['rank'] = min(dst_rank, self.playlist_map[dst_playlist]['rank'])
            self.playlist_map[dst_playlist]['failed'][unique_id] = unique_id
            # a later failure overrides an earlier success for the same id
            if unique_id in self.playlist_map[dst_playlist]['succeed'] :
                del self.playlist_map[dst_playlist]['succeed'][unique_id]
            if len(self.playlist_map[dst_playlist]['succeed']) + len(self.playlist_map[dst_playlist]['failed']) >= len(self.playlist_map[dst_playlist]['all']) :
                await self._add_to_playlist(dst_playlist, event_id, user)
_playlist_reorder_helper = _PlaylistReorederHelper()
@usingResourceAsync('tags')
async def postVideoAsync(url, tags, dst_copy, dst_playlist, dst_rank, other_copies, repost_type, playlist_ordered, user, update_video_detail, event_id, field_override = None, use_autotag = False):
    """Scrape one video URL and insert (or update/link) it in the database.

    Steps: dispatch the URL to a site-specific scraper; derive the site-unique
    id; under the per-video redis lock, either fetch fresh metadata or reuse
    the existing record; optionally link the video into a copy-group
    (dst_copy / other_copies) and/or a playlist (dst_playlist at dst_rank,
    -1 = append); batch playlist inserts are coordinated through
    _playlist_reorder_helper when playlist_ordered is given.

    Returns a (status_string, payload) tuple, e.g. ('SUCCEED', video_id) or
    ('PARSE_FAILED', {}). Failures are also reported to the reorder helper so
    batches can complete.
    """
    parsed = None
    try :
        dst_playlist = str(dst_playlist)
        dst_rank = -1 if dst_rank is None else dst_rank
        #tags = tagdb.filter_and_translate_tags(tags)
        parsed, url = dispatch(url)
    except :
        pass
    if parsed is None :
        # no scraper recognized this URL
        log_e(event_id, user, 'dispatcher', 'ERR', {'msg': 'PARSE_FAILED', 'url': url})
        await _playlist_reorder_helper.post_video_failed(url, dst_playlist, playlist_ordered, dst_rank, user, event_id)
        return "PARSE_FAILED", {}
    unique_id = await parsed.unique_id_async(self = parsed, link = url) # b23.tv short links yield an empty unique_id
    if not unique_id :
        # fall back to a full metadata fetch to learn the unique id
        ret = await parsed.get_metadata_async(parsed, url, update_video_detail)
        unique_id = ret['data']['unique_id']
    log_e(event_id, user, 'scraper', 'MSG', {'url': url, 'dst_copy': dst_copy, 'other_copies': other_copies, 'dst_playlist': dst_playlist})
    setEventOp('scraper')
    try :
        # serialize all edits to the same video
        lock_id = "videoEdit:" + unique_id
        async with RedisLockAsync(rdb, lock_id) :
            unique, conflicting_item = verifyUniqueness(unique_id)
            if unique or update_video_detail :
                # new video, or an explicit refresh: fetch metadata from the site
                async with _download_sem :
                    ret = await parsed.get_metadata_async(parsed, url, update_video_detail)
                print('-------------------', file = sys.stderr)
                print(ret, file = sys.stderr)
                print(ret['data'], file = sys.stderr)
                print('-------------------', file = sys.stderr)
                if repost_type :
                    ret['data']['repost_type'] = repost_type
                else :
                    ret['data']['repost_type'] = 'unknown'
                if ret["status"] == 'FAILED' :
                    log_e(event_id, user, 'downloader', 'WARN', {'msg': 'FETCH_FAILED', 'ret': ret})
                    await _playlist_reorder_helper.post_video_failed(unique_id, dst_playlist, playlist_ordered, dst_rank, user, event_id)
                    return "FETCH_FAILED", ret
                else :
                    unique_id = ret['data']['unique_id']
            else :
                # video already stored: build ret from the existing record
                ret = makeResponseSuccess({
                    'thumbnailURL': conflicting_item['item']['thumbnail_url'],
                    'title' : conflicting_item['item']['title'],
                    'desc' : conflicting_item['item']['desc'],
                    'site': conflicting_item['item']['site'],
                    'uploadDate' : conflicting_item['item']['upload_time'],
                    "unique_id": conflicting_item['item']['unique_id'],
                    "utags": conflicting_item['item']['utags']
                })
                for k, v in conflicting_item['item'].items() :
                    ret['data'][k] = v
                if 'part_name' in conflicting_item['item'] :
                    ret['part_name'] = conflicting_item['item']['part_name']
                if 'repost_type' in conflicting_item['item'] and conflicting_item['item']['repost_type'] :
                    ret['data']['repost_type'] = repost_type
                else :
                    ret['data']['repost_type'] = 'unknown'
                    tagdb.update_item_query(conflicting_item, {'$set': {'item.repost_type': repost_type}}, user = makeUserMeta(user))
            #if hasattr(parsed, 'LOCAL_CRAWLER') :
            #    url = ret["data"]["url"]
            #else :
            #    url = clear_url(url)
            # apply caller-supplied field overrides when their condition matches
            use_override = False
            if field_override and '__condition' in field_override :
                condition = field_override['__condition']
                del field_override['__condition']
                if condition == 'any' :
                    use_override = True
                elif condition == 'placeholder' and 'placeholder' in ret["data"] and ret["data"]['placeholder'] :
                    use_override = True
            if use_override :
                for key in field_override :
                    ret['data'][key] = field_override[key]
            playlists = []
            #playlist_lock = None
            if dst_playlist :
                #playlist_lock = RedisLockAsync(rdb, "playlistEdit:" + str(dst_playlist))
                #playlist_lock.acquire()
                if playlist_db.retrive_item(dst_playlist) is not None :
                    playlists = [ ObjectId(dst_playlist) ]
            if not unique:
                log_e(event_id, user, 'scraper', level = 'MSG', obj = {'msg': 'ALREADY_EXIST', 'unique_id': ret["data"]["unique_id"]})
                """
                Update existing video
                """
                if update_video_detail :
                    log_e(event_id, user, 'scraper', level = 'MSG', obj = 'Updating video detail')
                    with MongoTransaction(client) as s :
                        old_item = tagdb.retrive_item(conflicting_item['_id'], session = s())['item']
                        if old_item['thumbnail_url'] and old_item['cover_image'] :
                            # old thumbnail exists, no need to download again
                            new_detail = await _make_video_data_update(ret["data"], url, user, event_id)
                        else :
                            # old thumbnail does not exists, add to dict
                            new_detail = await _make_video_data_update(ret["data"], url, user, event_id, ret["data"]["thumbnailURL"])
                        for key in new_detail.keys() :
                            old_item[key] = new_detail[key] # overwrite or add new field
                        setEventUserAndID(user, event_id)
                        tagdb.update_item_query(conflicting_item['_id'], {'$set': {'item': old_item}}, ['title', 'desc'], user = makeUserMeta(user), session = s())
                        s.mark_succeed()
                    return 'SUCCEED', conflicting_item['_id']
                # this video already exist in the database
                # if the operation is to add a link to other copies and not adding self
                if (dst_copy and dst_copy != conflicting_item['_id']) or other_copies :
                    log_e(event_id, user, 'scraper', level = 'MSG', obj = 'Adding to to copies')
                    async with RedisLockAsync(rdb, 'editLink'), MongoTransaction(client) as s :
                        log_e(event_id, user, level = 'MSG', obj = 'Adding to to copies, lock acquired')
                        # find all copies of video dst_copy, self included
                        all_copies = _getAllCopies(dst_copy, session = s())
                        # find all videos linked to source video
                        all_copies += _getAllCopies(conflicting_item['_id'], session = s())
                        # add videos from other copies
                        for uid in other_copies :
                            all_copies += _getAllCopies(uid, session = s(), use_unique_id = True)
                        # remove duplicated items
                        all_copies = list(set(all_copies))
                        # add this video to all other copies found
                        if len(all_copies) <= VideoConfig.MAX_COPIES :
                            for dst_vid in all_copies :
                                setEventUserAndID(user, event_id)
                                _addThiscopy(dst_vid, all_copies, makeUserMeta(user), session = s())
                            log_e(event_id, user, 'scraper', level = 'MSG', obj = 'Successfully added to copies')
                            s.mark_succeed()
                        else :
                            #if playlist_lock :
                            #    playlist_lock.release()
                            log_e(event_id, user, 'scraper', level = 'MSG', obj = 'Too many copies')
                            await _playlist_reorder_helper.post_video_failed(unique_id, dst_playlist, playlist_ordered, dst_rank, user, event_id)
                            return "TOO_MANY_COPIES", {}
                # if the operation is adding this video to playlist
                if dst_playlist :
                    log_e(event_id, user, 'scraper', level = 'MSG', obj = {'msg': 'Adding to playlist at position', 'rank': dst_rank})
                    if playlist_ordered :
                        # batch mode: hand over to the reorder helper
                        await _playlist_reorder_helper.post_video_succeed(conflicting_item['_id'], unique_id, dst_playlist, playlist_ordered, dst_rank, user, event_id)
                    else :
                        setEventUserAndID(user, event_id)
                        if dst_rank == -1 :
                            addVideoToPlaylist(dst_playlist, conflicting_item['_id'], user)
                        else :
                            insertIntoPlaylist(dst_playlist, conflicting_item['_id'], dst_rank, user)
                # merge tags
                async with MongoTransaction(client) as s :
                    log_e(event_id, user, 'scraper', level = 'MSG', obj = 'Merging tags')
                    setEventUserAndID(user, event_id)
                    tagdb.update_item_tags_merge(conflicting_item['_id'], tags, makeUserMeta(user), session = s(), remove_tagids = [354])
                    s.mark_succeed()
                #if playlist_lock :
                #    playlist_lock.release()
                #return "VIDEO_ALREADY_EXIST", conflicting_item['_id']
                return "SUCCEED", conflicting_item['_id']
            else :
                # brand-new video
                # expand dst_copy to all copies linked to dst_copy
                if dst_copy or other_copies :
                    log_e(event_id, user, 'scraper', level = 'MSG', obj = 'Adding to to copies')
                    async with RedisLockAsync(rdb, 'editLink'), MongoTransaction(client) as s :
                        log_e(event_id, user, 'scraper', level = 'MSG', obj = 'Adding to to copies, lock acquired')
                        all_copies = _getAllCopies(dst_copy, session = s())
                        # add videos from other copies
                        for uid in other_copies :
                            all_copies += _getAllCopies(uid, session = s(), use_unique_id = True)
                        video_data = await _make_video_data(ret["data"], all_copies, playlists, url, user, event_id)
                        setEventUserAndID(user, event_id)
                        new_item_id = tagdb.add_item(tags, video_data, 3, ['title', 'desc'], makeUserMeta(user), session = s())
                        all_copies.append(ObjectId(new_item_id))
                        # remove duplicated items
                        all_copies = list(set(all_copies))
                        if len(all_copies) <= VideoConfig.MAX_COPIES :
                            for dst_vid in all_copies :
                                setEventUserAndID(user, event_id)
                                _addThiscopy(dst_vid, all_copies, makeUserMeta(user), session = s())
                            log_e(event_id, user, 'scraper', level = 'MSG', obj = 'Successfully added to copies')
                            s.mark_succeed()
                        else :
                            #if playlist_lock :
                            #    playlist_lock.release()
                            log_e(event_id, user, 'scraper', level = 'MSG', obj = 'Too many copies')
                            await _playlist_reorder_helper.post_video_failed(unique_id, dst_playlist, playlist_ordered, dst_rank, user, event_id)
                            return "TOO_MANY_COPIES", {}
                else :
                    async with MongoTransaction(client) as s :
                        video_data = await _make_video_data(ret["data"], [], playlists, url, user, event_id)
                        setEventUserAndID(user, event_id)
                        if use_autotag :
                            # augment the caller's tags with inferred ones
                            tags.extend(inferTagsFromVideo(video_data['utags'], video_data['title'], video_data['desc'], 'CHS', video_data['url'], video_data['user_space_urls']))
                        new_item_id = tagdb.add_item(tags, video_data, 3, ['title', 'desc'], makeUserMeta(user), session = s())
                        log_e(event_id, user, 'scraper', level = 'MSG', obj = {'msg': 'New video added to database', 'vid': new_item_id})
                        s.mark_succeed()
                # if the operation is adding this video to playlist
                if dst_playlist :
                    log_e(event_id, user, 'scraper', level = 'MSG', obj = {'msg': 'Adding to playlist at position', 'rank': dst_rank})
                    if playlist_ordered :
                        # batch mode: hand over to the reorder helper
                        await _playlist_reorder_helper.post_video_succeed(new_item_id, unique_id, dst_playlist, playlist_ordered, dst_rank, user, event_id)
                    else :
                        setEventUserAndID(user, event_id)
                        if dst_rank == -1 :
                            addVideoToPlaylist(dst_playlist, new_item_id, user)
                        else :
                            insertIntoPlaylist(dst_playlist, new_item_id, dst_rank, user)
                #if playlist_lock :
                #    playlist_lock.release()
                log_e(event_id, user, 'scraper', level = 'MSG', obj = 'Done')
                return 'SUCCEED', new_item_id
    except UserError as ue :
        # domain-level failure: report to the batch helper and surface the message
        await _playlist_reorder_helper.post_video_failed(unique_id, dst_playlist, playlist_ordered, dst_rank, user, event_id)
        log_e(event_id, user, 'scraper', level = 'WARN', obj = {'ue': str(ue), 'tb': traceback.format_exc()})
        return ue.msg, {"aux": ue.aux, "traceback": traceback.format_exc()}
    except Exception as ex:
        # unexpected failure: also try to reset the possibly-stuck editLink lock
        await _playlist_reorder_helper.post_video_failed(unique_id, dst_playlist, playlist_ordered, dst_rank, user, event_id)
        log_e(event_id, user, 'scraper', level = 'ERR', obj = {'ex': str(ex), 'tb': traceback.format_exc()})
        try :
            problematic_lock = RedisLockAsync(rdb, 'editLink')
            problematic_lock.reset()
        except Exception :
            pass
        return "UNKNOWN", {"aux": "none", "traceback": traceback.format_exc()}#'\n'.join([repr(traceback.format_exc()), repr(traceback.extract_stack())])
async def postVideoAsyncJSON(param_json) :
    """Unpack a task-parameter dict, run postVideoAsync, and notify the
    related-video service on success.

    Returns {'result': status_string, 'result_obj': payload}; result_obj is a
    video id on success or an error-detail dict on failure.
    """
    url = param_json['url']
    tags = param_json['tags']
    dst_copy = param_json['dst_copy']
    dst_playlist = param_json['dst_playlist']
    dst_rank = param_json['dst_rank']
    other_copies = param_json['other_copies']
    user = param_json['user']
    playlist_ordered = param_json['playlist_ordered']
    event_id = param_json['event_id']
    repost_type = param_json['repost_type']
    # optional keys default to off
    field_overrides = param_json['field_overrides'] if 'field_overrides' in param_json else None
    update_video_detail = param_json['update_video_detail'] if 'update_video_detail' in param_json else False
    use_autotag = param_json['use_autotag'] if 'use_autotag' in param_json else False
    ret, ret_obj = await postVideoAsync(url, tags, dst_copy, dst_playlist, dst_rank, other_copies, repost_type, playlist_ordered, user, update_video_detail, event_id, field_overrides, use_autotag)
    if not isinstance(ret_obj, dict) :
        # success path returns a video id; best-effort notification only
        try :
            await notify_video_update(ObjectId(ret_obj))
        except Exception as e :
            pass
    return {'result' : ret, 'result_obj' : ret_obj}
def verifyUniqueness(postingId):
    """Check whether a video with this site-unique id is already stored.

    Returns (is_unique, existing_item): is_unique is True when no record
    matches (existing_item is then None). A falsy postingId is treated as
    trivially unique.
    """
    if not postingId :
        return True, None
    existing = tagdb.retrive_item({"item.unique_id": postingId})
    return existing is None, existing
async def func_with_write_result(func, task_id, param_json) :
    """Run one queued post task, then clean up its bookkeeping.

    Removes the task id from the user's pending-task list and the task record
    from redis; non-SUCCEED results are archived in the failed_posts
    collection (with user/event/internal fields stripped from the params).
    """
    ret = await func(param_json)
    key = 'posttasks-' + str(param_json['user']['_id'])
    rdb.lrem(key, 1, task_id)
    log_e(param_json['event_id'], param_json['user'], op = 'task_finished', obj = {'task_id': task_id})
    rdb.delete(f'task-{task_id}')
    if ret['result'] != 'SUCCEED' :
        # archive the failure with only user-visible parameters
        param_json_for_user = copy.deepcopy(param_json)
        del param_json_for_user['user']
        del param_json_for_user['event_id']
        del param_json_for_user['playlist_ordered']
        if 'field_overrides' in param_json_for_user :
            del param_json_for_user['field_overrides']
        tagdb.db.failed_posts.insert_one({'uid': ObjectId(param_json['user']['_id']), 'ret': ret['result_obj'], 'post_param': param_json_for_user, 'time': datetime.now()})
async def task_runner(func, queue) :
    """Consume (param_json, task_id) items from `queue` forever, launching
    each as a fire-and-forget task wrapped by func_with_write_result."""
    while True :
        task_param, task_id = await queue.get()
        task = asyncio.create_task(func_with_write_result(func, task_id, task_param))
        # fire-and-forget: do not await, just schedule
        asyncio.gather(task)
        #await task
        queue.task_done()
async def put_task(queue, param_json):
    """Register a new post task in redis and enqueue it for the runner.

    Returns the 16-byte random task id.  Indentation reconstructed: the
    original chunk had lost all leading whitespace.
    """
    task_id = random_bytes_str(16)
    # Sanitized copy shown back to the user while the task is pending.
    param_json_for_user = copy.deepcopy(param_json)
    del param_json_for_user['user']
    del param_json_for_user['playlist_ordered']
    del param_json_for_user['event_id']
    if 'field_overrides' in param_json_for_user:
        del param_json_for_user['field_overrides']
    log_e(param_json['event_id'], param_json['user'], op='put_task', obj={'task_id': task_id})
    ret_json = dumps({'finished': False, 'key': task_id, 'data': None, 'params': param_json_for_user})
    rdb.set(f'task-{task_id}', ret_json)
    # Track the pending task on the user's list, then hand it to the runner.
    key = 'posttasks-' + str(param_json['user']['_id'])
    rdb.lpush(key, task_id)
    await queue.put((param_json, task_id))
    return task_id
# Module-level work queue consumed by the background task_runner started in init().
_async_queue = asyncio.Queue()

async def putVideoTask(video_json_obj):
    """Enqueue one video-post job and return its task id."""
    return await put_task(_async_queue, video_json_obj)
@routes.post("/video")
async def post_video_async(request):
    """HTTP endpoint: accept a post-video request body and queue it; reply with the task id."""
    rj = loads(await request.text())
    task_id = await put_task(_async_queue, rj)
    return web.json_response({'task_id': task_id})
async def init():
    """Start the single background task_runner for queued video posts."""
    # schedule task_runner to run
    task_runner_task = asyncio.create_task(task_runner(postVideoAsyncJSON, _async_queue))
    # NOTE(review): gather() wraps the already-scheduled task but is not awaited.
    asyncio.gather(task_runner_task)

init_funcs.append(init)
# encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
float_or_none,
parse_duration,
unified_strdate,
)
from .subtitles import SubtitlesInfoExtractor
class NRKIE(InfoExtractor):
    """Extract single video ('/video/') and audio ('/lyd/') clips from nrk.no."""
    _VALID_URL = r'http://(?:www\.)?nrk\.no/(?:video|lyd)/[^/]+/(?P<id>[\dA-F]{16})'
    _TESTS = [
        {
            'url': 'http://www.nrk.no/video/dompap_og_andre_fugler_i_piip_show/D0FA54B5C8B6CE59/emne/piipshow/',
            'md5': 'a6eac35052f3b242bb6bb7f43aed5886',
            'info_dict': {
                'id': '150533',
                'ext': 'flv',
                'title': 'Dompap og andre fugler i Piip-Show',
                'description': 'md5:d9261ba34c43b61c812cb6b0269a5c8f'
            }
        },
        {
            'url': 'http://www.nrk.no/lyd/lyd_av_oppleser_for_blinde/AEFDDD5473BA0198/',
            'md5': '3471f2a51718195164e88f46bf427668',
            'info_dict': {
                'id': '154915',
                'ext': 'flv',
                'title': 'Slik høres internett ut når du er blind',
                'description': 'md5:a621f5cc1bd75c8d5104cb048c6b8568',
            }
        },
    ]
    def _real_extract(self, url):
        """Resolve the page to its numeric media id and fetch metadata from the PS API."""
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        page = self._download_webpage(url, video_id)
        # The hex id in the URL is replaced by the numeric id embedded in the page.
        video_id = self._html_search_regex(r'<div class="nrk-video" data-nrk-id="(\d+)">', page, 'video id')
        data = self._download_json(
            'http://v7.psapi.nrk.no/mediaelement/%s' % video_id, video_id, 'Downloading media JSON')
        if data['usageRights']['isGeoBlocked']:
            # Geo-restricted content: bail out with an expected (non-bug) error.
            raise ExtractorError('NRK har ikke rettig-heter til å vise dette programmet utenfor Norge', expected=True)
        video_url = data['mediaUrl'] + '?hdcore=3.1.1&plugin=aasp-3.1.1.69.124'
        images = data.get('images')
        if images:
            # Pick the widest available web thumbnail.
            thumbnails = images['webImages']
            thumbnails.sort(key=lambda image: image['pixelWidth'])
            thumbnail = thumbnails[-1]['imageUrl']
        else:
            thumbnail = None
        return {
            'id': video_id,
            'url': video_url,
            'ext': 'flv',
            'title': data['title'],
            'description': data['description'],
            'thumbnail': thumbnail,
        }
class NRKTVIE(SubtitlesInfoExtractor):
    """Extract programs from tv.nrk.no / tv.nrksuper.no, including multi-part
    playlists ('#del=N' fragments) and TTML subtitles converted to SRT."""
    _VALID_URL = r'(?P<baseurl>http://tv\.nrk(?:super)?\.no/)(?:serie/[^/]+|program)/(?P<id>[a-zA-Z]{4}\d{8})(?:/\d{2}-\d{2}-\d{4})?(?:#del=(?P<part_id>\d+))?'
    _TESTS = [
        {
            'url': 'http://tv.nrk.no/serie/20-spoersmaal-tv/MUHH48000314/23-05-2014',
            'md5': 'adf2c5454fa2bf032f47a9f8fb351342',
            'info_dict': {
                'id': 'MUHH48000314',
                'ext': 'flv',
                'title': '20 spørsmål',
                'description': 'md5:bdea103bc35494c143c6a9acdd84887a',
                'upload_date': '20140523',
                'duration': 1741.52,
            },
        },
        {
            'url': 'http://tv.nrk.no/program/mdfp15000514',
            'md5': '383650ece2b25ecec996ad7b5bb2a384',
            'info_dict': {
                'id': 'mdfp15000514',
                'ext': 'flv',
                'title': 'Kunnskapskanalen: Grunnlovsjubiléet - Stor ståhei for ingenting',
                'description': 'md5:654c12511f035aed1e42bdf5db3b206a',
                'upload_date': '20140524',
                'duration': 4605.0,
            },
        },
        {
            # single playlist video
            'url': 'http://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015#del=2',
            'md5': 'adbd1dbd813edaf532b0a253780719c2',
            'info_dict': {
                'id': 'MSPO40010515-part2',
                'ext': 'flv',
                'title': 'Tour de Ski: Sprint fri teknikk, kvinner og menn 06.01.2015 (del 2:2)',
                'description': 'md5:238b67b97a4ac7d7b4bf0edf8cc57d26',
                'upload_date': '20150106',
            },
            'skip': 'Only works from Norway',
        },
        {
            'url': 'http://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015',
            'playlist': [
                {
                    'md5': '9480285eff92d64f06e02a5367970a7a',
                    'info_dict': {
                        'id': 'MSPO40010515-part1',
                        'ext': 'flv',
                        'title': 'Tour de Ski: Sprint fri teknikk, kvinner og menn 06.01.2015 (del 1:2)',
                        'description': 'md5:238b67b97a4ac7d7b4bf0edf8cc57d26',
                        'upload_date': '20150106',
                    },
                },
                {
                    'md5': 'adbd1dbd813edaf532b0a253780719c2',
                    'info_dict': {
                        'id': 'MSPO40010515-part2',
                        'ext': 'flv',
                        'title': 'Tour de Ski: Sprint fri teknikk, kvinner og menn 06.01.2015 (del 2:2)',
                        'description': 'md5:238b67b97a4ac7d7b4bf0edf8cc57d26',
                        'upload_date': '20150106',
                    },
                },
            ],
            'info_dict': {
                'id': 'MSPO40010515',
                'title': 'Tour de Ski: Sprint fri teknikk, kvinner og menn',
                'description': 'md5:238b67b97a4ac7d7b4bf0edf8cc57d26',
                'upload_date': '20150106',
                'duration': 6947.5199999999995,
            },
            'skip': 'Only works from Norway',
        }
    ]
    def _seconds2str(self, s):
        """Format a float second count as HH:MM:SS.mmm (SRT-style timestamp)."""
        return '%02d:%02d:%02d.%03d' % (s / 3600, (s % 3600) / 60, s % 60, (s % 1) * 1000)
    def _debug_print(self, txt):
        """Print *txt* only when the downloader runs in verbose mode."""
        if self._downloader.params.get('verbose', False):
            self.to_screen('[debug] %s' % txt)
    def _extract_captions(self, subtitlesurl, video_id, baseurl):
        """Download TTML captions and convert them to one SRT string, keyed by language."""
        url = "%s%s" % (baseurl, subtitlesurl)
        self._debug_print('%s: Subtitle url: %s' % (video_id, url))
        captions = self._download_xml(url, video_id, 'Downloading subtitles')
        lang = captions.get('lang', 'no')
        ps = captions.findall('./{0}body/{0}div/{0}p'.format('{http://www.w3.org/ns/ttml}'))
        srt = ''
        for pos, p in enumerate(ps):
            begin = parse_duration(p.get('begin'))
            duration = parse_duration(p.get('dur'))
            starttime = self._seconds2str(begin)
            endtime = self._seconds2str(begin + duration)
            text = '\n'.join(p.itertext())
            srt += '%s\r\n%s --> %s\r\n%s\r\n\r\n' % (str(pos), starttime, endtime, text)
        return {lang: srt}
    def _extract_f4m(self, manifest_url, video_id):
        """Fetch HDS formats, appending the hdcore query the CDN requires."""
        return self._extract_f4m_formats(manifest_url + '?hdcore=3.1.1&plugin=aasp-3.1.1.69.124', video_id)
    def _real_extract(self, url):
        """Extract a single program, one playlist part, or a whole playlist."""
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        part_id = mobj.group('part_id')
        baseurl = mobj.group('baseurl')
        webpage = self._download_webpage(url, video_id)
        title = self._html_search_meta(
            'title', webpage, 'title')
        description = self._html_search_meta(
            'description', webpage, 'description')
        thumbnail = self._html_search_regex(
            r'data-posterimage="([^"]+)"',
            webpage, 'thumbnail', fatal=False)
        upload_date = unified_strdate(self._html_search_meta(
            'rightsfrom', webpage, 'upload date', fatal=False))
        duration = float_or_none(self._html_search_regex(
            r'data-duration="([^"]+)"',
            webpage, 'duration', fatal=False))
        # playlist
        parts = re.findall(
            r'<a href="#del=(\d+)"[^>]+data-argument="([^"]+)">([^<]+)</a>', webpage)
        if parts:
            entries = []
            for current_part_id, stream_url, part_title in parts:
                # When a specific '#del=N' part was requested, skip all others.
                if part_id and current_part_id != part_id:
                    continue
                video_part_id = '%s-part%s' % (video_id, current_part_id)
                formats = self._extract_f4m(stream_url, video_part_id)
                entries.append({
                    'id': video_part_id,
                    'title': part_title,
                    'description': description,
                    'thumbnail': thumbnail,
                    'upload_date': upload_date,
                    'formats': formats,
                })
            # NOTE(review): if part_id was given but no matching part was found,
            # control falls through to the single-video extraction below --
            # confirm that is intended rather than raising.
            if part_id:
                if entries:
                    return entries[0]
            else:
                playlist = self.playlist_result(entries, video_id, title, description)
                playlist.update({
                    'thumbnail': thumbnail,
                    'upload_date': upload_date,
                    'duration': duration,
                })
                return playlist
        formats = []
        f4m_url = re.search(r'data-media="([^"]+)"', webpage)
        if f4m_url:
            formats.extend(self._extract_f4m(f4m_url.group(1), video_id))
        m3u8_url = re.search(r'data-hls-media="([^"]+)"', webpage)
        if m3u8_url:
            formats.extend(self._extract_m3u8_formats(m3u8_url.group(1), video_id, 'mp4'))
        self._sort_formats(formats)
        subtitles_url = self._html_search_regex(
            r'data-subtitlesurl[ ]*=[ ]*"([^"]+)"',
            webpage, 'subtitle URL', default=None)
        subtitles = None
        if subtitles_url:
            subtitles = self._extract_captions(subtitles_url, video_id, baseurl)
        if self._downloader.params.get('listsubtitles', False):
            # --list-subs mode: print the available subtitles and stop.
            self._list_available_subtitles(video_id, subtitles)
            return
        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'upload_date': upload_date,
            'duration': duration,
            'formats': formats,
            'subtitles': subtitles,
        }
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Extract the net parameters from the torch file and store them as python dict
using cPickle'''
import os
import torchfile
import numpy as np
import cPickle as pickle
from argparse import ArgumentParser
import model
# Module-level switch: when True, add_param prints each param name and shape.
verbose=False
def add_param(idx, name, val, params):
    """Store one extracted parameter.

    `params` is either a dict (dump mode: name -> numpy array, duplicates
    rejected) or a list of SINGA tensors (init mode: copy `val` into the
    tensor at position `idx` after a size check).  Python 2 syntax.
    """
    if type(params) == dict:
        assert name not in params, 'duplicated param %s' % name
        params[name] = val
    else:
        assert params[idx].size() == val.size, 'size mismatch for %s: %s - %s' % (name, (params[idx].shape,), (val.shape,))
        params[idx].copy_from_numpy(val)
    if verbose:
        print name, val.shape
def conv(m, idx, params, param_names):
    """Flatten a conv layer's weight to 2-D (out_channels x rest) and record it."""
    weight = m['weight']
    flattened = np.reshape(weight, (weight.shape[0], -1))
    add_param(idx, param_names[idx], flattened, params)
    return idx + 1
def batchnorm(m, idx, params, param_names):
    """Record weight, bias, running mean and running variance of a batch-norm layer."""
    for offset, field in enumerate(('weight', 'bias', 'running_mean', 'running_var')):
        add_param(idx + offset, param_names[idx + offset], m[field], params)
    return idx + 4
def linear(m, idx, params, param_names):
    """Record a fully-connected layer: transposed weight first, then bias."""
    add_param(idx, param_names[idx], np.transpose(m['weight']), params)
    bias_idx = idx + 1
    add_param(bias_idx, param_names[bias_idx], m['bias'], params)
    return idx + 2
def traverse(m, idx, params, param_names):
    ''' Traverse all modules of the torch checkpoint file to extract params.
    Args:
        m, a TorchObject
        idx, index for the current cursor of param_names
        params, an empty dictionary (name->numpy) to dump the params via pickle;
            or a list of tensor objects which should be in the same order as
            param_names, called to initialize net created in SINGA directly
            using param values from torch checkpoint file.
    Returns:
        the updated idx
    '''
    # Dispatch on the torch type name; containers recurse over their children,
    # any layer type not matched below is skipped (idx unchanged).
    module_type = m.__dict__['_typename']
    if module_type in ['nn.Sequential', 'nn.ConcatTable'] :
        for x in m.modules:
            idx = traverse(x, idx, params, param_names)
    elif 'SpatialConvolution' in module_type:
        idx = conv(m, idx, params, param_names)
    elif 'SpatialBatchNormalization' in module_type:
        idx = batchnorm(m, idx, params, param_names)
    elif 'Linear' in module_type:
        idx = linear(m, idx, params, param_names)
    return idx
# CLI entry point: load a torch checkpoint, walk it, and pickle the params
# next to the input file.  Python 2 syntax (print statements).
if __name__ == '__main__':
    parser = ArgumentParser(description='Convert params from torch to python '
            'dict. \n resnet could have depth of 18, 34, 101, 152; \n wrn has depth 50; preact has depth 200; addbn has depth 50')
    parser.add_argument("infile", help="torch checkpoint file")
    parser.add_argument("model", choices = ['resnet', 'wrn', 'preact', 'addbn'])
    parser.add_argument("depth", type=int, choices = [18, 34, 50, 101, 152, 200])
    args = parser.parse_args()
    net = model.create_net(args.model, args.depth)
    # model.init_params(net)
    m = torchfile.load(args.infile)
    params = {}
    # params = net.param_values()
    param_names = net.param_names()
    traverse(m, 0, params, param_names)
    # Warn about expected params the checkpoint did not provide.
    miss = [name for name in param_names if name not in params]
    if len(miss) > 0:
        print 'The following params are missing from torch file'
        print miss
    outfile = os.path.splitext(args.infile)[0] + '.pickle'
    with open(outfile, 'wb') as fd:
        pickle.dump(params, fd)
import os
from setuptools import setup
# Read the long description once at import time; a context manager closes the
# file handle promptly instead of leaking it until garbage collection.
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme_file:
    README = readme_file.read()

setup(
    name='graphene_django_firebase_auth',
    version='1.0.0',
    author='<NAME>',
    author_email='<EMAIL>',
    description=(
        "Authentication provider for graphene-django and Google Firebase's "
        "Authentication service."
    ),
    license='MIT',
    keywords='graphene django firebase auth',
    url='https://github.com/dspacejs/graphene-django-firebase-auth',
    packages=['firebase_auth'],
    install_requires=['django', 'firebase-admin'],
    long_description=README,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Framework :: Django :: 2.1',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.7',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'License :: OSI Approved :: MIT License',
    ],
)
def sumOf(s, offset):
    """Sum the digits of *s* that equal the digit `offset` positions ahead,
    treating the string as circular (Advent of Code 2017 day 1 captcha).

    The original shadowed the builtin `sum` with a local accumulator; this
    version uses the builtin with a generator expression instead.
    """
    n = len(s)
    return sum(int(ch) for i, ch in enumerate(s) if ch == s[(i + offset) % n])
# Read the puzzle input and print both captcha answers.  A context manager
# replaces the manual open()/close() pair and stops shadowing the builtin
# name `file`.
with open("./input/input1.txt", "r") as input_file:
    for line in input_file:
        line = line.strip()
        print('Part 1: ', sumOf(line, 1))
        print('Part 2: ', sumOf(line, int(len(line) / 2)))
#!/usr/bin/env python
'''
Selects a random word from words.txt.out
and scrambles the letters inside the word in various ways
'''
import logging
import random
import string
import sys
import os
import time
# Script metadata (note: repeated verbatim at the bottom of this file).
__author__ = "<NAME>"
__copyright__ = "Gytha Ogg"
__credits__ = ["Gytha Ogg"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "development"
# Word-length threshold; appears unused in this file -- TODO confirm.
WORD_LEN_LIMIT = 5
# Word list shipped under <package>/data/.
FILE_NAME = '3000.txt.out'
REL_PATH = 'data'
def alpha_shuffle(word):
    """Return *word* with its interior letters (all but first and last) sorted
    alphabetically; the first and last characters stay in place."""
    interior = ''.join(sorted(word[1:-1]))
    result = word[0] + interior + word[-1]
    logging.debug('%s has been shuffled to %s', word, result)
    return result
def random_shuffle(word):
    """Return *word* with its interior letters randomly permuted; the first
    and last characters stay fixed."""
    interior = list(word[1:-1])
    random.shuffle(interior)
    result = word[0] + ''.join(interior) + word[-1]
    logging.debug('%s has been shuffled to %s', word, result)
    return result
def get_word_list(n):
    ''' gets n words from the input file'''
    # Resolve <this dir>/data/3000.txt.out relative to the module location.
    fullPath = os.path.join(
        os.path.dirname(__file__), os.path.join(REL_PATH, FILE_NAME))
    wordList = None
    with open(fullPath) as inputFile:
        # random.sample picks n distinct lines from the whole file.
        wordList = random.sample(inputFile.readlines(), n)
    logging.debug('Selected words: %s', wordList)
    # string.rstrip(w) is the Python 2 string-module form of w.rstrip().
    return [string.rstrip(w) for w in wordList]
def sample_test():
    """Log both shuffle variants for five randomly chosen words."""
    for word in get_word_list(5):
        logging.info('Original: %s, Alpha shuffle: %s, Random shuffle: %s',
                     word, alpha_shuffle(word), random_shuffle(word))
    return
def quiz():
    """Run a ten-word unscramble quiz, timing each answer, then report stats.

    Python 2 syntax (print statements, raw_input, string-module functions).
    """
    word_list = get_word_list(10)
    answer = ''
    score = []       # 1 for a correct answer, 0 otherwise, in word order
    time_taken = []  # wall-clock seconds per answer, in word order
    for word in word_list:
        print string.upper(
            random_shuffle(word)), ' -- Enter the correct word: '
        start = time.time()
        answer = raw_input()
        end = time.time()
        time_taken.append(end-start)
        # Comparison is whitespace-trimmed and case-insensitive.
        answer = string.strip(answer)
        if string.lower(answer) == string.lower(word):
            score.append(1)
        else:
            score.append(0)
    performance_analysis(word_list, time_taken, score)
def performance_analysis(word_list, time_taken, score):
time_correct = 0
n_correct = 0
time_wrong = 0
n_wrong = 0
for w, t, s in zip(word_list, time_taken, score):
if s == 0:
time_wrong += t
n_wrong += 1
print string.upper(w), t, ' seconds --- INCORRECT'
else:
time_correct += t
n_correct += 1
print string.upper(w), t, ' seconds'
print 'Score: ', sum(score), '/', len(score)
print 'Total time taken: ', sum(time_taken), ' seconds'
print 'Average time taken per word: ', (
sum(time_taken)/len(time_taken)), ' seconds'
if n_correct is not 0:
print 'Average time taken per correct answer', time_correct/n_correct
if n_wrong is not 0:
print 'Average time taken per wrong answer', time_wrong/n_wrong
def main(argv):
    """Configure logging from CLI flags, then run the sample test or the quiz."""
    flag_levels = {'debug': logging.DEBUG, 'test': logging.INFO}
    log_level = logging.WARNING
    for flag, level in flag_levels.items():
        if flag in argv:
            log_level = min(log_level, level)
    logging.basicConfig(level=log_level,
                        format='%(asctime)s %(levelname)s %(message)s')
    if 'test' in argv:
        sample_test()
    elif 'quiz' in argv:
        quiz()
    return
if __name__ == "__main__":
    main(sys.argv)
# NOTE(review): this metadata block duplicates the one at the top of the file
# (with a differing __credits__ entry) -- probably left over from an edit.
__author__ = "<NAME>"
__copyright__ = "Gytha Ogg"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "development"
# Generated by Django 3.2.6 on 2021-09-03 10:13
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated merge migration reconciling the 0115 and 0116 branches;
    it performs no schema changes (empty operations list)."""
    dependencies = [
        ("order", "0115_alter_order_language_code"),
        ("order", "0116_merge_20210824_1103"),
    ]
    operations = []
# test/parser_2_segments_core_test.py
""" The Test file for The New Parser (Marker Classes)"""
import pytest
from sqlfluff.parser_2.markers import FilePositionMarker
from sqlfluff.parser_2.segments_base import RawSegment
from sqlfluff.parser_2.segments_core import KeywordSegment
@pytest.fixture(scope="module")
def raw_seg_list():
    """Provide three raw segments -- 'bar', 'foo', 'bar' -- at consecutive
    file positions (fresh, +'bar', +'barfoo')."""
    first = RawSegment('bar', FilePositionMarker.from_fresh())
    second = RawSegment('foo', FilePositionMarker.from_fresh().advance_by('bar'))
    third = RawSegment('bar', FilePositionMarker.from_fresh().advance_by('barfoo'))
    return [first, second, third]
def test__parser_2__core_keyword(raw_seg_list):
    """ Test the Mystical KeywordSegment """
    # First make a keyword class for 'foo'
    FooKeyword = KeywordSegment.make('foo')
    # Check it looks as expected (name and upper-cased template)
    assert issubclass(FooKeyword, KeywordSegment)
    assert FooKeyword.__name__ == "FOO_KeywordSegment"
    assert FooKeyword._template == 'FOO'
    # Match it against the full list and check it doesn't match
    assert not FooKeyword.match(raw_seg_list)
    # Match it against the first element and check it doesn't match
    assert not FooKeyword.match(raw_seg_list[0])
    # Match it against the first element as a list and check it doesn't match
    assert not FooKeyword.match([raw_seg_list[0]])
    # Match it against the 'foo' element (returns a match result)
    m = FooKeyword.match(raw_seg_list[1])
    assert m
    # The matched segment is re-wrapped as a FooKeyword instance
    assert m.matched_segments[0].raw == 'foo'
    assert isinstance(m.matched_segments[0], FooKeyword)
    # Match it against the 'foo' element as a list
    assert FooKeyword.match([raw_seg_list[1]])
    # Match it against a list slice and check it still works
    assert FooKeyword.match(raw_seg_list[1:])
import copy
from datetime import datetime
from dateutil import parser
from contextlib import contextmanager
from memsql_loader.util import apsw_helpers, super_json as json
from memsql_loader.util.apsw_sql_step_queue.errors import TaskDoesNotExist, StepAlreadyStarted, StepNotStarted, StepAlreadyFinished, StepRunning, AlreadyFinished
from memsql_loader.util.apsw_sql_step_queue.time_helpers import unix_timestamp
def timedelta_total_seconds(td):
    """ Needed for python 2.6 compat """
    # Equivalent to timedelta.total_seconds(), which 2.6 lacks.
    total_microseconds = td.microseconds + (td.seconds + td.days * 24 * 3600) * 10. ** 6
    return total_microseconds / 10. ** 6
class TaskHandler(object):
    """Handle for one claimed row in the APSW-backed SQL step queue.

    Every mutating query re-checks that this worker's execution_id still owns
    the row AND that last_contact is within the queue's execution TTL, so a
    stale handle raises TaskDoesNotExist instead of clobbering another worker.
    """
    def __init__(self, execution_id, task_id, queue):
        self.execution_id = execution_id
        self.task_id = task_id
        self._queue = queue
        self.storage = queue.storage
        self.started = 0
        self.finished = None
        self.data = None
        self.result = None
        # NOTE: These fields are specific to the memsql-loader use case;
        # they are not necessary for the queue functionality.
        self.job_id = None
        self.file_id = None
        self.md5 = None
        self.bytes_total = None
        self.bytes_downloaded = None
        self.download_rate = None
        self.steps = None
        self._refresh()
    ###############################
    # Public Interface
    def valid(self):
        """ Check to see if we are still active. """
        if self.finished is not None:
            return False
        with self.storage.cursor() as cursor:
            row = apsw_helpers.get(cursor, '''
                SELECT (last_contact > datetime(:now, 'unixepoch', '-%s second')) AS valid
                FROM %s
                WHERE
                    id = :task_id
                    AND execution_id = :execution_id
            ''' % (self._queue.execution_ttl, self._queue.table_name),
                now=unix_timestamp(datetime.utcnow()),
                task_id=self.task_id,
                execution_id=self.execution_id)
        return bool(row is not None and row.valid)
    def ping(self):
        """ Notify the queue that this task is still active. """
        if self.finished is not None:
            raise AlreadyFinished()
        # Verify ownership first; the update below repeats the predicate.
        with self.storage.cursor() as cursor:
            affected_row = apsw_helpers.get(cursor, '''
                SELECT * from %s
                WHERE
                    id = :task_id
                    AND execution_id = :execution_id
                    AND last_contact > datetime(:now, 'unixepoch', '-%s second')
            ''' % (self._queue.table_name, self._queue.execution_ttl),
                now=unix_timestamp(datetime.utcnow()),
                task_id=self.task_id,
                execution_id=self.execution_id)
        if not affected_row:
            raise TaskDoesNotExist()
        with self.storage.transaction() as cursor:
            apsw_helpers.query(cursor, '''
                UPDATE %s
                SET
                    last_contact=datetime(:now, 'unixepoch'),
                    update_count=update_count + 1
                WHERE
                    id = :task_id
                    AND execution_id = :execution_id
                    AND last_contact > datetime(:now, 'unixepoch', '-%s second')
            ''' % (self._queue.table_name, self._queue.execution_ttl),
                now=unix_timestamp(datetime.utcnow()),
                task_id=self.task_id,
                execution_id=self.execution_id)
    def finish(self, result='success'):
        """Mark the task finished with `result`; all steps must be stopped."""
        if self._running_steps() != 0:
            raise StepRunning()
        if self.finished is not None:
            raise AlreadyFinished()
        self._save(finished=datetime.utcnow(), result=result)
    def requeue(self):
        """Release the task back to the queue by clearing all execution state."""
        if self._running_steps() != 0:
            raise StepRunning()
        if self.finished is not None:
            raise AlreadyFinished()
        with self.storage.cursor() as cursor:
            affected_row = apsw_helpers.get(cursor, '''
                SELECT * from %s
                WHERE
                    id = :task_id
                    AND execution_id = :execution_id
                    AND last_contact > datetime(:now, 'unixepoch', '-%s second')
            ''' % (self._queue.table_name, self._queue.execution_ttl),
                now=unix_timestamp(datetime.utcnow()),
                task_id=self.task_id,
                execution_id=self.execution_id)
        if affected_row is None:
            raise TaskDoesNotExist()
        with self.storage.transaction() as cursor:
            apsw_helpers.query(cursor, '''
                UPDATE %s
                SET
                    last_contact=NULL,
                    update_count=update_count + 1,
                    started=NULL,
                    steps=NULL,
                    execution_id=NULL,
                    finished=NULL,
                    result=NULL
                WHERE
                    id = :task_id
            ''' % self._queue.table_name,
                task_id=self.task_id)
    def start_step(self, step_name):
        """ Start a step. """
        if self.finished is not None:
            raise AlreadyFinished()
        step_data = self._get_step(step_name)
        if step_data is not None:
            if 'stop' in step_data:
                raise StepAlreadyFinished()
            else:
                raise StepAlreadyStarted()
        steps = copy.deepcopy(self.steps)
        steps.append({
            "start": datetime.utcnow(),
            "name": step_name
        })
        self._save(steps=steps)
    def stop_step(self, step_name):
        """ Stop a step. """
        if self.finished is not None:
            raise AlreadyFinished()
        steps = copy.deepcopy(self.steps)
        step_data = self._get_step(step_name, steps=steps)
        if step_data is None:
            raise StepNotStarted()
        elif 'stop' in step_data:
            raise StepAlreadyFinished()
        step_data['stop'] = datetime.utcnow()
        step_data['duration'] = timedelta_total_seconds(step_data['stop'] - step_data['start'])
        self._save(steps=steps)
    @contextmanager
    def step(self, step_name):
        """Context manager wrapping start_step/stop_step around a block."""
        self.start_step(step_name)
        yield
        self.stop_step(step_name)
    def refresh(self):
        """Reload this handle's fields from the database row."""
        self._refresh()
    def save(self):
        """Persist current in-memory fields back to the database row."""
        self._save()
    ###############################
    # Private Interface
    def _get_step(self, step_name, steps=None):
        # Linear scan; step lists are short.
        for step in (steps if steps is not None else self.steps):
            if step['name'] == step_name:
                return step
        return None
    def _running_steps(self):
        # A step without a 'stop' timestamp is still running.
        return len([s for s in self.steps if 'stop' not in s])
    def _refresh(self):
        with self.storage.cursor() as cursor:
            row = apsw_helpers.get(cursor, '''
                SELECT * FROM %s
                WHERE
                    id = :task_id
                    AND execution_id = :execution_id
                    AND last_contact > datetime(:now, 'unixepoch', '-%s second')
            ''' % (self._queue.table_name, self._queue.execution_ttl),
                now=unix_timestamp(datetime.utcnow()),
                task_id=self.task_id,
                execution_id=self.execution_id)
        if not row:
            raise TaskDoesNotExist()
        self.task_id = row.id
        self.data = json.loads(row.data)
        self.result = row.result
        self.job_id = row.job_id
        self.file_id = row.file_id
        self.md5 = row.md5
        self.bytes_total = row.bytes_total
        self.bytes_downloaded = row.bytes_downloaded
        self.download_rate = row.download_rate
        self.steps = self._load_steps(json.loads(row.steps))
        self.started = row.started
        self.finished = row.finished
    def _load_steps(self, raw_steps):
        """ load steps -> basically load all the datetime isoformats into datetimes """
        for step in raw_steps:
            if 'start' in step:
                step['start'] = parser.parse(step['start'])
            if 'stop' in step:
                step['stop'] = parser.parse(step['stop'])
        return raw_steps
    def _save(self, finished=None, steps=None, result=None, data=None):
        # Arguments override in-memory fields; None means "keep current value".
        finished = finished if finished is not None else self.finished
        with self.storage.transaction() as cursor:
            apsw_helpers.query(cursor, '''
                UPDATE %s
                SET
                    last_contact=datetime(:now, 'unixepoch'),
                    update_count=update_count + 1,
                    steps=:steps,
                    finished=datetime(:finished, 'unixepoch'),
                    result=:result,
                    bytes_downloaded=:bytes_downloaded,
                    download_rate=:download_rate,
                    data=:data
                WHERE
                    id = :task_id
                    AND execution_id = :execution_id
                    AND last_contact > datetime(:now, 'unixepoch', '-%s second')
            ''' % (self._queue.table_name, self._queue.execution_ttl),
                now=unix_timestamp(datetime.utcnow()),
                task_id=self.task_id,
                execution_id=self.execution_id,
                steps=json.dumps(steps if steps is not None else self.steps),
                finished=unix_timestamp(finished) if finished else None,
                result=result if result is not None else self.result,
                bytes_downloaded=self.bytes_downloaded,
                download_rate=self.download_rate,
                data=json.dumps(data if data is not None else self.data))
            # Re-select to confirm the UPDATE hit our (still-owned) row.
            affected_row = apsw_helpers.get(cursor, '''
                SELECT * from %s
                WHERE
                    id = :task_id
                    AND execution_id = :execution_id
                    AND last_contact > datetime(:now, 'unixepoch', '-%s second')
            ''' % (self._queue.table_name, self._queue.execution_ttl),
                now=unix_timestamp(datetime.utcnow()),
                task_id=self.task_id,
                execution_id=self.execution_id)
            if not affected_row:
                raise TaskDoesNotExist()
            else:
                # Only mirror values locally once the write is confirmed.
                if steps is not None:
                    self.steps = steps
                if finished is not None:
                    self.finished = finished
                if result is not None:
                    self.result = result
                if data is not None:
                    self.data = data
"""Support for deCONZ sensors."""
from __future__ import annotations
from collections.abc import Callable
from dataclasses import dataclass
from datetime import datetime
from pydeconz.sensor import (
AirQuality,
Consumption,
Daylight,
GenericStatus,
Humidity,
LightLevel,
Power,
Pressure,
SensorBase as PydeconzSensor,
Switch,
Temperature,
Time,
)
from homeassistant.components.sensor import (
DOMAIN,
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_TEMPERATURE,
ATTR_VOLTAGE,
CONCENTRATION_PARTS_PER_BILLION,
ENERGY_KILO_WATT_HOUR,
LIGHT_LUX,
PERCENTAGE,
POWER_WATT,
PRESSURE_HPA,
TEMP_CELSIUS,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
import homeassistant.util.dt as dt_util
from .const import ATTR_DARK, ATTR_ON
from .deconz_device import DeconzDevice
from .gateway import DeconzGateway, get_gateway_from_config_entry
# Description keys whose entities also expose the on/dark/etc. extra state
# attributes (see the "on"/"state" update keys added in DeconzSensor.__init__).
PROVIDES_EXTRA_ATTRIBUTES = (
    "battery",
    "consumption",
    "status",
    "humidity",
    "light_level",
    "power",
    "pressure",
    "temperature",
)
# Extra state attribute names used by this platform.
ATTR_CURRENT = "current"
ATTR_POWER = "power"
ATTR_DAYLIGHT = "daylight"
ATTR_EVENT_ID = "event_id"
@dataclass
class DeconzSensorDescriptionMixin:
    """Required values when describing secondary sensor attributes."""
    update_key: str  # pydeconz state key whose change should refresh this entity
    value_fn: Callable[[PydeconzSensor], float | int | str | None]  # reads the native value
@dataclass
class DeconzSensorDescription(
    SensorEntityDescription,
    DeconzSensorDescriptionMixin,
):
    """Class describing deCONZ sensor entities."""
    # Optional suffix appended to the device name (e.g. "Battery", "Temperature").
    suffix: str = ""
# Maps each pydeconz sensor class to the entity descriptions created for it.
# SENSOR_DESCRIPTIONS (below) is appended for every sensor type in setup.
ENTITY_DESCRIPTIONS = {
    AirQuality: [
        DeconzSensorDescription(
            key="air_quality",
            value_fn=lambda device: device.air_quality,  # type: ignore[no-any-return]
            update_key="airquality",
            state_class=SensorStateClass.MEASUREMENT,
        ),
        DeconzSensorDescription(
            key="air_quality_ppb",
            value_fn=lambda device: device.air_quality_ppb,  # type: ignore[no-any-return]
            suffix="PPB",
            update_key="airqualityppb",
            device_class=SensorDeviceClass.AQI,
            state_class=SensorStateClass.MEASUREMENT,
            native_unit_of_measurement=CONCENTRATION_PARTS_PER_BILLION,
        ),
    ],
    Consumption: [
        DeconzSensorDescription(
            key="consumption",
            value_fn=lambda device: device.scaled_consumption,  # type: ignore[no-any-return]
            update_key="consumption",
            device_class=SensorDeviceClass.ENERGY,
            state_class=SensorStateClass.TOTAL_INCREASING,
            native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
        )
    ],
    Daylight: [
        DeconzSensorDescription(
            key="status",
            value_fn=lambda device: device.status,  # type: ignore[no-any-return]
            update_key="status",
            icon="mdi:white-balance-sunny",
            entity_registry_enabled_default=False,
        )
    ],
    GenericStatus: [
        DeconzSensorDescription(
            key="status",
            value_fn=lambda device: device.status,  # type: ignore[no-any-return]
            update_key="status",
        )
    ],
    Humidity: [
        DeconzSensorDescription(
            key="humidity",
            value_fn=lambda device: device.scaled_humidity,  # type: ignore[no-any-return]
            update_key="humidity",
            device_class=SensorDeviceClass.HUMIDITY,
            state_class=SensorStateClass.MEASUREMENT,
            native_unit_of_measurement=PERCENTAGE,
        )
    ],
    LightLevel: [
        DeconzSensorDescription(
            key="light_level",
            value_fn=lambda device: device.scaled_light_level,  # type: ignore[no-any-return]
            update_key="lightlevel",
            device_class=SensorDeviceClass.ILLUMINANCE,
            native_unit_of_measurement=LIGHT_LUX,
        )
    ],
    Power: [
        DeconzSensorDescription(
            key="power",
            value_fn=lambda device: device.power,  # type: ignore[no-any-return]
            update_key="power",
            device_class=SensorDeviceClass.POWER,
            state_class=SensorStateClass.MEASUREMENT,
            native_unit_of_measurement=POWER_WATT,
        )
    ],
    Pressure: [
        DeconzSensorDescription(
            key="pressure",
            value_fn=lambda device: device.pressure,  # type: ignore[no-any-return]
            update_key="pressure",
            device_class=SensorDeviceClass.PRESSURE,
            state_class=SensorStateClass.MEASUREMENT,
            native_unit_of_measurement=PRESSURE_HPA,
        )
    ],
    Temperature: [
        DeconzSensorDescription(
            key="temperature",
            value_fn=lambda device: device.scaled_temperature,  # type: ignore[no-any-return]
            update_key="temperature",
            device_class=SensorDeviceClass.TEMPERATURE,
            state_class=SensorStateClass.MEASUREMENT,
            native_unit_of_measurement=TEMP_CELSIUS,
        )
    ],
    Time: [
        DeconzSensorDescription(
            key="last_set",
            value_fn=lambda device: device.last_set,  # type: ignore[no-any-return]
            update_key="lastset",
            device_class=SensorDeviceClass.TIMESTAMP,
            state_class=SensorStateClass.TOTAL_INCREASING,
        )
    ],
}
# Secondary descriptions checked for every sensor regardless of its class
# (created only when the device actually reports the attribute).
SENSOR_DESCRIPTIONS = [
    DeconzSensorDescription(
        key="battery",
        value_fn=lambda device: device.battery,  # type: ignore[no-any-return]
        suffix="Battery",
        update_key="battery",
        device_class=SensorDeviceClass.BATTERY,
        state_class=SensorStateClass.MEASUREMENT,
        native_unit_of_measurement=PERCENTAGE,
        entity_category=EntityCategory.DIAGNOSTIC,
    ),
    DeconzSensorDescription(
        key="secondary_temperature",
        value_fn=lambda device: device.secondary_temperature,  # type: ignore[no-any-return]
        suffix="Temperature",
        update_key="temperature",
        device_class=SensorDeviceClass.TEMPERATURE,
        state_class=SensorStateClass.MEASUREMENT,
        native_unit_of_measurement=TEMP_CELSIUS,
    ),
]
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up the deCONZ sensors."""
    gateway = get_gateway_from_config_entry(hass, config_entry)
    gateway.entities[DOMAIN] = set()
    # Tracks sensors that have not reported a battery level yet.
    battery_handler = DeconzBatteryHandler(gateway)
    @callback
    def async_add_sensor(sensors: list[PydeconzSensor] | None = None) -> None:
        """Add sensors from deCONZ.
        Create DeconzBattery if sensor has a battery attribute.
        Create DeconzSensor if not a battery, switch or thermostat and not a binary sensor.
        """
        entities: list[DeconzSensor] = []
        if sensors is None:
            sensors = gateway.api.sensors.values()
        for sensor in sensors:
            # CLIP (virtual) sensors are opt-in via a config option.
            if not gateway.option_allow_clip_sensor and sensor.type.startswith("CLIP"):
                continue
            if sensor.battery is None:
                battery_handler.create_tracker(sensor)
            known_entities = set(gateway.entities[DOMAIN])
            for description in (
                ENTITY_DESCRIPTIONS.get(type(sensor), []) + SENSOR_DESCRIPTIONS
            ):
                # Skip descriptions the device doesn't report a value for.
                if (
                    not hasattr(sensor, description.key)
                    or description.value_fn(sensor) is None
                ):
                    continue
                new_entity = DeconzSensor(sensor, gateway, description)
                if new_entity.unique_id not in known_entities:
                    entities.append(new_entity)
                    if description.key == "battery":
                        battery_handler.remove_tracker(sensor)
        if entities:
            async_add_entities(entities)
    config_entry.async_on_unload(
        async_dispatcher_connect(
            hass,
            gateway.signal_new_sensor,
            async_add_sensor,
        )
    )
    # Add all sensors already known to the gateway, in numeric id order.
    async_add_sensor(
        [gateway.api.sensors[key] for key in sorted(gateway.api.sensors, key=int)]
    )
class DeconzSensor(DeconzDevice, SensorEntity):
    """Representation of a deCONZ sensor."""
    TYPE = DOMAIN
    _device: PydeconzSensor
    entity_description: DeconzSensorDescription
    def __init__(
        self,
        device: PydeconzSensor,
        gateway: DeconzGateway,
        description: DeconzSensorDescription,
    ) -> None:
        """Initialize deCONZ sensor."""
        self.entity_description = description
        super().__init__(device, gateway)
        if description.suffix:
            self._attr_name = f"{device.name} {description.suffix}"
        # Only device updates touching one of these keys trigger a state write
        self._update_keys = {description.update_key, "reachable"}
        if self.entity_description.key in PROVIDES_EXTRA_ATTRIBUTES:
            # These sensors also expose extra attributes derived from on/state
            self._update_keys.update({"on", "state"})
    @property
    def unique_id(self) -> str:
        """Return a unique identifier for this device."""
        # NOTE(review): the listed Danfoss models get a dedicated "-battery"
        # suffix, presumably to disambiguate entities sharing a serial —
        # confirm against the upstream integration history.
        if (
            self.entity_description.key == "battery"
            and self._device.manufacturer == "Danfoss"
            and self._device.model_id
            in [
                "0x8030",
                "0x8031",
                "0x8034",
                "0x8035",
            ]
        ):
            return f"{super().unique_id}-battery"
        if self.entity_description.suffix:
            return f"{self.serial}-{self.entity_description.suffix.lower()}"
        return super().unique_id
    @callback
    def async_update_callback(self) -> None:
        """Update the sensor's state.

        Only propagate the update if one of the keys this entity cares
        about actually changed.
        """
        if self._device.changed_keys.intersection(self._update_keys):
            super().async_update_callback()
    @property
    def native_value(self) -> StateType | datetime:
        """Return the state of the sensor."""
        # Timestamp sensors report strings; convert to datetime for HA
        if self.entity_description.device_class is SensorDeviceClass.TIMESTAMP:
            return dt_util.parse_datetime(
                self.entity_description.value_fn(self._device)  # type: ignore[arg-type]
            )
        return self.entity_description.value_fn(self._device)
    @property
    def extra_state_attributes(self) -> dict[str, bool | float | int | str | None]:
        """Return the state attributes of the sensor."""
        attr: dict[str, bool | float | int | str | None] = {}
        if self.entity_description.key not in PROVIDES_EXTRA_ATTRIBUTES:
            return attr
        if self._device.on is not None:
            attr[ATTR_ON] = self._device.on
        if self._device.secondary_temperature is not None:
            attr[ATTR_TEMPERATURE] = self._device.secondary_temperature
        # Device-type specific extra attributes
        if isinstance(self._device, Consumption):
            attr[ATTR_POWER] = self._device.power
        elif isinstance(self._device, Daylight):
            attr[ATTR_DAYLIGHT] = self._device.daylight
        elif isinstance(self._device, LightLevel):
            if self._device.dark is not None:
                attr[ATTR_DARK] = self._device.dark
            if self._device.daylight is not None:
                attr[ATTR_DAYLIGHT] = self._device.daylight
        elif isinstance(self._device, Power):
            attr[ATTR_CURRENT] = self._device.current
            attr[ATTR_VOLTAGE] = self._device.voltage
        elif isinstance(self._device, Switch):
            # Expose the event id of the deCONZ event bound to this switch
            for event in self.gateway.events:
                if self._device == event.device:
                    attr[ATTR_EVENT_ID] = event.event_id
        return attr
class DeconzSensorStateTracker:
    """Watch a sensor that lacks a battery state.

    Subscribes to the sensor's updates and fires the gateway's
    new-sensor signal once a battery reading first appears.
    """
    def __init__(self, sensor: PydeconzSensor, gateway: DeconzGateway) -> None:
        """Store references and subscribe to sensor updates."""
        self.sensor = sensor
        self.gateway = gateway
        sensor.register_callback(self.async_update_callback)
    @callback
    def close(self) -> None:
        """Unsubscribe from the sensor and drop the reference."""
        self.sensor.remove_callback(self.async_update_callback)
        self.sensor = None
    @callback
    def async_update_callback(self) -> None:
        """Signal the gateway when a battery attribute shows up."""
        if "battery" not in self.sensor.changed_keys:
            return
        async_dispatcher_send(
            self.gateway.hass,
            self.gateway.signal_new_sensor,
            [self.sensor],
        )
class DeconzBatteryHandler:
    """Own one DeconzSensorStateTracker per sensor without a battery state."""
    def __init__(self, gateway: DeconzGateway) -> None:
        """Initialize with an empty tracker registry."""
        self.gateway = gateway
        self._trackers: set[DeconzSensorStateTracker] = set()
    @callback
    def create_tracker(self, sensor: PydeconzSensor) -> None:
        """Start tracking *sensor* unless a tracker already exists for it."""
        already_tracked = any(t.sensor == sensor for t in self._trackers)
        if not already_tracked:
            self._trackers.add(DeconzSensorStateTracker(sensor, self.gateway))
    @callback
    def remove_tracker(self, sensor: PydeconzSensor) -> None:
        """Close and discard the tracker bound to *sensor*, if any."""
        match = next((t for t in self._trackers if t.sensor == sensor), None)
        if match is not None:
            match.close()
            self._trackers.discard(match)
| StarcoderdataPython |
1895155 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
UNIVERSIDAD EL BOSQUE
PROYECTO INTEGRADO - TRANSFORMADAS / TRANSDUCTORES
Obtiene trozos de audio del microfono de forma recurrente
y grafica sus componentes en tiempo y frecuencia (FFT).
Detecta un tono puro en cualquier frecuencia audible,
reproduce el tono puro y emite una senal de cancelacion
<NAME>
<NAME>
<NAME>
Copyright:
Utiliza Libreria SWHear
descargada de: http://github.com/swharden
"""
from PyQt4 import QtGui,QtCore
import sys
import ui_plot
import numpy as np
import pyqtgraph
import SWHear
from ctypes import *
# PyQt4 API-version compatibility shim: older builds expose UnicodeUTF8 as
# an explicit encoding argument for QApplication.translate(); newer builds
# removed it, so fall back to the three-argument form.
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
# Producer/consumer implementation: SWHear produces audio chunks, this
# window consumes them for plotting, tone detection and cancellation.
class Cancelador(QtGui.QMainWindow, ui_plot.Ui_MainWindow):
    """Main window: plots mic audio (PCM + FFT), detects a dominant pure
    tone and drives the phase-sweep cancellation process in SWHear."""
    def __init__(self, parent=None):
        pyqtgraph.setConfigOption('background', 'w')  # must be set before the widget is built
        super(Cancelador, self).__init__(parent)
        self.setupUi(self)
        self.audioFFT.plotItem.showGrid(True, True, 0.7)
        self.audioPCM.plotItem.showGrid(True, True, 0.7)
        self.maxFFT=0
        self.maxPCM=0
        self.ear = SWHear.SWHear()  # instantiate the producer/consumer
        self.ear.stream_start()
        self.deteccionIniciada = False
        self.cancelacionIniciada = False
        self.btnDetectar.clicked.connect(self.detectar)
        self.btnCancelar.clicked.connect(self.cancelar)
        self.vecesleido = 0
        self.terminal = sys.stdout
        self.cancelacionEnCurso=False
    def detectar(self):
        """Toggle tone detection and update the button label."""
        if not self.deteccionIniciada:
            self.deteccionIniciada = 1
            self.btnDetectar.setText(_translate("MainWindow", "SUSPENDER DETECCIÓN", None))
        else:
            self.deteccionIniciada = False
            self.btnDetectar.setText(_translate("MainWindow", "INICIAR DETECCIÓN", None))
    def cancelar(self):
        """Toggle the cancellation subsystem; reset its state when turning off."""
        if not self.cancelacionIniciada:
            self.cancelacionIniciada = 1
            self.ear.AUDIOplay=False
            self.btnCancelar.setText(_translate("MainWindow", "APAGAR CANCELADOR", None))
        else:
            self.cancelacionIniciada = False
            self.cancelacionEnCurso=False
            self.ear.AUDIOplay=False
            self.ear.controlarCancelacion = False
            self.ear.ou.terminate();
            self.ear.fase = 0
            self.btnCancelar.setText(_translate("MainWindow", "ACTIVAR CANCELADOR", None))
    # Recurring refresh: update() re-schedules itself via a QTimer single shot
    def update(self):
        """Refresh plots, run tone detection and drive the cancellation sweep."""
        if not self.ear.data is None and not self.ear.fft is None:
            pcmMax=np.max(np.abs(self.ear.data))
            if pcmMax>self.maxPCM:
                self.maxPCM=pcmMax  # track the max mic amplitude read from the producer
                self.audioPCM.plotItem.setRange(yRange=[-pcmMax,pcmMax])
            if np.max(self.ear.fft)>self.maxFFT:
                self.maxFFT=np.max(np.abs(self.ear.fft))  # track the max FFT amplitude
                self.audioFFT.plotItem.setRange(yRange=[0,self.maxFFT])  # rescale axis to max amplitude
            #self.pbLevel.setValue(1000*pcmMax/self.maxPCM)
            pen=pyqtgraph.mkPen(color='b')
            self.audioPCM.plot(self.ear.datax,self.ear.data,
                               pen=pen,clear=True)
            pen=pyqtgraph.mkPen(color='r')
            self.audioFFT.plot(self.ear.fftx[:2048],self.ear.fft[:2048],
                               pen=pen,clear=True)  # only the first 2048 bins are plotted
        # Peak detection in the spectrum
        if self.deteccionIniciada:
            indiceFFTX = np.argmax(self.ear.fft)
            freq1 = self.ear.fftx[indiceFFTX]  # candidate frequency to cancel
            if freq1 > 60:
                self.vecesleido=self.vecesleido+1
                # Require the peak to persist for more than 3 frames before locking on
                if self.vecesleido > 3:
                    self.ear.frecuenciaAcancelar = int(freq1)  # hand the target frequency to EAR
                    self.ear.indiceFFTX = indiceFFTX  # tell EAR where to read the freq's amplitude
                    self.vecesleido = 0
                    self.detectar()
                    self.dataLog.append('Freq. Candidata en: '+str(int(freq1))+' Hz')
        # Audio cancellation logic
        if self.cancelacionIniciada:
            if self.ear.frecuenciaAcancelar != 0:
                self.dataLog.moveCursor(QtGui.QTextCursor.End)  # keep the log cursor at the end
                # Kick off the 360-degree phase sweep (once)
                if not self.cancelacionEnCurso:
                    self.dataLog.append("--Generando 360 fases para tono de "+str(self.ear.frecuenciaAcancelar)+" Hz")
                    self.dataLog.append("...Espere...")
                    self.ear.log = self.dataLog
                    self.ear.controlarCancelacion = True
                    self.ear.activarCancelacion()
                    self.cancelacionEnCurso=True
                # Plot one period of the pure tone for the current phase
                if self.ear.papa.poll():
                    msg = self.ear.papa.recv()  # the canceller process reported a phase step
                    self.dataLog.append(' +'+str(msg)+' grados'+' Amplitud: '+str(self.ear.fft[self.ear.indiceFFTX])+' dB')
                    pen=pyqtgraph.mkPen(color='b')
                    try:
                        plotfase = self.ear.fases[int(msg)]
                        plotfase = plotfase[0:self.ear.periodoAntiTono]
                        self.cancelData.plot(plotfase, pen=pen,clear=True)
                    except:
                        pass
            else:
                self.dataLog.append('No hay freq aun')
        QtCore.QTimer.singleShot(1, self.update)  # re-schedule: refresh as fast as possible
if __name__=="__main__":
    # Build the Qt application, show the main window, start the
    # self-rescheduling update() loop, then enter the Qt event loop.
    app = QtGui.QApplication(sys.argv)
    form = Cancelador()
    form.show()
    form.update()
    app.exec_()
    print("LISTO")
| StarcoderdataPython |
6410497 | <filename>examples/enhancement.py
#!/usr/bin/env python
""" Explore image enhancement techniques """
from __future__ import division
import cv2
import numpy as np
import scipy.fftpack
from skimage import restoration
from examples import config
from selam.utils import img
from selam import colorconstancy as cc
from selam import enhancement as en
from selam import preprocess as pre
from lib.DCP import DarkChannelRecover as dcp
def darkChannelPrior(im, display=True):
    """Dehaze `im` with the dark channel prior and optionally show the result.

    When `display` is true, the original and recovered images are shown
    side by side until a key is pressed.
    """
    recovered = dcp.getRecoverScene(im)
    if not display:
        return
    side_by_side = np.hstack((im, recovered))
    cv2.imshow('dehazed', side_by_side)
    cv2.waitKey(0)
def taiwanLightCompensation(im, theta=0.5, thresh=0.2, B=(10, 30)):
    """ Perform light compensation
        http://www.csie.ntu.edu.tw/~fuh/personal/LightCompensation.pdf
        :param im: BGR uint8 input image
        :param theta: blend weight of the ORIGINAL image in the output;
                      the log-corrected image receives weight (1 - theta)
        :param thresh: HSV value threshold below which a pixel counts as dark
        :param B: (B_min, B_max) bounds for the logarithmic correction base
                  (tuple default replaces the old mutable list default)
        :return: light compensated uint8 image
    """
    def getOptimalB(im, thresh, B):
        """ Determine optimal base B for logarithmic correction
            :param thresh: threshold to count dark pixels
            :param B: (B_min, B_max)
        """
        HSV = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
        v = img.norm(HSV[..., 2])
        # Total number of pixels
        N_total = v.size
        N_dark = v[v < thresh].size
        # Interpolate between B_min and B_max by the dark-pixel ratio
        B_optimal = (B[1] - B[0]) * (N_dark / N_total) + B[0]
        return B_optimal
    I_corrected = logCorrection(im, getOptimalB(im, thresh, B))
    # Blend: theta weighs the original image, (1 - theta) the corrected one
    final = I_corrected * (1 - theta) + im * theta
    return np.uint8(final)
def chenLightCompensation(im, scaler=15, power=0.5):
    """ Two stage brightness compensation by H.T.Chen National Taiwan University
        :param im: BGR uint8 input image
        :param scaler: strength of brightening effect
        :param power: strength of darkening effect
        :return: brightened image, darkened image (both BGR uint8)
    """
    def brighten(im, scaler):
        # Boost the HSV value channel with a log curve, keeping hue/saturation
        HSV = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
        v = img.norm(HSV[..., 2])
        brighter = img.normUnity(scaler * np.log1p(2 - v) * v)
        v_scaled = np.uint8(brighter * 255)
        return cv2.cvtColor(cv2.merge((HSV[..., 0], HSV[..., 1], v_scaled)), cv2.COLOR_HSV2BGR)
    def darken(im, power):
        # Attenuate the value channel using locally averaged saturation/value
        HSV = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
        s = img.norm(HSV[..., 1])
        v = img.norm(HSV[..., 2])
        avgV = cv2.blur(v, (5, 5))
        avgS = cv2.blur(s, (5, 5))
        D_saturation = (avgS + 1) / 2
        D_brightness = 1 / ((1 + avgV)**power)
        v_scaled = D_brightness * D_saturation
        v_scaled = np.uint8(v_scaled * v * 255)
        return cv2.cvtColor(cv2.merge((HSV[..., 0], HSV[..., 1], v_scaled)), cv2.COLOR_HSV2BGR)
    I_bright = brighten(im, scaler)
    I_darken = darken(im, power)
    return I_bright, I_darken
def logCorrection(im, B=3.0):
    """Apply logarithmic brightness correction (Weber-Fechner style).

    :param im: input image (any range; normalized internally)
    :param B: base of the logarithmic curve
    :return: corrected uint8 image
    """
    normalized = img.norm(im)
    scaled = normalized * (B - 1) + 1
    corrected = np.log(scaled) / np.log(B)
    return np.uint8(corrected * 255)
def enhanceFusion(im):
    """ Effective Single Underwater Image Enhancement by Fusion
        http://www.jcomputers.us/vol8/jcp0804-10.pdf

        Derives two inputs (white-balanced and CLAHE-equalized), builds
        chromatic/luminance/saliency weight maps for each, and blends the
        inputs with multi-scale (Laplacian pyramid) fusion.
        NOTE: Python 2 code (uses ``xrange``).
        :return: (maps of input1, maps of input2, weight map 1,
                  weight map 2, fused BGR image)
    """
    def genImgPyramid(im, n_scales=6):
        """ Generate Gaussian and Laplacian pyramid
            :param n_scales: number of pyramid layers
            :return: Gaussian pyramid, Laplacian pyramid
        """
        G = im.copy()
        gp = [G]
        # Generate Gaussian Pyramid
        for i in range(n_scales):
            G = cv2.pyrDown(G)
            gp.append(G)
        lp = [gp[n_scales-1]]
        # Generate Laplacian Pyramid
        for i in range(n_scales - 1, 0, -1):
            size = (gp[i - 1].shape[1], gp[i - 1].shape[0])
            GE = cv2.pyrUp(gp[i], dstsize=size)
            L = cv2.subtract(gp[i - 1], GE)
            lp.append(L)
        return gp, lp
    def saliencyMap(im):
        # Distance of each blurred Lab pixel from the mean Lab color
        Lab = cv2.cvtColor(im, cv2.COLOR_BGR2Lab)
        Lab_blur = cv2.GaussianBlur(Lab, (5, 5), 20)
        Lab_blur = img.norm(Lab_blur)
        Lab = img.norm(Lab)
        I_mean = np.zeros_like(im)
        I_mean[..., 0] = np.mean(Lab[..., 0])
        I_mean[..., 1] = np.mean(Lab[..., 1])
        # NOTE(review): channel 2 takes the mean of Lab[..., 1] — likely
        # should be Lab[..., 2]; confirm against the reference paper.
        I_mean[..., 2] = np.mean(Lab[..., 1])
        saliencyMap = np.linalg.norm(I_mean - Lab_blur, axis=2)
        return saliencyMap
    def chromaticMap(im, sigma=0.3):
        # Gaussian falloff of saturation distance from the max saturation
        hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
        hsv = img.norm(hsv)
        s = hsv[..., 1]
        num = - (s - np.max(s))**2
        chromaticMap = np.exp(num / (2 * sigma * sigma))
        return chromaticMap
    def luminanceMap(im):
        # Std-dev across B, G, R and V channels per pixel
        hsv = cv2.cvtColor(im, cv2.COLOR_BGR2HSV)
        hsv = img.norm(hsv)
        bgr = img.norm(im)
        b, g, r = np.dsplit(bgr, 3)
        h, s, v = np.dsplit(hsv, 3)
        luminanceMap = np.std([b, g, r, v], axis=0)
        return np.squeeze(luminanceMap, axis=(2,))
    def generateMaps(im):
        # All three maps normalized to [0, 1]; also return a side-by-side
        # uint8 visualization for debugging
        lMap = img.normUnity(luminanceMap(im))
        cMap = img.normUnity(chromaticMap(im))
        sMap = img.normUnity(saliencyMap(im))
        maps = [cMap, lMap, sMap]
        out = np.hstack((np.uint8(cMap * 255), np.uint8(lMap * 255), np.uint8(sMap * 255)))
        return maps, out
    # Input 1: white-balanced (shades-of-grey); input 2: CLAHE on its Lab form
    input1 = cc.shadegrey(im)
    input2 = en.claheColor(cv2.cvtColor(input1, cv2.COLOR_BGR2Lab))
    input2 = cv2.cvtColor(input2, cv2.COLOR_Lab2BGR)
    maps1, out1 = generateMaps(input1)
    # NOTE(review): hardcoded debug dump path — remove or make configurable
    cv2.imwrite('/home/batumon/Downloads/map1.png', out1)
    maps2, out2 = generateMaps(input2)
    # Normalize each map pair so the two inputs' weights sum to ~1
    # (the 0.0001 epsilon guards against division by zero)
    sumCMap = maps1[0] + maps2[0] + 0.0001
    sumLMap = maps1[1] + maps2[1] + 0.0001
    sumSMap = maps1[2] + maps2[2] + 0.0001
    normCMap1 = maps1[0] / sumCMap
    normLMap1 = maps1[1] / sumLMap
    normSMap1 = maps1[2] / sumSMap
    normCMap2 = maps2[0] / sumCMap
    normLMap2 = maps2[1] / sumLMap
    normSMap2 = maps2[2] / sumSMap
    # Aggregate weight maps, broadcast to 3 channels for per-channel blending
    finalMap1 = img.normUnity(np.sum([normCMap1, normLMap1, normSMap1], axis=0))
    finalMap1 = np.repeat(finalMap1[:, :, np.newaxis], 3, axis=2)
    finalMap2 = img.normUnity(np.sum([normCMap2, normLMap2, normSMap2], axis=0))
    finalMap2 = np.repeat(finalMap2[:, :, np.newaxis], 3, axis=2)
    # Multi-scale fusion: Gaussian pyramid of weights x Laplacian of inputs
    gp1, _ = genImgPyramid(finalMap1)
    gp2, _ = genImgPyramid(finalMap2)
    _, lp1 = genImgPyramid(input1)
    _, lp2 = genImgPyramid(input2)
    f = []
    for i in xrange(6):
        f1 = gp1[5 - i] * lp1[i]
        f2 = gp2[5 - i] * lp2[i]
        res = np.uint8(0.5 * f1 + 0.5 * f2)
        f.append(res)
    # Collapse the fused pyramid back into a full-resolution image
    ls_ = f[0]
    for i in xrange(1, 6):
        size = (f[i].shape[1], f[i].shape[0])
        ls_ = cv2.pyrUp(ls_, dstsize=size)
        ls_ = cv2.add(ls_, f[i])
    return maps1, maps2, finalMap1, finalMap2, ls_
def homomorphicFilterColor(im):
    """Apply the homomorphic filter independently to each channel of a
    color image and merge the results back together."""
    channels = cv2.split(im)
    filtered = tuple(homomorphicFilter(channel) for channel in channels)
    return cv2.merge(filtered)
def homomorphicFilter(im):
    """ Homomorphic filtering on single channel image
        http://stackoverflow.com/questions/24731810/segmenting-license-plate-characters

        Separates illumination (low frequency) from reflectance (high
        frequency) in the log domain and recombines them with gains
        gamma1/gamma2 to even out uneven lighting.
        Fixes vs. the original: ``scipy.real`` (a deprecated NumPy alias
        removed from SciPy) is replaced by ``np.real``, and the dead
        threshold/border/area-open post-processing whose results were
        discarded has been removed — the return value is unchanged.
        :param im: single channel image
        :return: homomorphic-filtered uint8 image
    """
    # Normalizes image to [0, 1]
    im = img.norm(im)
    rows, cols = im.shape
    # Work in the log domain so illumination and reflectance become additive
    imgLog = np.log1p(im)
    # Gaussian mask of sigma = 10 on a doubled grid (avoids FFT wrap-around)
    M = 2*rows + 1
    N = 2*cols + 1
    sigma = 10
    (X, Y) = np.meshgrid(np.linspace(0, N-1, N), np.linspace(0, M-1, M))
    centerX = np.ceil(N/2)
    centerY = np.ceil(M/2)
    gaussianNumerator = (X - centerX)**2 + (Y - centerY)**2
    # Low pass and high pass filters
    Hlow = np.exp(-gaussianNumerator / (2*sigma*sigma))
    Hhigh = 1 - Hlow
    # Move origin of filters so that it's at the top left corner to
    # match with the input image
    HlowShift = scipy.fftpack.ifftshift(Hlow.copy())
    HhighShift = scipy.fftpack.ifftshift(Hhigh.copy())
    # Filter the image and crop back to the original size
    If = scipy.fftpack.fft2(imgLog.copy(), (M, N))
    Ioutlow = np.real(scipy.fftpack.ifft2(If.copy() * HlowShift, (M, N)))
    Iouthigh = np.real(scipy.fftpack.ifft2(If.copy() * HhighShift, (M, N)))
    # Set scaling factors (attenuate illumination, boost reflectance) and add
    gamma1 = 0.3
    gamma2 = 1.5
    Iout = gamma1*Ioutlow[0:rows, 0:cols] + gamma2*Iouthigh[0:rows, 0:cols]
    # Anti-log then rescale to [0,1] and to uint8
    Ihmf = np.expm1(Iout)
    Ihmf = (Ihmf - np.min(Ihmf)) / (np.max(Ihmf) - np.min(Ihmf))
    Ihmf2 = np.array(255*Ihmf, dtype="uint8")
    return Ihmf2
def anisodiffColor(img, niter=1, kappa=50, gamma=0.1, step=(1., 1.),
                   option=1):
    """Run Perona-Malik anisotropic diffusion channel-wise on a color image.

    Each channel is diffused independently with ``anisodiff`` and the
    results are merged back into a uint8 image.
    """
    channels = cv2.split(img)
    diffused = [anisodiff(channel, niter, kappa, gamma, step, option)
                for channel in channels]
    return np.uint8(cv2.merge(diffused))
def anisodiff(img, niter=1, kappa=50, gamma=0.1, step=(1., 1.),
              option=1):
    """
    Anisotropic diffusion on single channel image
    Usage:
        imgout = anisodiff(im, niter, kappa, gamma, step, option)
    Arguments:
        img    - input image (single channel, any numeric dtype)
        niter  - number of iterations
        kappa  - conduction coefficient 20-100 ?
        gamma  - max value of .25 for stability
        step   - tuple, the distance between adjacent pixels in (y,x)
        option - 1 Perona Malik diffusion equation No 1
                 2 Perona Malik diffusion equation No 2
    Returns:
        imgout - diffused image (float32).
    kappa controls conduction as a function of gradient. If kappa is low
    small intensity gradients are able to block conduction and hence diffusion
    across step edges. A large value reduces the influence of intensity
    gradients on conduction.
    gamma controls speed of diffusion (you usually want it at a maximum of
    0.25)
    step is used to scale the gradients in case the spacing between adjacent
    pixels differs in the x and y axes
    Diffusion equation 1 favours high contrast edges over low contrast ones.
    Diffusion equation 2 favours wide regions over smaller ones.
    Reference:
    <NAME> and <NAME>.
    Scale-space and edge detection using ansotropic diffusion.
    IEEE Transactions on Pattern Analysis and Machine Intelligence,
    12(7):629-639, July 1990.
    Original MATLAB code by <NAME>
    School of Computer Science & Software Engineering
    The University of Western Australia
    pk @ csse uwa edu au
    <http://www.csse.uwa.edu.au>
    Translated to Python and optimised by <NAME>
    Department of Pharmacology
    University of Oxford
    <<EMAIL>>
    June 2000  original version
    March 2002 corrected diffusion eqn No 2.
    July 2012 translated to Python
    """
    # initialize output array (work in float32 regardless of input dtype)
    img = img.astype('float32')
    imgout = img.copy()
    # initialize some internal variables
    deltaS = np.zeros_like(imgout)
    deltaE = deltaS.copy()
    NS = deltaS.copy()
    EW = deltaS.copy()
    gS = np.ones_like(imgout)
    gE = gS.copy()
    # range (not xrange) keeps the loop Python 2/3 compatible
    for ii in range(niter):
        # calculate the diffs (south and east neighbour differences)
        deltaS[:-1, :] = np.diff(imgout, axis=0)
        deltaE[:, :-1] = np.diff(imgout, axis=1)
        # conduction gradients (only need to compute one per dim!)
        if option == 1:
            gS = np.exp(-(deltaS/kappa)**2.)/step[0]
            gE = np.exp(-(deltaE/kappa)**2.)/step[1]
        elif option == 2:
            gS = 1./(1.+(deltaS/kappa)**2.)/step[0]
            gE = 1./(1.+(deltaE/kappa)**2.)/step[1]
        # update matrices (flux in each direction)
        E = gE*deltaE
        S = gS*deltaS
        # subtract a copy that has been shifted 'North/West' by one
        # pixel to obtain the divergence of the flux
        NS[:] = S
        EW[:] = E
        NS[1:, :] -= S[:-1, :]
        EW[:, 1:] -= E[:, :-1]
        # update the image
        imgout += gamma*(NS+EW)
    return imgout
if __name__ == '__main__':
    # Demo entry point: load the benchmark buoy sequence and run the
    # multi-scale fusion enhancement on the first frame.
    path = './benchmark/datasets/buoy/size_change'
    imgs = img.get_jpgs(path)
    enhanceFusion(imgs[0])
| StarcoderdataPython |
4933688 | <filename>graphium/utilities/algorithm/routing_algorithm.py
# -*- coding: utf-8 -*-
"""
/***************************************************************************
QGIS plugin 'Graphium'
/***************************************************************************
*
* Copyright 2020 <NAME> @ Salzburg Research
* eMail <EMAIL>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
***************************************************************************/
"""
import os
from datetime import datetime
# PyQt5 imports
from PyQt5.QtCore import QVariant
from PyQt5.QtGui import (QIcon)
# qgis imports
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import (QgsProcessingParameterEnum, QgsProcessingParameterString,
QgsProcessingParameterPoint, QgsProcessingParameterFeatureSink, QgsProcessing, QgsFeature,
QgsFeatureSink, QgsWkbTypes, QgsCoordinateReferenceSystem, QgsVectorLayer, QgsField, QgsGeometry,
QgsProcessingAlgorithm, QgsProcessingMultiStepFeedback)
# plugin
from ...connection.model.graphium_server_type import GraphiumServerType
from ..graphium_utilities_api import GraphiumUtilitiesApi
from ....graphium.connection.graphium_connection_manager import GraphiumConnectionManager
from ....graphium.settings import Settings
class RoutingAlgorithm(QgsProcessingAlgorithm):
    """
    This algorithm finds the fastest or shortest route between two coordinates.
    """
    # Plugin root directory (three levels above this module)
    plugin_path = os.path.split(os.path.split(os.path.split(os.path.dirname(__file__))[0])[0])[0]
    def __init__(self):
        super().__init__()
        # Constants used to refer to parameters and outputs. They will be
        # used when calling the algorithm from another algorithm, or when
        # calling from the QGIS console.
        self.alg_group = "Utilities"
        self.alg_group_id = "graphutilities"
        self.alg_name = "routing"
        self.alg_display_name = "Routing"
        self.START_COORDINATE = 'START_COORDINATE'
        self.END_COORDINATE = 'END_COORDINATE'
        # self.CUT_SEGMENTS = 'CUT_SEGMENTS'
        self.ROUTING_MODE = 'ROUTING_MODE'
        self.ROUTING_CRITERIA = 'ROUTING_CRITERIA'
        self.SERVER_NAME = 'SERVER_NAME'
        self.GRAPH_NAME = 'OVERRIDE_GRAPH_NAME'
        self.GRAPH_VERSION = 'OVERRIDE_GRAPH_VERSION'
        self.OUTPUT = 'OUTPUT'
        self.OUTPUT_PATH = 'OUTPUT_PATH'
        self.connection_manager = GraphiumConnectionManager()
        self.server_name_options = list()
        # Enum option lists; parameter values are indices into these lists
        self.routing_mode_options = ['CAR', 'BIKE', 'PEDESTRIAN', 'PEDESTRIAN_BARRIERFREE']
        self.routing_criteria_options = ['LENGTH', 'MIN_DURATION', 'CURRENT_DURATION']
    def createInstance(self):
        """Return a fresh instance for the processing framework."""
        return RoutingAlgorithm()
    def group(self):
        """Return the display name of the algorithm group."""
        return self.tr(self.alg_group)
    def groupId(self):
        """Return the unique id of the algorithm group."""
        return self.alg_group_id
    def name(self):
        """Return the unique algorithm id."""
        return self.alg_name
    def displayName(self):
        """Return the translated display name."""
        return self.tr(self.alg_display_name)
    def shortHelpString(self):
        return self.tr('Use this algorithm to find the fastest or shortest route between two coordinates. The route '
                       'can be optimized for different modes of transport.\n\n'
                       'The start and end coordinates can be set (1) with the [...] button right to the text input or '
                       '(2) manually according to format "lon,lat [coordinate reference system]" '
                       '(e.g. 13.0,47.8 [EPSG:4326])')
    def icon(self):
        """Return the routing icon bundled with the plugin."""
        return QIcon(os.path.join(self.plugin_path, 'icons/icon_routing.svg'))
    def tr(self, string):
        """
        Returns a translatable string with the self.tr() function.
        """
        return QCoreApplication.translate('Processing', string)
    def initAlgorithm(self, config=None):
        """
        Definition of inputs and outputs of the algorithm, along with some other properties.
        """
        # We add the input vector layer. It can have any kind of geometry
        # It is a mandatory (not optional) one, hence the False argument
        # self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT, self.tr('Input gpx file'),
        #                                                       [QgsProcessing.TypeVectorLine]))
        self.addParameter(QgsProcessingParameterPoint(self.START_COORDINATE,
                                                      self.tr('Start coordinate'),
                                                      None, False))
        self.addParameter(QgsProcessingParameterPoint(self.END_COORDINATE,
                                                      self.tr('End coordinate'),
                                                      None, False))
        self.addParameter(QgsProcessingParameterEnum(self.ROUTING_MODE,
                                                     self.tr('Select routing mode'),
                                                     options=self.routing_mode_options,
                                                     allowMultiple=False, defaultValue=0, optional=False))
        self.addParameter(QgsProcessingParameterEnum(self.ROUTING_CRITERIA,
                                                     self.tr('Select routing criteria'),
                                                     options=self.routing_criteria_options,
                                                     allowMultiple=False, defaultValue=1, optional=False))
        # self.addParameter(QgsProcessingParameterBoolean(self.CUT_SEGMENTS,
        #                                                 self.tr('Cut segments'),
        #                                                 True, True))
        # read server connections and prepare enum items; pre-select the
        # server stored in the plugin settings if one matches
        self.server_name_options.clear()
        selected_graph_server = Settings.get_selected_graph_server()
        selected_index = 0
        for index, connection in enumerate(self.connection_manager.read_connections()):
            self.server_name_options.append(connection.name)
            if selected_index == 0 and isinstance(selected_graph_server, str)\
                    and connection.name == selected_graph_server:
                selected_index = index
        self.addParameter(QgsProcessingParameterEnum(self.SERVER_NAME, self.tr('Server name'),
                                                     self.server_name_options, False, selected_index, False))
        # Default graph name/version come from the plugin settings when set
        s = Settings.get_selected_graph_name()
        default_graph_name = ''
        if isinstance(s, str):
            default_graph_name = s
        self.addParameter(QgsProcessingParameterString(self.GRAPH_NAME, self.tr('Graph name'),
                                                       default_graph_name, False, True))
        s = Settings.get_selected_graph_version()
        default_graph_version = ''
        if isinstance(s, str):
            default_graph_version = s
        self.addParameter(QgsProcessingParameterString(self.GRAPH_VERSION, self.tr('Graph version'),
                                                       default_graph_version, False, True))
        # We add a vector layer as output
        self.addParameter(QgsProcessingParameterFeatureSink(self.OUTPUT, self.tr('Routing output'),
                                                            QgsProcessing.TypeVectorLine))
        self.addParameter(QgsProcessingParameterFeatureSink(self.OUTPUT_PATH, self.tr('Routing path output'),
                                                            QgsProcessing.TypeVector))
    def processAlgorithm(self, parameters, context, model_feedback):
        """Run the routing request and build the two output layers."""
        # source: 'export as python script' in processing modeler
        feedback = QgsProcessingMultiStepFeedback(4, model_feedback)
        # Coordinates are interpreted in WGS84 (EPSG:4326)
        start_coordinate = self.parameterAsPoint(parameters, self.START_COORDINATE, context,
                                                 QgsCoordinateReferenceSystem(4326))
        end_coordinate = self.parameterAsPoint(parameters, self.END_COORDINATE, context,
                                               QgsCoordinateReferenceSystem(4326))
        routing_mode = self.routing_mode_options[self.parameterAsInt(parameters, self.ROUTING_MODE, context)]
        routing_criteria = self.routing_criteria_options[self.parameterAsInt(parameters, self.ROUTING_CRITERIA,
                                                                             context)]
        # cut_segments = self.parameterAsBool(parameters, self.CUT_SEGMENTS, context)
        server_name = self.server_name_options[self.parameterAsInt(parameters, self.SERVER_NAME, context)]
        graph_name = self.parameterAsString(parameters, self.GRAPH_NAME, context)
        graph_version = self.parameterAsString(parameters, self.GRAPH_VERSION, context)
        # Connect to Graphium
        feedback.setCurrentStep(2)
        feedback.pushInfo("Connect to Graphium server '" + server_name + "' ...")
        graphium = GraphiumUtilitiesApi(feedback)
        selected_connection = self.connection_manager.select_graphium_server(server_name)
        if selected_connection is None:
            feedback.reportError('Cannot select connection to Graphium', True)
            return {self.OUTPUT: None, self.OUTPUT_PATH: None}
        if graphium.connect(selected_connection) is False:
            feedback.reportError('Cannot connect to Graphium', True)
            return {self.OUTPUT: None, self.OUTPUT_PATH: None}
        feedback.pushInfo("Start Routing task on Graphium server '" + server_name + "' ...")
        response = graphium.do_routing(graph_name, graph_version, start_coordinate.x(), start_coordinate.y(),
                                       end_coordinate.x(), end_coordinate.y(), datetime.today(), None,
                                       routing_mode, routing_criteria)
        # Process routing result
        if 'route' in response:
            if response['route']['length'] == 0:
                feedback.reportError('No route found', False)
                return {self.OUTPUT: None, self.OUTPUT_PATH: None}
        elif 'error' in response:
            if 'msg' in response['error']:
                if response['error']['msg'] == 'ContentNotFoundError':
                    feedback.reportError('Graphium server "' + server_name + '" does not support routing',
                                         True)
                else:
                    feedback.reportError(response['error']['msg'], True)
                return {self.OUTPUT: None, self.OUTPUT_PATH: None}
        else:
            feedback.reportError('Unknown routing error', True)
            feedback.reportError(str(response), True)
            return {self.OUTPUT: None, self.OUTPUT_PATH: None}
        # create feature output: one line feature carrying the route geometry
        # and its summary attributes
        feedback.setCurrentStep(3)
        vector_layer = self.prepare_vector_layer('route')
        (sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context, vector_layer.fields(),
                                               QgsWkbTypes.LineString, vector_layer.sourceCrs())
        if response['route']['geometry'] is not None:
            feature = QgsFeature()
            feature.setGeometry(QgsGeometry.fromWkt(response['route']['geometry']))
            feature.setFields(vector_layer.fields(), True)
            for attribute_key in response['route']:
                if feedback.isCanceled():
                    break
                try:
                    feature.setAttribute(attribute_key, response['route'][attribute_key])
                except KeyError:
                    # response keys without a matching layer field are skipped
                    pass
            sink.addFeature(feature, QgsFeatureSink.FastInsert)
        # create path output: one geometry-less feature per traversed segment
        feedback.setCurrentStep(4)
        path_layer = self.prepare_path_layer('route_path')
        (sink_path, dest_id_path) = self.parameterAsSink(parameters, self.OUTPUT_PATH, context, path_layer.fields(),
                                                         QgsWkbTypes.NoGeometry, vector_layer.sourceCrs())
        if response['route']['geometry'] is not None:
            total = 100.0 / len(response['route']['segments'])
            for current, path_segment in enumerate(response['route']['segments']):
                if feedback.isCanceled():
                    break
                feature = QgsFeature()
                feature.setFields(path_layer.fields(), True)
                feature.setAttribute('order', current)
                feature.setAttribute('segment_id', path_segment['id'])
                feature.setAttribute('linkDirectionForward', path_segment['linkDirectionForward'])
                sink_path.addFeature(feature, QgsFeatureSink.FastInsert)
                feedback.setProgress(int(current * total))
        return {self.OUTPUT: dest_id, self.OUTPUT_PATH: dest_id_path}
    @staticmethod
    def prepare_vector_layer(layer_name):
        """Build the in-memory line layer holding the route summary fields."""
        layer_definition = 'LineString?crs=epsg:4326'
        vector_layer = QgsVectorLayer(layer_definition, layer_name, "memory")
        # Enter editing mode
        vector_layer.startEditing()
        attributes = list()
        attributes.append(QgsField('length', QVariant.Double, 'Real'))
        attributes.append(QgsField('duration', QVariant.Int, 'Integer'))
        attributes.append(QgsField('runtimeInMs', QVariant.Int, 'Integer'))
        attributes.append(QgsField('graphName', QVariant.String, 'String'))
        attributes.append(QgsField('graphVersion', QVariant.String, 'String'))
        vector_layer.dataProvider().addAttributes(attributes)
        vector_layer.updateFields()
        return vector_layer
    @staticmethod
    def prepare_path_layer(layer_name):
        """Build the in-memory attribute-only layer for the segment path."""
        path_layer = QgsVectorLayer('None', layer_name, "memory")
        # Enter editing mode
        path_layer.startEditing()
        attributes = list()
        attributes.append(QgsField('order', QVariant.Int, 'Integer'))
        attributes.append(QgsField('segment_id', QVariant.Int, 'Integer'))
        attributes.append(QgsField('linkDirectionForward', QVariant.String, 'String'))
        path_layer.dataProvider().addAttributes(attributes)
        path_layer.updateFields()
        return path_layer
| StarcoderdataPython |
6422155 | <filename>tests/test_contacts/test_delete_contact.py
from model.contact import Contact
import random
def test_delete_some_contact(app, db, check_ui):
    """Delete one randomly chosen contact via the list page and verify the DB state.

    Creates a fixture contact first when the DB is empty; when check_ui is
    set, also compares the (whitespace-stripped) UI list against the DB.
    """
    if not db.get_contact_list():
        app.contact.create(Contact(firstname="Dara", middlename="Test", lastname="Bozhenko", username="dbozhenko",
                                   title="TestContact", company="Company", address="Address,105/10",
                                   homephone="+38048578993", mobilephone="+38902849903", workphone="+98248585985",
                                   fax="+89240984749", email="<EMAIL>", email2="<EMAIL>",
                                   email3="<EMAIL>", homepage="test.db.com", birthday="4",
                                   birthmonth="July", birthyear="1989", aday="26", amonth="November", ayear="2000",
                                   address2="Second Adress, 35/9", phone2="+924050485498", notes="Testing notest"))
    before = db.get_contact_list()
    victim = random.choice(before)
    app.contact.delete_contact_by_id(victim.id)
    after = db.get_contact_list()
    assert len(after) == len(before) - 1
    before.remove(victim)
    assert before == after
    if check_ui:
        def strip_names(c):
            # UI values may carry stray whitespace; normalize before comparing.
            return Contact(id=c.id, firstname=c.firstname.strip(), lastname=c.lastname.strip())
        ui_sorted = sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
        db_sorted = sorted((strip_names(c) for c in db.get_contact_list()), key=Contact.id_or_max)
        assert ui_sorted == db_sorted
def test_delete_contact_from_modify_page(app, db, check_ui):
    """Delete one randomly chosen contact via its modify page and verify the DB state.

    Creates a fixture contact first when the DB is empty; when check_ui is
    set, also compares the (whitespace-stripped) UI list against the DB.
    """
    if not db.get_contact_list():
        app.contact.create(Contact(firstname="Dara", middlename="Test", lastname="Bozhenko", username="dbozhenko",
                                   title="TestContact", company="Company", address="Address,105/10",
                                   homephone="+38048578993", mobilephone="+38902849903", workphone="+98248585985",
                                   fax="+89240984749", email="<EMAIL>", email2="<EMAIL>",
                                   email3="<EMAIL>", homepage="test.db.com", birthday="4",
                                   birthmonth="July", birthyear="1989", aday="26", amonth="November", ayear="2000",
                                   address2="Second Adress, 35/9", phone2="+924050485498", notes="Testing notest"))
    before = db.get_contact_list()
    victim = random.choice(before)
    app.contact.delete_contact_from_modify_page_by_id(victim.id)
    after = db.get_contact_list()
    assert len(after) == len(before) - 1
    before.remove(victim)
    assert before == after
    if check_ui:
        def strip_names(c):
            # UI values may carry stray whitespace; normalize before comparing.
            return Contact(id=c.id, firstname=c.firstname.strip(), lastname=c.lastname.strip())
        ui_sorted = sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
        db_sorted = sorted((strip_names(c) for c in db.get_contact_list()), key=Contact.id_or_max)
        assert ui_sorted == db_sorted
| StarcoderdataPython |
1648823 | # slope
"""
x_0:
initial position
dt:
dt float
iteration:
integer
a:
accerelation
0 < a < 1
t:
dt * iteration
"""
# class slope_field(object):
# """docstring for slope_field"""
# def __init__(self, arg):
# super(slope_field, self).__init__()
# self.arg = arg
def slope(x_0, dt, iteration, a, deriv=None):
    """Trace the iteration x_{i+1} = x_i + a * deriv(x_i) for a number of steps.

    Args:
        x_0: initial position.
        dt: time increment per step. NOTE(review): dt only advances t; the
            state update is scaled by `a`, not dt — confirm that is intended.
        iteration: number of steps to take.
        a: step gain applied to the slope, 0 < a < 1.
        deriv: callable giving the slope at a position. Defaults to the
            module-level ``x_dot``, preserving the original behavior.

    Returns:
        Tuple ``(x, t)`` of numpy arrays of length ``iteration + 1`` holding
        the trajectory and the matching time stamps.
    """
    if deriv is None:
        deriv = x_dot  # original hard-coded dependency, kept as the default
    # Accumulate in Python lists and convert once: np.append reallocates the
    # whole array on every call, which made the original loop O(n^2).
    xs = [x_0]
    ts = [0]
    for _ in range(iteration):
        xs.append(xs[-1] + deriv(xs[-1]) * a)
        ts.append(ts[-1] + dt)
    return np.array(xs), np.array(ts)
# Input is a list of numbers, one per line; output how many numbers
# are greater than their immediate predecessor.
incr = 0
letzte = int(input())  # first value: baseline for the first comparison
while True:
    eingabe = input()
    # Stop on the first blank or non-numeric line.
    # NOTE(review): isdigit() rejects signs, so negative numbers also end
    # the loop — confirm inputs are non-negative.
    if eingabe != '' and eingabe.isdigit():
        zahl = int(eingabe)
        if zahl > letzte:
            incr += 1
        letzte = zahl  # current value becomes the predecessor for the next line
    else:
        break
print(incr)
5012530 | # Generated by Django 2.0.3 on 2018-05-05 14:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: create the ``Config`` model in the hosts app.

    Adds a table with an auto primary key, a unique ``interface`` name and an
    optional ``proxy_host``. Depends on hosts migration 0002.
    """
    dependencies = [
        ('hosts', '0002_auto_20180424_1114'),
    ]
    operations = [
        migrations.CreateModel(
            name='Config',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('interface', models.CharField(max_length=200, unique=True)),
                ('proxy_host', models.CharField(blank=True, max_length=200)),
            ],
        ),
    ]
| StarcoderdataPython |
11225049 | """
Setup module for the utilbox package.
"""
import setuptools
from utilbox import __conf__
def read_file(file_path):
    """Return the entire contents of the text file at *file_path*.

    The file is opened with an explicit UTF-8 encoding so the result does not
    depend on the platform's locale default (README.md and requirements.txt
    are expected to be UTF-8).

    Args:
        file_path: Path to the file to read.

    Returns:
        The file contents as a single string.
    """
    with open(file_path, "r", encoding="utf-8") as target_file:
        return target_file.read()
# Pull dynamic metadata from the package itself and its support files,
# then hand everything to setuptools in one call.
version = __conf__.config_map["version"]
requirements = read_file("requirements.txt").splitlines()
long_description = read_file("README.md")
packages = setuptools.find_packages(exclude=["tests"])
setuptools.setup(
    name="utilbox",
    description="Collection of utility packages for Python.",
    long_description=long_description,
    author="<NAME>",
    author_email="<EMAIL>",
    license="MIT",
    platforms=["Any"],
    url="https://github.com/jensonjose/utilbox",
    version=version,
    install_requires=requirements,
    packages=packages,
    classifiers=["Development Status :: 3 - Alpha",
                 "Environment :: Console",
                 "Intended Audience :: Developers",
                 "License :: OSI Approved :: MIT License",
                 "Natural Language :: English",
                 "Operating System :: OS Independent",
                 "Programming Language :: Python :: 2.7",
                 "Topic :: Software Development",
                 "Topic :: Software Development :: Libraries :: Python Modules",
                 "Topic :: Utilities"],
)
| StarcoderdataPython |
4990989 | <reponame>marionumza/vocal_v12
# -*- coding: utf-8 -*-
# Odoo addon manifest: declarative metadata only, no executable logic.
{
    "name": "Amazon Odoo Affiliate Integration",
    'summary': """Amazon Odoo Affiliate Integration""",
    "category": "Website",
    "version": "1.0.0",
    "author": "",
    "license": "Other proprietary",
    "maintainer": "",
    "website": "",
    "description": """Odoo Affiliate Extension for Amazon Affiliate Management""",
    "live_test_url": "",
    # Modules that must be installed before this one.
    "depends": [
        'website_sale'
    ],
    # Data files loaded on install/update, in order.
    "data": [
        'security/ir.model.access.csv',
        'views/assets.xml',
        'views/amazon_affiliate_template.xml',
        'views/amazon_product_view.xml'
    ],
    # "demo": ['data/demo_data_view.xml'],
    # "images": ['static/description/Banner.png'],
    "application": True,
    "installable": True,
    'sequence': 1
}
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.