id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
11382027 | <reponame>sweettuse/lifxlan3<filename>lifxlan3/routines/tile/tile_utils.py
from collections import defaultdict, Counter
from contextlib import suppress
from functools import lru_cache
from io import BytesIO
from itertools import islice, cycle, groupby, product
from types import SimpleNamespace
from typing import List, NamedTuple, Tuple, Dict, Optional, Callable, Iterable, Set, Union, Any
from PIL import Image
from lifxlan3 import RGBk, Color, Colors, init_log, timer
from lifxlan3.routines import colors_to_theme, ColorTheme
__author__ = 'acushner'
log = init_log(__name__)
# default tile dimensions (rows, cols) and the color used for unset pixels
default_shape = 8, 8
default_color = Colors.OFF
# type alias for a (num_rows, num_cols) pair
Shape = Tuple[int, int]
class RC(NamedTuple):
    """A (row, column) coordinate pair supporting elementwise arithmetic.

    The right-hand operand of the binary operators only needs to be indexable
    with two elements, so plain tuples work as well as other RCs.
    """
    r: int
    c: int

    def to(self, other, row_inc=1, col_inc=1):
        """range from self to other: yield every RC in the half-open box"""
        for row in range(self.r, other.r, row_inc):
            for col in range(self.c, other.c, col_inc):
                yield RC(row, col)

    def in_bounds(self, rc_ul, rc_lr) -> bool:
        """return True if self inside the bounds of [upper_left, lower_right)"""
        return rc_ul[0] <= self.r < rc_lr[0] and rc_ul[1] <= self.c < rc_lr[1]

    @property
    def area(self):
        """number of cells in a box of this size"""
        return self.r * self.c

    def __add__(self, other) -> 'RC':
        # elementwise add (overrides tuple concatenation)
        return RC(self.r + other[0], self.c + other[1])

    def __sub__(self, other) -> 'RC':
        return RC(self.r - other[0], self.c - other[1])

    def __floordiv__(self, other) -> 'RC':
        return RC(self.r // other[0], self.c // other[1])

    def __mod__(self, other) -> 'RC':
        return RC(self.r % other[0], self.c % other[1])

    def __lt__(self, other):
        # strictly-inside comparison on *both* axes (a partial order), not
        # tuple-lexicographic ordering
        return self.r < other[0] and self.c < other[1]

    def __eq__(self, other):
        return self.r == other[0] and self.c == other[1]

    def __rmod__(self, other) -> 'RC':
        # deliberately mirrors __mod__ with self still on the left-hand side
        return self % other

    def __divmod__(self, other) -> Tuple['RC', 'RC']:
        return self // other, self % other

    def __neg__(self):
        return RC(-self.r, -self.c)
class TileInfo(NamedTuple):
    """Static per-tile metadata: position in the chain plus mounting rotation."""
    idx: int  # index of the tile within the TileChain device
    origin: RC  # corner marker consumed by ColorMatrix.rotate_from_origin -- encodes how the tile is physically rotated
# physical layout of the 5-tile chain: maps a tile's (row, col) position in the
# overall arrangement to its TileInfo (chain index and rotation origin).
# NOTE(review): these values look specific to the author's own tile
# installation -- confirm before reusing on different hardware.
tile_map: Dict[RC, TileInfo] = {RC(1, 1): TileInfo(2, RC(0, 0)),
                                RC(1, 0): TileInfo(1, RC(0, 0)),
                                RC(0, 1): TileInfo(3, RC(0, 0)),
                                RC(2, -1): TileInfo(0, RC(1, 1)),
                                RC(0, 0): TileInfo(4, RC(1, 0))}
class DupesValids(NamedTuple):
    """
    used by ColorMatrix to help find bounding boxes for things

    `d` holds indices whose row/column is entirely duplicate/sentinel colors;
    `v` holds indices containing at least one differing color (real content).
    """
    d: frozenset  # "duplicate" (empty) indices
    v: frozenset  # "valid" (content-bearing) indices

    @property
    def first_valid(self):
        """return first valid value"""
        return min(self.v)

    @property
    def last_valid(self):
        """return last valid value"""
        return max(self.v)

    @property
    @lru_cache()  # safe to cache globally: instances are small, hashable namedtuples
    def by_group(self):
        """return inclusive (start, end) tuples for each run of consecutive valid indices

        Fixed: the previous pairwise-zip implementation dropped runs of length
        one -- an isolated valid index produced no adjacent pair, so it never
        appeared in any group (and a single-element `v` yielded no groups at all).
        """
        t = sorted(self.v)
        # index - value is constant within a run of consecutive integers
        runs = groupby(enumerate(t), key=lambda iv: iv[0] - iv[1])
        groups = []
        for _, run in runs:
            run = list(run)
            groups.append((run[0][1], run[-1][1]))
        return groups
# unique marker letting ColorMatrix.__init__ distinguish "no argument passed"
# from a falsy iterable such as []
_sentinel = object()
class ColorMatrix(List[List[Color]]):
    """represent Colors in a 2d-array form that allows for easy setting of TileChain lights"""

    def __init__(self, lst: Iterable = _sentinel, *, wrap=False):
        """
        :param lst: iterable of rows of Colors; omitted -> empty matrix
            (`_sentinel` distinguishes "not passed" from a falsy iterable)
        :param wrap: when True, RC indexing wraps around the edges (torus-style)
        """
        if lst is _sentinel:
            super().__init__()
        else:
            super().__init__(lst)
        self.wrap = wrap

    def __getitem__(self, item):
        """support 2-d `cm[RC/tuple]` indexing in addition to plain list indexing"""
        if self.wrap and isinstance(item, RC):
            item %= self.shape
        if isinstance(item, tuple):
            r, c = item
            return self[r][c]
        return super().__getitem__(item)

    def __setitem__(self, item, val):
        """support 2-d `cm[RC/tuple] = color` in addition to plain list assignment"""
        if self.wrap and isinstance(item, RC):
            # fixed: `wrap` was honored on reads but not writes, so an
            # out-of-range RC write raised IndexError on a wrapping matrix
            item %= self.shape
        if isinstance(item, tuple):
            r, c = item
            self[r][c] = val
            return val
        return super().__setitem__(item, val)

    @classmethod
    def from_filename(cls, fn) -> 'ColorMatrix':
        """read a png in using pillow and convert to ColorMatrix"""
        return cls.from_image(Image.open(fn))

    @classmethod
    def from_bytes(cls, b: Union[bytes, BytesIO]) -> 'ColorMatrix':
        """read an image from raw bytes or a BytesIO and convert to ColorMatrix"""
        if isinstance(b, bytes):
            b = BytesIO(b)
        return cls.from_image(Image.open(b))

    @classmethod
    def from_image(cls, im: Image) -> 'ColorMatrix':
        """convert a pillow Image (any mode) into a ColorMatrix"""
        px = im.convert('RGB').load()
        # pillow pixel access is (x, y) == (col, row)
        return ColorMatrix([RGBk(*px[c, r]).color
                            for c in range(im.width)]
                           for r in range(im.height))

    @classmethod
    def from_shape(cls, shape: Shape = default_shape, default: Color = default_color) -> 'ColorMatrix':
        """create a ColorMatrix with shape `shape` and colors set to `default`"""
        num_rows, num_cols = shape
        return cls([default] * num_cols for _ in range(num_rows))

    @classmethod
    def from_colors(cls, colors: List[Color], shape: Shape = (8, 8)) -> 'ColorMatrix':
        """convert a row-major list of colors into a ColorMatrix of shape `shape`"""
        num_rows, num_cols = shape
        if len(colors) != num_rows * num_cols:
            raise ValueError('incompatible shape!')
        cm = cls.from_shape(shape)
        for r in range(num_rows):
            for c in range(num_cols):
                # fixed: the row stride is the number of *columns*; the old
                # `r * num_rows + c` scrambled every non-square shape and
                # disagreed with `flattened` (this method's documented inverse)
                cm[r, c] = colors[r * num_cols + c]
        return cm

    @property
    def flattened(self) -> List[Color]:
        """flatten ColorMatrix to 1d-array (opposite of `from_colors`)"""
        return [c for row in self for c in row]

    @property
    def shape(self) -> Shape:
        """(num_rows, num_cols)"""
        return len(self), len(self[0])

    @property
    def height(self) -> int:
        return self.shape[0]

    @property
    def width(self) -> int:
        return self.shape[1]

    @property
    def by_coords(self) -> Iterable[Tuple[RC, Color]]:
        """yield coordinates and their colors"""
        yield from ((RC(r, c), color)
                    for r, row in enumerate(self)
                    for c, color in enumerate(row))

    def copy(self) -> 'ColorMatrix':
        """shallow-per-row copy: new row lists, same Color objects"""
        return ColorMatrix([c for c in row] for row in self)

    def set_max_brightness_pct(self, brightness_pct):
        """set brightness in all colors to at most `brightness_pct` pct"""
        # clamp pct to [0, 100] and scale onto the 16-bit brightness range
        brightness = 65535 * min(100.0, max(0.0, brightness_pct)) // 100
        for rc, c in self.by_coords:
            self[rc] = c._replace(brightness=min(c.brightness, brightness))

    def strip(self, strip_color: Optional[Color] = None) -> 'ColorMatrix':
        """strip out empty rows/cols from sides of image"""
        row_info = self.duplicates(strip_color)
        col_info = self.T.duplicates(strip_color)
        res = ([color for c, color in enumerate(row)
                if col_info.first_valid <= c <= col_info.last_valid]
               for r, row in enumerate(self)
               if row_info.first_valid <= r <= row_info.last_valid)
        return ColorMatrix(res)

    def duplicates(self, sentinel_color: Optional[Color] = None) -> DupesValids:
        """
        return rows where all colors are either `sentinel_color` or dupes
        to get columns, simply call with `self.T.duplicates()`
        """
        dupes, valids = set(), set()
        for r, row in enumerate(self):
            it = iter(row)
            # fixed: was `sentinel_color or next(it)`, which consumed the first
            # color for *any* falsy sentinel; compare against None explicitly
            cur = next(it) if sentinel_color is None else sentinel_color
            if any(color != cur for color in it):
                valids.add(r)
                continue
            dupes.add(r)
        return DupesValids(frozenset(dupes), frozenset(valids))

    def split(self, split_color: Optional[Color] = None) -> List['ColorMatrix']:
        """
        split image into boxes based on rows/columns of empty colors

         _________________
        | a | b | cccccc |
        |-----------------|
        | d | eeeee | fff |
        |-----------------|
        | g | h | iii | j |
        |___|___|_____|___|

        would end up with 10 images: a, b, cccccc, d, eeeee, fff, g, h, iii, and j
        """
        row_info = self.duplicates(split_color)
        # first cut horizontal bands, then cut each band into vertical pieces
        row_wise = (self.get_range(RC(r_start, 0), RC(r_end + 1, self.width + 1), default_color)
                    for r_start, r_end in row_info.by_group)
        return [r.get_range(RC(0, c_start), RC(r.height + 1, c_end + 1), default_color)
                for r in row_wise
                for c_start, c_end in r.T.duplicates(split_color).by_group]

    def get_range(self, rc0, rc1, default: Color = default_color) -> 'ColorMatrix':
        """create new ColorMatrix from the box of this CM bounded by [rc0, rc1)"""
        shape = rc1 - rc0
        cm = ColorMatrix.from_shape(shape)
        for rc in rc0.to(rc1):
            # coords outside this matrix fall back to `default`
            c = default
            with suppress(IndexError):
                c = self[rc]
            cm[rc - rc0] = c
        return cm

    def replace(self, color_map: Dict[Color, ColorTheme]):
        """
        modifies self

        replace colors from keys of color_map with colors from values in ColorMatrix
        """
        # compare on (hue, saturation, brightness) only, ignoring kelvin
        s = slice(0, 3)
        color_map = {k[s]: cycle(colors_to_theme(v)) for k, v in color_map.items()}
        for rc, c in self.by_coords:
            if c[s] in color_map:
                self[rc] = next(color_map[c[s]])

    def find_all(self, color: Union[Color, Set[Color]]) -> List[RC]:
        """return the coordinates of every pixel matching `color` (kelvin ignored)"""
        s = slice(0, 3)
        if isinstance(color, Color):
            color = {color}
        color = {c[s] for c in color}
        return [rc for rc, c in self.by_coords if c[s] in color]

    def to_tiles(self, shape=default_shape, offset: RC = RC(0, 0), bg: Color = Color(0, 0, 0)) \
            -> Dict[RC, 'ColorMatrix']:
        """
        return dict of RC -> ColorMatrix, where this RC represents
        the tile's coordinates vis-a-vis the rest of the group

         _______________
        |       |       |
        |(0, 0) | (0, 1)|
        |_______|_______|
        |       |       |
        |(1, 0) | (1, 1)|
        |_______|_______|
        """
        res = defaultdict(lambda: ColorMatrix.from_shape(shape, default=bg))
        for rc, color in self.by_coords:
            rc += offset
            # tile coordinate and the pixel's position within that tile
            tile, new_rc = divmod(rc, shape)
            res[tile][new_rc] = color
        # rotate each tile to match its physical mounting (see tile_map)
        return {tile_idx: cm.rotate_from_origin(tile_map.get(tile_idx, SimpleNamespace(origin=RC(0, 0))).origin)
                for tile_idx, cm in res.items()}

    def rotate_from_origin(self, origin: RC) -> 'ColorMatrix':
        """rotate the matrix according to which corner is the tile's origin"""
        n_r = {RC(0, 0): 0,
               RC(0, 1): 3,
               RC(1, 1): 2,
               RC(1, 0): 1}
        return self.rotate_clockwise(n_r[origin])

    def rotate_clockwise(self, n=1) -> 'ColorMatrix':
        """return a copy rotated 90 degrees clockwise `n` times"""
        m = self.copy()
        for _ in range(n):
            # reverse the rows then transpose == one clockwise quarter-turn
            m.reverse()
            m = list(zip(*m))
        return self._from_zip(m)

    @property
    def T(self) -> 'ColorMatrix':
        """transpose"""
        return self._from_zip(zip(*self))

    @classmethod
    def _from_zip(cls, zipped_vals) -> 'ColorMatrix':
        """convert zipped transpositions back to list of lists and ultimately a ColorMatrix"""
        return cls([list(r) for r in zipped_vals])

    @property
    def color_str(self) -> str:
        """terminal-printable (ANSI background colors) rendering of the matrix"""
        res = [80 * '=', f'ColorMatrix: Shape{self.shape}']
        # encode groups with (color, num_repeats) tuples for less overhead
        groups = (((c, sum(1 for _ in v)) for c, v in groupby(row)) for row in self)
        res.extend(''.join(c.color_str(' ' * total, set_bg=True) for c, total in row) for row in groups)
        res.append(80 * '=')
        res.append('')
        return '\n'.join(res)

    @property
    def describe(self) -> str:
        """
        return a histogram string of sorts showing colors and a visual representation
        of how much of that color is present in the image
        """
        d = sorted(Counter(self.flattened).items(), key=lambda kv: -kv[1])
        return '\n'.join(f'{str(c):>68}: {c.color_str(" " * count, set_bg=True)}' for c, count in d)

    def cast(self, converter: Callable) -> 'ColorMatrix':
        """
        cast individual colors using the converter callable
        """
        return ColorMatrix([converter(c) for c in row] for row in self)

    def resize(self, shape: Shape = (8, 8)) -> 'ColorMatrix':
        """resize image using pillow and return a new ColorMatrix"""
        if self.shape == shape:
            return self.copy()
        # fixed: pillow sizes are (width, height); passing self.shape
        # ((rows, cols)) transposed any non-square matrix
        im = Image.new('RGB', (self.width, self.height), 'black')
        pixels = im.load()
        for c, r in product(range(im.width), range(im.height)):
            with suppress(IndexError):
                pixels[c, r] = self[r][c].rgb[:3]
        y, x = shape
        # LANCZOS is the filter ANTIALIAS aliased; ANTIALIAS was removed in Pillow 10
        im = im.resize((x, y), Image.LANCZOS)
        pixels = im.load()
        res = ColorMatrix.from_shape(shape)
        for c, r in product(range(im.width), range(im.height)):
            with suppress(IndexError):
                res[r][c] = pixels[c, r]
        return res.cast(lambda rgb: Color.from_rgb(RGBk(*rgb)))
# utils
def to_n_colors(*colors, n=64):
    """Repeat `colors` cyclically until exactly `n` values have been produced."""
    # zip stops at `n` items; an empty `colors` simply yields an empty list
    return [color for color, _ in zip(cycle(colors), range(n))]
class ANode(NamedTuple):
    """A node in the A* search: a board position plus cost bookkeeping."""
    parent: Optional['ANode']
    pos: RC
    g: int = 0  # distance to start node
    h: int = 0  # heuristic for distance to goal

    @property
    def f(self) -> float:
        """total estimated cost of a path through this node"""
        return self.g + self.h

    @classmethod
    def create(cls, parent: 'ANode', pos, *goals: RC):
        """build a child of `parent` at `pos`, computing both cost terms"""
        return cls(parent, pos, cls.calc_g(parent), cls.calc_h(pos, *goals))

    @staticmethod
    def calc_g(parent: Optional['ANode']):
        """cost is one step beyond the parent's (or 1 when there is no parent)"""
        if parent is None:
            return 1
        return parent.g + 1

    @staticmethod
    def calc_h(pos: RC, *goals: RC):
        """squared euclidean distance from `pos` to the nearest goal"""
        return min(d.r ** 2 + d.c ** 2 for d in (goal - pos for goal in goals))

    def __eq__(self, other):
        # nodes are identified purely by board position so the open/closed
        # bookkeeping can match re-discovered cells
        return self.pos == other.pos

    def __hash__(self):
        return hash(self.pos)
def _get_path(node: ANode) -> List[RC]:
    """walk parent links from `node` to the root; return positions start-to-goal"""
    path = [node.pos]
    parent = node.parent
    while parent is not None:
        path.append(parent.pos)
        parent = parent.parent
    return path[::-1]
def _create_children(shape_ul: RC, shape_lr: RC, impassable: Set[RC], n: ANode, goals: Iterable[RC]) -> List[ANode]:
    """expand `n` into its passable, in-bounds 4-neighbors"""
    neighbors = (n.pos + step for step in (RC(1, 0), RC(0, 1), RC(-1, 0), RC(0, -1)))
    return [ANode.create(n, rc, *goals)
            for rc in neighbors
            if rc not in impassable and rc.in_bounds(shape_ul, shape_lr)]
class WrappedGoals(NamedTuple):
    """Maze setup for wrap-around (toroidal) pathfinding."""
    bounds: Tuple[RC, RC]  # (upper-left, lower-right) of the expanded search area
    goals: Set[RC]  # the goal replicated into each wrapped copy of the board
    impassable: Set[RC]  # impassable cells replicated likewise
def _create_wrapped_goals(shape: RC, goal: RC, impassable: Set[RC]) -> WrappedGoals:
    """set up maze for when an object is allowed to wrap around the screen

    The goal and each obstacle are replicated into the 3x3 grid of board
    copies so a path may leave one edge and re-enter at the opposite one.
    """
    s = RC(*shape)
    offsets = [RC(dr, dc) for dr, dc in product((-s.r, 0, s.r), (-s.c, 0, s.c))]
    goals = {goal - off for off in offsets}
    impassables = {cell - off for cell in impassable for off in offsets}
    # the search area spans one full board in every direction
    return WrappedGoals((-s, s + s), goals, impassables)
# TODO: write custom a_star that deals with moving snek
@timer
def a_star(maze: List[List[Any]], start: RC, end: RC, impassable: Set[RC] = frozenset(), allow_wrap=False):
    """return a* path for maze

    Returns the list of RCs from `start` to the goal (inclusive), or None when
    no path exists.  With `allow_wrap`, the board is treated as a torus via
    _create_wrapped_goals.

    NOTE(review): the heuristic (ANode.calc_h) is *squared* distance while g
    counts unit steps, so it can overestimate; paths are found but are not
    guaranteed shortest -- confirm this is intended.
    """
    start_n = ANode(None, start)
    shape_ul, shape_lr = RC(0, 0), RC(len(maze), len(maze[0]))
    if allow_wrap:
        (shape_ul, shape_lr), goals, impassable = _create_wrapped_goals(shape_lr, end, impassable)
    else:
        goals = {end, }
    opened, closed = [start_n], set()
    while opened:
        # pick the open node with the lowest f (linear scan of the open list)
        cur_n, cur_i = opened[0], 0
        for i, n in enumerate(opened):
            if n.f < cur_n.f:
                cur_n, cur_i = n, i
        closed.add(opened.pop(cur_i))
        # found the end
        if cur_n.pos in goals:
            return _get_path(cur_n)
        children = _create_children(shape_ul, shape_lr, impassable, cur_n, goals)
        for c in children:
            if c in closed:
                continue
            with suppress(ValueError):
                # if this position is already open with a strictly cheaper g,
                # keep the existing node; otherwise drop it in favor of `c`
                # (ValueError from .index means the position is not open yet)
                opened_idx = opened.index(c)
                opened_c = opened[opened_idx]
                if c.g > opened_c.g:
                    continue
                opened.pop(opened_idx)
            opened.append(c)
    # open list exhausted without reaching a goal -> implicitly returns None
def play():
    """demo: A* around a large wall on a 16x16 grid with wrap-around enabled"""
    cm = ColorMatrix.from_shape((16, 16))
    wall = {RC(r, c) for r, c in product(range(2, 15), range(14))}
    print(a_star(cm, RC(1, 1), RC(15, 15), wall, allow_wrap=True))
# run the pathfinding demo when executed as a script
if __name__ == '__main__':
    play()
| StarcoderdataPython |
'''Ask the user for 10 numbers and store them in a list.
Print the items of the filled list from back to front (reverse insertion order).'''
qtde_numeros = 10
lista_numeros = []
# fixed: the original primed one read before the loop and read again at the
# end of every pass, prompting for an 11th number that was then discarded;
# a for-loop reads exactly `qtde_numeros` values
for indice in range(1, qtde_numeros + 1):
    lista_numeros.append(float(input(f"Digite o número {indice} de {qtde_numeros}: ")))
print(f"\nNúmeros armazenados na lista em ordem de inclusão: {lista_numeros}")
# fixed: the exercise asks for the list back to front (reverse insertion
# order); the original printed it sorted in descending order instead
print(f"\nNúmeros da lista de trás para frente: {lista_numeros[::-1]}")
| StarcoderdataPython |
371666 | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 24 17:49:42 2020
@author: sixingliu, yaruchen
"""
from sensorpowa.core.frame import SensorFrame
from sensorpowa.core.series import SensorSeries
import numpy as np
class _SensorACM(SensorFrame):
    """
    The biosensor module also contains a three dimensional accelerometer
    for measurements of physical activity. A microcontroller digitizes the
    analog signals via a 12-bit A-D and the data is written to an onboard
    microSD card.

    # TODO: move to clinlic module
    - Seizure:
    Generalized tonic-clonic (GTC) seizures are composed of two primary phases
    -- the tonic phase and the clonic phase.
    The tonic phase involves stiffening of the limbs and flexion or extension of
    the neck, back and extremities. During the clonic phase, muscles of the entire
    body start to contract and relax rapidly. These convulsions are manifest in the
    ACM signal as rhythmic activity typically above 2 Hz.
    Thus, each epoch was evaluated for important periods using an algorithm by
    Vlachos and colleagues (Vlachos et al., 2004). The underlying assumption is
    that the magnitudes of the coefficients of the DFT of a non-periodic time
    series are distributed according to an exponential distribution.
    """

    def to_magnitude(self):
        """
        Combine all three accelerometer axes into the magnitude of the net
        acceleration (row-wise Euclidean norm).

        Returns
        -------
        SensorSeries
            One magnitude value per sample, shape (n_samples, 1).
        """
        # np.mat / np.matrix is deprecated; a plain 2-D array yields the same
        # row-wise norms
        data = np.asarray(self.vals)
        magnitude = np.linalg.norm(data, ord=2, axis=1, keepdims=True)
        return SensorSeries(magnitude)

    def pipeline(self, frequency, *pipe):
        """
        Apply each filter stage's ``transform`` in order and return the result.

        Parameters
        ----------
        frequency :
            Sampling frequency. NOTE(review): currently unused -- confirm
            whether the stages are supposed to receive it.
        *pipe :
            Filter objects exposing ``transform(frame) -> frame``.

        Returns
        -------
        The frame produced by the final stage (self if ``pipe`` is empty).
        """
        # chain through a local instead of rebinding `self`
        result = self
        for stage in pipe:
            result = stage.transform(result)
        return result

    def indicator_activate(self, window=None):
        """
        Return True when the sensor values show physical activity.

        Fixed: the original compared the ``np.std`` *function object* to 0.1,
        which always raises TypeError; the standard deviation of the sensor
        values is computed now.  NOTE(review): `window` is still unused and
        the 0.1 threshold is carried over from the original intent -- confirm
        both against the clinical spec.

        Returns
        -------
        bool
        """
        return bool(np.std(np.asarray(self.vals)) > 0.1)

    def feature_timedomain(self, window=None):
        # placeholder: time-domain feature extraction not implemented yet
        pass

    def feature_freqdomain(self, window=None):
        # placeholder: frequency-domain feature extraction not implemented yet
        pass

    def feature_nonlinear(self, window=None):
        # placeholder: nonlinear feature extraction not implemented yet
        pass
# plot example sample to check: compare the real islands' area~richness
# relationship against the first sample drawn from the fitted neutral model
import pandas as pd
import matplotlib.pyplot as plt

# parameters
# ---

# which island subset and density parameter the samples were generated with
subset = 'survey_only'
rho = 1700

# where are the samples and where to put results
dir_results = '../../results/neutral_data_fitm/'

# where to find the island area and richness data
fname_area = '../../data/processed/island_area.csv'  # island_name,area_sq_m,area_sq_km
fname_rich = '../../data/processed/island_richness.csv'  # island_name,richness

# which island subsets info kept
dirname_subsets = '../../data/processed/island_subsets/'

# which islands are we doing
# ---
islands = list(pd.read_csv(dirname_subsets + subset + '.csv', header=0)['island_name'])

# get real data's area vs richness
# ---

# create a dataframe: island_name, area, richness
df_area = pd.read_csv(fname_area)
df_rich = pd.read_csv(fname_rich)
# fixed: was a bare `assert`, which disappears under `python -O`
if len(df_area) != len(df_rich):
    raise ValueError(f'Number of islands in {fname_area} =/= {fname_rich}')
df_data = pd.merge(df_area, df_rich, on="island_name")

# subset to islands of interest
df_data_sub = df_data[df_data['island_name'].isin(islands)]
A_tru = df_data_sub['area_sq_km'].values
S_tru = df_data_sub['richness'].values

# get the first sample's area vs richness
# ---
fname = dir_results + 'samples_' + subset + '_rho_' + str(rho) + '.csv'
df = pd.read_csv(fname)
df_1 = df.iloc[0]

# island names in sample-column order (renamed from `islands` so it no longer
# shadows the subset list read above)
sample_islands = [s[2:] for s in df.columns if s[0:2] == 'J_']
# dictionary to turn island names into island areas
isle2area = dict(zip(df_data_sub['island_name'], df_data_sub['area_sq_km']))
A_sam = [isle2area[island] for island in sample_islands]

# get the richness of each island: the presence/absence matrix is stored as a
# flat 'p'/'a' string with S entries per island and H islands
data_row_as_str = df_1['presence_absence_matrix_cols_isles_concatenated']
data_row = [1 if ch == 'p' else 0 for ch in data_row_as_str]
S = int(df_1['S'])  # number of species (rows per island block)
H = int(df_1['H'])  # number of islands
# fixed: the old zip with the island list only supplied an unused loop
# variable; iterate the block offsets directly
S_sam = [sum(data_row[i:i + S]) for i in range(0, S * H, S)]

# plot both for comparison
# ---
plt.scatter(A_tru, S_tru, alpha=0.7, label='data')
plt.scatter(A_sam, S_sam, alpha=0.7, label='sample')
plt.xlabel(r'area (km$^2$)')
plt.ylabel(r'number of species')
plt.xscale('log')
plt.legend(loc='best')
plt.tight_layout()
plt.savefig(dir_results + 'sample_' + subset + '_rho_' + str(rho) + '.pdf')
plt.close()
| StarcoderdataPython |
1704114 | <reponame>marwanelshantaly/EIN-SELD<gh_stars>10-100
from pathlib import Path
import numpy as np
import pandas as pd
import methods.utils.SELD_evaluation_metrics_2019 as SELDMetrics2019
from methods.utils.data_utilities import (load_dcase_format,
to_metrics2020_format)
from methods.utils.SELD_evaluation_metrics_2020 import \
SELDMetrics as SELDMetrics2020
from methods.utils.SELD_evaluation_metrics_2020 import early_stopping_metric
def evaluate(cfg, dataset):
    """ Evaluate scores

    Computes both the DCASE2019 (segment-based SED + frame-level DOA) and the
    DCASE2020 (location-aware) metric sets over every submission csv, prints a
    one-line summary, and appends the scores to
    ``out_eval/<method>/<infer_id>/results.csv``.

    :param cfg: configuration dict (uses 'workspace_dir', 'dataset_dir',
        'method' and the 'inference' section)
    :param dataset: dataset object exposing clip_length, label_resolution and
        label_set
    """
    '''Directories'''
    print('Inference ID is {}\n'.format(cfg['inference']['infer_id']))
    out_infer_dir = Path(cfg['workspace_dir']).joinpath('out_infer').joinpath(cfg['method']) \
        .joinpath(cfg['inference']['infer_id'])
    submissions_dir = out_infer_dir.joinpath('submissions')
    main_dir = Path(cfg['dataset_dir'])
    dev_meta_dir = main_dir.joinpath('metadata_dev')
    eval_meta_dir = main_dir.joinpath('metadata_eval')
    # NOTE(review): a testset_type other than 'dev'/'eval' leaves meta_dir
    # unbound and fails below with NameError -- confirm upstream validation
    if cfg['inference']['testset_type'] == 'dev':
        meta_dir = dev_meta_dir
    elif cfg['inference']['testset_type'] == 'eval':
        meta_dir = eval_meta_dir

    # number of label frames per clip
    pred_frame_begin_index = 0
    gt_frame_begin_index = 0
    frame_length = int(dataset.clip_length / dataset.label_resolution)
    pred_output_dict, pred_sed_metrics2019, pred_doa_metrics2019 = {}, [], []
    gt_output_dict, gt_sed_metrics2019, gt_doa_metrics2019 = {}, [], []
    # accumulate predictions and ground truth clip by clip, offsetting frame
    # indices so all clips concatenate into one long sequence
    for pred_path in sorted(submissions_dir.glob('*.csv')):
        fn = pred_path.name
        gt_path = meta_dir.joinpath(fn)
        # pred
        output_dict, sed_metrics2019, doa_metrics2019 = load_dcase_format(
            pred_path, frame_begin_index=pred_frame_begin_index,
            frame_length=frame_length, num_classes=len(dataset.label_set), set_type='pred')
        pred_output_dict.update(output_dict)
        pred_sed_metrics2019.append(sed_metrics2019)
        pred_doa_metrics2019.append(doa_metrics2019)
        pred_frame_begin_index += frame_length
        # gt
        output_dict, sed_metrics2019, doa_metrics2019 = load_dcase_format(
            gt_path, frame_begin_index=gt_frame_begin_index,
            frame_length=frame_length, num_classes=len(dataset.label_set), set_type='gt')
        gt_output_dict.update(output_dict)
        gt_sed_metrics2019.append(sed_metrics2019)
        gt_doa_metrics2019.append(doa_metrics2019)
        gt_frame_begin_index += frame_length
    pred_sed_metrics2019 = np.concatenate(pred_sed_metrics2019, axis=0)
    pred_doa_metrics2019 = np.concatenate(pred_doa_metrics2019, axis=0)
    gt_sed_metrics2019 = np.concatenate(gt_sed_metrics2019, axis=0)
    gt_doa_metrics2019 = np.concatenate(gt_doa_metrics2019, axis=0)
    # convert to the dict layout the 2020 metric implementation expects
    pred_metrics2020_dict = to_metrics2020_format(pred_output_dict,
        pred_sed_metrics2019.shape[0], label_resolution=dataset.label_resolution)
    gt_metrics2020_dict = to_metrics2020_format(gt_output_dict,
        gt_sed_metrics2019.shape[0], label_resolution=dataset.label_resolution)

    # 2019 metrics
    num_frames_1s = int(1 / dataset.label_resolution)
    ER_19, F_19 = SELDMetrics2019.compute_sed_scores(pred_sed_metrics2019, gt_sed_metrics2019, num_frames_1s)
    LE_19, LR_19, _, _, _, _ = SELDMetrics2019.compute_doa_scores_regr(
        pred_doa_metrics2019, gt_doa_metrics2019, pred_sed_metrics2019, gt_sed_metrics2019)
    seld_score_19 = SELDMetrics2019.early_stopping_metric([ER_19, F_19], [LE_19, LR_19])

    # 2020 metrics
    dcase2020_metric = SELDMetrics2020(nb_classes=len(dataset.label_set), doa_threshold=20)
    dcase2020_metric.update_seld_scores(pred_metrics2020_dict, gt_metrics2020_dict)
    ER_20, F_20, LE_20, LR_20 = dcase2020_metric.compute_seld_scores()
    seld_score_20 = early_stopping_metric([ER_20, F_20], [LE_20, LR_20])

    metrics_scores = {
        'ER20': ER_20,
        'F20': F_20,
        'LE20': LE_20,
        'LR20': LR_20,
        'seld20': seld_score_20,
        'ER19': ER_19,
        'F19': F_19,
        'LE19': LE_19,
        'LR19': LR_19,
        'seld19': seld_score_19,
    }
    out_str = 'test: '
    for key, value in metrics_scores.items():
        out_str += '{}: {:.3f}, '.format(key, value)
    print('---------------------------------------------------------------------------------------------------'
          + '-------------------------------------------------')
    print(out_str)
    print('---------------------------------------------------------------------------------------------------'
          + '-------------------------------------------------')

    # append this run's scores to the cumulative results csv
    out_eval_dir = Path(cfg['workspace_dir']).joinpath('out_eval').joinpath(cfg['method']) \
        .joinpath(cfg['inference']['infer_id'])
    out_eval_dir.mkdir(parents=True, exist_ok=True)
    result_path = out_eval_dir.joinpath('results.csv')
    df = pd.DataFrame(metrics_scores, index=[0])
    # NOTE(review): mode='a' re-writes the header on every append -- confirm
    # whether downstream tooling expects that
    df.to_csv(result_path, sep=',', mode='a')
| StarcoderdataPython |
6402396 | <reponame>mudbungie/NetExplorer<filename>env/lib/python3.4/site-packages/bulbs/base/client.py
# -*- coding: utf-8 -*-
#
# Copyright 2011 <NAME> (http://jamesthornton.com)
# BSD License (see LICENSE for details)
#
"""
Bulbs supports pluggable backends. These are the abstract base classes that
provides the server-client interface. Implement these to create a new client.
"""
import inspect
from bulbs.config import Config, DEBUG
from bulbs.registry import Registry
from bulbs.utils import get_logger
from .typesystem import TypeSystem
# default URI used when a Client is constructed without an explicit Config
SERVER_URI = "http://localhost"
log = get_logger(__name__)
# TODO: Consider making these real Python Abstract Base Classes (import abc)
class Request(object):
    """Abstract base class for server requests; backends add transport logic."""

    def __init__(self, config, content_type):
        """
        Initializes a request object.

        :param config: Config object containing the server connection settings.
        :type config: bulbs.config.Config

        :param content_type: Content type the client sends and accepts.
        :type content_type: str
        """
        self.config = config
        self.content_type = content_type
        self._initialize()

    def _initialize(self):
        # hook for subclasses -- the base request needs no extra setup
        pass
class Result(object):
    """
    Abstract base class for a single result, not a list of results.

    Subclasses implement the ``get_*`` accessors for their backend's raw
    result format; the base class only stores the raw data.

    :param result: The raw result.
    :type result: dict

    :param config: The graph Config object.
    :type config: Config

    :ivar raw: The raw result.
    :ivar data: The data in the result.
    """

    def __init__(self, result, config):
        self.config = config

        # The raw result.
        self.raw = result

        # The data in the result (populated by subclasses).
        self.data = None

    def get_id(self):
        """
        Returns the element ID.

        :rtype: int
        """
        raise NotImplementedError

    def get_type(self):
        """
        Returns the element's base type, either "vertex" or "edge".

        :rtype: str
        """
        raise NotImplementedError

    def get_data(self):
        """
        Returns the element's property data.

        :rtype: dict
        """
        raise NotImplementedError

    def get_uri(self):
        """
        Returns the element URI.

        :rtype: str
        """
        raise NotImplementedError

    def get_outV(self):
        """
        Returns the ID of the edge's outgoing vertex (start node).

        :rtype: int
        """
        raise NotImplementedError

    def get_inV(self):
        """
        Returns the ID of the edge's incoming vertex (end node).

        :rtype: int
        """
        raise NotImplementedError

    def get_label(self):
        """
        Returns the edge label (relationship type).

        :rtype: str
        """
        raise NotImplementedError

    def get_index_name(self):
        """
        Returns the index name.

        :rtype: str
        """
        raise NotImplementedError

    def get_index_class(self):
        """
        Returns the index class, either "vertex" or "edge".

        :rtype: str
        """
        raise NotImplementedError

    def get(self, attribute):
        """
        Returns the value of a client-specific attribute.

        :param attribute: Name of the attribute:
        :type attribute: str

        :rtype: str
        """
        # backend-agnostic lookup straight out of the raw result dict
        return self.raw[attribute]
class Response(object):
    """
    Abstract base class for the response returned by the request.

    :param response: The raw response.
    :type response: Depends on Client.

    :param config: Config object.
    :type config: bulbs.config.Config

    :ivar config: Config object.
    :ivar headers: Response headers.
    :ivar content: A dict containing the response content.
    :ivar results: A generator of Result objects, a single Result object,
        or None, depending on the number of results returned.
    :ivar total_size: The number of results returned.
    :ivar raw: Raw HTTP response. Only set when log_level is DEBUG.
    """
    # subclasses override this with their backend-specific Result class
    result_class = Result

    def __init__(self, response, config):
        self.config = config
        # validate the server response (and raise) before any parsing happens
        self.handle_response(response)
        self.headers = self.get_headers(response)
        self.content = self.get_content(response)
        self.results, self.total_size = self.get_results()
        self.raw = self._maybe_get_raw(response, config)

    def _maybe_get_raw(self, response, config):
        """Returns the raw response if in DEBUG mode."""
        # don't store raw response in production else you'll bloat the obj
        if config.log_level == DEBUG:
            return response

    def handle_response(self, response):
        """
        Check the server response and raise exception if needed.

        :param response: Raw server response.
        :type response: Depends on Client.

        :rtype: None
        """
        raise NotImplementedError

    def get_headers(self, response):
        """
        Returns a dict containing the headers from the response.

        :param response: Raw server response.
        :type response: tuple

        :rtype: dict -- NOTE(review): the original doc said
            ``httplib2.Response``; confirm the concrete type per backend.
        """
        raise NotImplementedError

    def get_content(self, response):
        """
        Returns a dict containing the content from the response.

        :param response: Raw server response.
        :type response: tuple

        :rtype: dict or None
        """
        raise NotImplementedError

    def get_results(self):
        """
        Returns the results contained in the response.

        :return: A tuple containing two items: 1. Either a generator of Result
            objects, a single Result object, or None, depending on the number
            of results returned; 2. An int representing the number of results
            returned.
        :rtype: tuple
        """
        raise NotImplementedError

    def get(self, attribute):
        """Return a client-specific attribute."""
        return self.content[attribute]

    def one(self):
        """
        Returns one result or raises an error if there is more than one result.

        :rtype: Result
        """
        # If you're using this utility, that means the results attribute in the
        # Response object should always contain a single result object,
        # not multiple items. But gremlin returns all results as a list
        # even if the list contains only one element. And the Response class
        # converts all lists to a generator of Result objects. Thus in that case,
        # we need to grab the single Result object out of the list/generator.
        if self.total_size > 1:
            log.error('resp.results contains more than one item.')
            raise ValueError
        if inspect.isgenerator(self.results):
            result = next(self.results)
        else:
            result = self.results
        return result
class Client(object):
"""
Abstract base class for the low-level server client.
:param config: Optional Config object. Defaults to default Config.
:type config: bulbs.config.Config
:cvar default_uri: Default URI for the database.
:cvar request_class: Request class for the Client.
:ivar config: Config object.
:ivar registry: Registry object.
:ivar type_system: TypeSystem object.
:ivar request: Request object.
Example:
>>> from bulbs.neo4jserver import Neo4jClient
>>> client = Neo4jClient()
>>> script = client.scripts.get("get_vertices")
>>> response = client.gremlin(script, params=None)
>>> result = response.results.next()
"""
default_uri = SERVER_URI
request_class = Request
def __init__(self, config=None):
self.config = config or Config(self.default_uri)
self.registry = Registry(self.config)
self.type_system = TypeSystem()
self.request = self.request_class(self.config, self.type_system.content_type)
# Vertex Proxy
def create_vertex(self, data):
"""
Creates a vertex and returns the Response.
:param data: Property data.
:type data: dict
:rtype: Response
"""
raise NotImplementedError
def get_vertex(self, _id):
"""
Gets the vertex with the _id and returns the Response.
:param data: Vertex ID.
:type data: int
:rtype: Response
"""
raise NotImplementedError
def get_all_vertices(self):
"""
Returns a Response containing all the vertices in the Graph.
:rtype: Response
"""
raise NotImplementedError
def update_vertex(self, _id, data):
"""
Updates the vertex with the _id and returns the Response.
:param _id: Vertex ID.
:type _id: dict
:param data: Property data.
:type data: dict
:rtype: Response
"""
raise NotImplementedError
def delete_vertex(self, _id):
"""
Deletes a vertex with the _id and returns the Response.
:param _id: Vertex ID.
:type _id: dict
:rtype: Response
"""
raise NotImplementedError
# Edge Proxy
def create_edge(self, outV, label, inV, data=None):
"""
Creates a edge and returns the Response.
:param outV: Outgoing vertex ID.
:type outV: int
:param label: Edge label.
:type label: str
:param inV: Incoming vertex ID.
:type inV: int
:param data: Property data.
:type data: dict or None
:rtype: Response
"""
raise NotImplementedError
def get_edge(self, _id):
"""
Gets the edge with the _id and returns the Response.
:param data: Edge ID.
:type data: int
:rtype: Response
"""
raise NotImplementedError
def get_all_edges(self):
"""
Returns a Response containing all the edges in the Graph.
:rtype: Response
"""
raise NotImplementedError
def update_edge(self, _id, data):
"""
Updates the edge with the _id and returns the Response.
:param _id: Edge ID.
:type _id: dict
:param data: Property data.
:type data: dict
:rtype: Response
"""
raise NotImplementedError
def delete_edge(self, _id):
"""
Deletes a edge with the _id and returns the Response.
:param _id: Edge ID.
:type _id: dict
:rtype: Response
"""
raise NotImplementedError
# Vertex Container
def outE(self, _id, label=None):
"""
Returns the outgoing edges of the vertex.
:param _id: Vertex ID.
:type _id: dict
:param label: Optional edge label. Defaults to None.
:type label: str
:rtype: Response
"""
raise NotImplementedError
def inE(self, _id, label=None):
"""
Returns the incoming edges of the vertex.
:param _id: Vertex ID.
:type _id: dict
:param label: Optional edge label. Defaults to None.
:type label: str
:rtype: Response
"""
raise NotImplementedError
def bothE(self, _id, label=None):
"""
Returns the incoming and outgoing edges of the vertex.
:param _id: Vertex ID.
:type _id: dict
:param label: Optional edge label. Defaults to None.
:type label: str
:rtype: Response
"""
raise NotImplementedError
def outV(self, _id, label=None):
"""
Returns the out-adjacent vertices of the vertex.
:param _id: Vertex ID.
:type _id: dict
:param label: Optional edge label. Defaults to None.
:type label: str
:rtype: Response
"""
raise NotImplementedError
def inV(self, _id, label=None):
    """
    Returns the in-adjacent vertices of the vertex.

    :param _id: Vertex ID.
    :type _id: int
    :param label: Optional edge label. Defaults to None.
    :type label: str
    :rtype: Response
    """
    raise NotImplementedError
def bothV(self, _id, label=None):
    """
    Returns the incoming- and outgoing-adjacent vertices of the vertex.

    :param _id: Vertex ID.
    :type _id: int
    :param label: Optional edge label. Defaults to None.
    :type label: str
    :rtype: Response
    """
    raise NotImplementedError
# Index Proxy - Vertex
def create_vertex_index(self, params):
    """
    Creates a vertex index with the specified params.

    :param params: Index configuration parameters (e.g. name, type, keys).
    :type params: dict
    :rtype: Response
    """
    raise NotImplementedError
def get_vertex_index(self, index_name):
    """
    Returns the vertex index with the index_name.

    :param index_name: Name of the index.
    :type index_name: str
    :rtype: Response
    """
    raise NotImplementedError
def delete_vertex_index(self, index_name):
    """
    Deletes the vertex index with the index_name.

    :param index_name: Name of the index.
    :type index_name: str
    :rtype: Response
    """
    raise NotImplementedError
# Index Proxy - Edge
def create_edge_index(self, index_name):
    """
    Creates an edge index with the index_name.

    :param index_name: Name of the index.
    :type index_name: str
    :rtype: Response
    """
    raise NotImplementedError
def get_edge_index(self, index_name):
    """
    Returns the edge index with the index_name.

    :param index_name: Name of the index.
    :type index_name: str
    :rtype: Response
    """
    raise NotImplementedError
def delete_edge_index(self, index_name):
    """
    Deletes the edge index with the index_name.

    :param index_name: Name of the index.
    :type index_name: str
    :rtype: Response
    """
    raise NotImplementedError
# Index Container - Vertex
def put_vertex(self, index_name, key, value, _id):
    """
    Adds a vertex to the index with the index_name.

    :param index_name: Name of the index.
    :type index_name: str
    :param key: Name of the key.
    :type key: str
    :param value: Value of the key.
    :type value: str
    :param _id: Vertex ID
    :type _id: int
    :rtype: Response
    """
    raise NotImplementedError
def lookup_vertex(self, index_name, key, value):
    """
    Returns the vertices indexed with the key and value.

    :param index_name: Name of the index.
    :type index_name: str
    :param key: Name of the key.
    :type key: str
    :param value: Value of the key.
    :type value: str
    :rtype: Response
    """
    raise NotImplementedError
def remove_vertex(self, index_name, _id, key=None, value=None):
    """
    Removes a vertex from the index and returns the Response.

    :param index_name: Name of the index.
    :type index_name: str
    :param _id: Vertex ID.
    :type _id: int
    :param key: Optional. Name of the key.
    :type key: str
    :param value: Optional. Value of the key.
    :type value: str
    :rtype: Response
    """
    raise NotImplementedError
# Index Container - Edge
def put_edge(self, index_name, key, value, _id):
    """
    Adds an edge to the index and returns the Response.

    :param index_name: Name of the index.
    :type index_name: str
    :param key: Name of the key.
    :type key: str
    :param value: Value of the key.
    :type value: str
    :param _id: Edge ID
    :type _id: int
    :rtype: Response
    """
    raise NotImplementedError
def lookup_edge(self, index_name, key, value):
    """
    Looks up an edge in the index and returns the Response.

    :param index_name: Name of the index.
    :type index_name: str
    :param key: Name of the key.
    :type key: str
    :param value: Value of the key.
    :type value: str
    :rtype: Response
    """
    raise NotImplementedError
def remove_edge(self, index_name, _id, key=None, value=None):
    """
    Removes an edge from the index and returns the Response.

    :param index_name: Name of the index.
    :type index_name: str
    :param _id: Edge ID
    :type _id: int
    :param key: Optional. Name of the key.
    :type key: str
    :param value: Optional. Value of the key.
    :type value: str
    :rtype: Response
    """
    raise NotImplementedError
# Model Proxy - Vertex
def create_indexed_vertex(self, data, index_name, keys=None):
    """
    Creates a vertex, indexes it, and returns the Response.

    :param data: Property data.
    :type data: dict
    :param index_name: Name of the index.
    :type index_name: str
    :param keys: Property keys to index.
    :type keys: list
    :rtype: Response
    """
    raise NotImplementedError
def update_indexed_vertex(self, _id, data, index_name, keys=None):
    """
    Updates an indexed vertex and returns the Response.

    :param _id: Vertex ID.
    :type _id: int
    :param data: Property data.
    :type data: dict
    :param index_name: Name of the index.
    :type index_name: str
    :param keys: Property keys to index.
    :type keys: list
    :rtype: Response
    """
    raise NotImplementedError
# Model Proxy - Edge
def create_indexed_edge(self, data, index_name, keys=None):
    """
    Creates an edge, indexes it, and returns the Response.

    :param data: Property data.
    :type data: dict
    :param index_name: Name of the index.
    :type index_name: str
    :param keys: Property keys to index. Defaults to None (indexes all properties).
    :type keys: list
    :rtype: Response
    """
    raise NotImplementedError
def update_indexed_edge(self, _id, data, index_name, keys=None):
    """
    Updates an indexed edge and returns the Response.

    :param _id: Edge ID.
    :type _id: int
    :param data: Property data.
    :type data: dict
    :param index_name: Name of the index.
    :type index_name: str
    :param keys: Property keys to index. Defaults to None (indexes all properties).
    :type keys: list
    :rtype: Response
    """
    raise NotImplementedError
| StarcoderdataPython |
3542564 | from distutils.version import StrictVersion
import django
from django.core.validators import RegexValidator
from django.db import models
from django.utils.translation import ugettext_lazy as _
from cmsplugin_text_ng.compat import AbstractText
from cmsplugin_text_ng.type_registry import register_type, get_type_list
class TextNGTemplateCategory(models.Model):
    """Grouping bucket for TextNG templates, shown in the admin."""
    title = models.CharField(max_length=128)

    def __unicode__(self):
        # Python 2 string representation (project uses ugettext/__unicode__).
        return self.title

    class Meta:
        verbose_name = _('template category')
        verbose_name_plural = _('template categories')
        ordering = ['title']
class TextNGTemplate(models.Model):
    """A selectable template (by filesystem path), optionally categorized."""
    # NOTE(review): Django >= 2.0 requires an explicit on_delete for
    # ForeignKey — confirm the targeted Django version before upgrading.
    category = models.ForeignKey(TextNGTemplateCategory, blank=True, null=True)
    title = models.CharField(max_length=128)
    # Template path as resolved by Django's template loaders.
    path = models.CharField(max_length=128)

    def __unicode__(self):
        if self.category:
            return u"%s (%s)" % (self.title, self.category)
        return self.title

    class Meta:
        verbose_name = _('template')
        verbose_name_plural = _('templates')
        ordering = ['title']
class TextNG(AbstractText):
    """CMS text plugin model that renders through a chosen template."""
    template = models.ForeignKey(TextNGTemplate)

    def copy_relations(self, old_instance):
        """Clone all registered variable rows from *old_instance* onto self.

        Called by django-cms when a plugin is copied; setting pk to None
        makes save() insert a new row.
        """
        for model in get_type_list():
            for instance in model.objects.filter(text_ng=old_instance):
                instance.pk = None
                instance.text_ng = self
                instance.save()

    class Meta:
        verbose_name = _('text')
        verbose_name_plural = _('texts')
class TextNGVariableBase(models.Model):
    """Abstract base for per-text template variables, addressed by label."""
    # Related fields to pass to select_related() when rendering; subclasses
    # may override.
    select_related = []
    text_ng = models.ForeignKey(TextNG, related_name='+')
    # Fix: the regex is now anchored. RegexValidator uses re.search, so the
    # previous pattern '[_a-z]+' accepted any value containing a single run
    # of lowercase characters (e.g. 'BAD Label!'), contradicting the
    # validation message below.
    label = models.CharField(_('label'), max_length=20, validators=[RegexValidator(regex='^[_a-z]+$', message=_('Only lower case characters.'))])

    def __unicode__(self):
        return self.label

    class Meta:
        abstract = True
        # One value per (text, label) pair.
        unique_together = ('text_ng', 'label')
class TextNGVariableText(TextNGVariableBase):
    """Concrete variable type holding a free-text value."""
    value = models.TextField(_('value'), null=True, blank=True)

    def __unicode__(self):
        return self.label + (' (%s)' % self.value if self.value else '')

    class Meta:
        verbose_name = _('text')
        verbose_name_plural = _('texts')
# NOTE(review): eager registration only happens on Django < 1.7; presumably
# newer versions register via the app-loading machinery instead — confirm.
if StrictVersion(django.get_version()) < StrictVersion('1.7'):
    register_type('text', TextNGVariableText)
| StarcoderdataPython |
8023433 | <reponame>Koalapvh13/python_exercises
#Faça um Programa que peça as 4 notas bimestrais e mostre a média
# Read the four term grades and report their arithmetic mean.
notas = [float(input('{}ª nota: '.format(i))) for i in range(1, 5)]
m = sum(notas) / 4
print('A sua média é {}.'.format(m))
| StarcoderdataPython |
3334164 | <filename>datapack/data/scripts/quests/338_AlligatorHunter/__init__.py<gh_stars>0
# Made by mtrix
import sys
from com.l2jfrozen.gameserver.model.quest import State
from com.l2jfrozen.gameserver.model.quest import QuestState
from com.l2jfrozen.gameserver.model.quest.jython import QuestJython as JQuest
qn = "338_AlligatorHunter"  # quest identifier used for player quest-state lookup
ADENA = 57                  # item id: adena (in-game currency)
ALLIGATOR = 20135           # npc id of the monster to hunt
ALLIGATOR_PELTS = 4337      # quest item id dropped by the alligator
CHANCE = 90                 # pelt drop chance, percent
class Quest (JQuest) :
    """L2J quest 338 'Alligator Hunter': kill alligators, trade pelts for adena."""

    def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)

    def onEvent (self,event,st) :
        # Dialog event handler; returns the HTML page to display.
        htmltext = event
        if event == "30892-00a.htm" :
            # Player declined the quest.
            htmltext = "30892-00a.htm"
            st.exitQuest(1)
        elif event == "30892-02.htm" :
            # Player accepted the quest.
            st.setState(STARTED)
            st.set("cond","1")
            st.playSound("ItemSound.quest_accept")
        elif event == "2" :
            # Player abandoned the quest.
            st.exitQuest(1)
            st.playSound("ItemSound.quest_finish")
        return htmltext

    def onTalk (self,npc,player):
        # NPC dialog handler for the quest giver (npc 30892).
        htmltext = "<html><body>You are either not carrying out your quest or don't meet the criteria.</body></html>"
        st = player.getQuestState(qn)
        if not st : return htmltext
        npcId = npc.getNpcId()
        id = st.getState()  # NOTE: shadows the builtin `id` (legacy Jython style)
        level = player.getLevel()
        cond = st.getInt("cond")
        # 40 adena paid per pelt.
        amount = st.getQuestItemsCount(ALLIGATOR_PELTS)*40
        if id == CREATED :
            # Minimum level 40 to start.
            if level>=40 :
                htmltext = "30892-01.htm"
            else :
                htmltext = "30892-00.htm"
        elif cond==1 :
            if amount :
                # Exchange all pelts for adena.
                htmltext = "30892-03.htm"
                st.giveItems(ADENA,amount)
                st.takeItems(ALLIGATOR_PELTS,-1)
            else :
                htmltext = "30892-04.htm"
        return htmltext

    def onKill(self,npc,player,isPet):
        # Kill handler: award a pelt with CHANCE percent probability.
        st = player.getQuestState(qn)
        if not st : return
        if st.getState() != STARTED : return
        npcId = npc.getNpcId()
        if st.getRandom(100)<CHANCE :
            st.giveItems(ALLIGATOR_PELTS,1)
            st.playSound("ItemSound.quest_itemget")
        return
# Instantiate the quest and register its states, start NPC, dialog NPC,
# kill trigger, and quest-item drop with the L2J engine.
QUEST = Quest(338,qn,"Alligator Hunter")
CREATED = State('Start', QUEST)
STARTED = State('Started', QUEST)
QUEST.setInitialState(CREATED)
QUEST.addStartNpc(30892)
QUEST.addTalkId(30892)
QUEST.addKillId(ALLIGATOR)
STARTED.addQuestDrop(ALLIGATOR,ALLIGATOR_PELTS,1)
3553043 | <reponame>cj-wong/Caltab
# Copyright 2019-2021 cj-wong
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from typing import Dict
import pendulum
from google.oauth2 import service_account
from googleapiclient.discovery import build
import config
# Replaced imports:
# datetime -> pendulum
def get_tab(entry: str) -> str:
    """Return the canonical tab name that *entry* refers to.

    An entry matches a tab when it equals the tab's name or appears in the
    tab's alias list from ``config.TAB_NAMES``.

    Args:
        entry (str): the name of an entry

    Returns:
        str: the matching tab name

    Raises:
        TabNotFound: if no tab matches *entry*
    """
    matching = (name for name, aliases in config.TAB_NAMES.items()
                if entry == name or entry in aliases)
    try:
        return next(matching)
    except StopIteration:
        raise TabNotFound
class TabNotFound(ValueError):
    """The tab name wasn't found in the configuration. Ignore it."""
    pass
class Calendar:
    """Class for managing Google Calendar.

    Attributes:
        interface (Resource): an interface created from credentials;
        used to retrieve calendars and entries per calendar
    """

    def __init__(self, credentials: service_account.Credentials) -> None:
        """Initialize the Google Calendar interface.

        Args:
            credentials (service_account.Credentials): creds for Google APIs
        """
        self.interface = build(
            'calendar',
            'v3',
            credentials=credentials
        )

    def get_calendar_ids(self) -> Dict[str, str]:
        """Get IDs for calendars configured in config.yaml.

        The IDs will be used for retrieving entries/events per calendar.
        Only calendars whose summary appears in `config.CALS` are kept.

        Returns:
            Dict[str, str]: {calendar name: calendar id}
        """
        cals = {}
        all_cals = self.interface.calendarList().list().execute()['items']
        for cal in all_cals:
            calendar = cal['summary']
            if calendar in config.CALS:
                cals[calendar] = cal['id']
        return cals

    def get_entries(self, cal_name: str, cal_id: str) -> Dict[str, int]:
        """Get entries in a calendar given `cal_id` from yesterday until today.

        We are interested in events that have elapsed from then and now.

        Args:
            cal_name (str): the name (summary) of the calendar
                (currently unused in the body; kept for interface stability)
            cal_id (str): the ID of the calendar

        Returns:
            dict: {tab: hours} — hours are floats despite the int annotation.
        """
        tab_hours: Dict[str, int] = defaultdict(int)
        all_entries = self.interface.events().list(
            calendarId=cal_id,
            timeMin=config.YESTERDAY,
            timeMax=config.TODAY,
            singleEvents=True,
            orderBy='startTime',
        ).execute()['items']
        for entry in all_entries:
            try:
                tab = get_tab(entry['summary'])
            except TabNotFound:
                # Event title doesn't map to a configured tab; skip it.
                continue
            start = pendulum.parse(entry['start']['dateTime'])
            end = pendulum.parse(entry['end']['dateTime'])
            # NOTE(review): `.seconds` excludes whole days, so multi-day
            # events under-count — confirm events are always < 24h.
            tab_hours[tab] += (end - start).seconds / 3600
            if tab_hours[tab] >= 24:
                config.LOGGER.warning(f'Hours exceeded 24 for tab {tab}')
        return tab_hours
| StarcoderdataPython |
1758124 | <gh_stars>0
from flask import request, flash
from app.controllers.admin import admin_bp
from app.models.course import Course
from app.models.courseInstructor import CourseInstructor
from app.models.courseParticipant import CourseParticipant
from peewee import *
@admin_bp.route('/withdrawCourse/<courseID>', methods = ['POST'])
def withdrawCourse(courseID):
    """Delete the course identified by *courseID* and all rows referencing it.

    :param courseID: primary key of the Course to remove
    :return: plain-text confirmation (a flash message is also queued)

    NOTE(review): Course.get raises DoesNotExist for an unknown id, which
    surfaces as a 500 — confirm whether a 404 response is preferred.
    """
    course = Course.get(Course.id == courseID)
    # Rows holding ForeignKeyFields to Course must be removed first to
    # satisfy referential integrity.
    (CourseInstructor.delete().where(CourseInstructor.course == course)).execute() #need to delete all ForeignKeyFields first
    (CourseParticipant.delete().where(CourseParticipant.course == course)).execute()
    course.delete_instance()
    flash("Course successfully withdrawn", 'success')
    return "Course successfully withdrawn"
| StarcoderdataPython |
5132874 | <reponame>Kris-Kindle/FARS-DBF-Reader<filename>Read_DBF.py
from dbfread import DBF
import zipfile
import sys
import csv
## This will unzip the downloaded folder from FARS
#zip_ref = zipfile.ZipFile('/Users/Kris/Documents/FARS_Reader/FARS2015NationalDBF.zip', 'r')
#zip_ref.extractall('/Users/Kris/Documents/FARS_Reader/FARS2015')
#zip_ref.close()
# Reads in the DBF file
# Reads in the DBF file and exports it to CSV.
table = DBF('/Users/Kris/Documents/FARS_Reader/FARS1990/acc1990.dbf')
# Fix: the output file was previously opened and never closed; the context
# manager guarantees the handle is flushed and released even on error.
# NOTE(review): on Python 3, pass newline='' to open() so the csv module
# does not emit blank rows on Windows — confirm the interpreter version.
with open('/Users/Kris/Documents/FARS_Reader/FARS1990/accident90.csv', 'w') as f:
    writer = csv.writer(f)
    # Header row first, then one row per DBF record.
    writer.writerow(table.field_names)
    for record in table:
        writer.writerow(list(record.values()))
3314159 | <filename>diva/formats.py
#!/bin/env python2
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2015 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from construct import *
from .util import IsTimestamp, IsNotTimestamp, Dynamic
# Declarative parser (construct 2.x) for the AFS archive container:
# header, table of (offset, length) entries, and a trailing metadata table
# with per-file names and timestamps.
AFSFormat = Struct('AFS',
    Struct('header',
        Magic('AFS\x00'),
        ULInt32('entry_count'),
    ),
    Anchor('a_toc_start'),
    Struct('toc',
        Array(lambda ctx: ctx._.header.entry_count,
            Struct('entries',
                ULInt32('offset'),
                ULInt32('length'),
            ),
        ),
        ULInt32('metadata_offset'),
        ULInt32('metadata_length'),
    ),
    Anchor('a_toc_end'),
    Pointer(lambda ctx: ctx.toc.metadata_offset,
        Array(lambda ctx: ctx.header.entry_count,
            Struct('metadata',
                String('name', 32, padchar='\x00'),
                ULInt16('year'),
                ULInt16('month'),
                ULInt16('day'),
                ULInt16('hour'),
                ULInt16('minute'),
                ULInt16('second'),
                ULInt32('length'),
            ),
        ),
    ),
)

# Maps CPK @UTF column-type names to the construct primitive used to parse
# a cell of that type.
ColumnTypeMap = {
    'TYPE_STRING' : SBInt32('value'),
    'TYPE_DATA' : UBInt64('value'),
    'TYPE_FLOAT' : BFloat32('value'),
    'TYPE_8BYTE2' : SBInt64('value'),
    'TYPE_8BYTE' : SBInt64('value'),
    'TYPE_4BYTE2' : SBInt32('value'),
    'TYPE_4BYTE' : SBInt32('value'),
    'TYPE_2BYTE2' : SBInt16('value'),
    'TYPE_2BYTE' : SBInt16('value'),
    'TYPE_1BYTE2' : Byte('value'),
    'TYPE_1BYTE' : Byte('value'),
}

# The column_type byte encodes storage class (high nibble) and value type
# (low nibble).
CPK_UTF_Table_ColumnType = Enum(Byte('column_type'),
    # STORAGE_MASK = 0xF0,
    STORAGE_PERROW = 0x50,
    STORAGE_CONSTANT = 0x30,
    STORAGE_ZERO = 0x10,
    # TYPE_MASK = 0x0F,
    TYPE_DATA = 0x0B,
    TYPE_STRING = 0x0A,
    TYPE_FLOAT = 0x08,
    TYPE_8BYTE2 = 0x07,
    TYPE_8BYTE = 0x06,
    TYPE_4BYTE2 = 0x05,
    TYPE_4BYTE = 0x04,
    TYPE_2BYTE2 = 0x03,
    TYPE_2BYTE = 0x02,
    TYPE_1BYTE2 = 0x01,
    TYPE_1BYTE = 0x00,
    # _default_ = 'TYPE_1BYTE',
)

# Numeric type code -> construct primitive (inverse of ColumnTypeMap via the
# enum's encoding). Python 2: uses iteritems().
ColumnTypeMapMirror = {v: ColumnTypeMap[k]
                       for k, v in CPK_UTF_Table_ColumnType.encoding.iteritems()
                       if k.startswith('TYPE_')}

UTF_STORAGE_MASK = 0xF0  # high nibble of column_type
UTF_TYPE_MASK = 0x0F     # low nibble of column_type
def build_utf_row(ctx):
    """Build a construct Struct that parses one @UTF table row.

    Uses the already-parsed column descriptors in ctx.columns: per-row cells
    are read inline, constants are read from their recorded offset, and
    zero-storage columns produce a constant 0 without consuming bytes.
    """
    cell_types = ColumnTypeMapMirror.copy()
    # TYPE_DATA cells are (offset, size) pairs into the data region.
    cell_types[0x0B] = Struct('value',
        SBInt32('offset'),
        SBInt32('size'),
        # Pointer(lambda ctx: ctx.offset,
        #     # OnDemand(String('value', lambda ctx: ctx.size)),
        # ),
    )
    # TYPE_STRING cells are offsets into the string pool; dereference them.
    cell_types[0x0A] = Struct('value',
        SBInt32('offset'),
        Pointer(lambda ctx: ctx.offset + ctx._._.a_table_offset + 8 + ctx._._.table_info.strings_offset,
            CString('value')
            # OnDemand(CString('value'))
        ),
    )
    # cell_types[0x07] = Struct('value', Padding(4), SBInt32('value'))
    # cell_types[0x06] = Struct('value', Padding(4), SBInt32('value'))
    cells = []
    for col in ctx.columns:
        cell = cell_types[col.v_type]
        if col.v_storage == 0x10:
            # STORAGE_ZERO: value is implicitly zero, nothing stored per row.
            cell = Value('zero', lambda ctx: 0x00)
        else:
            if col.v_storage == 0x30:
                # STORAGE_CONSTANT: read from the constant captured in the
                # column descriptor instead of the row body.
                cell = Pointer(lambda ctx: col.constant_offset.a_constant_offset, cell)
        # NOTE(review): original indentation was lost; Rename is applied to
        # every cell here so each row field carries its column name — confirm
        # against upstream that zero-storage cells were also renamed.
        cell = Rename(col.column_name, cell)
        cells.append(cell)
    return Struct('row', *cells)
# Parser for a CRI @UTF table: header info, column descriptors (with inline
# constants for STORAGE_CONSTANT columns), then the row data built
# dynamically per-table by build_utf_row.
CPK_UTF_Table = Struct('utf_table',
    Anchor('a_table_offset'),
    Magic('@UTF'),
    Struct('table_info',
        SBInt32('size'),
        Anchor('a_offset_anchor'),
        SBInt32('rows_offset'),
        SBInt32('strings_offset'),
        SBInt32('data_offset'),
        SBInt32('name_offset'),
        SBInt16('column_count'),
        SBInt16('row_size'),
        SBInt32('row_count'),
        Value('v_strings_size', lambda ctx: ctx.data_offset - ctx.strings_offset)
    ),
    Array(lambda ctx: ctx.table_info.column_count, Struct('columns',
        Byte('column_type'),
        SBInt32('column_name_offset'),
        Pointer(lambda ctx: ctx._.table_info.a_offset_anchor + ctx._.table_info.strings_offset + ctx.column_name_offset,
            CString('column_name')),
        Value('v_type', lambda ctx: ctx.column_type & UTF_TYPE_MASK), # TYPE_MASK
        Value('v_storage', lambda ctx: ctx.column_type & UTF_STORAGE_MASK), # STORAGE_MASK
        If(lambda ctx: ctx.v_storage == 0x30, # STORAGE_CONSTANT
            Struct('constant_offset',
                Anchor('a_constant_offset'),
                Switch('offset_value', lambda ctx: ctx._.v_type,
                    ColumnTypeMapMirror,
                ),
            ),
        ),
    )),
    Pointer(lambda ctx: ctx.a_table_offset + 8 + ctx.table_info.rows_offset,
        Array(lambda ctx: ctx.table_info.row_count, Dynamic('rows', build_utf_row)),
    ),
)

# Parser for a CPK archive: header @UTF table describing the file layout,
# plus the TOC table located via the header's TocOffset/ITocOffset.
CPKFormat = Struct('CPK',
    Struct('header',
        Magic('CPK '),
        Padding(12),
        CPK_UTF_Table,
    ),
    IfThenElse('v_toc_offset', lambda ctx: ctx.header.utf_table.rows[0].TocOffset != 0,
        Value('_toc_offset', lambda ctx: ctx.header.utf_table.rows[0].TocOffset),
        Value('_itoc_offset', lambda ctx: ctx.header.utf_table.rows[0].ITocOffset),
    ),
    Value('v_content_offset', lambda ctx: ctx.header.utf_table.rows[0].ContentOffset),
    Value('v_file_count', lambda ctx: ctx.header.utf_table.rows[0].Files),
    Value('v_alignment', lambda ctx: ctx.header.utf_table.rows[0].Align),
    Pointer(lambda ctx: ctx.v_toc_offset, Struct('toc',
        Magic('TOC '),
        Padding(12),
        CPK_UTF_Table,
    )),
)

# Per-opcode payload layouts for DSC key events, keyed by the event format
# code. NOTE: leading-zero literals (06) are Python 2 octal and equal 6 here.
DSC_KeyStructs = {
    06: Struct('opt',
        SLInt32('large_value_06_00'),
        SLInt32('large_value_06_01'),
        SLInt32('zero_06_00'),
        SLInt32('large_value_06_02'),
        SLInt32('point_value_06_00'),
        SLInt32('small_value_06_00'),
    ),
    11: Struct('opt',
        SLInt32('small_value_11_00'),
        SLInt32('small_value_11_01'),
        SLInt32('zero_11_00'),
        SLInt32('small_value_11_02'),
        SLInt32('small_value_11_03'),
        SLInt32('small_value_11_04'),
    ),
    13: Select('try_13',
        # Struct('opt',
        #     SLInt32('small_value_13_00'),
        #     SLInt32('small_value_13_01'),
        #     SLInt32('small_value_13_02'),
        #     SLInt32('point_value_13_03'),
        #     SLInt32('small_value_13_04'),
        #     SLInt32('small_value_13_05'),
        #     SLInt32('small_value_13_06'),
        #     SLInt32('point_value_13_07'),
        # ),
        Struct('opt',
            SLInt32('small_value_13_00'),
            SLInt32('small_value_13_01'),
            SLInt32('small_value_13_02'),
            SLInt32('point_value_13_00'),
        ),
        # Struct('opt',
        #     SLInt32('zero_13_00'),
        # ),
    ),
    18: Struct('opt',
        SLInt32('point_value_18_00'),
    ),
    19: Struct('opt',
        SLInt32('point_value_19_00'),
    ),
    20: Struct('opt',
        SLInt32('zero_20_00'),
    ),
    21: Struct('opt',
        SLInt32('point_value_21_00'),
        SLInt32('point_value_21_01'),
    ),
    22: Struct('opt',
        Pass,
    ),
    24: Struct('opt',
        Pass,
    ),
}

# Parser for DSC event streams: a prefix of non-timestamp words, then runs
# of (timestamp, payload words) events. The commented-out alternative parsed
# events with the per-opcode key structs above.
DSCFormat = Struct('DSC',
    # Struct('header',
    #     Padding(4 * 46),
    #     Array(16, SLInt32('prefix_values')),
    # ),
    GreedyRange(IsNotTimestamp(SLInt32('prefix_values'))),
    GreedyRange(Struct('events',
        IsTimestamp(SLInt32('timestamp')),
        GreedyRange(IsNotTimestamp(SLInt32('other_data'))),
    )),
    # GreedyRange(Struct('keys',
    #     IsTimestamp(SLInt32('timestamp')),
    #     SLInt32('key_format'),
    #     SLInt32('small_value00'),
    #     Switch('format_switch', lambda ctx: ctx.key_format, DSC_KeyStructs),
    #     SLInt32('small_value01'),
    # )),
    Anchor('a_stopped_reading'),
    # GreedyRange(SLInt32('remaining_values')),
)
| StarcoderdataPython |
4870719 | import posixpath
import os.path
# Sentinel returned by open_header() when skip_file rejects a cached path.
SKIP_FILE = object()
class HeaderHandler(object):
    """Resolves #include'd header names to files on disk.

    Successful resolutions are cached in ``self.resolved`` so later lookups
    skip the search over include directories.
    """

    def __init__(self, include_paths):
        self.include_paths = list(include_paths)
        self.resolved = {}  # header name -> resolved filesystem path

    def _open(self, header_path):
        """Return an open file for *header_path*, or None if it can't be read."""
        try:
            return open(header_path)
        except IOError:
            return None

    def add_include_paths(self, include_paths):
        """Append extra search directories to the include path list."""
        self.include_paths.extend(include_paths)

    def _resolve(self, anchor_file):
        """Yield candidate directories: the anchor's directory (normalized to
        POSIX separators) first, then every configured include path."""
        if anchor_file is not None:
            if os.path.sep != posixpath.sep:
                anchor_file = anchor_file.replace(os.path.sep,
                                                  posixpath.sep)
            yield posixpath.dirname(anchor_file)
        for directory in self.include_paths:
            yield directory

    def open_header(self, include_header, skip_file, anchor_file):
        """Open *include_header*, searching relative to *anchor_file* and the
        include paths.

        Returns SKIP_FILE when the cached resolution is rejected by
        *skip_file*, an open file object on success, or None when the header
        cannot be found.
        """
        cached = self.resolved.get(include_header)
        if cached is not None:
            return SKIP_FILE if skip_file(cached) else self._open(cached)
        for directory in self._resolve(anchor_file):
            candidate = posixpath.join(directory, include_header)
            handle = self._open(posixpath.normpath(candidate))
            if handle:
                # Cache the path actually opened for future lookups.
                self.resolved[include_header] = handle.name
                return handle
        return None
class FakeFile(object):
    """In-memory stand-in for a file: iterable over lines and usable as a
    no-op context manager."""

    def __init__(self, name, contents):
        self.name = name
        self.contents = contents

    def __iter__(self):
        return iter(self.contents)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Nothing to release; exceptions propagate.
        return None
class FakeHandler(HeaderHandler):
    """HeaderHandler variant that serves headers from an in-memory mapping
    of path -> list of lines, for use in tests."""

    def __init__(self, header_mapping, include_paths=()):
        self.header_mapping = header_mapping
        super(FakeHandler, self).__init__(list(include_paths))

    def _open(self, header_path):
        """Return a FakeFile for *header_path*, or None when unmapped."""
        contents = self.header_mapping.get(header_path)
        if contents is None:
            return None
        return FakeFile(header_path, contents)

    def parent_open(self, header_path):
        """Escape hatch: open via the real filesystem implementation."""
        return super(FakeHandler, self)._open(header_path)
| StarcoderdataPython |
6617752 | """
All the custom settings are placed here. The settings are therefore loaded
trough environment variable `FLASK_APP_SETTINGS` that should be just a location
of the file
"""
import os
import ast
import datetime
import binascii
# GENERAL SETTINGS
DEBUG = False
FLASK_ENV = os.getenv('FLASK_ENV', 'production')
SECRET_KEY = os.getenv('SECRET_KEY', binascii.hexlify(os.urandom(24)))
# HOSTS AND SECURITY
ALLOWED_HOSTS = ast.literal_eval(os.getenv('ALLOWED_HOSTS', "['*']"))
USE_X_FORWARDED_HOST = False
USE_X_FORWARDED_PORT = False
# LOGGING
LOG_BACKTRACE = True
LOG_LEVEL = 'INFO'
# DATABASE CONFIGURATION
DATABASE_USER = os.environ.get('POSTGRES_USER', 'postgres')
DATABASE_PASSWORD = os.environ.get('POSTGRES_PASSWORD', '<PASSWORD>')
DATABASE_NAME = os.environ.get('POSTGRES_DB', '{{ cookiecutter.project_name }}')
DATABASE_HOST = os.environ.get('POSTGRES_HOST', 'localhost')
SQLALCHEMY_DATABASE_URI = "postgresql://{}:{}@{}/{}".format(
DATABASE_USER, DATABASE_PASSWORD, DATABASE_HOST, DATABASE_NAME
)
# CACHES
REDIS_HOST = os.getenv('REDIS_HOST', 'localhost')
REDIS_PORT = os.getenv('REDIS_PORT', 6379)
# These control flask-seasurf.
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
CSRF_COOKIE_TIMEOUT = datetime.timedelta(days=1)
# SWAGGER
SWAGGER_SPECS = False
CORS_ORIGINS = ast.literal_eval(os.getenv('CORS_ORIGINS', '[]'))
| StarcoderdataPython |
308522 | <reponame>sggrilo/Curso-em-Video-Python
# JOGO DA ADIVINHAÇÃO V2.0 — Melhore o jogo do Desafio 028 em que o computador vai "pensar" em um número entre 0 e 10,
# mas agora o jogador vai tentar até acertar, mostrando no final quantos palpites foram necessários para vencer.
from random import randint

# Guessing game: the computer picks 0..10 and the player guesses until
# correct; the number of guesses is reported at the end.
num = randint(0, 10)
palpites = 0
print('-=-' * 22)
print('{: ^66}'.format('\033[1;36mEstou pensando em um número entre 0 e 10... Tente adivinhá-lo!\033[m'))
print('-=-' * 22)
print('\nEm que número eu pensei?')
while True:
    resposta = int(input('Digite aqui a sua resposta: '))
    palpites += 1
    if resposta == num:
        # Fix: previously "Errado!" was printed even for the winning guess,
        # because the failure message ran unconditionally inside the loop.
        break
    print('\n\033[1;31mErrado!\033[m Tente novamente!')
    if resposta > num:
        print('\033[1;35mO número em que eu estou pensando é menor...\n\033[m')
    elif resposta < num:
        print('\033[1;35mO número em que eu estou pensando é maior...\n\033[m')
print('\n\033[1;32mCorreto!\033[m O número em que eu estava pensando era \033[1;33m{}\033[m.'.format(num))
print('Foram necessários \033[1;34m{}\033[m palpites para adivinhar esse número.'.format(palpites))
| StarcoderdataPython |
1703731 | # -*- coding: utf-8 -*-
import pytest
import pandas as pd
from anacode.agg import aggregation as agg
@pytest.fixture
def frame_sentiments():
    """Per-(doc, text) sentiment rows, wrapped as the keyword arguments
    expected by agg.SentimentDataset."""
    header = ['doc_id', 'text_order', 'sentiment_value']
    sentiments = pd.DataFrame([
        [0, 0, 0.5],
        [0, 1, -0.5],
        [0, 2, 0.5],
        [1, 0, -1.0],
        [2, 0, 1.0],
        [2, 1, 0.5],
        [2, 2, -0.5],
        [2, 3, -0.5],
    ], columns=header)
    return {'sentiments': sentiments}
@pytest.fixture
def dataset(frame_sentiments):
    """SentimentDataset built from the frame_sentiments fixture."""
    return agg.SentimentDataset(**frame_sentiments)
def test_empty_dataset_failure():
    """A dataset constructed without data raises NoRelevantData on use."""
    dataset = agg.SentimentDataset(None)
    with pytest.raises(agg.NoRelevantData):
        dataset.average_sentiment()
def test_average_sentiment(dataset):
    # The fixture's eight sentiment values sum to 0.0, so the mean is 0.0.
    assert dataset.average_sentiment() == 0.0
| StarcoderdataPython |
5138408 | <filename>out/plot.py
import matplotlib.pyplot as plt
import pandas as pd
cn_eval = pd.read_csv('client_num_eval.csv')
cn_data_multiple = 32 * 10 * 10 # 32K * 10 files * 10 repeat times
cn_speed = cn_eval['N'] * cn_data_multiple / cn_eval['total_time'] / 1024 # in MB
cn_eval['speed'] = cn_speed
ds_eval = pd.read_csv('data_size_eval.csv')
ds_data_multiple = 10 * 4 * 10 # 10 files * 4 clients * 10 repeat times
ds_speed = ds_eval['size(kb)'] * ds_data_multiple / ds_eval['total_time'] / 1024 # in MB
ds_eval['speed'] = ds_speed
print(cn_eval)
print(ds_eval)
x = cn_eval['N']
y = cn_eval['speed']
# plot
plt.plot(x, y, marker='o', markersize=5, color="blue")
plt.title("Client Number Evaluation")
plt.xlabel("Number of Clients")
plt.ylabel("Transfer Speed (MB/s)")
plt.savefig("client_num_eval_graph.png")
plt.show()
x = ds_eval['size(kb)']
y = ds_eval['speed']
# plot
plt.plot(x, y, marker='o', markersize=5, color="blue")
plt.title("File Size Evaluation")
plt.xlabel("File Size (KB)")
plt.ylabel("Transfer Speed (MB/s)")
plt.savefig("data_size_eval_graph.png")
plt.show() | StarcoderdataPython |
12827217 | <reponame>NuclearCactus/FOSSALGO
#Python program for KMP Algorithm
def LPSArray(pat, M, lps):
    """Fill *lps* in place with the KMP prefix function of pat[:M].

    lps[i] is the length of the longest proper prefix of pat[:i+1] that is
    also a suffix of it; lps[0] stays 0.
    """
    length = 0  # length of the previous longest prefix-suffix
    i = 1
    while i < M:
        if pat[i] == pat[length]:
            length += 1
            lps[i] = length
            i += 1
        elif length:
            # Fall back to the next-shorter border; do not advance i.
            length = lps[length - 1]
        else:
            lps[i] = 0
            i += 1
def KMP(pat, txt):
    """Find all occurrences of *pat* in *txt* (Knuth-Morris-Pratt).

    Prints "Found pattern at index i" for each match, as before, and now
    also returns the list of match indices (previously returned None, so
    existing callers are unaffected).

    :param pat: pattern string to search for
    :param txt: text string to search in
    :return: list of 0-based start indices of every match
    """
    M = len(pat)
    N = len(txt)
    # Fix: an empty pattern previously raised IndexError on pat[0]; treat it
    # as matching nowhere.
    if M == 0:
        return []
    # lps[] holds the longest proper prefix-suffix lengths for the pattern.
    lps = [0] * M
    LPSArray(pat, M, lps)
    matches = []
    i = 0  # index into txt
    j = 0  # index into pat
    while i < N:
        if pat[j] == txt[i]:
            i += 1
            j += 1
            if j == M:
                # Full match ends at i; report its start.
                print("Found pattern at index " + str(i - j))
                matches.append(i - j)
                j = lps[j - 1]
        elif j != 0:
            # Mismatch after j matches: reuse the longest border.
            j = lps[j - 1]
        else:
            i += 1
    return matches
# Demo: the pattern occurs once, at index 10.
txt = "ABABDABACDABABCABAB"
pat = "ABABCABAB"
KMP(pat, txt)
| StarcoderdataPython |
9705764 | <filename>src/command_modules/azure-cli-monitor/azure/cli/command_modules/monitor/grammar/MetricAlertConditionLexer.py
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=all
# Generated from MetricAlertCondition.g4 by ANTLR 4.7.2
# encoding: utf-8
from __future__ import print_function
from antlr4 import *
from io import StringIO
import sys
def serializedATN():
    """Return the serialized ATN for the lexer.

    Machine-generated by ANTLR 4.7.2 from MetricAlertCondition.g4 —
    regenerate instead of editing by hand.
    """
    with StringIO() as buf:
        buf.write(u"\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2")
        buf.write(u"\25\u00cc\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6")
        buf.write(u"\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4")
        buf.write(u"\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t")
        buf.write(u"\22\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27")
        buf.write(u"\4\30\t\30\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4")
        buf.write(u"\35\t\35\4\36\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t")
        buf.write(u"#\4$\t$\4%\t%\3\2\3\2\3\3\3\3\3\4\3\4\3\5\3\5\3\6\3\6")
        buf.write(u"\3\7\3\7\3\b\3\b\3\t\3\t\3\n\3\n\3\13\3\13\3\f\3\f\3")
        buf.write(u"\r\3\r\3\16\3\16\3\17\3\17\3\20\3\20\3\21\3\21\3\22\3")
        buf.write(u"\22\3\23\3\23\3\24\3\24\3\25\3\25\3\26\3\26\3\27\3\27")
        buf.write(u"\3\30\3\30\3\31\3\31\3\32\3\32\3\33\3\33\3\33\3\33\3")
        buf.write(u"\33\3\33\3\34\3\34\3\34\3\34\3\35\3\35\3\35\3\35\3\35")
        buf.write(u"\3\35\3\35\3\35\3\35\3\36\3\36\3\36\3\36\3\36\3\36\3")
        buf.write(u"\36\3\36\3\36\3\37\3\37\3\37\3 \3 \3 \3 \3 \3 \3 \3 ")
        buf.write(u"\3 \5 \u00a6\n \3!\6!\u00a9\n!\r!\16!\u00aa\3!\3!\6!")
        buf.write(u"\u00af\n!\r!\16!\u00b0\5!\u00b3\n!\3\"\3\"\3#\6#\u00b8")
        buf.write(u"\n#\r#\16#\u00b9\3$\5$\u00bd\n$\3$\3$\6$\u00c1\n$\r$")
        buf.write(u"\16$\u00c2\3%\3%\3%\3%\6%\u00c9\n%\r%\16%\u00ca\2\2&")
        buf.write(u"\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23\2\25\2\27\2\31")
        buf.write(u"\2\33\2\35\2\37\2!\2#\2%\2\'\2)\2+\2-\2/\2\61\2\63\2")
        buf.write(u"\65\13\67\f9\r;\16=\17?\20A\21C\22E\23G\24I\25\3\2\26")
        buf.write(u"\4\2CCcc\4\2EEee\4\2FFff\4\2GGgg\4\2JJjj\4\2KKkk\4\2")
        buf.write(u"NNnn\4\2PPpp\4\2QQqq\4\2TTtt\4\2UUuu\4\2WWww\4\2YYyy")
        buf.write(u"\4\2ZZzz\3\2\62;\3\2c|\3\2C\\\4\2..\60\60\4\2$$))\4\2")
        buf.write(u"\13\13\"\"\2\u00ca\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2")
        buf.write(u"\2\2\t\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2")
        buf.write(u"\2\21\3\2\2\2\2\65\3\2\2\2\2\67\3\2\2\2\29\3\2\2\2\2")
        buf.write(u";\3\2\2\2\2=\3\2\2\2\2?\3\2\2\2\2A\3\2\2\2\2C\3\2\2\2")
        buf.write(u"\2E\3\2\2\2\2G\3\2\2\2\2I\3\2\2\2\3K\3\2\2\2\5M\3\2\2")
        buf.write(u"\2\7O\3\2\2\2\tQ\3\2\2\2\13S\3\2\2\2\rU\3\2\2\2\17W\3")
        buf.write(u"\2\2\2\21Y\3\2\2\2\23[\3\2\2\2\25]\3\2\2\2\27_\3\2\2")
        buf.write(u"\2\31a\3\2\2\2\33c\3\2\2\2\35e\3\2\2\2\37g\3\2\2\2!i")
        buf.write(u"\3\2\2\2#k\3\2\2\2%m\3\2\2\2\'o\3\2\2\2)q\3\2\2\2+s\3")
        buf.write(u"\2\2\2-u\3\2\2\2/w\3\2\2\2\61y\3\2\2\2\63{\3\2\2\2\65")
        buf.write(u"}\3\2\2\2\67\u0083\3\2\2\29\u0087\3\2\2\2;\u0090\3\2")
        buf.write(u"\2\2=\u0099\3\2\2\2?\u00a5\3\2\2\2A\u00a8\3\2\2\2C\u00b4")
        buf.write(u"\3\2\2\2E\u00b7\3\2\2\2G\u00c0\3\2\2\2I\u00c8\3\2\2\2")
        buf.write(u"KL\7\60\2\2L\4\3\2\2\2MN\7\61\2\2N\6\3\2\2\2OP\7a\2\2")
        buf.write(u"P\b\3\2\2\2QR\7^\2\2R\n\3\2\2\2ST\7<\2\2T\f\3\2\2\2U")
        buf.write(u"V\7\'\2\2V\16\3\2\2\2WX\7.\2\2X\20\3\2\2\2YZ\7/\2\2Z")
        buf.write(u"\22\3\2\2\2[\\\t\2\2\2\\\24\3\2\2\2]^\t\3\2\2^\26\3\2")
        buf.write(u"\2\2_`\t\4\2\2`\30\3\2\2\2ab\t\5\2\2b\32\3\2\2\2cd\t")
        buf.write(u"\6\2\2d\34\3\2\2\2ef\t\7\2\2f\36\3\2\2\2gh\t\b\2\2h ")
        buf.write(u"\3\2\2\2ij\t\t\2\2j\"\3\2\2\2kl\t\n\2\2l$\3\2\2\2mn\t")
        buf.write(u"\13\2\2n&\3\2\2\2op\t\f\2\2p(\3\2\2\2qr\t\r\2\2r*\3\2")
        buf.write(u"\2\2st\t\16\2\2t,\3\2\2\2uv\t\17\2\2v.\3\2\2\2wx\t\20")
        buf.write(u"\2\2x\60\3\2\2\2yz\t\21\2\2z\62\3\2\2\2{|\t\22\2\2|\64")
        buf.write(u"\3\2\2\2}~\5+\26\2~\177\5\33\16\2\177\u0080\5\31\r\2")
        buf.write(u"\u0080\u0081\5%\23\2\u0081\u0082\5\31\r\2\u0082\66\3")
        buf.write(u"\2\2\2\u0083\u0084\5\23\n\2\u0084\u0085\5!\21\2\u0085")
        buf.write(u"\u0086\5\27\f\2\u00868\3\2\2\2\u0087\u0088\5\35\17\2")
        buf.write(u"\u0088\u0089\5!\21\2\u0089\u008a\5\25\13\2\u008a\u008b")
        buf.write(u"\5\37\20\2\u008b\u008c\5)\25\2\u008c\u008d\5\27\f\2\u008d")
        buf.write(u"\u008e\5\31\r\2\u008e\u008f\5\'\24\2\u008f:\3\2\2\2\u0090")
        buf.write(u"\u0091\5\31\r\2\u0091\u0092\5-\27\2\u0092\u0093\5\25")
        buf.write(u"\13\2\u0093\u0094\5\37\20\2\u0094\u0095\5)\25\2\u0095")
        buf.write(u"\u0096\5\27\f\2\u0096\u0097\5\31\r\2\u0097\u0098\5\'")
        buf.write(u"\24\2\u0098<\3\2\2\2\u0099\u009a\5#\22\2\u009a\u009b")
        buf.write(u"\5%\23\2\u009b>\3\2\2\2\u009c\u00a6\7>\2\2\u009d\u009e")
        buf.write(u"\7>\2\2\u009e\u00a6\7?\2\2\u009f\u00a6\7?\2\2\u00a0\u00a1")
        buf.write(u"\7@\2\2\u00a1\u00a6\7?\2\2\u00a2\u00a6\7@\2\2\u00a3\u00a4")
        buf.write(u"\7#\2\2\u00a4\u00a6\7?\2\2\u00a5\u009c\3\2\2\2\u00a5")
        buf.write(u"\u009d\3\2\2\2\u00a5\u009f\3\2\2\2\u00a5\u00a0\3\2\2")
        buf.write(u"\2\u00a5\u00a2\3\2\2\2\u00a5\u00a3\3\2\2\2\u00a6@\3\2")
        buf.write(u"\2\2\u00a7\u00a9\5/\30\2\u00a8\u00a7\3\2\2\2\u00a9\u00aa")
        buf.write(u"\3\2\2\2\u00aa\u00a8\3\2\2\2\u00aa\u00ab\3\2\2\2\u00ab")
        buf.write(u"\u00b2\3\2\2\2\u00ac\u00ae\t\23\2\2\u00ad\u00af\5/\30")
        buf.write(u"\2\u00ae\u00ad\3\2\2\2\u00af\u00b0\3\2\2\2\u00b0\u00ae")
        buf.write(u"\3\2\2\2\u00b0\u00b1\3\2\2\2\u00b1\u00b3\3\2\2\2\u00b2")
        buf.write(u"\u00ac\3\2\2\2\u00b2\u00b3\3\2\2\2\u00b3B\3\2\2\2\u00b4")
        buf.write(u"\u00b5\t\24\2\2\u00b5D\3\2\2\2\u00b6\u00b8\t\25\2\2\u00b7")
        buf.write(u"\u00b6\3\2\2\2\u00b8\u00b9\3\2\2\2\u00b9\u00b7\3\2\2")
        buf.write(u"\2\u00b9\u00ba\3\2\2\2\u00baF\3\2\2\2\u00bb\u00bd\7\17")
        buf.write(u"\2\2\u00bc\u00bb\3\2\2\2\u00bc\u00bd\3\2\2\2\u00bd\u00be")
        buf.write(u"\3\2\2\2\u00be\u00c1\7\f\2\2\u00bf\u00c1\7\17\2\2\u00c0")
        buf.write(u"\u00bc\3\2\2\2\u00c0\u00bf\3\2\2\2\u00c1\u00c2\3\2\2")
        buf.write(u"\2\u00c2\u00c0\3\2\2\2\u00c2\u00c3\3\2\2\2\u00c3H\3\2")
        buf.write(u"\2\2\u00c4\u00c9\5\61\31\2\u00c5\u00c9\5\63\32\2\u00c6")
        buf.write(u"\u00c9\5/\30\2\u00c7\u00c9\7a\2\2\u00c8\u00c4\3\2\2\2")
        buf.write(u"\u00c8\u00c5\3\2\2\2\u00c8\u00c6\3\2\2\2\u00c8\u00c7")
        buf.write(u"\3\2\2\2\u00c9\u00ca\3\2\2\2\u00ca\u00c8\3\2\2\2\u00ca")
        buf.write(u"\u00cb\3\2\2\2\u00cbJ\3\2\2\2\r\2\u00a5\u00aa\u00b0\u00b2")
        buf.write(u"\u00b9\u00bc\u00c0\u00c2\u00c8\u00ca\2")
        return buf.getvalue()
class MetricAlertConditionLexer(Lexer):
    """Lexer for metric-alert condition expressions.

    NOTE(review): this class appears to be auto-generated by ANTLR (the
    serialized ATN above, the ``checkVersion("4.7.2")`` call and
    ``grammarFileName = "MetricAlertCondition.g4"`` all point to generated
    output) -- do not edit by hand; regenerate from the .g4 grammar instead.
    """
    # Deserialized lexer automaton built from the serialized ATN blob above.
    atn = ATNDeserializer().deserialize(serializedATN())
    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
    # Token type constants (indices match the serialized grammar).
    T__0 = 1
    T__1 = 2
    T__2 = 3
    T__3 = 4
    T__4 = 5
    T__5 = 6
    T__6 = 7
    T__7 = 8
    WHERE = 9
    AND = 10
    INCLUDES = 11
    EXCLUDES = 12
    OR = 13
    OPERATOR = 14
    NUMBER = 15
    QUOTE = 16
    WHITESPACE = 17
    NEWLINE = 18
    WORD = 19
    channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
    modeNames = [ u"DEFAULT_MODE" ]
    literalNames = [ u"<INVALID>",
            u"'.'", u"'/'", u"'_'", u"'\\'", u"':'", u"'%'", u"','", u"'-'" ]
    symbolicNames = [ u"<INVALID>",
            u"WHERE", u"AND", u"INCLUDES", u"EXCLUDES", u"OR", u"OPERATOR",
            u"NUMBER", u"QUOTE", u"WHITESPACE", u"NEWLINE", u"WORD" ]
    ruleNames = [ u"T__0", u"T__1", u"T__2", u"T__3", u"T__4", u"T__5",
                  u"T__6", u"T__7", u"A", u"C", u"D", u"E", u"H", u"I",
                  u"L", u"N", u"O", u"R", u"S", u"U", u"W", u"X", u"DIGIT",
                  u"LOWERCASE", u"UPPERCASE", u"WHERE", u"AND", u"INCLUDES",
                  u"EXCLUDES", u"OR", u"OPERATOR", u"NUMBER", u"QUOTE",
                  u"WHITESPACE", u"NEWLINE", u"WORD" ]
    grammarFileName = u"MetricAlertCondition.g4"
    def __init__(self, input=None, output=sys.stdout):
        super(MetricAlertConditionLexer, self).__init__(input, output=output)
        self.checkVersion("4.7.2")
        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
        self._actions = None
        self._predicates = None
| StarcoderdataPython |
1857386 | <reponame>alexanderkell/reinforcement-learning-investment-FTT-Power<gh_stars>0
import matlab.engine
import argparse
import os
from multiprocessing import Process
from subprocess import Popen
import ray
from ray.rllib.agents.dqn import DQNTrainer
from ray.rllib.agents.ppo import PPOTrainer
from ray.rllib.agents.ddpg import DDPGTrainer
from ray.rllib.env.policy_server_input import PolicyServerInput
from ray.tune.logger import pretty_print
from gym.spaces import Box, Discrete, MultiDiscrete
from ray.rllib.utils.policy_server import PolicyServer
from ray.tune.registry import register_env
import ray.tune as tune
from ray.rllib.env.external_env import ExternalEnv
import gym
import numpy as np
# NOTE(review): no arguments are ever registered on this parser; parse_args()
# below only rejects unexpected CLI input.
parser = argparse.ArgumentParser()
print("creating RL Trainer")
# Address/port the RLlib policy server listens on for external experiences.
SERVER_ADDRESS = "127.0.0.1"
SERVER_PORT = 9912
args = parser.parse_args()
connector_config = {
    # Use the connector server to generate experiences.
    "input": (
        lambda ioctx: PolicyServerInput(ioctx, SERVER_ADDRESS, SERVER_PORT)
    ),
    # Use a single worker process to run the server.
    "num_workers": 0,
    # Disable OPE, since the rollouts are coming from online clients.
    "input_evaluation": [],
}
# NOTE(review): the env id 'srv' is never registered in this file
# (register_env is imported but unused) -- presumably registered elsewhere;
# confirm before running standalone.
trainer = DDPGTrainer(
    env='srv',
    config=dict(
        connector_config, **{
            "sample_batch_size": 10000,
            "train_batch_size": 40000,
        }))
# Train forever, printing RLlib's result dict after every iteration.
while True:
    print(pretty_print(trainer.train()))
6669079 | #!/usr/bin/env python3
# coding: utf-8
# @Author: ArthurBernard
# @Email: <EMAIL>
# @Date: 2019-03-23 11:36:05
# @Last modified by: ArthurBernard
# @Last modified time: 2020-02-05 17:41:40
""" Tools to manage date and time. """
# Built-in packages
import time
# External packages
# Local packages
__all__ = ['date_to_TS', 'TS_to_date', 'now']
"""
Some functions to manage date, timestamp and other time format.
TODO:
- Finish date_to_TS and TS_to_date functions
"""
def now(freq=60):
    """Return the timestamp at which the current period began.

    Parameters
    ----------
    freq : int, optional
        Length of one period in seconds, default is 60 (minutely).

    Returns
    -------
    int
        Timestamp of the start of the current period.

    """
    # Floor the current time to the most recent multiple of `freq`.
    period_index = time.time() // freq
    return int(period_index * freq)
def str_time(t):
    """ Format a duration in seconds as ``[D days ]HH:MM:SS``.

    Parameters
    ----------
    t : int
        Duration in seconds (assumed non-negative).

    Returns
    -------
    str
        Duration in hours, minutes and seconds, prefixed with the number of
        days when it is one full day or longer.

    """
    txt = ''
    s, t = t % 60, t // 60
    m, h = t % 60, t // 60
    # Bug fix: the original tested `h > 24`, so exactly 24 hours rendered as
    # '24:00:00' instead of rolling over to '1 days 00:00:00'.
    if h >= 24:
        h, d = h % 24, h // 24
        txt += str(d) + ' days '
    # {:02d} zero-pads each field, replacing the manual '0' + str(x) logic.
    return txt + '{:02d}:{:02d}:{:02d}'.format(h, m, s)
def date_to_TS(date, format='%y-%m-%d %H:%M:%S'):
    """ Convert a date to an integer timestamp.

    Parameters
    ----------
    date : int or str
        Timestamp (returned unchanged) or a date string to parse.
    format : str
        Format of the input date string.

    Return
    ------
    int
        Timestamp of the date.

    Raises
    ------
    ValueError
        If `date` is neither an int nor a str.

    """
    if isinstance(date, int):
        return date
    if isinstance(date, str):
        # Bug fix: time.mktime returns a float; truncate so the documented
        # int return type actually holds.
        return int(time.mktime(time.strptime(date, format)))
    # Dropped the debug print; the exception already carries the offending type.
    raise ValueError('Unknown format', type(date))
def TS_to_date(TS, format='%y-%m-%d %H:%M:%S'):
    """ Convert a timestamp to a date string.

    Parameters
    ----------
    TS : int, float or str
        Timestamp to convert to date; strings are returned unchanged.
    format : str
        Format of the output date.

    Return
    ------
    str
        Date of the timestamp.

    Raises
    ------
    ValueError
        If `TS` is neither a number nor a str.

    """
    # Generalized: time.localtime accepts floats, so fractional timestamps
    # (e.g. straight from time.time()) now work instead of raising.
    if isinstance(TS, (int, float)):
        return time.strftime(format, time.localtime(TS))
    if isinstance(TS, str):
        return TS
    # Dropped the debug print; the exception already carries the offending type.
    raise ValueError('Unknown format', type(TS))
| StarcoderdataPython |
9767529 | <reponame>Bayashka1/open-rcv<filename>openrcv/test/test_parsing.py
#
# Copyright (c) 2014 <NAME>. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
from io import StringIO
import os
from textwrap import dedent
import unittest
from openrcv.models import ContestInput
from openrcv.parsing import BLTParser, ParsingError
from openrcv.utils import PathInfo, StringInfo
from openrcv.utiltest.helpers import UnitCase
class BLTParserTest(UnitCase):
    """Unit tests for BLTParser: parsing BLT-formatted election data."""
    # A minimal BLT fixture: header, withdrawn candidate, two ballots,
    # terminator line, candidate names, election title.
    BLT_STRING = """\
    4 2
    -3
    2 2 0
    1 2 4 3 1 0
    0
    "Jen"
    "Alice"
    "Steve"
    "Bill"
    "My Election"
    """
    def make_parser(self, blt_string, output_info=None):
        # Helper: build a parser plus an in-memory stream over the BLT text.
        parser = BLTParser(output_info)
        blt_stream = StringInfo(blt_string)
        return parser, blt_stream
    def parse_blt(self, blt_string, output_info=None):
        """Parse a BLT string and return the resulting ContestInput.

        Arguments:
          blt_str: a BLT-formatted string.
          output_info: a StreamInfo object.
        """
        parser, blt_stream = self.make_parser(blt_string, output_info=output_info)
        info = parser.parse(blt_stream)
        return info
    def test_init(self):
        # The parser should keep a reference to the provided output stream.
        output_info = StringInfo()
        parser = BLTParser(output_info)
        self.assertIs(parser.output_info, output_info)
    def test_init__no_args(self):
        # Without an output stream the parser should discard output (devnull).
        parser = BLTParser()
        output_info = parser.output_info
        self.assertIs(type(output_info), PathInfo)
        self.assertEqual(output_info.path, os.devnull)
    # TODO: test passing
    # TODO: test extra blank and non-empty lines at end.
    def test_parse(self):
        """Test passing an output StreamInfo object."""
        output_info = StringInfo()
        info = self.parse_blt(self.BLT_STRING, output_info=output_info)
        # TODO: test the other attributes.
        self.assertEqual(type(info), ContestInput)
        self.assertEqual(info.name, '"My Election"')
        self.assertEqual(info.ballot_count, 2)
        self.assertEqual(output_info.value, "2 2\n1 2 4 3 1\n")
    def test_parse__terminal_empty_lines(self):
        """Test a BLT string with empty lines at the end."""
        info = self.parse_blt(self.BLT_STRING + "\n\n")
        self.assertEqual(type(info), ContestInput)
        self.assertEqual(info.name, '"My Election"')
    def test_parse__terminal_non_empty_lines(self):
        """Test a BLT string with non-empty lines at the end."""
        # Any trailing non-empty content should be rejected as a parse error.
        suffixes = [
            "foo",
            "foo\n",
            "\nfoo",
            "\nfoo\n"
        ]
        for suffix in suffixes:
            with self.subTest(suffix=suffix):
                # TODO: check the line number.
                with self.assertRaises(ParsingError):
                    info = self.parse_blt(self.BLT_STRING + suffix)
    def test_parse__no_output_info(self):
        """Test passing no output StreamInfo object."""
        info = self.parse_blt(self.BLT_STRING)
        # TODO: test the other attributes.
        self.assertEqual(type(info), ContestInput)
        self.assertEqual(info.ballot_count, 2)
71409 | <filename>auth0/v2/authentication/link.py
from .base import AuthenticationBase
class Link(AuthenticationBase):
    """Link accounts endpoints.

    Args:
        domain (str): Your auth0 domain (e.g: username.auth0.com)
    """

    def __init__(self, domain):
        self.domain = domain

    def unlink(self, access_token, user_id):
        """Unlink an account.

        POSTs the token/user pair to the tenant's /unlink endpoint.
        """
        payload = {
            'access_token': access_token,
            'user_id': user_id,
        }
        return self.post(
            url='https://%s/unlink' % self.domain,
            data=payload,
            headers={'Content-Type': 'application/json'}
        )
# Gunicorn configuration (read by gunicorn at startup).
bind = ":8080"       # listen on all interfaces, port 8080
workers = 1          # single worker process
threads = 8          # eight request threads per worker
# NOTE(review): gunicorn's standard setting name is 'errorlog'; 'logfile'
# may be silently ignored here -- confirm against the gunicorn version used.
logfile = "-"        # '-' conventionally means log to stdout
loglevel = "DEBUG"   # verbose logging
| StarcoderdataPython |
3420650 | <gh_stars>10-100
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
import numpy as np

# Load the iris data set and hold out a test split (fixed seed -> reproducible).
dataset = load_iris()
X_train, X_test, y_train, y_test = train_test_split(dataset["data"], dataset["target"], random_state=0)

# Fit a 1-nearest-neighbour classifier on the training split.
kn = KNeighborsClassifier(n_neighbors=1)
kn.fit(X_train, y_train)

# Predict each held-out sample individually and print it next to the true label.
for i, sample in enumerate(X_test):
    x_new = np.array([sample])
    prediction = kn.predict(x_new)
    print("TARGET=", y_test[i], dataset["target_names"][y_test[i]], "PREDICTED=", prediction, dataset["target_names"][prediction])

# Overall accuracy on the test split.
print(kn.score(X_test, y_test))
74561 | """# HTML Widget Functionality
Provides the HTMLWidget and HTMLWidgetGenerator
"""
from ..html_component import HTMLComponent
class HTMLWidget(HTMLComponent): # pylint: disable=too-few-public-methods
    """Base class for HTML widgets; concrete widgets should inherit from this."""
| StarcoderdataPython |
12809040 | # pylint: disable=missing-module-docstring
from typing import Union
from typing import List
from enum import Enum
from aibc import async_make_request
async def snapshot(contract_ids: List[str], since: int = None, fields: list = None) -> dict:
    """Request a market-data snapshot for the given contract ids.

    Comma-joins the contract ids (and the optional field list) and issues a
    GET against the snapshot endpoint. Note the endpoint may need to be
    polled several times before all requested fields are populated.

    Args:
        contract_ids (List[str]): A list of contract Ids.
        since (int): Epoch time (ms) after which updates are required.
        fields (list): Fields to include in the response.

    Returns:
        dict: A `MarketSnapshot` resource.

    Usage:
        >>> await snapshot(contract_ids=['265598'])
    """
    joined_fields = ','.join(fields) if fields else None
    params = {
        'conids': ','.join(contract_ids),
        'since': since,
        'fields': joined_fields,
    }
    return await async_make_request(method='get', endpoint='/api/iserver/marketdata/snapshot', params=params)
async def market_history(contract_id: str, period: str, bar_type: Union[str, Enum] = None, exchange: str = None,
                         outside_regular_trading_hours: bool = True) -> dict:
    """Get historical market data for a given conid; length of the data
    is controlled by `period` and `bar_type`.

    Args:
        contract_id (str): A contract Id.
        period (str): Available time period: {1-30}min, {1-8}h, {1-1000}d, {1-792}w, {1-182}m, {1-15}y
        bar_type (Union[str, Enum], optional): The bar type you want the data in. Defaults to None.
        exchange (str, optional): Exchange of the conid. Defaults to None.
        outside_regular_trading_hours (bool, optional): For contracts that support it, will determine if historical
                                                        data includes outside of regular trading hours. Defaults
                                                        to True.

    Returns:
        dict: A collection of `Bar` resources.

    Usage:
        >>> await market_history(contract_id='265598', period='1d')
    """
    # Enum members are unwrapped to their underlying value before being sent.
    if isinstance(bar_type, Enum):
        bar_type = bar_type.value
    payload = {
        'conid': contract_id,
        'period': period,
        'bar': bar_type,
        'exchange': exchange,
        'outsideRth': outside_regular_trading_hours
    }
    return await async_make_request(method='get', endpoint='/api/iserver/marketdata/history', params=payload)
| StarcoderdataPython |
8138159 | #!/usr/bin/env python3
# imports go here
import json
import os
import tweepy
from flask import Flask, request
app = Flask(__name__)
#
# Free Coding session for 2014-12-18
# Written by <NAME>
#
# Twitter credentials come from the environment; os.environ[...] raises
# KeyError at import time if any of them is missing.
consumer_key = os.environ['TWITTER_CONSUMER_KEY']
consumer_secret = os.environ['TWITTER_CONSUMER_SECRET']
access_token = os.environ['TWITTER_ACCESS_TOKEN']
access_token_secret = os.environ['TWITTER_ACCESS_TOKEN_SECRET']
def tweet_msg(message):
    """Post *message* as a status update using credentials from the environment."""
    handler = tweepy.OAuthHandler(consumer_key, consumer_secret)
    handler.secure = True
    handler.set_access_token(access_token, access_token_secret)
    tweepy.API(handler).update_status(message)
@app.route("/", methods=['GET', 'POST'])
def gittweet():
data = json.loads(request.data.decode('utf-8'))
tweet_msg("Freecoding: %s %s" % (data['commits'][-1]['message'], data['commits'][-1]['url']))
return "OK"
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8970)
| StarcoderdataPython |
11314736 | <filename>ivobject/decorator.py
def invariant(fn):
    """Wrap *fn* (a ``(cls, instance)`` callable) as an invariant check.

    NOTE(review): the wrapper is a pure pass-through; its distinct name
    ('invariant_fn') looks like it may be used elsewhere to recognise
    invariants, so functools.wraps is deliberately NOT applied -- confirm
    before changing that.
    """
    def invariant_fn(cls, instance):
        return fn(cls, instance)

    return invariant_fn
def param_invariant(fn):
    """Wrap *fn* (a ``(cls, instance)`` callable) as a parameter invariant check.

    NOTE(review): pure pass-through wrapper; the wrapper name
    ('param_invariant_fn') may serve as a discovery marker elsewhere, so
    metadata is intentionally left untouched.
    """
    def param_invariant_fn(cls, instance):
        return fn(cls, instance)

    return param_invariant_fn
| StarcoderdataPython |
11363758 | '''
A simulation implementing the SIRS model of infectious disease spread in a population and in particular demonstrating
probe plotting.
'''
from pram.data import ProbePersistenceMem, GroupSizeProbe
from pram.entity import Group
from pram.model.epi import SIRSModel
from pram.sim import Simulation
# ----------------------------------------------------------------------------------------------------------------------
# Probe that records the group sizes for each flu state (s/i/r) in memory.
p = GroupSizeProbe.by_attr('flu', 'flu', ['s', 'i', 'r'], persistence=ProbePersistenceMem())
# Run a 100-step SIRS simulation over a single group of 1000 susceptible agents.
(Simulation().
    add_probe(p).
    add_rule(SIRSModel('flu', beta=0.05, gamma=0.50, alpha=0.10)).
    add_group(Group(m=1000, attr={ 'flu': 's' })).
    run(100)
)
# Plot the three probed series (p0/p1/p2 correspond to the s/i/r states above).
series = [
    { 'var': 'p0', 'lw': 0.75, 'ls': 'solid', 'marker': 'o', 'color': 'red', 'ms': 0, 'lbl': 'S' },
    { 'var': 'p1', 'lw': 0.75, 'ls': 'dashed', 'marker': '+', 'color': 'blue', 'ms': 0, 'lbl': 'I' },
    { 'var': 'p2', 'lw': 0.75, 'ls': 'dotted', 'marker': 'x', 'color': 'green', 'ms': 0, 'lbl': 'R' }
]
p.plot(series)
# series = [
# { 'var': f'{quantity}_S', 'lw': 1.50, 'ls': 'solid', 'dashes': (4,8), 'marker': '+', 'color': 'blue', 'ms': 0, 'lbl': 'S' },
# { 'var': f'{quantity}_E', 'lw': 1.50, 'ls': 'solid', 'dashes': (1,0), 'marker': '+', 'color': 'orange', 'ms': 0, 'lbl': 'E' },
# { 'var': f'{quantity}_I', 'lw': 1.50, 'ls': 'solid', 'dashes': (5,1), 'marker': '*', 'color': 'red', 'ms': 0, 'lbl': 'I' },
# { 'var': f'{quantity}_R', 'lw': 1.50, 'ls': 'solid', 'dashes': (5,6), 'marker': '|', 'color': 'green', 'ms': 0, 'lbl': 'R' },
# { 'var': f'{quantity}_X', 'lw': 1.50, 'ls': 'solid', 'dashes': (1,2), 'marker': 'x', 'color': 'black', 'ms': 0, 'lbl': 'X' }
# ]
# sim.probes[0].plot(series, ylabel='Population mass', figsize=(12,4), subplot_b=0.15)
# print(p.get_data())
| StarcoderdataPython |
5055071 | from __future__ import annotations
import dataclasses
from typing import Dict, Tuple
import numpy as np
from coffee.client import BulletClient
from coffee.structs import JointInfo, JointType
@dataclasses.dataclass(frozen=True)
class Joints:
    """A convenience class for accessing the joint information of a PyBullet body.

    These are parsed from the URDF. The extracted information is useful for things like
    inverse kinematics, which can take advantage of rest poses and joint limits to
    refine its solution.

    Attributes:
        body_id: The unique ID of the body.
        joints_info: A tuple of `JointInfo` objects, one for each joint.
        controllable_joints: A tuple of indices designating the controllable joints of
            the body.
        non_controllable_joints: A tuple of indices designating the non-controllable
            joints of the body.
    """
    body_id: int
    joints_info: Tuple[JointInfo, ...]
    controllable_joints: Tuple[int, ...]
    non_controllable_joints: Tuple[int, ...]
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(bodyid={self.body_id}, dof={self.dof})"
    # Factory methods.
    @staticmethod
    def from_body_id(body_id: int, pb_client: BulletClient) -> Joints:
        """Build a `Joints` view of *body_id* by querying PyBullet.

        Joints whose type is FIXED are classified as non-controllable; every
        other joint type counts as controllable.
        """
        controllable_joints = []
        non_controllable_joints = []
        joints_info = []
        for i in range(pb_client.getNumJoints(body_id)):
            joint_info = JointInfo(*pb_client.getJointInfo(body_id, i))
            if joint_info.joint_type != JointType.FIXED.value:
                controllable_joints.append(joint_info.joint_index)
            else:
                non_controllable_joints.append(joint_info.joint_index)
            joints_info.append(joint_info)
        return Joints(
            body_id=body_id,
            joints_info=tuple(joints_info),
            controllable_joints=tuple(controllable_joints),
            non_controllable_joints=tuple(non_controllable_joints),
        )
    # Accessors.
    def get_joint_index_from_joint_name(self, joint_name: str) -> int:
        """Return the index of the joint named *joint_name*; raise ValueError if absent."""
        for i, joint_info in enumerate(self.joints_info):
            if joint_info.joint_name == joint_name:
                return i
        raise ValueError(f"Joint {joint_name} not found.")
    def get_joint_name_from_joint_index(self, joint_index: int) -> str:
        """Return the name of the joint at *joint_index*."""
        return self.joints_info[joint_index].joint_name
    def get_joint_index_from_link_name(self, link_name: str) -> int:
        """Return the joint index whose child link is *link_name*; raise ValueError if absent."""
        for i, joint_info in enumerate(self.joints_info):
            if joint_info.link_name == link_name:
                return i
        raise ValueError(f"Link {link_name} not found.")
    def get_link_name_from_joint_index(self, joint_index: int) -> str:
        """Return the child link name of the joint at *joint_index*."""
        return self.joints_info[joint_index].link_name
    def contains_link(self, link_name: str) -> bool:
        """Returns True if the given link name is present in the URDF."""
        for joint_info in self.joints_info:
            if joint_info.link_name == link_name:
                return True
        return False
    def contains_joint(self, joint_name: str) -> bool:
        """Returns True if the given joint name is present in the URDF."""
        for joint_info in self.joints_info:
            if joint_info.joint_name == joint_name:
                return True
        return False
    @property
    def link_names(self) -> Tuple[str, ...]:
        """Returns a tuple of link names."""
        return tuple(joint_info.link_name for joint_info in self.joints_info)
    @property
    def name2index(self) -> Dict[str, int]:
        """A dictionary mapping joint names to joint indices."""
        return {
            joint_info.joint_name: i for i, joint_info in enumerate(self.joints_info)
        }
    @property
    def index2name(self) -> Dict[int, str]:
        """A dictionary mapping joint indices to joint names."""
        return {
            i: joint_info.joint_name for i, joint_info in enumerate(self.joints_info)
        }
    @property
    def dof(self) -> int:
        """Number of degrees of freedom, i.e. the count of controllable joints."""
        return len(self.controllable_joints)
    @property
    def joints_lower_limit(self) -> np.ndarray:
        """Lower joint limits for the controllable joints, shape (dof,)."""
        lower = []
        for joint_info in [self.joints_info[i] for i in self.controllable_joints]:
            # q_index > -1 appears to indicate a joint with a position state
            # in PyBullet; others get a 0.0 placeholder -- confirm against
            # the PyBullet getJointInfo docs.
            if joint_info.q_index > -1:
                lower.append(joint_info.joint_lower_limit)
            else:
                lower.append(0.0)
        return np.array(lower, dtype=np.float64)
    @property
    def joints_upper_limit(self) -> np.ndarray:
        """Upper joint limits for the controllable joints, shape (dof,)."""
        upper = []
        for joint_info in [self.joints_info[i] for i in self.controllable_joints]:
            if joint_info.q_index > -1:
                upper.append(joint_info.joint_upper_limit)
            else:
                # Joints without a position state default to a full turn.
                upper.append(2.0 * np.pi)
        return np.array(upper, dtype=np.float64)
    @property
    def joints_range(self) -> np.ndarray:
        """Stacked [lower, upper] limits per controllable joint."""
        # Shape: (dof, 2).
        return np.vstack([self.joints_lower_limit, self.joints_upper_limit]).T
    @property
    def joints_max_force(self) -> np.ndarray:
        """Maximum force per controllable joint, shape (dof,).

        NOTE(review): returns float32 while the sibling properties use
        float64 -- confirm whether this asymmetry is intentional.
        """
        max_force = []
        for joint_info in [self.joints_info[i] for i in self.controllable_joints]:
            max_force.append(joint_info.joint_max_force)
        return np.array(max_force, dtype=np.float32)
    @property
    def joints_max_velocity(self) -> np.ndarray:
        """Maximum velocity per controllable joint, shape (dof,)."""
        max_velocity = []
        for joint_info in [self.joints_info[i] for i in self.controllable_joints]:
            max_velocity.append(joint_info.joint_max_velocity)
        return np.array(max_velocity, dtype=np.float64)
    # Array creation.
    def zeros_array(self) -> np.ndarray:
        """Return a float64 zero vector with one entry per controllable joint."""
        return np.zeros(self.dof, dtype=np.float64)
    def ones_array(self) -> np.ndarray:
        """Return a float64 ones vector with one entry per controllable joint."""
        return np.ones(self.dof, dtype=np.float64)
    def const_array(self, value: float) -> np.ndarray:
        """Return a float64 vector filled with *value*, one entry per controllable joint."""
        return np.full(self.dof, value, dtype=np.float64)
# Simple interactive shopping-cart script: list products, then total an order.
products = {"Grape": 5.9, "Guava": 4.9,
            "Mango": 4.9, "Cashew": 2.4,
            "Banana": 3.0, "Pear": 5.8}

# Show the catalogue before taking orders.
for pro, price in products.items():
    print(pro, " = ", price)

cost = 0
while True:
    pro = input("Select product (n=nothing): ")
    if pro == 'n':
        break
    if pro not in products:
        # Robustness: an unknown product name previously raised KeyError
        # and crashed the whole session.
        print("Unknown product:", pro)
        continue
    try:
        qty = int(input("Number of product? "))
    except ValueError:
        # Robustness: non-numeric quantities previously crashed with ValueError.
        print("Please enter a whole number.")
        continue
    cost += products[pro] * qty
print("Price of products(s): ", cost)
11333046 | <gh_stars>0
class Row:
    """A row of matches in a Nim-style matchstick game."""

    def __init__(self, init_number_of_matches):
        # Current number of matches remaining in this row.
        self.number_of_matches = init_number_of_matches

    def remove_matches(self, number_to_remove):
        """Remove matches from the row.

        Returns True on success; False (leaving the row unchanged) when more
        matches are requested than remain.
        """
        if self.number_of_matches - number_to_remove >= 0:
            self.number_of_matches -= number_to_remove
            return True
        return False

    def get_row_as_string(self):
        """Return the row drawn with '| ' per match, e.g. ' | | ' for two matches."""
        # Resolves the old TODO: string repetition replaces the manual loop.
        return ' ' + '| ' * self.number_of_matches
| StarcoderdataPython |
3522498 | <filename>dictionary/merriam_webster_scraper.py<gh_stars>1-10
"""This module handles scraping dictionary entries and adding them to the db
This module handles visiting a website to look up a word's definition and then
navigating the HTML to extract the desired information to store in the database.
Main Functions:
scrape_word(word, search_synonym=False)
word: string of word to lookup
search_synonym: boolean indicating whether or not to look up the
synonyms listed for a word, if set false this information is stored in
a table for a later lookup
This function is the main function of the module, it handles looking up
a word and storing it in the databse. The function will check to make
sure that the word already isn't in the database. It returns True if
the word entered had a valid entry, returns False if the site didn't
match a word.
Example:
python3 manage.py shell
from dictionary import merriam_webster_scraper as mws
mws.scrape_word('bolster', True)
load_list_of_words(filename)
filename: name of file to lookup stored in dictionary/word_lists/
Scraped the definition of every word in the file. Each line should be a
word to lookup. The default search_synonym for this words is True,
meaning that the scraper will visit the entry pages for every
synonym and antonym listed on the page. Easy way to fill the database
with a lot of entries at once.
Example:
python3 manage.py shell
from dictionary import merriam_webster_scraper as mws
mws.load_list_of_words('top_gre_words.txt')
fill_in_synonyms()
This function will look at all of the synonyms for the base words whose
synonyms we have not added yet. Depending on how many words are in the
database, this function could take a while to complete. Fills in
incompete entries.
Example:
python3 manage.py shell
from dictionary import merriam_webster_scraper as mws
mws.fill_in_synonyms()
"""
from bs4 import BeautifulSoup
import requests
import time
import random
import os
import re
from dictionary import models
from django.db import transaction
from django.db.models import F
from django.db.utils import IntegrityError
@transaction.atomic()
def scrape_word(word, search_synonym=False):
    """Scrape the Merriam-Webster entry for *word* and load it into the database.

    Keyword arguments:
    word -- word to add to database
    search_synonym -- boolean to add all synonyms listed to database as well

    Returns True if the word had a valid entry (or was already stored),
    False if the site returned a 404 for it. The whole scrape runs inside a
    single database transaction.
    """
    # Short-circuit if any spelling of the word is already stored.
    if _already_entered(word, search_synonym):
        return True
    url = 'https://www.merriam-webster.com/dictionary/' + word
    try:
        r = requests.get(url, timeout=10)
    except requests.exceptions.Timeout:
        # NOTE(review): retries recursively with no depth limit -- repeated
        # timeouts could recurse indefinitely; consider a retry cap.
        time.sleep(5)
        return scrape_word(word, search_synonym)
    if r.status_code == 404:
        return False
    soup = BeautifulSoup(r.content, 'html5lib')
    _manage_dictionary_entries(soup, word, search_synonym)
    return True
def load_list_of_words(filename):
    """Scrape every word in dictionary/word_lists/<filename> not already stored.

    Each line of the file is one word; words already present (under any
    spelling) are skipped, and synonyms are scraped for every new word.
    """
    path = os.path.join('dictionary', 'word_lists', filename)
    known_spellings = models.VariantWord.objects.values_list('name', flat=True)
    for candidate in _load_word_list(path):
        if candidate in known_spellings:
            continue
        print(candidate)
        scrape_word(candidate, search_synonym=True)
        time.sleep(1)
def fill_in_synonyms():
    """Scrape synonyms for every base word whose synonyms were never looked up.

    Can take a while on a large database; sleeps between lookups to be
    polite to the remote site.
    """
    pending = models.BaseWord.objects.filter(searched_synonym=False)
    for base_word in pending:
        print(base_word.name)
        scrape_word(base_word.name, search_synonym=True)
        time.sleep(2)
def _already_entered(word, search_synonym):
    """Checks to see if a word is already entered.

    If a word has been entered, check if the synonyms have been searched. If
    they haven't and search_synonym is true, then lookup all of the words
    associated with the baseword in the SynonymsToLookUp table.

    Returns True when *word* (under any spelling) is already stored,
    False otherwise.
    """
    variant_word_set = models.VariantWord.objects.all().values_list('name',
                                                                    flat=True)
    if word in variant_word_set:
        if search_synonym:
            base_word_ = models.VariantWord.objects.get(name=word).base_word
            # Deferred synonym lookups were queued earlier; drain them now.
            if not base_word_.searched_synonym:
                synonyms_to_lookup = base_word_.synonymstolookup_set.all()
                for synonym in synonyms_to_lookup:
                    if synonym.is_synonym:
                        print(f'Looking up the synonym: {synonym.lookup_word}')
                    else:
                        print(f'Looking up the antonym: {synonym.lookup_word}')
                    # Scrape the related word itself (without recursing into
                    # its own synonyms).
                    valid_word = scrape_word(synonym.lookup_word)
                    synonym_word = synonym.lookup_word
                    if valid_word:
                        synonym_vw = models.VariantWord.objects \
                            .get(name=synonym_word)
                        if synonym.is_synonym:
                            _ = models.Synonym.objects \
                                .get_or_create(base_word=base_word_,
                                               synonym=synonym_vw)
                        else:
                            _ = models.Antonym.objects \
                                .get_or_create(base_word=base_word_,
                                               antonym=synonym_vw)
                        # Queue entry is resolved; drop it.
                        synonym.delete()
                base_word_.searched_synonym = True
                base_word_.save()
        return True
    else:
        return False
def _manage_dictionary_entries(soup, word, search_synonym):
    """Searches soup for pertinent sections and sends to functions to handle.

    If the page redirects to a more common spelling, re-scrapes that word
    instead. Otherwise stores the main entry, its alternate spellings and
    (optionally) its synonyms.
    """
    def_wrapper = soup.find('div', {'id': 'definition-wrapper'})
    left_content = def_wrapper.find('div', {'id' : 'left-content'})
    #If there's an entry, probably a more commonly spelled name to search
    first_entry = left_content.find('div', {'id' : 'dictionary-entry-1'})
    new_word = first_entry.find('a', {'class' : 'cxt', 'rel' : 'prev'})
    if new_word is not None:
        time.sleep(1)
        new_word = new_word.getText().strip()
        print(f'revising search from {word} to {new_word}')
        return scrape_word(new_word, search_synonym)
    variant_word_set = models.VariantWord.objects.all().values_list('name',
                                                                    flat=True)
    (word_name, base_word_) = _handle_main_dictionary_entry(left_content,
                                                            variant_word_set,
                                                            search_synonym)
    # No base word means the page did not contain a usable entry.
    if base_word_ is None:
        return None
    _compile_alternate_spellings(left_content, word_name, word, base_word_,
                                 variant_word_set)
    _add_synonyms(left_content, base_word_, search_synonym)
def _handle_main_dictionary_entry(left_content, variant_word_set,
                                  search_synonym):
    """Searches for content containing the main aspects of a dictionary entry.

    Keyword argument:
    left_content -- section of webpage containing the text of the dictionary
        entries
    variant_word_set -- list of all spellings of words currently in the
        database
    search_synonym -- whether or not we will search for synonyms for the word

    Loops through the main sections of the webpage (one per part of speech)
    and creates the base_word, form_words, definitions, pos and examples.
    Returns a (word_name, base_word) tuple; base_word is None when no valid
    entry was found.
    """
    entries = (left_content.find_all('div', {'class': 'entry-header'},
                                     recursive=False))
    # i tracks the 1-based entry number used to locate the matching
    # 'dictionary-entry-<i>' definition section.
    i = 1
    first_entry = entries[0]
    remaining_entries = entries[1:]
    (base_word_, word_name) = _add_base_and_form(first_entry,
                                                 i, left_content,
                                                 variant_word_set,
                                                 search_synonym)
    #Loop through all definition sections, broken down by part of speech
    for entry in remaining_entries:
        i += 1
        #We only use the return values for the first entry
        if base_word_ is not None:
            _ = _add_base_and_form(entry, i, left_content,
                                   variant_word_set, search_synonym)
        else:
            (base_word_, _) = _add_base_and_form(entry, i, left_content,
                                                 variant_word_set,
                                                 search_synonym)
    return (word_name, base_word_)
def _add_base_and_form(entry, i, left_content, variant_word_set,
                       search_synonym):
    """Add BaseWord and FormWord entries to the database for one entry header.

    Keyword arguments:
    entry -- section of page with the word name and part of speech
    i -- 1-based entry number locating the matching definition section
    left_content -- main section containing all entry information
    variant_word_set -- spellings of words currently in the database
    search_synonym -- whether synonyms will be searched for the word

    Returns:
    (base_word_, word_name) -- base_word_ is None when the entry is not a
    valid word (no usable name or no part of speech); word_name is the name
    as it appears on the page (may differ from what was searched).
    """
    word_name = entry.find('div').find(['h1', 'p'], {'class' : 'hword'}) \
        .getText().lower()
    # Rejects prefix/suffix pseudo-entries such as '-phobia'.
    word_name = _clean_word_name(word_name)
    if word_name is None:
        return (None, None)
    base_word_, _ = models.BaseWord.objects.get_or_create(name=word_name,
                                                          searched_synonym=search_synonym)
    pos_ = _find_pos(entry)
    #If there's no pos, probably not a valid dictionary entry
    if pos_ is None:
        return (None, word_name)
    form_word_, _ = (models.FormWord.objects
                     .get_or_create(pos=pos_,
                                    base_word=base_word_,))
    _add_definition_and_examples(i, left_content, form_word_)
    return (base_word_, word_name)
def _add_definition_and_examples(dictionary_entry_num, left_content,
                                 form_word_):
    """Find and store the definitions and example sentences for one entry.

    Keyword arguments:
    dictionary_entry_num -- used to locate the correct HTML tag
    left_content -- the part of the webpage with all pertinent info
    form_word_ -- FormWord object to link each definition to

    Merriam-Webster splits an entry across sibling tags: the word name and
    part of speech live in one tag, the definitions and examples in the
    next. dictionary_entry_num ('dictionary-entry-<n>') pairs this
    definition section with the right word/pos header.

    Returns nothing; entries are get_or_create'd so re-runs are idempotent.
    """
    def_entry_num = 'dictionary-entry-' + str(dictionary_entry_num)
    def_entry = left_content.find('div', {'id' : def_entry_num})
    definition_headers = def_entry.find_all('div', {'class' : 'vg'},
                                            recursive=False)
    for def_header in definition_headers:
        definitions = def_header.find_all('span', {'class' : 'dtText'})
        for definition in definitions:
            #These are examples or quotes we don't need in the definition
            extra_text = definition.find_all('span', {'class' : 'ex-sent'})
            examples = definition.find_all('span', {'class' : 't'})
            # One dtText span may yield several colon-separated definitions.
            clean_defs = _clean_definition(definition, extra_text)
            for clean_def in clean_defs:
                word_def, _ = models.WordDefinition.objects \
                    .get_or_create(form_word=form_word_,
                                   definition=clean_def)
                for example in examples:
                    example_text = _clean_example_text(example.getText())
                    _, _ = models.ExampleSentence.objects \
                        .get_or_create(definition=word_def,
                                       sentence=example_text)
def _find_pos(entry):
    """Locate the part-of-speech label inside *entry*.

    The label usually lives in an 'important-blue-link' anchor and otherwise
    in an 'fl' span. Returns the matching PartOfSpeech object, or None when
    neither location holds a label.
    """
    tag = entry.find('a', {'class': 'important-blue-link'})
    if tag is None:
        tag = entry.find('span', {'class': 'fl'})
    if tag is None:
        return None
    pos_text = _clean_pos_text(tag.getText())
    pos_, _ = models.PartOfSpeech.objects.get_or_create(name=pos_text)
    return pos_
def _clean_example_text(example_text):
"""Returns just a sentence"""
p = re.compile('([A-z][A-z ,-\\\/()\']*)')
match = p.search(example_text)
if match is None:
raise (ValueError(f'Something wrong happened when extracting the part '
f'of speech. The extracted text is: {example_text}'))
return match.group()
def _clean_definition(definition, extra_text):
"""Clean a scraped definition"""
def_text = definition.getText().strip()
for text in extra_text:
extra = text.getText().strip()
def_text = def_text.replace(extra, '')
def_text = def_text.replace('archaic :', 'archaic --')
def_text = re.sub('\(see.*\)', '', def_text)
def_text = re.sub('sense [0-9][a-zA-Z]?', '', def_text)
def_text = re.sub('sense [a-zA-Z]?', '', def_text)
def_text = re.sub(' +', ' ', def_text)
split_defs = def_text.split(':')
p = re.compile('([a-zA-Z][a-zA-Z ,-\\\/()\']*)')
return [p.search(split_def).group().strip()
for split_def in split_defs
if p.search(split_def) is not None]
def _clean_pos_text(pos_text):
"""Limit to just the word"""
p = re.compile('([A-z ]*)')
match = p.search(pos_text)
if match.group() is None:
raise (ValueError(f'Something wrong happened when extracting the part '
f'of speech. The extracted text is: {pos_text}'))
else:
return match.group().strip()
def _clean_word_name(word):
"""Cleans the text for a word name, returns None if no match
Prevents us from adding entries that are just prefixes of suffixes, e.g.
-phobia.
"""
p = re.compile('(^[\w]+[\w-]*[\w]+)')
match = p.search(word)
if match is None:
#Make sure we aren't excluding one letter words
p = re.compile('(^[\w]$)')
match = p.search(word)
if match is None:
return None
else:
return match.group(0)
else:
return match.group(0)
def _compile_alternate_spellings(left_content, word_name, word, base_word_,
                                 variant_word_set):
    """Search the page to add all the alternative spellings of a word

    Keyword arguments:
    left_content -- the parsed page section holding the dictionary entry
    word_name -- the canonical name scraped for this entry
    word -- the word as originally looked up
    base_word_ -- BaseWord row these spellings attach to
    variant_word_set -- spellings already present in the database

    Merriam-Webster sometimes stores this info in two parts, thus the adding
    of the words in the 'variants' section and also the 'alternate_forms'
    and 'other words' sections.
    """
    alternate_forms = left_content.find_all('span', {'class' : 'vg-ins'})
    variants = left_content.find_all('a', {'class' : 'va-link'})
    # The 'other words' anchor is not present on every entry page.
    other_word_section = left_content.find('div', {'id' : 'other-words-anchor'})
    if other_word_section:
        other_words = other_word_section.find_all('div', {'class' : 'uro'})
    else:
        other_words = []
    # Collect each spelling once; seed with the two names we already have.
    different_spellings = set()
    different_spellings.add(word_name)
    different_spellings.add(word)
    for variant in variants:
        different_spellings.add(variant.getText().strip())
    for alternate_form in alternate_forms:
        different_forms = alternate_form.find_all('span', {'class' : 'if'})
        for different_form in different_forms:
            different_spellings.add(different_form.getText().strip())
    for other_word in other_words:
        different_spellings.add(other_word.find('span', {'class' : 'ure'})
                                .getText().strip())
    # Only create rows for spellings not already in the database.
    different_spellings = [spelling for spelling in different_spellings
                           if spelling not in variant_word_set]
    for spelling in different_spellings:
        _, _ = (models.VariantWord.objects.get_or_create(base_word=base_word_,
                                                         name=spelling))
def _add_synonyms(left_content, base_word_, search_synonym):
    """Adds synonyms to database
    Keyword arguments:
    left_content -- the portion of the merriam-webster webpage that stores the
    pertinent information for building our entry
    base_word_ -- BaseWord object associated with the word we are looking up
    search_synonym -- tells us whether to lookup the synonyms or stow them in
    the SynonymsToLookUp table
    Adds synonyms listed on page, checks to see if words are in database,
    if they are not, call scrape_word() to add them and then add to database.
    The large issue with getting synonyms on Merriam-Webster is that sometimes
    Merriam-Webster's entry for a word does not have the synonym/antonym section
    that most entries do. However, there's another section that contains a list
    of synonyms. _scrape_main_synonym_section() handles the default synonym
    section, while _scrape_alternative_synonym_section() handles the alternative
    synonym section. They return a list that contains a tuple that stores a list
    of words to add to the dictionary and synonym table.
    """
    # Each scraper raises AttributeError when its section is missing from
    # the page; if both are absent the word simply has no synonyms listed.
    try:
        synonym_list = _scrape_main_synonym_section(left_content)
    except AttributeError:
        try:
            synonym_list = _scrape_alternative_synonym_section(left_content)
        except AttributeError:
            return
    # Either resolve synonyms now (recursive scraping) or defer them.
    if search_synonym:
        _create_synonyms(left_content, base_word_, synonym_list)
    else:
        _create_synonym_lookups(left_content, base_word_, synonym_list)
def _scrape_main_synonym_section(left_content):
    """Scrapes the main/default synonym section for a word.
    If there is no pos listed, use the one listed for the word
    Raises AttributeError (handled by the caller) when the section is
    absent, and ValueError when labels and lists cannot be paired up.
    """
    synonym_header = left_content.find('div',
                                       {'class' : 'synonyms_list'})
    synonym_labels = synonym_header.find_all('p',
                                             {'class' : 'function-label'})
    synonym_lists = synonym_header.find_all('p', {'class' : None})
    # Every label ('synonyms'/'antonyms', optionally with a pos) must pair
    # with exactly one word list.
    if len(synonym_labels) != len(synonym_lists):
        raise ValueError('There are an uneven number of labels and lists')
    synonym_list = []
    for label, s_list in zip(synonym_labels, synonym_lists):
        word_list = s_list.find_all('a')
        # NOTE(review): despite the name, these are the anchor tags
        # themselves, not their text -- callers invoke .getText() on each
        # element later.
        word_list_text = [word for word in word_list]
        pos_synonym_flag = label.getText().lower()
        synonym_list.append((pos_synonym_flag, word_list_text))
    return synonym_list
def _scrape_alternative_synonym_section(left_content):
    """Scrapes the alternative synonym listing
    Raises AttributeError (handled by the caller) when the
    'syns_discussion' section is absent from the page.
    """
    synonym_header = left_content.find('div',
                                       {'class' : 'syns_discussion'})
    synonym_lists = synonym_header.find_all('p', {'class' : 'syn'})
    synonym_list = []
    for s_list in synonym_lists:
        word_list = s_list.find_all('a')
        # As in the main section: the anchor tags themselves, not text.
        word_list_text = [word for word in word_list]
        #Only will list synonyms, so just add synonym as flag
        synonym_flag = 'synonyms: '
        synonym_list.append((synonym_flag, word_list_text))
    return synonym_list
def _create_synonyms(left_content, base_word_, synonym_list):
    """Creates synonyms for a word

    Keyword arguments:
    left_content -- page section (unused here; kept for a uniform call
    signature with _create_synonym_lookups)
    base_word_ -- BaseWord row the synonyms/antonyms attach to
    synonym_list -- (flag, anchor-tag list) tuples from the scrapers
    """
    # Extracts the leading word of the flag, e.g. 'synonyms' from
    # 'synonyms: ' or 'antonyms' from an 'antonyms ...' label.
    p = re.compile('(^[\w\-]*)')
    for (pos_synonym_flag, word_list) in synonym_list:
        for word in word_list:
            # Re-evaluated each pass because _handle_creating_synonyms may
            # have added new VariantWord rows on the previous iteration.
            variant_word_set = models.VariantWord.objects.values_list('name',
                                                            flat=True)
            word_text = _clean_word_name(word.getText().lower())
            # A word is never its own synonym/antonym.
            if word_text == base_word_.name:
                continue
            m = p.match(pos_synonym_flag)
            synonym_flag = m.group(1)
            if word_text not in variant_word_set:
                # Unknown word: scrape its entry first, then link to it.
                synonym_variant_word = _handle_creating_synonyms(word_text,
                                            variant_word_set, synonym_flag)
            else:
                synonym_variant_word = models.VariantWord.objects.all() \
                                                         .get(name=word_text)
            if synonym_flag == 'synonyms':
                _, _ = models.Synonym.objects \
                             .get_or_create(base_word=base_word_,
                                            synonym=synonym_variant_word)
            else:
                _, _ = models.Antonym.objects \
                             .get_or_create(base_word=base_word_,
                                            antonym=synonym_variant_word)
def _create_synonym_lookups(left_content, base_word_, synonym_list):
    """Stows away synonyms to lookup when we don't have to look them up now

    Keyword arguments:
    left_content -- page section (unused here; kept for a uniform call
    signature with _create_synonyms)
    base_word_ -- BaseWord row the deferred lookups attach to
    synonym_list -- (flag, anchor-tag list) tuples from the scrapers
    """
    # Extracts the leading word of the flag ('synonyms' or 'antonyms').
    p = re.compile('(^[\w\-]*)')
    for (pos_synonym_flag, word_list) in synonym_list:
        for word in word_list:
            word_text = _clean_word_name(word.getText().lower())
            # A word is never its own synonym/antonym.
            if word_text == base_word_.name:
                continue
            m = p.match(pos_synonym_flag)
            synonym_flag = m.group(1)
            is_synonym = synonym_flag == 'synonyms'
            _, _ = models.SynonymsToLookUp.objects \
                         .get_or_create(base_word=base_word_,
                                        lookup_word=word_text,
                                        is_synonym=is_synonym)
def _handle_creating_synonyms(word_text, variant_word_set, synonym_flag):
    """Adds synonym to db and returns the associated base word
    Keyword arguments:
    word_text -- the synonym/antonym listed to lookup
    variant_word_set -- list of all different spellings of words in the db
    synonym_flag -- 'synonyms' or 'antonyms'; used only for log output
    Sometimes a word will be listed as a synonym that and has an entry page that
    lists an alternative spelling that has its own page. If later on, a synonym
    for a different word lists an alternative spelling of the word with its own
    page, this can cause a failure to lookup a word successfully. For example,
    if we look up the word 'capricious,' it lists 'settled' as an antonym.
    'Settled' directs to the 'settle' entry that lists 'settling' as an
    alternative form of the word. The word 'precipitate' lists 'settlings' as a
    synonym. 'settlings' does not show up as an alternative form/spelling for
    'settle.' Thus, we would look up 'settlings,' which goes to the 'settling'
    page. When we try to add 'settling' to the database, there will be an error,
    because 'settling' was already added to the variant word set. Thus, we try
    to remove an 's' if the main spelling fails.
    """
    if synonym_flag == 'synonyms':
        msg = 'synonym'
    else:
        msg = 'antonym'
    print(f'looking up the {msg}: {word_text}')
    # Pause between requests -- presumably to rate-limit hits against the
    # website; confirm the delay matches its crawling policy.
    time.sleep(2)
    try:
        scrape_word(word_text)
    except IntegrityError:
        # See the docstring: retry with a trailing 's' stripped off.
        word_text = re.sub('s$', '', word_text)
        if word_text not in variant_word_set:
            scrape_word(word_text)
    return models.VariantWord.objects.all().get(name=word_text)
def _load_word_list(filename):
"""Reads file of words into list"""
with open(filename, 'r') as f:
return [line.strip() for line in f]
| StarcoderdataPython |
8111250 | <reponame>phnomcobra/valarie-content<gh_stars>0
#!/usr/bin/python
# Idle timeouts (seconds) after which unused handles are reaped by the
# watchdog workers below: 60s for file handles, 8 hours for processes.
FILE_HANDLE_TIME_OUT = 60
PROCESS_HANDLE_TIME_OUT = 60 * 60 * 8
import sys
import traceback
from subprocess import Popen, PIPE
from threading import Timer, Lock, Thread
from time import time
from Queue import Queue, Empty
from utils import sucky_uuid
ON_POSIX = 'posix' in sys.builtin_module_names
def enqueue_stdout(out, queue):
    """Copy lines from a process stdout pipe into *queue*, then close it."""
    while True:
        line = out.readline()
        if line == b'':
            break
        queue.put(line)
    out.close()
def enqueue_stderr(out, queue):
    """Copy lines from a process stderr pipe into *queue*, then close it."""
    for line in iter(lambda: out.readline(), b''):
        queue.put(line)
    out.close()
def process_sync(command, timeout = 10):
    """Run *command* synchronously with a kill timeout.

    Keyword arguments:
    command -- argv list (run directly) or string (run through the shell)
    timeout -- seconds before the process is killed (default 10)

    Returns (returncode, stdout, stderr); after a timeout kill,
    communicate() returns whatever output the process produced.
    """
    # isinstance() instead of type(...) == type([]): idiomatic and also
    # accepts list subclasses.
    if isinstance(command, list):
        shell = False
    else:
        shell = True
    process = Popen(command, \
                    stdout = PIPE, \
                    stderr = PIPE, \
                    shell = shell)
    kill_process = lambda p: p.kill()
    timer = Timer(timeout, kill_process, [process])
    try:
        timer.start()
        process_output_buffer, process_stderr_buffer = process.communicate()
    finally:
        # Always cancel so a finished process doesn't leave a live timer.
        timer.cancel()
    return process.returncode, process_output_buffer, process_stderr_buffer
# Registry of live subprocess handles keyed by uuid; all mutation happens
# under process_handles_lock.
process_handles = {}
process_handles_lock = Lock()
def create_process_handle(command):
    """Spawn *command* and register a handle for asynchronous interaction.

    Returns the new handle uuid. Starts one reader thread per output pipe
    (feeding per-handle queues) plus a watchdog that reaps the handle once
    it has been idle longer than PROCESS_HANDLE_TIME_OUT.
    """
    phduuid = sucky_uuid()
    # A list is executed directly; anything else goes through the shell.
    if type(command) == type([]):
        shell = False
    else:
        shell = True
    try:
        process_handles_lock.acquire()
        process_handles[phduuid] = {}
        # 'contact' is the last-activity timestamp the watchdog checks.
        process_handles[phduuid]["contact"] = time()
        process_handles[phduuid]["process"] = Popen(command, \
            stdout = PIPE, \
            stderr = PIPE, \
            stdin = PIPE, \
            bufsize = 1, \
            shell = shell, \
            close_fds = ON_POSIX)
        # Daemon reader threads drain the pipes so the child never blocks
        # on a full pipe buffer.
        process_handles[phduuid]["stdout queue"] = Queue()
        process_handles[phduuid]["stdout thread"] = Thread(target = enqueue_stdout, \
            args = (process_handles[phduuid]["process"].stdout, \
                    process_handles[phduuid]["stdout queue"]))
        process_handles[phduuid]["stdout thread"].daemon = True
        process_handles[phduuid]["stdout thread"].start()
        process_handles[phduuid]["stderr queue"] = Queue()
        process_handles[phduuid]["stderr thread"] = Thread(target = enqueue_stderr, \
            args = (process_handles[phduuid]["process"].stderr, \
                    process_handles[phduuid]["stderr queue"]))
        process_handles[phduuid]["stderr thread"].daemon = True
        process_handles[phduuid]["stderr thread"].start()
        Thread(target = process_handle_time_out_worker, args = (phduuid,)).start()
        process_handles_lock.release()
    except:
        # NOTE(review): if the failure happened before the dict entry was
        # created (e.g. in acquire()), this del raises KeyError and masks
        # the original error -- confirm and guard if that path is possible.
        del process_handles[phduuid]
        process_handles_lock.release()
        raise Exception(traceback.format_exc())
    return phduuid
def process_handle_status(phduuid):
    """Return the process's returncode (None while running); refreshes the
    handle's idle timestamp."""
    process_handles[phduuid]["contact"] = time()
    return process_handles[phduuid]["process"].poll()
def process_handle_kill(phduuid):
    """Forcibly kill the handle's process; refreshes the idle timestamp."""
    process_handles[phduuid]["contact"] = time()
    process_handles[phduuid]["process"].kill()
def process_handle_terminate(phduuid):
    """Politely terminate the handle's process; refreshes the idle timestamp."""
    process_handles[phduuid]["contact"] = time()
    process_handles[phduuid]["process"].terminate()
def process_handle_wait(phduuid):
    """Block until the handle's process exits; refreshes the idle timestamp."""
    process_handles[phduuid]["contact"] = time()
    process_handles[phduuid]["process"].wait()
def process_handle_send(phduuid, data):
    """Write *data* to the process's stdin and flush; refreshes the idle
    timestamp."""
    process_handles[phduuid]["contact"] = time()
    process_handles[phduuid]["process"].stdin.write(data)
    process_handles[phduuid]["process"].stdin.flush()
def process_handle_recv(phduuid):
    """Drain and return all buffered (stdout, stderr) bytes for the handle;
    refreshes the idle timestamp."""
    process_handles[phduuid]["contact"] = time()
    def _drain(output_queue):
        # Pull everything queued by the reader thread without blocking.
        buffered = bytearray()
        while True:
            try:
                buffered.extend(output_queue.get_nowait())
            except Empty:
                return buffered
    stdout = _drain(process_handles[phduuid]["stdout queue"])
    stderr = _drain(process_handles[phduuid]["stderr queue"])
    return stdout, stderr
def close_process_handle(phduuid):
    """Terminate the handle's process (best effort) and drop its entry."""
    try:
        process_handles_lock.acquire()
        try:
            process_handle_terminate(phduuid)
        except:
            # Best effort: the process may already be gone.
            pass
        del process_handles[phduuid]
        process_handles_lock.release()
    except:
        # Release before re-raising so the lock is never leaked; the
        # original traceback text is preserved in the new exception.
        process_handles_lock.release()
        raise Exception(traceback.format_exc())
def process_handle_time_out_worker(phduuid):
    """Watchdog: reap the process handle once it has been idle too long.

    Re-schedules itself every 60 seconds until the handle times out or
    disappears; any error (e.g. the handle was already closed) ends the
    watchdog silently.
    """
    try:
        if time() - process_handles[phduuid]["contact"] > PROCESS_HANDLE_TIME_OUT:
            close_process_handle(phduuid)
        else:
            # BUG FIX: threading.Timer takes the callable positionally; the
            # original passed the invalid keyword 'target=', which raised
            # TypeError into the bare except below and silently killed the
            # watchdog after its first cycle.
            Timer(60, process_handle_time_out_worker, args = (phduuid,)).start()
    except:
        pass
# Registry of open file handles keyed by uuid; all mutation happens under
# file_handles_lock.
file_handles = {}
file_handles_lock = Lock()
def file_handle_seek(fhduuid, position):
    """Seek the handle's file to *position*; refreshes the idle timestamp."""
    file_handles[fhduuid]["file"].seek(position)
    file_handles[fhduuid]["contact"] = time()
def file_handle_truncate(fhduuid, num_bytes):
    """Truncate the handle's file to *num_bytes*; refreshes the idle
    timestamp."""
    file_handles[fhduuid]["file"].truncate(num_bytes)
    file_handles[fhduuid]["contact"] = time()
def file_handle_read(fhduuid, num_bytes = None):
    """Read from the handle's file: everything when *num_bytes* is None,
    otherwise at most *num_bytes* bytes. Refreshes the idle timestamp."""
    file_handles[fhduuid]["contact"] = time()
    # 'is None' rather than '== None': identity is the idiomatic (and
    # override-proof) test for the None sentinel.
    if num_bytes is None:
        return file_handles[fhduuid]["file"].read()
    else:
        return file_handles[fhduuid]["file"].read(num_bytes)
def file_handle_write(fhduuid, data):
    """Write *data* to the handle's file; refreshes the idle timestamp."""
    file_handles[fhduuid]["file"].write(data)
    file_handles[fhduuid]["contact"] = time()
def file_handle_tell(fhduuid):
    """Return the current file position; refreshes the idle timestamp."""
    file_handles[fhduuid]["contact"] = time()
    return file_handles[fhduuid]["file"].tell()
def create_file_handle(filename, mode):
    """Open *filename* with *mode* and register a handle for it.

    Returns the new handle uuid. Also starts a watchdog thread that closes
    the handle after FILE_HANDLE_TIME_OUT seconds of inactivity.
    """
    fhduuid = sucky_uuid()
    try:
        file_handles_lock.acquire()
        file_handles[fhduuid] = {}
        # 'contact' is the last-activity timestamp the watchdog checks.
        file_handles[fhduuid]["contact"] = time()
        file_handles[fhduuid]["file"] = open(filename, mode)
        Thread(target = file_handle_time_out_worker, args = (fhduuid,)).start()
        file_handles_lock.release()
    except:
        # NOTE(review): if the failure happened before the dict entry was
        # created, this del raises KeyError and masks the original error.
        del file_handles[fhduuid]
        file_handles_lock.release()
        raise Exception(traceback.format_exc())
    return fhduuid
def close_file_handle(fhduuid):
    """Close the handle's file (best effort) and drop its entry."""
    try:
        file_handles_lock.acquire()
        try:
            file_handles[fhduuid]["file"].close()
        except:
            # Best effort: the file may already be closed.
            pass
        del file_handles[fhduuid]
        file_handles_lock.release()
    except Exception:
        # 'as e' was bound but never used; the formatted traceback below
        # already carries the error details.
        file_handles_lock.release()
        raise Exception(traceback.format_exc())
def file_read(filename):
    """Return the entire contents of *filename* as bytes."""
    with open(filename, "rb") as handle:
        return handle.read()
def file_write(filename, data):
    """Overwrite *filename* with *data* (bytes)."""
    with open(filename, "wb") as handle:
        handle.write(data)
def file_handle_time_out_worker(fhduuid):
    """Watchdog: close the file handle once it has been idle too long.

    Re-schedules itself every 60 seconds until the handle times out or
    disappears; any error (e.g. the handle was already closed) ends the
    watchdog silently.
    """
    try:
        if time() - file_handles[fhduuid]["contact"] > FILE_HANDLE_TIME_OUT:
            close_file_handle(fhduuid)
        else:
            # BUG FIX: threading.Timer takes the callable positionally; the
            # original passed the invalid keyword 'target=', which raised
            # TypeError into the bare except below and silently killed the
            # watchdog after its first cycle.
            Timer(60, file_handle_time_out_worker, args = (fhduuid,)).start()
    except:
        pass
3476420 | <reponame>mattwigway/sqmake<filename>sqmake/makefile.py
# Copyright 2020 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Represents an SQMake Makefile
"""
import yaml
import os.path
import sqlalchemy as sq
from logging import getLogger
from .circular_dependency_error import CircularDependencyError
from .task import Task, TaskFailedError
from .util import withdir
LOG = getLogger(__name__)
class Makefile(object):
    """An SQMake makefile: a registry of named tasks plus the target db and
    schema they run against."""
    def __init__ (self, echo=False):
        # echo is forwarded to SQLAlchemy's create_engine to log emitted SQL.
        self.tasks = {}
        self.echo = echo
    def _add_task (self, task_name, task):
        """Register *task* under *task_name*; '/' is reserved for tasks
        namespaced via _include()."""
        if task_name in self.tasks:
            raise KeyError(f'task {task_name} already exists!')
        if '/' in task_name:
            raise KeyError(f'Task name {task_name} includes reserved character /')
        self.tasks[task_name] = task
    def _include (self, namespace, makefile):
        'Include another makefile in this one'
        for task_name, task in makefile.tasks.items():
            # resolve internal references
            new_deps = []
            for dep in task.depends_on:
                if dep in makefile.tasks:
                    # Dependency points at a task in the included file, so
                    # qualify it with the include's namespace.
                    new_deps.append(f'{namespace}/{dep}')
                else:
                    # fully qualified
                    new_deps.append(dep)
            task.depends_on = new_deps
            # don't call _add_task because it checks for slashes
            self.tasks[f'{namespace}/{task_name}'] = task
    def run (self, task_name, engine=None):
        """Run *task_name* (recursively running its dependencies first).

        A non-meta task whose outputs already exist is skipped. Raises
        TaskFailedError when a task runs but fails to produce its outputs.
        """
        if engine is None:
            engine = sq.create_engine(self.db, echo=self.echo)
        task = self.tasks[task_name]
        # check if task needs to be run
        if task.metatask() or not task.exists(engine, self.schema):
            LOG.info(f'running task {task_name}')
            LOG.info('checking dependencies')
            for dependency in task.depends_on:
                self.run(dependency, engine=engine)
            task.run(engine, self.db, self.schema)
            if not task.metatask() and not task.exists(engine, self.schema):
                raise TaskFailedError(f'task {task_name} did not produce required outputs')
        else:
            LOG.info(f'{task_name} already complete, skipping')
    @staticmethod
    def from_yaml (filename, echo=False):
        """Build a Makefile from a YAML file, recursively loading includes.

        Included files are parsed relative to their own directory (chdir
        via withdir) and their tasks are namespaced as '<name>/<task>'.
        """
        with open(filename) as inf:
            parsed = yaml.safe_load(inf)
        makefile = Makefile(echo=echo)
        makefile.db = parsed['db'] if 'db' in parsed else None
        makefile.schema = parsed['schema'] if 'schema' in parsed else None
        if 'tasks' in parsed:
            for task in parsed['tasks']:
                makefile._add_task(task['name'], Task.from_yaml(task))
        dirname = os.path.dirname(filename)
        if 'includes' in parsed:
            for include in parsed['includes']:
                subfilename = os.path.join(dirname, include['file'])
                subdirname = os.path.dirname(subfilename)
                subfileonly = os.path.basename(subfilename)
                with withdir(subdirname):
                    submakefile = makefile.from_yaml(subfileonly)
                makefile._include(include['name'], submakefile)
        return makefile
| StarcoderdataPython |
9684946 | <reponame>MoZeWei/Data_Mining_Algorithm
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 4 15:18:19 2018
@author: justintimberlake
"""
import numpy as np
'''
class SVM:
def __init__(self):
pass
def GetTrainData(self,data_x,data_y):
pass
def Train(self,data_x,data_y):
pass
def Predict(self,data_x):
pass
def SMO(self):
pass
'''
def SimpleSMO(data,labels,C,toler,maxIter):
    """Simplified SMO optimizer for a linear SVM.

    Keyword arguments:
    data -- training samples, one row per sample
    labels -- class labels (+1/-1)
    C -- regularization bound on the alphas
    toler -- tolerance used when checking KKT violations
    maxIter -- consecutive change-free sweeps required before stopping

    Returns (b, alphas) for the trained classifier.
    """
    DataMat = np.mat(data)
    # BUG FIX: was np.at(labels).T -- numpy has no 'at'; np.mat intended.
    labelMat = np.mat(labels).T
    b = 0
    m,n = np.shape(DataMat)
    # BUG FIX: np.zeros takes the shape as a tuple; was np.zeros(m,1).
    alphas = np.mat(np.zeros((m,1)))
    iter_count = 0
    while (iter_count < maxIter):
        alphaPairsChanged = 0
        for i in range(m):
            # Decision value for sample i (np.multiply is element-wise);
            # plain float() replaces np.float (removed in NumPy >= 1.24).
            fXi = float(np.multiply(alphas,labelMat).T * (DataMat * DataMat[i,:].T)) + b
            Ei = fXi - float(labelMat[i])
            # Only optimize alphas that violate the KKT conditions.
            if ((labelMat[i] * Ei < -toler) and (alphas[i] < C)) or ((labelMat[i] * Ei > toler) and (alphas[i] > 0)):
                j = selectJrand(i,m)
                fXj = float(np.multiply(alphas,labelMat).T *(DataMat * DataMat[j,:].T)) + b
                Ej = fXj - float(labelMat[j])
                alphaIold = alphas[i].copy()
                alphaJold = alphas[j].copy()
                # Box bounds keeping alpha_i*y_i + alpha_j*y_j constant.
                if(labelMat[i] != labelMat[j]):
                    L = max(0,alphas[j] - alphas[i])
                    H = min(C,C + alphas[j] - alphas[i])
                else:
                    L = max(0,alphas[j] + alphas[i] - C)
                    H = min(C,alphas[i] + alphas[j])
                # BUG FIX: was 'if j == H' -- compared the index j to the
                # bound H instead of testing a degenerate box L == H.
                if L == H:
                    print('L == H')
                    continue
                eta = 2.0 * DataMat[i,:] * DataMat[j,:].T - DataMat[i,:] * DataMat[i,:].T - DataMat[j,:] * DataMat[j,:].T
                if eta >= 0:
                    print('eta >= 0')
                    continue
                alphas[j] -= labelMat[j] * (Ei - Ej) / eta
                alphas[j] = clipAlpha(alphas[j],H,L)
                if(np.abs(alphas[j] - alphaJold) < 0.00001):
                    print('j is not moving enough')
                    continue
                alphas[i] += labelMat[j] * labelMat[i] * (alphaJold - alphas[j])
                b1 = b - Ei - labelMat[i] * (alphas[i] - alphaIold) * DataMat[i,:] * DataMat[i,:].T - labelMat[j] * (alphas[j] - alphaJold) * DataMat[i,:] * DataMat[j,:].T
                b2 = b - Ej - labelMat[i] * (alphas[i] - alphaIold) * DataMat[i,:] * DataMat[j,:].T - labelMat[j] * (alphas[j] - alphaJold) * DataMat[j,:] * DataMat[j,:].T
                if(0 < alphas[i]) and (C > alphas[i]):
                    b = b1
                elif(0 < alphas[j]) and (C>alphas[j]):
                    b = b2
                else:
                    b = (b1 + b2)/2.0
                alphaPairsChanged += 1
                print('iter: %d i :%d ,pairs changed %d' % (iter_count,i,alphaPairsChanged))
        # Only a full sweep with no updates counts toward convergence.
        if(alphaPairsChanged == 0):
            iter_count += 1
        else:
            iter_count = 0
        print('iteration number:%d' %iter_count)
    return b,alphas
def loadDataSet(file):
    """Load a tab-separated dataset: two feature columns then the label.

    Keyword arguments:
    file -- path to the data file

    Returns (dataMat, labelMat) as plain Python lists of floats.
    """
    dataMat = []
    labelMat = []
    # 'with' guarantees the file is closed even if a line fails to parse.
    with open(file) as fr:
        for line in fr:
            lineArr = line.strip().split('\t')
            # float() instead of np.float: the np.float alias was deprecated
            # and removed in NumPy >= 1.24.
            dataMat.append([float(lineArr[0]), float(lineArr[1])])
            labelMat.append(float(lineArr[2]))
    return dataMat, labelMat
def selectJrand(i, m):
    """Return a random index in [0, m) that differs from i."""
    candidate = i
    while candidate == i:
        candidate = int(np.random.uniform(0, m))
    return candidate
def clipAlpha(aj, H, L):
    """Clamp the candidate alpha *aj* into the box [L, H]."""
    clipped = aj
    if clipped > H:
        clipped = H
    if clipped < L:
        clipped = L
    return clipped
#Platt SMO
class optStruct:
    """Shared state for the Platt SMO optimizer (linear variant)."""
    def __init__(self,DataMatIn,classlabels,C,toler):
        self.X = DataMatIn  # training sample matrix, one row per sample
        self.labelMat = classlabels  # column vector of +1/-1 labels
        self.C = C  # regularization bound on the alphas
        self.tol = toler  # KKT violation tolerance
        self.m = np.shape(DataMatIn)[0]  # number of samples
        self.alphas = np.mat(np.zeros((self.m,1)))  # one alpha per sample
        self.b = 0  # bias term
        # Error cache: column 0 flags validity, column 1 stores E_k.
        self.eCache = np.mat(np.zeros((self.m,2)))
def CalcEk(oS,k):
    """Return E_k = f(x_k) - y_k, the prediction error for sample k."""
    # BUG FIX: was oS.alpha -- optStruct defines the attribute 'alphas',
    # so this raised AttributeError. Also float() replaces np.float
    # (removed in NumPy >= 1.24).
    fxk = float(np.multiply(oS.alphas,oS.labelMat).T * (oS.X * oS.X[k,:].T)) + oS.b
    Ek = fxk - float(oS.labelMat[k])
    return Ek
def selectJ(i,oS,Ei):
    """Choose the second alpha j by maximizing the step size |Ei - Ej|.

    Falls back to a random j when the error cache holds no other valid
    entries yet. Returns (j, Ej).
    """
    maxK = -1
    maxDeltaE = 0
    Ej = 0
    # Mark Ei as a valid cache entry before scanning the cache.
    oS.eCache[i] = [1,Ei]
    # .A converts the matrix column to an ndarray for np.nonzero.
    validEcacheList = np.nonzero(oS.eCache[:,0].A)[0]
    if(len(validEcacheList) > 1):
        for k in validEcacheList:
            if k == i:
                continue
            Ek = CalcEk(oS,k)
            deltaE = abs(Ei - Ek)
            if(deltaE > maxDeltaE):
                maxK = k
                maxDeltaE = deltaE
                Ej = Ek
        return maxK,Ej
    else:
        # First pass: no usable cache yet, pick j at random.
        j= selectJrand(i,oS.m)
        Ej = CalcEk(oS,j)
        return j,Ej
def updateEk(oS,k):
    """Recompute E_k and store it as a valid entry in the error cache."""
    Ek = CalcEk(oS,k)
    oS.eCache[k] = [1,Ek]
def innerL(i,oS):
    """Inner loop of Platt SMO: try to optimize the pair (alphas[i], alphas[j]).

    Returns 1 when a pair of alphas was changed, 0 otherwise.
    """
    Ei = CalcEk(oS,i)
    # Proceed only when alphas[i] violates the KKT conditions (within tol).
    if((oS.labelMat[i] * Ei < -oS.tol) and (oS.alphas[i] < oS.C)) or ((oS.labelMat[i] * Ei > oS.tol) and (oS.alphas[i] > 0)):
        j,Ej = selectJ(i,oS,Ei)
        alphaIold = oS.alphas[i].copy()
        alphaJold = oS.alphas[j].copy()
        # Box bounds keeping alpha_i*y_i + alpha_j*y_j constant.
        if(oS.labelMat[i] != oS.labelMat[j]):
            L = max(0,oS.alphas[j] - oS.alphas[i])
            H = min(oS.C,oS.C + oS.alphas[j] - oS.alphas[i])
        else:
            L = max(0,oS.alphas[j] + oS.alphas[i] - oS.C)
            H = min(oS.C, oS.alphas[j] + oS.alphas[i])
        if L == H:
            print('L == H')
            return 0
        eta = 2.0 * oS.X[i,:] * oS.X[j,:].T - oS.X[i,:] * oS.X[i,:].T - oS.X[j,:] * oS.X[j,:].T
        if eta >= 0:
            print('eta >= 0')
            return 0
        oS.alphas[j] -= oS.labelMat[j] * (Ei - Ej)/eta
        oS.alphas[j] = clipAlpha(oS.alphas[j],H,L)
        updateEk(oS,j)
        if(abs(oS.alphas[j] - alphaJold) < 0.00001):
            # BUG FIX (message only): previously printed 'J is moving
            # enough' on the branch where j did NOT move enough.
            print('j is not moving enough')
            return 0
        oS.alphas[i] += oS.labelMat[j] * oS.labelMat[i] * (alphaJold - oS.alphas[j])
        # BUG FIX: the error cache must be refreshed for i after alphas[i]
        # changes; the original called updateEk(oS,j) a second time and
        # left E_i stale.
        updateEk(oS,i)
        b1 = oS.b - Ei - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.X[i,:] * oS.X[i,:].T - oS.labelMat[j] * (oS.alphas[j] - alphaJold) * oS.X[i,:] * oS.X[j,:].T
        b2 = oS.b - Ej - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.X[i,:] * oS.X[j,:].T - oS.labelMat[j] * (oS.alphas[j] - alphaJold) * oS.X[j,:] * oS.X[j,:].T
        if (0 < oS.alphas[i]) and (oS.C > oS.alphas[i]):
            oS.b = b1
        elif (0 < oS.alphas[j]) and (oS.C > oS.alphas[j]):
            oS.b = b2
        else:
            oS.b = (b1 + b2)/2.0
        return 1
    else:
        return 0
'''
部分解释:
关于if条件判断的理解:if ((oS.labelMat[i] * Ei < -oS.tol) and (oS.alphas[i] < oS.C)) or ((oS.labelMat[i] * Ei > oS.tol) and (oS.alphas[i] > 0))
首先,oS.tol是一个很小的小数,称作容错率,其存在是为了防止错误,我觉得类似于软间隔的e(松弛变量)
其次,完全可以将这个oS.tol换为0,我么就以换为0之后的条件来分析这个式子:
oS.labelMat[i] * Ei < 0 and (oS.alphas[i] < oS.C
拆开左边的式子:Yi*(fxi-Yi)=Yi*fxi-1<0, 从而有Yi*fxi<1。此时根据KKT条件,我们应该取alpha_i = C,但是右边显示alpha_i < C,所以违背了KKT条件
拆开右边的式子:Yi*(fxi-Yi)=Yi*fxi-1>0, 从而有Yi*fxi>1。此时根据KKT条件,我们应该取alpha_i = 0,但是右边显示alpha_i > C,所以违背了KKT条件
因此,此判断式是找出了违背了KKT条件的alpha
还有人问,为什么KKT条件有三个,此处只判断了两个?其实,此式确实判断了三个条件,只是合在了一起,下面是解释:
注意,oS.alphas[i] < oS.C包含了0<alpha_i<C和alpha_i=0两个条件(同理另一个也包含了alpha_i=C的情况),
所以alpha=0和alpha=C这两个KKT条件,被分别放在两个式子中判断了,0<alpha<C也被分成了两部分,这样三个条件就都有了判断
关于 L与H
https://www.cnblogs.com/pinard/p/6111471.html 评论33楼
'''
def SmoP(DataMatIn, classLabels, C, toler, maxIter, kTup = ('lin',0)):
    """Full Platt SMO: alternate full sweeps with non-bound-alpha sweeps.

    Returns (b, alphas). NOTE(review): kTup is accepted but unused by this
    linear implementation -- confirm callers expecting a kernelized SmoP.
    """
    oS = optStruct(np.mat(DataMatIn), np.mat(classLabels).transpose(),C,toler)
    iter_num = 0
    entireSet = True
    alphaPairsChanged = 0
    while(iter_num < maxIter) and ((alphaPairsChanged > 0) or (entireSet)):
        alphaPairsChanged = 0
        if entireSet:
            # Full sweep over every sample.
            for i in range(oS.m):
                alphaPairsChanged += innerL(i,oS)
                print("Fullset, iter_num: %s i:%s, pairs changed %s" %(iter_num,i,alphaPairsChanged))
            iter_num += 1
        else:
            # Sweep only the non-bound alphas (0 < alpha < C).
            nonBoundIs = np.nonzero((oS.alphas.A > 0) * (oS.alphas.A < C))[0]
            for i in nonBoundIs:
                alphaPairsChanged += innerL(i,oS)
                print("non_bound iter: %d i:%d, pairs changed %d" %(iter_num,i,alphaPairsChanged))
            # BUG FIX: was 'iter += 1' -- 'iter' is the builtin function
            # here, so this raised TypeError on the first non-bound sweep.
            iter_num += 1
        if entireSet:
            entireSet = False
        elif(alphaPairsChanged == 0):
            entireSet = True
        # BUG FIX: the print also referenced the builtin 'iter'.
        print("iteration number: %d" %iter_num)
    return oS.b, oS.alphas
def clacWs(alphas,dataArr,classLabels):
    """Compute the hyperplane normal w = sum_i alpha_i * y_i * x_i.

    Returns an (n, 1) ndarray, where n is the feature count.
    """
    X = np.mat(dataArr)
    labelMat = np.mat(classLabels).transpose()
    num_samples, num_features = np.shape(X)
    w = np.zeros((num_features, 1))
    # Accumulate each sample's weighted contribution to the normal vector.
    for idx in range(num_samples):
        w += np.multiply(alphas[idx] * labelMat[idx], X[idx, :].T)
    return w
def Classify_Data(dataArr, w_vector, b):
    """Classify each row of *dataArr* with the hyperplane (w_vector, b).

    Returns a list of +1/-1 labels (a zero score maps to -1).
    """
    dataMat = np.mat(dataArr)
    num_rows = np.shape(dataMat)[0]
    labels = []
    for row in range(num_rows):
        score = (dataMat[row] * np.mat(w_vector) + b).A[0][0]
        labels.append(1 if score > 0 else -1)
    return labels
# Demo/driver code: runs at import time and requires 'train.txt' in the
# working directory. NOTE(review): consider guarding with
# `if __name__ == '__main__':` so importing this module has no side effects.
dataArr,labelArr = loadDataSet('train.txt')
b,alphas = SmoP(dataArr,labelArr,0.6,0.001,40)
w_vector = clacWs(alphas,dataArr,labelArr)
result = Classify_Data(dataArr,w_vector,b)
#Add kernel support: Gaussian version of the radial basis function (RBF)
class New_optStruct:
    """SMO state with a precomputed kernel matrix K (kernelized variant)."""
    def __init__(self,DataMatIn,classlabels,C,toler,kTup):
        self.X = DataMatIn  # training sample matrix, one row per sample
        self.labelMat = classlabels  # column vector of +1/-1 labels
        self.C = C  # regularization bound on the alphas
        self.tol = toler  # KKT violation tolerance
        self.m = np.shape(DataMatIn)[0]  # number of samples
        self.alphas = np.mat(np.zeros((self.m,1)))  # one alpha per sample
        self.b = 0  # bias term
        self.eCache = np.mat(np.zeros((self.m,2)))  # error cache: [valid, E_k]
        # Precompute the full m-by-m kernel matrix, one column per sample.
        self.K = np.mat(np.zeros((self.m,self.m)))
        for i in range(self.m):
            self.K[:,i] = KernelTrans(self.X, self.X[i,:], kTup)
def KernelTrans(X,A,kTup):
    """Compute the kernel column k(X_j, A) for every row X_j of X.

    Keyword arguments:
    X -- m-by-n matrix of samples
    A -- a single 1-by-n sample (row vector)
    kTup -- ('lin', _) for the linear kernel, or ('rbf', sigma) for the
            Gaussian radial basis function

    Returns an m-by-1 matrix. Raises ValueError for an unknown kernel name.
    """
    m,n = np.shape(X)
    K = np.mat(np.zeros((m,1)))
    if kTup[0] == 'lin':
        K = X * A.T
    elif kTup[0] =='rbf':
        for j in range(m):
            deltaRow = X[j,:] - A
            K[j] = deltaRow * deltaRow.T
        # kTup[1] is the user-defined reach (how fast the kernel falls to 0).
        K = np.exp(K/(-1 * kTup[1] ** 2))
    else:
        # BUG FIX: previously printed a message and returned the scalar 0,
        # silently corrupting the caller's kernel matrix; fail loudly.
        raise ValueError("Kernel Name Error")
    return K
def innerL_Kernel(i,oS):
    """Inner loop of Platt SMO using the precomputed kernel matrix oS.K.

    Returns 1 when the pair (alphas[i], alphas[j]) was changed, 0 otherwise.
    NOTE(review): errors are still computed with CalcEk/selectJ (raw inner
    products); a fully kernelized path would use CalcEk_Kernel throughout,
    including inside selectJ/updateEk -- confirm intended behavior.
    """
    Ei = CalcEk(oS,i)
    if((oS.labelMat[i] * Ei < -oS.tol) and (oS.alphas[i] < oS.C)) or ((oS.labelMat[i] * Ei > oS.tol) and (oS.alphas[i] > 0)):
        j,Ej = selectJ(i,oS,Ei)
        alphaIold = oS.alphas[i].copy()
        alphaJold = oS.alphas[j].copy()
        if(oS.labelMat[i] != oS.labelMat[j]):
            L = max(0,oS.alphas[j] - oS.alphas[i])
            H = min(oS.C,oS.C + oS.alphas[j] - oS.alphas[i])
        else:
            L = max(0,oS.alphas[j] + oS.alphas[i] - oS.C)
            H = min(oS.C, oS.alphas[j] + oS.alphas[i])
        if L == H:
            print('L == H')
            return 0
        eta = 2.0 * oS.K[i,j] - oS.K[i,i] - oS.K[j,j]
        if eta >= 0:
            print('eta >= 0')
            return 0
        oS.alphas[j] -= oS.labelMat[j] * (Ei - Ej)/eta
        oS.alphas[j] = clipAlpha(oS.alphas[j],H,L)
        updateEk(oS,j)
        if(abs(oS.alphas[j] - alphaJold) < 0.00001):
            # BUG FIX (message only): printed 'J is moving enough' on the
            # branch where j did NOT move enough.
            print('j is not moving enough')
            return 0
        oS.alphas[i] += oS.labelMat[j] * oS.labelMat[i] * (alphaJold - oS.alphas[j])
        # BUG FIX: refresh the cache for i (was a duplicate updateEk(oS,j)).
        updateEk(oS,i)
        # BUG FIX: the bias updates used K[i,j] for every term; the first
        # term of b1 must use K[i,i] and the last term of b2 must use
        # K[j,j] (standard SMO bias update).
        b1 = oS.b - Ei - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.K[i,i] - oS.labelMat[j] * (oS.alphas[j] - alphaJold) * oS.K[i,j]
        b2 = oS.b - Ej - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.K[i,j] - oS.labelMat[j] * (oS.alphas[j] - alphaJold) * oS.K[j,j]
        if (0 < oS.alphas[i]) and (oS.C > oS.alphas[i]):
            oS.b = b1
        elif (0 < oS.alphas[j]) and (oS.C > oS.alphas[j]):
            oS.b = b2
        else:
            oS.b = (b1 + b2)/2.0
        return 1
    else:
        return 0
def CalcEk_Kernel(oS,k):
    """Return E_k = f(x_k) - y_k using the precomputed kernel column K[:,k]."""
    # BUG FIX: was oS.alpha -- the attribute is 'alphas'. float() replaces
    # np.float (removed in NumPy >= 1.24).
    fxk = float(np.multiply(oS.alphas,oS.labelMat).T * oS.K[:,k] + oS.b)
    Ek = fxk - float(oS.labelMat[k])
    return Ek
def RBF_SVM(k1 = 1.3):
    """Train and evaluate an RBF-kernel SVM on train.txt / test.txt.

    Keyword arguments:
    k1 -- default RBF reach; a smaller k1 needs more support vectors
          (too many or too few support vectors both hurt accuracy).

    NOTE(review): SmoP ignores its kTup argument (linear innerL); a
    kernelized SmoP using New_optStruct/innerL_Kernel is presumably
    intended here -- confirm.
    """
    dataArr,labelArr = loadDataSet('train.txt')
    print('Input the k1 of RBF:(default value is 1.3)')
    # BUG FIX: input() returns a string that was later used in arithmetic
    # (kTup[1] ** 2 -> TypeError). Convert it, keeping the default when the
    # user just presses enter.
    raw = input()
    if raw:
        k1 = float(raw)
    # BUG FIX: the original called smoP/kernelTrans/sign, none of which
    # exist; the defined names are SmoP, KernelTrans and np.sign.
    b,alphas = SmoP(dataArr,labelArr,200,0.0001,10000,('rbf',k1))
    dataMat = np.mat(dataArr)
    labelMat = np.mat(labelArr).transpose()
    svInd = np.nonzero(alphas.A > 0)[0]  # indices of the support vectors
    sVs = dataMat[svInd]
    labelSV = labelMat[svInd]
    print("there are %d support vectors" %(np.shape(sVs)[0]))
    m,n = np.shape(dataMat)
    errorCount = 0
    for i in range(m):
        kernelEval = KernelTrans(sVs,dataMat[i,:],('rbf',k1))
        predict = kernelEval.T * np.multiply(labelSV,alphas[svInd]) + b
        if np.sign(predict) != np.sign(labelArr[i]):
            errorCount += 1
    # BUG FIX: '%d' truncated the fractional error rate to 0.
    print('The Train Error Rate is %f' %(errorCount * 1.0 / m))
    dataArr,labelArr = loadDataSet('test.txt')
    errorCount = 0
    dataMat = np.mat(dataArr)
    labelMat = np.mat(labelArr).transpose()
    m,n = np.shape(dataMat)
    for i in range(m):
        kernelEval = KernelTrans(sVs,dataMat[i,:],('rbf',k1))
        predict = kernelEval.T * np.multiply(labelSV,alphas[svInd]) + b
        if np.sign(predict) != np.sign(labelArr[i]):
            errorCount += 1
    print('the test error rate is %f' %(errorCount * 1.0 / m))
| StarcoderdataPython |
47187 | from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Sequence, Type, TypeVar
from eth._utils.datatypes import Configurable
from eth.constants import ZERO_HASH32
from eth_typing import BLSSignature, Hash32
from eth_utils import humanize_hash
from ssz.hashable_container import HashableContainer, SignedHashableContainer
from ssz.sedes import List, bytes32, bytes96, uint64
from eth2.beacon.constants import (
EMPTY_SIGNATURE,
GENESIS_PARENT_ROOT,
ZERO_SIGNING_ROOT,
)
from eth2.beacon.typing import FromBlockParams, SigningRoot, Slot
from .attestations import Attestation
from .attester_slashings import AttesterSlashing
from .block_headers import BeaconBlockHeader
from .defaults import default_slot, default_tuple
from .deposits import Deposit
from .eth1_data import Eth1Data, default_eth1_data
from .proposer_slashings import ProposerSlashing
from .voluntary_exits import VoluntaryExit
if TYPE_CHECKING:
from eth2.beacon.db.chain import BaseBeaconChainDB # noqa: F401
TBeaconBlockBody = TypeVar("TBeaconBlockBody", bound="BeaconBlockBody")
class BeaconBlockBody(HashableContainer):
    """SSZ container for the operations carried inside a beacon block."""
    fields = [
        ("randao_reveal", bytes96),
        ("eth1_data", Eth1Data),
        ("graffiti", bytes32),
        ("proposer_slashings", List(ProposerSlashing, 16)),
        ("attester_slashings", List(AttesterSlashing, 1)),
        ("attestations", List(Attestation, 128)),
        ("deposits", List(Deposit, 16)),
        ("voluntary_exits", List(VoluntaryExit, 16)),
    ]
    @classmethod
    def create(
        cls: Type[TBeaconBlockBody],
        *,
        randao_reveal: bytes96 = EMPTY_SIGNATURE,
        eth1_data: Eth1Data = default_eth1_data,
        graffiti: Hash32 = ZERO_HASH32,
        proposer_slashings: Sequence[ProposerSlashing] = default_tuple,
        attester_slashings: Sequence[AttesterSlashing] = default_tuple,
        attestations: Sequence[Attestation] = default_tuple,
        deposits: Sequence[Deposit] = default_tuple,
        voluntary_exits: Sequence[VoluntaryExit] = default_tuple,
    ) -> TBeaconBlockBody:
        """Build a body; every field defaults to its empty/zero value."""
        return super().create(
            randao_reveal=randao_reveal,
            eth1_data=eth1_data,
            graffiti=graffiti,
            proposer_slashings=proposer_slashings,
            attester_slashings=attester_slashings,
            attestations=attestations,
            deposits=deposits,
            voluntary_exits=voluntary_exits,
        )
    @property
    def is_empty(self) -> bool:
        """Return True when every field still holds its default value."""
        return self == BeaconBlockBody.create()
    def __str__(self) -> str:
        return (
            f"randao_reveal={humanize_hash(self.randao_reveal)},"
            f" graffiti={humanize_hash(self.graffiti)},"
            f" proposer_slashings={self.proposer_slashings},"
            f" attester_slashings={self.attester_slashings},"
            f" attestations={self.attestations},"
            f" deposits={self.deposits},"
            f" voluntary_exits={self.voluntary_exits},"
        )
    def __repr__(self) -> str:
        return f"<{self.__class__.__name__}: {str(self)}>"
# Shared module-level default: a completely empty block body.
default_beacon_block_body = BeaconBlockBody.create()
# Bound type variable so BaseBeaconBlock factory methods return the subclass.
TBaseBeaconBlock = TypeVar("TBaseBeaconBlock", bound="BaseBeaconBlock")
class BaseBeaconBlock(SignedHashableContainer, Configurable, ABC):
    """Abstract signed beacon block: slot, chain links, body and signature."""
    fields = [
        ("slot", uint64),
        ("parent_root", bytes32),
        ("state_root", bytes32),
        ("body", BeaconBlockBody),
        ("signature", bytes96),
    ]
    @classmethod
    def create(
        cls: Type[TBaseBeaconBlock],
        *,
        slot: Slot = default_slot,
        parent_root: SigningRoot = ZERO_SIGNING_ROOT,
        state_root: Hash32 = ZERO_HASH32,
        body: BeaconBlockBody = default_beacon_block_body,
        signature: BLSSignature = EMPTY_SIGNATURE,
    ) -> TBaseBeaconBlock:
        """Build a block; every field defaults to its empty/zero value."""
        return super().create(
            slot=slot,
            parent_root=parent_root,
            state_root=state_root,
            body=body,
            signature=signature,
        )
    def __str__(self) -> str:
        return (
            f"[signing_root]={humanize_hash(self.signing_root)},"
            f" [hash_tree_root]={humanize_hash(self.hash_tree_root)},"
            f" slot={self.slot},"
            f" parent_root={humanize_hash(self.parent_root)},"
            f" state_root={humanize_hash(self.state_root)},"
            f" body=({self.body}),"
            f" signature={humanize_hash(self.signature)}"
        )
    @property
    def is_genesis(self) -> bool:
        """Return True when this block's parent root is the genesis sentinel."""
        return self.parent_root == GENESIS_PARENT_ROOT
    @property
    def header(self) -> BeaconBlockHeader:
        """Return the block header: same metadata with the body hashed down
        to its tree root."""
        return BeaconBlockHeader.create(
            slot=self.slot,
            parent_root=self.parent_root,
            state_root=self.state_root,
            body_root=self.body.hash_tree_root,
            signature=self.signature,
        )
    @classmethod
    @abstractmethod
    def from_root(
        cls, root: SigningRoot, chaindb: "BaseBeaconChainDB"
    ) -> "BaseBeaconBlock":
        """
        Return the block denoted by the given block root.
        """
        ...
    def __repr__(self) -> str:
        return f"<{self.__class__.__name__}: {str(self)}>"
# Type var for precise classmethod return types on BeaconBlock and subclasses.
TBeaconBlock = TypeVar("TBeaconBlock", bound="BeaconBlock")
class BeaconBlock(BaseBeaconBlock):
    """Concrete beacon block with ``from_*`` constructors for common sources."""

    # Subclasses may substitute a different SSZ body container here; all body
    # construction below goes through this attribute.
    block_body_class = BeaconBlockBody

    @classmethod
    def from_root(
        cls, root: SigningRoot, chaindb: "BaseBeaconChainDB"
    ) -> "BeaconBlock":
        """
        Return the block denoted by the given block ``root``.
        """
        block = chaindb.get_block_by_root(root, cls)
        # Re-create the body field-by-field through ``block_body_class`` so
        # subclasses always end up with their own body container type.
        body = cls.block_body_class.create(
            randao_reveal=block.body.randao_reveal,
            eth1_data=block.body.eth1_data,
            graffiti=block.body.graffiti,
            proposer_slashings=block.body.proposer_slashings,
            attester_slashings=block.body.attester_slashings,
            attestations=block.body.attestations,
            deposits=block.body.deposits,
            voluntary_exits=block.body.voluntary_exits,
        )
        return cls.create(
            slot=block.slot,
            parent_root=block.parent_root,
            state_root=block.state_root,
            body=body,
            signature=block.signature,
        )

    @classmethod
    def from_parent(
        cls: Type[TBeaconBlock],
        parent_block: "BaseBeaconBlock",
        block_params: FromBlockParams,
    ) -> TBeaconBlock:
        """
        Initialize a new block with the ``parent_block`` as the block's
        previous block root.

        When ``block_params.slot`` is ``None`` the slot defaults to the one
        right after the parent's.
        """
        if block_params.slot is None:
            slot = parent_block.slot + 1
        else:
            slot = block_params.slot
        return cls.create(
            slot=slot,
            parent_root=parent_block.signing_root,
            state_root=parent_block.state_root,
            body=cls.block_body_class.create(),
        )

    @classmethod
    def convert_block(
        cls: Type[TBeaconBlock], block: "BaseBeaconBlock"
    ) -> TBeaconBlock:
        """Re-wrap ``block``'s fields into this (sub)class."""
        return cls.create(
            slot=block.slot,
            parent_root=block.parent_root,
            state_root=block.state_root,
            body=block.body,
            signature=block.signature,
        )

    @classmethod
    def from_header(
        cls: Type[TBeaconBlock], header: BeaconBlockHeader
    ) -> TBeaconBlock:
        """Build a block from ``header`` with an empty (default) body.

        The ``body_root`` carried by the header cannot be inverted back into a
        full body, so the body starts out empty.
        """
        return cls.create(
            slot=header.slot,
            parent_root=header.parent_root,
            state_root=header.state_root,
            signature=header.signature,
            # Fixed: previously ``BeaconBlockBody()`` — direct instantiation
            # bypasses the ``create()`` factory used everywhere else and
            # ignores ``block_body_class`` overrides in subclasses.
            body=cls.block_body_class.create(),
        )
| StarcoderdataPython |
9648252 | <reponame>DebVortex/suggestion_bot
import sys
import os
import re
from discord.ext.commands import Bot
from database.models import Suggestion
# Captures the bracketed summary from a suggestion, e.g.
# "[My idea] longer details" -> summary group == "My idea".
SUMMARY_REGEX = r'.*\[(?P<summary>.*)\].*'
from .utils import get_votes, UPVOTE, DOWNVOTE
class SuggestionBot(Bot):
    """Discord bot that validates suggestion messages and mirrors their votes.

    Messages posted to the watched channels must match ``SUMMARY_REGEX``
    (``[summary] details``); anything else is deleted and the author is
    notified via DM.  Reaction changes on accepted suggestions are written
    back to the ``Suggestion`` database model.
    """

    def __init__(self, logger, *args, **kwargs):
        """Read configuration from environment variables and set up the bot.

        Args:
            logger: logger used for all bot diagnostics; also stored on the
                instance.  Remaining args/kwargs are forwarded to ``Bot``.
        """
        logger.debug('Start SuggestionBot.__init__')
        super().__init__(*args, **kwargs)
        self.logger = logger

        # The deletion-notice template is essential for user feedback;
        # refuse to start without it.
        self.logger.debug('Loading DELETION_MESSAGE template.')
        message_template_path = os.getenv('DELETION_MESSAGE')
        if not message_template_path:
            self.logger.warning('No DELETION_MESSAGE defined, falling back to default.')
            message_template_path = 'deletion_message.txt'
        try:
            with open(message_template_path, 'r') as message_template_file:
                self.message_template = message_template_file.read()
            self.logger.info('Loaded DELETION_MESSAGE template.')
        except Exception as e:
            self.logger.critical(f'Could not load message_template {message_template_path}')
            self.logger.critical(f'Got following error: {e}')
            self.logger.critical(f'Exiting.')
            sys.exit(1)

        self.logger.debug('Loading SUMMARY_MAX_LENGTH.')
        max_length = os.getenv('SUMMARY_MAX_LENGTH')
        if not max_length:
            self.logger.warning('No SUMMARY_MAX_LENGTH defined, falling back to default.')
            max_length = 200
        # BUG FIX: os.getenv returns a string, so without int() the later
        # ``len(summary) > self.max_length`` comparison raises TypeError
        # whenever the environment variable is set.
        self.max_length = int(max_length)
        self.logger.info(f'Setting summary max length to {self.max_length}')

        self.logger.debug('Loading WATCH_CHANNELS.')
        channels = os.getenv('WATCH_CHANNELS')
        if not channels:
            self.logger.warning('No WATCH_CHANNELS defined, falling back to default.')
            channels = 'suggestion'
        self.logger.info(f'Setting channels to watch to {channels}')
        # Semicolon-separated list of channel names to moderate.
        self.channels = channels.split(';')

        self.logger.debug('Loading SUMMARY_CHANNEL')
        self.SUMMARY_CHANNEL = os.getenv('SUMMARY_CHANNEL')
        self.logger.info(f"Set SUMMARY_CHANNEL to '{self.SUMMARY_CHANNEL}'")

        self.logger.debug('Compiling RegEx')
        self.check = re.compile(SUMMARY_REGEX)
        self.logger.info('Setup of RegEx complete.')
        self.logger.debug('Finished SuggestionBot.__init__')

    def get_decline_reason(self, match):
        """Return a human-readable rejection reason, or ``None`` when acceptable.

        Args:
            match: result of ``self.check.match`` on the message content
                (``None`` when the message did not match the format).
        """
        if not match:
            return 'Incorrect message format.'
        summary = match.group('summary')
        if len(summary) > self.max_length:
            # Message typo fixed: was "Summary is to long."
            return 'Summary is too long.'

    async def decline_message(self, message, decline_reason):
        """Delete ``message`` and DM the author the filled-in rejection template."""
        await message.delete()
        dm_channel = await message.author.create_dm()
        await dm_channel.send(
            self.message_template.format(
                channel=message.channel,
                max_length=self.max_length,
                orig_message=message.content,
                reason=decline_reason
            )
        )
        self.logger.info(f'Message from {message.author} in {message.channel}: {message.content}')

    async def on_raw_reaction_add(self, payload):
        """Re-count votes when a reaction is added."""
        await self.handle_reaction_change(payload)

    async def on_raw_reaction_remove(self, payload):
        """Re-count votes when a reaction is removed."""
        await self.handle_reaction_change(payload)

    async def on_raw_reaction_clear(self, payload):
        """Re-count votes when all reactions are cleared."""
        await self.handle_reaction_change(payload)

    async def handle_reaction_change(self, payload):
        """Synchronise the up/down vote counts of a stored suggestion.

        Messages that are not tracked in the database are ignored.
        """
        self.logger.info(f'Got reaction change for message {payload.message_id}')
        try:
            suggestion = Suggestion.get(Suggestion.discord_id == payload.message_id)
            channel = await self.fetch_channel(payload.channel_id)
            message = await channel.fetch_message(payload.message_id)
            up_votes = get_votes(message.reactions, UPVOTE)
            down_votes = get_votes(message.reactions, DOWNVOTE)
            if suggestion.up_votes == up_votes and suggestion.down_votes == down_votes:
                self.logger.info(f'No change in votes for message {payload.message_id}')
                return
            suggestion.set_votes(up_votes, down_votes)
            self.logger.info(f'Updated votes for message {payload.message_id}: {up_votes}x{UPVOTE} and {down_votes}x{DOWNVOTE}')
        except Suggestion.DoesNotExist:
            self.logger.info(f'Message {payload.message_id} not in database.')

    def add_command(self, command):
        """Register a command, logging every command except the built-in help."""
        if command.name != 'help':
            self.logger.info(f'Loading command: {command.name}')
        super().add_command(command)

    def accept_message(self, message, match):
        """Persist an accepted suggestion, skipping messages already stored."""
        if not Suggestion.filter(discord_id=message.id).count():
            summary = match.group('summary')
            Suggestion.create(
                discord_id=message.id,
                channel_id=message.channel.id,
                guild_id=message.guild.id,
                summary=summary
            )
            self.logger.info(f'Saved message with ID {message.id} in database.')
            return
        self.logger.info(f'Message with ID {message.id} already in database.')

    async def on_ready(self):
        """Log once the gateway connection is established."""
        self.logger.info(f'Logged on as {self.user}!')

    async def on_message(self, message):
        """Validate incoming messages and accept or decline them."""
        if self.user == message.author:
            # Do not react to messages of the bot
            return
        if message.channel.name == self.SUMMARY_CHANNEL:
            # The summary channel is command-driven; delegate to Bot's
            # command processing instead of moderating.
            await super().on_message(message)
            return
        self.logger.info(f'Got message from {message.author} in {message.channel}')
        if message.channel.name not in self.channels:
            self.logger.info('Ignoring message, as its not in a channel to watch.')
            return
        match = self.check.match(message.content)
        decline_reason = self.get_decline_reason(match)
        # NOTE(review): message.type == 18 bypasses validation — presumably a
        # specific discord MessageType that should always be accepted; confirm.
        if not decline_reason or message.type == 18:
            self.logger.info(f'Message from {message.author} accepted.')
            self.accept_message(message, match)
            return
        await self.decline_message(message, decline_reason)
| StarcoderdataPython |
12849427 | from django.conf.urls import patterns, url
from quotes import views
from views import *
# Legacy Django URL configuration (pre-1.10 ``patterns()`` style).
urlpatterns = patterns('',
    # NOTE(review): the '^sdf$' path looks like a leftover debug route —
    # confirm whether it is still needed.
    url(r'^sdf$', index, name='index'),
    url(r'^$', GameView.as_view(), name='game'),
    url(r'^post$', AnswerView.as_view(), name='answer'),
    url(r'^edit$', QuoteUpdate.as_view(), name='update'),
)
| StarcoderdataPython |
5134030 | <filename>test.py
### Source code / logs
import numpy as np
import os
# Silence TensorFlow C++ logging (3 = errors only); must be set before TF loads.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from distutils.version import StrictVersion
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
# Make the TF models/research checkout importable (object_detection lives there).
sys.path.append("..")
from object_detection.utils import ops as utils_ops
if StrictVersion(tf.__version__) < StrictVersion('1.12.0'):
    raise ImportError('Please upgrade your TensorFlow installation to v1.12.*.')
# NOTE(review): this re-import rebinds ``tf`` to the TF1 compatibility API,
# shadowing the plain ``tensorflow`` import above.
import tensorflow.compat.v1 as tf
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
# # Model preparation
# ## Variables
#
# Any model exported using the `export_inference_graph.py` tool can be loaded here simply by changing `PATH_TO_FROZEN_GRAPH` to point to a new .pb file.
# What model to download.
MODEL_NAME='output/t9'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '//frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
# NOTE(review): os.path.join drops 'F://Trashnet//Output' because the second
# argument starts with a separator, and this constant is never used later
# (the label map path is hard-coded below) — confirm before relying on it.
PATH_TO_LABELS=os.path.join('F://Trashnet//Output', '//label_map.pbtxt')
# Load the frozen graph once; all inference below reuses ``detection_graph``.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
# ## Loading label map
# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine
#category_index = {1: {'id': 1, 'name': 'plastic'}}
category_index = label_map_util.create_category_index_from_labelmap('F:/Trashnet/Output/label_map.pbtxt', use_display_name=True)
def load_image_into_numpy_array(image):
    """Flatten a PIL image's pixel data into an (H, W, 3) uint8 numpy array."""
    width, height = image.size
    flat_pixels = np.array(image.getdata())
    return flat_pixels.reshape((height, width, 3)).astype(np.uint8)
# # Detection
PATH_TO_TEST_IMAGES_DIR = 'F://Plastic//val'
#TEST_IMAGE_PATHS = [ os.path.join(PATH_TO_TEST_IMAGES_DIR, '1.jpg') for i in range(3, 4) ]
# Expects the validation images to be named 1.jpg ... 24.jpg.
TEST_IMAGE_PATHS = [ os.path.join(PATH_TO_TEST_IMAGES_DIR, '{0}.jpg'.format(i)) for i in range(1, 25) ]
# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)
def run_inference_for_single_image(image, graph):
    """Run one forward pass of the detection graph on a single batched image.

    Args:
        image: numpy array with a leading batch dimension of 1 — the code
            indexes image.shape[1]/[2] as height/width.
        graph: ``tf.Graph`` containing the imported frozen detection model.

    Returns:
        Dict with 'num_detections' (int), 'detection_boxes', 'detection_scores',
        'detection_classes' (batch dimension stripped) and optionally
        'detection_masks'.
    """
    with graph.as_default():
        # NOTE: a new Session per call is simple but slow; fine for a test script.
        with tf.Session() as sess:
            # Get handles to input and output tensors
            ops = tf.get_default_graph().get_operations()
            all_tensor_names = {output.name for op in ops for output in op.outputs}
            tensor_dict = {}
            for key in [
                'num_detections', 'detection_boxes', 'detection_scores',
                'detection_classes', 'detection_masks'
            ]:
                tensor_name = key + ':0'
                if tensor_name in all_tensor_names:
                    tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
                        tensor_name)
            if 'detection_masks' in tensor_dict:
                # The following processing is only for single image
                detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
                detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
                # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
                real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
                detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
                detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
                detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
                    detection_masks, detection_boxes, image.shape[1], image.shape[2])
                # Binarize masks at a 0.5 threshold.
                detection_masks_reframed = tf.cast(
                    tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                # Follow the convention by adding back the batch dimension
                tensor_dict['detection_masks'] = tf.expand_dims(
                    detection_masks_reframed, 0)
            image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
            # Run inference
            output_dict = sess.run(tensor_dict,
                                   feed_dict={image_tensor: image})
            # all outputs are float32 numpy arrays, so convert types as appropriate
            output_dict['num_detections'] = int(output_dict['num_detections'][0])
            output_dict['detection_classes'] = output_dict[
                'detection_classes'][0].astype(np.uint8)
            output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
            output_dict['detection_scores'] = output_dict['detection_scores'][0]
            if 'detection_masks' in output_dict:
                output_dict['detection_masks'] = output_dict['detection_masks'][0]
    return output_dict
# Run detection on every test image and save an annotated copy per image.
for image_path in TEST_IMAGE_PATHS:
    image = Image.open(image_path)
    print(image_path)
    # the array based representation of the image will be used later in order to prepare the
    # result image with boxes and labels on it.
    image_np = load_image_into_numpy_array(image)
    # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
    image_np_expanded = np.expand_dims(image_np, axis=0)
    # Actual detection.
    output_dict = run_inference_for_single_image(image_np_expanded, detection_graph)
    # Visualization of the results of a detection.
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        output_dict['detection_boxes'],
        output_dict['detection_classes'],
        output_dict['detection_scores'],
        category_index,
        instance_masks=output_dict.get('detection_masks'),
        use_normalized_coordinates=True,
        line_thickness=8)
    plt.figure(figsize=IMAGE_SIZE)
    plt.imshow(image_np)
    img_folder = MODEL_NAME+'/image'
    if not os.path.exists(img_folder):
        os.makedirs(img_folder)
    # NOTE(review): split('\\') assumes a backslash in the path, but the paths
    # above are built with os.path.join — on non-Windows this indexing fails.
    plt.savefig(img_folder + '/' + image_path.split('\\')[1])
    #plt.show()
134539 | from time import sleep
from pycrunch_trace.client.api import trace
def alternative_ways_to_trace():
    """Print a short pointer to the two supported ways of recording a trace."""
    sleep(0.25)
    messages = (
        'You can use Trace object to manually start and stop tracing',
        '    Or by applying @trace decorator to the method',
        '    See examples bellow',
    )
    for message in messages:
        print(message)
def example_without_decorators():
    """Record a trace by driving the ``Trace`` object manually."""
    from pycrunch_trace.client.api import Trace
    tracer = Trace()
    # Everything executed between start() and stop() ends up in the recording.
    tracer.start('recording_name')
    # NOTE(review): code_under_trace/another_code_to_trace are not defined in
    # this file — they are illustrative placeholders, so running this example
    # as-is raises NameError.
    code_under_trace()
    another_code_to_trace()
    tracer.stop()
@trace
def example_with_decorator():
    """Trace this function; the recording is named after the function itself."""
    # Recording will be named the same as the method name
    pass
@trace('this_is_custom_name')
def example_with_custom_name():
    """Trace this function under an explicitly chosen recording name."""
    pass
219319 | <filename>send_non_tls.py<gh_stars>0
import pika
# NOTE(review): ``ssl`` is imported but never used in this non-TLS variant —
# presumably left over from the TLS version of the script.
import ssl

# Plain-text (non-TLS) connection to a local RabbitMQ broker.
credentials = pika.PlainCredentials('test', 'test')
parameters = pika.ConnectionParameters(
    host='localhost',
    port=5672,
    virtual_host='/',
    credentials=credentials
)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
# Declaring the queue is idempotent; guarantees 'hello' exists before publishing.
channel.queue_declare(queue='hello')
# Default exchange ('') routes directly to the queue named by routing_key.
channel.basic_publish(exchange='',
                      routing_key='hello',
                      body='Hello World!')
print(" [x] Sent 'Hello World!'")
connection.close()
| StarcoderdataPython |
1901614 | <reponame>ska-sa/katsdpscripts<filename>rts_deploy/fabfile.py<gh_stars>0
import rts_dc
import rts_imager
import rts_timeplot
| StarcoderdataPython |
5020126 | #!/usr/bin/env python
"""Tests for the hunt database api."""
import re
from absl.testing import absltest
from grr_response_server.databases import db_test_utils
class TestOffsetAndCountTest(db_test_utils.QueryTestHelpersMixin,
                             absltest.TestCase):
  """Sanity checks for the DoOffsetAndCountTest helper itself."""

  def testDoesNotRaiseWhenWorksAsExpected(self):
    data = range(10)

    def FetchRange(offset, count):
      return data[offset:offset + count]

    # A correct paging implementation must pass without raising.
    self.DoOffsetAndCountTest(lambda: data, FetchRange, error_desc="foo")

  def testRaisesWhenDoesNotWorkAsExpected(self):
    data = range(10)

    def BrokenFetchRange(offset, count):
      # Deliberate bug: pretend there is nothing beyond offset 5.
      return [] if offset > 5 else data[offset:offset + count]

    expected = re.escape(
        "Results differ from expected (offset 6, count 1, foo): [] vs [6]")
    with self.assertRaisesRegex(AssertionError, expected):
      self.DoOffsetAndCountTest(
          lambda: data, BrokenFetchRange, error_desc="foo")
class TestFilterCombinations(db_test_utils.QueryTestHelpersMixin,
                             absltest.TestCase):
  """Self-tests for the DoFilterCombinationsTest helper."""

  def testDoesNotRaiseWhenWorkingAsExpected(self):

    def FetchFn(bigger_than_3_only=None, less_than_7_only=None, even_only=None):
      # Reference implementation: honors every filter flag correctly.
      result = []
      for i in range(10):
        if bigger_than_3_only and i <= 3:
          continue
        if less_than_7_only and i >= 7:
          continue
        if even_only and i % 2 != 0:
          continue
        result.append(i)
      return result

    self.DoFilterCombinationsTest(
        FetchFn,
        dict(bigger_than_3_only=True, less_than_7_only=True, even_only=True),
        error_desc="foo")

  def testRaisesWhenDoesNotWorkAsExpected(self):

    def FetchFn(bigger_than_3_only=None, less_than_7_only=None, even_only=None):
      result = []
      for i in range(10):
        # This line introduces a bug.
        if bigger_than_3_only and less_than_7_only and i == 4:
          continue
        if bigger_than_3_only and i <= 3:
          continue
        if less_than_7_only and i >= 7:
          continue
        if even_only and i % 2 != 0:
          continue
        result.append(i)
      return result

    # The helper must detect the bug above and report the exact mismatch.
    with self.assertRaisesRegex(
        AssertionError,
        re.escape(
            "Results differ from expected "
            "({'bigger_than_3_only': True, 'less_than_7_only': True}, foo): "
            "[5, 6] vs [4, 5, 6]")):
      self.DoFilterCombinationsTest(
          FetchFn,
          dict(bigger_than_3_only=True, less_than_7_only=True, even_only=True),
          error_desc="foo")
class TestFilterCombinationsAndOffsetCountTest(
    db_test_utils.QueryTestHelpersMixin, absltest.TestCase):
  """Self-tests for the combined filter + offset/count helper."""

  def testDoesNotRaiseWhenWorksAsExpected(self):

    def FetchFn(offset,
                count,
                bigger_than_3_only=None,
                less_than_7_only=None,
                even_only=None):
      # Reference implementation: filter first, then page correctly.
      result = []
      for i in range(10):
        if bigger_than_3_only and i <= 3:
          continue
        if less_than_7_only and i >= 7:
          continue
        if even_only and i % 2 != 0:
          continue
        result.append(i)
      return result[offset:offset + count]

    self.DoFilterCombinationsAndOffsetCountTest(
        FetchFn,
        dict(bigger_than_3_only=True, less_than_7_only=True, even_only=True),
        error_desc="foo")

  def testRaisesWhenDoesNotWorkAsExpected(self):

    def FetchFn(offset,
                count,
                bigger_than_3_only=None,
                less_than_7_only=None,
                even_only=None):
      del offset  # Unused.
      result = []
      for i in range(10):
        if bigger_than_3_only and i <= 3:
          continue
        if less_than_7_only and i >= 7:
          continue
        if even_only and i % 2 != 0:
          continue
        result.append(i)
      # An intentionally buggy line.
      # Should have been: result[offset:offset + count]
      return result[0:count]

    # The helper must detect the ignored offset and report the exact mismatch.
    with self.assertRaisesRegex(
        AssertionError,
        re.escape("Results differ from expected "
                  "(offset 1, count 1, {'bigger_than_3_only': True}, foo): "
                  "[4] vs [5]")):
      self.DoFilterCombinationsAndOffsetCountTest(
          FetchFn,
          dict(bigger_than_3_only=True, less_than_7_only=True, even_only=True),
          error_desc="foo")
# Standard absl test entry point.
if __name__ == "__main__":
  absltest.main()
| StarcoderdataPython |
11313092 | # python3
# Copyright 2021 InstaDeep Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MADQN trainer implementation."""
import copy
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import reverb
import sonnet as snt
import tensorflow as tf
import tree
import trfl
from acme.tf import utils as tf2_utils
from acme.utils import loggers
import mava
from mava import types as mava_types
from mava.systems.tf.variable_utils import VariableClient
from mava.utils import training_utils as train_utils
from mava.utils.sort_utils import sort_str_num
# Presumably configures TF to grow GPU memory on demand (see
# mava.utils.training_utils); runs at import time, before any TF op.
train_utils.set_growing_gpu_memory()
class MADQNTrainer(mava.Trainer):
    """MADQN trainer.

    This is the trainer component of a MADQN system. IE it takes a dataset as input
    and implements update functionality to learn from this dataset.
    """

    def __init__(
        self,
        agents: List[str],
        agent_types: List[str],
        value_networks: Dict[str, snt.Module],
        target_value_networks: Dict[str, snt.Module],
        optimizer: Union[snt.Optimizer, Dict[str, snt.Optimizer]],
        discount: float,
        target_averaging: bool,
        target_update_period: int,
        target_update_rate: float,
        dataset: tf.data.Dataset,
        observation_networks: Dict[str, snt.Module],
        target_observation_networks: Dict[str, snt.Module],
        variable_client: VariableClient,
        counts: Dict[str, Any],
        agent_net_keys: Dict[str, str],
        max_gradient_norm: float = None,
        logger: loggers.Logger = None,
        learning_rate_scheduler_fn: Optional[Dict[str, Callable[[int], None]]] = None,
    ):
        """Initialise MADQN trainer.

        Args:
            agents: agent ids, e.g. "agent_0".
            agent_types: agent types, e.g. "speaker" or "listener".
            value_networks: value networks for each agents in
                the system.
            target_value_networks: target value networks.
            optimizer: optimizer(s) for updating policy networks.
            discount: discount factor for TD updates.
            target_averaging: whether to use polyak averaging for target network
                updates.
            target_update_period: number of steps before target networks are
                updated.
            target_update_rate: update rate when using averaging.
            dataset: training dataset.
            observation_networks: network for feature
                extraction from raw observation.
            target_observation_networks: target observation
                network.
            variable_client: The client used to manage the variables.
            counts: step counter object.
            agent_net_keys: specifies what network each agent uses.
            max_gradient_norm: maximum allowed norm for gradients
                before clipping is applied.
            logger: logger object for logging trainer
                statistics.
            learning_rate_scheduler_fn: dict with two functions (one for the policy and
                one for the critic optimizer), that takes in a trainer step t and
                returns the current learning rate.
        """
        self._agents = agents
        self._agent_types = agent_types
        self._agent_net_keys = agent_net_keys
        self._variable_client = variable_client
        self._learning_rate_scheduler_fn = learning_rate_scheduler_fn

        # Setup counts
        self._counts = counts

        # Store online and target networks.
        self._value_networks = value_networks
        self._target_value_networks = target_value_networks

        # Ensure obs and target networks are sonnet modules
        self._observation_networks = {
            k: tf2_utils.to_sonnet_module(v) for k, v in observation_networks.items()
        }
        self._target_observation_networks = {
            k: tf2_utils.to_sonnet_module(v)
            for k, v in target_observation_networks.items()
        }

        # General learner book-keeping and loggers.
        self._logger = logger or loggers.make_default_logger("trainer")

        # Other learner parameters.
        self._discount = discount

        # Set up gradient clipping.
        if max_gradient_norm is not None:
            self._max_gradient_norm = tf.convert_to_tensor(max_gradient_norm)
        else:  # A very large number. Infinity results in NaNs.
            self._max_gradient_norm = tf.convert_to_tensor(1e10)

        # Necessary to track when to update target networks.
        self._num_steps = tf.Variable(0, dtype=tf.int32)
        self._target_averaging = target_averaging
        self._target_update_period = target_update_period
        self._target_update_rate = target_update_rate

        # Create an iterator to go through the dataset.
        self._iterator = iter(dataset)  # pytype: disable=wrong-arg-types

        # Dictionary with unique network keys.
        self.unique_net_keys = sort_str_num(self._value_networks.keys())

        # Get the agents which should be updated and run
        self._trainer_agent_list = self._agents

        # Create optimizers for different agent types.
        if not isinstance(optimizer, dict):
            # A single optimizer was given: deep-copy it per network key so each
            # network gets independent optimizer state.
            self._optimizers: Dict[str, snt.Optimizer] = {}
            for agent in self.unique_net_keys:
                self._optimizers[agent] = copy.deepcopy(optimizer)
        else:
            self._optimizers = optimizer

        # Expose the variables.
        self._system_network_variables: Dict[str, Dict[str, snt.Module]] = {
            "observations": {},
            "values": {},
        }
        for agent_key in self.unique_net_keys:
            self._system_network_variables["observations"][
                agent_key
            ] = self._target_observation_networks[agent_key].variables
            self._system_network_variables["values"][agent_key] = self._value_networks[
                agent_key
            ].variables

        # Do not record timestamps until after the first learning step is done.
        # This is to avoid including the time it takes for actors to come online and
        # fill the replay buffer.
        self._timestamp: Optional[float] = None

    def _update_target_networks(self) -> None:
        """Update the target networks.

        Using either target averaging or
        by directly copying the weights of the online networks every few steps.
        """
        for key in self.unique_net_keys:
            # Update target network.
            online_variables = (
                *self._observation_networks[key].variables,
                *self._value_networks[key].variables,
            )
            target_variables = (
                *self._target_observation_networks[key].variables,
                *self._target_value_networks[key].variables,
            )
            if self._target_averaging:
                assert 0.0 < self._target_update_rate < 1.0
                tau = self._target_update_rate
                # Polyak averaging: target <- (1 - tau) * target + tau * online.
                for src, dest in zip(online_variables, target_variables):
                    dest.assign(dest * (1.0 - tau) + src * tau)
            else:
                # Make online -> target network update ops.
                if tf.math.mod(self._num_steps, self._target_update_period) == 0:
                    for src, dest in zip(online_variables, target_variables):
                        dest.assign(src)
        self._num_steps.assign_add(1)

    def get_variables(self, names: Sequence[str]) -> Dict[str, Dict[str, np.ndarray]]:
        """Deprecated; retained for interface compatibility."""
        pass

    def _transform_observations(
        self, obs: Dict[str, mava_types.OLT], next_obs: Dict[str, mava_types.OLT]
    ) -> Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray]]:
        """Transform the observations using the observation networks of each agent.

        We assume the observation network is non-recurrent.

        Args:
            obs: observations at timestep t-1
            next_obs: observations at timestep t

        Returns:
            Transformed observations
        """
        o_tm1 = {}
        o_t = {}
        for agent in self._agents:
            agent_key = self._agent_net_keys[agent]
            o_tm1[agent] = self._observation_networks[agent_key](obs[agent].observation)
            o_t[agent] = self._target_observation_networks[agent_key](
                next_obs[agent].observation
            )
            # This stop_gradient prevents gradients to propagate into the target
            # observation network. In addition, since the online policy network is
            # evaluated at o_t, this also means the policy loss does not influence
            # the observation network training.
            o_t[agent] = tree.map_structure(tf.stop_gradient, o_t[agent])
        return o_tm1, o_t

    @tf.function
    def _step(
        self,
    ) -> Dict[str, Dict[str, Any]]:
        """Trainer step.

        Returns:
            losses
        """
        # Draw a batch of data from replay.
        sample: reverb.ReplaySample = next(self._iterator)
        # Compute loss
        self._forward(sample)
        # Compute and apply gradients
        self._backward()
        # Update the target networks
        self._update_target_networks()
        # Log losses per agent
        return train_utils.map_losses_per_agent_value(self.value_losses)

    def _forward(self, inputs: reverb.ReplaySample) -> None:
        """Trainer forward pass.

        Stores the per-agent losses on ``self.value_losses`` and the (persistent)
        gradient tape on ``self.tape`` for use by ``_backward``.

        Args:
            inputs: input data from the data table (transitions)
        """
        # Unpack input data as follows:
        # o_tm1 = dictionary of observations one for each agent
        # a_tm1 = dictionary of actions taken from obs in o_tm1
        # e_tm1 [Optional] = extra data for timestep t-1
        # that the agents persist in replay.
        # r_t = dictionary of rewards or rewards sequences
        # (if using N step transitions) ensuing from actions a_tm1
        # d_t = environment discount ensuing from actions a_tm1.
        # This discount is applied to future rewards after r_t.
        # o_t = dictionary of next observations or next observation sequences
        # e_t [Optional] = extra data for timestep t that the agents persist in replay.
        trans = mava_types.Transition(*inputs.data)
        o_tm1, o_t, a_tm1, r_t, d_t, _, _ = (
            trans.observations,
            trans.next_observations,
            trans.actions,
            trans.rewards,
            trans.discounts,
            trans.extras,
            trans.next_extras,
        )
        self.value_losses = {}
        # Do forward passes through the networks and calculate the losses
        with tf.GradientTape(persistent=True) as tape:
            o_tm1_trans, o_t_trans = self._transform_observations(o_tm1, o_t)
            for agent in self._trainer_agent_list:
                agent_key = self._agent_net_keys[agent]
                # Double Q-learning
                q_tm1 = self._value_networks[agent_key](o_tm1_trans[agent])
                q_t_value = self._target_value_networks[agent_key](o_t_trans[agent])
                q_t_selector = self._value_networks[agent_key](o_t_trans[agent])
                # Legal action masking
                q_t_selector = tf.where(
                    tf.cast(o_t[agent].legal_actions, "bool"), q_t_selector, -999999999
                )
                # pcont
                discount = tf.cast(self._discount, dtype=d_t[agent].dtype)
                # Value loss.
                value_loss, _ = trfl.double_qlearning(
                    q_tm1,
                    a_tm1[agent],
                    r_t[agent],
                    discount * d_t[agent],
                    q_t_value,
                    q_t_selector,
                )
                self.value_losses[agent] = tf.reduce_mean(value_loss, axis=0)
        self.tape = tape

    def _backward(self) -> None:
        """Trainer backward pass updating network parameters"""
        # Calculate the gradients and update the networks
        value_losses = self.value_losses
        tape = self.tape
        for agent in self._trainer_agent_list:
            agent_key = self._agent_net_keys[agent]
            # Get trainable variables.
            variables = (
                self._observation_networks[agent_key].trainable_variables
                + self._value_networks[agent_key].trainable_variables
            )
            # Compute gradients.
            # Note: Warning "WARNING:tensorflow:Calling GradientTape.gradient
            # on a persistent tape inside its context is significantly less efficient
            # than calling it outside the context." caused by losses.dpg, which calls
            # tape.gradient.
            gradients = tape.gradient(value_losses[agent], variables)
            # Maybe clip gradients.
            gradients = tf.clip_by_global_norm(gradients, self._max_gradient_norm)[0]
            # Apply gradients.
            self._optimizers[agent_key].apply(gradients, variables)
        # Drop the persistent tape so its resources are released.
        train_utils.safe_del(self, "tape")

    def step(self) -> None:
        """Trainer step to update the parameters of the agents in the system"""
        raise NotImplementedError("A trainer statistics wrapper should overwrite this.")

    def after_trainer_step(self) -> None:
        """Optionally decay lr after every training step."""
        if self._learning_rate_scheduler_fn:
            self._decay_lr(self._num_steps)
        info: Dict[str, Dict[str, float]] = {}
        for agent in self._agents:
            info[agent] = {}
            info[agent]["learning_rate"] = self._optimizers[
                self._agent_net_keys[agent]
            ].learning_rate
        if self._logger:
            self._logger.write(info)

    def _decay_lr(self, trainer_step: int) -> None:
        """Decay lr.

        Args:
            trainer_step : trainer step time t.
        """
        train_utils.decay_lr(
            self._learning_rate_scheduler_fn,  # type: ignore
            self._optimizers,
            trainer_step,
        )
class MADQNRecurrentTrainer(mava.Trainer):
    """Recurrent MADQN trainer.
    This is the trainer component of a recurrent MADQN system. IE it takes a dataset
    as input and implements update functionality to learn from this dataset.
    """
    def __init__(
        self,
        agents: List[str],
        agent_types: List[str],
        value_networks: Dict[str, snt.Module],
        target_value_networks: Dict[str, snt.Module],
        optimizer: Union[snt.Optimizer, Dict[str, snt.Optimizer]],
        discount: float,
        target_averaging: bool,
        target_update_period: int,
        target_update_rate: float,
        dataset: tf.data.Dataset,
        observation_networks: Dict[str, snt.Module],
        target_observation_networks: Dict[str, snt.Module],
        variable_client: VariableClient,
        counts: Dict[str, Any],
        agent_net_keys: Dict[str, str],
        max_gradient_norm: float = None,
        logger: loggers.Logger = None,
        learning_rate_scheduler_fn: Optional[Dict[str, Callable[[int], None]]] = None,
    ):
        """Initialise Recurrent MADQN trainer
        Args:
            agents: agent ids, e.g. "agent_0".
            agent_types: agent types, e.g. "speaker" or "listener".
            value_networks: value networks for each agent in
                the system.
            target_value_networks: target value networks.
            optimizer: optimizer(s) for updating value networks.
            discount: discount factor for TD updates.
            target_averaging: whether to use polyak averaging for target network
                updates.
            target_update_period: number of steps before target networks are
                updated.
            target_update_rate: update rate when using averaging.
            dataset: training dataset.
            observation_networks: network for feature
                extraction from raw observation.
            target_observation_networks: target observation
                network.
            variable_client: The client used to manage the variables.
            counts: step counter object.
            agent_net_keys: specifies what network each agent uses.
            max_gradient_norm: maximum allowed norm for gradients
                before clipping is applied.
            logger: logger object for logging trainer
                statistics.
            learning_rate_scheduler_fn: dict with two functions (one for the policy and
                one for the critic optimizer), that takes in a trainer step t and
                returns the current learning rate.
        """
        self._agents = agents
        self._agent_type = agent_types
        self._agent_net_keys = agent_net_keys
        self._variable_client = variable_client
        self._learning_rate_scheduler_fn = learning_rate_scheduler_fn
        # Setup counts
        self._counts = counts
        # Store online and target networks.
        self._value_networks = value_networks
        self._target_value_networks = target_value_networks
        # Ensure obs and target networks are sonnet modules
        self._observation_networks = {
            k: tf2_utils.to_sonnet_module(v) for k, v in observation_networks.items()
        }
        self._target_observation_networks = {
            k: tf2_utils.to_sonnet_module(v)
            for k, v in target_observation_networks.items()
        }
        # General learner book-keeping and loggers.
        self._logger = logger or loggers.make_default_logger("trainer")
        # Other learner parameters.
        self._discount = discount
        # Set up gradient clipping.
        if max_gradient_norm is not None:
            self._max_gradient_norm = tf.convert_to_tensor(max_gradient_norm)
        else:  # A very large number. Infinity results in NaNs.
            self._max_gradient_norm = tf.convert_to_tensor(1e10)
        # Necessary to track when to update target networks.
        self._num_steps = tf.Variable(0, dtype=tf.int32)
        self._target_averaging = target_averaging
        self._target_update_period = target_update_period
        self._target_update_rate = target_update_rate
        # Create an iterator to go through the dataset.
        self._iterator = iter(dataset)  # pytype: disable=wrong-arg-types
        # Dictionary with unique network keys.
        self.unique_net_keys = sort_str_num(self._value_networks.keys())
        # Get the agents which shoud be updated and ran
        self._trainer_agent_list = self._agents
        # Create optimizers for different agent types.
        # A single optimizer is deep-copied per network key so each network
        # gets an independent optimizer state.
        if not isinstance(optimizer, dict):
            self._optimizers: Dict[str, snt.Optimizer] = {}
            for agent in self.unique_net_keys:
                self._optimizers[agent] = copy.deepcopy(optimizer)
        else:
            self._optimizers = optimizer
        # Expose the variables.
        # Note: the *target* networks' variables are what get published to
        # the variable server/clients.
        self._system_network_variables: Dict[str, Dict[str, snt.Module]] = {
            "observations": {},
            "values": {},
        }
        for agent_key in self.unique_net_keys:
            self._system_network_variables["observations"][
                agent_key
            ] = self._target_observation_networks[agent_key].variables
            self._system_network_variables["values"][
                agent_key
            ] = self._target_value_networks[agent_key].variables
        # Do not record timestamps until after the first learning step is done.
        # This is to avoid including the time it takes for actors to come online and
        # fill the replay buffer.
        self._timestamp: Optional[float] = None
    def step(self) -> None:
        """Trainer step to update the parameters of the agents in the system"""
        raise NotImplementedError("A trainer statistics wrapper should overwrite this.")
    def _transform_observations(
        self, observations: Dict[str, mava_types.OLT]
    ) -> Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray]]:
        """Apply the observation networks to the raw observations from the dataset
        We assume that the observation network is non-recurrent.
        Args:
            observations: raw agent observations
        Returns:
            obs_trans: transformed agent observation
            obs_target_trans: transformed target network observations
        """
        # NOTE We are assuming that only the value network
        # is recurrent and not the observation network.
        obs_trans = {}
        obs_target_trans = {}
        for agent in self._agents:
            agent_key = self._agent_net_keys[agent]
            # Flatten the (time, batch) dims before the feed-forward
            # observation network, then restore them afterwards.
            reshaped_obs, dims = train_utils.combine_dim(
                observations[agent].observation
            )
            obs_trans[agent] = train_utils.extract_dim(
                self._observation_networks[agent_key](reshaped_obs), dims
            )
            obs_target_trans[agent] = train_utils.extract_dim(
                self._target_observation_networks[agent_key](reshaped_obs),
                dims,
            )
            # This stop_gradient prevents gradients to propagate into
            # the target observation network.
            obs_target_trans[agent] = tree.map_structure(
                tf.stop_gradient, obs_target_trans[agent]
            )
        return obs_trans, obs_target_trans
    def _update_target_networks(self) -> None:
        """Update the target networks.
        Using either target averaging or
        by directy copying the weights of the online networks every few steps.
        """
        for key in self.unique_net_keys:
            # Update target network.
            online_variables = (
                *self._observation_networks[key].variables,
                *self._value_networks[key].variables,
            )
            target_variables = (
                *self._target_observation_networks[key].variables,
                *self._target_value_networks[key].variables,
            )
            if self._target_averaging:
                assert 0.0 < self._target_update_rate < 1.0
                tau = self._target_update_rate
                # Polyak averaging: target <- (1 - tau) * target + tau * online.
                for src, dest in zip(online_variables, target_variables):
                    dest.assign(dest * (1.0 - tau) + src * tau)
            else:
                # Make online -> target network update ops.
                if tf.math.mod(self._num_steps, self._target_update_period) == 0:
                    for src, dest in zip(online_variables, target_variables):
                        dest.assign(src)
        self._num_steps.assign_add(1)
    def get_variables(self, names: Sequence[str]) -> Dict[str, Dict[str, np.ndarray]]:
        """Deprecated; retained only to satisfy the Trainer interface. No-op."""
        pass
    @tf.function
    def _step(
        self,
    ) -> Dict[str, Dict[str, Any]]:
        """Trainer step.
        Returns:
            losses
        """
        # Draw a batch of data from replay.
        sample: reverb.ReplaySample = next(self._iterator)
        # Compute loss
        self._forward(sample)
        # Compute and apply gradients
        self._backward()
        # Update the target networks
        self._update_target_networks()
        # Log losses per agent
        return train_utils.map_losses_per_agent_value(self.value_losses)
    def _forward(self, inputs: reverb.ReplaySample) -> None:
        """Trainer forward pass.
        Args:
            inputs: input data from the data table (transitions)
        """
        # Convert to time major
        data = tree.map_structure(
            lambda v: tf.expand_dims(v, axis=0) if len(v.shape) <= 1 else v, inputs.data
        )
        data = tf2_utils.batch_to_sequence(data)
        # Note (dries): The unused variable is start_of_episodes.
        observations, actions, rewards, discounts, _, extras = (
            data.observations,
            data.actions,
            data.rewards,
            data.discounts,
            data.start_of_episode,
            data.extras,
        )
        # Get initial state for the LSTM from replay and
        # extract the first state in the sequence.
        # Note: online and target unrolls start from the same stored state.
        core_state = tree.map_structure(lambda s: s[0, :, :], extras["core_states"])
        target_core_state = tree.map_structure(
            lambda s: s[0, :, :], extras["core_states"]
        )
        # TODO (dries): Take out all the data_points that does not need
        # to be processed here at the start. Therefore it does not have
        # to be done later on and saves processing time.
        self.value_losses: Dict[str, tf.Tensor] = {}
        # Do forward passes through the networks and calculate the losses
        with tf.GradientTape(persistent=True) as tape:
            # Note (dries): We are assuming that only the policy network
            # is recurrent and not the observation network.
            obs_trans, target_obs_trans = self._transform_observations(observations)
            for agent in self._trainer_agent_list:
                agent_key = self._agent_net_keys[agent]
                # Double Q-learning
                q, _ = snt.static_unroll(
                    self._value_networks[agent_key],
                    obs_trans[agent],
                    core_state[agent][0],
                )
                q_tm1 = q[:-1]  # Chop off last timestep
                q_t_selector = q[1:]  # Chop off first timestep
                q_t_value, _ = snt.static_unroll(
                    self._target_value_networks[agent_key],
                    target_obs_trans[agent],
                    target_core_state[agent][0],
                )
                q_t_value = q_t_value[1:]  # Chop off first timestep
                # Legal action masking: illegal actions get a large negative
                # value so argmax never selects them.
                q_t_selector = tf.where(
                    tf.cast(observations[agent].legal_actions[1:], "bool"),
                    q_t_selector,
                    -999999999,
                )
                # Flatten out time and batch dim
                q_tm1, _ = train_utils.combine_dim(q_tm1)
                q_t_selector, _ = train_utils.combine_dim(q_t_selector)
                q_t_value, _ = train_utils.combine_dim(q_t_value)
                a_tm1, _ = train_utils.combine_dim(
                    actions[agent][:-1]  # Chop off last timestep
                )
                r_t, _ = train_utils.combine_dim(
                    rewards[agent][:-1]  # Chop off last timestep
                )
                d_t, _ = train_utils.combine_dim(
                    discounts[agent][:-1]  # Chop off last timestep
                )
                # Cast the additional discount to match
                # the environment discount dtype.
                discount = tf.cast(self._discount, dtype=discounts[agent].dtype)
                # Value loss
                value_loss, _ = trfl.double_qlearning(
                    q_tm1, a_tm1, r_t, discount * d_t, q_t_value, q_t_selector
                )
                # Zero-padding mask: average the TD error over real
                # (non-padded) transitions only.
                zero_padding_mask, _ = train_utils.combine_dim(
                    tf.cast(extras["zero_padding_mask"], dtype=value_loss.dtype)[:-1]
                )
                masked_loss = value_loss * zero_padding_mask
                self.value_losses[agent] = tf.reduce_sum(masked_loss) / tf.reduce_sum(
                    zero_padding_mask
                )
        self.tape = tape
    def _backward(self) -> None:
        """Trainer backward pass updating network parameters"""
        # Calculate the gradients and update the networks
        value_losses = self.value_losses
        tape = self.tape
        for agent in self._trainer_agent_list:
            agent_key = self._agent_net_keys[agent]
            # Get trainable variables.
            variables = (
                self._observation_networks[agent_key].trainable_variables
                + self._value_networks[agent_key].trainable_variables
            )
            # Compute gradients.
            gradients = tape.gradient(value_losses[agent], variables)
            # Maybe clip gradients.
            gradients = tf.clip_by_global_norm(gradients, self._max_gradient_norm)[0]
            # Apply gradients.
            self._optimizers[agent_key].apply(gradients, variables)
        # Free the persistent tape.
        train_utils.safe_del(self, "tape")
    def after_trainer_step(self) -> None:
        """Optionally decay lr after every training step."""
        if self._learning_rate_scheduler_fn:
            self._decay_lr(self._num_steps)
            info: Dict[str, Dict[str, float]] = {}
            for agent in self._agents:
                info[agent] = {}
                info[agent]["learning_rate"] = self._optimizers[
                    self._agent_net_keys[agent]
                ].learning_rate
            if self._logger:
                self._logger.write(info)
    def _decay_lr(self, trainer_step: int) -> None:
        """Decay lr.
        Args:
            trainer_step : trainer step time t.
        """
        train_utils.decay_lr(
            self._learning_rate_scheduler_fn,  # type: ignore
            self._optimizers,
            trainer_step,
        )
| StarcoderdataPython |
5035585 | from pyomo import environ as po
from pyomo import dae as pod
from matplotlib import pyplot as plt
import numpy as np
def create_model_hgp(spt):
    """Build the Pyomo DAE model of gas extraction from a reservoir.

    The reservoir is modelled with an ideal-gas mole balance and a
    Hagen-Poiseuille pressure-drop relation over the extraction pipe.
    Time is normalized to [0, 1]; ``m.tau`` rescales derivatives back to
    real time.

    Args:
        spt: array-like of sampling times; ``max(spt)`` defines the batch
            time used to normalize the time domain.

    Returns:
        A ``pyomo.environ.ConcreteModel`` with the balance, pipe-flow and
        accumulation constraints attached and ``m.a[0]`` fixed to zero.
    """
    m = po.ConcreteModel()
    tau = max(spt)
    norm_spt = spt / tau
    m.t = pod.ContinuousSet(bounds=(0, 1), initialize=norm_spt)  # normalized time variable (unitless)
    m.tau = po.Var(bounds=(0, None))  # batch time in hours
    m.p = po.Var(m.t, bounds=(0, None))  # pressure in reservoir in MPa
    m.dpdt = pod.DerivativeVar(m.p, wrt=m.t)  # rate of change of pressure per day
    m.v = po.Var(bounds=(0, 1))  # the unknown volume of the reservoir in trillion cubic metres
    m.T = po.Var(bounds=(0, None))  # the temperature in the reservoir, assumed isothermal
    m.q = po.Var(m.t, bounds=(0, None))  # gas flowrate taken out in examoles per day (10^{18} moles per day)
    m.p_vac = po.Var(m.t, bounds=(21.90, 22), initialize=21.95)  # outlet pressure that drive gas extraction in MPa
    m.mu = po.Var(bounds=(0, None))  # dynamic viscosity of gas in MPa.day
    m.L = po.Var(bounds=(0, None))  # length of pipe in m
    m.R = po.Var(bounds=(0, None))  # pipe radius in m
    m.rho = po.Var(bounds=(0, None))  # density of gas in kg per m3
    m.a = po.Var(m.t, bounds=(0, None))  # accumulated production in examoles
    m.dadt = pod.DerivativeVar(m.a, wrt=m.t)  # rate of change of accumulated production (equivalent to m.q)

    def _bal(m, t):
        # Ideal-gas mole balance: V dp/dt = -q R T (with d/dt rescaled by tau).
        # BUGFIX: this constraint previously referenced ``m.qin``, a component
        # never declared on the model, which raises an AttributeError when the
        # constraint is constructed. The ideal-gas law requires the reservoir
        # temperature ``m.T`` (fixed by the callers) in this position.
        return m.dpdt[t] * m.v / m.tau == - m.q[t] * 8.314 * m.T
    m.bal = po.Constraint(m.t, rule=_bal)

    def _hagen_poisueille(m, t):
        # Laminar pipe flow: delta-p = 8 * mu * L * q / (pi * R^4).
        return m.p[t] - m.p_vac[t] == 8 * m.mu * m.L * m.q[t] / (3.14159 * m.R ** 4)
    m.hagen_poisueille = po.Constraint(m.t, rule=_hagen_poisueille)

    def _compute_accumulated_production(m, t):
        # Accumulated production is the time integral of the flowrate.
        return m.dadt[t] == m.q[t]
    m.compute_accumulated_production = po.Constraint(m.t, rule=_compute_accumulated_production)

    # defining zero accumulated production at the start
    m.a[0].fix(0.0)
    return m
def simulate_hgp(ti_controls, tv_controls, sampling_times, model_parameters):
    """Simulate the reservoir model under a piecewise-constant outlet pressure.

    Args:
        ti_controls: time-invariant controls; [0] pipe length (m), [1] pipe
            radius (m).
        tv_controls: one-element list holding a {normalized-switch-time:
            outlet pressure (MPa)} dict.
        sampling_times: array of sampling times; max defines the batch time.
        model_parameters: [0] reservoir volume estimate.

    Returns:
        Column vector (len(sampling_times) x 1) of reservoir pressures at the
        normalized sampling times. Also produces matplotlib figures and
        prints a productivity summary as side effects.
    """
    m = create_model_hgp(sampling_times)
    m.p[0].fix(22)  # 22 MPa - figure from first result of googling "pressure in ghawar field"
    # NOTE(review): the inline comment below says 360 Kelvin, but the fixed
    # value is 300; ``optimal_extraction`` fixes 360 — confirm which is intended.
    m.T.fix(300)  # 360 Kelvin - taken from (<NAME>; <NAME>; <NAME>; <NAME> (2003) 8 (1): 9–42.)
    m.tau.fix(max(sampling_times))  # hours
    m.v.fix(model_parameters[0])
    # NOTE(review): viscosity is hard-coded here, whereas optimal_extraction
    # takes it from model_parameters[1]; verify the value 1000 MPa.day.
    m.mu.fix(1000)
    m.L.fix(ti_controls[0])
    m.R.fix(ti_controls[1])
    # Time-varying control profile passed to the integrator via a Suffix.
    m.tvc = po.Suffix(direction=po.Suffix.LOCAL)
    m.tvc[m.p_vac] = tv_controls[0]
    simulator = pod.Simulator(m, package="casadi")
    t, profile = simulator.simulate(
        numpoints=101,
        integrator="idas",
        varying_inputs=m.tvc,
    )
    # Discretize so simulator results can initialize the collocation model.
    discretizer = po.TransformationFactory("dae.collocation")
    discretizer.apply_to(m, nfe=51, ncp=3)
    discretizer.reduce_collocation_points(m, var=m.p_vac, ncp=1, contset=m.t)
    simulator.initialize_model()
    # De-normalize time for plotting.
    t = [po.value(t) * max(sampling_times) for t in m.t]
    p = [po.value(m.p[t]) for t in m.t]
    a = [po.value(m.a[t]) for t in m.t]
    q = [po.value(m.q[t]) for t in m.t]
    if True:  # always-on plotting guard, kept for easy toggling
        tau = max(sampling_times)
        swt = list(tv_controls[0].keys())
        fig = plt.figure()
        axes = fig.add_subplot(111)
        axes.plot(t, p, label="Pressure in Reservoir (MPa)")
        # Step plot of the piecewise-constant outlet pressure (assumes four
        # switching intervals at 0, 0.25, 0.50, 0.75 normalized time).
        axes.plot(
            [swt[0] * tau, swt[1] * tau, swt[1] * tau, swt[2] * tau, swt[2] * tau, swt[3] * tau, swt[3] * tau, tau],
            [tv_controls[0][0], tv_controls[0][0.0], tv_controls[0][0.25], tv_controls[0][0.25], tv_controls[0][0.50], tv_controls[0][0.50], tv_controls[0][0.75], tv_controls[0][0.75]],
            label="Outlet Pressure (MPa)"
        )
        axes.set_xlabel("Time (Days)")
        axes.set_ylabel("Reservoir Pressure (MPa)")
        axes.legend()
        fig.tight_layout()
        fig2 = plt.figure()
        axes2 = fig2.add_subplot(111)
        axes2.plot(t, np.asarray(a) * 1e9, label="Accumulated Production")
        axes2.set_xlabel("Time (Days)")
        axes2.set_ylabel(r"Accumulated Production ($10^{9}$ Moles)")
        fig2.tight_layout()
        fig3 = plt.figure()
        axes3 = fig3.add_subplot(111)
        axes3.plot(t, np.asarray(q) * 1e9, label="Gas Flowrate")
        axes3.set_xlabel("Time (Days)")
        axes3.set_ylabel("Gas Flow Rate ($10^9$ Moles per Day)")
        fig3.tight_layout()
    print(f"Productivity: {po.value(m.a[1]) * 1e9 / max(sampling_times)} Billion moles per day")
    # Sample the pressure profile at the (normalized) requested times.
    norm_spt = sampling_times / max(sampling_times)
    p = [po.value(m.p[t]) for t in norm_spt]
    p = np.asarray(p)[:, None]
    return p
def optimal_extraction(ti_controls, sampling_times, model_parameters):
    """Solve for the outlet-pressure profile that maximizes total production.

    Args:
        ti_controls: time-invariant controls (unused here; pipe geometry is
            hard-coded below).
        sampling_times: array of sampling times; max defines the batch time.
        model_parameters: [0] reservoir volume, [1] gas viscosity (MPa.day).

    Returns:
        Tuple ``(t, p, a)`` of de-normalized times (days), reservoir
        pressures (MPa) and accumulated production values. Also produces
        matplotlib figures and prints a productivity summary.
    """
    m = create_model_hgp(sampling_times)

    def _objective(m):
        # return m.a[1] / max(sampling_times) # maximum productivity (moles per time)
        return m.a[1]  # maximum production
    m.objective = po.Objective(rule=_objective, sense=po.maximize)

    def _p_vac_cons(m, t):
        # Keep the outlet pressure strictly below the reservoir pressure so
        # flow stays non-negative (0.01 MPa margin).
        return m.p_vac[t] + 0.01 <= m.p[t]
    m.p_vac_cons = po.Constraint(m.t, rule=_p_vac_cons)
    m.p[0].fix(22)  # 22 MPa - figure from first result of googling "pressure in ghawar field"
    m.T.fix(360)  # 360 Kelvin - taken from (<NAME>; <NAME>; <NAME>; <NAME> GeoArabia (2003) 8 (1): 9–42.)
    m.tau.fix(max(sampling_times))  # hours
    m.v.fix(model_parameters[0])
    m.mu.fix(model_parameters[1])
    # Fixed pipe geometry for the optimization case.
    m.L.fix(4500)
    m.R.fix(0.25)
    discretizer = po.TransformationFactory("dae.collocation")
    discretizer.apply_to(m, nfe=51, ncp=3)
    # Piecewise-constant control: one collocation point per finite element.
    discretizer.reduce_collocation_points(m, var=m.p_vac, ncp=1, contset=m.t)
    solver = po.SolverFactory("ipopt")
    result = solver.solve(m)
    # De-normalize time; the /24 suggests sampling_times are in hours here.
    t = [po.value(t) * max(sampling_times) / 24 for t in m.t]
    p = [po.value(m.p[t]) for t in m.t]
    a = [po.value(m.a[t]) for t in m.t]
    q = [po.value(m.q[t]) for t in m.t]
    p_vac = [po.value(m.p_vac[t]) for t in m.t]
    if True:  # always-on plotting guard, kept for easy toggling
        fig = plt.figure()
        axes = fig.add_subplot(111)
        axes.plot(t, p, label="Pressure in Reservoir (MPa)")
        axes.plot(t, p_vac, label="Outlet Pressure (MPa)")
        axes.set_xlabel("Time (Days)")
        axes.set_ylabel("Pressure (MPa)")
        axes.legend()
        fig.tight_layout()
        fig2 = plt.figure()
        axes2 = fig2.add_subplot(111)
        axes2.plot(t, np.asarray(a) * 1e9, label="Accumulated Production")
        axes2.set_xlabel("Time (Days)")
        axes2.set_ylabel(r"Accumulated Production ($10^{9}$ Moles)")
        fig2.tight_layout()
        fig3 = plt.figure()
        axes3 = fig3.add_subplot(111)
        axes3.plot(t, q)
        axes3.set_xlabel("Time (Days)")
        axes3.set_ylabel(r"Gas Flowrate $(10^{12} m^{3} / day)$")
        fig3.tight_layout()
        # fig4 = plt.figure()
        # axes4 = fig4.add_subplot(111)
        # axes4.set_xlabel("Time (Days)")
        # axes4.set_ylabel(r"Outlet Pressure (MPa)")
        # axes4.plot(t, p_vac)
        # fig4.tight_layout()
    print(f"Productivity: {po.value(m.a[1]) * 1e9 / max(sampling_times)} Billion moles per day")
    return t, p, a
if __name__ == '__main__':
    # Time-invariant controls: pipe length (m) and radius (m).
    tic = [4500, 1]
    mp = [
        3.1,  # estimated gas in place - taken from wikipedia page of ghawar field on 2020-12-25
        1.28125E-16,  # viscosity in MPa.day - converted from 1.107x10^(-5) Pa.s
    ]
    # One year of daily sampling times.
    spt = np.linspace(0, 365, 366)
    # Simulation
    if True:
        # Piecewise-constant outlet-pressure schedule keyed by normalized time.
        tvc = [{
            0.00: 21.995,
            0.25: 21.990,
            0.50: 21.985,
            0.75: 21.980,
        }]
        # tvc = [{
        #     0.00: 22.00,
        #     0.25: 22.00,
        #     0.50: 22.00,
        #     0.75: 22.00,
        # }]
        simulate_hgp(tic, tvc, spt, mp)
    # Maximize Production or Productivity
    if False:
        t, p, a = optimal_extraction(tic, spt, mp)
    # FOR REFERENCE:
    # THE US Produces 107.25 billion moles of natural gas per day
    # Saudi produces 0.9 billion moles of natural gas per day
    plt.show()
| StarcoderdataPython |
4911795 | import pytest
import pandas as pd
import numpy as np
import pandas.util.testing as tm
pytest.importorskip('rbc')
def catch_udf_support_disabled(mth):
    """Decorator that swallows server-side 'UDF registration disabled' errors.

    Wraps a test method taking ``(self, con)``. If the wrapped call raises a
    ``TOmniSciException`` whose message starts with 'Runtime UDF registration
    is disabled', the failure is printed and ignored; any other exception
    propagates unchanged.
    """
    def wrapper(self, con):
        try:
            return mth(self, con)
        except Exception as exc:
            udf_disabled = (
                type(exc).__name__ == 'TOmniSciException'
                and exc.error_msg.startswith(
                    'Runtime UDF registration is disabled'
                )
            )
            if not udf_disabled:
                raise
            print('Ignoring `%s` failure' % (exc.error_msg))
    wrapper.__name__ = mth.__name__
    return wrapper
@pytest.mark.usefixtures("omnisci_server")
class TestRuntimeUDF:
    """Integration tests for runtime (remote) UDF registration via rbc.

    Requires a live OmniSci server (``omnisci_server`` fixture) and a ``con``
    connection fixture supplied by the surrounding test suite.
    """
    def load_test_udf_incr(self, con):
        # (Re)create a two-row fixture table used by both tests below.
        con.execute('drop table if exists test_udf_incr')
        con.execute('create table test_udf_incr (i4 integer, f8 double)')
        con.execute('insert into test_udf_incr values (1, 2.3);')
        con.execute('insert into test_udf_incr values (2, 3.4);')
    @catch_udf_support_disabled
    def test_udf_incr(self, con):
        """Register an increment UDF and check results via raw execute."""
        @con('int32(int32)', 'double(double)')
        def incr(x):
            return x + 1
        self.load_test_udf_incr(con)
        result = list(con.execute('select i4, incr(i4) from test_udf_incr'))
        expected = [(1, 2), (2, 3)]
        assert result == expected
        result = list(con.execute('select f8, incr(f8) from test_udf_incr'))
        expected = [(2.3, 3.3), (3.4, 4.4)]
        assert result == expected
        con.execute('drop table if exists test_udf_incr')
    @catch_udf_support_disabled
    def test_udf_incr_read_sql(self, con):
        """Register an increment UDF and check results via pandas.read_sql."""
        @con('int32(int32)', 'double(double)')
        def incr_read_sql(x):
            return x + 1
        self.load_test_udf_incr(con)
        result = pd.read_sql(
            '''select i4 as qty, incr_read_sql(i4) as price
                    from test_udf_incr''',
            con,
        )
        expected = pd.DataFrame(
            {
                "qty": np.array([1, 2], dtype=np.int64),
                "price": np.array([2, 3], dtype=np.int64),
            }
        )[['qty', 'price']]
        tm.assert_frame_equal(result, expected)
        result = pd.read_sql(
            '''select f8 as qty, incr_read_sql(f8) as price
                    from test_udf_incr''',
            con,
        )
        expected = pd.DataFrame(
            {
                "qty": np.array([2.3, 3.4], dtype=np.float64),
                "price": np.array([3.3, 4.4], dtype=np.float64),
            }
        )[['qty', 'price']]
        tm.assert_frame_equal(result, expected)
        con.execute('drop table if exists test_udf_incr')
| StarcoderdataPython |
1973208 | <reponame>ipa-cmh/bonsai
#Copyright (c) 2017 <NAME>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
###############################################################################
# Imports
###############################################################################
from __future__ import unicode_literals
from builtins import map
from builtins import object
from ..model import *
###############################################################################
# Language Model
###############################################################################
# The C++ model reuses the generic code-model entities directly wherever no
# C++-specific behaviour is needed.
CppEntity = CodeEntity
CppStatementGroup = CodeStatementGroup
# ----- Common Entities -------------------------------------------------------
class CppVariable(CodeVariable):
    # C++ variable entity: tracks the declared type as written, a canonical
    # spelling, and a working result type with "const " stripped.
    def __init__(self, scope, parent, id, name, result, ctype=None):
        CodeVariable.__init__(self, scope, parent, id, name, result)
        # Type exactly as written (may carry a leading "const ").
        self.full_type = result
        # Canonical type spelling; falls back to the written type.
        self.canonical_type = ctype or result
        # Working type with a leading "const " qualifier removed.
        self.result = result[6:] if result.startswith("const ") else result
    def auto_init(self):
        """Return a default value for this variable."""
        # Models "var = <default>" by synthesising an assignment operator
        # whose right-hand side is a default-constructed argument.
        assign = CppOperator(self.scope, self.parent, "=", self.result,
                             ctype=self.canonical_type)
        value = CppDefaultArgument(self.scope, assign, self.result,
                                   ctype=self.canonical_type)
        assign.arguments = (self, value)
        return value
class CppFunction(CodeFunction):
    # C++ function/method entity; adds C++ type bookkeeping and a template
    # parameter count on top of the generic CodeFunction.
    def __init__(self, scope, parent, id, name, result, definition=True,
                 ctype=None):
        CodeFunction.__init__(self, scope, parent, id, name, result,
                              definition=definition)
        # Return type exactly as written (may carry a leading "const ").
        self.full_type = result
        # Canonical return type; falls back to the written type.
        self.canonical_type = ctype or result
        # Working return type with a leading "const " qualifier removed.
        self.result = result[6:] if result.startswith("const ") else result
        # Number of template parameters (0 for non-template functions).
        self.template_parameters = 0
    @property
    def is_constructor(self):
        # A member function whose name equals its class's name.
        return self.member_of and self.name == self.member_of.name
    # def _afterpass(self):
    # left side can be CALL_EXPR: operator[] or operator()
    # or ARRAY_SUBSCRIPT_EXPR: a[]
    # or UNARY_OPERATOR: *a
    # or PAREN_EXPR: (*a)
# Structural entities with no C++-specific additions.
CppClass = CodeClass
CppEnum = CodeEnum
CppNamespace = CodeNamespace
CppGlobalScope = CodeGlobalScope
# ----- Expression Entities ---------------------------------------------------
CppExpression = CodeExpression
SomeCpp = SomeValue
class CppExpressionInterface(object):
    """Mixin holding the result-type bookkeeping shared by C++ expressions."""

    def _trim_result(self, result, ctype=None):
        """Record the full, canonical and 'const'-stripped result types."""
        self.full_type = result
        self.canonical_type = ctype or result
        trimmed = result
        if trimmed.startswith("const "):
            trimmed = trimmed[6:]
        self.result = trimmed
class CppReference(CodeReference, CppExpressionInterface):
    # Reference to a named entity (variable, member, etc.) in an expression.
    def __init__(self, scope, parent, name, result, paren=False, ctype=None):
        CodeReference.__init__(self, scope, parent, name, result, paren = paren)
        self._trim_result(result, ctype=ctype)
    def pretty_str(self, indent=0):
        """Return a human-readable rendering of this reference."""
        spaces = ' ' * indent
        pretty = '{}({})' if self.parenthesis else '{}{}'
        name = self.name
        if self.field_of:
            o = self.field_of
            # Accesses through "operator->" render as pointer access: obj->name.
            if isinstance(o, CppFunctionCall) and o.name == 'operator->':
                name = '{}->{}'.format(o.arguments[0].pretty_str(), self.name)
            else:
                name = '{}.{}'.format(o.pretty_str(), self.name)
        return pretty.format(spaces, name)
class CppOperator(CodeOperator, CppExpressionInterface):
    # C++ unary/binary operator expression.
    # Recognised operator spellings (used by the base class to classify arity).
    _UNARY_TOKENS = ("+", "-", "++", "--", "*", "&", "!", "~")
    _BINARY_TOKENS = ("+", "-", "*", "/", "%", "&", "|", "^", "<<", ">>",
                      "<", ">", "<=", ">=", "==", "!=", "&&", "||", "=",
                      "+=", "-=", "*=", "/=", "%=", "<<=", ">>=", "&=",
                      "|=", "^=", ",")
    def __init__(self, scope, parent, name, result, args=None, paren=False,
                 ctype=None):
        CodeOperator.__init__(self, scope, parent, name, result,
                              args = args, paren = paren)
        self._trim_result(result, ctype=ctype)
    @property
    def is_assignment(self):
        # True for plain and compound assignment operators.
        return self.name in ('=', '+=', '-=', '*=', '/=', '%=', '&=', '|=',
                             '^=', '<<=', '>>=')
    def pretty_str(self, indent=0):
        """Return a human-readable rendering of this operator expression."""
        indent = ' ' * indent
        pretty = '{}({})' if self.parenthesis else '{}{}'
        operator = self.name
        if self.is_unary:
            # NOTE(review): a leading '_' in the name appears to mark postfix
            # operators (operand rendered before the token) — confirm with the
            # parser that produces these names.
            if self.name.startswith('_'):
                operator = pretty_str(self.arguments[0]) + self.name[1:]
            else:
                operator += pretty_str(self.arguments[0])
        else:
            operator = '{} {} {}'.format(pretty_str(self.arguments[0]),
                                         self.name,
                                         pretty_str(self.arguments[1]))
        return pretty.format(indent, operator)
class CppFunctionCall(CodeFunctionCall, CppExpressionInterface):
    # C++ function/method/constructor call expression.
    def __init__(self, scope, parent, name, result, ctype=None):
        CodeFunctionCall.__init__(self, scope, parent, name, result)
        # Explicit template arguments, if any (list of strings) or None.
        self.template = None
        self._trim_result(result, ctype=ctype)
    @property
    def is_constructor(self):
        """True if this call constructs an object of a type named like it."""
        # Strip template arguments, namespace qualifiers and a trailing
        # pointer marker from the result type, then compare to the call name.
        result = self.canonical_type
        start = result.find("<")
        if start >= 0:
            result = result[:start]
        result = result.split("::")[-1]
        if result.endswith(" *"):
            result = result[:-2]
        return result == self.name
    def _set_method(self, cppobj):
        # Mark this call as a method call on `cppobj` and qualify its name.
        assert isinstance(cppobj, CodeExpression)
        self.method_of = cppobj
        self.full_name = '{}::{}'.format(cppobj.result, self.name)
    def pretty_str(self, indent=0):
        """Return a human-readable rendering of this call."""
        indent = ' ' * indent
        pretty = '{}({})' if self.parenthesis else '{}{}'
        call = self.name
        # Overloaded operators are named "operatorX"; render binary ones infix.
        operator = self.name[8:]
        args = [pretty_str(arg) for arg in self.arguments]
        if operator in CppOperator._BINARY_TOKENS:
            call = '{} {} {}'.format(args[0], operator, args[1])
        else:
            temp = ('<{}>'.format(','.join(self.template))
                    if self.template else '')
            args = ', '.join(args)
            if self.method_of:
                o = self.method_of
                # Calls through "operator->" render as pointer access.
                if isinstance(o, CppFunctionCall) and o.name == 'operator->':
                    call = '{}->{}{}({})'.format(o.arguments[0].pretty_str(),
                                                 self.name, temp, args)
                else:
                    call = '{}.{}{}({})'.format(o.pretty_str(),
                                                self.name, temp, args)
            elif self.is_constructor:
                call = 'new {}{}({})'.format(self.name, temp, args)
            else:
                call = '{}{}({})'.format(self.name, temp, args)
        return pretty.format(indent, call)
    def __repr__(self):
        temp = ('<{}>'.format(','.join(self.template))
                if self.template else '')
        args = ', '.join(map(str, self.arguments))
        if self.is_constructor:
            return '[{}] new {}({})'.format(self.result, self.name, args)
        if self.method_of:
            return '[{}] {}.{}{}({})'.format(self.result, self.method_of.name,
                                             self.name, temp, args)
        return '[{}] {}{}({})'.format(self.result, self.name, temp, args)
class CppDefaultArgument(CodeDefaultArgument, CppExpressionInterface):
    # Placeholder expression for an omitted (defaulted) call argument.
    def __init__(self, scope, parent, result, ctype=None):
        CodeDefaultArgument.__init__(self, scope, parent, result)
        self._trim_result(result, ctype=ctype)
# ----- Statement Entities ----------------------------------------------------
# Statement entities with no C++-specific additions.
CppStatement = CodeStatement
CppJumpStatement = CodeJumpStatement
CppExpressionStatement = CodeExpressionStatement
CppBlock = CodeBlock
CppDeclaration = CodeDeclaration
CppControlFlow = CodeControlFlow
CppConditional = CodeConditional
class CppLoop(CodeLoop):
    # C++ loop statement; self.name is one of 'while', 'do' or 'for'.
    def pretty_str(self, indent=0):
        """Return a human-readable (Python-ish) rendering of this loop."""
        spaces = ' ' * indent
        condition = pretty_str(self.condition)
        if self.name == 'while':
            pretty = '{}while ({}):\n'.format(spaces, condition)
            pretty += self.body.pretty_str(indent=indent + 2)
        elif self.name == 'do':
            # do-while prints the condition after the body.
            pretty = spaces + 'do:\n'
            pretty += self.body.pretty_str(indent=indent + 2)
            pretty += '\n{}while ({})'.format(spaces, condition)
        elif self.name == 'for':
            # Initializer and increment are optional in C++ for-loops.
            v = self.declarations.pretty_str() if self.declarations else ''
            i = self.increment.pretty_str(indent=1) if self.increment else ''
            pretty = '{}for ({}; {};{}):\n'.format(spaces, v, condition, i)
            pretty += self.body.pretty_str(indent=indent + 2)
        return pretty
# Remaining statement entities reused unchanged from the generic model.
CppSwitch = CodeSwitch
CppTryBlock = CodeTryBlock
CppCatchBlock = CodeTryBlock.CodeCatchBlock
| StarcoderdataPython |
4835787 | <filename>lldb/scripts/Python/prepare_binding_Python.py
"""
Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
See https://llvm.org/LICENSE.txt for license information.
SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
Python binding preparation script.
"""
# Python modules:
from __future__ import print_function
import logging
import os
import re
import shutil
import subprocess
import sys
import platform
class SwigSettings(object):
    """Groups the swig input/output file paths and staleness checks."""

    def __init__(self):
        self.extensions_file = None
        self.header_files = None
        self.input_file = None
        self.interface_files = None
        self.output_file = None
        self.safecast_file = None
        self.typemaps_file = None
        self.wrapper_file = None

    @classmethod
    def _any_files_newer(cls, files, check_mtime):
        """Returns if any of the given files has a newer modified time.

        @param cls the class
        @param files a list of zero or more file paths to check
        @param check_mtime the modification time to use as a reference.
        @return True if any file's modified time is newer than check_mtime.
        """
        return any(os.path.getmtime(path) > check_mtime for path in files)

    @classmethod
    def _file_newer(cls, path, check_mtime):
        """Tests how recently a file has been modified.

        @param cls the class
        @param path a file path to check
        @param check_mtime the modification time to use as a reference.
        @return True if the file's modified time is newer than check_mtime.
        """
        return os.path.getmtime(path) > check_mtime

    def output_out_of_date(self):
        """Returns whether the output file is out of date.

        Compares output file time to all the input files.
        @return True if any of the input files are newer than
        the output file, or if the output file doesn't exist;
        False otherwise.
        """
        if not os.path.exists(self.output_file):
            logging.info("will generate, missing binding output file")
            return True
        output_mtime = os.path.getmtime(self.output_file)
        # File-list inputs, checked in the original order so the first
        # matching reason is the one logged.
        grouped_inputs = (
            (self.header_files, "will generate, header files newer"),
            (self.interface_files, "will generate, interface files newer"),
        )
        for paths, message in grouped_inputs:
            if self._any_files_newer(paths, output_mtime):
                logging.info(message)
                return True
        # Single-file inputs.
        single_inputs = (
            (self.input_file, "will generate, swig input file newer"),
            (self.extensions_file, "will generate, swig extensions file newer"),
            (self.wrapper_file, "will generate, swig wrapper file newer"),
            (self.typemaps_file, "will generate, swig typemaps file newer"),
            (self.safecast_file, "will generate, swig safecast file newer"),
        )
        for path, message in single_inputs:
            if self._file_newer(path, output_mtime):
                logging.info(message)
                return True
        # Nothing is newer than the output file, so it is up to date.
        return False
def get_header_files(options):
    """Returns a list of paths to C++ header files for the LLDB API.

    These are the files that define the C++ API that will be wrapped by
    Python.

    @param options the dictionary of options parsed from the command line.
    @return a list of full paths to the include files used to define the
    public LLDB C++ API.
    """
    include_dir = os.path.join(options.src_root, "include", "lldb")
    api_dir = os.path.join(include_dir, "API")
    # Hand-picked headers from include/lldb that swig needs but that are
    # not easy to discover programmatically.
    fixed_headers = [
        "lldb-defines.h",
        "lldb-enumerations.h",
        "lldb-forward.h",
        "lldb-types.h",
    ]
    header_file_paths = [
        os.path.normcase(os.path.join(include_dir, header))
        for header in fixed_headers
    ]
    # The umbrella header for the whole public API.
    header_file_paths.append(os.path.normcase(os.path.join(api_dir, "LLDB.h")))
    # Every SB*.h header in the API directory.
    sb_header_regex = re.compile(r"^SB.+\.h$")
    header_file_paths.extend(
        os.path.normcase(os.path.join(api_dir, filename))
        for filename in os.listdir(api_dir)
        if sb_header_regex.match(filename)
    )
    logging.debug("found public API header file paths: %s", header_file_paths)
    return header_file_paths
def get_interface_files(options):
    """Returns a list of interface files used as input to swig.

    @param options the options dictionary parsed from the command line args.
    @return a list of full paths to the interface (.i) files used to describe
    the public API language binding.
    """
    interface_dir = os.path.join(options.src_root, "scripts", "interface")
    # Every *.i file in the interface directory is a swig input.
    interface_file_paths = [
        os.path.normcase(os.path.join(interface_dir, filename))
        for filename in os.listdir(interface_dir)
        if os.path.splitext(filename)[1] == ".i"]
    logging.debug("found swig interface files: %s", interface_file_paths)
    return interface_file_paths
def remove_ignore_enoent(filename):
    """Removes given file, ignoring error if it doesn't exist.

    @param filename the path of the file to remove.
    """
    import errno
    try:
        os.remove(filename)
    except OSError as error:
        # Re-raise anything other than "no such file".
        if error.errno != errno.ENOENT:
            raise
def do_swig_rebuild(options, dependency_file, config_build_dir, settings):
    """Generates Python bindings file from swig.

    This method will do a sys.exit() if something fails. If it returns to
    the caller, it succeeded.

    @param options the parsed command line options structure.
    @param dependency_file path to the bindings dependency file
    to be generated; otherwise, None if a dependency file is not
    to be generated.
    @param config_build_dir used as the output directory used by swig
    @param settings the SwigSettings that specify a number of aspects used
    to configure building the Python binding with swig (mostly paths)
    """
    if options.generate_dependency_file:
        temp_dep_file_path = dependency_file + ".tmp"
    # Build the SWIG args list
    is_darwin = options.target_platform == "Darwin"
    gen_deps = options.generate_dependency_file
    darwin_extras = ["-D__APPLE__"] if is_darwin else []
    deps_args = ["-MMD", "-MF", temp_dep_file_path] if gen_deps else []
    command = ([
        options.swig_executable,
        "-c++",
        "-shadow",
        "-python",
        "-threads",
        "-I" + os.path.normpath(os.path.join(options.src_root, "include")),
        "-I" + os.path.curdir,
        "-D__STDC_LIMIT_MACROS",
        "-D__STDC_CONSTANT_MACROS"
    ]
        + darwin_extras
        + deps_args
        + [
            "-outdir", config_build_dir,
            "-o", settings.output_file,
            settings.input_file
        ]
    )
    logging.info("running swig with: %r", command)
    # Execute swig
    process = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    # Wait for SWIG process to terminate
    swig_stdout, swig_stderr = process.communicate()
    return_code = process.returncode
    if return_code != 0:
        logging.error(
            "swig failed with error code %d: stdout=%s, stderr=%s",
            return_code,
            swig_stdout,
            swig_stderr)
        logging.error(
            "command line:\n%s", ' '.join(command))
        sys.exit(return_code)
    logging.info("swig generation succeeded")
    if swig_stdout is not None and len(swig_stdout) > 0:
        logging.info("swig output: %s", swig_stdout)
    # Move the dependency file we just generated to the proper location.
    if options.generate_dependency_file:
        if os.path.exists(temp_dep_file_path):
            shutil.move(temp_dep_file_path, dependency_file)
        else:
            # FIX: log message previously misspelled "depedency".
            logging.error(
                "failed to generate Python binding dependency file '%s'",
                temp_dep_file_path)
            if os.path.exists(dependency_file):
                # Delete the old one.
                os.remove(dependency_file)
            sys.exit(-10)
def get_python_module_path(options):
    """Returns the location where the lldb Python module should be placed.

    @param options dictionary of options parsed from the command line.
    @return the directory where the lldb module should be placed.
    """
    if options.framework:
        # OS X-style framework bundle: the module lives in the
        # LLDB.framework/Resources/Python subdirectory.
        return os.path.join(
            options.target_dir,
            "LLDB.framework",
            "Resources",
            "Python",
            "lldb")
    # Plain install: place the module inside the site-packages directory
    # (optionally rooted at the user-supplied prefix).
    from distutils.sysconfig import get_python_lib
    if options.prefix is not None:
        module_path = get_python_lib(True, False, options.prefix)
    else:
        module_path = get_python_lib(True, False)
    return os.path.normcase(os.path.join(module_path, "lldb"))
def main(options):
    """Prepares the Python language binding to LLDB.

    @param options the parsed command line argument dictionary
    """
    # Setup generated dependency file options.
    if options.generate_dependency_file:
        dependency_file = os.path.normcase(os.path.join(
            options.target_dir, "LLDBWrapPython.cpp.d"))
    else:
        dependency_file = None
    # Keep track of all the swig-related settings.
    settings = SwigSettings()
    # Determine the final binding file path.
    settings.output_file = os.path.normcase(
        os.path.join(options.target_dir, "LLDBWrapPython.cpp"))
    # Touch the output file (but don't really generate it) if python
    # is disabled.
    disable_python = os.getenv("LLDB_DISABLE_PYTHON", None)
    if disable_python is not None and disable_python == "1":
        remove_ignore_enoent(settings.output_file)
        # Touch the file.
        open(settings.output_file, 'w').close()
        logging.info(
            "Created empty python binding file due to LLDB_DISABLE_PYTHON "
            "being set")
        return
    # We also check the GCC_PREPROCESSOR_DEFINITIONS to see if it
    # contains LLDB_DISABLE_PYTHON. If so, we skip generating
    # the binding.
    gcc_preprocessor_defs = os.getenv("GCC_PREPROCESSOR_DEFINITIONS", None)
    if gcc_preprocessor_defs is not None:
        if re.search(r"LLDB_DISABLE_PYTHON", gcc_preprocessor_defs):
            remove_ignore_enoent(settings.output_file)
            # Touch the file
            open(settings.output_file, 'w').close()
            logging.info(
                "Created empty python binding file due to "
                "finding LLDB_DISABLE_PYTHON in GCC_PREPROCESSOR_DEFINITIONS")
            return
    # Setup paths used during swig invocation. The .swig inputs live next
    # to this script; the main lldb.swig comes from the source tree.
    settings.input_file = os.path.normcase(
        os.path.join(options.src_root, "scripts", "lldb.swig"))
    scripts_python_dir = os.path.dirname(os.path.realpath(__file__))
    settings.extensions_file = os.path.normcase(
        os.path.join(scripts_python_dir, "python-extensions.swig"))
    settings.wrapper_file = os.path.normcase(
        os.path.join(scripts_python_dir, "python-wrapper.swig"))
    settings.typemaps_file = os.path.normcase(
        os.path.join(scripts_python_dir, "python-typemaps.swig"))
    settings.safecast_file = os.path.normcase(
        os.path.join(scripts_python_dir, "python-swigsafecast.swig"))
    settings.header_files = get_header_files(options)
    settings.interface_files = get_interface_files(options)
    generate_output = settings.output_out_of_date()
    # Determine where to put the module.
    python_module_path = get_python_module_path(options)
    logging.info("python module path: %s", python_module_path)
    # Handle the configuration build dir.
    if options.config_build_dir is not None:
        config_build_dir = options.config_build_dir
    else:
        config_build_dir = python_module_path
    # Allow missing/non-link _lldb.so to force regeneration.
    if not generate_output:
        # Ensure the _lldb.so file exists.
        so_path = os.path.join(python_module_path, "_lldb.so")
        if not os.path.exists(so_path) or not os.path.islink(so_path):
            logging.info("_lldb.so doesn't exist or isn't a symlink")
            generate_output = True
    # Allow missing __init__.py to force regeneration.
    if not generate_output:
        # Ensure the __init__.py for the lldb module can be found.
        init_path = os.path.join(python_module_path, "__init__.py")
        if not os.path.exists(init_path):
            logging.info("__init__.py doesn't exist")
            generate_output = True
    if not generate_output:
        logging.info(
            "Skipping Python binding generation: everything is up to date")
        return
    # Generate the Python binding with swig.
    logging.info("Python binding is out of date, regenerating")
    do_swig_rebuild(options, dependency_file, config_build_dir, settings)
# This script can be called by another Python script by calling the main()
# function directly
if __name__ == "__main__":
    # Not a standalone entry point: a driver must call main(options).
    print("Script cannot be called directly.")
    sys.exit(-1)
| StarcoderdataPython |
5001217 | <reponame>pianoft/FlaskPrimeFactorization
# Flask application configuration constants (loaded via app.config).
# FIX: removed dataset-dump residue (" | StarcoderdataPython |") that was
# fused onto the last line and broke the file's syntax.
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///flask_blog.db'
# NOTE(review): Flask-SQLAlchemy's documented setting is named
# SQLALCHEMY_TRACK_MODIFICATIONS; this longer key would be ignored by the
# extension -- confirm whether the app reads it itself before renaming.
SQLALCHEMY_DATABASE_TRACK_MODIFICATIONS = True
# HACK: hard-coded secret and credentials -- move to environment variables
# before deploying anywhere real.
SECRET_KEY = 'secret key'
USERNAME = 'a'
PASSWORD = ''
12847600 | <gh_stars>1-10
import os
from argparse import ArgumentParser
import requests
import json
import traceback
# Location names exactly as the World Bank API returns them in its country
# dimension: real countries plus aggregates (regions, income groups, IDA/IBRD
# classifications). Used verbatim as named entities in the generated schemas.
# NOTE(review): some entries look garbled at the source (e.g. "Least
# developed countries,ssification") or carry trailing spaces; they are kept
# byte-for-byte because consumers match these strings exactly.
LOCATIONS = [
    "Aruba",
    "Afghanistan",
    "Africa",
    "Angola",
    "Albania",
    "Andorra",
    "Andean Region",
    "Arab World",
    "United Arab Emirates",
    "Argentina",
    "Armenia",
    "American Samoa",
    "Antigua and Barbuda",
    "Australia",
    "Austria",
    "Azerbaijan",
    "Burundi",
    "East Asia & Pacific (IBRD-only countries)",
    "Europe & Central Asia (IBRD-only countries)",
    "Belgium",
    "Benin",
    "Burkina Faso",
    "Bangladesh",
    "Bulgaria",
    "IBRD countries classified as high income",
    "Bahrain",
    "Bahamas, The",
    "Bosnia and Herzegovina",
    "Latin America & the Caribbean (IBRD-only countries)",
    "Belarus",
    "Belize",
    "Middle East & North Africa (IBRD-only countries)",
    "Bermuda",
    "Bolivia",
    "Brazil",
    "Barbados",
    "Brunei Darussalam",
    "Sub-Saharan Africa (IBRD-only countries)",
    "Bhutan",
    "Botswana",
    "Sub-Saharan Africa (IFC classification)",
    "Central African Republic",
    "Canada",
    "East Asia and the Pacific (IFC classification)",
    "Central Europe and the Baltics",
    "Europe and Central Asia (IFC classification)",
    "Switzerland",
    "Channel Islands",
    "Chile",
    "China",
    "Cote d'Ivoire",
    "Latin America and the Caribbean (IFC classification)",
    "Middle East and North Africa (IFC classification)",
    "Cameroon",
    "Congo, Dem. Rep.",
    "Congo, Rep.",
    "Colombia",
    "Comoros",
    "Cabo Verde",
    "Costa Rica",
    "South Asia (IFC classification)",
    "Caribbean small states",
    "Cuba",
    "Curacao",
    "Cayman Islands",
    "Cyprus",
    "Czech Republic",
    "East Asia & Pacific (IDA-eligible countries)",
    "Europe & Central Asia (IDA-eligible countries)",
    "Germany",
    "IDA countries classified as Fragile Situations",
    "Djibouti",
    "Latin America & the Caribbean (IDA-eligible countries)",
    "Dominica",
    "Middle East & North Africa (IDA-eligible countries)",
    "IDA countries not classified as Fragile Situations",
    "Denmark",
    "IDA countries in Sub-Saharan Africa not classified as fragile situations ",
    "Dominican Republic",
    "South Asia (IDA-eligible countries)",
    "IDA countries in Sub-Saharan Africa classified as fragile situations ",
    "Sub-Saharan Africa (IDA-eligible countries)",
    "IDA total, excluding Sub-Saharan Africa",
    "Algeria",
    "East Asia & Pacific (excluding high income)",
    "Early-demographic dividend",
    "East Asia & Pacific",
    "Europe & Central Asia (excluding high income)",
    "Europe & Central Asia",
    "Ecuador",
    "Egypt, Arab Rep.",
    "Euro area",
    "Eritrea",
    "Spain",
    "Estonia",
    "Ethiopia",
    "European Union",
    "Fragile and conflict affected situations",
    "Finland",
    "Fiji",
    "France",
    "Faroe Islands",
    "Micronesia, Fed. Sts.",
    "IDA countries classified as fragile situations, excluding Sub-Saharan Africa",
    "Gabon",
    "United Kingdom",
    "Georgia",
    "Ghana",
    "Gibraltar",
    "Guinea",
    "Gambia, The",
    "Guinea-Bissau",
    "Equatorial Guinea",
    "Greece",
    "Grenada",
    "Greenland",
    "Guatemala",
    "Guam",
    "Guyana",
    "High income",
    "Hong Kong SAR, China",
    "Honduras",
    "Heavily indebted poor countries (HIPC)",
    "Croatia",
    "Haiti",
    "Hungary",
    "IBRD, including blend",
    "IBRD only",
    "IDA & IBRD total",
    "IDA total",
    "IDA blend",
    "Indonesia",
    "IDA only",
    "Isle of Man",
    "India",
    "Not classified",
    "Ireland",
    "Iran, Islamic Rep.",
    "Iraq",
    "Iceland",
    "Israel",
    "Italy",
    "Jamaica",
    "Jordan",
    "Japan",
    "Kazakhstan",
    "Kenya",
    "Kyrgyz Republic",
    "Cambodia",
    "Kiribati",
    "St. Kitts and Nevis",
    "Korea, Rep.",
    "Kuwait",
    "Latin America & Caribbean (excluding high income)",
    "Lao PDR",
    "Lebanon",
    "Liberia",
    "Libya",
    "St. Lucia",
    "Latin America & Caribbean ",
    "Latin America and the Caribbean",
    "Least developed countries,ssification",
    "Low income",
    "Liechtenstein",
    "Sri Lanka",
    "Lower middle income",
    "Low & middle income",
    "Lesotho",
    "Late-demographic dividend",
    "Lithuania",
    "Luxembourg",
    "Latvia",
    "Macao SAR, China",
    "St. Martin (French part)",
    "Morocco",
    "Central America",
    "Monaco",
    "Moldova",
    "Middle East (developing only)",
    "Madagascar",
    "Maldives",
    "Middle East & North Africa",
    "Mexico",
    "Marshall Islands",
    "Middle income",
    "Macedonia, FYR",
    "Mali",
    "Malta",
    "Myanmar",
    "Middle East & North Africa (excluding high income)",
    "Montenegro",
    "Mongolia",
    "Northern Mariana Islands",
    "Mozambique",
    "Mauritania",
    "Mauritius",
    "Malawi",
    "Malaysia",
    "North America",
    "North Africa",
    "Namibia",
    "New Caledonia",
    "Niger",
    "Nigeria",
    "Nicaragua",
    "Netherlands",
    "Non-resource rich Sub-Saharan Africa countries, of which landlocked",
    "Norway",
    "Nepal",
    "Non-resource rich Sub-Saharan Africa countries",
    "Nauru",
    "IDA countries not classified as fragile situations, excluding Sub-Saharan Africa",
    "New Zealand",
    "OECD members",
    "Oman",
    "Other small states",
    "Pakistan",
    "Panama",
    "Peru",
    "Philippines",
    "Palau",
    "Papua New Guinea",
    "Poland",
    "Pre-demographic dividend",
    "Puerto Rico",
    "Korea, Dem. People’s Rep.",
    "Portugal",
    "Paraguay",
    "West Bank and Gaza",
    "Pacific island small states",
    "Post-demographic dividend",
    "French Polynesia",
    "Qatar",
    "Romania",
    "Resource rich Sub-Saharan Africa countries",
    "Resource rich Sub-Saharan Africa countries, of which oil exporters",
    "Russian Federation",
    "Rwanda",
    "South Asia",
    "Saudi Arabia",
    "Southern Cone",
    "Sudan",
    "Senegal",
    "Singapore",
    "Solomon Islands",
    "Sierra Leone",
    "El Salvador",
    "San Marino",
    "Somalia",
    "Serbia",
    "Sub-Saharan Africa (excluding high income)",
    "South Sudan",
    "Sub-Saharan Africa ",
    "Small states",
    "Sao Tome and Principe",
    "Suriname",
    "Slovak Republic",
    "Slovenia",
    "Sweden",
    "Eswatini",
    "Sint Maarten (Dutch part)",
    "Sub-Saharan Africa excluding South Africa",
    "Seychelles",
    "Syrian Arab Republic",
    "Turks and Caicos Islands",
    "Chad",
    "East Asia & Pacific (IDA & IBRD countries)",
    "Europe & Central Asia (IDA & IBRD countries)",
    "Togo",
    "Thailand",
    "Tajikistan",
    "Turkmenistan",
    "Latin America & the Caribbean (IDA & IBRD countries)",
    "Timor-Leste",
    "Middle East & North Africa (IDA & IBRD countries)",
    "Tonga",
    "South Asia (IDA & IBRD)",
    "Sub-Saharan Africa (IDA & IBRD countries)",
    "Trinidad and Tobago",
    "Tunisia",
    "Turkey",
    "Tuvalu",
    "Taiwan, China",
    "Tanzania",
    "Uganda",
    "Ukraine",
    "Upper middle income",
    "Uruguay",
    "United States",
    "Uzbekistan",
    "St. Vincent and the Grenadines",
    "Venezuela, RB",
    "British Virgin Islands",
    "Virgin Islands (U.S.)",
    "Vietnam",
    "Vanuatu",
    "World",
    "Samoa",
    "Kosovo",
    "Sub-Saharan Africa excluding South Africa and Nigeria",
    "Yemen, Rep.",
    "South Africa",
    "Zambia",
    "Zimbabwe"
]
def getAllIndicatorList():
    """Fetch the complete indicator list from the World Bank API.

    Queries page 1 first to learn the total count, then re-requests with
    per_page set to that total so everything arrives in one response.
    Returns the list element of the API's [metadata, rows] payload.
    """
    url = "https://api.worldbank.org/v2/indicators?format=json&page=1"
    first_page = requests.get(url).json()
    total = first_page[0]['total']
    url2 = "https://api.worldbank.org/v2/indicators?format=json&page=1&per_page=" + str(total)
    return requests.get(url2).json()[1]
def generate_json_schema(dst_path):
    """Generate one dataset-description JSON file per World Bank indicator.

    Files are written to <dst_path>/worldbank_schema/<id>_description.json,
    or to ./worldbank_schema when *dst_path* is None. Per-indicator failures
    are logged and skipped so one bad indicator does not abort the run.

    @param dst_path destination directory, or None for the current directory.
    """
    unique_urls_str = getAllIndicatorList()
    for commondata in unique_urls_str:
        try:
            urldata = "https://api.worldbank.org/v2/countries/indicators/" + commondata['id'] + "?format=json"
            resdata = requests.get(urldata)
            data_ind = resdata.json()
            print("Generating schema for Trading economics", commondata['name'])
            schema = {}
            schema["title"] = commondata['name']
            schema["description"] = commondata['sourceNote']
            schema["url"] = "https://api.worldbank.org/v2/indicators/" + commondata['id'] + "?format=json"
            schema["keywords"] = [i for i in commondata['name'].split()]
            schema["date_updated"] = data_ind[0]["lastupdated"] if data_ind else None
            schema["license"] = None
            schema["provenance"] = {"source": "http://worldbank.org"}
            schema["original_identifier"] = commondata['id']
            schema["materialization"] = {
                "python_path": "worldbank_materializer",
                "arguments": {
                    "url": "https://api.worldbank.org/v2/indicators/" + commondata['id'] + "?format=json"
                }
            }
            # Column descriptions for the materialized indicator table.
            schema['variables'] = []
            first_col = {
                "name": "indicator_id",
                "description": "id is identifier of an indicator in worldbank datasets",
                "semantic_type": ["https://metadata.datadrivendiscovery.org/types/CategoricalData"]
            }
            second_col = {
                "name": "indicator_value",
                "description": "name of an indicator in worldbank datasets",
                "semantic_type": ["http://schema.org/Text"]
            }
            third_col = {
                "name": "unit",
                "description": "unit of value returned by this indicator for a particular country",
                "semantic_type": ["https://metadata.datadrivendiscovery.org/types/CategoricalData"]
            }
            fourth_col = {
                "name": "sourceNote",
                "description": "Long description of the indicator",
                "semantic_type": ["http://schema.org/Text"]
            }
            fifth_col = {
                "name": "sourceOrganization",
                "description": "Source organization from where Worldbank acquired this data",
                "semantic_type": ["http://schema.org/Text"]
            }
            sixth_col = {
                "name": "country_value",
                "description": "Country for which idicator value is returned",
                "semantic_type": ["https://metadata.datadrivendiscovery.org/types/Location"],
                "named_entity": LOCATIONS
            }
            seventh_col = {
                "name": "countryiso3code",
                "description": "Country iso code for which idicator value is returned",
                "semantic_type": ["https://metadata.datadrivendiscovery.org/types/Location"]
            }
            eighth_col = {
                "name": "date",
                "description": "date for which indictor value is returned for a particular country",
                "semantic_type": ["https://metadata.datadrivendiscovery.org/types/Time"],
                "temporal_coverage": {"start": "1950", "end": "2100"}
            }
            schema['variables'].append(first_col)
            schema['variables'].append(second_col)
            schema['variables'].append(third_col)
            schema['variables'].append(fourth_col)
            schema['variables'].append(fifth_col)
            schema['variables'].append(sixth_col)
            schema['variables'].append(seventh_col)
            schema['variables'].append(eighth_col)
            # FIX: the no-dst branch previously created 'WorldBank_schema'
            # but wrote into 'worldbank_schema', crashing on case-sensitive
            # filesystems; both sides now use the same directory name.
            if dst_path:
                os.makedirs(os.path.join(dst_path, 'worldbank_schema'), exist_ok=True)
                schema_file = os.path.join(dst_path, 'worldbank_schema',
                                           "{}_description.json".format(commondata['id']))
            else:
                os.makedirs('worldbank_schema', exist_ok=True)
                schema_file = os.path.join('worldbank_schema',
                                           "{}_description.json".format(commondata['id']))
            with open(schema_file, "w") as fp:
                json.dump(schema, fp, indent=2)
        except Exception:
            # FIX: was a bare `except:` which also swallowed SystemExit and
            # KeyboardInterrupt; keep going only on ordinary errors.
            traceback.print_exc()
if __name__ == '__main__':
    # CLI entry point: -o/--dst selects the destination directory for the
    # generated schema files (defaults to the current directory when absent).
    parser = ArgumentParser()
    parser.add_argument("-o", "--dst", action="store", type=str, dest="dst_path")
    args, _ = parser.parse_known_args()
    generate_json_schema(args.dst_path)
| StarcoderdataPython |
4823803 | <gh_stars>1-10
#!/usr/bin/python
# -*- coding: utf-8 -*-
from flask import Blueprint, render_template
from flask_login import login_required
from vhoops import authorize, user_groups
from vhoops.modules.users.forms.new_user import NewUser
from vhoops.modules.teams.api.models import Teams
users_router = Blueprint("users_router", __name__)


@users_router.route("/users", methods=["GET"])
@authorize.in_group("superuser")
@login_required
def users_page():
    """Render the user-administration page with a pre-populated NewUser form.

    Restricted to logged-in members of the "superuser" group.
    """
    form = NewUser()
    # Offer every existing team and every known user group as choices.
    team_choices = ((team.as_dict['name'], team.as_dict['name'])
                    for team in Teams.query.all())
    form.teams.choices.extend(team_choices)
    group_choices = ((group['name'], group['name']) for group in user_groups)
    form.group.choices.extend(group_choices)
    return render_template(
        "users/users.html",
        form=form
    )
| StarcoderdataPython |
1870004 | <gh_stars>0
# -*- coding: utf-8 -*-
"""Trace GUI."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import logging
from pathlib import Path
import numpy as np
from phylib.io.model import load_raw_data
from phylib.utils import Bunch
from phy.apps.template import get_template_params
from phy.cluster.views.trace import TraceView, select_traces
from phy.gui import create_app, run_app, GUI
logger = logging.getLogger(__name__)
#------------------------------------------------------------------------------
# Trace GUI
#------------------------------------------------------------------------------
def create_trace_gui(dat_path, **kwargs):
    """Create the Trace GUI.

    Parameters
    ----------
    dat_path : str or Path
        Path to the raw data file, or to a ``params.py`` file describing it.
    sample_rate : float
        The data sampling rate, in Hz.
    n_channels_dat : int
        The number of columns in the raw data file.
    dtype : str
        The NumPy data type of the raw binary file.
    offset : int, optional
        Byte offset of the data within the file (defaults to 0).
    order : str, optional
        Order of the data file: `C` or `F` (Fortran).
    """
    gui_name = 'TraceGUI'
    dat_path = Path(dat_path)
    # Support passing a params.py file: recurse with the parameters it defines.
    if dat_path.suffix == '.py':
        params = get_template_params(str(dat_path))
        return create_trace_gui(next(iter(params.pop('dat_path'))), **params)
    sample_rate = float(kwargs['sample_rate'])
    assert sample_rate > 0.
    n_channels_dat = int(kwargs['n_channels_dat'])
    dtype = np.dtype(kwargs['dtype'])
    # FIX: 'offset' was accessed with kwargs['offset'] (KeyError when absent)
    # although the `or 0` already treated None as "use 0"; use .get() so a
    # missing key falls back to 0 as well, matching how 'order' is read.
    offset = int(kwargs.get('offset') or 0)
    order = kwargs.get('order', None)
    # Memmap the raw data file.
    data = load_raw_data(
        path=dat_path,
        n_channels_dat=n_channels_dat,
        dtype=dtype,
        offset=offset,
        order=order,
    )
    duration = data.shape[0] / sample_rate
    create_app()
    gui = GUI(name=gui_name, subtitle=dat_path.resolve(), enable_threading=False)
    gui.set_default_actions()

    def _get_traces(interval):
        # Lazily slice the memmapped traces for the requested interval.
        return Bunch(
            data=select_traces(
                data, interval, sample_rate=sample_rate))

    # TODO: load channel information
    view = TraceView(
        traces=_get_traces,
        n_channels=n_channels_dat,
        sample_rate=sample_rate,
        duration=duration,
        enable_threading=False,
    )
    view.attach(gui)
    return gui
def trace_gui(dat_path, **kwargs):  # pragma: no cover
    """Launch the Trace GUI.

    Parameters
    ----------
    dat_path : str or Path
        Path to the raw data file
    sample_rate : float
        The data sampling rate, in Hz.
    n_channels_dat : int
        The number of columns in the raw data file.
    dtype : str
        The NumPy data type of the raw binary file.
    order : str
        Order of the data file: `C` or `F` (Fortran).
    """
    gui = create_trace_gui(dat_path, **kwargs)
    gui.show()
    # Blocks in the Qt event loop until the window is closed.
    run_app()
    gui.close()
| StarcoderdataPython |
11204004 | # coding:utf-8
class HtmlStorer(object):
    """Placeholder for persisting crawled HTML; both methods are unimplemented stubs."""

    def store_db(self, html):
        """Stub: persist *html* to a database (not implemented)."""
        pass

    def store_file(self, html):
        """Stub: persist *html* to a file (not implemented)."""
        pass
| StarcoderdataPython |
1745503 | <reponame>a-fox/indy-node
from copy import deepcopy
from typing import List
import base58
from indy_common.auth import Authoriser
from indy_common.constants import NYM, ROLE, ATTRIB, SCHEMA, CLAIM_DEF, REF, \
GET_NYM, GET_ATTR, GET_SCHEMA, GET_CLAIM_DEF, SIGNATURE_TYPE, REVOC_REG_DEF
from indy_common.roles import Roles
from indy_common.state import domain
from indy_common.types import Request
from plenum.common.constants import TXN_TYPE, TARGET_NYM, RAW, ENC, HASH, \
VERKEY, DATA, NAME, VERSION, ORIGIN, \
TXN_TIME
from plenum.common.exceptions import InvalidClientRequest, \
UnauthorizedClientRequest, UnknownIdentifier, InvalidClientMessageException
from plenum.common.types import f
from plenum.common.constants import TRUSTEE
from plenum.server.domain_req_handler import DomainRequestHandler as PHandler
from stp_core.common.log import getlogger
logger = getlogger()
class DomainReqHandler(PHandler):
    # Transaction types this handler may write to the domain ledger.
    write_types = {NYM, ATTRIB, SCHEMA, CLAIM_DEF, REVOC_REG_DEF}
    # Read-only request types served from state (see query_handlers).
    query_types = {GET_NYM, GET_ATTR, GET_SCHEMA, GET_CLAIM_DEF}
def __init__(self, ledger, state, config, requestProcessor,
             idrCache, attributeStore, bls_store):
    """Request handler for the Indy domain ledger.

    :param idrCache: identity (NYM) cache kept in sync with state
    :param attributeStore: key/value store holding raw ATTRIB payloads
    :param bls_store: BLS multi-signature store used for state proofs
    """
    super().__init__(ledger, state, config, requestProcessor, bls_store)
    self.idrCache = idrCache
    self.attributeStore = attributeStore
    # Dispatch table mapping read-request types to their handlers.
    self.query_handlers = {
        GET_NYM: self.handleGetNymReq,
        GET_ATTR: self.handleGetAttrsReq,
        GET_SCHEMA: self.handleGetSchemaReq,
        GET_CLAIM_DEF: self.handleGetClaimDefReq,
    }
def onBatchCreated(self, stateRoot):
    # Register the uncommitted state root so the identity cache can track
    # this in-flight batch until it is committed or rejected.
    self.idrCache.currentBatchCreated(stateRoot)
def onBatchRejected(self):
    # Roll the identity cache back to the last committed batch.
    self.idrCache.batchRejected()
def _updateStateWithSingleTxn(self, txn, isCommitted=False):
    """Apply a single transaction to the domain state, dispatching on type."""
    typ = txn.get(TXN_TYPE)
    nym = txn.get(TARGET_NYM)
    if typ == NYM:
        # Only identifier/seq-no/time plus the optional role and verkey
        # fields are carried into the nym record.
        data = {
            f.IDENTIFIER.nm: txn.get(f.IDENTIFIER.nm),
            f.SEQ_NO.nm: txn.get(f.SEQ_NO.nm),
            TXN_TIME: txn.get(TXN_TIME)
        }
        if ROLE in txn:
            data[ROLE] = txn.get(ROLE)
        if VERKEY in txn:
            data[VERKEY] = txn.get(VERKEY)
        self.updateNym(nym, data, isCommitted=isCommitted)
    elif typ == ATTRIB:
        self._addAttr(txn)
    elif typ == SCHEMA:
        self._addSchema(txn)
    elif typ == CLAIM_DEF:
        self._addClaimDef(txn)
    elif typ == REVOC_REG_DEF:
        self._addRevocDef(txn)
    else:
        # Unknown write types are ignored rather than rejected here;
        # validation should have filtered them earlier.
        logger.debug(
            'Cannot apply request of type {} to state'.format(typ))
def commit(self, txnCount, stateRoot, txnRoot) -> List:
    """Commit a batch to the ledger and promote it in the identity cache."""
    r = super().commit(txnCount, stateRoot, txnRoot)
    # The identity cache keys batches by the raw (base58-decoded) root bytes.
    stateRoot = base58.b58decode(stateRoot.encode())
    self.idrCache.onBatchCommitted(stateRoot)
    return r
def doStaticValidation(self, request: Request):
    """Stateless (schema-level) validation performed before any state reads."""
    identifier, req_id, operation = request.identifier, request.reqId, request.operation
    if operation[TXN_TYPE] == NYM:
        self._doStaticValidationNym(identifier, req_id, operation)
    if operation[TXN_TYPE] == ATTRIB:
        self._doStaticValidationAttrib(identifier, req_id, operation)
def _doStaticValidationNym(self, identifier, reqId, operation):
    """A NYM request must carry a target nym and a recognized role value."""
    role = operation.get(ROLE)
    nym = operation.get(TARGET_NYM)
    if not nym:
        raise InvalidClientRequest(identifier, reqId,
                                   "{} needs to be present".
                                   format(TARGET_NYM))
    if not Authoriser.isValidRole(role):
        raise InvalidClientRequest(identifier, reqId,
                                   "{} not a valid role".
                                   format(role))
def _doStaticValidationAttrib(self, identifier, reqId, operation):
    """An ATTRIB request must carry exactly one of RAW / ENC / HASH."""
    if not self._validate_attrib_keys(operation):
        raise InvalidClientRequest(identifier, reqId,
                                   '{} should have one and only one of '
                                   '{}, {}, {}'
                                   .format(ATTRIB, RAW, ENC, HASH))
def validate(self, req: Request, config=None):
    """Stateful validation: dispatch on txn type; raises on rejection."""
    op = req.operation
    typ = op[TXN_TYPE]
    if typ == NYM:
        self._validateNym(req)
    elif typ == ATTRIB:
        self._validateAttrib(req)
    elif typ == SCHEMA:
        self._validate_schema(req)
    elif typ == CLAIM_DEF:
        self._validate_claim_def(req)
    elif typ == REVOC_REG_DEF:
        self._validate_revoc_reg_def(req)
@staticmethod
def _validate_attrib_keys(operation):
    """Return True iff exactly one of RAW, ENC, HASH is present in *operation*."""
    present = [field for field in (RAW, ENC, HASH) if field in operation]
    return len(present) == 1
def _validateNym(self, req: Request):
    """Validate a NYM write against the current (uncommitted) identity cache."""
    origin = req.identifier
    op = req.operation
    try:
        originRole = self.idrCache.getRole(
            origin, isCommitted=False) or None
    except BaseException:
        # Sender DID unknown -- cannot establish its role.
        raise UnknownIdentifier(
            req.identifier,
            req.reqId)
    nymData = self.idrCache.getNym(op[TARGET_NYM], isCommitted=False)
    if not nymData:
        # If nym does not exist
        self._validateNewNym(req, op, originRole)
    else:
        self._validateExistingNym(req, op, originRole, nymData)
def _validateNewNym(self, req: Request, op, originRole):
    """Check the sender's role is authorized to create a nym with this role."""
    role = op.get(ROLE)
    r, msg = Authoriser.authorised(NYM, ROLE, originRole,
                                   oldVal=None, newVal=role)
    if not r:
        raise UnauthorizedClientRequest(
            req.identifier,
            req.reqId,
            "{} cannot add {}".format(
                Roles.nameFromValue(originRole),
                Roles.nameFromValue(role))
        )
def _validateExistingNym(self, req: Request, op, originRole, nymData):
    """Authorize an update to an existing nym: trustee-or-owner gate first,
    then per-field authorization of every changed attribute."""
    origin = req.identifier
    owner = self.idrCache.getOwnerFor(op[TARGET_NYM], isCommitted=False)
    is_owner = origin == owner
    # Only a trustee or the nym's owner may touch an existing nym at all.
    if originRole != TRUSTEE and not is_owner:
        raise UnauthorizedClientRequest(
            req.identifier, req.reqId,
            '{} is neither Trustee nor owner of {}'
            .format(origin, op[TARGET_NYM]))
    # Each changed field must independently pass the authorization rules.
    for field in (ROLE, VERKEY):
        if field not in op:
            continue
        new_val = op[field]
        old_val = nymData.get(field)
        if old_val == new_val:
            continue
        authorized, _ = Authoriser.authorised(
            NYM, field, originRole,
            oldVal=old_val, newVal=new_val,
            isActorOwnerOfSubject=is_owner)
        if not authorized:
            raise UnauthorizedClientRequest(
                req.identifier, req.reqId,
                "{} cannot update {}".format(
                    Roles.nameFromValue(originRole), field))
def _validateAttrib(self, req: Request):
    """An ATTRIB's target nym must exist, and only the identity itself or
    its owner/guardian may attach attributes to it."""
    origin = req.identifier
    op = req.operation
    if not (not op.get(TARGET_NYM) or
            self.hasNym(op[TARGET_NYM], isCommitted=False)):
        raise InvalidClientRequest(origin, req.reqId,
                                   '{} should be added before adding '
                                   'attribute for it'.
                                   format(TARGET_NYM))
    if op.get(TARGET_NYM) and op[TARGET_NYM] != req.identifier and \
            not self.idrCache.getOwnerFor(op[TARGET_NYM],
                                          isCommitted=False) == origin:
        raise UnauthorizedClientRequest(
            req.identifier,
            req.reqId,
            "Only identity owner/guardian can add attribute "
            "for that identity")
def _validate_schema(self, req: Request):
    # We can not add a Schema with an already existent NAME and VERSION,
    # since a Schema needs to be identified by seqNo.
    identifier = req.identifier
    operation = req.operation
    schema_name = operation[DATA][NAME]
    schema_version = operation[DATA][VERSION]
    schema, _, _, _ = self.getSchema(
        author=identifier,
        schemaName=schema_name,
        schemaVersion=schema_version
    )
    if schema:
        raise InvalidClientRequest(identifier, req.reqId,
                                   '{} can have one and only one SCHEMA with '
                                   'name {} and version {}'
                                   .format(identifier, schema_name, schema_version))
def _validate_claim_def(self, req: Request):
    # We can not add a Claim Def with an existent ISSUER_DID,
    # since a Claim Def needs to be identified by seqNo.
    identifier = req.identifier
    operation = req.operation
    schema_ref = operation[REF]
    signature_type = operation[SIGNATURE_TYPE]
    claim_def, _, _, _ = self.getClaimDef(
        author=identifier,
        schemaSeqNo=schema_ref,
        signatureType=signature_type
    )
    if claim_def:
        raise InvalidClientRequest(identifier, req.reqId,
                                   '{} can have one and only one CLAIM_DEF for '
                                   'and schema ref {} and signature type {}'
                                   .format(identifier, schema_ref, signature_type))
def _validate_revoc_reg_def(self, req: Request):
    """Validate a REVOC_REG_DEF write request.

    FIX: the identifying fields were checked with ``assert``, which is
    stripped under ``python -O`` and surfaces as an AssertionError instead
    of a client error; raise InvalidClientRequest like the sibling
    validators do.
    """
    # TODO: also check that the CRED_DEF referenced by this REVOC_REG_DEF exists.
    operation = req.operation
    missing = [field for field in ("id", "type", "tag")
               if not operation.get(field)]
    if missing:
        raise InvalidClientRequest(
            req.identifier, req.reqId,
            "{} must be present in a REVOC_REG_DEF request".format(
                ", ".join(missing)))
def updateNym(self, nym, data, isCommitted=True):
    """Write nym data to state (via super) and mirror it into the cache."""
    updatedData = super().updateNym(nym, data, isCommitted=isCommitted)
    txn_time = data.get(TXN_TIME)
    # Keep the identity cache consistent with the merged state record.
    self.idrCache.set(nym,
                      seqNo=data[f.SEQ_NO.nm],
                      txnTime=txn_time,
                      ta=updatedData.get(f.IDENTIFIER.nm),
                      role=updatedData.get(ROLE),
                      verkey=updatedData.get(VERKEY),
                      isCommitted=isCommitted)
def hasNym(self, nym, isCommitted: bool = True):
    """Return True if *nym* is known to the identity cache."""
    return self.idrCache.hasNym(nym, isCommitted=isCommitted)
def handleGetNymReq(self, request: Request):
    """Serve GET_NYM: look the nym up in the committed cache and return its
    serialized record (or None) together with a state proof."""
    nym = request.operation[TARGET_NYM]
    nym_data = self.idrCache.getNym(nym, isCommitted=True)
    path = domain.make_state_path_for_nym(nym)
    # The proof is produced for the same path whether or not the nym exists.
    proof = self.make_proof(path)
    if nym_data:
        nym_data[TARGET_NYM] = nym
        data = self.stateSerializer.serialize(nym_data)
        seq_no = nym_data[f.SEQ_NO.nm]
        update_time = nym_data[TXN_TIME]
    else:
        data = seq_no = update_time = None
    result = self.make_result(request=request,
                              data=data,
                              last_seq_no=seq_no,
                              update_time=update_time,
                              proof=proof)
    result.update(request.operation)
    return result
def handleGetSchemaReq(self, request: Request):
    """Serve GET_SCHEMA: fetch a schema by author DID, name and version."""
    author_did = request.operation[TARGET_NYM]
    schema_name = request.operation[DATA][NAME]
    schema_version = request.operation[DATA][VERSION]
    schema, lastSeqNo, lastUpdateTime, proof = self.getSchema(
        author=author_did,
        schemaName=schema_name,
        schemaVersion=schema_version
    )
    # SCHEMA stores NAME and VERSION in the state key rather than the state
    # value, so they must be copied back into the returned payload.
    if schema is None:
        schema = {}
    schema.update({
        NAME: schema_name,
        VERSION: schema_version
    })
    return self.make_result(request=request,
                            data=schema,
                            last_seq_no=lastSeqNo,
                            update_time=lastUpdateTime,
                            proof=proof)
def handleGetClaimDefReq(self, request: Request):
    """Serve GET_CLAIM_DEF: fetch claim-def keys by origin DID, schema
    reference and signature type."""
    signatureType = request.operation[SIGNATURE_TYPE]
    keys, lastSeqNo, lastUpdateTime, proof = self.getClaimDef(
        author=request.operation[ORIGIN],
        schemaSeqNo=request.operation[REF],
        signatureType=signatureType
    )
    result = self.make_result(request=request,
                              data=keys,
                              last_seq_no=lastSeqNo,
                              update_time=lastUpdateTime,
                              proof=proof)
    # Echo the signature type back so the client can correlate the reply.
    result[SIGNATURE_TYPE] = signatureType
    return result
def handleGetAttrsReq(self, request: Request):
    """Serve a GET_ATTR query; the attribute is addressed by exactly one
    of RAW / ENC / HASH in the operation.

    :raises InvalidClientRequest: when not exactly one key type is given
    """
    if not self._validate_attrib_keys(request.operation):
        raise InvalidClientRequest(request.identifier, request.reqId,
                                   '{} should have one and only one of '
                                   '{}, {}, {}'
                                   .format(ATTRIB, RAW, ENC, HASH))
    nym = request.operation[TARGET_NYM]
    if RAW in request.operation:
        attr_type = RAW
    elif ENC in request.operation:
        # If attribute is encrypted, it will be queried by its hash
        attr_type = ENC
    else:
        attr_type = HASH
    attr_key = request.operation[attr_type]
    value, lastSeqNo, lastUpdateTime, proof = \
        self.getAttr(did=nym, key=attr_key, attr_type=attr_type)
    attr = None
    if value is not None:
        # For HASH lookups only the hash itself can be returned;
        # there is no stored payload.
        if HASH in request.operation:
            attr = attr_key
        else:
            attr = value
    return self.make_result(request=request,
                            data=attr,
                            last_seq_no=lastSeqNo,
                            update_time=lastUpdateTime,
                            proof=proof)
def lookup(self, path, isCommitted=True) -> (str, int):
    """
    Queries state for data on specified path
    :param path: path to data
    :return: 4-tuple ``(value, last_seq_no, last_update_time, proof)``;
        the first three are None when nothing is stored at *path*
        (the proof is still produced as proof of non-existence)
    """
    assert path is not None
    encoded = self.state.get(path, isCommitted)
    proof = self.make_proof(path)
    if encoded is not None:
        value, last_seq_no, last_update_time = domain.decode_state_value(encoded)
        return value, last_seq_no, last_update_time, proof
    return None, None, None, proof
def _addAttr(self, txn) -> None:
    """
    The state trie stores the hash of the whole attribute data at:
    the did+attribute name if the data is plaintext (RAW)
    the did+hash(attribute) if the data is encrypted (ENC)
    If the attribute is HASH, then nothing is stored in attribute store,
    the trie stores a blank value for the key did+hash
    """
    assert txn[TXN_TYPE] == ATTRIB
    attr_type, path, value, hashed_value, value_bytes = domain.prepare_attr_for_state(txn)
    self.state.set(path, value_bytes)
    # HASH attributes carry no payload, so nothing goes to the attribute store.
    if attr_type != HASH:
        self.attributeStore.set(hashed_value, value)
def _addSchema(self, txn) -> None:
    """Write a SCHEMA txn into the state trie."""
    assert txn[TXN_TYPE] == SCHEMA
    path, value_bytes = domain.prepare_schema_for_state(txn)
    self.state.set(path, value_bytes)
def _addClaimDef(self, txn) -> None:
    """Write a CLAIM_DEF txn into the state trie."""
    assert txn[TXN_TYPE] == CLAIM_DEF
    path, value_bytes = domain.prepare_claim_def_for_state(txn)
    self.state.set(path, value_bytes)
def _addRevocDef(self, txn) -> None:
    """Write a REVOC_REG_DEF txn into the state trie."""
    assert txn[TXN_TYPE] == REVOC_REG_DEF
    path, value_bytes = domain.prepare_revoc_def_for_state(txn)
    self.state.set(path, value_bytes)
def getAttr(self,
            did: str,
            key: str,
            attr_type,
            isCommitted=True) -> (str, int, int, list):
    """Fetch one attribute of a DID from state plus its proof.

    :return: ``(value, seq_no, update_time, proof)`` — four ``None``s
        when the attribute (or its off-ledger payload) cannot be found.
    """
    assert did is not None
    assert key is not None
    path = domain.make_state_path_for_attr(did, key, attr_type == HASH)
    try:
        hashed_val, lastSeqNo, lastUpdateTime, proof = \
            self.lookup(path, isCommitted)
    except KeyError:
        return None, None, None, None
    # HASH attributes store only a blank marker in state; nothing more
    # to resolve.  (`== ''` check was redundant with falsiness.)
    if not hashed_val:
        return hashed_val, lastSeqNo, lastUpdateTime, proof
    try:
        value = self.attributeStore.get(hashed_val)
    except KeyError:
        logger.error('Could not get value from attribute store for {}'
                     .format(hashed_val))
        return None, None, None, None
    return value, lastSeqNo, lastUpdateTime, proof
def getSchema(self,
              author: str,
              schemaName: str,
              schemaVersion: str,
              isCommitted=True) -> (str, int, int, list):
    """Look up a schema by (author, name, version); four Nones if absent."""
    assert author is not None
    assert schemaName is not None
    assert schemaVersion is not None
    path = domain.make_state_path_for_schema(author, schemaName, schemaVersion)
    try:
        found = self.lookup(path, isCommitted)
    except KeyError:
        return None, None, None, None
    return found
def getClaimDef(self,
                author: str,
                schemaSeqNo: str,
                signatureType='CL',
                isCommitted=True) -> (str, int, int, list):
    """Look up a claim definition; four Nones if absent."""
    assert author is not None
    assert schemaSeqNo is not None
    path = domain.make_state_path_for_claim_def(author, schemaSeqNo, signatureType)
    try:
        found = self.lookup(path, isCommitted)
    except KeyError:
        return None, None, None, None
    return found
def getRevocDef(self,
                author_did,
                cred_def_id,
                revoc_def_type,
                revoc_def_tag,
                isCommitted=True) -> (str, int, int, list):
    """Look up a revocation registry definition; four Nones if absent."""
    assert author_did is not None
    assert cred_def_id is not None
    assert revoc_def_type is not None
    assert revoc_def_tag is not None
    path = domain.make_state_path_for_revoc_def(author_did,
                                                cred_def_id,
                                                revoc_def_type,
                                                revoc_def_tag)
    try:
        found = self.lookup(path, isCommitted)
    except KeyError:
        return None, None, None, None
    return found
def get_query_response(self, request: Request):
    # Dispatch to the handler registered for this query's txn type.
    return self.query_handlers[request.operation[TXN_TYPE]](request)
@staticmethod
def transform_txn_for_ledger(txn):
    """
    Some transactions need to be transformed before they can be stored in the
    ledger, eg. storing certain payload in another data store and only its
    hash in the ledger
    """
    # Currently only ATTRIB payloads need rewriting.
    if txn[TXN_TYPE] == ATTRIB:
        txn = DomainReqHandler.transform_attrib_for_ledger(txn)
    return txn
@staticmethod
def transform_attrib_for_ledger(txn):
    """
    Creating copy of result so that `RAW`, `ENC` or `HASH` can be
    replaced by their hashes. We do not insert actual attribute data
    in the ledger but only the hash of it.
    """
    txn = deepcopy(txn)
    attr_type, _, value = domain.parse_attr_txn(txn)
    # Blank values stay blank; otherwise only the hash goes on-ledger.
    if attr_type in [RAW, ENC]:
        txn[attr_type] = domain.hash_of(value) if value else ''
    return txn
| StarcoderdataPython |
191110 | <reponame>XiaohanZhangCMU/spinningup
# Demonstrate
# 1. Pearlmutter hvp theory works
# 2. j-towns 3 backward prop trick works
from autograd import numpy as np
import autograd.numpy.random as npr
from autograd.test_util import check_grads, check_equivalent
from autograd import (grad, elementwise_grad, jacobian, value_and_grad,
hessian_tensor_product, hessian, make_hvp,
tensor_jacobian_product, checkpoint, make_jvp,
make_ggnvp, grad_and_aux)
import torch
import timeit
npr.seed(1)
def jvp():
    """Demo of forward-mode Jacobian-vector products via autograd's make_jvp."""
    mat = np.random.randn(2, 2)

    def linear(vec):
        return np.dot(mat, vec)

    origin = np.zeros(2)
    push_forward = make_jvp(linear)(origin)
    # Each call yields (f(origin), J @ direction) — i.e. one Jacobian column.
    print(push_forward(np.array([1, 0])))  # f(0) and first column of f's Jacobian at 0
    print(push_forward(np.array([0, 1])))  # f(0) and second column of f's Jacobian at 0
def hvp():
    # Hessian-vector product via autograd's make_hvp.
    # Reads module globals: `fun` (scalar fn), `a` (point), `u` (direction).
    hvp = make_hvp(fun)(a)[0]
    s = hvp(u)
    return s
def hessian1():
    # Baseline: materialize the full Hessian of `fun` at `a`, then one
    # matvec with direction `u` (globals from the benchmark loop below).
    H = hessian(fun)(a)
    s = np.dot(H, u)
    return s
# Adapted using the trick: https://j-towns.github.io/2017/06/12/A-new-trick.html
def torchhvp():
    # HVP via double reverse-mode: grad of (grad . w) w.r.t. the dummy `w`
    # recovers a forward-mode product. Reads module globals `x` (point,
    # requires_grad=True) and `v` (direction).
    L = torch.sum(torch.sin(x))
    y, = torch.autograd.grad(L, x, create_graph=True, retain_graph=False)
    w = torch.zeros(y.size(), requires_grad=True)
    g = torch.autograd.grad(y, x, grad_outputs = w, create_graph = True)
    r = torch.autograd.grad(g, w, grad_outputs = v, create_graph = False)
    return r
class net(torch.nn.Module):
    """Trivial module computing sum(sin(x)); used by torchwNet()."""

    def __init__(self):
        super(net, self).__init__()

    def forward(self, x):
        return torch.sum(torch.sin(x))
def torchwNet():
    # Same double-backward HVP as torchhvp(), but the loss comes from the
    # `net` nn.Module to measure module-call overhead. Reads globals x, v.
    fun = net()
    L = fun(x)
    y, = torch.autograd.grad(L, x, create_graph=True, retain_graph=False)
    w = torch.zeros(y.size(), requires_grad=True)
    g = torch.autograd.grad(y, x, grad_outputs = w, create_graph = True)
    r = torch.autograd.grad(g, w, grad_outputs = v, create_graph = False)
    return r
# Benchmark the four HVP strategies as the input size grows.
fun = lambda a: np.sum(np.sin(a))
for size in range(50, 50000, 50):
    x = torch.randn(size,)
    v = torch.randn(size,)
    x.requires_grad=True
    # NumPy views of the same point/direction for the autograd variants.
    a = x.detach().numpy()
    u = v.detach().numpy()
    # columns: autograd make_hvp, full Hessian, torch double-backward, torch nn.Module
    print([timeit.timeit(hvp, number=10),timeit.timeit(hessian1, number=10), timeit.timeit(torchhvp, number=10), timeit.timeit(torchwNet, number=10)])
    #print(fun(a))
    #print(hvp())
    #print(hessian1())
    #print(torchhvp())
    #print(torchwNet())
| StarcoderdataPython |
5018467 | <filename>test/__init__.py
import sys
sys.path.append('~/PycharmProjects/kawalc1') | StarcoderdataPython |
189117 | <filename>NTA_detect_fast_flux.py
#!/usr/bin/env python2
from scapy.all import *
# DNS rrname -> list of unique rdata (IPs) seen in answer records.
records = {}
min_count = 5 # we only care if the domain name has more than min_count IPs associated with it.
def examine(pk):
    """Record each DNS answer's rdata under its rrname, keeping IPs unique.

    Mutates the module-level ``records`` dict.
    """
    if pk.haslayer(DNSRR):
        rname = pk.getlayer(DNSRR).rrname
        rdata = pk.getlayer(DNSRR).rdata
        # dict.has_key() is Python-2-only; setdefault + `in` works on 2 and 3
        # and avoids the double lookup.
        ips = records.setdefault(rname, [])
        if rdata not in ips:
            ips.append(rdata)
def main():
    """Parse the pcap named on the command line and report domains that
    resolved to at least ``min_count`` distinct IPs (fast-flux heuristic)."""
    # `<>` was removed in Python 3 and deprecated in Python 2; `!=` is
    # equivalent on both.
    if len(sys.argv) != 2:
        print("USAGE: %s %s" % (sys.argv[0], "[pcap file]"))
        sys.exit()
    else:
        pk = rdpcap(sys.argv[1])
        for pkt in pk:
            examine(pkt)
        for rec in records:
            if len(records[rec]) >= min_count:
                print("[+] %s has %s unique IPs" % (rec, str(len(records[rec]))))
# Entry point when executed as a script (not on import).
if __name__=="__main__":
    main()
| StarcoderdataPython |
9692561 | <gh_stars>1-10
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.viewsets import ModelViewSet
from apps.E_business_project_admin.serializers.image import ImageSeriazlier, SKUSeriazlier
from apps.E_business_project_admin.utils import PageNum
from apps.goods.models import SKUImage, SKU
class ImageView(ModelViewSet):
    """CRUD endpoints for SKU images; file payloads are stored on FastDFS."""

    # serializer for image records
    serializer_class = ImageSeriazlier
    # queryset of all SKU images
    queryset = SKUImage.objects.all()
    # pagination
    pagination_class = PageNum

    @staticmethod
    def _upload_image(request):
        """Push the uploaded 'image' file to FastDFS.

        Returns the remote file id on success, None on failure.
        """
        from fdfs_client.client import Fdfs_client
        client = Fdfs_client('utils/fastdfs/client.conf')
        upload = request.FILES.get('image')
        outcome = client.upload_by_buffer(upload.read())
        if outcome['Status'] != 'Upload successed.':
            return None
        return outcome['Remote file_id']

    @staticmethod
    def _image_payload(img, sku_id):
        """Response body shape expected by the admin front end."""
        return {
            'id': img.id,
            'sku': sku_id,
            'image': img.image.url
        }

    # override the default create logic to store the file on FastDFS
    def create(self, request, *args, **kwargs):
        image_url = self._upload_image(request)
        if image_url is None:
            return Response(status=403)
        sku_id = request.data.get('sku')
        img = SKUImage.objects.create(sku_id=sku_id, image=image_url)
        # the front end expects HTTP 201 here
        return Response(self._image_payload(img, sku_id), status=201)

    # override the default update logic to replace the stored file
    def update(self, request, *args, **kwargs):
        image_url = self._upload_image(request)
        if image_url is None:
            return Response(status=403)
        sku_id = request.data.get('sku')
        img = SKUImage.objects.get(id=kwargs['pk'])
        img.image = image_url
        img.save()
        # the front end expects HTTP 201 here
        return Response(self._image_payload(img, sku_id), status=201)
class SKUView(APIView):
    """Read-only listing of every SKU."""

    def get(self, request):
        serializer = SKUSeriazlier(SKU.objects.all(), many=True)
        return Response(serializer.data)
6512410 | import json
import pandas as pd
def create_csv(json_blob, path_for_csv):
    """Parse a Google Vision OCR result blob and write one CSV row per text line.

    Each detected line gets: an id (page.index), its text, character count,
    character height, and the enclosing paragraph's normalized geometry.

    :param json_blob: storage blob; ``download_as_string()`` yields the Vision
        API JSON, ``name`` (minus ``.json``) names the output CSV file.
    :param path_for_csv: directory the CSV is written into.
    """
    # with open(json_blob.name) as file:
    #data = json.load(file)
    json_string = json_blob.download_as_string()
    data = json.loads(json_string)
    # dicts where Im gonna save the info for the csv, key = page-number, value = list of attributes
    dict_id = {}
    dict_text = {}
    dict_chars = {}
    dict_chars_height = {}
    dict_para_height = {}
    dict_para_width = {}
    dict_para_area = {}
    dict_para_pos1_x = {}
    dict_para_pos1_y = {}
    dict_para_pos2_x = {}
    dict_para_pos2_y = {}
    # loop file responses
    for response in data['responses']:
        page = response['context']['pageNumber']
        # lists for dicts
        #word_for_line = ''
        line = ''
        list_text = []
        list_chars_height = []
        list_para_height = []
        list_para_width = []
        list_para_area = []
        list_para_pos1_x = []
        list_para_pos1_y = []
        list_para_pos2_x = []
        list_para_pos2_y = []
        # to check if row changed
        row_changed = False
        first_row = True
        # list of coords
        x_list_para = []
        y_list_para = []
        x_list_word = []
        y_list_word = []
        for page_ in response['fullTextAnnotation']['pages']:
            for block in page_['blocks']:
                for para in block['paragraphs']:
                    if row_changed == True or first_row == True:
                        # clear old values
                        x_list_para.clear()
                        y_list_para.clear()
                        # save new values
                        for v in para['boundingBox']['normalizedVertices']:
                            x_list_para.append(v['x'])
                            y_list_para.append(v['y'])
                    for word in para['words']:
                        if row_changed == True or first_row == True:
                            # clear old values
                            x_list_word.clear()
                            y_list_word.clear()
                            # save new values
                            for v in word['boundingBox']['normalizedVertices']:
                                x_list_word.append(v['x'])
                                y_list_word.append(v['y'])
                        for symbol in word['symbols']:
                            # create word
                            #word_for_line += symbol['text']
                            line += symbol['text']
                            try:
                                if symbol['property'].get('detectedBreak'):
                                    if symbol['property'].get('detectedBreak').get('type') == 'LINE_BREAK' or symbol['property'].get('detectedBreak').get('type') == 'HYPHEN' or symbol['property'].get('detectedBreak').get('type') == 'EOL_SURE_SPACE':
                                        line = line.rstrip(' ')
                                        # add space if is line break
                                        if symbol['property'].get('detectedBreak').get('type') == 'LINE_BREAK' or symbol['property'].get('detectedBreak').get('type') == 'EOL_SURE_SPACE':
                                            line += ' '
                                        row_changed = True
                                        first_row = False
                                        # create a register in lists for dicts
                                        # list_text
                                        list_text.append(line)
                                        line = ''
                                        # list_chars_height
                                        list_chars_height.append(round(
                                            max(y_list_word) - min(y_list_word), 5))
                                        # list_para_height
                                        list_para_height.append(round(
                                            max(y_list_para) - min(y_list_para), 5))
                                        # list_para_width
                                        list_para_width.append(round(
                                            max(x_list_para) - min(x_list_para), 5))
                                        # list_para_area
                                        list_para_area.append(round(
                                            (max(y_list_para) - min(y_list_para))*(max(x_list_para) - min(x_list_para)), 5))
                                        # list_para_pos1_x
                                        list_para_pos1_x.append(
                                            round(min(x_list_para), 5))
                                        # list_para_pos1_y
                                        list_para_pos1_y.append(
                                            round(min(y_list_para), 5))
                                        # list_para_pos2_x
                                        list_para_pos2_x.append(
                                            round(max(x_list_para), 5))
                                        # list_para_pos2_y
                                        list_para_pos2_y.append(
                                            round(max(y_list_para), 5))
                                        # reset row
                                        #row_changed = False
                            except KeyError:
                                row_changed = False
                            # check for space add word to line and start new word
                            try:
                                if symbol['property'].get('detectedBreak'):
                                    if symbol['property'].get('detectedBreak').get('type') == 'SPACE' or symbol['property'].get('detectedBreak').get('type') == 'SURE_SPACE':
                                        #line += word_for_line
                                        line += ' '
                                        #word_for_line = ''
                            except KeyError:
                                pass
        # register page in dicts
        dict_text[page] = list_text
        # fill dict_text
        #text = response['fullTextAnnotation']['text']
        #text_list = text.split('\n')
        #dict_text[page] = text_list
        # try:
        #    dict_text[page].remove('')
        # except ValueError:
        #    pass
        # fill dict_id
        id_list_str = ['{}.{}'.format(page, i)
                       for i, v in enumerate(dict_text[page])]
        id_list = [float(string) for string in id_list_str]
        dict_id[page] = id_list
        # fill dict_chars
        for key, value in dict_text.items():
            char_list = []
            for string in value:
                char_list.append(len(string))
            dict_chars[key] = char_list
        dict_chars_height[page] = list_chars_height
        dict_para_height[page] = list_para_height
        dict_para_width[page] = list_para_width
        dict_para_area[page] = list_para_area
        dict_para_pos1_x[page] = list_para_pos1_x
        dict_para_pos1_y[page] = list_para_pos1_y
        dict_para_pos2_x[page] = list_para_pos2_x
        dict_para_pos2_y[page] = list_para_pos2_y
    # join pages into a single list
    final_list_id = []
    for value in dict_id.values():
        final_list_id += value
    final_list_text = []
    for value in dict_text.values():
        final_list_text += value
    final_list_chars = []
    for value in dict_chars.values():
        final_list_chars += value
    final_list_chars_height = []
    for value in dict_chars_height.values():
        final_list_chars_height += value
    final_list_para_height = []
    for value in dict_para_height.values():
        final_list_para_height += value
    final_list_para_width = []
    for value in dict_para_width.values():
        final_list_para_width += value
    final_list_para_area = []
    for value in dict_para_area.values():
        final_list_para_area += value
    final_list_para_pos1_x = []
    for value in dict_para_pos1_x.values():
        final_list_para_pos1_x += value
    final_list_para_pos1_y = []
    for value in dict_para_pos1_y.values():
        final_list_para_pos1_y += value
    final_list_para_pos2_x = []
    for value in dict_para_pos2_x.values():
        final_list_para_pos2_x += value
    final_list_para_pos2_y = []
    for value in dict_para_pos2_y.values():
        final_list_para_pos2_y += value
    # make dataframe
    df = pd.DataFrame({
        'id': final_list_id,
        'text': final_list_text,
        'chars': final_list_chars,
        'chars_height': final_list_chars_height,
        'para_height': final_list_para_height,
        'para_width': final_list_para_width,
        'para_area': final_list_para_area,
        'para_pos1_x': final_list_para_pos1_x,
        'para_pos1_y': final_list_para_pos1_y,
        'para_pos2_x': final_list_para_pos2_x,
        'para_pos2_y': final_list_para_pos2_y
    })
    # BUGFIX: str.rstrip('.json') strips any trailing characters from the set
    # {., j, s, o, n} — e.g. 'lesson.json' became 'le'. Strip the extension
    # as a real suffix instead.
    base_name = json_blob.name
    if base_name.endswith('.json'):
        base_name = base_name[:-len('.json')]
    if path_for_csv.endswith('/'):
        path = '{}{}.csv'.format(path_for_csv, base_name)
    else:
        path = '{}/{}.csv'.format(path_for_csv, base_name)
    df.to_csv(path, index=False)
def create_dataframe(json_blob):
    """Parse a Google Vision OCR result blob into a per-text-line DataFrame.

    Same traversal as create_csv(), but each line's trailing spaces are
    stripped and the DataFrame is returned instead of written to disk.
    """
    # with open(json_blob.name) as file:
    #data = json.load(file)
    json_string = json_blob.download_as_string()
    data = json.loads(json_string)
    # dicts where Im gonna save the info for the csv, key = page-number, value = list of attributes
    dict_id = {}
    dict_text = {}
    dict_chars = {}
    dict_chars_height = {}
    dict_para_height = {}
    dict_para_width = {}
    dict_para_area = {}
    dict_para_pos1_x = {}
    dict_para_pos1_y = {}
    dict_para_pos2_x = {}
    dict_para_pos2_y = {}
    # loop file responses
    for response in data['responses']:
        page = response['context']['pageNumber']
        # lists for dicts
        #word_for_line = ''
        line = ''
        list_text = []
        list_chars_height = []
        list_para_height = []
        list_para_width = []
        list_para_area = []
        list_para_pos1_x = []
        list_para_pos1_y = []
        list_para_pos2_x = []
        list_para_pos2_y = []
        # to check if row changed
        row_changed = False
        first_row = True
        # list of coords
        x_list_para = []
        y_list_para = []
        x_list_word = []
        y_list_word = []
        for page_ in response['fullTextAnnotation']['pages']:
            for block in page_['blocks']:
                for para in block['paragraphs']:
                    if row_changed == True or first_row == True:
                        # clear old values
                        x_list_para.clear()
                        y_list_para.clear()
                        # save new values
                        for v in para['boundingBox']['normalizedVertices']:
                            x_list_para.append(v['x'])
                            y_list_para.append(v['y'])
                    for word in para['words']:
                        if row_changed == True or first_row == True:
                            # clear old values
                            x_list_word.clear()
                            y_list_word.clear()
                            # save new values
                            for v in word['boundingBox']['normalizedVertices']:
                                x_list_word.append(v['x'])
                                y_list_word.append(v['y'])
                        for symbol in word['symbols']:
                            # create word
                            #word_for_line += symbol['text']
                            line += symbol['text']
                            try:
                                if symbol['property'].get('detectedBreak'):
                                    if symbol['property'].get('detectedBreak').get('type') == 'LINE_BREAK' or symbol['property'].get('detectedBreak').get('type') == 'HYPHEN' or symbol['property'].get('detectedBreak').get('type') == 'EOL_SURE_SPACE':
                                        row_changed = True
                                        first_row = False
                                        # create a register in lists for dicts
                                        # list_text
                                        list_text.append(line.rstrip(' '))
                                        line = ''
                                        # list_chars_height
                                        list_chars_height.append(round(
                                            max(y_list_word) - min(y_list_word), 5))
                                        # list_para_height
                                        list_para_height.append(round(
                                            max(y_list_para) - min(y_list_para), 5))
                                        # list_para_width
                                        list_para_width.append(round(
                                            max(x_list_para) - min(x_list_para), 5))
                                        # list_para_area
                                        list_para_area.append(round(
                                            (max(y_list_para) - min(y_list_para))*(max(x_list_para) - min(x_list_para)), 5))
                                        # list_para_pos1_x
                                        list_para_pos1_x.append(
                                            round(min(x_list_para), 5))
                                        # list_para_pos1_y
                                        list_para_pos1_y.append(
                                            round(min(y_list_para), 5))
                                        # list_para_pos2_x
                                        list_para_pos2_x.append(
                                            round(max(x_list_para), 5))
                                        # list_para_pos2_y
                                        list_para_pos2_y.append(
                                            round(max(y_list_para), 5))
                                        # reset row
                                        #row_changed = False
                            except KeyError:
                                row_changed = False
                            # check for space add word to line and start new word
                            try:
                                if symbol['property'].get('detectedBreak'):
                                    if symbol['property'].get('detectedBreak').get('type') == 'SPACE' or symbol['property'].get('detectedBreak').get('type') == 'SURE_SPACE':
                                        #line += word_for_line
                                        line += ' '
                                        #word_for_line = ''
                            except KeyError:
                                pass
        # register page in dicts
        dict_text[page] = list_text
        # fill dict_text
        #text = response['fullTextAnnotation']['text']
        #text_list = text.split('\n')
        #dict_text[page] = text_list
        # try:
        #    dict_text[page].remove('')
        # except ValueError:
        #    pass
        # fill dict_id
        id_list_str = ['{}.{}'.format(page, i)
                       for i, v in enumerate(dict_text[page])]
        id_list = [float(string) for string in id_list_str]
        dict_id[page] = id_list
        # fill dict_chars
        for key, value in dict_text.items():
            char_list = []
            for string in value:
                char_list.append(len(string))
            dict_chars[key] = char_list
        dict_chars_height[page] = list_chars_height
        dict_para_height[page] = list_para_height
        dict_para_width[page] = list_para_width
        dict_para_area[page] = list_para_area
        dict_para_pos1_x[page] = list_para_pos1_x
        dict_para_pos1_y[page] = list_para_pos1_y
        dict_para_pos2_x[page] = list_para_pos2_x
        dict_para_pos2_y[page] = list_para_pos2_y
    # join pages into a single list
    final_list_id = []
    for value in dict_id.values():
        final_list_id += value
    final_list_text = []
    for value in dict_text.values():
        final_list_text += value
    final_list_chars = []
    for value in dict_chars.values():
        final_list_chars += value
    final_list_chars_height = []
    for value in dict_chars_height.values():
        final_list_chars_height += value
    final_list_para_height = []
    for value in dict_para_height.values():
        final_list_para_height += value
    final_list_para_width = []
    for value in dict_para_width.values():
        final_list_para_width += value
    final_list_para_area = []
    for value in dict_para_area.values():
        final_list_para_area += value
    final_list_para_pos1_x = []
    for value in dict_para_pos1_x.values():
        final_list_para_pos1_x += value
    final_list_para_pos1_y = []
    for value in dict_para_pos1_y.values():
        final_list_para_pos1_y += value
    final_list_para_pos2_x = []
    for value in dict_para_pos2_x.values():
        final_list_para_pos2_x += value
    final_list_para_pos2_y = []
    for value in dict_para_pos2_y.values():
        final_list_para_pos2_y += value
    # make dataframe
    df = pd.DataFrame({
        'id': final_list_id,
        'text': final_list_text,
        'chars': final_list_chars,
        'chars_height': final_list_chars_height,
        'para_height': final_list_para_height,
        'para_width': final_list_para_width,
        'para_area': final_list_para_area,
        'para_pos1_x': final_list_para_pos1_x,
        'para_pos1_y': final_list_para_pos1_y,
        'para_pos2_x': final_list_para_pos2_x,
        'para_pos2_y': final_list_para_pos2_y
    })
    return df
| StarcoderdataPython |
6550498 | <filename>tests/test_usage.py
from mock import patch, Mock
from nose.tools import raises
from tests.tools import create_mock_json
from twilio.rest.resources import Usage
from twilio.rest.resources.usage import UsageTriggers, UsageTrigger
# Shared fixtures: every request below is built against this account.
BASE_URI = "https://api.twilio.com/2010-04-01/Accounts/AC123"
ACCOUNT_SID = "AC123"
AUTH = (ACCOUNT_SID, "token")
usage = Usage(BASE_URI, AUTH)
@patch("twilio.rest.resources.base.make_twilio_request")
def test_triggers_create(request):
    """Trigger creation POSTs all fields, CamelCased, to /Usage/Triggers."""
    resp = create_mock_json("tests/resources/usage_triggers_instance.json")
    resp.status_code = 201
    request.return_value = resp
    usage.triggers.create(
        friendly_name="foo",
        usage_category="sms",
        trigger_by="count",
        recurring="price",
        trigger_value="10.00",
        callback_url="http://www.example.com",
        callback_method="POST"
    )
    uri = "%s/Usage/Triggers" % BASE_URI
    request.assert_called_with("POST", uri, data={
        "FriendlyName": "foo",
        "UsageCategory": "sms",
        "TriggerBy": "count",
        "Recurring": "price",
        "TriggerValue": "10.00",
        "CallbackUrl": "http://www.example.com",
        "CallbackMethod": "POST"
    }, auth=AUTH, use_json_extension=True)
@patch("twilio.rest.resources.base.make_twilio_request")
def test_triggers_paging(request):
    """Trigger listing GETs /Usage/Triggers with CamelCased query params."""
    resp = create_mock_json("tests/resources/usage_triggers_list.json")
    request.return_value = resp
    uri = "%s/Usage/Triggers" % BASE_URI
    usage.triggers.list(
        recurring="daily",
        usage_category="sms",
        trigger_by="count")
    request.assert_called_with("GET", uri, params={
        "Recurring": "daily",
        "UsageCategory": "sms",
        "TriggerBy": "count"
    }, auth=AUTH, use_json_extension=True)
@patch("twilio.rest.resources.base.make_twilio_request")
def test_records_paging(request):
    """Record listing GETs /Usage/Records with CamelCased date filters."""
    resp = create_mock_json("tests/resources/usage_records_list.json")
    request.return_value = resp
    uri = "%s/Usage/Records" % BASE_URI
    usage.records.list(
        start_date="2012-10-12",
        end_date="2012-10-13",
        category="sms")
    request.assert_called_with("GET", uri, params={
        "StartDate": "2012-10-12",
        "EndDate": "2012-10-13",
        "Category": "sms"
    }, auth=AUTH, use_json_extension=True)
@patch("twilio.rest.resources.base.Resource.request")
def test_delete_trigger(req):
    """Deleting a trigger issues DELETE on the trigger instance URI."""
    resp = Mock()
    resp.content = ""
    resp.status_code = 204
    req.return_value = resp, {}
    triggers = UsageTriggers("https://api.twilio.com", None)
    trigger = UsageTrigger(triggers, "UT123")
    trigger.delete()
    uri = "https://api.twilio.com/Usage/Triggers/UT123"
    req.assert_called_with("DELETE", uri)
@raises(AttributeError)
def test_records_create():
    """Usage records are read-only: .create must not exist."""
    usage.records.all.create
@raises(AttributeError)
def test_records_delete():
    """Usage records are read-only: .delete must not exist."""
    usage.records.all.delete
@raises(AttributeError)
def test_records_get():
    """Usage records cannot be fetched individually: .get must not exist."""
    usage.records.all.get('abc')
| StarcoderdataPython |
359169 | <reponame>Strategy-Tap/Novizi-BackEnd<gh_stars>0
"""Collection permissions."""
from typing import Any
from django.utils import timezone
from rest_framework import permissions
from rest_framework.request import Request
class IsOwnerOrReadOnly(permissions.BasePermission):
    """Allows full access to the event host (before the event) — read only otherwise."""

    def has_object_permission(
        self: "IsOwnerOrReadOnly", request: Request, view: Any, obj: Any
    ) -> bool:
        """Decide object-level access.

        Args:
            request: Request object
            view: Any type of view
            obj: model exposing ``hosted_by`` and ``event_date``

        Returns:
            True when the request is allowed.
        """
        if request.method not in permissions.SAFE_METHODS:
            # Writes: only the host may modify, and only before the event.
            return obj.hosted_by == request.user and obj.event_date > timezone.now()
        return True
class IsProposerOrReadOnly(permissions.BasePermission):
    """Allows full access to the object's proposer — read only otherwise."""

    def has_object_permission(
        self: "IsProposerOrReadOnly", request: Request, view: Any, obj: Any
    ) -> bool:
        """Decide object-level access.

        Args:
            request: Request object
            view: Any type of view
            obj: model exposing ``proposed_by``

        Returns:
            True when the request is allowed.
        """
        if request.method not in permissions.SAFE_METHODS:
            # Writes: only the user who proposed the object.
            return obj.proposed_by == request.user
        return True
| StarcoderdataPython |
5194807 | <reponame>ondiiik/meteoink<filename>simulator/machine.py
import sys
import time
import datetime
import pygame
class SPI:
    """No-op stand-in for machine.SPI when running on the desktop simulator."""
    def __init__(self, n):
        # n: bus id — ignored by the simulator
        pass
    def init(self, baudrate, polarity, phase, sck, mosi, miso):
        # Nothing to configure when simulating.
        pass
# Simulated digital input level for each GPIO pin number.
_pins = [1] * 256
_pins[32] = 1 # 1 - Meteostation, 0 - Config server
class WDT:
    """Watchdog stub: the simulator never resets, so feeding is a no-op."""
    def __init__(self, timeout):
        pass
    def feed(self):
        pass
class Pin:
    """Simulated GPIO pin: writes are ignored, reads come from ``_pins``."""

    OUT = 1
    IN = 2
    PULL_UP = 3
    PULL_DOWN = 4

    def __init__(self, n, t = Pin.OUT if False else OUT, p = None):
        # Only the pin number matters in the simulator.
        self.n = n

    def on(self):
        pass

    def off(self):
        pass

    def value(self):
        level = _pins[self.n]
        print('PIN {} {}'.format(self.n, level))
        return level
class ADC:
    """Simulated ADC: attenuation is accepted but ignored, reads are constant."""

    ATTN_6DB = 1
    ATTN_11DB = 2

    def __init__(self, pin):
        pass

    def atten(self, at):
        pass

    def read(self):
        # Fixed raw sample; presumably models battery voltage — TODO confirm.
        return 3287
class PWM:
    """Simulated PWM channel: logs the tone on creation, otherwise inert."""
    def __init__(self, pin, freq, duty):
        # Used as a beeper in the app, hence the audible log line.
        print('BEEP', freq)
    def duty(self, v):
        pass
    def freq(self, v):
        pass
    def deinit(self):
        pass
class RTC():
    """Simulated RTC backed by the host's local clock."""

    def __init__(self):
        pass

    def datetime(self):
        # Mirror MicroPython's 8-tuple shape from the host's local time.
        # NOTE(review): real machine.RTC ends with subseconds; the sim
        # returns day-of-year instead.
        now = datetime.datetime.now().timetuple()
        return (now.tm_year, now.tm_mon, now.tm_mday, now.tm_wday,
                now.tm_hour, now.tm_min, now.tm_sec, now.tm_yday)

    def init(self, v):
        pass
def freq(max_freq):
    # CPU frequency scaling is meaningless on the desktop; accept and ignore.
    pass
def deepsleep(t = 0):
    # Simulate deep sleep: keep servicing UI events for ~t ms (1 s steps),
    # then end the process (on real hardware the chip resets out of sleep).
    print('Deep sleep ....')
    for i in range(t // 1000):
        _check_events()
        time.sleep(1)
    sys.exit(0)
def reset():
    # Simulated hard reset just ends the process.
    print('Reset ....')
    sys.exit()
def reset_cause():
    # Always 0, which equals DEEPSLEEP below — i.e. the simulator reports
    # every boot as a wake from deep sleep.
    return 0
DEEPSLEEP = 0
def _check_events():
    """Drain pending pygame events; exit the process on window close.

    Any other event is merely logged.
    """
    # NOTE: the original declared `global _adc`, but no `_adc` module global
    # exists and the name was never used — dead statement removed.
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            print('Bye bye ...')
            sys.exit(0)
        else:
            print('EVENT:', event)
PWRON_RESET = 1 | StarcoderdataPython |
327381 | from .myHeartCounts import MyHeartCounts
| StarcoderdataPython |
12830248 | <gh_stars>10-100
# Copyright (C) 2015-2021 by Vd.
# This file is part of Rocketgram, the modern Telegram bot framework.
# Rocketgram is released under the MIT License (see LICENSE).
from io import BytesIO
from rocketgram import api
def test_GetUpdates():
    """GetUpdates renders empty by default and serializes enum filters."""
    req = api.GetUpdates()
    assert req.render() == {}
    assert req.render(with_method=True) == {'method': 'GetUpdates'}
    assert req.files() == []
    req = api.GetUpdates(offset=1000, limit=10, timeout=30,
                         allowed_updates=[api.UpdateType.message, api.UpdateType.channel_post])
    assert req.render() == {'allowed_updates': ['message', 'channel_post'], 'limit': 10, 'offset': 1000, 'timeout': 30}
def test_SetWebhook():
    """SetWebhook renders URL certificates inline and files as attach:// refs."""
    req = api.SetWebhook(url='https://www.example.com/bot')
    assert req.render() == {'url': 'https://www.example.com/bot'}
    assert req.render(with_method=True) == {'method': 'SetWebhook', 'url': 'https://www.example.com/bot'}
    assert req.files() == []
    req = api.SetWebhook(url='https://www.example.com/bot',
                         certificate='https://www.example.com/cert/cert.crt', max_connections=10,
                         allowed_updates=[api.UpdateType.message, api.UpdateType.channel_post])
    assert req.render() == {'url': 'https://www.example.com/bot',
                            'allowed_updates': ['message', 'channel_post'], 'max_connections': 10,
                            'certificate': 'https://www.example.com/cert/cert.crt'}
    file = api.InputFile('cert.crt', 'application/x-x509-ca-cert', BytesIO())
    req = api.SetWebhook(url='https://www.example.com/bot', certificate=file)
    assert req.render() == {'url': 'https://www.example.com/bot',
                            'certificate': 'attach://cert.crt'}
    assert req.files() == [file]
def test_DeleteWebhook():
    """DeleteWebhook takes no parameters and renders only its method name."""
    req = api.DeleteWebhook()
    assert req.render() == {}
    assert req.render(with_method=True) == {'method': 'DeleteWebhook'}
    assert req.files() == []
def test_GetWebhookInfo():
    """GetWebhookInfo takes no parameters and renders only its method name."""
    req = api.GetWebhookInfo()
    assert req.render() == {}
    assert req.render(with_method=True) == {'method': 'GetWebhookInfo'}
    assert req.files() == []
def test_GetMe():
    """GetMe takes no parameters and renders only its method name."""
    req = api.GetMe()
    assert req.render() == {}
    assert req.render(with_method=True) == {'method': 'GetMe'}
    assert req.files() == []
def test_SendMessage():
    """SendMessage renders text plus optional flags and inline keyboards."""
    req = api.SendMessage(1000, "Hello, World!")
    assert req.render() == {'chat_id': 1000, 'text': 'Hello, World!'}
    assert req.render(with_method=True) == {'method': 'SendMessage', 'chat_id': 1000, 'text': 'Hello, World!'}
    assert req.files() == []
    kb = api.InlineKeyboardMarkup([[api.InlineKeyboardButton('Button', callback_data='data')]])
    req = api.SendMessage(1000, "Hello, World!", parse_mode=api.ParseModeType.html,
                          disable_web_page_preview=True, disable_notification=True, reply_to_message_id=100,
                          reply_markup=kb)
    assert req.render() == {'chat_id': 1000, 'text': 'Hello, World!', 'disable_notification': True,
                            'disable_web_page_preview': True, 'parse_mode': 'html', 'reply_to_message_id': 100,
                            'reply_markup': {'inline_keyboard': [[{'callback_data': 'data', 'text': 'Button'}]]}}
    assert req.files() == []
def test_ForwardMessage():
    """ForwardMessage renders source/target chat ids and the message id."""
    req = api.ForwardMessage(1000, 1234, 100)
    assert req.render() == {'chat_id': 1000, 'from_chat_id': 1234, 'message_id': 100}
    assert req.render(with_method=True) == {'method': 'ForwardMessage', 'chat_id': 1000, 'from_chat_id': 1234,
                                            'message_id': 100}
    assert req.files() == []
    req = api.ForwardMessage(1000, 1234, 100, disable_notification=True)
    assert req.render() == {'chat_id': 1000, 'from_chat_id': 1234, 'message_id': 100, 'disable_notification': True}
    assert req.files() == []
def test_SendPhoto():
    """SendPhoto accepts a file id or an InputFile (rendered as attach://)."""
    req = api.SendPhoto(1000, "ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    assert req.render() == {'chat_id': 1000, 'photo': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'}
    assert req.render(with_method=True) == {'method': 'SendPhoto', 'chat_id': 1000,
                                            'photo': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'}
    assert req.files() == []
    photo_file = api.InputFile('photo.jpg', 'image/jpeg', BytesIO())
    kb = api.InlineKeyboardMarkup([[api.InlineKeyboardButton('Button', callback_data='data')]])
    req = api.SendPhoto(1000, photo_file, caption="Hello, World!", parse_mode=api.ParseModeType.html,
                        disable_notification=True, reply_to_message_id=100, reply_markup=kb)
    assert req.render() == {'chat_id': 1000, 'photo': 'attach://photo.jpg', 'disable_notification': True,
                            'caption': 'Hello, World!', 'parse_mode': 'html', 'reply_to_message_id': 100,
                            'reply_markup': {'inline_keyboard': [[{'callback_data': 'data', 'text': 'Button'}]]}}
    assert req.files() == [photo_file]
def test_SendAudio():
    """SendAudio: file-id form; InputFile form renders attach:// for audio and thumb and lists both files."""
    req = api.SendAudio(1000, "ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    assert req.render() == {'chat_id': 1000, 'audio': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'}
    assert req.render(with_method=True) == {'method': 'SendAudio', 'chat_id': 1000,
                                            'audio': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'}
    assert req.files() == []
    audio_file = api.InputFile('audio.mp3', 'audio/mpeg', BytesIO())
    thumb_file = api.InputFile('thumb.jpg', 'image/jpeg', BytesIO())
    kb = api.InlineKeyboardMarkup([[api.InlineKeyboardButton('Button', callback_data='data')]])
    req = api.SendAudio(1000, audio_file, duration=300, performer="Beethoven", title="Symphony No. 5",
                        thumb=thumb_file, caption="Hello, World!", parse_mode=api.ParseModeType.html,
                        disable_notification=True, reply_to_message_id=100, reply_markup=kb)
    assert req.render() == {'chat_id': 1000, 'audio': 'attach://audio.mp3', 'duration': 300, 'performer': "Beethoven",
                            'title': "Symphony No. 5", 'thumb': 'attach://thumb.jpg', 'caption': 'Hello, World!',
                            'parse_mode': 'html', 'disable_notification': True, 'reply_to_message_id': 100,
                            'reply_markup': {'inline_keyboard': [[{'callback_data': 'data', 'text': 'Button'}]]}}
    assert req.files() == [audio_file, thumb_file]
def test_SendDocument():
    """SendDocument: file-id form; InputFile form renders attach:// for document and thumb, files in order."""
    req = api.SendDocument(1000, "ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    assert req.render() == {'chat_id': 1000, 'document': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'}
    assert req.render(with_method=True) == {'method': 'SendDocument', 'chat_id': 1000,
                                            'document': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'}
    assert req.files() == []
    document_file = api.InputFile('document.pdf', 'application/pdf', BytesIO())
    thumb_file = api.InputFile('thumb.jpg', 'image/jpeg', BytesIO())
    kb = api.InlineKeyboardMarkup([[api.InlineKeyboardButton('Button', callback_data='data')]])
    req = api.SendDocument(1000, document_file, thumb=thumb_file, caption="Hello, World!",
                           parse_mode=api.ParseModeType.html, disable_notification=True, reply_to_message_id=100,
                           reply_markup=kb)
    assert req.render() == {'chat_id': 1000, 'document': 'attach://document.pdf', 'thumb': 'attach://thumb.jpg',
                            'caption': 'Hello, World!', 'parse_mode': 'html', 'disable_notification': True,
                            'reply_to_message_id': 100,
                            'reply_markup': {'inline_keyboard': [[{'callback_data': 'data', 'text': 'Button'}]]}}
    assert req.files() == [document_file, thumb_file]
def test_SendVideo():
    """SendVideo: file-id form; full-option InputFile form including supports_streaming; files in order."""
    req = api.SendVideo(1000, "ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    assert req.render() == {'chat_id': 1000, 'video': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'}
    assert req.render(with_method=True) == {'method': 'SendVideo', 'chat_id': 1000,
                                            'video': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'}
    assert req.files() == []
    video_file = api.InputFile('video.mp4', 'video/mp4', BytesIO())
    thumb_file = api.InputFile('thumb.jpg', 'image/jpeg', BytesIO())
    kb = api.InlineKeyboardMarkup([[api.InlineKeyboardButton('Button', callback_data='data')]])
    req = api.SendVideo(1000, video_file, duration=300, width=640, height=480, supports_streaming=True,
                        thumb=thumb_file, caption="Hello, World!", parse_mode=api.ParseModeType.html,
                        disable_notification=True, reply_to_message_id=100, reply_markup=kb)
    assert req.render() == {'chat_id': 1000, 'video': 'attach://video.mp4', 'duration': 300, 'width': 640,
                            'height': 480, 'supports_streaming': True, 'thumb': 'attach://thumb.jpg',
                            'caption': 'Hello, World!', 'parse_mode': 'html', 'disable_notification': True,
                            'reply_to_message_id': 100,
                            'reply_markup': {'inline_keyboard': [[{'callback_data': 'data', 'text': 'Button'}]]}}
    assert req.files() == [video_file, thumb_file]
def test_SendAnimation():
    """SendAnimation: file-id form; full-option InputFile form with dimensions and thumb; files in order."""
    req = api.SendAnimation(1000, "ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    assert req.render() == {'chat_id': 1000, 'animation': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'}
    assert req.render(with_method=True) == {'method': 'SendAnimation', 'chat_id': 1000,
                                            'animation': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'}
    assert req.files() == []
    animation_file = api.InputFile('animation.mp4', 'video/mp4', BytesIO())
    thumb_file = api.InputFile('thumb.jpg', 'image/jpeg', BytesIO())
    kb = api.InlineKeyboardMarkup([[api.InlineKeyboardButton('Button', callback_data='data')]])
    req = api.SendAnimation(1000, animation_file, duration=300, width=640, height=480,
                            thumb=thumb_file, caption="Hello, World!", parse_mode=api.ParseModeType.html,
                            disable_notification=True, reply_to_message_id=100, reply_markup=kb)
    assert req.render() == {'chat_id': 1000, 'animation': 'attach://animation.mp4', 'duration': 300, 'width': 640,
                            'height': 480, 'thumb': 'attach://thumb.jpg',
                            'caption': 'Hello, World!', 'parse_mode': 'html', 'disable_notification': True,
                            'reply_to_message_id': 100,
                            'reply_markup': {'inline_keyboard': [[{'callback_data': 'data', 'text': 'Button'}]]}}
    assert req.files() == [animation_file, thumb_file]
def test_SendVoice():
    """SendVoice: file-id form; InputFile form renders attach:// and lists the single voice file."""
    req = api.SendVoice(1000, "ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    assert req.render() == {'chat_id': 1000, 'voice': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'}
    assert req.render(with_method=True) == {'method': 'SendVoice', 'chat_id': 1000,
                                            'voice': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'}
    assert req.files() == []
    voice_file = api.InputFile('voice.opus', 'audio/ogg', BytesIO())
    kb = api.InlineKeyboardMarkup([[api.InlineKeyboardButton('Button', callback_data='data')]])
    req = api.SendVoice(1000, voice_file, duration=300, caption="Hello, World!",
                        parse_mode=api.ParseModeType.html, disable_notification=True, reply_to_message_id=100,
                        reply_markup=kb)
    assert req.render() == {'chat_id': 1000, 'voice': 'attach://voice.opus', 'duration': 300,
                            'caption': 'Hello, World!', 'parse_mode': 'html', 'disable_notification': True,
                            'reply_to_message_id': 100,
                            'reply_markup': {'inline_keyboard': [[{'callback_data': 'data', 'text': 'Button'}]]}}
    assert req.files() == [voice_file]
def test_SendVideoNote():
    """SendVideoNote: file-id form; InputFile form with duration/length/thumb; files in order."""
    req = api.SendVideoNote(1000, "ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    assert req.render() == {'chat_id': 1000, 'video_note': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'}
    assert req.render(with_method=True) == {'method': 'SendVideoNote', 'chat_id': 1000,
                                            'video_note': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'}
    assert req.files() == []
    note_file = api.InputFile('voice.opus', 'audio/ogg', BytesIO())
    thumb_file = api.InputFile('thumb.jpg', 'image/jpeg', BytesIO())
    kb = api.InlineKeyboardMarkup([[api.InlineKeyboardButton('Button', callback_data='data')]])
    req = api.SendVideoNote(1000, note_file, duration=300, length=500, thumb=thumb_file,
                            disable_notification=True, reply_to_message_id=100, reply_markup=kb)
    assert req.render() == {'chat_id': 1000, 'video_note': 'attach://voice.opus', 'duration': 300, 'length': 500,
                            'thumb': 'attach://thumb.jpg', 'disable_notification': True, 'reply_to_message_id': 100,
                            'reply_markup': {'inline_keyboard': [[{'callback_data': 'data', 'text': 'Button'}]]}}
    assert req.files() == [note_file, thumb_file]
def test_SendMediaGroupe():
    """SendMediaGroup: renders the media list items and collects their InputFiles in order."""
    # NOTE(review): "Groupe" is a typo for "Group"; kept unchanged so the
    # discovered test name (and any -k filters referencing it) stays stable.
    photo = api.InputMediaPhoto("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    req = api.SendMediaGroup(1000, [photo])
    assert req.render() == {'chat_id': 1000, 'media': [{'media': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'type': 'photo'}]}
    assert req.render(with_method=True) == {'method': 'SendMediaGroup', 'chat_id': 1000,
                                            'media': [{'media': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'type': 'photo'}]}
    assert req.files() == []
    photo_file_1 = api.InputFile('photo1.jpg', 'image/jpeg', BytesIO())
    photo1 = api.InputMediaPhoto(photo_file_1, caption="Hello, World!", parse_mode=api.ParseModeType.html)
    photo_file_2 = api.InputFile('photo2.jpg', 'image/jpeg', BytesIO())
    photo2 = api.InputMediaPhoto(photo_file_2, caption="Hello, World!", parse_mode=api.ParseModeType.html)
    req = api.SendMediaGroup(1000, [photo1, photo2], disable_notification=True, reply_to_message_id=100)
    assert req.render() == {'chat_id': 1000,
                            'media': [{'media': 'attach://photo1.jpg', 'type': 'photo', 'caption': 'Hello, World!',
                                       'parse_mode': 'html'},
                                      {'media': 'attach://photo2.jpg', 'type': 'photo', 'caption': 'Hello, World!',
                                       'parse_mode': 'html'}],
                            'disable_notification': True, 'reply_to_message_id': 100}
    assert req.files() == [photo_file_1, photo_file_2]
def test_SendLocation():
    """SendLocation: coordinate-only render plus full-option render with live_period; no files."""
    req = api.SendLocation(1000, latitude=31.7767, longitude=35.2345)
    assert req.render() == {'chat_id': 1000, 'latitude': 31.7767, 'longitude': 35.2345}
    assert req.render(with_method=True) == {'method': 'SendLocation', 'chat_id': 1000,
                                            'latitude': 31.7767, 'longitude': 35.2345}
    assert req.files() == []
    kb = api.InlineKeyboardMarkup([[api.InlineKeyboardButton('Button', callback_data='data')]])
    req = api.SendLocation(1000, latitude=31.7767, longitude=35.2345, live_period=300,
                           disable_notification=True, reply_to_message_id=100, reply_markup=kb)
    assert req.render() == {'chat_id': 1000, 'latitude': 31.7767, 'longitude': 35.2345, 'disable_notification': True,
                            'live_period': 300, 'reply_to_message_id': 100,
                            'reply_markup': {'inline_keyboard': [[{'callback_data': 'data', 'text': 'Button'}]]}}
    assert req.files() == []
def test_EditMessageLiveLocation():
    """EditMessageLiveLocation: chat+message addressing, inline_message_id addressing, and reply_markup."""
    req = api.EditMessageLiveLocation(chat_id=1000, message_id=300, latitude=31.7767, longitude=35.2345)
    assert req.render() == {'chat_id': 1000, 'message_id': 300, 'latitude': 31.7767, 'longitude': 35.2345}
    assert req.render(with_method=True) == {'method': 'EditMessageLiveLocation', 'chat_id': 1000, 'message_id': 300,
                                            'latitude': 31.7767, 'longitude': 35.2345}
    assert req.files() == []
    req = api.EditMessageLiveLocation(inline_message_id='ABCDEFGHIJKLMNOPQRSTUVWXYZ',
                                      latitude=31.7767, longitude=35.2345)
    assert req.render() == {'inline_message_id': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
                            'latitude': 31.7767, 'longitude': 35.2345}
    assert req.render(with_method=True) == {'method': 'EditMessageLiveLocation',
                                            'inline_message_id': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
                                            'latitude': 31.7767, 'longitude': 35.2345}
    assert req.files() == []
    kb = api.InlineKeyboardMarkup([[api.InlineKeyboardButton('Button', callback_data='data')]])
    req = api.EditMessageLiveLocation(chat_id=1000, message_id=300, latitude=31.7767, longitude=35.2345,
                                      reply_markup=kb)
    assert req.render() == {'chat_id': 1000, 'message_id': 300, 'latitude': 31.7767, 'longitude': 35.2345,
                            'reply_markup': {'inline_keyboard': [[{'callback_data': 'data', 'text': 'Button'}]]}}
    assert req.files() == []
def test_StopMessageLiveLocation():
    """StopMessageLiveLocation: chat+message addressing, inline_message_id addressing, and reply_markup."""
    req = api.StopMessageLiveLocation(chat_id=1000, message_id=300)
    assert req.render() == {'chat_id': 1000, 'message_id': 300}
    assert req.render(with_method=True) == {'method': 'StopMessageLiveLocation', 'chat_id': 1000, 'message_id': 300}
    assert req.files() == []
    req = api.StopMessageLiveLocation(inline_message_id='ABCDEFGHIJKLMNOPQRSTUVWXYZ')
    assert req.render() == {'inline_message_id': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'}
    assert req.render(with_method=True) == {'method': 'StopMessageLiveLocation',
                                            'inline_message_id': 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'}
    assert req.files() == []
    kb = api.InlineKeyboardMarkup([[api.InlineKeyboardButton('Button', callback_data='data')]])
    req = api.StopMessageLiveLocation(chat_id=1000, message_id=300, reply_markup=kb)
    assert req.render() == {'chat_id': 1000, 'message_id': 300,
                            'reply_markup': {'inline_keyboard': [[{'callback_data': 'data', 'text': 'Button'}]]}}
    assert req.files() == []
def test_SendVenue():
    """SendVenue: required geo/title/address render plus full-option render with foursquare fields."""
    req = api.SendVenue(1000, latitude=31.7767, longitude=35.2345, title='Earth', address='Solar system')
    assert req.render() == {'chat_id': 1000, 'latitude': 31.7767, 'longitude': 35.2345, 'title': 'Earth',
                            'address': 'Solar system'}
    assert req.render(with_method=True) == {'method': 'SendVenue', 'chat_id': 1000, 'latitude': 31.7767,
                                            'longitude': 35.2345, 'title': 'Earth', 'address': 'Solar system'}
    assert req.files() == []
    kb = api.InlineKeyboardMarkup([[api.InlineKeyboardButton('Button', callback_data='data')]])
    req = api.SendVenue(1000, latitude=31.7767, longitude=35.2345, title='Earth', address='Solar system',
                        foursquare_id='ABCDE123', foursquare_type='food/icecream',
                        disable_notification=True, reply_to_message_id=100, reply_markup=kb)
    assert req.render() == {'chat_id': 1000, 'latitude': 31.7767, 'longitude': 35.2345, 'title': 'Earth',
                            'address': 'Solar system', 'foursquare_id': 'ABCDE123', 'foursquare_type': 'food/icecream',
                            'disable_notification': True, 'reply_to_message_id': 100,
                            'reply_markup': {'inline_keyboard': [[{'callback_data': 'data', 'text': 'Button'}]]}}
    assert req.files() == []
def test_SendContact():
    """SendContact: minimal phone/first_name render plus full-option render including a vCard payload."""
    req = api.SendContact(1000, phone_number='+1234567890', first_name='John')
    assert req.render() == {'chat_id': 1000, 'phone_number': '+1234567890', 'first_name': 'John'}
    assert req.render(with_method=True) == {'method': 'SendContact', 'chat_id': 1000, 'phone_number': '+1234567890',
                                            'first_name': 'John'}
    assert req.files() == []
    vcard = "BEGIN:VCARD\nVERSION:4.0\nN:Gump;Forrest;;Mr.;\nFN:<NAME>\n" \
            "EMAIL:<EMAIL>\nEND:VCARD"
    kb = api.InlineKeyboardMarkup([[api.InlineKeyboardButton('Button', callback_data='data')]])
    req = api.SendContact(1000, phone_number='+1234567890', first_name='Forrest', last_name='Gump', vcard=vcard,
                          disable_notification=True, reply_to_message_id=100, reply_markup=kb)
    assert req.render() == {'chat_id': 1000, 'phone_number': '+1234567890', 'first_name': 'Forrest',
                            'last_name': 'Gump', 'vcard': vcard, 'disable_notification': True,
                            'reply_to_message_id': 100,
                            'reply_markup': {'inline_keyboard': [[{'callback_data': 'data', 'text': 'Button'}]]}}
    assert req.files() == []
def test_SendPoll():
    """SendPoll: question/options render, with_method render, and full-option render; no files."""
    req = api.SendPoll(1000, question='Do it?', options=['Yes', 'No'])
    assert req.render() == {'chat_id': 1000, 'question': 'Do it?', 'options': ['Yes', 'No']}
    assert req.render(with_method=True) == {'method': 'SendPoll', 'chat_id': 1000, 'question': 'Do it?',
                                            'options': ['Yes', 'No']}
    assert req.files() == []
    kb = api.InlineKeyboardMarkup([[api.InlineKeyboardButton('Button', callback_data='data')]])
    req = api.SendPoll(1000, question='Do it?', options=['Yes', 'No'],
                       disable_notification=True, reply_to_message_id=100, reply_markup=kb)
    assert req.render() == {'chat_id': 1000, 'question': 'Do it?', 'options': ['Yes', 'No'],
                            'disable_notification': True, 'reply_to_message_id': 100,
                            'reply_markup': {'inline_keyboard': [[{'callback_data': 'data', 'text': 'Button'}]]}}
    assert req.files() == []
def test_SendChatAction():
    """SendChatAction: the ChatActionType enum renders as its string value; no files."""
    req = api.SendChatAction(1000, action=api.ChatActionType.typing)
    assert req.render() == {'chat_id': 1000, 'action': 'typing'}
    assert req.render(with_method=True) == {'method': 'SendChatAction', 'chat_id': 1000, 'action': 'typing'}
    assert req.files() == []
def test_GetUserProfilePhotos():
    """GetUserProfilePhotos: user_id with paging parameters renders verbatim; no files."""
    req = api.GetUserProfilePhotos(10000, offset=10, limit=20)
    assert req.render() == {'user_id': 10000, 'offset': 10, 'limit': 20}
    assert req.render(with_method=True) == {'method': 'GetUserProfilePhotos', 'user_id': 10000, 'offset': 10,
                                            'limit': 20}
    assert req.files() == []
def test_GetFile():
    """GetFile: single file_id request renders verbatim; no files."""
    req = api.GetFile('ABCDEFG12345')
    assert req.render() == {'file_id': 'ABCDEFG12345'}
    assert req.render(with_method=True) == {'method': 'GetFile', 'file_id': 'ABCDEFG12345'}
    assert req.files() == []
| StarcoderdataPython |
3366034 | from scipy import sparse
from boosts.hole_boost import HoleBoost
from boosts.tail_boost import TailBoost
from boosts.album_boost import AlbumBoost
from boosts.match_boost import MatchBoost
from utils.post_processing import *
from utils.submitter import Submitter
from utils.pre_processing import *
def submission(boost, eurm_ens, sim, name):
    """
    Create and upload a submission from an ensemble eURM, optionally boosted.

    The three boosts are applied sequentially - each one consumes the eURM
    produced by the previous one, so their order is load-bearing.

    :param boost: if truthy, apply HoleBoost, TailBoost and AlbumBoost in that order.
    :param eurm_ens: eurm from ensemble (10k x 2.2M), a scipy sparse matrix.
    :param sim: similarity matrix (tracks x tracks), consumed by Hole/TailBoost.
    :param name: name of the submission (required by Submitter.submit).
    """
    # INIT
    # NOTE(review): Datareader/Submitter come from the star-imports above;
    # 'online' presumably selects the challenge (test) dataset - confirm.
    dr = Datareader(mode='online', only_load=True, verbose=False)
    sb = Submitter(dr)
    if boost:
        # HOLEBOOST: re-ranks categories 8 and 10 using the similarity matrix.
        hb = HoleBoost(similarity=sim, eurm=eurm_ens, datareader=dr, norm=norm_l1_row)
        eurm_ens = hb.boost_eurm(categories=[8, 10], k=300, gamma=5)
        # TAILBOOST: the four lists are positionally parallel - entry i of
        # last_tracks/k/gamma applies to categories[i].
        tb = TailBoost(similarity=sim, eurm=eurm_ens, datareader=dr, norm=norm_l2_row)
        eurm_ens = tb.boost_eurm(categories=[9, 7, 6, 5],
                                 last_tracks=[10, 3, 3, 3],
                                 k=[100, 80, 100, 100],
                                 gamma=[0.01, 0.01, 0.01, 0.01])
        # ALBUMBOOST: top_k is again parallel to categories.
        ab = AlbumBoost(dr, eurm_ens)
        eurm_ens = ab.boost_eurm(categories=[3, 4, 7, 9], gamma=2, top_k=[3, 3, 10, 40])
    # SUBMISSION: convert the (possibly boosted) eURM to ranked lists and upload.
    rec_list = eurm_to_recommendation_list_submission(eurm_ens, datareader=dr)
    sb.submit(rec_list, name=name)
if __name__ == '__main__':
    # SETTINGS
    boost = True
    # NOTE(review): the matrix paths are empty placeholders - fill in the
    # actual .npz file names relative to ROOT_DIR before running.
    eurm = sparse.load_npz(ROOT_DIR + '')
    similarity = sparse.load_npz(ROOT_DIR + '')

    # Fix: submission() declares `name` as a mandatory parameter; the original
    # call omitted it and raised TypeError before anything was submitted.
    submission(boost=boost, eurm_ens=eurm, sim=similarity, name='ensemble_boosted')
| StarcoderdataPython |
11378461 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012, <NAME>
#
# This file is part of statio released under MIT license.
# See the LICENSE for more information.
"""
Collection of functions used to calculate statistics.
The _values functions typically run over a sliding window and return
a list of computed stats.
The _value functions return a single computed stat.
"""
import math
import bisect
def sum_values(values, period=None):
    """Returns list of running sums.

    :param values: list of values to iterate.
    :param period: (optional) # of values to include in computation.
        * None - includes all values in computation.
    :rtype: list of summed values.

    Examples:
    >>> values = [34, 30, 29, 34, 38, 25, 35]
    >>> sum_values(values, 3) #using 3 period window.
    [34, 64, 93, 93, 101, 97, 98]
    """
    if period:
        if period < 1:
            raise ValueError("period must be 1 or greater")
        period = int(period)
    results = []
    running = None
    for idx, value in enumerate(values):
        if running is None:
            # First element seeds the running total.
            running = value
        elif (not period) or (idx < period):
            # Window not full yet (or unbounded): plain cumulative sum.
            running += value
        else:
            # Full window: add the newcomer, drop the value sliding out.
            running += value - values[idx - period]
        results.append(running)
    return results
def sum_value(values, period=None):
    """Returns the final sum.

    :param values: list of values to iterate.
    :param period: (optional) # of values to include in computation.
        * None - includes all values in computation.
    :rtype: the final sum, or None when *values* is empty.

    Examples:
    >>> values = [34, 30, 29, 34, 38, 25, 35]
    >>> result = sum_value(values, 3) #using 3 period window.
    >>> print("%.2f" % result)
    98.00
    """
    if not values:
        return None
    start = 0
    if period:
        if period < 1:
            raise ValueError("period must be 1 or greater")
        # Only the trailing `period` items contribute.
        start = max(len(values) - int(period), 0)
    return sum(values[start:])
def sma_values(values, period=None):
    """Returns list of running simple moving averages.

    :param values: list of values to iterate and compute stats.
    :param period: (optional) # of values included in computation.
        * None - includes all values in computation.
    :rtype: list of simple moving averages.

    Examples:
    >>> values = [34, 30, 29, 34, 38, 25, 35]
    >>> results = sma_values(values, 3) #using 3 period window.
    >>> ["%.2f" % x for x in results]
    ['34.00', '32.00', '31.00', '31.00', '33.67', '32.33', '32.67']
    """
    if period:
        if period < 1:
            raise ValueError("period must be 1 or greater")
        window = float(period)
        period = int(period)
    results = []
    average = None
    for idx, value in enumerate(values):
        if average is None:
            average = float(value)
        elif (not period) or (idx < period):
            # Cumulative mean while the window is still filling.
            average += (value - average) / (idx + 1.0)
        else:
            # Full window: shift the mean by the scaled in/out difference.
            average += (value - values[idx - period]) / window
        results.append(average)
    return results
def sma_value(values, period=None):
    """Returns the final simple moving average.

    :param values: list of values to iterate and compute stat.
    :param period: (optional) # of values included in computation.
        * None - includes all values in computation.
    :rtype: the final simple moving average, or None when *values* is empty.

    Examples:
    >>> values = [34, 30, 29, 34, 38, 25, 35]
    >>> result = sma_value(values, 3) #using 3 period window.
    >>> print("%.2f" % result)
    32.67
    """
    if not values:
        return None
    start = 0
    if period:
        if period < 1:
            raise ValueError("period must be 1 or greater")
        start = max(len(values) - int(period), 0)
    tail = values[start:]
    return sum(tail) / float(len(tail))
def ema_values(values, period=None, smoothing=None):
    """Returns list of running exponential moving averages.

    :param values: list of values to iterate and compute stat.
    :param period: (optional) # of values included in computation.
        * None - includes all values in computation (smoothing is then unused).
    :param smoothing: (optional) smoothing factor.
        * valid values: between 0 - 1.
        * None - (default) use formula = 2.0 / (period + 1.0).
        * closer to 0 - greater weight to older values - more smooth.
        * closer to 1 - greater weight to recent values - less smooth.
    :rtype: list of windowed exponential moving averages.

    Examples:
    >>> values = [34, 30, 29, 34, 38, 25, 35]
    >>> ema_values(values, 3) #using 3 period window.
    [34.0, 32.0, 31.0, 32.5, 35.25, 30.125, 32.5625]
    """
    if period:
        if period < 1:
            raise ValueError("period must be 1 or greater")
        if smoothing is None:
            # Standard EMA weighting derived from the window length.
            smoothing = 2.0 / (period + 1.0)
        elif not (0 <= smoothing <= 1):
            raise ValueError("smoothing outside of 0 to 1 range: " + str(smoothing))
        period = int(period)
    results = []
    ema = None
    for idx, value in enumerate(values):
        if ema is None:
            ema = float(value)
        elif (not period) or (idx < period):
            # Seed with the cumulative mean until the window fills.
            ema = ema + ((value - ema) / (idx + 1.0))
        else:
            ema = ema + smoothing * (value - ema)
        results.append(ema)
    return results
def wwma_values(values, period=None):
    """Returns list of running Welles Wilder moving averages.

    Approximation of the ema.

    :param values: list of values to iterate and compute stat.
    :param period: (optional) # of values included in computation.
        * None - includes all values in computation.
    :rtype: list of windowed Welles Wilder moving averages.

    Examples:
    >>> values = [34, 30, 29, 34, 38, 25, 35]
    >>> results = wwma_values(values, 3) #using 3 period window.
    >>> ["%.2f" % x for x in results]
    ['34.00', '32.00', '31.00', '32.00', '34.00', '31.00', '32.33']
    """
    if period:
        if period < 1:
            raise ValueError("period must be 1 or greater")
        period = int(period)
    results = []
    wwma = None
    for idx, value in enumerate(values):
        if wwma is None:
            wwma = float(value)
        elif (not period) or (idx < period):
            # Cumulative mean until the window fills.
            wwma = wwma + ((value - wwma) / (idx + 1.0))
        else:
            # Wilder smoothing: previous average keeps weight (period - 1).
            wwma = (value + wwma * (period - 1.0)) / period
        results.append(wwma)
    return results
def psa_values(values, period=None):
    """Returns list of running Power Sum averages (mean of squared values).

    Used to derive running variances. Based on the blog post from
    Subliminal Messages:
    http://subluminal.wordpress.com/2008/07/31/running-standard-deviations/

    :param values: list of values to iterate and compute stat.
    :param period: (optional) # of values included in computation.
        * None - includes all values in computation.
    :rtype: list of windowed Power Sum averages.

    Examples:
    >>> values = [34, 30, 29, 34, 38, 25, 35]
    >>> results = psa_values(values, 3) #using 3 period window.
    >>> ["%.2f" % x for x in results]
    ['1156.00', '1028.00', '965.67', '965.67', '1147.00', '1075.00', '1098.00']
    """
    if period:
        if period < 1:
            raise ValueError("period must be 1 or greater")
        window = float(period)
        period = int(period)
    results = []
    running = None
    for idx, value in enumerate(values):
        square = value * value
        if running is None:
            running = square / (idx + 1.0)
        elif (not period) or (idx < period):
            # Cumulative mean of squares while the window fills.
            running += (square - running) / (idx + 1.0)
        else:
            # Full window: swap the expired square for the new one.
            expired = values[idx - period]
            running += (square - expired * expired) / window
        results.append(running)
    return results
def _varbases(values, period=None, population=False):
    """
    Returns list of running variances (shared helper for var*/std* functions).

    :param values: list of values to iterate and compute stat.
    :param period: (optional) # of values included in computation.
        * None - includes all values in computation.
    :param population:
        * True - entire population, n.
        * False - sample set, n - 1 (default).
    :rtype: list of windowed variances (never negative).

    Examples:
    >>> values = [32.47, 32.70, 32.77, 33.11, 33.25, 33.23, 33.23]
    >>> results = _varbases(values, 3, population=True)
    >>> ["%.2f" % x for x in results]
    ['0.00', '0.01', '0.02', '0.03', '0.04', '0.00', '0.00']
    """
    if period:
        if period < 1:
            raise ValueError("period must be 1 or greater")
        period = int(period)
    # Running variance via E[x^2] - E[x]^2 built from the sma/psa streams.
    _smas = sma_values(values, period)
    _psas = psa_values(values, period)
    results = []
    lastval = None
    sample_adjust = 0.0 if population else 1.0
    if period:
        period_n = float(period)
    for bar, (sma_x, psa_x) in enumerate(zip(_smas, _psas)):
        if lastval is None:
            # A single observation has zero spread by definition.
            lastval = 0.0
            results.append(lastval)
            continue
        if (not period) or (bar < period):
            size = bar + 1.0
        else:
            size = period_n
        n = size - sample_adjust
        # Fix: the naive sum-of-squares formula suffers catastrophic
        # cancellation on near-constant inputs and could come out marginally
        # negative, which made the sqrt() in stdp_values/std_values raise a
        # math domain error.  Variance is non-negative, so clamp at zero.
        lastval = max(0.0, (psa_x * size - size * sma_x * sma_x) / n)
        results.append(lastval)
    return results
def _varbase(values, period=None, population=False):
"""
Returns final variance.
:param values: list of values to iterate and compute stat.
:param period: (optional) # of values included in computation.
* None - includes all values in computation.
:param population:
* True - entire population, n.
* False - sample set, n - 1 (default).
Examples:
>>> values = [32.47, 32.70, 32.77, 33.11, 33.25, 33.23, 33.23]
>>> result = _varbase(values, 3, population=True)
>>> print "%.2f" % result
0.00
"""
if not values:
return None
maxbar = len(values)
beg = 0
if period:
if period < 1:
raise ValueError("period must be 1 or greater")
beg = maxbar - int(period)
if beg < 0:
beg = 0
itemcnt = len(values[beg:])
if itemcnt < 2:
return 0.0
sample_adjust = 0.0
if not population:
sample_adjust = 1.0
n = 0
meandiffs = 0.0
mean = 0.0
for x in values[beg:]:
n += 1
delta = x - mean
mean += delta / n
meandiffs += delta * (x - mean)
return meandiffs / (itemcnt - sample_adjust)
def varp_values(values, period=None):
    """Returns list of running population variances.

    :param values: list of values to iterate and compute stat.
    :param period: (optional) # of values included in computation.
        * None - includes all values in computation.
    :rtype: list of windowed population variances.

    Examples:
    >>> values = [34, 30, 29, 34, 38, 25, 35]
    >>> results = varp_values(values, 3) #using 3 period window.
    >>> ["%.2f" % x for x in results]
    ['0.00', '4.00', '4.67', '4.67', '13.56', '29.56', '30.89']
    """
    # Thin wrapper: population=True divides by n rather than n - 1.
    return _varbases(values, period, population=True)
def varp_value(values, period=None):
    """Returns the final population variance.

    :param values: list of values to iterate and compute stat.
    :param period: (optional) # of values included in computation.
        * None - includes all values in computation.
    :rtype: the final population variance.

    Examples:
    >>> values = [34, 30, 29, 34, 38, 25, 35]
    >>> result = varp_value(values, 3) #using 3 period window.
    >>> "%.2f" % result
    '30.89'
    """
    # Thin wrapper: population=True divides by n rather than n - 1.
    return _varbase(values, period, population=True)
def var_values(values, period=None):
    """Returns list of running sample variances.

    :param values: list of values to iterate and compute stat.
    :param period: (optional) # of values included in computation.
        * None - includes all values in computation.
    :rtype: list of windowed sample variances.

    Examples:
    >>> values = [34, 30, 29, 34, 38, 25, 35]
    >>> results = var_values(values, 3) #using 3 period window.
    >>> ["%.2f" % x for x in results]
    ['0.00', '8.00', '7.00', '7.00', '20.33', '44.33', '46.33']
    """
    # Thin wrapper: default population=False divides by n - 1 (Bessel).
    return _varbases(values, period)
def var_value(values, period=None):
    """Returns the final sample variance.

    :param values: list of values to iterate and compute stat.
    :param period: (optional) # of values included in computation.
        * None - includes all values in computation.
    :rtype: final sample variance.

    Examples:
    >>> values = [34, 30, 29, 34, 38, 25, 35]
    >>> result = var_value(values, 3) #using 3 period window.
    >>> "%.2f" % result
    '46.33'
    """
    # Thin wrapper: default population=False divides by n - 1 (Bessel).
    return _varbase(values, period)
def stdp_values(values, period=None):
    """Returns list of running population standard deviations.

    :param values: list of values to iterate and compute stat.
    :param period: (optional) # of values included in computation.
        * None - includes all values in computation.
    :rtype: list of windowed population standard deviations.

    Examples:
    >>> values = [34, 30, 29, 34, 38, 25, 35]
    >>> results = stdp_values(values, 3) #using 3 period window.
    >>> ["%.2f" % x for x in results]
    ['0.00', '2.00', '2.16', '2.16', '3.68', '5.44', '5.56']
    """
    # Standard deviation is the square root of the running variance stream.
    variances = _varbases(values, period, population=True)
    return [math.sqrt(variance) for variance in variances]
def stdp_value(values, period=None):
    """Returns final population standard deviation.

    :param values: list of values to iterate and compute stat.
    :param period: (optional) # of values included in computation.
        * None - includes all values in computation.
    :rtype: final population standard deviation.

    Examples:
    >>> values = [34, 30, 29, 34, 38, 25, 35]
    >>> result = stdp_value(values, 3) #using 3 period window.
    >>> "%.2f" % result
    '5.56'
    """
    variance = _varbase(values, period, population=True)
    if not variance:
        # Pass None (empty input) and 0.0 through untouched.
        return variance
    return math.sqrt(variance)
def std_values(values, period=None):
    """Returns list of running sample standard deviations.

    :param values: list of values to iterate and compute stat.
    :param period: (optional) # of values included in computation.
        * None - includes all values in computation.
    :rtype: list of windowed sample standard deviations.

    Examples:
    >>> values = [34, 30, 29, 34, 38, 25, 35]
    >>> results = std_values(values, 3) #using 3 period window.
    >>> ["%.2f" % x for x in results]
    ['0.00', '2.83', '2.65', '2.65', '4.51', '6.66', '6.81']
    """
    # Standard deviation is the square root of the running variance stream.
    variances = _varbases(values, period)
    return [math.sqrt(variance) for variance in variances]
def std_value(values, period=None):
    """Returns final sample standard deviation.

    :param values: list of values to iterate and compute stat.
    :param period: (optional) # of values included in computation.
        * None - includes all values in computation.
    :rtype: final sample standard deviation.

    Examples:
    >>> values = [34, 30, 29, 34, 38, 25, 35]
    >>> result = std_value(values, 3) #using 3 period window.
    >>> "%.2f" % result
    '6.81'
    """
    variance = _varbase(values, period)
    if not variance:
        # Pass None (empty input) and 0.0 through untouched.
        return variance
    return math.sqrt(variance)
def max_values(values, period=None):
    """Returns list of running maximums.

    :param values: list of values to iterate and compute stat.
    :param period: (optional) # of values included in computation.
        * None - includes all values in computation.
    :rtype: list of windowed maximums.

    Examples:
    >>> values = [34, 30, 29, 34, 38, 25, 35]
    >>> results = max_values(values, 3) #using 3 period window.
    >>> ["%.2f" % x for x in results]
    ['34.00', '34.00', '34.00', '34.00', '38.00', '38.00', '38.00']
    """
    if period:
        if period < 1:
            raise ValueError("period must be 1 or greater")
        period = int(period)
    results = []
    window = []  # kept sorted ascending, so the maximum is window[-1]
    for idx, value in enumerate(values):
        if period and idx >= period:
            # Evict the value that just slid out of the window.
            expired = values[idx - period]
            del window[bisect.bisect_left(window, expired)]
        bisect.insort(window, value)
        results.append(window[-1])
    return results
def top_values(values, period=None, num=1):
    """Returns list of top num items.

    :param values: list of values to iterate and compute stat.
    :param period: (optional) # of values included in computation.
        * None - includes all values in computation.
    :param num: the num in the top num items.
    :rtype: list of windowed top num items (each ascending).

    Examples:
    >>> values = [34, 30, 29, 34, 38, 25, 35]
    >>> top_values(values, 3, 2) #3 period window and top 2 items.
    [[34], [30, 34], [30, 34], [30, 34], [34, 38], [34, 38], [35, 38]]
    """
    if period:
        if period < 1:
            raise ValueError("period must be 1 or greater")
        period = int(period)
    if num:
        num = int(num)
    results = []
    window = []  # kept sorted ascending
    for idx, value in enumerate(values):
        if period and idx >= period:
            # Evict the value that just slid out of the window.
            expired = values[idx - period]
            del window[bisect.bisect_left(window, expired)]
        bisect.insort(window, value)
        # Early on there are fewer than num items available.
        take = min(num, idx + 1)
        results.append(window[-take:])
    return results
def min_values(values, period=None):
    """Returns list of minimum items.

    :param values: list of values to iterate and compute stat.
    :param period: (optional) # of values included in computation.
        * None - includes all values in computation.
    :rtype: list of windowed minimum items.

    Examples:
    >>> values = [34, 30, 29, 34, 38, 25, 35]
    >>> min_values(values, 3) #using 3 period window.
    [34, 30, 29, 29, 29, 25, 25]
    """
    if period:
        if period < 1:
            raise ValueError("period must be 1 or greater")
        period = int(period)
    results = []
    window = []  # kept sorted ascending, so the minimum is window[0]
    for idx, value in enumerate(values):
        if period and idx >= period:
            # Evict the value that just slid out of the window.
            expired = values[idx - period]
            del window[bisect.bisect_left(window, expired)]
        bisect.insort(window, value)
        results.append(window[0])
    return results
def bottom_values(values, period=None, num=1):
    """Return, for each position, the ``num`` smallest values seen in a window.

    :param values: sequence of values to scan.
    :param period: (optional) size of the sliding window; a falsy value means
        the window keeps growing to cover every value seen so far.
    :param num: how many of the smallest values to report at each position.
    :rtype: list of lists — one ascending bottom-``num`` list per input value.
    Examples:
    >>> values = [34, 30, 29, 34, 38, 25, 35]
    >>> bottom_values(values, 3, 2) #3 period window and top 2 items.
    [[34], [30, 34], [29, 30], [29, 30], [29, 34], [25, 34], [25, 35]]
    """
    if period:
        if period < 1:
            raise ValueError("period must be 1 or greater")
        period = int(period)
    if num:
        num = int(num)
    out = []
    window = []  # kept sorted ascending at all times
    for idx, value in enumerate(values):
        # Evict the value that just slid out of the window, if any.
        if period and idx >= period:
            window.pop(bisect.bisect_left(window, values[idx - period]))
        bisect.insort(window, value)
        # Until num values have been seen, report everything available.
        count = idx + 1 if idx < num - 1 else num
        out.append(window[:count])
    return out
def _testit(verbose=None):
    """Run this module's doctest examples (the docstring Examples above).

    :param verbose: forwarded to :func:`doctest.testmod`; ``None`` keeps
        doctest's default (report failures only).
    """
    import doctest
    doctest.testmod(verbose=verbose)
# When executed as a script, self-test the module via its doctests.
if __name__ == "__main__":
    _testit()
| StarcoderdataPython |
9727337 | <reponame>paolostivanin/exam<gh_stars>10-100
from functools import partial
from operator import eq, ne
# Sentinel meaning "no expectation was given" — lets callers pass any value
# (including None) as an explicit before/after expectation.
IRRELEVANT = object()


class ChangeWatcher(object):
    """Context manager asserting how a watched value changes across a block.

    ``check(*args, **kwargs)`` is evaluated on entry and again on exit, and
    the two results are compared with ``comparator`` (``operator.ne`` to
    require a change, ``operator.eq`` to forbid one).  An ``AssertionError``
    is raised when the comparison fails, or when the optional ``before`` /
    ``after`` keyword expectations are not met.  (This is an assertion
    library, so raising via ``assert``/``AssertionError`` is the contract.)
    """

    # Failure message per comparator (or the 'invalid' marker for a wrong
    # final value); formatted with this instance's attributes.
    POSTCONDITION_FAILURE_MESSAGE = {
        ne: 'Value did not change',
        eq: 'Value changed from {before} to {after}',
        'invalid': 'Value changed to {after}, not {expected_after}'
    }

    def __init__(self, comparator, check, *args, **kwargs):
        self.check = check
        self.comparator = comparator
        # Pop the expectation keywords *before* storing kwargs so they are
        # never forwarded to check().  (The original popped after assignment,
        # which worked only because the same dict object was mutated.)
        self.expected_before = kwargs.pop('before', IRRELEVANT)
        self.expected_after = kwargs.pop('after', IRRELEVANT)
        self.args = args
        self.kwargs = kwargs

    def __enter__(self):
        self.before = self.__apply()

        if self.expected_before is not IRRELEVANT:
            check = self.comparator(self.before, self.expected_before)
            message = "Value before is {before}, not {expected_before}"
            assert not check, message.format(**vars(self))

    def __exit__(self, exec_type, exec_value, traceback):
        if exec_type is not None:
            return False  # reraises original exception

        self.after = self.__apply()

        met_precondition = self.comparator(self.before, self.after)
        after_value_matches = self.after == self.expected_after

        # Changed when it wasn't supposed to, or, didn't change when it was
        if not met_precondition:
            self.__raise_postcondition_error(self.comparator)
        # Do care about the after value, but it wasn't equal
        elif self.expected_after is not IRRELEVANT and not after_value_matches:
            self.__raise_postcondition_error('invalid')

    def __apply(self):
        """Evaluate the watched expression."""
        return self.check(*self.args, **self.kwargs)

    def __raise_postcondition_error(self, key):
        message = self.POSTCONDITION_FAILURE_MESSAGE[key]
        raise AssertionError(message.format(**vars(self)))
class AssertsMixin(object):
    # Assertion helpers built on ChangeWatcher, intended to be mixed into a
    # TestCase-style class.  Usage::
    #
    #     with self.assertChanges(get_value, before=1, after=2):
    #         ...
    #
    # assertChanges requires the watched value to change (operator.ne);
    # assertDoesNotChange requires it to stay the same (operator.eq).
    assertChanges = partial(ChangeWatcher, ne)
    assertDoesNotChange = partial(
        ChangeWatcher,
        eq,
        # before/after already default to IRRELEVANT inside ChangeWatcher;
        # passing them here just makes the "no expectations" contract explicit.
        before=IRRELEVANT,
        after=IRRELEVANT
    )
| StarcoderdataPython |
5025309 | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import Required, Email, EqualTo, ValidationError
from ..models import User
class RegistrationForm(FlaskForm):
    """Account sign-up form with uniqueness checks against the User table.

    NOTE(review): the '<EMAIL>' and '<PASSWORD>' label strings look like
    dataset-anonymization placeholders rather than intended UI text —
    confirm the original labels before shipping.
    """
    email = StringField('<EMAIL>', validators=[Required(), Email()])
    username = StringField('Preferred Username', validators=[Required()])
    # EqualTo ties this field to confirm_password below; mismatch shows the
    # given message.
    password = PasswordField('password', validators=[Required(), EqualTo('confirm_password', message="The password and Confirm password MUST match")])
    confirm_password = PasswordField('<PASSWORD>', validators=[Required()])
    submit = SubmitField('Sign Up')

    # Custom Validation for the registration email address.
    # wtforms invokes validate_<fieldname> hooks automatically on submit.
    def validate_email(self, data_field):
        if User.query.filter_by(email=data_field.data).first():
            raise ValidationError('An account with that email address exists')

    # Custom Validation for registration username
    def validate_username(self, data_field):
        if User.query.filter_by(username=data_field.data).first():
            raise ValidationError('That username is already taken. Please pick a different username')
class LoginForm(FlaskForm):
    """Sign-in form: email + password with a "remember me" option."""
    # Fix: removed the dataset artifact "| StarcoderdataPython" that was fused
    # onto the last field line — it would raise NameError at import time.
    email = StringField('Enter your email address...', validators=[Required(), Email()])
    password = PasswordField('Password', validators=[Required()])
    remember = BooleanField('Remember Me')
    submit = SubmitField('Sign In')
6504942 | <filename>parseq/scripts_compgen/basic_ib.py
import json
import os
import random
import re
import shelve
from copy import deepcopy
from functools import partial
from typing import Dict
import wandb
import qelos as q
import torch
import numpy as np
from torch.utils.data import DataLoader
from parseq.datasets import SCANDatasetLoader, autocollate, Dataset, CFQDatasetLoader
from transformers import AutoTokenizer, BertModel
from parseq.eval import make_array_of_metrics
from parseq.grammar import lisp_to_tree, are_equal_trees
from parseq.scripts_compgen.baseline import BasicRelPosEmb, TransformerEmbeddings, ORDERLESS, load_ds, \
SeqDecoderBaseline, TransformerDecoderCell
from parseq.scripts_compgen.transformer import TransformerConfig, TransformerStack
from parseq.vocab import Vocab
class TransformerDecoderCellWithVIB(TransformerDecoderCell):
    """Transformer decoder cell with a Variational Information Bottleneck.

    Adds a reparameterization head (mu / logvar projections) on top of the
    base cell.  ``mode`` selects where the bottleneck is applied:
    "afterenc" samples z from the encoder output, "afterdec" samples z from
    the decoder output just before the output projection.
    """
    def __init__(self, dim, vocab:Vocab=None, inpvocab:Vocab=None, numlayers:int=6, numheads:int=6, userelpos=False, useabspos=True,
                 relposmode="basic", relposrng=10,
                 dropout:float=0., maxpos=512, bertname="bert-base-uncased", mode="afterdec", **kw):
        super(TransformerDecoderCellWithVIB, self).__init__(dim,
            vocab=vocab, inpvocab=inpvocab, numlayers=numlayers, numheads=numheads,
            userelpos=userelpos, useabspos=useabspos, relposmode=relposmode, relposrng=relposrng,
            dropout=dropout, maxpos=maxpos, bertname=bertname, **kw)
        # where to apply the bottleneck: "afterenc" or "afterdec"
        self.mode = mode
        # reparam network: shared trunk, then separate mu / logvar heads
        self.vib_linA = None
        self.vib_linA = torch.nn.Sequential(torch.nn.Linear(dim, dim*2), torch.nn.ReLU())
        self.vib_lin_mu = torch.nn.Linear(dim*2, dim)
        self.vib_lin_logvar = torch.nn.Linear(dim*2, dim)

    def encode_source(self, x):
        """Encode token ids `x`; in "afterenc" mode also sample the VIB latent.

        Returns (enc, encmask, (mu, logvar)) while training and
        (enc, encmask) in eval mode (where the mean `mu` is used directly).
        """
        encmask = (x != 0)  # position 0 is assumed to be padding
        relpos = None
        if self.encrelposemb is not None:      # compute relative positions
            positions = torch.arange(x.size(1), device=x.device)
            relpos = positions[None, :] - positions[:, None]
            # clip to [-relposrng, relposrng] and shift into bucket index space
            relpos = relpos.clamp(-self.relposrng, self.relposrng) + self.relposrng + 1
            relpos = relpos[None, :, :, None]
        if relpos is not None:
            encs = self.encoder_model(x, attention_mask=encmask, relpos=relpos)[0]
        else:
            encs = self.encoder_model(x, attention_mask=encmask)[0]
        if self.adapter is not None:
            encs = self.adapter(encs)
        if self.mode == "afterenc":
            encz, (mu, logvar) = self.sample_z(encs)
            if encz is not None:
                # training: return the sampled latent plus (mu, logvar) for the KL term
                return encz, encmask, (mu, logvar)
            else:
                # eval: deterministic — use the mean
                return mu, encmask
        else:
            if self.training:
                # KL inputs come from the decoder side in "afterdec" mode
                return encs, encmask, None
            else:
                return encs, encmask

    def sample_z(self, x):
        """Reparameterized sample from q(z|x); returns (sample_or_None, (mu, logvar)).

        The sample is only drawn while training; in eval mode the first
        element is None and callers fall back to mu.
        """
        if self.vib_linA is not None:
            x = self.vib_linA(x)
        mu, logvar = self.vib_lin_mu(x), self.vib_lin_logvar(x)
        if self.training:
            # z = mu + sigma * eps, with sigma = exp(0.5 * logvar)
            ret = mu + torch.exp(0.5 * logvar) * torch.randn_like(mu)
        else:
            ret = None
        return ret, (mu, logvar)  # (batsize, seqlen, dim)

    def forward(self, tokens:torch.Tensor=None, enc=None, encmask=None, cache=None):
        """One decoding step (or full teacher-forced pass when cache is None).

        Returns (logits, cache, (mu, logvar)) while training and
        (logits, cache) in eval mode.
        """
        padmask = (tokens != 0)
        try:
            embs = self.dec_emb(tokens)
        except Exception as e:
            raise e
        if self.absposemb is not None:
            posembs = self.absposemb(torch.arange(tokens.size(1), device=tokens.device))[None]
            embs = embs + posembs
        relpos = None
        if self.relposemb is not None:      # compute relative positions
            positions = torch.arange(tokens.size(1), device=tokens.device)
            relpos = positions[None, :] - positions[:, None]
            relpos = relpos.clamp(-self.relposrng, self.relposrng) + self.relposrng + 1
            relpos = relpos[None, :, :, None]
        if cache is not None:
            # incremental decoding: only the newest token position is needed
            embs = embs[:, -1:, :]
            if relpos is not None:
                relpos = relpos[:, -1:, :, :]
        _ret = self.decoder(inputs_embeds=embs, attention_mask=padmask,
                            encoder_hidden_states=enc,
                            encoder_attention_mask=encmask, use_cache=True,
                            past_key_value_states=cache,
                            relpos=relpos)
        ret = _ret[0]
        c = ret
        cache = _ret[1]
        if self.mode == "afterdec":
            zs, (mu, logvar) = self.sample_z(c)  # (batsize, seqlen, dim)
            if self.training:
                logits = self.out(zs)  # (batsize, seqlen, vocabsize)
                return logits, cache, (mu, logvar)
            else:
                assert zs is None
                logits = self.out(mu)
                return logits, cache
        else:
            logits = self.out(c)
            if self.training:
                # (mu, logvar) was already produced by encode_source in this mode
                return logits, cache, None
            else:
                return logits, cache
class SeqDecoderBaselineWithVIB(SeqDecoderBaseline):
    """Seq2seq decoder wrapper adding a weighted VIB KL penalty to the loss."""

    def __init__(self, tagger:TransformerDecoderCellWithVIB, vocab=None, max_size:int=100, smoothing:float=0., priorweight=1., **kw):
        super(SeqDecoderBaselineWithVIB, self).__init__(tagger=tagger, vocab=vocab, max_size=max_size, smoothing=smoothing, **kw)
        # weight of the KL(q(z|x) || N(0, I)) term added to the token loss
        self.priorweight = priorweight

    def compute_loss(self, logits, tgt, mulogvar, mulogvarmask=None):
        """
        :param logits: (batsize, seqlen, vocsize)
        :param tgt: (batsize, seqlen)
        :param mulogvar: (mu, logvar) pair from the VIB head.
        :param mulogvarmask: mask for the KL term; defaults to the target mask.
        :return: (loss, priorkl, acc) — per-example totals and sequence accuracy.
        """
        mask = (tgt != 0).float()  # target id 0 is padding
        # batsize, seqlen = tgt.size()
        logprobs = self.logsm(logits)
        if self.smoothing > 0:
            loss = self.loss(logprobs, tgt)
        else:
            # print(tgt.max(), tgt.min(), logprobs.size(-1))
            # print()
            loss = self.loss(logprobs.permute(0, 2, 1), tgt)  # (batsize, seqlen)
        loss = loss * mask
        loss = loss.sum(-1)
        priorkl = torch.zeros(loss.size(0), device=loss.device)
        if self.priorweight > 0:
            mulogvarmask = mask if mulogvarmask is None else mulogvarmask
            mu, logvar = mulogvar  # (batsize, seqlen, dim)
            # analytic KL between N(mu, exp(logvar)) and the standard normal prior
            priorkls = -0.5 * torch.sum(1 + logvar - mu ** 2 - logvar.exp(), dim=-1)  # (batsize, seqlen)
            priorkls = priorkls * mulogvarmask
            priorkl = priorkls.sum(-1) * self.priorweight
        loss = loss + priorkl
        # exact-sequence accuracy: every non-padding token must match
        best_pred = logits.max(-1)[1]  # (batsize, seqlen)
        best_gold = tgt
        same = best_pred == best_gold
        same = same | ~(mask.bool())
        acc = same.all(-1)  # (batsize,)
        return loss, priorkl, acc.float()

    def train_forward(self, x:torch.Tensor, y:torch.Tensor):    # --> implement one step training of tagger
        # extract a training example from y:
        x, newy, tgt = self.extract_training_example(x, y)
        enc, encmask, mulogvar = self.tagger.encode_source(x)
        # run through tagger: the same for all versions
        # mulogvar is set by encode_source in "afterenc" mode, by the tagger
        # forward pass in "afterdec" mode — only one of the two is non-None.
        mulogvarmask = encmask if mulogvar is not None else None
        logits, cache, _mulogvar = self.tagger(tokens=newy, enc=enc, encmask=encmask, cache=None)
        mulogvar = _mulogvar if mulogvar is None else mulogvar
        # logits: (numsamples, batsize, seqlen, vocabsize)
        # cache: ...
        # mu, sigma: (numsamples, batsize, dim)
        # compute loss: different versions do different masking and different targets
        loss, priorkl, acc = self.compute_loss(logits, tgt, mulogvar, mulogvarmask)
        return {"loss": loss, "priorkl": priorkl, "acc": acc}, logits
def run(lr=0.0001,
        enclrmul=0.1,
        smoothing=0.1,
        gradnorm=3,
        batsize=60,
        epochs=16,
        patience=10,
        validinter=3,
        validfrac=0.1,
        warmup=3,
        cosinelr=False,
        dataset="scan/length",
        maxsize=50,
        seed=42,
        hdim=768,
        numlayers=6,
        numheads=12,
        dropout=0.1,
        bertname="bert-base-uncased",
        testcode=False,
        userelpos=False,
        gpu=-1,
        evaltrain=False,
        trainonvalid=False,
        trainonvalidonly=False,
        priorweight=1.,
        mode="afterdec",
        recomputedata=False,
        ):
    """Train and evaluate one VIB-regularized compositional-generalization model.

    Builds the dataset, model, optimizer and schedules from the keyword
    settings, trains with early stopping on validation tree accuracy, then
    reports train/valid/test metrics to wandb.
    """
    settings = locals().copy()  # snapshot of all hyperparameters for logging
    q.pp_dict(settings, indent=3)
    # wandb.init()
    wandb.init(project=f"compgen_basic_ib", config=settings, reinit=True)
    # seed everything for reproducibility
    random.seed(seed)
    torch.manual_seed(seed)
    np.random.seed(seed)
    device = torch.device("cpu") if gpu < 0 else torch.device("cuda", gpu)
    tt = q.ticktock("script")
    tt.tick("data")
    trainds, validds, testds, fldic, inpdic = load_ds(dataset=dataset, validfrac=validfrac, bertname=bertname, recompute=recomputedata)
    if trainonvalid:
        # fold validation data into training and validate on test instead
        trainds = trainds + validds
        validds = testds

    tt.tick("dataloaders")
    traindl = DataLoader(trainds, batch_size=batsize, shuffle=True, collate_fn=autocollate)
    validdl = DataLoader(validds, batch_size=batsize, shuffle=False, collate_fn=autocollate)
    testdl = DataLoader(testds, batch_size=batsize, shuffle=False, collate_fn=autocollate)
    # print(json.dumps(next(iter(trainds)), indent=3))
    # print(next(iter(traindl)))
    # print(next(iter(validdl)))
    tt.tock()
    tt.tock()

    tt.tick("model")
    cell = TransformerDecoderCellWithVIB(hdim, vocab=fldic, inpvocab=inpdic, numlayers=numlayers, numheads=numheads, dropout=dropout,
                                         bertname=bertname, userelpos=userelpos, useabspos=not userelpos, mode=mode)
    decoder = SeqDecoderBaselineWithVIB(cell, vocab=fldic, max_size=maxsize, smoothing=smoothing, priorweight=priorweight)
    print(f"one layer of decoder: \n {cell.decoder.block[0]}")
    tt.tock()

    if testcode:
        # smoke-test one batch through train and eval modes before training
        tt.tick("testcode")
        batch = next(iter(traindl))
        # out = tagger(batch[1])
        tt.tick("train")
        out = decoder(*batch)
        tt.tock()
        decoder.train(False)
        tt.tick("test")
        out = decoder(*batch)
        tt.tock()
        tt.tock("testcode")

    # metric arrays: t=train losses, tmetrics/vmetrics/xmetrics = tree accuracy
    # on train/valid/test respectively
    tloss = make_array_of_metrics("loss", "priorkl", "acc", reduction="mean")
    tmetrics = make_array_of_metrics("treeacc", reduction="mean")
    vmetrics = make_array_of_metrics("treeacc", reduction="mean")
    xmetrics = make_array_of_metrics("treeacc", reduction="mean")

    # region parameters
    def get_parameters(m, _lr, _enclrmul):
        # Split parameters so the pretrained encoder can get a scaled-down LR.
        bertparams = []
        otherparams = []
        for k, v in m.named_parameters():
            if "encoder_model." in k:
                bertparams.append(v)
            else:
                otherparams.append(v)
        if len(bertparams) == 0:
            raise Exception("No encoder parameters found!")
        paramgroups = [{"params": bertparams, "lr": _lr * _enclrmul},
                       {"params": otherparams}]
        return paramgroups
    # endregion

    def get_optim(_m, _lr, _enclrmul, _wreg=0):
        # NOTE(review): the closed-over `lr` is used below instead of the `_lr`
        # parameter, so `_lr` is effectively ignored — confirm this is intended.
        paramgroups = get_parameters(_m, _lr=lr, _enclrmul=_enclrmul)
        optim = torch.optim.Adam(paramgroups, lr=lr, weight_decay=_wreg)
        return optim

    def clipgradnorm(_m=None, _norm=None):
        torch.nn.utils.clip_grad_norm_(_m.parameters(), _norm)

    if patience < 0:
        patience = epochs
    # early stopping on validation tree accuracy; remembers the best cell
    eyt = q.EarlyStopper(vmetrics[0], patience=patience, min_epochs=30, more_is_better=True,
                         remember_f=lambda: deepcopy(cell))

    def wandb_logger():
        # push per-epoch train losses and train/valid tree accuracy to wandb
        d = {}
        for name, loss in zip(["loss", "priorkl", "acc"], tloss):
            d["train_"+name] = loss.get_epoch_error()
        for name, loss in zip(["tree_acc"], tmetrics):
            d["train_"+name] = loss.get_epoch_error()
        for name, loss in zip(["tree_acc"], vmetrics):
            d["valid_"+name] = loss.get_epoch_error()
        wandb.log(d)

    t_max = epochs
    optim = get_optim(cell, lr, enclrmul)
    print(f"Total number of updates: {t_max} .")
    if cosinelr:
        assert t_max > (warmup + 10)
        # linear warmup followed by cosine decay to zero
        lr_schedule = q.sched.Linear(steps=warmup) >> q.sched.Cosine(low=0., high=1.0, steps=t_max-warmup) >> (0. * lr)
    else:
        lr_schedule = q.sched.Linear(steps=warmup) >> 1.
    lr_schedule = q.sched.LRSchedule(optim, lr_schedule)

    trainbatch = partial(q.train_batch, on_before_optim_step=[lambda : clipgradnorm(_m=cell, _norm=gradnorm)])

    if trainonvalidonly:
        # debugging mode: train directly on validation data, validate on test
        traindl = validdl
        validdl = testdl

    trainepoch = partial(q.train_epoch, model=decoder,
                         dataloader=traindl,
                         optim=optim,
                         losses=tloss,
                         device=device,
                         _train_batch=trainbatch,
                         on_end=[lambda: lr_schedule.step()])

    trainevalepoch = partial(q.test_epoch,
                             model=decoder,
                             losses=tmetrics,
                             dataloader=traindl,
                             device=device)

    on_end_v = [lambda: eyt.on_epoch_end(), lambda: wandb_logger()]
    validepoch = partial(q.test_epoch,
                         model=decoder,
                         losses=vmetrics,
                         dataloader=validdl,
                         device=device,
                         on_end=on_end_v)

    tt.tick("training")
    if evaltrain:
        validfs = [trainevalepoch, validepoch]
    else:
        validfs = [validepoch]
    q.run_training(run_train_epoch=trainepoch,
                   run_valid_epoch=validfs,
                   max_epochs=epochs,
                   check_stop=[lambda: eyt.check_stop()],
                   validinter=validinter)
    tt.tock("done training")

    # evaluate the last-epoch model on test before restoring the best one
    tt.tick("running test before reloading")
    testepoch = partial(q.test_epoch,
                        model=decoder,
                        losses=xmetrics,
                        dataloader=testdl,
                        device=device)
    testres = testepoch()
    print(f"Test tree acc: {testres}")
    tt.tock("ran test")

    if eyt.remembered is not None:
        # restore the early-stopping checkpoint with best validation accuracy
        tt.msg("reloading best")
        decoder.tagger = eyt.remembered
        tagger = eyt.remembered

    tt.tick("rerunning validation")
    validres = validepoch()
    tt.tock(f"Validation results: {validres}")

    tt.tick("running train")
    trainres = trainevalepoch()
    print(f"Train tree acc: {trainres}")
    tt.tock()

    tt.tick("running test")
    testres = testepoch()
    print(f"Test tree acc: {testres}")
    tt.tock()

    # record final metrics alongside the hyperparameters in wandb
    settings.update({"final_train_loss": tloss[0].get_epoch_error()})
    settings.update({"final_train_tree_acc": tmetrics[0].get_epoch_error()})
    settings.update({"final_valid_tree_acc": vmetrics[0].get_epoch_error()})
    settings.update({"final_test_tree_acc": xmetrics[0].get_epoch_error()})
    wandb.config.update(settings)
    q.pp_dict(settings)
def run_experiment(
        lr=-1.,
        enclrmul=-1.,
        smoothing=-1.,
        gradnorm=2,
        batsize=-1,
        epochs=-1,      # probably 11 is enough
        patience=100,
        validinter=-1,
        warmup=3,
        cosinelr=False,
        dataset="default",
        maxsize=-1,
        seed=-1,
        hdim=-1,
        numlayers=-1,
        numheads=-1,
        dropout=-1.,
        bertname="vanilla",
        testcode=False,
        userelpos=False,
        trainonvalidonly=False,
        evaltrain=False,
        gpu=-1,
        priorweight=-1.,
        mode="afterdec",
        recomputedata=False,
        ):
    """Random-search driver around run().

    Negative / "default" argument values mean "sweep over the built-in
    ranges"; any explicitly set value pins that hyperparameter to a
    single-element range.
    """
    settings = locals().copy()
    _dataset = None
    if dataset.endswith("mcd"):
        # "…mcd" expands to the three MCD splits (mcd0, mcd1, mcd2)
        _dataset = [dataset+str(i) for i in range(3)]
    # default search space; entries get overridden/pinned below
    ranges = {
        "dataset": ["scan/random", "scan/length", "scan/add_jump", "scan/add_turn_left", "scan/mcd1", "scan/mcd2", "scan/mcd3"],
        "dropout": [0.1, 0.25, 0.5],
        "seed": [42, 87646464, 456852],
        "epochs": [15],
        "batsize": [60],
        "hdim": [768],
        "numheads": [12],
        "numlayers": [6],
        "lr": [0.0001],
        "enclrmul": [0.1],        # use 1.
        "smoothing": [0., 0.1],
        # "patience": [-1],
        # "warmup": [20],
        "validinter": [2],
        # "gradacc": [1],
        "priorweight": [0.01, 0.03, 0.1, 0.3, 1.],
    }
    if _dataset is not None:
        ranges["dataset"] = _dataset
        settings["dataset"] = "default"
    if bertname.startswith("none") or bertname == "vanilla":
        # no pretrained encoder: train from scratch with a smaller model,
        # uniform LR and more epochs
        ranges["lr"] = [0.0001]
        ranges["enclrmul"] = [1.]
        ranges["epochs"] = [71]
        ranges["hdim"] = [384]
        ranges["numheads"] = [6]
        ranges["batsize"] = [256]
        ranges["validinter"] = [5]
        ranges["dropout"] = [0.1]
        ranges["smoothing"] = [0.]
    if dataset.startswith("cfq"):
        settings["maxsize"] = 200
    elif dataset.startswith("scan"):
        settings["maxsize"] = 50
    # pin any hyperparameter the caller set explicitly (non-negative number
    # or non-"default" string) to a single-element range
    for k in ranges:
        if k in settings:
            if isinstance(settings[k], str) and settings[k] != "default":
                ranges[k] = [settings[k]]
            elif isinstance(settings[k], (int, float)) and settings[k] >= 0:
                ranges[k] = [settings[k]]
            else:
                pass
                # raise Exception(f"something wrong with setting '{k}'")
            del settings[k]

    def checkconfig(spec):
        # hook for rejecting invalid hyperparameter combinations; accept all
        return True

    print(__file__)
    p = __file__ + f".baseline.{dataset.replace('/', '-')}"
    q.run_experiments_random(
        run, ranges, path_prefix=p, check_config=checkconfig, **settings)
# CLI entry point: q.argprun maps command-line flags onto run_experiment's
# keyword arguments.
# Fix: removed the dataset artifact "| StarcoderdataPython" that was fused
# onto this line — it would raise NameError when run as a script.
if __name__ == '__main__':
    q.argprun(run_experiment)
1840209 | import game
import policy_value_net_numpy
import pickle
import operator
import json
import numpy as np
#TODO: delete the following paths -lines
#board_file_path = "board_file_6.txt"
#model_file = 'best_policy_8_8_5.model'#'best_policy_6_6_4.model'
def file_to_board(board_file_path,rows_num, cols_num, X="X",O="O",empty="_",splitter="\t"):
    """Build a game.Board from a text-grid file.

    :param board_file_path: path to a file with rows_num lines of cols_num
        cells separated by `splitter`.
    :param X: cell text marking a player-1 move in the file.
    :param O: cell text marking a player-2 move.
    :param empty: cell text for an empty square (left untouched).
    :param splitter: cell separator within a line.
    :return: a game.Board object filled with the file's position.
    """
    board = game.Board(width=cols_num, height=rows_num)
    board.init_board()
    with open(board_file_path, "r") as fh:
        lines = fh.read().splitlines()
    assert len(lines) == rows_num
    for row_idx, line in enumerate(lines):
        cells = line.split(splitter)
        assert len(cells) == cols_num
        for col_idx, cell in enumerate(cells):
            # moves are numbered row-major across the board
            move = row_idx * cols_num + col_idx
            if cell == X:
                board.do_move_manual(move, 1)
            elif cell == O:
                board.do_move_manual(move, 2)
    return board
def json_file_to_AlphaZeroBoard(json_path,rows_num, cols_num, matrix_key, X=1,O=2,empty=0):
    """Build a game.Board from a matrix stored inside a JSON file.

    :param json_path: path to a JSON file holding an array-like matrix.
    :param matrix_key: key of the matrix inside the JSON object, e.g. "position".
    :param X: matrix value marking a player-1 square.
    :param O: matrix value marking a player-2 square.
    :param empty: matrix value for an empty square (left untouched).
    :return: a game.Board object filled with the file's position.
    """
    with open(json_path, "r") as fh:
        grid = np.array(json.load(fh)[matrix_key])
    assert grid.shape == (rows_num, cols_num)
    board = game.Board(width=cols_num, height=rows_num)
    board.init_board()
    for row in range(rows_num):
        for col in range(cols_num):
            cell = grid[row][col]
            # moves are numbered row-major across the board
            move = row * cols_num + col
            if cell == X:
                board.do_move_manual(move, 1)
            elif cell == O:
                board.do_move_manual(move, 2)
    return board
def extract_marked_locations(json_path,rows_num, cols_num, matrix_key, X=1,O=2,empty=0):
    """Collect the coordinates of X and O marks from a JSON board matrix.

    :param json_path: path to a JSON file holding an array-like matrix.
    :param matrix_key: key of the matrix inside the JSON object, e.g. "position".
    :param X: matrix value marking player-1 squares.
    :param O: matrix value marking player-2 squares.
    :return: two lists of (row, col) tuples — X locations and O locations,
        each in row-major scan order.
    """
    with open(json_path, "r") as fh:
        grid = np.array(json.load(fh)[matrix_key])
    assert grid.shape == (rows_num, cols_num)
    x_squares, o_squares = [], []
    for row in range(rows_num):
        for col in range(cols_num):
            cell = grid[row][col]
            if cell == X:
                x_squares.append((row, col))
            elif cell == O:
                o_squares.append((row, col))
    return x_squares, o_squares
def extract_scores(DQN_results, board):
    """Map each (move, probability) pair onto board coordinates.

    :param DQN_results: iterable of (move_index, probability) pairs.
    :param board: object whose move_to_location(move) yields a (row, col)
        sequence for a flat move index.
    :return: dict mapping (row, col) tuples to their probabilities.
    """
    return {
        tuple(board.move_to_location(move)[:2]): probability
        for move, probability in DQN_results
    }
def create_probabilites_matrix(actions_dict, X_locations, O_locations, board_size):
    """Build a square heat-map combining board marks and move probabilities.

    :param actions_dict: dict mapping (row, col) -> probability for empty squares.
    :param X_locations: (row, col) pairs occupied by X (encoded as 1).
    :param O_locations: (row, col) pairs occupied by O (encoded as 2).
    :param board_size: side length of the square board.
    :return: numpy (board_size, board_size) matrix where occupied squares hold
        their mark code and every other listed square holds its probability.
    """
    heat = np.zeros((board_size, board_size))
    # place the fixed marks first
    for r, c in X_locations:
        heat[r, c] = 1
    for r, c in O_locations:
        heat[r, c] = 2
    # then fill the remaining squares with their action probabilities
    for (r, c), prob in actions_dict.items():
        # sanity check: a probability must only land on an empty square
        assert heat[r, c] == 0
        heat[r, c] = prob
    return heat
# Main script: load a saved board position and policy model, then build a
# probability heat-map for the current player's moves.
# NOTE(review): paths and board size are hard-coded for a 6x6 run — adjust
# json_path / model_file / rows_num / cols_num together.
json_path = "json_6x6.json"
model_file = 'best_policy_6_6_4.model2'
rows_num = 6
cols_num = 6
# Set up board
board = json_file_to_AlphaZeroBoard(json_path, matrix_key= "position", rows_num=rows_num, cols_num=cols_num)
# Extract X and O locations in the original board
X_locations, O_locations = extract_marked_locations(json_path, matrix_key= "position", rows_num=rows_num, cols_num=cols_num)
# Create an evaluator for each board and evaluate current board.
# SECURITY: pickle.load executes arbitrary code — only load trusted model files.
policy_param = pickle.load(open(model_file, 'rb'),encoding='bytes')
evaluator = policy_value_net_numpy.PolicyValueNetNumpy(rows_num, cols_num, policy_param)
zipped_res, board_score = evaluator.policy_value_fn(board) # Scores are calculated for board.current_player!
actions_scores = extract_scores(zipped_res, board)
heat_map = create_probabilites_matrix(actions_scores, X_locations, O_locations, board_size=rows_num)
'''
best_move_index = max(actions_scores.items(), key=operator.itemgetter(1))[0]
best_move_index = (best_move_index[0] + 1, best_move_index[1] + 1) #Show "Matlab" index
print(actions_scores)
print("\n"*2)
print("Best move is {best_move_index}".format(best_move_index=best_move_index))
'''
#TODO: delete the following lines later (they were used for sanity-check tests)
#win, winner = new_board.has_a_winner(original_code=False)
#print(win, winner)
| StarcoderdataPython |
1889876 | <reponame>QinHan-Erin/AMOS<filename>python/tvm/tensor_graph/testing/models/__init__.py
from .shufflenet import ShuffleNet
from .capsule_tg import CapsuleNetwork
from .LLTM import LLTM
from .MI_LSTM import MI_LSTM
from .transformer import Transformer
from .resnet import resnet18, resnet50
from .mobilenet_v1 import MobileNetv1
from .weightnet import WeightNet
from .mobilenet_v2 import MobileNetV2
| StarcoderdataPython |
1628635 | ##############################################################################
# Copyright (c) 2017, Los Alamos National Security, LLC
# Produced at the Los Alamos National Laboratory.
#
# This file is part of Spack.
# Created by <NAME>, <EMAIL>, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Coevp(MakefilePackage):
    """CoEVP is a scale-bridging proxy application for embedded viscoplasticity
    applications. It is created and maintained by The Exascale Co-Design Center
    for Materials in Extreme Environments (ExMatEx). The code is intended to
    serve as a vehicle for co-design by allowing others to extend and/or
    reimplement it as needed to test performance of new architectures,
    programming models, etc.
    Due to the size and complexity of the studied models, as well as
    restrictions on distribution, the currently available LULESH proxy
    application provides the coarse-scale model implementation and the ASPA
    proxy application provides the adaptive sampling support."""

    homepage = 'https://github.com/exmatex/CoEVP'

    version('develop', git='https://github.com/exmatex/CoEVP.git',
            branch='master')

    # Optional features, toggled by spack variants below.
    variant('mpi', default=True, description='Build with MPI Support')
    variant('silo', default=False, description='Build with silo Support')
    variant('flann', default=False, description='Build with flann Support')

    depends_on('mpi', when='+mpi')
    depends_on('silo', when='+silo')
    depends_on('flann@1.8.1', when='+flann')
    depends_on('lapack')

    tags = ['proxy-app']

    @property
    def build_targets(self):
        # Translate the active spec variants into make(1) command-line
        # variables consumed by CoEVP's Makefile.
        targets = []
        if '+mpi' in self.spec:
            targets.append('COEVP_MPI=yes')
        else:
            targets.append('COEVP_MPI=no')
        if '+flann' in self.spec:
            targets.append('FLANN=yes')
            # FLANN_TARGET is cleared so the Makefile does not try to
            # download/build its own copy of flann.
            targets.append('FLANN_TARGET=')
            targets.append('FLANN_LOC={0}'.format(
                join_path(self.spec['flann'].prefix.include, 'flann')))
        else:
            targets.append('FLANN=no')
        targets.append('REDIS=no')
        if '+silo' in self.spec:
            targets.append('SILO=yes')
            targets.append('SILO_TARGET=')
            targets.append('SILO_LOC={0}'.format(self.spec['silo'].prefix))
        else:
            targets.append('SILO=no')
        targets.append('TWEMPROXY=no')
        targets.append('LAPACK=%s' % self.spec['lapack'].libs.ld_flags)
        return targets

    def install(self, spec, prefix):
        # The Makefile has no install target: copy the built binary and the
        # documentation into the prefix manually.
        mkdir(prefix.bin)
        mkdir(prefix.doc)
        install('LULESH/lulesh', prefix.bin)
        install('COPYRIGHT', prefix.doc)
        install('README.md', prefix.doc)
        install('CoEVP.pdf', prefix.doc)
| StarcoderdataPython |
6642038 | import math
import numpy as np
from .augmentor import DataAugment
class MissingSection(DataAugment):
    """Missing-section augmentation of image stacks

    Randomly deletes ``num_sections`` interior z-sections (never the first or
    last) from both the image and label volumes.

    Args:
        num_sections (int): number of missing sections.
        p (float): probability of applying the augmentation.
    """
    def __init__(self, num_sections=2, p=0.5):
        super(MissingSection, self).__init__(p=p)
        # BUG FIX: the constructor previously ignored its argument and
        # hard-coded ``self.num_sections = 2``.
        self.num_sections = num_sections
        self.set_params()

    def set_params(self):
        # Request extra z-padding so the sampled volume still reaches the
        # target depth after sections are deleted.
        self.sample_params['add'] = [int(math.ceil(self.num_sections / 2.0)), 0, 0]

    def missing_section(self, data, random_state):
        """Delete ``num_sections`` randomly chosen interior z-sections.

        Returns the reduced (image, label) pair; np.delete already returns
        new arrays, so no defensive copies are needed.
        """
        images, labels = data['image'], data['label']
        # candidate indices exclude the first and last section
        idx = random_state.choice(np.arange(1, images.shape[0] - 1), self.num_sections, replace=False)
        new_images = np.delete(images, idx, 0)
        new_labels = np.delete(labels, idx, 0)
        return new_images, new_labels

    def __call__(self, data, random_state=None):
        if random_state is None:
            # fixed seed keeps the no-seed path deterministic
            random_state = np.random.RandomState(1234)
        new_images, new_labels = self.missing_section(data, random_state)
        return {'image': new_images, 'label': new_labels}
3527778 | <gh_stars>0
"""
@package mi.dataset.driver.WFP_ENG.STC_IMODEM.driver
@file marine-integrations/mi/dataset/driver/WFP_ENG/STC_IMODEM/driver.py
@author <NAME>
@brief Driver for the WFP_ENG__STC_IMODEM
Release notes:
initial release
"""
__author__ = '<NAME>'
__license__ = 'Apache 2.0'
from mi.core.common import BaseEnum
from mi.core.log import get_logger
log = get_logger()
from mi.core.exceptions import ConfigurationException
from mi.dataset.dataset_driver import HarvesterType, DataSetDriverConfigKeys
from mi.dataset.dataset_driver import MultipleHarvesterDataSetDriver
from mi.dataset.parser.wfp_eng__stc_imodem import WfpEngStcImodemParser
from mi.dataset.parser.wfp_eng__stc_imodem_particles import WfpEngStcImodemStatusRecoveredDataParticle
from mi.dataset.parser.wfp_eng__stc_imodem_particles import WfpEngStcImodemStartRecoveredDataParticle
from mi.dataset.parser.wfp_eng__stc_imodem_particles import WfpEngStcImodemEngineeringRecoveredDataParticle
from mi.dataset.parser.wfp_eng__stc_imodem_particles import WfpEngStcImodemStatusTelemeteredDataParticle
from mi.dataset.parser.wfp_eng__stc_imodem_particles import WfpEngStcImodemStartTelemeteredDataParticle
from mi.dataset.parser.wfp_eng__stc_imodem_particles import WfpEngStcImodemEngineeringTelemeteredDataParticle
from mi.dataset.harvester import SingleDirectoryHarvester
class DataTypeKey(BaseEnum):
    # Keys distinguishing the two data streams handled by this driver;
    # used to index per-stream parser and harvester configuration.
    WFP_ENG_STC_IMODEM_RECOVERED = 'wfp_eng_stc_imodem_recovered'
    WFP_ENG_STC_IMODEM_TELEMETERED = 'wfp_eng_stc_imodem_telemetered'
class WFP_ENG__STC_IMODEM_DataSetDriver(MultipleHarvesterDataSetDriver):
def __init__(self, config, memento, data_callback, state_callback,
event_callback, exception_callback):
data_keys = [DataTypeKey.WFP_ENG_STC_IMODEM_RECOVERED, DataTypeKey.WFP_ENG_STC_IMODEM_TELEMETERED]
harvester_type = {
DataTypeKey.WFP_ENG_STC_IMODEM_RECOVERED: HarvesterType.SINGLE_DIRECTORY,
DataTypeKey.WFP_ENG_STC_IMODEM_TELEMETERED: HarvesterType.SINGLE_DIRECTORY
}
super(WFP_ENG__STC_IMODEM_DataSetDriver, self).__init__(
config, memento, data_callback, state_callback, event_callback,
exception_callback, data_keys, harvester_type)
@classmethod
def stream_config(cls):
return [WfpEngStcImodemStatusRecoveredDataParticle.type(),
WfpEngStcImodemStartRecoveredDataParticle.type(),
WfpEngStcImodemEngineeringRecoveredDataParticle.type(),
WfpEngStcImodemStatusTelemeteredDataParticle.type(),
WfpEngStcImodemStartTelemeteredDataParticle.type(),
WfpEngStcImodemEngineeringTelemeteredDataParticle.type()]
def _build_parser(self, parser_state, infile, data_key=None):
"""
Build and return the parser
"""
# Default the parser to None
parser = None
config = self._parser_config.get(data_key)
#
# If the key is WFP_ENG_STC_IMODEM_RECOVERED, build the Wfp_eng__stc_imodemParser parser and
# provide a config that includes the specific recovered particle types.
#
if data_key == DataTypeKey.WFP_ENG_STC_IMODEM_RECOVERED:
config.update({
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.wfp_eng__stc_imodem_particles',
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
'status_data_particle_class': WfpEngStcImodemStatusRecoveredDataParticle,
'start_data_particle_class': WfpEngStcImodemStartRecoveredDataParticle,
'engineering_data_particle_class': WfpEngStcImodemEngineeringRecoveredDataParticle
}
})
log.debug("My Config: %s", config)
parser = WfpEngStcImodemParser(
config,
parser_state,
infile,
lambda state, ingested: self._save_parser_state(state, data_key, ingested),
self._data_callback,
self._sample_exception_callback)
#
# If the key is WFP_ENG_STC_IMODEM_TELEMETERED, build the Wfp_eng__stc_imodemParser parser and
# provide a config that includes the specific telemetered particle types.
#
elif data_key == DataTypeKey.WFP_ENG_STC_IMODEM_TELEMETERED:
config.update({
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.wfp_eng__stc_imodem_particles',
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
'status_data_particle_class': WfpEngStcImodemStatusTelemeteredDataParticle,
'start_data_particle_class': WfpEngStcImodemStartTelemeteredDataParticle,
'engineering_data_particle_class': WfpEngStcImodemEngineeringTelemeteredDataParticle
}
})
log.debug("My Config: %s", config)
parser = WfpEngStcImodemParser(
config,
parser_state,
infile,
lambda state, ingested: self._save_parser_state(state, data_key, ingested),
self._data_callback,
self._sample_exception_callback)
else:
raise ConfigurationException
return parser
def _build_harvester(self, driver_state):
    """
    Build and return the list of configured harvesters.

    One SingleDirectoryHarvester is built per configured data key
    (recovered and/or telemetered); keys missing from the harvester
    config are skipped with a debug log entry.

    :param driver_state: per-data-key driver state, indexed by DataTypeKey
    :return: list of SingleDirectoryHarvester instances (possibly empty)
    """
    harvesters = []

    # The recovered and telemetered harvesters are built identically
    # except for the data key, so iterate instead of duplicating.
    for data_key in (DataTypeKey.WFP_ENG_STC_IMODEM_RECOVERED,
                     DataTypeKey.WFP_ENG_STC_IMODEM_TELEMETERED):
        if data_key not in self._harvester_config:
            log.debug('%s HARVESTER NOT BUILT', data_key)
            continue

        # Bind data_key as a default argument so each lambda captures the
        # key for *this* iteration (avoids the late-binding closure bug).
        harvester = SingleDirectoryHarvester(
            self._harvester_config.get(data_key),
            driver_state[data_key],
            lambda filename, key=data_key: self._new_file_callback(filename, key),
            lambda modified, key=data_key: self._modified_file_callback(modified, key),
            self._exception_callback
        )
        if harvester is not None:
            harvesters.append(harvester)

    return harvesters
| StarcoderdataPython |
3256974 | <reponame>LaudateCorpus1/python-grafeas
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import any_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(package="grafeas.v1", manifest={"SlsaProvenance",},)


# NOTE: auto-generated proto-plus message definitions for the grafeas.v1
# SLSA provenance schema.  The `number=` arguments are protobuf wire-format
# field numbers -- do not renumber or reorder them.
class SlsaProvenance(proto.Message):
    r"""
    Attributes:
        builder (grafeas.grafeas_v1.types.SlsaProvenance.SlsaBuilder):
            required
        recipe (grafeas.grafeas_v1.types.SlsaProvenance.SlsaRecipe):
            Identifies the configuration used for the
            build. When combined with materials, this SHOULD
            fully describe the build, such that re-running
            this recipe results in bit-for-bit identical
            output (if the build is reproducible).
        metadata (grafeas.grafeas_v1.types.SlsaProvenance.SlsaMetadata):

        materials (Sequence[grafeas.grafeas_v1.types.SlsaProvenance.Material]):
            The collection of artifacts that influenced
            the build including sources, dependencies, build
            tools, base images, and so on. This is
            considered to be incomplete unless
            metadata.completeness.materials is true. Unset
            or null is equivalent to empty.
    """

    class SlsaRecipe(proto.Message):
        r"""Steps taken to build the artifact.
        For a TaskRun, typically each container corresponds to one step
        in the recipe.

        Attributes:
            type_ (str):
                URI indicating what type of recipe was
                performed. It determines the meaning of
                recipe.entryPoint, recipe.arguments,
                recipe.environment, and materials.
            defined_in_material (int):
                Index in materials containing the recipe
                steps that are not implied by recipe.type. For
                example, if the recipe type were "make", then
                this would point to the source containing the
                Makefile, not the make program itself. Set to -1
                if the recipe doesn't come from a material, as
                zero is default unset value for int64.
            entry_point (str):
                String identifying the entry point into the
                build. This is often a path to a configuration
                file and/or a target label within that file. The
                syntax and meaning are defined by recipe.type.
                For example, if the recipe type were "make",
                then this would reference the directory in which
                to run make as well as which target to use.
            arguments (google.protobuf.any_pb2.Any):
                Collection of all external inputs that
                influenced the build on top of
                recipe.definedInMaterial and recipe.entryPoint.
                For example, if the recipe type were "make",
                then this might be the flags passed to make
                aside from the target, which is captured in
                recipe.entryPoint. Depending on the recipe Type,
                the structure may be different.
            environment (google.protobuf.any_pb2.Any):
                Any other builder-controlled inputs necessary
                for correctly evaluating the recipe. Usually
                only needed for reproducing the build but not
                evaluated as part of policy. Depending on the
                recipe Type, the structure may be different.
        """

        # `type_` carries a trailing underscore because `type` is a python builtin
        type_ = proto.Field(proto.STRING, number=1,)
        defined_in_material = proto.Field(proto.INT64, number=2,)
        entry_point = proto.Field(proto.STRING, number=3,)
        arguments = proto.Field(proto.MESSAGE, number=4, message=any_pb2.Any,)
        environment = proto.Field(proto.MESSAGE, number=5, message=any_pb2.Any,)

    class SlsaCompleteness(proto.Message):
        r"""Indicates that the builder claims certain fields in this
        message to be complete.

        Attributes:
            arguments (bool):
                If true, the builder claims that
                recipe.arguments is complete, meaning that all
                external inputs are properly captured in the
                recipe.
            environment (bool):
                If true, the builder claims that
                recipe.environment is claimed to be complete.
            materials (bool):
                If true, the builder claims that materials
                are complete, usually through some controls to
                prevent network access. Sometimes called
                "hermetic".
        """

        arguments = proto.Field(proto.BOOL, number=1,)
        environment = proto.Field(proto.BOOL, number=2,)
        materials = proto.Field(proto.BOOL, number=3,)

    class SlsaMetadata(proto.Message):
        r"""Other properties of the build.

        Attributes:
            build_invocation_id (str):
                Identifies the particular build invocation,
                which can be useful for finding associated logs
                or other ad-hoc analysis. The value SHOULD be
                globally unique, per in-toto Provenance spec.
            build_started_on (google.protobuf.timestamp_pb2.Timestamp):
                The timestamp of when the build started.
            build_finished_on (google.protobuf.timestamp_pb2.Timestamp):
                The timestamp of when the build completed.
            completeness (grafeas.grafeas_v1.types.SlsaProvenance.SlsaCompleteness):
                Indicates that the builder claims certain
                fields in this message to be complete.
            reproducible (bool):
                If true, the builder claims that running the
                recipe on materials will produce bit-for-bit
                identical output.
        """

        build_invocation_id = proto.Field(proto.STRING, number=1,)
        build_started_on = proto.Field(
            proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,
        )
        build_finished_on = proto.Field(
            proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,
        )
        # referenced by string because SlsaCompleteness is a sibling nested class
        completeness = proto.Field(
            proto.MESSAGE, number=4, message="SlsaProvenance.SlsaCompleteness",
        )
        reproducible = proto.Field(proto.BOOL, number=5,)

    class SlsaBuilder(proto.Message):
        r"""
        Attributes:
            id (str):

        """

        id = proto.Field(proto.STRING, number=1,)

    class Material(proto.Message):
        r"""
        Attributes:
            uri (str):

            digest (Sequence[grafeas.grafeas_v1.types.SlsaProvenance.Material.DigestEntry]):

        """

        uri = proto.Field(proto.STRING, number=1,)
        digest = proto.MapField(proto.STRING, proto.STRING, number=2,)

    # top-level fields of SlsaProvenance itself
    builder = proto.Field(proto.MESSAGE, number=1, message=SlsaBuilder,)
    recipe = proto.Field(proto.MESSAGE, number=2, message=SlsaRecipe,)
    metadata = proto.Field(proto.MESSAGE, number=3, message=SlsaMetadata,)
    materials = proto.RepeatedField(proto.MESSAGE, number=4, message=Material,)


__all__ = tuple(sorted(__protobuf__.manifest))
| StarcoderdataPython |
11230891 | <filename>Python-programming-1/nonDivisableSubset.py
from collections import Counter
from itertools import combinations
def nonDivisibleSubset(k, s):
    """Return the size of the largest subset of `s` in which no two
    distinct elements sum to a multiple of `k`.

    The previous implementation checked all pairs (O(n^2)) and counted
    every element appearing in at least one valid pair, which is wrong
    (e.g. for k=3, s=[1, 2] it returned 0, but a single-element subset
    of size 1 is always valid).

    This version uses remainder counting (O(n + k)): two elements clash
    exactly when their remainders mod k sum to 0 or to k, so from each
    complementary pair of remainder groups (r, k - r) we keep the larger
    group, and at most one element whose remainder is 0 (or exactly k/2
    when k is even).
    """
    remainder_counts = Counter(x % k for x in s)

    # At most one element with remainder 0 can be kept.
    size = min(remainder_counts.get(0, 0), 1)

    for r in range(1, k // 2 + 1):
        if r == k - r:
            # k is even: any two elements with remainder k/2 clash,
            # so at most one of them can be kept.
            size += min(remainder_counts.get(r, 0), 1)
        else:
            # Keep the larger of the two mutually exclusive groups.
            size += max(remainder_counts.get(r, 0), remainder_counts.get(k - r, 0))

    return size
if __name__ == '__main__':
    # First input line carries n and k; the second carries the n integers.
    tokens = input().split()
    n = int(tokens[0])
    k = int(tokens[1])
    values = [int(v) for v in input().split()]
    print(nonDivisibleSubset(k, values))
| StarcoderdataPython |
9731955 |
# encoding: utf-8
__author__ = "<NAME>"
__email__ = "schmidt89 at informatik.uni-marburg.de"
'''
Utility functions for mongodb.
'''
from androlyze.log.Log import log
from androlyze.model.script.ScriptUtil import dict2json
from androlyze.storage import Util
from gridfs.grid_file import GridOutCursor
from pymongo.cursor import Cursor
# mongodb "$in" query operator (matches documents whose field value is
# contained in a supplied list)
MONGODB_IN_OPERATOR = "$in"
############################################################
#---MongoDB key escaping
############################################################
def escape_key(k):
    '''
    Escape key `k` so that it conforms to mongodb's key restrictions:
    no "." anywhere in the key and no "$" as the first character.

    Parameters
    ----------
    k : str

    Returns
    -------
    str
        `k` with every "." replaced by "_" and a leading "$" replaced by "_$".

    See Also
    --------
    http://docs.mongodb.org/manual/faq/developers/#dollar-sign-operator-escaping
    '''
    escaped = k

    # replace dots everywhere in the key
    if '.' in escaped:
        escaped = escaped.replace('.', '_')

    # escape a leading dollar sign.
    # bug fix: operate on the already dot-escaped value (the original code
    # re-read `k` here, so a key containing both "." and a leading "$"
    # lost the dot replacement)
    if escaped.startswith('$'):
        escaped = escaped.replace('$', '_$', 1)

    return escaped
def escape_keys(_dict):
    ''' Return a deep copy of `_dict` whose keys are escaped for mongodb.

    The escaping is NOT done in place; `_dict` itself is left untouched.
    Values are not escaped, only keys.

    Parameters
    ----------
    _dict : dict

    Returns
    -------
    dict
    '''
    escaped_copy = Util.escape_dict(
        _dict, escape_key, escape_keys=True, escape_values=False
    )
    return escaped_copy
############################################################
#---MongoDB query builder helper functions
############################################################
def get_attr_str(key, attr, gridfs=False):
    ''' Build the dotted attribute path "key.attr"; when `gridfs` is True
    the path is additionally prefixed with the gridfs files-metadata
    namespace. '''
    from androlyze.storage.resultdb.ResultDatabaseStorage import GRIDFS_FILES_METADATA

    dotted = '%s.%s' % (key, attr)
    if gridfs:
        dotted = '%s.%s' % (GRIDFS_FILES_METADATA, dotted)
    return dotted
def build_apk_meta_where(kwargs, gridfs=False):
    ''' Build the (attribute path, value) where pairs for the apk meta key
    from whichever apk-related entries are present in `kwargs`. '''
    from androlyze.model.analysis.result.StaticResultKeys import RESOBJ_APK_META, \
        RESOBJ_APK_META_PACKAGE_NAME, RESOBJ_APK_META_HASH, \
        RESOBJ_APK_META_VERSION_NAME, RESOBJ_APK_META_TAG

    # kwargs entry name -> attribute under the apk meta key
    candidates = (
        ("package_name", RESOBJ_APK_META_PACKAGE_NAME),
        ("apk_hash", RESOBJ_APK_META_HASH),
        ("version_name", RESOBJ_APK_META_VERSION_NAME),
        ("tag", RESOBJ_APK_META_TAG),
    )

    wheres = []
    for kwarg_name, attr in candidates:
        value = kwargs.get(kwarg_name, None)
        if value is not None:
            wheres.append((get_attr_str(RESOBJ_APK_META, attr, gridfs), value))
    return wheres
def build_script_meta_where(kwargs, gridfs=False):
    ''' Build the (attribute path, value) where pairs for the script meta key
    from whichever script-related entries are present in `kwargs`. '''
    from androlyze.model.analysis.result.StaticResultKeys import RESOBJ_SCRIPT_META, \
        RESOBJ_SCRIPT_META_HASH, RESOBJ_SCRIPT_META_NAME, RESOBJ_SCRIPT_META_VERSION

    # kwargs entry name -> attribute under the script meta key
    candidates = (
        ("script_hash", RESOBJ_SCRIPT_META_HASH),
        ("script_name", RESOBJ_SCRIPT_META_NAME),
        ("script_version", RESOBJ_SCRIPT_META_VERSION),
    )

    wheres = []
    for kwarg_name, attr in candidates:
        value = kwargs.get(kwarg_name, None)
        if value is not None:
            wheres.append((get_attr_str(RESOBJ_SCRIPT_META, attr, gridfs), value))
    return wheres
def build_checks_filter(
    checks_non_empty_list = None, checks_empty_list = None,
    checks_true = None, checks_false = None,
    checks_not_null = None, checks_null = None,
    conjunction = 'or'
):
    '''
    Helper function to easily check if some value has been set:
    == [], != [], == true, == false, == null, != null.

    Parameters
    ----------
    checks_non_empty_list : iterable<str>, optional (default is ())
        Check the keys against a non empty list.
    checks_empty_list : iterable<str>, optional (default is ())
        Check the keys against an empty list.
    checks_true : iterable<str>, optional (default is ())
        Check if the values of the given keys are true.
    checks_false : iterable<str>, optional (default is ())
        Check if the values of the given keys are false.
    checks_not_null : iterable<str>, optional (default is ())
        Check if the values of the given keys are not null (python None).
    checks_null : iterable<str>, optional (default is ())
        Check if the values of the given keys are null (python None).
    conjunction : str, optional (default is 'or')
        Choose between 'or' and 'and'; specifies how the single filter
        clauses are linked.

    Examples
    --------
    build_checks_filter(checks_non_empty_list=['logged.enum'], checks_true=['logged.bool'])
        -> {'$or': [{'logged.enum': {'$ne': []}}, {'logged.bool': True}]}
    build_checks_filter(checks_empty_list=["foo"])
        -> {'foo': []}

    Returns
    -------
    dict
        Dictionary describing the checks; usable as a mongodb query filter.
    '''
    def eq_clause(key, val):
        ''' mongodb equality clause: {key: val} '''
        return {key: val}

    def ne_clause(key, val):
        ''' mongodb inequality clause: {key: {"$ne": val}} '''
        return {key: {'$ne': val}}

    # (keys, clause builder, comparison value) -- order matters and matches
    # the documented clause ordering of the resulting $or/$and list
    check_specs = (
        (checks_non_empty_list, ne_clause, []),
        (checks_empty_list, eq_clause, []),
        (checks_true, eq_clause, True),
        (checks_false, eq_clause, False),
        (checks_null, eq_clause, None),
        (checks_not_null, ne_clause, None),
    )

    filters = []
    for keys, build_clause, cmp_value in check_specs:
        for key in (keys or ()):
            filters.append(build_clause(key, cmp_value))

    if not filters:
        return {}
    if len(filters) == 1:
        # single clause: return it as a plain (copied) dict
        return dict(filters[0])
    # n-ary conjunction (n > 1)
    operator = '$or' if conjunction.lower() == 'or' else '$and'
    return {operator: filters}
############################################################
#---Results
############################################################
def split_result_ids(results):
    '''
    Split the id's into non-gridfs and gridfs id's.

    Parameters
    ----------
    results : iterable<tuple<str, bool>>
        First component is the id of the entry
        and the second a boolean indicating if the result has been stored in gridfs.
        See e.g. output of :py:method:`.ResultDatabaseStorage.store_result_for_apk`

    Returns
    -------
    tuple<list<str>, list<str>>
        First component holds the non-gridfs id's, the second the gridfs id's
    '''
    # materialize first: the input may be a one-shot iterator and we
    # traverse it twice below
    results = list(results)
    # comprehensions instead of map/filter so the documented list return
    # type also holds on python 3 (where map/filter are lazy iterators)
    non_gridfs_ids = [res_id for res_id, in_gridfs in results if in_gridfs is False]
    gridfs_ids = [res_id for res_id, in_gridfs in results if in_gridfs is True]
    return non_gridfs_ids, gridfs_ids
def format_query_result_db(res_cursor, distict_generator = False, count = False, raw = False, html = False):
    '''
    Format the results from the result db (mongodb).

    Parameters
    ----------
    res_cursor : gridfs.grid_file.GridOutCursor or generator<object> or pymongo.cursor.Cursor
        First if non_document and non_document_raw.
        Second if distinct values wanted.
        Thirst otherwise.
    distict_generator : bool, optional (default is False)
        Res is generator<object> created from the distinct(...) method of mongodb.
        If generaor<dict>, convert each dict to json.
        Otherwise just print.
    count : bool, optional (default is False)
        Only print count, not results
    raw : bool, optional (default is False)
        Print raw data from gridfs
        Otherwise print json.
        If `raw` will not be converted to html!
    html : bool, optional (default is False)
        Format as html.

    Returns
    -------
    str
        NOTE(review): on PyMongoError the function only logs and falls off
        the end, implicitly returning None -- callers should be prepared.
    '''
    from pymongo.errors import PyMongoError
    from androlyze.ui.util import HtmlUtil

    # if html enabled convert to table view if `json2html` is present
    # otherwise use pygmentize
    json_convert = lambda json : json
    if html:
        try:
            from json2html import json2html
            json_convert = lambda j : json2html.convert(json = j)
        except ImportError:
            # json2html unavailable: fall back to syntax highlighting
            from pygments import highlight
            from pygments.formatters import HtmlFormatter
            from pygments.lexers import get_lexer_by_name
            json_convert = lambda json: highlight(json, get_lexer_by_name('json'), HtmlFormatter())

    # collect results as list<str>
    resl = []

    def anl(text):
        ''' Append a newline '''
        # dont format raw data as html
        return '%s\n' % text if not html or raw else HtmlUtil.newline(HtmlUtil.prefy(text))

    try:
        # return count
        if count:
            cnt = 0
            if is_pymongo_cursor(res_cursor):
                cnt = res_cursor.count()
            elif distict_generator:
                # distinct generator has no count(): materialize and count
                cnt = len(list(res_cursor))
            return '%d' % cnt
        else:
            if distict_generator:
                # distinct values: sort for stable output
                for r in sorted(res_cursor):
                    if isinstance(r, dict):
                        # NOTE(review): passing res_cursor (not r) to
                        # dict2json looks like a bug -- confirm intent
                        r = dict2json(res_cursor)
                        resl.append(r)
                    elif isinstance(r, (str, unicode)):
                        # `unicode` is python 2 only
                        resl.append(r)
            else:
                for i, res in enumerate(res_cursor, 1):
                    # numbered delimiter between the single results
                    delimiter = '/* %d */' % i
                    text = HtmlUtil.newline(delimiter) if html else delimiter
                    if html: text = HtmlUtil.redify(text)
                    resl.append(text)

                    # return raw data
                    if raw:
                        # gridfs.grid_file.GridOut
                        for gridout_obj in res:
                            resl.append(gridout_obj)
                    # return json
                    else:
                        j = dict2json(res)
                        # convert json (if enabled)
                        j = json_convert(j)
                        resl.append(j)

        # return result by joining single strings
        return ''.join([anl(res_str) for res_str in resl])
    except PyMongoError as e:
        log.exception(e)
############################################################
#---Cursor stuff
############################################################
def is_pymongo_cursor(cursor):
    ''' Return True iff `cursor` is one of mongodb's cursor types
    (a plain query cursor or a gridfs file cursor). '''
    mongo_cursor_types = (Cursor, GridOutCursor)
    return isinstance(cursor, mongo_cursor_types)
if __name__ == '__main__':
    # ad-hoc smoke test (note: python 2 print-statement syntax)
    print build_checks_filter(checks_empty_list = ["foo"])
    from collections import OrderedDict
    # sample analysis-result structure exercising "." and "$" escaping in
    # nested keys (e.g. 'La2dp.Vol.service$1.onReceive')
    test = OrderedDict([('script meta', OrderedDict([('name', 'CodePermissions'), ('sha256', None), ('analysis date', 'time'), ('version', '0.1')])), ('code permissions', ('code', OrderedDict([('BLUETOOTH', [{'La2dp.Vol.service$1.onReceive': ''}])])))])
    escaped = escape_keys(test)
    import json
    print json.dumps(escaped, indent = 4)
| StarcoderdataPython |
1841777 | <reponame>cheikhsidi/Sharepoint_To_SQL
# One-shot ETL script: copy the schema and rows of etl.ExternalUser_Profile
# from the `frpbi` database to the `frp_edw` database on the same
# SQL Server instance.
from sqlalchemy import create_engine, Table, Column, Integer, Unicode, MetaData, String, Text, update, and_, select, func, types
from sqlalchemy.ext.automap import automap_base
import sql_connect as secret

# NOTE(review): "<PASSWORD>" is a redaction placeholder left by a secrets
# scrub -- this line is not valid python as written; presumably it should
# read `pwd1 = secret.password`. Restore before running.
pwd1 = <PASSWORD>.password

# create engine, reflect existing columns, and create table object for oldTable
#engine_prod = create_engine(f"mssql+pyodbc://{secret.user}:{pwd1}@{secret.server}:1433/{secret.db}?driver=SQL+Server+Native+Client+11.0")
#engine_dev = create_engine(f"mssql+pyodbc://{secret.user}:{pwd1}@{secret.server}:1433/{secret.db}?driver=SQL+Server+Native+Client+11.0")
# tables = ['frps.Deal', 'frps.DataSource', 'frps.Chart_Agnecy', 'frps.BusinessUnit', 'frps.OperatingUnit', 'frps.ReportableUnit']

# create engine, reflect existing columns, and create table object for oldTable
srcEngine = create_engine(f"mssql+pyodbc://{secret.user}:{pwd1}@{secret.server}:1433/frpbi?driver=SQL+Server+Native+Client+11.0")
srcEngine_metadata = MetaData(bind=srcEngine)
# srcEngine_metadata.reflect(srcEngine) # get columns from existing table
# autoload=True reflects the source table's columns from the database
srcTable = Table('ExternalUser_Profile', srcEngine_metadata, autoload=True, schema="etl")

# # create engine and table object for newTable
destEngine = create_engine(f"mssql+pyodbc://{secret.user}:{pwd1}@{secret.server}:1433/frp_edw?driver=SQL+Server+Native+Client+11.0")
destEngine_metadata = MetaData(bind=destEngine)
destTable = Table('ExternalUser_Profile', destEngine_metadata, schema="etl")

# copy schema and create newTable from oldTable
for column in srcTable.columns:
    destTable.append_column(column.copy())
destTable.create()

# Copy Data from the source table to the new table
# (row-by-row insert; fine for small tables)
insert = destTable.insert()
for row in srcTable.select().execute():
    insert.execute(row)
6680975 | <filename>pygradsreader.py
import numpy
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import re
import glob
class Grads():
    """
    Reader that converts GrADS-format binary data files into numpy arrays.

    The grid dimensions (x, y, z, t) are fixed at construction time and
    describe the layout of one binary record of little-endian float32 values.
    """

    @classmethod
    def read_ctlfile(cls, ctlfile):
        """
        Parse a GrADS control (.ctl) file into a dict.

        Bug fixes vs. the original: the original iterated over an
        undefined name ``f`` (the control file was never opened) and
        never returned the parsed result.

        Parameters
        ----------
        ctlfile : str
            path of the control file

        Returns
        -------
        dict
            mapping of the first token of each line to the list of
            remaining tokens on that line
        """
        ctl = {}
        with open(ctlfile) as ctl_fp:
            for line in ctl_fp:
                info = re.split(" +", line.rstrip("\r\n"))
                ctl[info[0]] = info[1:]
        return ctl

    def __init__(self, x, y, z, t):
        # grid dimensions: x/y horizontal points, z vertical levels, t time steps
        self.x = x
        self.y = y
        self.z = z
        self.t = t
        self.num = x * y * z * t
        # one record = `num` little-endian float32 values
        # (bug fix: the module is imported as `numpy`, not `np` --
        # `np.dtype` raised NameError at runtime)
        self.struct = numpy.dtype([("arr", f"<{self.num}f")])

    def read_grads(self, file):
        """
        Read one GrADS binary file.

        Parameters
        ----------
        file : str
            grads binary filepath

        Returns
        -------
        data : numpy.ndarray
            reshaped array of reading data
        """
        # bug fix: binary data must be opened in "rb" mode
        with open(file, "rb") as fp:
            chunk = numpy.fromfile(fp, dtype=self.struct, count=self.t)
        # NOTE(review): reshaping the full record (x*y*z*t values) into
        # (z, y, x) only succeeds when t == 1 -- confirm for t > 1
        data = chunk[0]["arr"].reshape((self.z, self.y, self.x))
        return data

    def read_multi_grads(self, files):
        """Read several GrADS files and stack them along a leading time axis."""
        t_all = len(files)
        alldata = numpy.zeros((t_all, self.z, self.y, self.x))
        for t, file in enumerate(files):
            alldata[t, :, :, :] = self.read_grads(file)
        return alldata
92080 | # coding: utf-8
"""
1Password Connect
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 0.2.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class Field(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # python attribute name -> declared OpenAPI type
    openapi_types = {
        'id': 'str',
        'section': 'FieldSection',
        'type': 'str',
        'purpose': 'str',
        'label': 'str',
        'value': 'str',
        'generate': 'bool',
        'entropy': 'float'
    }

    # python attribute name -> JSON key used on the wire
    attribute_map = {
        'id': 'id',
        'section': 'section',
        'type': 'type',
        'purpose': 'purpose',
        'label': 'label',
        'value': 'value',
        'generate': 'generate',
        'entropy': 'entropy'
    }

    def __init__(self, id=None, section=None, type='STRING', purpose=None, label=None, value=None, generate=False, entropy=None):  # noqa: E501
        # backing attributes for the properties below
        self._id = None
        self._section = None
        self._type = None
        self._purpose = None
        self._label = None
        self._value = None
        self._generate = None
        self._entropy = None
        self.discriminator = None

        # `id` is always assigned; the remaining fields only when provided,
        # so the setter validation runs only on explicit values
        self.id = id
        if section is not None:
            self.section = section
        if type is not None:
            self.type = type
        if purpose is not None:
            self.purpose = purpose
        if label is not None:
            self.label = label
        if value is not None:
            self.value = value
        if generate is not None:
            self.generate = generate
        if entropy is not None:
            self.entropy = entropy

    @property
    def id(self):
        """Gets the id of this Field.  # noqa: E501


        :return: The id of this Field.  # noqa: E501
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this Field.


        :param id: The id of this Field.  # noqa: E501
        :type: str
        """
        self._id = id

    @property
    def section(self):
        """Gets the section of this Field.  # noqa: E501


        :return: The section of this Field.  # noqa: E501
        :rtype: FieldSection
        """
        return self._section

    @section.setter
    def section(self, section):
        """Sets the section of this Field.


        :param section: The section of this Field.  # noqa: E501
        :type: FieldSection
        """
        self._section = section

    @property
    def type(self):
        """Gets the type of this Field.  # noqa: E501


        :return: The type of this Field.  # noqa: E501
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Sets the type of this Field.


        :param type: The type of this Field.  # noqa: E501
        :type: str
        """
        # the schema restricts `type` to this closed enum of field kinds
        allowed_values = ["STRING", "EMAIL", "CONCEALED", "URL", "TOTP", "DATE", "MONTH_YEAR", "MENU"]  # noqa: E501
        if type not in allowed_values:  # noqa: E501
            raise ValueError(
                "Invalid value for `type` ({0}), must be one of {1}"  # noqa: E501
                .format(type, allowed_values)
            )

        self._type = type

    @property
    def purpose(self):
        """Gets the purpose of this Field.  # noqa: E501

        Some item types, Login and Password, have fields used for autofill. This property indicates that purpose.  # noqa: E501

        :return: The purpose of this Field.  # noqa: E501
        :rtype: str
        """
        return self._purpose

    @purpose.setter
    def purpose(self, purpose):
        """Sets the purpose of this Field.

        Some item types, Login and Password, have fields used for autofill. This property indicates that purpose.  # noqa: E501

        :param purpose: The purpose of this Field.  # noqa: E501
        :type: str
        """
        # "" (empty string) is an allowed purpose per the schema
        allowed_values = ["", "USERNAME", "PASSWORD", "NOTES"]  # noqa: E501
        if purpose not in allowed_values:  # noqa: E501
            raise ValueError(
                "Invalid value for `purpose` ({0}), must be one of {1}"  # noqa: E501
                .format(purpose, allowed_values)
            )

        self._purpose = purpose

    @property
    def label(self):
        """Gets the label of this Field.  # noqa: E501


        :return: The label of this Field.  # noqa: E501
        :rtype: str
        """
        return self._label

    @label.setter
    def label(self, label):
        """Sets the label of this Field.


        :param label: The label of this Field.  # noqa: E501
        :type: str
        """
        self._label = label

    @property
    def value(self):
        """Gets the value of this Field.  # noqa: E501


        :return: The value of this Field.  # noqa: E501
        :rtype: str
        """
        return self._value

    @value.setter
    def value(self, value):
        """Sets the value of this Field.


        :param value: The value of this Field.  # noqa: E501
        :type: str
        """
        self._value = value

    @property
    def generate(self):
        """Gets the generate of this Field.  # noqa: E501

        If value is not present then a new value should be generated for this field  # noqa: E501

        :return: The generate of this Field.  # noqa: E501
        :rtype: bool
        """
        return self._generate

    @generate.setter
    def generate(self, generate):
        """Sets the generate of this Field.

        If value is not present then a new value should be generated for this field  # noqa: E501

        :param generate: The generate of this Field.  # noqa: E501
        :type: bool
        """
        self._generate = generate

    @property
    def entropy(self):
        """Gets the entropy of this Field.  # noqa: E501

        For fields with a purpose of `PASSWORD` this is the entropy of the value  # noqa: E501

        :return: The entropy of this Field.  # noqa: E501
        :rtype: float
        """
        return self._entropy

    @entropy.setter
    def entropy(self, entropy):
        """Sets the entropy of this Field.

        For fields with a purpose of `PASSWORD` this is the entropy of the value  # noqa: E501

        :param entropy: The entropy of this Field.  # noqa: E501
        :type: float
        """
        self._entropy = entropy

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # recursively serialize nested models, lists and dicts of models
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, Field):
            return False

        # structural equality over the serialized form
        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, Field):
            return True

        return self.to_dict() != other.to_dict()
| StarcoderdataPython |
12854221 | <filename>pymtl3/passes/rtlir/structural/__init__.py
"""Expose structural RTLIR generation pass.
PyMTL user should only interact with the passes exposed here.
"""
from .StructuralRTLIRGenL4Pass import StructuralRTLIRGenL4Pass as StructuralRTLIRGenPass
| StarcoderdataPython |
9734305 | <gh_stars>0
# Copyright 2018 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import os
import tempfile
import warnings
import zipfile
import yaml
import mlrun
import mlrun.errors
from ..datastore import get_store_uri, is_store_uri, store_manager
from ..model import ModelObj
from ..utils import (
StorePrefix,
calculate_local_file_hash,
generate_artifact_uri,
is_relative_path,
)
# module-level switch, presumably toggling artifact hash calculation in the
# helpers further down this module -- TODO confirm where it is consumed
calc_hash = True
class ArtifactMetadata(ModelObj):
    """Artifact metadata: identity, lineage and descriptive fields."""

    # fields serialized by the short (base) dict form
    _dict_fields = ["key", "project", "iter", "tree", "description", "hash", "tag"]
    # additional fields included only in the long dict form
    _extra_fields = ["updated", "labels"]

    def __init__(
        self,
        key=None,
        project=None,
        iter=None,
        tree=None,
        description=None,
        hash=None,
        tag=None,
    ):
        self.key = key
        self.project = project
        self.iter = iter
        self.tree = tree
        self.description = description
        self.hash = hash
        self.labels = {}
        self.updated = None
        # temp store of the tag
        self.tag = tag

    def base_dict(self):
        """Return the short dict form (base-class field list only)."""
        return super().to_dict()

    def to_dict(self, fields=None, exclude=None):
        """return long dict form of the artifact"""
        all_fields = self._dict_fields + self._extra_fields
        return super().to_dict(all_fields, exclude=exclude)

    @classmethod
    def from_dict(cls, struct=None, fields=None, deprecated_fields: dict = None):
        """Build an instance from a dict, defaulting to the full field list."""
        if not fields:
            fields = cls._dict_fields + cls._extra_fields
        return super().from_dict(
            struct, fields=fields, deprecated_fields=deprecated_fields
        )
class ArtifactSpec(ModelObj):
    """Artifact spec: source/target locations, format and payload details."""

    # fields serialized by the short (base) dict form
    _dict_fields = [
        "src_path",
        "target_path",
        "viewer",
        "inline",
        "format",
        "size",
        "db_key",
        "extra_data",
    ]
    # additional fields included only in the long dict form
    _extra_fields = ["annotations", "producer", "sources", "license", "encoding"]

    def __init__(
        self,
        src_path=None,
        target_path=None,
        viewer=None,
        is_inline=False,
        format=None,
        size=None,
        db_key=None,
        extra_data=None,
        body=None,
    ):
        self.src_path = src_path
        self.target_path = target_path
        self.viewer = viewer
        self._is_inline = is_inline
        self.format = format
        self.size = size
        self.db_key = db_key
        self.extra_data = extra_data or {}
        self._body = body
        # fields not settable through the constructor
        self.encoding = None
        self.annotations = None
        self.sources = []
        self.producer = None
        self.license = ""

    def base_dict(self):
        """Return the short dict form (base-class field list only)."""
        return super().to_dict()

    def to_dict(self, fields=None, exclude=None):
        """return long dict form of the artifact"""
        all_fields = self._dict_fields + self._extra_fields
        return super().to_dict(all_fields, exclude=exclude)

    @classmethod
    def from_dict(cls, struct=None, fields=None, deprecated_fields: dict = None):
        """Build an instance from a dict, defaulting to the full field list."""
        if not fields:
            fields = cls._dict_fields + cls._extra_fields
        return super().from_dict(
            struct, fields=fields, deprecated_fields=deprecated_fields
        )

    @property
    def inline(self):
        """inline data (body)"""
        return self.get_body() if self._is_inline else None

    @inline.setter
    def inline(self, body):
        if body:
            self._is_inline = True
        self._body = body

    def get_body(self):
        """get the artifact body when inline"""
        return self._body
class ArtifactStatus(ModelObj):
    """Artifact status: lifecycle state plus optional stats and preview."""

    _dict_fields = ["state", "stats", "preview"]

    def __init__(self):
        # newly built objects always start in the "created" state
        self.state = "created"
        self.stats = None
        self.preview = None

    def base_dict(self):
        """Return the short dict form from the base class."""
        return super().to_dict()
def _deprecated_artifact_field(owner: str, name: str):
    """Create a property proxying ``artifact.<name>`` to ``artifact.<owner>.<name>``.

    *owner* is ``"spec"`` or ``"metadata"``.  Every get/set emits a
    PendingDeprecationWarning; these proxies exist only for backwards
    compatibility with code written against ArtifactLegacy.  Using one factory
    also fixes the copy-paste inconsistencies of the old per-property warning
    texts (several metadata-owned fields claimed to belong to the spec).
    """
    # TODO: In 1.3.0 do changes in examples & demos In 1.5.0 remove
    message = (
        f"This is a property of the {owner}, use artifact.{owner}.{name} instead"
        "This will be deprecated in 1.3.0, and will be removed in 1.5.0"
    )

    def _getter(self):
        warnings.warn(message, PendingDeprecationWarning)
        return getattr(getattr(self, owner), name)

    def _setter(self, value):
        warnings.warn(message, PendingDeprecationWarning)
        setattr(getattr(self, owner), name, value)

    return property(
        _getter, _setter, doc=f"Deprecated: use artifact.{owner}.{name} instead"
    )


class Artifact(ModelObj):
    """Structured artifact object with metadata/spec/status sections."""

    kind = "artifact"
    _dict_fields = ["kind", "metadata", "spec", "status"]
    _store_prefix = StorePrefix.Artifact

    def __init__(
        self,
        key=None,
        body=None,
        viewer=None,
        is_inline=False,
        format=None,
        size=None,
        target_path=None,
        # All params up until here are legacy params for compatibility with legacy artifacts.
        project=None,
        metadata: ArtifactMetadata = None,
        spec: ArtifactSpec = None,
        src_path: str = None,
    ):
        self._metadata = None
        self.metadata = metadata
        self._spec = None
        self.spec = spec

        # explicit (legacy) arguments override the section objects
        self.metadata.key = key or self.metadata.key
        self.metadata.project = (
            project or mlrun.mlconf.default_project or self.metadata.project
        )
        self.spec.size = size or self.spec.size
        self.spec.target_path = target_path or self.spec.target_path
        self.spec.format = format or self.spec.format
        self.spec.viewer = viewer or self.spec.viewer
        # bug fix: do not clobber a src_path carried by the spec when the
        # src_path argument is omitted (consistent with the fields above)
        self.spec.src_path = src_path or self.spec.src_path

        if body:
            self.spec._body = body
        self.spec._is_inline = is_inline or self.spec._is_inline

        self.status = ArtifactStatus()

    @property
    def metadata(self) -> ArtifactMetadata:
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        self._metadata = self._verify_dict(metadata, "metadata", ArtifactMetadata)

    @property
    def spec(self) -> ArtifactSpec:
        return self._spec

    @spec.setter
    def spec(self, spec):
        self._spec = self._verify_dict(spec, "spec", ArtifactSpec)

    @property
    def status(self) -> ArtifactStatus:
        return self._status

    @status.setter
    def status(self, status):
        self._status = self._verify_dict(status, "status", ArtifactStatus)

    def _get_file_body(self):
        """Return the artifact content: inline body, local source file, or remote target."""
        body = self.spec.get_body()
        if body:
            return body
        # access spec.src_path directly to avoid the deprecated proxy warning
        if self.spec.src_path and os.path.isfile(self.spec.src_path):
            with open(self.spec.src_path, "rb") as fp:
                return fp.read()
        return mlrun.get_dataitem(self.get_target_path()).get()

    def export(self, target_path: str, with_extras=True):
        """save the artifact object into a yaml/json file or zip archive

        when the target path is a .yaml/.json file the artifact spec is saved into that file,
        when the target_path suffix is '.zip' the artifact spec, body and extra data items are
        packaged into a zip file.  The archive target_path support DataItem urls for remote object storage
        (e.g. s3://<bucket>/<path>).

        :param target_path: path to store artifact .yaml/.json spec or .zip (spec with the content)
        :param with_extras: will include the extra_data items in the zip archive
        """
        if target_path.endswith(".yaml") or target_path.endswith(".yml"):
            mlrun.get_dataitem(target_path).put(self.to_yaml())
        elif target_path.endswith(".json"):
            mlrun.get_dataitem(target_path).put(self.to_json())
        elif target_path.endswith(".zip"):
            tmp_path = None
            if "://" in target_path:
                # remote target: build the archive locally, then upload it
                tmp_path = tempfile.NamedTemporaryFile(suffix=".zip", delete=False).name
            zipf = zipfile.ZipFile(tmp_path or target_path, "w")
            body = self._get_file_body()
            zipf.writestr("_body", body)
            extras = {}
            if with_extras:
                # use spec.* directly to avoid deprecated-property warnings
                for k, item_path in self.spec.extra_data.items():
                    if is_relative_path(item_path):
                        base_dir = self.spec.src_path or ""
                        if not self.is_dir:
                            base_dir = os.path.dirname(base_dir)
                        item_path = os.path.join(base_dir, item_path).replace("\\", "/")
                    zipf.writestr(k, mlrun.get_dataitem(item_path).get())
                    extras[k] = k
            artifact = self.copy()
            artifact.spec.extra_data = extras
            zipf.writestr("_spec.yaml", artifact.to_yaml())
            zipf.close()
            if tmp_path:
                mlrun.get_dataitem(target_path).upload(tmp_path)
                os.remove(tmp_path)
        else:
            raise ValueError("unsupported file suffix, use .yaml, .json, or .zip")

    def before_log(self):
        """Hook called before the artifact is logged (no-op for base artifacts)."""
        pass

    @property
    def is_dir(self):
        """this is a directory"""
        return False

    @property
    def uri(self):
        """return artifact uri (store://..)"""
        return self.get_store_url()

    def to_dataitem(self):
        """return a DataItem object (if available) representing the artifact content"""
        uri = self.get_store_url()
        if uri:
            return mlrun.get_dataitem(uri)

    def get_body(self):
        """get the artifact body when inline"""
        return self.spec.get_body()

    def get_target_path(self):
        """get the absolute target path for the artifact"""
        return self.spec.target_path

    def get_store_url(self, with_tag=True, project=None):
        """get the artifact uri (store://..) with optional parameters"""
        tag = self.metadata.tree if with_tag else None
        uri = generate_artifact_uri(
            project or self.metadata.project, self.spec.db_key, tag, self.metadata.iter
        )
        return get_store_uri(self._store_prefix, uri)

    def base_dict(self):
        """return short dict form of the artifact"""
        struct = {"kind": self.kind}
        for field in ["metadata", "spec", "status"]:
            val = getattr(self, field, None)
            if val:
                struct[field] = val.base_dict()
        return struct

    def upload(self):
        """internal, upload to target store"""
        src_path = self.spec.src_path
        body = self.get_body()
        if body:
            self._upload_body(body)
        elif src_path and os.path.isfile(src_path):
            self._upload_file(src_path)

    def _upload_body(self, body, target=None):
        # calc_hash is a module-level flag enabling content hashing
        if calc_hash:
            self.metadata.hash = blob_hash(body)
        self.spec.size = len(body)
        store_manager.object(url=target or self.spec.target_path).put(body)

    def _upload_file(self, src, target=None):
        if calc_hash:
            self.metadata.hash = calculate_local_file_hash(src)
        self.spec.size = os.stat(src).st_size
        store_manager.object(url=target or self.spec.target_path).upload(src)

    # Following properties are for backwards compatibility with the ArtifactLegacy class. They should be
    # removed once we only work with the new Artifact structure.
    def is_inline(self):
        """Deprecated-style accessor: True when the body is stored inline."""
        return self.spec._is_inline

    # Each proxy forwards artifact.<name> to the owning section and warns.
    inline = _deprecated_artifact_field("spec", "inline")
    src_path = _deprecated_artifact_field("spec", "src_path")
    target_path = _deprecated_artifact_field("spec", "target_path")
    producer = _deprecated_artifact_field("spec", "producer")
    format = _deprecated_artifact_field("spec", "format")
    viewer = _deprecated_artifact_field("spec", "viewer")
    size = _deprecated_artifact_field("spec", "size")
    db_key = _deprecated_artifact_field("spec", "db_key")
    sources = _deprecated_artifact_field("spec", "sources")
    extra_data = _deprecated_artifact_field("spec", "extra_data")
    tag = _deprecated_artifact_field("metadata", "tag")
    key = _deprecated_artifact_field("metadata", "key")
    labels = _deprecated_artifact_field("metadata", "labels")
    iter = _deprecated_artifact_field("metadata", "iter")
    tree = _deprecated_artifact_field("metadata", "tree")
    project = _deprecated_artifact_field("metadata", "project")
    hash = _deprecated_artifact_field("metadata", "hash")

    def generate_target_path(self, artifact_path, producer):
        """Compute the default target path via the module-level helper."""
        return generate_target_path(self, artifact_path, producer)
class DirArtifactSpec(ArtifactSpec):
    """Spec for a directory artifact; only the path/key fields are serialized."""

    _dict_fields = ["src_path", "target_path", "db_key"]
class DirArtifact(Artifact):
    """Artifact representing a directory of files."""

    kind = "dir"

    _dict_fields = [
        "key",
        "kind",
        "iter",
        "tree",
        "src_path",
        "target_path",
        "description",
        "db_key",
    ]

    @property
    def spec(self) -> DirArtifactSpec:
        return self._spec

    @spec.setter
    def spec(self, spec):
        self._spec = self._verify_dict(spec, "spec", DirArtifactSpec)

    @property
    def is_dir(self):
        """Directory artifacts are always dirs."""
        return True

    def upload(self):
        """Upload every file directly under spec.src_path to spec.target_path."""
        if not self.spec.src_path:
            raise ValueError("local/source path not specified")
        for name in os.listdir(self.spec.src_path):
            file_path = os.path.join(self.spec.src_path, name)
            if not os.path.isfile(file_path):
                raise ValueError(f"file {file_path} not found, cant upload")
            destination = os.path.join(self.spec.target_path, name)
            store_manager.object(url=destination).upload(file_path)
class LinkArtifactSpec(ArtifactSpec):
    """Spec for a link artifact: adds the coordinates of the link target."""

    _dict_fields = ArtifactSpec._dict_fields + [
        "link_iteration",
        "link_key",
        "link_tree",
    ]

    def __init__(self, src_path=None, target_path=None,
                 link_iteration=None, link_key=None, link_tree=None):
        super().__init__(src_path, target_path)
        # coordinates of the artifact this link points at
        self.link_iteration = link_iteration
        self.link_key = link_key
        self.link_tree = link_tree
class LinkArtifact(Artifact):
    """Artifact that points at another artifact (e.g. the best iteration)."""

    kind = "link"

    def __init__(
        self,
        key=None,
        target_path="",
        link_iteration=None,
        link_key=None,
        link_tree=None,
        # All params up until here are legacy params for compatibility with legacy artifacts.
        project=None,
        metadata: ArtifactMetadata = None,
        spec: LinkArtifactSpec = None,
    ):
        super().__init__(
            key, target_path=target_path, project=project, metadata=metadata, spec=spec
        )
        # explicit link coordinates override whatever the spec carried
        link_spec = self.spec
        link_spec.link_iteration = link_iteration
        link_spec.link_key = link_key
        link_spec.link_tree = link_tree

    @property
    def spec(self) -> LinkArtifactSpec:
        return self._spec

    @spec.setter
    def spec(self, spec):
        self._spec = self._verify_dict(spec, "spec", LinkArtifactSpec)
class LegacyArtifact(ModelObj):
    """Legacy flat-structure artifact (pre metadata/spec/status split).

    All fields live directly on the object; kept for backwards compatibility
    with artifacts stored by older versions.
    """

    _dict_fields = [
        "key",
        "kind",
        "iter",
        "tree",
        "src_path",
        "target_path",
        "hash",
        "description",
        "viewer",
        "inline",
        "format",
        "size",
        "db_key",
        "extra_data",
        "tag",
    ]
    kind = ""
    _store_prefix = StorePrefix.Artifact

    def __init__(
        self,
        key=None,
        body=None,
        viewer=None,
        is_inline=False,
        format=None,
        size=None,
        target_path=None,
    ):
        self.key = key
        self.project = ""
        self.db_key = None
        self.size = size
        self.iter = None
        self.tree = None
        self.updated = None
        self.target_path = target_path
        self.src_path = None
        self._body = body
        self.format = format
        self.description = None
        self.viewer = viewer
        self.encoding = None
        self.labels = {}
        self.annotations = None
        self.sources = []
        self.producer = None
        self.hash = None
        self._inline = is_inline
        self.license = ""
        self.extra_data = {}
        self.tag = None  # temp store of the tag

    def before_log(self):
        """Hook called before logging: flatten artifact objects in extra_data
        to their target paths so the record is serializable."""
        for key, item in self.extra_data.items():
            if hasattr(item, "target_path"):
                self.extra_data[key] = item.target_path

    def is_inline(self):
        # True when the body is stored inline (inside the artifact record)
        return self._inline

    @property
    def is_dir(self):
        """this is a directory"""
        return False

    @property
    def inline(self):
        """inline data (body)"""
        if self._inline:
            return self.get_body()
        return None

    @inline.setter
    def inline(self, body):
        self._body = body
        if body:
            self._inline = True

    @property
    def uri(self):
        """return artifact uri (store://..)"""
        return self.get_store_url()

    def to_dataitem(self):
        """return a DataItem object (if available) representing the artifact content"""
        uri = self.get_store_url()
        if uri:
            return mlrun.get_dataitem(uri)

    def get_body(self):
        """get the artifact body when inline"""
        return self._body

    def get_target_path(self):
        """get the absolute target path for the artifact"""
        return self.target_path

    def get_store_url(self, with_tag=True, project=None):
        """get the artifact uri (store://..) with optional parameters"""
        tag = self.tree if with_tag else None
        uri = generate_artifact_uri(
            project or self.project, self.db_key, tag, self.iter
        )
        return get_store_uri(self._store_prefix, uri)

    def base_dict(self):
        """return short dict form of the artifact"""
        return super().to_dict()

    def to_dict(self, fields=None):
        """return long dict form of the artifact"""
        # NOTE(review): the fields argument is accepted but ignored here --
        # confirm whether any caller relies on passing a custom field list
        return super().to_dict(
            self._dict_fields
            + ["updated", "labels", "annotations", "producer", "sources", "project"]
        )

    @classmethod
    def from_dict(cls, struct=None, fields=None):
        """Build a legacy artifact from a dict, defaulting to the full field
        list (core fields plus run/bookkeeping extras)."""
        fields = fields or cls._dict_fields + [
            "updated",
            "labels",
            "annotations",
            "producer",
            "sources",
            "project",
        ]
        return super().from_dict(struct, fields=fields)

    def upload(self):
        """internal, upload to target store"""
        src_path = self.src_path
        body = self.get_body()
        if body:
            self._upload_body(body)
        else:
            if src_path and os.path.isfile(src_path):
                self._upload_file(src_path)

    def _upload_body(self, body, target=None):
        # calc_hash is a module-level flag enabling content hashing
        if calc_hash:
            self.hash = blob_hash(body)
        self.size = len(body)
        store_manager.object(url=target or self.target_path).put(body)

    def _upload_file(self, src, target=None):
        if calc_hash:
            self.hash = calculate_local_file_hash(src)
        self.size = os.stat(src).st_size
        store_manager.object(url=target or self.target_path).upload(src)

    def artifact_kind(self):
        # kind exposed as a callable, for code paths that expect a method
        return self.kind

    def generate_target_path(self, artifact_path, producer):
        # delegate to the module-level path-convention helper
        return generate_target_path(self, artifact_path, producer)
class LegacyDirArtifact(LegacyArtifact):
    """Legacy (flat-structure) directory artifact."""

    _dict_fields = [
        "key",
        "kind",
        "iter",
        "tree",
        "src_path",
        "target_path",
        "description",
        "db_key",
    ]
    kind = "dir"

    @property
    def is_dir(self):
        """Directory artifacts are always dirs."""
        return True

    def upload(self):
        """Upload every file directly under src_path to target_path."""
        if not self.src_path:
            raise ValueError("local/source path not specified")
        for name in os.listdir(self.src_path):
            file_path = os.path.join(self.src_path, name)
            if not os.path.isfile(file_path):
                raise ValueError(f"file {file_path} not found, cant upload")
            destination = os.path.join(self.target_path, name)
            store_manager.object(url=destination).upload(file_path)
class LegacyLinkArtifact(LegacyArtifact):
    """Legacy artifact that points at another artifact."""

    _dict_fields = LegacyArtifact._dict_fields + [
        "link_iteration",
        "link_key",
        "link_tree",
    ]
    kind = "link"

    def __init__(self, key=None, target_path="",
                 link_iteration=None, link_key=None, link_tree=None):
        super().__init__(key)
        self.target_path = target_path
        # coordinates of the artifact this link points at
        self.link_iteration = link_iteration
        self.link_key = link_key
        self.link_tree = link_tree
def blob_hash(data):
    """Return the SHA-1 hex digest of *data* (str input is UTF-8 encoded first)."""
    if isinstance(data, str):
        data = data.encode()
    return hashlib.sha1(data).hexdigest()
def upload_extra_data(
    artifact_spec: Artifact,
    extra_data: dict,
    prefix="",
    update_spec=False,
):
    """Upload extra-data items alongside an artifact.

    bytes items are written under the artifact target path and always recorded
    in extra_data (the bytes have no other source); relative-path items are
    uploaded from src_path and recorded only when update_spec is set.  Other
    item types are left untouched.
    """
    if not extra_data:
        return
    target_path = artifact_spec.target_path
    for key, item in extra_data.items():
        full_key = prefix + key
        if isinstance(item, bytes):
            target = os.path.join(target_path, full_key)
            store_manager.object(url=target).put(item)
            artifact_spec.extra_data[full_key] = target
        elif is_relative_path(item):
            base = artifact_spec.src_path
            src_path = os.path.join(base, item) if base else item
            if not os.path.isfile(src_path):
                raise ValueError(f"extra data file {src_path} not found")
            target = os.path.join(target_path, item)
            store_manager.object(url=target).upload(src_path)
            if update_spec:
                artifact_spec.extra_data[full_key] = item
def get_artifact_meta(artifact):
    """return artifact object, and list of extra data items

    :param artifact: artifact path (store://..) or DataItem
    :returns: artifact object, extra data dict
    """
    if hasattr(artifact, "artifact_url"):
        artifact = artifact.artifact_url

    if is_store_uri(artifact):
        artifact_spec, _target = store_manager.get_store_artifact(artifact)
    elif artifact.lower().endswith(".yaml"):
        raw = store_manager.object(url=artifact).get()
        struct = yaml.load(raw, Loader=yaml.FullLoader)
        artifact_spec = mlrun.artifacts.dict_to_artifact(struct)
    else:
        raise ValueError(f"cant resolve artifact file for {artifact}")

    extra_dataitems = {
        key: store_manager.object(url_, key=key)
        for key, url_ in artifact_spec.extra_data.items()
    }
    return artifact_spec, extra_dataitems
def generate_target_path(item: "Artifact", artifact_path, producer):
    """Compute the default target path for an artifact.

    path convention: artifact_path[/{run_name}]/{iter}/{key}.{suffix}
    todo: add run_id here (vs in the .run() methods), support items dedup (by hash)

    :param item:          artifact object; annotated as a string on purpose --
                          both Artifact and LegacyArtifact instances are passed
                          here, so the eager ``Artifact`` annotation was wrong
    :param artifact_path: base path ("" treated as no base)
    :param producer:      producer object; for kind "run" the run name and
                          iteration are inserted into the path
    """
    base = artifact_path or ""
    if base and not base.endswith("/"):
        base += "/"
    if producer.kind == "run":
        base += f"{producer.name}/{item.iter or 0}/"
    if item.is_dir:
        suffix = "/"
    else:
        # prefer the source file extension, fall back to the declared format
        suffix = os.path.splitext(item.src_path or "")[1]
        if not suffix and item.format:
            suffix = f".{item.format}"
    return f"{base}{item.key}{suffix}"
| StarcoderdataPython |
# -*- coding: utf-8 -*-
"""Smoke test: connect to the headset and bracelet, read for 10s, disconnect."""
from Headset import Headset
from Bracelet import Bracelet
import time

headsetPort = 'COM3'
braceletPort = 'COM4'

headset = Headset()
bracelet = Bracelet()

# fix: "except Exception, e" / "raise e" / print statements were Python 2
# only; the forms below behave identically and are valid in Python 2 and 3
# (bare "raise" also preserves the original traceback).
try:
    headset.connect(headsetPort, 9600)
except Exception:
    raise

try:
    bracelet.connect(braceletPort, 9600)
except Exception:
    # release the already-open headset port before propagating the failure
    headset.closePort()
    raise

print("Headset: Is conected? " + str(headset.isConnected()))
print("Bracelet: Is conected? " + str(bracelet.isConnected()))
print("------------------------------------------------")

headset.startReading(persist_data=False)
bracelet.startReading(persist_data=False)
time.sleep(10)
headset.stopReading()
bracelet.stopReading()

headset.closePort()
bracelet.closePort()

print("------------------------------------------------")
print("Headset: Is conected? " + str(headset.isConnected()))
print("Bracelet: Is conected? " + str(bracelet.isConnected()))
print(headset.getStatus())
print(bracelet.getStatus())
| StarcoderdataPython |
1904243 | import svgwrite
from bplot import svgbase
import math
class SVGObject(object):
    """Base for drawable SVG elements: holds stroke/fill styling state.

    NOTE(review): when ``stroke_lines`` or ``stroke_pattern`` is truthy (the
    default), ``__init__`` calls ``self._draw_stroke_lines()``, which this base
    class does not define -- subclasses (e.g. Polygon) must provide it.
    """

    def __init__(self,
                 stroke='black', stroke_width=1,
                 stroke_dasharray=None, stroke_lines=None, stroke_pattern=None,
                 fill='white', opacity=1,
                 ):
        # bug fix: the old list defaults ([], ['outline'], [1,1,1,1]) were
        # shared mutable default arguments; Polygon._draw_stroke_lines mutated
        # the shared ['outline'] list in place, corrupting later instances.
        if stroke_dasharray is None:
            stroke_dasharray = []
        if stroke_lines is None:
            stroke_lines = ['outline']
        if stroke_pattern is None:
            stroke_pattern = [1, 1, 1, 1]

        self._dwg = svgwrite.Drawing()
        self.stroke = stroke
        self.stroke_width = stroke_width
        # bug fix: store the explicit dasharray so render() has a value even
        # when _draw_stroke_lines() is not invoked below
        self.stroke_dasharray = stroke_dasharray
        self._stroke_lines = stroke_lines
        self._stroke_pattern = stroke_pattern
        if stroke_lines or stroke_pattern:
            self._draw_stroke_lines()
        self.fill = fill
        self.opacity = opacity
class Polygon(SVGObject):
    """Closed polygon whose outline visibility is controlled per edge."""

    def __init__(self, points, **extra):
        # points must be set before SVGObject.__init__, which triggers
        # _draw_stroke_lines() and reads them
        self.points = points
        super(Polygon, self).__init__(**extra)

    def render(self):
        """Build and return the svgwrite polygon element."""
        polygon = self._dwg.polygon(
            self.points,
            stroke=self.stroke, stroke_width=self.stroke_width,
            stroke_dasharray=self.stroke_dasharray,
            fill=self.fill, opacity=self.opacity)
        return polygon

    def _draw_stroke_lines(self):
        """Compute a stroke-dasharray drawing only the requested edges.

        Edges are named 'up', 'right', 'bottom', 'left' ('outline' selects all
        four); consecutive edges with the same visibility flag are merged into
        a single dash/gap run.
        """
        # bug fix: work on a copy -- the old code extended self._stroke_lines
        # in place, mutating the list shared via the default argument
        lines = list(self._stroke_lines)
        if 'outline' in lines:
            lines += ['up', 'right', 'bottom', 'left']
        pattern = [int(edge in lines) for edge in ['up', 'right', 'bottom', 'left']]
        if self._stroke_pattern:
            # an explicit per-edge pattern overrides the edge-name selection
            pattern = self._stroke_pattern

        distance = lambda a, b: math.sqrt(math.pow(a[0] - b[0], 2) + math.pow(a[1] - b[1], 2))
        edge_sizes = [distance(a, b)
                      for a, b in zip(self.points, self.points[1:] + [self.points[0]])]

        # run-length encode the edge visibility flags into dash/gap lengths
        stroke_dasharray = []
        prev_flag = -1
        run_length = 0
        for flag, edge_size in zip(pattern, edge_sizes):
            if flag == prev_flag or prev_flag == -1:
                run_length += edge_size
            else:
                stroke_dasharray.append(run_length)
                run_length = edge_size
            # bug fix: the old code never updated prev_pattern when
            # accumulating, so every edge collapsed into one single run and
            # the per-edge visibility was lost
            prev_flag = flag
        stroke_dasharray.append(run_length)

        if pattern[0] != 1:
            # dasharray semantics start with a dash; prepend a zero-length
            # dash when the first edge must be a gap
            stroke_dasharray = [0] + stroke_dasharray
        self.stroke_dasharray = ','.join(map(str, stroke_dasharray))
class Arrow(svgbase.Container):
    """Arrow glyph: a vertical shaft plus two diagonal strokes at its bottom."""

    def __init__(self, insert, size,
                 border_stroke='none',
                 anchor_object=None,
                 anchor_pos='',
                 **kwargs):
        super(Arrow, self).__init__(insert, size,
                                    anchor_object=anchor_object,
                                    anchor_pos=anchor_pos,
                                    border_stroke=border_stroke,
                                    **kwargs)
        # vertical shaft running from the top to the bottom of the container
        shaft = svgbase.Line((0, 0), (0, 0),
                             anchor_object=self.rect,
                             anchor_pos='middle:iup',
                             target_object=self.rect,
                             target_pos='middle:ibottom',
                             **kwargs)
        self.svgobj.add(shaft.svgobj)

        # two diagonal strokes anchored at the bottom corners form the head
        half = self.width/2
        for corner in ('ileft:ibottom', 'iright:ibottom'):
            head = svgbase.Line((0, 0), (half, half),
                                anchor_object=self.rect,
                                anchor_pos=corner,
                                **kwargs)
            self.svgobj.add(head.svgobj)
| StarcoderdataPython |
3269674 | # -*- coding:utf-8 -*-
"""
@author:SiriYang
@file: CategoryView.py
@time: 2020.1.28 21:13
"""
import sys
import ui
import console
from AppsTableView import AppsTableView
sys.path.append("..")
from AppService import AppService
from tools.Result import *
class CategoryView(ui.View):
    """Pythonista view that lists app categories with per-category counts.

    The view is its own TableView data source and delegate; `app` is the
    owning application object and `father` the parent view to notify on
    updates.
    """
    def __init__(self,app,father):
        self.app=app
        self.father=father
        self.name="分类"
        self.background_color="white"
        self.frame=(0,0,self.app.width,self.app.height)
        self.flex="WHLRTB"
        self.tableView = ui.TableView(frame=(0, 0, self.width, self.height))
        self.tableView.flex="WHLRTB"
        self.add_subview(self.tableView)
        # category name -> number of apps in that category
        self.categories_dict = {}
        # category names sorted alphabetically; indexed by table row
        self.categories_names = []
        self.tableView.data_source = self
        self.tableView.delegate = self
        self.loadData()
    def loadData(self):
        """Fetch categories from the AppService and refresh the table."""
        try:
            res=self.app.appService.getCategories()
            if(not res.isPositive()):
                console.hud_alert(res.toString(), 'error', 1.0)
                return
            self.categories_dict=res.getData()
            self.categories_names = sorted(self.categories_dict.keys())
            self.tableView.reload()
        except Exception as e:
            console.hud_alert('Failed to load Categories_Dict', 'error', 1.0)
        finally:
            pass
    def layout(self):
        # Re-layout hook: reload so cell badge positions track orientation.
        self.tableView.reload()
        pass
    def tableview_number_of_sections(self, tableview):
        return 1
    def tableview_number_of_rows(self, tableview, section):
        return len(self.categories_dict)
    def tableview_cell_for_row(self, tableview, section, row):
        """Build a cell: category name plus a rounded badge with the count."""
        cell = ui.TableViewCell()
        categories_name = self.categories_names[row]
        categories_count = self.categories_dict[categories_name]
        cell.text_label.text = categories_name
        cell.accessory_type='disclosure_indicator'
        label=ui.Label()
        # Badge position depends on orientation (wider layout in landscape).
        if(self.app.orientation==self.app.LANDSCAPE):
            label.frame=(self.width-250,10,50,25)
        else:
            label.frame=(self.width-125,10,50,25)
        label.alignment=ui.ALIGN_CENTER
        label.border_width = 1
        label.corner_radius=5
        label.background_color="white"
        label.text_color="#9400d3"
        label.border_color="#9400d3"
        label.text=str(categories_count)
        cell.add_subview(label)
        return cell
    def tableview_can_delete(self, tableview, section, row):
        return True
    def tableview_can_move(self, tableview, section, row):
        return False
    @ui.in_background
    def tableview_did_select(self,tableview, section, row):
        """Push an AppsTableView for the tapped category."""
        self.app.activity_indicator.start()
        try:
            category_name = self.categories_names[row]
            apps_table = AppsTableView(self.app,self, category_name)
            self.app.nav_view.push_view(apps_table)
        except Exception as e:
            console.hud_alert('Failed to load apps list', 'error', 1.0)
        finally:
            self.app.activity_indicator.stop()
    def tableview_title_for_delete_button(self,tableview, section, row):
        return "删除"
    def tableview_delete(self,tableview, section, row):
        # Ask for confirmation before deleting the category and all its apps.
        category=self.categories_names[row]
        res=console.alert("删除分类",'你确定要删除"'+category+'"分类及其下所有App吗?',"确定","取消",hide_cancel_button=True)
        if(res==1):
            self.deleteCategory(category)
    @ui.in_background
    def scrollview_did_scroll(self,scrollview):
        # Pull-to-refresh: a large negative scroll offset triggers a reload.
        if(scrollview.content_offset[1]<-150):
            self.renovate()
    @ui.in_background
    def renovate(self):
        """Reload data with a spinner and a success/failure toast."""
        self.app.activity_indicator.start()
        try:
            self.loadData()
            console.hud_alert('刷新成功!', 'success', 1.0)
        except Exception as e:
            console.hud_alert('Failed to load Categories_Dict', 'error', 1.0)
        finally:
            self.app.activity_indicator.stop()
    def updateData(self):
        # Refresh this view, then propagate the update to the parent view.
        self.loadData()
        self.father.updateData()
    def deleteCategory(self,category):
        """Delete every app in *category* via the AppService, then reload."""
        self.app.activity_indicator.start()
        try:
            self.app.appService.deleteAppsByCategory(category)
            self.loadData()
        except Exception as e:
            console.hud_alert('Failed to delete category', 'error', 1.0)
        finally:
            self.app.activity_indicator.stop()
| StarcoderdataPython |
1968632 | <reponame>sirikata/sirikata
#!/usr/bin/python
import feedparser
import urllib
import os.path
import os
import time
import pickle
import zipfile
import xml.dom.minidom
import math
import euclid
import subprocess
def extract_entity_id(entity_url):
    """Return the entity id portion of a 3D Warehouse entity URL.

    Raises:
        ValueError: if *entity_url* is not a 3D Warehouse entity URL.
    """
    id_url_prefix = 'http://sketchup.google.com/3dwarehouse/data/entities/'
    # Validate with a real exception instead of assert, which is silently
    # stripped when Python runs with -O.
    if not entity_url.startswith(id_url_prefix):
        raise ValueError('not a 3D Warehouse entity URL: %r' % (entity_url,))
    return entity_url[len(id_url_prefix):]
def fetch_model_list(place):
    """Query the 3D Warehouse Atom feed for downloadable geo-located models
    near *place* and return the set of their entity ids."""
    url = 'http://sketchup.google.com/3dwarehouse/data/entities?q=is:model+is:best-of-geo+is:geo+is:downloadable'
    url += '+near:%22' + place + '%22'
    # Sort by date and request the feed maximum of 1000 results.
    url += '&scoring=d&max-results=1000'
    # The place name may contain spaces; the feed expects '+'.
    url = url.replace(' ', '+')
    d = feedparser.parse(url)
    objects = set()
    for x in d.entries:
        objects.add(extract_entity_id(x.id))
    return objects
def build_download_url(model_id):
    """Return the 3D Warehouse zip-download URL for *model_id*."""
    base = 'http://sketchup.google.com/3dwarehouse/download'
    query = '?mid=' + model_id + '&rtyp=zs&fn=Untitled&ctyp=other' + '&ts=1262100548000'
    return base + query
def model_filename(model_id, work_dir):
    """Return the path of the cached zip for *model_id* under *work_dir*."""
    basename = str(model_id) + '.zip'
    return os.path.join(work_dir, basename)
def have_model(model_id, work_dir):
    """Return True if the zip for *model_id* is already cached in *work_dir*."""
    # Collapse the redundant if/return-True/return-False into one expression.
    return os.path.exists(model_filename(model_id, work_dir))
def download_model(model_id, work_dir):
    """Fetch the model zip from 3D Warehouse into *work_dir*.

    No-op when the file is already cached locally.
    """
    if have_model(model_id, work_dir): return
    fname = model_filename(model_id, work_dir)
    dl_url = build_download_url(model_id)
    # urlretrieve writes directly to fname; the returned headers are unused.
    filename, headers = urllib.urlretrieve(dl_url, fname)
class ColladaZip:
    """Thin reader over a zip archive containing a KML + Collada model."""

    def __init__(self, zf):
        self.zf = zipfile.ZipFile(zf)

    def _members_with_suffix(self, suffix):
        # Names of all archive members whose name ends with *suffix*.
        return [name for name in self.zf.namelist() if name.endswith(suffix)]

    def find_kmls(self):
        """Return the names of all .kml members in the archive."""
        return self._members_with_suffix('.kml')

    def find_daes(self):
        """Return the names of all .dae (Collada) members in the archive."""
        return self._members_with_suffix('.dae')

    def get_file(self, fname):
        """Open the member *fname* and return a file-like object."""
        return self.zf.open(fname)
class ModelPosition:
    """Geodetic position (latitude, longitude, altitude) of a model."""

    def __init__(self, lat, lng, alt):
        self.lat = lat
        self.long = lng
        self.alt = alt
class ModelOrientation:
    """Orientation of a model as heading/tilt/roll angles."""

    def __init__(self, heading, tilt, roll):
        self.heading = heading
        self.tilt = tilt
        self.roll = roll
class ModelScale:
    """Axis-aligned extents of a model, with a bounding-sphere radius helper."""

    def __init__(self, x, y, z):
        self.x = x
        self.y = y
        self.z = z

    def radius(self):
        """Return half the diagonal of the extents box (bounding sphere)."""
        diagonal = math.sqrt(self.x * self.x + self.y * self.y + self.z * self.z)
        return diagonal * 0.5
class ModelLocation:
    """Bundle of a model's position, orientation and scale."""

    def __init__(self, pos, orient, scale):
        self.scale = scale
        self.orientation = orient
        self.pos = pos
def getXmlChildByName(xml, tag):
    """Return the single descendant element named *tag*, or None if absent.

    An AssertionError is raised when more than one match exists; the KML
    documents handled here are expected to contain at most one.
    """
    matches = xml.getElementsByTagName(tag)
    assert(len(matches) <= 1)
    if not matches:
        return None
    return matches[0]
def getXmlContents(xml):
    """Return the data of the element's first child node (its text)."""
    first_child = xml.childNodes[0]
    return first_child.data
def extract_kml_info(model_id, work_dir):
    """Parse the KML inside a cached model zip and return a ModelLocation
    holding the position, orientation and scale authored in the KML."""
    assert(have_model(model_id, work_dir))
    fname = model_filename(model_id, work_dir)
    cz = ColladaZip(fname)
    kmls = cz.find_kmls()
    # Exactly one KML file is expected per archive.
    assert(len(kmls) == 1)
    (kml,) = kmls
    kml_xml = xml.dom.minidom.parse( cz.get_file(kml) )
    # NOTE(review): model_node is never used below — confirm it is only a
    # leftover from exploring the KML layout.
    model_node = getXmlChildByName(kml_xml, 'Model')
    loc_node = getXmlChildByName(kml_xml, 'Location')
    loc_lat = float(getXmlContents(getXmlChildByName(loc_node, 'latitude')))
    loc_long = float(getXmlContents(getXmlChildByName(loc_node, 'longitude')))
    loc_alt = float(getXmlContents(getXmlChildByName(loc_node, 'altitude')))
    orient_node = getXmlChildByName(kml_xml, 'Orientation')
    orient_heading = float(getXmlContents(getXmlChildByName(orient_node, 'heading')))
    orient_tilt = float(getXmlContents(getXmlChildByName(orient_node, 'tilt')))
    orient_roll = float(getXmlContents(getXmlChildByName(orient_node, 'roll')))
    scale_node = getXmlChildByName(kml_xml, 'Scale')
    sx = float(getXmlContents(getXmlChildByName(scale_node, 'x')))
    sy = float(getXmlContents(getXmlChildByName(scale_node, 'y')))
    sz = float(getXmlContents(getXmlChildByName(scale_node, 'z')))
    return ModelLocation(ModelPosition(loc_lat, loc_long, loc_alt), ModelOrientation(orient_heading, orient_tilt, orient_roll), ModelScale(sx, sy, sz))
def compute_scale(model_id, work_dir):
    """Run the external `meshtool_d` tool on the Collada file inside the
    cached zip and return the mesh's axis-aligned extents as a ModelScale."""
    assert(have_model(model_id, work_dir))
    fname = model_filename(model_id, work_dir)
    cz = ColladaZip(fname)
    daes = cz.find_daes()
    # Exactly one Collada file is expected per archive.
    assert(len(daes) == 1)
    (dae,) = daes
    dae_fp = cz.get_file(dae)
    # We need to manually pass the dae data because subprocess can't handle ZipFile objects
    dae_data = ''.join(dae_fp.readlines())
    bounds_str = subprocess.Popen(['meshtool_d', '--load', '--compute-bounds'], stdin=subprocess.PIPE, stdout=subprocess.PIPE).communicate(dae_data)[0]
    # Horrible parsing because re isn't doing what I want it to
    # Expected output: six comma-separated numbers (min x,y,z then max x,y,z),
    # possibly wrapped in angle brackets — hence the strip('<>').
    bounds_list = [float(z.strip().strip('<>')) for z in bounds_str.split(',')]
    # Extents = max - min per axis.
    return ModelScale(bounds_list[3] - bounds_list[0], bounds_list[4] - bounds_list[1], bounds_list[5] - bounds_list[2])
def average(l):
    """Return the arithmetic mean of *l* (float division, Python 2 safe)."""
    total = sum(l)
    return total / float(len(l))
class WarehouseScene:
    """Builds a Sirikata scene from Google 3D Warehouse models.

    Keeps a pickled set of entity ids (the 'db') under a working directory,
    downloads the corresponding model zips, and emits a CSV scene file with
    positions rotated so the scene's central lat/long is y-up.
    """
    def __init__(self, name, max_rate=10):
        # max_rate: seconds to sleep between downloads (politeness delay).
        self._work_dir = name
        self._max_rate = max_rate
        self._setup_work_dir()
        self._load_db()
    def _setup_work_dir(self):
        if not os.path.exists(self._work_dir):
            os.makedirs(self._work_dir)
    def _db_file(self):
        return os.path.join(self._work_dir, 'db')
    def _load_db(self):
        # Our 'database' is just pickled summary data
        if os.path.exists(self._db_file()):
            f = open(self._db_file(), 'r')
            self._db = pickle.load(f)
            f.close()
            del f
            return
        # Or start fresh
        self._db = set()
    def _save_db(self):
        f = open(self._db_file(), 'w')
        pickle.dump(self._db, f)
        f.close()
        del f
    def add(self, place):
        # NOTE(review): the `place` argument is ignored — the query string is
        # hard-coded; presumably this should be fetch_model_list(place).
        objects = fetch_model_list('chinatown new york new york')
        self._db = self._db | objects;
        self._save_db()
    def download(self):
        """Download every model in the db, sleeping between fetches."""
        print 'Downloading...'
        idx = 1
        for objid in self._db:
            print objid, '(%d of %d)' % (idx, len(self._db))
            idx += 1
            # Skip models that are already cached on disk.
            if have_model(objid, self._work_dir): continue
            download_model(objid, self._work_dir)
            time.sleep(self._max_rate)
    def generateCSV(self, fname):
        '''Generates a CSV scene file for this WarehouseScene.'''
        # First, we need to extract all the basic information available in the kml files
        objdata = {}
        for objid in self._db:
            objdata[objid] = extract_kml_info(objid, self._work_dir)
            # Combine the mesh's own extents with the KML per-axis scale.
            model_scale = compute_scale(objid, self._work_dir)
            kml_scale = objdata[objid].scale
            objdata[objid].scale = ModelScale(model_scale.x * kml_scale.x, model_scale.y * kml_scale.y, model_scale.z * kml_scale.z)
        # Next, figure out where to center things
        avg_lat = average([obj.pos.lat for obj in objdata.values()])
        avg_long = average([obj.pos.long for obj in objdata.values()])
        # Based on our central point, we need to compute where
        # everything is in relation to it. We also need to take care
        # of rotating everything so that the specified lat,long's
        # normal is y-up
        earth_rad = 6371000.0 # meters
        print avg_lat, avg_long
        avg_theta = math.radians(avg_long)
        avg_phi = math.radians(90 - avg_lat)
        # Generate quaternion for rotating points into the central point's frame
        rot_center = \
            euclid.Quaternion.new_rotate_axis(-avg_phi, euclid.Vector3(0, 0, -1)) * \
            euclid.Quaternion.new_rotate_axis(-avg_theta, euclid.Vector3(0, -1, 0))
        # Now we have all the info we need to create the scene
        with open(fname, 'w') as fout:
            print >>fout, '"objtype","pos_x","pos_y","pos_z","orient_x","orient_y","orient_z","orient_w","meshURI","scale"'
            # In these scenes, we always need lights added
            print >>fout, '"mesh",0,0,0,0,0,0,1,"meerkat:///ewencp/DirectionalLights.dae",1'
            for objid in self._db:
                obj = objdata[objid]
                # Get in spherical coordinates
                theta = math.radians(obj.pos.long)
                phi = math.radians(90 - obj.pos.lat)
                # Get in cartesian coords
                pos = euclid.Vector3(
                    x = earth_rad * math.cos(theta) * math.sin(phi),
                    y = earth_rad * math.cos(phi),
                    z = earth_rad * math.sin(theta) * math.sin(phi)
                )
                # Rotate into position
                pos = rot_center * pos
                # And translate to the "center of the earth," i.e. the origin,
                # in order to get things in a sane coordinate system
                pos.y -= earth_rad
                rot = [0,0,0,1]
                mesh = "meerkat:///ewencp/%s.dae" % (objid)
                # obj.scale contains the extents, compute a radius
                size = obj.scale.radius()
                print >>fout, '"mesh",%f,%f,%f,%f,%f,%f,%f,"%s",%f' % (pos[0], pos[1], pos[2], rot[0], rot[1], rot[2], rot[3], mesh, size)
if __name__ == '__main__':
    # Example pipeline: (1) seed the db with a place search, (2) download the
    # model zips, (3) emit the CSV scene. Steps 1-2 are commented out so the
    # script only regenerates the scene from already-downloaded data.
    scene = WarehouseScene('chinatown')
    #scene.add('chinatown new york new york')
    #scene.download()
    scene.generateCSV('chinatown.scene.db')
| StarcoderdataPython |
98280 | <filename>usaspending_api/recipient/migrations/0001_initial.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-05-11 00:51
from __future__ import unicode_literals
import django.contrib.postgres.fields
import django.contrib.postgres.indexes
import partial_index
from django.contrib.postgres.operations import TrigramExtension
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the recipient app: DUNS and historic-parent-DUNS
    reference tables, recipient lookup/profile tables, state data and the
    summary_award_recipient rollup, plus supporting indexes and raw SQL for
    defaults and a trigram index."""
    initial = True
    dependencies = [
    ]
    operations = [
        # Enable the pg_trgm extension before the trigram index created in
        # the RunSQL step at the end of this migration.
        TrigramExtension(),
        migrations.CreateModel(
            name='DUNS',
            fields=[
                ('awardee_or_recipient_uniqu', models.TextField(primary_key=True, serialize=False)),
                ('legal_business_name', models.TextField(blank=True, null=True)),
                ('dba_name', models.TextField(blank=True, null=True)),
                ('ultimate_parent_unique_ide', models.TextField(blank=True, null=True)),
                ('ultimate_parent_legal_enti', models.TextField(blank=True, null=True)),
                ('address_line_1', models.TextField(blank=True, null=True)),
                ('address_line_2', models.TextField(blank=True, null=True)),
                ('city', models.TextField(blank=True, null=True)),
                ('state', models.TextField(blank=True, null=True)),
                ('zip', models.TextField(blank=True, null=True)),
                ('zip4', models.TextField(blank=True, null=True)),
                ('country_code', models.TextField(blank=True, null=True)),
                ('congressional_district', models.TextField(blank=True, null=True)),
                ('business_types_codes', django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), default=list, null=True, size=None)),
                ('entity_structure', models.TextField(blank=True, null=True)),
                ('broker_duns_id', models.TextField()),
                ('update_date', models.DateField()),
            ],
            options={
                'db_table': 'duns',
            },
        ),
        migrations.CreateModel(
            name='HistoricParentDUNS',
            fields=[
                ('awardee_or_recipient_uniqu', models.TextField()),
                ('legal_business_name', models.TextField(blank=True, null=True)),
                ('ultimate_parent_unique_ide', models.TextField(blank=True, null=True)),
                ('ultimate_parent_legal_enti', models.TextField(blank=True, null=True)),
                ('broker_historic_duns_id', models.IntegerField(primary_key=True, serialize=False)),
                ('year', models.IntegerField(blank=True, null=True)),
            ],
            options={
                'db_table': 'historic_parent_duns',
            },
        ),
        migrations.CreateModel(
            name='RecipientLookup',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('recipient_hash', models.UUIDField(null=True, unique=True)),
                ('legal_business_name', models.TextField(db_index=True, null=True)),
                ('duns', models.TextField(null=True, unique=True)),
                ('parent_duns', models.TextField(null=True)),
                ('parent_legal_business_name', models.TextField(null=True)),
                ('address_line_1', models.TextField(null=True)),
                ('address_line_2', models.TextField(null=True)),
                ('city', models.TextField(null=True)),
                ('state', models.TextField(null=True)),
                ('zip5', models.TextField(null=True)),
                ('zip4', models.TextField(null=True)),
                ('country_code', models.TextField(null=True)),
                ('congressional_district', models.TextField(null=True)),
                ('business_types_codes', django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), default=list, null=True, size=None)),
            ],
            options={
                'db_table': 'recipient_lookup',
            },
        ),
        migrations.CreateModel(
            name='RecipientProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('recipient_level', models.CharField(max_length=1)),
                ('recipient_hash', models.UUIDField(db_index=True, null=True)),
                ('recipient_unique_id', models.TextField(db_index=True, null=True)),
                ('recipient_name', models.TextField(db_index=True, null=True)),
                ('recipient_affiliations', django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), default=list, size=None)),
                ('award_types', django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), default=list, size=None)),
                ('last_12_months', models.DecimalField(decimal_places=2, default=0.0, max_digits=23)),
                ('last_12_contracts', models.DecimalField(decimal_places=2, default=0.0, max_digits=23)),
                ('last_12_grants', models.DecimalField(decimal_places=2, default=0.0, max_digits=23)),
                ('last_12_direct_payments', models.DecimalField(decimal_places=2, default=0.0, max_digits=23)),
                ('last_12_loans', models.DecimalField(decimal_places=2, default=0.0, max_digits=23)),
                ('last_12_other', models.DecimalField(decimal_places=2, default=0.0, max_digits=23)),
                ('last_12_months_count', models.IntegerField(default=0)),
            ],
            options={
                'db_table': 'recipient_profile',
                'managed': True,
            },
        ),
        migrations.CreateModel(
            name='StateData',
            fields=[
                ('id', models.TextField(primary_key=True, serialize=False)),
                ('fips', models.TextField(db_index=True)),
                ('code', models.TextField()),
                ('name', models.TextField()),
                ('type', models.TextField()),
                ('year', models.IntegerField(db_index=True)),
                ('population', models.BigIntegerField(blank=True, null=True)),
                ('pop_source', models.TextField(blank=True, null=True)),
                ('median_household_income', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)),
                ('mhi_source', models.TextField(blank=True, null=True)),
            ],
            options={
                'db_table': 'state_data',
            },
        ),
        migrations.CreateModel(
            name='SummaryAwardRecipient',
            fields=[
                ('award_id', models.BigIntegerField(primary_key=True, serialize=False)),
                ('action_date', models.DateField(blank=True, db_index=True)),
                ('recipient_hash', models.UUIDField(db_index=True, null=True)),
                ('parent_recipient_unique_id', models.TextField(db_index=True, null=True)),
            ],
            options={
                'db_table': 'summary_award_recipient',
                'managed': True,
            },
        ),
        migrations.AddIndex(
            model_name='recipientprofile',
            index=django.contrib.postgres.indexes.GinIndex(fields=['award_types'], name='recipient_p_award_t_211373_gin'),
        ),
        migrations.AddIndex(
            model_name='recipientprofile',
            index=models.Index(fields=['recipient_unique_id'], name='recipient_p_recipie_7039a5_idx'),
        ),
        migrations.AlterUniqueTogether(
            name='recipientprofile',
            unique_together=set([('recipient_hash', 'recipient_level')]),
        ),
        migrations.AddIndex(
            model_name='recipientlookup',
            index=partial_index.PartialIndex(fields=['duns'], name='recipient_l_duns_bb057a_partial', unique=True, where=partial_index.PQ(duns__isnull=False)),
        ),
        migrations.AddIndex(
            model_name='recipientlookup',
            index=partial_index.PartialIndex(fields=['parent_duns'], name='recipient_l_parent__efd6d5_partial', unique=False, where=partial_index.PQ(parent_duns__isnull=False)),
        ),
        # Raw SQL for what the ORM does not express here: column defaults and
        # the pg_trgm GIN index on recipient_name for fuzzy name search.
        migrations.RunSQL(
            sql=[
                'alter table only recipient_profile alter column last_12_months set default 0.00',
                "alter table only recipient_profile alter column recipient_affiliations set default '{}'::text[]",
                'create index idx_recipient_profile_name on recipient_profile using gin (recipient_name gin_trgm_ops)',
            ],
        ),
    ]
| StarcoderdataPython |
# All possible combinations a**b for 2 <= a <= 100, same range for b
# (Project Euler problem 29: count the distinct values of a**b).
def vse_kombinacije(od, do):
    """Return the number of distinct values a**b for od <= a, b <= do.

    Uses a set comprehension instead of the original list with a linear
    membership test, which made the function O(n**2) in the number of
    generated powers.
    """
    return len({osnova ** potenca
                for osnova in range(od, do + 1)
                for potenca in range(od, do + 1)})

print(vse_kombinacije(2, 100))
9183 | StarcoderdataPython |
# coding=utf-8
'''
Two-hidden-layer MLP on MNIST with dropout (TensorFlow 1.x).
accuracy:98%
'''
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
# Load MNIST with one-hot labels.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
print(mnist.train.images.shape, mnist.train.labels.shape)
# Hyper-parameters: input/output sizes, learning rate, batch size, number of
# training steps, hidden-layer widths, and dropout keep probability.
X_SIZE = 784
Y_SIZE = 10
LR = 0.5
BATCH_SIZE = 100
STEP_TIMES = 4000
UNIT_NUM = 300
UNIT_NUM2 = 100
KEEP_PROB = 0.75
# Placeholders for images, one-hot labels and the dropout keep probability.
x = tf.placeholder(tf.float32, [None, X_SIZE])
y = tf.placeholder(tf.float32, [None, Y_SIZE])
keep_prob = tf.placeholder(tf.float32)
# Weights: two hidden layers with truncated-normal init, zero-init output.
W1 = tf.Variable(tf.truncated_normal([X_SIZE, UNIT_NUM], stddev=0.1))
b1 = tf.Variable(tf.zeros([UNIT_NUM]))
W2 = tf.Variable(tf.truncated_normal([UNIT_NUM, UNIT_NUM2], stddev=0.1))
b2 = tf.Variable(tf.zeros([UNIT_NUM2]))
OW = tf.Variable(tf.zeros([UNIT_NUM2, Y_SIZE]))
Ob = tf.Variable(tf.zeros([Y_SIZE]))
# Model definition: ReLU -> ReLU -> dropout -> softmax.
h1 = tf.nn.relu(tf.matmul(x, W1) + b1)
h2 = tf.nn.relu(tf.matmul(h1, W2) + b2)
h2_dropout = tf.nn.dropout(h2, keep_prob)
pred = tf.nn.softmax(tf.matmul(h2_dropout, OW) + Ob)
# Loss: cross-entropy averaged over the batch.
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices=[1]))
# Training op: plain SGD.
train_step = tf.train.GradientDescentOptimizer(LR).minimize(cross_entropy)
# Accuracy: fraction of argmax matches between labels and predictions.
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(y, 1), tf.argmax(pred, 1)), tf.float32))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(STEP_TIMES):
        batch_x, batch_y = mnist.train.next_batch(BATCH_SIZE)
        _, loss, rr = sess.run([train_step, cross_entropy, accuracy],
                               feed_dict={x: batch_x, y: batch_y, keep_prob: KEEP_PROB})
        if i % 20 == 0:
            print("%d --> %f : %f" % (i, loss, rr))
    # Final accuracy on the full train and test sets (dropout disabled).
    print(sess.run(accuracy, feed_dict={x: mnist.train.images, y: mnist.train.labels, keep_prob: 1.0}))
    print(sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.0}))
| StarcoderdataPython |
303618 | # -*- coding: utf-8 -*-
"""
Transitive closure
===================
Computes transitive closure on a graph Adjacency Matrix.
These algorithms work with undirected weighted (distance) graphs.
"""
# Copyright (C) 2017 by
# <NAME> <<EMAIL>>
# All rights reserved.
# MIT license.
import numpy as np
import heapq
__author__ = """\n""".join(['<NAME> <<EMAIL>>'])
#
#
#
def _py_single_source_shortest_distances(source, N, E, neighbors, operators=(min,sum), verbose=0):
    """
    Compute shortest distance between a source and all other reachable nodes.
    Args:
        source (int): the source node.
        N (list): the list of nodes in the network.
        E (dict): the dict of edges in the network.
        neighbors (dict): a dict that contains all node neighbors.
        operators (tuple): a tuple of the operators to compute shortest path. Default is ``(min,max)``.
        verbose (int, optional): print statements as it computed shortest distances.
    Returns:
        dists (dict): the final distance calculated from source to all other nodes.
        paths (dict): the local path between source and all other nodes.
    Note:
        The python `heapq` module does not support item update.
        Therefore this algorithm keeps track of which nodes and edges have been searched already;
        and the queue itself has duplicated nodes inside.
    """
    Q = [] # priority queue; note items are mutable
    final_dist = {} # {node:distance}
    paths = {} # {node: [distance, parent_node]}
    visited_nodes = set([]) # We need this because we can't update the heapq
    visited_edges = set([])
    # NOTE(review): disjf (the disjunction operator) is unpacked but never
    # used below; only conjf combines edge weights — confirm intended.
    disjf, conjf = operators
    for node in N:
        # Root node has distance 0
        if node == source:
            final_dist[source] = 0.
            heapq.heappush(Q, [0, node])
        # All other nodes have distance infinity
        else:
            final_dist[node] = np.inf
            heapq.heappush(Q, [np.inf, node])
    # Iterate over all nodes in the Queue
    while Q:
        node_dist, node = heapq.heappop(Q) # Current `node distance` and `node index`
        #If this node has been searched, continue
        if node in visited_nodes:
            continue
        # Iterate over all neighbors of node
        for neighbor in neighbors[node]:
            # If this edge has been searched, continue
            if (node, neighbor) in visited_edges:
                continue
            # the edge distance/weight/cost
            weight = E[ (node, neighbor) ]
            # Operation to decide how to compute the lenght, summing edges (metric) or taking the max (ultrametric)
            new_dist = conjf([node_dist, weight])
            # If this is a shortest distance, update
            if new_dist < final_dist[neighbor]:
                # update the shortest distance to this node
                final_dist[neighbor] = new_dist
                # update (actually include a new one) this neighbor on the queue
                heapq.heappush(Q, [new_dist, neighbor])
                # update the path
                paths[neighbor] = [new_dist, node]
            # Add to visited edges
            # NOTE(review): the reversed tuple (neighbor, node) is recorded
            # while (node, neighbor) is tested above — for an undirected graph
            # this blocks re-relaxing the edge from the other endpoint;
            # confirm this asymmetry is intentional.
            visited_edges.add( (neighbor,node) )
        # Add to visited nodes
        visited_nodes.add(node)
    return final_dist, paths
def _py_single_source_complete_paths(source, N, paths):
"""
From the dict of node parent paths, recursively return the complete path from the source to all targets
Args:
source (int/string): the source node.
N (dict): the set of nodes in the network.
paths (dict): a dict of nodes and their distance to the parent node.
Returns:
path (dict): the complete path between source and all other nodes, including source and target in the list.
"""
def __py__get_path_recursive(plist, n, source):
if n != source:
plist.append(n)
try:
n = paths[n][1]
except:
pass
else:
__py__get_path_recursive(plist, n, source)
return plist
complete_paths = {}
for n in N:
plist = __py__get_path_recursive([], n, source)
plist.append(source)
plist.reverse()
complete_paths[n] = plist
return complete_paths
| StarcoderdataPython |
149160 | """
Copyright 2018 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Purpose:
This class as a kind of virtual mail sorter. Messages from other PCs,
both servers and clients, come into this class through TCP connections.
Inbound messages are queued for when the program is ready to read them.
Conversely, outbound messages are sent immediately
Assumptions:
Usage:
python3 connection.py SERVER
python3 connection.py CLIENT
PRECON: all connections are point-to-point (one client per server)
"""
import json
from enum import Enum
from connection_core import ConnectionCore
class SERVER_DEFINITION(Enum):
    """Well-known endpoints: each member maps to its LAN address and port."""

    ROBOT = {"ip_address": "192.168.1.113", "port": 7003}
    CONTROLLER = {"ip_address": "192.168.1.112", "port": 7002}
class Connection:
    """Message-queueing wrapper around ConnectionCore: inbound messages are
    queued until read with pop(); outbound messages are sent immediately
    via send(). One client per server (point-to-point)."""
    def __init__(self,is_server,ip_address,port_number):
        self.core=ConnectionCore(is_server,ip_address,port_number)
    def is_connected(self):
        return self.core.is_connected()
    def start(self):
        return self.core.start()
    def disconnect(self):
        self.core.disconnect()
        #self.core.join() #wait for helper class to finish multi-threaded application
    #given an object (dict, list, string, int, etc), push to the outbound message queue
    def send(self,message_obj):
        return self.core.send(message_obj)
    #return the oldest message from the incoming queue
    #returned the same as was sent: dict, list, string, int, etc
    def pop(self):
        return self.core.pop()
    #True if incoming message queue is empty
    def is_inbound_queue_empty(self):
        return self.core.is_inbound_queue_empty()
    #False for fully-functional link
    #True anything else
    def is_error(self):
        # NOTE(review): this returns the bound method object itself (always
        # truthy), not an error flag; it likely should delegate to the core
        # (e.g. self.core.is_error()) — confirm against ConnectionCore's API.
        return self.is_error
    @staticmethod
    def getOwnAddress():
        return ConnectionCore.getOwnAddress()
    #is_loopback performs IP packet ping between server and client on the same machine
    #is_wlan performs IP packet ping between server and client on different machines
    # this code must be run identically on two machines (machines will self-identify as robot
    # or controller based on their own IP addresses
    @staticmethod
    def build_test(is_robot,is_loopback,is_wlan):
        """Self-test: JSON multi-message parsing checks, plus optional
        loopback (single machine) and WLAN (two machine) exchange tests."""
        print("Connection Build Test...")
        import time
        print("Check own IP Address...")
        my_ip_address=Connection.getOwnAddress()
        found_node=None
        for node in SERVER_DEFINITION:
            node_ip_address=node.value["ip_address"]
            if(my_ip_address==node_ip_address): found_node=node
        print("Is the IP Address of this device valid? ","FAIL" if found_node is None else "PASS",": ",my_ip_address,", ",found_node)
        if(found_node is None):
            print("!! run the following command: sudo ifdown wlan0 && sudo ifdown eth0 && sudo ifup wlan0")
        if(is_robot is None):
            # Self-identify the role from this machine's IP when unspecified.
            is_robot=found_node==SERVER_DEFINITION.ROBOT
        print("JSON Check...")
        print("Decode garbage...")
        decoder = json.JSONDecoder()
        garbage_list=["HA\nPPY","[{\"alpha\": 1}, {\"b"]
        for garbage in garbage_list:
            try:
                decompressed,end_index=decoder.raw_decode(garbage)
                print("Decompressed garbage: ",decompressed,", ",end_index)
            except json.decoder.JSONDecodeError:
                print("Garbage parse test: PASS from ",garbage)
        print("Multi-message JSON parse...")
        # raw_decode allows splitting back-to-back JSON documents from a
        # single stream buffer, which is how messages arrive over TCP.
        message_1=[{"alpha":1},{"beta":[2,3,4]},{"epsilon":{"bravo":5,"elix":6}}]
        message_2={"thing":"kind","igloo":[7,8]}
        print("Outbound message 1: ",message_1)
        print("Outbound message 2: ",message_2)
        compressed_1=json.dumps(message_1)
        compressed_2=json.dumps(message_2)
        compressed=compressed_1+compressed_2
        print("Merged json: ",compressed)
        decompressed,end_index=decoder.raw_decode(compressed)
        print("Decompressed 1: ",decompressed,", ",end_index)
        compressed=compressed[end_index:]
        print("Remainder merged json compressed: "+compressed)
        decompressed,end_index=decoder.raw_decode(compressed)
        print("Decompressed 2: ",decompressed,", ",end_index)
        if(is_robot):
            server_def=SERVER_DEFINITION.ROBOT.value #if is robot, setup server as robot
            client_def=SERVER_DEFINITION.CONTROLLER.value #try to connect to controller as client
        else:
            server_def=SERVER_DEFINITION.CONTROLLER.value #if is controller, setup server as controller
            client_def=SERVER_DEFINITION.ROBOT.value #try to connect to robot server as client
        if(is_loopback):# test only locally on one computer
            print("Run loopback test on local machine...")
            server_conn=Connection(True,server_def["ip_address"],server_def["port"]) #this is server
            client_conn=Connection(False,server_def["ip_address"],server_def["port"]) #this is client
            print("Create server...")
            # NOTE(review): Connection defines start(), not connect(); every
            # .connect() call in this test would raise AttributeError —
            # confirm which method name the intended API uses.
            server_conn.connect()
            print("Create client...")
            client_conn.connect()
            print("Wait for connection to be established...")
            time.sleep(0.5)
            print("Server connected? ","PASS" if server_conn.is_connected() else "FAIL")
            print("Server address: ",server_conn.core.socket)
            print("Server view of client address: ",server_conn.core.connection)
            print("Client connected? ","PASS" if client_conn.is_connected() else "FAIL")
            print("Client view of server address: ",client_conn.core.socket)
            print("Wait for link to settle...")
            time.sleep(0.5)
            print("Send message from client to server...")
            client_to_server_message={"payload":"ALPHA"}
            server_to_client_response="BETA"
            client_conn.send(client_to_server_message)
            # Poll up to ~1s for the message to appear on the server queue.
            for rep in range(10):
                if(not server_conn.is_inbound_queue_empty()):
                    server_to_client_response=server_conn.pop()
                else:
                    time.sleep(0.1)
            print("Response server to client: ","PASS" if server_to_client_response==client_to_server_message else "FAIL")
            print("Send message from server to client...")
            server_to_client_message={"payload":"DELTA"}
            client_to_server_response="GAMMA"
            server_conn.send(server_to_client_message)
            for rep in range(10):
                if(not client_conn.is_inbound_queue_empty()):
                    client_to_server_response=client_conn.pop()
                else:
                    time.sleep(0.1)
            print("Response server to client: ","PASS" if client_to_server_response==server_to_client_message else "FAIL")
            print("Send multiple messages from client to server...")
            num_messages=10
            for rep in range(num_messages):
                client_to_server_message={"payload":"PASS_"+str(rep)+"_of_"+str(num_messages)}
                client_conn.send(client_to_server_message)
            time.sleep(0.2)
            while(not server_conn.is_inbound_queue_empty()):
                print("Server received message: ",server_conn.pop())
            print("Dispose client...")
            client_conn.disconnect()
            print("Dispose server...")
            server_conn.disconnect()
        if(is_wlan):
            #robot is server, controller is client
            server_def=SERVER_DEFINITION.ROBOT.value #if is robot, setup server as robot
            if(is_robot):
                this_conn=Connection(True,server_def["ip_address"],server_def["port"])
            else:
                this_conn=Connection(False,server_def["ip_address"],server_def["port"])
            print("Pause to form connection...")
            this_conn.connect() #NOT blocking, exectuion will continue past here even if link is not established
            while(not this_conn.is_connected()):
                time.sleep(0.1) #wait for opposite end of connection to appear
            print("Connection established: ","PASS" if this_conn.is_connected() else "FAIL")
            packet_tennis=60 #send packets back and forth X times
            for packet_iter in range(packet_tennis):
                if(is_robot):
                    this_packet="robot_"+str(packet_iter)
                    print("Robot sending packet... ",this_packet)
                    this_conn.send(this_packet)
                if(not is_robot):
                    print("Controller wait for packet...")
                    while(this_conn.is_inbound_queue_empty()):
                        time.sleep(0.01)
                    print("Controller received packet: ",this_conn.pop())
                    this_packet="controller_"+str(packet_iter)
                    print("Controller sending packet... ",this_packet)
                    this_conn.send(this_packet)
                if(is_robot):
                    print("Robot wait for packet...")
                    while(this_conn.is_inbound_queue_empty()):
                        time.sleep(0.01)
                    print("Robot received packet: ",this_conn.pop())
            print("Dispose connection...")
            this_conn.disconnect()
        #print("Create...")
        #server_conn=Connection(server_def,True)
        #client_conn=Connection(client_def,False)
        #server_conn.connect()
        #client_conn.connect()
if __name__ == "__main__":
    # Smoke-test driver for the Connection class (defined above).
    print("START")
    is_robot=None #None: is_robot determined by IP address of computer
    is_loopback=False #test communication on local port?
    is_wlan=True #test communication between computers
    Connection.build_test(is_robot,is_loopback,is_wlan)
    print("DONE")
| StarcoderdataPython |
11366554 | <reponame>fgitmichael/AutoregressiveModeDisentangling<gh_stars>0
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.distributions import Normal
from code_slac.network.base import BaseNetwork
from code_slac.network.latent import Gaussian, ConstantGaussian
class ModeLatentNetwork(BaseNetwork):
    """Mode latent-variable model.

    Wires together a caller-supplied dynamics latent network, a mode encoder
    q(m | x(1:T), a(1:T)), a constant Gaussian prior over the mode, and an
    action decoder conditioned on the dynamics latents plus a mode sample.
    """
    def __init__(self,
                 mode_dim,
                 rnn_dim,
                 num_rnn_layers,
                 rnn_dropout,
                 hidden_units_mode_encoder,
                 hidden_units_action_decoder,
                 mode_repeating,
                 feature_dim,
                 action_dim,
                 dyn_latent_network,
                 std_decoder,
                 action_normalized,
                 device,
                 leaky_slope):
        super(ModeLatentNetwork, self).__init__()
        self.device = device
        # Latent model for the dynamics (constructed by the caller; only its
        # latent1_dim/latent2_dim are read here).
        self.dyn_latent_network = dyn_latent_network
        # Encoder net for mode q(m | x(1:T), a(1:T))
        self.mode_encoder = ModeEncoderCombined(
            feature_shape=feature_dim,
            output_dim=mode_dim,
            action_dim=action_dim,
            hidden_units=hidden_units_mode_encoder,
            hidden_rnn_dim=rnn_dim,
            rnn_layers=num_rnn_layers,
            rnn_dropout=rnn_dropout)
        # Mode prior
        self.mode_prior = ConstantGaussian(mode_dim)
        # Action decoder: ``mode_repeating`` selects the variant that tiles
        # the mode vector before concatenation (ActionDecoderModeRepeat).
        latent1_dim = self.dyn_latent_network.latent1_dim
        latent2_dim = self.dyn_latent_network.latent2_dim
        if mode_repeating:
            self.action_decoder = ActionDecoderModeRepeat(
                latent1_dim=latent1_dim,
                latent2_dim=latent2_dim,
                mode_dim=mode_dim,
                action_dim=action_dim,
                hidden_units=hidden_units_action_decoder,
                std=std_decoder,
                action_normalized=action_normalized,
                leaky_slope=leaky_slope)
        else:
            self.action_decoder = ActionDecoderNormal(
                latent1_dim=latent1_dim,
                latent2_dim=latent2_dim,
                mode_dim=mode_dim,
                action_dim=action_dim,
                hidden_units=hidden_units_action_decoder,
                std=std_decoder,
                action_normalized=action_normalized,
                leaky_slope=leaky_slope)

    def sample_mode_prior(self, batch_size):
        """Sample ``batch_size`` mode vectors from the prior.

        NOTE(review): the random input presumably only fixes the batch size
        and device of the ConstantGaussian output — confirm against its
        implementation in code_slac.
        """
        mode_dist = self.mode_prior(torch.rand(batch_size, 1).to(self.device))
        return {'mode_dist': mode_dist,
                'mode_sample': mode_dist.sample()
                }

    def sample_mode_posterior(self, features_seq, actions_seq):
        """
        Args:
            features_seq   : (N, S + 1, feature_dim) tensor
            actions_seq    : (N, S, action_dim) tensor

        Returns:
            dict with the posterior distribution and a reparameterized
            sample from it (``rsample`` keeps gradients flowing).
        """
        # The encoder consumes sequence-first tensors: (S, N, dim).
        features_seq = features_seq.transpose(0, 1)
        actions_seq = actions_seq.transpose(0, 1)
        mode_dist = self.mode_encoder(features_seq=features_seq,
                                      actions_seq=actions_seq)
        mode_sample = mode_dist.rsample()
        return {'mode_dist': mode_dist,
                'mode_sample': mode_sample}
class BiRnn(BaseNetwork):
    """Bidirectional GRU that summarises a sequence into one vector.

    Output is the concatenation of the forward direction's last-step hidden
    output and the backward direction's step-0 hidden output, giving a
    tensor of shape (batch, 2 * hidden_rnn_dim).
    """
    def __init__(self,
                 input_dim,
                 hidden_rnn_dim,
                 rnn_layers,
                 rnn_dropout,
                 learn_initial_state=True):
        super(BiRnn, self).__init__()
        # RNN
        # Note: batch_first is left at the default (False), so inputs and
        # outputs are (seq, batch, feature) — see ``forward`` below.
        self.input_dim = input_dim
        self.hidden_rnn_dim = hidden_rnn_dim
        self.f_rnn = nn.GRU(self.input_dim, self.hidden_rnn_dim,
                            num_layers=rnn_layers,
                            dropout=rnn_dropout,
                            bidirectional=True)
        # Noisy hidden init state
        # Note: Only works with GRU right now
        self.learn_init = learn_initial_state
        if self.learn_init:
            # Initial state (dim: num_layers * num_directions, batch, hidden_size)
            self.init_network = Gaussian(input_dim=1,
                                         output_dim=self.f_rnn.hidden_size,
                                         hidden_units=[256])

    def forward(self, x):
        # x: (seq, batch, feature)
        num_sequence = x.size(0)
        batch_size = x.size(1)
        # Initial state (dim: num_layers * num_directions, batch, hidden_size)
        if self.learn_init:
            num_directions = 2 if self.f_rnn.bidirectional else 1
            init_input = torch.ones(self.f_rnn.num_layers * num_directions,
                                    batch_size,
                                    1).to(x.device)
            # Sampled (stochastic) learned initial hidden state.
            hidden_init = self.init_network(init_input).rsample()
            # GRU recursion and extraction of the ends of the two directions
            # (front: end of the forward pass, back: end of the backward pass)
            rnn_out, _ = self.f_rnn(x, hidden_init)
        else:
            # Don't use learned initial state and rely on pytorch init
            rnn_out, _ = self.f_rnn(x)
        # Split into the two directions
        (forward_out, backward_out) = torch.chunk(rnn_out, 2, dim=2)
        # Get the ends of the two directions
        front = forward_out[num_sequence - 1, :, :]
        back = backward_out[0, :, :]
        # Stack along hidden_dim and return
        return torch.cat([front, back], dim=1)
# TODO: Move this class as inner class to ModeDisentanglingNetwork as it is
#       too sophisticated
class ModeEncoder(BaseNetwork):
    """Encode feature and action sequences into a Gaussian over the mode.

    Runs a separate bidirectional RNN over each sequence (sequence-first
    layout, see BiRnn) and feeds the concatenated summaries into a Gaussian
    head.
    """
    def __init__(self,
                 feature_shape,
                 action_shape,
                 output_dim,  # typically mode_dim
                 hidden_rnn_dim,
                 hidden_units,
                 rnn_layers,
                 rnn_dropout
                 ):
        super(ModeEncoder, self).__init__()
        self.f_rnn_features = BiRnn(feature_shape,
                                    hidden_rnn_dim=hidden_rnn_dim,
                                    rnn_layers=rnn_layers,
                                    rnn_dropout=rnn_dropout)
        self.f_rnn_actions = BiRnn(action_shape,
                                   hidden_rnn_dim=hidden_rnn_dim,
                                   rnn_layers=rnn_layers,
                                   rnn_dropout=rnn_dropout)
        # Concatenation of 2*hidden_rnn_dim from the features rnn and
        # 2*hidden_rnn_dim from actions rnn, hence input dim is 4*hidden_rnn_dim
        self.f_dist = Gaussian(input_dim=4 * hidden_rnn_dim,
                               output_dim=output_dim,
                               hidden_units=hidden_units)

    def forward(self, features_seq, actions_seq):
        feat_res = self.f_rnn_features(features_seq)
        act_res = self.f_rnn_actions(actions_seq)
        rnn_result = torch.cat([feat_res, act_res], dim=1)
        # Feed result into Gaussian layer
        return self.f_dist(rnn_result)
# TODO: Rename this class as it isn't using a combination of features and
#       actions — it only encodes the features.
class ModeEncoderCombined(BaseNetwork):
    """Encoder for the mode posterior q(m | x(1:T)).

    NOTE(review): despite accepting ``actions_seq``, only the feature
    sequence is fed to the RNN; the actions are used solely to reconcile
    the sequence lengths (see ``forward``).
    """
    def __init__(self,
                 feature_shape,
                 action_dim,
                 output_dim,  # typically mode_dim
                 hidden_rnn_dim,
                 hidden_units,
                 rnn_dropout,
                 rnn_layers):
        # Fix: the original called ``super(BaseNetwork, self).__init__()``,
        # which starts the MRO lookup *after* BaseNetwork and therefore
        # skips BaseNetwork's own initializer. Start from this class, as
        # every sibling module in this file does.
        super(ModeEncoderCombined, self).__init__()
        # Bidirectional RNN over the feature sequence only (actions are
        # intentionally not concatenated; see class docstring).
        self.rnn = BiRnn(feature_shape,
                         hidden_rnn_dim=hidden_rnn_dim,
                         rnn_layers=rnn_layers,
                         rnn_dropout=rnn_dropout)
        # BiRnn stacks both directions, hence 2 * hidden_rnn_dim inputs.
        self.mode_dist = Gaussian(input_dim=2 * hidden_rnn_dim,
                                  output_dim=output_dim,
                                  hidden_units=hidden_units)

    def forward(self, features_seq, actions_seq):
        """Return the mode distribution for a sequence-first feature tensor.

        The state sequence is always one step longer or shorter than the
        action sequence; trim the longer one so both cover the same steps.
        """
        if features_seq.size(0) + 1 == actions_seq.size(0):
            actions_seq = actions_seq[:-1, :, :]
        elif features_seq.size(0) - 1 == actions_seq.size(0):
            features_seq = features_seq[:-1, :, :]
        else:
            raise ValueError('num sequences is not plausible')
        assert actions_seq.shape[:2] == features_seq.shape[:2]
        # Only the features are encoded (actions discarded after alignment).
        seq = features_seq
        rnn_result = self.rnn(seq)
        return self.mode_dist(rnn_result)
class ActionDecoderModeRepeat(BaseNetwork):
    """Action decoder that tiles the mode vector before concatenation.

    Repeating the (typically low-dimensional) mode vector increases its
    share of the decoder input relative to the dynamics latents —
    presumably so it is not drowned out; confirm against training results.
    """
    def __init__(self,
                 latent1_dim,
                 latent2_dim,
                 mode_dim,
                 action_dim,
                 hidden_units,
                 leaky_slope,
                 action_normalized,
                 std=None):
        super(ActionDecoderModeRepeat, self).__init__()
        latent_dim = latent1_dim + latent2_dim
        if latent_dim > mode_dim:
            # Precedence note: this is (10 * latent_dim) // mode_dim.
            self.mode_repeat = 10 * latent_dim//mode_dim
        else:
            self.mode_repeat = 1
        self.action_normalized = action_normalized
        self.net = Gaussian(latent_dim+self.mode_repeat*mode_dim,
                            action_dim,
                            hidden_units=hidden_units,
                            leaky_slope=leaky_slope,
                            std=std)

    def forward(self,
                latent1_sample,
                latent2_sample,
                mode_sample):
        # All three inputs must share the same rank for the concatenations.
        assert len(latent1_sample.shape) \
               == len(latent2_sample.shape) \
               == len(mode_sample.shape)
        # Tile the mode along the last dim, then join with the latents.
        mode_sample_input = torch.cat(self.mode_repeat * [mode_sample], dim=-1)
        net_input = torch.cat([latent1_sample, latent2_sample, mode_sample_input], dim=-1)
        action_dist = self.net(net_input)
        if self.action_normalized:
            # Squash the mean into [-1, 1] for normalized action spaces.
            action_dist.loc = torch.tanh(action_dist.loc)
        return action_dist
class ActionDecoderNormal(BaseNetwork):
    """Plain action decoder: p(a | z1, z2, m) as a Gaussian."""
    def __init__(self,
                 latent1_dim,
                 latent2_dim,
                 mode_dim,
                 action_dim,
                 hidden_units,
                 leaky_slope,
                 action_normalized,
                 std=None):
        super(ActionDecoderNormal, self).__init__()
        self.action_normalized = action_normalized
        self.net = Gaussian(
            input_dim=latent1_dim + latent2_dim + mode_dim,
            output_dim=action_dim,
            hidden_units=hidden_units,
            std=std,
            leaky_slope=leaky_slope)

    def forward(self,
                latent1_sample,
                latent2_sample,
                mode_sample):
        # NOTE(review): the inputs are passed as a *list*, unlike
        # ActionDecoderModeRepeat which concatenates them first — this
        # assumes Gaussian concatenates list inputs internally; confirm
        # against code_slac.network.latent.Gaussian.
        action_dist = self.net([latent1_sample, latent2_sample, mode_sample])
        if self.action_normalized:
            # Squash the mean into [-1, 1] for normalized action spaces.
            action_dist.loc = torch.tanh(action_dist.loc)
        return action_dist
| StarcoderdataPython |
345650 | <gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-18 13:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: drop Department.dep_name, add Department.prequel FK."""

    dependencies = [
        ('departmentsApp', '0006_remove_department_prequel'),
    ]

    operations = [
        # Remove the obsolete name column.
        migrations.RemoveField(
            model_name='department',
            name='dep_name',
        ),
        # (Re-)add ``prequel`` as a FK to Group; default=0 backfills
        # existing rows with that primary-key value.
        migrations.AddField(
            model_name='department',
            name='prequel',
            field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='departmentsApp.Group'),
        ),
    ]
| StarcoderdataPython |
11348978 | <gh_stars>1000+
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class PaymentTestCase(IntegrationTestCase):
    """Auto-generated holodeck tests for the Calls/Payments subresource.

    Each test either mocks a 500 to force a TwilioException and then checks
    the request that was issued, or mocks a canned success payload and
    checks that a non-None resource comes back.
    """

    def test_create_request(self):
        # A 500 response makes the client raise, but the request is still
        # recorded by the holodeck and can be asserted on.
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            self.client.api.v2010.accounts("AC<KEY>") \
                .calls("CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                .payments.create(idempotency_key="idempotency_key", status_callback="https://example.com")

        values = {'IdempotencyKey': "idempotency_key", 'StatusCallback': "https://example.com", }

        self.holodeck.assert_has_request(Request(
            'post',
            'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Calls/CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Payments.json',
            data=values,
        ))

    def test_start_payment_session_success_response(self):
        # Mock a 201 Created payload and verify the resource deserializes.
        self.holodeck.mock(Response(
            201,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "call_sid": "CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "date_created": "Wed, 18 Dec 2019 20:02:01 +0000",
                "date_updated": "Wed, 18 Dec 2019 20:02:01 +0000",
                "sid": "PKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Calls/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Payments/PKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
            }
            '''
        ))

        actual = self.client.api.v2010.accounts("AC<KEY>") \
            .calls("CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .payments.create(idempotency_key="idempotency_key", status_callback="https://example.com")

        self.assertIsNotNone(actual)

    def test_update_request(self):
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            self.client.api.v2010.accounts("AC<KEY>") \
                .calls("CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                .payments("PKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update(idempotency_key="idempotency_key", status_callback="https://example.com")

        values = {'IdempotencyKey': "idempotency_key", 'StatusCallback': "https://example.com", }

        self.holodeck.assert_has_request(Request(
            'post',
            'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Calls/CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Payments/PKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.json',
            data=values,
        ))

    def test_collect_credit_card_number_response(self):
        self.holodeck.mock(Response(
            200,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "call_sid": "CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "date_created": "Wed, 18 Dec 2019 20:02:01 +0000",
                "date_updated": "Wed, 18 Dec 2019 20:02:01 +0000",
                "sid": "PKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Calls/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Payments/PKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
            }
            '''
        ))

        actual = self.client.api.v2010.accounts("AC<KEY>") \
            .calls("CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .payments("PKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update(idempotency_key="idempotency_key", status_callback="https://example.com")

        self.assertIsNotNone(actual)

    def test_collect_credit_card_expiry_date_response(self):
        self.holodeck.mock(Response(
            200,
            '''
            {
                "account_sid": "<KEY>",
                "call_sid": "CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "date_created": "Wed, 18 Dec 2019 20:02:01 +0000",
                "date_updated": "Wed, 18 Dec 2019 20:02:01 +0000",
                "sid": "PKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Calls/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Payments/PKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
            }
            '''
        ))

        actual = self.client.api.v2010.accounts("AC<KEY>") \
            .calls("CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .payments("PKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update(idempotency_key="idempotency_key", status_callback="https://example.com")

        self.assertIsNotNone(actual)

    def test_complete_payment_response(self):
        self.holodeck.mock(Response(
            200,
            '''
            {
                "account_sid": "<KEY>",
                "call_sid": "CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "date_created": "Wed, 18 Dec 2019 20:02:01 +0000",
                "date_updated": "Wed, 18 Dec 2019 20:02:01 +0000",
                "sid": "PKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Calls/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Payments/PKaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
            }
            '''
        ))

        actual = self.client.api.v2010.accounts("AC<KEY>") \
            .calls("CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
            .payments("PKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update(idempotency_key="idempotency_key", status_callback="https://example.com")

        self.assertIsNotNone(actual)
| StarcoderdataPython |
4837118 | <reponame>git-men/bsm-django
from rest_framework.permissions import IsAdminUser as OriginIsAdminUser
from django.conf import settings
from api_basebone.utils.sign import common_make_sign
class IsAdminUser(OriginIsAdminUser):
    """Allow access to admin users, via session auth or a signed request.

    If the request headers carry all of the signature parameters, the
    request is authenticated by validating the signature; otherwise the
    standard session-based staff check is used.
    """

    # META keys that must all be present for a request to use signature auth.
    SIGN_HEADER_KEYS = (
        'HTTP_X_API_TIMESTAMP',
        'HTTP_X_API_NONCESTR',
        'HTTP_X_API_SIGNATURE',
    )

    def check_with_sign(self, request):
        """Return True if the request carries every signature header."""
        meta = request.META
        # Generator (not a list) so the scan short-circuits on first miss.
        return all(key in meta for key in self.SIGN_HEADER_KEYS)

    def validate_sign(self, request):
        """Recompute the signature and compare it to the request's."""
        key = settings.BUSINESS_KEY
        secret = settings.BUSINESS_SECRET
        timestamp = request.META.get('HTTP_X_API_TIMESTAMP')
        noncestr = request.META.get('HTTP_X_API_NONCESTR')
        sign = request.META.get('HTTP_X_API_SIGNATURE')
        return sign == common_make_sign(key, secret, timestamp, noncestr)

    def has_permission(self, request, view):
        """Signed requests are checked by signature; others by staff flag."""
        if self.check_with_sign(request):
            return self.validate_sign(request)
        # bool() so a user object is never returned as the permission value.
        return bool(request.user and request.user.is_staff)
| StarcoderdataPython |
8168810 | <reponame>markusmeingast/Airfoil-GAN
"""
This is an implementation of a conditional GAN (CGAN) network. The generator
predicts and image (in this case the number of points along the profile in x and
y).This is done by combining parameter terms as well as random noise, as well as
employing a gaussian blur to smooth the profile.
The discriminator receives the image as well as the parameter vector. These are
combined and finally tested for validity.
Losses are based on binary crossentropy only, i.e. the validity of results.
Issues were encountered using batch normalization. For now, it is ignored.
Observations:
- Concatenating target parameters with image prior to Conv2D layers produces
unstable solution. Parameter dependency seems present, but no clean solution obtained.
- Concatenating target parameters with processed image data, post Conv2D, results
in clean shapes, but no parameter dependency obtained (tested 1000+ epochs)
- LeakyReLU after late concatenation produces weird results
- BN for all Conv2D layers produces weird results.
- BN on D only produces weird results
- SN on all Conv2D layers seems to work fine, no parameter dependency as before
- Reduced learning rate seems to stabilize slightly, but issue still not avoided
- Scaling all parameters to normal distribution helped a lot!
- With CD as parameter, solutions still noisy. (might have messed this up... rerun)
- mode collapse... adding encoder (works ok it seems?)
- AE loss drops quite fast, so decreased learning rate for AE
Guidelines:
- smoothing works
- closing works, but not really required...
- no BN on D or G or E
- SN on all Conv2D layers
- LeakyReLU on all Conv2D and Dense layers
Aifoil data:
- use half cosine spacing for better LE resolution (TODO)
- apply SG filter as Lambda layer to generator, instead of Gaussian Blur?
"""
################################################################################
# %% IMPORT PACKAGES
################################################################################
import numpy as np
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, concatenate, Activation, Conv2DTranspose
from tensorflow.keras.layers import Reshape, Dense, BatchNormalization, Conv2D
from tensorflow.keras.layers import GaussianNoise, Dropout, LeakyReLU, Flatten, ReLU
from tensorflow.keras.layers import Lambda, ELU
from SNConv2D import SpectralNormalization
from tensorflow.keras.initializers import RandomNormal
from tensorflow.keras.optimizers import Adam, RMSprop, SGD
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.losses import BinaryCrossentropy
import tensorflow.keras.backend as K
################################################################################
# %% DEFINE INFOGAN CLASS
################################################################################
class CGAN():
    """
    Implementation of the CGAN network with curve smoothing in the generator.

    Builds the generator, discriminator, encoder, combined GAN and
    autoencoder models for conditional airfoil-profile generation.
    """

    ##### CLASS INIT
    def __init__(self, DAT_SHP=(64, 2, 1), PAR_DIM=2, LAT_DIM=100, DEPTH=32, LEARN_RATE=0.0002):
        """
        Initializing basic settings.

        DAT_SHP:    profile "image" shape — presumably (points, x/y, channels);
                    confirm against the data pipeline.
        PAR_DIM:    number of conditioning parameters.
        LAT_DIM:    latent noise dimension.
        DEPTH:      base channel depth of the conv stacks.
        LEARN_RATE: Adam learning rate (AE uses LEARN_RATE/4, see below).
        """
        ##### TARGET DATA SOURCE
        self.DAT_SHP = DAT_SHP
        self.PAR_DIM = PAR_DIM
        self.LAT_DIM = LAT_DIM
        self.DEPTH = DEPTH
        self.init = RandomNormal(mean=0.0, stddev=0.02)
        self.LEARN_RATE = LEARN_RATE
        self.optimizer = Adam(lr=self.LEARN_RATE, beta_1=0.5)
        # Output-smoothing mode applied in build_generator.
        self.BLUR = False # Gaussian / SG / False
        # If True, average first/last profile points to close the trailing edge.
        self.CLOSE = True

    ##### GAUSSIAN BLUR FILTER (ISSUES AT END POINTS)
    def kernel_init(self, shape, dtype=float, partition_info=None):
        """
        Definition of a length 7 gaussian blur kernel to be used to smooth the profile.

        Used as a fixed (non-trainable) kernel initializer for the smoothing
        Conv2D in build_generator. NOTE(review): implicitly returns None
        when ``self.BLUR`` is False — only referenced when BLUR is set.
        """
        if self.BLUR == "Gaussian":
            kernel = np.zeros(shape=shape)
            kernel[:,:,0,0] = np.array([[0.006],[0.061],[0.242],[0.383],[0.242],[0.061],[0.006]])
            return kernel
        elif self.BLUR == "SG":
            kernel = np.zeros(shape=shape)
            #kernel[:,:,0,0] = np.array([[-3],[12],[17],[12],[-3]])/35 # WINDOW 5
            kernel[:,:,0,0] = np.array([[-2],[3],[6],[7],[6],[3],[-2]])/21 # WINDOW 7
            return kernel

    ##### PAD EDGES TO MAKE GAUSSIAN BLUR WORK
    def edge_padding(self, X):
        """
        Custom padding layer to be called by Lambda. Adds each end point 3 times
        respectively to produce cleaner edge conditions.
        """
        if self.BLUR == 'SG':
            """
            SG-Filter padding with end points
            """
            # Repeat the first/last point 3x so the width-7 filter is valid
            # at the profile ends.
            Xlow = X[:, 0, :, :][:, np.newaxis, :, :]
            Xhigh = X[:, -1, :, :][:, np.newaxis, :, :]
            #X = K.concatenate((Xlow, Xlow, X, Xhigh, Xhigh), axis=1)
            X = K.concatenate((Xlow, Xlow, Xlow, X, Xhigh, Xhigh, Xhigh), axis=1)
            return X
        elif self.BLUR == 'Gaussian':
            """
            Gaussian blur padding with mirrored conditions to keep end points
            """
            ##### PAD START (mirror about the first point: 2*x0 - x_k)
            Xlow0 = X[:, 0, :, :]
            Xlow1 = (2.0*Xlow0 - X[:, 1, :, :])[:, np.newaxis, :, :]
            Xlow2 = (2.0*Xlow0 - X[:, 2, :, :])[:, np.newaxis, :, :]
            Xlow3 = (2.0*Xlow0 - X[:, 3, :, :])[:, np.newaxis, :, :]
            ##### PAD END (mirror about the last point)
            Xhigh0 = X[:, -1, :, :]
            Xhigh1 = (2.0*Xhigh0 - X[:, -2, :, :])[:, np.newaxis, :, :]
            Xhigh2 = (2.0*Xhigh0 - X[:, -3, :, :])[:, np.newaxis, :, :]
            Xhigh3 = (2.0*Xhigh0 - X[:, -4, :, :])[:, np.newaxis, :, :]
            ##### BUILD AND RETURN PADDED ARRAY
            X = K.concatenate((Xlow3,Xlow2,Xlow1,X,Xhigh1,Xhigh2,Xhigh3), axis=1)
            return X

    def closing(self, X):
        """Replace first/last points by their mean to close the trailing edge."""
        Xlow = X[:, 0, :, :][:, np.newaxis, :, :]
        Xhigh = X[:, -1, :, :][:, np.newaxis, :, :]
        Xmean = (Xlow+Xhigh)*0.5
        return K.concatenate((Xmean, X[:, 1:-1, :, :], Xmean), axis=1)

    def build_generator(self):
        """
        Generator network:
        - Input dimensions: (PAR_DIM)+(LAT_DIM)
        - Output dimensions: (DAT_SHP)
        """
        ##### INPUT LAYERS
        y_in = Input(shape=self.PAR_DIM)
        z_in = Input(shape=self.LAT_DIM)
        ##### COMBINE AND DENSE
        net = concatenate([y_in, z_in], axis=-1)
        # NOTE(review): ``/`` produces a float unit count here; ``//`` (as in
        # the Reshape below) was probably intended — confirm.
        net = Dense(self.DAT_SHP[0]/8*2*self.DEPTH*4)(net)
        net = LeakyReLU(alpha=0.2)(net)
        net = Reshape((self.DAT_SHP[0]//8, 2, self.DEPTH*4))(net)
        ##### CONV2DTRANSPOSE (upsample point dimension x2)
        net = SpectralNormalization(Conv2DTranspose(self.DEPTH*2, (4,2), strides=(2,1), padding='same', kernel_initializer=self.init))(net)
        net = LeakyReLU(alpha=0.2)(net)
        ##### CONV2DTRANSPOSE
        net = SpectralNormalization(Conv2DTranspose(self.DEPTH, (4,2), strides=(2,1), padding='same', kernel_initializer=self.init))(net)
        net = LeakyReLU(alpha=0.2)(net)
        ##### PREDICT COORDINATES (tanh -> coordinates in [-1, 1])
        net = Conv2DTranspose(1, (4,2), strides=(2,1), padding='same', kernel_initializer=self.init, activation='tanh')(net)
        ##### CLOSE TE?
        if self.CLOSE:
            net = Lambda(self.closing)(net)
        ##### GAUSSIAN BLUR? (fixed-kernel Conv2D over the padded profile)
        if self.BLUR == "Gaussian":
            net = Lambda(self.edge_padding)(net)
            net = Conv2D(1, (7,1), strides=(1,1), padding='valid', kernel_initializer=self.kernel_init, trainable=False, use_bias=False)(net)
        elif self.BLUR == "SG":
            net = Lambda(self.edge_padding)(net)
            net = Conv2D(1, (7,1), strides=(1,1), padding='valid', kernel_initializer=self.kernel_init, trainable=False, use_bias=False)(net)
        ##### OUTPUT
        X_out = net
        ##### BUILD MODEL (not compiled: trained through the GAN/AE models)
        model = Model(inputs=[y_in, z_in], outputs=X_out)
        ##### RETURN MODEL
        return model

    def build_discriminator(self):
        """
        Discriminator:
        Input dimensions: (DAT_SHP) image + (PAR_DIM) condition
        Output dimensions: (1) validity
        """
        ##### INPUT LAYERS
        X_in = Input(self.DAT_SHP)
        y_in = Input(self.PAR_DIM)
        ##### ADD NOISE TO IMAGE (stddev 0.00: currently disabled)
        Xnet = GaussianNoise(0.00)(X_in)
        # Project the condition vector to image shape and stack as channels.
        ynet = Dense(np.prod(self.DAT_SHP))(y_in)
        ynet = Reshape(self.DAT_SHP)(ynet)
        net = concatenate([Xnet, ynet], axis=-1)
        ##### CONV2D
        net = SpectralNormalization(Conv2D(self.DEPTH, (4,2), strides=(2,1), padding='same', kernel_initializer=self.init))(net)
        net = LeakyReLU(alpha=0.2)(net)
        ##### CONV2D
        net = SpectralNormalization(Conv2D(self.DEPTH*2, (4,2), strides=(2,1), padding='same', kernel_initializer=self.init))(net)
        net = LeakyReLU(alpha=0.2)(net)
        ##### CONV2D
        net = SpectralNormalization(Conv2D(self.DEPTH*4, (4,2), strides=(2,1), padding='same', kernel_initializer=self.init))(net)
        net = LeakyReLU(alpha=0.2)(net)
        ##### TO DENSE
        net = Flatten()(net)
        ##### DENSE LAYER
        net = Dense(64)(net)
        net = LeakyReLU(alpha=0.2)(net)
        ##### VALIDITY
        w_out = Dense(1, activation='sigmoid')(net)
        ##### BUILD AND COMPILE MODEL (one-sided label smoothing 0.3)
        model = Model(inputs=[X_in, y_in], outputs=w_out)
        model.compile(loss=BinaryCrossentropy(label_smoothing=0.3), metrics=['accuracy'], optimizer=Adam(lr=self.LEARN_RATE, beta_1=0.5))
        ##### RETURN MODEL
        return model

    def build_encoder(self):
        """
        Encoder (mirror of the discriminator trunk):
        Input dimensions: (DAT_SHP)
        Output dimensions: (PAR_DIM) parameters + (LAT_DIM) noise
        """
        ##### INPUT LAYERS
        X_in = Input(self.DAT_SHP)
        ##### ADD NOISE TO IMAGE (stddev 0.00: currently disabled)
        net = GaussianNoise(0.00)(X_in)
        ##### CONV2D
        net = SpectralNormalization(Conv2D(self.DEPTH, (4,2), strides=(2,1), padding='same', kernel_initializer=self.init))(net)
        net = LeakyReLU(alpha=0.2)(net)
        ##### CONV2D
        net = SpectralNormalization(Conv2D(self.DEPTH*2, (4,2), strides=(2,1), padding='same', kernel_initializer=self.init))(net)
        net = LeakyReLU(alpha=0.2)(net)
        ##### CONV2D
        net = SpectralNormalization(Conv2D(self.DEPTH*4, (4,2), strides=(2,1), padding='same', kernel_initializer=self.init))(net)
        net = LeakyReLU(alpha=0.2)(net)
        ##### TO DENSE
        net = Flatten()(net)
        ##### DENSE LAYER
        net = Dense(64)(net)
        net = LeakyReLU(alpha=0.2)(net)
        ##### PARAMETERS
        y_out = Dense(self.PAR_DIM, activation='linear')(net)
        ##### NOISE
        z_out = Dense(self.LAT_DIM, activation='linear')(net)
        ##### BUILD MODEL (not compiled: trained through the autoencoder)
        model = Model(inputs=[X_in], outputs=[y_out, z_out])
        ##### RETURN MODEL
        return model

    def build_gan(self, g_model, d_model):
        """
        GAN network combined generator and discriminator networks for generator
        training. The discriminator is not trained within this model.
        """
        ##### TRAIN ONLY GENERATOR
        d_model.trainable = False
        ##### INPUT LAYERS
        y_in = Input(shape=self.PAR_DIM)
        z_in = Input(shape=self.LAT_DIM)
        ##### GENERATE IMAGE
        X = g_model([y_in, z_in])
        ##### TEST IMAGE
        w_out = d_model([X, y_in])
        ##### BUILD AND COMPILE MODEL
        gan_model = Model(inputs = [y_in, z_in], outputs = [w_out])
        gan_model.compile(loss=['binary_crossentropy'], metrics=['accuracy'], optimizer=Adam(lr=self.LEARN_RATE, beta_1=0.5))
        ##### RETURN MODEL
        return gan_model

    def build_autoencoder(self, e_model, g_model):
        """
        Autoencoder (used against mode collapse, see module docstring):
        Input:
            * image of IMG_SHP
        Output:
            * image of IMG_SHP
        """
        ##### INPUT IMAGE
        X_in = Input(self.DAT_SHP)
        ##### INTERMEDIATE OUTPUT (ONE-HOT-VECTOR AND LATENT NOISE)
        y, z = e_model(X_in)
        ##### GENERATOR OUTPUT
        X_out = g_model([y, z])
        ##### BUILD, COMPILE AND RETURN MODEL
        # AE uses a quartered learning rate: its loss drops much faster
        # than the GAN losses (see module docstring observations).
        model = Model(inputs = X_in, outputs = X_out)
        model.compile(loss='mean_absolute_error', optimizer=Adam(lr=self.LEARN_RATE/4, beta_1=0.5))
        return model
| StarcoderdataPython |
class Solution:
    def isBoomerang(self, points: List[List[int]]) -> bool:
        """Return True iff the three points form a proper boomerang.

        A boomerang requires three pairwise-distinct, non-collinear points.
        The 2-D cross product of the vectors a->b and a->c is zero exactly
        when the points are collinear (which also covers duplicates).
        """
        (ax, ay), (bx, by), (cx, cy) = points
        cross = (by - ay) * (ax - cx) - (cy - ay) * (ax - bx)
        return cross != 0
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.