id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
1842880 | """Specifies the current version number of OpenCOOD."""
__version__ = "0.1.0"
| StarcoderdataPython |
8001653 |
import json
import click
import requests
from . import configure_env_var
_BASE_URL = 'BASE_URL'
@click.command('login')
@click.option('-b', '--base_url', envvar=_BASE_URL,
              show_default=True, type=str,
              help=f"Base api url (overrides {_BASE_URL} env var).")
@click.argument('output', default='-', type=click.File('w'), required=False)
@click.option('-u', '--username', prompt="Your username",
              help="Username with no blank spaces.")
@click.option('-p', '--password', prompt=True, hide_input=True,
              help="Password with at least 8 characters.")
@click.pass_context
def initiate_login_session(ctx, base_url, output, username, password):
    """Login to the API and write the access token to OUTPUT.

    POSTs the credentials to ``<base_url>/token/``.  On HTTP 200 the JSON
    token payload is written to OUTPUT (stdout by default); otherwise the
    status code is echoed to stderr-style output and the raw response body
    is written to OUTPUT for inspection.
    """
    base_url = configure_env_var(_BASE_URL, base_url)
    token_url = f'{base_url}/token/'
    credentials = {
        'username': username,
        'password': password
    }
    # A timeout keeps the CLI from hanging forever on an unreachable host.
    res = requests.post(token_url, data=credentials, timeout=30)
    if res.status_code == 200:
        click.echo(json.dumps(res.json()), file=output)
    else:
        click.echo(f'Error code: {res.status_code}')
        click.echo(res.text, file=output)
| StarcoderdataPython |
5011171 | <reponame>gokulyc/python123<filename>selenium_py/TestCases/main.py<gh_stars>0
import unittest
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
import page
import time
class PythonOrgSearch(unittest.TestCase):
    """Selenium smoke tests against https://www.python.org using page objects."""

    def setUp(self):
        # webdriver_manager downloads a matching chromedriver binary on demand.
        self.driver = webdriver.Chrome(ChromeDriverManager().install())
        self.driver.get("https://www.python.org")

    def test_example(self):
        print("Test 1")
        assert True

    def test_search_python(self):
        # Page-object-driven flow: see page.MainPage / page.SearchResultPage.
        mainPage = page.MainPage(self.driver)
        assert mainPage.is_title_match()
        mainPage.search_text_element = "pycon"
        mainPage.click_go_button()
        search_result_page = page.SearchResultPage(self.driver)
        # NOTE(review): is_results_found is not called here, so this asserts
        # that the attribute itself is truthy — confirm it is a property.
        assert search_result_page.is_results_found

    def tearDown(self):
        time.sleep(10)  # NOTE(review): keeps the browser visible; remove for CI speed.
        # quit() (not close()) ends the WebDriver session and the chromedriver
        # process; close() only closes the current window and can leak drivers.
        self.driver.quit()
if __name__=="__main__":
unittest.main() | StarcoderdataPython |
8027369 | '''
Utility for plotting gaussian distributions
with different variance
Project: https://github.com/roatienza/dl-keras
Dependency: keras 2.0
Usage: python3 <this file>
'''
import numpy as np
import matplotlib.pyplot as plt
# Plot two Gaussian clouds with different variance plus two decision
# boundaries, illustrating why least squares (LSGAN) penalizes samples that
# are far from the boundary.  (Removed the unused `want_noise` flag.)

# grayscale plot, comment if color is wanted
plt.style.use('grayscale')

# High-variance cloud: stands in for generated ("fake") samples.
mu = [0, 0]
cov = [[25, 0], [0, 25]]
x, y = np.random.multivariate_normal(mu, cov, 100).T
plt.plot(x, y, '+', label="Fake samples")

# Low-variance cloud: stands in for real samples.
mu = [0, 0]
cov = [[2, 0], [0, 2]]
x, y = np.random.multivariate_normal(mu, cov, 100).T
plt.plot(x, y, 'o', label="Real samples")

# Diagonal (sigmoid) boundary vs. horizontal (least-squares) boundary.
x = np.linspace(-15, 15)
y = x
plt.plot(x, y, '--', label="Sigmoid decision boundary")
x = np.linspace(-15, 15)
y = np.zeros(x.shape)
plt.plot(x, y, '-', label="Least squares decision boundary")

plt.legend(loc=0)
plt.savefig("lsgan.png")
plt.show()
plt.close('all')
| StarcoderdataPython |
3336360 | """
Datetime-related misc utilities
:author: <NAME>
"""
# Public API of this module.
__all__ = ['format_duration', 'timedelta_to_str']
def format_duration(seconds: float) -> str:
    """Format a duration in seconds as ``(Dd)HH:MM:SS``.

    ``time.strftime()`` is not suitable for durations, hence this helper.
    Negative durations are prefixed with ``-``; the ``Dd`` prefix appears
    only when the duration spans at least one day.  Integer input yields
    whole seconds (``HH:MM:SS``); float input keeps two decimal places
    (``HH:MM:SS.ss``).

    :param seconds: Number of seconds to format
    :return: Given number of seconds as (Dd)HH:MM:SS
    """
    sign = '-' if seconds < 0 else ''
    minutes, secs = divmod(abs(seconds), 60)
    hours, minutes = divmod(int(minutes), 60)
    days, hours = divmod(hours, 24)
    prefix = f'{sign}{days}d' if days > 0 else sign
    if isinstance(secs, int):
        return f'{prefix}{hours:02d}:{minutes:02d}:{secs:02d}'
    return f'{prefix}{hours:02d}:{minutes:02d}:{secs:05.2f}'
def timedelta_to_str(delta):
    """Format a ``datetime.timedelta`` as ``H:MM:SS`` or ``Dd, H:MM:SS``.

    Uses ``delta.days`` / ``delta.seconds`` directly, so negative deltas
    follow Python's normalized representation (e.g. ``-1d, 23:59:59``).
    """
    minutes, seconds = divmod(delta.seconds, 60)
    hours, minutes = divmod(minutes, 60)
    clock = f'{hours:d}:{minutes:02d}:{seconds:02d}'
    if delta.days == 0:
        return clock
    return f'{delta.days:d}d, {clock}'
| StarcoderdataPython |
3488874 | from typing import Optional, Union
from pydantic import BaseModel
from pydantic.color import Color
from pydantic.fields import Field
from typing_extensions import Literal
class ThemeColors(BaseModel):
    """Optional color overrides making up a theme's palette.

    Every field is a pydantic ``Color``; fields left unset are filled in
    from the base theme type (see ``ThemeContribution.type``).
    """
    # NOTE(review): field meanings inferred from names only — confirm
    # against the consuming UI code.
    canvas: Optional[Color]
    console: Optional[Color]
    background: Optional[Color]
    foreground: Optional[Color]
    primary: Optional[Color]
    secondary: Optional[Color]
    highlight: Optional[Color]
    text: Optional[Color]
    icon: Optional[Color]
    warning: Optional[Color]
    current: Optional[Color]
class ThemeContribution(BaseModel):
    """Declaration of a contributed color theme: label, id, base type, palette."""
    label: str = Field(description="Label of the color theme as shown in the UI.")
    # NOTE(review): `id` intentionally mirrors the settings key and shadows
    # the builtin — renaming would break the serialized schema.
    id: str = Field(description="Id of the color theme as used in the user settings.")
    type: Union[Literal["dark"], Literal["light"]] = Field(
        description="Base theme type, used for icons and filling in unprovided colors"
    )
    colors: ThemeColors = Field(description="Theme colors")
| StarcoderdataPython |
8071560 | """
TripsLayer
==========
Plot of a single vehicle trip within San Francisco, fading in from the origin.
Adapted from a deck.gl documentation example.
"""
import pydeck as pdk
import pandas as pd
TRIPS_LAYER_DATA = "https://raw.githubusercontent.com/visgl/deck.gl-data/master/website/sf.trips.json"  # noqa

df = pd.read_json(TRIPS_LAYER_DATA)

def _waypoint_coordinates(waypoints):
    """Pull the lon/lat pairs out of a trip's waypoint records."""
    return [item["coordinates"] for item in waypoints]

def _waypoint_timestamps(waypoints):
    """Shift raw epoch-ms timestamps so the animation clock starts near zero."""
    return [item["timestamp"] - 1554772579000 for item in waypoints]

# Unpack the nested waypoint records into the flat parallel columns that
# TripsLayer expects, then drop the original nested column.
df["coordinates"] = df["waypoints"].apply(_waypoint_coordinates)
df["timestamps"] = df["waypoints"].apply(_waypoint_timestamps)
df.drop(["waypoints"], axis=1, inplace=True)

layer = pdk.Layer(
    "TripsLayer",
    df,
    get_path="coordinates",
    get_timestamps="timestamps",
    get_color=[253, 128, 93],
    opacity=0.8,
    width_min_pixels=5,
    rounded=True,
    trail_length=600,
    current_time=500,
)

# Camera centered on San Francisco, tilted for a 3D feel.
view_state = pdk.ViewState(latitude=37.7749295, longitude=-122.4194155, zoom=11, bearing=0, pitch=45)

# Render
r = pdk.Deck(layers=[layer], initial_view_state=view_state)
r.to_html("trips_layer.html", notebook_display=False)
| StarcoderdataPython |
9752656 | <gh_stars>0
# NOTE(review): removed a stray vim command (":wq") that preceded this script
# in the original file — it was a SyntaxError left over from an editor session.
# The script demonstrates escape sequences (\t, \n, \\) in single- and
# triple-quoted string literals.
tabby_cat = "\t I'm tabbed in."
persian_cat = "I'm split\non a line."
backslash_cat = "I'm \\ a\\ cat."
fat_cat = """
I'll do a list:
\t*Cat food
\t*Fishie
\t*Catnip\n\t*Grass
"""
print(tabby_cat)
print(persian_cat)
print(backslash_cat)
print(fat_cat)

# Same exercise repeated with a second set of strings.
tabby_rad = "\t I'm tabbed in."
persian_rad = "I'm split\non a line."
backslash_rad = "I'm \\ a\\ rad."
fat_rad = """
I'll do a list:
\t*rad food
\t*Fishie
\t*Catnip\n\t*Grass
"""
print(tabby_rad)
print(persian_rad)
print(backslash_rad)
print(fat_rad)
| StarcoderdataPython |
4941188 | import os
from PIL import Image
from torch.utils.data import Dataset
import torch
import torchvision.transforms as transforms
import numpy as np
def get_transforms(cfg):
    """Build the train, single-crop test and ten-crop test pipelines.

    :param cfg: config node exposing DATA.RESIZE_SIZE, DATA.CROP_SIZE,
        DATA.IMAGE_MEAN and DATA.IMAGE_STD.
    :return: tuple (train_transform, test_transform, test_tencrops_transform)
    """
    resize_hw = (cfg.DATA.RESIZE_SIZE, cfg.DATA.RESIZE_SIZE)
    crop_hw = (cfg.DATA.CROP_SIZE, cfg.DATA.CROP_SIZE)
    normalize = transforms.Normalize(cfg.DATA.IMAGE_MEAN, cfg.DATA.IMAGE_STD)

    # Training: random crop + horizontal-flip augmentation.
    train_transform = transforms.Compose([
        transforms.Resize(resize_hw),
        transforms.RandomCrop(crop_hw),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    # Single-crop evaluation: resize straight to the crop size.
    test_transform = transforms.Compose([
        transforms.Resize(crop_hw),
        transforms.ToTensor(),
        normalize,
    ])
    # Ten-crop evaluation: stack the ten normalized crops into one tensor.
    test_tencrops_transform = transforms.Compose([
        transforms.Resize(resize_hw),
        transforms.TenCrop(cfg.DATA.CROP_SIZE),
        transforms.Lambda(lambda crops: torch.stack(
            [normalize(transforms.ToTensor()(c)) for c in crops])),
    ])
    return train_transform, test_transform, test_tencrops_transform
class CUBDataset(Dataset):
    """CUB-200-2011 dataset reader.

    Parses the four space-separated metadata files shipped with the dataset
    (images.txt, image_class_labels.txt, train_test_split.txt,
    bounding_boxes.txt) and serves the train split ('1' in
    train_test_split.txt) or the test split ('0').
    """
    def __init__(self, root, cfg, is_train):
        # NOTE(review): the open(...) handles below are never closed; `with`
        # blocks would be safer, though behavior is unchanged here.
        self.root = root
        self.cfg = cfg
        self.is_train = is_train
        self.resize_size = cfg.DATA.RESIZE_SIZE
        self.crop_size = cfg.DATA.CROP_SIZE
        # Each metadata file carries a row id in column 0; strip it.
        self.image_list = self.remove_1st_column(open(
            os.path.join(root, 'images.txt'), 'r').readlines())
        self.label_list = self.remove_1st_column(open(
            os.path.join(root, 'image_class_labels.txt'), 'r').readlines())
        self.split_list = self.remove_1st_column(open(
            os.path.join(root, 'train_test_split.txt'), 'r').readlines())
        self.bbox_list = self.remove_1st_column(open(
            os.path.join(root, 'bounding_boxes.txt'), 'r').readlines())
        self.train_transform, self.onecrop_transform, self.tencrops_transform = get_transforms(cfg)
        # Test-time transform: ten-crop evaluation is opt-in via config.
        if cfg.TEST.TEN_CROPS:
            self.test_transform = self.tencrops_transform
        else:
            self.test_transform = self.onecrop_transform
        # train_test_split.txt marks train rows '1' and test rows '0'.
        if is_train:
            self.index_list = self.get_index(self.split_list, '1')
        else:
            self.index_list = self.get_index(self.split_list, '0')
    def get_index(self, list, value):
        """Return the positions of entries in *list* equal to *value*.

        NOTE(review): the parameter name shadows the builtin ``list``.
        """
        index = []
        for i in range(len(list)):
            if list[i] == value:
                index.append(i)
        return index
    def remove_1st_column(self, input_list):
        """Strip the leading id column from each line of a metadata file.

        Two-column lines yield the remaining string (e.g. a path or label);
        wider lines yield the list of remaining columns (e.g. bbox coords).
        """
        output_list = []
        for i in range(len(input_list)):
            # input_list[i][:-1] drops the trailing newline before splitting.
            if len(input_list[i][:-1].split(' '))==2:
                output_list.append(input_list[i][:-1].split(' ')[1])
            else:
                output_list.append(input_list[i][:-1].split(' ')[1:])
        return output_list
    def __getitem__(self, idx):
        """Return (image, label) when training, else (image, label, gt_bbox, name)."""
        name = self.image_list[self.index_list[idx]]
        image_path = os.path.join(self.root, 'images', name)
        image = Image.open(image_path).convert('RGB')
        image_size = list(image.size)
        # Labels in image_class_labels.txt are 1-based; make them 0-based.
        label = int(self.label_list[self.index_list[idx]])-1
        if self.is_train:
            image = self.train_transform(image)
            return image, label
        else:
            image = self.test_transform(image)
            bbox = self.bbox_list[self.index_list[idx]]
            bbox = [int(float(value)) for value in bbox]
            [x, y, bbox_width, bbox_height] = bbox
            # if self.is_train:
            #     resize_size = self.resize_size
            #     crop_size = self.crop_size
            #     shift_size = (resize_size - crop_size) // 2
            # Eval path: the image is resized straight to crop_size (see
            # test transform above), so the bbox is rescaled with no
            # center-crop shift.
            resize_size = self.crop_size
            crop_size = self.crop_size
            shift_size = 0
            [image_width, image_height] = image_size
            left_bottom_x = int(max(x / image_width * resize_size - shift_size, 0))
            left_bottom_y = int(max(y / image_height * resize_size - shift_size, 0))
            right_top_x = int(min((x + bbox_width) / image_width * resize_size - shift_size, crop_size - 1))
            right_top_y = int(min((y + bbox_height) / image_height * resize_size - shift_size, crop_size - 1))
            # gt_bbox = [left_bottom_x, left_bottom_y, right_top_x, right_top_y]
            # gt_bbox = torch.tensor(gt_bbox)
            # Serialize the box as a space-separated string so default
            # collate can batch it alongside tensors.
            gt_bbox = np.array([left_bottom_x, left_bottom_y, right_top_x, right_top_y]).reshape(-1)
            gt_bbox = " ".join(list(map(str, gt_bbox)))
            return image, label, gt_bbox, name
    def __len__(self):
        """Number of samples in the selected split."""
        return len(self.index_list)
if __name__ == "__main__":
import os, sys
sys.path.insert(0, '../../lib')
from config.default import cfg_from_list, cfg_from_file, update_config
from config.default import config as cfg
# parameters: --config_file ../../configs/CUB/cub_vgg16_cam.yaml
update_config()
cfg.BASIC.ROOT_DIR = os.path.join(os.path.dirname(__file__), '..', '..')
dataset_root = os.path.join(cfg.BASIC.ROOT_DIR, cfg.DATA.DATADIR)
# train_loader = torch.utils.data.DataLoader(
# CUBDataset(root=dataset_root, cfg=cfg, is_train=True),
# batch_size=1, shuffle=False, num_workers=1, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
CUBDataset(root=dataset_root, cfg=cfg, is_train=False),
batch_size=1, shuffle=False, num_workers=1, pin_memory=True)
for image, label, gt_bbox, name in val_loader:
print(label)
print(image.shape)
| StarcoderdataPython |
1673930 | <filename>dask/dataframe/io/tests/test_io.py
from concurrent.futures import ThreadPoolExecutor
from threading import Lock
import numpy as np
import pandas as pd
import pytest
import dask.array as da
import dask.dataframe as dd
from dask.dataframe._compat import tm
from dask.dataframe.io.io import _meta_from_array
from dask.dataframe.utils import assert_eq, is_categorical_dtype
from dask.delayed import Delayed, delayed
from dask.utils import tmpfile
####################
# Arrays and BColz #
####################
def test_meta_from_array():
    """_meta_from_array infers a DataFrame meta with per-column dtypes."""
    x = np.array([[1, 2], [3, 4]], dtype=np.int64)
    res = _meta_from_array(x)
    assert isinstance(res, pd.DataFrame)
    assert res[0].dtype == np.int64
    assert res[1].dtype == np.int64
    tm.assert_index_equal(res.columns, pd.Index([0, 1]))
    x = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float64)
    res = _meta_from_array(x, columns=["a", "b"])
    assert isinstance(res, pd.DataFrame)
    assert res["a"].dtype == np.float64
    assert res["b"].dtype == np.float64
    tm.assert_index_equal(res.columns, pd.Index(["a", "b"]))
    # Too many column names for a 2-column array must raise.
    with pytest.raises(ValueError):
        _meta_from_array(x, columns=["a", "b", "c"])
    # NOTE(review): the remainder exercises dd.from_array partitioning, not
    # _meta_from_array — arguably belongs in test_from_array.
    np.random.seed(42)
    x = np.random.rand(201, 2)
    x = dd.from_array(x, chunksize=50, columns=["a", "b"])
    assert len(x.divisions) == 6  # Should be 5 partitions and the end
def test_meta_from_1darray():
x = np.array([1.0, 2.0, 3.0], dtype=np.float64)
res = _meta_from_array(x)
assert isinstance(res, pd.Series)
assert res.dtype == np.float64
x = np.array([1, 2, 3], dtype=np.object_)
res = _meta_from_array(x, columns="x")
assert isinstance(res, pd.Series)
assert res.name == "x"
assert res.dtype == np.object_
x = np.array([1, 2, 3], dtype=np.object_)
res = _meta_from_array(x, columns=["x"])
assert isinstance(res, pd.DataFrame)
assert res["x"].dtype == np.object_
tm.assert_index_equal(res.columns, pd.Index(["x"]))
with pytest.raises(ValueError):
_meta_from_array(x, columns=["a", "b"])
def test_meta_from_recarray():
x = np.array(
[(i, i * 10) for i in range(10)], dtype=[("a", np.float64), ("b", np.int64)]
)
res = _meta_from_array(x)
assert isinstance(res, pd.DataFrame)
assert res["a"].dtype == np.float64
assert res["b"].dtype == np.int64
tm.assert_index_equal(res.columns, pd.Index(["a", "b"]))
res = _meta_from_array(x, columns=["b", "a"])
assert isinstance(res, pd.DataFrame)
assert res["a"].dtype == np.float64
assert res["b"].dtype == np.int64
tm.assert_index_equal(res.columns, pd.Index(["b", "a"]))
with pytest.raises(ValueError):
_meta_from_array(x, columns=["a", "b", "c"])
def test_from_array():
x = np.arange(10 * 3).reshape(10, 3)
d = dd.from_array(x, chunksize=4)
assert isinstance(d, dd.DataFrame)
tm.assert_index_equal(d.columns, pd.Index([0, 1, 2]))
assert d.divisions == (0, 4, 8, 9)
assert (d.compute().values == x).all()
d = dd.from_array(x, chunksize=4, columns=list("abc"))
assert isinstance(d, dd.DataFrame)
tm.assert_index_equal(d.columns, pd.Index(["a", "b", "c"]))
assert d.divisions == (0, 4, 8, 9)
assert (d.compute().values == x).all()
with pytest.raises(ValueError):
dd.from_array(np.ones(shape=(10, 10, 10)))
def test_from_array_with_record_dtype():
x = np.array([(i, i * 10) for i in range(10)], dtype=[("a", "i4"), ("b", "i4")])
d = dd.from_array(x, chunksize=4)
assert isinstance(d, dd.DataFrame)
assert list(d.columns) == ["a", "b"]
assert d.divisions == (0, 4, 8, 9)
assert (d.compute().to_records(index=False) == x).all()
def test_from_bcolz_multiple_threads():
bcolz = pytest.importorskip("bcolz")
def check(i):
t = bcolz.ctable(
[[1, 2, 3], [1.0, 2.0, 3.0], ["a", "b", "a"]], names=["x", "y", "a"]
)
d = dd.from_bcolz(t, chunksize=2)
assert d.npartitions == 2
assert is_categorical_dtype(d.dtypes["a"])
assert list(d.x.compute(scheduler="sync")) == [1, 2, 3]
assert list(d.a.compute(scheduler="sync")) == ["a", "b", "a"]
d = dd.from_bcolz(t, chunksize=2, index="x")
L = list(d.index.compute(scheduler="sync"))
assert L == [1, 2, 3] or L == [1, 3, 2]
# Names
assert sorted(dd.from_bcolz(t, chunksize=2).dask) == sorted(
dd.from_bcolz(t, chunksize=2).dask
)
assert sorted(dd.from_bcolz(t, chunksize=2).dask) != sorted(
dd.from_bcolz(t, chunksize=3).dask
)
with ThreadPoolExecutor(5) as pool:
list(pool.map(check, range(5)))
def test_from_bcolz():
bcolz = pytest.importorskip("bcolz")
t = bcolz.ctable(
[[1, 2, 3], [1.0, 2.0, 3.0], ["a", "b", "a"]], names=["x", "y", "a"]
)
d = dd.from_bcolz(t, chunksize=2)
assert d.npartitions == 2
assert is_categorical_dtype(d.dtypes["a"])
assert list(d.x.compute(scheduler="sync")) == [1, 2, 3]
assert list(d.a.compute(scheduler="sync")) == ["a", "b", "a"]
L = list(d.index.compute(scheduler="sync"))
assert L == [0, 1, 2]
d = dd.from_bcolz(t, chunksize=2, index="x")
L = list(d.index.compute(scheduler="sync"))
assert L == [1, 2, 3] or L == [1, 3, 2]
# Names
assert sorted(dd.from_bcolz(t, chunksize=2).dask) == sorted(
dd.from_bcolz(t, chunksize=2).dask
)
assert sorted(dd.from_bcolz(t, chunksize=2).dask) != sorted(
dd.from_bcolz(t, chunksize=3).dask
)
dsk = dd.from_bcolz(t, chunksize=3).dask
t.append((4, 4.0, "b"))
t.flush()
assert sorted(dd.from_bcolz(t, chunksize=2).dask) != sorted(dsk)
def test_from_bcolz_no_lock():
bcolz = pytest.importorskip("bcolz")
locktype = type(Lock())
t = bcolz.ctable(
[[1, 2, 3], [1.0, 2.0, 3.0], ["a", "b", "a"]], names=["x", "y", "a"], chunklen=2
)
a = dd.from_bcolz(t, chunksize=2)
b = dd.from_bcolz(t, chunksize=2, lock=True)
c = dd.from_bcolz(t, chunksize=2, lock=False)
assert_eq(a, b)
assert_eq(a, c)
assert not any(isinstance(item, locktype) for v in c.dask.values() for item in v)
def test_from_bcolz_filename():
bcolz = pytest.importorskip("bcolz")
with tmpfile(".bcolz") as fn:
t = bcolz.ctable(
[[1, 2, 3], [1.0, 2.0, 3.0], ["a", "b", "a"]],
names=["x", "y", "a"],
rootdir=fn,
)
t.flush()
d = dd.from_bcolz(fn, chunksize=2)
assert list(d.x.compute()) == [1, 2, 3]
def test_from_bcolz_column_order():
bcolz = pytest.importorskip("bcolz")
t = bcolz.ctable(
[[1, 2, 3], [1.0, 2.0, 3.0], ["a", "b", "a"]], names=["x", "y", "a"]
)
df = dd.from_bcolz(t, chunksize=2)
assert list(df.loc[0].compute().columns) == ["x", "y", "a"]
def test_from_pandas_dataframe():
    """Round-trip a DataFrame through from_pandas via npartitions and chunksize."""
    a = list("aaaaaaabbbbbbbbccccccc")
    df = pd.DataFrame(
        dict(a=a, b=np.random.randn(len(a))),
        index=pd.date_range(start="20120101", periods=len(a)),
    )
    ddf = dd.from_pandas(df, 3)
    assert len(ddf.dask) == 3
    assert len(ddf.divisions) == len(ddf.dask) + 1
    assert isinstance(ddf.divisions[0], type(df.index[0]))
    tm.assert_frame_equal(df, ddf.compute())
    ddf = dd.from_pandas(df, chunksize=8)
    msg = "Exactly one of npartitions and chunksize must be specified."
    # Passing both or neither of npartitions/chunksize must raise.
    with pytest.raises(ValueError) as err:
        dd.from_pandas(df, npartitions=2, chunksize=2)
    assert msg in str(err.value)
    with pytest.raises((ValueError, AssertionError)) as err:
        dd.from_pandas(df)
    assert msg in str(err.value)
    # Re-check the invariants for the chunksize=8 frame built above
    # (22 rows / 8 per chunk -> 3 partitions).
    assert len(ddf.dask) == 3
    assert len(ddf.divisions) == len(ddf.dask) + 1
    assert isinstance(ddf.divisions[0], type(df.index[0]))
    tm.assert_frame_equal(df, ddf.compute())
def test_from_pandas_small():
df = pd.DataFrame({"x": [1, 2, 3]})
for i in [1, 2, 30]:
a = dd.from_pandas(df, i)
assert len(a.compute()) == 3
assert a.divisions[0] == 0
assert a.divisions[-1] == 2
a = dd.from_pandas(df, chunksize=i)
assert len(a.compute()) == 3
assert a.divisions[0] == 0
assert a.divisions[-1] == 2
for sort in [True, False]:
for i in [0, 2]:
df = pd.DataFrame({"x": [0] * i})
ddf = dd.from_pandas(df, npartitions=5, sort=sort)
assert_eq(df, ddf)
s = pd.Series([0] * i, name="x", dtype=int)
ds = dd.from_pandas(s, npartitions=5, sort=sort)
assert_eq(s, ds)
@pytest.mark.parametrize("n", [1, 2, 4, 5])
def test_from_pandas_npartitions_is_accurate(n):
df = pd.DataFrame(
{"x": [1, 2, 3, 4, 5, 6], "y": list("abdabd")}, index=[10, 20, 30, 40, 50, 60]
)
assert dd.from_pandas(df, npartitions=n).npartitions <= n
def test_from_pandas_series():
n = 20
s = pd.Series(np.random.randn(n), index=pd.date_range(start="20120101", periods=n))
ds = dd.from_pandas(s, 3)
assert len(ds.dask) == 3
assert len(ds.divisions) == len(ds.dask) + 1
assert isinstance(ds.divisions[0], type(s.index[0]))
tm.assert_series_equal(s, ds.compute())
ds = dd.from_pandas(s, chunksize=8)
assert len(ds.dask) == 3
assert len(ds.divisions) == len(ds.dask) + 1
assert isinstance(ds.divisions[0], type(s.index[0]))
tm.assert_series_equal(s, ds.compute())
def test_from_pandas_non_sorted():
    """With sort=False an unsorted index must yield unknown divisions."""
    df = pd.DataFrame({"x": [1, 2, 3]}, index=[3, 1, 2])
    for kwargs in ({"npartitions": 2}, {"chunksize": 2}):
        ddf = dd.from_pandas(df, sort=False, **kwargs)
        assert not ddf.known_divisions
        assert_eq(df, ddf)
def test_from_pandas_single_row():
    """A one-row frame collapses to identical start/end divisions."""
    single = pd.DataFrame({"x": [1]}, index=[1])
    dsingle = dd.from_pandas(single, npartitions=1)
    assert dsingle.divisions == (1, 1)
    assert_eq(dsingle, single)
def test_from_pandas_with_datetime_index():
df = pd.DataFrame(
{
"Date": [
"2015-08-28",
"2015-08-27",
"2015-08-26",
"2015-08-25",
"2015-08-24",
"2015-08-21",
"2015-08-20",
"2015-08-19",
"2015-08-18",
],
"Val": list(range(9)),
}
)
df.Date = df.Date.astype("datetime64[ns]")
ddf = dd.from_pandas(df, 2)
assert_eq(df, ddf)
ddf = dd.from_pandas(df, chunksize=2)
assert_eq(df, ddf)
def test_DataFrame_from_dask_array():
x = da.ones((10, 3), chunks=(4, 2))
df = dd.from_dask_array(x, ["a", "b", "c"])
assert isinstance(df, dd.DataFrame)
tm.assert_index_equal(df.columns, pd.Index(["a", "b", "c"]))
assert list(df.divisions) == [0, 4, 8, 9]
assert (df.compute(scheduler="sync").values == x.compute(scheduler="sync")).all()
# dd.from_array should re-route to from_dask_array
df2 = dd.from_array(x, columns=["a", "b", "c"])
assert isinstance(df, dd.DataFrame)
tm.assert_index_equal(df2.columns, df.columns)
assert df2.divisions == df.divisions
def test_Series_from_dask_array():
x = da.ones(10, chunks=4)
ser = dd.from_dask_array(x, "a")
assert isinstance(ser, dd.Series)
assert ser.name == "a"
assert list(ser.divisions) == [0, 4, 8, 9]
assert (ser.compute(scheduler="sync").values == x.compute(scheduler="sync")).all()
ser = dd.from_dask_array(x)
assert isinstance(ser, dd.Series)
assert ser.name is None
# dd.from_array should re-route to from_dask_array
ser2 = dd.from_array(x)
assert isinstance(ser2, dd.Series)
assert_eq(ser, ser2)
@pytest.mark.parametrize("as_frame", [True, False])
def test_from_dask_array_index(as_frame):
s = dd.from_pandas(pd.Series(range(10), index=list("abcdefghij")), npartitions=3)
if as_frame:
s = s.to_frame()
result = dd.from_dask_array(s.values, index=s.index)
assert_eq(s, result)
def test_from_dask_array_index_raises():
x = da.random.uniform(size=(10,), chunks=(5,))
with pytest.raises(ValueError) as m:
dd.from_dask_array(x, index=pd.Index(np.arange(10)))
assert m.match("must be an instance")
a = dd.from_pandas(pd.Series(range(12)), npartitions=2)
b = dd.from_pandas(pd.Series(range(12)), npartitions=4)
with pytest.raises(ValueError) as m:
dd.from_dask_array(a.values, index=b.index)
assert m.match("index")
assert m.match("number")
assert m.match("blocks")
assert m.match("4 != 2")
def test_from_dask_array_compat_numpy_array():
x = da.ones((3, 3, 3), chunks=2)
with pytest.raises(ValueError):
dd.from_dask_array(x) # dask
with pytest.raises(ValueError):
dd.from_array(x.compute()) # numpy
x = da.ones((10, 3), chunks=(3, 3))
d1 = dd.from_dask_array(x) # dask
assert isinstance(d1, dd.DataFrame)
assert (d1.compute().values == x.compute()).all()
tm.assert_index_equal(d1.columns, pd.Index([0, 1, 2]))
d2 = dd.from_array(x.compute()) # numpy
assert isinstance(d1, dd.DataFrame)
assert (d2.compute().values == x.compute()).all()
tm.assert_index_equal(d2.columns, pd.Index([0, 1, 2]))
with pytest.raises(ValueError):
dd.from_dask_array(x, columns=["a"]) # dask
with pytest.raises(ValueError):
dd.from_array(x.compute(), columns=["a"]) # numpy
d1 = dd.from_dask_array(x, columns=["a", "b", "c"]) # dask
assert isinstance(d1, dd.DataFrame)
assert (d1.compute().values == x.compute()).all()
tm.assert_index_equal(d1.columns, pd.Index(["a", "b", "c"]))
d2 = dd.from_array(x.compute(), columns=["a", "b", "c"]) # numpy
assert isinstance(d1, dd.DataFrame)
assert (d2.compute().values == x.compute()).all()
tm.assert_index_equal(d2.columns, pd.Index(["a", "b", "c"]))
def test_from_dask_array_compat_numpy_array_1d():
x = da.ones(10, chunks=3)
d1 = dd.from_dask_array(x) # dask
assert isinstance(d1, dd.Series)
assert (d1.compute().values == x.compute()).all()
assert d1.name is None
d2 = dd.from_array(x.compute()) # numpy
assert isinstance(d1, dd.Series)
assert (d2.compute().values == x.compute()).all()
assert d2.name is None
d1 = dd.from_dask_array(x, columns="name") # dask
assert isinstance(d1, dd.Series)
assert (d1.compute().values == x.compute()).all()
assert d1.name == "name"
d2 = dd.from_array(x.compute(), columns="name") # numpy
assert isinstance(d1, dd.Series)
assert (d2.compute().values == x.compute()).all()
assert d2.name == "name"
# passing list via columns results in DataFrame
d1 = dd.from_dask_array(x, columns=["name"]) # dask
assert isinstance(d1, dd.DataFrame)
assert (d1.compute().values == x.compute()).all()
tm.assert_index_equal(d1.columns, pd.Index(["name"]))
d2 = dd.from_array(x.compute(), columns=["name"]) # numpy
assert isinstance(d1, dd.DataFrame)
assert (d2.compute().values == x.compute()).all()
tm.assert_index_equal(d2.columns, pd.Index(["name"]))
def test_from_dask_array_struct_dtype():
x = np.array([(1, "a"), (2, "b")], dtype=[("a", "i4"), ("b", "object")])
y = da.from_array(x, chunks=(1,))
df = dd.from_dask_array(y)
tm.assert_index_equal(df.columns, pd.Index(["a", "b"]))
assert_eq(df, pd.DataFrame(x))
assert_eq(
dd.from_dask_array(y, columns=["b", "a"]), pd.DataFrame(x, columns=["b", "a"])
)
def test_from_dask_array_unknown_chunks():
# Series
dx = da.Array(
{("x", 0): np.arange(5), ("x", 1): np.arange(5, 11)},
"x",
((np.nan, np.nan),),
np.arange(1).dtype,
)
df = dd.from_dask_array(dx)
assert isinstance(df, dd.Series)
assert not df.known_divisions
assert_eq(df, pd.Series(np.arange(11)), check_index=False)
# DataFrame
dsk = {("x", 0, 0): np.random.random((2, 3)), ("x", 1, 0): np.random.random((5, 3))}
dx = da.Array(dsk, "x", ((np.nan, np.nan), (3,)), np.float64)
df = dd.from_dask_array(dx)
assert isinstance(df, dd.DataFrame)
assert not df.known_divisions
assert_eq(df, pd.DataFrame(dx.compute()), check_index=False)
# Unknown width
dx = da.Array(dsk, "x", ((np.nan, np.nan), (np.nan,)), np.float64)
with pytest.raises(ValueError):
df = dd.from_dask_array(dx)
def test_to_bag():
a = pd.DataFrame(
{"x": ["a", "b", "c", "d"], "y": [2, 3, 4, 5]},
index=pd.Index([1.0, 2.0, 3.0, 4.0], name="ind"),
)
ddf = dd.from_pandas(a, 2)
assert ddf.to_bag().compute() == list(a.itertuples(False))
assert ddf.to_bag(True).compute() == list(a.itertuples(True))
assert ddf.to_bag(format="dict").compute() == [
{"x": "a", "y": 2},
{"x": "b", "y": 3},
{"x": "c", "y": 4},
{"x": "d", "y": 5},
]
assert ddf.to_bag(True, format="dict").compute() == [
{"index": 1.0, "x": "a", "y": 2},
{"index": 2.0, "x": "b", "y": 3},
{"index": 3.0, "x": "c", "y": 4},
{"index": 4.0, "x": "d", "y": 5},
]
assert ddf.x.to_bag(True).compute() == list(a.x.items())
assert ddf.x.to_bag().compute() == list(a.x)
assert ddf.x.to_bag(True, format="dict").compute() == [
{"x": "a"},
{"x": "b"},
{"x": "c"},
{"x": "d"},
]
assert ddf.x.to_bag(format="dict").compute() == [
{"x": "a"},
{"x": "b"},
{"x": "c"},
{"x": "d"},
]
def test_to_records():
pytest.importorskip("dask.array")
from dask.array.utils import assert_eq
df = pd.DataFrame(
{"x": ["a", "b", "c", "d"], "y": [2, 3, 4, 5]},
index=pd.Index([1.0, 2.0, 3.0, 4.0], name="ind"),
)
ddf = dd.from_pandas(df, 2)
assert_eq(
df.to_records(), ddf.to_records(), check_type=False
) # TODO: make check_type pass
@pytest.mark.parametrize("lengths", [[2, 2], True])
def test_to_records_with_lengths(lengths):
pytest.importorskip("dask.array")
from dask.array.utils import assert_eq
df = pd.DataFrame(
{"x": ["a", "b", "c", "d"], "y": [2, 3, 4, 5]},
index=pd.Index([1.0, 2.0, 3.0, 4.0], name="ind"),
)
ddf = dd.from_pandas(df, 2)
result = ddf.to_records(lengths=lengths)
assert_eq(df.to_records(), result, check_type=False) # TODO: make check_type pass
assert isinstance(result, da.Array)
expected_chunks = ((2, 2),)
assert result.chunks == expected_chunks
def test_to_records_raises():
    """to_records must reject `lengths` values inconsistent with the partitions."""
    pytest.importorskip("dask.array")
    df = pd.DataFrame(
        {"x": ["a", "b", "c", "d"], "y": [2, 3, 4, 5]},
        index=pd.Index([1.0, 2.0, 3.0, 4.0], name="ind"),
    )
    ddf = dd.from_pandas(df, 2)
    # pytest.raises already fails the test when no exception is raised, so
    # the unreachable pytest.fail(...) calls that followed each statement
    # inside these blocks were removed.
    with pytest.raises(ValueError):
        # Wrong number of lengths: 3 entries for 2 partitions.
        ddf.to_records(lengths=[2, 2, 2])
    with pytest.raises(ValueError):
        # Invalid lengths value (scalar int is not accepted).
        ddf.to_records(lengths=5)
def test_from_delayed():
df = pd.DataFrame(data=np.random.normal(size=(10, 4)), columns=list("abcd"))
parts = [df.iloc[:1], df.iloc[1:3], df.iloc[3:6], df.iloc[6:10]]
dfs = [delayed(parts.__getitem__)(i) for i in range(4)]
meta = dfs[0].compute()
my_len = lambda x: pd.Series([len(x)])
for divisions in [None, [0, 1, 3, 6, 10]]:
ddf = dd.from_delayed(dfs, meta=meta, divisions=divisions)
assert_eq(ddf, df)
assert list(ddf.map_partitions(my_len).compute()) == [1, 2, 3, 4]
assert ddf.known_divisions == (divisions is not None)
s = dd.from_delayed([d.a for d in dfs], meta=meta.a, divisions=divisions)
assert_eq(s, df.a)
assert list(s.map_partitions(my_len).compute()) == [1, 2, 3, 4]
assert ddf.known_divisions == (divisions is not None)
meta2 = [(c, "f8") for c in df.columns]
assert_eq(dd.from_delayed(dfs, meta=meta2), df)
assert_eq(dd.from_delayed([d.a for d in dfs], meta=("a", "f8")), df.a)
with pytest.raises(ValueError):
dd.from_delayed(dfs, meta=meta, divisions=[0, 1, 3, 6])
with pytest.raises(ValueError) as e:
dd.from_delayed(dfs, meta=meta.a).compute()
assert str(e.value).startswith("Metadata mismatch found in `from_delayed`")
def test_from_delayed_preserves_hlgs():
df = pd.DataFrame(data=np.random.normal(size=(10, 4)), columns=list("abcd"))
parts = [df.iloc[:1], df.iloc[1:3], df.iloc[3:6], df.iloc[6:10]]
dfs = [delayed(parts.__getitem__)(i) for i in range(4)]
meta = dfs[0].compute()
chained = [d.a for d in dfs]
hlg = dd.from_delayed(chained, meta=meta).dask
for d in chained:
for layer_name, layer in d.dask.layers.items():
assert hlg.layers[layer_name] == layer
assert hlg.dependencies[layer_name] == d.dask.dependencies[layer_name]
def test_from_delayed_misordered_meta():
df = pd.DataFrame(
columns=["(1)", "(2)", "date", "ent", "val"],
data=[range(i * 5, i * 5 + 5) for i in range(3)],
index=range(3),
)
# meta with different order for columns
misordered_meta = pd.DataFrame(
columns=["date", "ent", "val", "(1)", "(2)"], data=[range(5)]
)
ddf = dd.from_delayed([delayed(lambda: df)()], meta=misordered_meta)
with pytest.raises(ValueError) as info:
# produces dataframe which does not match meta
ddf.reset_index().compute(scheduler="sync")
msg = (
"The columns in the computed data do not match the columns in the"
" provided metadata"
)
assert msg in str(info.value)
def test_from_delayed_sorted():
    """divisions='sorted' derives known divisions from the partition indexes."""
    parts = [
        pd.DataFrame({"x": [1, 2]}, index=[1, 10]),
        pd.DataFrame({"x": [4, 1]}, index=[100, 200]),
    ]
    ddf = dd.from_delayed([delayed(p) for p in parts], divisions="sorted")
    assert ddf.known_divisions
    assert ddf.divisions == (1, 100, 200)
def test_to_delayed():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
# Frame
a, b = ddf.to_delayed()
assert isinstance(a, Delayed)
assert isinstance(b, Delayed)
assert_eq(a.compute(), df.iloc[:2])
# Scalar
x = ddf.x.sum()
dx = x.to_delayed()
assert isinstance(dx, Delayed)
assert_eq(dx.compute(), x)
def test_to_delayed_optimize_graph():
df = pd.DataFrame({"x": list(range(20))})
ddf = dd.from_pandas(df, npartitions=20)
ddf2 = (ddf + 1).loc[:2]
# Frame
d = ddf2.to_delayed()[0]
assert len(d.dask) < 20
d2 = ddf2.to_delayed(optimize_graph=False)[0]
assert sorted(d2.dask) == sorted(ddf2.dask)
assert_eq(ddf2.get_partition(0), d.compute())
assert_eq(ddf2.get_partition(0), d2.compute())
# Scalar
x = ddf2.x.sum()
dx = x.to_delayed()
dx2 = x.to_delayed(optimize_graph=False)
assert len(dx.dask) < len(dx2.dask)
assert_eq(dx.compute(), dx2.compute())
def test_from_dask_array_index_dtype():
x = da.ones((10,), chunks=(5,))
df = pd.DataFrame(
{
"date": pd.date_range("2019-01-01", periods=10, freq="1T"),
"val1": list(range(10)),
}
)
ddf = dd.from_pandas(df, npartitions=2).set_index("date")
ddf2 = dd.from_dask_array(x, index=ddf.index, columns="val2")
assert ddf.index.dtype == ddf2.index.dtype
assert ddf.index.name == ddf2.index.name
df = pd.DataFrame({"idx": np.arange(0, 1, 0.1), "val1": list(range(10))})
ddf = dd.from_pandas(df, npartitions=2).set_index("idx")
ddf2 = dd.from_dask_array(x, index=ddf.index, columns="val2")
assert ddf.index.dtype == ddf2.index.dtype
assert ddf.index.name == ddf2.index.name
| StarcoderdataPython |
1694323 | import os
import argparse
import warnings
import datasets
import torch
import flwr as fl
import pandas as pd
import numpy as np
from datasets import load_dataset, load_metric
from transformers import AutoTokenizer, DataCollatorWithPadding
from transformers import AutoModelForSequenceClassification
from transformers import AdamW
from collections import OrderedDict
from utils import progress_bar
from pathlib import Path
# If no checkpoint folder exists, create one; otherwise wipe any stale
# tracking file / model checkpoint left over from a previous run so this
# run starts with a clean slate.
if not os.path.isdir('checkpoint'):
    os.mkdir('checkpoint')
else:
    if os.path.isfile('./checkpoint/loss_acc_tracking.txt'):
        os.remove('./checkpoint/loss_acc_tracking.txt')
    if os.path.isfile('./checkpoint/ckpt.pth'):
        os.remove('./checkpoint/ckpt.pth')
warnings.filterwarnings("ignore", category=UserWarning)
# Run on GPU 0 when available, otherwise fall back to CPU.
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
CHECKPOINT = "distilbert-base-uncased"  # transformer model checkpoint
# #############################################################################
# 1. Dataloader
# #############################################################################
def load_data(split_idx):
    """Load the IMDB dataset and return ``(trainloader, testloader)``.

    Parameters
    ----------
    split_idx : int or None
        When not None, train only on the pre-computed subset stored at
        ``./split_data/imdb_split_part<split_idx>.pt`` (assumed to be a
        ``torch.utils.data.Subset`` of the shuffled training split -- TODO
        confirm against the split-generation script); when None, train on
        the full training split.
    """
    raw_datasets = load_dataset("imdb")
    raw_datasets = raw_datasets.shuffle(seed=42)
    # remove unnecessary data split
    del raw_datasets["unsupervised"]
    train_dd = raw_datasets["train"]
    if split_idx is not None:
        print('==> Training on a subset ', split_idx)
        path = Path('./split_data/').expanduser()
        prefix = "imdb_split_part"
        subset_idx = torch.load(path/(prefix+str(split_idx)+'.pt'))
        # Wrap the subset in a non-shuffled DataLoader (batch size 1) so its
        # examples can be pulled one at a time in index order.
        train_dl = torch.utils.data.DataLoader(subset_idx, shuffle=False)
        dat = []
        textgenerator = iter(train_dl)
        for i in range(len(subset_idx.indices)):
            try:
                etr = next(textgenerator)
                # Batch size is 1, so index [0] unwraps the singleton batch.
                dat.append([etr['text'][0], np.array(etr['label'])[0]])
            except StopIteration:
                print(i)
        # Rebuild a HuggingFace dataset from the extracted (text, label) rows.
        train_dd = datasets.arrow_dataset.Dataset.from_pandas(pd.DataFrame(dat, columns=['text', 'label']))
    tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT)
    def tokenize_function(examples):
        return tokenizer(examples["text"], truncation=True)
    tokenized_train_dd = train_dd.map(tokenize_function, batched=True)
    tokenized_test_dd = raw_datasets["test"].map(tokenize_function, batched=True)
    # Drop the raw text and rename the label column to what the model expects.
    tokenized_train_dd = tokenized_train_dd.remove_columns("text")
    tokenized_train_dd = tokenized_train_dd.rename_column("label", "labels")
    tokenized_test_dd = tokenized_test_dd.remove_columns("text")
    tokenized_test_dd = tokenized_test_dd.rename_column("label", "labels")
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
    trainloader = torch.utils.data.DataLoader(
        tokenized_train_dd,
        shuffle=True,
        batch_size=32,
        collate_fn=data_collator
    )
    testloader = torch.utils.data.DataLoader(
        tokenized_test_dd,
        batch_size=32,
        collate_fn=data_collator
    )
    return trainloader, testloader
def train(net, optimizer, trainloader, epochs, scheduler):
    """Train *net* for *epochs* passes over *trainloader*.

    Steps the LR *scheduler* once per epoch and appends a
    ``train,<summed loss>,<acc %>,<correct>,<total>`` line to the tracking
    file after every epoch.
    """
    # NOTE: the original code built an unused torch.nn.CrossEntropyLoss here;
    # HuggingFace models compute their own loss when "labels" are in the batch.
    net.train()
    for _ in range(epochs):
        train_loss = 0
        correct = 0
        total = 0
        for batch_idx, data in enumerate(trainloader):
            targets = data['labels'].to(DEVICE)
            batch = {k: v.to(DEVICE) for k, v in data.items()}
            optimizer.zero_grad()
            outputs = net(**batch)
            loss = outputs.loss  # model-computed cross-entropy
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
            predictions = torch.argmax(outputs.logits, dim=-1)
            total += targets.size(0)
            correct += predictions.eq(targets).sum().item()
            progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                         % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))
        scheduler.step()
        with open("./checkpoint/loss_acc_tracking.txt", "a") as track:
            track.write("train," + str(train_loss) + "," + str(100.*correct/total) +
                        "," + str(correct) + "," + str(total) + "\n")
def test(net, testloader):
    """Evaluate *net* over *testloader* without gradient tracking.

    Returns
    -------
    loss : float
        Sum of per-batch losses divided by the number of test *examples*
        (note: ``train`` averages over batches instead).
    accuracy : float
        Accuracy as computed by the HuggingFace "accuracy" metric.
    """
    metric = load_metric("accuracy")
    # The original initialized `correct` and `total` counters that were never
    # used -- accuracy comes from `metric` instead; they have been removed.
    loss = 0.0
    net.eval()
    for batch in testloader:
        batch = {k: v.to(DEVICE) for k, v in batch.items()}
        with torch.no_grad():
            outputs = net(**batch)
        logits = outputs.logits
        loss += outputs.loss.item()
        predictions = torch.argmax(logits, dim=-1)
        metric.add_batch(predictions=predictions, references=batch["labels"])
    loss /= len(testloader.dataset)
    accuracy = metric.compute()["accuracy"]
    return loss, accuracy
def test_save(net, testloader, best_acc, epoch):
    """Evaluate *net*, log a ``test,...`` tracking line, and checkpoint the
    model to ``./checkpoint/ckpt.pth`` whenever accuracy beats *best_acc*.

    Returns
    -------
    float
        The (possibly updated) best accuracy, for the caller to carry
        across federated rounds.
    """
    # NOTE: the original created an unused `load_metric("accuracy")` here;
    # accuracy is computed manually from correct/total below.
    test_loss = 0
    correct = 0
    total = 0
    net.eval()
    with torch.no_grad():
        for batch_idx, data in enumerate(testloader):
            targets = data['labels'].to(DEVICE)
            batch = {k: v.to(DEVICE) for k, v in data.items()}
            outputs = net(**batch)
            loss = outputs.loss
            test_loss += loss.item()
            predictions = torch.argmax(outputs.logits, dim=-1)
            total += targets.size(0)
            correct += predictions.eq(targets).sum().item()
            progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
                         % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))
    with open("./checkpoint/loss_acc_tracking.txt", "a") as track:
        track.write("test," + str(test_loss) + "," + str(100.*correct/total) +
                    "," + str(correct) + "," + str(total) + "\n")
    # Save checkpoint when accuracy improves on the best seen so far.
    acc = 100.*correct/total
    if acc > best_acc:
        print('Saving... accuracy', acc)
        state = {
            'net': net.state_dict(),
            'acc': acc,
            'epoch': epoch,
        }
        torch.save(state, './checkpoint/ckpt.pth')
        best_acc = acc
    return best_acc
def main():
    """Parse CLI args, build the DistilBERT sentiment model, and run the
    Flower federated-learning client against the given server."""
    parser = argparse.ArgumentParser(description='PyTorch IMDB Training')
    parser.add_argument('--ip', type=str, help='Server ip address to use')
    parser.add_argument('--idx', type=int, help='index number to use')
    # NOTE(review): --lr is parsed but never used -- the optimizer below is
    # hard-coded to lr=5e-5. Confirm whether this flag should be wired in.
    parser.add_argument('--lr', default=0.1, type=float, help='learning rate')
    args = parser.parse_args()
    for arg in vars(args):
        print(arg, getattr(args, arg))
    """Create model, load data, define Flower client, start Flower client."""
    net = AutoModelForSequenceClassification.from_pretrained(
        CHECKPOINT, num_labels=2
    ).to(DEVICE)
    optimizer = AdamW(net.parameters(), lr=5e-5)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200)
    trainloader, testloader = load_data(args.idx)
    epochs_step = 1  # local epochs per federated fit() round
    # Flower client
    class IMDBClient(fl.client.NumPyClient):
        # Running count of locally trained epochs and best test accuracy seen.
        epoch_counter = 0
        best_acc = 0.0
        def get_parameters(self):
            """Return the model weights as a list of NumPy arrays."""
            return [val.cpu().numpy() for _, val in net.state_dict().items()]
        def set_parameters(self, parameters):
            """Load a list of NumPy arrays back into the model state dict."""
            params_dict = zip(net.state_dict().keys(), parameters)
            state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict})
            net.load_state_dict(state_dict, strict=True)
        def fit(self, parameters, config):
            """Train locally on server-supplied weights; return the update."""
            self.set_parameters(parameters)
            train(net, optimizer, trainloader, epochs_step, scheduler)
            self.epoch_counter = self.epoch_counter + epochs_step
            self.best_acc = test_save(net, testloader, self.best_acc, self.epoch_counter)
            return self.get_parameters(), len(trainloader), {}
        def evaluate(self, parameters, config):
            """Evaluate server-supplied weights on the local test set."""
            self.set_parameters(parameters)
            loss, accuracy = test(net, testloader)
            return float(loss), len(testloader), {"accuracy": float(accuracy)}
    # Start client
    client = IMDBClient()
    fl.client.start_numpy_client(args.ip, client=client)
    print("==> best accuracy:", client.best_acc)
| StarcoderdataPython |
1730674 | import logging
import pytest
import responses
from flash_services.core import Service
from flash_services.github import GitHubEnterpriseIssues, GitHubIssues
@pytest.fixture
def service():
    """A GitHubIssues service pointed at the dummy foo/bar repository."""
    return GitHubIssues(username='user', password='<PASSWORD>', account='foo', repo='bar')
def test_tracker_service_type():
    """GitHubIssues must plug into the generic Service machinery."""
    assert issubclass(GitHubIssues, Service)
def test_correct_config():
    """Class-level configuration of the public-GitHub service."""
    assert GitHubIssues.FRIENDLY_NAME == 'GitHub Issues'
    assert GitHubIssues.REQUIRED == {'username', 'password', 'account', 'repo'}
    assert GitHubIssues.ROOT == 'https://api.github.com'
    assert GitHubIssues.TEMPLATE == 'gh-issues-section'
def test_correct_enterprise_config():
    """The enterprise variant additionally requires a configurable root URL."""
    assert GitHubEnterpriseIssues.FRIENDLY_NAME == 'GitHub Issues'
    assert GitHubEnterpriseIssues.REQUIRED == {'username', 'password', 'account', 'repo', 'root'}
    assert GitHubEnterpriseIssues.ROOT == ''
    assert GitHubEnterpriseIssues.TEMPLATE == 'gh-issues-section'
def test_update_success(service, caplog, mocked_responses):
    """A 200 response with no issues yields an empty, neutral payload."""
    caplog.set_level(logging.DEBUG)
    mocked_responses.add(
        responses.GET,
        'https://api.github.com/repos/foo/bar/issues?state=all',
        headers={'User-Agent': 'bar'},
        json=[],
    )
    result = service.update()
    # The fetch is logged at DEBUG level...
    assert 'fetching GitHub Issues project data' in [
        record.getMessage()
        for record in caplog.records
        if record.levelno == logging.DEBUG
    ]
    # ...and an empty issue list maps to neutral health with no halflife.
    assert result == {'issues': {}, 'name': 'foo/bar', 'health': 'neutral', 'halflife': None}
def test_update_enterprise_success(caplog, mocked_responses):
    """The enterprise variant hits the configured root URL instead of api.github.com."""
    caplog.set_level(logging.DEBUG)
    mocked_responses.add(
        responses.GET,
        'http://dummy.url/repos/foo/bar/issues?state=all',
        headers={'User-Agent': 'bar'},
        json=[],
    )
    service = GitHubEnterpriseIssues(
        username='enterprise-user',
        password='<PASSWORD>',
        account='foo',
        repo='bar',
        root='http://dummy.url',
    )
    result = service.update()
    assert 'fetching GitHub Issues project data' in [
        record.getMessage()
        for record in caplog.records
        if record.levelno == logging.DEBUG
    ]
    assert result == {'issues': {}, 'name': 'foo/bar', 'health': 'neutral', 'halflife': None}
def test_update_failure(service, caplog, mocked_responses):
    """An HTTP error logs at ERROR level and returns an empty dict."""
    mocked_responses.add(
        responses.GET,
        'https://api.github.com/repos/foo/bar/issues?state=all',
        headers={'User-Agent': 'bar'},
        status=401,
    )
    result = service.update()
    assert 'failed to update GitHub Issues project data' in [
        record.getMessage()
        for record in caplog.records
        if record.levelno == logging.ERROR
    ]
    assert result == {}
# Each case is (GitHub API issue payload, expected formatted summary).
# Health is derived from the median time-to-close ("halflife"): a couple of
# days -> 'ok', around ten days -> 'neutral', a month -> 'error'; entries
# with a 'pull_request' key are counted separately from plain issues.
@pytest.mark.parametrize('payload, expected', [
    ([], dict(name='foo/bar', issues={}, health='neutral', halflife=None)),
    (
        [{'state': 'open'}, {'state': 'open'}],
        dict(name='foo/bar', issues={'open-issues': 2}, health='neutral',
             halflife=None),
    ),
    (
        [{'state': 'open'}, {'state': 'closed'}],
        dict(name='foo/bar', issues={'open-issues': 1, 'closed-issues': 1},
             health='neutral', halflife=None),
    ),
    (
        [{'state': 'open'}, {'state': 'open', 'pull_request': {}}],
        dict(name='foo/bar', issues={'open-issues': 1, 'open-pull-requests':
                                     1},
             health='neutral', halflife=None),
    ),
    (
        [{'state': 'closed', 'created_at': '2010/11/12', 'closed_at': '2010/11/14'}],
        dict(name='foo/bar', issues={'closed-issues': 1}, health='ok',
             halflife='two days'),
    ),
    (
        [{'state': 'closed', 'created_at': '2010/11/12', 'closed_at': '2010/11/22'}],
        dict(name='foo/bar', issues={'closed-issues': 1}, health='neutral',
             halflife='ten days'),
    ),
    (
        [{'state': 'closed', 'created_at': '2010/10/12', 'closed_at': '2010/11/14'}],
        dict(name='foo/bar', issues={'closed-issues': 1}, health='error',
             halflife='a month'),
    ),
    (
        [
            {'state': 'closed', 'created_at': '2010/10/12', 'closed_at': '2010/10/15'},
            {'state': 'closed', 'created_at': '2010/10/12', 'closed_at': '2010/10/16'},
            {'state': 'open', 'created_at': '2010/10/12', 'closed_at': None},
        ],
        dict(name='foo/bar', issues={'closed-issues': 2, 'open-issues': 1},
             health='ok', halflife='three days'),
    ),
    (
        [
            {'state': 'closed', 'created_at': '2010/10/12', 'closed_at': '2010/10/15'},
            {'state': 'closed', 'created_at': '2010/10/12', 'closed_at': '2010/10/16', 'pull_request': {}},
            {'state': 'closed', 'created_at': '2010/10/12', 'closed_at': '2010/10/17'},
            {'state': 'open', 'created_at': '2010/10/12', 'closed_at': None},
        ],
        dict(
            name='foo/bar',
            issues={'closed-issues': 2, 'closed-pull-requests': 1, 'open-issues': 1},
            health='ok',
            halflife='four days',
        ),
    ),
])
def test_format_data(payload, expected, service, mocked_responses):
    """End-to-end: raw API payload -> formatted section data, incl. UA header."""
    mocked_responses.add(
        responses.GET,
        'https://api.github.com/repos/foo/bar/issues?state=all',
        json=payload,
    )
    assert service.update() == expected
    assert mocked_responses.calls[0].request.headers['User-Agent'] == 'bar'
def test_adjust_threshold():
    """Health thresholds are per-instance and adjustable after construction."""
    service = GitHubIssues(ok_threshold=1, account='', repo='', username='', password='')
    assert service.ok_threshold == 1
    assert service.neutral_threshold == 30
    issues = [
        {'state': 'closed', 'created_at': '2010/10/12', 'closed_at': '2010/10/15'},
    ]
    # Three-day halflife exceeds ok_threshold=1 but not neutral_threshold=30.
    assert service.format_data(issues).get('health') == 'neutral'
    # Lowering the neutral threshold pushes the same data into 'error'.
    service.neutral_threshold = 2
    assert service.format_data(issues).get('health') == 'error'
| StarcoderdataPython |
83942 | <reponame>gbrault/schemdraw<gh_stars>10-100
''' Two-terminal element definitions '''
import numpy as np
from .elements import Element2Term, gap
from ..segments import Segment, SegmentArrow, SegmentArc, SegmentText, SegmentCircle, SegmentPoly
from ..adddocs import adddocs
# Shared geometry constants used by every two-terminal element below.
resheight = 0.25  # Resistor height
reswidth = 1.0 / 6  # Full (inner) length of resistor is 1.0 data unit
class Resistor(Element2Term):
    """Standard zig-zag resistor."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Six half-zigzag segments spanning the unit inner length.
        self.segments.append(Segment(
            [[0, 0], [0.5*reswidth, resheight], [1.5*reswidth, -resheight],
             [2.5*reswidth, resheight], [3.5*reswidth, -resheight],
             [4.5*reswidth, resheight], [5.5*reswidth, -resheight], [6*reswidth, 0]]))
class RBox(Element2Term):
    """Rectangular (IEC-style) resistor."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.segments.append(Segment(
            [[0, 0], [0, resheight], [reswidth*6, resheight],
             [reswidth*6, -resheight], [0, -resheight], [0, 0],
             gap, [reswidth*6, 0]]))
class ResistorVar(Resistor):
    """Variable resistor: zig-zag body with a diagonal arrow through it."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.segments.append(SegmentArrow([1.5*reswidth, -resheight*2],
                                          [4.5*reswidth, reswidth*3.5],
                                          headwidth=.12, headlength=.2))
class RBoxVar(RBox):
    """Variable resistor, box style."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.segments.append(SegmentArrow([1*reswidth, -resheight*2],
                                          [5*reswidth, reswidth*3.5],
                                          headwidth=.12, headlength=.2))
class Thermistor(RBox):
    """Thermistor: box resistor with the bent temperature-dependence stroke."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.segments.append(Segment([[0, -resheight-.2], [.2, -resheight-.2], [1, resheight+.2]]))
class Photoresistor(Resistor):
    """Photoresistor: zig-zag resistor with two incoming light arrows."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.segments.append(SegmentArrow([.7, .75], [.4, .4],
                                          headwidth=.12, headlength=.2))
        self.segments.append(SegmentArrow([1, .75], [.7, .4],
                                          headwidth=.12, headlength=.2))
class PhotoresistorBox(RBox):
    """Photoresistor, box style."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.segments.append(SegmentArrow([.7, .75], [.4, .4],
                                          headwidth=.12, headlength=.2))
        self.segments.append(SegmentArrow([1, .75], [.7, .4],
                                          headwidth=.12, headlength=.2))
@adddocs(Element2Term)
class Capacitor(Element2Term):
    ''' Capacitor 2-terminal element.
    Parameters
    ----------
    polar : bool
        Add polarity + sign
    '''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        capgap = 0.18  # spacing between the two plates
        self.segments = [Segment([[0, 0], gap, [0, resheight], [0, -resheight], gap,
                                  [capgap, resheight], [capgap, -resheight], gap,
                                  [capgap, 0]])]
        if kwargs.get('polar', False):
            # The original copied `kwargs` and popped 'label' here, but the
            # local copy was never used afterwards -- dead code, removed.
            self.segments.append(SegmentText([-capgap*1.2, capgap], '+'))
@adddocs(Element2Term)
class Capacitor2(Element2Term):
    ''' Capacitor 2-terminal element, with curved side.
    Parameters
    ----------
    polar : bool
        Add polarity + sign
    '''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        capgap = 0.18  # spacing between flat plate and curved plate
        self.segments = [Segment([[0, 0], gap, [0, resheight],
                                  [0, -resheight], gap, [capgap, 0]]),
                         SegmentArc([capgap*1.5, 0], width=capgap*1.5,
                                    height=resheight*2.5, theta1=105, theta2=-105)]
        if kwargs.get('polar', False):
            self.segments.append(SegmentText([-capgap*1.2, capgap], '+'))
class CapacitorVar(Capacitor):
    """Variable capacitor: plates with a diagonal arrow through them."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.segments.append(SegmentArrow([-2*reswidth, -resheight],
                                          [3*reswidth, reswidth*2],
                                          headwidth=.12, headlength=.2))
class CapacitorTrim(Capacitor):
    """Trimmer capacitor: plates with a flat-head (bar-tipped) adjuster."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        capgap = 0.18
        # Near-zero headlength draws the trimmer's flat bar instead of a point.
        self.segments.append(SegmentArrow([-1.8*reswidth, -resheight], [1.8*reswidth+capgap, resheight],
                                          headlength=.0001, headwidth=.3))
class Crystal(Element2Term):
    """Quartz crystal: a rectangle between two capacitor-like plates."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        xgap = 0.2
        self.segments = [Segment(
            [[0, 0], gap, [0, resheight], [0, -resheight], gap,
             [xgap/2, resheight], [xgap/2, -resheight], [xgap*1.5, -resheight],
             [xgap*1.5, resheight], [xgap/2, resheight], gap,
             [xgap*2, resheight], [xgap*2, -resheight], gap, [xgap*2, 0]])]
class Diode(Element2Term):
    """Basic diode: filled-outline triangle pointing at the cathode bar."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.segments = [Segment([[0, 0], gap, [resheight*1.4, resheight],
                                  [resheight*1.4, -resheight], gap, [resheight*1.4, 0]]),
                         SegmentPoly([[0, resheight], [resheight*1.4, 0], [0, -resheight]])]
class Schottky(Diode):
    """Schottky diode: cathode bar with S-shaped hooks at both ends."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        schottky_width = 0.1
        self.segments.append(Segment(
            [[resheight*1.4, resheight],
             [resheight*1.4-schottky_width, resheight],
             [resheight*1.4-schottky_width, resheight-schottky_width]]))
        self.segments.append(Segment(
            [[resheight*1.4, -resheight],
             [resheight*1.4+schottky_width, -resheight],
             [resheight*1.4+schottky_width, -resheight+schottky_width]]))
class DiodeTunnel(Diode):
    """Tunnel diode: cathode bar with short flat hooks toward the anode."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        tunnel_width = 0.1
        self.segments.append(Segment([[resheight*1.4, resheight],
                                      [resheight*1.4-tunnel_width, resheight]]))
        self.segments.append(Segment([[resheight*1.4, -resheight],
                                      [resheight*1.4-tunnel_width, -resheight]]))
class DiodeShockley(Element2Term):
    """Shockley (four-layer) diode symbol."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.segments.append(Segment([[0, 0], [resheight*1.4, 0], [resheight*1.4, resheight],
                                      [resheight*1.4, -resheight], gap, [resheight*1.4, 0]]))
        self.segments.append(Segment([[0, -resheight], [0, resheight], [resheight*1.4, 0]]))
class Zener(Diode):
    """Zener diode: cathode bar with angled tails (the characteristic 'Z')."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        zener_width = 0.1
        self.segments.append(Segment([[resheight*1.4, resheight],
                                      [resheight*1.4+zener_width, resheight+zener_width]]))
        self.segments.append(Segment([[resheight*1.4, -resheight],
                                      [resheight*1.4-zener_width, -resheight-zener_width]]))
class LED(Diode):
    """LED: diode plus two straight outgoing light arrows; label goes below."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.segments.append(SegmentArrow([resheight, resheight*1.5],
                                          [resheight*2, resheight*3.25],
                                          headwidth=.12, headlength=.2))
        self.segments.append(SegmentArrow([resheight*.1, resheight*1.5],
                                          [resheight*1.1, resheight*3.25],
                                          headwidth=.12, headlength=.2))
        self.params['lblloc'] = 'bot'
class LED2(Diode):  # LED with squiggly light lines
    """LED variant: two rotated cubic 'squiggle' rays with outgoing arrowheads."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Build one cubic squiggle, then rotate two offset copies into place.
        x = np.linspace(-1, 1)
        y = -x*(x-.7)*(x+.7)/2 + resheight*2.5
        x = np.linspace(resheight*.75, resheight*1.25)
        theta = 20
        c = np.cos(np.radians(theta))
        s = np.sin(np.radians(theta))
        m = np.array([[c, s], [-s, c]])  # 2-D rotation matrix
        p = np.transpose(np.vstack((x, y)))
        p = np.dot(p, m)
        p2 = np.transpose(np.vstack((x-.2, y)))
        p2 = np.dot(p2, m)
        self.segments.append(Segment(p))
        self.segments.append(Segment(p2))
        # Arrowheads at the near end point away from the device (light emitted).
        self.segments.append(SegmentArrow(p[1], p[0], headwidth=.07, headlength=.08))
        self.segments.append(SegmentArrow(p2[1], p2[0], headwidth=.07, headlength=.08))
        self.params['lblloc'] = 'bot'
class Photodiode(Diode):
    """Photodiode: same squiggle rays as LED2 but arrowheads point inward."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        x = np.linspace(-1, 1)
        y = -x*(x-.7)*(x+.7)/2 + resheight*2.5
        x = np.linspace(resheight*.75, resheight*1.25)
        theta = 20
        c = np.cos(np.radians(theta))
        s = np.sin(np.radians(theta))
        m = np.array([[c, s], [-s, c]])
        p = np.transpose(np.vstack((x, y)))
        p = np.dot(p, m)
        p2 = np.transpose(np.vstack((x-.2, y)))
        p2 = np.dot(p2, m)
        self.segments.append(Segment(p))
        self.segments.append(Segment(p2))
        # Arrowheads at the far end point toward the device (light absorbed).
        self.segments.append(SegmentArrow(p[-2], p[-1], headwidth=.07, headlength=.08))
        self.segments.append(SegmentArrow(p2[-2], p2[-1], headwidth=.07, headlength=.08))
        self.params['lblloc'] = 'bot'
@adddocs(Element2Term)
class Potentiometer(Resistor):
    ''' Potentiometer element. Anchors: `tap` '''
    # Ok, this has three terminals, but is works like a two-term with lead extension
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        potheight = .72  # height of the wiper arm above the body
        self.anchors['tap'] = [reswidth*3, potheight]
        self.params['lblloc'] = 'bot'
        # Wiper arrow pointing down onto the middle of the resistor body.
        self.segments.append(SegmentArrow([reswidth*3, potheight], [reswidth*3, reswidth*1.5],
                                          headwidth=.15, headlength=.25))
@adddocs(Element2Term)
class PotBox(RBox):
    ''' Potentiometer using box resistor element. Anchors: `tap` '''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        potheight = .72
        self.anchors['tap'] = [reswidth*3, potheight]
        self.params['lblloc'] = 'bot'
        self.segments.append(SegmentArrow([reswidth*3, potheight], [reswidth*3, reswidth*2],
                                          headwidth=.15, headlength=.22))
class Diac(Element2Term):
    """Diac: two opposed triangles sharing the central bars (bidirectional)."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.segments.append(Segment(
            [[0, 0], gap, [resheight*1.4, resheight*1.8],
             [resheight*1.4, -resheight*1.8], gap,
             [0, resheight*1.8], [0, -resheight*1.8], gap, [resheight*1.4, 0]]))
        self.segments.append(SegmentPoly([[0, -resheight-.25], [resheight*1.4, -.25],
                                          [0, -resheight+.25]]))
        self.segments.append(SegmentPoly([[resheight*1.4, resheight+.25], [0, .25],
                                          [resheight*1.4, resheight-.25]]))
@adddocs(Element2Term)
class Triac(Diac):
    ''' Triac element. Anchors: `gate` '''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Gate lead off the upper bar; exposed via the `gate` anchor.
        self.segments.append(Segment([[resheight*1.4, .25], [resheight*1.4+.5, .5]]))
        self.anchors['gate'] = [resheight*1.4+.5, .5]
@adddocs(Element2Term)
class SCR(Diode):
    ''' Silicon controlled rectifier (or thyristor) element. Anchors: `gate` '''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Gate lead hanging below the cathode bar.
        self.segments.append(Segment(
            [[resheight*1.4, 0], [resheight*1.4+.3, -.3], [resheight*1.4+.3, -.5]]))
        self.anchors['gate'] = [resheight*1.4+.3, -.5]
class Memristor(Element2Term):
    """Memristor: square-wave trace inside a box with a filled end bar."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        mr = 0.2  # base unit for the square-wave geometry
        self.segments.append(Segment(
            [[0, 0], [mr, 0], [mr, -mr*.75], [mr*2, -mr*.75], [mr*2, mr*.75],
             [mr*3, mr*.75], [mr*3, -mr*.75], [mr*4, -mr*.75], [mr*4, 0], [mr*5, 0]]))
        self.segments.append(Segment(
            [[0, mr*1.25], [mr*5, mr*1.25], [mr*5, mr*-1.25], [0, mr*-1.25], [0, mr*1.25]]))
        # Filled rectangle marking the polarity end of the device.
        self.segments.append(SegmentPoly(
            [[0, mr*1.25], [0, -mr*1.25], [mr/2, -mr*1.25], [mr/2, mr*1.25]],
            fill='black'))
class Memristor2(Element2Term):
    """Alternate memristor symbol: bare square-wave trace, no enclosing box."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        mr = 0.2
        mrv = .25
        self.segments.append(Segment(
            [[0, 0], [0, mrv], [mr, mrv], [mr, -mrv], [mr*2, -mrv], [mr*2, mrv],
             [mr*3, mrv], [mr*3, -mrv], [mr*4, -mrv], [mr*4, mrv],
             [mr*5, mrv], [mr*5, -mrv], [mr*6, -mrv], [mr*6, 0],
             [mr*7, 0]]))
class Josephson(Element2Term):
    """Josephson junction: an 'X' across the wire."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.segments.append(Segment(
            [[0, 0], gap, [-resheight, resheight], [resheight, -resheight],
             gap, [resheight, resheight], [-resheight, -resheight], gap, [0, 0]]))
class Fuse(Element2Term):
    """Fuse: one sine-wave period, optionally with terminal dots.

    Pass ``dots=False`` to suppress the endpoint circles; ``fill`` sets
    their fill color (default white).
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        fuser = .12  # terminal-dot radius
        fusex = np.linspace(fuser*2, 1+fuser)
        fusey = np.sin(np.linspace(0, 1)*2*np.pi) * resheight
        self.segments.append(Segment(np.transpose(np.vstack((fusex, fusey)))))
        self.segments.append(Segment([[0, 0], gap, [1+fuser*3, 0]]))
        if kwargs.get('dots', True):
            fill = kwargs.get('fill', 'white')
            self.segments.append(SegmentCircle([fuser, 0], fuser, zorder=4, fill=fill))
            self.segments.append(SegmentCircle([fuser*2+1, 0], fuser, zorder=4, fill=fill))
class Breaker(Element2Term):
    ''' Circuit breaker
    Parameters
    ----------
    dots : bool
        Show connection dots
    '''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        dots = kwargs.get('dots', True)
        # A wider arc sweep is used when the dots are omitted so the arc
        # reaches closer to the terminals.
        theta1 = 25 if dots else 10
        theta2 = 155 if dots else 170
        self.segments.append(Segment([[0, 0], gap, [1, 0]]))
        self.segments.append(SegmentArc([.5, 0], 1, .65, theta1=theta1, theta2=theta2))
        if dots:
            fill = kwargs.get('fill', 'white')
            rad = .12
            self.segments.append(SegmentCircle([rad, 0], rad, zorder=4, fill=fill))
            self.segments.append(SegmentCircle([1-rad, 0], rad, zorder=4, fill=fill))
def cycloid(loops=4, ofst=(0, 0), a=.06, b=.19, norm=True, vertical=False, flip=False):
    ''' Generate a prolate cycloid (inductor spiral) that
        will always start and end at y=0.
    Parameters
    ----------
    loops : int
        Number of loops
    a, b : float
        Parameters. b>a for prolate (loopy) cycloid
    norm : bool
        Normalize the length to 1
    vertical, flip : bool
        Control the orientation of cycloid
    Returns
    -------
    path : array
        List of [x, y] coordinates defining the cycloid
    '''
    # Parameter value where y crosses zero; sweeping between symmetric
    # crossings guarantees the curve starts and ends on the axis.
    yint = np.arccos(a/b)  # y-intercept
    t = np.linspace(yint, 2*(loops+1)*np.pi-yint, num=loops*50)
    x = a*t - b*np.sin(t)
    y = a - b*np.cos(t)
    x = x - x[0]  # Shift to start at 0,0
    if norm:
        x = x / (x[-1]-x[0])  # Normalize length to 1
    if flip:
        y = -y
    # NOTE(review): the original comment said "normalize to resistor width",
    # but this multiplies y by range/resheight (~1.5x for the defaults) rather
    # than rescaling the range to resheight -- confirm the intended scaling.
    y = y * (max(y)-min(y))/(resheight)
    if vertical:
        x, y = y, x
    x = x + ofst[0]
    y = y + ofst[1]
    path = np.transpose(np.vstack((x, y)))
    return path
class Inductor(Element2Term):
    """Inductor drawn as four semicircular humps."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        ind_w = .25  # width (and height) of each hump
        self.segments.append(Segment([[0, 0], gap, [1, 0]]))
        for i in range(4):
            self.segments.append(SegmentArc(
                [(i*2+1)*ind_w/2, 0], theta1=0, theta2=180,
                width=ind_w, height=ind_w))
@adddocs(Element2Term)
class Inductor2(Element2Term):
    ''' Inductor, drawn as cycloid
    Parameters
    ----------
    loops : int
        Number of inductor loops
    '''
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        loops = kwargs.get('loops', 4)
        self.segments.append(Segment(cycloid(loops=loops)))
| StarcoderdataPython |
3473763 | # this script resizes bitmaps, so that they are square shaped
import wx
from os import walk
BITMAP_SIZE = 96  # target square edge length in pixels
app = wx.App()  # a wx.App instance must exist before using wx.Image
def ResizeBitmap(filepath):
    """Pad/crop the image at *filepath* in place to BITMAP_SIZE x BITMAP_SIZE,
    keeping the original content centred; already-square files are skipped."""
    img = wx.Image(filepath)
    if img.Width == BITMAP_SIZE and img.Height == BITMAP_SIZE:
        print('Skipped ' + filepath)
        return
    # The offset centres the old content inside the new canvas.
    # NOTE(review): the * 0.5 products are floats while wx.Point typically
    # takes ints -- confirm wx accepts/truncates these here.
    img.Resize(wx.Size(BITMAP_SIZE, BITMAP_SIZE), wx.Point((BITMAP_SIZE - img.Width) * 0.5, (BITMAP_SIZE - img.Height) * 0.5))
    img.SaveFile(filepath)
    print('Resized ' + filepath)
def FileIsBitmapSuffix(filename):
    """Return True when *filename* ends with a ``.png`` extension (any case).

    Equivalent to extracting the text after the last dot and comparing it
    case-insensitively to ``.png``.
    """
    return filename.lower().endswith('.png')
def ResizeBitmapsInFolder(folder):
    """Resize every .png directly inside *folder*.

    Non-recursive: the ``break`` stops os.walk after the top-level directory.
    """
    for (dirpath, dirnames, filenames) in walk(folder):
        for filename in filenames:
            if FileIsBitmapSuffix(filename):
                ResizeBitmap(folder + '/' + filename)
        break
# Process the current working directory when run as a script.
ResizeBitmapsInFolder('./')
| StarcoderdataPython |
1677213 | <reponame>jwoehr/nuqasm2<filename>test/test_parsing.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 21 19:03:20 2020
@author: jax
"""
import io
import os
import unittest
import nuqasm2 as nq
class TestParsing(unittest.TestCase):
    """Test nuqasm2 regressions"""
    # Search path for OpenQASM include files, built from the environment plus
    # the bundled test sources.
    # NOTE(review): os.getenv returns None when NUQASM2_INCLUDE_PATH is unset,
    # making this concatenation raise TypeError at import -- confirm the env
    # var is guaranteed by the test harness.
    include_path = os.getenv('NUQASM2_INCLUDE_PATH') + ':test/qasm_src'
    def test_test(self):
        """Nonsense test"""
        self.assertFalse(nq.qasmast.ASTType.UNKNOWN.value)
    def _test_circ_qasm_file_compare(self, regression_name):
        """Factor to run translation and do file compare"""
        # Translate test/qasm_src/<name>.qasm through the AST -> circuit -> qasm
        # round trip and compare line-by-line against the stored expected output.
        self.maxDiff = None #pylint: disable-msg=invalid-name
        from_file_path = 'test/qasm_src/' + regression_name + '.qasm'
        validation_file_path = 'test/validation_output/' + regression_name + '.output.txt'
        qt = nq.qasmast.QasmTranslator.fromFile(from_file_path, #pylint: disable-msg=invalid-name
                                                include_path=self.include_path)
        qt.translate()
        translated_ast = qt.get_translation()
        ast2circ = nq.Ast2Circ(nuq2_ast=translated_ast)
        circ = ast2circ.translate().circuit
        qasm = circ.qasm()
        # Re-split into lines with trailing newlines so the list comparison
        # matches what io.open yields for the validation file.
        qasm_list_raw = qasm.split('\n')
        qasm_list = []
        for line in qasm_list_raw:
            qasm_list.append(line + '\n')
        validation_file = io.open(validation_file_path)
        self.assertListEqual(qasm_list, list(validation_file))
        validation_file.close()
    def test_constant_parm_to_gate_op(self):
        """Test unbound constant parm to op from a gate definition."""
        self._test_circ_qasm_file_compare('constant_parm_to_gate_op')
    def test_no_space_before_curly_gatedef(self):
        """Test curly brace following reg list with no space in gatedef."""
        self._test_circ_qasm_file_compare('no_space_before_curly_gatedef')
    def test_local_gate_include(self):
        """Test if gates from a local (same dir) include file work."""
        self._test_circ_qasm_file_compare('local_gate_include')
    def test_extra_spaces(self):
        """Test extra spaces in input lines e.g., before a semicolon."""
        self._test_circ_qasm_file_compare('extra_spaces')
    def test_gate_parameter_substitution(self):
        """Test gate parameter substitution at runtime
        by params to gate invocation."""
        self._test_circ_qasm_file_compare('gate_parameter_substitution')
| StarcoderdataPython |
9781233 | <reponame>R0b3rt020/django-friendship<filename>friendship/tests/tests.py
import os
from django.contrib.auth.models import User
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.db import IntegrityError
from django.test import TestCase
from django.urls import reverse
from friendship.exceptions import AlreadyExistsError, AlreadyFriendsError
from friendship.models import Block, Follow, Friend, FriendshipRequest
TEST_TEMPLATES = os.path.join(os.path.dirname(__file__), "templates")
class login:
    """Context manager that logs *user* in through the Django test client on
    construction and logs out again on exit.

    Construction asserts that the login actually succeeded so tests fail
    fast on bad credentials.
    """
    def __init__(self, testcase, user, password):
        self.testcase = testcase
        success = testcase.client.login(username=user, password=password)
        # The original f-string interpolated `{<PASSWORD>}`, which is not valid
        # Python syntax (and would leak the password if "fixed" naively);
        # keep the credential redacted in the failure message.
        self.testcase.assertTrue(
            success, f"login with username={user!r}, password=*** failed"
        )
    def __enter__(self):
        pass
    def __exit__(self, *args):
        self.testcase.client.logout()
class BaseTestCase(TestCase):
    """Shared fixture: four users plus login/assertion helpers."""
    def setUp(self):
        """
        Setup some initial users
        """
        self.user_pw = "test"
        self.user_bob = self.create_user("bob", "<EMAIL>", self.user_pw)
        self.user_steve = self.create_user("steve", "<EMAIL>", self.user_pw)
        self.user_susan = self.create_user("susan", "<EMAIL>", self.user_pw)
        self.user_amy = self.create_user("amy", "<EMAIL>", self.user_pw)
        # The friendship app caches relationship lookups; clear so each test
        # starts from a cold cache.
        cache.clear()
    def tearDown(self):
        cache.clear()
        self.client.logout()
    def login(self, user, password):
        """Return the ``login`` context manager bound to this test case."""
        return login(self, user, password)
    def create_user(self, username, email_address, password):
        user = User.objects.create_user(username, email_address, password)
        return user
    # Thin HTTP status assertion helpers.
    def assertResponse200(self, response):
        self.assertEqual(response.status_code, 200)
    def assertResponse302(self, response):
        self.assertEqual(response.status_code, 302)
    def assertResponse403(self, response):
        self.assertEqual(response.status_code, 403)
    def assertResponse404(self, response):
        self.assertEqual(response.status_code, 404)
class FriendshipModelTests(BaseTestCase):
    def test_friendship_request(self):
        """Full request lifecycle: create, duplicate-guard, accept, remove,
        cancel, reject, mark-viewed, and self-friending validation."""
        # Bob wants to be friends with Steve
        req1 = Friend.objects.add_friend(self.user_bob, self.user_steve)
        # Ensure neither have friends already
        self.assertEqual(Friend.objects.friends(self.user_bob), [])
        self.assertEqual(Friend.objects.friends(self.user_steve), [])
        # Ensure FriendshipRequest is created
        self.assertEqual(
            FriendshipRequest.objects.filter(from_user=self.user_bob).count(), 1
        )
        self.assertEqual(
            FriendshipRequest.objects.filter(to_user=self.user_steve).count(), 1
        )
        self.assertEqual(Friend.objects.unread_request_count(self.user_steve), 1)
        # Ensure the proper sides have requests or not
        self.assertEqual(len(Friend.objects.requests(self.user_bob)), 0)
        self.assertEqual(len(Friend.objects.requests(self.user_steve)), 1)
        self.assertEqual(len(Friend.objects.sent_requests(self.user_bob)), 1)
        self.assertEqual(len(Friend.objects.sent_requests(self.user_steve)), 0)
        self.assertEqual(len(Friend.objects.unread_requests(self.user_steve)), 1)
        self.assertEqual(Friend.objects.unread_request_count(self.user_steve), 1)
        self.assertEqual(len(Friend.objects.rejected_requests(self.user_steve)), 0)
        self.assertEqual(len(Friend.objects.unrejected_requests(self.user_steve)), 1)
        self.assertEqual(Friend.objects.unrejected_request_count(self.user_steve), 1)
        # Ensure they aren't friends at this point
        self.assertFalse(Friend.objects.are_friends(self.user_bob, self.user_steve))
        # Ensure Bob can't request another friendship request from Steve.
        with self.assertRaises(AlreadyExistsError):
            Friend.objects.add_friend(self.user_bob, self.user_steve)
        # Ensure Steve can't request a friendship request from Bob.
        with self.assertRaises(AlreadyExistsError):
            Friend.objects.add_friend(self.user_steve, self.user_bob)
        # Accept the request
        req1.accept()
        # Ensure neither have pending requests
        self.assertEqual(
            FriendshipRequest.objects.filter(from_user=self.user_bob).count(), 0
        )
        self.assertEqual(
            FriendshipRequest.objects.filter(to_user=self.user_steve).count(), 0
        )
        # Ensure both are in each other's friend lists
        self.assertEqual(Friend.objects.friends(self.user_bob), [self.user_steve])
        self.assertEqual(Friend.objects.friends(self.user_steve), [self.user_bob])
        self.assertTrue(Friend.objects.are_friends(self.user_bob, self.user_steve))
        # Make sure we can remove friendship; a second removal returns False.
        self.assertTrue(Friend.objects.remove_friend(self.user_bob, self.user_steve))
        self.assertFalse(Friend.objects.are_friends(self.user_bob, self.user_steve))
        self.assertFalse(Friend.objects.remove_friend(self.user_bob, self.user_steve))
        # Susan wants to be friends with Amy, but cancels it
        req2 = Friend.objects.add_friend(self.user_susan, self.user_amy)
        self.assertEqual(Friend.objects.friends(self.user_susan), [])
        self.assertEqual(Friend.objects.friends(self.user_amy), [])
        req2.cancel()
        self.assertEqual(Friend.objects.requests(self.user_susan), [])
        self.assertEqual(Friend.objects.requests(self.user_amy), [])
        # Susan wants to be friends with Amy, but Amy rejects it
        req3 = Friend.objects.add_friend(self.user_susan, self.user_amy)
        self.assertEqual(Friend.objects.friends(self.user_susan), [])
        self.assertEqual(Friend.objects.friends(self.user_amy), [])
        req3.reject()
        # Duplicated requests raise a more specific subclass of IntegrityError.
        with self.assertRaises(AlreadyExistsError):
            Friend.objects.add_friend(self.user_susan, self.user_amy)
        self.assertFalse(Friend.objects.are_friends(self.user_susan, self.user_amy))
        self.assertEqual(len(Friend.objects.rejected_requests(self.user_amy)), 1)
        self.assertEqual(len(Friend.objects.rejected_requests(self.user_amy)), 1)
        # let's try that again..
        req3.delete()
        # Susan wants to be friends with Amy, and Amy reads it
        req4 = Friend.objects.add_friend(self.user_susan, self.user_amy)
        req4.mark_viewed()
        self.assertFalse(Friend.objects.are_friends(self.user_susan, self.user_amy))
        self.assertEqual(len(Friend.objects.read_requests(self.user_amy)), 1)
        # Ensure we can't be friends with ourselves
        with self.assertRaises(ValidationError):
            Friend.objects.add_friend(self.user_bob, self.user_bob)
        # Ensure we can't do it manually either
        with self.assertRaises(ValidationError):
            Friend.objects.create(to_user=self.user_bob, from_user=self.user_bob)
def test_already_friends_with_request(self):
    """Requesting friendship between users who are already friends raises
    AlreadyFriendsError, in both directions."""
    # Make Bob and Steve friends
    req = Friend.objects.add_friend(self.user_bob, self.user_steve)
    req.accept()
    with self.assertRaises(AlreadyFriendsError):
        Friend.objects.add_friend(self.user_bob, self.user_steve)
    with self.assertRaises(AlreadyFriendsError):
        Friend.objects.add_friend(self.user_steve, self.user_bob)
def test_multiple_friendship_requests(self):
    """ Ensure multiple friendship requests are handled properly """
    # Bob wants to be friends with Steve
    req1 = Friend.objects.add_friend(self.user_bob, self.user_steve)
    # Ensure neither have friends already
    self.assertEqual(Friend.objects.friends(self.user_bob), [])
    self.assertEqual(Friend.objects.friends(self.user_steve), [])
    # Ensure FriendshipRequest is created
    self.assertEqual(
        FriendshipRequest.objects.filter(from_user=self.user_bob).count(), 1
    )
    self.assertEqual(
        FriendshipRequest.objects.filter(to_user=self.user_steve).count(), 1
    )
    self.assertEqual(Friend.objects.unread_request_count(self.user_steve), 1)
    # Steve also wants to be friends with Bob before Bob replies;
    # the reverse request is rejected as a duplicate
    with self.assertRaises(AlreadyExistsError):
        Friend.objects.add_friend(self.user_steve, self.user_bob)
    # Ensure they aren't friends at this point
    self.assertFalse(Friend.objects.are_friends(self.user_bob, self.user_steve))
    # Accept the request
    req1.accept()
    # Ensure neither have pending requests — accepting clears the
    # request queues in both directions (asserted below)
    self.assertEqual(
        FriendshipRequest.objects.filter(from_user=self.user_bob).count(), 0
    )
    self.assertEqual(
        FriendshipRequest.objects.filter(to_user=self.user_steve).count(), 0
    )
    self.assertEqual(
        FriendshipRequest.objects.filter(from_user=self.user_steve).count(), 0
    )
    self.assertEqual(
        FriendshipRequest.objects.filter(to_user=self.user_bob).count(), 0
    )
def test_multiple_calls_add_friend(self):
    """ Ensure multiple calls with same friends, but different message works as expected """
    Friend.objects.add_friend(self.user_bob, self.user_steve, message="Testing")
    # A different message does not make the request distinct — still a duplicate
    with self.assertRaises(AlreadyExistsError):
        Friend.objects.add_friend(self.user_bob, self.user_steve, message="Foo Bar")
def test_following(self):
    """Exercise the Follow manager: add, query, duplicate, remove,
    and self-follow validation."""
    # Bob follows Steve
    Follow.objects.add_follower(self.user_bob, self.user_steve)
    self.assertEqual(len(Follow.objects.followers(self.user_steve)), 1)
    self.assertEqual(len(Follow.objects.following(self.user_bob)), 1)
    self.assertEqual(Follow.objects.followers(self.user_steve), [self.user_bob])
    self.assertEqual(Follow.objects.following(self.user_bob), [self.user_steve])
    # Following is one-directional
    self.assertTrue(Follow.objects.follows(self.user_bob, self.user_steve))
    self.assertFalse(Follow.objects.follows(self.user_steve, self.user_bob))
    # Duplicated requests raise a more specific subclass of IntegrityError.
    with self.assertRaises(IntegrityError):
        Follow.objects.add_follower(self.user_bob, self.user_steve)
    with self.assertRaises(AlreadyExistsError):
        Follow.objects.add_follower(self.user_bob, self.user_steve)
    # Remove the relationship
    self.assertTrue(Follow.objects.remove_follower(self.user_bob, self.user_steve))
    self.assertEqual(len(Follow.objects.followers(self.user_steve)), 0)
    self.assertEqual(len(Follow.objects.following(self.user_bob)), 0)
    self.assertFalse(Follow.objects.follows(self.user_bob, self.user_steve))
    # Ensure we cannot follow ourselves
    with self.assertRaises(ValidationError):
        Follow.objects.add_follower(self.user_bob, self.user_bob)
    with self.assertRaises(ValidationError):
        Follow.objects.create(follower=self.user_bob, followee=self.user_bob)
def test_blocking(self):
    """Exercise the Block manager: add, query, duplicate, remove,
    and self-block validation."""
    # Bob blocks Steve
    Block.objects.add_block(self.user_bob, self.user_steve)
    self.assertEqual(len(Block.objects.blocking(self.user_bob)), 1)
    self.assertEqual(Block.objects.blocking(self.user_bob)[0], self.user_steve)
    self.assertEqual(len(Block.objects.blocked(self.user_steve)), 1)
    self.assertEqual(Block.objects.blocked(self.user_steve)[0], self.user_bob)
    # is_blocked is symmetric: either direction reports blocked
    self.assertEqual(Block.objects.is_blocked(self.user_bob, self.user_steve), True)
    self.assertEqual(Block.objects.is_blocked(self.user_steve, self.user_bob), True)
    # Duplicated requests raise a more specific subclass of IntegrityError.
    with self.assertRaises(IntegrityError):
        Block.objects.add_block(self.user_bob, self.user_steve)
    with self.assertRaises(AlreadyExistsError):
        Block.objects.add_block(self.user_bob, self.user_steve)
    # Remove the relationship
    self.assertTrue(Block.objects.remove_block(self.user_bob, self.user_steve))
    self.assertEqual(len(Block.objects.blocking(self.user_steve)), 0)
    self.assertEqual(len(Block.objects.blocked(self.user_bob)), 0)
    # Ensure we cannot block ourselves
    with self.assertRaises(ValidationError):
        Block.objects.add_block(self.user_bob, self.user_bob)
    with self.assertRaises(ValidationError):
        Block.objects.create(blocker=self.user_bob, blocked=self.user_bob)
class FriendshipViewTests(BaseTestCase):
    """Integration tests for the django-friendship views.

    Each test checks (a) whether the view is reachable anonymously or
    redirects (302) to login, and (b) the GET/POST behaviour for the
    logged-in user. setUp leaves one pending friendship request from
    Steve to Bob for the accept/reject/cancel/detail tests.
    """

    def setUp(self):
        # Pending request: Steve -> Bob (used by accept/reject/cancel tests)
        super().setUp()
        self.friendship_request = Friend.objects.add_friend(
            self.user_steve, self.user_bob
        )

    def test_friendship_view_users(self):
        url = reverse("friendship_view_users")
        # this view is accessible without authentication
        response = self.client.get(url)
        self.assertResponse200(response)
        with self.settings(
            FRIENDSHIP_CONTEXT_OBJECT_LIST_NAME="object_list",
            TEMPLATE_DIRS=(TEST_TEMPLATES,),
        ):
            response = self.client.get(url)
            self.assertResponse200(response)
            self.assertTrue("object_list" in response.context)

    def test_friendship_view_friends(self):
        url = reverse(
            "friendship_view_friends", kwargs={"username": self.user_bob.username}
        )
        # this view is accessible without authentication
        response = self.client.get(url)
        self.assertResponse200(response)
        self.assertTrue("user" in response.context)
        with self.settings(
            FRIENDSHIP_CONTEXT_OBJECT_NAME="object", TEMPLATE_DIRS=(TEST_TEMPLATES,)
        ):
            response = self.client.get(url)
            self.assertResponse200(response)
            self.assertTrue("object" in response.context)

    def test_friendship_add_friend(self):
        url = reverse(
            "friendship_add_friend", kwargs={"to_username": self.user_amy.username}
        )
        # test that the view requires authentication to access it
        response = self.client.get(url)
        self.assertResponse302(response)
        with self.login(self.user_bob.username, self.user_pw):
            # if we don't POST the view should return the
            # friendship_add_friend view
            response = self.client.get(url)
            self.assertResponse200(response)
            # on POST create the friendship request and redirect to the
            # friendship_request_list view
            response = self.client.post(url)
            self.assertResponse302(response)
            redirect_url = reverse("friendship_request_list")
            self.assertTrue(redirect_url in response["Location"])

    def test_friendship_add_friend_dupe(self):
        url = reverse(
            "friendship_add_friend", kwargs={"to_username": self.user_amy.username}
        )
        with self.login(self.user_bob.username, self.user_pw):
            # first POST creates the friendship request and redirects to the
            # friendship_request_list view
            response = self.client.post(url)
            self.assertResponse302(response)
            redirect_url = reverse("friendship_request_list")
            self.assertTrue(redirect_url in response["Location"])
            # a second identical POST is rejected with an error in context
            response = self.client.post(url)
            self.assertResponse200(response)
            self.assertTrue("errors" in response.context)
            self.assertEqual(
                response.context["errors"], ["You already requested friendship from this user."]
            )
        url = reverse(
            "friendship_add_friend", kwargs={"to_username": self.user_bob.username}
        )
        with self.login(self.user_amy.username, self.user_pw):
            # the reverse request is also rejected as a duplicate
            response = self.client.post(url)
            self.assertResponse200(response)
            self.assertTrue("errors" in response.context)
            self.assertEqual(
                response.context["errors"], ["This user already requested friendship from you."]
            )

    def test_friendship_requests(self):
        url = reverse("friendship_request_list")
        # test that the view requires authentication to access it
        response = self.client.get(url)
        self.assertResponse302(response)
        with self.login(self.user_bob.username, self.user_pw):
            response = self.client.get(url)
            self.assertResponse200(response)

    def test_friendship_requests_rejected(self):
        url = reverse("friendship_requests_rejected")
        # test that the view requires authentication to access it
        response = self.client.get(url)
        self.assertResponse302(response)
        with self.login(self.user_bob.username, self.user_pw):
            response = self.client.get(url)
            self.assertResponse200(response)

    def test_friendship_accept(self):
        url = reverse(
            "friendship_accept",
            kwargs={"friendship_request_id": self.friendship_request.pk},
        )
        # test that the view requires authentication to access it
        response = self.client.get(url)
        self.assertResponse302(response)
        with self.login(self.user_bob.username, self.user_pw):
            # if we don't POST the view should return the
            # friendship_requests_detail view
            response = self.client.get(url)
            self.assertResponse302(response)
            redirect_url = reverse(
                "friendship_requests_detail",
                kwargs={"friendship_request_id": self.friendship_request.pk},
            )
            self.assertTrue(redirect_url in response["Location"])
            # on POST accept the friendship request and redirect to the
            # friendship_view_friends view
            response = self.client.post(url)
            self.assertResponse302(response)
            redirect_url = reverse(
                "friendship_view_friends", kwargs={"username": self.user_bob.username}
            )
            self.assertTrue(redirect_url in response["Location"])
        with self.login(self.user_steve.username, self.user_pw):
            # on POST try to accept the friendship request
            # but I am logged in as Steve, so I cannot accept
            # a request sent to Bob
            response = self.client.post(url)
            self.assertResponse404(response)

    def test_friendship_reject(self):
        url = reverse(
            "friendship_reject",
            kwargs={"friendship_request_id": self.friendship_request.pk},
        )
        # test that the view requires authentication to access it
        response = self.client.get(url)
        self.assertResponse302(response)
        with self.login(self.user_bob.username, self.user_pw):
            # if we don't POST the view should return the
            # friendship_requests_detail view
            response = self.client.get(url)
            self.assertResponse302(response)
            redirect_url = reverse(
                "friendship_requests_detail",
                kwargs={"friendship_request_id": self.friendship_request.pk},
            )
            self.assertTrue(redirect_url in response["Location"])
            # on POST reject the friendship request and redirect to the
            # friendship_requests view
            response = self.client.post(url)
            self.assertResponse302(response)
            redirect_url = reverse("friendship_request_list")
            self.assertTrue(redirect_url in response["Location"])
        with self.login(self.user_steve.username, self.user_pw):
            # on POST try to reject the friendship request
            # but I am logged in as Steve, so I cannot reject
            # a request sent to Bob
            response = self.client.post(url)
            self.assertResponse404(response)

    def test_friendship_cancel(self):
        url = reverse(
            "friendship_cancel",
            kwargs={"friendship_request_id": self.friendship_request.pk},
        )
        # test that the view requires authentication to access it
        response = self.client.get(url)
        self.assertResponse302(response)
        with self.login(self.user_bob.username, self.user_pw):
            # if we don't POST the view should return the
            # friendship_requests_detail view
            response = self.client.get(url)
            self.assertResponse302(response)
            redirect_url = reverse(
                "friendship_requests_detail",
                kwargs={"friendship_request_id": self.friendship_request.pk},
            )
            self.assertTrue(redirect_url in response["Location"])
            # on POST try to cancel the friendship request
            # but I am logged in as Bob, so I cannot cancel
            # a request made by Steve
            response = self.client.post(url)
            self.assertResponse404(response)
        with self.login(self.user_steve.username, self.user_pw):
            # on POST cancel the friendship request and redirect to the
            # friendship_requests view
            response = self.client.post(url)
            self.assertResponse302(response)
            redirect_url = reverse("friendship_request_list")
            self.assertTrue(redirect_url in response["Location"])

    def test_friendship_requests_detail(self):
        url = reverse(
            "friendship_requests_detail",
            kwargs={"friendship_request_id": self.friendship_request.pk},
        )
        # test that the view requires authentication to access it
        response = self.client.get(url)
        self.assertResponse302(response)
        with self.login(self.user_bob.username, self.user_pw):
            response = self.client.get(url)
            self.assertResponse200(response)

    def test_friendship_followers(self):
        url = reverse("friendship_followers", kwargs={"username": "bob"})
        # this view is accessible without authentication
        response = self.client.get(url)
        self.assertResponse200(response)
        with self.settings(
            FRIENDSHIP_CONTEXT_OBJECT_NAME="object", TEMPLATE_DIRS=(TEST_TEMPLATES,)
        ):
            response = self.client.get(url)
            self.assertResponse200(response)
            self.assertTrue("object" in response.context)

    def test_friendship_following(self):
        url = reverse("friendship_following", kwargs={"username": "bob"})
        # this view is accessible without authentication
        response = self.client.get(url)
        self.assertResponse200(response)
        with self.settings(
            FRIENDSHIP_CONTEXT_OBJECT_NAME="object", TEMPLATE_DIRS=(TEST_TEMPLATES,)
        ):
            response = self.client.get(url)
            self.assertResponse200(response)
            self.assertTrue("object" in response.context)

    def test_follower_add(self):
        url = reverse(
            "follower_add", kwargs={"followee_username": self.user_amy.username}
        )
        # test that the view requires authentication to access it
        response = self.client.get(url)
        self.assertResponse302(response)
        with self.login(self.user_bob.username, self.user_pw):
            response = self.client.get(url)
            self.assertResponse200(response)
            # on POST create the follow relationship and redirect to the
            # friendship_following view
            response = self.client.post(url)
            self.assertResponse302(response)
            redirect_url = reverse(
                "friendship_following", kwargs={"username": self.user_bob.username}
            )
            self.assertTrue(redirect_url in response["Location"])
            # a duplicate POST is rejected with an error in context
            response = self.client.post(url)
            self.assertResponse200(response)
            self.assertTrue("errors" in response.context)
            self.assertEqual(
                response.context["errors"], ["User 'bob' already follows 'amy'"]
            )

    def test_follower_remove(self):
        # create a follow relationship so we can test removing a follower
        Follow.objects.add_follower(self.user_bob, self.user_amy)
        url = reverse(
            "follower_remove", kwargs={"followee_username": self.user_amy.username}
        )
        # test that the view requires authentication to access it
        response = self.client.get(url)
        self.assertResponse302(response)
        with self.login(self.user_bob.username, self.user_pw):
            response = self.client.get(url)
            self.assertResponse200(response)
            response = self.client.post(url)
            self.assertResponse302(response)
            redirect_url = reverse(
                "friendship_following", kwargs={"username": self.user_bob.username}
            )
            self.assertTrue(redirect_url in response["Location"])

    def test_friendship_blockers(self):
        url = reverse("friendship_blockers", kwargs={"username": "bob"})
        # this view is accessible without authentication
        response = self.client.get(url)
        self.assertResponse200(response)
        with self.settings(
            FRIENDSHIP_CONTEXT_OBJECT_NAME="object", TEMPLATE_DIRS=(TEST_TEMPLATES,)
        ):
            response = self.client.get(url)
            self.assertResponse200(response)
            self.assertTrue("object" in response.context)

    def test_friendship_blocking(self):
        url = reverse("friendship_blocking", kwargs={"username": "bob"})
        # this view is accessible without authentication
        response = self.client.get(url)
        self.assertResponse200(response)
        with self.settings(
            FRIENDSHIP_CONTEXT_OBJECT_NAME="object", TEMPLATE_DIRS=(TEST_TEMPLATES,)
        ):
            response = self.client.get(url)
            self.assertResponse200(response)
            self.assertTrue("object" in response.context)

    def test_block_add(self):
        url = reverse("block_add", kwargs={"blocked_username": self.user_amy.username})
        # test that the view requires authentication to access it
        response = self.client.get(url)
        self.assertResponse302(response)
        with self.login(self.user_bob.username, self.user_pw):
            response = self.client.get(url)
            self.assertResponse200(response)
            # on POST create the block and redirect to the
            # friendship_blocking view
            response = self.client.post(url)
            self.assertResponse302(response)
            redirect_url = reverse(
                "friendship_blocking", kwargs={"username": self.user_bob.username}
            )
            self.assertTrue(redirect_url in response["Location"])
            # a duplicate POST is rejected with an error in context
            response = self.client.post(url)
            self.assertResponse200(response)
            self.assertTrue("errors" in response.context)
            self.assertEqual(
                response.context["errors"], ["User 'bob' already blocks 'amy'"]
            )

    def test_block_remove(self):
        # create a block relationship so we can test removing a block
        Block.objects.add_block(self.user_bob, self.user_amy)
        url = reverse(
            "block_remove", kwargs={"blocked_username": self.user_amy.username}
        )
        # test that the view requires authentication to access it
        response = self.client.get(url)
        self.assertResponse302(response)
        with self.login(self.user_bob.username, self.user_pw):
            response = self.client.get(url)
            self.assertResponse200(response)
            response = self.client.post(url)
            self.assertResponse302(response)
            redirect_url = reverse(
                "friendship_blocking", kwargs={"username": self.user_bob.username}
            )
            self.assertTrue(redirect_url in response["Location"])
| StarcoderdataPython |
3559205 | # -- EIGRP
# nxos: interface <intf> / ip authentication key-chain eigrp someword someword2
# nxos: interface <intf> / ip authentication mode eigrp someword md5
# nxos: interface <intf> / ip bandwidth eigrp someword 1
# nxos: interface <intf> / ip bandwidth-percent eigrp someword 1
# nxos: interface <intf> / ip delay eigrp someword 1
# nxos: interface <intf> / ip delay eigrp someword 1 picoseconds
# nxos: interface <intf> / ip distribute-list eigrp someword prefix-list someword2 in
# nxos: interface <intf> / ip distribute-list eigrp someword prefix-list someword2 out
# nxos: interface <intf> / ip distribute-list eigrp someword route-map rpl1 in
# nxos: interface <intf> / ip distribute-list eigrp someword route-map rpl1 out
# nxos: interface <intf> / ip eigrp someword bfd
# nxos: interface <intf> / ip eigrp someword bfd disable
# nxos: interface <intf> / ip eigrp someword shutdown
# nxos: interface <intf> / ip hello-interval eigrp someword 1
# nxos: interface <intf> / ip hold-time eigrp someword 1
# nxos: interface <intf> / ip mtu eigrp someword 210
# nxos: interface <intf> / ip next-hop-self eigrp someword
# nxos: interface <intf> / ip offset-list eigrp someword prefix-list someword2 in <0-2147483647>
# nxos: interface <intf> / ip offset-list eigrp someword prefix-list someword2 out <0-2147483647>
# nxos: interface <intf> / ip offset-list eigrp someword route-map rpl1 in <0-2147483647>
# nxos: interface <intf> / ip offset-list eigrp someword route-map rpl1 out <0-2147483647>
# nxos: interface <intf> / ip passive-interface eigrp someword
# nxos: interface <intf> / ip router eigrp someword
# nxos: interface <intf> / ip split-horizon eigrp someword
# nxos: interface <intf> / ip summary-address eigrp someword 1.2.3.4 255.255.255.0
# nxos: interface <intf> / ip summary-address eigrp someword 1.2.3.4 255.255.255.0 1
# nxos: interface <intf> / ip summary-address eigrp someword 1.2.3.4 255.255.255.0 1 leak-map someword2
# nxos: interface <intf> / ip summary-address eigrp someword 1.2.3.4 255.255.255.0 leak-map someword2
# nxos: interface <intf> / ip summary-address eigrp someword 1.2.3.0/24
# nxos: interface <intf> / ip summary-address eigrp someword 1.2.3.0/24 1
# nxos: interface <intf> / ip summary-address eigrp someword 1.2.3.0/24 1 leak-map someword2
# nxos: interface <intf> / ip summary-address eigrp someword 1.2.3.0/24 leak-map someword2
# nxos: interface <intf> / ipv6 authentication key-chain eigrp someword someword2
# nxos: interface <intf> / ipv6 authentication mode eigrp someword md5
# nxos: interface <intf> / ipv6 bandwidth eigrp someword 1
# nxos: interface <intf> / ipv6 bandwidth-percent eigrp someword 1
# nxos: interface <intf> / ipv6 delay eigrp someword 1
# nxos: interface <intf> / ipv6 delay eigrp someword 1 picoseconds
# nxos: interface <intf> / ipv6 distribute-list eigrp someword prefix-list someword2 in
# nxos: interface <intf> / ipv6 distribute-list eigrp someword prefix-list someword2 out
# nxos: interface <intf> / ipv6 distribute-list eigrp someword route-map rpl1 in
# nxos: interface <intf> / ipv6 distribute-list eigrp someword route-map rpl1 out
# nxos: interface <intf> / ipv6 eigrp someword shutdown
# nxos: interface <intf> / ipv6 hello-interval eigrp someword 1
# nxos: interface <intf> / ipv6 hold-time eigrp someword 1
# nxos: interface <intf> / ipv6 mtu eigrp someword 210
# nxos: interface <intf> / ipv6 next-hop-self eigrp someword
# nxos: interface <intf> / ipv6 offset-list eigrp someword prefix-list someword2 in <0-2147483647>
# nxos: interface <intf> / ipv6 offset-list eigrp someword prefix-list someword2 out <0-2147483647>
# nxos: interface <intf> / ipv6 offset-list eigrp someword route-map rpl1 in <0-2147483647>
# nxos: interface <intf> / ipv6 offset-list eigrp someword route-map rpl1 out <0-2147483647>
# nxos: interface <intf> / ipv6 passive-interface eigrp someword
# nxos: interface <intf> / ipv6 router eigrp someword
# nxos: interface <intf> / ipv6 split-horizon eigrp someword
# nxos: interface <intf> / ipv6 summary-address eigrp someword fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b/128
# nxos: interface <intf> / ipv6 summary-address eigrp someword fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b/128 1
# nxos: interface <intf> / ipv6 summary-address eigrp someword fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b/128 1 leak-map someword2
# nxos: interface <intf> / ipv6 summary-address eigrp someword fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b/128 leak-map someword2
| StarcoderdataPython |
8125454 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from pymysqlreplication import BinLogStreamReader
from pymysqlreplication.event import RotateEvent, FormatDescriptionEvent
from pymysqlreplication.row_event import UpdateRowsEvent
from pymysqlreplication.row_event import WriteRowsEvent
from pymysqlreplication.row_event import DeleteRowsEvent
import pymysql
import time,datetime
import argparse
# import simplejson as json
import json
import sys
import signal
import traceback
'''
MySQL表信息统计小工具
基本使用
[root@centos7 tmp]# python mysql_binlog_stat.py --help
usage: mysql_binlog_stat.py [-h] [--host HOST] [--port PORT]
[--username USERNAME] [--password PASSWORD]
[--log-file binlog-file-name]
[--log-pos binlog-file-pos]
[--server-id server-id] [--slave-uuid slave-uuid]
[--blocking False/True] [--start-time start-time]
[--sorted-by insert/update/delete]
Description: The script parse MySQL binlog and statistic column.
optional arguments:
-h, --help show this help message and exit
--host HOST Connect MySQL host
--port PORT Connect MySQL port
--username USERNAME Connect MySQL username
--password PASSWORD Connect MySQL password
--log-file binlog-file-name
Specify a binlog name
--log-pos binlog-file-pos
Specify a binlog file pos
--server-id server-id
Specify a slave server server-id
--slave-uuid slave-uuid
Specify a slave server uuid
--blocking False/True
Specify is bloking and parse, default False
--start-time start-time
Specify is start parse timestamp, default None,
example: 2016-11-01 00:00:00
--sorted-by insert/update/delete
Specify show statistic sort by, default: insert
主要参数介绍:
--log-file: binlog 文件名称
--log-pos: binlog 文件位置(从哪个位置开始解析)
--blocking: 是否需要使用阻塞的方式进行解析始终为 False 就好(默认就是False)
--start-time: 从什么时间开始解析
--sorted-by: 展示的结果通过什么来排序, 默认是通过 insert 的行数的多少降序排列, 设置的值有 insert/update/delete
解析binlog并统计
root@(none) 09:17:12>show binary logs;
+------------------+-----------+
| Log_name | File_size |
+------------------+-----------+
| mysql-bin.000012 | 437066170 |
| mysql-bin.000013 | 536884582 |
| mysql-bin.000014 | 537032563 |
| mysql-bin.000015 | 536950457 |
| mysql-bin.000016 | 87791004 |
| mysql-bin.000017 | 143 |
| mysql-bin.000018 | 143 |
| mysql-bin.000019 | 143 |
| mysql-bin.000020 | 143 |
| mysql-bin.000021 | 1426 |
+------------------+-----------+
10 rows in set (0.01 sec)
[root@centos7 tmp]# time python mysql_binlog_stat.py --log-file=mysql-bin.000012 --log-pos=120 --username=root --password=<PASSWORD> --sorted-by='insert'
[
{
"app_db.business_item_sku_detail": {
"row_insert_count": {
"market_price": 273453,
"sku_id": 273453,
"weight": 273453,
"sku_info": 273453,
"created": 273453,
"pre_sale_stock": 273453,
"price": 273453,
"sku_name": 273453,
"limit_sale_time": 273453,
"sku_no": 273453,
"limit_sale_num": 273453,
"business_item_id": 273453,
"channel_sku_lowest_price": 273453,
"tmall_shop_id": 273453,
"guid": 273453,
"pic_url": 273453,
"stock": 273453
},
"table_dml_count": {
"insert": 273453,
"update": 0,
"delete": 0
},
"row_update_count": {}
}
},
{
"app_db.business_item_sku_property": {
"row_insert_count": {
"sku_id": 273112,
"created": 273112,
"property_value_id": 273112,
"business_item_id": 273112,
"record_id": 273112,
"property_id": 273112
},
"table_dml_count": {
"insert": 273112,
"update": 0,
"delete": 0
},
"row_update_count": {}
}
},
{
"app_db.business_item_pic": {
"row_insert_count": {
"created": 270993,
"business_item_id": 270993,
"pic_id": 270993,
"pic_no": 270993,
"tmall_shop_id": 270993,
"pic_url": 270993
},
"table_dml_count": {
"insert": 270993,
"update": 0,
"delete": 0
},
"row_update_count": {}
}
},
{
"app_db.business_item": {
"row_insert_count": {
"guide_commission": 264803,
"commission_type": 264803,
"pstatus": 264803,
"num_iid": 264803,
"limit_sale_time": 264803,
"sell_point": 264803,
"abbreviation": 264803,
"distribution_time": 264803,
"view_num": 264803,
"tariff_rate": 264803,
"tmall_shop_id": 264803,
"is_pre_sale": 264803,
"pic_url": 264803,
"pre_sale_begin_time": 264803,
"business_item_id": 264803,
"sale_tax": 264803,
"guid": 264803,
"recommend_time": 264803,
"is_top_newgood": 264803,
"is_delete": 264803,
"is_open_item_property": 264803,
"mstatus": 264803,
"pre_sale_end_time": 264803,
"top_time": 264803,
"country_id": 264803,
"vir_sales_num": 264803,
"content": 264803,
"commission": 264803,
"wholesale_sales_num": 264803,
"is_associated_type": 264803,
"recommend": 264803,
"is_cross_border": 264803,
"sales_num": 264803,
"custom_discount_type": 264803,
"use_item_type_tax_rate": 264803,
"one_type_id": 264803,
"new_good_time": 264803,
"ship_time": 264803,
"value_add_tax": 264803,
"new_good_words": 264803,
"top_time_newgood": 264803,
"bar_code": 264803,
"price": 264803,
"business_no": 264803,
"limit_sale_num": 264803,
"is_top_hot_sell": 264803,
"discount_type": 264803,
"is_top": 264803,
"tax_rate": 264803,
"hot_sell_time": 264803,
"is_taobao_item": 264803,
"business_item_brand_id": 264803,
"logistics_costs": 264803,
"business_type": 264803,
"guide_commission_type": 264803,
"is_top_recommend": 264803,
"created": 264803,
"pre_sale_stock": 264803,
"title": 264803,
"two_type_id": 264803,
"new_good_flag": 264803,
"custom_clear_type": 264803,
"top_time_recommend": 264803,
"store_commission_type": 264803,
"store_commission": 264803,
"is_hot_sell": 264803,
"like_num": 264803,
"distribution": 264803,
"stock": 264803,
"channel_item_lowest_price": 264803,
"top_time_hot_sell": 264803
},
"table_dml_count": {
"insert": 264803,
"update": 0,
"delete": 0
},
"row_update_count": {}
}
},
{
"test.t_binlog_event": {
"row_insert_count": {
"auto_id": 5926,
"dml_sql": 5926,
"dml_start_time": 5926,
"dml_end_time": 5926,
"start_log_pos": 5926,
"db_name": 5926,
"binlog_name": 5926,
"undo_sql": 5926,
"table_name": 5926,
"end_log_pos": 5926
},
"table_dml_count": {
"insert": 5926,
"update": 0,
"delete": 4017
},
"row_update_count": {}
}
},
{
"test.ord_order": {
"row_insert_count": {
"order_id": 184,
"pay_type": 181,
"amount": 184,
"create_time": 184,
"serial_num": 181
},
"table_dml_count": {
"insert": 184,
"update": 0,
"delete": 0
},
"row_update_count": {}
}
},
{
"test.t1": {
"row_insert_count": {
"id": 7,
"name": 7
},
"table_dml_count": {
"insert": 7,
"update": 2,
"delete": 2
},
"row_update_count": {
"name": 2
}
}
},
{
"test.area": {
"row_insert_count": {},
"table_dml_count": {
"insert": 0,
"update": 0,
"delete": 0
},
"row_update_count": {}
}
}
]
real 5m42.982s
user 5m26.080s
sys 0m8.958s
'''
class MySQLBinlogStat(object):
    """Parse MySQL binlog events and collect per-table / per-column DML statistics."""
    # NOTE(review): these are *class-level* mutable attributes. Until an
    # instance assignment shadows them, every instance shares the same
    # dict object for _table_stat_info — confirm this is intended.
    _stream = None
    _table_stat_info = {}
def __init__(self, connectionSettings, startFile=None, startPos=None, endFile=None, endPos=None, startTime=None,
             stopTime=None, only_schemas=None, only_tables=None, nopk=False, flashback=False, only_events=None,
             stopnever=False, countnum=10):
    """Connect to MySQL, validate the requested binlog range and set up state.

    Args:
        connectionSettings: dict passed straight to ``pymysql.connect()``.
        startFile: binlog file name to start from (required).
        startPos: start position inside startFile (defaults to 4, binlog v4 header).
        endFile: binlog file name to stop at (defaults to startFile).
        endPos: stop position inside endFile.
        startTime / stopTime: 'YYYY-MM-DD HH:MM:SS' strings bounding the events.
        only_schemas / only_tables: optional schema/table filters.
        nopk, flashback, stopnever: pass-through flags kept on the instance.
        only_events: row-event classes to parse; defaults to
            update/insert/delete row events.
        countnum: number of entries to show in the final report.

    Raises:
        ValueError: if startFile is missing, not present on the server,
            or the server has no server_id configured.
    """
    if not startFile:
        raise ValueError('lack of parameter,startFile.')
    # Build the default here instead of in the signature to avoid the
    # shared-mutable-default-argument pitfall.
    if only_events is None:
        only_events = [UpdateRowsEvent, WriteRowsEvent, DeleteRowsEvent]
    # Per-instance state: previously only class-level attributes existed,
    # so every instance would have shared one statistics dict.
    self._stream = None
    self._table_stat_info = {}
    self.countnum = countnum
    self.only_events = only_events
    self.connectionSettings = connectionSettings
    self.startFile = startFile
    self.startPos = startPos if startPos else 4  # use binlog v4
    self.endFile = endFile if endFile else startFile
    self.endPos = endPos
    # Unbounded defaults when no time window is given.
    self.startTime = datetime.datetime.strptime(startTime,
                                                "%Y-%m-%d %H:%M:%S") if startTime else datetime.datetime.strptime(
        '1970-01-01 00:00:00', "%Y-%m-%d %H:%M:%S")
    self.stopTime = datetime.datetime.strptime(stopTime,
                                               "%Y-%m-%d %H:%M:%S") if stopTime else datetime.datetime.strptime(
        '2999-12-31 00:00:00', "%Y-%m-%d %H:%M:%S")
    self.only_schemas = only_schemas if only_schemas else None
    self.only_tables = only_tables if only_tables else None
    self.nopk, self.flashback, self.stopnever = (nopk, flashback, stopnever)
    self.sqllist = []
    self.binlogList = []
    self.connection = pymysql.connect(**self.connectionSettings)
    try:
        cur = self.connection.cursor()
        cur.execute("SHOW MASTER STATUS")
        self.eofFile, self.eofPos = cur.fetchone()[:2]
        cur.execute("SHOW MASTER LOGS")
        binIndex = [row[0] for row in cur.fetchall()]
        if self.startFile not in binIndex:
            raise ValueError('parameter error: startFile %s not in mysql server' % self.startFile)
        # Binlog names share a fixed-width numeric suffix, so string
        # comparison of the suffixes orders them correctly.
        binlog2i = lambda x: x.split('.')[1]
        for binlog_name in binIndex:  # renamed from 'bin' (shadowed a builtin)
            if binlog2i(binlog_name) >= binlog2i(self.startFile) and binlog2i(binlog_name) <= binlog2i(self.endFile):
                self.binlogList.append(binlog_name)
        cur.execute("SELECT @@server_id")
        self.serverId = cur.fetchone()[0]
        if not self.serverId:
            raise ValueError('need set server_id in mysql server %s:%s' % (
                self.connectionSettings['host'], self.connectionSettings['port']))
    finally:
        if 'cur' in locals():
            cur.close()
@property
def stream(self):
    """Getter for the ``stream`` property (the binlog stream object)."""
    return self._stream

@stream.setter
def stream(self, value):
    """Setter for the ``stream`` property."""
    self._stream = value
@property
def table_stat_info(self):
    """Getter for the ``table_stat_info`` property (nested stats dict)."""
    return self._table_stat_info

@table_stat_info.setter
def table_stat_info(self, value):
    """Setter for the ``table_stat_info`` property."""
    self._table_stat_info = value
def init_schema_stat_struct(self, schema=None):
    """Initialise the per-schema container of the statistics structure.

    Args:
        schema: database name
    Return: None
    Raise: None

    Table stat info struct:
        _table_stat_info = {
            'test': {  # database name
            }
        }
    """
    if schema not in self.table_stat_info:  # initialise the schema entry
        self.table_stat_info[schema] = {}
def init_table_stat_struct(self, schema=None, table=None):
    """Initialise the per-table statistics structure.

    Args:
        schema: database name
        table: table name
    Return: None
    Raise: None

    Table stat info struct:
        _table_stat_info['test'] = {
            't1': {  # table name
                'table_dml_count': {  # per-table DML counters
                    'insert': 0,
                    'update': 0,
                    'delete': 0,
                },
                'row_insert_count': {},  # per-column insert counters
                'row_update_count': {},  # per-column update counters
            }
        }
    """
    if table not in self.table_stat_info[schema]:  # initialise the table entry
        self.table_stat_info[schema][table] = {
            'table_dml_count': {  # per-table DML counters
                'insert': 0,
                'update': 0,
                'delete': 0,
            },
            'row_insert_count': {},  # per-column insert counters
            'row_update_count': {},  # per-column update counters
        }
def init_insert_col_stat_struct(self, schema=None, table=None, col=None):
    """Create a zeroed insert counter for *col* of schema.table.

    Args:
        schema: database name
        table: table name
        col: column name
    """
    insert_counters = self.table_stat_info[schema][table]['row_insert_count']
    insert_counters[col] = 0
def init_update_col_stat_struct(self, schema=None, table=None, col=None):
    """Create a zeroed update counter for *col* of schema.table.

    Args:
        schema: database name
        table: table name
        col: column name
    """
    update_counters = self.table_stat_info[schema][table]['row_update_count']
    update_counters[col] = 0
def schema_exist(self, schema=None):
    """Return True if a statistics entry for *schema* already exists.

    Args:
        schema: database name
    Return: True/False
    Raise: None
    """
    # Direct membership test replaces the redundant if/else returning booleans.
    return schema in self.table_stat_info
def table_exist(self, schema=None, table=None):
    """Return True if a statistics entry for schema.table already exists.

    Args:
        schema: database name
        table: table name
    Return: True/False
    Raise: None
    """
    # Direct membership test replaces the redundant if/else returning booleans.
    return table in self.table_stat_info[schema]
def insert_col_exist(self, schema=None, table=None, col=None):
"""判断插入的字段是否存在
Args:
schema: 数据库
table: 表
col: 字段名
Return: True/False
Raise: None
"""
if col in self.table_stat_info[schema][table]['row_insert_count']:
return True
else:
return False
def update_col_exist(self, schema=None, table=None, col=None):
"""判断更新的字段是否存在
Args:
schema: 数据库
table: 表
col: 字段名
Return: True/False
Raise: None
"""
if col in self.table_stat_info[schema][table]['row_update_count']:
return True
else:
return False
def add_insert_count(self, schema=None, table=None, count=0):
"""添加insert执行的行数
Args:
schema: 数据库
table: 表
count: 行数
"""
self.table_stat_info[schema][table] \
['table_dml_count']['insert'] += count
def add_update_count(self, schema=None, table=None, count=0):
"""添加update执行的行数
Args:
schema: 数据库
table: 表
count: 行数
"""
self.table_stat_info[schema][table] \
['table_dml_count']['update'] += count
def add_delete_count(self, schema=None, table=None, count=0):
"""添加delete执行的行数
Args:
schema: 数据库
table: 表
count: 行数
"""
self.table_stat_info[schema][table] \
['table_dml_count']['delete'] += count
def add_insert_row_col_count(self, schema=None, table=None,
col=None, count=0):
"""添加insert语句列的插入次数
Args:
schema: 数据库
table: 表
col: 列名
count: 更新新次数
"""
self.table_stat_info[schema][table] \
['row_insert_count'][col] += count
def add_update_row_col_count(self, schema=None, table=None,
col=None, count=0):
"""添加insert语句列的插入次数
Args:
schema: 数据库
table: 表
col: 列名
count: 更新新次数
"""
self.table_stat_info[schema][table] \
['row_update_count'][col] += count
def insert_row_stat(self, binlogevent=None):
"""对WriteRowsEvent事件进行分析统计
Args:
binlogevent: binlog 事件对象
Return: None
Raise: None
"""
# 判断之前是否存在该表的统计信息, 不存在则初始化一个
schema = binlogevent.schema
table = binlogevent.table
if not self.schema_exist(schema=schema): # 初始化 schema
self.init_schema_stat_struct(schema=schema)
if not self.table_exist(schema=schema, table=table): # 初始化 table
self.init_table_stat_struct(schema=schema, table=table)
self.add_insert_count(schema=schema, table=table,
count=len(binlogevent.rows)) # 添加 INSERT 行数
def update_row_stat(self, binlogevent=None):
"""对UpdateRowsEvent事件进行分析统计
Args:
binlogevent: binlog 事件对象
Return: None
Raise: None
"""
# 判断之前是否存在该表的统计信息, 不存在则初始化一个
schema = binlogevent.schema
table = binlogevent.table
if not self.schema_exist(schema=schema): # 初始化 schema
self.init_schema_stat_struct(schema=schema)
if not self.table_exist(schema=schema, table=table): # 初始化 table
self.init_table_stat_struct(schema=schema, table=table)
self.add_update_count(schema=schema, table=table,
count=len(binlogevent.rows)) # 添加 INSERT 行数
def delete_row_stat(self, binlogevent=None):
"""对DeleteRowsEvent事件进行分析统计
Args:
binlogevent: binlog 事件对象
Return: None
Raise: None
"""
# 判断之前是否存在该表的统计信息, 不存在则初始化一个
schema = binlogevent.schema
table = binlogevent.table
if not self.schema_exist(schema=schema): # 初始化 schema
self.init_schema_stat_struct(schema=schema)
if not self.table_exist(schema=schema, table=table): # 初始化 table
self.init_table_stat_struct(schema=schema, table=table)
self.add_delete_count(schema=schema, table=table,
count=len(binlogevent.rows)) # 添加 DELETE 行数
def insert_row_col_stat(self, binlogevent):
"""统计insert某列的值"""
schema = binlogevent.schema
table = binlogevent.table
row_size = len(binlogevent.rows)
for column in binlogevent.columns:
# 初始化列的统计
if not self.insert_col_exist(schema=schema, table=table,
col=column.name):
self.init_insert_col_stat_struct(schema=schema,
table=table,
col=column.name)
self.add_insert_row_col_count(schema=schema, table=table,
col=column.name, count=row_size)
def update_row_col_stat(self, binlogevent):
"""统计update某列的值"""
schema = binlogevent.schema
table = binlogevent.table
for row in binlogevent.rows:
for column in binlogevent.columns:
if column.is_primary: # 是主键则不处理
continue
# 前后的值相等则不处理
if (row['before_values'][column.name] ==
row['after_values'][column.name]):
continue
# 初始化更新列统计
if not self.update_col_exist(schema=schema, table=table,
col=column.name):
self.init_update_col_stat_struct(schema=schema,
table=table,
col=column.name)
# 添加某列更行1次
self.add_update_row_col_count(schema=schema, table=table,
col=column.name, count=1)
    def run_parse(self):
        """Open the binlog stream and loop over its events, dispatching row
        events to the per-table statistic collectors (Python 2 syntax)."""
        self.stream = BinLogStreamReader(connection_settings=self.connectionSettings, server_id=self.serverId,
                                         log_file=self.startFile, log_pos=self.startPos,
                                         only_schemas=self.only_schemas, only_events=self.only_events,
                                         only_tables=self.only_tables, resume_stream=True)
        for binlogevent in self.stream:
            count = 0
            # NOTE(review): `count` is reset to 0 on every iteration, so the
            # break below can only ever fire when countnum <= 0 -- looks like
            # a bug; confirm intended behavior.
            print datetime.datetime.fromtimestamp(binlogevent.timestamp).strftime('%Y-%m-%d %H:%M:%S') ,self.stream.log_file,self.stream.log_pos
            if count >= self.countnum:
                break
            if not self.stopnever:
                # Stop/skip conditions based on end/EOF position and time window.
                if (self.stream.log_file == self.endFile and self.stream.log_pos == self.endPos) or (
                        self.stream.log_file == self.eofFile and self.stream.log_pos == self.eofPos):
                    flagLastEvent = True  # NOTE(review): assigned but never read
                elif datetime.datetime.fromtimestamp(binlogevent.timestamp) < self.startTime:
                    # Before the requested start time: remember position, skip event.
                    if not (isinstance(binlogevent, RotateEvent) or isinstance(binlogevent, FormatDescriptionEvent)):
                        lastPos = binlogevent.packet.log_pos  # NOTE(review): never used afterwards
                    continue
                elif (self.stream.log_file not in self.binlogList) or (
                        self.endPos and self.stream.log_file == self.endFile and self.stream.log_pos > self.endPos) or (
                        self.stream.log_file == self.eofFile and self.stream.log_pos > self.eofPos) or (
                        datetime.datetime.fromtimestamp(binlogevent.timestamp) >= self.stopTime):
                    break
            if binlogevent.event_type in [23, 30]:  # WriteRowsEvent(WRITE_ROWS_EVENT)
                self.insert_row_stat(binlogevent)
                self.insert_row_col_stat(binlogevent)
            elif binlogevent.event_type in [24, 31]:  # UpdateRowsEvent(UPDATE_ROWS_EVENT)
                self.update_row_stat(binlogevent)
                self.update_row_col_stat(binlogevent)
                pass
            elif binlogevent.event_type in [25, 32]:  # DeleteRowsEvent(DELETE_ROWS_EVENT)
                self.delete_row_stat(binlogevent)
    def print_format(self, content):
        """Pretty-print *content* as indented, non-ASCII-preserving JSON.
        (Python 2 only: both the print statement and the `encoding` keyword
        of json.dumps require Python 2.)"""
        print json.dumps(content, encoding='utf-8', ensure_ascii=False, indent=4)
    def print_sort_stat(self, by='insert'):
        """Print the per-table statistics sorted (descending) by one DML kind.

        Args:
            by: 'insert' / 'update' / 'delete' (case-insensitive)
        """
        by = by.lower()  # normalise to lower case
        # NOTE(review): `stat` is computed but never used; sorted() below
        # re-invokes the generator.
        stat = self.iter_table_stat_format()
        # NOTE(review): dict.values()[0] only works on Python 2, where
        # values() returns a list.
        sorted_stat = sorted(
            self.iter_table_stat_format(),
            key=lambda stat: stat.values()[0]['table_dml_count'][by],
            reverse=True,
        )
        self.print_format(sorted_stat)
    def iter_table_stat_format(self):
        """Yield one {'schema.table': stat} dict per recorded table.

        Format: {'schema.table': xxx}
        """
        # iteritems() is Python 2 only.
        for schema, tables in self.table_stat_info.iteritems():
            for table, stat in tables.iteritems():
                key = '{schema}.{table}'.format(schema=schema, table=table)
                yield {key: stat}
def parse_args():
    """Parse the command line arguments of the binlog statistic script."""
    usage = """
    Description:
        The script parse MySQL binlog and statistic column.
    """
    # Create the parser and attach the usage description.
    parser = argparse.ArgumentParser(description=usage)
    # MySQL host
    parser.add_argument('--host', dest='host', action='store',
                        default='127.0.0.1', help='Connect MySQL host',
                        metavar='HOST')
    # MySQL port
    parser.add_argument('--port', dest='port', action='store',
                        default=3306, help='Connect MySQL port',
                        metavar='PORT', type=int)
    # MySQL username
    parser.add_argument('--username', dest='username', action='store',
                        default='root', help='Connect MySQL username',
                        metavar='USERNAME')
    # MySQL password
    parser.add_argument('--password', dest='password', action='store',
                        default='root', help='Connect MySQL password',
                        metavar='PASSWORD')
    # Binlog file name to start from
    parser.add_argument('--log-file', dest='log_file', action='store',
                        default=None, help='Specify a binlog name',
                        metavar='binlog-file-name')
    # Binlog file position to start from
    parser.add_argument('--log-pos', dest='log_pos', action='store',
                        default=None, help='Specify a binlog file pos',
                        metavar='binlog-file-pos', type=int)
    # Slave server id to register as
    parser.add_argument('--server-id', dest='server_id', action='store',
                        default=99999, help='Specify a slave server server-id',
                        metavar='server-id', type=int)
    # Slave uuid to register as
    parser.add_argument('--slave-uuid', dest='slave_uuid', action='store',
                        default='ca1e2b93-5d2f-11e6-b758-0800277643c8',
                        help='Specify a slave server uuid', metavar='slave-uuid')
    # Whether to keep parsing in blocking mode
    parser.add_argument('--blocking', dest='blocking', action='store',
                        default=False, help='Specify is bloking and parse, default False',
                        metavar='False/True')
    # Timestamp at which parsing should start
    # NOTE(review): `help` shadows the built-in of the same name (local scope only).
    help = 'Specify is start parse timestamp, default None, example: 2016-11-01 00:00:00'
    parser.add_argument('--start-time', dest='start_time', action='store',
                        default=None, help=help, metavar='start-time')
    # Sort key for the printed statistics
    parser.add_argument('--sorted-by', dest='sorted_by', action='store',
                        default='insert', help='Specify show statistic sort by, default: insert',
                        metavar='insert/update/delete')
    args = parser.parse_args()
    return args
def kill_sign_op(signum, frame):
    """SIGTERM handler: close the binlog stream, print the collected
    statistics and exit the process."""
    global mysql_binlog_stat  # module-level handle set by main()
    # NOTE(review): prefer `is not None` over `!= None`.
    if mysql_binlog_stat != None:  # only act if a run is in progress
        # Close the stream.
        mysql_binlog_stat.stream.close()
        # Print the collected statistics.
        mysql_binlog_stat.print_sort_stat()
    sys.exit(0)
# Module-level handle to the running stat object, shared with kill_sign_op.
mysql_binlog_stat = None
def main():
    """Entry point: install the SIGTERM handler, run the binlog parse with
    hard-coded connection settings and print the statistics (Python 2)."""
    global mysql_binlog_stat  # shared with the kill_sign_op signal handler
    # Register the SIGTERM handler so a plain `kill` still flushes the stats.
    signal.signal(signal.SIGTERM, kill_sign_op)
    # NOTE(review): the argparse-driven configuration (parse_args) was left
    # commented out in the original source; the settings below are hard-coded.
    host = '192.168.134.1'
    port = 3308
    user = 'admin_user'
    password = '<PASSWORD>$%^'
    startFile = 'mysql-bin.000008'
    endFile = 'mysql-bin.000009'
    startPos = 4
    connectionSettings = {'host': host, 'port': port, 'user': user, 'passwd': password}
    mysql_binlog_stat = MySQLBinlogStat(connectionSettings=connectionSettings, startFile=startFile,
                                        startPos=startPos, endFile=endFile, endPos=0,
                                        startTime='', stopTime='', only_schemas='',
                                        only_tables='', nopk=False, flashback=False, stopnever=False, countnum=1)
    try:
        mysql_binlog_stat.run_parse()
    except KeyboardInterrupt:  # graceful stop on Ctrl-C
        print 'force to exit...'
    except Exception as e:
        print traceback.format_exc()
    finally:
        # NOTE(review): the stream is intentionally left open here; the
        # close() call is commented out in the original source.
        pass
        # mysql_binlog_stat.stream.close()
    # Print the collected statistics.
    mysql_binlog_stat.print_sort_stat(by='insert')
if __name__ == '__main__':
main() | StarcoderdataPython |
1707522 | <reponame>ocarneiro/scraping
# coding: utf-8
"""Fetch every site listed in sites.json, strip its HTML to plain text and
save it to the per-site output file; failures are collected in errors.log."""
import json
import requests
from bs4 import BeautifulSoup as bs
from pprint import pprint
sites = {}
errors = {}  # url -> exception for failed downloads
print("====== scrap_sites.py =======")
print("loading site list...")
with open('sites.json', 'r') as file:
    content = file.read()
    sites = json.loads(content)
print("scraping........")
for h in sorted(list(sites.keys())):
    print(h)
    try:
        r = requests.get(h)
        soup = bs(r.text, 'html.parser')
        # Each entry is assumed to carry a 'filename' key -- TODO confirm
        # against the sites.json schema.
        with open(sites[h]['filename'], 'w') as output:
            print(soup.text, file=output)
    except Exception as e:
        errors[h] = e
if any(errors):
    print("saving errors")
    with open("errors.log", "w") as output:
        pprint(errors, stream=output)
print("done!")
| StarcoderdataPython |
1673740 | <gh_stars>0
from .columnar import Columnar
| StarcoderdataPython |
5178201 | # Copyright (c) 2019 zfit
from collections import OrderedDict
from contextlib import ExitStack
from typing import List, Tuple, Union, Dict, Mapping
import warnings
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
import uproot
import numpy as np
import pandas as pd
# from ..settings import types as ztypes
import zfit
from zfit import ztf
from zfit.core.interfaces import ZfitSpace
from ..util.cache import Cachable, invalidates_cache
from ..util.execution import SessionHolderMixin
from .baseobject import BaseObject
from .dimension import BaseDimensional
from .interfaces import ZfitData
from .limits import Space, convert_to_space, convert_to_obs_str
from ..settings import ztypes
from ..util import ztyping
from ..util.container import convert_to_container
from ..util.exception import LogicalUndefinedOperationError, NoSessionSpecifiedError, ShapeIncompatibleError, \
ObsIncompatibleError
from ..util.temporary import TemporarilySet
class Data(SessionHolderMixin, Cachable, ZfitData, BaseDimensional, BaseObject):
    """Tensor-backed data holder: couples a dataset with observables, optional
    per-event weights and a data range that is applied as a cut on read."""

    def __init__(self, dataset: Union[tf.data.Dataset, "LightDataset"], obs: ztyping.ObsTypeInput = None,
                 name: str = None, weights=None, iterator_feed_dict: Dict = None,
                 dtype: tf.DType = None):
        """Create a data holder from a `dataset` used to feed into `models`.

        Args:
            dataset (): A dataset storing the actual values
            obs (): Observables where the data is defined in
            name (): Name of the `Data`
            weights (): Per-event weights; must be 1-D (see set_weights)
            iterator_feed_dict ():
            dtype ():
        """
        if name is None:
            name = "Data"
        if dtype is None:
            dtype = ztypes.float
        super().__init__(name=name)
        if iterator_feed_dict is None:
            iterator_feed_dict = {}
        self._permutation_indices_data = None
        self._next_batch = None
        self._dtype = dtype
        self._nevents = None
        self._weights = None
        self._data_range = None
        self._set_space(obs)
        self._original_obs = self.space.obs
        self._data_range = self.space  # TODO proper data cuts: currently set so that the cuts in all dims are applied
        self.dataset = dataset
        self._name = name
        self.iterator_feed_dict = iterator_feed_dict
        self.iterator = None
        self.set_weights(weights=weights)

    @property
    def nevents(self):
        """Number of events; computed lazily from the value tensor."""
        nevents = self._nevents
        if nevents is None:
            nevents = self._get_nevents()
        return nevents

    @property
    def dtype(self):
        """dtype the values are cast to when read."""
        return self._dtype

    def _set_space(self, obs: Space):
        # Normalise the obs input into a Space with auto-filled axes.
        obs = convert_to_space(obs)
        self._check_n_obs(space=obs)
        obs = obs.with_autofill_axes(overwrite=True)
        self._space = obs

    @property
    def data_range(self):
        """Range applied as a cut when reading values; defaults to the space."""
        data_range = self._data_range
        if data_range is None:
            data_range = self.space
        return data_range

    @invalidates_cache
    def set_data_range(self, data_range):
        """Set (temporarily, via the returned context object) the data range."""
        # warnings.warn("Setting the data_range may currently has an unexpected behavior and does not affect the range."
        #               "If you set it once in the beginning, it's ok. Otherwise, it's currently unsafe.")
        data_range = self._check_input_data_range(data_range=data_range)

        def setter(value):
            self._data_range = value

        def getter():
            return self._data_range

        return TemporarilySet(value=data_range, setter=setter, getter=getter)

    @property
    def weights(self):
        """Per-event weights (or None if unweighted)."""
        return self._weights

    @invalidates_cache
    def set_weights(self, weights: ztyping.WeightsInputType):
        """Set (temporarily) the weights of the dataset.

        Args:
            weights (`tf.Tensor`, np.ndarray, None):
        """
        if weights is not None:
            weights = ztf.convert_to_tensor(weights)
            weights = ztf.to_real(weights)
            if weights.shape.ndims != 1:
                raise ShapeIncompatibleError("Weights have to be 1-Dim objects.")

        def setter(value):
            self._weights = value

        def getter():
            return self.weights

        return TemporarilySet(value=weights, getter=getter, setter=setter)

    @property
    def space(self) -> "ZfitSpace":
        """Observable space this data is defined in."""
        space = self._space
        # if space.limits is None:
        #     if self._data_range is not None:
        #         space = self._data_range
        return space

    @property
    def iterator(self):
        """Dataset iterator; created lazily on first access."""
        if self._iterator is None:
            self.initialize()
        return self._iterator

    @iterator.setter
    def iterator(self, value):
        self._iterator = value

    # constructors
    @classmethod
    def from_root_iter(cls, path, treepath, branches=None, entrysteps=None, name=None, **kwargs):
        """Create a Data fed by an uproot iterator (experimental, see warning)."""
        # branches = convert_to_container(branches)
        warnings.warn(
            "Using the iterator is hardcore and will most probably fail! Don't use it (yet) if you don't fully "
            "understand what happens.")

        def uproot_generator():
            # Stream chunks of the requested branches from the ROOT file.
            for data in uproot.iterate(path=path, treepath=treepath,
                                       branches=branches, entrysteps=entrysteps, **kwargs):
                data = np.array([data[branch] for branch in branches])
                yield data

        dataset = tf.data.Dataset.from_generator(uproot_generator, output_types=ztypes.float)
        dataset.prefetch(2)
        return Data(dataset=dataset, name=name)

    @classmethod
    def from_root(cls, path: str, treepath: str, branches: List[str] = None, branches_alias: Dict = None,
                  weights: ztyping.WeightsStrInputType = None,
                  name: str = None,
                  dtype: tf.DType = None,
                  root_dir_options=None) -> "Data":
        """Create a `Data` from a ROOT file. Arguments are passed to `uproot`.

        Args:
            path (str):
            treepath (str):
            branches (List[str]]):
            branches_alias (dict): A mapping from the `branches` (as keys) to the actual `observables` (as values).
                This allows to have different `observable` names, independent of the branch name in the file.
            weights (tf.Tensor, None, np.ndarray, str]): Weights of the data. Has to be 1-D and match the shape
                of the data (nevents). Can be a column of the ROOT file by using a string corresponding to a
                column.
            name (str):
            root_dir_options ():

        Returns:
            `zfit.Data`:
        """
        if branches_alias is None and branches is None:
            raise ValueError("Either branches or branches_alias has to be specified.")
        if branches_alias is None:
            branches_alias = {}
        if branches is None:
            branches = list(branches_alias.values())
        weights_are_branch = isinstance(weights, str)
        branches = convert_to_container(branches)
        if root_dir_options is None:
            root_dir_options = {}

        def uproot_loader():
            # Eagerly load the requested branches (plus the weight branch,
            # when weights is a branch name) into numpy arrays.
            root_tree = uproot.open(path, **root_dir_options)[treepath]
            if weights_are_branch:
                branches_with_weights = branches + [weights]
            else:
                branches_with_weights = branches
            data = root_tree.arrays(branches_with_weights, namedecode="utf-8")
            data_np = np.array([data[branch] for branch in branches])
            if weights_are_branch:
                weights_np = data[weights]
            else:
                weights_np = None
            return data_np.transpose(), weights_np

        data, weights_np = uproot_loader()
        if not weights_are_branch:
            weights_np = weights
        shape = data.shape  # NOTE(review): unused local
        dataset = LightDataset.from_tensor(data)
        # dataset = dataset.repeat()
        obs = [branches_alias.get(branch, branch) for branch in branches]
        return Data(dataset=dataset, obs=obs, weights=weights_np, name=name, dtype=dtype)

    @classmethod
    def from_pandas(cls, df: pd.DataFrame, obs: ztyping.ObsTypeInput = None, weights: ztyping.WeightsInputType = None,
                    name: str = None, dtype: tf.DType = None):
        """Create a `Data` from a pandas DataFrame. If `obs` is `None`, columns are used as obs.

        Args:
            df (`pandas.DataFrame`):
            weights (tf.Tensor, None, np.ndarray, str]): Weights of the data. Has to be 1-D and match the shape
                of the data (nevents).
            obs (`zfit.Space`):
            name (str):
        """
        if obs is None:
            obs = list(df.columns)
        array = df.values
        return cls.from_numpy(obs=obs, array=array, weights=weights, name=name, dtype=dtype)

    @classmethod
    def from_numpy(cls, obs: ztyping.ObsTypeInput, array: np.ndarray, weights: ztyping.WeightsInputType = None,
                   name: str = None, dtype: tf.DType = None):
        """Create `Data` from a `np.array`.

        Args:
            obs ():
            array (numpy.ndarray):
            name (str):

        Returns:
            zfit.Data:
        """
        if not isinstance(array, np.ndarray):
            raise TypeError("`array` has to be a `np.ndarray`. Is currently {}".format(type(array)))
        if dtype is None:
            dtype = ztypes.float
        tensor = tf.cast(array, dtype=dtype)
        return cls.from_tensor(obs=obs, tensor=tensor, weights=weights, name=name, dtype=dtype)

    @classmethod
    def from_tensor(cls, obs: ztyping.ObsTypeInput, tensor: tf.Tensor, weights: ztyping.WeightsInputType = None,
                    name: str = None, dtype: tf.DType = None) -> "Data":
        """Create a `Data` from a `tf.Tensor`. `Value` simply returns the tensor (in the right order).

        Args:
            obs (Union[str, List[str]):
            tensor (`tf.Tensor`):
            name (str):

        Returns:
            zfit.core.Data:
        """
        dataset = LightDataset.from_tensor(tensor=tensor)
        return Data(dataset=dataset, obs=obs, name=name, weights=weights, dtype=dtype)

    def to_pandas(self, obs: ztyping.ObsTypeInput = None):
        """Create a `pd.DataFrame` from `obs` as columns and return it.

        Args:
            obs (): The observables to use as columns. If `None`, all observables are used.

        Returns:
        """
        values = self.value(obs=obs)
        if obs is None:
            obs = self.obs
        obs_str = convert_to_obs_str(obs)
        values = self.sess.run(values)
        df = pd.DataFrame(data=values, columns=obs_str)
        return df

    def initialize(self):
        """Create and initialise the dataset iterator in the current session."""
        iterator = tf.compat.v1.data.make_initializable_iterator(self.dataset)
        self.sess.run(iterator.initializer, self.iterator_feed_dict)
        self.iterator = iterator

    def get_iteration(self):
        """Return the raw value tensor (LightDataset) or the cached next batch."""
        if isinstance(self.dataset, LightDataset):
            return self.dataset.value()
        if self._next_batch is None:
            self._next_batch = self.iterator.get_next()
        return self._next_batch

    def unstack_x(self, obs: ztyping.ObsTypeInput = None, always_list: bool = False):
        """Return the unstacked data: a list of tensors or a single Tensor.

        Args:
            obs (): which observables to return
            always_list (bool): If True, always return a list (also if length 1)

        Returns:
            List(tf.Tensor)
        """
        return ztf.unstack_x(self._value_internal(obs=obs))

    def value(self, obs: ztyping.ObsTypeInput = None):
        """Return the (cut and column-sorted) data tensor, ordered by *obs*."""
        return self._value_internal(obs=obs)

    def _cut_data(self, value, obs=None):
        # Apply the data range as a cut: keep rows inside any of the limits.
        if self.data_range.limits is not None:
            data_range = self.data_range.with_obs(obs=obs)

            inside_limits = []
            # value = tf.transpose(value)
            for lower, upper in data_range.iter_limits():
                # NOTE(review): the names are swapped relative to what they
                # compute (less_equal upper -> below upper and vice versa);
                # the logical_and result is correct either way.
                above_lower = tf.reduce_all(input_tensor=tf.less_equal(value, upper), axis=1)
                below_upper = tf.reduce_all(input_tensor=tf.greater_equal(value, lower), axis=1)
                inside_limits.append(tf.logical_and(above_lower, below_upper))
            inside_any_limit = tf.reduce_any(input_tensor=inside_limits, axis=0)  # has to be inside one limit
            value = tf.boolean_mask(tensor=value, mask=inside_any_limit)
            # value = tf.transpose(value)

        return value

    def _value_internal(self, obs: ztyping.ObsTypeInput = None):
        # Read, cut (in the original obs order) and sort the columns to *obs*.
        if obs is not None:
            obs = convert_to_obs_str(obs)
        raw_value = self._value()
        value = self._cut_data(raw_value, obs=self._original_obs)
        value_sorted = self._sort_value(value=value, obs=obs)
        return value_sorted

    def _value(self):
        values = self.get_iteration()
        # TODO(Mayou36): add conversion to right dimension? (n_events, n_obs)?  # check if 1-D?
        if len(values.shape.as_list()) == 0:
            values = tf.expand_dims(values, -1)
        if len(values.shape.as_list()) == 1:
            values = tf.expand_dims(values, -1)

        # cast data to right type
        if not values.dtype == self.dtype:
            values = tf.cast(values, dtype=self.dtype)
        return values

    def _sort_value(self, value, obs: Tuple[str]):
        # Permute columns so they appear in the order requested by *obs*.
        obs = convert_to_container(value=obs, container=tuple)
        perm_indices = self.space.axes if self.space.axes != tuple(range(value.shape[-1])) else False

        # permutate = perm_indices is not None
        if obs:
            if not frozenset(obs) <= frozenset(self.obs):
                raise ValueError("The observable(s) {} are not contained in the dataset. "
                                 "Only the following are: {}".format(frozenset(obs) - frozenset(self.obs),
                                                                     self.obs))
            perm_indices = self.space.get_axes(obs=obs)
            # values = list(values[self.obs.index(o)] for o in obs if o in self.obs)
        if perm_indices:
            value = ztf.unstack_x(value)
            value = list(value[i] for i in perm_indices)
            value = ztf.stack_x(value)

        return value

    # TODO(Mayou36): use Space to permute data?
    # TODO(Mayou36): raise error is not obs <= self.obs?
    @invalidates_cache
    def sort_by_axes(self, axes: ztyping.AxesTypeInput, allow_superset: bool = False):
        """Temporarily reorder the space by *axes* (returned context object)."""
        if not allow_superset:
            if not frozenset(axes) <= frozenset(self.axes):
                raise ValueError("The observable(s) {} are not contained in the dataset. "
                                 "Only the following are: {}".format(frozenset(axes) - frozenset(self.axes),
                                                                     self.axes))
        space = self.space.with_axes(axes=axes)

        def setter(value):
            self._space = value

        def getter():
            return self.space

        return TemporarilySet(value=space, setter=setter, getter=getter)

    @invalidates_cache
    def sort_by_obs(self, obs: ztyping.ObsTypeInput, allow_superset: bool = False):
        """Temporarily reorder the space by *obs* (returned context object)."""
        if not allow_superset:
            if not frozenset(obs) <= frozenset(self.obs):
                raise ValueError("The observable(s) {} are not contained in the dataset. "
                                 "Only the following are: {}".format(frozenset(obs) - frozenset(self.obs),
                                                                     self.obs))

        space = self.space.with_obs(obs=obs)

        def setter(value):
            self._space = value

        def getter():
            return self.space

        return TemporarilySet(value=space, setter=setter, getter=getter)

    def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
        # TF tensor-conversion hook: expose the value tensor to TF ops.
        del name
        if dtype is not None:
            if dtype != self.dtype:
                # return ValueError("From Mayou36", self.dtype)
                return NotImplemented
        if as_ref:
            # return "NEVER READ THIS"
            raise LogicalUndefinedOperationError("There is no ref for the `Data`")
        else:
            return self.value()

    def _AsTensor(self):
        # Used by the operator overloads below.
        return self.value()

    @staticmethod
    def _OverloadAllOperators():  # pylint: disable=invalid-name
        """Register overloads for all operators."""
        for operator in tf.Tensor.OVERLOADABLE_OPERATORS:
            Data._OverloadOperator(operator)
        # For slicing, bind getitem differently than a tensor (use SliceHelperVar
        # instead)
        # pylint: disable=protected-access
        setattr(Data, "__getitem__", array_ops._SliceHelperVar)

    @staticmethod
    def _OverloadOperator(operator):  # pylint: disable=invalid-name
        """Defer an operator overload to `ops.Tensor`.

        We pull the operator out of ops.Tensor dynamically to avoid ordering issues.

        Args:
            operator: string. The operator name.
        """
        tensor_oper = getattr(tf.Tensor, operator)

        def _run_op(a, *args):
            # pylint: disable=protected-access
            value = a._AsTensor()
            return tensor_oper(value, *args)

        # Propagate __doc__ to wrapper
        try:
            _run_op.__doc__ = tensor_oper.__doc__
        except AttributeError:
            pass

        setattr(Data, operator, _run_op)

    def _check_input_data_range(self, data_range):
        # A new range must cover exactly the same observables as the current one.
        data_range = self.convert_sort_space(limits=data_range)
        if not frozenset(self.data_range.obs) == frozenset(data_range.obs):
            raise ObsIncompatibleError(f"Data range has to cover the full observable space {self.data_range.obs}, not "
                                       f"only {data_range.obs}")
        return data_range

    # TODO(Mayou36): refactor with pdf or other range things?
    def convert_sort_space(self, obs: ztyping.ObsTypeInput = None, axes: ztyping.AxesTypeInput = None,
                           limits: ztyping.LimitsTypeInput = None) -> Union[Space, None]:
        """Convert the inputs (using eventually `obs`, `axes`) to :py:class:`~zfit.Space` and sort them according to
        own `obs`.

        Args:
            obs ():
            axes ():
            limits ():

        Returns:
        """
        if obs is None:  # for simple limits to convert them
            obs = self.obs
        space = convert_to_space(obs=obs, axes=axes, limits=limits)

        self_space = self._space
        if self_space is not None:
            space = space.with_obs_axes(self_space.get_obs_axes(), ordered=True, allow_subset=True)
        return space

    def _get_nevents(self):
        # Number of rows of the (cut) value tensor.
        nevents = tf.shape(input=self.value())[0]
        return nevents
class SampleData(Data):
    """Data produced by sampling a model; behaves like a fixed Data set."""
    _cache_counting = 0  # process-wide counter for unique holder names

    def __init__(self, dataset: Union[tf.data.Dataset, "LightDataset"], sample_holder: tf.Tensor,
                 obs: ztyping.ObsTypeInput = None, weights=None, name: str = None,
                 dtype: tf.DType = ztypes.float):
        # NOTE(review): `sample_holder` is accepted but never stored or used here.
        super().__init__(dataset, obs, name=name, weights=weights, iterator_feed_dict=None, dtype=dtype)

    @classmethod
    def get_cache_counting(cls):
        """Return the current counter value and increment it."""
        counting = cls._cache_counting
        cls._cache_counting += 1
        return counting

    @classmethod
    def from_sample(cls, sample: tf.Tensor, obs: ztyping.ObsTypeInput, name: str = None,
                    weights=None):
        """Wrap an already-sampled tensor into a SampleData."""
        dataset = LightDataset.from_tensor(sample)
        return SampleData(dataset=dataset, sample_holder=sample, obs=obs, name=name, weights=weights)
class Sampler(Data):
    """Data whose content is (re)generated by sampling a model; `resample()`
    refreshes the underlying TF variable holding the sample."""
    _cache_counting = 0  # process-wide counter for unique holder names

    def __init__(self, dataset: Union[tf.data.Dataset, "LightDataset"], sample_holder: tf.Variable,
                 n_holder: tf.Variable, weights=None,
                 fixed_params: Dict["zfit.Parameter", ztyping.NumericalScalarType] = None,
                 obs: ztyping.ObsTypeInput = None, name: str = None,
                 dtype: tf.DType = ztypes.float):

        super().__init__(dataset, obs, name=name, weights=weights, iterator_feed_dict=None, dtype=dtype)
        if fixed_params is None:
            fixed_params = OrderedDict()
        if isinstance(fixed_params, (list, tuple)):
            # Freeze the current value of each parameter given as a sequence.
            fixed_params = OrderedDict((param, self.sess.run(param)) for param in fixed_params)

        self._initial_resampled = False  # guards value() until first resample()

        # self.sess.run(sample_holder.initializer)
        self.fixed_params = fixed_params
        self.sample_holder = sample_holder
        self.n = None
        self._n_holder = n_holder

    @property
    def n_samples(self):
        """Variable (or value) holding the number of samples to draw."""
        return self._n_holder

    def _value_internal(self, obs: ztyping.ObsTypeInput = None):
        if not self._initial_resampled:
            raise RuntimeError(
                "No data generated yet. Use `resample()` to generate samples or directly use `model.sample()`"
                "for single-time sampling.")
        return super()._value_internal(obs)

    @classmethod
    def get_cache_counting(cls):
        """Return the current counter value and increment it."""
        counting = cls._cache_counting
        cls._cache_counting += 1
        return counting

    @classmethod
    def from_sample(cls, sample: tf.Tensor, n_holder: tf.Variable, obs: ztyping.ObsTypeInput, fixed_params=None,
                    name: str = None, weights=None):
        """Create a Sampler from a sample tensor, stored in a TF variable."""
        if fixed_params is None:
            fixed_params = []
        from tensorflow.python.ops.variables import VariableV1
        sample_holder = VariableV1(initial_value=sample, trainable=False,
                                   name="sample_data_holder_{}".format(cls.get_cache_counting()))
        dataset = LightDataset.from_tensor(sample_holder)

        return Sampler(dataset, fixed_params=fixed_params, sample_holder=sample_holder,
                       n_holder=n_holder, obs=obs, name=name, weights=weights)

    def resample(self, param_values: Mapping = None, n: Union[int, tf.Tensor] = None):
        """Update the sample by newly sampling. This affects any object that used this data already.

        All params that are not in the attribute `fixed_params` will use their current value for
        the creation of the new sample. The value can also be overwritten for one sampling by providing
        a mapping with `param_values` from `Parameter` to the temporary `value`.

        Args:
            param_values (Dict): a mapping from :py:class:`~zfit.Parameter` to a `value`. For the current sampling,
                `Parameter` will use the `value`.
            n (int, tf.Tensor): the number of samples to produce. If the `Sampler` was created with
                anything else then a numerical or tf.Tensor, this can't be used.
        """
        if n is None:
            n = self.n

        temp_param_values = self.fixed_params.copy()
        if param_values is not None:
            temp_param_values.update(param_values)

        with ExitStack() as stack:
            # Temporarily pin all fixed/overridden parameters to their values.
            _ = [stack.enter_context(param.set_value(val)) for param, val in temp_param_values.items()]

            if not (n and self._initial_resampled):  # we want to load and make sure that it's initialized
                # means it's handled inside the function
                # TODO(Mayou36): check logic; what if new_samples loaded? get's overwritten by initializer
                # fixed with self.n, needs cleanup
                if not (isinstance(self.n_samples, str) or self.n_samples is None):
                    self.sess.run(self.n_samples.initializer)
            if n:
                if not isinstance(self.n_samples, tf.Variable):
                    raise RuntimeError("Cannot set a new `n` if not a Tensor-like object was given")
                self.n_samples.load(value=n, session=self.sess)

            # Re-running the holder's initializer draws a fresh sample in place.
            self.sess.run(self.sample_holder.initializer)
            self._initial_resampled = True
def _dense_var_to_tensor(var, dtype=None, name=None, as_ref=False):
    """TF conversion hook so ops can consume Data objects directly."""
    return var._dense_var_to_tensor(dtype=dtype, name=name, as_ref=as_ref)


ops.register_tensor_conversion_function(Data, _dense_var_to_tensor)

# Session-run conversions: fetching/feeding a Data uses its value tensor.
fetch_function = lambda data: ([data.value()],
                               lambda val: val[0])
feed_function = lambda data, feed_val: [(data.value(), feed_val)]
feed_function_for_partial_run = lambda data: [data.value()]

from tensorflow.python.client.session import register_session_run_conversion_functions

# ops.register_dense_tensor_like_type()

register_session_run_conversion_functions(tensor_type=Data, fetch_function=fetch_function,
                                          feed_function=feed_function,
                                          feed_function_for_partial_run=feed_function_for_partial_run)

# Give Data all tensor operators (+, -, [], ...).
Data._OverloadAllOperators()
class LightDataset:
    """Minimal dataset wrapper around a single tensor.

    Anything that is not already a ``tf.Tensor`` is converted on construction.
    """

    def __init__(self, tensor):
        # Convert plain arrays/sequences; pass genuine tensors through untouched.
        self.tensor = tensor if isinstance(tensor, tf.Tensor) else ztf.convert_to_tensor(tensor)

    @classmethod
    def from_tensor(cls, tensor):
        """Alternate constructor mirroring ``__init__`` for API symmetry."""
        return cls(tensor=tensor)

    def value(self):
        """Return the wrapped tensor."""
        return self.tensor
if __name__ == '__main__':
    # Smoke test: load a small ROOT tree as zfit Data and time one
    # function + gradient evaluation inside a TF session.
    # from skhep_testdata import data_path
    path_root = "/data/uni/b2k1ee/classification_new/2012/"
    small_root = 'small.root'
    #
    path_root += small_root
    # path_root = data_path("uproot-Zmumu.root")
    branches = ['B_PT', 'B_P']  # b needed currently -> uproot
    data = zfit.Data.from_root(path=path_root, treepath='DecayTree', branches=branches)
    import time
    with tf.compat.v1.Session() as sess:
        # data.initialize()
        x = data.value()
        for i in range(1):
            print(i)
            try:
                # Arbitrary nonlinear expression to exercise graph building.
                func = tf.math.log(x) * tf.sin(x) * tf.reduce_mean(
                    input_tensor=x ** 2 - tf.cos(x) ** 2) / tf.reduce_sum(input_tensor=x)
                start = time.time()
                result_grad = sess.run(tf.gradients(ys=func, xs=x))
                result = sess.run(func)
                end = time.time()
                print("time needed", (end - start))
            except tf.errors.OutOfRangeError:
                # Raised when an iterator-backed dataset is exhausted.
                print("finished at i = ", i)
                break
        print(np.shape(result))
        print(result[:, :10])
        print(result_grad)
    # directory = open_tree[]
    # directory = directory['DecayTree']
    # directory = directory['B_P']
| StarcoderdataPython |
1810716 | import glob
import os
import numpy as np
from sklearn.utils import shuffle
from scipy import misc
from PIL import Image
import pandas as pd
import matplotlib
from matplotlib import pyplot as plt
import tensorflow as tf
#matplotlib.interactive(True)
# Fix the RNG seed so the dataset shuffling below is reproducible.
random_seed = 90210
np.random.seed(random_seed)
# Target crop size (pixels) and channel count for the training images.
img_w, img_h = 64, 64
img_channels = 3
def crop_center(file, img, cropx, cropy):
    """Return the central ``cropx`` x ``cropy`` window of ``img`` (shape H, W, C).

    Side effect: images smaller than the requested crop are deleted from disk
    (``file``) as a dataset-cleaning step; a best-effort (possibly undersized)
    crop is still returned for them.
    """
    print(file)  # progress/debug output, kept from the original pipeline
    y, x, c = img.shape
    # Compare against the *requested* crop size instead of a hard-coded 64.
    if x < cropx or y < cropy:
        os.remove(file)
    # Clamp the window origin to 0: for undersized images a negative start
    # would wrap around via Python's negative indexing and return garbage.
    startx = max(0, x // 2 - (cropx // 2))
    starty = max(0, y // 2 - (cropy // 2))
    return img[starty:starty + cropy, startx:startx + cropx]
# Raw scraped images (input) and the resized/cleaned dataset (output).
path = '/home/suroot/Documents/data/simpsons/data/'
outpath = '/home/suroot/Documents/data/simpsons/data128/'
def clean(path, size):
    """Convert every ``*/*.jpg`` under ``path`` into an RGB PNG thumbnail
    (at most ``size`` x ``size``) written to the module-level ``outpath``,
    numbering files sequentially.
    """
    allFiles = glob.glob(os.path.join(path, "*/*.jpg"))
    ind = 0
    for infile in allFiles:
        try:
            im = Image.open(infile).convert('RGB')
            savepath = outpath + str(ind) + ".png"
            ind += 1
            print(savepath)
            print(im.size)
            im.thumbnail((size, size), Image.ANTIALIAS)
            im.save(savepath, "PNG")
        except IOError:
            # BUG FIX: report the input file — `savepath` may be unbound here
            # (NameError) when Image.open() itself raised.
            print("cannot create thumbnail for " + infile)
#clean(path, 128)
def create_nparray_from_data(path, subset = -1):
    """Load all PNGs under `path`, centre-crop each to (img_w, img_h), and
    return a float array in [0, 1] of shape (N, img_h, img_w, 3).

    `subset > 0` limits the number of files (useful for quick tests). Pops up
    one randomly chosen image first for visual inspection.
    """
    allFiles = glob.glob(os.path.join(path, "*.png"))
    if(subset > 0):
        allFiles = allFiles[0:subset]
    data = []
    # NOTE: set np.random.seed for reproducability
    shuffleFiles = shuffle(sorted(allFiles))

    # Show one random sample so the user can sanity-check the crop.
    img_test = shuffleFiles[ int(np.random.random() * len(shuffleFiles)) ]
    print("random file: " + img_test)
    x = misc.imread(img_test)
    x = x/255
    x = crop_center(img_test, x,img_w,img_h)
    print(x.shape)
    plt.imshow( x )
    plt.show()

    # NOTE(review): scipy.misc.imread was removed in modern SciPy —
    # imageio.imread is the drop-in replacement; confirm the pinned version.
    data = np.array( [crop_center(x, misc.imread(x), img_w, img_h) for x in shuffleFiles] )
    data = data/255
    print(data.shape)
    return data
#data = create_nparray_from_data(outpath)
#print(data.shape)
#np.save("./data/data.npy", data)

# Load the pre-built dataset (N, 64, 64, 3), already scaled to [0, 1].
#data = np.load("./data/data-small.npy")
data = np.load("./data/data.npy")
print(data.shape)
print("DONE DONE DONE")

#img_test = data[ int(np.random.random() * data.shape[0]),:,:,: ]
#plt.imshow(img_test)
#plt.show()

# Preview a random 6x6 grid of training images before starting.
idx = np.random.randint(0, data.shape[0], size=36)
fig, axes = plt.subplots(6, 6, sharex=True, sharey=True, figsize=(5,5),)
for ii, ax in zip(idx, axes.flatten()):
    ax.imshow(data[ii,:,:,:], aspect='equal')
    ax.xaxis.set_visible(False)
    ax.yaxis.set_visible(False)
plt.subplots_adjust(wspace=0, hspace=0)
plt.show()

input("Press Enter to start training.")
###################################################################################
## Model
###################################################################################
def model_inputs(real_dim, z_dim):
    """Create the two graph placeholders: real images and latent noise.

    Returns a (inputs_real, inputs_z) tuple of float32 placeholders with a
    free batch dimension.
    """
    real = tf.placeholder(tf.float32, (None,) + tuple(real_dim), name='input_real')
    noise = tf.placeholder(tf.float32, (None, z_dim), name='input_z')
    return real, noise
def generator(z, output_dim, reuse=False, alpha=0.2, training=True):
    """DCGAN generator: project `z` to 4x4x1024, upsample to 64x64x`output_dim`.

    Args:
        z: latent noise tensor, shape (batch, z_dim).
        output_dim: number of channels of the generated image.
        reuse: reuse variables of an existing 'generator' scope.
        alpha: leaky-ReLU negative slope.
        training: batch-norm training mode.
    Returns:
        tanh-activated image tensor in [-1, 1], shape (batch, 64, 64, output_dim).
    """
    with tf.variable_scope('generator', reuse=reuse):
        # First fully connected layer
        x1 = tf.layers.dense(z, 4*4*1024)
        # Reshape it to start the convolutional stack
        x1 = tf.reshape(x1, (-1, 4, 4, 1024))
        x1 = tf.layers.batch_normalization(x1, training=training)
        x1 = tf.maximum(alpha * x1, x1)
        # 4x4x1024 now
        x2 = tf.layers.conv2d_transpose(x1, 512, 5, strides=2, padding='same')
        x2 = tf.layers.batch_normalization(x2, training=training)
        x2 = tf.maximum(alpha * x2, x2)
        # 8x8x512 now
        x3 = tf.layers.conv2d_transpose(x2, 256, 5, strides=2, padding='same')
        x3 = tf.layers.batch_normalization(x3, training=training)
        x3 = tf.maximum(alpha * x3, x3)
        # 16x16x256 now
        x4 = tf.layers.conv2d_transpose(x3, 128, 5, strides=2, padding='same')
        x4 = tf.layers.batch_normalization(x4, training=training)
        x4 = tf.maximum(alpha * x4, x4)
        # 32x32x128 now

        # Output layer
        # BUG FIX: upsample from x4 (32x32), not x3 (16x16). The original fed
        # x3 here, which made the x4 block dead code and produced a 32x32
        # image instead of the 64x64 the comments (and real_size) require.
        logits = tf.layers.conv2d_transpose(x4, output_dim, 5, strides=2, padding='same')
        # 64x64x3 now
        out = tf.tanh(logits)
        return out
def discriminator(x, reuse=False, alpha=0.2):
    """DCGAN discriminator: strided conv stack with leaky ReLUs -> one logit.

    Returns (sigmoid probability, raw logits).
    """
    with tf.variable_scope('discriminator', reuse=reuse):
        # Input layer is 64x64x3
        x1 = tf.layers.conv2d(x, 128, 5, strides=2, padding='same')
        relu1 = tf.maximum(alpha * x1, x1)
        # 32x32x128 (the original comment said 16x16x64, which is wrong)
        x2 = tf.layers.conv2d(relu1, 256, 5, strides=2, padding='same')
        bn2 = tf.layers.batch_normalization(x2, training=True)
        relu2 = tf.maximum(alpha * bn2, bn2)
        # 16x16x256
        x3 = tf.layers.conv2d(relu2, 512, 5, strides=2, padding='same')
        bn3 = tf.layers.batch_normalization(x3, training=True)
        relu3 = tf.maximum(alpha * bn3, bn3)
        # 8x8x512 for a 64x64 input
        # Flatten it
        # NOTE(review): for a 64x64 input this packs 8*8*512 values into rows
        # of 4*4*512, silently quadrupling the batch dimension — the stack
        # looks written for 32x32 inputs; confirm the intended input size.
        flat = tf.reshape(relu3, (-1, 4*4*512))
        logits = tf.layers.dense(flat, 1)
        out = tf.sigmoid(logits)
        return out, logits
def model_loss(input_real, input_z, output_dim, alpha=0.2):
    """
    Get the loss for the discriminator and generator
    :param input_real: Images from the real dataset
    :param input_z: Z input
    :param output_dim: The number of channels in the output image
    :param alpha: leaky-ReLU slope passed to both networks
    :return: A tuple of (discriminator loss, generator loss)
    """
    g_model = generator(input_z, output_dim, alpha=alpha)
    d_model_real, d_logits_real = discriminator(input_real, alpha=alpha)
    d_model_fake, d_logits_fake = discriminator(g_model, reuse=True, alpha=alpha)

    # Standard GAN losses: reals should score 1 and fakes 0 for D; the
    # generator is rewarded when its fakes score 1.
    d_loss_real = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real, labels=tf.ones_like(d_model_real)))
    d_loss_fake = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.zeros_like(d_model_fake)))
    g_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.ones_like(d_model_fake)))

    d_loss = d_loss_real + d_loss_fake

    return d_loss, g_loss
def model_opt(d_loss, g_loss, learning_rate, beta1):
    """Build the Adam update operations for discriminator and generator.

    :param d_loss: Discriminator loss Tensor
    :param g_loss: Generator loss Tensor
    :param learning_rate: Learning Rate Placeholder
    :param beta1: The exponential decay rate for the 1st moment in the optimizer
    :return: A tuple of (discriminator training operation, generator training operation)
    """
    # Partition the trainable variables by their variable-scope prefix.
    trainables = tf.trainable_variables()
    disc_vars = [v for v in trainables if v.name.startswith('discriminator')]
    gen_vars = [v for v in trainables if v.name.startswith('generator')]

    # Run batch-norm moving-average updates before each optimizer step.
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        d_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(d_loss, var_list=disc_vars)
        g_train_opt = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(g_loss, var_list=gen_vars)

    return d_train_opt, g_train_opt
class GAN:
    """Bundles the DCGAN graph: inputs, losses and optimizer ops.

    Resets the default graph on construction, so only one GAN can exist per
    process at a time.
    """

    def __init__(self, real_size, z_size, learning_rate, alpha=0.2, beta1=0.5):
        tf.reset_default_graph()

        self.input_real, self.input_z = model_inputs(real_size, z_size)

        self.d_loss, self.g_loss = model_loss(self.input_real, self.input_z,
                                              real_size[2], alpha=alpha)

        self.d_opt, self.g_opt = model_opt(self.d_loss, self.g_loss, learning_rate, beta1)
def view_samples(epoch, samples, nrows, ncols, figsize=(5,5)):
    """Render one saved batch (`samples[epoch]`) as an nrows x ncols grid."""
    fig, axes = plt.subplots(figsize=figsize, nrows=nrows, ncols=ncols,
                             sharey=True, sharex=True)
    for ax, img in zip(axes.flatten(), samples[epoch]):
        ax.axis('off')
        # Rescale from tanh output range to displayable 0-255 bytes.
        img = ((img - img.min())*255 / (img.max() - img.min())).astype(np.uint8)
        # NOTE(review): 'box-forced' was removed in matplotlib >= 3.0; use
        # 'box' there. Confirm which matplotlib version is pinned.
        ax.set_adjustable('box-forced')
        im = ax.imshow(img, aspect='equal')

    plt.subplots_adjust(wspace=0, hspace=0)

    return fig, axes
# FIXME : ...
def scale(x, feature_range=(-1, 1)):
    """Linearly rescale `x` from [x.min(), 255] into `feature_range`.

    Assumes pixel data whose maximum possible value is 255.
    """
    # scale to (0, 1)
    x = ((x - x.min()) / (255 - x.min()))
    # scale to feature_range — use lo/hi so the min/max builtins aren't shadowed
    lo, hi = feature_range
    return x * (hi - lo) + lo
def batches(data, batch_size):
    """Yield (scaled_batch, labels) pairs over `data` in order.

    NOTE: kept for backward compatibility — the final batch may contain fewer
    than `batch_size` images while its label list still has `batch_size`
    entries; callers relying on equal lengths should drop the remainder.
    """
    # (the original computed an unused `n_batches`; removed)
    for start in range(0, data.shape[0], batch_size):
        x = data[start:start + batch_size]
        y = [1] * batch_size
        yield scale(x), y
def train(net, dataset, epochs, batch_size, print_every=10, show_every=1000, figsize=(5,5)):
    """Run the GAN training loop.

    Fixes vs. original: iterates over the `dataset` parameter (it previously
    read the module-level `data` global, leaving `dataset` unused), and
    imports pickle (`pkl` was referenced but never imported anywhere).

    Relies on the module-level `z_size`, `generator` and `view_samples`.
    Returns (losses, samples): per-print-step (d_loss, g_loss) pairs and the
    generated sample batches collected every `show_every` steps.
    """
    import pickle as pkl  # BUG FIX: `pkl` was used below but never imported

    saver = tf.train.Saver()
    sample_z = np.random.uniform(-1, 1, size=(72, z_size))

    samples, losses = [], []
    steps = 0

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for e in range(epochs):
            # BUG FIX: use the `dataset` argument, not the global `data`.
            for x, y in batches(dataset, batch_size):
                steps += 1

                # Sample random noise for G
                batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))

                # Run optimizers: D only every third step, G every step.
                if steps % 3 == 1:
                    _ = sess.run(net.d_opt, feed_dict={net.input_real: x, net.input_z: batch_z})
                _ = sess.run(net.g_opt, feed_dict={net.input_z: batch_z, net.input_real: x})

                if steps % print_every == 0:
                    # At the end of each epoch, get the losses and print them out
                    train_loss_d = net.d_loss.eval({net.input_z: batch_z, net.input_real: x})
                    train_loss_g = net.g_loss.eval({net.input_z: batch_z})

                    print("Epoch {}/{}...".format(e+1, epochs),
                          "Discriminator Loss: {:.4f}...".format(train_loss_d),
                          "Generator Loss: {:.4f}".format(train_loss_g))
                    # Save losses to view after training
                    losses.append((train_loss_d, train_loss_g))

                if steps % show_every == 0:
                    gen_samples = sess.run(
                        generator(net.input_z, 3, reuse=True, training=False),
                        feed_dict={net.input_z: sample_z})
                    samples.append(gen_samples)
                    _ = view_samples(-1, samples, 6, 12, figsize=figsize)
                    plt.show()

        saver.save(sess, './checkpoints/generator.ckpt')

    with open('samples.pkl', 'wb') as f:
        pkl.dump(samples, f)

    return losses, samples
# Hyperparameters for the DCGAN run.
real_size = (64,64,3)
z_size = 4*4*1024
learning_rate = 0.0002
batch_size = 128
epochs = 500
alpha = 0.2
beta1 = 0.5

# Create the network
net = GAN(real_size, z_size, learning_rate, alpha=alpha, beta1=beta1)

losses, samples = train(net, data, epochs, batch_size, figsize=(10,5))

# Plot discriminator vs generator loss curves over training.
fig, ax = plt.subplots()
losses = np.array(losses)
plt.plot(losses.T[0], label='Discriminator', alpha=0.5)
plt.plot(losses.T[1], label='Generator', alpha=0.5)
plt.title("Training Losses")
plt.legend()
| StarcoderdataPython |
3597442 | <filename>tests/__init__.py
"""Unit test package for pagic."""
| StarcoderdataPython |
292444 | <reponame>HenrYxZ/2d-simple-shader<filename>script.py
import imageio
import numpy as np
import os
import os.path
from PIL import Image
# Local Modules
from constants import MAX_COLOR_VALUE, MAX_QUALITY
from normal_map import get_normals
from render import render_lambert, render_specular
import utils
# Input texture filenames for the shading passes.
AMBIENT_FILENAME = "dark.jpg"
DIFFUSE_FILENAME = "light.jpg"
FPS = 24  # frames per second for rendered videos
NORMAL_FILENAME = "normal_map_sm.png"
SPECULAR_FILENAME = "specular_sm.jpg"
Z_HEIGHT = 512  # light height above the image plane (pixels)
OUTPUT_DIR = "output"
NUM_FRAMES = 48  # frames per rendered light-orbit video

# Module-level state populated by init().
ambient_map = None
diffuse_map = None
normal_map = None
normals = None
light_pos = None
def open_image(img_filename):
    """Load an image file as a float array normalised to [0, 1]."""
    pixels = np.array(Image.open(img_filename))
    return pixels / MAX_COLOR_VALUE
def init():
    """Load all texture maps, derive per-pixel normals, and place the light
    above the image centre at height Z_HEIGHT. Creates OUTPUT_DIR if missing.

    Populates the module-level map/normal/light globals used by main().
    """
    global ambient_map, diffuse_map, normal_map, normals, light_pos
    ambient_map = open_image(AMBIENT_FILENAME)
    diffuse_map = open_image(DIFFUSE_FILENAME)
    normal_map = open_image(NORMAL_FILENAME)
    normals = get_normals(normal_map)
    h, w, _ = normal_map.shape
    center = (w // 2, h // 2)
    light_pos = np.array([center[0], center[1], Z_HEIGHT])
    if not os.path.exists(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)
def main():
    """Interactive menu loop: render the chosen shading model either as a
    single image or as a video in which the light orbits the image centre.

    Options 3-5 (reflection/refraction/fresnel) are listed but not yet
    implemented — choosing them just times a no-op.
    """
    global light_pos
    init()
    while True:
        opt = input(
            "Enter an option:\n"
            "[1] for 2D Lambert\n"
            "[2] for 2D Specular\n"
            "[3] for 2D Reflection\n"
            "[4] for 2D Refraction\n"
            "[5] for 2D Fresnel\n"
            "[0] to quit\n"
        )
        if opt == '0':
            quit()
        timer = utils.Timer()
        timer.start()
        if opt == '1':
            print("Using 2D Lambert...")
            opt = input(
                "Enter an option:\n"
                "[1] for single image\n"
                "[2] for video\n"
            )
            if opt == '1':
                # Single image
                output = render_lambert(
                    ambient_map, diffuse_map, normals, light_pos
                )
                output_img = Image.fromarray(output)
                output_img.save("lambert.jpg", quality=MAX_QUALITY)
                print("Image saved in lambert.jpg")
            else:
                # Video: move the light around a circle of radius Z_HEIGHT/2
                # and render one frame per position.
                writer = imageio.get_writer(
                    f"{OUTPUT_DIR}/lambert.mp4", format="mp4", mode='I', fps=FPS
                )
                for i in range(NUM_FRAMES):
                    angle = i * (2 * np.pi / (NUM_FRAMES - 1))
                    # Assume we work in a cube with size Z_HEIGHT
                    r = Z_HEIGHT // 2
                    center = (r, r)
                    x = r * np.cos(angle) + center[0]
                    y = r * np.sin(angle) + center[1]
                    light_pos = np.array([x, y, Z_HEIGHT])
                    output = render_lambert(
                        ambient_map, diffuse_map, normals, light_pos
                    )
                    writer.append_data(output)
                    print(f"Image n° {i + 1}/{NUM_FRAMES} done")
                writer.close()
        elif opt == '2':
            # Specular shading additionally needs the specular texture.
            specular_map = open_image(SPECULAR_FILENAME)
            opt = input(
                "Enter an option:\n"
                "[1] for single image\n"
                "[2] for video\n"
            )
            if opt == '1':
                # Single image
                output = render_specular(
                    ambient_map, diffuse_map, specular_map, normals, light_pos
                )
                output_img = Image.fromarray(output)
                output_img.save("specular_out.jpg", quality=MAX_QUALITY)
                print("Image saved in specular_out.jpg")
            else:
                # Video (same light orbit as the Lambert case).
                writer = imageio.get_writer(
                    f"{OUTPUT_DIR}/specular.mp4", format="mp4", mode='I',
                    fps=FPS
                )
                for i in range(NUM_FRAMES):
                    angle = i * (2 * np.pi / (NUM_FRAMES - 1))
                    # Assume we work in a cube with size Z_HEIGHT
                    r = Z_HEIGHT // 2
                    center = (r, r)
                    x = r * np.cos(angle) + center[0]
                    y = r * np.sin(angle) + center[1]
                    light_pos = np.array([x, y, Z_HEIGHT])
                    output = render_specular(
                        ambient_map, diffuse_map, specular_map, normals,
                        light_pos
                    )
                    writer.append_data(output)
                    print(f"Image n° {i + 1}/{NUM_FRAMES} done")
                writer.close()
        timer.stop()
        print(f"Total time spent: {timer}")


if __name__ == '__main__':
    main()
| StarcoderdataPython |
5156766 | from django.urls import path
from .views import *
app_name = 'create_forms'  # URL namespace: reverse as 'create_forms:list' etc.

urlpatterns = [
    # Form list page at the app root.
    path('', form_list, name='list'),
    # Detail page for a single form, addressed by its slug.
    path('<slug:slug>/', form_detail, name='detail'),
]
| StarcoderdataPython |
11256724 | # Autumn Festival Full Moon Damage Skin
# Grant damage skin 2434374 to the player's account; `sm` is the script
# manager injected by the game-server scripting runtime.
success = sm.addDamageSkin(2434374)
if success:
    # Confirm in chat only when the skin was actually added.
    sm.chat("The Autumn Festival Full Moon Damage Skin has been added to your account's damage skin collection.")
| StarcoderdataPython |
8130708 | <reponame>under-chaos/ymir
import logging
import os
import shutil
import tarfile
import unittest
from unittest import mock
from google.protobuf.json_format import ParseDict
import yaml
from mir.commands.mining import CmdMining
import tests.utils as test_utils
import mir.protos.mir_command_pb2 as mirpb
class TestMiningCmd(unittest.TestCase):
    """End-to-end test for `mir mining`: builds a sandbox mir repo with two
    fake assets and a fake model tarball, then runs CmdMining with the
    underlying inference call mocked out."""

    _USER_NAME = "test_user"
    _MIR_REPO_NAME = "ymir-dvc-test"
    _STORAGE_NAME = "monitor_storage_root"

    # lifecycle
    def __init__(self, methodName: str) -> None:
        # dir structure:
        # test_invoker_mining_sandbox_root
        # ├── monitor_storage_root
        # └── test_user
        #     └── ymir-dvc-test
        super().__init__(methodName=methodName)
        self._sandbox_root = test_utils.dir_test_root(self.id().split(".")[-3:])
        self._user_root = os.path.join(self._sandbox_root, self._USER_NAME)
        self._mir_repo_root = os.path.join(self._user_root, self._MIR_REPO_NAME)
        self._storage_root = os.path.join(self._sandbox_root, self._STORAGE_NAME)
        self._config_file = os.path.join(self._sandbox_root, 'config.yaml')

    def setUp(self) -> None:
        # Fails fast if the required external commands (git etc.) are missing.
        test_utils.check_commands()

    def tearDown(self) -> None:
        # Remove the whole sandbox so reruns start clean.
        if os.path.isdir(self._sandbox_root):
            shutil.rmtree(self._sandbox_root)
        return super().tearDown()

    # protected: mock functions
    def _mock_run_func(*args, **kwargs):
        # Stand-in for CmdInfer.run_with_args: writes a one-line mining result
        # (asset hash + score) where the real inference would, and reports
        # success. Deliberately takes no explicit `self` — it is used as a
        # mock side_effect, not called as a bound method.
        output_file = os.path.join(kwargs['work_dir'], 'out', 'result.tsv')
        with open(output_file, 'w') as f:
            f.writelines("d4e4a60147f1e35bc7f5bc89284aa16073b043c9\t0.1")
        return 0

    # protected: custom: env prepare
    def _prepare_dirs(self):
        # (Re)create the sandbox directory tree described in __init__.
        if os.path.isdir(self._sandbox_root):
            shutil.rmtree(self._sandbox_root)
        os.makedirs(self._sandbox_root)
        os.mkdir(self._user_root)
        os.mkdir(self._mir_repo_root)
        os.mkdir(self._storage_root)

    def _prepare_config(self):
        # Mining task config consumed by CmdMining via --config-file.
        shutil.copyfile('tests/assets/mining-template.yaml', self._config_file)

    def _prepare_mir_repo(self):
        # init repo
        logging.info(f"mir repo: {self._mir_repo_root}")
        test_utils.mir_repo_init(self._mir_repo_root)
        # prepare branch a
        test_utils.mir_repo_create_branch(self._mir_repo_root, "a")
        self._prepare_mir_repo_branch_mining()

    def _prepare_mir_repo_branch_mining(self):
        """Populate branch `a` with two image assets, a gzipped fake model
        archive named `xyz`, and the metadata/task protobufs CmdMining reads."""
        mir_annotations = mirpb.MirAnnotations()
        mir_keywords = mirpb.MirKeywords()
        mir_metadatas = mirpb.MirMetadatas()
        mir_tasks = mirpb.MirTasks()

        # Asset files are stored under their sha1 names in media storage.
        mock_image_file = os.path.join(self._storage_root, 'd4e4a60147f1e35bc7f5bc89284aa16073b043c9')
        shutil.copyfile("tests/assets/2007_000032.jpg", mock_image_file)
        mock_image_file = os.path.join(self._storage_root, 'a3008c032eb11c8d9ffcb58208a36682ee40900f')
        shutil.copyfile("tests/assets/2007_000243.jpg", mock_image_file)

        # Build the fake model package: json + params + training config,
        # tarred into `xyz` (the model_hash used by the test).
        mock_training_config_file = os.path.join(self._storage_root, 'config.yaml')
        shutil.copyfile('tests/assets/training-template.yaml', mock_training_config_file)

        mock_model_json = os.path.join(self._storage_root, '1.json')
        with open(mock_model_json, 'w') as f:
            f.writelines("mock")
        mock_model_params = os.path.join(self._storage_root, '1.params')
        with open(mock_model_params, 'w') as f:
            f.writelines("mock")

        with open(mock_training_config_file, 'r') as f:
            config = yaml.safe_load(f.read())
        config['class_names'] = ['cat', 'person']
        with open(mock_training_config_file, 'w') as f:
            yaml.dump(config, f)

        mock_model_file = os.path.join(self._storage_root, 'xyz')
        with tarfile.open(mock_model_file, "w:gz") as dest_tar_gz:
            dest_tar_gz.add(mock_model_json, os.path.basename(mock_model_json))
            dest_tar_gz.add(mock_model_params, os.path.basename(mock_model_params))
            dest_tar_gz.add(mock_training_config_file, os.path.basename(mock_training_config_file))

        # Register both assets' metadata in the mir repo.
        dict_metadatas = {
            'attributes': {
                'd4e4a60147f1e35bc7f5bc89284aa16073b043c9': {
                    'assetType': 'AssetTypeImageJpeg',
                    'width': 1080,
                    'height': 1620,
                    'imageChannels': 3
                },
                'a3008c032eb11c8d9ffcb58208a36682ee40900f': {
                    'assetType': 'AssetTypeImageJpeg',
                    'width': 1080,
                    'height': 1620,
                    'imageChannels': 3
                },
            }
        }
        ParseDict(dict_metadatas, mir_metadatas)

        # Head task: the mining task that src_revs points at below.
        dict_tasks = {
            'tasks': {
                '5928508c-1bc0-43dc-a094-0352079e39b5': {
                    'type': 'TaskTypeMining',
                    'name': 'mining',
                    'task_id': 'mining-task-id',
                    'timestamp': '1624376173'
                }
            },
            'head_task_id': '5928508c-1bc0-43dc-a094-0352079e39b5',
        }
        ParseDict(dict_tasks, mir_tasks)

        print("self._mir_repo_root: ", self._mir_repo_root)

        test_utils.mir_repo_commit_all(self._mir_repo_root, mir_metadatas, mir_annotations, mir_keywords, mir_tasks,
                                       "prepare_branch_mining")

    # public: test cases
    @mock.patch("mir.commands.infer.CmdInfer.run_with_args", side_effect=_mock_run_func)
    def test_mining_cmd_00(self, mock_run):
        """Happy path: CmdMining forwards the expected arguments to CmdInfer."""
        self._prepare_dirs()
        self._prepare_config()
        self._prepare_mir_repo()

        # Build a bare args namespace (mirrors the CLI argument parser output).
        args = type('', (), {})()
        args.src_revs = 'a@5928508c-1bc0-43dc-a094-0352079e39b5'
        args.dst_rev = 'a@mining-task-id'
        args.model_hash = 'xyz'
        args.work_dir = os.path.join(self._storage_root, "mining-task-id")
        args.media_cache = ''
        args.model_location = self._storage_root
        args.media_location = self._storage_root
        args.topk = 1
        args.add_annotations = False
        args.mir_root = self._mir_repo_root
        args.config_file = self._config_file
        args.executor = 'al:0.0.1'
        args.executor_instance = 'executor-instance'
        mining_instance = CmdMining(args)
        mining_instance.run()

        # The inference layer must be called exactly once, in mining mode.
        mock_run.assert_called_once_with(work_dir=args.work_dir,
                                         media_path=os.path.join(args.work_dir, 'in', 'candidate'),
                                         model_location=args.model_location,
                                         model_hash=args.model_hash,
                                         index_file=os.path.join(args.work_dir, 'in', 'candidate', 'src-index.tsv'),
                                         config_file=args.config_file,
                                         task_id='mining-task-id',
                                         shm_size='16G',
                                         executor=args.executor,
                                         executor_instance=args.executor_instance,
                                         run_infer=False,
                                         run_mining=True)

        if os.path.isdir(self._sandbox_root):
            shutil.rmtree(self._sandbox_root)
| StarcoderdataPython |
9665674 | # -*- coding: utf-8 -*-
"""Command line interface for :mod:`cova`.
Why does this file exist, and why not put this in ``__main__``? You might be tempted to import things from ``__main__``
later, but that will cause problems--the code will get executed twice:
- When you run ``python3 -m cova``, Python will execute ``__main__.py`` as a script.
That means there won't be any ``cova.__main__`` in ``sys.modules``.
- When you import __main__ it will get executed again (as a module) because
there's no ``cova.__main__`` in ``sys.modules``.
.. seealso:: https://click.palletsprojects.com/en/7.x/setuptools/#setuptools-integration
"""
import argparse
import logging
from cova.cli_helper import _run
logger = logging.getLogger(__name__)
def get_args():
    """Build and return the command-line argument parser for the CLI."""
    arg_parser = argparse.ArgumentParser(
        description="This program runs a COVA pipeline defined in a json-like config file."
    )
    # Single required positional: the pipeline configuration file.
    arg_parser.add_argument("config", type=str, help="Path to the configuration file.")
    return arg_parser
def main():
    """CLI entry point: parse the arguments and run the configured pipeline."""
    namespace = get_args().parse_args()
    _run(namespace.config)
| StarcoderdataPython |
374125 | <reponame>waikato-ufdl/ufdl-backend<filename>ufdl-core-app/src/ufdl/core_app/exceptions/_JobNotStarted.py
from rest_framework import status
from rest_framework.exceptions import APIException
class JobNotStarted(APIException):
    """
    Exception for when a node tries to perform an action
    on a job that it hasn't officially started yet.

    Surfaced to the client as HTTP 417 with error code 'job_not_started'.
    """
    status_code = status.HTTP_417_EXPECTATION_FAILED
    default_code = 'job_not_started'

    def __init__(self, action: str):
        # Embed the offending action in the error detail for easier debugging.
        super().__init__(f"Attempted to perform the following action on an un-started job: {action}")
| StarcoderdataPython |
1997572 | <reponame>andzaytsev/deepnav
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class Config:
    """Static hyperparameter and system configuration for the GA3C agent."""

    #########################################################################
    # Number of stacked LSTM layers
    NUM_LSTMS = 2

    #########################################################################
    # Game configuration
    #MAP = 'seekavoid_arena_01'
    MAP = 'stairway_to_melon'
    #MAP = 'nav_maze_static_01'
    # Enable to see the trained agent in action
    PLAY_MODE = False
    # Enable to train
    TRAIN_MODELS = True
    # Load old models. Throws if the model doesn't exist
    LOAD_CHECKPOINT = False
    # If 0, the latest checkpoint is loaded
    LOAD_EPISODE = 0

    #########################################################################
    # Number of agents, predictors, trainers and other system settings
    # If the dynamic configuration is on, these are the initial values.
    # Number of Agents
    AGENTS = 8
    # Number of Predictors
    PREDICTORS = 2
    # Number of Trainers
    TRAINERS = 2
    # Device
    DEVICE = 'gpu:0'
    # Enable the dynamic adjustment (+ waiting time to start it)
    DYNAMIC_SETTINGS = False
    DYNAMIC_SETTINGS_STEP_WAIT = 20
    DYNAMIC_SETTINGS_INITIAL_WAIT = 10

    #########################################################################
    # Algorithm parameters
    # Discount factor
    DISCOUNT = 0.99
    # Tmax (interval over which gradients are computed)
    TIME_MAX = 40
    # Maximum steps taken by agent in environment
    MAX_STEPS = 10 * 10**7
    # Reward clipping range
    REWARD_MIN = -10
    REWARD_MAX = 10
    # Max size of the queue
    MAX_QUEUE_SIZE = 100
    PREDICTION_BATCH_SIZE = 128
    # Input of the DNN
    STACKED_FRAMES = 1
    IMAGE_WIDTH = 84
    IMAGE_HEIGHT = 84
    IMAGE_DEPTH = 3  # 3 for RGB, 4 for RGBD
    COMBINED_STATE_SIZE = 21240  # includes auxiliary inputs to NN
    VEL_DIM = 6  # velocity dimension
    # Lab setting (frames per second)
    FPS = 60
    # Rotation for look-left, look-right actions [-512, 512]
    ROTATION = 20
    # Total number of episodes and annealing frequency
    EPISODES = 400000
    ANNEALING_EPISODE_COUNT = 400000
    # Entropy regularization hyper-parameter
    BETA_START = 0.001
    BETA_END = 0.001
    # Learning rate
    LEARNING_RATE_START = 0.0005
    LEARNING_RATE_END = 0.0005
    # RMSProp parameters
    RMSPROP_DECAY = 0.99
    RMSPROP_MOMENTUM = 0.0
    RMSPROP_EPSILON = 0.1
    # Dual RMSProp - we found that using a single RMSProp for the two cost function works better and faster
    DUAL_RMSPROP = False
    # Gradient clipping
    USE_GRAD_CLIP = False
    GRAD_CLIP_NORM = 40.0
    # Epsilon (regularize policy lag in GA3C)
    LOG_EPSILON = 1e-6
    # Training min batch size - increasing the batch size increases the stability of the algorithm, but make learning slower
    TRAINING_MIN_BATCH_SIZE = 0

    #########################################################################
    # Log and save
    # Enable TensorBoard
    TENSORBOARD = False
    # Update TensorBoard every X training steps
    TENSORBOARD_UPDATE_FREQUENCY = 1000
    # Enable to save models every SAVE_FREQUENCY episodes
    SAVE_MODELS = False
    # Save every SAVE_FREQUENCY episodes
    SAVE_FREQUENCY = 1000
    # Print stats every PRINT_STATS_FREQUENCY episodes
    PRINT_STATS_FREQUENCY = 1
    # The window to average stats
    STAT_ROLLING_MEAN_WINDOW = 1000
    # Results filename
    RESULTS_FILENAME = 'results.txt'
    # Network checkpoint name
    NETWORK_NAME = 'network'

    #########################################################################
    # More experimental parameters here
    # Minimum policy
    MIN_POLICY = 0.0
    # Use log_softmax() instead of log(softmax())
    USE_LOG_SOFTMAX = False
| StarcoderdataPython |
284215 | <reponame>gauravvazirani/ooad<gh_stars>0
import math
class IntegerStatistics(list):
    """A list subclass offering mean and sample standard deviation of its
    numeric elements.

    Both methods require at least one element; stdev requires at least two
    (Bessel-corrected, n - 1 denominator).
    """

    def mean(self):
        """Arithmetic mean of the elements (float)."""
        return sum(self) / len(self)

    def stdev(self):
        """Sample standard deviation of the elements (float)."""
        mu = self.mean()
        squared_error = sum((value - mu) ** 2 for value in self)
        return math.sqrt(squared_error / (len(self) - 1))
| StarcoderdataPython |
8046652 | <gh_stars>1-10
import click
import scipy.signal
import scipy.fft
import numpy as np
import pandas as pd
import os
import csv
import gzip
def chunk_generator(data_file):
    """Yield contiguous DataFrame chunks from a gzipped CSV.

    Consecutive rows sharing the same key belong to one chunk. Wrist
    recordings (detected by the presence of an `s2` column) are keyed by
    `key` alone; other recordings by (`key`, `phase`). Rows with missing
    sensor readings (`s1`, and `s2` for wrist files) are dropped from each
    chunk before it is yielded.
    """
    with gzip.open(data_file, "rt") as f:
        csvr = csv.DictReader(f)
        chunk = []
        chunk_key = None
        is_wrist = False
        for r in csvr:
            # Wrist files carry a second sensor column `s2`.
            is_wrist = "s2" in r
            if is_wrist:
                row_key = r["key"]
            else:
                row_key = (r["key"], r["phase"])
            if chunk_key is None:
                # Very first row: open the first chunk.
                chunk_key = row_key
                chunk = [r]
            elif chunk_key == row_key:
                chunk.append(r)
            else:
                # Chunk completed
                df = pd.DataFrame().from_records(chunk)
                df = df[~df["s1"].isna()]
                if is_wrist:
                    df = df[~df["s2"].isna()]
                yield df
                # Start new chunk
                chunk_key = row_key
                chunk = [r]
        # Flush the trailing chunk once the file is exhausted.
        df = pd.DataFrame().from_records(chunk)
        df = df[~df["s1"].isna()]
        if is_wrist:
            df = df[~df["s2"].isna()]
        yield df
def fft(values):
    """Magnitude of the real part of the rFFT of mean-centred `values`,
    rounded to 5 decimal places (1-D float array of length n//2 + 1)."""
    centred = values - np.mean(values)
    spectrum = scipy.fft.rfft(centred).real
    # xf = scipy.fft.rfftfreq(data.size, 1 / len(chunk)).round().astype(int)
    return np.round(np.abs(spectrum), 5)
@click.command()
@click.argument("input_files", nargs=-1)
@click.argument("output_directory", nargs=1)
@click.option(
    "-s", "--samples", default=6144, help="Samples per second; affects output shape"
)
@click.option(
    "-f",
    "--format",
    default="raw",
    type=click.Choice(["raw", "fft"], case_sensitive=False),
)
def convert(input_files, output_directory, samples, format):
    """Convert each INPUT_FILES recording into a wide CSV in OUTPUT_DIRECTORY,
    either as raw resampled values or as FFT magnitudes."""
    print(input_files, output_directory, samples, format)
    for data_file in input_files:
        convert_single_file(data_file, output_directory, samples, format)
def convert_single_file(input_file, output_directory, samples, format):
    """Convert one gzipped CSV recording into a wide per-chunk CSV.

    Each chunk becomes one output row: identifying columns plus `samples`
    sensor columns per available sensor (resampled to a fixed length, and
    FFT-transformed when `format == "fft"`).
    """
    filename = os.path.basename(input_file).replace(".csv", "").replace(".gz", "")
    print(filename)
    # BUG FIX: name the output after the input file; the original wrote a
    # literal "(unknown)" placeholder and never used the computed `filename`.
    out_file = os.path.join(output_directory, f"{filename}_{format}.csv")
    with open(out_file, "w") as outf:
        csvout = None
        for chunk in chunk_generator(input_file):
            if len(chunk) == 0:
                continue
            is_wrist = "s2" in chunk.columns
            sensor_values = {}
            for sensor in ("s1", "s2"):
                if sensor == "s2" and not is_wrist:
                    continue
                if len(chunk) != samples:
                    # Resample so every row has exactly `samples` columns.
                    sensor_values[sensor] = scipy.signal.resample(
                        chunk[sensor].values, samples
                    ).reshape((samples,))
                else:
                    sensor_values[sensor] = chunk[sensor].values
                if format == "fft":
                    sensor_values[sensor] = fft(sensor_values[sensor].astype('float'))
            # Identifying columns differ between wrist and non-wrist files.
            if is_wrist:
                new_row = {
                    "index": chunk["key"][0],
                    "scenario": chunk["scenario"][0],
                    "movement": chunk["movement"][0],
                    "iteration": chunk["iteration"][0],
                }
            else:
                chunk_key = chunk["key"][0]
                chunk_phase = chunk["phase"][0]
                new_row = {
                    "index": f"{chunk_key}_{chunk_phase}",
                    "phase": chunk_phase,
                    "pattern": chunk["pattern"][0],
                    "iteration": chunk["iteration"][0],
                }
            # Fan the sensor arrays out into one column per sample.
            for k, v in sensor_values.items():
                new_row.update(**{f"{k}_{i}": v for i, v in enumerate(v)})
            if csvout is None:
                # Header is derived from the first chunk's columns.
                csvout = csv.DictWriter(outf, fieldnames=new_row.keys())
                csvout.writeheader()
            csvout.writerow(new_row)


if __name__ == "__main__":
    convert()
| StarcoderdataPython |
3296318 | """
Test that breakpoints in an IT instruction don't fire if their condition is
false.
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestBreakpointIt(TestBase):
    """Thumb IT-block breakpoints: a breakpoint on a conditional instruction
    inside an IT block must fire only when its condition holds.
    ARM(32) only; skipped on arm64 variants."""

    mydir = TestBase.compute_mydir(__file__)
    NO_DEBUG_INFO_TESTCASE = True

    @skipIf(archs=no_match(["arm"]))
    @skipIf(archs=["arm64", "arm64e", "arm64_32"])
    def test_false(self):
        # The breakpoint sits on an instruction whose IT condition is false,
        # so the process must run to completion without stopping.
        self.build()
        exe = self.getBuildArtifact("a.out")
        self.runCmd("target create %s" % exe)

        lldbutil.run_break_set_by_symbol(self, "bkpt_false",
                                         extra_options="--skip-prologue 0")

        self.runCmd("run")
        self.assertEqual(self.process().GetState(), lldb.eStateExited,
                         "Breakpoint does not get hit")

    @skipIf(archs=no_match(["arm"]))
    @skipIf(archs=["arm64", "arm64e", "arm64_32"])
    def test_true(self):
        # Here the IT condition is true, so the breakpoint must be hit.
        self.build()
        exe = self.getBuildArtifact("a.out")
        self.runCmd("target create %s" % exe)

        bpid = lldbutil.run_break_set_by_symbol(self, "bkpt_true",
                                                extra_options="--skip-prologue 0")

        self.runCmd("run")
        self.assertIsNotNone(lldbutil.get_one_thread_stopped_at_breakpoint_id(
            self.process(), bpid))
| StarcoderdataPython |
188357 | <filename>pmDisk.py
import ebf
import healpix_util as hu
import numpy as np
import matplotlib as mpl
mpl.use('pdf')
import matplotlib.pyplot as plt
import astropy.coordinates as coord
import astropy.units as u
import healpy as hp
import astropy.stats as st
import astropy.visualization as av
import xdpm
import itertools
from scipy import linalg
from sklearn import mixture
from sklearn.externals import joblib
def plotPixels(x, y, pixel_array, mapSky, pixelImages, plotEach=False,
               xlim=(-2, 2.5), ylim=(2, -1), vmin=0.01, vmax=10.,
               xlabel='log distance', ylabel='log proper motion',
               fignameAll='pofd_allPixels.pdf', fignameEachpre='pofd',
               bins=100, normed=False, norm=None, cmap='Greys', dynRange=100., grid=False, npixels=None):
    """Plot a 2D histogram of (x, y) for every HEALPix sky pixel.

    Each pixel gets one panel in a shared grid figure (saved to
    ``fignameAll``).  When ``plotEach`` is True an additional per-pixel
    figure, pairing the histogram with the pixel's location on the sky,
    is saved as ``<fignameEachpre>_<pixel>.pdf``.

    NOTE(review): relies on the module-level ``nside`` for the grid layout;
    consider passing it explicitly.
    """
    # npixels defaults to the highest pixel index present (the previous
    # implementation computed it but never used it).
    if npixels is None:
        npixels = np.max(pixel_array)
    fig, axes = plt.subplots(4 * nside, 3 * nside, figsize=(7 * nside, 7 * nside))
    axes = axes.flatten()
    if plotEach:
        fignow, axesnow = plt.subplots(1, 2, figsize=(10, 5))
    # Loop over pixels on the sky.
    for ax, pixel_index in zip(axes, range(npixels + 1)):
        index = pixel_array == pixel_index
        if not np.sum(index):
            continue  # no sources in this pixel
        # BUG FIX: the per-pixel axes were previously cleared/used even when
        # plotEach was False, raising NameError on the undefined ``axesnow``.
        target_axes = [ax]
        if plotEach:
            axesnow[0].cla()
            axesnow[1].cla()
            target_axes.append(axesnow[0])
        for axis in target_axes:
            counts, xedges, yedges = np.histogram2d(x[index], y[index], bins=bins, normed=normed)
            if norm == 'lognorm':
                # Anchor the colour range at the smallest populated bin.
                minhist = np.min(counts[counts > 0])
                norm = mpl.colors.LogNorm(vmin=minhist, vmax=minhist * dynRange)
            axis.imshow(counts, origin='lower',
                        extent=[np.min(xedges), np.max(xedges), np.min(yedges), np.max(yedges)],
                        cmap=cmap, norm=norm)
            axis.set_xlim(xlim)
            axis.set_ylim(ylim)
            axis.set_xlabel(xlabel)
            axis.set_ylabel(ylabel)
            axis.grid()
        if plotEach:
            # Show where this pixel sits on the sky next to its histogram.
            axesnow[1].imshow(mapSky, origin='lower', extent=[-180, 180, -90, 90])
            axesnow[1].imshow(pixelImages[pixel_index], origin='lower', cmap='Greys',
                              alpha=0.3, extent=[-180, 180, -90, 90])
            axesnow[1].set_xlabel('l')
            axesnow[1].set_ylabel('b')
            fignow.savefig('{0}_{1:03d}.pdf'.format(fignameEachpre, pixel_index), rasterized=True)
    fig.savefig(fignameAll)
    plt.close(fig)
    if plotEach:
        plt.close(fignow)
# --- Load the Galaxia mock catalogue and build Galactic coordinates ---
datafile = 'sdssgalaxy_1percent.ebf'
#datafile = 'sdssHalo.ebf'
#datafile = '../GalaxiaData/sdssgalaxy_10percent.ebf'
#datafile = '../GalaxiaData/sdssgalaxy_1percent.ebf'
#datafile = '../GalaxiaData/sdssHalo.ebf'
data = ebf.read(datafile, '/')
# Cartesian positions (kpc) and velocities (km/s) -> spherical Galactic
# coordinates with proper motions (pm_l_cosb, pm_b).
c = coord.Galactic(u=data['px']*u.kpc, v=data['py']*u.kpc, w=data['pz']*u.kpc,
                   U=data['vx']*u.km/u.s, V=data['vy']*u.km/u.s, W=data['vz']*u.km/u.s,
                   representation=coord.CartesianRepresentation, differential_cls=coord.CartesianDifferential)
c.set_representation_cls(coord.SphericalRepresentation, s=coord.SphericalCosLatDifferential)
#for visualizing all data on the sky
nside = 128
hpixMap = hu.HealPix("ring", nside)
pixnums = hpixMap.eq2pix(data['glon'], data['glat'])
# Source counts per HEALPix pixel, shown as a log10 Mollweide map.
omap = np.bincount(pixnums, minlength=hpixMap.npix)
mapSky = hp.mollview(np.log10(omap), return_projected_map=True)
plt.savefig('pofd_allsky.pdf')
#pmb = np.random.normal(loc=c.pm_b, scale=2)
#pml = np.random.normal(loc=c.pm_l_cosb, scale=2)
def matrixize(data, err):
"""
vectorize the 2 pieces of data into a 2D mean and 2D covariance matrix
"""
X = np.vstack(data).T
Xerr = np.zeros(X.shape + X.shape[-1:])
diag = np.arange(X.shape[-1])
Xerr[:, diag, diag] = np.vstack([e**2. for e in err]).T
return X, Xerr
# Colours cycled over mixture components when plotting.
color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold',
                              'darkorange'])
def plot_results(X, Y_, means, covariances, index, title):
    """Scatter-plot GMM cluster assignments with 2-sigma covariance ellipses.

    Args:
        X: (n_samples, >=2) data array; only the first two columns are drawn.
        Y_: per-sample component labels (e.g. ``gmm.predict(X)``).
        means, covariances: per-component parameters of the fitted mixture.
        index: subplot row (0-based) in a 2x1 figure.
        title: subplot title.
    """
    splot = plt.subplot(2, 1, 1 + index)
    for i, (mean, covar, color) in enumerate(zip(
            means, covariances, color_iter)):
        v, w = linalg.eigh(covar)
        # Convert eigenvalues to full axis lengths of the 2-sigma ellipse.
        v = 2. * np.sqrt(2.) * np.sqrt(v)
        u = w[0] / linalg.norm(w[0])
        # as the DP will not use every component it has access to
        # unless it needs it, we shouldn't plot the redundant
        # components.
        if not np.any(Y_ == i):
            continue
        plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
        # Plot an ellipse to show the Gaussian component
        angle = np.arctan(u[1] / u[0])
        angle = 180. * angle / np.pi  # convert to degrees
        ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
        ell.set_clip_box(splot.bbox)
        ell.set_alpha(0.5)
        splot.add_artist(ell)
    plt.xlim(-9., 5.)
    plt.ylim(-3., 6.)
    plt.xticks(())
    plt.yticks(())
    plt.title(title)
# --- Fit (or load a cached) 4D Gaussian mixture in (l, b, pml, pmb) ---
X, Xerr = matrixize([data['glon'], data['glat'], c.pm_l_cosb, c.pm_b], [0, 0, 0, 0])
gmmfile = 'gmm_n5_galactic.pkl'
try:
    with open(gmmfile) as f:
        gmm = joblib.load(f)
except IOError:
    # Fit a Gaussian mixture with EM using five components
    gmm = mixture.GaussianMixture(n_components=5, covariance_type='full').fit(X)
    joblib.dump(gmm, gmmfile)
# Histogram limits/binning for the proper-motion panel.
pmlim = 2
nbins = 500
bins = [np.linspace(-pmlim, pmlim, nbins), np.linspace(-pmlim, pmlim, nbins)]
x1 = data['glon']
y1 = data['glat']
x2 = c.pm_l_cosb
y2 = c.pm_b
# Left: sky positions; right: (signed-log) proper motions, with the fitted
# mixture components overlaid on both projections.
fig, axes = plt.subplots(1, 2, figsize=(10, 5))
axes[0].hist2d(x1, y1, bins=100, norm=mpl.colors.LogNorm(), rasterized=True)
axes[1].hist2d(xdpm.logNegative(x2.value), xdpm.logNegative(y2.value), bins=bins, norm=mpl.colors.LogNorm(), rasterized=True)
xdpm.plotGMM(gmm, ax=axes, indices=[[0, 1], [2, 3]], labels=['pos', 'pm'])
axes[0].set_xlabel('l')
axes[0].set_ylabel('b')
axes[1].set_xlabel('log |pml| + 1')
axes[1].set_ylabel('log |pmb| + 1')
axes[1].set_xlim(-2, 2)
axes[1].set_ylim(-2, 2)
fig.savefig('.'.join(gmmfile.split('.')[:-1]) + '.pdf')
#plot_results(X, gmm.predict(X), gmm.means_, gmm.covariances_, 0,#
#             'Gaussian Mixture')
#plt.savefig('gmm_n5_galactic.png')
#plt.clf()
"""
# Fit a Dirichlet process Gaussian mixture using five components
dpgmm = mixture.BayesianGaussianMixture(n_components=5,
                                        covariance_type='full').fit(X)
joblib.dump(gmm, 'dpgmm_n5_galactic.pkl')
plot_results(X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 1,
             'Bayesian Gaussian Mixture with a Dirichlet process prior')
plt.savefig('dpgmm_n5_galactic.png')
"""
| StarcoderdataPython |
166953 | from django.contrib import admin
from .models import Biodata, UserProfile
# Register your models here.
# admin.register accepts several models at once; a single decorator replaces
# the previously stacked @admin.register(Biodata)/@admin.register(UserProfile).
@admin.register(Biodata, UserProfile)
class BiodataAdmin(admin.ModelAdmin):
    """Default (unconfigured) admin for both Biodata and UserProfile."""
    pass
115194 | import os
import subprocess
import itertools
import pytest
from click.testing import CliRunner
from hobbit import main as hobbit
from hobbit.bootstrap import templates
from . import BaseTest, rmdir, chdir
class TestHobbit(BaseTest):
    """End-to-end CLI tests for the ``hobbit`` project generator."""

    # Scratch project directory; removed before and after every test.
    wkdir = os.path.abspath('hobbit-tox-test')

    def setup_method(self, method):
        rmdir(self.wkdir)

    def teardown_method(self, method):
        os.chdir(self.root_path)
        rmdir(self.wkdir)

    @pytest.fixture
    def runner(self):
        yield CliRunner()

    def test_not_exist_cmd(self, runner):
        """Unknown sub-commands are rejected with an explanatory error."""
        result = runner.invoke(hobbit)
        assert result.exit_code == 0
        result = runner.invoke(hobbit, ['doesnotexistcmd'], obj={})
        assert 'Error: cmd not exist: doesnotexistcmd' in result.output

    # Try every combination of template, celery flag and destination form.
    @pytest.mark.parametrize(
        'name,template,celery_,dist',
        itertools.product(
            ['haha'], templates, ['--celery', '--no-celery'],
            [None, '.', wkdir]))
    @chdir(wkdir)
    def test_new_cmd(self, runner, name, template, celery_, dist):
        """``hobbit new`` scaffolds a project that lints, installs and passes its tests."""
        options = [
            '--echo', 'new', '-p 1024', '-n', name, '-t', template, celery_]
        if dist:
            assert os.getcwd() == os.path.abspath(dist)
            options.extend(['-d', dist])
        result = runner.invoke(hobbit, options, obj={})
        assert result.exit_code == 0, result.output
        assert 'mkdir\t{}'.format(self.wkdir) in result.output
        assert 'render\t{}'.format(self.wkdir) in result.output
        # Expected number of echoed output lines per template/celery combo.
        file_nums = {
            # tart + 29 files + 11 dir + 1 end + empty
            'shire | --no-celery': 1 + 29 + 11 + 1 + 1 - 1,
            # start + files + mkdir + tail
            'shire | --celery': 1 + 30 + 12 + 1,
            'rivendell | --no-celery': 1 + 31 + 11 + 1,
            'rivendell | --celery': 1 + 32 + 12 + 1,
        }
        assert len(result.output.split('\n')) == file_nums[
            f'{template} | {celery_}']
        # The generated project must be flake8-clean, installable and green.
        assert subprocess.call(['flake8', '.']) == 0
        assert subprocess.call(
            'pip install -r requirements.txt '
            '--upgrade-strategy=only-if-needed',
            shell=True, stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL) == 0
        assert subprocess.call(['pytest'], stdout=subprocess.DEVNULL) == 0
        # test --force option
        result = runner.invoke(hobbit, options, obj={})
        assert all([i in result.output for i in ['exists ', 'ignore ...']])
        options.extend(['-f'])
        result = runner.invoke(hobbit, options, obj={})
        assert any([i in result.output for i in ['exists ', 'ignore ...']])

    @chdir(wkdir)
    def test_dev_init_cmd(self, runner):
        """``hobbit dev init --all`` succeeds inside a rivendell project."""
        # new project use rivendell template
        cmd = ['--echo', 'new', '-n', 'haha', '-p', '1024', '-t', 'rivendell']
        result = runner.invoke(hobbit, cmd, obj={})
        assert result.exit_code == 0
        result = runner.invoke(hobbit, ['dev', 'init', '--all'], obj={})
        assert result.exit_code == 0, result.output
| StarcoderdataPython |
8172320 | <gh_stars>0
from framework.core.util import is_uri, get_feature_from_uri
class _PluginRegistry(object):
def __init__(self):
self._plugins = {}
def add(self, plugin):
self._plugins[plugin.name] = plugin
def get_plugin_uris(self):
for plugin in self._plugins.values():
yield plugin.uri
def get_plugin(self, name):
if is_uri(name, 'plugins'):
name = get_feature_from_uri(name, 'plugins')
return self._plugins[name]
def get_methods(self, plugin_name=None):
if plugin_name is None:
plugin_names = self._plugins.keys()
else:
plugin_names = [plugin_name]
for name in plugin_names:
for method in self.get_plugin(name).get_methods().values():
yield method
def get_types(self, plugin_name=None):
if plugin_name is None:
plugin_names = self._plugins.keys()
else:
plugin_names = [plugin_name]
for name in plugin_names:
for type_ in self.get_plugin(name).get_types():
yield type_
# Module-level singleton used by the rest of the framework.
plugin_registry = _PluginRegistry()
| StarcoderdataPython |
11295955 | import pytest
from tests.conftest import create_mock_error
from py42.exceptions import Py42BadRequestError
from py42.exceptions import Py42CaseAlreadyHasEventError
from py42.exceptions import Py42UpdateClosedCaseError
from py42.services.casesfileevents import CasesFileEventsService
_TEST_CASE_NUMBER = 123456
# Canned JSON error bodies matching the "problem" codes py42 maps to
# typed exceptions.
UPDATE_ERROR_RESPONSE = '{"problem":"CASE_IS_CLOSED"}'
ADDED_SAME_EVENT_AGAIN_ERROR = '{"problem":"CASE_ALREADY_HAS_EVENT"}'
UNKNOWN_ERROR = '{"problem":"SURPRISE!"}'
class TestCasesFileEventService:
    """Unit tests for CasesFileEventsService URL construction and error mapping."""

    def test_add_called_with_expected_url_and_params(self, mock_connection):
        case_file_event_service = CasesFileEventsService(mock_connection)
        case_file_event_service.add(_TEST_CASE_NUMBER, "event-id")
        assert (
            mock_connection.post.call_args[0][0]
            == f"/api/v1/case/{_TEST_CASE_NUMBER}/fileevent/event-id"
        )

    def test_delete_called_with_expected_url_and_params(self, mock_connection):
        case_file_event_service = CasesFileEventsService(mock_connection)
        case_file_event_service.delete(_TEST_CASE_NUMBER, "event-id")
        assert (
            mock_connection.delete.call_args[0][0]
            == f"/api/v1/case/{_TEST_CASE_NUMBER}/fileevent/event-id"
        )

    def test_get_called_with_expected_url_and_params(self, mock_connection):
        case_file_event_service = CasesFileEventsService(mock_connection)
        case_file_event_service.get(_TEST_CASE_NUMBER, "event-id")
        assert (
            mock_connection.get.call_args[0][0]
            == f"/api/v1/case/{_TEST_CASE_NUMBER}/fileevent/event-id"
        )

    def test_get_all_called_with_expected_url_and_params(self, mock_connection):
        case_file_event_service = CasesFileEventsService(mock_connection)
        case_file_event_service.get_all(_TEST_CASE_NUMBER)
        assert (
            mock_connection.get.call_args[0][0]
            == f"/api/v1/case/{_TEST_CASE_NUMBER}/fileevent"
        )

    def test_add_on_a_closed_case_raises_error(self, mocker, mock_connection):
        mock_connection.post.side_effect = create_mock_error(
            Py42BadRequestError, mocker, UPDATE_ERROR_RESPONSE
        )
        case_file_event_service = CasesFileEventsService(mock_connection)
        with pytest.raises(Py42UpdateClosedCaseError) as err:
            case_file_event_service.add(_TEST_CASE_NUMBER, event_id="x")
        assert err.value.args[0] == "Cannot update a closed case."

    def test_delete_on_a_closed_case_raises_error(self, mocker, mock_connection):
        case_file_event_service = CasesFileEventsService(mock_connection)
        mock_connection.delete.side_effect = create_mock_error(
            Py42BadRequestError, mocker, UPDATE_ERROR_RESPONSE
        )
        with pytest.raises(Py42UpdateClosedCaseError) as err:
            case_file_event_service.delete(_TEST_CASE_NUMBER, event_id="x")
        assert err.value.args[0] == "Cannot update a closed case."

    def test_add_when_same_event_is_added_multiple_times_raises_error(
        self, mocker, mock_connection
    ):
        case_file_event_service = CasesFileEventsService(mock_connection)
        mock_connection.post.side_effect = create_mock_error(
            Py42BadRequestError, mocker, ADDED_SAME_EVENT_AGAIN_ERROR
        )
        with pytest.raises(Py42CaseAlreadyHasEventError) as err:
            case_file_event_service.add(_TEST_CASE_NUMBER, event_id="x")
        assert err.value.args[0] == "Event is already associated to the case."

    def test_add_when_unknown_error_raises_error(self, mocker, mock_connection):
        case_file_event_service = CasesFileEventsService(mock_connection)
        mock_connection.post.side_effect = create_mock_error(
            Py42BadRequestError, mocker, UNKNOWN_ERROR
        )
        with pytest.raises(Py42BadRequestError):
            case_file_event_service.add(_TEST_CASE_NUMBER, event_id="x")

    def test_delete_when_unknown_error_raises_error(self, mocker, mock_connection):
        case_file_event_service = CasesFileEventsService(mock_connection)
        # BUG FIX: this test previously stubbed ``post`` and exercised ``add``,
        # duplicating the test above; it now actually covers ``delete``.
        mock_connection.delete.side_effect = create_mock_error(
            Py42BadRequestError, mocker, UNKNOWN_ERROR
        )
        with pytest.raises(Py42BadRequestError):
            case_file_event_service.delete(_TEST_CASE_NUMBER, event_id="x")
| StarcoderdataPython |
5134169 | #!/usr/bin/env python
#coding=utf-8
from pyecharts.base import Base
from pyecharts.option import get_all_options
class Radar(Base):
    """
    <<< Radar chart >>>
    A radar chart is mainly used to display multivariate data.
    """

    def __init__(self, title="", subtitle="", **kwargs):
        super(Radar, self).__init__(title, subtitle, **kwargs)

    def config(self, schema=None,
               c_schema=None,
               shape="",
               rader_text_color="#000",
               **kwargs):
        """ Configure the radar component options.

        :param schema:
            Default radar indicators specifying the chart's dimensions;
            each entry is converted to a {name: xx, max: xx} dict.
        :param c_schema:
            User-defined radar indicators specifying the chart's dimensions.
            name: indicator name
            min: indicator minimum value
            max: indicator maximum value
        :param shape:
            Radar render type; supports polygon and circle.
        :param rader_text_color:
            Font colour of the radar data items.
        :param kwargs:
        """
        chart = get_all_options(**kwargs)
        indicator = []
        if schema:
            for s in schema:
                _name, _max = s
                indicator.append({"name": _name, "max": _max})
        if c_schema:
            # A custom schema completely replaces the default one.
            indicator = c_schema
        self._option.update(
            radar={
                "indicator": indicator,
                "shape": shape,
                "name": {"textStyle": {"color": rader_text_color}},
                "splitLine": chart['split_line'],
                "splitArea": chart['split_area'],
                "axisLine": chart['axis_line']}
        )

    def add(self, *args, **kwargs):
        # Public wrapper around the name-mangled implementation below.
        self.__add(*args, **kwargs)

    def __add(self, name, value, item_color=None, **kwargs):
        """
        :param name:
            Legend name of this series.
        :param value:
            Data items, a list of lists [[]]; each row is one data item and
            each column belongs to one dimension.
        :param item_color:
            Colour for this single series/legend entry.
        :param kwargs:
        """
        kwargs.update(flag=True, type='radar')
        chart = get_all_options(**kwargs)
        self._option.get('legend')[0].get('data').append(name)
        self._option.get('series').append({
            "type": "radar",
            "name": name,
            "data": value,
            "symbol": chart['symbol'],
            "itemStyle": {"normal": {"color": item_color}},
            "lineStyle": chart['line_style'],
            "areaStyle": {"normal": chart['area_style']},
        })
        self._legend_visualmap_colorlst(**kwargs)
| StarcoderdataPython |
359174 | #!/usr/bin/python
#FIXME: -Error handling
# -Bind to request from all addresses
"""
This is the holiday server. It provides a generic interface to a holiday calender for
multiple users. Communication is done via an abstract interface, currently using NC::SocketServer.
See the man page holiday for further details. Software is distributed under the MIT license.
Copyright (c) 2006-2015: <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import sys, os, logging, ConfigParser, logging, SocketServer, socket
from glob import glob
from os import F_OK, R_OK, W_OK
#FIXME: Replace "chomps" by proper chomps that can handle Windows line breaks as well
#FIXME: Keep this and write Python 3.x version
#########################################################
# Helper classes ########################################
#########################################################
class hContainer:
    "Container class for global context"
    # Class-level defaults; load_cfg()/load_usr() populate them.
    # NOTE: Python 2 code (ConfigParser, iteritems, has_key, file()).
    htable = {};
    port = 1970
    privileged = None
    logfile = 'logfile.log'
    logger = None
    __init_done__ = False

    def __init__(self):
        #FIXME: Log file name is fix, logging is done to root logger
        #       If this shall be given in cfg file name, we must
        #       remove load_usr here and call it externally.
        logger = logging.getLogger('')
        self.load_cfg()
        self.load_usr()

    def load_cfg(self):
        "load global configuration file"
        # Guard: only read the configuration once.
        if self.__init_done__: return None
        else: self.__init_done__ = True
        if os.access('holiday.conf', F_OK): #no -r function using euid as in perl???
            try:
                fd = open('holiday.conf')
                cfg = ConfigParser.ConfigParser()
                cfg.readfp(fd)
                if cfg.has_section('global'):
                    if cfg.has_option('global', 'port'):
                        self.port = cfg.getint('global', 'port')
                    if cfg.has_option('global', 'privileged'):
                        # Whitespace-separated list of privileged client IPs.
                        self.privileged = cfg.get('global', 'privileged').split()
                fd.close()
                fd = None
            except IOError:
                pass #defaults set above

    def load_usr(self):
        "Loads currently saved holiday calenders for all users."
        # One "<user>.usr" file per user: first line is the group letter,
        # the remaining lines are "<start-date>|<length>" records.
        for uf in glob('*.usr'):
            fd = file(uf, 'r')
            u = uf[0 : -4]
            line = fd.readline()
            self.htable[u] = {}
            self.htable[u]['group'] = line[0:-1] #chomp
            self.htable[u]['times'] = {}
            while True:
                line = fd.readline()
                if len(line) == 0:
                    break
                line = line[0:-1] #chomp
                d = line.split('|')
                self.htable[u]['times'][int(d[0])] = int(d[1])
            fd.close()
        if not self.htable.has_key('gast'): #default entry
            self.htable['gast'] = { 'group' : 'N', 'times' : { 2005001 : 6, 2005320 : 2 } };

    def save_usr(self):
        "Save user timetables to config files."
        # Mirrors the format read by load_usr().
        for k, v in self.htable.iteritems():
            fd = open(k + '.usr', 'w')
            fd.write(v['group'] + '\n')
            for d, l in v['times'].iteritems():
                fd.write(str(d) + '|' + str(l) + '\n')
            fd.close()
            fd = None
#########################################################
class hSocketServer(SocketServer.TCPServer):
    "Minimal extension of TCPServer to provide context for application"
    # Set to True by a request handler to leave the main accept loop.
    shutdown = False
    # Shared hContainer with the holiday tables and settings.
    ctx = None
    # Marker sent instead of an empty string so the client always gets a line.
    empty_char = "\0"

    def __init__(self, rqh_class, ct):
        # SocketServer.TCPServer.__init__(self, (socket.INADDR_ANY, ct.port), rqh_class)
        SocketServer.TCPServer.__init__(self, ('0.0.0.0', ct.port), rqh_class)
        self.ctx = ct

    def format_response(self, s):
        """Terminate a response line with CRLF, substituting the empty marker."""
        ret = None
        if s == None or len(s) == 0: ret = self.empty_char
        else: ret = s
        return ret + "\r\n"
#########################################################
class hRequestHandler(SocketServer.StreamRequestHandler):
    """
    (Necessary) custom request handler class for handling
    server requests. We do not separate communication
    from contents because communication is nearly nothing
    with Python's StreamRequestHandler
    """

    def handle(self):
        """
        Unfortunately everything needs to be done here. Get
        request from client (one line), handle request and
        send response.

        Supported commands (pipe-separated fields):
          test|...            echo the request back
          shutdown            stop the server main loop
          addh|user|start|len add/change a holiday entry
          delh|user|start     delete a holiday entry
          getu|user           list one user's holidays
          geta                list all users' holidays
          addu|user|group     create a user (privileged only)
          getl                list all user names
          delu|user           delete a user (privileged only)
        """
        htable = self.server.ctx.htable
        logger = logging.getLogger('')
        req = self.rfile.readline()
        req = req[0:-2] #truncate \r\n
        data = req.split('|')
        # Missing fields raise IndexError, answered generically below.
        try:
            if data[0] == 'test':
                self.wfile.write('response\n')
                self.wfile.write(req)
            elif data[0] == 'shutdown':
                self.server.shutdown = True
            elif data[0] == 'addh':
                if htable.has_key(data[1]):
                    htable[data[1]]['times'][int(data[2])] = int(data[3])
                    logger.info("Created/changed holiday: U:" + data[1] + ",S:" + data[2] + ",C:" + data[3] + ".")
                    self.wfile.write(self.server.format_response(""))
                else:
                    self.wfile.write(self.server.format_response('User ' + data[1] + ' does not exist, you are not allowed to do this'))
            elif data[0] == 'delh':
                logger.info('Deleted holiday: U:' + data[1] + ',S:' + data[2])
                del htable[data[1]]['times'][int(data[2])]
                self.wfile.write(self.server.format_response(""))
            elif data[0] == 'getu':
                # One "<start>|<length>" line per holiday of this user.
                if htable.has_key(data[1]):
                    for k, v in htable[data[1]]['times'].iteritems():
                        self.wfile.write(self.server.format_response(str(k) + '|' + str(v)))
            elif data[0] == 'geta':
                # One "<user>|<start>|<length>" line per holiday of any user.
                for u, d in htable.iteritems():
                    for k, v in d['times'].iteritems():
                        self.wfile.write(self.server.format_response(u + '|' + str(k) + '|' + str(v)))
            elif data[0] == 'addu':
                # Only allowed from privileged client addresses (or everywhere
                # when no privileged list is configured).
                if self.server.ctx.privileged is None or self.client_address[0] in self.server.ctx.privileged:
                    htable[data[1]] = {}
                    htable[data[1]]['group'] = data[2]
                    htable[data[1]]['times'] = {}
                    self.wfile.write(self.server.format_response(""))
                else:
                    logger.warn("addu not allowed from non privileged connection...")
                    self.wfile.write(self.server.format_response("Non privileged connection, you are not allowed to do this"))
            elif data[0] == 'getl':
                for u in htable.iterkeys():
                    self.wfile.write(self.server.format_response(u))
            elif data[0] == 'delu':
                if self.server.ctx.privileged is None or self.client_address[0] in self.server.ctx.privileged:
                    u = data[1]
                    if htable.has_key(u):
                        if os.path.isfile(u + '.usr'):
                            # Remove the on-disk file first, then the entry.
                            try:
                                os.remove(u + '.usr')
                                del htable[u]
                                self.wfile.write(self.server.format_response(""))
                            except:
                                logger.warn('Cannot remove user file:'+u)
                                self.wfile.write(self.server.format_response("Server error: Cannot delete"))
                        else:
                            del htable[u]
                            self.wfile.write(self.server.format_response(""))
                    else:
                        self.wfile.write(self.server.format_response("User does not exist:"+u))
                else:
                    logger.warn("delu not allowed from non privileged connection...")
                    self.wfile.write(self.server.format_response("Non privileged connection, you are not allowed to do this"))
            else:
                self.wfile.write(self.server.format_response("Unknown command:" + data[0] + "."))
        except IndexError:
            self.wfile.write(self.server.format_response('Incomplete command:' + req))
        self.wfile.write("\r\n") #terminate output
###################################
# MAIN STARTS
###################################
#Create root logger
logger = logging.getLogger('') #get root logger
logger.setLevel(logging.INFO) #override default 'WARNING'
t = logging.FileHandler('logfile.log')
t.setFormatter(logging.Formatter('%(name)s %(levelname)s %(module)s %(asctime)s %(message)s'))
logger.addHandler(t) #add one handler, otherwise it would be created implicitly in 1st msg
t = None
del t
logger.info('Startup')
# Build the global context (reads holiday.conf and all *.usr files).
server_ctx = hContainer()
if (server_ctx.privileged is None):
    logger.info('All access is privileged')
else:
    logger.info('Privileged access is restricted')
sock = hSocketServer(hRequestHandler, server_ctx)
# Serve one request at a time until a client sends "shutdown".
while not sock.shutdown:
    sock.handle_request()
sock.server_close()
server_ctx.save_usr() #implies we wish to save after every quit of main loop
8012110 | import json
import discord
import requests
import tweepy
from discord.ext import commands, tasks
class StreamListener(tweepy.StreamListener):
    """Tweepy stream listener that forwards each tweet's URL to a Discord webhook."""

    def __init__(self):
        # Webhook URLs are looked up from the bot's JSON configuration.
        with open("./config.json") as f:
            self.config = json.load(f)

    def on_error(self, status_code: int) -> bool:
        """Disconnect the stream when Twitter rate-limits us (HTTP 420)."""
        if status_code == 420:
            print("Rate limit reached. ")
            # returning False in on_error disconnects the stream
            return False

    def on_data(self, data):
        """Build the tweet URL from the raw payload and POST it to the webhook."""
        data = json.loads(data)
        try:
            # Some payloads nest the tweet under a "tweet" key.
            tweetUser = data["tweet"]["user"]["screen_name"]
            tweetID = data["tweet"]["id_str"]
        except KeyError:  # narrowed from a bare except: only missing keys are expected
            tweetUser = data["user"]["screen_name"]
            tweetID = data["id_str"]
        tweetLink = f"https://twitter.com/{tweetUser}/status/{tweetID}"
        body = {"content": tweetLink}
        # BUG FIX: removed the dead "global config" statement; the webhook URL
        # has always come from self.config loaded in __init__.
        r = requests.post(
            self.config["574267523869179904"]["tweetWebhook"],
            headers={"Content-Type": "application/json"},
            data=json.dumps(body),
        )
        print(r.status_code)
        print(r.text)
class Twitter(commands.Cog):
    """Discord cog that streams tweets from a fixed account into a channel."""

    def __init__(self, bot):
        self.bot = bot
        # OAuth credentials come from the bot-wide configuration.
        auth = tweepy.OAuthHandler(
            self.bot.config["twitter"]["consumer_key"],
            self.bot.config["twitter"]["consumer_secret"],
        )
        auth.set_access_token(
            self.bot.config["twitter"]["access_token"],
            self.bot.config["twitter"]["access_token_secret"],
        )
        api = tweepy.API(auth)
        myStreamListener = StreamListener()
        stream = tweepy.Stream(auth=api.auth, listener=myStreamListener)
        # Follow a single account id; is_async keeps the bot responsive.
        stream.filter(follow=["1287799985040437254"], is_async=True)
def setup(bot):
    """discord.py extension entry point: attach the Twitter cog to *bot*."""
    bot.add_cog(Twitter(bot))
| StarcoderdataPython |
3578558 | <gh_stars>1-10
import math
import torch
import random
import numpy as np
import matplotlib.pyplot as plt
# import modules
from training.chessEnv import ChessEnv
from models.models import ChessNN
# Use GPU, if available
USE_CUDA = torch.cuda.is_available()
class Trainer():
    """DQN-style trainer for the chess environment.

    Subclasses are expected to supply a replay buffer by overriding
    ``push_to_buffer`` and ``compute_td_loss`` (both are no-ops here).
    """

    def __init__(self, parameters, simple=True):
        # Opponent does not move
        self.env = ChessEnv(parameters["rewards"], simple=simple)
        self.load_model()
        self.optimizer = torch.optim.Adam(self.current_model.parameters())
        # Hyper-parameters supplied by the caller.
        self.num_frames = parameters["num_frames"]
        self.buffersize = parameters["buffersize"]
        self.batch_size = parameters["batch_size"]
        self.gamma = parameters["gamma"]
        self.epsilon_start = parameters["epsilon_start"]
        self.epsilon_final = parameters["epsilon_final"]
        self.epsilon_decay = parameters["epsilon_decay"]
        self.max_illegal = parameters["max_illegal"]

    def load_model(self):
        """Create the online and target networks and sync their weights.

        Checkpoint loading is currently disabled (kept below for reference).
        """
        # try:
        #     if USE_CUDA:
        #         self.current_model = torch.load("models/model.pkl")
        #         self.target_model = torch.load("models/model.pkl")
        #     else:
        #         self.current_model = torch.load(
        #             "models/model.pkl", map_location={'cuda:0': 'cpu'})
        #         self.target_model = torch.load(
        #             "models/model.pkl", map_location={'cuda:0': 'cpu'})
        # except:
        self.current_model = ChessNN(
            self.env.observation_space.shape, self.env.action_space.n)
        self.target_model = ChessNN(
            self.env.observation_space.shape, self.env.action_space.n)
        if USE_CUDA:
            self.current_model = self.current_model.cuda()
            self.target_model = self.target_model.cuda()
        self.update_target(self.current_model, self.target_model)  # sync nets

    def update_target(self, current_model, target_model):
        # NOTE(review): the parameters are ignored; the copy always goes from
        # self.current_model to self.target_model.
        self.target_model.load_state_dict(self.current_model.state_dict())

    def plot(self, frame_idx, rewards, losses):
        """Show episode rewards and training loss side by side."""
        plt.figure(figsize=(20, 5))
        plt.subplot(121)
        plt.title('frame %s. reward: %s' % (frame_idx, np.mean(rewards[-10:])))
        plt.plot(rewards)
        plt.subplot(122)
        plt.title('loss')
        plt.plot(losses)
        plt.show()

    # def epsilon_by_frame(self, frame_idx):
    #     decay = math.exp(-1. * frame_idx / self.epsilon_decay)
    #     return self.epsilon_final + (self.epsilon_start - self.epsilon_final) * decay

    def epsilon_by_frame(self, frame_idx):
        """Linearly anneal epsilon from epsilon_start to epsilon_final."""
        m = (self.epsilon_final - self.epsilon_start) / self.epsilon_decay
        epsilon_linear = self.epsilon_start + m * frame_idx
        return max(epsilon_linear, self.epsilon_final)

    def push_to_buffer(self, *args):
        # Hook: store a (state, action, reward, next_state, done) transition.
        pass

    def compute_td_loss(self, *args):
        # Hook: sample the buffer and return the TD loss tensor.
        return None

    def train(self):
        """Main training loop: act, record transitions, learn, log, checkpoint."""
        # Variables
        color = True  # random.random() > 0.5 # white if True
        state = self.env.reset(color)
        losses = []
        all_rewards = []
        episode_reward = 0
        # Log
        wins = 0
        defeats = 0
        draws = 0
        legalNN = 0
        illegal = 0
        illegalNN = 0
        explore = 0
        # Training
        for frame_idx in range(1, self.num_frames + 1):
            epsilon = self.epsilon_by_frame(frame_idx)
            # Select action
            action = self.current_model.act(state, epsilon)
            # if exploring allow only some illegal moves
            if action < 0:
                action = self.env.getLegalAction()
                explore += 1
                # action = - action if random.random() > 0.5 else self.env.getLegalAction()
            elif self.env.is_legal_action(action):
                legalNN += 1
            elif illegal >= 1000 * self.max_illegal:
                # Too many illegal network moves in this window: fall back
                # to a random legal move.
                action = self.env.getLegalAction()
                explore += 1
            else:
                illegalNN += 1
            # Move action
            next_state, reward, done, info = self.env.step(action)
            self.push_to_buffer(state, action, reward, next_state, done)
            # Accumulate rewards
            episode_reward += reward
            # Count illegal moves
            if info.startswith('illegal'):
                illegal += 1
                # Move when illegal to get more states
                action = self.env.getLegalAction()
                next_state, _, done, _ = self.env.step(action)
            # Check if game has been terminated
            if done:
                color = True  # random.random() > 0.5 # randomly choose player color
                state = self.env.reset(color)
                all_rewards.append(episode_reward)
                episode_reward = 0
                if info == 'win':
                    wins += 1
                elif info == 'loss':
                    defeats += 1
                elif info == 'draw':
                    draws += 1
            else:
                state = next_state
            # Train once the replay buffer has warmed up.
            if frame_idx > self.buffersize:
                loss = self.compute_td_loss(self.batch_size, frame_idx)
                losses.append(loss.data.item())
            # Update Target network and log a progress line every 1000 frames.
            if frame_idx % 1000 == 0:
                self.update_target(self.current_model, self.target_model)
                percentNN = round(100 * legalNN / (legalNN + illegalNN), 2) if legalNN + illegalNN != 0 else 0
                loss = round(np.mean(losses[-1000:]), 5) if losses else np.nan
                print("{} frames: {}-{}-{}, {}% illegal, {}% legal NN moves, {}% explored, {} loss".format(
                    frame_idx, wins, defeats, draws, round(illegal / 10, 2),
                    percentNN, round(explore / 10, 2), loss))
                wins, defeats, draws, illegal, legalNN, illegalNN, explore = 0, 0, 0, 0, 0, 0, 0
            # Save the current model
            if frame_idx % 10000 == 0:
                torch.save(self.current_model, "models/model.pkl")
        torch.save(self.current_model, "models/model.pkl")
        print('Training finished.')
        self.plot(frame_idx, all_rewards, losses)
| StarcoderdataPython |
8049825 | import unittest
import rastervision as rv
@unittest.skipIf(not rv.backend.pytorch_available, 'PyTorch is not available')
class TestPyTorchSemanticSegmentationConfig(unittest.TestCase):
    """Round-trip a PyTorch semantic-segmentation backend config through protobuf."""

    def test_builder(self):
        """Options set via the builder must survive to_proto/from_proto."""
        batch_size = 10
        num_epochs = 10
        chip_size = 300
        class_map = {'red': (1, 'red'), 'green': (2, 'green')}
        task = rv.TaskConfig.builder(rv.SEMANTIC_SEGMENTATION) \
                            .with_chip_size(chip_size) \
                            .with_classes(class_map) \
                            .with_chip_options(window_method='sliding',
                                               stride=chip_size,
                                               debug_chip_probability=1.0) \
                            .build()
        backend = rv.BackendConfig.builder(rv.PYTORCH_SEMANTIC_SEGMENTATION) \
                                  .with_task(task) \
                                  .with_train_options(
                                      batch_size=batch_size,
                                      num_epochs=num_epochs) \
                                  .build()
        # Serialize and rebuild; the train options must be preserved.
        msg = backend.to_proto()
        backend = rv.BackendConfig.builder(rv.PYTORCH_SEMANTIC_SEGMENTATION) \
                                  .from_proto(msg).build()
        self.assertEqual(backend.train_opts.batch_size, batch_size)
        self.assertEqual(backend.train_opts.num_epochs, num_epochs)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
9783407 | <reponame>Derpimort/VGGVox-PyTorch
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 10 12:52:25 2020
@author: darp_lord
"""
import torch
import torch.nn as nn
from collections import OrderedDict
import numpy as np
class VGGM(nn.Module):
    """VGG-M style CNN for speaker identification (VGGVox).

    Takes spectrogram input of shape (batch, 1, 512, W) and returns class
    logits of shape (batch, n_classes).  Layer names are kept identical to
    the original definition so saved state dicts remain loadable.
    """

    def __init__(self, n_classes=1251):
        super(VGGM, self).__init__()
        self.n_classes = n_classes
        feature_layers = [
            ('conv1', nn.Conv2d(in_channels=1, out_channels=96, kernel_size=(7, 7), stride=(2, 2), padding=1)),
            ('bn1', nn.BatchNorm2d(96, momentum=0.5)),
            ('relu1', nn.ReLU()),
            ('mpool1', nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2))),
            ('conv2', nn.Conv2d(in_channels=96, out_channels=256, kernel_size=(5, 5), stride=(2, 2), padding=1)),
            ('bn2', nn.BatchNorm2d(256, momentum=0.5)),
            ('relu2', nn.ReLU()),
            ('mpool2', nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2))),
            ('conv3', nn.Conv2d(in_channels=256, out_channels=384, kernel_size=(3, 3), stride=(1, 1), padding=1)),
            ('bn3', nn.BatchNorm2d(384, momentum=0.5)),
            ('relu3', nn.ReLU()),
            ('conv4', nn.Conv2d(in_channels=384, out_channels=256, kernel_size=(3, 3), stride=(1, 1), padding=1)),
            ('bn4', nn.BatchNorm2d(256, momentum=0.5)),
            ('relu4', nn.ReLU()),
            ('conv5', nn.Conv2d(in_channels=256, out_channels=256, kernel_size=(3, 3), stride=(1, 1), padding=1)),
            ('bn5', nn.BatchNorm2d(256, momentum=0.5)),
            ('relu5', nn.ReLU()),
            ('mpool5', nn.MaxPool2d(kernel_size=(5, 3), stride=(3, 2))),
            # fc6 is a convolution spanning the full remaining height.
            ('fc6', nn.Conv2d(in_channels=256, out_channels=4096, kernel_size=(9, 1), stride=(1, 1))),
            ('bn6', nn.BatchNorm2d(4096, momentum=0.5)),
            ('relu6', nn.ReLU()),
            # Average over whatever spatial extent is left (handles variable W).
            ('apool6', nn.AdaptiveAvgPool2d((1, 1))),
            ('flatten', nn.Flatten()),
        ]
        self.features = nn.Sequential(OrderedDict(feature_layers))
        classifier_layers = [
            ('fc7', nn.Linear(4096, 1024)),
            ('relu7', nn.ReLU()),
            ('fc8', nn.Linear(1024, n_classes)),
        ]
        self.classifier = nn.Sequential(OrderedDict(classifier_layers))

    def forward(self, inp):
        embedding = self.features(inp)
        return self.classifier(embedding)
if __name__=="__main__":
from torchsummary import summary
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model=VGGM(1251)
model.to(device)
print(summary(model, (1,512,300)))
| StarcoderdataPython |
5061594 | <filename>cocoa/inter/stats_label.py
from cocoa.inter import PyGUIObject, GUIObjectView
class StatsLabelView(GUIObjectView):
    """Objective-C view interface for the stats label; adds nothing to GUIObjectView."""
    pass
class PyStatsLabel(PyGUIObject):
    """Cocoa bridge object exposing the stats-label model to the UI."""
    def display(self) -> str:
        # Straight passthrough of the model's preformatted display string.
        return self.model.display
| StarcoderdataPython |
5133908 | from django.apps import AppConfig
class FinanceAccountsConfig(AppConfig):
    """Django app configuration for the finance_accounts application."""
    name = 'applications.finance_accounts'
| StarcoderdataPython |
35832 | <gh_stars>0
from typing import Generic, List, Optional, Type, TypeVar, Union, Dict, Any
from uuid import UUID
from fastapi import HTTPException
from fastapi.encoders import jsonable_encoder
from pydantic import BaseModel
from sqlalchemy.orm import Session
from src.db.sqlalchemy.database import Base
ModelType = TypeVar("ModelType", bound=Base)
CreateModelType = TypeVar("CreateModelType", bound=BaseModel)
UpdateSchemaType = TypeVar("UpdateSchemaType", bound=BaseModel)
class ControllerBase(Generic[ModelType, CreateModelType, UpdateSchemaType]):
    """Generic SQLAlchemy CRUD controller parameterized by ORM model and
    Pydantic create/update schemas."""
    def __init__(self, model: Type[ModelType]):
        """
        CRUD object with default methods to Create, Read, Update, Delete (CRUD).
        """
        # ORM model class this controller operates on.
        self.model = model
    def get(self, db: Session, uuid: UUID) -> Optional[ModelType]:
        """Return the row with primary key ``uuid``, or None if absent."""
        return db.query(self.model).filter(self.model.id == uuid).first()
    def get_all(self, db: Session, skip: int = 0, limit: int = 1000) -> List[ModelType]:
        """Return up to ``limit`` rows, skipping the first ``skip``."""
        return db.query(self.model).offset(skip).limit(limit).all()
    def create(self, db: Session, *, create: CreateModelType) -> ModelType:
        """Insert a new row from the create schema and return the persisted object."""
        # jsonable_encoder normalizes nested/typed fields into plain values
        # suitable as keyword arguments for the ORM constructor.
        create_obj = jsonable_encoder(create)
        db_obj = self.model(**create_obj)  # type: ignore
        db.add(db_obj)
        db.commit()
        # refresh re-reads DB-generated fields (id, defaults) into the object.
        db.refresh(db_obj)
        return db_obj
    def update(self, db: Session, *, db_obj: ModelType, update_obj: Union[UpdateSchemaType, Dict[str, Any]]) -> ModelType:
        """Apply the fields present in ``update_obj`` onto ``db_obj`` and persist."""
        obj_data = jsonable_encoder(db_obj)
        if isinstance(update_obj, dict):
            update_data = update_obj
        else:
            # exclude_unset: only fields the caller explicitly provided.
            update_data = update_obj.dict(exclude_unset=True)
        # Only touch attributes that exist on the stored object.
        for field in obj_data:
            if field in update_data:
                setattr(db_obj, field, update_data[field])
        db.add(db_obj)
        db.commit()
        db.refresh(db_obj)
        return db_obj
    def remove(self, db: Session, uuid: UUID) -> ModelType:
        """Delete the row with primary key ``uuid``; raises 404 if missing."""
        obj = self.get_or_error(db=db, uuid=uuid)
        db.delete(obj)
        db.commit()
        return obj
    def get_or_error(self, db: Session, uuid: UUID) -> ModelType:
        """Return the row with primary key ``uuid`` or raise HTTP 404."""
        obj = self.get(db=db, uuid=uuid)
        if not obj:
            raise HTTPException(status_code=404, detail="Not found.")
        return obj
| StarcoderdataPython |
3493007 | from enum import Enum, unique
@unique
class MessageType(Enum):
    """Kinds of deliverable messages; orderable so they can sit in a priority queue."""

    MESSAGE = 1
    WAKEUP = 2

    def __lt__(self, other):
        # Compare by the underlying integer value of each member.
        lhs, rhs = self.value, other.value
        return lhs < rhs
class Message:
    """A bare message carrying an arbitrary body plus a creation serial number.

    Envelope/header data is handled outside the message itself; the body may
    be anything (WAKEUP messages may legitimately have none).  The class-wide
    ``uniq`` counter gives each instance a strictly increasing serial so that
    messages created earlier win ties when queued for the same delivery time —
    Python 3 priority queues require a total order, and this makes the
    tie-break deterministic (FIFO) rather than arbitrary.
    """

    # Shared, monotonically increasing creation counter.
    uniq = 0

    def __init__ (self, body = None):
        # Arbitrary payload; subclasses are expected to validate their own
        # body contents (the base class performs no error checking).
        self.body = body
        # Claim the next serial number, then advance the shared counter.
        self.uniq = Message.uniq
        Message.uniq += 1

    def __lt__(self, other):
        # Required so Messages can live in a priority queue; assumes the
        # other object is also a Message.
        return self.uniq < other.uniq

    def __str__(self):
        # Printable form is simply the body's string representation.
        return str(self.body)
| StarcoderdataPython |
3273267 | #!/usr/bin/env python
# coding: utf-8
# define custom exceptions
class HeaderAndRowLenNotMatch(Exception):
    """Raised when a data row's length does not match the header's length."""
class HeaderNotDefine(Exception):
    """Raised when an operation requires a header that was never defined."""
| StarcoderdataPython |
6479662 | <reponame>groboclown/nightjar-mesh
"""Some constant values."""
# Accepted lower-case spellings for boolean-like configuration values.
TRUE_VALUES = ('yes', 'true', '1', 'on', 'enable', 'active', 'activate',)
FALSE_VALUES = ('no', 'false', '0', 'off', 'disable', 'deactivate',)
| StarcoderdataPython |
5020509 | <reponame>rsk1130/image_classifier
import torch
from torch import nn
from torch import optim
from PIL import Image
from torchvision import datasets, transforms, models
import numpy as np
import matplotlib.pyplot as plt
def process_image(image):
    """Load an image file and preprocess it for an ImageNet-trained model.

    Resizes the shorter side to 256, center-crops to 224x224, converts to a
    tensor (channels-first, values in [0, 1]) and normalizes with the
    standard ImageNet mean/std.  Returns the result as a numpy array of
    shape (3, 224, 224).
    """
    transform = transforms.Compose([transforms.Resize(256),
                                    transforms.CenterCrop(224),
                                    transforms.ToTensor(),
                                    transforms.Normalize([0.485, 0.456, 0.406],
                                                         [0.229, 0.224, 0.225])])
    pil_image = Image.open(image)
    pil_image = transform(pil_image)
    # Convert the resulting tensor to a plain numpy array for downstream use.
    np_image = np.array(pil_image)
    return np_image
def imshow(image, ax=None, title=None):
    """Imshow for Tensor.

    Undoes the ImageNet preprocessing applied by ``process_image`` (transpose
    back to channels-last, de-normalize, clip) and draws the image on ``ax``
    (a new axes is created when none is given).  Returns the axes used.
    Note: ``title`` is currently accepted but not rendered.
    """
    if ax is None:
        fig, ax = plt.subplots()
    # PyTorch tensors assume the color channel is the first dimension
    # but matplotlib assumes is the third dimension
    image = image.transpose((1, 2, 0))
    # Undo preprocessing
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    image = std * image + mean
    # Image needs to be clipped between 0 and 1 or it looks like noise when displayed
    image = np.clip(image, 0, 1)
    ax.imshow(image)
    return ax
6619205 | from PyQt5 import QtWidgets, QtCore, QtGui
from PyQt5.QtWidgets import QLabel, QCheckBox, QFrame, QVBoxLayout, QHBoxLayout, QComboBox
import config as config
def init_view(label, container, label_bold=True, position="centertop", vertical=True):
    """Attach a box layout to *container*, optionally headed by a styled QLabel.

    Returns the created QVBoxLayout/QHBoxLayout so callers can keep adding
    widgets to it.  ``position`` selects the label alignment.
    """
    if vertical:
        vl = QVBoxLayout(container)
    else:
        vl = QHBoxLayout(container)
    if label:
        ql = QLabel()
        if label_bold:
            ql.setStyleSheet("font: bold 14px;")
        # Bugfix: QLabel.setAlignment() *replaces* the whole alignment flag,
        # so the original back-to-back calls discarded the first one.  The
        # horizontal and vertical flags must be OR-ed in a single call.
        alignments = {
            "centertop": QtCore.Qt.AlignHCenter | QtCore.Qt.AlignTop,
            "center": QtCore.Qt.AlignCenter,
            "rightbottom": QtCore.Qt.AlignRight | QtCore.Qt.AlignBottom,
            "righttop": QtCore.Qt.AlignRight | QtCore.Qt.AlignTop,
            "lefttop": QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop,
            "leftbottom": QtCore.Qt.AlignLeft | QtCore.Qt.AlignBottom,
        }
        if position in alignments:
            ql.setAlignment(alignments[position])
        ql.setText(label)
        vl.addWidget(ql)
    return vl
def init_container(parent, label=None, label_position=None, label_bold=True, vertical=True, style=None, size=None):
    """Create a (optionally fixed-size, styled) widget under *parent* and
    return the layout produced by ``init_view`` for it."""
    # Bugfix: in PyQt5 QWidget lives in QtWidgets, not QtGui —
    # ``QtGui.QWidget()`` raises AttributeError at runtime.
    container = QtWidgets.QWidget()
    if size:
        container.setFixedWidth(size[0])
        container.setFixedHeight(size[1])
    if style: # set the style of the container, which takes over the invisible layout
        container.setStyleSheet(style)
    parent.addWidget(container)
    vl = init_view(label, container, label_bold, label_position, vertical)
    return vl
def init_button(parent, label=None, function=None, style=config.button_style_classic):
    """Create a push button wired to *function*, add it to *parent*, return it."""
    btn = QtWidgets.QPushButton(text=label)
    btn.clicked.connect(function)
    parent.addWidget(btn)
    # Bugfix: honour the ``style`` argument — it was previously ignored and
    # the classic style was always applied (the default is unchanged).
    btn.setStyleSheet(style)
    return btn
def init_inputBox(parent, label=None, label_bold=False, default_input=None):
    """Create a labelled single-line text box; return (layout, QLineEdit)."""
    block = init_container(parent=parent,
                           label=label,
                           label_bold=label_bold,
                           vertical=False)
    textbox = QtWidgets.QLineEdit()
    textbox.setContentsMargins(5, 0, 0, 0)
    # Pre-fill with the stringified default (note: None becomes "None").
    textbox.setText(str(default_input))
    block.addWidget(textbox)
    textbox.setStyleSheet("background-color:white;")
    return block, textbox
def setup_configPath_block(parent):
    """Build the config-file-path input row; return (is_valid_flag, (layout, textbox)).

    # NOTE(review): the validity flag is always False here — presumably the
    # caller validates the path later; confirm.
    """
    is_valid_config_path = False
    config_textbox = init_inputBox(parent=parent,
                                   label=config.control_tab_config_path_label,
                                   label_bold=True,
                                   default_input=config.control_tab_config_file_path_default)
    return is_valid_config_path, config_textbox
def init_checkBox(parent, label=None, function=None):
    """Create a checkbox whose stateChanged signal calls *function*; return it."""
    box = QCheckBox(label)
    parent.addWidget(box)
    box.stateChanged.connect(function)
    return box
def draw_boarder(parent, width, height):
    """Add a fixed-size styled frame (border) to *parent* and return it.

    (Name kept as-is — "boarder" — for backward compatibility with callers.)
    """
    frame = QFrame()
    frame.setFixedSize(int(width), int(height))
    frame.setFrameShape(QFrame.StyledPanel)
    frame.setLineWidth(2)
    frame.setContentsMargins(5, 5, 5, 5)
    parent.addWidget(frame)
    return frame
def init_combo_box(parent, label, item_list):
    """Create a labelled combo box populated with *item_list*; return the box."""
    container = init_container(parent=parent, label=label, vertical=False)
    # Removed an unused ``QtGui.QWidget()`` local: it was never added to any
    # layout, and QtGui has no QWidget attribute in PyQt5 anyway.
    combo_box = QComboBox()
    for item in item_list:
        combo_box.addItem(item)
    container.addWidget(combo_box)
    return combo_box
| StarcoderdataPython |
9671311 | <reponame>AltairStar/PyReqBS
import sys
sys.path.append('../src/Coin.py')
from src import Coin
# Demo: scrape Coinmarketcap twice — once for the coin chosen at construction
# time ('xrp'), once for an explicitly passed coin name.
scrapper = Coin.Coinmarketcap('xrp')#default param
scrapper.get_info()#default is initialization param in class
scrapper.get_info("bitcoin")#use another param
9741984 | from types import FunctionType
from src.managers.RequestManager import RequestManager
class Application:
    """WSGI application entry point; delegates all requests to RequestManager."""
    def __call__(self, environ: dict, start_response: FunctionType) -> tuple:
        """
        Method to be called when make_server function starts
        Args:
            environ (dict): Enviroment variable with all the request data
            start_response (function): Function to setup the status code and response headers
        Returns:
            The value produced by RequestManager.process.
            NOTE(review): annotated as ``tuple`` but the original docstring
            said ``bytes``; a WSGI callable should return an iterable of byte
            strings — confirm what RequestManager.process actually returns.
        """
        return RequestManager.process(environ, start_response)
| StarcoderdataPython |
6666729 | <reponame>benayas1/benatools
import tensorflow as tf
def _log(s, verbose):
"""
Prints a message on the screen
"""
if verbose:
print(s)
def get_device_strategy(device, half=False, XLA=False, verbose=True):
    """
    Returns the distributed strategy object, the tune policy and the number of replicas.
    Parameters
    ----------
    device : str
        Possible values are "TPU", "GPU", "CPU"
    half : bool
        On TPU only: enable mixed (bfloat16) precision
    XLA : bool
        On TPU only: enable XLA JIT compilation
    verbose : bool
        Whether to print the output messages or not
    Returns
    -------
    tf.distribute.TPUStrategy
        The distributed strategy object
    int
        The auto tune constant
    int
        Number of TPU cores, to adjust batch size and learning rate
    tf.distribute.cluster_resolver.TPUClusterResolver
        The tpu object
    """
    device = device.upper()
    v = tf.__version__
    tpu = None
    if device == "TPU":
        _log("connecting to TPU...", verbose)
        try:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            _log('Running on TPU ' + tpu.master(), verbose)
        except ValueError:
            _log("Could not connect to TPU", verbose)
            tpu = None
        if tpu:
            try:
                _log("initializing  TPU ...", verbose)
                tf.config.experimental_connect_to_cluster(tpu)
                tf.tpu.experimental.initialize_tpu_system(tpu)
                # TPUStrategy moved out of .experimental in TF 2.3.
                strategy = tf.distribute.TPUStrategy(tpu) if v >= '2.3.0' else tf.distribute.experimental.TPUStrategy(
                    tpu)
                _log("TPU initialized", verbose)
                if half:
                    # NOTE(review): the experimental mixed_precision API is
                    # deprecated in newer TF releases — confirm TF version.
                    from tensorflow.keras.mixed_precision import experimental as mixed_precision
                    policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
                    mixed_precision.set_policy(policy)
                    print('Mixed precision enabled')
                if XLA:
                    tf.config.optimizer.set_jit(True)
                    print('Accelerated Linear Algebra enabled')
            except:
                # Any initialization failure falls back to the GPU/CPU path.
                _log("failed to initialize TPU", verbose)
                device = "GPU"
        else:
            device = "GPU"
    if device != "TPU":
        _log("Using default strategy for CPU and single GPU", verbose)
        strategy = tf.distribute.get_strategy()
    if device == "GPU":
        # list_physical_devices moved out of .experimental in TF 2.1.
        _log("Num GPUs Available: " + str(len(tf.config.experimental.list_physical_devices('GPU') if v < '2.1.0' else
                                              tf.config.list_physical_devices('GPU'))), verbose)
    tune = tf.data.experimental.AUTOTUNE
    replicas = strategy.num_replicas_in_sync
    _log(f'REPLICAS: {replicas}', verbose)
    return strategy, tune, replicas, tpu
def init_tpu(tpu):
    """
    Re-initializes the TPU cluster, useful to clean up memory
    Parameters
    ----------
    tf.distribute.cluster_resolver.TPUClusterResolver
        The TPU cluster (no-op when None)
    """
    if tpu:
        tf.tpu.experimental.initialize_tpu_system(tpu)
| StarcoderdataPython |
3456837 | <filename>zwave_js_server/firmware.py<gh_stars>10-100
"""Firmware update helper."""
from typing import Optional
import aiohttp
from .client import Client
from .model.node import Node
from .util.helpers import convert_bytes_to_base64
async def begin_firmware_update(
    url: str,
    node: Node,
    filename: str,
    file: bytes,
    session: aiohttp.ClientSession,
    file_format: Optional[str] = None,
) -> None:
    """Send beginFirmwareUpdate command to Node.

    Opens a dedicated websocket client to ``url``, sends the firmware file
    (base64-encoded) for ``node``, then disconnects.
    # NOTE(review): if the command raises, the client is never disconnected —
    # consider a try/finally; confirm intended behavior.
    """
    client = Client(url, session)
    await client.connect()
    await client.set_api_schema()
    cmd = {
        "command": "node.begin_firmware_update",
        "nodeId": node.node_id,
        "firmwareFilename": filename,
        # The server expects the binary payload as base64 text.
        "firmwareFile": convert_bytes_to_base64(file),
    }
    if file_format is not None:
        cmd["firmwareFileFormat"] = file_format
    # Schema 5 is the first API schema supporting this command.
    await client.async_send_command(cmd, require_schema=5)
    await client.disconnect()
| StarcoderdataPython |
1827145 | #! /usr/bin/env python
import numpy as np
import scipy
"""
Simple utilities for managing snapshots and
creating training, testing data
"""
def prepare_data(data, soln_names, **options):
    """
    Utility to extract snapshots and time
    arrays from raw data, by ignoring
    initial spin-up times, skipping over
    snapshots, and setting end points.

    Parameters
    ----------
    data : dict
        Raw snapshots; must hold a time array under 'time' or 'T' plus one
        (n_dof, n_snapshots) array per entry of ``soln_names``.
    soln_names : iterable of str
        Keys of the solution fields to extract.
    **options
        start_skip / end_skip : int
            Snapshot slice indices (defaults 0 and -1, i.e. drop the last).
        T_start / T_end : float
            Physical times; when given they override the index options.
        incr : int
            Stride between retained snapshots (default 1).

    Returns
    -------
    (dict, ndarray)
        Per-field sliced snapshot arrays and the matching time array.
    """
    ### Keys used in snapshot files are different
    if 'time' in data:
        t_array = data['time']
    elif 'T' in data:
        t_array = data['T']
    else:
        # Previously this fell through and raised a confusing NameError later.
        raise KeyError("data must provide a time array under 'time' or 'T'")
    # Explicit option lookups replace the original bare ``except:`` blocks,
    # which silently swallowed *any* error (not just a missing key).
    snap_start = options.get('start_skip', 0)
    snap_end = options.get('end_skip', -1)
    if 'T_start' in options:  # overrides 'start_skip'
        T_start = options['T_start']
        # Number of strictly-positive times <= T_start gives the start index.
        snap_start = np.count_nonzero(t_array[t_array <= T_start])
    if 'T_end' in options:  # overrides 'end_skip'
        T_end = options['T_end']
        snap_end = np.count_nonzero(t_array[t_array <= T_end]) + 1
    incr = options.get('incr', 1)
    snap = {}
    for key in soln_names:
        snap[key] = data[key][:, snap_start:snap_end:incr]
    times = t_array[snap_start:snap_end:incr]
    return snap, times
| StarcoderdataPython |
367823 | '''
<NAME>
This script is used to create clusters using the DBSCAN(density-based spatial clustering of applications) implementation
Creates two different types of clusters which are labeled as the yellow and redclusters. The yellow cluster covers areas
that are potentially contaminated. The red cluster covers areas that are likely to be contaminated. The results are
displayed in a diagram that uses matplotlib
'''
import pandas as pd, numpy as np, matplotlib.pyplot as plt
from sklearn.cluster import DBSCAN
from geopy.distance import great_circle
from shapely.geometry import MultiPoint
#yellow cluster function that returns the centroid of each cluster as centermost_point
def get_centermost_point(cluster):
    """Return the cluster member nearest its centroid as a (lat, lon) tuple.

    Returns None for an empty cluster (MultiPoint raises IndexError there).
    """
    try:
        # Fix: the centroid was previously computed twice — once inside the
        # try and again after it; compute it once and reuse.
        centroid = (MultiPoint(cluster).centroid.x, MultiPoint(cluster).centroid.y)
    except IndexError:
        return None
    # Pick the actual data point with the smallest great-circle distance
    # (in meters) to the centroid.
    centermost_point = min(cluster, key=lambda point: great_circle(point, centroid).m)
    return tuple(centermost_point)
#red cluster function that returns the centroid of each cluster as centermost_point
#red cluster function that returns the centroid of each cluster as centermost_point
def get_centermost_point2(cluster2):
    """Return the cluster member nearest its centroid (no empty-cluster guard).

    # NOTE(review): unlike get_centermost_point this has no IndexError guard,
    # and it is never called in this file — ``clustering`` maps
    # get_centermost_point for both areas; confirm which was intended.
    """
    centroid2 = (MultiPoint(cluster2).centroid.x, MultiPoint(cluster2).centroid.y)
    centermost_point2 = min(cluster2, key=lambda point: great_circle(point, centroid2).m)
    return tuple(centermost_point2)
def clustering(df):
    """Run two DBSCAN passes (min_samples 1 and 3) over GPS points, save the
    cluster representatives to CSV, and plot both sets of areas.

    # NOTE(review): the comments call min_samples=1 "yellow" and
    # min_samples=3 "red", but the first result set is written to
    # red_clusters.csv and the second to yellow_clusters.csv, while the plot
    # colors them the other way — confirm which naming is correct.
    """
    #covert latitude and longitude columns in dataframe to a numpy array
    coords = df[['Lat','Lon']].to_numpy()
    kms_per_radian = 6371.0088
    #set the radius of the cluster to 200 meters
    epsilon = .2 / kms_per_radian
    #use the ball_tree algorithm which works in O(nlog(n)) time with a minimum samples of 1
    db = DBSCAN(eps=epsilon, min_samples=1, algorithm='ball_tree', metric='haversine').fit(np.radians(coords))
    cluster_labels = db.labels_
    #count the number of clusters
    num_clusters = len(set(cluster_labels))
    #create a series which is a column of coordinates
    clusters = pd.Series([coords[cluster_labels == n] for n in range(num_clusters)])
    #print the number of clusters
    print('Number of clusters: {}'.format(num_clusters))
    #do the same for the red clusters with a minium samples of 3
    db2 = DBSCAN(eps=epsilon, min_samples=3, algorithm='ball_tree', metric='haversine').fit(np.radians(coords))
    cluster_labels2 = db2.labels_
    num_clusters2 = len(set(cluster_labels2))
    clusters2 = pd.Series([coords[cluster_labels2 == n] for n in range(num_clusters2)])
    #print the number of clusters
    print('Number of clusters: {}'.format(num_clusters2))
    #yellow area
    centermost_points = clusters.map(get_centermost_point)
    #collect the centroids of all clusters in the yellow area and record their latitude and longitude coordinates
    lats, lons = zip(*centermost_points)
    #create a dataframe with longitude and latitude columns
    rep_points = pd.DataFrame({'Lon':lons, 'Lat':lats})
    rs = rep_points.apply(lambda row: df[(df['Lat']==row['Lat']) & (df['Lon']==row['Lon'])].iloc[0], axis=1)
    #store as a csv file
    rs.to_csv('data/red_clusters.csv', index=False)
    #red area
    # NOTE(review): maps get_centermost_point, not get_centermost_point2.
    centermost_points2 = clusters2.map(get_centermost_point)
    #drop all null values
    centermost_points2 = centermost_points2.dropna()
    #collect the centroids of all clusters in the red area and record their latitude and longitude coordinates
    lats, lons = zip(*centermost_points2)
    rep_points = pd.DataFrame({'Lon':lons, 'Lat':lats})
    rs2 = rep_points.apply(lambda row: df[(df['Lat']==row['Lat']) & (df['Lon']==row['Lon'])].iloc[0], axis=1)
    rs2.to_csv('data/yellow_clusters.csv', index=False)
    #plot the data using matplotlib
    fig, ax = plt.subplots(figsize=[10, 6])
    yellow_scatter = ax.scatter(rs['Lon'], rs['Lat'], c='#ffff00', edgecolor='None', alpha=0.7, s=3000)
    red_scatter = ax.scatter(rs2['Lon'], rs2['Lat'], c='#ff0000', edgecolor='None', alpha=0.7, s=3000)
    df_scatter = ax.scatter(df['Lon'], df['Lat'], c='k', alpha=0.9, s=3)
    ax.set_title('DBSCAN areas')
    ax.set_xlabel('Longitude')
    ax.set_ylabel('Latitude')
    plt.show()
if __name__ == '__main__':
    # Guarded so importing this module no longer triggers file I/O and a plot.
    #read in coordinates csv file
    df = pd.read_csv('data/coordinates.csv')
    clustering(df)
| StarcoderdataPython |
3444164 | <filename>Live Events/GetMessage.py<gh_stars>0
import boto3
import json
import pandas as pd
from pprint import pprint
import config_SQS as csqs
import exasol as e
import config_Exasol as ec
import simplejson
# Maximum number of messages fetched per receive_message call.
num_of_records = 1
def get_messages(is_json=True):
    """Fetch up to ``num_of_records`` messages from the configured SQS queue.

    Returns a list of dicts with the fields downstream code needs
    (ReceiptHandle, MessageId, Body, attributes, and the ``event_name``
    message attribute).
    # NOTE(review): ``is_json`` is currently unused (the related parsing is
    # commented out below); confirm whether it can be removed.
    """
    sqsclient = boto3.client(
        'sqs',
        aws_access_key_id = csqs.access_key,
        aws_secret_access_key = csqs.secret_key,
        region_name = csqs.region_name
    )
    # Receive message from SQS queue
    response = sqsclient.receive_message(
        QueueUrl=csqs.sqs_url,
        AttributeNames=[
            'All'
        ],
        MaxNumberOfMessages=num_of_records,
        MessageAttributeNames=[
            'All'
        ],
        VisibilityTimeout=0,
        WaitTimeSeconds=0
    )
    result = []
    # messages = response
    # Round-trip through json to normalize the boto3 response into plain
    # dict/list structures before extracting the Messages list.
    jdumps = json.dumps(response)
    messages = json.loads(jdumps)['Messages']
    # return messages
    # print(response)
    for msg in messages:
        # Parse the strings as JSON so that we can deal with them easier
        # if is_json:
        #     msg = json.loads(msg)
        # else:
        #     body = msg.body
        # msgs = json.loads(msg)['Messages']
        msg_dict = {
            'ReceiptHandle': msg['ReceiptHandle'],
            'MessageId': msg['MessageId'],
            'Body': msg['Body'],
            'MessageAttributes': msg['MessageAttributes'],
            'EventName': msg['MessageAttributes']['event_name']['StringValue'],
            'Attributes': msg['Attributes'],
        }
        result.append(msg_dict)
    return result
if __name__ == '__main__':
    # Manual smoke test: fetch and pretty-print one batch of messages.
    messages = get_messages()
    pprint(messages)
| StarcoderdataPython |
9718383 | <reponame>renaudb/bixistrava
from .strava import Strava
__all__ = ['Strava']
| StarcoderdataPython |
11232940 | #!/usr/bin/env python3
import numpy as np
from enum import IntEnum
class OpCode(IntEnum):
    """Instruction opcodes of the Synacor-style virtual machine (0..21)."""
    HALT = 0
    SET = 1
    PUSH = 2
    POP = 3
    EQ = 4
    GT = 5
    JMP = 6
    JT = 7
    JF = 8
    ADD = 9
    MULT = 10
    MOD = 11
    AND = 12
    OR = 13
    NOT = 14
    RMEM = 15
    WMEM = 16
    CALL = 17
    RET = 18
    OUT = 19
    IN = 20
    NOOP = 21
# 15-bit address space: values 0..32767 are literals/addresses, values
# 32768..32775 address the eight registers.
MEM_SIZE = 32768
REG_COUNT = 8
MAX_LITERAL = 32767
MAX_REGISTER = 32775
class VM(object):
    """Virtual machine for a 15-bit architecture (Synacor-challenge style).

    32768 16-bit memory words, 8 registers addressed as operand values
    32768..32775, and an unbounded stack.  ``run`` dispatches instructions
    via the OPCODES table until HALT (or RET on an empty stack).
    # NOTE(review): OpCode.IN has no registered handler (commented out in
    # OPCODES), so executing an IN instruction raises KeyError in run().
    """
    def __init__(self, program=None, trace=False):
        # program: optional sequence of uint16 words copied to memory[0:].
        # trace: when True, log() prints each executed instruction.
        self._memory = np.zeros(MEM_SIZE, dtype=np.uint16)
        self._registers = np.zeros(REG_COUNT, dtype=np.uint16)
        self._stack = []
        self._ip = 0
        self._sp = 0
        self._trace = trace
        if program is not None:
            self._memory[:len(program)] = program[:]
        return
    def log(self, msg):
        # Tracing helper: print only when the VM was built with trace=True.
        if self._trace:
            print(msg)
        return
    def set_ip(self, ip):
        # Jump target must stay inside addressable memory.
        if ip < MEM_SIZE:
            self._ip = ip
        else:
            # self._ip = ip % MEM_SIZE
            raise RuntimeError(f"New ip {ip} out of range.")
        return
    def inc_ip(self, count=1):
        self.set_ip(self._ip + count)
        return
    def load_value(self, value):
        # Operand decoding: <= MAX_LITERAL is the value itself; 32768..32775
        # reads the corresponding register; anything larger is invalid.
        if value <= MAX_LITERAL:
            pass
        elif value <= MAX_REGISTER:
            value = self._registers[value - 32768]
        else:
            raise RuntimeError(f"Argument {value} is out of range.")
        return value
    def store_value(self, value, address):
        # Destination decoding: memory address or register number.
        if address < MEM_SIZE:
            self._memory[address] = value
        elif address <= MAX_REGISTER:
            self._registers[address - 32768] = value
        else:
            raise RuntimeError(f"Address argument {address} is out of range.")
        return
    def _halt(self):
        # Returning False stops the run() loop.
        self.inc_ip()
        return False
    def _arg_a(self):
        # Raw (undecoded) operand words following the current opcode.
        return self._memory[self._ip+1]
    def _arg_b(self):
        return self._memory[self._ip+2]
    def _arg_c(self):
        return self._memory[self._ip+3]
    def _set(self):
        """set register <a> to the value of <b>"""
        a = self._arg_a()
        b = self._arg_b()
        self.log(f"SET {a} <- {b}")
        self.store_value(b, a)
        self.inc_ip(3) # opcode, reg, value
        return True
    def _push(self):
        """push <a> onto the stack"""
        a = self._arg_a()
        value = self.load_value(a)
        self.log(f"PUSH {a} = {value}")
        self._stack.append(value)
        self.inc_ip(2) # opcode, value
        return True
    def _pop(self):
        """remove the top element from the stack and write it into <a>; empty stack = error"""
        if self._stack:
            a = self._arg_a()
            value = self._stack.pop()
            self.log(f"POP => {a} <- {value}")
            self.store_value(value, a)
        else:
            raise RuntimeError("Cannot pop from empty stack.")
        self.inc_ip(2) # opcode, value
        return True
    def _eq(self):
        """set <a> to 1 if <b> is equal to <c>; set it to 0 otherwise"""
        a = self._arg_a()
        b = self.load_value(self._arg_b())
        c = self.load_value(self._arg_c())
        value = 1 if b == c else 0
        self.log(f"SET {a} <- {b} == {c} ({value})")
        self.store_value(value, a)
        self.inc_ip(4) # opcode, address, op1, op2
        return True
    def _gt(self):
        """set <a> to 1 if <b> is greater than <c>; set it to 0 otherwise"""
        a = self._arg_a()
        b = self.load_value(self._arg_b())
        c = self.load_value(self._arg_c())
        value = 1 if b > c else 0
        self.log(f"GT {a} <- {b} > {c} ({value})")
        self.store_value(value, a)
        self.inc_ip(4) # opcode, address, op1, op2
        return True
    def _jmp(self):
        """jump to <a>"""
        a = self._arg_a()
        value = self.load_value(a)
        self.log(f"JMP ip <- {a} ({value})")
        self.set_ip(value)
        return True
    def _jt(self):
        """if <a> is nonzero, jump to <b>"""
        a = self._arg_a()
        value_a = self.load_value(a)
        if value_a != 0:
            b = self._arg_b()
            value_b = self.load_value(b)
            self.log(f"JT ip <- {b} ({value_b}) if {a} ({value_a}) != 0")
            self.set_ip(value_b)
        else:
            self.inc_ip(3) # opcode, test, destination
        return True
    def _jf(self):
        """if <a> is zero, jump to <b>"""
        a = self._arg_a()
        value_a = self.load_value(a)
        if value_a == 0:
            b = self._arg_b()
            value_b = self.load_value(b)
            self.log(f"JF ip <- {b} ({value_b}) if {a} ({value_a}) == 0")
            self.set_ip(value_b)
        else:
            self.inc_ip(3) # opcode, test, destination
        return True
    def _add(self):
        """assign into <a> the sum of <b> and <c> (modulo 32768)"""
        a = self._arg_a()
        b = self.load_value(self._arg_b())
        c = self.load_value(self._arg_c())
        result = (b + c) % 32768
        self.log(f"ADD {a} <- {b} + {c} ({result}) # % 32768")
        self.store_value(result, a)
        self.inc_ip(4) # opcode, address, op1, op2
        return True
    def _mult(self):
        """store into <a> the product of <b> and <c> (modulo 32768)"""
        a = self._arg_a()
        b = self.load_value(self._arg_b())
        c = self.load_value(self._arg_c())
        # Promote to Python int before multiplying to avoid uint16 overflow.
        result = np.uint16((int(b) * int(c)) % 32768)
        self.log(f"MULT {a} <- {b} * {c} ({result}) # % 32768")
        self.store_value(result, a)
        self.inc_ip(4) # opcode, address, op1, op2
        return True
    def _mod(self):
        """store into <a> the remainder of <b> divided by <c>"""
        a = self._arg_a()
        b = self.load_value(self._arg_b())
        c = self.load_value(self._arg_c())
        value = b % c
        self.log(f"MOD {a} <- {b} % {c} ({value})")
        self.store_value(value, a)
        self.inc_ip(4) # opcode, address, op1, op2
        return True
    def _and(self):
        """stores into <a> the bitwise and of <b> and <c>"""
        a = self._arg_a()
        b = self.load_value(self._arg_b())
        c = self.load_value(self._arg_c())
        value = b & c
        self.log(f"AND {a} <- {b} & {c} ({value})")
        self.store_value(value, a)
        self.inc_ip(4) # opcode, address, op1, op2
        return True
    def _or(self):
        """stores into <a> the bitwise or of <b> and <c>"""
        a = self._arg_a()
        b = self.load_value(self._arg_b())
        c = self.load_value(self._arg_c())
        value = b | c
        self.log(f"OR {a} <- {b} | {c} ({value})")
        self.store_value(b | c, a)
        self.inc_ip(4) # opcode, address, op1, op2
        return True
    def _not(self):
        """stores 15-bit bitwise inverse of <b> in <a>"""
        a = self._arg_a()
        b = self.load_value(self._arg_b())
        # XOR with 0x7FFF flips only the low 15 bits.
        value = b ^ MAX_LITERAL
        self.log(f"NOT {a} <- {b} ^ {MAX_LITERAL} ({value})")
        self.store_value(value, a)
        self.inc_ip(3) # opcode, address, op1
        return True
    def _rmem(self):
        """read memory at address <b> and write it to <a>"""
        a = self._arg_a()
        b = self.load_value(self._arg_b())
        value = self._memory[b]
        self.log(f"RMEM {a} <- *{b} ({value})")
        self.store_value(value, a)
        self.inc_ip(3) # opcode, a, b
        return True
    def _wmem(self):
        """write the value from <b> into memory at address <a>"""
        a = self.load_value(self._arg_a())
        b = self.load_value(self._arg_b())
        self.log(f"WMEM *{a} <- {b}")
        self._memory[a] = b
        self.inc_ip(3) # opcode, a, b
        return True
    def _call(self):
        """write the address of the next instruction to the stack and jump to <a>"""
        self._stack.append(self._ip+2)
        a = self.load_value(self._arg_a())
        self.log(f"CALL ip <- {a}")
        self.set_ip(a)
        return True
    def _ret(self):
        """remove the top element from the stack and jump to it; empty stack = halt"""
        proceed = True
        if self._stack:
            ip = self._stack.pop()
            self.log(f"RET ip <- {ip}")
            self.set_ip(ip)
        else:
            proceed = False
        return proceed
    def _out(self):
        # Emit the character whose code is <a> (no trailing newline).
        print(f"{chr(self.load_value(self._arg_a()))}", end="")
        self.inc_ip(2)
        return True
    def _in(self):
        # NOTE(review): stub — does not read input, does not advance the ip,
        # and is not registered in OPCODES below.
        return True
    def _noop(self):
        self.log("NOOP")
        self.inc_ip()
        return True
    # Dispatch table: maps each opcode to its (unbound) handler method.
    OPCODES = {
        OpCode.HALT: _halt,
        OpCode.SET: _set,
        OpCode.PUSH: _push,
        OpCode.POP: _pop,
        OpCode.EQ: _eq,
        OpCode.GT: _gt,
        OpCode.JMP: _jmp,
        OpCode.JT: _jt,
        OpCode.JF: _jf,
        OpCode.ADD: _add,
        OpCode.MULT: _mult,
        OpCode.MOD: _mod,
        OpCode.AND: _and,
        OpCode.OR: _or,
        OpCode.NOT: _not,
        OpCode.RMEM: _rmem,
        OpCode.WMEM: _wmem,
        OpCode.CALL: _call,
        OpCode.RET: _ret,
        OpCode.OUT: _out,
        # OpCode.IN: _in,
        OpCode.NOOP: _noop
    }
    def run(self):
        """Fetch/dispatch loop: execute until HALT or a handler returns False."""
        operation = None
        while operation != OpCode.HALT:
            operation = OpCode(self._memory[self._ip])
            self.log(f"{self._ip:04X}: {operation}")
            if not VM.OPCODES[operation](self):
                break
        return
def main():
    """Load the challenge image (16-bit little-endian words) and run it traced."""
    program = np.fromfile('challenge.bin', dtype=np.uint16)
    computer = VM(program, trace=True)
    computer.run()
    return
if __name__ == "__main__":
    main()
| StarcoderdataPython |
3512223 | <reponame>Robbie-Cook/nimblenet<filename>nimblenet/learning_algorithms/commons/utils.py
from ...tools import confirm
from ...activation_functions import softmax_function
from ...cost_functions import softmax_neg_loss
import numpy as np
all = ["check_network_structure", "verify_dataset_shape_and_modify", "print_training_status", "print_training_results"]
def check_network_structure( network, cost_function ):
    """Assert that softmax output and softmax_neg_loss are used together.

    The two assertions enforce the implication in both directions: a softmax
    final layer requires the softmax_neg_loss cost, and vice versa.
    """
    assert softmax_function != network.layers[-1][1] or cost_function == softmax_neg_loss,\
        "When using the `softmax` activation function, the cost function MUST be `softmax_neg_loss`."
    assert cost_function != softmax_neg_loss or softmax_function == network.layers[-1][1],\
        "When using the `softmax_neg_loss` cost function, the activation function in the final layer MUST be `softmax`."
#end
def verify_dataset_shape_and_modify( network, dataset ):
    """Validate the dataset against the network topology and split it.

    Checks the first instance's feature/target widths against the network's
    input size and final-layer size, then returns (data, targets) as two
    stacked numpy arrays with one row per instance.
    """
    sample = dataset[0]
    assert sample.features.shape[0] == network.n_inputs, \
        "ERROR: input size varies from the defined input setting"
    assert sample.targets.shape[0] == network.layers[-1][0], \
        "ERROR: output size varies from the defined output setting"
    feature_rows = [instance.features for instance in dataset]
    target_rows = [instance.targets for instance in dataset]
    return np.array(feature_rows), np.array(target_rows)
#end
def apply_regularizers( dataset, cost_function, regularizers, network ):
    """Fold a list of regularizers into the cost function.

    Each entry is either a callable or a ``(callable, settings_dict)`` pair.
    A regularizer receives the running cost function and weight-gradient
    regularizer and returns updated versions of both.  Returns the final
    ``(cost_function, dW_regularizer)`` pair; with no regularizers the cost
    function is returned unchanged alongside a zero-gradient regularizer.
    """
    # Start from a no-op weight regularizer (zero gradient contribution).
    dW_regularizer = lambda x: np.zeros( shape = x.shape )
    if regularizers is None:
        return cost_function, dW_regularizer
    # Thread the (cost, dW) pair through every regularizer in order.
    for entry in regularizers:
        if type(entry) == tuple:
            regularizer, regularizer_settings = entry
            cost_function, dW_regularizer = regularizer(
                dataset, cost_function, dW_regularizer, network, **regularizer_settings )
        else:
            cost_function, dW_regularizer = entry(
                dataset, cost_function, dW_regularizer, network )
    return cost_function, dW_regularizer
#end
8096929 | <gh_stars>0
"""
Copyright 2011 <NAME> <<EMAIL>>
This file is part of PyCAM.
PyCAM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PyCAM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PyCAM. If not, see <http://www.gnu.org/licenses/>.
"""
import datetime
import os
import time
import pycam.Plugins
from pycam.Utils.events import get_mainloop
class ProgressBar(pycam.Plugins.PluginBase):
    """PyCAM plugin that installs the progress-bar UI into the main window."""
    UI_FILE = "progress_bar.ui"
    CATEGORIES = ["System"]
    def setup(self):
        # Requires a GTK environment; without one the plugin cannot load.
        if not self._gtk:
            return False
        if self.gui:
            box = self.gui.get_object("ProgressBox")
            box.unparent()
            self.core.register_ui("main_window", "Progress", box, 50)
            # Factory: each retrieval of "progress" creates a fresh ProgressGTK.
            self.core.add_item("progress",
                               lambda: ProgressGTK(self.core, self.gui, self._gtk, self.log))
            show_progress_button = self.gui.get_object("ShowToolpathProgressButton")
            # TODO: move this setting somewhere else or rename it
            self.core.add_item("show_toolpath_progress", show_progress_button.get_active,
                               show_progress_button.set_active)
            self._gtk_handlers = []
            self._gtk_handlers.append((show_progress_button, "clicked",
                                       lambda widget: self.core.emit_event("visual-item-updated")))
            self.register_gtk_handlers(self._gtk_handlers)
        return True
    def teardown(self):
        # Undo everything setup() registered.
        if self.gui:
            self.unregister_gtk_handlers(self._gtk_handlers)
            self.core.unregister_ui("main_window", self.gui.get_object("ProgressBox"))
        self.core.set("progress", None)
class ProgressGTK:
    """GTK progress-bar controller with support for nested progress contexts.

    Instances push themselves onto a class-level stack so that nested
    operations can each show progress; when an inner progress finishes, the
    outer one's last text/percent state is restored. Creating an instance
    disables the GUI (via the "gui-disable" event) until finish() is called.
    """
    # Stack of currently-active progress instances (innermost last).
    _PROGRESS_STACK = []
    def __init__(self, core, gui, gtk, log):
        """Wire up the progress widgets and show the progress box.

        core: plugin core (event bus / settings); gui: GTK builder;
        gtk: the gtk module; log: logger instance.
        """
        ProgressGTK._PROGRESS_STACK.append(self)
        self._finished = False
        self._gtk = gtk
        self._gui = gui
        self.log = log
        self.core = core
        self._cancel_requested = False
        self._start_time = 0
        # State for the "N of M" multi-step progress display.
        self._multi_maximum = 0
        self._multi_counter = 0
        self._multi_base_text = ""
        # Timestamp of the last GUI refresh (throttles redraws in update()).
        self._last_gtk_events_time = None
        self._main_widget = self._gui.get_object("ProgressBox")
        self._multi_widget = self._gui.get_object("MultipleProgressBar")
        self._cancel_button = self._gui.get_object("ProgressCancelButton")
        self._cancel_button.connect("clicked", self.cancel)
        self._progress_bar = self._gui.get_object("ProgressBar")
        self._progress_button = self._gui.get_object("ShowToolpathProgressButton")
        self._start_time = time.time()
        self._last_text = None
        self._last_percent = None
        self.update(text="", percent=0)
        self._cancel_button.set_sensitive(True)
        self._progress_button.hide()
        # enable "pulse" mode for a start (in case of unknown ETA)
        self._progress_bar.pulse()
        self._main_widget.show()
        self._multi_widget.hide()
        self._multi_widget.set_text("")
        self._multi_widget.set_fraction(0)
        self.core.emit_event("gui-disable")
    def set_multiple(self, count, base_text=None):
        """Configure a multi-step progress ("base_text 2/5" style).

        A count of 1 or less disables the multi-step display.
        """
        if base_text:
            self._multi_base_text = base_text
        else:
            self._multi_base_text = ""
        self._multi_counter = 0
        if count > 1:
            self._multi_maximum = count
            self.update_multiple(increment=False)
        else:
            self._multi_maximum = 0
    def update_multiple(self, increment=True):
        """Advance (or just redraw) the multi-step progress bar."""
        if self._multi_maximum <= 1:
            self._multi_widget.hide()
            return
        self._multi_widget.show()
        if increment:
            self._multi_counter += 1
            # a new sub-step starts from zero on the main bar
            self._progress_bar.set_fraction(0)
        if self._multi_base_text:
            text = "%s %d/%d" % (self._multi_base_text, self._multi_counter + 1,
                                 self._multi_maximum)
        else:
            text = "%d/%d" % (self._multi_counter + 1, self._multi_maximum)
        self._multi_widget.set_text(text)
        self._multi_widget.set_fraction(min(1.0, float(self._multi_counter) / self._multi_maximum))
    def disable_cancel(self):
        """Grey out the cancel button (for non-interruptible phases)."""
        self._cancel_button.set_sensitive(False)
    def cancel(self, widget=None):
        """Request cancellation; update() returns True on its next call."""
        self._cancel_requested = True
    def finish(self):
        """Tear down this progress context and restore the previous one (if any)."""
        if self._finished:
            self.log.debug("Called progressbar 'finish' twice: %s" % self)
            return
        ProgressGTK._PROGRESS_STACK.remove(self)
        if ProgressGTK._PROGRESS_STACK:
            # restore the latest state of the previous progress
            current = ProgressGTK._PROGRESS_STACK[-1]
            current.update(text=current._last_text, percent=current._last_percent)
            current.update_multiple(increment=False)
        else:
            # hide the widget
            self._main_widget.hide()
            self._multi_widget.hide()
        # force a re-layout up the widget hierarchy after hiding widgets
        widget = self._main_widget
        while widget:
            if hasattr(widget, "resize_children"):
                widget.resize_children()
            if hasattr(widget, "check_resize"):
                widget.check_resize()
            widget = widget.get_parent()
        self.core.emit_event("gui-enable")
        self._finished = True
    def __del__(self):
        # Safety net: make sure the GUI is re-enabled even if the caller
        # forgot to call finish().
        if not self._finished:
            self.finish()
    def update(self, text=None, percent=None):
        """Update text and/or percentage; returns True if cancel was requested.

        percent is clamped to [0, 100]. An ETA line is appended to the text
        once a measurable fraction of the work is done.
        """
        # NOTE(review): truthiness checks here mean that text="" and
        # percent=0 do NOT update _last_text/_last_percent (used by finish()
        # to restore an outer progress) — confirm this is intentional.
        if text:
            self._last_text = text
        if percent:
            self._last_percent = percent
        if percent is not None:
            percent = min(max(percent, 0.0), 100.0)
            self._progress_bar.set_fraction(percent/100.0)
        if (not percent) and (self._progress_bar.get_fraction() == 0):
            # use "pulse" mode until we reach 1% of the work to be done
            self._progress_bar.pulse()
        # update the GUI
        current_time = time.time()
        # Don't update the GUI more often than once per second.
        # Exception: text-only updates
        # This restriction improves performance and reduces the
        # "snappiness" of the GUI.
        if (self._last_gtk_events_time is None) \
                or text \
                or (self._last_gtk_events_time + 0.5 <= current_time):
            # "estimated time of arrival" text
            time_estimation_suffix = " remaining ..."
            if self._progress_bar.get_fraction() > 0:
                # overall fraction across all multi-steps
                total_fraction = ((self._progress_bar.get_fraction() + self._multi_counter)
                                  / max(1, self._multi_maximum))
                total_fraction = max(0.0, min(total_fraction, 1.0))
                eta_full = (time.time() - self._start_time) / total_fraction
                if eta_full > 0:
                    eta_delta = eta_full - (time.time() - self._start_time)
                    eta_delta = int(round(eta_delta))
                    if hasattr(self, "_last_eta_delta"):
                        previous_eta_delta = self._last_eta_delta
                        if eta_delta == previous_eta_delta + 1:
                            # We are currently toggling between two numbers.
                            # We want to avoid screen flicker, thus we just live
                            # with the slight inaccuracy.
                            eta_delta = self._last_eta_delta
                    self._last_eta_delta = eta_delta
                    eta_delta_obj = datetime.timedelta(seconds=eta_delta)
                    eta_text = "%s%s" % (eta_delta_obj, time_estimation_suffix)
                else:
                    eta_text = None
            else:
                eta_text = None
            if text is not None:
                lines = [text]
            else:
                old_lines = self._progress_bar.get_text().split(os.linesep)
                # skip the time estimation line
                lines = [line for line in old_lines if not line.endswith(time_estimation_suffix)]
            if eta_text:
                lines.append(eta_text)
            self._progress_bar.set_text(os.linesep.join(lines))
            # show the "show_tool_button" ("hide" is called in the progress decorator)
            # TODO: move "in_progress" somewhere else
            if self.core.get("toolpath_in_progress"):
                self._progress_button.show()
            get_mainloop().update()
            if not text or (self._start_time + 5 < current_time):
                # We don't store the timining if the text was changed.
                # This is especially nice for the snappines during font
                # initialization. This exception is only valid for the first
                # five seconds of the operation.
                self._last_gtk_events_time = current_time
        # return if the user requested a break
        return self._cancel_requested
| StarcoderdataPython |
11210409 | import logging
import flask
from typing import Dict
from allennlp_demo.common.logs import configure_logging
from allennlp_models.pretrained import get_pretrained_models
# Module-level logger for this service.
logger = logging.getLogger(__name__)
class ModelCardsService(flask.Flask):
    """Flask application that serves AllenNLP model cards as JSON."""

    def __init__(self, name: str = "model-cards"):
        super().__init__(name)
        configure_logging(self)
        # Fetch the pretrained-model registry once at startup and keep it in
        # memory (~4 MB). Serving requests from this cache is much faster than
        # rebuilding the card list on every request.
        self.cards_by_id = get_pretrained_models()

        @self.route("/", methods=["GET"])
        def all_model_cards():
            # Serialize every known card, keyed by its model id.
            cards = {model_id: card.to_dict() for model_id, card in self.cards_by_id.items()}
            return flask.jsonify(cards)
if __name__ == "__main__":
    # Bind to all interfaces so container port mapping works.
    ModelCardsService().run(host="0.0.0.0", port=8000)
| StarcoderdataPython |
3556161 | # -*- coding: utf-8 -*-
# (c) Copyright 2022 Sensirion AG, Switzerland
from enum import IntFlag
from typing import Iterable, Optional
from typing import List
from sensirion_i2c_driver import I2cConnection, CrcCalculator
from sensirion_shdlc_driver import ShdlcSerialPort, ShdlcConnection
from sensirion_shdlc_sensorbridge import (SensorBridgePort,
SensorBridgeShdlcDevice,
SensorBridgeI2cProxy)
from sensirion_i2c_adapter.channel import AbstractMultiChannel
from sensirion_i2c_adapter.i2c_channel import I2cChannel
from sensirion_i2c_adapter.multi_channel import MultiChannel
class UsedPorts(IntFlag):
    """Bit flags selecting which SensorBridge ports are in use."""

    PORT_1 = 1
    PORT_2 = 2
    # Convenience alias covering both ports.
    ALL = PORT_1 | PORT_2
class Config:
    """Configuration of a single attached SensorBridge device.

    :param serial_port: Serial port used by a SensorBridge device.
    :param ports: SensorBridge port(s) used to communicate with the desired sensor(s).
    """

    def __init__(self, serial_port: str, ports: UsedPorts) -> None:
        # Store the port selection under a more descriptive attribute name.
        self.selected_ports: UsedPorts = ports
        self.serial_port: str = serial_port
class SensorBridgeLiveInfo:
    """Pairs an opened SensorBridge device with the ports it is driving."""

    def __init__(self, sensor_bridge: SensorBridgeShdlcDevice, ports: Optional[List[SensorBridgePort]]) -> None:
        self.sensor_bridge = sensor_bridge
        # Normalize a missing port selection to an empty list so that
        # callers can always iterate over `ports`.
        self.ports: List[SensorBridgePort] = list() if ports is None else ports
class I2cMultiSensorBridgeConnection:
    """
    The class I2cMultiSensorBridgeConnection is a convenience method to support the creation of a multi-channel
    object for one or several SensorBridges devices.

    The multi-channel object with N channels can be used to communicate simultaneously with N different sensors

    :param config_list: List of configuration objects.
    :param baud_rate: Uart speed to be used for the multi-channel. The same baud rate will be used with all SensorBridge
        devices.
    :param i2c_frequency: The I2c frequency used for communication with the sensors.
    :param voltage: The supply voltage used by the attached sensors.
    """

    def __init__(self, config_list: Iterable[Config], baud_rate: int, i2c_frequency: int, voltage: float) -> None:
        self._config_list = config_list
        self._baud_rate = baud_rate
        self._i2c_frequency = i2c_frequency
        self._voltage = voltage
        self._serial_ports: List[ShdlcSerialPort] = []
        self._proxies: List[SensorBridgeI2cProxy] = []
        self._sensor_bridges: List[SensorBridgeLiveInfo] = []

    def _create_proxies(self, serial: ShdlcSerialPort, selected_ports: UsedPorts) -> None:
        """Configure the selected ports of one SensorBridge and create an I2c proxy per port."""
        bridge = SensorBridgeShdlcDevice(ShdlcConnection(serial), slave_address=0)
        # Translate the UsedPorts bit mask into concrete SensorBridgePort values.
        sensor_bridge_port_list = [SensorBridgePort(i) for i in range(2) if selected_ports.value & (1 << i) != 0]
        # we need this information in order to power off an on the different channels later on!
        self._sensor_bridges.append(SensorBridgeLiveInfo(sensor_bridge=bridge, ports=sensor_bridge_port_list))
        for sensor_bridge_port in sensor_bridge_port_list:
            bridge.set_i2c_frequency(sensor_bridge_port, self._i2c_frequency)
            bridge.set_supply_voltage(sensor_bridge_port, self._voltage)
            bridge.switch_supply_on(sensor_bridge_port)
            self._proxies.append(SensorBridgeI2cProxy(bridge, sensor_bridge_port))

    def __enter__(self) -> "I2cMultiSensorBridgeConnection":
        """Open all configured serial ports and power up the attached sensors."""
        for config in self._config_list:
            serial = ShdlcSerialPort(port=config.serial_port, baudrate=self._baud_rate)
            self._serial_ports.append(serial)
            self._create_proxies(serial, config.selected_ports)
        return self

    def get_multi_channel(self, i2c_address, crc: CrcCalculator) -> AbstractMultiChannel:
        """Create a multi-channel object for the configured SensorBridge devices and selected SensorBridge ports.
        """
        assert len(self._proxies) > 0, "Wrong usage: proxies not initialized"
        channels = tuple([I2cChannel(I2cConnection(x), i2c_address, crc) for x in self._proxies])
        return MultiChannel(channels)

    def switch_supply_off(self):
        """ Switch the supply off for all connected sensors."""
        for bridge_live in self._sensor_bridges:
            for port in bridge_live.ports:
                bridge_live.sensor_bridge.switch_supply_off(port)

    def switch_supply_on(self):
        """ Switch the supply on for all connected sensors."""
        for bridge_live in self._sensor_bridges:
            for port in bridge_live.ports:
                bridge_live.sensor_bridge.switch_supply_on(port)

    def __exit__(self, exc_type, exc_val, exc_tb) -> bool:
        """Power down the sensors and close all serial ports; never suppresses exceptions."""
        self.switch_supply_off()
        # Fix: use a plain loop instead of a list comprehension executed only
        # for its side effects (the built list was discarded).
        for port in self._serial_ports:
            port.close()
        self._serial_ports.clear()
        return False
| StarcoderdataPython |
5196292 | <reponame>pyToshka/pyTerrafile<gh_stars>1-10
import os
import shutil
import sys
from os import path
import git
import requests
import yaml
from git import Repo, Git, GitCommandError
from loguru import logger
# Base URL of the public Terraform module registry API (v1).
TF_REGISTRY_BASE_URL = "https://registry.terraform.io/v1/modules"
class Progress(git.remote.RemoteProgress):
    """Prints git clone/fetch progress lines to stdout."""

    def update(self, op_code, cur_count, max_count=None, message=""):
        # GitPython invokes this callback for every progress line it parses.
        print(f"Downloading: ({op_code}, {cur_count}, {max_count}, {message})")
class Formatter:
    """Loguru format callable that keeps the message column aligned.

    Tracks the widest "name:function:line" location string seen so far and
    pads each record so the log message column lines up vertically.
    """

    def __init__(self):
        self.padding = 0
        self.fmt = "{time} | {level: <8} | {name}:{function}:{line}{extra[padding]} | {message}\n{exception}"

    def format(self, record):
        """Compute the padding for *record* and return the format string."""
        location = "{name}:{function}:{line}".format(**record)
        if len(location) > self.padding:
            # Remember the widest location seen so far.
            self.padding = len(location)
        record["extra"]["padding"] = " " * (self.padding - len(location))
        return self.fmt
# Shared formatter instance used as the loguru sink format in install().
formatter = Formatter()
def check_source(source):
    """Classify a Terrafile module source string.

    Args:
        source: Module source string as written in the Terrafile.

    Returns:
        str: "git" for git URLs (https:// or scp-like "user@host" syntax),
        "local" for filesystem paths, "registry" for anything else
        (assumed to be a Terraform registry address).
    """
    if source.startswith("https://") or "@" in source:
        logger.debug(f"Detected GIT based module for source {source}")
        return "git"
    if source.startswith(("../", "./", "/")):
        logger.debug(f"Detected local based module for source {source}")
        return "local"
    # Fix: the original guard `source.startswith("")` is always true, so every
    # remaining source falls through to "registry" — make that explicit, and
    # log the correct module kind (the old message said "local").
    logger.debug(f"Detected registry based module for source {source}")
    return "registry"
def check_directory(name):
    """Ensure the directory *name* exists and return its absolute path.

    Fixes the original logic, which assigned ``str(path.exists(name))`` —
    always a truthy string ("True"/"False") — so the creation branch was
    unreachable, and would have called ``os.mkdir`` on that bogus string.

    Args:
        name: Directory path (relative or absolute).

    Returns:
        str: Absolute path of the (now existing) directory.
    """
    logger.info(f"Checking directory {name}")
    if path.exists(name):
        logger.debug(f"Directory {name} exist")
    else:
        logger.debug(f"Directory {name} doesn't exist, creating directory")
        os.mkdir(name)
        logger.debug(f"Directory {name} has been created")
    return os.path.abspath(name)
def git_module(source, module_path, name, tag=None):
    """Clone a git-based module into ``module_path/name`` and check out a ref.

    Args:
        source: Git clone URL.
        module_path: Directory the module should be vendored under.
        name: Module name, used as the checkout directory name.
        tag: Optional git ref (tag/branch/commit). Defaults to "master".
    """
    check_path = check_directory(module_path)
    logger.info(f"Checkout new module {name}")
    try:
        Repo.clone_from(source, f"{check_path}/{name}", progress=Progress())
    except GitCommandError as error:
        # NOTE(review): clone failures (e.g. the target directory already
        # exists from a previous run) are swallowed so execution falls
        # through to the checkout below — confirm this is intentional.
        logger.debug(f"During checkout error {error} has been detected")
        pass
    git_version = Git(f"{check_path}/{name}")
    if tag is not None:
        logger.info(f"Getting {tag} for module {name}")
        git_version.checkout(tag)
    else:
        # Assumes the repository's default branch is named "master" — TODO confirm.
        logger.info(f"{tag} hasn't present for module {name}, checkout master branch")
        git_version.checkout("master")
def registry_module(source, version):
    """Resolve a Terraform-registry module address to its git source and tag.

    Args:
        source: Registry address with three "/"-separated segments.
        version: Module version to look up.

    Returns:
        tuple: (git source URL, git tag, module name, provider) as reported
        by the registry API.
    """
    # NOTE(review): registry addresses are conventionally
    # "<namespace>/<name>/<provider>", but this unpacking binds the first
    # segment to `name` and the second to `namespace` — verify the resulting
    # URL segment order against the registry API before relying on it.
    name, namespace, provider = source.split("/")
    module_url = f"{TF_REGISTRY_BASE_URL}/{name}/{namespace}/{provider}/{version}"
    response = requests.get(module_url)
    logger.info(
        f"Got information for {response.json()['name']}, "
        f"Module url is {response.json()['source']},"
        f"provider is {response.json()['provider']}"
    )
    return (
        response.json()["source"],
        response.json()["tag"],
        response.json()["name"],
        response.json()["provider"],
    )
def local_module(source, destination):
    """Vendor a local filesystem module by copying its tree into *destination*."""
    try:
        # Start from a clean slate, then copy the whole tree over.
        shutil.rmtree(destination, ignore_errors=True)
        shutil.copytree(
            source, destination, dirs_exist_ok=True, copy_function=shutil.copy
        )
    except shutil.Error as exc:
        # copytree aggregates per-file failures; retry each failed file
        # individually with copy2.
        for failed_src, failed_dst, _msg in exc.args[0]:
            shutil.copy2(failed_src, failed_dst)
def read_tf_file(tfile=None):
    """Parse the given Terrafile (YAML) and return its contents."""
    with open(tfile) as handle:
        return yaml.safe_load(handle)
def install(terrafile, module_path=None, log_level=None, download_force=False):
    """Install every module listed in a Terrafile under ``module_path/vendors``.

    Args:
        terrafile: Path to the Terrafile (YAML mapping of module name -> details).
        module_path: Base directory the "vendors" tree is created under.
        log_level: Logging level name (case-insensitive); must not be None.
        download_force: If True, delete the existing vendors tree first.

    Exits the process with status 2 when the Terrafile is empty or missing.
    """
    level = log_level.upper()
    # Replace loguru's default sink with one using the aligning formatter.
    logger.remove()
    logger.add(sys.stderr, format=formatter.format, level=level)
    if download_force:
        shutil.rmtree(f"{module_path}/vendors")
    if os.path.isfile(terrafile) and os.path.getsize(terrafile) == 0:
        logger.error("tfile is empty, please check tfile")
        sys.exit(2)
    try:
        terrafile = read_tf_file(terrafile)
    except FileNotFoundError:
        logger.error(
            "tfile does not exist or is not correct, please create file in current directory or provide "
            "existing file"
        )
        sys.exit(2)
    try:
        # Process modules in deterministic (sorted-by-name) order.
        for name, details in sorted(terrafile.items()):
            if "provider" in details:
                provider = details["provider"]
            else:
                provider = "custom"
            module_type = check_source(details["source"])
            if module_type == "registry":
                # NOTE(review): registry_module() returns (url, tag, name,
                # provider); here the second value is bound to `version` and
                # then passed to git_module() as the tag.
                url, version, name, provider = registry_module(
                    details["source"], details["version"]
                )
                git_module(url, f"{module_path}/vendors/{provider}", name, version)
            elif module_type == "local":
                local_module(
                    details["source"], f"{module_path}/vendors/{provider}/{name}"
                )
            elif module_type == "git":
                git_module(
                    details["source"],
                    f"{module_path}/vendors/{provider}",
                    name,
                    details["version"],
                )
    except AttributeError as error:
        # Raised e.g. when the parsed Terrafile is not a mapping.
        logger.error(f"tfile is not correct, please check tfile format {error}")
| StarcoderdataPython |
3343973 | <gh_stars>1-10
"""
Unit tests for the main web views
"""
import re
import os
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
class ViewTests(TestCase):
    """ Tests for various top-level views """

    username = 'test_user'
    password = '<PASSWORD>'

    def setUp(self):
        """Create a user account and log it in before each test."""
        user_model = get_user_model()
        self.user = user_model.objects.create_user(self.username, '<EMAIL>', self.password)
        self.user.set_password(self.password)
        self.user.save()

        logged_in = self.client.login(username=self.username, password=self.password)
        self.assertEqual(logged_in, True)

    def test_api_doc(self):
        """ Test that the api-doc view works """
        api_url = os.path.join(reverse('index'), 'api-doc') + '/'
        self.assertEqual(self.client.get(api_url).status_code, 200)

    def test_index_redirect(self):
        """
        top-level URL should redirect to "index" page
        """
        self.assertEqual(self.client.get("/").status_code, 302)

    def get_index_page(self):
        """
        Retrieve the index page (used for subsequent unit tests)
        """
        response = self.client.get("/index/")
        self.assertEqual(response.status_code, 200)
        return str(response.content.decode())

    def test_panels(self):
        """
        Test that the required 'panels' are present
        """
        page_body = self.get_index_page()
        self.assertIn("<div id='detail-panels'>", page_body)
        # TODO: In future, run the javascript and ensure that the panels get created!

    def test_js_load(self):
        """
        Test that the required javascript files are loaded correctly
        """
        # Change this number as more javascript files are added to the index page
        N_SCRIPT_FILES = 40

        page_body = self.get_index_page()
        # Extract all required javascript files from the index page content
        matches = re.findall("<script type='text\\/javascript' src=\"([^\"]*)\"><\\/script>", page_body)
        self.assertEqual(len(matches), N_SCRIPT_FILES)
        # TODO: Request the javascript files from the server, and ensure they are correcty loaded
| StarcoderdataPython |
1866240 | <filename>unknowntags/admin.py
from django.contrib import admin
from import_export.admin import ImportExportModelAdmin
from import_export import resources
from simple_history.admin import SimpleHistoryAdmin
from .models import Unknowntag
class UnknowntagResource(resources.ModelResource):
    """Import/export resource mapping for the Unknowntag model."""

    class Meta:
        model = Unknowntag


class UnknownagAdmin(ImportExportModelAdmin, SimpleHistoryAdmin):
    """Admin with import/export and history support for Unknowntag.

    Fix: ``resource_class`` must be a ``resources.ModelResource`` subclass;
    the original assigned the model itself (the unused ``resources`` import
    confirms the intended usage).
    """
    resource_class = UnknowntagResource


admin.site.register(Unknowntag, UnknownagAdmin)
| StarcoderdataPython |
8012294 | <filename>Part_3_advanced/m17_tests_II/unittest_module/homework_1_solution/estudent/tests/test_grade.py
from estudent.grade import Grade
from unittest import TestCase
class GradeTestCase(TestCase):
    """Unit tests for Grade.is_passing()."""

    def setUp(self):
        # One grade on each side of the pass/fail boundary.
        self.failing_grade = Grade(value=1)
        self.passing_grade = Grade(value=5)

    def test_grade_above_1_is_passing(self):
        passing = self.passing_grade.is_passing()
        self.assertTrue(passing)

    def test_grade_below_2_is_failing(self):
        failing = self.failing_grade.is_passing()
        self.assertFalse(failing)
| StarcoderdataPython |
9706469 | <reponame>gigabackup/gigantum-client
import os
from typing import List, Optional
from gtmcore.gitlib.git_fs import GitFilesystem
from gtmcore.logging import LMLogger
import subprocess
# Shared gtmcore logger used by this module.
logger = LMLogger.get_logger()
class GitFilesystemShimmed(GitFilesystem):
    """GitFilesystem variant that shells out to the ``git`` CLI via subprocess."""

    # INFORMATIONAL
    def check_ignored(self, path: str) -> bool:
        """Check if path is ignored (e.g., via .gitignore)

        path: a path relative to the repository root

        Returns:
            is the path ignored?
        """
        # `git check-ignore` prints the path when it is ignored, nothing otherwise.
        result = self._run(['git', 'check-ignore', path], check=False)
        return result != ''

    def list_submodules(self):
        """Method to list the name of configured submodules

        Returns:
            list(str): names of the submodules declared in .gitmodules
        """
        # Git-python is broken when not all submodules have been initialized and you try to do remote git ops.
        # So instead of listing with self.repo.submodules, we just look at the .gitmodule file
        submodule_list = list()
        gitmodules_file = os.path.join(self.working_directory, '.gitmodules')
        if os.path.exists(gitmodules_file):
            if os.stat(gitmodules_file).st_size > 0:
                result = self._run(['git', 'config', '--file', '.gitmodules', '--name-only', '--get-regexp', 'path'])
                if result:
                    # Lines look like "submodule.<name>.path"; extract <name>.
                    for line in result.split('\n'):
                        if line:
                            _, part = line.split('submodule.')
                            name, _ = part.split('.path')
                            submodule_list.append(name)

        return submodule_list

    # MODIFY
    def add(self, filename) -> bool:
        """Add a file to a commit

        Fix: the log messages and the git command staged the literal string
        "(unknown)" instead of the given filename.

        Args:
            filename(str): Filename to add.

        Returns:
            bool: True if the file was staged, False if skipped because it is ignored
        """
        if self.check_ignored(filename):
            # This file is ignored - don't do any git operations
            logger.info(f"Skipped adding untracked {filename} to Git repository in {self.working_directory}")
            return False
        else:
            logger.info(f"Adding file {filename} to Git repository in {self.working_directory}")
            self._run(['git', 'add', filename])
            # We added something
            return True

    def add_all(self, relative_directory=None) -> bool:
        """Add all changes/files using the `git add -A` command

        Args:
            relative_directory(str): Relative directory (from the root_dir) to add everything

        Returns:
            bool: True if something was staged, False if the directory was ignored
        """
        if relative_directory:
            if self.check_ignored(relative_directory):
                # This file is ignored - don't do any git operations
                logger.info(f"Skipped adding untracked {relative_directory} to Git repository in {self.working_directory}")
                return False
            self.repo.git.add(relative_directory, A=True)
        else:
            self.repo.git.add(A=True)
        return True

    def reset(self, branch_name: str):
        """git reset --hard current branch to the treeish specified by branch_name

        Args:
            branch_name: What to reset current branch to? Will be passed directly to git
        """
        self._run(['git', 'reset', '--hard', branch_name])

    def remote_set_branches(self, branch_names: List[str], remote_name: str = 'origin'):
        """git remote set-branch to the list of branches

        Args:
            branch_names: What branches do you want to track?
            remote_name: Which git remote? Default is 'origin'
        """
        self._run(['git', 'remote', 'set-branches', remote_name] + branch_names)

    def _run(self, command: List[str], working_directory: Optional[str] = None, check=True) -> str:
        """subprocess.run wrapped in a try block for error reporting

        Args:
            command: what to run with subprocess.run()
            working_directory: usually a path within a Git repo. Defaults to the instance working_directory
            check: Raise an exception on non-zero return code?

        Returns:
            The stdout from the process as a string
        """
        if working_directory is None:
            working_directory = self.working_directory
        try:
            result = subprocess.run(command, capture_output=True, text=True, check=check, cwd=working_directory)
        except subprocess.CalledProcessError as x:
            # Log both streams before propagating for easier debugging.
            logger.error(f'{x.stdout}, {x.stderr}')
            raise
        return result.stdout

    # SYNC / REPLICATE
    def clone(self, source: str, directory: str, branch: Optional[str] = None, single_branch=False):
        """Clone a repo

        Args:
            source: Git ssh or https string to clone - should be a bare path, or include '/' as a final delimiter
            directory: Directory to clone into
            branch: The name of the desired branch to be checked out (defaults to master)
            single_branch: Fetch ONLY the contents of the specified branch

        Returns:
            None
        """
        if self.repo:
            raise ValueError("Cannot init an existing git repository. Choose a different working directory")

        logger.info("Cloning Git repository from {} into {}".format(source, directory))

        args = []
        if branch is not None:
            args.extend(['--branch', branch])
        if single_branch:
            args.append('--single-branch')

        command_string = ['git', 'clone'] + args + [source, directory]
        self._run(command_string)
        self.set_working_directory(directory)
| StarcoderdataPython |
197645 | <reponame>emailweixu/XWorld
"""
Copyright (c) 2017 Baidu Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
import itertools
import numbers
import os
import random
from maze2d import spanning_tree_maze_generator
"""
Entity:
id - unique str for this entity
grid_types - "agent", "goal", "block", "boundary"
location - (x, y, z)
yaw - in radian
scale - (0, 1.0]
offset - [0, 1-scale]
name - name of the entity
asset_path - the model path
color - color of the entity
"""
class Entity:
    """A single object in the 3D world.

    Attributes:
        type: One of "agent", "goal", "block", "boundary".
        id: Unique identifier string for this entity.
        loc: (x, y, z) grid location, or None if not yet placed.
        yaw: Orientation in radians.
        scale: Size multiplier in (0, 1.0].
        offset: Placement offset in [0, 1 - scale].
        name: Name of the entity's object class.
        asset_path: Path of the 3D model.
        color: Color label of the entity.
    """

    def __init__(self, type, id=None, loc=None, yaw=0.0,
                 scale=1.0, offset=0.0, name=None, asset_path=None, color=None):
        # A location, when given, must be a 3-tuple of grid coordinates.
        if loc is not None:
            assert isinstance(loc, tuple) and len(loc) == 3
        self.type = type
        self.id = id
        self.loc = loc
        self.yaw = yaw
        self.scale = scale
        self.offset = offset
        self.name = name
        self.asset_path = asset_path
        self.color = color
class XWorld3DEnv(object):
    """Python-side model of the XWorld3D game map, shared with the C++ simulator."""
    PI_2 = 1.5707963
    PI = 3.1415926
    curriculum_check_period = 100
    def __init__(self, asset_path, max_height=10, max_width=10):
        """Scan asset_path for .urdf models and item properties; init an empty map."""
        self.current_usage = {}
        self.action_successful = False
        self.grid_types = ["goal", "block", "agent", "boundary"]
        ## init dimensions
        self.max_height = max_height
        self.max_width = max_width
        self.__clean_env()
        ## event messages
        self.action_successful = False
        self.agent_sent = ""
        self.game_event = ""
        self.curriculum_check_counter = 0
        ## load all items from asset_path
        self.asset_path = asset_path
        self.all_object_paths = []
        for dirpath, _, files in os.walk(asset_path):
            for f in files:
                if f.endswith(".urdf"):
                    self.all_object_paths.append(os.path.join(dirpath, f))
        self.set_goal_subtrees([])
        ## read item properties
        color_file = os.path.join(asset_path, "properties.txt")
        assert os.path.exists(color_file)
        with open(color_file, "r") as f:
            lines = f.read().splitlines()
        self.color_table = {os.path.join(asset_path, l.split()[0]) : l.split()[1]\
                            for l in lines if not l.startswith("//") and not l == ""}

    ############################ interface with Python tasks ############################
    def reset(self):
        """
        Reset the map.
        """
        self.__clean_env()
        self._configure()
        self.__instantiate_entities()

    def get_current_usage(self):
        """Periodically report the average environment usage across tasks."""
        self.curriculum_check_counter += 1
        if self.curriculum_check_counter < XWorld3DEnv.curriculum_check_period \
           or not self.current_usage:
            return 0
        ## we take the average usage across all the tasks
        usage = sum([sum(l) / float(len(l)) for l in self.current_usage.values()]) \
                / len(self.current_usage)
        self.curriculum_check_counter = 0
        return usage

    def set_dims(self, h, w):
        """
        Set the dimensions of the map. If h or w is less than self.max_height or
        self.max_width, then walls will be automatically padded. The python user should
        use coordinates in [0, h) and [0, w).
        """
        assert h > 1 and w > 1
        assert h <= self.max_height and w <= self.max_width
        self.height = h
        self.width = w
        self.boundaries = self.__add_boundaries()
        self.available_grids = list(set(itertools.product(range(w), range(h), (0,))))
        random.shuffle(self.available_grids)
        self.changed = True

    def set_entity(self, type, loc=None, name=None):
        """
        Add an entity of type to loc which must be currently empty
        """
        self.set_entity_inst(Entity(type=type, loc=loc, name=name))

    def set_entity_inst(self, e):
        """Place an already-constructed Entity on the map."""
        if not e.loc is None:
            assert e.loc in self.available_grids
            self.available_grids.remove(e.loc)
        self.entity_nums[e.type] += 1
        self.entities.append(e)
        self.changed = True

    def delete_entity(self, x):
        """
        Delete an entity on the current map
        """
        self.entities.remove(x)
        self.entity_nums[x.type] -= 1
        self.available_grids.append(x.loc)
        self.changed = True

    def move_entity(self, e, loc):
        """
        Move entity e from its current location to loc
        """
        self.delete_entity(e)
        e.loc = loc
        self.set_entity_inst(e)

    def set_goal_subtrees(self, subtrees):
        """
        Set goal directory substrees so that only goals in the selected subtrees
        will be sampled when generating the map. The user can use this function to
        control the number of goal classes.
        The change of goal subtrees will only be reflected for the next game, after
        reset() is called. The current game still uses old goal subtrees.
        """
        goal_path = os.path.join(self.asset_path, "goal")
        self.object_paths = copy.deepcopy(self.all_object_paths)
        if len(subtrees) > 0:
            self.object_paths \
                = [p for p in self.object_paths \
                   if not p.startswith(goal_path) or p.split("/")[-3] in subtrees]
        ## get a hierarchy of all possible objects
        key = lambda p: '_'.join(p.split('_')[:-1])
        objects = itertools.groupby(sorted(self.object_paths, key=key), key=key)
        self.items = {t : {} for t in self.grid_types}
        for k, g in objects:
            type = [t for t in k.split("/") if t in self.grid_types][0]
            assert type in self.items
            self.items[type][os.path.basename(k)] = list(g)

    def get_max_dims(self):
        """
        Get the max height and max width of the map
        """
        return (self.max_height, self.max_width)

    def get_dims(self):
        """Get the current (height, width) of the map."""
        return (self.height, self.width)

    def get_n(self, type):
        """
        Get the current number of entities on the map for type
        """
        assert type in self.entity_nums
        return self.entity_nums[type]

    def get_all_possible_names(self, type):
        """
        Return all possible names for type
        'goal'  - all unique object names
        'block' - all block names
        'agent' - all agent names
        """
        # Fix: return a list instead of a dict_keys view. The view is later
        # passed to random.choice() (see __instantiate_entities), which
        # requires an indexable sequence and raises TypeError on a view in
        # Python 3.
        return list(self.items[type].keys())

    def get_all_colors(self):
        """
        Return all possible colors in xworld
        """
        return list(set(self.color_table.values()))

    def get_agent(self):
        """
        Get the agent information: (entity, agent sentence, action success)
        """
        agent = [e for e in self.entities if e.type == "agent"][0]
        return (agent, self.agent_sent, self.action_successful)

    def get_goals(self):
        """
        Return all the goals on the current map
        """
        return [e for e in self.entities if e.type == "goal"]

    def get_blocks(self):
        """
        Return all the blocks on the current map
        """
        return [e for e in self.entities if e.type == "block"]

    def get_available_grids(self):
        """
        Return all the available grids on the current map
        """
        return self.available_grids

    def get_entities(self):
        """
        Return all the entities on the current map
        """
        return self.entities

    def record_environment_usage(self, task_name, x):
        """
        Update the current environment usage
        The higher the usage is, the better the agent handles the environment (so
        it might be a good time now to move to more difficult scenarios)
        This quantity can be used to generate a curriculum of the world
        """
        self.current_usage[task_name] = x

    ######################## interface with C++ #############################
    def dump_curriculum_progress(self):
        """Expose the curriculum level to the C++ side."""
        return self.current_level

    def env_changed(self):
        """
        Whether the environment has been changed by the teacher during the current
        stage of the task. If yes, then teaching_task.cpp will notify the simulator to update
        the game environment.
        """
        ret = self.changed
        self.changed = False
        return ret

    def cpp_get_entities(self):
        """
        C++ code gets entities information. Used by the underlying simulator.
        """
        actual_entities = [e.__dict__ for e in self.entities]
        boundary_entities = [e.__dict__ for e in self.boundaries]
        return actual_entities + boundary_entities

    def update_entities_from_cpp(self, entities):
        """
        Update the environment from C++. The changes might be due to
        the environment dynamics or the agent's actions.
        Entities is a list of python dicts.
        """
        self.entity_nums = {t : 0 for t in self.grid_types}
        self.entities = [Entity(**i) for i in entities if not self.__is_boundary(i["id"])]
        for e in self.entities:
            self.entity_nums[e.type] += 1
        # update available grids
        self.available_grids = set(itertools.product(range(self.width), range(self.height), (0,)))
        occupied = set([e.loc for e in self.entities])
        self.available_grids -= occupied
        self.available_grids = list(self.available_grids)
        random.shuffle(self.available_grids)

    def update_agent_sentence_from_cpp(self, sent):
        """
        Update the agent sentence from the CPP simulator
        """
        self.agent_sent = sent

    def update_agent_action_success_from_cpp(self, successful):
        """
        Update the agent action success from the CPP simulator
        """
        self.action_successful = successful

    def update_game_event_from_cpp(self, event):
        """
        Update the game event from CPP simulator
        """
        self.game_event = event

    ######################## private or protected #########################
    def _configure(self):
        """
        The user has to override this function to define how the map
        will be generated after each session resetting
        """
        raise NotImplementedError()

    def __instantiate_entities(self):
        """
        For each entity, select an instance from the object class it belongs to,
        after which its properties are set.
        The entities should have been set in _configure()
        """
        Y, X = self.get_dims()
        maze = spanning_tree_maze_generator(X, Y)
        blocks = [(j, i, 0) for i,m in enumerate(maze) for j,b in enumerate(m) if b == '#']
        ## maybe not all blocks of the maze will be used later
        random.shuffle(blocks)
        ## first remove all maze blocks from the available set
        for b in blocks:
            if b in self.available_grids:
                self.available_grids.remove(b)
        ## select a random object path for each non-block entity
        for i, e in enumerate(self.entities):
            if e.name is None:
                e.name = random.choice(self.get_all_possible_names(e.type))
            e.id = "%s_%d" % (e.name, i)
            if e.asset_path is None:
                icons = self.items[e.type][e.name]
                e.asset_path = random.choice(icons)
            e.color = self.color_table[e.asset_path]
            if e.loc is None and e.type != "block":
                assert len(self.available_grids) > 0
                e.loc = self.available_grids.pop()
            if e.type == "agent":
                e.yaw = random.uniform(-self.PI, self.PI)
            elif e.type == "goal":
                e.yaw = random.randint(-1,2) * self.PI_2
        ## add back some empty grids
        self.available_grids += blocks[len(self.get_blocks()):]
        ## use the remaining blocks
        for e in self.get_blocks():
            assert blocks, "too many blocks for a valid maze"
            e.loc = blocks.pop()

    def __add_boundaries(self):
        """
        Given the max height and width of a map, return a list of surrounding
        wall blocks.
        """
        wall_blocks = []
        wall_height = 3
        def add_blocks(range1, range2, id):
            for loc in itertools.product(range1, range2):
                for k in range(wall_height):
                    wall_blocks.append(Entity(type="boundary", loc=loc+(k,), id="wall_%d" % id,
                                              name="wall", color="na",
                                              asset_path=random.choice(self.items["boundary"]["wall"])))
                    id += 1
            return id
        id = add_blocks(range(-1, self.width+1), (-1, self.height),
                        self.height * self.width)
        id = add_blocks((-1, self.width), range(0, self.height), id);
        return wall_blocks

    def __is_boundary(self, id):
        """
        Given a location, determine whether it's a padding block or not
        """
        return (id in [b.id for b in self.boundaries])

    def __clean_env(self):
        """
        Reset members; preparing for the next session
        """
        self.agent_sent = ""
        self.changed = False
        self.entities = []
        self.boundaries = []
        self.entity_nums = {t : 0 for t in self.grid_types}
        self.available_grids = []
| StarcoderdataPython |
6663493 | import logging
import json
from board import Board
# Module-level logger for game lifecycle messages.
logger = logging.getLogger(__name__)
class Game(object):
    """A single game session tying a set of players to a generated board."""

    def __init__(self, players, game_id=None, size=(10, 10), star_count=10):
        # Loading a persisted game by id is not supported yet.
        if game_id:
            logger.info("Loading game {id}".format(id=game_id))
            raise NotImplementedError
        self.id = 1
        self.players = players
        self.board = Board(bounds=size, star_count=star_count)
        logger.info("Game was init")

    def __repr__(self):
        return str(self.serialized)

    @property
    def serialized(self):
        """Dict form of the game, used by __repr__."""
        return {
            'game_id': self.id,
            'players': self.players,
            'board': self.board,
        }
| StarcoderdataPython |
6407050 | <gh_stars>0
from utils import *
def diveToLast(lst):
    """Descend a nested {op: [operand, operand]} AST and return the deepest
    node whose operands are no longer lists.

    Any structural mismatch (missing keys, wrong shapes) surfaces as an
    exception, which is re-raised as a SyntaxError with the original cause.
    `fail`/`endc` are terminal-colour codes from `utils` (star-imported).
    """
    try:
        # Probe the first operand's value; recurse into whichever side is a list.
        if type(list(list(lst.values())[0][0].values())[0]) is list:
            return diveToLast(list(lst.values())[0][0])
        elif type(list(list(lst.values())[0][1].values())[0]) is list:
            return diveToLast(list(lst.values())[0][1])
        else:
            return lst
    except Exception as e:
        raise SyntaxError(fail + "Invalid AST format! (`%s`)" % (e) + endc)
def insertToTuple(tup, value, depth, index):
    """Insert `value` into the "tuple" node found `depth` levels down in `tup`,
    at position `index` (negative indices insert from the end; -1 appends).

    Returns the (possibly re-wrapped) structure: a multi-element list comes
    back as {"tuple": [...]}, a single element is unwrapped.
    """
    # Normalise the working value to a list of dict nodes.
    if type(tup) is dict:
        tup = [tup]
    else:
        tup = list(tup)
    if depth > 0:
        # Still descending: recurse into the first child that is a tuple node.
        if type(tup) is list:
            success = False
            for i, x in enumerate(tup):
                if "tuple" in x:
                    success = True
                    tup = [insertToTuple(tup[i]["tuple"], value, depth-1, index)]
                    break
            if not success:
                raise SyntaxError(fail + "Invalid tuple format (maybe depth overflow) `%s`!" % (tup) + endc)
        elif type(tup) is dict and contains({"tuple"}, tup["tuple"]):
            tup = [insertToTuple(tup["tuple"], value, depth-1, index)]
    else:
        # Base case: perform the actual insertion into the first tuple node.
        for i, x in enumerate(tup):
            if "tuple" in x:
                # Unwrap a singleton list so we don't nest one level too deep.
                if type(value) is list and len(value) == 1:
                    value = value[0]
                if index < 0:
                    if index == -1:
                        tup[i]["tuple"].append(value)
                    else:
                        # Shift by one so e.g. -2 lands before the last element.
                        tup[i]["tuple"].insert(index+1, value)
                else:
                    tup[i]["tuple"].insert(index, value)
                break
    # Re-wrap the result in canonical form before returning.
    if type(tup) is list:
        if len(tup) > 1:
            tup = {"tuple": list(tup)}
        else:
            tup = tup[0]
    return tup
if __name__ == '__main__':
    # Ad-hoc smoke tests exercising the helpers on hand-built AST fragments.
    print(diveToLast({"+": [{"number": "3"}, {"number": "17"}]}))
    print(diveToLast({"-": [{"*": [{"number": "5"}, {"id": "pi"}]}, {"/": [{"number": "3"}, {"number": "2"}]}]}))
    print(diveToLast({"=": [{"id": "pi"}, {"-": [{"*": [{"number": "5"}, {"id": "pi"}]}, {"/": [{"number": "3"}, {"number": "2"}]}]}]}))
    print(insertToTuple({"tuple": [{"tuple": [{"tuple": [{"number": "3"}, {"number": "4"}]}]}]}, {"tuple": [{"number": "1"}, {"number": "2"}]}, 2, 0))
12811797 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
def main(entrada, saida):
    """Filter Portuguese verbs out of a word list.

    Reads one word per line from `entrada`, lower-cases and strips each,
    drops words ending in -ar/-er/-ir, and writes the survivors (one per
    line) to `saida`.
    """
    with open(entrada, 'r') as fonte:
        termos = [linha.lower().rstrip() for linha in fonte.read().splitlines()]
    # Lazily keep only the non-verb words.
    nao_verbos = (t for t in termos if not t.endswith(('ar', 'er', 'ir')))
    with open(saida, 'w') as destino:
        destino.writelines("%s\n" % t for t in nao_verbos)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--entrada', type=str, required=False, default='palavras.txt',
help='Arquivo de entrada contendo as palavras')
parser.add_argument('--saida', type=str, required=False, default='palavras_sem_verbos.txt',
help='Arquivo de saída contendo as palavras sem verbos')
args = parser.parse_args()
entrada = args.entrada
saida = args.saida
main(entrada, saida)
| StarcoderdataPython |
3471368 | """
main_module - 输出形状计算,测试时将对应方法的@unittest.skip注释掉.
Main members:
# __main__ - 程序入口.
"""
import unittest
import torch
from torch import nn
class TestShape(unittest.TestCase):
    """Output-shape computation checks.

    Main methods:
        test_RNN - print the tensor shapes produced by nn.RNN.
    """
    # @unittest.skip('debug')
    def test_RNN(self):
        """Exercise nn.RNN and print input/output/hidden-state shapes."""
        print('{} test_RNN {}'.format('-'*15, '-'*15))
        vocab_size = 200
        rnn_layer = nn.RNN(input_size=vocab_size, hidden_size=10)
        seq_len = 35
        batch_size = 8
        state = None
        X = torch.rand(seq_len, batch_size, vocab_size)
        print(X.shape)  # input shape is (seq_len, batch, input_size); torch.Size([35, 8, 200])
        Y, state_new = rnn_layer(X, state)
        print(Y.shape)  # output shape is (seq_len, batch, num_directions * hidden_size); torch.Size([35, 8, 10])
        print(state_new.shape)  # hidden state h has shape (num_layers * num_directions, batch, hidden_size); torch.Size([1, 8, 10])
        print('{} num_layers=3,bidirectional {}'.format('-'*15, '-'*15))
        # Same check with a 3-layer bidirectional RNN (num_directions == 2).
        rnn_layer = nn.RNN(input_size=vocab_size, hidden_size=10, num_layers=3, bidirectional=True)
        state = None
        X = torch.rand(seq_len, batch_size, vocab_size)
        print(X.shape)  # input shape is (seq_len, batch, input_size); torch.Size([35, 8, 200])
        Y, state_new = rnn_layer(X, state)
        print(Y.shape)  # output shape is (seq_len, batch, num_directions * hidden_size); torch.Size([35, 8, 20])
        print(state_new.shape)  # hidden state h has shape (num_layers * num_directions, batch, hidden_size); torch.Size([6, 8, 10])
if __name__ == "__main__":
unittest.main() # 运行当前源文件中的所有测试用例
| StarcoderdataPython |
1976655 | # -*- coding: utf-8 -*-
"""
To make userdefined function avaiable to Business logic define them here
Function names have to be all lower case !!!
Created on Fri Mar 2 14:50:18 2018
@author: hanseni
"""
from numba import jit
@jit("f8(b1,f8,f8)")
def recode(condition,yes,no):
'''Function which recreates the functionality of @recode from eviews '''
return yes if condition else no
try:
from stem import ste
except:
pass
def __pd():
    ''' Returns functions translating PD to REA weights.
    The reason for making a closure is to avoid namespace clutter with the
    imported math/scipy functions.'''
    from math import isclose, sqrt, erf
    from math import exp, log
    from scipy.special import erfinv, ndtri

    def phi(x):
        ''' Cumulative normal distribution function '''
        return (1.0 + erf(x / sqrt(2.0))) / 2.0

    def phiinv(x):
        ''' Inverse cumulative normal distribution function '''
        return ndtri(x)

    def pd_to_w(PD=0.01, LGD=0.5, cat='mrtg'):
        ''' Based on PD, LGD and sector this function calculates the risk
        weights based on Basel 3.
        This function is based on Marco Gross's matlab function and checked
        against the results; the function is Risk_weights.m and a copy is
        located at the 'Python poc' directory.
        This function only distinguishes between 3 types.
        Alternative is to implement the parameters from BST.steet(CR_MAP) '''
        NORM99 = 3.0902323061678132
        # from phiinv(0.999)
        # Clamp PD away from zero (and below) so log()/phiinv() stay defined.
        PD_ = 1e-10 if isclose(PD, 0.0, abs_tol=1e-9) else PD
        if PD < -1e-9:
            PD_ = 1e-10
        if cat == 'corp':
            # Corporate exposures: PD-dependent correlation and maturity adjustment.
            R = 0.12*(1-exp(-50*PD_ ))/(1-exp(-50)) + 0.24* (1-(1-exp(-50*PD_ ))/(1-exp(-50)))
            b = (0.11852 - 0.05478*log(PD_))**2.
            M = 2.5;
            normal_dist_comp = phi(((1-R)**-0.5) *phiinv(PD_) + NORM99 * ((R /(1-R))**0.5))
            K = LGD *(normal_dist_comp-PD_ ) *(1+(b*(M-2.5))) /(1- b*1.5)
        elif cat == 'mrtg':
            # Residential mortgages: fixed correlation, no maturity adjustment.
            R = 0.15;
            normal_dist_comp = phi(((1-R)**-0.5)*phiinv(PD_) + NORM99 *((R/(1-R))**0.5))
            K = LGD*(normal_dist_comp-PD)
        elif cat == 'retail':
            # Other retail: PD-dependent correlation.
            R = 0.03*(1-exp(-35*PD_))/(1-exp(-35)) + 0.16*(1-(1-exp(-35*PD_))/(1-exp(-35)));
            normal_dist_comp = phi(((1-R)**-0.5)*phiinv(PD_) + NORM99 * ((R/(1-R))**0.5))
            K = LGD*(normal_dist_comp-PD)
        else:
            print('Major mistake. No Basel categori :', cat)
        # Capital requirement K is scaled by 12.5 (= 1/8%) to get the risk weight.
        return K * 12.5

    def pd_to_w_corp(PD=0.01, LGD=0.5):
        return pd_to_w(PD, LGD, cat='corp' )

    def pd_to_w_mrtg(PD=0.01, LGD=0.5):
        return pd_to_w(PD, LGD, cat='mrtg' )

    def pd_to_w_retail(PD=0.01, LGD=0.5):
        return pd_to_w(PD, LGD, cat='retail' )

    return pd_to_w, pd_to_w_corp, pd_to_w_mrtg, pd_to_w_retail, phi, phiinv
# pd_to_w,pd_to_w_corp,pd_to_w_mrtg,pd_to_w_retail,phi,phiinv = __pd()
| StarcoderdataPython |
1640332 | <filename>reinforcement/agents/basis.py
from abc import ABC, abstractmethod
import numpy as np
from reinforcement.trajectories import TrajectoryRecorder
class AgentInterface(ABC):
    """Contract for agents: choose actions, receive reward signals, learn."""

    @abstractmethod
    def next_action(self, observation):
        """Return the action to take given `observation`."""
        raise NotImplementedError

    @abstractmethod
    def signal(self, reward):
        """Deliver the reward obtained for the most recent action."""
        raise NotImplementedError

    @abstractmethod
    def train(self):
        """Update the agent's policy from experience gathered so far."""
        raise NotImplementedError
class BatchAgent(AgentInterface):
    """Agent that records a full trajectory and trains on it as a batch.

    The TrajectoryRecorder record is generator-based: each add_* call yields
    the next record state, hence the `next(...)` wrapping below.
    """

    def __init__(self, algorithm):
        self._algorithm = algorithm
        self._recorder = TrajectoryRecorder()
        self._record = self._recorder.start()

    def signal(self, reward):
        self._record = next(self._record.add_reward(reward))

    def next_action(self, observation):
        # Sample an action index from the policy's probability vector.
        p = self._algorithm.sample(observation)
        a = np.random.choice(len(p), p=p)
        self._record = next(self._record.add_action(a))
        self._record = next(self._record.add_observation(observation))
        return a

    def train(self):
        trj = self._recorder.to_trajectory()
        self._algorithm.optimize(trj)
| StarcoderdataPython |
3542052 | <gh_stars>0
from config.settings.base import *
from config.settings.base import env
# Production specific settings
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# Space-separated host list from the environment, e.g. "example.com www.example.com".
ALLOWED_HOSTS = env.str("DJANGO_ALLOWED_HOSTS").split(" ")
# ADMINS = [
#     (
#         env.str("WEBMASTER_NAME", default="Webmaster"),
#         env.str("WEBMASTER_EMAIL", default="<EMAIL>")
#     ),
#     (
#         env.str("ADMINISTRATOR_NAME", default="Administrator"),
#         env.str("ADMINISTRATOR_EMAIL", default="<EMAIL>")
#     )
# ]
# MANAGERS = ADMINS
# Add INSTALLED_APPS on top
INSTALLED_APPS = [] + INSTALLED_APPS
# Add INSTALLED_APPS at bottom
INSTALLED_APPS += ['admin_honeypot', ]
# Add MIDDLEWARE on top (UpdateCache must run first on responses)
MIDDLEWARE = ['django.middleware.cache.UpdateCacheMiddleware', ] + MIDDLEWARE
# Add MIDDLEWARE at bottom (FetchFromCache must run last on requests)
MIDDLEWARE += ['django.middleware.cache.FetchFromCacheMiddleware', ]
ROOT_URLCONF = 'config.urls.production'
WSGI_APPLICATION = 'config.wsgi.application'
# django-allauth config
# FIX-URGENT: Change to production specific email backend
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Cache config (site-wide per-URL cache via the middleware pair above)
CACHE_MIDDLEWARE_ALIAS = 'default'
# TODO-NORMAL: Update to 7 days once working properly
CACHE_MIDDLEWARE_SECONDS = 3600
CACHE_MIDDLEWARE_KEY_PREFIX = ''
# Security config — disabled when mimicking production locally (no TLS there).
if env.bool("MIMIC_PRODUCTION_LOCALLY"):
    pass
else:
    CSRF_COOKIE_SECURE = True
    SESSION_COOKIE_SECURE = True
    SECURE_SSL_REDIRECT = True
    SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
    SECURE_HSTS_INCLUDE_SUBDOMAINS = True
    # TODO-NORMAL: Update to 7 days or 30 days once working properly
    SECURE_HSTS_SECONDS = env.int("DJANGO_SECURE_HSTS_SECONDS", default=60)
    SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
    SECURE_CONTENT_TYPE_NOSNIFF = True
# Admin Honeypot config
ADMIN_HONEYPOT_EMAIL_ADMINS = True
# Anymail
# TODO-NORMAL: Remove console backend and switch to Twilio Sendgrid
# EMAIL_BACKEND = "anymail.backends.sendgrid.EmailBackend"
# ANYMAIL = {
#     "SENDGRID_API_KEY": env("SENDGRID_API_KEY"),
#     "SENDGRID_GENERATE_MESSAGE_ID": env("SENDGRID_GENERATE_MESSAGE_ID"),
#     "SENDGRID_MERGE_FIELD_FORMAT": env("SENDGRID_MERGE_FIELD_FORMAT"),
#     "SENDGRID_API_URL": env("SENDGRID_API_URL", default="https://api.sendgrid.com/v3/"),
# }
| StarcoderdataPython |
11383349 | # -*- coding: utf-8 -*-
from connect import security
import json
class TestFilteredKeys():
    """Round-trip tests for the filtered-key encryption helpers."""

    def test_encrypt_and_decrypt_return_same_value(self):
        """Encrypting then decrypting a filter definition yields the original JSON bytes."""
        master_key = "00000000000000000000000000000000"
        filter_definition = {
            'filters': {
                'type': 'cycling'
            },
            'canQuery': True,
            'canPush': True
        }
        encrypted = security.encrypt_filtered_key(filter_definition, master_key)
        decrypted = security.decrypt_filtered_key(encrypted, master_key)
        # decrypt returns raw bytes, so compare against the JSON encoding.
        expected_value = bytearray(json.dumps(filter_definition), "ascii")
        assert decrypted == expected_value
4942335 | <filename>schapp/timeClock/admin.py<gh_stars>0
from django.contrib import admin
from .models import Clock_In, Clock_Out
# Expose the time-clock models in the Django admin site.
admin.site.register(Clock_In)
admin.site.register(Clock_Out)
| StarcoderdataPython |
4930671 | <reponame>glauberm/doaj
from doajtest.helpers import DoajTestCase
class TestSearchBox(DoajTestCase):
    """Exercise the /search endpoint's handling of the `ref` and `origin` form fields."""

    def setUp(self):
        super(TestSearchBox, self).setUp()

    def tearDown(self):
        super(TestSearchBox, self).tearDown()

    def test_01_with_referrer(self):
        """ Utilise the search endpoint using the ref field. We expect a redirect with the referrer to be appended """
        with self.app_test.test_client() as c:
            resp1 = c.post('/search?source={"query": {"query_string": {"query": "cheese", "default_operator": "AND"}}}',
                           data={'ref': 'homepage-box', 'origin': 'ui'})
            assert resp1.status_code == 302, resp1.status_code
            assert resp1.location.endswith('&ref=homepage-box')
            # Following the redirect should land on a normal results page.
            resp2 = c.post('/search?source={"query": {"query_string": {"query": "cheese", "default_operator": "AND"}}}',
                           data={'ref': 'homepage-box', 'origin': 'ui'}, follow_redirects=True)
            assert resp2.status_code == 200, resp2.status_code

    def test_02_without_origin(self):
        """ Omit the origin field when emulating the text box - this is disallowed."""
        with self.app_test.test_client() as c:
            resp = c.post('/search?source={"query": {"query_string": {"query": "cheese", "default_operator": "AND"}}}',
                          data={'ref': 'homepage-box'})
            assert resp.status_code == 400, resp.status_code
        pass

    def test_03_without_referrer(self):
        """ Omit the referrer field when emulating the text box """
        with self.app_test.test_client() as c:
            resp = c.post('/search?source={"query": {"query_string": {"query": "cheese", "default_operator": "AND"}}}',
                          data={'origin': 'ui'})
            assert resp.status_code == 400, resp.status_code
        pass
| StarcoderdataPython |
11220908 | # Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
import arvados
import re
import os
import sys
import fcntl
import subprocess
# Memoised extraction path for the GATK2 tarball (populated on first use).
gatk2_install_path = None
def install_path():
    """Extract the GATK2 tarball (once) and return its install directory."""
    global gatk2_install_path
    if gatk2_install_path:
        return gatk2_install_path
    gatk2_install_path = arvados.util.tarball_extract(
        tarball = arvados.current_job()['script_parameters']['gatk_tbz'],
        path = 'gatk2')
    return gatk2_install_path
def memory_limit():
    """Return a per-task Java heap limit in MiB, derived from /proc/meminfo.

    Total RAM is divided among the node's task slots, then 700 MiB is
    reserved for overhead, with a 500 MiB floor.
    NOTE(review): the `/` divisions assume Python 2 integer semantics;
    under Python 3 they yield floats — confirm target interpreter.
    """
    taskspernode = int(os.environ.get('CRUNCH_NODE_SLOTS', '1'))
    with open('/proc/meminfo', 'r') as f:
        ram = int(re.search(r'MemTotal:\s*(\d+)', f.read()).group(1)) / 1024
    if taskspernode > 1:
        ram = ram / taskspernode
    return max(ram-700, 500)
def cpus_on_this_node():
    """Return the CPU count, preferring SLURM's allocation over /proc/cpuinfo."""
    with open('/proc/cpuinfo', 'r') as cpuinfo:
        # Take the larger of the SLURM-advertised count and the number of
        # "processor :" entries actually present on the node.
        return max(int(os.environ.get('SLURM_CPUS_ON_NODE', 1)),
                   len(re.findall(r'^processor\s*:\s*\d',
                                  cpuinfo.read(),
                                  re.MULTILINE)))
def cpus_per_task():
    """Return the number of CPUs available to each task slot on this node (at least 1)."""
    # Use floor division so the result stays an int on Python 3 as well;
    # on Python 2 `//` on ints is identical to the old `/` behaviour.
    return max(1, (cpus_on_this_node()
                   // int(os.environ.get('CRUNCH_NODE_SLOTS', 1))))
def run(**kwargs):
    """Invoke the GATK2 jar with the computed heap limit and temp dir.

    Extra positional arguments for GATK go in kwargs['args']; remaining
    kwargs are forwarded to arvados.util.run_command.
    """
    kwargs.setdefault('cwd', arvados.current_task().tmpdir)
    kwargs.setdefault('stdout', sys.stderr)
    execargs = ['java',
                '-Xmx%dm' % memory_limit(),
                '-Djava.io.tmpdir=' + arvados.current_task().tmpdir,
                '-jar', os.path.join(install_path(), 'GenomeAnalysisTK.jar')]
    # GATK arguments may be non-strings (ints, paths); stringify them all.
    execargs += [str(arg) for arg in kwargs.pop('args', [])]
    sys.stderr.write("%s.run: exec %s\n" % (__name__, str(execargs)))
    return arvados.util.run_command(execargs, **kwargs)
| StarcoderdataPython |
3374553 | from base64 import b64encode
from pathlib import Path
from typing import Optional, Union
from PIL import Image as PILImage
from .grid_object import GridObject
from .mixin import SlideMixin
from .._utils import check_exists, check_type, check_sanity_int
class Image(GridObject):
    """Represents an image on the slide.

    Parameters
    ----------
    image_path: str or pathlib.Path
        A string or pathlib.Path object representing the path to the image.
    row: int
        The grid row in which to place this image.
    column: int
        The grid column in which to place this image.
    row_span: optional, int
        The number of rows for this image to span (defaults to `1`).
    col_span: optional, int
        The number of columns for this image to span (defaults to `1`).
    width_pct: optional, int
        The percentage of the original image width to scale by. Defaults to 100 (no resizing).
    css_class: str, optional
        The name of the CSS class (or classes) to apply to this object.
    """

    def __init__(self,
                 image_path: Union[str, Path],
                 row: int,
                 column: int,
                 row_span: int = 1,
                 col_span: int = 1,
                 width_pct: int = 100,
                 css_class: Optional[str] = None
                 ):
        # Check provided image_path is either string or Path object, then check that it exists.
        check_type("image_path", image_path, Union[str, Path])
        check_exists(image_path, "Image")
        self.image_file = image_path
        self._check_and_set(row, column, row_span, col_span, css_class)
        check_sanity_int("width_pct", width_pct)
        self.width_pct = width_pct

    def get_div(self) -> str:
        """Get the required <div></div> HTML tags to display this image.

        The image bytes are inlined as a base64 data URI so the resulting
        HTML is self-contained.

        Returns
        -------
        str
        """
        # Read in the image, convert to string with Base64.
        with open(self.image_file, 'rb') as f:
            img = b64encode(f.read()).decode()
            # Use Pillow to also open the image and get its size - we'll use this to scale the image if we need to.
            # NOTE(review): relies on PIL seeking the shared file handle back
            # to position 0 itself — confirm this holds for the pinned Pillow version.
            img_size = PILImage.open(f).size
        # Scale the displayed width by width_pct of the intrinsic width.
        img_width = int(self.width_pct / 100 * img_size[0])
        img_style_str = f'style="justify-self:center"'
        html_str = f'<div {self._style_str}><center><img src="data:image;base64, {img}" {img_style_str} width={img_width}px></center></div>'
        return html_str
class ImageMixin(SlideMixin):
    """Adds Image functionality to the Slide class."""

    def add_image(self,
                  image_path: Union[str, Path],
                  row: int,
                  column: int,
                  row_span: int = 1,
                  col_span: int = 1,
                  width_pct: int = 100,
                  css_class: Optional[str] = None,
                  ):
        """Add an image to this slide, in the specified row and column.

        Parameters
        ----------
        image_path: str or pathlib.Path
            A string or pathlib.Path object representing the path to the image.
        row: int
            The grid row in which to place this image.
        column: int
            The grid column in which to place this image.
        row_span: int, default=1
            The number of rows for this image to span (defaults to `1`).
        col_span: int, default=1
            The number of columns for this image to span (defaults to `1`).
        width_pct: int, default=100
            The percentage of the original image width to scale by. Defaults to 100 (no resizing).
        css_class : str, optional
            The CSS class (or classes) to apply to this image. Multiple CSS classes are applied in a single string,
            separated by a space. I.e. `css_class = "class1 class2"`.
        """
        # Construct (and validate) the Image first, then verify the grid slot.
        image = Image(image_path, row, column, row_span, col_span, width_pct, css_class)
        self._check_grid_pos(row, column)
        self._elements.append(image)
| StarcoderdataPython |
1765638 | __version__ = "0.15.1"
__api_version__ = "1.0.1"
| StarcoderdataPython |
9655975 | <filename>datasets/ToyARD2.py
'''
ToyARD2.py
Toy data from a first-order auto-regressive process.
'''
import numpy as np
import scipy.linalg
from bnpy.data import XData
def get_short_name():
    """Short identifier used in filepaths when storing solutions."""
    return "ToyARD2"
def get_data_info():
    """One-line human-readable summary of the dataset (K is the module-level cluster count)."""
    return 'Toy AutoRegressive Data. %d true clusters.' % (K)
def get_data(seed=8675309, nObsTotal=25000, **kwargs):
    '''
    Args
    -------
    seed : integer seed for random number generator,
          used for actually *generating* the data
    nObsTotal : total number of observations for the dataset.

    Returns
    -------
    Data : bnpy XData object, with nObsTotal observations.
           Xprev holds the one-step-lagged observations for the AR(1) model.
    '''
    X, Xprev, TrueZ = genToyData(seed, nObsTotal)
    Data = XData(X=X, TrueZ=TrueZ, Xprev=Xprev)
    Data.name = get_short_name()
    Data.summary = get_data_info()
    return Data
# True generative parameters for the two-state AR(1) process.
K = 2  # number of latent states
D = 2  # observation dimension
a1 = 0.9995  # autoregressive coefficient of the active dimension
A = np.zeros((K, D, D))
A[0] = np.asarray([[a1, 0], [0, 0]])
A[1] = np.asarray([[0, 0], [0, a1]])
s1 = 0.001  # noise variance on the persistent dimension
s2 = 0.003  # noise variance on the other dimension
Sigma = np.zeros((K, D, D))
Sigma[0] = np.diag([s1, s2])
Sigma[1] = np.diag([s2, s1])
# Pre-compute Cholesky factors for sampling Gaussian noise.
# (xrange: this module targets Python 2.)
cholSigma = np.zeros_like(Sigma)
for k in xrange(K):
    cholSigma[k] = scipy.linalg.cholesky(Sigma[k])
def genToyData(seed=0, nObsTotal=25000):
    ''' Generate Toy Data from a two-state switching AR(1) process.

    Returns (X, Xprev, Z): observations, lagged observations, and the
    0/1 state label per timestep. The state flips with prob. switchProb.
    '''
    switchProb = 0.005
    Xprev = np.zeros((nObsTotal, D))
    X = np.zeros((nObsTotal, D))
    Xprev[0] = [.05, -.05]
    # Pre-draw all Gaussian noise for each state with distinct seeds.
    PRNG = np.random.RandomState(seed)
    XX1 = np.dot(cholSigma[1].T, PRNG.randn(D, nObsTotal)).T
    PRNG = np.random.RandomState(seed + 1)
    XX0 = np.dot(cholSigma[0].T, PRNG.randn(D, nObsTotal)).T
    PRNG = np.random.RandomState(seed + 2)
    rs = PRNG.rand(nObsTotal)  # uniform draws deciding state switches
    Z = np.ones(nObsTotal)  # states encoded +1/-1 during generation
    for n in xrange(nObsTotal):
        if Z[n] == 1:
            X[n] = np.dot(A[1], Xprev[n]) + XX1[n]
        elif Z[n] == -1:
            X[n] = np.dot(A[0], Xprev[n]) + XX0[n]
        if n < nObsTotal - 1:
            Xprev[n + 1] = X[n]
            if rs[n] < switchProb:
                Z[n + 1] = -1 * Z[n]
            else:
                Z[n + 1] = Z[n]
    # Re-encode states as 0/1 labels for the caller.
    Z[Z < 0] = 0
    return X, Xprev, Z
if __name__ == '__main__':
    # Visual sanity check: plot each dimension over time, coloured by state.
    X, Xprev, Z = genToyData(seed=0, nObsTotal=5800)
    from matplotlib import pylab
    aIDs = np.flatnonzero(Z == 0)
    bIDs = np.flatnonzero(Z == 1)
    B = np.max(np.abs(X))  # symmetric y-axis limit
    pylab.subplot(2, 1, 1)
    pylab.plot(aIDs, X[aIDs, 0], 'r.')
    pylab.plot(bIDs, X[bIDs, 0], 'b.')
    pylab.ylim([-B, B])
    pylab.subplot(2, 1, 2)
    pylab.plot(aIDs, X[aIDs, 1], 'r.')
    pylab.plot(bIDs, X[bIDs, 1], 'b.')
    pylab.ylim([-B, B])
    pylab.tight_layout()
    pylab.show()
| StarcoderdataPython |
1957212 | from ArithBuses import *
from TraceType import *
import DFG
from BusReq import *
##############################################################################
# Arithmetic operators
##############################################################################
class ArithmeticInputBaseReq(BusReq):
    """Base request for circuit inputs; `bus_class` selects the concrete input bus."""

    def __init__(self, bus_class, reqfactory, expr, type):
        BusReq.__init__(self, reqfactory, expr, type)
        self.bus_class = bus_class

    def natural_type(self):
        return ARITHMETIC_TYPE

    def natural_dependencies(self):
        # Inputs are leaves: nothing to resolve first.
        return []

    def natural_impl(self):
        return self.bus_class(self.board(), self.expr.storage_key.idx)
class ArithmeticInputReq(ArithmeticInputBaseReq):
    """Request for a public (verifier-visible) arithmetic input."""

    def __init__(self, reqfactory, expr, type):
        ArithmeticInputBaseReq.__init__(self, ArithmeticInputBus, reqfactory, expr, type)
class ArithmeticNIZKInputReq(ArithmeticInputBaseReq):
    """Request for a prover-private (NIZK) arithmetic input."""

    def __init__(self, reqfactory, expr, type):
        ArithmeticInputBaseReq.__init__(self, ArithmeticNIZKInputBus, reqfactory, expr, type)
class AddReq(BinaryOpReq):
    """Request for an arithmetic addition gate."""

    def __init__(self, reqfactory, expr, type):
        BinaryOpReq.__init__(self, reqfactory, expr, type)

    def natural_type(self): return ARITHMETIC_TYPE

    def has_const_opt(self): return False
    # we could implement a constant optimization here, but it doesn't
    # actually reduce any muls, so why bother?

    def var_impl(self, *busses):
        # NOTE(review): `self` is passed in the ArithAddBus comment slot — confirm
        # against ArithAddBus's signature (other callers pass a string there).
        return ArithAddBus(self.board(), self, *busses)
class MultiplyReq(BinaryOpReq):
    """Request for an arithmetic multiplication gate, with a constant-factor shortcut."""

    def __init__(self, reqfactory, expr, type):
        BinaryOpReq.__init__(self, reqfactory, expr, type)

    def natural_type(self): return ARITHMETIC_TYPE

    def has_const_opt(self): return True

    def const_impl(self, const_expr, variable_bus):
        # Multiplying by a known constant is cheaper than a full mul gate.
        return ConstantMultiplyBus(self.board(), const_expr.value, variable_bus)

    def var_impl(self, *busses):
        return ArithMultiplyBus(self.board(), *busses)
class NegateReq(BusReq):
    """Request for unary negation, implemented as multiplication by -1."""

    def __init__(self, reqfactory, expr, type):
        BusReq.__init__(self, reqfactory, expr, type)

    def natural_type(self): return ARITHMETIC_TYPE

    def _req(self):
        # Request the operand in arithmetic form.
        return self.make_req(self.expr.expr, self.natural_type())

    def natural_dependencies(self):
        return [ self._req() ]

    def natural_impl(self):
        sub_bus = self.get_bus_from_req(self._req())
        return ConstantMultiplyBus(self.board(), -1, sub_bus)
##############################################################################
# ConditionalReq operator.
# accepts a boolean condition and two arithmetic inputs;
# emits an arithmetic output.
##############################################################################
class ConditionalReq(BusReq):
    """Ternary select: a boolean condition chooses between two arithmetic values,
    producing an arithmetic output."""

    def __init__(self, reqfactory, expr, type):
        BusReq.__init__(self, reqfactory, expr, type)
        # NB: I used to have clever code here that, given BOOLEAN
        # true- and false-case values, would compute the condition in boolean
        # space to avoid a transition into and out of arithmetic-land.
        # Then I realized that was stupid: the transition in (join) is free,
        # and the transition out a split, so the total cost is 2+k+1 muls.
        # But doing the condition in boolean space is 2k muls!

    def natural_type(self):
        return ARITHMETIC_TYPE

    def _reqcond(self):
        # The condition must arrive as a single boolean wire.
        return LogicalCastReq(self.reqfactory, self.expr.cond, BOOLEAN_TYPE)

    def _reqtrue(self):
        return self.reqfactory.make_req(self.expr.valtrue, ARITHMETIC_TYPE)

    def _reqfalse(self):
        return self.reqfactory.make_req(self.expr.valfalse, ARITHMETIC_TYPE)

    def natural_dependencies(self):
        return [ self._reqcond(), self._reqtrue(), self._reqfalse() ]

    def natural_impl(self):
        buscond = self.get_bus_from_req(self._reqcond())
        bustrue = self.get_bus_from_req(self._reqtrue())
        busfalse = self.get_bus_from_req(self._reqfalse())
        return ArithmeticConditionalBus(
            self.board(), buscond, bustrue, busfalse)
##############################################################################
# Comparison operators.
# accept arithmetic inputs; emit boolean outputs
##############################################################################
class CmpReq(BinaryOpReq):
    """Abstract base for comparison operators: arithmetic inputs, boolean-style output.
    Subclasses must supply var_impl."""

    def __init__(self, reqfactory, expr, type):
        BinaryOpReq.__init__(self, reqfactory, expr, type)
        # if (isinstance(expr, DFG.CmpLT)):
        #     self._leq = False
        # elif (isinstance(expr, DFG.CmpLEQ)):
        #     self._leq = True
        # else:
        #     assert(False)

    def natural_type(self): return ARITHMETIC_TYPE

    def has_const_opt(self): return False  # doesn't cost muls.

    def var_impl(self, abus, bbus):
        # Abstract: each concrete comparison builds its own bus network.
        assert(False)
class CmpLTReq(CmpReq):
    """a < b, computed as the sign bit of (a - b)."""

    def __init__(self, reqfactory, expr, type):
        CmpReq.__init__(self, reqfactory, expr, type)

    def var_impl(self, abus, bbus):
        # Form a + (-1)*b, split it into bits, and extract the sign bit.
        minusb_bus = ConstantMultiplyBus(self.board(), self.board().bit_width.get_neg1(), bbus)
        self.reqfactory.add_extra_bus(minusb_bus)
        comment = "CmpLT %s - %s" % (self.expr.left.__class__, self.expr.right.__class__)
        aminusb_bus = ArithAddBus(self.board(), comment, abus, minusb_bus)
        self.reqfactory.add_extra_bus(aminusb_bus)
        split_bus = SplitBus(self.board(), aminusb_bus)
        self.reqfactory.add_extra_bus(split_bus)
        # Shift right so only the sign bit remains.
        signbit = LeftShiftBus(self.board(), split_bus, -self.board().bit_width.get_sign_bit())
        self.reqfactory.add_extra_bus(signbit)
        # Join the 1-bit result back into an arithmetic wire (free).
        return JoinBus(self.board(), signbit)
class CmpLEQReq(CmpLTReq):
    """I made this a subclass of CmpLTReq because the logic is nearly identical:
    for integers, a <= b is exactly a < b+1."""

    def __init__(self, reqfactory, expr, type):
        CmpLTReq.__init__(self, reqfactory, expr, type)

    def var_impl(self, abus, bbus):
        # Add 1 to b, then delegate to the strict-less-than implementation.
        constant_one = ConstantArithmeticBus(self.board(), 1)
        self.reqfactory.add_extra_bus(constant_one)
        comment = "CmpLEQ %s + 1" % (self.expr.right.__class__)
        bplus1_bus = ArithAddBus(self.board(), comment, bbus, constant_one)
        self.reqfactory.add_extra_bus(bplus1_bus)
        return CmpLTReq.var_impl(self, abus, bplus1_bus)
class CmpEQReq(CmpReq):
    """An == operation. This will have two possible implementations: one using
    boolean operations, one using the compare-with-zero gate."""

    def __init__(self, reqfactory, expr, type):
        CmpReq.__init__(self, reqfactory, expr, type)
class CmpEQReqArith(CmpEQReq):
    """Equality realised with the dedicated arithmetic zero-test gate."""

    def __init__(self, reqfactory, expr, type):
        CmpEQReq.__init__(self, reqfactory, expr, type)

    def var_impl(self, abus, bbus):
        """Perform equality test by subtracting and use the zerop gate"""
        # neg_bbus = ConstantMultiplyBus(self.board(), self.board().bit_width.get_neg1(), bbus)
        # self.reqfactory.add_extra_bus(neg_bbus)
        # aminusb_bus = ArithAddBus(self.board(), "", abus, neg_bbus)
        # self.reqfactory.add_extra_bus(aminusb_bus)
        # print aminusb_bus
        zerop_bus = ArithmeticZeroPBus(self.board(), abus, bbus)
        self.reqfactory.add_extra_bus(zerop_bus)
        # The zero-test yields exactly one output wire.
        assert(zerop_bus.get_trace_count() == 1)
        return zerop_bus
class CmpEQReqBoolean(CmpEQReq):
    """Equality realised in boolean space: split (a-b) into bits, invert them,
    and AND-reduce — all bits of the inverse are 1 iff a == b."""

    def __init__(self, reqfactory, expr, type):
        CmpEQReq.__init__(self, reqfactory, expr, type)

    def var_impl(self, abus, bbus):
        """Perform equality test by subtracting, splitting, and checking for each bit being zero using a chain of ANDs"""
        neg1_bus = ConstantArithmeticBus(self.board(), -1)
        self.reqfactory.add_extra_bus(neg1_bus)
        # BUG FIX: this multiply previously referenced the undefined name
        # `neg1` (a NameError at runtime); the constant bus built above is
        # `neg1_bus`.
        neg_bbus = ArithMultiplyBus(self.board(), neg1_bus, bbus)
        self.reqfactory.add_extra_bus(neg_bbus)
        aminusb_bus = ArithAddBus(self.board(), "", abus, neg_bbus)
        self.reqfactory.add_extra_bus(aminusb_bus)
        split_bus = SplitBus(self.board(), aminusb_bus)
        self.reqfactory.add_extra_bus(split_bus)
        # This next section is basically a map/fold pattern:
        # XOR with all-ones inverts each bit of (a-b)...
        inv_bus = self.reqfactory.get_ConstantBitXorBus_class()(self.board(),
            self.board().bit_width.get_neg1(), split_bus)
        self.reqfactory.add_extra_bus(inv_bus)
        # ...then AND-reduce: result is 1 exactly when every bit of (a-b) was 0.
        log_inv_bus = self.reqfactory.get_AllOnesBus_class()(self.board(), inv_bus)
        self.reqfactory.add_extra_bus(log_inv_bus)
        return JoinBus(self.board(), log_inv_bus)
# def var_impl(self, abus, bbus):
# # a < b :: a+(neg1*b) < 0 :: sign_bit(a+(neg1*b))
# # for integers, a <= b :: a < b+1 :: sign_bit(a+(neg1*(b+1)))
# # TODO optimizations for comparisons.
# # Once again, there's enough complexity here that we might imagine
# # we could collapse common subexpressions at a lower level than we
# # are, since sign_bit is pretty expensive.
# # That way, if (a<b) { return a-b; } would be more efficient.
# # Another optimization alternative is to only use only as many bits
# # of neg1 as we need to compute a-b.
# # For now, let's do this all in 32-bits, and collapse no redundancy.
# if (self._leq):
# constant_one = ConstantArithmeticBus(self.board(), 1)
# self.reqfactory.add_extra_bus(constant_one)
# comment = "Cmp::%s + 1" % (self.expr.right.__class__,)
# bplus1_bus = ArithAddBus(self.board(), comment, bbus, constant_one)
# self.reqfactory.add_extra_bus(bplus1_bus)
# adjusted_bbus = bplus1_bus
# else:
# adjusted_bbus = bbus
# # minusb_bus = ConstantMultiplyBus(self.board(), -1, adjusted_bbus)
# minusb_bus = ConstantMultiplyBus(self.board(), self.board().bit_width.get_neg1(), adjusted_bbus)
# self.reqfactory.add_extra_bus(minusb_bus)
# comment = "Cmp::%s - %s" % (
# self.expr.left.__class__,
# self.expr.right.__class__)
# aminusb_bus = ArithAddBus(self.board(), comment, abus, minusb_bus)
# self.reqfactory.add_extra_bus(aminusb_bus)
# split_bus = SplitBus(self.board(), aminusb_bus)
# self.reqfactory.add_extra_bus(split_bus)
# signbit = LeftShiftBus(
# self.board(), split_bus, -self.board().bit_width.get_sign_bit())
# self.reqfactory.add_extra_bus(signbit)
# # NB we join the one-bit sign bit back into an ARITHMETIC bus
# # so we can keep our input and output types equivalent (ARITHMETIC),
# # and hence exploit the BinaryOpReq superclass. We could defect --
# # at a cost in more complexity -- but there's
# # no point. The join is free, and if someone else needs a boolean,
# # well, the one-bit split is free, too.
# return JoinBus(self.board(), signbit)
| StarcoderdataPython |
3493966 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
'''
inplace_callbacks.py: python module showcasing inplace callback function
registration and functionality.
Copyright (c) 2016, NLnet Labs.
This software is open source.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the organization nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
#Try:
# - dig @localhost nlnetlabs.nl +ednsopt=65002:
# This query *could* be answered from cache. If so, unbound will reply
# with the same EDNS option 65002, but with hexdata 'deadbeef' as data.
#
# - dig @localhost bogus.nlnetlabs.nl txt:
# This query returns SERVFAIL as the txt record of bogus.nlnetlabs.nl is
# intentionally bogus. The reply will contain an empty EDNS option
# with option code 65003.
# (unbound needs to be validating for this example to work)
# Useful functions:
# register_inplace_cb_reply(inplace_reply_callback, env, id):
# Register the reply_callback function as an inplace callback function
# when answering with a resolved query.
# Return True on success, False on failure.
#
# register_inplace_cb_reply_cache(inplace_reply_cache_callback, env, id):
# Register the reply_cache_callback function as an inplace callback
# function when answering from cache.
# Return True on success, False on failure.
#
# register_inplace_cb_reply_local(inplace_reply_local_callback, env, id):
# Register the reply_local_callback function as an inplace callback
# function when answering from local data or chaos reply.
# Return True on success, False on failure.
#
# register_inplace_cb_reply_servfail(inplace_reply_servfail_callback, env, id):
# Register the reply_servfail_callback function as an inplace callback
# function when answering with servfail.
# Return True on success, False on failure.
#
# Examples on how to use the functions are given in this file.
def inplace_reply_callback(qinfo, qstate, rep, rcode, edns, opt_list_out, region):
    """Inplace hook run when unbound replies to a freshly resolved query.

    The arguments mirror the C callback: query_info struct, module qstate
    (read-only), reply_info struct, the reply rcode, the outgoing edns_data
    (read-only), the outgoing EDNS option list that may be extended, and the
    region to use for any option allocations.

    This demo implementation only logs the event.

    :return: True on success, False on failure.
    """
    log_info("python: called back while replying.")
    return True
def inplace_cache_callback(qinfo, qstate, rep, rcode, edns, opt_list_out,
        region):
    """Inplace hook run when unbound answers a query from its cache.

    :param qinfo: query_info struct;
    :param qstate: module qstate; None for cache answers;
    :param rep: reply_info struct;
    :param rcode: return code for the query;
    :param edns: edns_data sent from the client side; incoming options are
        readable through edns.opt_list and SHOULD NOT be altered;
    :param opt_list_out: the EDNS option list that will be sent as a reply;
        it may be extended with new options;
    :param region: region to allocate temporary data; must be used when
        appending a new option to opt_list_out.
    :return: True on success, False on failure.

    Demonstration: if the client sent EDNS option 65002, echo the same code
    back with the fixed payload 0xdeadbeef.
    """
    log_info("python: called back while answering from cache.")
    # Inspect the incoming EDNS options.
    if not edns_opt_list_is_empty(edns.opt_list):
        log_info("python: available EDNS options:")
        for o in edns.opt_list_iter:
            # Log each option's code and its data as a hex string.
            log_info("python: Code: {}, Data: '{}'".format(o.code,
                "".join('{:02x}'.format(x) for x in o.data)))
            if o.code == 65002:
                log_info("python: *found option code 65002*")
                # add to opt_list
                # Data MUST be represented in a bytearray.
                b = bytearray.fromhex("deadbeef")
                if edns_opt_list_append(opt_list_out, o.code, b, region):
                    log_info("python: *added new option code 65002*")
                else:
                    # Append failed: report failure to unbound.
                    log_info("python: *failed to add new option code 65002*")
                    return False
                # Only the first 65002 option is handled.
                break
    return True
def inplace_local_callback(qinfo, qstate, rep, rcode, edns, opt_list_out, region):
    """Inplace hook run when unbound answers from local data or sends a
    chaos reply.

    Same parameter contract as inplace_reply_callback: the incoming
    edns.opt_list SHOULD NOT be altered, while opt_list_out may be
    extended using *region* for allocations.  This demo implementation
    only logs the event.

    :return: True on success, False on failure.
    """
    log_info("python: called back while replying with local data or chaos reply.")
    return True
def inplace_servfail_callback(qinfo, qstate, rep, rcode, edns, opt_list_out,
        region):
    """Inplace hook run when unbound answers with SERVFAIL.

    :param qinfo: query_info struct;
    :param qstate: module qstate; if not None the relevant opt_lists are
        available here;
    :param rep: reply_info struct; None for servfail answers;
    :param rcode: return code for the query (LDNS_RCODE_SERVFAIL);
    :param edns: edns_data to be sent to the client side; if qstate is None
        edns.opt_list holds the options sent by the client; it SHOULD NOT
        be altered;
    :param opt_list_out: the EDNS option list that will be sent as a reply;
    :param region: region used to allocate the appended option data.
    :return: True on success, False on failure.

    Demonstration: reply with an empty EDNS option, code 65003.
    """
    log_info("python: called back while servfail.")
    b = bytearray.fromhex("")
    # Propagate append failures instead of silently reporting success; this
    # matches the documented contract and the cache callback's behaviour.
    if not edns_opt_list_append(opt_list_out, 65003, b, region):
        return False
    return True
def init_standard(id, env):
    """Preferred module init function (same signature as the C counterpart).

    Registers all four inplace callbacks with unbound and fails fast if any
    registration does not succeed.

    ..note:: unbound prefers this function over the old init function.
    ..note:: the configuration options previously passed to init are now
             available through env.cfg.
    """
    log_info("python: inited script {}".format(env.cfg.python_script))
    # (register function, callback) pairs, tried in order; abort on the
    # first registration that fails.
    registrations = (
        (register_inplace_cb_reply, inplace_reply_callback),
        (register_inplace_cb_reply_cache, inplace_cache_callback),
        (register_inplace_cb_reply_local, inplace_local_callback),
        (register_inplace_cb_reply_servfail, inplace_servfail_callback),
    )
    for register, callback in registrations:
        if not register(callback, env, id):
            return False
    return True
def init(id, cfg):
    """Legacy module entry point, kept for backwards compatibility.

    unbound only calls this when ``init_standard`` is missing; since this
    script defines ``init_standard``, the function is effectively inert
    and SHOULD be omitted to avoid confusing the reader.
    """
    return True
def deinit(id):
    """Module teardown hook; this script holds no state to release."""
    return True
def inform_super(id, qstate, superqstate, qdata):
    """Sub-query completion hook; intentionally a no-op for this module."""
    return True
def operate(id, event, qstate, qdata):
    """Main event hook: hand new/continued queries to the next module and
    finish when the chain reports MODDONE.

    Unknown events are logged and recorded as MODULE_ERROR; note that the
    function still returns True in that case, exactly like the other
    branches.
    """
    if event in (MODULE_EVENT_NEW, MODULE_EVENT_PASS):
        qstate.ext_state[id] = MODULE_WAIT_MODULE
        return True
    if event == MODULE_EVENT_MODDONE:
        qstate.ext_state[id] = MODULE_FINISHED
        return True
    log_err("pythonmod: Unknown event")
    qstate.ext_state[id] = MODULE_ERROR
    return True
| StarcoderdataPython |
11275565 | import numpy as np
import os
import time
import random
import matplotlib.pyplot as plt
from matplotlib import mlab
import forecast_lib as fl
# ---------------------------------------------------------------------------
# Experiment selection: these flags decide which result files are loaded.
# ---------------------------------------------------------------------------
dropout = False            # True -> load the "_dropout" experiment results
strategic_attack = False   # True -> load the "strategic_" experiment results

type_exp = '_dropout' if dropout else ''
if strategic_attack:
    type_exp = 'strategic_' + type_exp
type_exp += '_mult_ensemble'

dir_results = './results/'
impact = np.load(dir_results + 'impact' + type_exp + '.npy', allow_pickle=True)
pred_error = np.load(dir_results + 'pred_error' + type_exp + '.npy', allow_pickle=True)

# impact / pred_error: one row per repetition, one column per ensemble size.
reps, max_num_models = impact.shape
sizes = range(1, max_num_models + 1)

# Figure 1: attack impact averaged over repetitions.
# BUG FIX: savefig() must run before show() -- with non-interactive backends
# show() leaves an empty current figure, so the file was saved blank.
plt.figure(1)
plt.clf()
plt.plot(sizes, np.mean(impact, axis=0))
plt.ylim((0, 1))
plt.xlabel('Number of ensembles')
plt.ylabel(r'Impact ($\hat y - \hat y_a$)')  # raw string: '\h' is an invalid escape
plt.title('Impact with Multiple Ensembles')
plt.savefig('impact_mult_ensemble.pgf', bbox_inches='tight')
plt.show()

# Figure 2: prediction error (MAE) averaged over repetitions.
plt.figure(2)
plt.clf()
plt.plot(sizes, np.mean(pred_error, axis=0))
plt.ylim((0, 1))
plt.xlabel('Number of ensembles')
plt.ylabel(r'Prediction error MAE($y, \hat y_a$)')
plt.title('Prediction Error with Multiple Ensembles')
plt.savefig('pred_error_mult_ensemble.pgf', bbox_inches='tight')
plt.show()
| StarcoderdataPython |
6623457 | import os
import boto3
from typing import List
import json
import sys
import os
import logging
# Root logger for the Lambda runtime; DEBUG is verbose, but these log lines
# are the only visibility into the prowler subprocess output.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
class ProwlerScanGroup:
    """Runs prowler check groups delivered through SNS and logs their output.

    The SNS topic handle is created once at construction; the AWS region is
    taken from the Lambda runtime environment.
    """

    def __init__(self, topic_arn):
        self.__topic = boto3.resource('sns').Topic(topic_arn)
        self.__region = os.environ['AWS_REGION']
        logger.debug(f'topic_arn={topic_arn}')
        logger.debug(f'region={self.__region}')

    def __get_check(self, check_id: str) -> str:
        """Run prowler for *check_id* and return its raw json-asff output."""
        # Build the command once so the log line matches what is actually
        # executed (the old debug message omitted the "-f {region}" flag).
        cmd = (f"/prowler/prowler -r {self.__region} -f {self.__region} "
               f"-c '{check_id}' -M 'json-asff' -S")
        logger.debug('Executing ' + cmd)
        stream = os.popen(cmd)
        return stream.read()

    def handle(self, event, context):
        """Process an SNS event: each record's message names a check group."""
        logger.debug(event)
        records = event['Records']
        for r in records:
            group = r['Sns']['Message']
            logger.debug(self.__get_check(group))
def handler(event, context):
    """AWS Lambda entry point: delegate the SNS event to ProwlerScanGroup."""
    scanner = ProwlerScanGroup(topic_arn=os.environ['topic_arn'])
    scanner.handle(event, context)
    return 'Done: python'
| StarcoderdataPython |
6628879 | <reponame>ikuroNoriiwa/shellcode_transform<filename>utils.py
#!/usr/bin/python3
from sys import exit
def error(msg):
    """Print *msg* in red and terminate the process with exit status 1.

    The diagnostic goes to stderr so it does not pollute captured stdout.
    """
    import sys  # local import keeps this helper self-contained
    print("\033[31mError: {}\033[0m".format(msg), file=sys.stderr)
    sys.exit(1)
1971193 | <gh_stars>0
from itertools import cycle
import matplotlib.gridspec as grsp
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import seaborn as sns
from GPErks.constants import HEIGHT, WIDTH
from GPErks.utils.plotting import get_col, interp_col
def boxplot(ST, S1, S2, index_i, index_ij, ylabel):
    """Boxplots of first-order (S1), total (ST) and second-order (S2)
    sensitivity indices across repetitions.

    ST, S1: one column per parameter name in *index_i*; S2: one column per
    parameter pair in *index_ij* -- assumed from the DataFrame construction
    below.  *ylabel* is only used by the commented-out save path.
    """
    df_ST = pd.DataFrame(data=ST, columns=index_i)
    df_S1 = pd.DataFrame(data=S1, columns=index_i)
    # Second-order columns are labelled "(a, b)" after their parameter pair.
    df_S2 = pd.DataFrame(
        data=S2,
        columns=["(" + elem[0] + ", " + elem[1] + ")" for elem in index_ij],
    )
    plt.style.use("seaborn")
    # 2x2 grid: S1 top-left, ST top-right, S2 spanning the bottom row.
    gs = grsp.GridSpec(2, 2)
    fig = plt.figure(figsize=(2 * WIDTH, 2 * HEIGHT / 2))
    ax0 = fig.add_subplot(gs[0, 0])
    sns.boxplot(ax=ax0, data=df_S1)
    ax0.set_ylim(0, 1)
    ax0.set_title("First-order effect", fontweight="bold", fontsize=12)
    ax0.set_xticklabels(
        ax0.get_xticklabels(), rotation=45, horizontalalignment="right"
    )
    ax1 = fig.add_subplot(gs[0, 1])
    sns.boxplot(ax=ax1, data=df_ST)
    ax1.set_ylim(0, 1)
    ax1.set_title("Total effect", fontweight="bold", fontsize=12)
    ax1.set_xticklabels(
        ax1.get_xticklabels(), rotation=45, horizontalalignment="right"
    )
    ax2 = fig.add_subplot(gs[1, :])
    sns.boxplot(ax=ax2, data=df_S2)
    ax2.set_ylim(0, 1)
    ax2.set_title("Second-order effect", fontweight="bold", fontsize=12)
    ax2.set_xticklabels(
        ax2.get_xticklabels(), rotation=45, horizontalalignment="right"
    )
    fig.tight_layout()
    # plt.savefig(savepath + ylabel + "_box.pdf", bbox_inches="tight", dpi=1000)
    plt.show()
def donut(ST, S1, index_i, ylabel):
    """Two donut charts of mean sensitivity indices: S1 (first-order, plus a
    grey wedge for the higher-order remainder ST.sum() - S1.sum()) and ST
    (total effects).  *ylabel* is only used by the commented-out save path.
    """
    ST_mean = np.mean(ST, axis=0)
    S1_mean = np.mean(S1, axis=0)
    sum_s1 = S1_mean.sum()
    sum_st = ST_mean.sum()
    # Higher-order contribution: whatever the total effects carry beyond
    # the sum of first-order effects.
    ho = sum_st - sum_s1
    x_si = np.array(list(S1_mean) + [ho])
    x_sti = ST_mean
    fig, axes = plt.subplots(1, 2, figsize=(2 * WIDTH, 2 * HEIGHT / 4))
    # One colour per parameter, plus grey for the higher-order wedge.
    c = "blue"
    colors = interp_col(get_col(c), len(index_i))
    colors += [interp_col(get_col("gray"), 6)[2]]
    wedges, _ = axes[0].pie(
        x_si,
        radius=1,
        colors=colors,
        startangle=90,
        counterclock=False,
        wedgeprops=dict(width=0.3, edgecolor="w", linewidth=1),
        normalize=True,
    )
    axes[0].set_title("S1", fontsize=12, fontweight="bold")
    axes[1].pie(
        x_sti,
        radius=1,
        colors=colors,
        startangle=90,
        counterclock=False,
        wedgeprops=dict(width=0.3, edgecolor="w", linewidth=1),
        normalize=True,
    )
    axes[1].set_title("ST", fontsize=12, fontweight="bold")
    # Reuse the S1 wedges for a shared legend below both charts.
    plt.figlegend(
        wedges, index_i + ["higher-order int."], ncol=5, loc="lower center"
    )
    # plt.savefig(
    #     savepath + ylabel + "_donut.pdf", bbox_inches="tight", dpi=1000
    # )
    plt.show()
def fancy_donut(ST, S1, S2, index_i, ylabel):
    """Nested donut of median sensitivity indices: the outer ring splits the
    total effect per parameter, the inner ring splits each parameter's share
    into first-order vs second-order contributions, with a final grey wedge
    for the remaining higher-order interactions.

    Assumes S2 stores the upper-triangular parameter pairs flattened in
    row-major order -- TODO confirm against the sensitivity backend.
    """
    ST = np.median(ST, axis=0)
    S1 = np.median(S1, axis=0)
    S2 = np.median(S2, axis=0)
    D = len(ST)
    # Unpack the flat S2 vector into a symmetric D x D pair matrix V.
    V = np.zeros((D, D), dtype=float)
    for j in range(D - 1):
        # i1:i2 selects the pairs (j, j+1), ..., (j, D-1) in the flat vector.
        i1 = int(j * (D - 1) - np.sum(np.array(range(j))))
        i2 = int((j + 1) * (D - 1) - np.sum(np.array(range(j + 1))))
        V[j, j + 1 :] = S2[i1:i2]
    # Split each pair's contribution equally between its two parameters.
    for m in range(D):
        for n in range(D):
            if m > n:
                V[n, m] = 0.5 * V[n, m]
                V[m, n] = V[n, m]
    # S[:, 0] = first-order, S[:, 1] = summed (halved) second-order shares.
    S = np.hstack(
        (
            S1.reshape(-1, 1),
            np.array([np.sum(V[ind, :]) for ind in range(D)]).reshape(-1, 1),
        )
    )
    # Remainder not explained by first- and second-order terms.
    rem = np.sum(ST) - (np.sum(S[:, 0]) + np.sum(S[:, 1]))
    S = np.vstack((S, np.array([rem, 0]).reshape(1, -1)))
    fig, axis = plt.subplots(1, 1, figsize=(4 * WIDTH / 3, HEIGHT / 2))
    colors = [
        "red",
        "purple",
        "amber",
        "light_green",
        "blue",
        "pink",
        "teal",
        "brown",
    ]
    # Inner ring: two shades per parameter (first-/second-order), then two
    # grey wedges padding the higher-order row of S.
    inner_colors = []
    for c, _ in zip(cycle(colors), index_i):
        cis = interp_col(get_col(c), 6)
        inner_colors.append(cis[3])
        inner_colors.append(cis[2])
    inner_colors += 2 * [interp_col(get_col("gray"), 6)[2]]
    outer_colors = [get_col(c)[1] for c in colors]
    outer_colors += [interp_col(get_col("gray"), 6)[2]]
    # Outer ring: per-parameter totals (row sums of S).
    wedges, _ = axis.pie(
        S.sum(axis=1),
        radius=1,
        colors=outer_colors,
        startangle=90,
        counterclock=False,
        wedgeprops=dict(width=0.3, edgecolor="w"),
    )
    # Inner ring: the flattened S entries, drawn just inside the outer ring.
    axis.pie(
        S.flatten(),
        radius=1 - 0.3,
        colors=inner_colors,
        startangle=90,
        counterclock=False,
        wedgeprops=dict(width=0.3, edgecolor="w"),
    )
    axis.set(aspect="equal")
    axis.set_title(ylabel, fontsize=12, fontweight="bold")
    plt.figlegend(
        wedges,
        index_i + ["higher-order\ninteractions"],
        ncol=5,
        loc="lower center",
    )
    plt.show()
def heatmap(ST, S1, index_i, ylabel):
    """Single-row heatmaps of the mean first-order (S1) and total (ST)
    sensitivity indices, one column per parameter in *index_i*; *ylabel*
    labels the row.
    """
    # Reshape to (1, D) so seaborn draws one row with len(index_i) cells.
    ST_mean = np.mean(ST, axis=0).reshape(1, -1)
    S1_mean = np.mean(S1, axis=0).reshape(1, -1)
    fig, axes = plt.subplots(1, 2, figsize=(2 * WIDTH, 2 * HEIGHT / 8))
    df = pd.DataFrame(data=S1_mean, index=[ylabel], columns=index_i)
    h1 = sns.heatmap(
        df,
        cmap="rocket_r",
        vmin=0.0,
        vmax=1.0,
        square=True,
        linewidth=0.1,
        cbar_kws={"shrink": 0.8},
        ax=axes[0],
    )
    axes[0].set_title("S1", fontsize=12, fontweight="bold")
    axes[0].tick_params(left=False, bottom=False)
    h1.set_xticklabels(h1.get_xticklabels(), rotation=45, va="top")
    h1.set_yticklabels(h1.get_yticklabels(), rotation=0, ha="right")
    # Same layout for the total-effect panel; `df` is deliberately reused.
    df = pd.DataFrame(data=ST_mean, index=[ylabel], columns=index_i)
    ht = sns.heatmap(
        df,
        cmap="rocket_r",
        vmin=0.0,
        vmax=1.0,
        square=True,
        linewidth=0.1,
        cbar_kws={"shrink": 0.8},
        ax=axes[1],
    )
    axes[1].set_title("ST", fontsize=12, fontweight="bold")
    axes[1].tick_params(left=False, bottom=False)
    ht.set_xticklabels(ht.get_xticklabels(), rotation=45, va="top")
    ht.set_yticklabels(ht.get_yticklabels(), rotation=0, ha="right")
    # plt.savefig(savepath + ylabel + "_heat.pdf", bbox_inches="tight", dpi=1000)
    plt.show()
def network(ST, S1, S2, index_i, index_ij, ylabel):
    """Circular network plot of sensitivity indices: node size encodes the
    total effect (background disc) and first-order effect (foreground disc),
    edge width encodes the second-order interaction between two parameters.
    All indices are normalised by the single largest mean value so sizes are
    comparable.  *ylabel* is only used by the commented-out save path.
    """
    def angle(p, c):
        # Polar angle of point p around centre c, in [0, 2*pi); used to push
        # node labels radially outwards.
        [dx, dy] = p - c
        if dx == 0:
            if dy > 0:
                return 0.5 * np.pi
            else:
                return 1.5 * np.pi
        elif dx > 0:
            if dy >= 0:
                return np.arctan(dy / dx)
            else:
                return 2.0 * np.pi + np.arctan(dy / dx)
        elif dx < 0:
            return np.pi + np.arctan(dy / dx)
    ST_mean = np.mean(ST, axis=0)
    S1_mean = np.mean(S1, axis=0)
    S2_mean = np.mean(S2, axis=0)
    # Normalise everything by the overall maximum so the largest marker
    # maps to max_size below.
    maximum = np.max([ST_mean.max(), S1_mean.max(), S2_mean.max()])
    ST_mean /= maximum
    S1_mean /= maximum
    S2_mean /= maximum
    min_size = 0
    max_size = 200
    foreground_node_size = [
        min_size + (max_size - min_size) * k for k in list(S1_mean)
    ]
    # NOTE: "backgroud" is a pre-existing typo kept for byte-compatibility.
    backgroud_node_size = [
        min_size + (max_size - min_size) * k for k in list(ST_mean)
    ]
    # sqrt(area / pi): edge widths scale like the radius of an equivalent disc.
    edge_width = [
        np.sqrt((min_size + (max_size - min_size) * k) / np.pi)
        for k in list(S2_mean)
    ]
    # One edge per parameter pair, weighted by its second-order index.
    Sources = list(list(zip(*index_ij))[0])
    Targets = list(list(zip(*index_ij))[1])
    Weights = list(S2_mean)
    G = nx.Graph()
    for s, t, w in zip(Sources, Targets, Weights):
        G.add_edges_from([(s, t)], w=w)
    Pos = nx.circular_layout(G)
    c = "blue"
    colors = interp_col(get_col(c), 5)
    fig, axis = plt.subplots(1, 1, figsize=(WIDTH, WIDTH))
    # Draw the larger ST discs first so the S1 discs sit on top of them.
    nx.draw_networkx_nodes(
        G,
        Pos,
        node_size=backgroud_node_size,
        node_color=len(index_i) * [colors[4]],
        ax=axis,
    )
    nx.draw_networkx_nodes(
        G,
        Pos,
        node_size=foreground_node_size,
        node_color=len(index_i) * [colors[0]],
        ax=axis,
    )
    nx.draw_networkx_edges(
        G,
        Pos,
        width=edge_width,
        edge_color=len(index_ij) * [colors[2]],
        alpha=0.8,
        ax=axis,
    )
    center = [0.0, 0.0]
    radius = 1.0
    names = nx.draw_networkx_labels(
        G, Pos, font_size=12, font_family="DejaVu Sans", ax=axis
    )
    # Move each label 20% beyond the unit circle, along its node's angle.
    for node, text in names.items():
        position = (
            1.2 * radius * np.cos(angle(Pos[node], center)),
            1.2 * radius * np.sin(angle(Pos[node], center)),
        )
        text.set_position(position)
        text.set_clip_on(False)
    axis.axis("equal")
    axis.set_axis_off()
    fig.tight_layout()
    # plt.savefig(
    #     savepath + ylabel + "_network.pdf", bbox_inches="tight", dpi=1000
    # )
    plt.show()
| StarcoderdataPython |
1672325 | <reponame>medunigraz/outpost.django.base
from django.dispatch import Signal
# ``providing_args`` was documentation-only; it was deprecated in Django 3.0
# and removed in Django 4.0.  The signal still delivers ``name`` and
# ``model`` as keyword arguments to receivers.
materialized_view_refreshed = Signal()  # kwargs: name, model
| StarcoderdataPython |
3503718 | <filename>questionA/questionA.py
def isOverlap(x, y):
    """Return 'overlap' if the half-open intervals x=(x0, x1) and
    y=(y0, y1) intersect, else 'no overlap'.

    Two half-open intervals intersect iff each starts before the other
    ends: x0 < y1 and y0 < x1.  The previous endpoint-membership test
    missed containment, e.g. x=(1, 10), y=(3, 4) was reported as
    'no overlap'.
    """
    return 'overlap' if x[0] < y[1] and y[0] < x[1] else 'no overlap'
if __name__ == '__main__':
    # Demo: print the overlap verdict for a few representative interval pairs.
    cases = [
        ((1, 2), (1, 3)),
        ((1, 2), (2, 3)),
        ((3, 4), (1, 3)),
        ((3, 4), (1, 2)),
        ((3, 4), (3, 4)),
    ]
    for first, second in cases:
        print(isOverlap(first, second))
| StarcoderdataPython |
6684583 | <filename>src/aiotube/_rgxs.py
import re
class _ChannelPatterns:
    """Pre-compiled regexes for scraping YouTube channel pages.

    Each pattern captures a fragment of the embedded JSON/HTML; patterns are
    tied to YouTube's current page markup and may break when it changes.
    """
    name = re.compile('channelMetadataRenderer\":{\"title\":\"(.*?)\"')
    # NOTE: the attribute name `id` shadows the builtin, kept for API
    # compatibility with existing callers.
    id = re.compile('channelId\":\"(.*?)\"')
    verified = re.compile('"label":"Verified"')
    check_live = re.compile('{"text":"LIVE"}')
    live = re.compile("thumbnailOverlays\":\[(.*?)]")
    video_id = re.compile('videoId\":\"(.*?)\"')
    uploads = re.compile("gridVideoRenderer\":{\"videoId\":\"(.*?)\"")
    subscribers = re.compile("}},\"simpleText\":\"(.*?) ")
    views = re.compile("viewCountText\":{\"simpleText\":\"(.*?)\"}")
    creation = re.compile("{\"text\":\"Joined \"},{\"text\":\"(.*?)\"}")
    country = re.compile("country\":{\"simpleText\":\"(.*?)\"}")
    custom_url = re.compile("canonicalChannelUrl\":\"(.*?)\"")
    description = re.compile("{\"description\":{\"simpleText\":\"(.*?)\"}")
    avatar = re.compile("height\":88},{\"url\":\"(.*?)\"")
    banner = re.compile("width\":1280,\"height\":351},{\"url\":\"(.*?)\"")
    playlists = re.compile("{\"url\":\"/playlist\?list=(.*?)\"")
    video_count = re.compile("videoCountText\":{\"runs\":\[{\"text\":(.*?)}")
    links = re.compile("q=https%3A%2F%2F(.*?)\"")
    upload_chunk = re.compile("gridVideoRenderer\":{(.*?)\"navigationEndpoint")
    upload_chunk_fl_1 = re.compile("simpleText\":\"Streamed")
    upload_chunk_fl_2 = re.compile("default_live.")
    upcoming_check = re.compile("\"title\":\"Upcoming live streams\"")
    upcoming = re.compile("gridVideoRenderer\":{\"videoId\":\"(.*?)\"")
class _VideoPatterns:
    """Pre-compiled regexes for scraping YouTube video watch pages."""
    video_id = re.compile('videoId\":\"(.*?)\"')
    title = re.compile("title\":\"(.*?)\"")
    duration = re.compile("approxDurationMs\":\"(.*?)\"")
    upload_date = re.compile("uploadDate\":\"(.*?)\"")
    author_id = re.compile("channelIds\":\[\"(.*?)\"")
    description = re.compile("shortDescription\":\"(.*)\",\"isCrawlable")
    tags = re.compile("<meta name=\"keywords\" content=\"(.*?)\">")
    is_streamed = re.compile("simpleText\":\"Streamed live")
    is_premiered = re.compile("dateText\":{\"simpleText\":\"Premiered")
    views = re.compile("videoViewCountRenderer\":{\"viewCount\":{\"simpleText\":\"(.*?)\"")
    likes = re.compile("toggledText\":{\"accessibility\":{\"accessibilityData\":{\"label\":\"(.*?) ")
    thumbnail = re.compile("playerMicroformatRenderer\":{\"thumbnail\":{\"thumbnails\":\[{\"url\":\"(.*?)\"")
class _PlaylistPatterns:
    """Pre-compiled regexes for scraping YouTube playlist pages."""
    name = re.compile("{\"title\":\"(.*?)\"")
    video_count = re.compile("stats\":\[{\"runs\":\[{\"text\":\"(.*?)\"")
    video_id = re.compile("videoId\":\"(.*?)\"")
    thumbnail = re.compile("og:image\" content=\"(.*?)\?")
class _ExtraPatterns:
    """Miscellaneous regexes shared by other scrapers."""
    video_id = re.compile("videoId\":\"(.*?)\"")
class _QueryPatterns:
    """Pre-compiled regexes for scraping YouTube search-result pages."""
    channel_id = re.compile("channelId\":\"(.*?)\"")
    video_id = re.compile("videoId\":\"(.*?)\"")
    playlist_id = re.compile("playlistId\":\"(.*?)\"")
| StarcoderdataPython |
1626523 | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import pytest
import requests
from airbyte_cdk.sources.streams.http.auth import NoAuth
from source_intercom.source import Companies, Contacts, IntercomStream
# Parametrize fixtures: each tuple is
# (stream class, mocked JSON response body, expected next_page_token dict).
# The three cases cover the base pages.next URL, the companies scroll_param,
# and the contacts starting_after cursor.
test_data = [
    (
        IntercomStream,
        {"data": [], "pages": {"next": "https://api.intercom.io/conversations?per_page=1&page=2"}},
        {"per_page": "1", "page": "2"},
    ),
    (
        Companies,
        {"data": [{"type": "company"}], "scroll_param": "25b649f7-4d33-4ef6-88f5-60e5b8244309"},
        {"scroll_param": "25b649f7-4d33-4ef6-88f5-60e5b8244309"},
    ),
    (
        Contacts,
        {
            "data": [],
            "pages": {
                "next": {"starting_after": "1HaSB+xrOyyMXAkS/c1RteCL7BzOzTvYjmjakgTergIH31eoe2v4/sbLsJWP" "\nIncfQLD3ouPkZlCwJ86F\n"}
            },
        },
        {"starting_after": "1HaSB+xrOyyMXAkS/c1RteCL7BzOzTvYjmjakgTergIH31eoe2v4/sbLsJWP\nIncfQLD3ouPkZlCwJ86F\n"},
    ),
]
@pytest.mark.parametrize(
    "intercom_class,response_json,expected_output_token", test_data, ids=["base pagination", "companies pagination", "contacts pagination"]
)
def test_get_next_page_token(intercom_class, response_json, expected_output_token, requests_mock):
    """next_page_token() must extract the pagination parameters from a mocked
    API response so they can be passed to the next API call."""
    url = "https://api.intercom.io/conversations"
    requests_mock.get(url, json=response_json)
    response = requests.get(url)
    # Concrete subclass: the abstract stream only needs a `path` to be usable.
    stream_cls = type("intercom_class", (intercom_class,), {"path": ""})
    actual = stream_cls(authenticator=NoAuth).next_page_token(response)
    assert actual == expected_output_token
| StarcoderdataPython |
6527350 | <filename>docker_emperor/commands/launch.py
import os
from docker_emperor.commands import Command
import docker_emperor.logger as logger
def run(root, *args, **kwargs):
    """Launch the project: ensure the machine is up, tear the stack down,
    then bring it back up, forwarding any extra CLI arguments to `up`."""
    logger.cmd('Run project <b>%s</b>' % (root.compose.name, ))
    # Preparation steps run internally (not surfaced as user commands).
    for step in ('machine:start', 'down'):
        root.run_command(step, internal=True)
    root.run_command('up', *args, internal=True)
9608489 | #!/usr/bin/python
import sys
from sense_hat import SenseHat
text = "Hello"
colorRed = 255
colorGreen = 0
colorBlue = 0
for index in range(len(sys.argv)):
next = index+1
if next >= len(sys.argv):
break
if "--text" == sys.argv[index]:
text = sys.argv[next]
elif "--color-red" == sys.argv[index]:
colorRed = int(sys.argv[next])
elif "--color-green" == sys.argv[index]:
colorGreen = int(sys.argv[next])
elif "--color-blue" == sys.argv[index]:
colorBlue = int(sys.argv[next])
color = (colorRed, colorGreen, colorBlue)
print "Text: "+text
sense = SenseHat()
sense.set_rotation(180)
try:
sense.show_message(text, text_colour=color)
except ValueError as error:
print "Error: " + str(error)
| StarcoderdataPython |
11293845 | <filename>output/models/nist_data/atomic/float_pkg/schema_instance/nistschema_sv_iv_atomic_float_pattern_2_xsd/__init__.py
from output.models.nist_data.atomic.float_pkg.schema_instance.nistschema_sv_iv_atomic_float_pattern_2_xsd.nistschema_sv_iv_atomic_float_pattern_2 import NistschemaSvIvAtomicFloatPattern2
# Explicit public API of this generated package: re-export the single
# schema-instance class imported above.
__all__ = [
    "NistschemaSvIvAtomicFloatPattern2",
]
| StarcoderdataPython |
3528283 | <filename>tests/karura_test/insights/test_label_format_insight.py<gh_stars>10-100
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../../../"))
from datetime import datetime
import unittest
import numpy as np
import pandas as pd
from karura.core.dataframe_extension import DataFrameExtension
from karura.core.insights import LabelFormatInsight
class TestLabelFormatInsight(unittest.TestCase):
    """LabelFormatInsight should target the configured label column and
    expose an invertible transformer for it."""

    @staticmethod
    def _build(target):
        # Small mixed frame: one numeric and one categorical column.
        data = {
            "numerical": [0, 1, 2, 3],
            "categorical": ["a", "b", "c", "a"],
        }
        dfe = DataFrameExtension(
            pd.DataFrame(data), categoricals=("categorical"), target=target
        )
        return data, dfe

    def test_adopt_categorical(self):
        data, dfe = self._build("categorical")
        insight = LabelFormatInsight()
        targets = insight.get_insight_targets(dfe)
        self.assertEqual("categorical", targets[0])
        self.assertTrue(insight.adopt(dfe))
        transformer = insight.get_transformer(dfe)
        restored = transformer.inverse_transform(dfe.df["categorical"])
        self.assertEqual(restored.tolist(), data["categorical"])

    def test_adopt_numerical(self):
        data, dfe = self._build("numerical")
        insight = LabelFormatInsight()
        targets = insight.get_insight_targets(dfe)
        self.assertEqual("numerical", targets[0])
        self.assertTrue(insight.adopt(dfe))
        transformer = insight.get_transformer(dfe)
        restored = transformer.inverse_transform(np.array(dfe.df["numerical"]))
        # Allow floating-point round-trip error from the transformer.
        diff = sum(restored.flatten() - np.array(data["numerical"]))
        self.assertTrue(diff < 1e-10)
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
3275999 | <reponame>BobbyEllena/bugtracker<gh_stars>0
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from bug.models import BugModel, CustUser
from bug.forms import LoginForm, CreateUser
# Register your models here.
class CustomUserAdmin(UserAdmin):
    """Admin options for the custom user model.

    NOTE(review): ``add_form`` (creation form) and ``form`` (change form)
    both point at CreateUser here -- confirm a single form is intended for
    both flows.
    """
    add_form = CreateUser
    form = CreateUser
    model = CustUser
    list_display = ['email', 'username',]
admin.site.register(BugModel)
# Attach CustomUserAdmin explicitly: without the second argument the class
# defined above was dead code and CustUser used the default ModelAdmin.
admin.site.register(CustUser, CustomUserAdmin)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.