id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
1843811 | <gh_stars>1-10
###############################################################################
# tsp_instance.py: data structures and support function to deal with instances
# of the Traveling Salesman Problem.
#
# (c) Copyright 2019, <NAME>. All Rights Reserved.
#
# This code is released under LICENSE.md.
#
# Created on: Nov 18, 2019 by ceandrade
# Last update: Nov 18, 2019 by ceandrade
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###############################################################################
from brkga_mp_ipr.exceptions import LoadError
class TSPInstance():
    """
    Represents an instance for the Traveling Salesman Problem. The constructor
    loads a upper triangular matrix as following:

        number of nodes (n)
        dist12 dist13 dist14 ... dist1n
        dist23 dist24 ... dist2(n - 1)
        ...
        dist(n-2)(n-1)

    For example, for n = 4 we have

        4
        12 13 14
        23 24
        34
    """

    def __init__(self, filename: str):
        """
        Initializes the instance loading from a file.

        Args:
            filename: path to an instance file in the upper-triangular
                format described in the class docstring.

        Raises:
            LoadError: if the file is empty, malformed, or does not contain
                exactly n*(n-1)/2 distances.
        """
        with open(filename, "r") as hd:
            lines = hd.readlines()
        if not lines:
            # Bug fix: the message previously printed the literal
            # "(unknown)" instead of the offending file name.
            raise LoadError(f"Cannot read file '{filename}'")

        line_number = 1
        try:
            self.num_nodes = int(lines[0])
            # Number of entries in a strict upper-triangular n x n matrix.
            # Bug fix: use integer division (was float `/`) so the
            # validation below compares exact integers.
            matrix_size = (self.num_nodes * (self.num_nodes - 1)) // 2
            self.distances = []
            for i in range(1, self.num_nodes):
                line_number = i + 1
                values = [float(x.strip()) for x in lines[i].split()]
                self.distances.extend(values)
        except Exception as exc:
            raise LoadError(
                f"Error reading line {line_number} of '{filename}'"
            ) from exc

        # Bug fix: `matrix_size` was computed but never used. Validate that
        # the file supplied exactly the expected number of distances so a
        # truncated file fails loudly instead of producing wrong answers.
        if len(self.distances) != matrix_size:
            raise LoadError(
                f"Expected {matrix_size} distances in '{filename}' "
                f"but found {len(self.distances)}"
            )

    ###########################################################################

    def distance(self, i: int, j: int) -> float:
        """
        Returns the distance between nodes `i` and `j` (0-based indices).
        The order of the arguments does not matter.
        """
        # Bug fix: for i == j the original formula produced index -1,
        # silently returning the last stored distance instead of zero.
        if i == j:
            return 0.0
        if i > j:
            i, j = j, i
        # Row-major offset into the flattened strict upper triangle.
        return self.distances[(i * (self.num_nodes - 1)) - ((i - 1) * i // 2) +
                              (j - i - 1)]
| StarcoderdataPython |
375793 | <filename>test/test_datapipe.py
import os
import pickle
import random
import tempfile
import warnings
import tarfile
import zipfile
import numpy as np
from PIL import Image
from unittest import skipIf
import torch
import torch.nn as nn
from torch.testing._internal.common_utils import (TestCase, run_tests)
from torch.utils.data import IterDataPipe, RandomSampler, DataLoader
from typing import List, Tuple, Dict, Any, Type
import torch.utils.data.datapipes as dp
from torch.utils.data.datapipes.utils.decoder import (
basichandlers as decoder_basichandlers,
imagehandler as decoder_imagehandler)
# Optional dependency: torchvision is only needed by the transforms tests.
try:
    import torchvision.transforms
    HAS_TORCHVISION = True
except ImportError:
    HAS_TORCHVISION = False
# Decorator that skips a test when torchvision is not installed.
skipIfNoTorchVision = skipIf(not HAS_TORCHVISION, "no torchvision")
def create_temp_dir_and_files():
    """Build the temporary directory tree used by the datapipe tests.

    Layout: a top-level directory holding a '.txt', a '.byte', and an
    (empty) '.empty' file, plus one sub-directory holding a '.txt' and a
    '.byte' file. The text files contain '0123456789abcdef' as str and
    the byte files the same sequence as bytes.

    Returns:
        [(top_dir, txt_path, byte_path, empty_path),
         (sub_dir, sub_txt_path, sub_byte_path)]
        where the dir entries are TemporaryDirectory handles that the
        caller is responsible for cleaning up (done in tearDown()).
    """
    # The temp dirs and files within them are released/deleted in tearDown().
    # `noqa: P201` suppresses the warning about not releasing the handles here.
    def _touch(parent, suffix):
        # Create (but keep) an empty named file and return its path.
        with tempfile.NamedTemporaryFile(dir=parent, delete=False, suffix=suffix) as handle:
            return handle.name

    temp_dir = tempfile.TemporaryDirectory()  # noqa: P201
    top_path = temp_dir.name
    temp_file1_name = _touch(top_path, '.txt')
    temp_file2_name = _touch(top_path, '.byte')
    temp_file3_name = _touch(top_path, '.empty')

    with open(temp_file1_name, 'w') as text_out:
        text_out.write('0123456789abcdef')
    with open(temp_file2_name, 'wb') as byte_out:
        byte_out.write(b"0123456789abcdef")

    temp_sub_dir = tempfile.TemporaryDirectory(dir=top_path)  # noqa: P201
    sub_path = temp_sub_dir.name
    temp_sub_file1_name = _touch(sub_path, '.txt')
    temp_sub_file2_name = _touch(sub_path, '.byte')

    with open(temp_sub_file1_name, 'w') as text_out:
        text_out.write('0123456789abcdef')
    with open(temp_sub_file2_name, 'wb') as byte_out:
        byte_out.write(b"0123456789abcdef")

    return [(temp_dir, temp_file1_name, temp_file2_name, temp_file3_name),
            (temp_sub_dir, temp_sub_file1_name, temp_sub_file2_name)]
class TestIterableDataPipeBasic(TestCase):
    """Smoke tests for the basic IterDataPipe implementations:
    directory listing, disk loading, tar/zip extraction, decoding,
    and key-based grouping.
    """

    def setUp(self):
        # One temp dir with three files ('.txt', '.byte', '.empty') plus a
        # sub-dir with two more files ('.txt', '.byte'); see
        # create_temp_dir_and_files() for the exact contents.
        ret = create_temp_dir_and_files()
        self.temp_dir = ret[0][0]
        self.temp_files = ret[0][1:]
        self.temp_sub_dir = ret[1][0]
        self.temp_sub_files = ret[1][1:]

    def tearDown(self):
        # Best-effort cleanup: warn (don't fail the test run) if the OS still
        # holds a handle on one of the temp files.
        try:
            self.temp_sub_dir.cleanup()
            self.temp_dir.cleanup()
        except Exception as e:
            warnings.warn("TestIterableDatasetBasic was not able to cleanup temp dir due to {}".format(str(e)))

    def test_listdirfiles_iterable_datapipe(self):
        """ListDirFiles yields top-level files only, or all with recursive=True."""
        temp_dir = self.temp_dir.name
        datapipe = dp.iter.ListDirFiles(temp_dir, '')
        count = 0
        for pathname in datapipe:
            count = count + 1
            self.assertTrue(pathname in self.temp_files)
        self.assertEqual(count, len(self.temp_files))
        count = 0
        datapipe = dp.iter.ListDirFiles(temp_dir, '', recursive=True)
        for pathname in datapipe:
            count = count + 1
            self.assertTrue((pathname in self.temp_files) or (pathname in self.temp_sub_files))
        self.assertEqual(count, len(self.temp_files) + len(self.temp_sub_files))

    def test_loadfilesfromdisk_iterable_datapipe(self):
        """LoadFilesFromDisk yields (pathname, stream) records matching disk contents."""
        # test import datapipe class directly
        from torch.utils.data.datapipes.iter import ListDirFiles, LoadFilesFromDisk
        temp_dir = self.temp_dir.name
        datapipe1 = ListDirFiles(temp_dir, '')
        datapipe2 = LoadFilesFromDisk(datapipe1)
        count = 0
        for rec in datapipe2:
            count = count + 1
            self.assertTrue(rec[0] in self.temp_files)
            self.assertTrue(rec[1].read() == open(rec[0], 'rb').read())
        self.assertEqual(count, len(self.temp_files))

    def test_readfilesfromtar_iterable_datapipe(self):
        """ReadFilesFromTar yields one (name, stream) record per archive member."""
        temp_dir = self.temp_dir.name
        temp_tarfile_pathname = os.path.join(temp_dir, "test_tar.tar")
        with tarfile.open(temp_tarfile_pathname, "w:gz") as tar:
            tar.add(self.temp_files[0])
            tar.add(self.temp_files[1])
            tar.add(self.temp_files[2])
        datapipe1 = dp.iter.ListDirFiles(temp_dir, '*.tar')
        datapipe2 = dp.iter.LoadFilesFromDisk(datapipe1)
        datapipe3 = dp.iter.ReadFilesFromTar(datapipe2)
        # read extracted files before reaching the end of the tarfile
        count = 0
        for rec, temp_file in zip(datapipe3, self.temp_files):
            count = count + 1
            self.assertEqual(os.path.basename(rec[0]), os.path.basename(temp_file))
            self.assertEqual(rec[1].read(), open(temp_file, 'rb').read())
        self.assertEqual(count, len(self.temp_files))
        # read extracted files after reaching the end of the tarfile
        count = 0
        data_refs = []
        for rec in datapipe3:
            count = count + 1
            data_refs.append(rec)
        self.assertEqual(count, len(self.temp_files))
        for i in range(0, count):
            self.assertEqual(os.path.basename(data_refs[i][0]), os.path.basename(self.temp_files[i]))
            self.assertEqual(data_refs[i][1].read(), open(self.temp_files[i], 'rb').read())

    def test_readfilesfromzip_iterable_datapipe(self):
        """ReadFilesFromZip mirrors the tar test using a zip archive."""
        temp_dir = self.temp_dir.name
        temp_zipfile_pathname = os.path.join(temp_dir, "test_zip.zip")
        with zipfile.ZipFile(temp_zipfile_pathname, 'w') as myzip:
            myzip.write(self.temp_files[0])
            myzip.write(self.temp_files[1])
            myzip.write(self.temp_files[2])
        datapipe1 = dp.iter.ListDirFiles(temp_dir, '*.zip')
        datapipe2 = dp.iter.LoadFilesFromDisk(datapipe1)
        datapipe3 = dp.iter.ReadFilesFromZip(datapipe2)
        # read extracted files before reaching the end of the zipfile
        count = 0
        for rec, temp_file in zip(datapipe3, self.temp_files):
            count = count + 1
            self.assertEqual(os.path.basename(rec[0]), os.path.basename(temp_file))
            self.assertEqual(rec[1].read(), open(temp_file, 'rb').read())
        self.assertEqual(count, len(self.temp_files))
        # read extracted files after reaching the end of the zipfile
        count = 0
        data_refs = []
        for rec in datapipe3:
            count = count + 1
            data_refs.append(rec)
        self.assertEqual(count, len(self.temp_files))
        for i in range(0, count):
            self.assertEqual(os.path.basename(data_refs[i][0]), os.path.basename(self.temp_files[i]))
            self.assertEqual(data_refs[i][1].read(), open(self.temp_files[i], 'rb').read())

    def test_routeddecoder_iterable_datapipe(self):
        """RoutedDecoder dispatches records to handlers by file extension."""
        temp_dir = self.temp_dir.name
        temp_pngfile_pathname = os.path.join(temp_dir, "test_png.png")
        # 2x2 all-red image; decoded as float RGB it should be all (1, 0, 0).
        img = Image.new('RGB', (2, 2), color='red')
        img.save(temp_pngfile_pathname)
        datapipe1 = dp.iter.ListDirFiles(temp_dir, ['*.png', '*.txt'])
        datapipe2 = dp.iter.LoadFilesFromDisk(datapipe1)
        datapipe3 = dp.iter.RoutedDecoder(datapipe2, handlers=[decoder_imagehandler('rgb')])
        datapipe3.add_handler(decoder_basichandlers)
        for rec in datapipe3:
            ext = os.path.splitext(rec[0])[1]
            if ext == '.png':
                expected = np.array([[[1., 0., 0.], [1., 0., 0.]], [[1., 0., 0.], [1., 0., 0.]]], dtype=np.single)
                self.assertTrue(np.array_equal(rec[1], expected))
            else:
                self.assertTrue(rec[1] == open(rec[0], 'rb').read().decode('utf-8'))

    def test_groupbykey_iterable_datapipe(self):
        """GroupByKey pairs tar members sharing a basename into fixed-size groups."""
        temp_dir = self.temp_dir.name
        temp_tarfile_pathname = os.path.join(temp_dir, "test_tar.tar")
        file_list = [
            "a.png", "b.png", "c.json", "a.json", "c.png", "b.json", "d.png",
            "d.json", "e.png", "f.json", "g.png", "f.png", "g.json", "e.json",
            "h.txt", "h.json"]
        with tarfile.open(temp_tarfile_pathname, "w:gz") as tar:
            for file_name in file_list:
                file_pathname = os.path.join(temp_dir, file_name)
                with open(file_pathname, 'w') as f:
                    f.write('12345abcde')
                tar.add(file_pathname)
        datapipe1 = dp.iter.ListDirFiles(temp_dir, '*.tar')
        datapipe2 = dp.iter.LoadFilesFromDisk(datapipe1)
        datapipe3 = dp.iter.ReadFilesFromTar(datapipe2)
        datapipe4 = dp.iter.GroupByKey(datapipe3, group_size=2)
        # Groups are emitted in completion order, not alphabetical order.
        expected_result = [("a.png", "a.json"), ("c.png", "c.json"), ("b.png", "b.json"), ("d.png", "d.json"), (
            "f.png", "f.json"), ("g.png", "g.json"), ("e.png", "e.json"), ("h.json", "h.txt")]
        count = 0
        for rec, expected in zip(datapipe4, expected_result):
            count = count + 1
            self.assertEqual(os.path.basename(rec[0][0]), expected[0])
            self.assertEqual(os.path.basename(rec[1][0]), expected[1])
            self.assertEqual(rec[0][1].read(), b'12345abcde')
            self.assertEqual(rec[1][1].read(), b'12345abcde')
        self.assertEqual(count, 8)
class IDP_NoLen(IterDataPipe):
    """Iterable datapipe wrapper that deliberately omits ``__len__``,
    used to exercise the NotImplementedError paths of the combinators.
    """

    def __init__(self, input_dp):
        super().__init__()
        self.input_dp = input_dp

    def __iter__(self):
        # Delegate iteration straight to the wrapped source.
        yield from self.input_dp
class IDP(IterDataPipe):
    """Iterable datapipe wrapper that exposes the length of its source.

    The length is captured at construction time, so the wrapped input
    must itself support ``len()``.
    """

    def __init__(self, input_dp):
        super().__init__()
        self.input_dp = input_dp
        self.length = len(input_dp)

    def __iter__(self):
        # Delegate iteration straight to the wrapped source.
        yield from self.input_dp

    def __len__(self):
        return self.length
def _fake_fn(data, *args, **kwargs):
return data
def _fake_filter_fn(data, *args, **kwargs):
return data >= 5
def _worker_init_fn(worker_id):
random.seed(123)
class TestFunctionalIterDataPipe(TestCase):
    """Tests for the functional IterDataPipe combinators: pickling, concat,
    map, collate, batch, bucket_batch, filter, sampler, shuffle, transforms,
    and zip.
    """

    def test_picklable(self):
        """Datapipes built from named functions pickle; lambdas warn and fail."""
        arr = range(10)
        picklable_datapipes: List[Tuple[Type[IterDataPipe], IterDataPipe, Tuple, Dict[str, Any]]] = [
            (dp.iter.Map, IDP(arr), (), {}),
            (dp.iter.Map, IDP(arr), (_fake_fn, (0, ), {'test': True}), {}),
            (dp.iter.Collate, IDP(arr), (), {}),
            (dp.iter.Collate, IDP(arr), (_fake_fn, (0, ), {'test': True}), {}),
            (dp.iter.Filter, IDP(arr), (_fake_filter_fn, (0, ), {'test': True}), {}),
        ]
        for dpipe, input_dp, dp_args, dp_kwargs in picklable_datapipes:
            p = pickle.dumps(dpipe(input_dp, *dp_args, **dp_kwargs))  # type: ignore

        # Bug fix: the Collate entry previously read `lambda x: xi` — an
        # undefined name. It never raised only because these lambdas are
        # never called (pickling fails first); the intended callable is
        # the identity lambda.
        unpicklable_datapipes: List[Tuple[Type[IterDataPipe], IterDataPipe, Tuple, Dict[str, Any]]] = [
            (dp.iter.Map, IDP(arr), (lambda x: x, ), {}),
            (dp.iter.Collate, IDP(arr), (lambda x: x, ), {}),
            (dp.iter.Filter, IDP(arr), (lambda x: x >= 5, ), {}),
        ]
        for dpipe, input_dp, dp_args, dp_kwargs in unpicklable_datapipes:
            with warnings.catch_warnings(record=True) as wa:
                datapipe = dpipe(input_dp, *dp_args, **dp_kwargs)
                self.assertEqual(len(wa), 1)
                self.assertRegex(str(wa[0].message), r"^Lambda function is not supported for pickle")
            with self.assertRaises(AttributeError):
                p = pickle.dumps(datapipe)  # type: ignore

    def test_concat_datapipe(self):
        """Concat validates inputs, chains sources, and propagates length."""
        input_dp1 = IDP(range(10))
        input_dp2 = IDP(range(5))

        with self.assertRaisesRegex(ValueError, r"Expected at least one DataPipe"):
            dp.iter.Concat()

        with self.assertRaisesRegex(TypeError, r"Expected all inputs to be `IterDataPipe`"):
            dp.iter.Concat(input_dp1, ())

        concat_dp = input_dp1.concat(input_dp2)
        self.assertEqual(len(concat_dp), 15)
        self.assertEqual(list(concat_dp), list(range(10)) + list(range(5)))

        # Test Reset
        self.assertEqual(list(concat_dp), list(range(10)) + list(range(5)))

        # Length is unavailable when any source lacks __len__.
        input_dp_nl = IDP_NoLen(range(5))
        concat_dp = input_dp1.concat(input_dp_nl)
        with self.assertRaises(NotImplementedError):
            len(concat_dp)
        self.assertEqual(list(d for d in concat_dp), list(range(10)) + list(range(5)))

    def test_map_datapipe(self):
        """Map applies fn (directly, with args/kwargs, or via partial) and keeps length."""
        input_dp = IDP(range(10))

        def fn(item, dtype=torch.float, *, sum=False):
            data = torch.tensor(item, dtype=dtype)
            return data if not sum else data.sum()

        map_dp = input_dp.map(fn)
        self.assertEqual(len(input_dp), len(map_dp))
        for x, y in zip(map_dp, input_dp):
            self.assertEqual(x, torch.tensor(y, dtype=torch.float))

        map_dp = input_dp.map(fn=fn, fn_args=(torch.int, ), fn_kwargs={'sum': True})
        self.assertEqual(len(input_dp), len(map_dp))
        for x, y in zip(map_dp, input_dp):
            self.assertEqual(x, torch.tensor(y, dtype=torch.int).sum())

        from functools import partial
        map_dp = input_dp.map(partial(fn, dtype=torch.int, sum=True))
        self.assertEqual(len(input_dp), len(map_dp))
        for x, y in zip(map_dp, input_dp):
            self.assertEqual(x, torch.tensor(y, dtype=torch.int).sum())

        # Length is unavailable when the source lacks __len__.
        input_dp_nl = IDP_NoLen(range(10))
        map_dp_nl = input_dp_nl.map()
        with self.assertRaises(NotImplementedError):
            len(map_dp_nl)
        for x, y in zip(map_dp_nl, input_dp_nl):
            self.assertEqual(x, torch.tensor(y, dtype=torch.float))

    def test_collate_datapipe(self):
        """Collate applies a custom (or default) collate function per item."""
        arrs = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
        input_dp = IDP(arrs)

        def _collate_fn(batch):
            return torch.tensor(sum(batch), dtype=torch.float)

        collate_dp = input_dp.collate(collate_fn=_collate_fn)
        self.assertEqual(len(input_dp), len(collate_dp))
        for x, y in zip(collate_dp, input_dp):
            self.assertEqual(x, torch.tensor(sum(y), dtype=torch.float))

        input_dp_nl = IDP_NoLen(arrs)
        collate_dp_nl = input_dp_nl.collate()
        with self.assertRaises(NotImplementedError):
            len(collate_dp_nl)
        for x, y in zip(collate_dp_nl, input_dp_nl):
            self.assertEqual(x, torch.tensor(y))

    def test_batch_datapipe(self):
        """Batch groups items, with and without dropping the final partial batch."""
        arrs = list(range(10))
        input_dp = IDP(arrs)
        with self.assertRaises(AssertionError):
            input_dp.batch(batch_size=0)

        # Default not drop the last batch
        bs = 3
        batch_dp = input_dp.batch(batch_size=bs)
        self.assertEqual(len(batch_dp), 4)
        for i, batch in enumerate(batch_dp):
            self.assertEqual(len(batch), 1 if i == 3 else bs)
            self.assertEqual(batch, arrs[i * bs: i * bs + len(batch)])

        # Drop the last batch
        bs = 4
        batch_dp = input_dp.batch(batch_size=bs, drop_last=True)
        self.assertEqual(len(batch_dp), 2)
        for i, batch in enumerate(batch_dp):
            self.assertEqual(len(batch), bs)
            self.assertEqual(batch, arrs[i * bs: i * bs + len(batch)])

        input_dp_nl = IDP_NoLen(range(10))
        batch_dp_nl = input_dp_nl.batch(batch_size=2)
        with self.assertRaises(NotImplementedError):
            len(batch_dp_nl)

    def test_bucket_batch_datapipe(self):
        """BucketBatch behaves like Batch without sort_key, and sorts per bucket with it."""
        input_dp = IDP(range(20))
        with self.assertRaises(AssertionError):
            input_dp.bucket_batch(batch_size=0)

        input_dp_nl = IDP_NoLen(range(20))
        bucket_dp_nl = input_dp_nl.bucket_batch(batch_size=7)
        with self.assertRaises(NotImplementedError):
            len(bucket_dp_nl)

        # Test Bucket Batch without sort_key
        def _helper(**kwargs):
            arrs = list(range(100))
            random.shuffle(arrs)
            input_dp = IDP(arrs)
            bucket_dp = input_dp.bucket_batch(**kwargs)
            if kwargs["sort_key"] is None:
                # BatchDataset as reference
                ref_dp = input_dp.batch(batch_size=kwargs['batch_size'], drop_last=kwargs['drop_last'])
                for batch, rbatch in zip(bucket_dp, ref_dp):
                    self.assertEqual(batch, rbatch)
            else:
                # With a sort_key, each bucket of consecutive inputs should
                # come out sorted, though split across several batches.
                bucket_size = bucket_dp.bucket_size
                bucket_num = (len(input_dp) - 1) // bucket_size + 1
                it = iter(bucket_dp)
                for i in range(bucket_num):
                    ref = sorted(arrs[i * bucket_size: (i + 1) * bucket_size])
                    bucket: List = []
                    while len(bucket) < len(ref):
                        try:
                            batch = next(it)
                            bucket += batch
                        # If drop last, stop in advance
                        except StopIteration:
                            break
                    if len(bucket) != len(ref):
                        ref = ref[:len(bucket)]
                    # Sorted bucket
                    self.assertEqual(bucket, ref)

        _helper(batch_size=7, drop_last=False, sort_key=None)
        _helper(batch_size=7, drop_last=True, bucket_size_mul=5, sort_key=None)

        # Test Bucket Batch with sort_key
        def _sort_fn(data):
            return data

        _helper(batch_size=7, drop_last=False, bucket_size_mul=5, sort_key=_sort_fn)
        _helper(batch_size=7, drop_last=True, bucket_size_mul=5, sort_key=_sort_fn)

    def test_filter_datapipe(self):
        """Filter keeps items whose predicate is True and rejects non-bool returns."""
        input_ds = IDP(range(10))

        def _filter_fn(data, val, clip=False):
            if clip:
                return data >= val
            return True

        filter_dp = input_ds.filter(filter_fn=_filter_fn, fn_args=(5, ))
        for data, exp in zip(filter_dp, range(10)):
            self.assertEqual(data, exp)

        filter_dp = input_ds.filter(filter_fn=_filter_fn, fn_kwargs={'val': 5, 'clip': True})
        for data, exp in zip(filter_dp, range(5, 10)):
            self.assertEqual(data, exp)

        # A filtered pipe cannot know its length up front.
        with self.assertRaises(NotImplementedError):
            len(filter_dp)

        # The filter function must return a bool; anything else raises.
        def _non_bool_fn(data):
            return 1

        filter_dp = input_ds.filter(filter_fn=_non_bool_fn)
        with self.assertRaises(ValueError):
            temp = list(d for d in filter_dp)

    def test_sampler_datapipe(self):
        """Sampler defaults to sequential order and requires a sized source."""
        input_dp = IDP(range(10))
        # Default SequentialSampler
        sampled_dp = dp.iter.Sampler(input_dp)  # type: ignore
        self.assertEqual(len(sampled_dp), 10)
        for i, x in enumerate(sampled_dp):
            self.assertEqual(x, i)

        # RandomSampler
        random_sampled_dp = dp.iter.Sampler(input_dp, sampler=RandomSampler, sampler_kwargs={'replacement': True})  # type: ignore

        # Requires `__len__` to build SamplerDataPipe
        input_dp_nolen = IDP_NoLen(range(10))
        with self.assertRaises(AssertionError):
            sampled_dp = dp.iter.Sampler(input_dp_nolen)

    def test_shuffle_datapipe(self):
        """Shuffle preserves the multiset of items and is seed-deterministic."""
        exp = list(range(20))
        input_ds = IDP(exp)

        with self.assertRaises(AssertionError):
            shuffle_dp = input_ds.shuffle(buffer_size=0)

        for bs in (5, 20, 25):
            shuffle_dp = input_ds.shuffle(buffer_size=bs)
            self.assertEqual(len(shuffle_dp), len(input_ds))
            random.seed(123)
            res = list(d for d in shuffle_dp)
            self.assertEqual(sorted(res), exp)

            # Test Deterministic
            for num_workers in (0, 1):
                random.seed(123)
                dl = DataLoader(shuffle_dp, num_workers=num_workers, worker_init_fn=_worker_init_fn)
                dl_res = list(d for d in dl)
                self.assertEqual(res, dl_res)

        shuffle_dp_nl = IDP_NoLen(range(20)).shuffle(buffer_size=5)
        with self.assertRaises(NotImplementedError):
            len(shuffle_dp_nl)

    @skipIfNoTorchVision
    def test_transforms_datapipe(self):
        """Transforms accepts Compose / nn.Sequential / single transform instances."""
        torch.set_default_dtype(torch.float)
        # A sequence of numpy random numbers representing 3-channel images
        w = h = 32
        inputs = [np.random.randint(0, 255, (h, w, 3), dtype=np.uint8) for i in range(10)]
        tensor_inputs = [torch.tensor(x, dtype=torch.float).permute(2, 0, 1) / 255. for x in inputs]
        input_dp = IDP(inputs)

        # Raise TypeError for python function
        with self.assertRaisesRegex(TypeError, r"`transforms` are required to be"):
            input_dp.transforms(_fake_fn)

        # transforms.Compose of several transforms
        transforms = torchvision.transforms.Compose([
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Pad(1, fill=1, padding_mode='constant'),
        ])
        tsfm_dp = input_dp.transforms(transforms)
        self.assertEqual(len(tsfm_dp), len(input_dp))
        # Slice off the 1-pixel pad to recover the original tensor.
        for tsfm_data, input_data in zip(tsfm_dp, tensor_inputs):
            self.assertEqual(tsfm_data[:, 1:(h + 1), 1:(w + 1)], input_data)

        # nn.Sequential of several transforms (required to be instances of nn.Module)
        input_dp = IDP(tensor_inputs)
        transforms = nn.Sequential(
            torchvision.transforms.Pad(1, fill=1, padding_mode='constant'),
        )
        tsfm_dp = input_dp.transforms(transforms)
        self.assertEqual(len(tsfm_dp), len(input_dp))
        for tsfm_data, input_data in zip(tsfm_dp, tensor_inputs):
            self.assertEqual(tsfm_data[:, 1:(h + 1), 1:(w + 1)], input_data)

        # Single transform
        input_dp = IDP_NoLen(inputs)
        transform = torchvision.transforms.ToTensor()
        tsfm_dp = input_dp.transforms(transform)
        with self.assertRaises(NotImplementedError):
            len(tsfm_dp)
        for tsfm_data, input_data in zip(tsfm_dp, tensor_inputs):
            self.assertEqual(tsfm_data, input_data)

    def test_zip_datapipe(self):
        """Zip truncates to the shortest source; length needs all sources sized."""
        with self.assertRaises(TypeError):
            dp.iter.Zip(IDP(range(10)), list(range(10)))

        zipped_dp = dp.iter.Zip(IDP(range(10)), IDP_NoLen(range(5)))
        with self.assertRaises(NotImplementedError):
            len(zipped_dp)
        exp = list((i, i) for i in range(5))
        self.assertEqual(list(d for d in zipped_dp), exp)

        zipped_dp = dp.iter.Zip(IDP(range(10)), IDP(range(5)))
        self.assertEqual(len(zipped_dp), 5)
        self.assertEqual(list(zipped_dp), exp)

        # Reset
        self.assertEqual(list(zipped_dp), exp)
if __name__ == '__main__':
    # Allow running this test module directly via PyTorch's test runner.
    run_tests()
| StarcoderdataPython |
3575744 | <reponame>FloodCamML/FloodCam-WUSFeats<gh_stars>1-10
#!/usr/bin/env python
# coding: utf-8
## PLS DO NOT EDIT YET - NOT FULLY IMPLEMENTED
# ## A Weakly Supervised Machine Learning Model for Flooded Road Recognition
# ### <NAME>, Marda Science / USGS
# #### May, 2021; contribution to the COMET "Sunny Day Flooding" project
# The gist:
#
# * various image dims
# * various embedding dimensions
# * The model has 379,040 trainable parameters (no matter input image size)
#
# 1. Train a "weakly supervised" image embedding model
# * the model is an autoencoder that creates an embedding feature for each image, such that that feature is maximally distant from all other features extracted from other classes
# 2. Evaluate the feature extractor
# * study the model training history - the loss and accuracy curves of train and validation sets
# 3. Construct a "weakly supervised" classification model
# * build a k-nearest-neighbour (kNN) classifier that classifies unseen imagery based on the k nearest neighbours to the current image.
# (Or, more correctly, the nearest neighbour's of the image's embedding vector in the training set of embedding vectors)
# 4. Evaluate the classifier
# * evaluate the performance of the trained model on the validation set
# * plot a 'confusion matrix' of correspondences between actual and estimate class labels
#compare against a completely unsupervised approach, PCA, followed by kNN
# Number of interesting properties of an embedding model like this:
# * Number of model parameters doesn't increase with input image size
# * hyperparameters of embedding model: num_embedding_dims, image size, 'temperature' of logit scaling
# * hyperparameters of embedding model: number of batches, epochs, lr
# * hyperparameters of kNN model: number of nearest neighbours
# * l2_normalize or not? just training or testing set too?
# * weakly supervised feature extraction followed by weakly supervised classification
# * amenable to k-NN which is a very intuitive and fast inference technique
# * selection of multiple k for kNN lends itself to simple ensemble predictions
# * use a constant learning rate (a scheduler doesn't result in better results; this model is more stable with a constant learning rate, which becomes an important tunable hyperparameter)
#
# Interesting having a 'not sure' category - asking the model to replicate human uncertainty
# Common deep neural networks used for image recognition employ an extremely discriminative approach that explicitly maps the classes to the image features, and optimized to extract the features that explicitly predict the class.
#
# Here, we will use a deep neural network to extract features based on those that maximize the distance between classes in feature space. This isn't the same level of 'supervision' in network training - instead of extracting features that predict the class, features are extracted so they are maximally similar to features from other images in the same class, and maximally distant from features in all other classes. There is no mapping from features to class. Only feature extraction based on a knowledge of which images are in the same class. Therefore this approach is known as 'weakly supervised' image feature extraction. The network we use is an example of an 'autoencoder' that embeds the information in the image into a lower dimensional space. Therefore the extracted features are called 'embeddings'.
#
# Nor does this feature extraction result in classification directly - we don't use a classifying head to inform how image features are extracted. So, we have to utilize another model to carry out classification. We use perhaps the simplest, conceptually; K-nearest neighbours.
#The idea is that it will cluster those embeddings (extracted features) and classification is based on the class of the K nearest neighbours with the K most similar embeddings.
#TODO: make a conda or requirements.txt
#i/o
import requests, os, random
from glob import glob
from collections import Counter
from collections import defaultdict
from PIL import Image
from skimage.io import imread
import pickle
# numerical libraries
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow import keras
from tensorflow.keras import layers
from skimage.transform import resize
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA #for data dimensionality reduction / viz.
# plots
# from sklearn.metrics import ConfusionMatrixDisplay
import seaborn as sns
import matplotlib.pyplot as plt
import seaborn as sns #extended functionality / style to matplotlib plots
from matplotlib.offsetbox import OffsetImage, AnnotationBbox #for visualizing image thumbnails plotted as markers
##==============================================
def standardize(img):
    """Standardize an image to zero mean / unit (adjusted) std, then
    min-max rescale it into [0, 1].

    Grayscale input is stacked into three identical channels so the
    result is always H x W x 3.
    """
    # Adjusted std (clamped below by 1/sqrt(#pixels)) guards against
    # division by ~0 on near-constant images.
    pixel_count = np.shape(img)[0] * np.shape(img)[1]
    adj_std = np.maximum(np.std(img), 1.0 / np.sqrt(pixel_count))
    img = (img - np.mean(img)) / adj_std
    # Min-max rescale to [0, 1] (inlined equivalent of rescale(img, 0, 1)).
    lo = min(img.flatten())
    hi = max(img.flatten())
    img = (1 - 0) * (img - lo) / (hi - lo) + 0
    if np.ndim(img) != 3:
        img = np.dstack((img, img, img))
    return img
def rescale(dat, mn, mx):
    '''
    Linearly maps `dat` so that its minimum becomes `mn` and its
    maximum becomes `mx`.
    '''
    flat = dat.flatten()
    lo = min(flat)
    hi = max(flat)
    return mn + (mx - mn) * (dat - lo) / (hi - lo)
#-----------------------------------
def plot_one_class(inp_batch, sample_idx, label, batch_size, CLASSES, rows=8, cols=8, size=(20,15)):
    """Plot a grid of `batch_size` images that all belong to class `label`.

    Args:
        inp_batch: batch of images, indexable as inp_batch[n].
        sample_idx: unused here; kept for call-site compatibility.
        label: integer class index, used to title the figure via CLASSES.
        batch_size: number of images from the batch to draw.
        CLASSES: list of class names indexed by label.
        rows, cols: grid dimensions for the subplots.
        size: matplotlib figure size in inches.

    Returns:
        None (draws onto the current matplotlib figure).
    """
    fig = plt.figure(figsize=size)
    plt.title(CLASSES[int(label)])
    plt.axis('off')
    # One subplot per image, left-to-right, top-to-bottom.
    for n in range(batch_size):
        fig.add_subplot(rows, cols, n + 1)
        plt.imshow(inp_batch[n])
        plt.axis('off')
#-----------------------------------
def fit_knn_to_embeddings(model, X_train, ytrain, n_neighbors):
    """Fit a k-nearest-neighbour classifier on L2-normalised embeddings.

    Args:
        model: trained keras embedding model (its predict() output is used).
        X_train: training images to embed.
        ytrain: class labels aligned with X_train.
        n_neighbors: number of neighbours for the kNN classifier.

    Returns:
        A fitted sklearn KNeighborsClassifier.
    """
    # Normalise so that Euclidean distance between embeddings behaves like
    # cosine distance (matching how the embeddings were trained).
    normalised = tf.nn.l2_normalize(model.predict(X_train), axis=-1)
    classifier = KNeighborsClassifier(n_neighbors=n_neighbors)
    classifier.fit(normalised.numpy(), ytrain)
    return classifier
#-----------------------------------
class EmbeddingModel(keras.Model):
    """Keras model whose custom training step learns contrastive embeddings.

    Each batch supplies one (anchor, positive) image pair per class; the
    step maximises the scaled cosine similarity of matching pairs relative
    to all non-matching pairs via a softmax-over-similarities loss.
    """

    def train_step(self, data):
        # Note: Workaround for open issue, to be removed.
        if isinstance(data, tuple):
            data = data[0]
        anchors, positives = data[0], data[1]
        with tf.GradientTape() as tape:
            # Run both anchors and positives through model.
            anchor_embeddings = self(anchors, training=True)
            positive_embeddings = self(positives, training=True)
            # Calculate cosine similarity between anchors and positives. As they have
            # been normalised this is just the pair wise dot products.
            similarities = tf.einsum(
                "ae,pe->ap", anchor_embeddings, positive_embeddings
            )
            # Since we intend to use these as logits we scale them by a temperature.
            # This value would normally be chosen as a hyper parameter.
            temperature = 0.05  # 0.1 ##0.2
            similarities /= temperature
            # We use these similarities as logits for a softmax. The labels for
            # this call are just the sequence [0, 1, 2, ..., num_classes] since we
            # want the main diagonal values, which correspond to the anchor/positive
            # pairs, to be high. This loss will move embeddings for the
            # anchor/positive pairs together and move all other pairs apart.
            # NOTE(review): `num_classes` is a module-level global defined later
            # in this script — confirm it is set before training begins.
            sparse_labels = tf.range(num_classes)
            loss = self.compiled_loss(sparse_labels, similarities)
        # Calculate gradients and apply via optimizer.
        gradients = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
        # Update and return metrics (specifically the one for the loss value).
        self.compiled_metrics.update_state(sparse_labels, similarities)
        return {m.name: m.result() for m in self.metrics}
##==================================================================
# Even/odd split of the labelled photos into train and test sets.
train_files = glob('data/TrainPhotosRecoded/*water*.jpg')[::2]
test_files = glob('data/TrainPhotosRecoded/*water*.jpg')[1::2]
CLASSES = ['no water', 'water']  # 'not sure',
class_dict = {'no_water': 0, 'water': 1}  # 'not_sure':1,
max_epochs = 100   # training epochs per image size
num_batches = 500  # batches per epoch fed by AnchorPositivePairs
lr = 5e-4          # constant learning rate (see notes in the header)
# n_neighbors = 3
# want a long feature vector for tsne mapping into just two dimensions for viz
num_embedding_dims = 128  # 32 #100
size_vector = [224]  # 600,400,200,100] #image sizes to use
num_classes = len(class_dict)
print(num_classes)
# Derive integer labels from the '<class>_X_...' file-name prefix.
y_train = []
for f in train_files:
    y_train.append(class_dict[f.split(os.sep)[-1].split('_X_')[0]])
# NOTE(review): expand_dims followed by squeeze is a no-op round-trip;
# kept as-is to avoid touching behaviour here.
y_train = np.expand_dims(y_train, -1).astype('uint8')
y_train = np.squeeze(y_train)
# Map each class index to the training-sample indices in that class,
# used to draw anchor/positive pairs during training.
class_idx_to_train_idxs = defaultdict(list)
for y_train_idx, y in enumerate(y_train):
    class_idx_to_train_idxs[y].append(y_train_idx)
####################################################
############ TRAINING
#################################################
# for height_width, num_embedding_dims in zip([800,600,400,200,150], [170,128,85,42,32]):
for height_width in size_vector:  # 800,600,
    print("image size: %i" % (height_width))
    print("embedding dims: %i" % (num_embedding_dims))

    # Load and standardize every training image at this resolution.
    x_train = np.zeros((len(train_files), height_width, height_width, 3))
    for counter, f in enumerate(train_files):
        im = resize(imread(f), (height_width, height_width))
        x_train[counter] = standardize(im)
    x_train = x_train.astype("float32")  # / 255.0

    class AnchorPositivePairs(keras.utils.Sequence):
        """Yields batches shaped (2, num_classes, H, W, 3): one
        (anchor, positive) image pair per class, drawn at random from the
        training indices of that class."""

        def __init__(self, num_batches):
            self.num_batches = num_batches

        def __len__(self):
            return self.num_batches

        def __getitem__(self, _idx):
            x = np.empty((2, num_classes, height_width, height_width, 3), dtype=np.float32)
            for class_idx in range(num_classes):
                examples_for_class = class_idx_to_train_idxs[class_idx]
                anchor_idx = random.choice(examples_for_class)
                positive_idx = random.choice(examples_for_class)
                # Resample until the positive differs from the anchor.
                while positive_idx == anchor_idx:
                    positive_idx = random.choice(examples_for_class)
                x[0, class_idx] = x_train[anchor_idx]
                x[1, class_idx] = x_train[positive_idx]
            return x

    # Embedding network: stride-2 convolutions, global average pooling,
    # then a dense projection to the embedding space, L2-normalised so the
    # training loss can use dot products as cosine similarities.
    #
    # Bug fix: the 16- and 32-filter Conv2D layers were previously applied
    # to `inputs` and immediately overwritten, leaving them disconnected
    # from the graph (dead layers). They are now chained; note this changes
    # the trainable parameter count relative to the original (broken) graph.
    inputs = layers.Input(shape=(height_width, height_width, 3))
    x = layers.Conv2D(filters=16, kernel_size=3, strides=2, activation="relu")(inputs)
    x = layers.Conv2D(filters=32, kernel_size=3, strides=2, activation="relu")(x)
    x = layers.Conv2D(filters=64, kernel_size=3, strides=2, activation="relu")(x)
    x = layers.Conv2D(filters=128, kernel_size=3, strides=2, activation="relu")(x)
    x = layers.Conv2D(filters=256, kernel_size=3, strides=2, activation="relu")(x)
    x = layers.GlobalAveragePooling2D()(x)
    embeddings = layers.Dense(units=num_embedding_dims, activation=None)(x)
    embeddings = tf.nn.l2_normalize(embeddings, axis=-1)

    # Per-size model/history variables are created dynamically (model224,
    # history224, ...) so the plotting/evaluation code below can `eval`
    # them by image size.
    exec('model' + str(height_width) + ' = EmbeddingModel(inputs, embeddings)')
    exec('model' + str(height_width) + '.compile(optimizer=keras.optimizers.Adam(learning_rate=lr),loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True))')
    exec('history' + str(height_width) + '=model' + str(height_width) + '.fit(AnchorPositivePairs(num_batches=num_batches), epochs=max_epochs)')

    del x_train, embeddings
    # just save the trained models
    exec('model' + str(height_width) + '.save("Rmodel' + str(height_width) + '")')
    K.clear_session()
# Colour cycle for the per-size loss curves below.
cols='rgbkmyc'
# Overlay the training-loss history of every size-specific model on one
# figure and save it.
for counter,height_width in enumerate(size_vector):
    # history<size> was created dynamically (via exec) in the training loop.
    plt.plot(eval('history'+str(height_width)+'.history["loss"]'),cols[counter], label=str(height_width)+' px')
plt.legend(fontsize=8)
plt.savefig('hist_'+'_'.join([str(s) for s in size_vector])+'.png', dpi=300, bbox_inches="tight")
plt.close()
####################################################
############ EVALUATION
#################################################
# pyramid version, stack outputs from multiple inputs each at different size
# embedding dimension same, so can stack depthwise and feed that as inputs to unsupervised model
# Collect training-set embeddings from every size-specific model and stack
# them side by side into one long feature vector per image.
E = []
for height_width in size_vector: #800,600,
    print("image size: %i" % (height_width))
    x_train = np.zeros((len(train_files),height_width,height_width,3))
    for counter,f in enumerate(train_files):
        im = resize(imread(f), (height_width,height_width))
        x_train[counter]=standardize(im)
    x_train = x_train.astype("float32") #/ 255.0
    # model<size> was created dynamically (via exec) in the training loop.
    exec('embeddings = model'+str(height_width)+'.predict(x_train)')
    del x_train
    ## normalize in prediction mode????????????
    E.append(tf.nn.l2_normalize(embeddings, axis=-1).numpy())
    del embeddings
    K.clear_session()
# Concatenate the per-size embeddings along the feature axis.
embeddings_train = np.hstack(E) #np.cast(E,'float32')
del E
# show examples per class
# Rebuild x_train (at the last height_width from the loop above) so a few
# example images per class can be plotted and saved.
x_train = np.zeros((len(train_files),height_width,height_width,3))
for counter,f in enumerate(train_files):
    im = resize(imread(f), (height_width,height_width))
    x_train[counter]=standardize(im)
x_train = x_train.astype("float32") #/ 255.0
bs = 6  # number of example images shown per class
for class_idx in range(len(CLASSES)): # [0,1,2]:
    #show_one_class(class_idx=class_idx, bs=64)
    locs = np.where(y_train == class_idx)
    samples = locs[:][0]
    #random.shuffle(samples)
    samples = samples[:bs]
    print("Total number of {} (s) in the dataset: {}".format(CLASSES[class_idx], len(locs[:][0])))
    X_subset = x_train[samples]
    plot_one_class(X_subset, samples, class_idx, bs, CLASSES, rows=3, cols=2)
    # plt.show()
    plt.savefig( 'examples_class_samples_'+CLASSES[class_idx]+'.png', dpi=200, bbox_inches='tight')
    plt.close('all')
#prep test data
# Derive test labels from filename prefixes (same scheme as training).
y_test = []
for f in test_files:
    y_test.append(class_dict[f.split(os.sep)[-1].split('_X_')[0]])
y_test = np.expand_dims(y_test,-1).astype('uint8')
## dim-red
# Project the stacked training embeddings down to 2-D for visualisation.
tl=TSNE(n_components=2) #3)
embedding_tsne=tl.fit_transform(embeddings_train).astype('float32')
#
# colors = plt.cm.Blues(np.linspace(0, 1, num_classes))
#
# cmat = np.zeros((len(y_test),4))
# for k in range(len(y_test)):
# cmat[k,:] = colors[y_test[k]]
#
# plt.figure(figsize=(10,10))
# im = plt.scatter(embedding_tsne[:,0], embedding_tsne[:,1], color=cmat, lw=.5, edgecolor='k')
# plt.show()
# n=32 emebeddings = does not separate the two classes
# n=512 emebeddings = does not separate the two classes
# kmeans = KMeans(init='k-means++', n_clusters=num_classes, n_init=10)
# kmeans.fit(embedding_tsne)
#
# cat = kmeans.predict(embedding_tsne)
# Scatter the training images as 64x64 thumbnails at their t-SNE coordinates.
ims = [resize(im, (64,64)) for im in x_train]
fig, ax = plt.subplots(figsize=(12,12))
artists = []
for xy, i in zip(embedding_tsne, ims):
    x0, y0 = xy
    img = OffsetImage(i, zoom=1.0)
    ab = AnnotationBbox(img, (x0, y0), xycoords='data', frameon=False)
    artists.append(ax.add_artist(ab))
ax.update_datalim(embedding_tsne[:,:2])
ax.autoscale()
ax.axis('tight')
# Class-coloured markers drawn on top of the thumbnails.
ax.scatter(embedding_tsne[:,0], embedding_tsne[:,1], 20, y_train, zorder=10)
plt.savefig( 'tsne_vizimages.png', dpi=200, bbox_inches='tight')
plt.close('all')
# Load and standardise the test images at the last height_width.
x_test = np.zeros((len(test_files),height_width,height_width,3))
for counter,f in enumerate(test_files):
    im = resize(imread(f), (height_width,height_width))
    x_test[counter]=standardize(im)
x_test = x_test.astype("float32") #/ 255.0
y_test = np.squeeze(y_test)
# Index test samples by class id (mirrors class_idx_to_train_idxs).
class_idx_to_test_idxs = defaultdict(list)
for y_test_idx, y in enumerate(y_test):
    class_idx_to_test_idxs[y].append(y_test_idx)
# Embed the test set with every size-specific model, then fit kNN
# classifiers (k = 7, 9, 11) on the training embeddings, persist them,
# and report their accuracy on the test embeddings.
E = []
for height_width in size_vector: #800,600,
    print("image size: %i" % (height_width))
    x_test = np.zeros((len(test_files), height_width, height_width, 3))
    for counter, f in enumerate(test_files):
        im = resize(imread(f), (height_width, height_width))
        x_test[counter] = standardize(im)
    x_test = x_test.astype("float32")  #/ 255.0
    exec('embeddings = model' + str(height_width) + '.predict(x_test)')
    del x_test
    E.append(tf.nn.l2_normalize(embeddings, axis=-1).numpy())
    del embeddings
    K.clear_session()
# Stacked multi-size embeddings for the test imagery.
embeddings_test = np.hstack(E)
# Fit and pickle one kNN model per neighbourhood size (k = 7, 9, 11).
for n_neighbors in [7, 9, 11]:
    exec('knn' + str(n_neighbors) + '= KNeighborsClassifier(n_neighbors=n_neighbors)')
    exec('knn' + str(n_neighbors) + '.fit(embeddings_train, y_train)')
    # BUGFIX: the pickle file handle was opened but never closed; a context
    # manager flushes and releases it deterministically.
    with open('knn' + str(n_neighbors) + '.pkl', 'wb') as knnPickle:
        exec('pickle.dump(knn' + str(n_neighbors) + ', knnPickle)')
# Evaluate each kNN model on the held-out embeddings.
for n_neighbors in [7, 9, 11]:
    exec('score = knn' + str(n_neighbors) + '.score(embeddings_test, y_test)')
    print('KNN score: %f' % score)
##2-class, 400/200/100:
# image size: 400
# image size: 200
# image size: 100
#3KNN score: 0.942222
# 5KNN score: 0.942222
# 7KNN score: 0.951111
# KNN score: 0.920000
# KNN score: 0.924444
# KNN score: 0.924444
# print('KNN score: %f' % score) #knn3.score(embeddings_test, y_test)
# touse = len(x_test) #1000
# embeddings_test = model.predict(x_test[:touse])
# embeddings_test = tf.nn.l2_normalize(embeddings_test, axis=-1)
# # del X_test
# 800 px, 32 embed dim, KNN score: 0.797834
# 600 px, 32 embed dim, KNN score: 0.808664
# 400 px, 32 embed dim, KNN score: 0.815884
# 250 px, 32 embed dim,KNN score: 0.794224
# 150 px, 32 embed dim, : KNN score: 0.848375
# print('KNN score: %f' % knn3.score(embeddings_test[:,:num_dim_use], y_test[:touse]))
# del x_test, y_test
# Confusion matrix (row-normalised) for the k=7 model on all test samples.
n_neighbors = 7
exec('y_pred = knn'+str(n_neighbors)+'.predict(embeddings_test)')
cm = confusion_matrix(y_test, y_pred)
# Normalise each row so cells read as per-class recall fractions.
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# thres=0#.1
# cm[cm<thres] = 0
plt.figure(figsize=(8,8))
sns.heatmap(cm,
            annot=True,
            cmap = sns.cubehelix_palette(dark=0, light=1, as_cmap=True))
tick_marks = np.arange(len(CLASSES))+.5
plt.xticks(tick_marks, [c for c in CLASSES], rotation=90,fontsize=12)
plt.yticks(tick_marks, [c for c in CLASSES],rotation=0, fontsize=12)
plt.title('N = '+str(len(y_test)), fontsize=12)
plt.savefig('cm_nofiltbyprob.png', dpi=200, bbox_inches='tight')
plt.close()
## only 'certain' predictions
# Repeat, but keep only samples where the kNN class probability is exactly 1
# (i.e. all k neighbours agree).
exec('y_pred = knn'+str(n_neighbors)+'.predict_proba(embeddings_test)')
K.clear_session()
y_prob = np.max(y_pred, axis=1)
y_pred = np.argmax(y_pred, axis=1)
ind = np.where(y_prob==1)[0]
print(len(ind))
cm = confusion_matrix(y_test[ind], y_pred[ind])
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# thres=0
# cm[cm<thres] = 0
plt.figure(figsize=(8,8))
sns.heatmap(cm,
            annot=True,
            cmap = sns.cubehelix_palette(dark=0, light=1, as_cmap=True))
tick_marks = np.arange(len(CLASSES))+.5
plt.xticks(tick_marks, [c for c in CLASSES], rotation=90,fontsize=12)
plt.yticks(tick_marks, [c for c in CLASSES],rotation=0, fontsize=12)
plt.title('N = '+str(len(ind)), fontsize=12)
plt.savefig('cm_filtbyprob.png', dpi=200, bbox_inches='tight')
plt.close()
####################################################
############ BASELINE COMPARISON
#################################################
# Baseline: kNN on PCA-reduced raw pixels, for comparison with the learned
# embeddings.
num_components = 100
pca = PCA(n_components=num_components)
reduced = pca.fit_transform(x_train.reshape(len(x_train), -1))
print('Cumulative variance explained by {} principal components: {}'.format(num_components, np.sum(pca.explained_variance_ratio_)))
# Fit one PCA-pixel kNN per neighbourhood size on the training projection.
for n_neighbors in [7, 9, 11]:
    exec('pcaknn' + str(n_neighbors) + '= KNeighborsClassifier(n_neighbors=n_neighbors)')
    exec('pcaknn' + str(n_neighbors) + '.fit(reduced, y_train)')
for height_width in size_vector: #800,600,
    print("image size: %i" % (height_width))
    x_test = np.zeros((len(test_files), height_width, height_width, 3))
    for counter, f in enumerate(test_files):
        im = resize(imread(f), (height_width, height_width))
        x_test[counter] = standardize(im)
    x_test = x_test.astype("float32")  #/ 255.0
    # BUGFIX: the test data was previously projected with a *new* PCA fit on
    # the test set itself, whose components need not match the training PCA
    # the kNN models were fit on. Project with the train-fitted PCA instead.
    reduced_test = pca.transform(x_test.reshape(len(x_test), -1))
    # Score (the already-fitted) kNN models; the per-loop refit was redundant.
    for n_neighbors in [7, 9, 11]:
        exec('score = pcaknn' + str(n_neighbors) + '.score(reduced_test, y_test)')
        print('pca-KNN score: %f' % score)
        # BUGFIX: close the pickle file handle deterministically.
        with open('pcaknn' + str(n_neighbors) + '.pkl', 'wb') as pcaknnPickle:
            exec('pickle.dump(pcaknn' + str(n_neighbors) + ', pcaknnPickle)')
# image size: 400
# pca-KNN score: 0.715556
# pca-KNN score: 0.733333
# pca-KNN score: 0.742222
# image size: 200
# pca-KNN score: 0.742222
# pca-KNN score: 0.728889
# pca-KNN score: 0.751111
# image size: 100
# pca-KNN score: 0.800000
# pca-KNN score: 0.808889
# pca-KNN score: 0.800000
####################################################
############ APPLICATION ON UNKNOWN
#################################################
## read in 'not sure' images to classify
notsure_files = glob('data/TrainPhotosRecoded/*not*.jpg')
# Stacked multi-size embeddings for the 'not sure' images (same pipeline as
# for train/test).
E = []
for height_width in size_vector: #800,600,
    print("image size: %i" % (height_width))
    x_test = np.zeros((len(notsure_files),height_width,height_width,3))
    for counter,f in enumerate(notsure_files):
        im = resize(imread(f), (height_width,height_width))
        x_test[counter]=standardize(im)
    x_test = x_test.astype("float32") #/ 255.0
    exec('embeddings = model'+str(height_width)+'.predict(x_test)')
    del x_test
    E.append(tf.nn.l2_normalize(embeddings, axis=-1).numpy())
    del embeddings
    K.clear_session()
embeddings_notsure = np.hstack(E) #np.cast(E,'float32')
# better prediction probs with larger K
n_neighbors = 7
exec('y_pred = knn'+str(n_neighbors)+'.predict(embeddings_notsure)')
exec('y_prob = knn'+str(n_neighbors)+'.predict_proba(embeddings_notsure)')
# Save one annotated figure per image with the predicted label and its
# (max) class probability in the title.
for counter,f in enumerate(notsure_files):
    im = imread(f)
    plt.imshow(im); plt.axis('off')
    if y_pred[counter]==0:
        plt.title('Not Flooded (P='+str(y_prob[counter].max())[:5]+')')
    else:
        plt.title('Flooded (P='+str(y_prob[counter].max())[:5]+')')
    plt.savefig('not_sure_pred/'+f.split(os.sep)[-1].split('.jpg')[0]+'_pred.png')
    plt.close()
# fig = plt.figure(figsize=(12,12))
# ax = fig.add_subplot(projection='3d')
# ax.scatter(embedding_tsne[:,0], embedding_tsne[:,1], embedding_tsne[:,2], marker='o', color=cmat, alpha=1)
#
# In[67]:
#
#
# kmeans = KMeans(init='k-means++', n_clusters=num_classes, n_init=10)
# kmeans.fit(embedding_tsne)
# kmeans.fit(embeddings_train)
# In[68]:
# h = 1 #.02 # point in the mesh [x_min, x_max]x[y_min, y_max].
# # Plot the decision boundary. For that, we will assign a color to each
# x_min, x_max = embedding_tsne[:, 0].min() - 1, embedding_tsne[:, 0].max() + 1
# y_min, y_max = embedding_tsne[:, 1].min() - 1, embedding_tsne[:, 1].max() + 1
# xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# xx = xx.astype('float32')
# yy = yy.astype('float32')
# # Obtain labels for each point in mesh. Use last trained model.
# Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# del yy
# Z = Z.reshape(xx.shape)
# del xx
# In[47]:
# # Put the result into a color plot
# plt.figure(figsize=(10,10))
# plt.clf()
# plt.imshow(Z, interpolation='nearest',
# extent=(x_min, x_max, y_min, y_max), #extent=(xx.min(), xx.max(), yy.min(), yy.max()),
# cmap=plt.cm.Blues,
# aspect='auto', origin='lower')
# plt.plot(embedding_tsne[:, 0], embedding_tsne[:, 1], 'k.', markersize=2)
# # Plot the centroids as a white X
# centroids = kmeans.cluster_centers_
# plt.scatter(centroids[:, 0], centroids[:, 1],
# marker='x', s=169, linewidths=3,
# color='m', zorder=10)
# plt.title('K-means clustering on the TSNE-reduced data\n'
# 'Centroids are marked with pink cross')
# plt.xlim(x_min, x_max)
# plt.ylim(y_min, y_max)
# plt.xticks(())
# plt.yticks(())
# for k in range(num_classes):
# ind = np.where(kmeans.labels_ == k)[0]
# plt.text(np.mean(embedding_tsne[ind, 0]), np.mean(embedding_tsne[ind, 1]), CLASSES[k] , color='k', fontsize=16)
| StarcoderdataPython |
1832502 | import threading
import time
from copy import deepcopy
import pygame
import game_sound
from Score import Score
from block import TetrisBlock
from features.feature import Feature
from field import Field
from painter import RGB_Field_Painter, Led_Matrix_Painter
from highscorelist import *
# Serialises all game-state mutation: tick() runs on a timer-driven thread
# while event() is driven by user input, so both acquire this lock.
lock = threading.Lock()
# Background music rotation for the Tetris feature.
tetris_songs = ['./sound-files/lied.mp3', './sound-files/lied2.mp3']
class Tetris(Feature):
    """Tetris game feature.

    Renders the playing field onto an RGB LED field, shows the score and a
    preview of the next block on an LED matrix, plays sound effects/music,
    and records the final score in a highscore list. All state-mutating
    entry points (``tick`` and ``event``) serialise on the module-level
    ``lock`` because they run on different threads.
    """

    def __init__(self, field_leds: Field, field_matrix: Field, rgb_field_painter: RGB_Field_Painter,
                 led_matrix_painter: Led_Matrix_Painter, highscorelist: Highscorelist):
        """Initialise the feature, the score counter, pygame and the mixer."""
        super(Tetris, self).__init__(field_leds, field_matrix, rgb_field_painter, led_matrix_painter,
                                     highscorelist)
        self.score = Score()
        pygame.init()
        game_sound.init_mixer()

    def __new_block(self):
        """Promote the preview block to the current block and draw a new preview."""
        self.check_for_full_lines()
        self.current_block = self.next_block
        self.next_block = TetrisBlock.get_random_block()
        self.position_block_today_x = 3
        self.position_block_today_y = -self.current_block.get_line_of_first_pixel_from_bottom() - 1  # possibly with -1 at the end
        self.refresh_led_painter()
        self.refresh_matrix_painter()

    def check_for_full_lines(self):
        """Find completed lines; blink, delete and score them if any exist."""
        deletablelines = self.field_leds.get_all_full_lines()
        if len(deletablelines) > 0:
            game_sound.play_sound("breaking_line")
            self.blink_deleted_lines(deletablelines)
            self.field_leds.delete_lines(deletablelines)
            self.score.score_for_line(len(deletablelines))

    def blink_deleted_lines(self, deletablelines: list):
        """Flash the given lines white twice before they are removed."""
        colors_in_line = deepcopy(self.field_leds.field)
        colors_for_blink = [1, 0, 1, 0]  # alternate white (1) / original colors (0)
        for color in colors_for_blink:
            for y in deletablelines:
                for x in range(self.field_leds.width):
                    if color == 1:
                        self.field_leds.field[y][x] = [255, 255, 255]
                    else:
                        self.field_leds.field[y][x] = colors_in_line[y][x]
            self.rgb_field_painter.draw(self.field_leds)
            time.sleep(0.03)

    def refresh_led_painter(self):
        # Draw the blocks onto the LED field.
        self.field_leds.set_block(self.current_block, self.position_block_today_x, self.position_block_today_y)
        self.rgb_field_painter.draw(self.field_leds)

    def refresh_matrix_painter(self):
        # Write the score and next-block preview onto the matrix.
        self.field_matrix.set_all_pixels_to_black()
        self.score.draw_score_on_field(self.field_matrix)
        self.field_matrix.set_block(self.next_block.double_size(), 24, 0)
        self.led_matrix_painter.draw(self.field_matrix)

    def delete_current_block(self):
        """Remove the current block's pixels from the LED field."""
        self.field_leds.remove_block(self.current_block, self.position_block_today_x, self.position_block_today_y)

    def set_all_fields_black(self):
        """Clear both the LED field and the matrix."""
        self.field_leds.set_all_pixels_to_black()
        self.field_matrix.set_all_pixels_to_black()

    def move_block_today_one_step_down(self):
        """Advance the current block one row; handle landing and game over."""
        self.delete_current_block()
        collision = self.field_leds.give_type_of_collision(self.current_block, self.position_block_today_x,
                                                           self.position_block_today_y + 1)
        if collision == Field.GameOverCollision:
            print(" -> Game over")
            self.game_over = True
            game_sound.stop_song()
            game_sound.play_sound("game_over")
            # Persist the result before showing the game-over message.
            self.highscorelist.add_entry(
                Highscoreentry(datetime.today(), self.playername, self.score.get_score_int()))
            self.highscorelist.save()
            self.led_matrix_painter.show_Message("Game over - Your Points: " + self.score.get_score_str(), 250)
        elif collision == Field.Collision:
            # Block has landed: fix it in place and spawn the next one.
            game_sound.play_sound("tick")
            print(" -> neuer Block")
            self.refresh_led_painter()
            self.__new_block()
            self.score.score_for_block()
            self.refresh_matrix_painter()
        else:
            self.position_block_today_y += 1
            self.refresh_led_painter()

    def move_block_today_one_step_left(self):
        """Move the current block one column left unless it would collide."""
        self.delete_current_block()
        if self.field_leds.give_type_of_collision(
                self.current_block,
                self.position_block_today_x - 1,
                self.position_block_today_y) != Field.NoCollision:
            print(" -> keine Bewegung nach links")
        else:
            self.position_block_today_x -= 1
        self.refresh_led_painter()

    def move_block_today_one_step_right(self):
        """Move the current block one column right unless it would collide."""
        self.delete_current_block()
        if self.field_leds.give_type_of_collision(
                self.current_block,
                self.position_block_today_x + 1,
                self.position_block_today_y) != Field.NoCollision:
            print(" -> keine Bewegung nach rechts")
        else:
            self.position_block_today_x += 1
        self.refresh_led_painter()

    def rotate_block_today_left(self):
        """Rotate the current block counter-clockwise; try on a clone first."""
        self.delete_current_block()
        block_today_for_test = self.current_block.clone()
        block_today_for_test.rotateleft()
        if self.field_leds.give_type_of_collision(
                block_today_for_test,
                self.position_block_today_x,
                self.position_block_today_y) != Field.NoCollision:
            print(" -> keine Rotation nach links")
        else:
            self.current_block.rotateleft()
        self.refresh_led_painter()

    def rotate_block_today_right(self):
        """Rotate the current block clockwise; try on a clone first."""
        self.delete_current_block()
        block_today_for_test = self.current_block.clone()
        block_today_for_test.rotateright()
        if self.field_leds.give_type_of_collision(
                block_today_for_test,
                self.position_block_today_x,
                self.position_block_today_y) != Field.NoCollision:
            print(" -> keine Rotation nach rechts")
        else:
            self.current_block.rotateright()
        self.refresh_led_painter()

    def tick(self):
        """Timer callback: advance the game one step (or scroll the game-over text)."""
        lock.acquire()
        if not self.game_over:
            self.move_block_today_one_step_down()
            game_sound.play_new_musik_if_music_is_over(tetris_songs)
        else:
            self.led_matrix_painter.move_Message()
            time.sleep(0.02)
        lock.release()
        if not self.game_over:
            # Game speed: shorter delay as the score grows (see get_delay).
            time.sleep(self.get_delay())

    def event(self, eventname: str):
        """Dispatch a named user-input event to the matching move/rotate action."""
        lock.acquire()
        if not self.game_over:
            if eventname == "new":  # new block  # todo: remove later (Johannes)
                self.__new_block()
            elif eventname == "rotate left":
                self.rotate_block_today_left()
            elif eventname == "rotate right":
                self.rotate_block_today_right()
            elif eventname == "move left":
                self.move_block_today_one_step_left()
            elif eventname == "move right":
                self.move_block_today_one_step_right()
            elif eventname == "move down":
                self.move_block_today_one_step_down()
        lock.release()

    def get_delay(self):
        """Return the tick delay in seconds; decreases as the score increases."""
        speedmap = [(50, 0.4), (100, 0.35), (500, 0.3), (1000, 0.25), (2000, 0.2), (5000, 0.15), (10000, 0.12), (20000, 0.09),
                    (50000, 0.07), (100000, 0.06)]
        for score, delay in speedmap:
            if self.score.points < score:
                return delay
        return 0.05

    def start(self, playername: str = None):
        """Reset the game state, draw the initial frame and start the music."""
        super(Tetris, self).start(playername)
        self.prepare_for_start()
        self.refresh_led_painter()
        self.refresh_matrix_painter()
        self.game_over = False
        game_sound.play_random_song(tetris_songs)

    def prepare_for_start(self):
        """Clear the fields, draw fresh random blocks and reset the score."""
        self.set_all_fields_black()
        self.next_block = TetrisBlock.get_random_block()
        self.current_block = TetrisBlock.get_random_block()
        # Starting position of the current block.
        self.position_block_today_x = 3
        self.position_block_today_y = -self.current_block.get_line_of_first_pixel_from_bottom() - 2
        # self.draw_lines_for_test()
        self.score.points = 0

    def stop(self) -> None:
        """End the game immediately and stop the music."""
        self.game_over = True
        game_sound.stop_song()

    def is_game_over(self):
        return super(Tetris, self).is_game_over()

    def draw_lines_for_test(self):
        """Debug helper: pre-fill two almost-complete lines on the LED field."""
        for x in range(self.field_leds.width):
            self.field_leds.field[19][x] = [0, 255, 100]
            self.field_leds.field[18][x] = [0, 255, 100]
        self.field_leds.field[18][5] = [0, 0, 0]
        self.field_leds.field[19][5] = [0, 0, 0]
| StarcoderdataPython |
96203 | from keras.layers import Input, Convolution2D, MaxPooling2D, Flatten, Dense
from keras.models import Model
def VGG10(weights=None, input_shape=(128, 128, 3)):
    """Build a small VGG-style Keras CNN (7 conv + 3 dense layers, Keras 1.x API).

    Args:
        weights: Placeholder for loading pretrained weights; currently
            unused (see TODO below).
        input_shape: Input image shape as (height, width, channels).

    Returns:
        An uncompiled ``keras.models.Model`` mapping an image to a
        2-unit sigmoid output.
    """
    input_img = Input(shape=input_shape)
    # Block 1: two 32-filter 3x3 convs + 2x2 max-pool.
    x = Convolution2D(32, 3, 3, activation='relu', border_mode='same', name='B1_C1')(input_img)
    x = Convolution2D(32, 3, 3, activation='relu', border_mode='same', name='B1_C2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='B1_MP1')(x)
    # Block 2: two 64-filter convs + max-pool.
    x = Convolution2D(64, 3, 3, activation='relu', border_mode='same', name='B2_C1')(x)
    x = Convolution2D(64, 3, 3, activation='relu', border_mode='same', name='B2_C2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='B2_MP1')(x)
    # Block 3: three 128-filter convs + max-pool.
    x = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='B3_C1')(x)
    x = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='B3_C2')(x)
    x = Convolution2D(128, 3, 3, activation='relu', border_mode='same', name='B3_C3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='B3_MP1')(x)
    # Classifier head: flatten + two 1024-unit dense layers + 2-unit output.
    x = Flatten(name='F')(x)
    x = Dense(1024, activation='relu', name='D1')(x)
    x = Dense(1024, activation='relu', name='D2')(x)
    x = Dense(2, activation='sigmoid', name='O')(x)
    model = Model(input=input_img, output=x)
    if weights is None:
        # TODO handle trained model
        pass
    return model
| StarcoderdataPython |
5048682 | <reponame>DougLam/rules_ios<filename>rules/xcodeproj.bzl
load("@build_bazel_rules_apple//apple:providers.bzl", "AppleBundleInfo")
load("@bazel_skylib//lib:paths.bzl", "paths")
def _get_transitive(deps, provider, field):
    """Collect `field` from `provider` on every dep that carries that provider.

    Deps that are None/falsy or lack the provider are skipped.
    """
    collected = []
    for dep in deps:
        if dep and provider in dep:
            collected.append(getattr(dep[provider], field))
    return collected
# Carries the Xcode-target description (and the transitive set of all such
# descriptions) up the dependency graph.
_TargetInfo = provider()
# Carries source files and BUILD files gathered from a target's attributes.
_SrcsInfo = provider()
def _dir(o):
    """Like dir(), but without the struct built-ins `to_json`/`to_proto`."""
    names = []
    for attr_name in dir(o):
        if attr_name != "to_json" and attr_name != "to_proto":
            names.append(attr_name)
    return names
def _xcodeproj_aspect_impl(target, ctx):
    """Aspect implementation: collect per-target info for project generation.

    Walks deps/infoplists/entitlements, accumulating _SrcsInfo (source and
    BUILD files) and _TargetInfo (Xcode target descriptions) so the
    xcodeproj rule can generate an xcodegen spec.
    """
    providers = []
    deps = []
    deps += getattr(ctx.rule.attr, "deps", [])
    deps += getattr(ctx.rule.attr, "infoplists", [])
    deps.append(getattr(ctx.rule.attr, "entitlements", None))
    # TODO: handle apple_resource_bundle targets
    if AppleBundleInfo in target:
        # Apple bundle (app/extension/test bundle): describe it as an Xcode
        # target using the bundle's own name and product type.
        bundle_info = target[AppleBundleInfo]
        srcs = []
        bazel_name = target.label.name
        info = struct(
            name = bundle_info.bundle_name,
            bundle_extension = bundle_info.bundle_extension,
            package = target.label.package,
            bazel_name = bazel_name,
            srcs = depset(srcs, transitive = _get_transitive(deps, _SrcsInfo, "srcs")),
            build_files = depset([ctx.build_file_path], transitive = _get_transitive(deps, _SrcsInfo, "build_files")),
            # Strip the common prefix: xcodegen expects e.g. "application".
            product_type = bundle_info.product_type[len("com.apple.product-type."):],
        )
        providers.append(
            _SrcsInfo(
                srcs = info.srcs,
                build_files = depset([ctx.build_file_path]),
                direct_srcs = srcs,
            ),
        )
        target_info = _TargetInfo(target = info, targets = depset([info], transitive = _get_transitive(deps, _TargetInfo, "targets")))
        providers.append(target_info)
    elif ctx.rule.kind == "apple_framework_packaging":
        # Framework packaging rule: describe it as a "framework" Xcode target.
        srcs = []
        info = struct(
            name = target.label.name,
            package = target.label.package,
            bazel_name = target.label.name,
            srcs = depset(srcs, transitive = _get_transitive(deps, _SrcsInfo, "srcs")),
            build_files = depset([ctx.build_file_path], transitive = _get_transitive(deps, _SrcsInfo, "build_files")),
            product_type = "framework",
        )
        target_info = _TargetInfo(target = info, targets = depset([info], transitive = _get_transitive(deps, _TargetInfo, "targets")))
        providers.append(target_info)
    else:
        # Plain library/other rule: harvest its source files from every file
        # attribute, keeping only in-workspace source files.
        srcs = []
        for attr in _dir(ctx.rule.files):
            srcs += getattr(ctx.rule.files, attr, [])
        srcs = [f for f in srcs if not f.path.startswith("external/") and f.is_source]
        providers.append(
            _SrcsInfo(
                srcs = depset(srcs, transitive = _get_transitive(deps, _SrcsInfo, "srcs")),
                build_files = depset([ctx.build_file_path], transitive = _get_transitive(deps, _SrcsInfo, "build_files")),
                direct_srcs = srcs,
            ),
        )
        info = None
        actual = None
        # Forwarding rules (test_suite/alias): surface the wrapped target's info.
        if ctx.rule.kind in ("test_suite"):
            actual = getattr(ctx.rule.attr, "tests")[0]
        elif ctx.rule.kind in ("alias"):
            actual = getattr(ctx.rule.attr, "actual")
        if actual and _TargetInfo in actual:
            info = actual[_TargetInfo].target
        providers.append(
            _TargetInfo(target = info, targets = depset(transitive = _get_transitive(deps, _TargetInfo, "targets"))),
        )
    return providers
# Aspect that propagates along every attribute through which target info or
# sources can flow into an Xcode project.
_xcodeproj_aspect = aspect(
    implementation = _xcodeproj_aspect_impl,
    attr_aspects = ["deps", "actual", "tests", "infoplists", "entitlements", "resources", "test_host"],
)
def _xcodeproj_impl(ctx):
    """Rule implementation: generate an .xcodeproj via an xcodegen spec.

    Builds a YAML spec describing one Xcode target per collected Bazel
    target (compiling through stub toolchain binaries that shell out to
    Bazel), runs xcodegen on it, and emits an installer script.
    """
    xcodegen_yaml = ctx.actions.declare_file(
        "%s-xcodegen.yaml" % ctx.attr.name,
    )
    project_name = ctx.attr.project_name if ctx.attr.project_name else ctx.attr.name + ".xcodeproj"
    if "/" in project_name:
        fail("No / allowed in project_name")
    project = ctx.actions.declare_directory(project_name)
    # Relative paths from the generated project back to the workspace root.
    nesting = ctx.label.package.count("/") + 1 if ctx.label.package else 0
    src_dot_dots = "/".join([".." for x in range(nesting + 3)])
    script_dot_dots = "/".join([".." for x in range(nesting)])
    # Project-level spec: build settings route the compile/link steps through
    # the stub binaries so Xcode builds delegate to Bazel.
    yaml = """\
name: {name}
options:
  createIntermediateGroups: true
  defaultConfig: Debug
  groupSortPosition: none
settings:
  BAZEL_PATH: "{bazel_path}"
  BAZEL_WORKSPACE_ROOT: "$SRCROOT/{bazel_workspace_root}"
  BAZEL_STUBS_DIR: "$PROJECT_FILE_PATH/bazelstubs"
  BAZEL_INSTALLER: "$BAZEL_STUBS_DIR/{installer_path}"
  CC: "$BAZEL_STUBS_DIR/clang-stub"
  CXX: $CC
  CLANG_ANALYZER_EXEC: $CC
  CODE_SIGNING_ALLOWED: false
  DONT_RUN_SWIFT_STDLIB_TOOL: true
  LD: "$BAZEL_STUBS_DIR/ld-stub"
  LIBTOOL: /usr/bin/true
  SWIFT_EXEC: "$BAZEL_STUBS_DIR/swiftc-stub"
  SWIFT_OBJC_INTERFACE_HEADER_NAME: ""
  SWIFT_VERSION: 5
""".format(
        name = paths.split_extension(project_name)[0],
        bazel_workspace_root = script_dot_dots,
        bazel_path = ctx.attr.bazel_path,
        installer_path = ctx.executable.installer.short_path,
    )
    targets = []
    if ctx.attr.include_transitive_targets:
        targets = depset(transitive = _get_transitive(ctx.attr.deps, _TargetInfo, "targets")).to_list()
    else:
        targets = [t for t in _get_transitive(ctx.attr.deps, _TargetInfo, "target") if t]
    if targets:
        yaml += """\
targets:
"""
    # One xcodegen target per collected Bazel target; the pre-build script
    # builds via Bazel and runs the installer to copy products into place.
    for target in targets:
        yaml += """\
  {name}:
    sources: [{sources}]
    type: {product_type}
    platform: iOS
    settings:
      PRODUCT_NAME: {name}
      BAZEL_PACKAGE: {package}
      MACH_O_TYPE: {macho_type}
    preBuildScripts:
      - name: Build with bazel
        script: |
          set -eux
          cd $BAZEL_WORKSPACE_ROOT
          $BAZEL_PATH build $BAZEL_PACKAGE:{bazel_name}
          $BAZEL_INSTALLER
""".format(
            name = target.name,
            sources = ", ".join(['{path: "%s", group: "%s", validate: false}' % (paths.join(src_dot_dots, s.short_path), paths.dirname(s.short_path)) for s in target.srcs.to_list()]),
            package = target.package,
            bazel_name = target.bazel_name,
            product_type = target.product_type,
            macho_type = "staticlib" if target.product_type == "framework" else "$(inherited)",
        )
    yaml += "schemes:\n"
    # One scheme per runnable/testable target (frameworks are skipped).
    for target in targets:
        if target.product_type == "framework":
            continue
        action = "test"
        if target.product_type == "application":
            action = "run"
        yaml += """\
  {name}:
    build:
      parallelizeBuild: false
      buildImplicitDependencies: false
      targets:
        {name}: [{action}]
    {action}:
      targets:
        - {name}
""".format(name = target.name, action = action)
    ctx.actions.write(xcodegen_yaml, yaml)
    # Run xcodegen to materialise the .xcodeproj directory from the spec.
    ctx.actions.run(
        executable = ctx.executable._xcodegen,
        arguments = ["--quiet", "--no-env", "--spec", xcodegen_yaml.path, "--project", project.dirname],
        inputs = depset([xcodegen_yaml], transitive = [target.srcs for target in targets]),
        outputs = [project],
    )
    # Installer script: copies the generated project (and the stub binaries
    # it references) into the workspace when the rule is `bazel run`.
    install_script = ctx.actions.declare_file(
        "%s-install-xcodeproj.sh" % ctx.attr.name,
    )
    ctx.actions.expand_template(
        template = ctx.file._xcodeproj_installer_template,
        output = install_script,
        substitutions = {
            "$(project_short_path)": project.short_path,
            "$(project_full_path)": project.path,
            "$(installer_short_path)": ctx.executable.installer.short_path,
            "$(clang_stub_short_path)": ctx.executable.clang_stub.short_path,
            "$(clang_stub_ld_path)": ctx.executable.ld_stub.short_path,
            "$(clang_stub_swiftc_path)": ctx.executable.swiftc_stub.short_path,
        },
        is_executable = True,
    )
    return [
        DefaultInfo(
            executable = install_script,
            files = depset([xcodegen_yaml, project]),
            runfiles = ctx.runfiles(files = [xcodegen_yaml, project], transitive_files = depset(
                direct =
                    ctx.files.installer + ctx.files.clang_stub + ctx.files.ld_stub + ctx.files.swiftc_stub,
                transitive = [ctx.attr.installer[DefaultInfo].default_runfiles.files],
            )),
        ),
    ]
# Public rule: `bazel run` installs a generated .xcodeproj whose builds
# delegate to Bazel via the stub toolchain binaries declared below.
xcodeproj = rule(
    implementation = _xcodeproj_impl,
    attrs = {
        "deps": attr.label_list(mandatory = True, allow_empty = False, providers = [], aspects = [_xcodeproj_aspect]),
        "include_transitive_targets": attr.bool(default = False, mandatory = False),
        "project_name": attr.string(mandatory = False),
        "bazel_path": attr.string(mandatory = False, default = "bazel"),
        "_xcodeproj_installer_template": attr.label(executable = False, default = Label("//tools/xcodeproj-shims:xcodeproj-installer.sh"), allow_single_file = ["sh"]),
        "_xcodegen": attr.label(executable = True, default = Label("@com_github_yonaskolb_xcodegen//:xcodegen"), cfg = "host"),
        "clang_stub": attr.label(executable = True, default = Label("//tools/xcodeproj-shims:clang-stub"), cfg = "host"),
        "ld_stub": attr.label(executable = True, default = Label("//tools/xcodeproj-shims:ld-stub"), cfg = "host"),
        "swiftc_stub": attr.label(executable = True, default = Label("//tools/xcodeproj-shims:swiftc-stub"), cfg = "host"),
        "installer": attr.label(executable = True, default = Label("//tools/xcodeproj-shims:installer"), cfg = "host"),
    },
    executable = True,
)
| StarcoderdataPython |
6622570 | <reponame>stPhoenix/projecttango<gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-01-30 06:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter pybb Profile.autosubscribe: default False, with explicit
    help/verbose text. Schema-only change; no data migration."""

    dependencies = [
        ('pybb', '0009_account_accountdeletion_emailaddress_emailconfirmation_passwordexpiry_passwordhistory_signupcode_sig'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profile',
            name='autosubscribe',
            field=models.BooleanField(default=False, help_text='Automatically subscribe to topics that you answer', verbose_name='Automatically subscribe'),
        ),
    ]
| StarcoderdataPython |
6595030 | <gh_stars>0
import os
from functools import lru_cache
from copy import deepcopy
import xmltodict
from .color import Color
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
@lru_cache()
def svg_dict(name):
    """
    Turn a svg into a dict structure, and cache requests.

    NOTE: the cached dict is a shared mutable object — callers must
    deepcopy it before mutating (see MakiMarker.background_marker /
    maki_icon below).

    Raises:
        FileNotFoundError: if no SVG file named ``name`` exists in img/.
    """
    filename = os.path.join(BASE_DIR, "img", name)
    with open(filename) as svg:
        return xmltodict.parse(svg.read())

# Fallback foreground colors for icon/text on dark vs. light tints.
DEFAULT_BLACK = "#000"
DEFAULT_WHITE = "#fff"
class MakiMarker(object):
    """
    Generate SVG/PNG files for Maki markers to use with web mapping libraries.

    argument:
        tint (str): Any hex string to change the background color for the marker.
        symbol (str): Name of a Maki icon, defaults to a filled circle.
        size (str): "l" or "s" for large or small markers.

    Example:
        >>> from maki import MakiMarker
        >>> marker = MakiMarker(symbol="park", tint="#3388ff")
        >>> marker.svg()
        '<?xml version=...'
    """

    def __init__(self, tint="#000", symbol=None, size="l"):
        self.tint = tint
        self.size = "large" if size == "l" else "small"
        self.symbol = symbol

    def background_marker(self):
        """Return a fresh (deep-copied) dict of the marker background SVG."""
        return deepcopy(svg_dict(f"marker-{self.size}.svg"))

    def maki_icon(self):
        """Return a fresh dict of the Maki icon SVG for this symbol/size.

        Raises:
            ValueError: if no icon file exists for ``self.symbol``.
        """
        size = 11 if self.size == "small" else 15
        symbol = os.path.join("icons", f"{self.symbol}-{size}.svg")
        try:
            return deepcopy(svg_dict(symbol))
        except FileNotFoundError:
            raise ValueError(f"Symbol '{self.symbol}' does not exist")

    def svg(self):
        """Render the tinted marker (background plus optional icon) as an SVG string."""
        marker = self.background_marker()
        basepaths = marker["svg"]["g"]["g"]["g"]
        if self.symbol:
            try:
                icon = self.maki_icon()["svg"]["path"]
                try:
                    basepaths[3]["path"] = {"@id": "icon", "@d": icon["@d"]}
                except TypeError:
                    # some icons wrap the OrderedDict in a list, add them all in a g
                    basepaths[3] = {
                        "@id": "maki",
                        "g": [{"path": {"@d": i["@d"]}} for i in icon],
                        "@transform": "translate(6, 7)",
                    }
            except KeyError:
                # some icons have a <g>-tag containing paths. They seem to need a slightly different
                # treatment.
                icon = self.maki_icon()["svg"]["g"]
                basepaths[3] = {"@id": "maki", "g": icon["g"], "@transform": "translate(6, 7)"}
            try:
                basepaths.remove(basepaths[4])
            except IndexError:
                pass
            # move single-char svg's to the center
            if len(self.symbol) == 1:
                x, y = (9, 7) if self.size == "small" else (10, 8)
                # BUGFIX: the f-string previously had no braces
                # (f"translate(x, y)") so the literal text "translate(x, y)"
                # was emitted and the computed offsets were never applied.
                basepaths[3]["@transform"] = f"translate({x}, {y})"
        color = Color.from_hex(self.tint)
        # Change the tint on the marker background
        basepaths[1]["@fill"] = color.hex
        # Swap the border color if the tint is light and there is a symbol
        basepaths[2]["@fill"] = color.most_readable().hex
        # Some Maki icons have different SVG makeups. This attempts to apply the tint to the correct path
        foreground_color = DEFAULT_BLACK if color.is_light else DEFAULT_WHITE
        if "path" in basepaths[3]:
            # If the background color is light, apply a light tint to the icon or text to make it stand out more
            basepaths[3]["path"]["@style"] = f"fill:{foreground_color}"
        else:
            basepaths[3]["@fill"] = foreground_color
        return xmltodict.unparse(marker)

    def png(self, **kwargs):
        """Render the marker as PNG bytes (requires the optional cairosvg package)."""
        import cairosvg

        return cairosvg.svg2png(bytestring=self.svg().encode(), **kwargs)
| StarcoderdataPython |
24767 | <filename>ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/status_params.py
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.libraries.script import Script
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions import default, format
# Ambari-supplied cluster configuration for this command invocation.
config = Script.get_config()
# Directory where every Storm daemon writes its PID file.
pid_dir = config['configurations']['storm-env']['storm_pid_dir']
# NOTE: `format` here is resource_management's format(), which appears to
# interpolate {pid_dir} from the surrounding scope rather than str.format.
pid_nimbus = format("{pid_dir}/nimbus.pid")
pid_supervisor = format("{pid_dir}/supervisor.pid")
pid_drpc = format("{pid_dir}/drpc.pid")
pid_ui = format("{pid_dir}/ui.pid")
pid_logviewer = format("{pid_dir}/logviewer.pid")
pid_rest_api = format("{pid_dir}/restapi.pid")
# Component name -> PID file, consumed by the status check to probe liveness.
pid_files = {"logviewer":pid_logviewer,
             "ui": pid_ui,
             "nimbus": pid_nimbus,
             "supervisor": pid_supervisor,
             "drpc": pid_drpc,
             "rest_api": pid_rest_api}
# Security related/required params
hostname = config['hostname']
security_enabled = config['configurations']['cluster-env']['security_enabled']
kinit_path_local = get_kinit_path()
tmp_dir = Script.get_tmp_dir()
conf_dir = "/etc/storm/conf"
storm_user = config['configurations']['storm-env']['storm_user']
# Kerberos principal/keytab for the Storm UI; None when not configured.
storm_ui_principal = default('/configurations/storm-env/storm_ui_principal_name', None)
storm_ui_keytab = default('/configurations/storm-env/storm_ui_keytab', None)
| StarcoderdataPython |
6683537 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from protobuf.proto.grpcintegration import ControlMessagesProto_pb2 as protobuf_dot_proto_dot_grpcintegration_dot_ControlMessagesProto__pb2
from protobuf.proto.grpcintegration import EventNotificationProto_pb2 as protobuf_dot_proto_dot_grpcintegration_dot_EventNotificationProto__pb2
from protobuf.proto.grpcintegration import StatusProto_pb2 as protobuf_dot_proto_dot_grpcintegration_dot_StatusProto__pb2
from protobuf.proto.net.flow import FlowRuleProto_pb2 as protobuf_dot_proto_dot_net_dot_flow_dot_FlowRuleProto__pb2
from protobuf.proto.net.packet import OutboundPacketProto_pb2 as protobuf_dot_proto_dot_net_dot_packet_dot_OutboundPacketProto__pb2
from protobuf.proto.net.topology import TopologyGraphProto_pb2 as protobuf_dot_proto_dot_net_dot_topology_dot_TopologyGraphProto__pb2
from protobuf.proto.net.topology import TopologyProto_pb2 as protobuf_dot_proto_dot_net_dot_topology_dot_TopologyProto__pb2
# Generated by the gRPC protocol compiler — code must stay in sync with the
# .proto definitions; only comments are added here.
class PacketOutServiceStub(object):
    """Corresponds to PacketOut service
    """

    def __init__(self, channel):
        """Constructor.
        Args:
            channel: A grpc.Channel.
        """
        # Unary-unary RPC: OutboundPacketProto -> PacketOutStatus.
        self.emit = channel.unary_unary(
            '/grpcintegration.PacketOutService/emit',
            request_serializer=protobuf_dot_proto_dot_net_dot_packet_dot_OutboundPacketProto__pb2.OutboundPacketProto.SerializeToString,
            response_deserializer=protobuf_dot_proto_dot_grpcintegration_dot_StatusProto__pb2.PacketOutStatus.FromString,
        )


class PacketOutServiceServicer(object):
    """Corresponds to PacketOut service
    """

    def emit(self, request, context):
        """Server-side handler stub; the generated base raises UNIMPLEMENTED."""
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')


def add_PacketOutServiceServicer_to_server(servicer, server):
    """Register `servicer`'s handlers for grpcintegration.PacketOutService on `server`."""
    rpc_method_handlers = {
        'emit': grpc.unary_unary_rpc_method_handler(
            servicer.emit,
            request_deserializer=protobuf_dot_proto_dot_net_dot_packet_dot_OutboundPacketProto__pb2.OutboundPacketProto.FromString,
            response_serializer=protobuf_dot_proto_dot_grpcintegration_dot_StatusProto__pb2.PacketOutStatus.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'grpcintegration.PacketOutService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# Generated by the gRPC protocol compiler — code must stay in sync with the
# .proto definitions; only comments are added here.
class FlowServiceStub(object):
    """Corresponds to FlowRule service
    """

    def __init__(self, channel):
        """Constructor.
        Args:
            channel: A grpc.Channel.
        """
        # Both RPCs are unary-unary: FlowRuleProto -> FlowServiceStatus.
        self.addFlow = channel.unary_unary(
            '/grpcintegration.FlowService/addFlow',
            request_serializer=protobuf_dot_proto_dot_net_dot_flow_dot_FlowRuleProto__pb2.FlowRuleProto.SerializeToString,
            response_deserializer=protobuf_dot_proto_dot_grpcintegration_dot_StatusProto__pb2.FlowServiceStatus.FromString,
        )
        self.removeFlow = channel.unary_unary(
            '/grpcintegration.FlowService/removeFlow',
            request_serializer=protobuf_dot_proto_dot_net_dot_flow_dot_FlowRuleProto__pb2.FlowRuleProto.SerializeToString,
            response_deserializer=protobuf_dot_proto_dot_grpcintegration_dot_StatusProto__pb2.FlowServiceStatus.FromString,
        )


class FlowServiceServicer(object):
    """Corresponds to FlowRule service
    """

    def addFlow(self, request, context):
        """Server-side handler stub; the generated base raises UNIMPLEMENTED."""
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def removeFlow(self, request, context):
        """Server-side handler stub; the generated base raises UNIMPLEMENTED."""
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')


def add_FlowServiceServicer_to_server(servicer, server):
    """Register `servicer`'s handlers for grpcintegration.FlowService on `server`."""
    rpc_method_handlers = {
        'addFlow': grpc.unary_unary_rpc_method_handler(
            servicer.addFlow,
            request_deserializer=protobuf_dot_proto_dot_net_dot_flow_dot_FlowRuleProto__pb2.FlowRuleProto.FromString,
            response_serializer=protobuf_dot_proto_dot_grpcintegration_dot_StatusProto__pb2.FlowServiceStatus.SerializeToString,
        ),
        'removeFlow': grpc.unary_unary_rpc_method_handler(
            servicer.removeFlow,
            request_deserializer=protobuf_dot_proto_dot_net_dot_flow_dot_FlowRuleProto__pb2.FlowRuleProto.FromString,
            response_serializer=protobuf_dot_proto_dot_grpcintegration_dot_StatusProto__pb2.FlowServiceStatus.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'grpcintegration.FlowService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# Generated by the gRPC protocol compiler — code must stay in sync with the
# .proto definitions; only comments are added here.
class TopoServiceStub(object):
    """Corresponds to Topology service
    """

    def __init__(self, channel):
        """Constructor.
        Args:
            channel: A grpc.Channel.
        """
        # All four RPCs are unary-unary.
        self.getGraph = channel.unary_unary(
            '/grpcintegration.TopoService/getGraph',
            request_serializer=protobuf_dot_proto_dot_grpcintegration_dot_ControlMessagesProto__pb2.Empty.SerializeToString,
            response_deserializer=protobuf_dot_proto_dot_net_dot_topology_dot_TopologyGraphProto__pb2.TopologyGraphProto.FromString,
        )
        self.currentTopology = channel.unary_unary(
            '/grpcintegration.TopoService/currentTopology',
            request_serializer=protobuf_dot_proto_dot_grpcintegration_dot_ControlMessagesProto__pb2.Empty.SerializeToString,
            response_deserializer=protobuf_dot_proto_dot_net_dot_topology_dot_TopologyProto__pb2.TopologyProto.FromString,
        )
        self.getPaths = channel.unary_unary(
            '/grpcintegration.TopoService/getPaths',
            request_serializer=protobuf_dot_proto_dot_grpcintegration_dot_ControlMessagesProto__pb2.getPathRequest.SerializeToString,
            response_deserializer=protobuf_dot_proto_dot_grpcintegration_dot_ControlMessagesProto__pb2.Paths.FromString,
        )
        self.getHosts = channel.unary_unary(
            '/grpcintegration.TopoService/getHosts',
            request_serializer=protobuf_dot_proto_dot_grpcintegration_dot_ControlMessagesProto__pb2.Empty.SerializeToString,
            response_deserializer=protobuf_dot_proto_dot_grpcintegration_dot_ControlMessagesProto__pb2.Hosts.FromString,
        )


class TopoServiceServicer(object):
    """Corresponds to Topology service
    """

    def getGraph(self, request, context):
        """Server-side handler stub; the generated base raises UNIMPLEMENTED."""
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def currentTopology(self, request, context):
        """Server-side handler stub; the generated base raises UNIMPLEMENTED."""
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getPaths(self, request, context):
        """Server-side handler stub; the generated base raises UNIMPLEMENTED."""
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def getHosts(self, request, context):
        """Server-side handler stub; the generated base raises UNIMPLEMENTED."""
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')


def add_TopoServiceServicer_to_server(servicer, server):
    """Register `servicer`'s handlers for grpcintegration.TopoService on `server`."""
    rpc_method_handlers = {
        'getGraph': grpc.unary_unary_rpc_method_handler(
            servicer.getGraph,
            request_deserializer=protobuf_dot_proto_dot_grpcintegration_dot_ControlMessagesProto__pb2.Empty.FromString,
            response_serializer=protobuf_dot_proto_dot_net_dot_topology_dot_TopologyGraphProto__pb2.TopologyGraphProto.SerializeToString,
        ),
        'currentTopology': grpc.unary_unary_rpc_method_handler(
            servicer.currentTopology,
            request_deserializer=protobuf_dot_proto_dot_grpcintegration_dot_ControlMessagesProto__pb2.Empty.FromString,
            response_serializer=protobuf_dot_proto_dot_net_dot_topology_dot_TopologyProto__pb2.TopologyProto.SerializeToString,
        ),
        'getPaths': grpc.unary_unary_rpc_method_handler(
            servicer.getPaths,
            request_deserializer=protobuf_dot_proto_dot_grpcintegration_dot_ControlMessagesProto__pb2.getPathRequest.FromString,
            response_serializer=protobuf_dot_proto_dot_grpcintegration_dot_ControlMessagesProto__pb2.Paths.SerializeToString,
        ),
        'getHosts': grpc.unary_unary_rpc_method_handler(
            servicer.getHosts,
            request_deserializer=protobuf_dot_proto_dot_grpcintegration_dot_ControlMessagesProto__pb2.Empty.FromString,
            response_serializer=protobuf_dot_proto_dot_grpcintegration_dot_ControlMessagesProto__pb2.Hosts.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'grpcintegration.TopoService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# Generated by the gRPC protocol compiler — code must stay in sync with the
# .proto definitions; only comments are added here.
class EventNotificationStub(object):
    """Corresponds to EventNotification service
    """

    def __init__(self, channel):
        """Constructor.
        Args:
            channel: A grpc.Channel.
        """
        # Unary-unary registration followed by a server-streaming subscription.
        self.register = channel.unary_unary(
            '/grpcintegration.EventNotification/register',
            request_serializer=protobuf_dot_proto_dot_grpcintegration_dot_EventNotificationProto__pb2.RegistrationRequest.SerializeToString,
            response_deserializer=protobuf_dot_proto_dot_grpcintegration_dot_EventNotificationProto__pb2.RegistrationResponse.FromString,
        )
        self.onEvent = channel.unary_stream(
            '/grpcintegration.EventNotification/onEvent',
            request_serializer=protobuf_dot_proto_dot_grpcintegration_dot_EventNotificationProto__pb2.Topic.SerializeToString,
            response_deserializer=protobuf_dot_proto_dot_grpcintegration_dot_EventNotificationProto__pb2.Notification.FromString,
        )


class EventNotificationServicer(object):
    """Corresponds to EventNotification service
    """

    def register(self, request, context):
        """Server-side handler stub; the generated base raises UNIMPLEMENTED."""
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def onEvent(self, request, context):
        """Server-side handler stub; the generated base raises UNIMPLEMENTED."""
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')


def add_EventNotificationServicer_to_server(servicer, server):
    """Register `servicer`'s handlers for grpcintegration.EventNotification on `server`."""
    rpc_method_handlers = {
        'register': grpc.unary_unary_rpc_method_handler(
            servicer.register,
            request_deserializer=protobuf_dot_proto_dot_grpcintegration_dot_EventNotificationProto__pb2.RegistrationRequest.FromString,
            response_serializer=protobuf_dot_proto_dot_grpcintegration_dot_EventNotificationProto__pb2.RegistrationResponse.SerializeToString,
        ),
        'onEvent': grpc.unary_stream_rpc_method_handler(
            servicer.onEvent,
            request_deserializer=protobuf_dot_proto_dot_grpcintegration_dot_EventNotificationProto__pb2.Topic.FromString,
            response_serializer=protobuf_dot_proto_dot_grpcintegration_dot_EventNotificationProto__pb2.Notification.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'grpcintegration.EventNotification', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
| StarcoderdataPython |
3355203 | <filename>code/trial/main.py
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import glob
import os
# +
import sys
from collections import Counter
import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
from keras import regularizers
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from keras.layers import Dense, Dropout, Flatten, Input
from keras.layers.recurrent import LSTM
from keras.models import Sequential, model_from_json
from keras.optimizers import Adam
from keras.utils import np_utils
from mutagen.easyid3 import EasyID3
# -
def makeArtistList(path="/home/Takumi/data/", composers=('Beethoven', 'Haydn', 'Bach')):
    """Build the class-label list for every audio clip on disk.

    For each composer (in order) the directory ``<path><name>/cut/`` is
    scanned and that composer's index is appended once per file found, so
    the labels line up with the per-composer clip groups loaded later.

    Args:
        path: Root data directory; must end with a path separator, matching
            the previously hard-coded default.
        composers: Ordered sub-directory names; a name's position in this
            sequence becomes its class label.

    Returns:
        list[int]: one label per audio file, grouped by composer.
    """
    artist_list = []
    for label, name in enumerate(composers):
        print('{} process start.'.format(name))
        clip_dir = path + name + '/cut/'
        # One identical label per file in the composer's "cut" directory.
        artist_list.extend([label] * len(os.listdir(clip_dir)))
    # Print per-class counts as a quick sanity check of the class balance.
    print(Counter(artist_list))
    return artist_list
# One class label per clip, in the composer order scanned by makeArtistList().
artist_list = makeArtistList()
# +
def readwave(file):
    """Load an audio file and return ``(samples, sample_rate)`` via librosa."""
    return librosa.load(file)
def MelFilterBank(y, sr):
    """Compute the mel-scaled spectrogram of signal *y* sampled at *sr*."""
    spectrogram = librosa.feature.melspectrogram(y=y, sr=sr)
    return spectrogram
# -
# Collect the WAV paths for every composer, in the same composer order as
# the labels produced by makeArtistList() above.
path = "/home/Takumi/data/"
name_list = ['Beethoven', 'Haydn', 'Bach']
data_list = []
for name in name_list:
    print('{} process start.'.format(name))
    p = path + name + '/cut/'
    # sorted() keeps the clip order deterministic across runs.
    wav_list = sorted(list(glob.glob(p + "*.wav")))
    data_list.extend(wav_list)
print(len(data_list))
# Build the feature tensor: one 128x1292 mel spectrogram per clip.
# NOTE(review): the previous version grew `data_x` with np.r_ inside the loop
# and deleted a dummy first slice afterwards; that copies the whole
# accumulated array every iteration (accidentally quadratic). Collect the
# per-clip spectrograms in a list and stack once at the end instead.
features = []
for i in range(len(data_list)):
    wav, sr = readwave(data_list[i])
    # Time axis of the clip; unused here, but kept because later cleanup
    # code (`del t`) expects the name to exist.
    t = np.arange(0, len(wav)) / sr
    m = MelFilterBank(wav, sr)
    features.append(m)
    if not i % 100:
        print('processed {} / {} clips'.format(i + 1, len(data_list)))
if features:
    # float64 matches what the old np.r_ accumulation produced (the float64
    # np.empty seed dominated the dtype promotion).
    data_x = np.stack(features).astype(np.float64)
else:
    data_x = np.empty((0, 128, 1292))
print(data_x.shape)
def makeLSTMmodel(hidden=128, input_shape=(128, 1292,)):
    """Build and compile the two-layer LSTM classifier (3 softmax outputs)."""
    network = Sequential([
        LSTM(units=hidden, dropout=0.2, input_shape=input_shape, return_sequences=True),
        LSTM(units=hidden, dropout=0.2, input_shape=input_shape, kernel_regularizer=regularizers.l2(0.001)),
        Dense(3, activation='softmax'),
    ])
    network.compile(loss="categorical_crossentropy", optimizer=Adam(lr=1e-3), metrics=['accuracy'])
    return network
# One-hot encode the labels for the 3-class softmax output.
data_y = np.array(artist_list)
# data_y = np.delete(data_y, 0)
data_y = np_utils.to_categorical(data_y, 3)
print(data_y.shape)
model = makeLSTMmodel()
model.summary()
# Stop training once validation loss plateaus for 10 epochs.
early_stopping = EarlyStopping(monitor="val_loss", patience=10, mode="auto")
# reduce_lr = ReduceLROnPlateau(monitor='val_loss', patience=5, factor=0.5, min_lr=0.0001, verbose=1)
# history = model.fit(data_x, data_y, batch_size=64, epochs=100, validation_split=0.2, callbacks=[early_stopping, reduce_lr])
history = model.fit(data_x, data_y, batch_size=64, epochs=100, validation_split=0.2, callbacks=[early_stopping])
# Persist architecture (JSON) and weights (HDF5) separately.
with open('model.json', 'w') as json_file:
    json_file.write(model.to_json())
model.save_weights('main1.h5')
# Round-trip the saved model and continue training on the reloaded copy.
model_r = model_from_json(open('model.json', 'r').read())
model_r.load_weights('main1.h5')
model_r.compile(loss="categorical_crossentropy", optimizer=Adam(lr=1e-3), metrics=['accuracy'])
history_r = model_r.fit(data_x, data_y, batch_size=64, epochs=100, validation_split=0.2, callbacks=[early_stopping])
# Free the first model/run before printing the memory table below.
del model
del early_stopping
# del reduce_lr
del history
# Dump a table of remaining notebook variables and their (shallow —
# sys.getsizeof does not follow references) memory footprint.
print("{}{: >25}{}{: >10}{}".format('|','Variable Name','|','Memory','|'))
print(" ------------------------------------ ")
for var_name in dir():
    if not var_name.startswith("_"):
        # eval() is only applied to names returned by dir(), i.e. our own
        # notebook variables, not external input.
        print("{}{: >25}{}{: >10}{}".format('|',var_name,'|',sys.getsizeof(eval(var_name)),'|'))
# -
del m
del t
del wav
# +
# %matplotlib inline
def plot_history(history):
    """Plot training/validation accuracy (top) and loss (bottom) curves."""
    plt.figure(figsize=(8, 10))
    plt.subplots_adjust(hspace=0.3)
    panels = [
        (1, 'accuracy', 'val_accuracy', 'accuracy', 'val_acc', 'model accuracy', 'accuracy', 'lower right'),
        (2, 'loss', 'val_loss', 'loss', 'val_loss', 'model loss', 'loss', 'upper right'),
    ]
    for pos, train_key, val_key, train_label, val_label, title, ylabel, legend_loc in panels:
        plt.subplot(2, 1, pos)
        plt.plot(history.history[train_key], '-', label=train_label)
        plt.plot(history.history[val_key], '-', label=val_label)
        plt.title(title)
        plt.xlabel('epoch')
        plt.ylabel(ylabel)
        plt.legend(loc=legend_loc)
    plt.show()
plot_history(history_r)
# NOTE(review): savefig is called after plt.show() inside plot_history; with
# some backends the figure is cleared by show(), leaving graph.png blank —
# confirm, and save before show() if so.
plt.savefig('graph.png')
# -
# Persist the retrained model's architecture and weights.
with open('model.json', 'w') as json_file:
    json_file.write(model_r.to_json())
model_r.save_weights('main1.h5')
| StarcoderdataPython |
9686802 | # -*- coding: utf-8 -*-
# Sample Chinese-language news article (ECB monetary-policy coverage) kept
# verbatim as test input; the string content is runtime data — do not edit.
TestText="""欧洲央行再次放松其货币政策。欧洲央行周四宣布全面下调三大主要利率。其中,主要再融资利率(即通常所称的主导利率)从原先的0.05%首次降至0%; 边际贷款利率将从0.3%降至0.25%,而边际存款利率则由-0.3%下调10个基点至-0.4%,这意味着银行必须为隔夜存款付更高费用。上述措施将 自3月16日生效。
此外,欧洲央行还宣布将充满争议的每月资产购买规模从600亿欧元扩大至800亿欧元。欧洲央行刚刚在去年12月将此一"量化宽松"措施(简称QE)延长6个月至2017年3月。自今年4月起,欧洲央行每月将购债800亿欧元。
欧洲央行希望通过此一系列措施提振经济,并使通胀率回归目标水平。今年2月,欧元区通胀率萎缩了0.2%。而欧洲央行的中期通胀目标是2%。
欧洲央行最新的宽松政策带动了DAX指数反弹。本周四,DAX上升2.5%至9968点,创下近两个月来的新高。与此同时,欧元对美元汇率从原先的1.0972下跌至1.0866。
"""
| StarcoderdataPython |
1688859 | <filename>test/unit/management_request_result_test.py
from .test_helper import argv_kiwi_tests
import sys
import mock
from mock import patch
from pytest import raises
from azurectl.management.request_result import RequestResult
import azurectl
from collections import namedtuple
from azurectl.azurectl_exceptions import (
AzureRequestTimeout,
AzureRequestError,
AzureRequestStatusError
)
class TestRequestResult:
    """Unit tests for azurectl.management.request_result.RequestResult."""

    def setup(self):
        # Fresh RequestResult (request id 42) and a stubbed Azure service
        # are created before every test.
        self.request_result = RequestResult(42)
        self.service = mock.Mock()

    @patch('azurectl.management.request_result.time.sleep')
    def test_status(self, mock_time):
        """status() polls the service once using the stored request id."""
        self.request_result.status(self.service)
        self.service.get_operation_status.assert_called_once_with(42)

    @patch('azurectl.management.request_result.time.sleep')
    def test_wait_for_request_completion_timeout(self, mock_time):
        """A request stuck 'InProgress' eventually raises AzureRequestTimeout."""
        MyStatus = namedtuple(
            'MyStatus',
            'status'
        )
        status = MyStatus(status='InProgress')
        self.service.get_operation_status.return_value = status
        with raises(AzureRequestTimeout):
            self.request_result.wait_for_request_completion(self.service)

    @patch('azurectl.management.request_result.time.sleep')
    def test_wait_for_request_completion_error(self, mock_time):
        """A 'Failed' status with error details raises AzureRequestError."""
        MyStatus = namedtuple(
            'MyStatus',
            'status error'
        )
        MyError = namedtuple(
            'MyError',
            'message code'
        )
        status = MyStatus(
            status='Failed', error=MyError(message='foo', code=1)
        )
        self.service.get_operation_status.return_value = status
        with raises(AzureRequestError):
            self.request_result.wait_for_request_completion(self.service)

    @patch('azurectl.management.request_result.time.sleep')
    def test_status_error(self, mock_time):
        """Service-level status failures propagate as AzureRequestStatusError."""
        self.service.get_operation_status.side_effect = AzureRequestStatusError
        with raises(AzureRequestStatusError):
            self.request_result.status(self.service)
| StarcoderdataPython |
12861974 | <filename>pypad/collab.py
from enum import Enum
from .dev import Dev
class Collab(Enum):
    """Numeric id of the collaboration event a card belongs to.

    Raw values not present in this enum resolve to UNKNOWN via ``_missing_``
    (and are logged so new collab ids can be added later).
    """
    UNKNOWN = -1
    NONE = 0
    RAGNAROK_ONLINE = 1
    TAIKO_NO_TATSUJIN = 2
    EMIL_CHRONICLE_ONLINE = 3
    GUNMA_NO_YABOU = 5
    CRYSTAL_DEFENDER = 6
    FAMITSU = 7
    PRINCESS_PUNT_SWEET = 8
    ANDROID = 9
    SHINRABANSHO_CHOCO = 10
    CAPYBARA_SAN = 11
    FREAK_TOWER = 12
    SENGOKU_TENKA_TRIGGER = 13
    EVANGELION = 14
    SEVEN_ELEVEN = 15
    CLASH_OF_CLANS = 16
    GROOVE_COASTER = 17
    RAGNAROK_ODYSSEY_ACE = 18
    DRAGONS_DOGMA_QUEST = 19
    TAKAOKA_CITY = 20
    MONSTER_HUNTER_4G = 21
    BATMAN = 22
    THIRTY_ONE_ICECREAM = 23
    ANGRY_BIRDS = 24
    PUZZLE_AND_DRAGONS_Z = 25
    HUNTER_X_HUNTER = 26
    SANRIO_CHARACTERS = 27
    PAD_BATTLE_TOURNAMENT = 28
    BEAMS = 29
    DRAGON_BALL = 30
    SAINT_SEIYA = 31
    ROAD_TO_DRAGON = 32
    DIVINE_GATE = 33
    SUMMONS_BOARD = 34
    PICOTTO_KINGDOM = 35
    BIKKURIMAN = 36
    ANGRY_BIRDS_EPIC = 37
    DC_UNIVERSE = 38
    CHIBI_1 = 39 # first round chibis - three kingdoms series
    FIST_OF_THE_NORTH_STAR = 40
    CHIBI_2 = 41 # second round chibis
    CHIBI_3 = 44 # third round chibis
    FINAL_FANTASY = 45
    GHOST_IN_THE_SHELL = 46
    DUEL_MASTERS = 47
    ATTACK_ON_TITAN = 48
    NINJA_HATTORI_KUN = 49
    SHONEN_SUNDAY = 50
    CROWS_X_WORST = 51 # TODO VERIFY NO OVERLAP WITH VOLTRON
    BLEACH = 52
    ACE_ATTORNEY = 55
    RUROUNI_KENSHIN = 56
    PEPPER = 57
    KINNIKUMAN = 58
    HIRUNE_HIME = 59
    MAGAZINE = 60
    MONSTER_HUNTER = 61
    KAIBUTSU_KUN = 62
    VOLTRON = 63 # TODO VERIFY NO OVERLAP WITH CROW X WORST
    FULLMETAL_ALCHEMIST = 65
    KING_OF_FIGHTERS = 66
    YU_YU_HAKUSHO = 67
    PERSONA = 68
    COCA_COLA = 69
    MAGIC_THE_GATHERING = 70
    CHRONO_MAGIA = 71
    SEVENTH_REBIRTH = 72
    CALCIO_FANTASISTA = 73
    POWER_PROS = 74
    GINTAMA = 75
    SWORD_ART_ONLINE = 76
    KAMEN_RIDER = 77
    YOKAI_WATCH_W = 78
    FATE_STAY_NIGHT = 79
    STREET_FIGHTER_V = 80
    UMAIBOU = 81
    MC_DONALDS = 82
    SHAMAN_KING = 83
    ERROR_999 = 999
    DRAGONBOUNDS_AND_DRAGON_CALLERS = 10001

    @classmethod
    def _missing_(cls, value):
        # Log rather than raise so unrecognized collab ids degrade gracefully.
        Dev.log(f'Unknown collab: {value}')
        return Collab.UNKNOWN
6424176 | from unittest import TestCase
from grobber.streams.mp4upload import Mp4Upload
from . import BasicStreamTest
class TestMp4Upload(BasicStreamTest, TestCase):
    """Runs the shared BasicStreamTest suite against the Mp4Upload stream."""

    # Stream implementation under test.
    CLS = Mp4Upload
    # Embed URLs handed to the shared suite (see BasicStreamTest for usage).
    TESTS = [
        "https://www.mp4upload.com/embed-h2yq5i3c7xo7.html",
        "https://www.mp4upload.com/embed-1934b3ai70n2.html",
        "https://www.mp4upload.com/embed-8yajb93uspci.html"
    ]
| StarcoderdataPython |
6627698 | <gh_stars>100-1000
import numpy as numpy
import cv2
class Canvas:
    """Composited display surface: a grid of image thumbnails, a large
    preview image, and top/bottom information bars."""

    #IMAGES_ACROSS = 32
    #IMAGES_DOWN = 12
    # Minimum pixel heights for the info bars, used when the proportional
    # sizes computed in __init__ would be smaller.
    BOTTOM_INFO_BAR_HEIGHT_MIN = 20
    TOP_INFO_BAR_HEIGHT_MIN = 150
    # Text-row slots in the top info bar used by the various messages.
    FPS_TEXT_ROW = 2
    TIMER_TEXT_ROW = 1
    INFERENCE_LABEL_TEXT_ROW = 1
    PAUSE_TEXT_ROW = 1
    LOADING_TEXT_ROW = 1
    DONE_COUNT_TEXT_ROW = 2
    PRESS_ANY_KEY_ROW = 3
    # Font used for every piece of text drawn on the canvas.
    TEXT_FONT = cv2.FONT_HERSHEY_SIMPLEX
    def __init__(self, canvas_width:int, canvas_height:int, images_down:int, images_across:int):
        """Pre-compute the layout (bars, grid cells, large preview, text rows)
        and allocate a black canvas of the requested size.

        Args:
            canvas_width: total canvas width in pixels.
            canvas_height: total canvas height in pixels.
            images_down: number of thumbnail rows in the grid.
            images_across: number of thumbnail columns in the grid.
        """
        self._images_down = images_down
        self._images_across = images_across
        self._grid_max_images = self._images_across * self._images_down
        self._grid_max_images_str = str(self._grid_max_images)
        self._text_scale = 1.0
        self._text_background_color = (40, 40, 40)
        self._text_color = (255, 255, 255) # white text
        # Measure a sample string once to derive per-row text metrics.
        text_size = cv2.getTextSize("ZZ", Canvas.TEXT_FONT, self._text_scale, 1)[0]
        self._text_height = text_size[1]
        self._text_bg_height = self._text_height + 14
        #total canvas dimensions
        self._canvas_width = canvas_width
        self._canvas_height = canvas_height
        # for now no use for bottom bar
        self._bottom_bar_height = int(self._canvas_height * 0.01)
        if (self._bottom_bar_height < Canvas.BOTTOM_INFO_BAR_HEIGHT_MIN):
            self._bottom_bar_height = Canvas.BOTTOM_INFO_BAR_HEIGHT_MIN
        self._bottom_bar_width = self._canvas_width
        self._top_bar_height = int(self._canvas_height * 0.1)
        if (self._top_bar_height < Canvas.TOP_INFO_BAR_HEIGHT_MIN):
            self._top_bar_height = Canvas.TOP_INFO_BAR_HEIGHT_MIN
        self._top_bar_width = canvas_width
        # top info bar
        self._top_bar_left = 0
        self._top_bar_right = self._top_bar_left + self._top_bar_width
        self._top_bar_top = 0
        self._top_bar_bottom = self._top_bar_top + self._top_bar_height
        # bottom info bar
        self._bottom_bar_left = 0
        self._bottom_bar_right = self._bottom_bar_left + self._bottom_bar_width
        self._bottom_bar_top = self._canvas_height - self._bottom_bar_height
        self._bottom_bar_bottom = self._bottom_bar_top + self._bottom_bar_height
        #grid dimensions
        self._grid_top = 0 + self._top_bar_height
        max_grid_height = self._canvas_height - self._bottom_bar_height - self._top_bar_height
        max_grid_width = self._canvas_width
        self._grid_line_thickness = 1
        #clear whole canvas to start
        self._canvas_image = numpy.zeros((self._canvas_height, self._canvas_width, 3), numpy.uint8)
        # Cell sizes are floored so the whole grid fits; the grid itself is
        # then centered horizontally.
        self._image_width = int((max_grid_width-1)/self._images_across)
        self._image_height = int((max_grid_height-1)/self._images_down)
        self._grid_width = self._images_across * self._image_width
        self._grid_left = int((self._canvas_width - self._grid_width)/2)
        self._grid_right = self._grid_left + self._grid_width
        self._grid_height = self._images_down * self._image_height
        self._grid_bottom = self._grid_top + self._grid_height
        # Fixed-size large preview image, centered horizontally in the top bar.
        self._large_image_width = 112
        self._large_image_height = 112
        self._large_image_left = int(canvas_width/2) - int(self._large_image_width/2)
        self._large_image_right = self._large_image_left + self._large_image_width
        self._large_image_top = 8
        self._large_image_bottom = self._large_image_top + self._large_image_height
        # add some padding for the text that goes on top bar so not right against the edge of window
        self._top_bar_text_left = self._top_bar_left + 10
        self._top_bar_text_top = self._top_bar_top + 10
        self._top_bar_text_left_width = (self._large_image_left - 10) - self._top_bar_text_left
        # Grid-line color (gray).
        self._grid_red = 128
        self._grid_green = 128
        self._grid_blue = 128
        self._draw_grid_lines()
        # "Done" rectangle color (white) and border thickness.
        self._done_red = 255
        self._done_green = 255
        self._done_blue = 255
        self._image_done_rect_thickness = 2
        # Cached resized copies of the source images (filled by load_images).
        self._grid_image_list = list()
        self._large_image_list = list()
        self._draw_lines_large_to_grid = False
        # Transparency applied to thumbnails that have not been marked done.
        # (NOTE(review): "gird" is a long-standing typo kept for compatibility.)
        self._gird_undone_image_transparency = 0.6
        # Vertical offsets of the three text rows in the top bar.
        self._num_bar_top_text_rows = 3
        self._top_bar_text_row_tops = [None] * self._num_bar_top_text_rows
        self._top_bar_text_row_tops[0] = 12
        self._top_bar_text_row_tops[1] = self._top_bar_text_row_tops[0] + self._text_bg_height + 10
        self._top_bar_text_row_tops[2] = self._top_bar_text_row_tops[1] + self._text_bg_height + 10
        self._done_count = 0
def load_images(self, image_list:list):
self._grid_image_list.clear()
self._large_image_list.clear()
transparency = self._gird_undone_image_transparency
for image_index in range(0, len(image_list)):
if (image_index >= self._grid_max_images):
break
temp_large_image = cv2.resize(image_list[image_index], (self._large_image_width, self._large_image_height))
self._large_image_list.append(temp_large_image)
temp_image = cv2.resize(image_list[image_index], (self._image_width, self._image_height))
self._grid_image_list.append(temp_image)
self._draw_grid_image(image_index, transparency)
return
    def reset_canvas(self):
        """Reset to the initial state: black canvas, grid lines, dimmed
        thumbnails, zero done-count."""
        #clear whole canvas to start
        self._canvas_image = numpy.zeros((self._canvas_height, self._canvas_width, 3), numpy.uint8)
        self._draw_grid_lines()
        self._draw_undone_images()
        self._done_count = 0
def _draw_undone_images(self):
for image_index in range(0, len(self._grid_image_list)):
if (image_index >= self._grid_max_images):
break
self._draw_grid_image(image_index, self._gird_undone_image_transparency)
    def _draw_grid_image(self, image_index:int, transparency:float):
        """Alpha-blend thumbnail `image_index` into its grid cell.

        `transparency` is the weight given to the existing canvas content;
        0.0 draws the thumbnail fully opaque. Indices beyond the grid are
        ignored.
        """
        if (image_index >= self._grid_max_images):
            return
        image_left, image_top, image_right, image_bottom = self._get_image_square(image_index)
        self._canvas_image[image_top:image_bottom, image_left:image_right] = \
            cv2.addWeighted(self._canvas_image[image_top:image_bottom, image_left:image_right], transparency,
                            self._grid_image_list[image_index], 1.0 - transparency, 0.0)
    def _draw_large_image(self, image_index:int, transparency:float):
        """Alpha-blend preview image `image_index` into the fixed large-image
        area of the top bar (same transparency semantics as
        _draw_grid_image). Indices beyond the grid are ignored.
        """
        if (image_index >= self._grid_max_images):
            return
        #image_left, image_top, image_right, image_bottom = self._get_image_square(image_index)
        self._canvas_image[self._large_image_top:self._large_image_bottom, self._large_image_left:self._large_image_right] = \
            cv2.addWeighted(self._canvas_image[self._large_image_top:self._large_image_bottom, self._large_image_left:self._large_image_right], transparency,
                            self._large_image_list[image_index], 1.0 - transparency, 0.0)
    def set_draw_lines(self, val:bool):
        # Toggle connector lines from the large preview down to grid cells
        # (consumed by mark_image_done).
        self._draw_lines_large_to_grid = val
def _draw_grid_lines(self):
blue = self._grid_blue
green = self._grid_green
red = self._grid_red
# lines going across
for line_index in range(0, self._images_down+1):
line_y = self._grid_top + (line_index * self._image_height)
cv2.line(self._canvas_image, (self._grid_left, line_y), (self._grid_right, line_y), (blue, green, red),
self._grid_line_thickness)
#lines going down
for line_index in range(0, self._images_across+1):
line_x = self._grid_left + (line_index * self._image_width)
cv2.line(self._canvas_image, (line_x, self._grid_top), (line_x, self._grid_top + ((self._images_down) * self._image_height)), (blue, green, red),
self._grid_line_thickness)
    def mark_image_done(self, image_index:int, label_text:str=None):
        """Mark grid cell `image_index` as processed.

        Draws the thumbnail and large preview fully opaque, boxes the cell,
        updates the done counter/label, and optionally draws a connector line
        from the large preview to the cell. Note the counter is incremented
        even for indices beyond the visible grid (which draw nothing).
        """
        self._done_count += 1
        if (image_index >= self._grid_max_images):
            return
        # transparency 0.0 == fully opaque redraw of the cached images.
        self._draw_grid_image(image_index, 0.0)
        self._draw_large_image(image_index, 0.0)
        image_left, image_top, image_right, image_bottom = self._get_image_square(image_index)
        cv2.rectangle(self._canvas_image, (image_left, image_top), (image_right, image_bottom),
                      (self._done_blue, self._done_green, self._done_red), self._image_done_rect_thickness)
        if (label_text != None):
            self.draw_inference_label(label_text)
        self.draw_done_count()
        if (self._draw_lines_large_to_grid) :
            # Blue line from the cell center up to the large preview's bottom center.
            cv2.line(self._canvas_image,
                     (image_left+int(self._image_width/2), image_top + int(self._image_height/2)),
                     (self._large_image_left + int(self._large_image_width/2), self._large_image_bottom), (255, 0, 0), 1)
def _get_image_square(self, image_index:int):
row = int(image_index / self._images_across)
col = image_index - (row * self._images_across)
image_left = self._grid_left + (self._image_width * col)
image_top = self._grid_top + (self._image_height * row)
image_right = image_left + self._image_width
image_bottom = image_top + self._image_height
return image_left, image_top, image_right, image_bottom
def get_canvas_image(self):
return self._canvas_image
def show_loading(self):
self._put_text_top_bar_left("Loading Images...", Canvas.LOADING_TEXT_ROW)
    def clear_loading(self):
        """Erase the loading message by filling its background rectangle."""
        left, top, right, bottom = self._get_top_bar_left_text_bg_rect(Canvas.LOADING_TEXT_ROW)
        cv2.rectangle(self._canvas_image, (left, top), (right, bottom),
                      self._text_background_color, -1)
    def pause_start(self):
        """Show the "Paused..." message on its top-bar row."""
        self._put_text_top_bar_left("Paused...", Canvas.PAUSE_TEXT_ROW)
    def press_any_key(self):
        """Prompt the user to press any key to continue."""
        self._put_text_top_bar_left("Press any key to continue...", Canvas.PRESS_ANY_KEY_ROW)
    def press_quit_key(self):
        """Prompt the user that pressing q quits (shares the press-any-key row)."""
        self._put_text_top_bar_left("Press q to quit.", Canvas.PRESS_ANY_KEY_ROW)
    def show_device(self, device: str):
        """Display the inference device name on the right side of the top bar."""
        self._put_text_top_bar_right("Device: " + device, Canvas.PRESS_ANY_KEY_ROW)
    def clear_press_any_key(self):
        """Erase the press-any-key / quit prompt."""
        left, top, right, bottom = self._get_top_bar_left_text_bg_rect(Canvas.PRESS_ANY_KEY_ROW)
        cv2.rectangle(self._canvas_image, (left, top), (right, bottom),
                      self._text_background_color, -1)
    def pause_stop(self):
        """Erase the "Paused..." message."""
        left, top, right, bottom = self._get_top_bar_left_text_bg_rect(Canvas.PAUSE_TEXT_ROW)
        cv2.rectangle(self._canvas_image, (left, top), (right, bottom),
                      self._text_background_color, -1)
    def draw_inference_label(self, label_text: str):
        """Display the latest inference label on its top-bar row."""
        self._put_text_top_bar_left(label_text, Canvas.INFERENCE_LABEL_TEXT_ROW)
def draw_done_count(self):
draw_str = "Images: " + str(self._done_count) +"/" + self._grid_max_images_str
self._put_text_top_bar_left(draw_str, Canvas.DONE_COUNT_TEXT_ROW)
    def hide_done_count(self):
        """Erase the completed-image counter."""
        left, top, right, bottom = self._get_top_bar_left_text_bg_rect(Canvas.DONE_COUNT_TEXT_ROW)
        cv2.rectangle(self._canvas_image, (left, top), (right, bottom),
                      self._text_background_color, -1)
    def clear_top_bar(self):
        """Blank the whole top bar by pasting a black image over its region."""
        clear_image = numpy.full((self._top_bar_height, self._top_bar_width, 3),
                                 (0, 0, 0), numpy.uint8)
        self._canvas_image[self._top_bar_top:self._top_bar_bottom, self._top_bar_left: self._top_bar_right] = clear_image
def show_fps(self, fps:float):
fps_str = "FPS: %2.1f" % fps
self._put_text_top_bar_right(fps_str, Canvas.FPS_TEXT_ROW)
    def hide_fps(self):
        """Erase the FPS readout."""
        left, top, right, bottom = self._get_top_bar_right_text_bg_rect(Canvas.FPS_TEXT_ROW)
        cv2.rectangle(self._canvas_image, (left, top), (right, bottom),
                      self._text_background_color, -1)
    def show_timer(self, time: float):
        """Display the elapsed-time readout (seconds) on the right side of the top bar."""
        time_str = "Elapsed: %3.1f" % time
        self._put_text_top_bar_right(time_str, Canvas.TIMER_TEXT_ROW)
    def hide_timer(self):
        """Erase the elapsed-time readout."""
        left, top, right, bottom = self._get_top_bar_right_text_bg_rect(Canvas.TIMER_TEXT_ROW)
        cv2.rectangle(self._canvas_image, (left, top), (right, bottom),
                      self._text_background_color, -1)
    def _put_text_top_bar_right(self, text: str, text_row: int = 1):
        """Draw *text* right-aligned on the given top-bar row, erasing any previous text first."""
        left, top, right, bottom = self._get_top_bar_right_text_bg_rect(text_row)
        cv2.rectangle(self._canvas_image, (left, top), (right, bottom),
                      self._text_background_color, -1)
        text_top_index = text_row - 1  # rows are 1-based; the row-tops list is 0-based
        # -1 left coordinate means "right-align against the canvas edge"
        self._put_text_on_canvas(text, -1, self._top_bar_text_row_tops[text_top_index], 0)
    def _put_text_top_bar_left(self, text: str, text_row: int = 1):
        """Draw *text* left-aligned on the given top-bar row, erasing any previous text first."""
        left, top, right, bottom = self._get_top_bar_left_text_bg_rect(text_row)
        cv2.rectangle(self._canvas_image, (left, top), (right, bottom),
                      self._text_background_color, -1)
        text_top_index = text_row - 1  # rows are 1-based; the row-tops list is 0-based
        self._put_text_on_canvas(text, self._top_bar_text_left, self._top_bar_text_row_tops[text_top_index], 0)
    def _get_top_bar_right_text_leftmost(self):
        """Leftmost x coordinate available for right-side top-bar text (just past the large image)."""
        return self._large_image_right + 10
    def _get_top_bar_left_text_leftmost(self):
        """Leftmost x coordinate for left-side top-bar text."""
        return self._top_bar_text_left
def _get_top_bar_right_text_bg_rect(self, text_row:int):
left = self._get_top_bar_right_text_leftmost()
text_top_index = text_row - 1
top = self._top_bar_text_row_tops[text_top_index] - 4
right = self._canvas_width - 10
bottom = top + self._text_bg_height
return (left, top, right, bottom)
def _get_top_bar_left_text_bg_rect(self, text_row:int):
left = self._get_top_bar_left_text_leftmost()
text_top_index = text_row - 1
top = self._top_bar_text_row_tops[text_top_index] - 4
right = self._top_bar_text_left + self._top_bar_text_left_width
bottom = top + self._text_bg_height
return (left, top, right, bottom)
    def _put_text_on_canvas(self, text: str, text_left: int, text_top: int, text_min_width: int):
        """Draw *text* at (text_left, text_top); text_left == -1 right-aligns against the canvas edge.

        NOTE(review): text_min_width is currently unused — confirm it can be dropped.
        """
        text_size = cv2.getTextSize(text, Canvas.TEXT_FONT, self._text_scale, 1)[0]
        text_width = text_size[0]
        text_height = text_size[1]
        if (text_left == -1):
            # right-align with a 10px margin
            display_image_width = self._canvas_image.shape[1]
            text_left = display_image_width - text_width - 10
        # cv2.putText anchors text at its bottom-left corner
        text_bottom = text_top + text_height
        cv2.putText(self._canvas_image, text, (text_left, text_bottom), cv2.FONT_HERSHEY_SIMPLEX, self._text_scale,
                    self._text_color, 1)
| StarcoderdataPython |
3304320 | <filename>release/scripts/presets/tracking_camera/Samsung_Galaxy_S4.py
import bpy
# Motion-tracking camera preset: Samsung Galaxy S4 rear camera.
camera = bpy.context.edit_movieclip.tracking.camera
camera.sensor_width = 4.8   # sensor width in millimeters
camera.units = 'MILLIMETERS'
camera.focal_length = 4.2   # focal length in millimeters
camera.pixel_aspect = 1
# no lens distortion correction by default
camera.k1 = 0.0
camera.k2 = 0.0
camera.k3 = 0.0
| StarcoderdataPython |
9767815 | <reponame>amitkakde007/To-Do-App_React<filename>Backend/api/views.py
# Django Imports
from django.shortcuts import render
from rest_framework.response import Response
from rest_framework import viewsets
from django.shortcuts import get_object_or_404
from rest_framework import status
# Local Imports
from api.models import Task
from api.serializers import TaskSerializers
# Create your views here.
class TaskViewSet(viewsets.ViewSet):
    """CRUD endpoints for :class:`Task`, serialized with ``TaskSerializers``."""

    serializers_class = TaskSerializers

    def list(self, request, *args, **kwargs):
        """Return every task."""
        queryset = Task.objects.all()
        serializer = self.serializers_class(queryset, many=True)
        return Response(serializer.data)

    def retrieve(self, request, pk, *args, **kwargs):
        """Return a single task or 404."""
        queryset = get_object_or_404(Task, pk=pk)
        serializer = self.serializers_class(queryset)
        return Response(serializer.data)

    def create(self, request, *args, **kwargs):
        """Validate the payload and store a new task."""
        serializer = self.serializers_class(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response('Data added successfully')
        # surface the validation problems instead of an empty 400 body
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def update(self, request, pk, *args, **kwargs):
        """Replace an existing task with the request payload."""
        task = Task.objects.get(id=pk)
        serializer = self.serializers_class(instance=task, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response('Data added successfully')
        # bug fix: DRF serializers expose ".errors" (plural); ".error" raised AttributeError
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    def delete(self, request, pk, *args, **kwargs):
        """Remove a task by primary key."""
        queryset = Task.objects.get(id=pk)
        queryset.delete()
        return Response('Deleted successfully')
| StarcoderdataPython |
12820190 | from django.conf import settings
from django.contrib.auth.models import User
from django.core.validators import MinValueValidator, MaxValueValidator
from django.db import models, transaction
from guardian.shortcuts import assign_perm
from model_utils.models import TimeStampedModel
class Currency(TimeStampedModel):
    """A supported currency, identified by a unique name and slug symbol."""
    name = models.CharField(unique=True, max_length=255)
    symbol = models.SlugField(unique=True, max_length=255)

    class Meta:
        verbose_name_plural = "currencies"

    def __str__(self):
        return self.name
class Balance(TimeStampedModel):
    """Per-user holdings of one currency.

    ``amount`` is the spendable total; ``locked_amount`` is reserved by
    in-flight deposits/withdrawals until they confirm or are canceled.
    """
    currency = models.ForeignKey(
        Currency,
        related_name='balances',
        on_delete=models.CASCADE,
    )
    user = models.ForeignKey(
        User,
        related_name='balances',
        on_delete=models.CASCADE,
    )
    amount = models.DecimalField(
        max_digits=30,
        decimal_places=9,
        default=0.0,
        validators=[MinValueValidator(0), MaxValueValidator(2**64)]
    )
    locked_amount = models.DecimalField(
        max_digits=30,
        decimal_places=9,
        default=0.0,
        validators=[MinValueValidator(0), MaxValueValidator(2**64)]
    )

    def __str__(self):
        return '{}, total: {}, locked: {}, user: {}'.format(
            self.currency.symbol,
            self.amount,
            self.locked_amount,
            self.user.username
        )

    @transaction.atomic
    def save(self, *args, **kwargs):
        """Needed to manually run validators on amounts."""
        # full_clean runs validators
        self.full_clean()
        return super().save(*args, **kwargs)
class Deposit(TimeStampedModel):
    """An incoming transfer into a :class:`Balance`.

    Life cycle: "awaiting transaction signature" -> "awaiting confirmation"
    (amount locked on the balance) -> "finished" (locked amount moved into
    the spendable amount once REQUIRED_CONFIRMATIONS is reached).
    """
    STATUSES = (
        ('awaiting transaction signature', 'awaiting transaction signature'),
        ('awaiting confirmation', 'awaiting confirmation'),
        ('finished', 'finished'),
        ('canceled', 'canceled'),
    )
    balance = models.ForeignKey(
        Balance,
        related_name='deposits',
        on_delete=models.CASCADE,
    )
    amount = models.DecimalField(
        max_digits=30,
        decimal_places=9,
        default=0.0,
        validators=[MinValueValidator(0), MaxValueValidator(2**64)]
    )
    status = models.CharField(max_length=255, choices=STATUSES)
    confirmations = models.IntegerField(
        validators=[MinValueValidator(0)],
        default=0
    )
    # tx_slate_id is needed in case when we want to cancel the deposit after the
    # first step of RSR (eg. when new deposit is initiated)
    tx_slate_id = models.CharField(unique=True, max_length=255)
    # we store kernel excess to update number of confirmations
    kernel_excess = models.CharField(
        unique=True, null=True, blank=True, max_length=255)

    class Meta:
        ordering = ['created']

    def __str__(self):
        return '{}, amount: {}, status: {}, user:{}'.format(
            self.balance.currency.symbol,
            self.amount,
            self.status,
            self.balance.user.username
        )

    @transaction.atomic
    def save(self, *args, **kwargs):
        """On deposit create set permissions; on update advance the state machine.

        Status transitions drive balance bookkeeping: signing locks the
        amount; reaching REQUIRED_CONFIRMATIONS releases the lock into the
        spendable amount and marks the deposit finished.
        """
        created = self.pk is None
        if not created:
            current_deposit = Deposit.objects.get(pk=self.pk)
            if (
                current_deposit.status == 'awaiting transaction signature' and
                self.status == 'awaiting confirmation'
            ):
                # finished the transaction, lock amount in balance
                balance = self.balance
                balance.locked_amount = balance.locked_amount + self.amount
                balance.save()
            elif (
                self.status == 'awaiting confirmation' and
                self.confirmations == settings.REQUIRED_CONFIRMATIONS
            ):
                self.status = 'finished'
                # deposit completed, transfer locked amount to available amount
                balance = self.balance
                balance.locked_amount = balance.locked_amount - self.amount
                balance.amount = balance.amount + self.amount
                balance.save()
        # full_clean runs validators
        self.full_clean()
        res = super().save(*args, **kwargs)
        if created:
            assign_perm('api.view_deposit', self.balance.user, self)
        return res

    @transaction.atomic
    def delete(self, **kwargs):
        """Delete the deposit, releasing any amount it still has locked."""
        # we need to remove locked amount from balance if anything is locked
        # NOTE: this can also be called on an already confirmed deposit in
        # which case nothing is locked
        if self.status == 'awaiting confirmation':
            # the deposit's amount is locked in its balance — release it
            balance = self.balance
            balance.locked_amount = balance.locked_amount - self.amount
            balance.save()
            # NOTE: we should cancel tx here, but it's more explicit to do it
            # in the view. The downside is that when we delete it through a
            # shell we need to manually cancel the transaction
        return super().delete(**kwargs)
class Withdrawal(TimeStampedModel):
    """An outgoing transfer from a :class:`Balance`.

    Mirrors :class:`Deposit`, but signing moves the amount from the
    spendable balance into the locked amount, and confirmation removes the
    locked amount entirely.
    """
    STATUSES = (
        ('awaiting transaction signature', 'awaiting transaction signature'),
        ('awaiting confirmation', 'awaiting confirmation'),
        ('finished', 'finished'),
        ('canceled', 'canceled'),
    )
    balance = models.ForeignKey(
        Balance,
        related_name='withdrawals',
        on_delete=models.CASCADE,
    )
    amount = models.DecimalField(
        max_digits=30,
        decimal_places=9,
        default=0.0,
        validators=[MinValueValidator(0), MaxValueValidator(2**64)]
    )
    status = models.CharField(max_length=255, choices=STATUSES)
    confirmations = models.IntegerField(
        validators=[MinValueValidator(0)],
        default=0
    )
    # tx_slate_id is needed in case when we want to cancel the withdrawal after
    # the first step of SRS (eg. when new withdrawal is initiated)
    tx_slate_id = models.CharField(unique=True, max_length=255)
    # we store kernel excess to update number of confirmations
    kernel_excess = models.CharField(
        unique=True, null=True, blank=True, max_length=255)

    class Meta:
        ordering = ['created']

    def __str__(self):
        return '{}, amount: {}, status: {}, user:{}'.format(
            self.balance.currency.symbol,
            self.amount,
            self.status,
            self.balance.user.username
        )

    @transaction.atomic
    def save(self, *args, **kwargs):
        """On withdrawal create set permissions; on update advance the state machine."""
        created = self.pk is None
        if not created:
            current_withdrawal = Withdrawal.objects.get(pk=self.pk)
            if (
                current_withdrawal.status == 'awaiting transaction signature' and
                self.status == 'awaiting confirmation'
            ):
                # finished the transaction, lock amount in balance
                balance = self.balance
                balance.locked_amount = balance.locked_amount + self.amount
                balance.amount = balance.amount - self.amount
                balance.save()
            elif (
                self.status == 'awaiting confirmation' and
                self.confirmations == settings.REQUIRED_CONFIRMATIONS
            ):
                self.status = 'finished'
                # withdrawal completed, remove locked amount
                balance = self.balance
                balance.locked_amount = balance.locked_amount - self.amount
                balance.save()
        # full_clean runs validators
        self.full_clean()
        res = super().save(*args, **kwargs)
        if created:
            assign_perm('api.view_withdrawal', self.balance.user, self)
        return res

    @transaction.atomic
    def delete(self, **kwargs):
        """Delete the withdrawal, returning any locked amount to the spendable balance."""
        # we need to remove locked amount from balance if anything is locked
        # NOTE: this can also be called on an already confirmed withdrawal in
        # which case nothing is locked
        if self.status == 'awaiting confirmation':
            # the withdrawal's amount is locked in its balance, return it to
            # the available balance
            balance = self.balance
            balance.locked_amount = balance.locked_amount - self.amount
            balance.amount = balance.amount + self.amount
            balance.save()
            # NOTE: we should cancel tx here, but it's more explicit to do it
            # in the view. The downside is that when we delete it through a
            # shell we need to manually cancel the transaction
        return super().delete(**kwargs)
| StarcoderdataPython |
318750 | <reponame>itgocode/qxf2-lambdas<filename>raspberry_notifier/credentials.py
import os

# All secrets are injected through environment variables so that none of
# them are committed to source control.  A missing variable raises KeyError
# at import time, which fails fast.
USERNAME = os.environ['USERNAME']
# bug fix: this line was mangled to "PASSWORD = <PASSWORD>['PASSWORD']"
# (dataset redaction); reconstructed to match the surrounding pattern.
PASSWORD = os.environ['PASSWORD']
PROXY_USERNAME = os.environ['PROXY_USERNAME']
PROXY_PASSWORD = os.environ['PROXY_PASSWORD']
DEVELOPER_KEY = os.environ['DEVELOPER_KEY']
8061709 | <gh_stars>1-10
import numpy as np
def white_noise(t):
"""White noise generated by sampling at random from a uniform distribution.
"""
N = len(t)
x = np.random.uniform(-1.0, 1.0, N)
return x
def brownian_noise(t):
"""Brown noise generated """
N = len(t)
dx = np.zeros(N)
for n in range(0, N):
dx[n] = np.random.normal(0, scale=np.sqrt(t[n]))
x = np.zeros(N)
for n in range(1, len(x)):
x[n] = x[n - 1] + dx[n]
return x
# TODO(Jason) GENERATE PINK NOISE
| StarcoderdataPython |
6673592 | # bot.py
import discord
import json
import datetime
from threading import Timer
# Read the bot token from disk; the context manager guarantees the file
# handle is closed (the original left it open for the life of the process).
with open("token.txt", "r") as token_file:
    token = token_file.read()
client = discord.Client()
@client.event
async def on_ready():
    """Log a confirmation once the Discord gateway connection is up."""
    print('Logged in as {0.user}'.format(client))
    # client.loop.create_task(checkTime())
# Module-wide bot state.
egging = False  # True while an egg timer is running
prefix = '!'  # command prefix expected at the start of every command
approved_egging_words = ['egg', 'huevo', 'tamago', 'たまご', '卵']  # "egg" in several languages
eggChannel = None  # channel id for egg announcements; set via the setChannel command
def deltaEgg():
    """Timer callback: clear the global egging flag so a new egg can start."""
    global egging
    egging = False
    print("Egg stopped")
@client.event
async def on_message(message):
    """Handle chat commands: start an "egg" timer or configure the egg channel."""
    global egging, prefix, approved_egging_words, eggChannel
    # ignore the bot's own messages
    if message.author == client.user:
        return
    msg = message.content
    # require "<prefix><command>" with a non-empty command
    if msg[:len(prefix)] != prefix or len(msg) <= len(prefix):
        return
    # NOTE(review): strips exactly one character; assumes the prefix is a
    # single char — confirm if prefix ever changes length.
    msg = msg[1:]
    msg = msg.split(' ')
    if msg[0] in approved_egging_words:
        # only one egg at a time
        if egging:
            return
        if eggChannel == None:
            await message.channel.send("Egg channel has not been set up! Use `" + prefix + "setChannel` to set up")
            return
        # elif len(message.author.voice.channel) >= 2:
        mav = message.author.voice
        if mav != None:
            # only start an egg when at least two people share the voice channel
            if len(mav.channel.members) >= 2:
                egging = True
                # the flag clears itself after 10 seconds
                t = Timer(10.0, deltaEgg)
                t.start()
                print(message.author.display_name + " started a " + msg[0] + ".")
    elif msg[0] == "setChannel":
        # remember the channel this command was issued in
        eggChannel = message.channel.id
        await message.channel.send("Egg channel set!")
@client.event
async def on_voice_state_update(member, before, after):
    """During an egg, announce the last remaining member when someone disconnects."""
    # NOTE(review): members_in_vc is never defined in this file — confirm it is dead.
    global egging, members_in_vc, eggChannel
    if egging:
        # after.channel is None when the member fully left voice
        if after.channel == None:
            channelPeople = before.channel.members
            if len(channelPeople) == 1:
                channel = client.get_channel(eggChannel)
                await channel.send("{} egg" .format(channelPeople[0].mention))
                egging = False

client.run(token)
9733194 | <reponame>astro-friedel/yggdrasil
import os
import copy
import shutil
import tempfile
import numpy as np
import unittest
from yggdrasil.metaschema.datatypes.tests import (
test_JSONObjectMetaschemaType as parent)
from yggdrasil.metaschema.datatypes import PlyMetaschemaType
from yggdrasil.tests import YggTestClassInfo, assert_raises, assert_equal
from yggdrasil.drivers.LPyModelDriver import LPyModelDriver
# Unit-cube geometry shared by all tests below: 8 vertex positions (rows
# after the transpose) with per-vertex RGB colors, plus a handful of edges.
vcoords = np.array([[0, 0, 0, 0, 1, 1, 1, 1],
                    [0, 0, 1, 1, 0, 0, 1, 1],
                    [0, 1, 1, 0, 0, 1, 1, 0]], 'float32').T
vcolors = np.array([[255, 255, 255, 255, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 255, 255, 255, 255]], 'uint8').T
eindexs = np.array([[0, 1, 2, 3, 2],
                    [1, 2, 3, 0, 0]], 'int32')
ecolors = np.array([[255, 255, 255, 255, 0],
                    [255, 255, 255, 255, 0],
                    [255, 255, 255, 255, 0]], 'uint8')
# Full PLY-style dictionary: cube faces (two triangles + five quads).
_test_value = {'material': 'fake_material', 'vertices': [], 'edges': [],
               'faces': [{'vertex_index': [0, 1, 2]},
                         {'vertex_index': [0, 2, 3]},
                         {'vertex_index': [7, 6, 5, 4]},
                         {'vertex_index': [0, 4, 5, 1]},
                         {'vertex_index': [1, 5, 6, 2]},
                         {'vertex_index': [2, 6, 7, 3]},
                         {'vertex_index': [3, 7, 4, 0]}]}
# Build vertex dicts: x/y/z coordinates plus red/green/blue color channels.
for i in range(len(vcoords)):
    ivert = {}
    for j, k in enumerate('xyz'):
        ivert[k] = vcoords[i, j]
    for j, k in enumerate(['red', 'green', 'blue']):
        ivert[k] = vcolors[i, j]
    _test_value['vertices'].append(ivert)
# Build edge dicts: the two endpoint indices plus a color.
for i in range(len(eindexs)):
    iedge = {}
    for j, k in enumerate(['vertex1', 'vertex2']):
        iedge[k] = eindexs[i, j]
    for j, k in enumerate(['red', 'green', 'blue']):
        iedge[k] = ecolors[i, j]
    _test_value['edges'].append(iedge)
# Face indices as numpy int32, matching what the PLY reader would produce.
for f in _test_value['faces']:
    f['vertex_index'] = [np.int32(x) for x in f['vertex_index']]
# A triangles-only subset (used where quads would be triangulated).
_test_value_simple = {'vertices': copy.deepcopy(_test_value['vertices']),
                      'faces': [{'vertex_index': [0, 1, 2]},
                                {'vertex_index': [0, 2, 3]}]}
# Same data but with int64 face indices, to test integer-width tolerance.
_test_value_int64 = copy.deepcopy(_test_value)
for f in _test_value_int64['faces']:
    f['vertex_index'] = [np.int64(x) for x in f['vertex_index']]
def test_create_schema():
    r"""Test create_schema: refusing to overwrite, and regenerating an identical schema."""
    assert_raises(RuntimeError, PlyMetaschemaType.create_schema, overwrite=False)
    temp = os.path.join(tempfile.gettempdir(), 'temp_schema')
    old_schema = PlyMetaschemaType.get_schema()
    try:
        # move the schema file away so get_schema() must regenerate it
        shutil.move(PlyMetaschemaType._schema_file, temp)
        new_schema = PlyMetaschemaType.get_schema()
        assert_equal(old_schema, new_schema)
    except BaseException:  # pragma: debug
        # restore the original schema file before propagating the failure
        shutil.move(temp, PlyMetaschemaType._schema_file)
        raise
    shutil.move(temp, PlyMetaschemaType._schema_file)
def test_translate_ply2fmt_errors():
    r"""Test that translate_ply2fmt rejects an unknown element name."""
    assert_raises(ValueError, PlyMetaschemaType.translate_ply2fmt, 'invalid')
def test_translate_ply2py_errors():
    r"""Test that translate_ply2py rejects an unknown PLY type name."""
    assert_raises(ValueError, PlyMetaschemaType.translate_ply2py, 'invalid')
def test_translate_py2ply_errors():
    r"""Test that translate_py2ply rejects a dtype with no PLY equivalent."""
    assert_raises(ValueError, PlyMetaschemaType.translate_py2ply, 'float128')
def test_singular2plural():
    r"""Test conversion from singular element names to plural ones and back."""
    pairs = [('face', 'faces'), ('vertex', 'vertices'),
             ('vertex_index', 'vertex_indices')]
    for s, p in pairs:
        # round-trip in both directions
        assert_equal(PlyMetaschemaType.singular2plural(s), p)
        assert_equal(PlyMetaschemaType.plural2singular(p), s)
    assert_raises(ValueError, PlyMetaschemaType.plural2singular, 'invalid')
class TestPlyDict(YggTestClassInfo):
    r"""Test for PlyDict class: element counting, merging, color maps and
    PlantGL round-trips, using the cube fixture defined above."""

    _mod = 'PlyMetaschemaType'
    _cls = 'PlyDict'
    _simple_test = _test_value_simple

    @property
    def mod(self):
        r"""str: Absolute name of module containing class to be tested."""
        return 'yggdrasil.metaschema.datatypes.%s' % self._mod

    @property
    def inst_kwargs(self):
        r"""dict: Keyword arguments for creating a class instance."""
        return _test_value

    def test_count_elements(self):
        r"""Test count_elements: invalid names raise; singular and plural agree."""
        self.assert_raises(ValueError, self.instance.count_elements, 'invalid')
        x = self.instance.count_elements('vertices')
        y = self.instance.count_elements('vertex')
        self.assert_equal(x, y)

    def test_mesh(self):
        r"""Test mesh property (smoke test only)."""
        self.instance.mesh

    def test_merge(self):
        r"""Test merging two ply objects, both copying and in place."""
        ply1 = copy.deepcopy(self.instance)
        ply2 = ply1.merge(self.instance)
        ply1.merge([self.instance], no_copy=True)
        self.assert_equal(ply1, ply2)

    def test_append(self):
        r"""Test appending ply objects onto a faceless base."""
        basic = self.import_cls(vertices=self.instance['vertices'],
                                faces=[])
        basic.append(self.instance)

    def test_apply_scalar_map(self, _as_obj=False):
        r"""Test applying a scalar colormap (triangular faces only)."""
        o = copy.deepcopy(self.instance)
        scalar_arr = np.arange(o.count_elements('faces')).astype('float')
        self.assert_raises(NotImplementedError, o.apply_scalar_map,
                           scalar_arr, scale_by_area=True)
        # keep only triangles; area scaling is only defined for them
        new_faces = []
        if _as_obj:
            for f in o['faces']:
                if len(f) == 3:
                    new_faces.append(f)
        else:
            for f in o['faces']:
                if len(f['vertex_index']) == 3:
                    new_faces.append(f)
        o['faces'] = new_faces
        for scale in ['linear', 'log']:
            # copying and in-place application must agree
            o2 = copy.deepcopy(o)
            o1 = o.apply_scalar_map(scalar_arr, scaling=scale, scale_by_area=True)
            o2.apply_scalar_map(scalar_arr, scaling=scale, scale_by_area=True,
                                no_copy=True)
            self.assert_equal(o1, o2)

    @unittest.skipIf(not LPyModelDriver.is_installed(), "LPy library not installed.")
    def test_to_from_scene(self, _as_obj=False):  # pragma: lpy
        r"""Test conversion to/from PlantGL scene."""
        o1 = self.instance
        cls = o1.__class__
        s = o1.to_scene(name='test')
        o2 = cls.from_scene(s)
        # Direct equivalence won't happen unless test is just for simple mesh
        # as faces with more than 3 vertices will be triangulated.
        cls = self.import_cls
        o1 = cls(self._simple_test)
        s = o1.to_scene(name='test')
        o2 = cls.from_scene(s)
        # import pprint
        # print('o2')
        # pprint.pprint(o2)
        # print('o1')
        # pprint.pprint(o1)
        self.assert_equal(o2, o1)

    def test_to_from_dict(self):
        r"""Test transformation to/from dict."""
        x = self.instance.as_dict()
        y = self.import_cls.from_dict(x)
        self.assert_equal(y, self.instance)

    def test_properties(self):
        r"""Test explicit exposure of specific element counts as properties
        against counts based on singular elements."""
        self.instance.bounds
        self.assert_equal(self.instance.nvert, self.instance.count_elements('vertex'))
        self.assert_equal(self.instance.nface, self.instance.count_elements('face'))
class TestPlyMetaschemaType(parent.TestJSONObjectMetaschemaType):
    r"""Test class for PlyMetaschemaType class: valid/invalid encodings and
    decodings built from the cube fixture."""

    _mod = 'PlyMetaschemaType'
    _cls = 'PlyMetaschemaType'

    def __init__(self, *args, **kwargs):
        super(TestPlyMetaschemaType, self).__init__(*args, **kwargs)
        self._value = _test_value
        self._fulldef = {'type': self.import_cls.name}
        self._typedef = {'type': self.import_cls.name}
        self._valid_encoded = [self._fulldef]
        # decodable forms: raw dict, PlyDict instance, extra keys, int64 indices
        self._valid_decoded = [self._value,
                               PlyMetaschemaType.PlyDict(**_test_value),
                               {'vertices': [], 'faces': [],
                                'alt_verts': copy.deepcopy(_test_value['vertices'])},
                               _test_value_int64]
        self._invalid_encoded = [{}]
        # invalid: a face referencing vertices that do not exist
        self._invalid_decoded = [{'vertices': [{k: 0.0 for k in 'xyz'}],
                                  'faces': [{'vertex_index': [0, 1, 2]}]}]
        self._compatible_objects = [(self._value, self._value, None)]
        self._encode_data_kwargs = {'comments': ['Test comment']}

    def test_decode_data_errors(self):
        r"""Test errors in decode_data for non-dict input."""
        self.assert_raises(ValueError, self.import_cls.decode_data, 'hello', None)
| StarcoderdataPython |
4820363 | <gh_stars>0
from fornax.utils.generics.factory import GenericFactory
from fornax.consts import Environment
from .executor import Executor
from .bash_shell import BashShellExecutor
from .docker import DockerExecutor
class ExecutorFactory(GenericFactory[Environment, Executor]):
    """Factory mapping an execution Environment to its Executor implementation."""

    def __init__(self) -> None:
        """Initialize executor factory with one builder per supported environment."""
        super().__init__(prototype_class=Executor)
        self.add_builder(Environment.BARE, BashShellExecutor)
        self.add_builder(Environment.DOCKER, DockerExecutor)
| StarcoderdataPython |
11250407 | <reponame>youqingkui/zhihufav<filename>add_queue.py<gh_stars>0
#!/usr/bin/env python
#coding=utf-8
from lib.get_fav_list import CheckList
from lib.db_conn import FavList, session
import random
if __name__ == '__main__':
    # Re-check every stored favorites list, forcing a refresh of each one.
    fav_list = session.query(FavList).all()
    for fav in fav_list:
        print(fav.api_url)
        # NOTE: a random "check_num" was computed here but never used; removed.
        c = CheckList(fav.api_url, fav.note_book, fav_id=fav.id, title=fav.title, force=True)
        c.get_list()
    session.close()
22819 | <reponame>thanhnhan311201/via-line-detection<filename>src/util.py
import torch.nn as nn
import cv2
import torch
from copy import deepcopy
import numpy as np
from torch.autograd import Variable
from torch.autograd import Function as F
from numpy.polynomial import Polynomial as P
# Parameters lives at a different path depending on whether this module is
# imported from the package root or from src/.
# NOTE(review): the bare except also hides unrelated import errors.
try:
    from parameters import Parameters
except:
    from src.parameters import Parameters
import math

# shared configuration singleton used throughout this module
p = Parameters()
###############################################################
##
## visualize
##
###############################################################
def visualize_points(image, x, y):
    """Debug view: draw each lane's (x, y) points on *image* and show it."""
    image = image
    # CHW -> HWC for OpenCV display
    image = np.rollaxis(image, axis=2, start=0)
    image = np.rollaxis(image, axis=2, start=0)#*255.0
    image = image.astype(np.uint8).copy()
    for k in range(len(y)):
        for i, j in zip(x[k], y[k]):
            # non-positive x marks a missing point
            if i > 0:
                image = cv2.circle(image, (int(i), int(j)), 2, p.color[1], -1)
    cv2.imshow("test2", image)
    cv2.waitKey(0)
def visualize_points_origin_size(x, y, test_image, ratio_w, ratio_h):
    """Debug view: draw lane points on a resized copy of *test_image* and show it.

    NOTE(review): the circles are drawn on a local resized copy, so the
    returned *test_image* is the unmodified input — confirm that is intended.
    """
    color = 0
    image = deepcopy(test_image)
    # CHW -> HWC for OpenCV display
    image = np.rollaxis(image, axis=2, start=0)
    image = np.rollaxis(image, axis=2, start=0)#*255.0
    image = image.astype(np.uint8).copy()
    image = cv2.resize(image, (int(p.x_size/ratio_w), int(p.y_size/ratio_h)))
    for i, j in zip(x, y):
        # a new color per lane
        color += 1
        for index in range(len(i)):
            cv2.circle(image, (int(i[index]), int(j[index])), 10, p.color[color], -1)
    cv2.imshow("test2", image)
    cv2.waitKey(0)
    return test_image
def visualize_gt(gt_point, gt_instance, ground_angle, image):
    """Debug view: draw ground-truth grid points on *image* and show it.

    NOTE(review): gt_instance and ground_angle are accepted but unused here.
    """
    # CHW -> HWC for OpenCV display
    image = np.rollaxis(image, axis=2, start=0)
    image = np.rollaxis(image, axis=2, start=0)#*255.0
    image = image.astype(np.uint8).copy()
    for y in range(p.grid_y):
        for x in range(p.grid_x):
            # channel 0 is the confidence; channels 1/2 are the offsets in the cell
            if gt_point[0][y][x] > 0:
                xx = int(gt_point[1][y][x]*p.resize_ratio+p.resize_ratio*x)
                yy = int(gt_point[2][y][x]*p.resize_ratio+p.resize_ratio*y)
                image = cv2.circle(image, (xx, yy), 10, p.color[1], -1)
    cv2.imshow("image", image)
    cv2.waitKey(0)
def visualize_regression(image, gt):
    """Debug view: draw the ground-truth regression points on *image* and show it."""
    # CHW -> HWC for OpenCV display
    image = np.rollaxis(image, axis=2, start=0)
    image = np.rollaxis(image, axis=2, start=0)*255.0
    image = image.astype(np.uint8).copy()
    for i in gt:
        for j in range(p.regression_size):  # gt
            # bug fix: 220 / p.regression_size is a float in Python 3 and
            # cv2.circle requires integer pixel coordinates
            y_value = int(p.y_size - (p.regression_size - j) * (220 / p.regression_size))
            if i[j] > 0:
                x_value = int(i[j] * p.x_size)
                image = cv2.circle(image, (x_value, y_value), 5, p.color[1], -1)
    cv2.imshow("image", image)
    cv2.waitKey(0)
def draw_points(x, y, image):
    """Draw each lane's points on *image*, one color per lane, and return it."""
    color_index = 0
    for i, j in zip(x, y):
        color_index += 1
        # clamp so we never index past the palette
        if color_index > 12:
            color_index = 12
        for index in range(len(i)):
            # print( (int(i[index]), int(j[index])))
            image = cv2.circle(image, (int(i[index]), int(j[index])), 5, p.color[color_index], -1)
    return image
def draw_poly(poly, image, color):
    """Draw the polynomial lane x = f(y) on the lower part of *image* and return it.

    NOTE(review): the local name "p" shadows the module-level Parameters
    singleton inside this function.
    """
    if poly == []:
        return image
    # sample the lower 8/20 of a 256-pixel-high frame
    y = np.linspace(256*12/20, 256, 10)
    p = np.poly1d(poly)
    # NOTE(review): ".roots[0]" picks an arbitrary root of p(x) - y = 0; for a
    # degree>=2 fit this may select a complex or off-screen branch — confirm.
    x = [(p - _y).roots[0] for _y in y ]
    draw_points = (np.asarray([x, y]).T).astype(np.int32)
    cv2.polylines(image, [draw_points], False, color,3)
    return image
###############################################################
##
## calculate
##
###############################################################
def adjust_fits(fits):
    """Sort lane polynomial fits left-to-right and keep at most three.

    Lanes are ranked by their x position evaluated at y = 20 (near the top
    of the crop) and the result is truncated to the three left-most lanes.

    Args:
        fits: numpy array of shape (num_lanes, num_coeffs); each row holds
            polynomial coefficients, highest degree first.

    Returns:
        numpy array with the sorted (and possibly truncated) fits.
    """
    min_y = 20  # reference row used to rank lanes horizontally
    values_x = np.array([np.poly1d(fit)(min_y) for fit in fits])
    # removed unused local "len_fit" from the original implementation
    fits_sorted = fits[np.argsort(values_x)]
    # keep only the three left-most lanes
    if len(fits_sorted) > 3:
        fits_sorted = fits_sorted[:3]
    return fits_sorted
def get_steer_angle(fits):
    """Compute a steering angle from left-to-right sorted lane fits.

    With two or more lanes the target is the midpoint of the two right-most
    lanes at y = 20; with one lane a fixed 150px offset is applied on the
    side opposite the previously tracked in-lane point (p.point_in_lane).
    Returns 0 when no lane is visible.
    """
    min_y = 20
    len_fit = fits.shape[0]
    if len_fit > 3:
        pass
    if len_fit >= 2:
        y = 20
        # midpoint of the two right-most lanes at the reference row
        x = (np.poly1d(fits[-1])(y) + np.poly1d(fits[-2])(y)) // 2
        return_value = errorAngle((x,y))
        #update point in lane
        temp_y = 200
        temp_x = (np.poly1d(fits[-1])(temp_y) + np.poly1d(fits[-2])(temp_y)) // 2
        p.point_in_lane = (temp_x,temp_y)
        return return_value
    if len_fit == 1:# missing 1 line
        y = 20
        avaiable_fit = np.poly1d(fits[0])
        x_avaiable = avaiable_fit(y)
        # decide whether the visible lane is left or right of the tracked point
        point_x = p.point_in_lane[0]
        point_y = p.point_in_lane[1]
        val = point_x - avaiable_fit(point_y)
        # print(val)
        if val > 0: # is right
            x = x_avaiable + 150
        else: # is left
            x = x_avaiable - 150
        return_value = errorAngle((x,y))
        return return_value
    return 0
def convert_to_original_size(x, y, ratio_w, ratio_h):
    """Rescale per-lane point coordinates back to the original image size.

    Args:
        x, y: lists of per-lane coordinate sequences.
        ratio_w, ratio_h: scale factors that were applied to width/height.

    Returns:
        Tuple (out_x, out_y) of rescaled per-lane coordinate lists.
    """
    scaled_x = [(np.array(lane) / ratio_w).tolist() for lane in x]
    scaled_y = [(np.array(lane) / ratio_h).tolist() for lane in y]
    return scaled_x, scaled_y
def get_closest_upper_point(x, y, point, n):
    """Return the *n* points strictly above *point* (smaller y) that are closest to it.

    Distance is squared Euclidean; the result is a list of (x, y) tuples
    ordered nearest first.
    """
    px, py = point[0], point[1]
    xs = np.array(x)
    ys = np.array(y)
    # keep only points above the reference point
    above = ys < py
    xs, ys = xs[above], ys[above]
    distances = (xs - px) ** 2 + (ys - py) ** 2
    order = np.argsort(distances, axis=0)
    xs = np.take_along_axis(xs, order, axis=0).tolist()
    ys = np.take_along_axis(ys, order, axis=0).tolist()
    return [(i, j) for i, j in zip(xs[:n], ys[:n])]
def sort_along_y(x, y):
    """Sort each lane's points by descending y coordinate.

    Returns new (out_x, out_y) lists of plain Python lists; the inputs are
    not modified.
    """
    sorted_x, sorted_y = [], []
    for lane_x, lane_y in zip(x, y):
        lane_x = np.array(lane_x)
        lane_y = np.array(lane_y)
        # ascending argsort, reversed -> descending order of y
        order = np.argsort(lane_y, axis=0)[::-1]
        sorted_x.append(lane_x[order].tolist())
        sorted_y.append(lane_y[order].tolist())
    return sorted_x, sorted_y
def sort_along_x(x, y):
    """Sort lanes left-to-right by their fitted x position at the topmost y.

    Each lane is fitted with a degree-2 polynomial x = f(y); lanes are then
    ordered by f(min(y)).

    Args:
        x, y: per-lane coordinate sequences (equal length per lane).

    Returns:
        Tuple of numpy arrays (x, y) reordered left-to-right.
    """
    temp = np.min(y)
    # np.min may return a scalar (no [0]) or an array; narrow the except
    # clause instead of the original bare "except:" and drop the leftover
    # debug print(order).
    try:
        min_y = temp[0]
    except (IndexError, TypeError):
        min_y = temp
    fits = np.array([np.polyfit(_y, _x, 2) for _x, _y in zip(x, y)])
    values_x = np.array([np.poly1d(fit)(min_y) for fit in fits])
    order = np.argsort(values_x)
    return np.array(x)[order], np.array(y)[order]
def sort_batch_along_y(target_lanes, target_h):
    """Sort every lane of every batch entry by descending y coordinate.

    Args:
        target_lanes: per-batch lists of per-lane x arrays.
        target_h: per-batch lists of per-lane y arrays.

    Returns:
        Tuple (out_x, out_y) with the same nesting, each lane sorted by
        descending y.
    """
    out_x, out_y = [], []
    for lanes, heights in zip(target_lanes, target_h):
        batch_x, batch_y = [], []
        for lane_x, lane_y in zip(lanes, heights):
            # ascending argsort, reversed -> descending order of y
            order = np.argsort(lane_y, axis=0)[::-1]
            batch_x.append(np.take_along_axis(lane_x, order, axis=0))
            batch_y.append(np.take_along_axis(lane_y, order, axis=0))
        out_x.append(batch_x)
        out_y.append(batch_y)
    return out_x, out_y
def errorAngle(point):
    """Steering angle in degrees toward *point* from the fixed car anchor.

    The anchor is (256, 254) for a 512-wide frame.  The raw bearing is
    divided by 2.5 and the result is clamped to [-25, 25]; anything whose
    scaled magnitude reaches 16 is treated as a hard turn (+/-25).
    """
    car_x, car_y = 512 // 2, 254
    target_x, target_y = point
    # straight ahead
    if target_x == car_x:
        return 0
    # purely lateral target: hard turn toward it
    if target_y == car_y:
        return -25 if target_x < car_x else 25
    pi = math.acos(-1.0)
    dx = target_x - car_x
    dy = car_y - target_y
    if dx < 0:
        angle = (math.atan(-dx / dy) * -180 / pi) / 2.5
    else:
        angle = (math.atan(dx / dy) * 180 / pi) / 2.5
    if angle >= 16 or angle <= -16:  # maybe must turn 90
        return 25 if angle > 0 else -25
    return angle
def calcul_speed(steer_angle):
    """Map a steering angle (degrees) to a target speed.

    NOTE(review): the first "if" catches every |angle| >= 1, so the
    "elif steer_angle >= 4 or steer_angle <= -4" branch below is
    unreachable dead code — confirm which scaling was intended.
    """
    max_speed = 70
    max_angle = 25
    # exact +/-10 degrees stops the car entirely
    if steer_angle == -10 or steer_angle == 10:
        return 0
    if steer_angle >= 1 or steer_angle <= -1:
        # linearly reduce speed with the magnitude of the turn
        if steer_angle > 0:
            return max_speed - (max_speed/max_angle)*steer_angle
        else:
            return max_speed + (max_speed/max_angle)*steer_angle
    elif steer_angle >= 4 or steer_angle <= -4:
        if steer_angle > 0:
            return 40 - (40/max_angle)*steer_angle
        else:
            return 40 + (30/max_angle)*steer_angle
    # elif steer_angle >= 10 or steer_angle <= -10:
    #     if steer_angle > 0:
    #         return max_speed - (max_speed/max_angle)*steer_angle
    #     else:
    #         return max_speed + (max_speed/max_angle)*steer_angle
    # if steer_angle >=0:
    #     return max_speed - (max_speed/max_angle)*steer_angle
    return max_speed
def clear_StatusObjs(StatusObjs):
    """Strip ignorable detection labels from each status list.

    Removes the first occurrence of each of 'i5', 'pne', 'car' and 'w65'
    from every inner list (the inner lists are mutated in place) and
    returns the list of cleaned lists.
    """
    cleaned = []
    for obj in StatusObjs:
        for label in ('i5', 'pne', 'car', 'w65'):
            if label in obj:
                obj.remove(label)
        cleaned.append(obj)
    return cleaned
| StarcoderdataPython |
4956020 | import torch
import torch.nn as nn
class Locate_Entity(nn.Module):
    """
    Given per-token sentence features, predict the begin (B) and end (E)
    positions of an entity as independent per-token logits.
    """
    def __init__(self,
                 hidden_dim=256,
                 device='cpu'):
        super(Locate_Entity, self).__init__()
        # independent linear scorers for "begin" and "end" positions
        self.predict_B = nn.Sequential(
            nn.Linear(hidden_dim, 1)
        )
        self.predict_E = nn.Sequential(
            nn.Linear(hidden_dim, 1)
        )
        self.device = device

    def cal_loss(self,
                 s_B_idxs,
                 s_E_idxs,
                 s_B_labels,
                 s_E_labels,
                 mask_idx,
                 weight=4, ):
        """Masked BCE-with-logits loss over begin/end scores, with positive
        positions up-weighted by *weight* to counter class imbalance."""
        mask_idx = mask_idx.float()
        # begin-position loss: up-weight positives, drop padded (masked) tokens
        loss1 = nn.BCEWithLogitsLoss(reduce=False)(s_B_idxs, s_B_labels)
        weight1 = torch.where(s_B_labels == 1, s_B_labels + weight - 1., s_B_labels + 1.)
        loss1 = loss1 * weight1
        loss1 = (loss1 * mask_idx).sum() / mask_idx.sum()
        # end-position loss: same weighting and masking scheme
        loss2 = nn.BCEWithLogitsLoss(reduce=False)(s_E_idxs, s_E_labels)
        weight2 = torch.where(s_E_labels == 1, s_E_labels + weight - 1., s_E_labels + 1.)
        loss2 = loss2 * weight2
        loss2 = (loss2 * mask_idx).sum() / mask_idx.sum()
        return loss1 + loss2

    def forward(self, sentence_features):
        """Return (begin_scores, end_scores), each with the trailing scoring
        dimension squeezed away."""
        s_B_scores = self.predict_B(sentence_features)
        s_E_scores = self.predict_E(sentence_features)
        return s_B_scores.squeeze(-1), s_E_scores.squeeze(-1)
def slice_entity(batch_input, batch_slice):
    """Average the feature vectors at the two indices given per batch row.

    For each batch element, picks the two time steps named in
    ``batch_slice`` out of ``batch_input`` and returns their mean,
    producing one vector per element as the entity representation.

    :param batch_input: [batch_size, time_step, hidden_dim]
    :param batch_slice: [batch_size, 2] start/end time-step indices
    :return: [batch_size, hidden_dim]

    Example::

        batch_input = torch.Tensor([
            [[1, 2], [2, 3], [3, 4]],
            [[2, 3], [3, 4], [4, 5]],
            [[3, 4], [4, 5], [5, 6]]
        ])
        batch_slice = torch.LongTensor([
            [0, 1],
            [1, 2],
            [0, 2]
        ])
        slice_entity(batch_input, batch_slice)
        # tensor([[1.5000, 2.5000],
        #         [3.5000, 4.5000],
        #         [4.0000, 5.0000]])
    """
    hidden_dim = batch_input.size(2)
    # Broadcast each index pair over the feature dimension so gather can
    # pull out the two selected time steps per batch element.
    gather_index = batch_slice.long().unsqueeze(-1).expand(-1, -1, hidden_dim)
    picked = batch_input.gather(1, gather_index)
    # Mean over the two picked time steps -> one vector per batch element.
    return picked.mean(dim=1)
| StarcoderdataPython |
9767789 | <gh_stars>0
from utils import *
def task1_1(inputpath, groundTruthPath, dataset, ret=False):
    """Grid-search block-matching optical-flow parameters and plot MSEN/PEPN.

    Python 2 code (print statements).  For every (window size, search area)
    pair, computes block-matching optical flow between consecutive frames,
    compares it against the ground-truth flow, records the MSEN and PEPN
    metrics, then plots both metrics against the search area.

    NOTE(review): ``dataset`` and ``ret`` are unused here and
    ``vector_field_ret`` is never filled -- presumably leftovers from a
    shared task signature; confirm against callers.
    """
    window_size = np.array([50]) # np.array([10, 20, 30, 40, 50])
    look_area = np.array([2, 4]) # np.array([10, 20, 30, 40, 50])
    step = 10
    images = read_of_images(inputpath)
    groundtruth = readOF(groundTruthPath)
    # Metric arrays indexed as [window_size, look_area, frame_pair].
    all_msen = np.zeros((len(window_size), len(look_area), len(images) - 1))
    all_pepn = np.zeros((len(window_size), len(look_area), len(images) - 1))
    vector_field_ret = []
    for wi in range(0, window_size.shape[0]):
        for li in range(0, look_area.shape[0]):
            for i in range(1, len(images)):
                print 'Getting Optical Flow'
                vector_field = block_matching(images[i-1], images[i], window_size[wi], look_area[li], step)
                # Pack (u, v) into channels 1 and 2; channel 0 stays at 1
                # (presumably the validity flag opticalFlowMetrics expects
                # -- TODO confirm against its implementation).
                vec_field_mat = np.ones((vector_field[0].shape[0], vector_field[0].shape[1], 3))
                vec_field_mat[:, :, 1] = vector_field[0]
                vec_field_mat[:, :, 2] = vector_field[1]
                # Subsample the ground truth onto the same grid the block
                # matcher produced (one vector every `step` pixels).
                gt = groundtruth[i-1][range(0, int((images[i-1].shape[0] - window_size[wi]) / step) * step + step, step), :, :] \
                    [:, range(0, int((images[i-1].shape[1] - window_size[wi]) / step) * step + step, step), :]
                print 'Gettin measures'
                msen, pepn, _ = opticalFlowMetrics(vec_field_mat, gt, normInp=False)
                all_msen[wi, li, i - 1] = msen
                all_pepn[wi, li, i - 1] = pepn
    # Plot MSEN vs. search area, one curve per window size.
    for i in range(0, len(window_size)):
        plt.plot(look_area, all_msen[i, :, :], label='Win Size ' + str(window_size[i]))
    plt.xlabel('Area of search')
    plt.ylabel('MSEN%')
    plt.legend()
    plt.title('Seq 157')
    plt.show()
    plt.close()
    # Plot PEPN vs. search area, one curve per window size.
    for i in range(0, len(window_size)):
        plt.plot(look_area, all_pepn[i, :, :], label='Win Size ' + str(window_size[i]))
    plt.xlabel('Area of search')
    plt.ylabel('PEPN%')
    plt.legend()
    plt.title('Seq 157')
    plt.show()
    plt.close()
| StarcoderdataPython |
9721665 | import sys
import math
sys.path.append("../airconicsv021")
import primitives, airconics_setup
import rhinoscriptsyntax as rs
class CurveSplit:
    """Builds airfoil sections between a leading- and trailing-edge curve.

    Python 2 Rhino script helper: divides the leading edge into equal
    spanwise (Y) stations, finds the matching trailing-edge points, and
    generates an airfoil section (via the airconics `primitives` module)
    at each station, with optional dihedral and washout laws.
    """
    def __init__(self,_leadingEdgeCurve,_trailingEdgeCurve,_numberOfDivisions):
        """Store the two planform curves and the number of spanwise divisions."""
        self.leadingEdgeCurve = _leadingEdgeCurve
        self.trailingEdgeCurve = _trailingEdgeCurve
        self.numberOfDivisions = _numberOfDivisions
    def GetPoints(self,curve):
        """Sample `curve` at numberOfDivisions+1 equally spaced Y stations.

        Shoots a horizontal (+X) ray from each station and collects the
        ray/curve intersection points.  Returns None (implicitly) if any
        ray misses the curve.
        """
        curveDomain = rs.CurveDomain(curve)
        # Get the minimim point
        minDomainValue = curveDomain[0]
        minPoint = rs.EvaluateCurve(curve,minDomainValue)
        # get the maximum point
        maxDomainValue = curveDomain[1]
        maxPoint = rs.EvaluateCurve(curve,maxDomainValue)
        yLength = abs(minPoint[1] - maxPoint[1])
        points = []
        for i in range (0,self.numberOfDivisions+1):
            print minPoint
            # Station i along the span; X/Z are taken from the curve start.
            startingPoint = (minPoint[0],(yLength/self.numberOfDivisions)*i,minPoint[2])
            print startingPoint
            # 100 is assumed to be beyond the curve in +X -- TODO confirm
            # for larger geometries.
            endPoint = (100,startingPoint[1],startingPoint[2])
            tmpLine = rs.AddLine(startingPoint,endPoint)
            # get the point where the new line and the traling edge curve is
            intersection_list = rs.CurveCurveIntersection(curve, tmpLine)
            if intersection_list is None:
                print "Selected curves do not intersect."
                return
            for intersection in intersection_list:
                # Event type 1 == point intersection (not overlap).
                if intersection[0] == 1:
                    print intersection[1]
                    #rs.AddPoint(intersection[1])
                    points.append(intersection[1])
            rs.DeleteObject(tmpLine)
        return points
    def dihedralFunction(self,Epsilon):
        # This funtion sets the dihedral of the foil.
        # This allows for one single bend which starts at Transition1 and finishes at Transiiton2.
        # With the dihedral angles set as D1 and D2
        # NOTE: with BaseDihedral == Dihedral == 0 this currently returns 0
        # everywhere; the transition machinery is kept for tuning.
        BaseDihedral = 0
        Dihedral = -0
        TransitionStart = 0.5
        TransitionEnd = 0.55
        if Epsilon < TransitionStart:
            return BaseDihedral
        elif Epsilon > TransitionEnd:
            return Dihedral
        else:
            # Linear blend between the two angles across the transition band.
            return BaseDihedral + ((Epsilon - TransitionStart)/(TransitionEnd - TransitionStart))*(Dihedral-BaseDihedral)
    def washoutFunction(self,Epsilon):
        # This function lets you set a washout value for the foil.
        # NOTE: with BaseTwist == Washout == 0 this currently returns 0
        # everywhere; the ramp from WashoutStart to the tip is kept for tuning.
        BaseTwist = 0
        Washout = 0
        WashoutStart = 0.70
        if Epsilon < WashoutStart:
            return BaseTwist
        else:
            # Linear twist ramp from WashoutStart to the tip (Epsilon == 1).
            return BaseTwist + ((Epsilon - WashoutStart)/(1 - WashoutStart))*(Washout-BaseTwist)
    def generateLeadingEdgeWithDihedral(self,originalLeadingEdgePoints, numberOfSegments):
        """Re-generate the leading-edge points with the dihedral law applied.

        Walks the original spanwise stations and accumulates a Z offset per
        segment from dihedralFunction.  Returns the adjusted point list
        (starting at the origin).
        """
        # Due to the fixed rotation that we have to draw in we can get the
        # length by subtracting coordinate values at each tip.
        # A more generic solution would be to get all 3 axies and use the max length
        # insted of hardcoding to the Y axis
        n = len(originalLeadingEdgePoints)-1
        YLength = originalLeadingEdgePoints[n][1] - originalLeadingEdgePoints[0][1]
        SegmentLength = YLength/numberOfSegments
        # Start the leading edge at the origin
        # NOTE(review): XLE/YLE/ZLE, SegmentLength and DeltaY are computed
        # but never used below -- looks like leftovers from
        # _GenerateLeadingEdge; confirm before removing.
        XLE = [0.0]
        YLE = [0.0]
        ZLE = [0.0]
        LEPoints = []
        list.append(LEPoints,([0, 0, 0]))
        previousDepth = 0.0
        pointZ = 0.0
        for i in range(1,numberOfSegments+1):
            # Dihedral evaluated at the segment's midpoint epsilon.
            TiltAngle = self.dihedralFunction(((i-1)/float(numberOfSegments)+i/float(numberOfSegments))/2)
            DeltaY = (originalLeadingEdgePoints[i][1] - originalLeadingEdgePoints[i-1][1])*math.cos(TiltAngle*math.pi/180.0)
            DeltaZ = (originalLeadingEdgePoints[i][1] - originalLeadingEdgePoints[i-1][1])*math.tan(TiltAngle*math.pi/180.0)
            pointX = originalLeadingEdgePoints[i-1][0]
            pointY = originalLeadingEdgePoints[i-1][1]
            # Accumulate the Z offset so the dihedral compounds spanwise.
            pointZ = originalLeadingEdgePoints[i-1][2] + DeltaZ + previousDepth
            previousDepth = pointZ
            list.append(LEPoints,(pointX, pointY, pointZ))
        print LEPoints
        return LEPoints
    def _GenerateLeadingEdge(self):
        # Epsilon coordinate attached to leading edge defines sweep
        # Returns airfoil leading edge points
        # NOTE(review): this method references self.SegmentNo,
        # self.DihedralFunct and self.SweepFunct, none of which are set by
        # __init__ -- it appears to be copied from airconics and is unused
        # by the script below; calling it as-is would raise AttributeError.
        # Start the leading edge at the origin
        XLE = [0.0]
        YLE = [0.0]
        ZLE = [0.0]
        SegmentLength = 1.0/self.SegmentNo
        LEPoints = []
        list.append(LEPoints,rs.AddPoint(XLE[0], YLE[0], ZLE[0]))
        for i in range(1,self.SegmentNo+1):
            # We are essentially reconstructing a curve from known slopes at
            # known curve length stations - a sort of Hermite interpolation without
            # knowing the ordinate values. If SegmentNo -> Inf, the actual slope
            # at each point -> the sweep angle specified by SweepFunct
            TiltAngle = self.DihedralFunct(((i-1)/float(self.SegmentNo)+i/float(self.SegmentNo))/2)
            SweepAngle = self.SweepFunct(((i-1)/float(self.SegmentNo)+i/float(self.SegmentNo))/2)
            DeltaX = SegmentLength*math.sin(SweepAngle*math.pi/180.0)
            DeltaY = SegmentLength*math.cos(TiltAngle*math.pi/180.0)*math.cos(SweepAngle*math.pi/180.0)
            DeltaZ = DeltaY*math.tan(TiltAngle*math.pi/180.0)
            list.append(XLE, XLE[i-1] + DeltaX)
            list.append(YLE, YLE[i-1] + DeltaY)
            list.append(ZLE, ZLE[i-1] + DeltaZ)
            list.append(LEPoints,rs.AddPoint(XLE[i], YLE[i], ZLE[i]))
        return LEPoints
    def GetPointsOnSecondCurve(self,trailingEdgeCurve, pointsOnFirstCurve):
        """Project each leading-edge point onto the trailing-edge curve.

        Shoots a +X ray from every point of `pointsOnFirstCurve` and
        returns the intersection points with `trailingEdgeCurve`.  Returns
        None (implicitly) if any ray misses the curve.
        """
        TrailingEdgePoints = []
        for i in range (0,len(pointsOnFirstCurve)):
            # draw a line from the point on the first curve
            startingPoint = pointsOnFirstCurve[i]
            endPoint = (100,startingPoint[1],startingPoint[2])
            tmpLine = rs.AddLine(startingPoint,endPoint)
            # get the point where the new line and the traling edge curve is
            intersection_list = rs.CurveCurveIntersection(trailingEdgeCurve, tmpLine)
            if intersection_list is None:
                print "Selected curves do not intersect."
                return
            for intersection in intersection_list:
                # Event type 1 == point intersection (not overlap).
                if intersection[0] == 1:
                    print intersection[1]
                    #rs.AddPoint(intersection[1])
                    TrailingEdgePoints.append(intersection[1])
            rs.DeleteObject(tmpLine)
        return TrailingEdgePoints
    def AerofoilAtPoint(self,numberOfSegments, epsilon,leadingEdgePoint,chordLength):
        """Create one airfoil section at the given leading-edge point.

        Applies the washout (twist) and dihedral laws at `epsilon`, then
        builds a 'dae11' Selig airfoil of the given chord length.  Returns
        whatever primitives.Airfoil.AddAirfoilFromSeligFile returns
        (curve + chord handles).  `numberOfSegments` is unused here.
        """
        # Create a 3d point
        #LEPoint = (leadingEdgePoint[0],leadingEdgePoint[1],leadingEdgePoint[2])
        # Determine the twist for any washout
        Twist = self.washoutFunction(epsilon)
        ### Determine the dihedral at the given section
        # Start with the dihedral angle of the foil
        Angle = self.dihedralFunction(epsilon)
        # Create a 3d point
        #LEPoint = [0,0,0]
        LEPoint = (leadingEdgePoint[0],leadingEdgePoint[1],leadingEdgePoint[2])
        # Instantiate class to set up a generic airfoil with these basic parameters
        Af = primitives.Airfoil(LEPoint,chordLength, Angle , Twist , airconics_setup.SeligPath)
        # Name of the file containing the airfoil coordinates + smoothing
        AirfoilSeligName = 'dae11'
        SmoothingPasses = 1
        # Add airfoil curve to document, and retrieve handles to it and its chord
        #AfCurve,Chrd = primitives.Airfoil.AddAirfoilFromSeligFile(Af, AirfoilSeligName, SmoothingPasses)
        return primitives.Airfoil.AddAirfoilFromSeligFile(Af, AirfoilSeligName, SmoothingPasses)
# Interactive Rhino script: ask the user for the leading/trailing edge
# curves, sample airfoil sections along the span and loft a wing surface.
NumberOfSegments = 20
leCurve = rs.GetObject("Select the leading edge curve")
if leCurve is not None:
    teCurve = rs.GetObject("Select the trailing edge curve")
    if teCurve is not None:
        if rs.IsCurve(leCurve):
            obj = CurveSplit(leCurve,teCurve,NumberOfSegments)
            # Sample the planform: spanwise stations on both curves.
            lePoints = obj.GetPoints(leCurve)
            tePoints = obj.GetPointsOnSecondCurve(teCurve,lePoints)
            # Adjust the leading edge point locaiton based on the desired dihedral
            # (currently unused below -- the non-dihedral call is active).
            newLEPoints = obj.generateLeadingEdgeWithDihedral(lePoints, NumberOfSegments)
            Sections = []
            for i in range (0,len(lePoints)):
                # NOTE(review): integer division in Python 2 -- epsilon is 0
                # for every i < len(lePoints); probably meant
                # float(i)/len(lePoints).  Confirm before changing.
                epsilon = i/len(lePoints)
                print "Epsilon %.2f" % (epsilon)
                # Determine the length of the chord to generate
                chordLength = abs(lePoints[i][0] - tePoints[i][0])
                Airfoil, ChordLine = obj.AerofoilAtPoint(NumberOfSegments, epsilon, lePoints[i],chordLength)
                #Airfoil, ChordLine = obj.AerofoilAtPoint(NumberOfSegments, epsilon, newLEPoints[i],chordLength)
                rs.DeleteObjects(ChordLine)
                list.append(Sections,Airfoil)
            #lss = rs.AddLoftSrf(Sections,)
            # Loft the sections into the wing surface and clean up.
            LS = rs.AddLoftSrf(Sections)
            rs.DeleteObjects(Sections)
            if LS==None:
                # Failed to fit loft surface. Try another fitting algorithm
                print "loft failed"
| StarcoderdataPython |
4841132 | import os
import sys
import typing
from subprocess import Popen, PIPE
class Config:
    """Process-wide singleton holding the ffmpeg-wrapper configuration.

    All values are read from environment variables the first time the
    class is instantiated; later ``Config()`` calls return the same
    object without touching the environment again.  Construction also
    creates the work/status/log directories, refuses to start if another
    wrapper already owns WORKDIR, and dumps every setting to a status
    file.
    """
    _instance = None
    # True only on the instantiation that actually created the singleton,
    # so __init__ can skip re-initialisation on later Config() calls.
    _first_run: bool = False

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._first_run = True
        else:
            cls._first_run = False
        return cls._instance

    def __init__(self, app_cfg: typing.Tuple[dict, None] = None):
        # Singleton guard: only the first construction populates attributes.
        if not self._first_run:
            return
        self.CONTAINER_NAME = os.getenv('CONTAINER_NAME', None)
        self.CONTAINER_ID = self._get_container_id()
        self.PID = str(os.getpid())
        self.FFMPEG_PID = ''
        self.WORKDIR = os.getenv('WORKDIR', '/tmp/ff_wrapper')
        self.PROGRESS_FIFO_PATH = os.path.join(self.WORKDIR, 'pipes/')
        self.LOGS_PATH_BASE = os.getenv('LOGS_PATH', '/var/log/ffmpeg/')
        # Log directory suffix preference: container name > container id > pid.
        if self.CONTAINER_NAME:
            self.LOGS_PATH = os.path.join(self.LOGS_PATH_BASE, 'ff_wrapper_' + str(self.CONTAINER_NAME))
        elif self.CONTAINER_ID:
            self.LOGS_PATH = os.path.join(self.LOGS_PATH_BASE, 'ff_wrapper_' + str(self.CONTAINER_ID[:13]))
        else:
            self.LOGS_PATH = os.path.join(self.LOGS_PATH_BASE, 'ff_wrapper_' + str(self.PID))
        self.STATUS_PATH = os.path.join(self.WORKDIR, 'status/')
        # 100k lines ~= 14 hours of logs and ~120 MB of RAM.
        self.PROGRESS_BUFFER_LEN = self._get_int_env('PROGRESS_BUFFER_LEN', 100000)
        self.STDOUT_BUFFER_LEN = self._get_int_env('STDOUT_BUFFER_LEN', 100000)
        # NOTE(review): any non-empty env string (even "false") is truthy
        # for NO_FILE_LOG / IS_DEBUG / ENCODING_DISABLE_CHECK -- confirm
        # that is intended before relying on string values.
        self.NO_FILE_LOG = os.getenv('NO_FILE_LOG', False)
        self.LOG_ROTATION_MODE = os.getenv('LOG_ROTATION_MODE', 'days')  # days or size
        self.LOG_ROTATION_DAYS = self._get_int_env('LOG_ROTATION_DAYS', 1)
        self.LOG_ROTATION_MAX_KBYTES = self._get_int_env('LOG_ROTATION_MAX_KBYTES', 25000)  # in kbytes
        self.LOG_ROTATION_BACKUP = self._get_int_env('LOG_ROTATION_BACKUP', 3)
        self.IS_DEBUG = os.getenv('IS_DEBUG', False)
        # Seconds: delay before the check manager starts.
        self.MANAGER_START_DELAY = self._get_int_env('MANAGER_START_DELAY', 5)
        # Seconds: delay before the encoding check starts.
        self.ENCODING_CHECK_START_DELAY = self._get_int_env('ENCODING_CHECK_START_DELAY', 55)
        self.ENCODING_DISABLE_CHECK = os.getenv('ENCODING_DISABLE_CHECK', False)
        # Speed below which encoding is considered failing.
        self.ENCODING_MIN_SPEED = self._get_float_env('ENCODING_MIN_SPEED', 0.80)
        # If the base speed is below the minimum possible speed, the minimum
        # becomes: ENCODING_MIN_SPEED = <base speed> - ENCODING_DELTA_SPEED.
        self.ENCODING_DELTA_SPEED = self._get_float_env('ENCODING_DELTA_SPEED', 0.20)
        # Subtracted from the current fps; if the result is lower, encoding stops.
        self.ENCODING_DELTA_FPS = self._get_int_env('ENCODING_DELTA_FPS', 10)
        # If the base fps (without delta) is below this, the stream is failing.
        self.ENCODING_MIN_BASE_FPS = self._get_float_env('ENCODING_MIN_BASE_FPS', 14)
        # How many seconds an error may persist before the manager stops the stream.
        self.ENCODING_MAX_ERROR_TIME = self._get_int_env('ENCODING_MAX_ERROR_TIME', 10)
        # How many seconds stdout may go without updates.
        self.ENCODING_MAX_STDOUT_STUCK_TIME = self._get_int_env('ENCODING_MAX_STDOUT_STUCK_TIME', 15)
        self.create_dirs()
        self.exit_if_already_running()
        self.save_status_to_files()

    def _get_int_env(self, env_name: str, default) -> int:
        """Read env var `env_name` as int, falling back to `default`; hard-exits on a non-integer value."""
        try:
            env_var = os.getenv(env_name, default)
            return int(env_var)
        except ValueError:
            print("Error. {} env parameter must be int ({})".format(env_name, env_var))
            os._exit(1)

    def _get_float_env(self, env_name: str, default) -> float:
        """Read env var `env_name` as float, falling back to `default`; hard-exits on a non-float value."""
        try:
            env_var = os.getenv(env_name, default)
            return float(env_var)
        except ValueError:
            print("Error. {} env parameter must be float ({})".format(env_name, env_var))
            os._exit(1)

    def _get_container_id(self) -> str:
        """Return the docker container id from /proc/1/cpuset, or '' outside docker."""
        id = ''
        with open('/proc/1/cpuset', 'r') as f:
            id = f.read()
        if id.startswith('/docker'):
            id = id.split('/')[-1]
        else:
            id = ''
        return id

    def _get_pids(self) -> list:
        """Return the pids of all running processes (as strings) via `ps`."""
        pids = []
        process = Popen(['ps', '-eo', 'pid'], stdout=PIPE, stderr=PIPE)
        stdout, _ = process.communicate()
        for line in stdout.splitlines():
            line = line.decode('utf-8')
            pids.append(line.strip())
        return pids

    def exit_if_already_running(self):
        """Exit with status 1 if another live wrapper process owns WORKDIR."""
        if self.PID == '1':
            # PID 1 means we are the container's init process; nothing else
            # can be running in this namespace.
            return
        pid_path = os.path.join(self.STATUS_PATH, 'PID')
        # BUG FIX: the original tested `if not os.path.exists:` -- the bare
        # function object is always truthy, so the early return never fired
        # and a missing PID file fell through to the OSError handler below.
        if not os.path.exists(pid_path):
            return
        try:
            with open(pid_path, 'r') as f:
                already_running_pid = f.read()
            pids = self._get_pids()
            # Busy only if the recorded pid belongs to a *different*, live process.
            if self.PID != already_running_pid and already_running_pid in pids:
                print('WORKDIR {} is busy by process with pid {}'.format(self.WORKDIR, already_running_pid))
                sys.exit(1)
        except OSError:
            print("PID check. Can't open file {}, creating new".format(pid_path))

    def create_dirs(self):
        """Create the pipes/status/log directories; re-raise on failure."""
        if not os.path.exists(self.PROGRESS_FIFO_PATH):
            try:
                os.makedirs(self.PROGRESS_FIFO_PATH)
            except Exception as e:
                print("Error while init app. Can't create pipes dir: {}".format(self.PROGRESS_FIFO_PATH))
                raise e
        if not os.path.exists(self.STATUS_PATH):
            try:
                os.makedirs(self.STATUS_PATH)
            except Exception as e:
                print("Error while init app. Can't create status dir: {}".format(self.STATUS_PATH))
                raise e
        if not self.NO_FILE_LOG and not os.path.exists(self.LOGS_PATH):
            try:
                os.makedirs(self.LOGS_PATH)
            except Exception as e:
                print("Error while init app. Can't create log dir: {}".format(self.LOGS_PATH))
                raise e

    def save_status_to_files(self):
        """Dump every public setting to its own file under STATUS_PATH."""
        for k, v in vars(self).items():
            if not k.startswith('_'):
                with open(os.path.join(self.STATUS_PATH, k), 'w') as f:
                    f.write(str(v))
| StarcoderdataPython |
6528869 | # -*- coding: utf-8 -*-
# Copyright 2017-2019 ControlScan, Inc.
#
# This file is part of Cyphon Engine.
#
# Cyphon Engine is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Cyphon Engine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cyphon Engine. If not, see <http://www.gnu.org/licenses/>.
"""
Provides constants for locating |Engine| subclasses.
[`source`_]
========================= ================================================
Constant Description
========================= ================================================
:const:`~ENGINES_PACKAGE` The name of this package.
:const:`~ENGINE_MODULE` Name for modules containing |Engine| subclasses.
:const:`~CURRENT_PATH` Path containing `engines` subpackages.
:const:`~BACKEND_CHOICES` Choices for data stores (e.g., Elasticsearch).
========================= ================================================
.. _source: ../_modules/engines/registry.html
"""
# standard library
import os
# local
from utils.choices import choices
def _get_this_package():
"""Get the name of this package.
Returns the name of the package in which this module resides.
"""
current_path = os.path.dirname(__file__)
return os.path.basename(current_path)
ENGINES_PACKAGE = _get_this_package()
"""|str|
The name of this package, which contains subpackages for specific
backends.
"""
ENGINE_MODULE = 'engine'
"""|str|
Standard name for a module in an `engines` subpackage that contains an
|Engine| sublclass used to interact with a data store.
"""
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
"""|str|
Path containing `engines` subpackages for particular backends.
"""
BACKEND_CHOICES = choices.get_package_choices(CURRENT_PATH)
"""|tuple| of |tuple|
Backend choices, based on `engines` subpackages. Each item provides a
(value, label) choice for a package, which can be used in a `choices`
argument for a CharField in a Django Model, e.g.::
(
('elasticsearch', 'elasticsearch'),
('mongodb', 'mongodb'),
)
"""
| StarcoderdataPython |
5158144 | import pandas as pd
import requests
import json
import csv
import os
def get_census_data_from_api(base_url, fips_df, op_file):
    """Download 2000 SF1 census rows for every state/county FIPS pair.

    Iterates over the rows of ``fips_df`` (expects integer ``state`` and
    ``county`` columns), zero-pads each FIPS code to its fixed width,
    queries the census API per county and appends the returned JSON rows
    to a CSV file.  The API's header row is written only once, on the
    first successful call.  Stops on the first error.

    :param base_url: census API endpoint (e.g. the 2000 SF1 URL).
    :param fips_df: DataFrame with ``state`` and ``county`` columns.
    :param op_file: output CSV file name (without extension).
    """
    count = 0
    with open(
            f'/Users/salma/Studies/Research/Criminal_Justice/research_projects/main_census_merge/data/census_cities_1990/{op_file}.csv',
            'w') as file_wrtr:
        # take a csv writer object to write to this output file
        op_file_writer = csv.writer(file_wrtr)
        for row in fips_df.itertuples():
            # BUG FIX: reset per iteration so the except block below never
            # raises NameError (when requests.get itself fails) or reports
            # a stale response from a previous iteration.
            response = None
            try:
                print('count: ', count)
                st_fips = str(getattr(row, "state"))
                cnty_fips = str(getattr(row, "county"))
                # Zero-pad: state FIPS to 2 digits, county FIPS to 3 digits.
                if st_fips.__len__() < 2:
                    st_fips = '0' + st_fips
                if cnty_fips.__len__() < 3:
                    cnty_fips = ('0' * (3 - cnty_fips.__len__())) + cnty_fips
                response = requests.get(
                    f'{base_url}?get=NAME,P012001,P012A001,P012B001,P012H001,P012A006,P012A007,P012A008,P012A009,P012A010,P012A002,P012A030,P012A031,P012A032,P012A033,P012A034,P012A026,P012B006,P012B007,P012B008,P012B009,P012B010,P012B002,P012B030,P012B031,P012B032,P012B033,P012B034,P012B026,P012H002,P012H006,P012H007,P012H008,P012H009,P012H010,P012H030,P012H031,P012H032,P012H033,P012H034,P012H026&for=county%20subdivision:*&in=state:{st_fips}%20county:{cnty_fips}&key=<KEY>')
                resp = json.loads(response.content)
                if count == 0:
                    # iterate over respone python object and write each row to the csv
                    for res in resp:
                        op_file_writer.writerow(res)
                else:
                    # skipping 1st row which are the variable names starting from 2nd calls.
                    for res in resp[1:]:
                        op_file_writer.writerow(res)
                count += 1
            except Exception as ex:
                # Only report HTTP details when a response actually exists;
                # `response` is None when the request itself failed.
                if response is not None:
                    print('Error code: ', response.status_code)
                    print('Error Response: ', response.content)
                print('Exception: ', ex)
                print('Total API Calls: ', count)
                break
# Script entry: load the 1990 state/county FIPS pairs from disk and pull
# the matching 2000 SF1 census rows into a fresh output CSV.
# Get 1990 state and county fips
st_cnty_fips_90 = pd.read_csv('/Users/salma/Studies/Research/Criminal_Justice/research_projects/main_census_merge/data/wip_merge_files/st_cnty_fips_1990.csv')
get_census_data_from_api('https://api.census.gov/data/2000/sf1', st_cnty_fips_90, 'new_census_townships_00_initial')
203660 | <gh_stars>0
from libpythonpro_github.spam.enviador_de_email import Enviador
from libpythonpro_github.spam.main import EnviadorDeSpam
def test_envio_de_spam(sessao):
    """Smoke test: EnviadorDeSpam sends e-mails without raising.

    ``sessao`` is a pytest fixture (presumably defined in conftest --
    confirm) providing the session with subscribed users.  There is no
    explicit assert: the test passes as long as sending raises nothing.
    """
    enviador_de_spam = EnviadorDeSpam(sessao, Enviador())
    enviador_de_spam.envia_emails('<EMAIL>', 'Curso Python Pro', 'Testando essa parte do codigo')
| StarcoderdataPython |
1710856 | <gh_stars>0
# -*- coding: utf-8 -*-
from datetime import datetime
from functools import wraps
# Supported OGR driver names mapped to their file extensions.
FORMAT_EXT = {
    "GPKG": '.gpkg',
    "KML": '.kml',
    "GeoJSON": '.json',
    "GML": '.gml',
    "GPX": '.gpx',
    "GPSTrackMaker": ".gmt",
    "ESRI Shapefile": ".shp"
}


class FormatException(Exception):
    """Raised when an unsupported target format is requested."""
    pass


def ensure_supported_format(func):
    """Decorator: reject calls whose ``target_format`` kwarg is unsupported.

    The wrapped function is only invoked when ``target_format`` (default
    'GPKG') is one of the keys of ``FORMAT_EXT``; otherwise a
    ``FormatException`` is raised.
    """
    # FIX: @wraps preserves the wrapped function's name/docstring (the
    # `wraps` import already existed at the top of this file but was unused).
    @wraps(func)
    def wrap(*args, **kwargs):
        # Renamed from `format` to avoid shadowing the builtin.
        target_format = kwargs.get('target_format', 'GPKG')
        if target_format in FORMAT_EXT:
            return func(*args, **kwargs)
        else:
            raise FormatException("Unsupported Format")
    return wrap
def time_it(function):
    """Decorator that prints how long each call to ``function`` takes."""
    @wraps(function)
    def wrap(request, *args, **kwargs):
        start = datetime.now()
        result = function(request, *args, **kwargs)
        end = datetime.now()
        elapsed = (end - start).total_seconds()
        # BUG FIX: the original printed `function.func.__name__`; a plain
        # function has no `.func` attribute, so every timed call raised
        # AttributeError after running.  Read `__name__` directly.
        print("{} took ------>{} seconds".format(
            function.__name__, elapsed))
        print("{} took ------>{} milliseconds".format(
            function.__name__, elapsed * 1000))
        return result
    return wrap
| StarcoderdataPython |
3590772 | <filename>homework/1.py
# -*- coding: utf-8 -*-
# Sampling function Sa(t) = sin(t)/t and its Fourier transform
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib
import mpl_toolkits.axisartist as axisartist
# SimHei font for CJK labels; keep the minus sign renderable with it.
font = {'family':'SimHei'}
matplotlib.rc('font',**font)
matplotlib.rcParams['axes.unicode_minus']=False
fig = plt.figure()
# First plot
plt.subplot(1,2,1)# canvas split into one row / two columns; take the first
matplotlib.rcParams['axes.unicode_minus']=False
fig = plt.figure()
# First plot
# NOTE(review): the figure/subplot setup above is duplicated and these
# first figures are never drawn into -- looks like leftover scaffolding.
plt.subplot(1,2,1)
# Fresh square figure with centered arrow axes (floating axes through 0).
fig = plt.figure(figsize=(6,6))
ax = axisartist.Subplot(fig, 111)
fig.add_axes(ax)
ax.axis[:].set_visible(False)
ax.axis["x"] = ax.new_floating_axis(0,0)
ax.axis["x"].set_axisline_style("->", size = 1.0)
ax.axis["y"] = ax.new_floating_axis(1,0)
ax.axis["y"].set_axisline_style("-|>", size = 1.0)
ax.axis["x"].set_axis_direction("top")
ax.axis["y"].set_axis_direction("right")
# Time-domain curve of the sampling function Sa(t).
t = np.arange(-16,16,0.1)
y = np.sin(t)/t
plt.title(r'$Sa(t)=\frac{\sin(t)}{t}$',fontsize=14)
plt.text(0,-0.06,'O',fontdict={'size':16})
plt.text(16.5,-0.06,'t',fontdict={'size':14})
plt.xticks([])
plt.yticks([])
plt.plot(t,y,'b')
plt.show()
# Second figure: same axis styling, then the transform of Sa(t).
fig = plt.figure(figsize=(6,6))
ax = axisartist.Subplot(fig, 111)
fig.add_axes(ax)
ax.axis[:].set_visible(False)
ax.axis["x"] = ax.new_floating_axis(0,0)
ax.axis["x"].set_axisline_style("->", size = 1.0)
ax.axis["y"] = ax.new_floating_axis(1,0)
ax.axis["y"].set_axisline_style("-|>", size = 1.0)
ax.axis["x"].set_axis_direction("top")
ax.axis["y"].set_axis_direction("right")
t = np.arange(-11,11,0.1)
y = np.sin(t)/t
plt.title(r'$Sa(t)=\frac{\sin(t)}{t}$',fontsize=14)
plt.text(0,-0.06,'O',fontdict={'size':16})
plt.text(11,-2,r'$\omega_0$',fontdict={'size':14})
# NOTE(review): np.fft.fft returns complex values; matplotlib will discard
# the imaginary part (with a warning), so this plots only the real part of
# the unshifted FFT -- probably np.abs(np.fft.fftshift(...)) was intended.
plt.plot(t,np.fft.fft(y),'b')# Fourier transform
plt.show()
120935 | import tensorflow
from PIL import Image
import numpy
import argparse
import os
import sys
import pandas
from ModelsEnum import Models
sys.path.insert(0, os.path.abspath(
os.path.join(os.path.dirname(__file__), '..')))
def predict(model, imgPath, imgHeight, imgWidth):
    """Run the classifier on one image path, or on each path in a list.

    Loads the image, converts it to RGB, resizes it, adds a batch axis
    and returns the softmax of the model's raw predictions.  A list
    input is handled by mapping this function over its elements.

    NOTE(review): the resize call receives ``(imgHeight, imgWidth)`` but
    PIL's ``Image.resize`` takes ``(width, height)``; this is harmless
    only when both are equal -- confirm with the configured sizes.
    """
    if isinstance(imgPath, list):
        return [predict(model, path, imgHeight, imgWidth) for path in imgPath]
    image = Image.open(imgPath).convert('RGB').resize((imgHeight, imgWidth))
    batch = tensorflow.expand_dims(numpy.array(image), 0)
    return tensorflow.nn.softmax(model.predict(batch))
def main():
    """CLI entry point: classify one or more images with a saved model.

    Parses ``-i/--input`` image paths and ``-m/--model`` (binary or
    multi-class), loads the matching saved weights, then prints the
    per-class probabilities and the arg-max class for every input image.
    """
    models = [Models.Model2Classi.value, Models.ModelMulClassi.value]
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input", help="Input image",
                        required=True, type=str, nargs='+')
    parser.add_argument("-m", "--model", help="Model to use", required=False,
                        default='2classi', type=str, choices=models)
    args = parser.parse_args()
    # Deferred imports: project config and model graphs are only loaded
    # when the CLI actually runs.
    import globalConfig.config as config
    from models.model2Classi.modelDefinition import model as model2Classi
    from models.modelMulClassi.modelDefinition import model as modelMulClassi
    # Select the model, class names, weights and input size per model kind.
    if args.model == Models.Model2Classi.value:
        model = model2Classi
        classNames = config.model2Classi.classNames
        model.load_weights(config.model2Classi.savedPath)
        imgHeight = config.model2Classi.imgHeight
        imgWidth = config.model2Classi.imgWidth
    elif args.model == Models.ModelMulClassi.value:
        model = modelMulClassi
        classNames = config.modelMulClassi.classNames
        modelMulClassi.load_weights(config.modelMulClassi.savedPath)
        imgHeight = config.modelMulClassi.imgHeight
        imgWidth = config.modelMulClassi.imgWidth
    else:
        raise ValueError('Model not found')
    # Check that every input image path exists
    for img in args.input:
        if not os.path.exists(img):
            print(f"Error: Input img {img} not exists")
            exit(1)
    # Check that every input image path is a regular file
    for img in args.input:
        if not os.path.isfile(img):
            print(f"Error: Input img {img} is not a file")
            exit(1)
    scores = predict(model, args.input, imgHeight, imgWidth)
    print()
    # Print per-image probabilities and the final predicted class.
    for i, score in enumerate(scores):
        print(f'{args.input[i]} probability: {score[0]}')
        print(pandas.Series({classification: f'{str(float(score[0][i])*100)} %' for i, classification in enumerate(
            classNames)}))
        print(f'result: {classNames[numpy.argmax(score)]}')
        print()
# Runs on import/execution -- the original has no `if __name__` guard.
main()
| StarcoderdataPython |
9772319 | <reponame>slmjy/oci-ansible-modules
#!/usr/bin/python
# Copyright (c) 2018, Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["deprecated"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_swift_password
short_description: Create, update and delete Swift (OpenStack Object Store Service) passwords in OCI
description:
- This module allows the user to create, update and delete Swift passwords in OCI. This module is deprecated.
Please use M(oci_auth_token) instead. This module may be removed in a future release. Swift is the OpenStack
object storage service. A SwiftPassword is an Oracle-provided password for using a Swift client with the Oracle
Cloud Infrastructure Object Storage Service. This password is associated with the user's Console login. Swift
passwords never expire. A user can have up to two Swift passwords at a time. Note - The password is always an
Oracle-generated string; you can't change it to a string of your choice.
version_added: "2.5"
options:
user_id:
description: The OCID of the user.
required: true
swift_password_id:
description: The OCID of the swift password. Required when the password's description needs to be updated
with I(state=present) and for deleting a swift password with I(state=absent)
required: false
aliases: ['id']
description:
description: The description you assign to the Swift password during creation. Does not have to be unique, and
it's changeable. Required when creating a swift password. The length of the description must be
between 1 and 400 characters.
required: false
state:
description: The state of the swift password that must be asserted to. When I(state=present), and the
swift password doesn't exist, the swift password is created. When I(state=absent),
the swift password is deleted.
required: false
default: "present"
choices: ['present', 'absent']
author: "<NAME> (@sivakumart)"
extends_documentation_fragment: [ oracle, oracle_creatable_resource ]
"""
EXAMPLES = """
- name: Create a new swift password
oci_swift_password:
user_id: "ocid1.user.oc1..xxxxxEXAMPLExxxxx"
description: "my first swift password"
- name: Update a swift password's description
oci_swift_password:
id: "ocid1.credential.oc1..xxxxxEXAMPLExxxxx"
user_id: "ocid1.user.oc1..xxxxxEXAMPLExxxxx"
description: "swift password #1"
- name: Delete a swift password
oci_swift_password:
id: "ocid1.credential.oc1..xxxxxEXAMPLExxxxx"
user_id: "ocid1.user.oc1..xxxxxEXAMPLExxxxx"
state: "absent"
"""
RETURN = """
oci_swift_password:
description: Details of the Swift password
returned: On success. The password is only returned only during creation.
type: dict
sample: {
"description": "My first swift password description",
"expires_on": null,
"id": "ocid1.credential.oc1..xxxxxEXAMPLExxxxx",
"inactive_status": null,
"lifecycle_state": "ACTIVE",
"password": <PASSWORD>",
"time_created": "2018-01-03T12:47:25.759000+00:00",
"user_id": "ocid1.user.oc1..xxxxxEXAMPLExxxxx"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.oracle import oci_utils
try:
import oci
from oci.identity.identity_client import IdentityClient
from oci.identity.models import (
CreateSwiftPasswordDetails,
UpdateSwiftPasswordDetails,
)
from oci.util import to_dict
from oci.exceptions import ServiceError, MaximumWaitTimeExceeded
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
logger = None
RESOURCE_NAME = "swift_password"
def set_logger(provided_logger):
    """Install the module-level logger used by all helpers in this module."""
    global logger
    logger = provided_logger
def get_logger():
    """Return the module-level logger previously installed by set_logger()."""
    return logger
def _get_swift_password_from_id(identity_client, user_id, id, module):
    """Return the user's Swift password with the given OCID, or None.

    There is no direct "get" API for Swift passwords, so the user's
    passwords are listed and scanned for a matching OCID.  Fails the
    Ansible module on a service error.
    """
    try:
        resp = oci_utils.call_with_backoff(
            identity_client.list_swift_passwords, user_id=user_id
        )
        if resp is None:
            return None
        # First password whose OCID matches, else None.
        return next((pw for pw in resp.data if pw.id == id), None)
    except ServiceError as ex:
        module.fail_json(msg=ex.message)
def delete_swift_password(identity_client, user_id, id, module):
    """Delete the Swift password with OCID ``id`` belonging to ``user_id``.

    Returns the Ansible result dict with ``changed`` and the deleted
    resource (lifecycle_state forced to DELETED -- see XXX note below).
    Fails the module on service errors or wait timeouts.
    """
    result = {}
    changed = False
    try:
        # Fetch the resource first so its details can be returned after the
        # delete call (the delete API returns nothing useful).
        sw_pass = _get_swift_password_from_id(identity_client, user_id, id, module)
        oci_utils.call_with_backoff(
            identity_client.delete_swift_password, user_id=user_id, swift_password_id=id
        )
        get_logger().info("Deleted swift password %s", id)
        changed = True
        # XXX: The Swift password is not returned by list swift passwords after it
        # is deleted, and so we currently reuse the earlier swift password object and mark
        # its lifecycle state as DELETED.
        # Note: This current approach has problems around idempotency.
        # We also don't wait, as there is no state transition that we need to wait for.
        sw_pass.lifecycle_state = "DELETED"
        result[RESOURCE_NAME] = to_dict(sw_pass)
    except ServiceError as ex:
        module.fail_json(msg=ex.message)
    except MaximumWaitTimeExceeded as mwte:
        module.fail_json(msg=str(mwte))
    result["changed"] = changed
    return result
def update_swift_password(identity_client, user_id, id, description, module):
    """Update the description of an existing Swift password.

    Always issues the update call (the caller is responsible for checking
    whether the description actually differs).  Returns the Ansible
    result dict with the refreshed resource and ``changed=True``; fails
    the module on a service error.
    """
    result = dict()
    changed = False
    try:
        uspd = UpdateSwiftPasswordDetails()
        uspd.description = description
        get_logger().debug(
            "Swift Password %s - updating with new description: %s", id, description
        )
        response = oci_utils.call_with_backoff(
            identity_client.update_swift_password,
            user_id=user_id,
            swift_password_id=id,
            update_swift_password_details=uspd,
        )
        get_logger().info("Updated Swift Password %s", to_dict(response.data))
        result[RESOURCE_NAME] = to_dict(response.data)
        changed = True
    except ServiceError as ex:
        module.fail_json(msg=ex.message)
    result["changed"] = changed
    return result
def _is_swift_password_active(swift_passwords, sw_pass_id):
result = [
sw_pass
for sw_pass in swift_passwords
if sw_pass.id == sw_pass_id and sw_pass.lifecycle_state == "ACTIVE"
]
return len(result) == 1
def create_swift_password(identity_client, user_id, description, module):
    """Create a Swift password for ``user_id`` and wait until it is ACTIVE.

    The clear-text password is only returned by the create call, so it is
    cached and re-attached to the resource fetched after the wait.
    Returns the Ansible result dict; fails the module on service errors
    or wait timeouts.
    """
    result = {}
    try:
        cspd = CreateSwiftPasswordDetails()
        cspd.description = description
        result = oci_utils.create_resource(
            resource_type=RESOURCE_NAME,
            create_fn=identity_client.create_swift_password,
            kwargs_create={"user_id": user_id, "create_swift_password_details": cspd},
            module=module,
        )
        resource = result[RESOURCE_NAME]
        sw_pass_id = resource["id"]
        # cache the swift password's password as it is only provided during creation
        cached_pass = resource["password"]
        get_logger().info("Created Swift Password %s", to_dict(resource))
        response = identity_client.list_swift_passwords(user_id)
        # wait until the created Swift password reaches Active state
        oci.wait_until(
            identity_client,
            response,
            evaluate_response=lambda resp: _is_swift_password_active(
                resp.data, sw_pass_id
            ),
        )
        sw_pass = _get_swift_password_from_id(
            identity_client, user_id, sw_pass_id, module
        )
        # BUG FIX: re-attach the cached clear-text password to the returned
        # model (the original line was corrupted to a bare placeholder,
        # which is a SyntaxError; `cached_pass` above exists for this).
        sw_pass.password = cached_pass
        result[RESOURCE_NAME] = to_dict(sw_pass)
        return result
    except ServiceError as ex:
        module.fail_json(msg=ex.message)
    except MaximumWaitTimeExceeded as mwte:
        module.fail_json(msg=str(mwte))
def main():
    """Ansible module entry point: create, update, or delete an OCI Swift
    password for a user depending on ``state`` and ``swift_password_id``."""
    set_logger(oci_utils.get_logger("oci_swift_password"))
    # Common OCI argument spec plus module-specific options.
    module_args = oci_utils.get_common_arg_spec(supports_create=True)
    module_args.update(
        dict(
            user_id=dict(type="str", required=True),
            swift_password_id=dict(
                type="str", required=False, aliases=["id"], no_log=True
            ),
            description=dict(type="str", required=False),
            state=dict(
                type="str",
                required=False,
                default="present",
                choices=["present", "absent"],
            ),
        )
    )
    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=False,
        # Deleting requires knowing which password to delete.
        required_if=[("state", "absent", ["swift_password_id"])],
    )
    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")
    identity_client = oci_utils.create_service_client(module, IdentityClient)
    state = module.params["state"]
    result = dict(changed=False)
    user_id = module.params.get("user_id", None)
    # NOTE: shadows the ``id`` builtin; kept as-is for compatibility.
    id = module.params.get("swift_password_id", None)
    description = module.params.get("description", None)
    get_logger().debug("Id is " + str(id))
    if id is not None:
        # An existing password was referenced: update or delete it.
        sw_pass = _get_swift_password_from_id(identity_client, user_id, id, module)
        if state == "absent":
            get_logger().debug(
                "Delete swift password %s for user %s requested", id, user_id
            )
            if sw_pass:
                get_logger().debug("Deleting %s", sw_pass.id)
                result = delete_swift_password(identity_client, user_id, id, module)
            else:
                get_logger().debug("Swift Password %s already deleted.", id)
        elif state == "present":
            if sw_pass.description != description:
                result = update_swift_password(
                    identity_client, user_id, id, description, module
                )
            else:
                # No change needed, return existing swift password details
                result[RESOURCE_NAME] = to_dict(sw_pass)
    else:
        # Check and create swift password if necessary
        result = oci_utils.check_and_create_resource(
            resource_type=RESOURCE_NAME,
            create_fn=create_swift_password,
            kwargs_create={
                "identity_client": identity_client,
                "user_id": user_id,
                "description": description,
                "module": module,
            },
            list_fn=identity_client.list_swift_passwords,
            kwargs_list={"user_id": user_id},
            module=module,
            model=CreateSwiftPasswordDetails(),
        )
    module.exit_json(**result)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
224502 | # bidi multimap
from collections import defaultdict
class MMap:
    """A multimap: each key maps to a set of values.

    Invariant: ``pair_count() == sum(len(s) for s in values())``.
    """

    def __init__(self):
        self._m = defaultdict(set)  # key -> set of values
        self._pair_cnt = 0          # total number of (key, value) pairs

    def __contains__(self, k):
        "if a key in mmap"
        return k in self._m

    def __getitem__(self, k):
        """Return an immutable snapshot of the values under *k*."""
        if k not in self._m:
            raise KeyError(f"unknown key {k}")
        return frozenset(self._m[k])

    def __len__(self):
        """Number of distinct keys."""
        return len(self._m)

    def pair_count(self):
        """Total number of (key, value) pairs."""
        return self._pair_cnt

    def keys(self):
        return self._m.keys()

    def values(self):
        return self._m.values()

    def items(self):
        return self._m.items()

    def iter_all_pairs(self):
        """Yield every (key, value) pair."""
        for k, s in self._m.items():
            for v in s:
                yield k, v

    def clear(self):
        """Remove all pairs."""
        self._m.clear()
        self._pair_cnt = 0

    def has_pair(self, k, v):
        """Return True if the pair (k, v) is stored."""
        s = self._m.get(k, None)
        if not s:
            return False
        return v in s

    def add_pair(self, k, v):
        """Add a pair k,v to mmap.

        Bug fix: the counter was previously incremented even when the pair
        was already present (``set.add`` is a no-op), breaking the
        pair_count() invariant.
        """
        s = self._m[k]
        if v not in s:
            s.add(v)
            self._pair_cnt += 1

    def pop_pair(self, k, v):
        "if k,v pair not present, raise KeyError"
        if k not in self._m:
            raise KeyError(f"unknown key {k}")
        if v not in self._m[k]:
            raise KeyError(f"unknown value {v}")
        self._m[k].remove(v)
        self._pair_cnt -= 1

    def pop_all(self, k, d=None):
        """remove all pairs under given key, return the value
        if k not found, return d or KeyError if d is None
        """
        if k not in self._m:
            if d is None:
                raise KeyError(f"unknown key {k}")
            return d
        s = self._m.pop(k)
        self._pair_cnt -= len(s)
        return s
class BiMMap:
    """A bidirectional multimap.

    Keeps a forward MMap (key -> values) and an inverse MMap (value -> keys)
    in sync; the ``r``-prefixed methods operate on the inverse direction.
    """

    # Sentinel default distinguishing "no default given" from an explicit None.
    DEF_NONE = "BIMAP__NONE"

    def __init__(self):
        self._m = MMap()   # forward: key -> set of values
        self._im = MMap()  # inverse: value -> set of keys

    def pair_count(self):
        """Total number of (key, value) pairs."""
        return self._m.pair_count()

    def keys(self):
        return self._m.keys()

    def rkeys(self):
        return self._im.keys()

    def values(self):
        return self._m.values()

    def rvalues(self):
        return self._im.values()

    def items(self):
        return self._m.items()

    def ritems(self):
        return self._im.items()

    def clear(self):
        """Remove all pairs from both directions."""
        self._m.clear()
        self._im.clear()

    def get(self, k, d=DEF_NONE):
        """Forward lookup; raise KeyError for unknown keys unless *d* is given."""
        if d is self.DEF_NONE:
            return self._m[k]
        return self._m[k] if k in self._m else d

    def rget(self, k, d=DEF_NONE):
        """Inverse lookup; raise KeyError for unknown values unless *d* is given."""
        if d is self.DEF_NONE:
            return self._im[k]
        return self._im[k] if k in self._im else d

    def iter_all_pairs(self):
        return self._m.iter_all_pairs()

    def riter_all_pairs(self):
        return self._im.iter_all_pairs()

    def has_pair(self, k, v):
        return self._m.has_pair(k, v)

    def rhas_pair(self, k, v):
        return self._im.has_pair(k, v)

    def add_pair(self, k, v, dup='raise'):
        """Add (k, v) to both directions.

        Bug fix: the duplicate policy is now compared with ``==`` instead of
        ``is`` -- identity of string literals is a CPython implementation
        detail and raises SyntaxWarning on 3.8+.
        """
        if self.has_pair(k, v):
            if dup == 'raise':
                raise KeyError(f"({k}, {v}) already in map")
            elif dup == 'ignore':
                return
            else:
                raise RuntimeError(f"unexpected dup op {dup}")
        self._m.add_pair(k, v)
        self._im.add_pair(v, k)

    def radd_pair(self, k, v, dup='raise'):
        """Add the pair in the inverse direction (here *k* is a value)."""
        if self.rhas_pair(k, v):
            if dup == 'raise':
                raise KeyError(f"({k}, {v}) already in map")
            elif dup == 'ignore':
                return
            else:
                raise RuntimeError(f"unexpected dup op {dup}")
        self._im.add_pair(k, v)
        self._m.add_pair(v, k)

    def pop_pair(self, k, v):
        """Remove (k, v) from both directions; raise KeyError if absent."""
        self._m.pop_pair(k, v)
        self._im.pop_pair(v, k)

    def rpop_pair(self, k, v):
        self._im.pop_pair(k, v)
        self._m.pop_pair(v, k)

    def pop_all(self, k):
        """Remove every pair under key *k* and return the removed value set
        (previously returned None; returning the set matches MMap.pop_all)."""
        s = self._m.pop_all(k)
        for v in s:
            self._im.pop_pair(v, k)
        return s

    def rpop_all(self, k):
        """Inverse-direction counterpart of :meth:`pop_all`."""
        s = self._im.pop_all(k)
        for v in s:
            self._m.pop_pair(v, k)
        return s
| StarcoderdataPython |
5036954 | <gh_stars>10-100
import numpy as np
import json
from load_data import make_freq_dict
from bayesian_blocks import bayesian_blocks
def find_lamhdas(x, bins):
    '''
    Parameters:
    -----------
    => x = train array
    => bins = bins estimated by using bayesian blocks on train data
    Function:
    ----------
    Used to find the lambda parameters for each bin: the number of points
    falling in the bin divided by the bin width.  The last bin is closed on
    the right so points equal to bins[-1] are counted.
    Output:
    -------
    returns list of lambdas.
    '''
    lambdas = []
    last = len(bins) - 2
    for i in range(len(bins) - 1):
        lo, hi = bins[i], bins[i + 1]
        count = sum(1 for point in x if lo <= point < hi)
        if i == last:
            # Include points sitting exactly on the right-most edge.
            count += sum(1 for point in x if point == hi)
        lambdas.append(count / abs(lo - hi))
    return lambdas
def likeli_poisson(x_test, lambdas, bins):
    '''
    Parameters:
    -----------
    => x_test = test array
    => lambdas = list of lambdas estimated for each bin on train data.
    => bins = bins estimated by using bayesian blocks on train data
    Function:
    ----------
    Used to find the Log Likelihood of the test data, using parameters
    estimated on train data.
    Output:
    -------
    returns likelihood value.
    '''
    total = 0
    last = len(bins) - 2
    for i in range(len(bins) - 1):
        rate = lambdas[i]
        log_rate = np.log(rate)
        for point in x_test:
            # l(lambda) = ln f(x|lambda) = t*ln(lambda) - lambda, n = 1,
            # i.e. the likelihood contribution of a single point.
            if bins[i] <= point < bins[i + 1]:
                total += point * log_rate - rate
            # Points on the right-most edge belong to the last bin.
            if i == last and point == bins[i + 1]:
                total += point * log_rate - rate
    return total
def poisson_bay_block(tr, tes, k, gammas, iter):
    '''
    Parameters:
    -----------
    => tr = train data (list of per-fold train arrays)
    => tes = test data (list of per-fold test arrays)
    => k = value of factor in prior function (1-gamma)/(1-gamma**(N//2**k))
    => gammas = list of gammas to iterate over
    => iter = number of times the experiment has to be performed
              (NOTE: shadows the ``iter`` builtin)
    Function:
    ----------
    Used to estimate the parameters of poisson on train data.
    And calculate the log likelihood for test data for varying gamma.
    Also dumps the mean/std ratio of the negative log likelihoods per gamma
    to ./select_best/poi_mu_sig_<test-fold-size>.json for model selection.
    Output:
    -------
    returns likelihood list with parameters.
    '''
    total_likeli = []
    dumper = {}
    for gamma in gammas:
        likeli_poi = []
        fold = 0
        for_best = []
        for i in range(iter):
            X_train = tr[i]
            X_test = tes[i]
            # Bin edges are fitted on the training fold only.
            bin_edges = bayesian_blocks(X_train, fitness='poisson', lam=k, gamma=gamma)
            lambdas = find_lamhdas(X_train, bin_edges)
            likli = likeli_poisson(X_test, lambdas, bin_edges)
            # NOTE(review): tr_likeli is computed but never used -- confirm.
            tr_likeli = likeli_poisson(X_train, lambdas, bin_edges)
            likeli_poi.append([fold, -likli, len(bin_edges) - 1])  # negetive log likeli
            for_best.append(-likli)
            fold += 1
        total_likeli.append([gamma, likeli_poi])
        mu = np.mean(for_best)
        sig = np.std(for_best)
        dumper[gamma] = mu / sig
    # Assumes ./select_best/ already exists -- TODO confirm.
    with open("./select_best/poi_mu_sig_" + str(len(tes[0])) + ".json", "w") as write_file:
        json.dump(dumper, write_file)
    return total_likeli
257869 | #!/usr/bin/env python
from __future__ import print_function
import argparse
import os
import random
# Fixed seed so the shuffled ID order is reproducible across runs.
SEED = 212

if __name__ == '__main__':
    arg_parser = argparse.ArgumentParser(
        description='Determine which IDs have not been used. Issue a requested number of IDs. '
                    'IDs are assumed to be 5 digit numbers 10000 - 99999.')
    # NOTE(review): the default looks like a typo for "used_ids.txt" -- kept
    # as-is to avoid changing behavior; confirm.
    arg_parser.add_argument('--used_ids',
                            default='ised_ids.txt',
                            type=str,
                            help='A file containing all used IDs. No headers, white space delimeted IDs. '
                                 'One or more per row.')
    arg_parser.add_argument('--number_of_ids',
                            default=1,
                            type=int,
                            help='A number of IDs to issue.')
    arg_parser.add_argument('--update_used_ids',
                            default=False,
                            action='store_true',
                            help='Flag to update the used ids file.')
    args = arg_parser.parse_args()

    random.seed(SEED)

    # Read the used IDs (whitespace separated, one or more per row).
    used_ids = []
    with open(args.used_ids) as f:
        for l in f:
            used_ids += l.rstrip('\n').split()

    all_ids = set(map(str, range(10000, 100000)))
    avail_ids = list(all_ids.difference(set(used_ids)))
    print('Number of avilable IDs: %s' % str(len(avail_ids)))

    # Bug fix: fail with a clear message instead of an IndexError when more
    # IDs are requested than remain available.
    if args.number_of_ids > len(avail_ids):
        arg_parser.error('Requested %d IDs but only %d are available.'
                         % (args.number_of_ids, len(avail_ids)))

    random.shuffle(avail_ids)
    print('Requested IDs:')
    for i in range(args.number_of_ids):
        print(avail_ids[i])

    if args.update_used_ids:
        print('Updating used IDs file ...')
        with open(args.used_ids, 'a') as f:
            for i in range(args.number_of_ids):
                f.write(avail_ids[i] + '\n')
    print('Done.')
| StarcoderdataPython |
6463936 | from CodeWriter import CodeWriter
from Parser import Parser
from Lex import Lex, ARITHMETIC_COMMANDS, PUSH_OR_POP_COMMANDS
import os, sys
# Parse and sanity-check the command-line argument.
# Bug fix: the original read sys.argv[1] first and then compared it to None;
# a missing argument raised IndexError before the check could ever run.
if len(sys.argv) < 2:
    raise Exception('missing .asm file')
asm_file = sys.argv[1]

file_name, file_type = os.path.splitext(asm_file)
if file_type.lower() != '.vm':
    raise Exception('File type is not supported - {}'.format(file_type))

# files
current_dir = os.path.dirname(os.path.realpath(__file__))
src_file = os.path.realpath(asm_file)
# NOTE(review): str.replace substitutes the first ".vm" anywhere in the path,
# not only the extension -- confirm paths never contain ".vm" elsewhere.
dest_file = src_file.replace(".vm", ".asm")

# RUN! Translate each VM command into Hack assembly.
p = Parser(src_file)
cw = CodeWriter(dest_file)
while p.has_more_command():
    c_type = p.command_type()
    if c_type == Lex.C_ARITMETIC:
        cw.write_aritmethic(p.current_command())
    if c_type == Lex.C_PUSH_OR_POP:
        cw.write_push_pop(p.current_command(), p.arg1(), p.arg2())
    p.advance()
cw.close()
| StarcoderdataPython |
3292588 | <filename>exercicios/aula12-ex044.py
# Simple payment-option calculator (exercise 044).
print('{:=^40}'.format('FORMAS DE PAGAMENTO'))
preco = float(input('Digite o valor das compras: '))
print('''ESCOLHA SUA OPÇÃO DE PAGAMENTO
[ 1 ] Dinheiro 10% DESCONTO
[ 2 ] Débito 5% DESCONTO
[ 3 ] Cartão 2x SEM DESCONTO
[ 4 ] Cartão 3x acima 20% JUROS''')
opcao = int(input('Digite sua opção: '))
if opcao == 1:
    # Cash: 10% discount.
    desconto = preco * 0.90
    print('Sua compra de R${:.2f} menos 10% desc. no Dinheiro = R${}'.format(preco, desconto))
elif opcao == 2:
    # Debit: 5% discount.
    desconto = preco * 0.95
    print('Sua compra de R${:.2f} menos 5% desc. no Débito = R${}'.format(preco, desconto))
elif opcao == 3:
    # Card in 2 installments: no discount, no interest.
    print('Sua compras de R${:.2f}, parcelado em 2x sem juros.'.format(preco))
elif opcao == 4:
    # Card in 3+ installments: flat 20% interest.
    parcelas = int(input('Quantas parcelas? '))
    acrescimo = preco * 1.20
    print('Sua compra de R${:.2f} parcelado em {}x com juros de 20% no cartão = R${}'.format(preco, parcelas, acrescimo))
else:
    # Bug fix: unknown options previously fell through silently.
    print('Opção inválida de pagamento. Tente novamente.')
| StarcoderdataPython |
44947 | """
Generate download locations within a country and download them.
Written by <NAME>.
5/2020
"""
import os
import configparser
import math
import pandas as pd
import numpy as np
import random
import geopandas as gpd
from shapely.geometry import Point
import requests
import matplotlib.pyplot as plt
from PIL import Image
from io import BytesIO
from tqdm import tqdm
import logging
import time
BASE_DIR = '.'
# repo imports
import sys
sys.path.append(BASE_DIR)
from utils import PlanetDownloader
from config import VIS_CONFIG, RANDOM_SEED
# Directory layout for the selected country's grid and downloaded images.
COUNTRY_ABBRV = VIS_CONFIG['COUNTRY_ABBRV']
COUNTRIES_DIR = os.path.join(BASE_DIR, 'data', 'countries')
GRID_DIR = os.path.join(COUNTRIES_DIR, COUNTRY_ABBRV, 'grid')
IMAGE_DIR = os.path.join(COUNTRIES_DIR, COUNTRY_ABBRV, 'images')

ACCESS_TOKEN_DIR = os.path.join(BASE_DIR, 'planet_api_key.txt')
ACCESS_TOKEN = None
with open(ACCESS_TOKEN_DIR, 'r') as f:
    # Bug fix: strip the trailing newline that readlines() keeps -- a key
    # ending in "\n" would break API authentication.
    ACCESS_TOKEN = f.readlines()[0].strip()
# Bug fix: the original used `assert cond, print(...)`, which produces an
# AssertionError whose message is None (print returns None).
assert ACCESS_TOKEN is not None, "Access token is not valid"
def create_folders():
    """Create the image output directory (and parents) if missing."""
    os.makedirs(IMAGE_DIR, exist_ok=True)
def get_polygon_download_locations(polygon, number, seed=7):
    """Sample *number* points from *polygon*, evenly where possible.

    First lays an n x n sub-grid (n = floor(sqrt(number))) over the polygon's
    bounding box and keeps the grid points that fall inside the polygon; the
    remainder is filled by uniform rejection sampling.  The seed makes the
    random part reproducible.

    Returns a list of [lat, lon] pairs.
    """
    random.seed(seed)
    min_x, min_y, max_x, max_y = polygon.bounds

    edge_num = math.floor(math.sqrt(number))
    lats = np.linspace(min_y, max_y, edge_num)
    lons = np.linspace(min_x, max_x, edge_num)
    # Cartesian product of the candidate latitudes and longitudes.
    evenly_spaced_points = np.transpose(
        [np.tile(lats, len(lons)), np.repeat(lons, len(lats))])
    assert len(evenly_spaced_points) <= number

    # Keep the evenly spaced candidates that actually fall inside the polygon.
    points = [[lat, lon]
              for lat, lon in evenly_spaced_points
              if polygon.contains(Point(lon, lat))]

    # Fill the remainder with rejection-sampled random points.
    while len(points) < number:
        candidate = Point(random.uniform(min_x, max_x),
                          random.uniform(min_y, max_y))
        if polygon.contains(candidate):
            points.append([candidate.y, candidate.x])
    return points  # list of lat/lon pairs
def generate_country_download_locations(min_population=100, num_per_grid=4):
    """
    Generates a defined number of download locations (num_per_grid) for each
    grid cell with at least the minimum number of residents (min_population),
    and writes one CSV row per sampled point to
    GRID_DIR/image_download_locs.csv.
    """
    grid = gpd.read_file(os.path.join(GRID_DIR, 'grid.shp'))
    # Keep only sufficiently populated grid cells.
    grid = grid[grid['population'] >= min_population]
    lat_lon_pairs = grid['geometry'].apply(
        lambda polygon: get_polygon_download_locations(
            polygon, number=num_per_grid))
    centroids = grid['geometry'].centroid
    columns = [
        'centroid_lat', 'centroid_lon', 'image_lat', 'image_lon', 'image_name'
    ]
    with open(os.path.join(GRID_DIR, 'image_download_locs.csv'), 'w') as f:
        f.write(','.join(columns) + '\n')
        for lat_lons, centroid in zip(lat_lon_pairs, centroids):
            for lat, lon in lat_lons:
                # Image files are named "<lat>_<lon>.png".
                name = str(lat) + '_' + str(lon) + '.png'
                to_write = [
                    str(centroid.y), str(centroid.x), str(lat), str(lon), name]
                f.write(','.join(to_write) + '\n')
    print('Generated image download locations and saved at {}'.format(
        os.path.join(GRID_DIR, 'image_download_locs.csv')))
def download_images(df):
    """Download one image per row of *df*.

    *df* must have "image_lat", "image_lon", "image_name" columns.  Images
    already present in IMAGE_DIR are skipped; failed downloads are retried
    NUM_RETRIES times with a WAIT_TIME pause and skipped on final failure.

    Bug fixes: the retry branch referenced undefined lowercase names
    (``num_retries``/``wait_time`` instead of NUM_RETRIES/WAIT_TIME) and an
    undefined ``image_save_path``, so any retry raised NameError.
    """
    imd = PlanetDownloader(ACCESS_TOKEN)
    # NOTE(review): zoom is unused here -- confirm whether it should be
    # passed to download_image.
    zoom = 16
    NUM_RETRIES = 20
    WAIT_TIME = 0.1  # seconds

    # Drop what is already downloaded.
    already_downloaded = os.listdir(IMAGE_DIR)
    print('Already downloaded ' + str(len(already_downloaded)))
    df = df.set_index('image_name').drop(already_downloaded).reset_index()
    print('Need to download ' + str(len(df)))

    # Use three years of images to find one that matches search criteria.
    min_year = 2014
    min_month = 1
    max_year = 2016
    max_month = 12

    for _, r in tqdm(df.iterrows(), total=df.shape[0]):
        lat = r.image_lat
        lon = r.image_lon
        name = r.image_name
        image_save_path = os.path.join(IMAGE_DIR, name)
        try:
            im = imd.download_image(lat, lon, min_year, min_month, max_year, max_month)
            if im is None:
                # Retry with a short pause between attempts.
                resolved = False
                for _ in range(NUM_RETRIES):
                    time.sleep(WAIT_TIME)
                    im = imd.download_image(lat, lon, min_year, min_month, max_year, max_month)
                    if im is None:
                        continue
                    plt.imsave(image_save_path, im)
                    resolved = True
                    break
                if not resolved:
                    # Could not download despite several retries; skip.
                    continue
            else:
                # No issues; save according to the naming convention.
                plt.imsave(image_save_path, im)
        except Exception:
            # Best-effort: skip images that repeatedly fail to download.
            # logging.error(f"Error-could not download {lat}, {lon}", exc_info=True)
            continue
    return
if __name__ == '__main__':
    create_folders()
    # Default to running the full pipeline unless a flag is given.
    arg = '--all'
    if len(sys.argv) >= 2:
        arg = sys.argv[1]
    assert arg in ['--all', '--generate-download-locations', '--download-images']
    if arg == '--all':
        print('Generating download locations...')
        generate_country_download_locations()
        df_download = pd.read_csv(os.path.join(GRID_DIR, 'image_download_locs.csv'))
        print('Downloading images. Might take a while...')
        download_images(df_download)
    elif arg == '--generate-download-locations':
        print('Generating download locations...')
        generate_country_download_locations()
    elif arg == '--download-images':
        # Assumes the download-locations CSV was generated in a prior run.
        df_download = pd.read_csv(os.path.join(GRID_DIR, 'image_download_locs.csv'))
        print('Downloading images. Might take a while...')
        download_images(df_download)
    else:
        raise ValueError('Args not handled correctly')
| StarcoderdataPython |
4943756 | from django.core.exceptions import ValidationError
import os
# Resolve passwords.txt relative to this module so the validator works
# regardless of the current working directory.
THIS_FOLDER = os.path.dirname(os.path.abspath(__file__))
my_file = os.path.join(THIS_FOLDER, "passwords.txt")
class CommonPasswordValidator:
    """Django-style password validator rejecting passwords that appear in
    the bundled passwords.txt word list."""

    def validate(self, password, user=None):
        """Raise ValidationError if *password* is in the common-password list.

        Bug fix: lines read from a file keep their trailing newline, so the
        original ``password == str(line)`` comparison could never match; the
        line is now stripped before comparing.
        """
        with open(my_file, "r") as f:
            for line in f:
                if password == line.strip():
                    raise ValidationError("the password is too common")
| StarcoderdataPython |
1679263 | <reponame>cgoIT/lexicon<filename>lexicon/tests/providers/test_ultradns.py<gh_stars>0
"""Integration tests for UltraDNS"""
from unittest import TestCase
from lexicon.tests.providers.integration_tests import IntegrationTests
# Hook into testing framework by inheriting unittest.TestCase and reuse
# the tests which *each and every* implementation of the interface must
# pass, by inheritance from integration_tests.IntegrationTests
class UltradnsProviderTests(TestCase, IntegrationTests):
    """TestCase for UltraDNS: inherits the shared provider integration
    tests and pins the provider name and test domain."""

    provider_name = 'ultradns'      # lexicon provider under test
    domain = 'example-abtest.com'   # domain used by the recorded tests

    def _filter_headers(self):
        # Presumably consumed by the integration-test harness to redact
        # sensitive request headers from recordings -- confirm in base class.
        return ['Authorization']

    def _filter_post_data_parameters(self):
        # Presumably redacts credentials from recorded POST bodies -- confirm.
        return ['username', 'password', 'accessToken']
| StarcoderdataPython |
6689101 | <reponame>zscole/rchain
def test_data_is_stored_and_served_by_node(command_line_options, docker_client):
assert True
| StarcoderdataPython |
8114026 | <filename>main.py
#!/usr/bin/env python3
# Import dependencies; on ImportError only the error is printed and execution
# continues (NOTE(review): later references then fail with NameError --
# confirm that exiting would not be preferable).
try:
    from cvelookup.db.dblookup import Database
    from colored import fg, attr
    import subprocess
    import sys
    import os
    if os.name != 'nt':
        # readline enables line editing/history for input(); not on Windows.
        import readline
except ImportError as err:
    print(err)

# Comparison operators accepted by the `year` command.
VALID_OPERATORS = (">", "<", "=", ">=", "<=")
class CveLookup:
    """Interactive shell around the local CVE database."""

    def __init__(self):
        """Creates a `Database` object and a dictionary
        containing all valid commands and their corresponding
        functions.
        """
        self.database_obj = Database()
        self.command_dict = {
            'showall': self.database_obj.show_all,
            'search': self.database_obj.search,
            'year': self.year,
            'update': self.update,
            'exec': self.exec,
            'exit': self.exit,
            'cls': self.clear,
            'help': self.get_help
        }

    def initiate(self):
        """Displays prompt and handles all commands"""
        while True:
            try:
                user_input = input(("%scvelookup%s" % (fg(99), attr(0))) + " > ").strip().split()
                cmd = user_input[0].strip()
            except IndexError:
                # Empty input line: just re-prompt.
                continue
            if cmd in self.command_dict:
                args = [arg.strip() for arg in user_input[1:]]
                try:
                    if len(args) == 0:
                        self.command_dict[cmd]()
                        continue
                    self.command_dict[cmd](args)
                except (TypeError, IndexError):
                    # Bug fix: commands that require arguments (e.g. a bare
                    # `year` or `search`) used to crash the shell.
                    print("%s[-] Missing or invalid arguments... see help menu by typing `help`%s" % (fg(9), attr(0)))
            else:
                print("%s[!] Invalid command%s" % (fg(9), attr(0)))

    def year(self, args=None):
        """Returns the CVE by year or range of years"""
        # Bug fix: guard against missing/short args instead of raising.
        if not args or len(args) < 2 or args[0] not in VALID_OPERATORS:
            print("\n%s[-] Invalid operator... see help menu by typing `help` %s" % (fg(9), attr(0)))
            return
        print("\n%s[*] Performing search for CVE where year is%s" % (fg(49), attr(0)), args[0], args[1])
        self.database_obj.search_year(args)

    def update(self):
        """Calls `updatedb` from `dbutils.py` to update the CVE database"""
        print("\n[+] --------------------------- %sUpdating database%s ---------------------------" % (fg(49), attr(0)))
        self.database_obj.updatedb()
        print("%s[*] Database has been succesfully updated!%s" % (fg(218), attr(0)))

    def exec(self, args=None):
        """Execute system commands"""
        try:
            subprocess.run(args)
        except Exception:
            # Bug fix: narrowed the bare `except:` so Ctrl-C is not swallowed.
            print("\n%s[*] Could not execute command: %s" % (fg(9), attr(0)), *(args or []))

    def exit(self):
        """Exit program"""
        print("%s[*] Exiting Program...%s" % (fg(9), attr(0)))
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)

    def clear(self):
        """Clear the terminal screen (Windows or POSIX)."""
        if os.name == 'nt':
            os.system('cls')
        else:
            os.system('clear')

    def get_help(self):
        """Print help message"""
        print(
            """
    Command      Arguments            Description
    ------       ---------            -----------
    help                            : display this help message
    showall                         : show CVE id's and descriptions
    search       [string]           : search CVE database for specific string
    year         >|<|=|>=|<= [year] : get CVE's for a particular year or range of years
    update                          : update the cve database (will take a couple of seconds)
    exec         [command]          : execute system command
    exit                            : exit the program
    cls                             : clear screen
    """
        )
def main():
    """Build the CveLookup shell and hand control to its prompt loop."""
    CveLookup().initiate()
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C: print a notice and exit without a traceback.
        print("\n%s[*] Exiting Program...%s" % (fg(9), attr(0)))
        try:
            sys.exit(0)
        except SystemExit:
            # Force-exit even if SystemExit is intercepted higher up.
            os._exit(0)
| StarcoderdataPython |
# The same value, 25, written in decimal, octal and hexadecimal notation.
a = 25
b = 0o31
c = 0x19

# All three print as 25.
for value in (a, b, c):
    print(value)
| StarcoderdataPython |
3531453 | import os.path
import sys
import math
import argparse
import time
import random
import numpy as np
from collections import OrderedDict
import logging
from PIL import Image
import torch
import torchvision
from torchvision import transforms
import options.options as option
from utils import util
from data import create_dataloader, create_dataset
from models import create_model
def main():
    """Training entry point for the SRN super-resolution model.

    Parses the option JSON, sets up logging/seeds/dataloaders, then runs the
    epoch/iteration loop with periodic logging, training-sample dumps,
    validation (PSNR / LPIPS) and checkpointing.
    """
    # options
    parser = argparse.ArgumentParser()
    parser.add_argument('-opt', type=str, required=True, help='Path to option JSON file.')
    opt = option.parse(parser.parse_args().opt, is_train=True)
    opt = option.dict_to_nonedict(opt)  # Convert to NoneDict, which return None for missing key.

    # train from scratch OR resume training
    if opt['path']['resume_state']:  # resuming training
        resume_state = torch.load(opt['path']['resume_state'])
    else:  # training from scratch
        resume_state = None
        util.mkdir_and_rename(opt['path']['experiments_root'])  # rename old folder if exists
        util.mkdirs((path for key, path in opt['path'].items() if not key == 'experiments_root'
                     and 'pretrain_model' not in key and 'resume' not in key))

    # config loggers. Before it, the log will not work
    util.setup_logger(None, opt['path']['log'], 'train', level=logging.INFO, screen=True)
    util.setup_logger('val', opt['path']['log'], 'val', level=logging.INFO)
    logger = logging.getLogger('base')

    if resume_state:
        logger.info('Resuming training from epoch: {}, iter: {}.'.format(
            resume_state['epoch'], resume_state['iter']))
        option.check_resume(opt)  # check resume options

    logger.info(option.dict2str(opt))

    # tensorboard logger
    if opt['use_tb_logger'] and 'debug' not in opt['name']:
        from tensorboardX import SummaryWriter
        tb_logger = SummaryWriter(logdir='../../SRN_tb_logger/' + opt['name'])

    # random seed
    seed = opt['train']['manual_seed']
    if seed is None:
        seed = random.randint(1, 10000)
    logger.info('Random seed: {}'.format(seed))
    util.set_random_seed(seed)

    # Bug fix: "benckmark" was a typo -- it set a nonexistent attribute, so
    # cuDNN autotuning was silently never enabled.
    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn.deterministic = True

    # create train and val dataloader
    for phase, dataset_opt in opt['datasets'].items():
        if phase == 'train':
            train_set = create_dataset(dataset_opt)
            train_size = int(math.ceil(len(train_set) / dataset_opt['batch_size']))
            logger.info('Number of train images: {:,d}, iters: {:,d}'.format(
                len(train_set), train_size))
            total_iters = int(opt['train']['niter'])
            total_epochs = int(math.ceil(total_iters / train_size))
            logger.info('Total epochs needed: {:d} for iters {:,d}'.format(
                total_epochs, total_iters))
            train_loader = create_dataloader(train_set, dataset_opt)
        elif phase == 'val':
            val_set = create_dataset(dataset_opt)
            val_loader = create_dataloader(val_set, dataset_opt)
            logger.info('Number of val images in [{:s}]: {:d}'.format(dataset_opt['name'],
                                                                      len(val_set)))
        else:
            raise NotImplementedError('Phase [{:s}] is not recognized.'.format(phase))
    assert train_loader is not None

    # create model
    model = create_model(opt)

    # resume training
    if resume_state:
        start_epoch = resume_state['epoch']
        current_step = resume_state['iter']
        model.resume_training(resume_state, opt['train'])  # handle optimizers and schedulers
    else:
        current_step = 0
        start_epoch = 0

    # training
    logger.info('Start training from epoch: {:d}, iter: {:d}'.format(start_epoch, current_step))
    for epoch in range(start_epoch, total_epochs):
        for _, train_data in enumerate(train_loader):
            current_step += 1
            if current_step > total_iters:
                break
            # update learning rate
            model.update_learning_rate()

            # training
            model.feed_data(train_data, True)
            model.optimize_parameters(current_step)

            # log
            if current_step % opt['logger']['print_freq'] == 0:
                logs = model.get_current_log()
                message = '<epoch:{:3d}, iter:{:8,d}, lr:{:.3e}> '.format(
                    epoch, current_step, model.get_current_learning_rate())
                for k, v in logs.items():
                    message += '{:s}: {:.4e} '.format(k, v)
                    # tensorboard logger
                    if opt['use_tb_logger'] and 'debug' not in opt['name']:
                        tb_logger.add_scalar(k, v, current_step)
                logger.info(message)

            # training samples
            if opt['train']['save_tsamples'] and current_step % opt['train']['save_tsamples'] == 0:
                fake_LRs = os.listdir(opt['datasets']['train']['dataroot_fake_LR'])
                real_LRs = os.listdir(opt['datasets']['train']['dataroot_real_LR'])
                HRs = os.listdir(opt['datasets']['train']['dataroot_HR'])

                for i in range(5):
                    # Pick a random index valid for both LR listings.
                    random_index = np.random.choice(range(np.min([len(fake_LRs), len(real_LRs)])))
                    fake_LR_path = os.path.join(opt['datasets']['train']['dataroot_fake_LR'], fake_LRs[random_index])
                    real_LR_path = os.path.join(opt['datasets']['train']['dataroot_real_LR'], real_LRs[random_index])
                    HR_path = os.path.join(opt['datasets']['train']['dataroot_HR'], HRs[random_index])
                    fake_LR = np.array(Image.open(fake_LR_path))
                    real_LR = np.array(Image.open(real_LR_path))
                    HR = np.array(Image.open(HR_path))

                    # Center-crop 128x128 LR / 512x512 HR patches.
                    h, w, _ = fake_LR.shape
                    fake_LR = fake_LR[h // 2 - 64:h // 2 + 64, w // 2 - 64:w // 2 + 64, :]
                    h, w, _ = HR.shape
                    HR = HR[h // 2 - 64 * 4:h // 2 + 64 * 4, w // 2 - 64 * 4:w // 2 + 64 * 4, :]
                    h, w, _ = real_LR.shape
                    real_LR = real_LR[h // 2 - 64:h // 2 + 64, w // 2 - 64:w // 2 + 64, :]

                    fake_LR_temp = np.ascontiguousarray(np.transpose(fake_LR, (2, 0, 1)))
                    real_LR_temp = np.ascontiguousarray(np.transpose(real_LR, (2, 0, 1)))

                    # NOTE(review): the 42x42 buffers are smaller than the
                    # 128-px crops above; this assignment only works if the
                    # actual inputs are smaller -- confirm expected sizes.
                    fake_LR = np.zeros((np.shape(fake_LR_temp)[0], 42, 42))
                    fake_LR[:, :np.shape(fake_LR_temp)[1], :np.shape(fake_LR_temp)[2]] = fake_LR_temp
                    real_LR = np.zeros((np.shape(real_LR_temp)[0], 42, 42))
                    real_LR[:, :np.shape(real_LR_temp)[1], :np.shape(real_LR_temp)[2]] = real_LR_temp

                    fake_LR = torch.from_numpy(fake_LR).float().unsqueeze(0) / 255
                    real_LR = torch.from_numpy(real_LR).float().unsqueeze(0) / 255
                    HR = torch.from_numpy(np.ascontiguousarray(np.transpose(HR, (2, 0, 1)))).float().unsqueeze(0) / 255
                    LR = torch.cat([fake_LR, real_LR], dim=0)

                    data = {'LR': LR, 'HR': HR}
                    model.feed_data(data, False)
                    model.test(tsamples=True)
                    visuals = model.get_current_visuals(tsamples=True)
                    fake_SR = visuals['SR'][0]
                    real_SR = visuals['SR'][1]
                    fake_hf = visuals['hf'][0]
                    real_hf = visuals['hf'][1]
                    HR = visuals['HR']
                    HR_hf = visuals['HR_hf'][0]
                    # image_1 = torch.cat([fake_LR[0], fake_SR[0]], dim=2)
                    # image_2 = torch.cat([real_LR[0], real_SR[0]], dim=2)
                    # image_1 = np.clip(torch.cat([fake_SR, HR, real_SR], dim=2), 0, 1)
                    # image_2 = np.clip(torch.cat([fake_hf, HR_hf, real_hf], dim=2), 0, 1)
                    # image = torch.cat([image_1, image_2], dim=1)
                    # tb_logger.add_image('train/train_samples_{}'.format(str(i)), image, current_step)
                    # logger.info('Saved training Samples')

            # validation
            if current_step % opt['train']['val_freq'] == 0:
                avg_psnr = 0.0
                idx = 0
                avg_lpips = 0.0
                for val_data in val_loader:
                    idx += 1
                    img_name = os.path.splitext(os.path.basename(val_data['LR_path'][0]))[0]
                    img_dir = os.path.join(opt['path']['val_images'], img_name)
                    util.mkdir(img_dir)

                    model.feed_data(val_data, False)
                    model.test()

                    visuals = model.get_current_visuals()
                    sr_img = util.tensor2img(visuals['SR'])  # uint8
                    if 'HR' in opt['datasets']['val']['mode']:
                        gt_img = util.tensor2img(visuals['HR'])  # uint8
                        log_info = '{}'.format(val_data['HR_path'][0].split('/')[-1])
                        if opt['val_lpips']:
                            lpips = visuals['LPIPS']
                            avg_lpips += lpips
                            log_info += ' LPIPS:{:.3f}'.format(lpips.numpy())
                        if opt['use_domain_distance_map']:
                            ada_w = visuals['adaptive_weights']
                            log_info += ' Adaptive weights:{:.2f}'.format(ada_w.numpy())
                        logger.info(log_info)

                    # Save SR images for reference
                    save_img_path = os.path.join(img_dir, '{:s}_{:d}.png'.format(
                        img_name, current_step))
                    util.save_img(sr_img, save_img_path)

                    # calculate PSNR
                    if 'HR' in opt['datasets']['val']['mode']:
                        crop_size = opt['scale']
                        gt_img = gt_img / 255.
                        sr_img = sr_img / 255.
                        cropped_sr_img = sr_img[crop_size:-crop_size, crop_size:-crop_size, :]
                        cropped_gt_img = gt_img[crop_size:-crop_size, crop_size:-crop_size, :]
                        avg_psnr += util.calculate_psnr(cropped_sr_img * 255, cropped_gt_img * 255)

                avg_psnr = avg_psnr / idx
                if opt['val_lpips']:
                    avg_lpips = avg_lpips / idx
                    print('Mean LPIPS:', avg_lpips.numpy())

                # log
                logger.info('# Validation # PSNR: {:.4e}'.format(avg_psnr))
                logger_val = logging.getLogger('val')  # validation logger
                if opt['val_lpips']:
                    logger_val.info('<epoch:{:3d}, iter:{:8,d}> psnr: {:.4e}, LPIPS: {:.4f}'.format(
                        epoch, current_step, avg_psnr, avg_lpips))
                else:
                    logger_val.info('<epoch:{:3d}, iter:{:8,d}> psnr: {:.4e}'.format(
                        epoch, current_step, avg_psnr))
                # tensorboard logger
                if opt['use_tb_logger'] and 'debug' not in opt['name']:
                    tb_logger.add_scalar('psnr', avg_psnr, current_step)
                    tb_logger.add_scalar('LPIPS', avg_lpips, current_step)

            # save models and training states
            if current_step % opt['logger']['save_checkpoint_freq'] == 0:
                logger.info('Saving models and training states.')
                model.save(current_step)
                model.save_training_state(epoch, current_step)

    logger.info('Saving the final model.')
    model.save('latest')
    logger.info('End of training.')


if __name__ == '__main__':
    main()
| StarcoderdataPython |
11284659 | import time
import questionary
import serial_ports
import edit_list
import config
import twitchapi
import serial
import time
def menu():
    """Show the main menu and return the selected action string."""
    options = ['Start', 'Show the streamers list',
               'Delete a streamer from the list', 'Add a streamer to the list']
    selection = questionary.select('What do you want to do?',
                                   choices=options,
                                   default='Start').ask()
    return selection
# Ask the user for a serial port; 'Refresh' re-scans until a real port is picked.
serial_port = serial_ports.select_serial_port()
while serial_port == 'Refresh':
    serial_port = serial_ports.select_serial_port()
# Open the Arduino connection at 9600 baud.
arduino = serial.Serial(serial_port, 9600)
def get_streamer_status(streamer):
    """Query the Twitch API for *streamer* and light the lamp if online.

    NOTE(review): nothing is sent when the streamer is offline, so the lamp
    presumably keeps its last state -- confirm this is intended.
    """
    request = twitchapi.request_json(streamer)
    # Twitch returns an empty "data" list when the channel is offline.
    if request['data'] == []:
        pass
    else:
        get_color_and_send(streamer)
        time.sleep(4)
def get_color_and_send(streamer):
    """Look up the streamer's configured color and signal the Arduino.

    Only the first character of the color string is written -- presumably the
    Arduino sketch keys on single-letter color codes; confirm.
    """
    dic = edit_list.read_file()
    color = dic[streamer]
    arduino.write(str.encode(color[0]))
# Interactive main loop: show the menu until the user picks 'Start',
# then monitor the streamer list.
while True:
    selected = menu()
    if selected == 'Show the streamers list':
        questionary.select(edit_list.dict_keys_to_list(),
                           choices=['Back']).ask()
    elif selected == 'Delete a streamer from the list':
        delete_nick = questionary.select(
            'Who do you want to delete?', choices=edit_list.dictionary_with_back()).ask()
        if delete_nick == 'Back':
            pass
        else:
            edit_list.delete_element(delete_nick)
    elif selected == 'Add a streamer to the list':
        nickname = questionary.text(
            "Enter streamer's twitch.tv nickname").ask()
        color = questionary.select(
            'Choose a color of the lamp that will be on, when the streamer is online', choices=config.colors).ask()
        edit_list.add_element(nickname, color)
    # NOTE(review): once 'Start' is selected this inner loop never exits, so
    # the menu is never shown again -- confirm that is intended.
    while selected == 'Start':
        for streamer in edit_list.read_file():
            get_streamer_status(streamer)
def register_routes(api, app, root="app"):
    """Attach every API sub-module's routes to *api*/*app*.

    *root* is accepted for interface compatibility but is not used here.
    """
    from app.api.model import register_routes as attach_model
    from app.api.meta import register_routes as attach_meta
    from app.api.pipelines import register_routes as attach_pipelines
    from app.api.composer import register_routes as attach_composer
    from app.api.data import register_routes as attach_data
    from app.api.auth import register_routes as attach_token
    from app.api.showcase import register_routes as attach_showcase
    from app.api.sandbox import register_routes as attach_sandbox
    from app.api.analytics import register_routes as attach_analytics

    # Register in the same order the imports are listed above.
    for attach in (attach_model, attach_meta, attach_pipelines,
                   attach_composer, attach_data, attach_token,
                   attach_showcase, attach_sandbox, attach_analytics):
        attach(api, app)
| StarcoderdataPython |
3519342 | <gh_stars>1-10
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# pylint: disable=missing-function-docstring
import logging
import pickle
import tempfile
from pathlib import Path
from typing import Any, Dict, Optional
import numpy as np
import torch
from sacred import Experiment
from tsbench.analysis import ModelRecommenderAnalyzer
from tsbench.constants import DEFAULT_DATA_PATH, DEFAULT_EVALUATIONS_PATH
from tsbench.evaluations.tracking import ModelTracker
from tsbench.recommender import create_recommender
from tsbench.surrogate import create_surrogate
# Sacred experiment object; config and entry point hang off it below.
ex = Experiment()
@ex.config
def experiment_config():
    """Sacred config function: every local variable below becomes a config
    entry keyed by its *name*, so these variables must not be renamed.
    The `# type: ignore` comments silence type checkers for sacred's
    local-variable capture magic.
    """
    # pylint: disable=unused-variable
    experiment = "test" # type: ignore
    data_path = str(DEFAULT_DATA_PATH) # type: ignore
    evaluations_path = str(DEFAULT_EVALUATIONS_PATH) # type: ignore
    recommender = "pareto" # type: ignore
    num_recommendations = 20 # type: ignore
    objectives = "ncrps_mean,latency_mean" # type: ignore
    focus_objective = None # type: ignore
    enforce_single_objective = False # type: ignore
    # Nested surrogate configuration; the sub-dict matching surrogate["name"]
    # supplies keyword arguments to create_surrogate in main().
    surrogate = { # type: ignore
        "name": "mlp",
        "inputs": {
            "use_simple_dataset_features": False,
            "use_seasonal_naive_performance": False,
            "use_catch22_features": False,
        },
        "outputs": {
            "normalization": "quantile",
            "imputation": False,
        },
        "xgboost": {
            "objective": "regression",
        },
        "autogluon": {
            "time_limit": 10,
        },
        "mlp": {
            "objective": "ranking",
            "discount": "linear",
            "hidden_layer_sizes": [32, 32],
            "weight_decay": 0.01,
            "dropout": 0.0,
        },
    }
@ex.automain
def main(
    _seed: int,
    data_path: str,
    evaluations_path: str,
    recommender: str,
    num_recommendations: int,
    objectives: str,
    focus_objective: Optional[str],
    enforce_single_objective: bool,
    surrogate: Dict[str, Any],
):
    """Evaluate a model recommender on tracked evaluations and store the
    resulting recommendations as a pickled sacred artifact.

    All parameters are injected by sacred from `experiment_config`;
    `objectives` is a comma-separated list of metric names.
    """
    assert (
        recommender != "surrogate" or surrogate["name"] is not None
    ), "Name of surrogate must be provided if surrogate recommender is used."
    # Seed both numpy and torch for reproducibility of the run.
    np.random.seed(_seed)
    torch.manual_seed(_seed)
    logging.getLogger("pytorch_lightning").setLevel(logging.WARNING)
    # First, get the tracker
    print("Fetching the data...")
    tracker = ModelTracker.from_directory(
        Path(evaluations_path), data_path=Path(data_path)
    )
    # Then, potentially initialize the surrogate
    recommender_args: Dict[str, Any] = {
        "objectives": objectives.split(","),
        "focus": focus_objective,
    }
    # NOTE(review): the surrogate is constructed only for the "pareto"
    # recommender, while the assert above guards the "surrogate"
    # recommender - confirm this branch condition is intended.
    if recommender == "pareto":
        print("Initializing the surrogate...")
        # Latency / parameter-count metrics are dropped from the surrogate's
        # prediction targets when imputation is enabled for outputs.
        surrogate_metrics = [
            m
            for m in objectives.split(",")
            if (
                not m.startswith("latency")
                and not m.startswith("num_model_parameters")
            )
            or not surrogate["outputs"]["imputation"]
        ]
        recommender_args["surrogate"] = create_surrogate(
            surrogate["name"],
            predict=surrogate_metrics,
            tracker=tracker,
            input_flags=surrogate["inputs"],
            output_normalization=surrogate["outputs"]["normalization"],
            impute_simulatable=surrogate["outputs"]["imputation"],
            # Forward surrogate-specific kwargs (e.g. the "mlp" sub-dict).
            **(
                surrogate[surrogate["name"]]
                if surrogate["name"] in surrogate
                else {}
            )
        )
    elif recommender == "optimal":
        recommender_args["tracker"] = tracker
    elif recommender == "greedy":
        recommender_args["enforce_single_objective"] = enforce_single_objective
    # Then, we can create the recommender
    print("Initializing the recommender...")
    recommender_instance = create_recommender(recommender, **recommender_args)
    # And evaluate it
    print("Evaluating the recommender...")
    evaluator = ModelRecommenderAnalyzer(
        tracker,
        recommender_instance,
        num_recommendations=num_recommendations,
    )
    recommendations = evaluator.run()
    # Eventually, we store the results
    print("Storing the results...")
    with tempfile.TemporaryDirectory() as d:
        path = Path(d) / "recommendations.pickle"
        with path.open("wb+") as f:
            pickle.dump(recommendations, f)
        ex.add_artifact(path, content_type="application/octet-stream")
| StarcoderdataPython |
6645782 | from base_sub_rig_template import SubRigTemplate
class QuadrupedArm(SubRigTemplate):
    """Sub-rig template for a quadruped arm/front-limb rig ('quadarm')."""
    # Extend the parent template's metadata with this rig's name;
    # merge(..., new=True) presumably returns a new mapping rather than
    # mutating the parent's - TODO confirm against SubRigTemplate.
    BUILT_IN_META_DATA = SubRigTemplate.BUILT_IN_META_DATA.merge({'name': 'quadarm'}, new=True)
4803608 | import json
if __name__ == "__main__":
    # Convert TextRank JSON-lines output ("result.json", one object per
    # line with "text" and "rank" keys) into "text:rank" lines.
    # Context managers replace the manual open/close pair so both files
    # are closed even if a line fails to parse.
    with open("result.json", "r") as fp, open("textrank_output.txt", "w") as wp:
        for line in fp:
            wd = json.loads(line.strip())
            wp.write("{0}:{1}\n".format(wd["text"], wd["rank"]))
| StarcoderdataPython |
def get_urls(*args, **kwargs):
    """Return ``(urls, extra)``: the set of URLs to fetch and an empty set.

    All positional and keyword arguments are accepted and ignored so the
    function matches the generic url-provider call signature.
    """
    readme_url = ('https://raw.githubusercontent.com/rtfd/'
                  'sphinx_rtd_theme/master/README.rst')
    return {readme_url}, set()
11204633 | import tensorflow as tf
import numpy as np
class Selector(object):
    """Bag-level selectors for multi-instance relation classification (TF1).

    `x` holds one feature vector per sentence; `scope` holds bag
    boundaries so bag i spans ``x[scope[i]:scope[i+1]]``. Each selector
    reduces a bag to one representation and classifies it into
    ``num_classes`` relations, returning ``(logits_or_probs, repre)``.
    NOTE(review): looks like a distant-supervision (OpenNRE-style)
    selector - confirm against the calling model.
    """
    def __init__(self, num_classes, is_training = False, drop_prob = None):
        self.num_classes = num_classes
        self.is_training = is_training
        # Dropout *rate* (probability of dropping), not keep-probability.
        self.dropout = drop_prob
    def __call__(self, is_training = False, drop_prob = None):
        # Reconfigure training mode / dropout without rebuilding the object.
        self.is_training = is_training
        self.dropout = drop_prob
    def __dropout__(self, x):
        # Apply dropout only when a rate is configured; `training` gates it
        # so inference passes through unchanged.
        if self.dropout:
            return tf.layers.dropout(x, rate = self.dropout, training = self.is_training)
        else:
            return x
    def __logits__(self, x, var_scope = None, reuse = tf.AUTO_REUSE):
        """Shared linear classification layer: x @ relation_matrix.T + bias."""
        with tf.variable_scope(var_scope or 'logits', reuse = reuse):
            relation_matrix = tf.get_variable('relation_matrix', [self.num_classes, x.shape[1]], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())
            bias = tf.get_variable('bias', [self.num_classes], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())
            logits = tf.matmul(x, tf.transpose(relation_matrix)) + bias
            return logits
    def __attention_train_logits__(self, x, query, var_scope = None, reuse = None):
        """Per-sentence attention score against the gold relation embedding.

        `query` holds one label index per sentence; that row of
        relation_matrix acts as the attention query vector.
        """
        with tf.variable_scope(var_scope or 'attention_logits', reuse = reuse):
            relation_matrix = tf.get_variable('relation_matrix', [self.num_classes, x.shape[1]], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())
            # NOTE(review): `bias` is created but unused here; it exists so
            # the variable scope matches __logits__ for sharing - confirm.
            bias = tf.get_variable('bias', [self.num_classes], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())
            current_attention = tf.nn.embedding_lookup(relation_matrix, query)
            attention_logit = tf.reduce_sum(current_attention * x, 1)
            return attention_logit
    def __attention_test_logits__(self, x, var_scope = None, reuse = None):
        """Attention scores of every sentence against every relation."""
        with tf.variable_scope(var_scope or 'attention_logits', reuse = reuse):
            relation_matrix = tf.get_variable('relation_matrix', [self.num_classes, x.shape[1]], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())
            # NOTE(review): `bias` unused here as well (see train variant).
            bias = tf.get_variable('bias', [self.num_classes], dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer())
            return tf.matmul(x, tf.transpose(relation_matrix))
    def no_bag(self, x):
        """No bagging: classify every sentence independently."""
        with tf.name_scope("no_bag"):
            x = self.__dropout__(x)
            return self.__logits__(x, "no_bag_logits", False), x
    def attention(self, x, scope, query, dropout_before = False):
        """Selective attention over each bag's sentences.

        Training uses the gold label (`query`) as attention query; test
        time scores each bag against every relation and takes the max
        probability per relation.
        """
        with tf.name_scope("attention"):
            if self.is_training:
                if dropout_before:
                    x = self.__dropout__(x)
                attention_logit = self.__attention_train_logits__(x, query, "attention_logits", False)
                tower_repre = []
                for i in range(scope.shape[0] - 1):
                    # Softmax-weighted sum of the bag's sentence vectors.
                    sen_matrix = x[scope[i]:scope[i + 1]]
                    attention_score = tf.nn.softmax(tf.reshape(attention_logit[scope[i]:scope[i + 1]], [1, -1]))
                    final_repre = tf.squeeze(tf.matmul(attention_score, sen_matrix))
                    tower_repre.append(final_repre)
                if not dropout_before:
                    stack_repre = self.__dropout__(tf.stack(tower_repre))
                else:
                    stack_repre = tf.stack(tower_repre)
                return self.__logits__(stack_repre, "attention_logits", True), stack_repre
            else:
                test_attention_logit = self.__attention_test_logits__(x, "attention_logits", False)
                test_tower_output = []
                test_repre = []
                for i in range(scope.shape[0] - 1):
                    # One bag representation per candidate relation.
                    test_attention_score = tf.nn.softmax(tf.transpose(test_attention_logit[scope[i]:scope[i+1],:]))
                    final_repre = tf.matmul(test_attention_score, x[scope[i]:scope[i+1]])
                    logits = self.__logits__(final_repre, "attention_logits", True)
                    test_repre.append(final_repre)
                    # test_tower_output.append(tf.diag_part(tf.nn.softmax(logits)))
                    test_tower_output.append(tf.reduce_max(tf.nn.softmax(logits), axis=0))
                test_repre = tf.reshape(tf.stack(test_repre), [scope.shape[0] - 1, self.num_classes, -1])
                test_output = tf.reshape(tf.stack(test_tower_output), [scope.shape[0] - 1, self.num_classes])
                return test_output, test_repre
    def average(self, x, scope, dropout_before = False):
        """Represent each bag by the mean of its sentence vectors."""
        with tf.name_scope("average"):
            if dropout_before:
                x = self.__dropout__(x)
            tower_repre = []
            for i in range(scope.shape[0] - 1):
                repre_mat = x[scope[i]:scope[i + 1]]
                repre = tf.reduce_mean(repre_mat, axis=0)
                tower_repre.append(repre)
            if not dropout_before:
                stack_repre = self.__dropout__(tf.stack(tower_repre))
            else:
                stack_repre = tf.stack(tower_repre)
            return self.__logits__(stack_repre, "average_logits", False), stack_repre
    def maximum(self, x, scope, dropout_before = False):
        """Represent each bag by its highest-scoring sentence ("at least one")."""
        with tf.name_scope("maximum"):
            if dropout_before:
                x = self.__dropout__(x)
            tower_repre = []
            for i in range(scope.shape[0] - 1):
                repre_mat = x[scope[i]:scope[i + 1]]
                logits = self.__logits__(repre_mat, "maximum_logits")
                # Pick the sentence whose best class logit is largest.
                j = tf.argmax(tf.reduce_max(logits, axis = 1), output_type=tf.int32)
                tower_repre.append(repre_mat[j])
            if not dropout_before:
                stack_repre = self.__dropout__(tf.stack(tower_repre))
            else:
                stack_repre = tf.stack(tower_repre)
            return self.__logits__(stack_repre, "maximum_logits", True), stack_repre
| StarcoderdataPython |
11368177 | <filename>src/cool/cmp/.ipynb_checkpoints/tools-checkpoint.py
import zlib, base64
exec(zlib.decompress(base64.b64decode('<KEY>)))
# Created by pyminifier (https://github.com/liftoff/pyminifier)
# Keep a handle on the generated implementation before shadowing its name
# with the wrapper below.
deprecated_metodo_predictivo_no_recursivo = metodo_predictivo_no_recursivo
def metodo_predictivo_no_recursivo(G, M):
    """Wrap the generated non-recursive predictive parser for grammar *G*
    and parsing table *M* so it accepts token objects instead of raw
    token types."""
    parser = deprecated_metodo_predictivo_no_recursivo(G, M)
    def updated(tokens):
        # The underlying parser consumes token *types*; extract them here.
        return parser([t.token_type for t in tokens])
    return updated
3372428 | """
A service that will process requests from the public web
and will have access to the CA certificates.
"""
| StarcoderdataPython |
3443817 | # This is a test file for typing.
# No automatic testing is used at the moment. Just use your type checker and see if it works.
# Pytest here is used to make sure that runtime behavior matches the type checker's expected errors.
from typing import Any, Callable
import pytest
from decopatch import DECORATED, function_decorator
def test_invalid_parameter():
    """Unknown keyword arguments to @function_decorator(...) must raise
    TypeError at decoration-declaration time."""
    with pytest.raises(TypeError):
        # Error, invalid argument
        @function_decorator(invalid_param=True)
        def decorator_wint_invalid_param(fn=DECORATED):
            return fn
def test_normal_decorator():
    """A flat-mode decorator built via @function_decorator works both bare
    and with keyword options; runtime errors mirror type-checker errors."""
    @function_decorator
    def decorator(scope="test", fn=DECORATED): # type: (str, Any) -> Callable[..., Any]
        # The assert fires at decoration time when `scope` is not a str.
        assert isinstance(scope, str)
        return fn
    # Ok
    @decorator
    def decorated_flat():
        pass
    assert decorated_flat
    with pytest.raises(AssertionError):
        # Error, Literal[2] is incompatible with str
        @decorator(scope=2)
        def decorated_with_invalid_options():
            pass
    # Ok, should reveal correct type for `scope`
    @decorator(scope="success")
    def decorated_with_valid_options():
        pass
    assert decorated_with_valid_options
def test_function_decorator_with_params():
    """@function_decorator(...) with its own options still produces a
    decorator whose keyword arguments type-check and work at runtime."""
    # Ok, should reveal correct type for `enable_stack_introspection`
    @function_decorator(enable_stack_introspection=True)
    def decorator_with_params(scope = "test", fn=DECORATED): # type: (str, Any) -> Callable[..., Any]
        return fn
    # Ok, should reveal correct type for `scope`
    @decorator_with_params(scope="success")
    def decorated_with_valid_options():
        pass
    assert decorated_with_valid_options
| StarcoderdataPython |
12815692 | <reponame>MartienJun/flask-Travelnesia
from flask import Blueprint, render_template, request, redirect, url_for
from flask.helpers import flash
from flask_login import current_user, login_required
from app.Models.transportation import Transportation
from app.Controllers.transportation_controller import TransportationController
# Initialise the Blueprint with the /admin/transportation URL prefix.
blueprint = Blueprint("transportation", __name__, url_prefix="/admin/transportation")
# Route for the transportation list ("view") page.
@blueprint.route('/')
@blueprint.route('/view')
@login_required
def view():
    """Render the transportation list page (admin only)."""
    # Non-admin sessions are redirected back to sign-in.
    if current_user.role != 'adm':
        flash("You must sign in as admin to use this feature")
        return redirect(url_for('auth.signin'))
    # Admin session present: render the list view.
    return render_template("admin/transportation/view.html", list_transportation=TransportationController.get_all())
# Route for the insert page.
@blueprint.route('/insert', methods=['GET', 'POST'])
@login_required
def insert():
    """Insert a new transportation record (admin only).

    GET renders the form; POST validates uniqueness of transport_id and
    stores the record, then redirects to the list view.
    """
    # Non-admin sessions are redirected back to sign-in.
    if current_user.role != 'adm':
        flash("You must sign in as admin to use this feature")
        return redirect(url_for('auth.signin'))
    # GET: just show the insert form.
    if request.method == 'GET':
        return render_template("admin/transportation/insert.html")
    # POST: read the submitted form fields.
    transport_id = request.form['transport_id']
    transport = request.form['transport']
    # Renamed from `type`, which shadowed the builtin.
    transport_type = request.form['type']
    # Reject duplicate transport_id values.
    if TransportationController.get_by_id(transport_id) is not None:
        # Error message is intentionally kept in Indonesian (user-facing).
        return render_template('admin/transportation/insert.html', message="transport_id sudah pernah terdaftar!")
    # Valid data: store it via the model.
    transportation = Transportation(transport_id, transport, transport_type)
    TransportationController.insert(transportation)
    # Back to the list view.
    return redirect(url_for('transportation.view'))
# Route for the update page.
@blueprint.route('/update/<id>', methods=['GET', 'POST'])
@login_required
def update(id):
    """Update the transportation record identified by *id* (admin only).

    GET renders the pre-filled form; POST writes the changes and
    redirects back to the list view.
    """
    # Non-admin sessions are redirected back to sign-in.
    if current_user.role != 'adm':
        flash("You must sign in as admin to use this feature")
        return redirect(url_for('auth.signin'))
    # GET: show the update form pre-filled with the current record.
    if request.method == 'GET':
        return render_template("admin/transportation/update.html", transportation=TransportationController.get_by_id(id))
    # POST: read the submitted form fields.
    transport_id = request.form['transport_id']
    transport = request.form['transport']
    # Renamed from `type`, which shadowed the builtin.
    transport_type = request.form['type']
    # Persist the changes via the model.
    transportation = Transportation(transport_id, transport, transport_type)
    TransportationController.update(transportation)
    # Back to the list view.
    return redirect(url_for('transportation.view'))
# Route for deleting a record.
@blueprint.route('/delete/<id>', methods=['GET', 'POST'])
@login_required
def delete(id):
    """Delete the transportation record identified by *id* (admin only)."""
    # Non-admin sessions are redirected back to sign-in.
    if current_user.role != 'adm':
        flash("You must sign in as admin to use this feature")
        return redirect(url_for('auth.signin'))
    # Remove the record from the database.
    TransportationController.delete(id)
    # Back to the list view.
    return redirect(url_for('transportation.view'))
| StarcoderdataPython |
1820319 | <reponame>Qointum/pypy
import py, weakref
from rpython.jit.backend import model
from rpython.jit.backend.llgraph import support
from rpython.jit.backend.llsupport import symbolic
from rpython.jit.metainterp.history import AbstractDescr
from rpython.jit.metainterp.history import Const, getkind
from rpython.jit.metainterp.history import INT, REF, FLOAT, VOID
from rpython.jit.metainterp.resoperation import rop
from rpython.jit.metainterp.optimizeopt import intbounds
from rpython.jit.codewriter import longlong, heaptracker
from rpython.jit.codewriter.effectinfo import EffectInfo
from rpython.rtyper.llinterp import LLInterpreter, LLException
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, rstr
from rpython.rtyper import rclass
from rpython.rlib.clibffi import FFI_DEFAULT_ABI
from rpython.rlib.rarithmetic import ovfcheck, r_uint, r_ulonglong
class LLTrace(object):
    """The llgraph backend's private clone of a compiled trace.

    The front-end mutates its operation objects after compilation, so we
    keep our own copies; descrs of non-guard operations are wrapped in
    WeakrefDescr so this clone does not keep front-end descrs alive.
    (Python 2 code: `map` returns a list here.)
    """
    has_been_freed = False
    invalid = False
    def __init__(self, inputargs, operations):
        # We need to clone the list of operations because the
        # front-end will mutate them under our feet again. We also
        # need to make sure things get freed.
        def mapping(box, _cache={}):
            # `_cache` is a fresh dict per __init__ call (the default is
            # re-evaluated each time `mapping` is defined), so identical
            # boxes share one clone within this trace only - intentional.
            if isinstance(box, Const) or box is None:
                return box
            try:
                newbox = _cache[box]
            except KeyError:
                newbox = _cache[box] = box.__class__()
            return newbox
        #
        self.inputargs = map(mapping, inputargs)
        self.operations = []
        for op in operations:
            if op.getdescr() is not None:
                # Guards and FINISH keep their real descr (needed for
                # resuming); everything else holds only a weak reference.
                if op.is_guard() or op.getopnum() == rop.FINISH:
                    newdescr = op.getdescr()
                else:
                    newdescr = WeakrefDescr(op.getdescr())
            else:
                newdescr = None
            newop = op.copy_and_change(op.getopnum(),
                                       map(mapping, op.getarglist()),
                                       mapping(op.result),
                                       newdescr)
            if op.getfailargs() is not None:
                newop.setfailargs(map(mapping, op.getfailargs()))
            self.operations.append(newop)
class WeakrefDescr(AbstractDescr):
    """Descr proxy holding only a weak reference to the real descr, so
    cloned traces do not keep front-end descrs alive."""
    def __init__(self, realdescr):
        self.realdescrref = weakref.ref(realdescr)
        # Cache final_descr eagerly: it must stay readable even after the
        # real descr has been collected.
        self.final_descr = getattr(realdescr, 'final_descr', False)
class ExecutionFinished(Exception):
    """Raised to unwind execution when a trace finishes; carries the
    resulting deadframe."""
    def __init__(self, deadframe):
        self.deadframe = deadframe
class Jump(Exception):
    """Raised to transfer control to another label with the given args.
    `jump_target` is presumably the (lltrace, index) pair recorded on
    LABEL descrs - see _record_labels."""
    def __init__(self, jump_target, args):
        self.jump_target = jump_target
        self.args = args
class CallDescr(AbstractDescr):
    """Descr for a call site: ll result/argument types, ABI, effect info."""
    def __init__(self, RESULT, ARGS, extrainfo, ABI=FFI_DEFAULT_ABI):
        self.RESULT = RESULT
        self.ARGS = ARGS
        self.ABI = ABI
        self.extrainfo = extrainfo
    def __repr__(self):
        return 'CallDescr(%r, %r, %r)' % (self.RESULT, self.ARGS,
                                          self.extrainfo)
    def get_extra_info(self):
        return self.extrainfo
    def get_arg_types(self):
        # One kind letter per argument ('i'nt / 'r'ef / 'f'loat / 'v'oid).
        return ''.join([getkind(ARG)[0] for ARG in self.ARGS])
    def get_result_type(self):
        return getkind(self.RESULT)[0]
class SizeDescr(AbstractDescr):
    """Descr describing the size/layout of GC structure S."""
    def __init__(self, S):
        self.S = S
    def as_vtable_size_descr(self):
        # llgraph needs no distinct vtable variant; the descr doubles as one.
        return self
    def count_fields_if_immutable(self):
        return heaptracker.count_fields_if_immutable(self.S)
    def __repr__(self):
        return 'SizeDescr(%r)' % (self.S,)
class FieldDescr(AbstractDescr):
    """Descr for one field of structure S (FIELD is the field's ll-type)."""
    def __init__(self, S, fieldname):
        self.S = S
        self.fieldname = fieldname
        self.FIELD = getattr(S, fieldname)
    def get_vinfo(self):
        # `vinfo` is only attached in tests (see fielddescrof).
        return self.vinfo
    def __repr__(self):
        return 'FieldDescr(%r, %r)' % (self.S, self.fieldname)
    def sort_key(self):
        return self.fieldname
    def is_pointer_field(self):
        return getkind(self.FIELD) == 'ref'
    def is_float_field(self):
        return getkind(self.FIELD) == 'float'
    def is_field_signed(self):
        return _is_signed_kind(self.FIELD)
    def is_integer_bounded(self):
        # True for integer fields narrower than a machine word: their value
        # range is known and usable by the int-bounds optimizer.
        return getkind(self.FIELD) == 'int' \
            and rffi.sizeof(self.FIELD) < symbolic.WORD
    def get_integer_min(self):
        if getkind(self.FIELD) != 'int':
            assert False
        return intbounds.get_integer_min(
            not _is_signed_kind(self.FIELD), rffi.sizeof(self.FIELD))
    def get_integer_max(self):
        if getkind(self.FIELD) != 'int':
            assert False
        return intbounds.get_integer_max(
            not _is_signed_kind(self.FIELD), rffi.sizeof(self.FIELD))
def _is_signed_kind(TYPE):
    """Return True when TYPE is a signed numeric ll-type.

    A type counts as signed when casting -1 into it round-trips to -1;
    Bool and non-number types are never signed.
    """
    if TYPE is lltype.Bool:
        return False
    if not isinstance(TYPE, lltype.Number):
        return False
    return rffi.cast(TYPE, -1) == -1
class ArrayDescr(AbstractDescr):
    """Descr for an array type A; if A is a GcStruct with an inlined array
    part, OUTERA is the struct and A the array part itself."""
    def __init__(self, A):
        self.A = self.OUTERA = A
        if isinstance(A, lltype.Struct):
            # Unwrap the inlined array field of the struct.
            self.A = A._flds[A._arrayfld]
    def __repr__(self):
        return 'ArrayDescr(%r)' % (self.OUTERA,)
    def is_array_of_pointers(self):
        return getkind(self.A.OF) == 'ref'
    def is_array_of_floats(self):
        return getkind(self.A.OF) == 'float'
    def is_item_signed(self):
        return _is_signed_kind(self.A.OF)
    def is_array_of_structs(self):
        return isinstance(self.A.OF, lltype.Struct)
    def is_item_integer_bounded(self):
        # True for integer items narrower than a machine word (known range).
        return getkind(self.A.OF) == 'int' \
            and rffi.sizeof(self.A.OF) < symbolic.WORD
    def get_item_integer_min(self):
        if getkind(self.A.OF) != 'int':
            assert False
        return intbounds.get_integer_min(
            not _is_signed_kind(self.A.OF), rffi.sizeof(self.A.OF))
    def get_item_integer_max(self):
        if getkind(self.A.OF) != 'int':
            assert False
        return intbounds.get_integer_max(
            not _is_signed_kind(self.A.OF), rffi.sizeof(self.A.OF))
class InteriorFieldDescr(AbstractDescr):
    """Descr for a field of a struct that is inlined as array item type
    (array A of structs; FIELD is one field of A.OF)."""
    def __init__(self, A, fieldname):
        self.A = A
        self.fieldname = fieldname
        self.FIELD = getattr(A.OF, fieldname)
    def __repr__(self):
        return 'InteriorFieldDescr(%r, %r)' % (self.A, self.fieldname)
    def sort_key(self):
        return self.fieldname
    def is_pointer_field(self):
        return getkind(self.FIELD) == 'ref'
    def is_float_field(self):
        return getkind(self.FIELD) == 'float'
    def is_integer_bounded(self):
        # True for integer fields narrower than a machine word (known range).
        return getkind(self.FIELD) == 'int' \
            and rffi.sizeof(self.FIELD) < symbolic.WORD
    def get_integer_min(self):
        if getkind(self.FIELD) != 'int':
            assert False
        return intbounds.get_integer_min(
            not _is_signed_kind(self.FIELD), rffi.sizeof(self.FIELD))
    def get_integer_max(self):
        if getkind(self.FIELD) != 'int':
            assert False
        return intbounds.get_integer_max(
            not _is_signed_kind(self.FIELD), rffi.sizeof(self.FIELD))
# Default "zero" result value per result-kind letter:
# 'v'oid, 'r'ef (null GCREF), 'i'nt, 'f'loat.
_example_res = {'v': None,
                'r': lltype.nullptr(llmemory.GCREF.TO),
                'i': 0,
                'f': 0.0}
class LLGraphCPU(model.AbstractCPU):
from rpython.jit.metainterp.typesystem import llhelper as ts
supports_floats = True
supports_longlong = r_uint is not r_ulonglong
supports_singlefloats = True
translate_support_code = False
is_llgraph = True
def __init__(self, rtyper, stats=None, *ignored_args, **kwds):
model.AbstractCPU.__init__(self)
self.rtyper = rtyper
self.llinterp = LLInterpreter(rtyper)
self.descrs = {}
class MiniStats:
pass
self.stats = stats or MiniStats()
self.vinfo_for_tests = kwds.get('vinfo_for_tests', None)
def compile_loop(self, inputargs, operations, looptoken, jd_id=0,
unique_id=0, log=True, name='', logger=None):
clt = model.CompiledLoopToken(self, looptoken.number)
looptoken.compiled_loop_token = clt
lltrace = LLTrace(inputargs, operations)
clt._llgraph_loop = lltrace
clt._llgraph_alltraces = [lltrace]
self._record_labels(lltrace)
def compile_bridge(self, faildescr, inputargs, operations,
original_loop_token, log=True, logger=None):
clt = original_loop_token.compiled_loop_token
clt.compiling_a_bridge()
lltrace = LLTrace(inputargs, operations)
faildescr._llgraph_bridge = lltrace
clt._llgraph_alltraces.append(lltrace)
self._record_labels(lltrace)
def _record_labels(self, lltrace):
for i, op in enumerate(lltrace.operations):
if op.getopnum() == rop.LABEL:
_getdescr(op)._llgraph_target = (lltrace, i)
def invalidate_loop(self, looptoken):
for trace in looptoken.compiled_loop_token._llgraph_alltraces:
trace.invalid = True
def redirect_call_assembler(self, oldlooptoken, newlooptoken):
oldtrace = oldlooptoken.compiled_loop_token._llgraph_loop
newtrace = newlooptoken.compiled_loop_token._llgraph_loop
OLD = [box.type for box in oldtrace.inputargs]
NEW = [box.type for box in newtrace.inputargs]
assert OLD == NEW
assert not hasattr(oldlooptoken, '_llgraph_redirected')
oldlooptoken.compiled_loop_token._llgraph_redirected = True
oldlooptoken.compiled_loop_token._llgraph_loop = newtrace
alltraces = newlooptoken.compiled_loop_token._llgraph_alltraces
oldlooptoken.compiled_loop_token._llgraph_alltraces = alltraces
def free_loop_and_bridges(self, compiled_loop_token):
for c in compiled_loop_token._llgraph_alltraces:
c.has_been_freed = True
compiled_loop_token._llgraph_alltraces = []
compiled_loop_token._llgraph_loop = None
model.AbstractCPU.free_loop_and_bridges(self, compiled_loop_token)
def make_execute_token(self, *argtypes):
return self._execute_token
def _execute_token(self, loop_token, *args):
lltrace = loop_token.compiled_loop_token._llgraph_loop
frame = LLFrame(self, lltrace.inputargs, args)
try:
frame.execute(lltrace)
assert False
except ExecutionFinished, e:
return e.deadframe
def get_int_value(self, deadframe, index):
v = deadframe._values[index]
assert lltype.typeOf(v) == lltype.Signed
return v
def get_ref_value(self, deadframe, index):
v = deadframe._values[index]
assert lltype.typeOf(v) == llmemory.GCREF
return v
def get_float_value(self, deadframe, index):
v = deadframe._values[index]
assert lltype.typeOf(v) == longlong.FLOATSTORAGE
return v
def get_latest_descr(self, deadframe):
return deadframe._latest_descr
def grab_exc_value(self, deadframe):
if deadframe._last_exception is not None:
result = deadframe._last_exception.args[1]
gcref = lltype.cast_opaque_ptr(llmemory.GCREF, result)
else:
gcref = lltype.nullptr(llmemory.GCREF.TO)
return gcref
def force(self, force_token):
frame = force_token
assert isinstance(frame, LLFrame)
assert frame.forced_deadframe is None
values = []
for box in frame.force_guard_op.getfailargs():
if box is not None:
if box is not frame.current_op.result:
value = frame.env[box]
else:
value = box.value # 0 or 0.0 or NULL
else:
value = None
values.append(value)
frame.forced_deadframe = LLDeadFrame(
_getdescr(frame.force_guard_op), values)
return frame.forced_deadframe
def set_savedata_ref(self, deadframe, data):
deadframe._saved_data = data
def get_savedata_ref(self, deadframe):
assert deadframe._saved_data is not None
return deadframe._saved_data
# ------------------------------------------------------------
def calldescrof(self, FUNC, ARGS, RESULT, effect_info):
key = ('call', getkind(RESULT),
tuple([getkind(A) for A in ARGS]),
effect_info)
try:
return self.descrs[key]
except KeyError:
descr = CallDescr(RESULT, ARGS, effect_info)
self.descrs[key] = descr
return descr
def sizeof(self, S):
key = ('size', S)
try:
return self.descrs[key]
except KeyError:
descr = SizeDescr(S)
self.descrs[key] = descr
return descr
def fielddescrof(self, S, fieldname):
key = ('field', S, fieldname)
try:
return self.descrs[key]
except KeyError:
descr = FieldDescr(S, fieldname)
self.descrs[key] = descr
if self.vinfo_for_tests is not None:
descr.vinfo = self.vinfo_for_tests
return descr
def arraydescrof(self, A):
key = ('array', A)
try:
return self.descrs[key]
except KeyError:
descr = ArrayDescr(A)
self.descrs[key] = descr
return descr
def interiorfielddescrof(self, A, fieldname):
key = ('interiorfield', A, fieldname)
try:
return self.descrs[key]
except KeyError:
descr = InteriorFieldDescr(A, fieldname)
self.descrs[key] = descr
return descr
def _calldescr_dynamic_for_tests(self, atypes, rtype,
abiname='FFI_DEFAULT_ABI'):
# XXX WTF is that and why it breaks all abstractions?
from rpython.jit.backend.llsupport import ffisupport
return ffisupport.calldescr_dynamic_for_tests(self, atypes, rtype,
abiname)
def calldescrof_dynamic(self, cif_description, extrainfo):
# XXX WTF, this is happy nonsense
from rpython.jit.backend.llsupport.ffisupport import get_ffi_type_kind
from rpython.jit.backend.llsupport.ffisupport import UnsupportedKind
ARGS = []
try:
for itp in range(cif_description.nargs):
arg = cif_description.atypes[itp]
kind = get_ffi_type_kind(self, arg)
if kind != VOID:
ARGS.append(support.kind2TYPE[kind[0]])
RESULT = support.kind2TYPE[get_ffi_type_kind(self, cif_description.rtype)[0]]
except UnsupportedKind:
return None
key = ('call_dynamic', RESULT, tuple(ARGS),
extrainfo, cif_description.abi)
try:
return self.descrs[key]
except KeyError:
descr = CallDescr(RESULT, ARGS, extrainfo, ABI=cif_description.abi)
self.descrs[key] = descr
return descr
# ------------------------------------------------------------
def maybe_on_top_of_llinterp(self, func, args, RESULT):
ptr = llmemory.cast_int_to_adr(func).ptr
if hasattr(ptr._obj, 'graph'):
res = self.llinterp.eval_graph(ptr._obj.graph, args)
else:
res = ptr._obj._callable(*args)
if RESULT is lltype.Void:
return None
return support.cast_result(RESULT, res)
def _do_call(self, func, args_i, args_r, args_f, calldescr):
TP = llmemory.cast_int_to_adr(func).ptr._obj._TYPE
args = support.cast_call_args(TP.ARGS, args_i, args_r, args_f)
return self.maybe_on_top_of_llinterp(func, args, TP.RESULT)
bh_call_i = _do_call
bh_call_r = _do_call
bh_call_f = _do_call
bh_call_v = _do_call
def bh_getfield_gc(self, p, descr):
p = support.cast_arg(lltype.Ptr(descr.S), p)
return support.cast_result(descr.FIELD, getattr(p, descr.fieldname))
bh_getfield_gc_pure = bh_getfield_gc
bh_getfield_gc_i = bh_getfield_gc
bh_getfield_gc_r = bh_getfield_gc
bh_getfield_gc_f = bh_getfield_gc
bh_getfield_raw = bh_getfield_gc
bh_getfield_raw_pure = bh_getfield_raw
bh_getfield_raw_i = bh_getfield_raw
bh_getfield_raw_r = bh_getfield_raw
bh_getfield_raw_f = bh_getfield_raw
def bh_setfield_gc(self, p, newvalue, descr):
p = support.cast_arg(lltype.Ptr(descr.S), p)
setattr(p, descr.fieldname, support.cast_arg(descr.FIELD, newvalue))
bh_setfield_gc_i = bh_setfield_gc
bh_setfield_gc_r = bh_setfield_gc
bh_setfield_gc_f = bh_setfield_gc
bh_setfield_raw = bh_setfield_gc
bh_setfield_raw_i = bh_setfield_raw
bh_setfield_raw_f = bh_setfield_raw
def bh_arraylen_gc(self, a, descr):
array = a._obj.container
if descr.A is not descr.OUTERA:
array = getattr(array, descr.OUTERA._arrayfld)
return array.getlength()
def bh_getarrayitem_gc(self, a, index, descr):
a = support.cast_arg(lltype.Ptr(descr.A), a)
array = a._obj
return support.cast_result(descr.A.OF, array.getitem(index))
bh_getarrayitem_gc_pure = bh_getarrayitem_gc
bh_getarrayitem_gc_i = bh_getarrayitem_gc
bh_getarrayitem_gc_r = bh_getarrayitem_gc
bh_getarrayitem_gc_f = bh_getarrayitem_gc
bh_getarrayitem_raw = bh_getarrayitem_gc
bh_getarrayitem_raw_pure = bh_getarrayitem_raw
bh_getarrayitem_raw_i = bh_getarrayitem_raw
bh_getarrayitem_raw_r = bh_getarrayitem_raw
bh_getarrayitem_raw_f = bh_getarrayitem_raw
def bh_setarrayitem_gc(self, a, index, item, descr):
a = support.cast_arg(lltype.Ptr(descr.A), a)
array = a._obj
array.setitem(index, support.cast_arg(descr.A.OF, item))
bh_setarrayitem_gc_i = bh_setarrayitem_gc
bh_setarrayitem_gc_r = bh_setarrayitem_gc
bh_setarrayitem_gc_f = bh_setarrayitem_gc
bh_setarrayitem_raw = bh_setarrayitem_gc
bh_setarrayitem_raw_i = bh_setarrayitem_raw
bh_setarrayitem_raw_r = bh_setarrayitem_raw
bh_setarrayitem_raw_f = bh_setarrayitem_raw
def bh_getinteriorfield_gc(self, a, index, descr):
array = a._obj.container
return support.cast_result(descr.FIELD,
getattr(array.getitem(index), descr.fieldname))
bh_getinteriorfield_gc_i = bh_getinteriorfield_gc
bh_getinteriorfield_gc_r = bh_getinteriorfield_gc
bh_getinteriorfield_gc_f = bh_getinteriorfield_gc
def bh_setinteriorfield_gc(self, a, index, item, descr):
array = a._obj.container
setattr(array.getitem(index), descr.fieldname,
support.cast_arg(descr.FIELD, item))
bh_setinteriorfield_gc_i = bh_setinteriorfield_gc
bh_setinteriorfield_gc_r = bh_setinteriorfield_gc
bh_setinteriorfield_gc_f = bh_setinteriorfield_gc
def bh_raw_load_i(self, struct, offset, descr):
ll_p = rffi.cast(rffi.CCHARP, struct)
ll_p = rffi.cast(lltype.Ptr(descr.A), rffi.ptradd(ll_p, offset))
value = ll_p[0]
return support.cast_result(descr.A.OF, value)
def bh_raw_load_f(self, struct, offset, descr):
ll_p = rffi.cast(rffi.CCHARP, struct)
ll_p = rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE),
rffi.ptradd(ll_p, offset))
return ll_p[0]
def bh_raw_load(self, struct, offset, descr):
if descr.A.OF == lltype.Float:
return self.bh_raw_load_f(struct, offset, descr)
else:
return self.bh_raw_load_i(struct, offset, descr)
def bh_increment_debug_counter(self, addr):
p = rffi.cast(rffi.CArrayPtr(lltype.Signed), addr)
p[0] += 1
def unpack_arraydescr_size(self, arraydescr):
from rpython.jit.backend.llsupport.symbolic import get_array_token
from rpython.jit.backend.llsupport.descr import get_type_flag, FLAG_SIGNED
assert isinstance(arraydescr, ArrayDescr)
basesize, itemsize, _ = get_array_token(arraydescr.A, False)
flag = get_type_flag(arraydescr.A.OF)
is_signed = (flag == FLAG_SIGNED)
return basesize, itemsize, is_signed
def bh_raw_store_i(self, struct, offset, newvalue, descr):
ll_p = rffi.cast(rffi.CCHARP, struct)
ll_p = rffi.cast(lltype.Ptr(descr.A), rffi.ptradd(ll_p, offset))
if descr.A.OF == lltype.SingleFloat:
newvalue = longlong.int2singlefloat(newvalue)
ll_p[0] = rffi.cast(descr.A.OF, newvalue)
    def bh_raw_store_f(self, struct, offset, newvalue, descr):
        # Store a FLOATSTORAGE value into raw memory at struct+offset.
        ll_p = rffi.cast(rffi.CCHARP, struct)
        ll_p = rffi.cast(rffi.CArrayPtr(longlong.FLOATSTORAGE),
                         rffi.ptradd(ll_p, offset))
        ll_p[0] = newvalue
def bh_raw_store(self, struct, offset, newvalue, descr):
if descr.A.OF == lltype.Float:
self.bh_raw_store_f(struct, offset, newvalue, descr)
else:
self.bh_raw_store_i(struct, offset, newvalue, descr)
    def bh_newstr(self, length):
        # Allocate a zero-filled GC string of 'length' chars; return a GCREF.
        return lltype.cast_opaque_ptr(llmemory.GCREF,
                                      lltype.malloc(rstr.STR, length,
                                                    zero=True))
    def bh_strlen(self, s):
        # Length of a GC string ('s' is a GCREF to an rstr.STR).
        return s._obj.container.chars.getlength()
    def bh_strgetitem(self, s, item):
        # Read one character from a GC string, returned as an int.
        return ord(s._obj.container.chars.getitem(item))
    def bh_strsetitem(self, s, item, v):
        # Write character code 'v' into a GC string at position 'item'.
        s._obj.container.chars.setitem(item, chr(v))
    def bh_copystrcontent(self, src, dst, srcstart, dststart, length):
        # Copy 'length' chars between GC strings, with bounds assertions.
        src = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), src)
        dst = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), dst)
        assert 0 <= srcstart <= srcstart + length <= len(src.chars)
        assert 0 <= dststart <= dststart + length <= len(dst.chars)
        rstr.copy_string_contents(src, dst, srcstart, dststart, length)
    def bh_newunicode(self, length):
        # Allocate a zero-filled GC unicode string; return a GCREF.
        return lltype.cast_opaque_ptr(llmemory.GCREF,
                                      lltype.malloc(rstr.UNICODE, length,
                                                    zero=True))
    def bh_unicodelen(self, string):
        # Length of a GC unicode string.
        return string._obj.container.chars.getlength()
    def bh_unicodegetitem(self, string, index):
        # Read one unicode character, returned as an int code point.
        return ord(string._obj.container.chars.getitem(index))
    def bh_unicodesetitem(self, string, index, newvalue):
        # Write code point 'newvalue' into a GC unicode string.
        string._obj.container.chars.setitem(index, unichr(newvalue))
    def bh_copyunicodecontent(self, src, dst, srcstart, dststart, length):
        # Copy 'length' chars between GC unicode strings, with bounds checks.
        src = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), src)
        dst = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), dst)
        assert 0 <= srcstart <= srcstart + length <= len(src.chars)
        assert 0 <= dststart <= dststart + length <= len(dst.chars)
        rstr.copy_unicode_contents(src, dst, srcstart, dststart, length)
    def bh_new(self, sizedescr):
        # Allocate a zero-filled GC struct described by 'sizedescr'.
        return lltype.cast_opaque_ptr(llmemory.GCREF,
                                      lltype.malloc(sizedescr.S, zero=True))
    def bh_new_with_vtable(self, vtable, descr):
        # Allocate a GC instance and install its class pointer ('vtable',
        # given as an integer) into the object header.
        result = lltype.malloc(descr.S, zero=True)
        result_as_objptr = lltype.cast_pointer(rclass.OBJECTPTR, result)
        result_as_objptr.typeptr = support.cast_from_int(rclass.CLASSTYPE,
                                                         vtable)
        return lltype.cast_opaque_ptr(llmemory.GCREF, result)
    def bh_new_array(self, length, arraydescr):
        # Allocate a zero-filled GC array whose items are not GC references.
        array = lltype.malloc(arraydescr.A, length, zero=True)
        assert getkind(arraydescr.A.OF) != 'ref' # getkind crashes on structs
        return lltype.cast_opaque_ptr(llmemory.GCREF, array)
    def bh_new_array_clear(self, length, arraydescr):
        # Allocate a zero-filled GC array (items may be GC references).
        array = lltype.malloc(arraydescr.A, length, zero=True)
        return lltype.cast_opaque_ptr(llmemory.GCREF, array)
    def bh_classof(self, struct):
        # Return the class of a GC instance as an integer (the address of
        # its typeptr, converted via heaptracker).
        struct = lltype.cast_opaque_ptr(rclass.OBJECTPTR, struct)
        result_adr = llmemory.cast_ptr_to_adr(struct.typeptr)
        return heaptracker.adr2int(result_adr)
    def bh_new_raw_buffer(self, size):
        # Allocate 'size' raw (non-GC) bytes; the caller owns the memory.
        return lltype.malloc(rffi.CCHARP.TO, size, flavor='raw')
    def store_fail_descr(self, deadframe, descr):
        # No-op here: the LLDeadFrame already carries its descr.
        pass # I *think*
class LLDeadFrame(object):
    # Stand-in for a finished JIT frame: records the descr of the guard (or
    # FINISH) that ended execution, the values of its failargs, plus any
    # pending exception and guard_not_forced saved data.
    _TYPE = llmemory.GCREF
    def __init__(self, latest_descr, values,
                 last_exception=None, saved_data=None):
        self._latest_descr = latest_descr
        self._values = values
        self._last_exception = last_exception
        self._saved_data = saved_data
class LLFrame(object):
    # One interpreted frame of the llgraph backend: runs the operations of
    # an lltrace (loop or bridge) one by one, keeping the current value of
    # every box in self.env.  Trace switches are signalled with the Jump
    # exception; normal termination with ExecutionFinished.
    _TYPE = llmemory.GCREF
    forced_deadframe = None
    overflow_flag = False
    last_exception = None
    force_guard_op = None
    def __init__(self, cpu, argboxes, args):
        self.env = {}
        self.cpu = cpu
        assert len(argboxes) == len(args)
        for box, arg in zip(argboxes, args):
            self.setenv(box, arg)
    def __eq__(self, other):
        # this is here to avoid crashes in 'token == TOKEN_TRACING_RESCALL'
        from rpython.jit.metainterp.virtualizable import TOKEN_NONE
        from rpython.jit.metainterp.virtualizable import TOKEN_TRACING_RESCALL
        if isinstance(other, LLFrame):
            return self is other
        if other == TOKEN_NONE or other == TOKEN_TRACING_RESCALL:
            return False
        assert 0
    def __ne__(self, other):
        return not (self == other)
    def _identityhash(self):
        return hash(self)
    def setenv(self, box, arg):
        # Bind 'arg' to 'box', asserting that the lltype of the value
        # matches the box's declared type (INT/REF/FLOAT).
        if box.type == INT:
            # typecheck the result
            if isinstance(arg, bool):
                arg = int(arg)
            assert lltype.typeOf(arg) == lltype.Signed
        elif box.type == REF:
            assert lltype.typeOf(arg) == llmemory.GCREF
        elif box.type == FLOAT:
            assert lltype.typeOf(arg) == longlong.FLOATSTORAGE
        else:
            raise AssertionError(box)
        #
        self.env[box] = arg
    def lookup(self, arg):
        # Constants carry their own value; boxes are looked up in the env.
        if isinstance(arg, Const):
            return arg.value
        return self.env[arg]
    def execute(self, lltrace):
        # Main interpreter loop: dispatch each operation to the matching
        # 'execute_<opname>' method; a Jump exception switches traces.
        self.lltrace = lltrace
        del lltrace
        i = 0
        while True:
            assert not self.lltrace.has_been_freed
            op = self.lltrace.operations[i]
            args = [self.lookup(arg) for arg in op.getarglist()]
            self.current_op = op # for label
            self.current_index = i
            execute = getattr(self, 'execute_' + op.getopname())
            try:
                resval = execute(_getdescr(op), *args)
            except Jump, j:
                # Control transfer: rebind the target's input boxes to the
                # jump arguments and continue from the target position
                # (i == -1 means "jump to the trace's inputargs").
                self.lltrace, i = j.jump_target
                if i >= 0:
                    label_op = self.lltrace.operations[i]
                    i += 1
                    targetargs = label_op.getarglist()
                else:
                    targetargs = self.lltrace.inputargs
                    i = 0
                self.do_renaming(targetargs, j.args)
                continue
            if op.result is not None:
                self.setenv(op.result, resval)
            else:
                assert resval is None
            i += 1
    def do_renaming(self, newargs, newvalues):
        # Reset the environment to exactly the given boxes/values pairs.
        assert len(newargs) == len(newvalues)
        self.env = {}
        self.framecontent = {}
        for new, newvalue in zip(newargs, newvalues):
            self.setenv(new, newvalue)
    # -----------------------------------------------------
    def fail_guard(self, descr, saved_data=None):
        # A guard failed: either jump into an attached bridge, or finish
        # execution with a dead frame recording the failargs values.
        values = []
        for box in self.current_op.getfailargs():
            if box is not None:
                value = self.env[box]
            else:
                value = None
            values.append(value)
        if hasattr(descr, '_llgraph_bridge'):
            # A bridge was attached to this guard; None holes are dropped
            # because the bridge's inputargs do not include them.
            target = (descr._llgraph_bridge, -1)
            values = [value for value in values if value is not None]
            raise Jump(target, values)
        else:
            raise ExecutionFinished(LLDeadFrame(descr, values,
                                                self.last_exception,
                                                saved_data))
    def execute_force_spill(self, _, arg):
        pass
    def execute_finish(self, descr, *args):
        raise ExecutionFinished(LLDeadFrame(descr, args))
    def execute_label(self, descr, *args):
        argboxes = self.current_op.getarglist()
        self.do_renaming(argboxes, args)
    def execute_guard_true(self, descr, arg):
        if not arg:
            self.fail_guard(descr)
    def execute_guard_false(self, descr, arg):
        if arg:
            self.fail_guard(descr)
    def execute_guard_value(self, descr, arg1, arg2):
        if arg1 != arg2:
            self.fail_guard(descr)
    def execute_guard_nonnull(self, descr, arg):
        if not arg:
            self.fail_guard(descr)
    def execute_guard_isnull(self, descr, arg):
        if arg:
            self.fail_guard(descr)
    def execute_guard_class(self, descr, arg, klass):
        # 'klass' arrives as an integer address of the expected class.
        value = lltype.cast_opaque_ptr(rclass.OBJECTPTR, arg)
        expected_class = llmemory.cast_adr_to_ptr(
            llmemory.cast_int_to_adr(klass),
            rclass.CLASSTYPE)
        if value.typeptr != expected_class:
            self.fail_guard(descr)
    def execute_guard_nonnull_class(self, descr, arg, klass):
        self.execute_guard_nonnull(descr, arg)
        self.execute_guard_class(descr, arg, klass)
    def execute_guard_no_exception(self, descr):
        if self.last_exception is not None:
            self.fail_guard(descr)
    def execute_guard_exception(self, descr, excklass):
        # Fails unless the pending exception's class matches 'excklass';
        # on success the exception is consumed and its value returned.
        lle = self.last_exception
        if lle is None:
            gotklass = lltype.nullptr(rclass.CLASSTYPE.TO)
        else:
            gotklass = lle.args[0]
        excklass = llmemory.cast_adr_to_ptr(
            llmemory.cast_int_to_adr(excklass),
            rclass.CLASSTYPE)
        if gotklass != excklass:
            self.fail_guard(descr)
        #
        res = lle.args[1]
        self.last_exception = None
        return support.cast_to_ptr(res)
    def execute_guard_not_forced(self, descr):
        if self.forced_deadframe is not None:
            saved_data = self.forced_deadframe._saved_data
            self.fail_guard(descr, saved_data)
        self.force_guard_op = self.current_op
    execute_guard_not_forced_2 = execute_guard_not_forced
    def execute_guard_not_invalidated(self, descr):
        if self.lltrace.invalid:
            self.fail_guard(descr)
    def execute_int_add_ovf(self, _, x, y):
        # Overflow-checking arithmetic sets self.overflow_flag, which the
        # following guard_(no_)overflow inspects.
        try:
            z = ovfcheck(x + y)
        except OverflowError:
            ovf = True
            z = 0
        else:
            ovf = False
        self.overflow_flag = ovf
        return z
    def execute_int_sub_ovf(self, _, x, y):
        try:
            z = ovfcheck(x - y)
        except OverflowError:
            ovf = True
            z = 0
        else:
            ovf = False
        self.overflow_flag = ovf
        return z
    def execute_int_mul_ovf(self, _, x, y):
        try:
            z = ovfcheck(x * y)
        except OverflowError:
            ovf = True
            z = 0
        else:
            ovf = False
        self.overflow_flag = ovf
        return z
    def execute_guard_no_overflow(self, descr):
        if self.overflow_flag:
            self.fail_guard(descr)
    def execute_guard_overflow(self, descr):
        if not self.overflow_flag:
            self.fail_guard(descr)
    def execute_jump(self, descr, *args):
        raise Jump(descr._llgraph_target, args)
    def _do_math_sqrt(self, value):
        import math
        y = support.cast_from_floatstorage(lltype.Float, value)
        x = math.sqrt(y)
        return support.cast_to_floatstorage(x)
    def execute_cond_call(self, calldescr, cond, func, *args):
        if not cond:
            return
        # cond_call can't have a return value
        self.execute_call(calldescr, func, *args)
    def execute_call(self, calldescr, func, *args):
        # Known oopspec calls (currently only math.sqrt) are executed
        # directly; everything else goes through the llinterp.  A raised
        # LLException is stored in self.last_exception and a dummy result
        # of the right kind is returned.
        effectinfo = calldescr.get_extra_info()
        if effectinfo is not None and hasattr(effectinfo, 'oopspecindex'):
            oopspecindex = effectinfo.oopspecindex
            if oopspecindex == EffectInfo.OS_MATH_SQRT:
                return self._do_math_sqrt(args[0])
        TP = llmemory.cast_int_to_adr(func).ptr._obj._TYPE
        call_args = support.cast_call_args_in_order(TP.ARGS, args)
        try:
            res = self.cpu.maybe_on_top_of_llinterp(func, call_args, TP.RESULT)
            self.last_exception = None
        except LLException, lle:
            self.last_exception = lle
            res = _example_res[getkind(TP.RESULT)[0]]
        return res
    def execute_call_may_force(self, calldescr, func, *args):
        # Like execute_call, but exposes the following GUARD_NOT_FORCED so
        # that the callee can force the frame.
        call_op = self.lltrace.operations[self.current_index]
        guard_op = self.lltrace.operations[self.current_index + 1]
        assert guard_op.getopnum() == rop.GUARD_NOT_FORCED
        self.force_guard_op = guard_op
        res = self.execute_call(calldescr, func, *args)
        del self.force_guard_op
        return res
    def execute_call_release_gil(self, descr, saveerr, func, *args):
        if hasattr(descr, '_original_func_'):
            func = descr._original_func_     # see pyjitpl.py
            # we want to call the function that does the aroundstate
            # manipulation here (as a hack, instead of really doing
            # the aroundstate manipulation ourselves)
            return self.execute_call_may_force(descr, func, *args)
        guard_op = self.lltrace.operations[self.current_index + 1]
        assert guard_op.getopnum() == rop.GUARD_NOT_FORCED
        self.force_guard_op = guard_op
        call_args = support.cast_call_args_in_order(descr.ARGS, args)
        #
        func_adr = llmemory.cast_int_to_adr(func)
        if hasattr(func_adr.ptr._obj, '_callable'):
            # this is needed e.g. by test_fficall.test_guard_not_forced_fails,
            # because to actually force the virtualref we need to llinterp the
            # graph, not to directly execute the python function
            result = self.cpu.maybe_on_top_of_llinterp(func, call_args, descr.RESULT)
        else:
            FUNC = lltype.FuncType(descr.ARGS, descr.RESULT, descr.ABI)
            func_to_call = rffi.cast(lltype.Ptr(FUNC), func)
            result = func_to_call(*call_args)
        del self.force_guard_op
        return support.cast_result(descr.RESULT, result)
    def execute_call_assembler(self, descr, *args):
        # XXX simplify the following a bit
        #
        #       pframe = CALL_ASSEMBLER(args..., descr=looptoken)
        # ==>
        #       pframe = CALL looptoken.loopaddr(*args)
        #       JUMP_IF_FAST_PATH @fastpath
        #       res = CALL assembler_call_helper(pframe)
        #       jmp @done
        #   @fastpath:
        #       res = GETFIELD(pframe, 'result')
        #   @done:
        #
        call_op = self.lltrace.operations[self.current_index]
        guard_op = self.lltrace.operations[self.current_index + 1]
        assert guard_op.getopnum() == rop.GUARD_NOT_FORCED
        self.force_guard_op = guard_op
        pframe = self.cpu._execute_token(descr, *args)
        del self.force_guard_op
        #
        jd = descr.outermost_jitdriver_sd
        assert jd is not None, ("call_assembler(): the loop_token needs "
                                "to have 'outermost_jitdriver_sd'")
        if jd.index_of_virtualizable != -1:
            vable = args[jd.index_of_virtualizable]
        else:
            vable = lltype.nullptr(llmemory.GCREF.TO)
        #
        # Emulate the fast path
        #
        faildescr = self.cpu.get_latest_descr(pframe)
        if faildescr == self.cpu.done_with_this_frame_descr_int:
            return self.cpu.get_int_value(pframe, 0)
        elif faildescr == self.cpu.done_with_this_frame_descr_ref:
            return self.cpu.get_ref_value(pframe, 0)
        elif faildescr == self.cpu.done_with_this_frame_descr_float:
            return self.cpu.get_float_value(pframe, 0)
        elif faildescr == self.cpu.done_with_this_frame_descr_void:
            return None
        assembler_helper_ptr = jd.assembler_helper_adr.ptr  # fish
        try:
            result = assembler_helper_ptr(pframe, vable)
        except LLException, lle:
            assert self.last_exception is None, "exception left behind"
            self.last_exception = lle
            # fish op
            op = self.current_op
            return op.result and op.result.value
        if isinstance(result, float):
            result = support.cast_to_floatstorage(result)
        return result
    def execute_same_as(self, _, x):
        return x
    def execute_debug_merge_point(self, descr, *args):
        from rpython.jit.metainterp.warmspot import get_stats
        try:
            stats = get_stats()
        except AttributeError:
            pass
        else:
            stats.add_merge_point_location(args[1:])
    def execute_enter_portal_frame(self, descr, *args):
        pass
    def execute_leave_portal_frame(self, descr, *args):
        pass
    def execute_new_with_vtable(self, _, vtable):
        descr = heaptracker.vtable2descr(self.cpu, vtable)
        return self.cpu.bh_new_with_vtable(vtable, descr)
    def execute_force_token(self, _):
        return self
    def execute_cond_call_gc_wb(self, descr, a):
        py.test.skip("cond_call_gc_wb not supported")
    def execute_cond_call_gc_wb_array(self, descr, a, b):
        py.test.skip("cond_call_gc_wb_array not supported")
    def execute_keepalive(self, descr, x):
        pass
def _getdescr(op):
d = op.getdescr()
if d is not None and isinstance(d, WeakrefDescr):
d = d.realdescrref()
assert d is not None, "the descr disappeared: %r" % (op,)
return d
def _setup():
    # Fill in LLFrame.execute_* methods for every resoperation that was not
    # written out explicitly above: prefer the blackhole interpreter's
    # implementation, and fall back to the cpu's bh_* method otherwise.
    def _make_impl_from_blackhole_interp(opname):
        # Reuse the blackhole interpreter's 'bhimpl_<opname>' when it
        # exists and only takes int/ref/float arguments.
        from rpython.jit.metainterp.blackhole import BlackholeInterpreter
        name = 'bhimpl_' + opname.lower()
        try:
            func = BlackholeInterpreter.__dict__[name]
        except KeyError:
            return
        for argtype in func.argtypes:
            if argtype not in ('i', 'r', 'f'):
                return
        #
        def _op_default_implementation(self, descr, *args):
            # for all operations implemented in the blackhole interpreter
            return func(*args)
        #
        _op_default_implementation.func_name = 'execute_' + opname
        return _op_default_implementation
    def _new_execute(opname):
        # Forward to the cpu's bh_<opname> method, appending the descr as
        # the last positional argument when one is present.
        def execute(self, descr, *args):
            if descr is not None:
                new_args = args + (descr,)
            else:
                new_args = args
            return getattr(self.cpu, 'bh_' + opname)(*new_args)
        execute.func_name = 'execute_' + opname
        return execute
    for k, v in rop.__dict__.iteritems():
        if not k.startswith("_"):
            fname = 'execute_' + k.lower()
            if not hasattr(LLFrame, fname):
                func = _make_impl_from_blackhole_interp(k)
                if func is None:
                    func = _new_execute(k.lower())
                setattr(LLFrame, fname, func)
# Populate LLFrame's execute_* table at import time.
_setup()
| StarcoderdataPython |
3316573 | <reponame>JLTastet/scalar_portal<gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from nose.tools import assert_equals, assert_raises
from ..data import constants as c
def test_ckm():
    """Every CKM matrix element ckm(i, j) must match its named constant,
    and indices outside 1..3 must raise ValueError."""
    expected = [
        (1, 1, c.Vud), (1, 2, c.Vus), (1, 3, c.Vub),
        (2, 1, c.Vcd), (2, 2, c.Vcs), (2, 3, c.Vcb),
        (3, 1, c.Vtd), (3, 2, c.Vts), (3, 3, c.Vtb),
    ]
    for i, j, value in expected:
        assert_equals(c.ckm(i, j), value)
    for i, j in [(0, 1), (4, 1), (1, 0), (1, 4)]:
        assert_raises(ValueError, lambda i=i, j=j: c.ckm(i, j))
def test_VUD():
    """VUD(up, down) must return the CKM element for each quark pair, and
    reject swapped or unknown quark labels with ValueError."""
    cases = [
        ('u', 'd', c.Vud), ('u', 's', c.Vus), ('u', 'b', c.Vub),
        ('c', 'd', c.Vcd), ('c', 's', c.Vcs), ('c', 'b', c.Vcb),
        ('t', 'd', c.Vtd), ('t', 's', c.Vts), ('t', 'b', c.Vtb),
    ]
    for up, down, value in cases:
        assert_equals(c.VUD(up, down), value)
    for up, down in [('d', 'u'), ('a', 'd'), ('u', 'z')]:
        assert_raises(ValueError, lambda up=up, down=down: c.VUD(up, down))
8057050 | from sqlobject import *
# Bind every SQLObject class in this module to a local SQLite database file.
sqlhub.processConnection = connectionForURI('sqlite:mydatabase.db')
class User(SQLObject):
    # ORM model mapped to the 'users' table.
    class sqlmeta:
        # Override SQLObject's default table-name derivation.
        table = 'users'
    name = StringCol()
    fullname = StringCol()
    password = StringCol()
if __name__ == "__main__":
    # Recreate the 'users' table from scratch on each run.
    User.dropTable(ifExists=True)
    User.createTable()
| StarcoderdataPython |
11244133 | <reponame>DeepRank/DeepRank_VariantPred<gh_stars>0
import os
from tempfile import mkdtemp, mkstemp
from shutil import rmtree
import h5py
import numpy
import torch.optim as optim
from nose.tools import ok_
from deeprank.models.variant import PdbVariantSelection
from deeprank.generate.DataGenerator import DataGenerator
from deeprank.learn.DataSet import DataSet
from deeprank.learn.NeuralNet import NeuralNet
from deeprank.learn.model3d import cnn_reg
from deeprank.domain.amino_acid import valine, cysteine, serine
import deeprank.config
deeprank.config.DEBUG = True
def test_learn():
    """ This test will simply run deeprank's learning code. It doesn't
        test any particular feature or target classes.

        The result of deeprank's learning is not verified. This test
        only runs the code to be sure there are no exceptions thrown.
    """

    feature_modules = ["test.feature.feature1", "test.feature.feature2"]
    target_modules = ["test.target.target1"]

    # Van der Waals radii used to map atomic densities onto the grid.
    atomic_densities = {'C': 1.7, 'N': 1.55, 'O': 1.52, 'S': 1.8}
    grid_info = {
        'number_of_points': [30,30,30],
        'resolution': [1.,1.,1.],
        'atomic_densities': atomic_densities,
    }

    # Two variants with per-chain PSSM files serving as the input data set.
    variants = [PdbVariantSelection("test/101m.pdb", "A", 10, valine, cysteine, {"A": "test/101M.A.pdb.pssm"},
                                    protein_accession="P02144", protein_residue_number=10),
                PdbVariantSelection("test/data/pdb/5EYU/5EYU.pdb", "A", 8, serine, cysteine, {"A": "test/data/pssm/5EYU/5eyu.A.pdb.pssm",
                                                                                             "B": "test/data/pssm/5EYU/5eyu.B.pdb.pssm",
                                                                                             "C": "test/data/pssm/5EYU/5eyu.C.pdb.pssm",
                                                                                             "D": "test/data/pssm/5EYU/5eyu.D.pdb.pssm"},
                                    protein_accession="Q9L4P8")]

    work_dir_path = mkdtemp()
    try:
        hdf5_path = os.path.join(work_dir_path, "test.hdf5")

        # data_augmentation has been set to a high number, so that
        # the train, valid and test set can be large enough.
        data_generator = DataGenerator(variants, data_augmentation=25,
                                       compute_targets=target_modules,
                                       compute_features=feature_modules,
                                       hdf5=hdf5_path)
        data_generator.create_database()
        data_generator.map_features(grid_info)

        dataset = DataSet(hdf5_path, grid_info=grid_info,
                          select_feature='all',
                          select_target='target1',
                          normalize_features=False)

        ok_(len(dataset) > 0)
        ok_(dataset[0] is not None)

        net_output_dir_path = os.path.join(work_dir_path, 'net-output')

        # Train a small 3D regression CNN on the generated data.
        neural_net = NeuralNet(dataset, cnn_reg, model_type='3d',task='reg',
                               cuda=False, plot=True, outdir=net_output_dir_path)
        neural_net.optimizer = optim.SGD(neural_net.net.parameters(),
                                         lr=0.001,
                                         momentum=0.9,
                                         weight_decay=0.005)

        epoch_data_path = "epoch_data.hdf5"
        neural_net.train(nepoch = 50, divide_trainset=0.8, train_batch_size = 5, num_workers=0, hdf5=epoch_data_path)

        # Check the contents of the variant data output
        with h5py.File(os.path.join(work_dir_path, "net-output", epoch_data_path), 'r') as f5:
            variant_data = f5['epoch_0000/train/variant'][()]
            assert len(variant_data.shape) == 2, "unexpected variant data shape: {}".format(variant_data.shape)
            assert variant_data.shape[1] == 7, "unexpected variant data row format: {}".format(variant_data[0, :])
            assert variant_data[0, 0].decode().lower().endswith(".pdb"), "unexpected structure {}".format(variant_data[0, 0])
    finally:
        rmtree(work_dir_path)
def test_plot_mcc():
    """Render the MCC plot from stored epoch data, then remove the output."""
    handle, output_path = mkstemp(prefix="plot-mcc", suffix=".png")
    os.close(handle)

    try:
        f5 = h5py.File("test/data/epoch_data.hdf5", "r")
        try:
            NeuralNet.plot_mcc(f5, output_path)
        finally:
            f5.close()
    finally:
        # Clean up the generated image regardless of the outcome.
        if os.path.isfile(output_path):
            os.remove(output_path)
| StarcoderdataPython |
3244555 | <reponame>jurayev/data-structures-algorithms-solutions<gh_stars>0
class File:
    # Leaf node of the virtual file system.
    def __init__(self, name, content):
        self.name = name
        self.content = [content]  # content chunks, concatenated on read
        self.is_file = True
class Dir:
    # Directory node; 'subdirs' maps a child name to a Dir or a File.
    def __init__(self):
        self.subdirs = defaultdict(Dir)
        self.is_file = False
class FileSystem:
    """
    In-memory hierarchical file system backed by nested Dir/File nodes.

    Paths are absolute ("/a/b/c"); splitting on "/" yields a leading ""
    component, so all real entries hang under the "" child of the virtual
    root.  Time complexity is O(N + K log K) per call, where N is the path
    length and K the number of listed entries; space is O(M) for the total
    size of stored paths and contents.
    """

    def __init__(self):
        self.dirs = Dir()  # virtual root; real entries live under its "" child

    def _walk(self, parts, create=False):
        """Follow 'parts' from the root and return the final node.

        When 'create' is True, missing intermediate directories are created;
        otherwise None is returned as soon as a component is missing.
        """
        node = self.dirs
        for name in parts:
            if name not in node.subdirs:
                if not create:
                    return None
                node.subdirs[name] = Dir()
            node = node.subdirs[name]
        return node

    def ls(self, path: str) -> List[str]:
        """List a directory's entries in sorted order; for a file path,
        return a one-element list with the file's name."""
        if path == "/":
            # Root listing: entries live under the "" child of the root.
            return sorted(self.dirs.subdirs[""].subdirs)
        node = self._walk(path.split("/"))
        if node is None:
            return []
        if node.is_file:
            return [node.name]
        return sorted(node.subdirs)

    def mkdir(self, path: str) -> None:
        """Create the directory 'path', including missing parents."""
        self._walk(path.split("/"), create=True)

    def addContentToFile(self, filePath: str, content: str) -> None:
        """Append 'content' to the file, creating it (and parents) if needed."""
        parts = filePath.split("/")
        file_name = parts.pop()
        parent = self._walk(parts, create=True)
        if file_name in parent.subdirs:
            parent.subdirs[file_name].content.append(content)
        else:
            parent.subdirs[file_name] = File(file_name, content)

    def readContentFromFile(self, filePath: str) -> str:
        """Return the file's full content, or "" when the path is missing."""
        node = self._walk(filePath.split("/"))
        return "" if node is None else "".join(node.content)
# Your FileSystem object will be instantiated and called as such:
# obj = FileSystem()
# param_1 = obj.ls(path)
# obj.mkdir(path)
# obj.addContentToFile(filePath,content)
# param_4 = obj.readContentFromFile(filePath)
"""
Test-Cases:
["FileSystem","ls","mkdir", "mkdir", "mkdir", "addContentToFile", "ls", "ls", "addContentToFile", "readContentFromFile" , "ls", "ls", "ls", "ls"]
[[], ["/"],["/a/b/c"], ["/a/b/c/f"],["/a/b/c/g"],["/a/b/d","hello"],["/"], ["/a/b"], ["/a/b/d","world"], ["/a/b/d"], ["/a/b/d"], ["/a/b/c"], ["/a/b/c/f"],["/a/b/d"]]
["FileSystem","mkdir","ls","ls","mkdir","ls","ls","addContentToFile","readContentFromFile","ls","readContentFromFile"]
[[],["/zijzllb"],["/"],["/zijzllb"],["/r"],["/"],["/r"],["/zijzllb/hfktg","d"],["/zijzllb/hfktg"],["/"],["/zijzllb/hfktg"]]
""" | StarcoderdataPython |
1921674 |
class AccessError(RuntimeError):
    """Raised when an operation is attempted without sufficient access."""
8151206 | <gh_stars>1-10
import datetime
import pytest
from django.urls import reverse
from django.utils import timezone
from oauth2_provider.models import AccessToken, Application
from rest_framework import status
from rest_framework.test import APIClient
from sso.user.models import User, UserProfile
def setup_data():
    # Build the minimal OAuth2 fixture set used by the tests below:
    # a superuser-owned Application, a regular user with a profile, and a
    # valid access token (expires in one day, 'profile' scope).
    superuser = User.objects.create_user(email='<EMAIL>', password='<PASSWORD>', is_superuser=True)
    application = Application.objects.create(
        client_id='test',
        user=superuser,
        client_type='Confidential',
        authorization_grant_type='Authorization code',
        skip_authorization=True,
    )
    user = User.objects.create(email='<EMAIL>', password='<PASSWORD>')
    user_profile = UserProfile.objects.create(
        first_name='alexander',
        last_name='thegreatdotgovdotuk',
        mobile_phone_number='0203044213',
        job_title='Director',
        user=user,
    )
    access_token = AccessToken.objects.create(
        user=user,
        token='test',
        application=application,
        expires=timezone.now() + datetime.timedelta(days=1),
        scope='profile',
    )
    return superuser, application, user, user_profile, access_token
@pytest.mark.django_db
def test_user_retrieve_view_authorised():
    """A valid bearer token returns 200 with the user's email and profile."""
    _, _, user, user_profile, access_token = setup_data()

    api_client = APIClient()
    api_client.credentials(HTTP_AUTHORIZATION='Bearer {}'.format(access_token.token))
    response = api_client.get(reverse('oauth2_provider:user-profile'))

    assert response.status_code == status.HTTP_200_OK
    data = response.data
    assert data['email'] == user.email
    profile = data['user_profile']
    assert profile['first_name'] == user_profile.first_name
    assert profile['last_name'] == user_profile.last_name
@pytest.mark.django_db
def test_user_retrieve_view_no_token():
    """An empty bearer token is rejected with 401."""
    setup_data()

    api_client = APIClient()
    api_client.credentials(HTTP_AUTHORIZATION='Bearer ')
    response = api_client.get(reverse('oauth2_provider:user-profile'))

    assert response.status_code == status.HTTP_401_UNAUTHORIZED
@pytest.mark.django_db
def test_user_retrieve_view_invalid_token():
    """A token that does not exist is rejected with 401."""
    setup_data()

    api_client = APIClient()
    api_client.credentials(HTTP_AUTHORIZATION='Bearer invalid_token')
    response = api_client.get(reverse('oauth2_provider:user-profile'))

    assert response.status_code == status.HTTP_401_UNAUTHORIZED
| StarcoderdataPython |
1247 | # -*- coding: utf-8 -*-
'''
Tests for the file state
'''
# Import python libs
from __future__ import absolute_import
import errno
import os
import textwrap
import tempfile
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.paths import TMP_STATE_TREE
from tests.support.mixins import SaltReturnAssertsMixin
# Import salt libs
import salt.utils
# Cached at import time; used to pick a platform-appropriate shell command.
IS_WINDOWS = salt.utils.is_windows()
class CMDTest(ModuleCase, SaltReturnAssertsMixin):
    '''
    Validate the cmd state
    '''
    def test_run_simple(self):
        '''
        cmd.run
        '''
        # Windows has no 'ls'; use 'dir' there.
        cmd = 'dir' if IS_WINDOWS else 'ls'
        ret = self.run_state('cmd.run', name=cmd, cwd=tempfile.gettempdir())
        self.assertSaltTrueReturn(ret)

    def test_test_run_simple(self):
        '''
        cmd.run test interface
        '''
        # With test=True the state must not run and should return None.
        ret = self.run_state('cmd.run', name='ls',
                             cwd=tempfile.gettempdir(), test=True)
        self.assertSaltNoneReturn(ret)
class CMDRunRedirectTest(ModuleCase, SaltReturnAssertsMixin):
    '''
    Validate the cmd state of run_redirect
    '''
    def setUp(self):
        # Each test writes its own sls file into the temporary state tree
        # and uses two scratch files created here.
        self.state_name = 'run_redirect'
        state_filename = self.state_name + '.sls'
        self.state_file = os.path.join(TMP_STATE_TREE, state_filename)

        # Create the testfile and release the handle
        fd, self.test_file = tempfile.mkstemp()
        try:
            os.close(fd)
        except OSError as exc:
            if exc.errno != errno.EBADF:
                raise exc

        # Create the testfile and release the handle
        fd, self.test_tmp_path = tempfile.mkstemp()
        try:
            os.close(fd)
        except OSError as exc:
            if exc.errno != errno.EBADF:
                raise exc

        super(CMDRunRedirectTest, self).setUp()

    def tearDown(self):
        for path in (self.state_file, self.test_tmp_path, self.test_file):
            try:
                os.remove(path)
            except OSError:
                # Not all of the tests leave files around that we want to remove
                # As some of the tests create the sls files in the test itself,
                # And some are using files in the integration test file state tree.
                pass
        super(CMDRunRedirectTest, self).tearDown()

    def test_run_unless(self):
        '''
        test cmd.run unless
        '''
        state_key = 'cmd_|-{0}_|-{0}_|-run'.format(self.test_tmp_path)
        with salt.utils.fopen(self.state_file, 'w') as fb_:
            fb_.write(textwrap.dedent('''
                {0}:
                  cmd.run:
                    - unless: echo cheese > {1}
                '''.format(self.test_tmp_path, self.test_file)))

        ret = self.run_function('state.sls', [self.state_name])
        self.assertTrue(ret[state_key]['result'])

    def test_run_unless_multiple_cmds(self):
        '''
        test cmd.run using multiple unless options where the first cmd in the
        list will pass, but the second will fail. This tests the fix for issue
        #35384. (The fix is in PR #35545.)
        '''
        sls = self.run_function('state.sls', mods='issue-35384')
        self.assertSaltTrueReturn(sls)
        # We must assert against the comment here to make sure the comment reads that the
        # command "echo "hello"" was run. This ensures that we made it to the last unless
        # command in the state. If the comment reads "unless execution succeeded", or similar,
        # then the unless state run bailed out after the first unless command succeeded,
        # which is the bug we're regression testing for.
        self.assertEqual(sls['cmd_|-cmd_run_unless_multiple_|-echo "hello"_|-run']['comment'],
                         'Command "echo "hello"" run')

    def test_run_creates_exists(self):
        '''
        test cmd.run creates already there
        '''
        # The 'creates' target already exists, so nothing should change.
        state_key = 'cmd_|-echo >> {0}_|-echo >> {0}_|-run'.format(self.test_file)
        with salt.utils.fopen(self.state_file, 'w') as fb_:
            fb_.write(textwrap.dedent('''
                echo >> {0}:
                  cmd.run:
                    - creates: {0}
                '''.format(self.test_file)))

        ret = self.run_function('state.sls', [self.state_name])
        self.assertTrue(ret[state_key]['result'])
        self.assertEqual(len(ret[state_key]['changes']), 0)

    def test_run_creates_new(self):
        '''
        test cmd.run creates not there
        '''
        # Remove the target first so the command actually runs.
        os.remove(self.test_file)
        state_key = 'cmd_|-echo >> {0}_|-echo >> {0}_|-run'.format(self.test_file)
        with salt.utils.fopen(self.state_file, 'w') as fb_:
            fb_.write(textwrap.dedent('''
                echo >> {0}:
                  cmd.run:
                    - creates: {0}
                '''.format(self.test_file)))

        ret = self.run_function('state.sls', [self.state_name])
        self.assertTrue(ret[state_key]['result'])
        self.assertEqual(len(ret[state_key]['changes']), 4)

    def test_run_redirect(self):
        '''
        test cmd.run with shell redirect
        '''
        state_key = 'cmd_|-echo test > {0}_|-echo test > {0}_|-run'.format(self.test_file)
        with salt.utils.fopen(self.state_file, 'w') as fb_:
            fb_.write(textwrap.dedent('''
                echo test > {0}:
                  cmd.run
                '''.format(self.test_file)))

        ret = self.run_function('state.sls', [self.state_name])
        self.assertTrue(ret[state_key]['result'])
class CMDRunWatchTest(ModuleCase, SaltReturnAssertsMixin):
    '''
    Validate the cmd state of run_watch
    '''
    def setUp(self):
        # Each test writes its own sls file into the temporary state tree.
        self.state_name = 'run_watch'
        state_filename = self.state_name + '.sls'
        self.state_file = os.path.join(TMP_STATE_TREE, state_filename)
        super(CMDRunWatchTest, self).setUp()

    def tearDown(self):
        os.remove(self.state_file)
        super(CMDRunWatchTest, self).tearDown()

    def test_run_watch(self):
        '''
        test cmd.run watch
        '''
        # 'biscuits' (cmd.wait) must fire because it watches the stateful
        # 'saltines' command, which reports changed=true.
        saltines_key = 'cmd_|-saltines_|-echo changed=true_|-run'
        biscuits_key = 'cmd_|-biscuits_|-echo biscuits_|-wait'

        with salt.utils.fopen(self.state_file, 'w') as fb_:
            fb_.write(textwrap.dedent('''
                saltines:
                  cmd.run:
                    - name: echo changed=true
                    - cwd: /
                    - stateful: True

                biscuits:
                  cmd.wait:
                    - name: echo biscuits
                    - cwd: /
                    - watch:
                      - cmd: saltines
                '''))

        ret = self.run_function('state.sls', [self.state_name])
        self.assertTrue(ret[saltines_key]['result'])
        self.assertTrue(ret[biscuits_key]['result'])
| StarcoderdataPython |
11204758 | <filename>linformer/__init__.py
from linformer.linformer import LinformerLM, Linformer, LinformerSelfAttention
| StarcoderdataPython |
11399525 | <filename>tests/framework/project/test_pipeline_registry.py
import sys
import textwrap
import pytest
from kedro.framework.project import configure_project, pipelines
from kedro.pipeline import Pipeline
@pytest.fixture
def mock_package_name_with_pipelines_file(tmpdir):
    # Create a throwaway 'test_package' containing a minimal
    # pipeline_registry.py, and make it importable for the test's duration.
    pipelines_file_path = tmpdir.mkdir("test_package") / "pipeline_registry.py"
    pipelines_file_path.write(
        textwrap.dedent(
            """
            from kedro.pipeline import Pipeline
            def register_pipelines():
                return {"new_pipeline": Pipeline([])}
            """
        )
    )
    project_path, package_name, _ = str(pipelines_file_path).rpartition("test_package")
    sys.path.insert(0, project_path)
    yield package_name
    # Teardown: drop the temporary project path from sys.path again.
    sys.path.pop(0)
def test_pipelines_without_configure_project_is_empty():
    # Before configure_project() runs, the global registry holds nothing.
    assert pipelines == {}
@pytest.fixture
def mock_package_name_with_unimportable_pipelines_file(tmpdir):
    # Like the fixture above, but the generated registry imports a module
    # that does not exist, so importing it must fail.
    pipelines_file_path = tmpdir.mkdir("test_broken_package") / "pipeline_registry.py"
    pipelines_file_path.write(
        textwrap.dedent(
            """
            import this_is_not_a_real_thing
            from kedro.pipeline import Pipeline
            def register_pipelines():
                return {"new_pipeline": Pipeline([])}
            """
        )
    )
    project_path, package_name, _ = str(pipelines_file_path).rpartition(
        "test_broken_package"
    )
    sys.path.insert(0, project_path)
    yield package_name
    # Teardown: drop the temporary project path from sys.path again.
    sys.path.pop(0)
def test_pipelines_after_configuring_project_shows_updated_values(
    mock_package_name_with_pipelines_file,
):
    # After configuring the project, the registry exposes the pipelines
    # returned by the package's register_pipelines().
    configure_project(mock_package_name_with_pipelines_file)
    assert isinstance(pipelines["new_pipeline"], Pipeline)
def test_configure_project_should_not_raise_for_unimportable_pipelines(
    mock_package_name_with_unimportable_pipelines_file,
):
    # configure_project should not raise error for unimportable pipelines
    # since pipelines loading is lazy
    configure_project(mock_package_name_with_unimportable_pipelines_file)

    # accessing data should raise for unimportable pipelines
    with pytest.raises(
        ModuleNotFoundError, match="No module named 'this_is_not_a_real_thing'"
    ):
        _ = pipelines["new_pipeline"]
| StarcoderdataPython |
3492180 | <filename>unmix/test/mask/mask.py
import numpy as np
import os
import time
import argparse
import librosa
from unmix.source.data.song import Song
from unmix.source.configuration import Configuration
from unmix.source.engine import Engine
from unmix.source.logging.logger import Logger
from unmix.source.data.dataloader import DataLoader
from unmix.source.helpers.masker import mask
if __name__ == "__main__":
    # NOTE(review): ``global`` at module level is a no-op; kept as-is.
    global config
    parser = argparse.ArgumentParser(
        description="Executes a training session.")
    parser.add_argument('--configuration', default='../../../configurations/default-mask.jsonc',
                        type=str, help="Environment and training configuration.")
    parser.add_argument('--workingdir', default='../../../', type=str,
                        help="Working directory (default: current directory).")
    args = parser.parse_args()

    start = time.time()

    # Bootstrap configuration and logging before touching any data.
    Configuration.initialize(args.configuration, args.workingdir)
    Logger.initialize()
    Logger.h1("unmix.io Neuronal Network Training Application")
    Logger.info("Environment: %s" % Configuration.get('environment.name'))
    Logger.info("Collection: %s" % Configuration.get('collection.folder'))
    Logger.info("Model: %s" % Configuration.get('training.model.name'))
    Logger.info("Arguments: ", str(args))

    engine = Engine()
    # Use the first training song as the demo track.
    training_songs, validation_songs, test_songs = DataLoader.load()
    song = Song(training_songs[0])
    vocals = song.vocals.load().channels
    instrumentals = song.instrumental.load().channels

    # Magnitudes of the channels (presumably complex STFT frames, given the
    # istft calls below -- TODO confirm shape/dtype with Song.load()).
    vocal_magnitude = np.abs(vocals)
    instrumental_magnitude = np.abs(instrumentals)

    # Binary mask: 1 where vocals dominate, 0 where instrumentals dominate.
    target_mask = np.empty_like(vocal_magnitude)
    target_mask[instrumental_magnitude <= vocal_magnitude] = 1
    target_mask[instrumental_magnitude > vocal_magnitude] = 0

    mix_magnitude = vocal_magnitude + instrumental_magnitude
    # Ratio (soft) mask computed by the project helper.
    target_mask_ratio = mask(vocal_magnitude, mix_magnitude)

    prediction_binary = mix_magnitude * target_mask
    prediction_ratio = mix_magnitude * target_mask_ratio

    # Re-attach the phase of the original mix to the masked magnitudes.
    mix_complex = vocals + instrumentals
    predicted_vocals_binary = prediction_binary * np.exp(np.angle(mix_complex) * 1j)
    predicted_vocals_ratio = prediction_ratio * np.exp(np.angle(mix_complex) * 1j)

    # Invert the STFT of the first channel for both mask variants.
    data_binary = librosa.istft(predicted_vocals_binary[0])
    audio_binary = np.array(data_binary)
    data_ratio = librosa.istft(predicted_vocals_ratio[0])
    audio_ratio = np.array(data_ratio)

    # Write both predictions next to the collection folder at 11025 Hz.
    output_file_binary = os.path.join(Configuration.get_path('collection.folder'), os.path.basename(training_songs[0]) + "_predicted_binary.wav")
    Logger.info("Output prediction file: %s." % output_file_binary)
    librosa.output.write_wav(output_file_binary, audio_binary, 11025, norm=False)

    output_file_ratio = os.path.join(Configuration.get_path('collection.folder'),
                                     os.path.basename(training_songs[0]) + "_predicted_ratio.wav")
    Logger.info("Output prediction file: %s." % output_file_ratio)
    librosa.output.write_wav(output_file_ratio, audio_ratio, 11025, norm=False)
| StarcoderdataPython |
import tkinter

window = tkinter.Tk()

# The model: a single integer counter shared by the views and controllers.
counter = tkinter.IntVar()
counter.set(0)
def click(variable, value):
    """Shared controller logic: add *value* to the Tk variable *variable*."""
    updated = variable.get() + value
    variable.set(updated)
# Two controllers
def click_up():
    """Controller: increment the shared counter by one."""
    click(counter, +1)
def click_down():
    """Controller: decrement the shared counter by one."""
    click(counter, -1)
# The views: two buttons drive the controllers, a label mirrors the model.
frame = tkinter.Frame(window)
frame.pack()

button = tkinter.Button(frame, text='Up', command=click_up)
button.pack()

# Note: ``button`` is deliberately reused; the widget stays alive because
# the frame keeps a reference to it.
button = tkinter.Button(frame, text='Down', command=click_down)
button.pack()

# The label auto-updates because it observes the IntVar.
label = tkinter.Label(frame, textvariable=counter)
label.pack()

window.mainloop()
| StarcoderdataPython |
1676196 | <reponame>wh629/CNLI-generalization
import os
import time
import torch
import traceback
from contextlib import contextmanager
import jiant.utils.python.io as py_io
import jiant.utils.python.filesystem as filesystem
class BaseZLogger:
    """Abstract interface for structured ("z") loggers.

    Concrete subclasses provide a context manager wrapping a logging
    session, entry/object writers, and a flush operation.
    """

    def log_context(self):
        # Context manager for a logging session; must release resources.
        raise NotImplementedError()

    def write_entry(self, key, entry):
        # Append a single JSON-serializable entry under *key*.
        raise NotImplementedError()

    def write_obj(self, key, obj, entry):
        # Persist *obj* alongside the metadata *entry* under *key*.
        raise NotImplementedError()

    def flush(self):
        # Force any buffered entries to their destination.
        raise NotImplementedError()
class ZLogger(BaseZLogger):
    """File-backed structured logger.

    Each key gets its own JSON-lines file ``<fol_path>/<key>.zlog``; objects
    logged via :meth:`write_obj` are serialized with ``torch.save`` into a
    ``<key>___CACHE`` folder and referenced from the entry by file path.
    """

    def __init__(self, fol_path, log_errors=True, overwrite=False):
        self.fol_path = fol_path
        self.log_errors = log_errors
        self.overwrite = overwrite
        # "w" truncates any previous log files, "a" appends to them.
        self.write_mode = "w" if overwrite else "a"
        # NOTE(review): no exist_ok here -- this raises if *fol_path* already
        # exists, even in append mode; confirm that is intended.
        os.makedirs(fol_path)
        # Lazily opened file handles, keyed by log key.
        self.handles = {}

    @contextmanager
    def log_context(self):
        """Run a logging session.

        Uncaught exceptions are appended to the "errors" log (when
        ``log_errors`` is set) and re-raised; all open handles are closed on
        exit either way.
        """
        try:
            yield self
        except Exception:
            if self.log_errors:
                self.write_entry("errors", traceback.format_exc())
            raise
        finally:
            for f in self.handles.values():
                f.close()

    def write_entry(self, key, entry, do_print=False):
        """Append *entry* under *key*, stamping a TIMESTAMP field.

        Non-dict entries are wrapped as ``{"data": entry}``; dicts are
        copied so the caller's object is never mutated.
        """
        if isinstance(entry, dict):
            entry = entry.copy()
        else:
            entry = {"data": entry}
        entry["TIMESTAMP"] = time.time()
        self._write_entry_to_file(key=key, entry=entry)
        if do_print:
            print(entry)

    def write_obj(self, key, obj, entry):
        """Persist *obj* under the key's cache folder and log *entry* with a
        DATA field holding the saved file's path."""
        assert "DATA" not in entry
        if isinstance(entry, dict):
            entry = entry.copy()
        else:
            entry = {"data": entry}
        time_stamp = time.time()
        # Save first so the entry can reference the resulting path.
        entry["DATA"] = self._save_obj(key, time_stamp, obj)
        entry["TIMESTAMP"] = time_stamp
        self._write_entry_to_file(key=key, entry=entry)

    def _save_obj(self, key, time_stamp, obj):
        # The timestamp doubles as the file name inside the cache folder.
        cache_path = self.get_cache_path(key)
        os.makedirs(cache_path, exist_ok=True)
        save_path = os.path.join(cache_path, str(time_stamp))
        torch.save(obj, save_path)
        return save_path

    def check_handle_open(self, key):
        # Open (and memoize) the file handle for *key* on first use.
        if key in self.handles:
            return
        handle_path = self.get_path(key)
        py_io.create_containing_folder(handle_path)
        self.handles[key] = open(handle_path, self.write_mode)

    def get_path(self, key):
        # Log-file location for *key*.
        return os.path.join(self.fol_path, key + ".zlog")

    def get_cache_path(self, key):
        # Folder holding torch-saved objects for *key*.
        return os.path.join(self.fol_path, key + "___CACHE")

    def flush(self, key=None):
        """Flush one key (str), several keys (list), or all handles (None)."""
        if key is None:
            for f in self.handles.values():
                f.flush()
        elif isinstance(key, list):
            for k in key:
                self.handles[k].flush()
        else:
            self.handles[key].flush()

    def _write_entry_to_file(self, key, entry):
        self.check_handle_open(key)
        self.handles[key].write(py_io.to_jsonl(entry) + "\n")
class ZBufferedLogger(ZLogger):
    """:class:`ZLogger` variant that buffers entries in memory per key.

    Entries are written to disk only once a key's buffer reaches its
    configured size (per-key via ``buffer_size_dict``, otherwise
    ``default_buffer_size``) or when :meth:`flush` is called.
    """

    def __init__(
        self,
        fol_path,
        default_buffer_size=1,
        buffer_size_dict=None,
        log_errors=True,
        overwrite=False,
    ):
        super().__init__(fol_path=fol_path, log_errors=log_errors, overwrite=overwrite)
        self.default_buffer_size = default_buffer_size
        # Copy so later mutation of the caller's dict has no effect here.
        self.buffer_size_dict = buffer_size_dict.copy() if buffer_size_dict else {}
        # Pending (not yet written) entries per key.
        self.buffer_dict = {}

    def check_handle_open(self, key):
        super().check_handle_open(key=key)
        # Also ensure the key has a buffer and an effective buffer size.
        if key not in self.buffer_dict:
            self.buffer_dict[key] = []
        if key not in self.buffer_size_dict:
            self.buffer_size_dict[key] = self.default_buffer_size

    def _write_entry_to_file(self, key, entry):
        self.check_handle_open(key)
        self.buffer_dict[key].append(entry)
        # Spill to disk once the buffer is full.
        if len(self.buffer_dict[key]) >= self.buffer_size_dict[key]:
            self.flush(key)

    def _write_buffer(self, key):
        # Write all pending entries for *key* in one call, then clear them.
        if not self.buffer_dict[key]:
            return
        self.handles[key].write(
            "".join(py_io.to_jsonl(entry) + "\n" for entry in self.buffer_dict[key])
        )
        self.buffer_dict[key] = []

    def flush(self, key=None):
        """Drain buffers and flush handles for one key, a list, or all."""
        if key is None:
            for k, f in self.handles.items():
                self._write_buffer(k)
                f.flush()
        elif isinstance(key, list):
            for k in key:
                self._write_buffer(k)
                self.handles[k].flush()
        else:
            self._write_buffer(key)
            self.handles[key].flush()
class _VoidZLogger(BaseZLogger):
    """No-op logger: accepts the full logging API and discards everything."""

    @contextmanager
    def log_context(self):
        # BUGFIX: decorated with @contextmanager so ``with
        # logger.log_context():`` works like ZLogger.log_context does; a
        # bare generator would fail with AttributeError: __enter__.
        yield

    def write_entry(self, key, entry):
        pass

    def write_obj(self, key, obj, entry):
        pass

    def flush(self):
        pass
class _PrintZLogger(BaseZLogger):
    """Debug logger: prints every entry/object to stdout, stores nothing."""

    @contextmanager
    def log_context(self):
        # BUGFIX: decorated with @contextmanager for parity with
        # ZLogger.log_context; a bare generator is unusable in ``with``.
        yield

    def write_entry(self, key, entry):
        print(f"{key}: {entry}")

    def write_obj(self, key, obj, entry):
        print(f"{key}: {obj}")

    def flush(self):
        pass
class InMemoryZLogger(BaseZLogger):
    """Logger that keeps all entries in an in-memory dict (no disk I/O)."""

    def __init__(self):
        # key -> list of entry dicts
        self.entries = {}
        self.data = {}

    @contextmanager
    def log_context(self):
        # BUGFIX: decorated with @contextmanager for parity with
        # ZLogger.log_context; a bare generator is unusable in ``with``.
        yield

    def write_entry(self, key, entry):
        if isinstance(entry, dict):
            entry = entry.copy()  # don't mutate the caller's dict
        else:
            entry = {"data": entry}
        entry["TIMESTAMP"] = time.time()
        self._write_entry(key=key, entry=entry)

    def write_obj(self, key, obj, entry):
        assert "DATA" not in entry
        if isinstance(entry, dict):
            entry = entry.copy()
        else:
            entry = {"data": entry}
        time_stamp = time.time()
        # Unlike ZLogger, the object itself is kept inline in the entry.
        entry["DATA"] = obj
        entry["TIMESTAMP"] = time_stamp
        self._write_entry(key=key, entry=entry)

    def _write_entry(self, key, entry):
        if key not in self.entries:
            self.entries[key] = []
        self.entries[key].append(entry)

    def flush(self):
        # Everything already lives in ``entries``; nothing to flush.
        pass
# Shared singleton instances for the common no-op / stdout loggers.
VOID_LOGGER = _VoidZLogger()
PRINT_LOGGER = _PrintZLogger()
def load_log(fol_path):
    """Read every ``*.zlog`` file under *fol_path*.

    Returns a dict mapping each file's path relative to *fol_path* (without
    the ``.zlog`` extension) to its parsed JSON-lines content.
    """
    base = os.path.abspath(fol_path)
    log_data = {}
    for path in filesystem.find_files_with_ext(fol_path, "zlog"):
        relative = os.path.abspath(path).replace(base, "")[1:]
        key = relative.replace(".zlog", "")
        log_data[key] = py_io.read_jsonl(path)
    return log_data
| StarcoderdataPython |
6644253 | <reponame>JoseALermaIII/python-tutorials
#! python3
"""Rename dates
Renames filenames with American MM-DD-YYYY date format to European DD-MM-YYYY date format.
Uses :py:mod:`os` to get the list of files, :py:mod:`re` to find the files with the American
date format, and :py:mod:`shutil` to rename them.
Note:
* Assumes only files with American date format are in the folder. May also match files with
European date format.
* Using debug mode: Prints out files to be renamed. Uncomment to rename files.
"""
def main():
    """Dry-run rename of files in the CWD from American (MM-DD-YYYY) to
    European (DD-MM-YYYY) date format.

    The actual rename is commented out; only the planned renames print.
    """
    import shutil, os, re

    # Create a regex that matches files with the American date format.
    datePattern = re.compile(r"""
    ^(.*?) # all text before the date
    ((0|1)?\d)- # one or two digits for the month
    ((0|1|2|3)?\d)- # one or two digits for the day
    ((19|20)\d\d) # four digits for the year
    (.*?)$ # all the text after the date
    """, re.VERBOSE)

    # Loop over the files in the working directory.
    for american_name in os.listdir('.'):
        match = datePattern.search(american_name)

        # Skip files without a date.
        if match is None:
            continue

        # Pull the filename apart around the date.
        prefix = match.group(1)
        month = match.group(2)
        day = match.group(4)
        year = match.group(6)
        suffix = match.group(8)

        # Form the European-style filename.
        european_name = prefix + day + '-' + month + '-' + year + suffix

        # Get the full, absolute file paths.
        working_dir = os.path.abspath('.')
        amerFilename = os.path.join(working_dir, american_name)
        euroFilename = os.path.join(working_dir, european_name)

        # Rename the files.
        print('Renaming "%s" to "%s"...' % (amerFilename, euroFilename)) # DEBUG
        #shutil.move(amerFilename, euroFilename) # uncomment after testing


if __name__ == '__main__':
    main()
| StarcoderdataPython |
4865034 | <reponame>twilio-labs/gordon<filename>gordon/configurations/pagerduty_config.py
from gordon.services.common.secrets_loader import get_secrets
import os
# Configuration to load Pagerduty relevant data used at runtime
class PagerDutyConfig:
    """Runtime accessors for PagerDuty settings (env vars + secret store)."""

    def get_pagerduty_url(self):
        """Return the PagerDuty base URL from the PAGERDUTY_URL env var."""
        return os.environ.get("PAGERDUTY_URL")

    def get_pagerduty_api_token(self):
        """Fetch the PagerDuty API token from the secrets backend."""
        secret = get_secrets("SECRET_PAGERDUTY_API_TOKEN")
        return secret["api_token"]
| StarcoderdataPython |
9730984 | <filename>RealisticWeatherAPI/services/report_service.py
import datetime
import uuid
from typing import List
from models.location import Location
from models.report import Report
# In-memory store of all submitted reports, kept newest-first.
__reports: List[Report] = []


async def get_reports() -> List[Report]:
    """Return a shallow copy of all reports (newest first)."""
    return __reports.copy()
async def add_report(description: str, location: Location) -> Report:
    """Create a report, store it, and return it.

    The store is re-sorted so the most recently created report comes first.
    """
    report = Report(
        id=str(uuid.uuid4()),
        location=location,
        description=description,
        created_date=datetime.datetime.now(),
    )
    __reports.append(report)
    __reports.sort(key=lambda r: r.created_date, reverse=True)
    return report
| StarcoderdataPython |
5054772 | <reponame>nftlabs/nftlabs-sdk-python<gh_stars>10-100
from typing import List, Generic
from thirdweb.common.error import NotFoundException
from thirdweb.common.nft import fetch_token_metadata
from thirdweb.constants.currency import ZERO_ADDRESS
from thirdweb.constants.role import Role, get_role_hash
from thirdweb.core.classes.contract_wrapper import ContractWrapper
from thirdweb.core.classes.base_contract import BaseContract
from thirdweb.core.classes.ipfs_storage import IpfsStorage
from thirdweb.types.contract import TERC721
from thirdweb.types.nft import NFTMetadata, NFTMetadataOwner, QueryAllParams
from web3.eth import TxReceipt
class ERC721(Generic[TERC721], BaseContract[TERC721]):
    """Standard ERC721 read/write operations shared by all NFT contracts."""

    _storage: IpfsStorage

    def __init__(
        self,
        contract_wrapper: ContractWrapper,
        storage: IpfsStorage,
    ):
        super().__init__(contract_wrapper)
        self._storage = storage

    """
    READ FUNCTIONS
    """

    def get(self, token_id: int) -> NFTMetadataOwner:
        """
        Get metadata for a token

        ```python
        nft = contract.get(0)
        print(nft)
        ```

        :param token_id: token ID of the token to get the metadata for
        :return: the metadata for the token and its owner
        """

        try:
            owner = self.owner_of(token_id)
        except Exception:
            # FIX: narrowed from a bare ``except:``. Tokens without a
            # resolvable owner are reported as owned by the zero address.
            owner = ZERO_ADDRESS

        metadata = self._get_token_metadata(token_id)
        return NFTMetadataOwner(metadata, owner)

    def get_all(
        self, query_params: QueryAllParams = QueryAllParams()
    ) -> List[NFTMetadataOwner]:
        """
        Get the metadata of all tokens in the contract

        ```python
        nfts = contract.get_all()
        print(nfts)
        ```

        :param query_params: optionally define a QueryAllParams instance to narrow the metadata query to specific tokens
        :return: the metadata of all tokens in the contract
        """
        # NOTE(review): the default QueryAllParams() instance is shared
        # between calls; fine as long as QueryAllParams is never mutated.
        max_id = min(query_params.start + query_params.count, self.get_total_count())
        nfts = []
        for token_id in range(query_params.start, max_id):
            try:
                nft = self.get(token_id)
                nfts.append(nft)
            except Exception:
                # FIX: narrowed from a bare ``except:``. Tokens whose
                # metadata fails to load are silently skipped.
                pass
        return nfts

    def get_total_count(self) -> int:
        """
        Get the total number of NFTs minted by this contract

        :return: the total number of NFTs minted by this contract
        """
        return self._contract_wrapper._contract_abi.next_token_id_to_mint.call()

    def owner_of(self, token_id: int) -> str:
        """
        Get the owner of a token

        :param token_id: the token ID of the token to get the owner of
        :return: the owner of the token
        """
        return self._contract_wrapper._contract_abi.owner_of.call(token_id)

    def total_supply(
        self,
    ) -> int:
        """
        Get the total number of tokens in the contract

        :return: the total number of tokens in the contract
        """
        return self._contract_wrapper._contract_abi.next_token_id_to_mint.call()

    def balance(
        self,
    ) -> int:
        """
        Get the token balance of the connected wallet

        :return: the token balance of the connected wallet
        """
        return self.balance_of(self._contract_wrapper.get_signer_address())

    def balance_of(self, address: str) -> int:
        """
        Get the token balance of a specific address

        ```python
        balance = contract.balance_of("{{wallet_address}}")
        print(balance)
        ```

        :param address: the address to get the token balance of
        """
        return self._contract_wrapper._contract_abi.balance_of.call(address)

    def is_transfer_restricted(
        self,
    ) -> bool:
        """
        Check if the contract is restricted to transfers only by admins

        :return: True if the contract is restricted to transfers only by admins, False otherwise
        """
        # Transfers are unrestricted when the zero address holds the
        # TRANSFER role (i.e. "anyone" may transfer).
        anyone_can_transfer = self._contract_wrapper._contract_abi.has_role.call(
            get_role_hash(Role.TRANSFER), ZERO_ADDRESS
        )

        return not anyone_can_transfer

    def is_approved(self, address: str, operator: str) -> bool:
        """
        Check whether an operator address is approved for all operations of a specific addresses assets

        :param address: the address whose assets are to be checked
        :param operator: the address of the operator to check
        :return: True if the operator is approved for all operations of the assets, False otherwise
        """
        return self._contract_wrapper._contract_abi.is_approved_for_all.call(
            address, operator
        )

    """
    WRITE FUNCTIONS
    """

    def transfer(self, to: str, token_id: int) -> TxReceipt:
        """
        Transfer a specified token from the connected wallet to a specified address.

        ```python
        to = "{{wallet_address}}"
        token_id = 0

        receipt = contract.transfer(to, token_id)
        ```

        :param to: wallet address to transfer the tokens to
        :param token_id: the specific token ID to transfer
        :returns: transaction receipt of the transfer
        """
        fr = self._contract_wrapper.get_signer_address()
        return self._contract_wrapper.send_transaction(
            "safe_transfer_from1", [fr, to, token_id]
        )

    def burn(self, token_id: int) -> TxReceipt:
        """
        Burn a specified token from the connected wallet.

        :param token_id: token ID of the token to burn
        :returns: transaction receipt of the burn
        """
        return self._contract_wrapper.send_transaction("burn", [token_id])

    def set_approval_for_all(self, operator: str, approved: bool) -> TxReceipt:
        """
        Set the approval of an operator for all operations of a specific address's assets

        :param operator: the address of the operator to set the approval for
        :param approved: the address whose assets the operator is approved to manage
        :returns: transaction receipt of the approval
        """
        return self._contract_wrapper.send_transaction(
            "set_approval_for_all", [operator, approved]
        )

    def set_approval_for_token(self, operator: str, token_id: int) -> TxReceipt:
        """
        Approve an operator for the NFT owner, which allows the operator to call transferFrom
        or safeTransferFrom for the specified token.

        :param operator: the address of the operator to set the approval for
        :param token_id: the specific token ID to set the approval for
        :returns: transaction receipt of the approval
        """
        return self._contract_wrapper.send_transaction("approve", [operator, token_id])

    """
    INTERNAL FUNCTIONS
    """

    def _get_token_metadata(self, token_id: int) -> NFTMetadata:
        # Resolve the on-chain token URI, then fetch/parse the metadata.
        token_uri = self._contract_wrapper._contract_abi.token_uri.call(token_id)

        if not token_uri:
            raise NotFoundException(str(token_id))

        return fetch_token_metadata(token_id, token_uri, self._storage)
| StarcoderdataPython |
8114833 | <reponame>yuyichao/rr<filename>src/test/restart_unstable.py
from rrutil import *

# Replay to the end of the recording, then restart the replay and verify
# the debuggee reaches the same normal exit a second time.
send_gdb('c')
expect_gdb('exited normally')

restart_replay()
expect_gdb('exited normally')

ok()
| StarcoderdataPython |
4919796 | # sql/functions.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .. import types as sqltypes, schema
from .expression import (
ClauseList, Function, _literal_as_binds, literal_column, _type_from_args,
cast, extract
)
from . import operators
from .visitors import VisitableType
from .. import util
# Registry of generic functions: package name -> {identifier: callable}.
_registry = util.defaultdict(dict)
def register_function(identifier, fn, package="_default"):
    """Associate a callable with a particular func. name.

    This is normally called by _GenericMeta, but is also
    available by itself so that a non-Function construct
    can be associated with the :data:`.func` accessor (i.e.
    CAST, EXTRACT).

    """
    _registry[package][identifier] = fn
class _GenericMeta(VisitableType):
    """Metaclass for :class:`.GenericFunction`.

    Registers every subclass in the function registry so it can be
    instantiated by name through the :data:`.func` accessor.
    """

    def __init__(cls, clsname, bases, clsdict):
        # The rendered SQL name defaults to the class name; the lookup
        # identifier may differ (e.g. accessed as func.geo.buffer but
        # rendered as ST_Buffer).
        cls.name = name = clsdict.get('name', clsname)
        cls.identifier = identifier = clsdict.get('identifier', name)
        package = clsdict.pop('package', '_default')

        # legacy: pre-0.8 subclasses declared the return type via
        # ``__return_type__`` rather than ``type``.
        if '__return_type__' in clsdict:
            cls.type = clsdict['__return_type__']
        register_function(identifier, cls, package)
        super(_GenericMeta, cls).__init__(clsname, bases, clsdict)
class GenericFunction(util.with_metaclass(_GenericMeta, Function)):
    """Define a 'generic' function.

    A generic function is a pre-established :class:`.Function`
    class that is instantiated automatically when called
    by name from the :data:`.func` attribute.    Note that
    calling any name from :data:`.func` has the effect that
    a new :class:`.Function` instance is created automatically,
    given that name.  The primary use case for defining
    a :class:`.GenericFunction` class is so that a function
    of a particular name may be given a fixed return type.
    It can also include custom argument parsing schemes as well
    as additional methods.

    Subclasses of :class:`.GenericFunction` are automatically
    registered under the name of the class.  For
    example, a user-defined function ``as_utc()`` would
    be available immediately::

        from sqlalchemy.sql.functions import GenericFunction
        from sqlalchemy.types import DateTime

        class as_utc(GenericFunction):
            type = DateTime

        print select([func.as_utc()])

    User-defined generic functions can be organized into
    packages by specifying the "package" attribute when defining
    :class:`.GenericFunction`.   Third party libraries
    containing many functions may want to use this in order
    to avoid name conflicts with other systems.   For example,
    if our ``as_utc()`` function were part of a package
    "time"::

        class as_utc(GenericFunction):
            type = DateTime
            package = "time"

    The above function would be available from :data:`.func`
    using the package name ``time``::

        print select([func.time.as_utc()])

    A final option is to allow the function to be accessed
    from one name in :data:`.func` but to render as a different name.
    The ``identifier`` attribute will override the name used to
    access the function as loaded from :data:`.func`, but will retain
    the usage of ``name`` as the rendered name::

        class GeoBuffer(GenericFunction):
            type = Geometry
            package = "geo"
            name = "ST_Buffer"
            identifier = "buffer"

    The above function will render as follows::

        >>> print func.geo.buffer()
        ST_Buffer()

    .. versionadded:: 0.8 :class:`.GenericFunction` now supports
       automatic registration of new functions as well as package
       and custom naming support.

    .. versionchanged:: 0.8 The attribute name ``type`` is used
       to specify the function's return type at the class level.
       Previously, the name ``__return_type__`` was used.  This
       name is still recognized for backwards-compatibility.

    """

    coerce_arguments = True

    def __init__(self, *args, **kwargs):
        # _parsed_args lets subclasses (e.g. ReturnTypeFromArgs) pre-coerce
        # the positional arguments and skip the default literal coercion.
        parsed_args = kwargs.pop('_parsed_args', None)
        if parsed_args is None:
            parsed_args = [_literal_as_binds(c) for c in args]
        self.packagenames = []
        self._bind = kwargs.get('bind', None)
        self.clause_expr = ClauseList(
            operator=operators.comma_op,
            group_contents=True, *parsed_args).self_group()
        # An explicit type_ kwarg wins over the class-level ``type``.
        self.type = sqltypes.to_instance(
            kwargs.pop("type_", None) or getattr(self, 'type', None))
# CAST and EXTRACT are standalone constructs, not GenericFunction
# subclasses, so they are registered with the func accessor explicitly.
register_function("cast", cast)
register_function("extract", extract)
class next_value(GenericFunction):
    """Represent the 'next value', given a :class:`.Sequence`
    as its single argument.

    Compiles into the appropriate function on each backend,
    or will raise NotImplementedError if used on a backend
    that does not provide support for sequences.

    """
    type = sqltypes.Integer()
    name = "next_value"

    def __init__(self, seq, **kw):
        assert isinstance(seq, schema.Sequence), \
            "next_value() accepts a Sequence object as input."
        self._bind = kw.get('bind', None)
        self.sequence = seq

    @property
    def _from_objects(self):
        # A sequence contributes no FROM clause to the enclosing statement.
        return []
class AnsiFunction(GenericFunction):
    """Base for ANSI-standard niladic functions (e.g. CURRENT_TIMESTAMP)."""

    def __init__(self, **kwargs):
        GenericFunction.__init__(self, **kwargs)
class ReturnTypeFromArgs(GenericFunction):
    """Define a function whose return type is the same as its arguments."""

    def __init__(self, *args, **kwargs):
        args = [_literal_as_binds(c) for c in args]
        # Derive the return type from the coerced argument expressions
        # unless the caller supplied an explicit type_.
        kwargs.setdefault('type_', _type_from_args(args))
        # Pass the pre-coerced args through so GenericFunction.__init__
        # does not coerce them a second time.
        kwargs['_parsed_args'] = args
        GenericFunction.__init__(self, *args, **kwargs)
# Functions whose return type mirrors that of their arguments.
class coalesce(ReturnTypeFromArgs):
    pass


class max(ReturnTypeFromArgs):
    pass


class min(ReturnTypeFromArgs):
    pass


class sum(ReturnTypeFromArgs):
    pass


# Functions with fixed return types.
class now(GenericFunction):
    type = sqltypes.DateTime


class concat(GenericFunction):
    type = sqltypes.String


class char_length(GenericFunction):
    type = sqltypes.Integer

    def __init__(self, arg, **kwargs):
        # Exactly one argument, unlike the generic *args form.
        GenericFunction.__init__(self, arg, **kwargs)


class random(GenericFunction):
    pass


class count(GenericFunction):
    r"""The ANSI COUNT aggregate function.  With no arguments,
    emits COUNT \*.

    """
    type = sqltypes.Integer

    def __init__(self, expression=None, **kwargs):
        if expression is None:
            # COUNT(*) when no expression is given.
            expression = literal_column('*')
        GenericFunction.__init__(self, expression, **kwargs)


# ANSI niladic functions with fixed return types.
class current_date(AnsiFunction):
    type = sqltypes.Date


class current_time(AnsiFunction):
    type = sqltypes.Time


class current_timestamp(AnsiFunction):
    type = sqltypes.DateTime


class current_user(AnsiFunction):
    type = sqltypes.String


class localtime(AnsiFunction):
    type = sqltypes.DateTime


class localtimestamp(AnsiFunction):
    type = sqltypes.DateTime


class session_user(AnsiFunction):
    type = sqltypes.String


class sysdate(AnsiFunction):
    type = sqltypes.DateTime


class user(AnsiFunction):
    type = sqltypes.String
4817378 | <gh_stars>0
#!/usr/bin/python3
"""
Sensors aggregation and storage.
https://github.com/dimitar-kunchev/NR-VentilationMonitoring
@author: <NAME>
@license: See the LICENSE file
@email: <EMAIL>
"""
import RPi.GPIO as GPIO
import serial
import time
import pymysql
import configparser
import json
import os
import sys
import threading
import queue
# Placeholder for a DB connection (the worker thread opens its own).
db_connection = None  # type: pymysql.Connection

# These declarations use some default settings. Adjust with the config file.
PIN_S0 = 4
PIN_S1 = 3
PIN_S2 = 17
PIN_EN = 18
SENSORS_COUNT = 6

# Conversion of RPM to airflow for each sensor
RPM_TO_AIRFLOW_COEFFICIENTS = [
    120.0 / 6000.0,
    120.0 / 6000.0,
    120.0 / 6000.0,
    120.0 / 6000.0,
    120.0 / 6000.0,
    120.0 / 6000.0
]

# When set to True the mysql thread should stop
STOP_MYSQL_THREAD_FLAG = False
def set_slave_address(addr: int):
    """Drive the S0..S2 multiplexer pins to select sensor *addr*.

    Note we use inverted logic (LOW = bit set). Depends on your wiring!
    """
    GPIO.output(PIN_S0, GPIO.LOW if addr & 0x01 else GPIO.HIGH)
    GPIO.output(PIN_S1, GPIO.LOW if (addr >> 1) & 0x01 else GPIO.HIGH)
    GPIO.output(PIN_S2, GPIO.LOW if (addr >> 2) & 0x01 else GPIO.HIGH)
def mysql_thread_func(config: configparser.ConfigParser, q: queue.Queue):
    """The MySQL thread. Push queries in the queue and it executes them.

    Two while loops so we reconnect when something goes wrong.
    """
    while not STOP_MYSQL_THREAD_FLAG:
        # Connect database
        try:
            db_con = pymysql.connect(host=config.get('db', 'host'),
                                     user=config.get('db', 'user'),
                                     password=config.get('db', 'pass'),
                                     database=config.get('db', 'db'))
            # Drain the queue until asked to stop.
            while not STOP_MYSQL_THREAD_FLAG:
                if not q.empty():
                    _query = q.get()
                    with db_con.cursor() as _c:
                        _c.execute(_query)
                    db_con.commit()
                else:
                    time.sleep(1)
        except pymysql.err.OperationalError:
            # Connection failed or dropped: back off briefly, then the
            # outer loop reconnects.
            time.sleep(2)
if __name__ == "__main__":
    # Load the config
    if not os.path.exists('./config.ini'):
        print('No config file!')
        sys.exit(1)
    config = configparser.ConfigParser()
    config.read('./config.ini')
    SENSORS_COUNT = config.getint('sensors', 'count')
    _tmp = config.get('sensors', 'rpm_to_airflow')
    RPM_TO_AIRFLOW_COEFFICIENTS = json.loads(_tmp)
    PIN_S0 = config.getint('gpio', 'address_0')
    PIN_S1 = config.getint('gpio', 'address_1')
    PIN_S2 = config.getint('gpio', 'address_2')
    PIN_EN = config.getint('gpio', 'enable')

    # Setup hardware
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(PIN_S0, GPIO.OUT)
    GPIO.setup(PIN_S1, GPIO.OUT)
    GPIO.setup(PIN_S2, GPIO.OUT)
    GPIO.setup(PIN_EN, GPIO.OUT)

    # Setup UART
    uart = serial.Serial(port=config.get('uart', 'uart'), baudrate=config.getint('uart', 'baudrate'),
                         xonxoff=False, rtscts=False, timeout=1)

    # Enable the multiplexor IC. Inverted logic!
    GPIO.output(PIN_EN, GPIO.HIGH)

    # Setup a queue to push mysql queries
    queries_queue = queue.Queue()
    # Start the mysql queries process
    mysql_thread = threading.Thread(target=mysql_thread_func, args=(config, queries_queue))
    mysql_thread.start()

    # Loop reading and saving
    try:
        while True:
            _sql_insert_values = []
            for i in range(0, SENSORS_COUNT):
                set_slave_address(i)
                uart.flushInput()
                # time.sleep(0.1)
                # Wait for S symbol - frame start
                uart.read_until(terminator=b'\x53', size=20)
                # Wait for E symbol - frame end
                _l = uart.read_until(terminator=b'\x45', size=20)
                _parsed = False
                _rpm = 0
                _temp = 0
                if _l and len(_l) > 1:
                    try:
                        _str = _l[:-1].decode('ASCII')
                        if _str and len(_str) > 1:
                            _ch = _str.split(',')
                            # BUGFIX: value comparison must use ``==``, not
                            # the identity operator ``is`` (which only worked
                            # via CPython's small-int caching).
                            if len(_ch) == 2:
                                _rpm = float(_ch[0])
                                _temp = float(_ch[1])
                                _parsed = True
                    # BUGFIX: a bare ``except:`` here would also swallow
                    # KeyboardInterrupt and defeat the shutdown handler
                    # below; only parse/decode failures should be ignored.
                    except (ValueError, UnicodeDecodeError):
                        _parsed = False
                if _parsed:
                    # print('S%d RPM: %d Temp: %.2f' % (i, _rpm, _temp))
                    _airflow = RPM_TO_AIRFLOW_COEFFICIENTS[i] * _rpm
                    # _last_readings[i] = {'temp': _temp, 'rpm': _rpm, 'airflow': _airflow}
                    # -127 presumably marks a sensor-error reading -- skip
                    # it; TODO confirm against the sensor firmware.
                    if _temp > -127:
                        _sql_insert_values.append('(now(), %d, %.2f, %.2f, %.2f)' % (i, _temp, _rpm, _airflow))
                # else:
                #    print('S%d ERR' % i)
            # with db_connection.cursor() as cr:
            #    cr.execute('insert into sensors_data (ts, sensor, temperature, rpm, airflow) values ' +
            #               ','.join(_sql_insert_values))
            # db_connection.commit()
            if len(_sql_insert_values):
                # Values are numeric %-formatted, so string-built SQL is
                # injection-safe here; handed to the worker thread.
                _q = 'insert into sensors_data (ts, sensor, temperature, rpm, airflow) values ' + \
                     ','.join(_sql_insert_values)
                queries_queue.put(_q)
    except KeyboardInterrupt:
        print("Signal received")

    print('Shutting down')
    STOP_MYSQL_THREAD_FLAG = True
    mysql_thread.join(2)
| StarcoderdataPython |
5184291 | import csv
import xlrd
from rich import print
from Iciba import Iciba
from concurrent.futures import ThreadPoolExecutor
# debug
import time

# Your DIY vocabulary workbook (input file).
InFilename = 'data.xls'
# Output CSV file.
OutFilename = 'eggs1.csv'

# Initialize the Iciba dictionary module.
Dictionary = Iciba()

# Iciba lookup error code.
ErrIciba = -1

# Accumulated results: query word -> translation.
query_res = {}
def word(query):
    """Look up *query* (lower-cased) on Iciba and record the translation.

    Returns ErrIciba on lookup failure; on success stores the value in the
    shared ``query_res`` dict keyed by the normalized query.
    """
    # eg:
    # query = "good"
    print('[bold yellow][INFO][/bold yellow] word() : Find \"%s\" .' % query.lower())
    word_query = Dictionary.get(query.lower())
    # BUGFIX: value comparison must use ``==``, not the identity operator
    # ``is`` (which only worked via CPython's small-int caching).
    if word_query['errorCode'] == ErrIciba:
        return ErrIciba
    query_res[word_query["query"]] = word_query["value"]
def words():
    """Look up every word from the input workbook with a 20-worker pool."""
    # ROBUSTNESS: the with-block guarantees shutdown(wait=True) even if
    # from_excel() raises, so worker threads are never leaked.
    with ThreadPoolExecutor(20) as pool:
        for query in from_excel(InFilename):
            pool.submit(word, query)
def from_excel(filename):
    """Return the words in the first column of the workbook's first sheet.

    eg: a sheet with |asd|| and |abc|| in column 0 returns ['asd', 'abc'].
    """
    workbook = xlrd.open_workbook(filename)
    first_sheet = workbook.sheets()[0]
    words_column = first_sheet.col_values(0)
    print(words_column)
    print('-' * 0x20)
    return words_column
def to_csv(filename, words_res):
    """Write each (word, translation) pair in *words_res* as one CSV row."""
    with open(filename, 'w', newline='') as csvfile:
        writer = csv.writer(csvfile,
                            quotechar=',', quoting=csv.QUOTE_MINIMAL)
        # eg:
        # writer.writerow(['Spam', 'assss'])
        for word_text, translation in words_res.items():
            writer.writerow([word_text, translation])
if __name__ == '__main__':
    # Time the full scrape: read words, resolve them concurrently, dump CSV.
    start = time.time()
    words()
    to_csv(OutFilename, query_res)
    end = time.time()
    print('-' * 0x20)
    print('[bold yellow][INFO][/bold yellow] main() : Use time %s' % (end - start))
| StarcoderdataPython |
6598481 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QToolTip, QPushButton, QComboBox, QMessageBox, QDesktopWidget, QProgressBar, QLabel, QHBoxLayout, QVBoxLayout, QSizePolicy, QMenu, QAction
from PyQt5.QtGui import QIcon, QPalette, QBrush, QImage, QPixmap, QFont, QCursor
from PyQt5.QtCore import Qt, QSize
from patcher import Patcher, PatcherCommunication
from subprocess import call, Popen
import os
from config import Config
import configManager
import functools
class MainWindow(QMainWindow):
    # Application logger, injected via setLogger().
    logger = None
    # Background Patcher thread, created in initialize().
    patcherThread = None
    # True while a patch run is in progress.
    patchRunning = True
    def __init__(self):
        """Create the main window: title, icon, config load, centering."""
        QMainWindow.__init__(self)
        self.setWindowTitle('ClassicMetin2 - Patcher')
        self.setWindowIcon(QIcon("resources/metin2.ico"))
        # Load persisted settings before positioning the window.
        configManager.ReadConfig()
        self.centerWindow()
    def initialize(self):
        """Build the UI and start the background patcher thread."""
        self.createInterface()

        self.logger.debug("Patching thread start")

        # Wire the patcher thread's signals to the corresponding UI slots.
        self.comm = PatcherCommunication()
        self.comm.enableButtons.connect(self.enableButtons)
        self.comm.disableButtons.connect(self.disableButtons)
        self.comm.progressTotal.connect(self.setTotalProgress)
        self.comm.progressCurrent.connect(self.setCurrentProgress)
        self.comm.statusTotal.connect(self.setTotalAction)
        self.comm.statusCurrent.connect(self.setCurrentAction)
        self.comm.finished.connect(self.terminatePatcher)
        #self.comm.shopDataLoaded.connect(self.onShopDataLoaded)
        #self.comm.newsDataLoaded.connect(self.onNewsDataLoaded)

        if not Config.skipPatch:
            self.patcherThread = Patcher(self.comm)
            self.patcherThread.setLogger(self.logger)
            self.patcherThread.start()
        else:
            # Patching skipped (per the label text, a client is running);
            # leave the progress labels in an idle state.
            self.CurrentLabel.setText("Idle.")
            self.TotalLabel.setText("Can't patch while a client is running.")
    def setLogger(self, logger):
        """Attach the application logger used by the patcher window."""
        self.logger = logger
        self.logger.debug("Logger -> Window")
def createInterface(self):
self.logger.debug("Creating UI")
self.setObjectName("MainWindow")
WINDOW_WIDTH = 1024
WINDOW_HEIGHT = 576
self.WINDOW_WIDTH = WINDOW_WIDTH
self.WINDOW_HEIGHT = WINDOW_HEIGHT
self.setFixedSize(WINDOW_WIDTH, WINDOW_HEIGHT)
# BgOverlay
bgOverlay = QLabel()
bgOverlay.resize(WINDOW_WIDTH, WINDOW_HEIGHT)
bgOverlay.move(0, 0)
bgOverlay.setObjectName("BgOverlay")
bgOverlay.setParent(self)
self.bgOverlay = bgOverlay
# LinePattern
linePatternTop = QLabel()
linePatternTop.resize(WINDOW_WIDTH, 46)
linePatternTop.move(0, 0)
linePatternTop.setObjectName("LinePatternTop")
linePatternTop.setParent(self)
self.linePatternTop = linePatternTop
linePatternBottom = QLabel()
linePatternBottom.resize(WINDOW_WIDTH, 46)
linePatternBottom.move(0, WINDOW_HEIGHT - 40)
linePatternBottom.setObjectName("LinePatternBottom")
linePatternBottom.setParent(self)
self.linePatternBottom = linePatternBottom
baseX = 0
baseY = 0
# ButtonLogo
ButtonLogo = QPushButton('', self)
ButtonLogo.setObjectName("ButtonLogo")
ButtonLogo.resize(420, 201)
ButtonLogo.move( WINDOW_WIDTH / 2 - 200, 45)
ButtonLogo.setParent(self)
self.ButtonLogo = ButtonLogo
# UIBG
UIBG_WIDTH = 700
UIBG_HEIGHT = 260
uiBG = QLabel()
uiBG.resize(UIBG_WIDTH, UIBG_HEIGHT)
uiBG.move(WINDOW_WIDTH / 2 - UIBG_WIDTH / 2, 250)
uiBG.setObjectName("UiBG")
uiBG.setParent(self)
self.uiBG = uiBG
# LanguagePicker
LP_WIDTH = 130
languagePicker = QComboBox()
languagePicker.setObjectName("LanguagePicker")
languagePicker.resize(LP_WIDTH, 32)
languagePicker.setParent(self.uiBG)
languagePicker.move(15, UIBG_HEIGHT - 32 - 10)
languagePicker.addItems(configManager.languages)
languagePicker.setEditable(False)
for i in range(len(configManager.languages)):
icon = QIcon(configManager.languageIcons[i])
languagePicker.setItemIcon(i, icon)
selectedLanguage = configManager.Get("LOCALE", 0)
if selectedLanguage >= len(configManager.languages):
selectedLanguage = 0
languagePicker.setCurrentIndex(selectedLanguage)
languagePicker.currentIndexChanged.connect(self.onLanguageChange)
self.languagePicker = languagePicker
# ButtonStart
ButtonStart = QPushButton('', self)
ButtonStart.setObjectName("ButtonStart")
ButtonStart.setToolTip('Start ClassicMetin2!')
ButtonStart.resize(300, 68)
ButtonStart.move( UIBG_WIDTH / 2 - 150, 180)
ButtonStart.setParent(self.uiBG)
ButtonStart.clicked.connect(self.openGame)
#ButtonStart.hide()
self.ButtonStart = ButtonStart
# CurrentProgress
CurrentProgress = QProgressBar(self)
CurrentProgress.setValue(0)
CurrentProgress.setAlignment(Qt.AlignCenter)
CurrentProgress.resize(600, 40)
CurrentProgress.move(UIBG_WIDTH/2 - 300, 40)
CurrentProgress.setParent(self.uiBG)
self.CurrentProgress = CurrentProgress
# CurrentLabel
CurrentLabel = QLabel(self)
CurrentLabel.setText("Current Status")
CurrentLabel.setObjectName("CurrentLabel")
CurrentLabel.move(UIBG_WIDTH/2-300, 20)
CurrentLabel.resize(412, 20)
CurrentLabel.setParent(self.uiBG)
self.CurrentLabel = CurrentLabel
# TotalProgress
TotalProgress = QProgressBar(self)
TotalProgress.setValue(0)
TotalProgress.setAlignment(Qt.AlignCenter)
TotalProgress.resize(600, 40)
TotalProgress.move(UIBG_WIDTH/2 - 300, 117)
TotalProgress.setParent(self.uiBG)
self.TotalProgress = TotalProgress
#TotalLabel
TotalLabel = QLabel(self)
TotalLabel.setText("Global Status")
TotalLabel.setObjectName("TotalLabel")
TotalLabel.resize(412,20)
TotalLabel.move(UIBG_WIDTH/2 - 300, 97)
TotalLabel.setParent(self.uiBG)
self.TotalLabel = TotalLabel
# ButtonSettings
ButtonSettings = QPushButton('', self)
ButtonSettings.setObjectName("ButtonSettings")
ButtonSettings.setToolTip('Open client options')
ButtonSettings.setParent(self.uiBG)
ButtonSettings.resize(32, 32)
ButtonSettings.move( UIBG_WIDTH - 45, UIBG_HEIGHT - 40)
ButtonSettings.clicked.connect(self.openConfig)
self.ButtonSettings = ButtonSettings
# ButtonRepair
ButtonRepair = QPushButton('', self)
ButtonRepair.setObjectName("ButtonRepair")
ButtonRepair.setToolTip('Repair the Client')
ButtonRepair.setParent(self.uiBG)
ButtonRepair.resize(32, 32)
ButtonRepair.move( UIBG_WIDTH - 45 - 40, UIBG_HEIGHT - 40)
ButtonRepair.clicked.connect(self.onRepair)
self.ButtonRepair = ButtonRepair
# VersionLabel
VersionLabel = QPushButton("Patcher v1.%d" % Config.currentVersion, self)
VersionLabel.setObjectName("VersionLabel")
VersionLabel.move( WINDOW_WIDTH - 100 , 3)
VersionLabel.resize(100, 40)
VersionLabel.setParent(self)
self.VersionLabel = VersionLabel
self.VersionLabel.clicked.connect(self.openContextMenu)
try:
font = QFont("Tahoma", 9)
font.setStyleStrategy(QFont.NoAntialias)
QApplication.setFont(font)
styleSheetFile = open("resources/style.css", "r")
styleSheet = styleSheetFile.read()
styleSheetFile.close()
self.setStyleSheet(styleSheet)
#self.setWindowFlags(self.windowFlags() | Qt.FramelessWindowHint)
#self.setAttribute(Qt.WA_NoSystemBackground, True)
#self.setAttribute(Qt.WA_TranslucentBackground, True)
except:
self.logger.exception("Unable to initialize the UI")
pass
def openContextMenu(self):
menu = QMenu(self)
syserr = QAction("Show syserr.txt", self)
patch = QAction("Show patch.log", self)
screensot = QAction("Show screenshot folder", self)
syserr.triggered.connect(self.showSyserr)
patch.triggered.connect(self.showPatchLog)
menu.addAction(syserr)
menu.addAction(patch)
menu.popup(QCursor.pos())
def showScreenshot(self,q):
Popen("explorer.exe \".\\screenshot\"")
def showSyserr(self,q):
Popen("notepad.exe \".\\syserr.txt\"")
def showPatchLog(self,q):
Popen("notepad.exe \".\\patch.log\"")
def onLoadUrl(self, url):
Popen("explorer %s" % url)
def onRepair(self):
reply = QMessageBox.question(self, 'ClassicMetin2',
"Repairing the client checks for each file's integrity!<br />Do a repair only if you're experiencing trouble with the client!<br /><i>Do you want to coninue?</i>", QMessageBox.Yes |
QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
self.logger.info("Closing for Repair")
try:
call("repairclient.bat")
except:
reply = QMessageBox.critical(self, "Error", "Unable to start the repair.")
def openConfig(self):
if os.path.isfile("config.exe"):
call("config.exe")
else:
reply = QMessageBox.critical(self, "Error", "Unable to open config.")
def onLanguageChange(self, value):
if value >= len(configManager.languages):
value = 0
configManager.Set("LOCALE", value)
def openGame(self):
if os.path.isfile("metin2client.exe"):
Popen("metin2client.exe")
else:
reply = QMessageBox.critical(self, "Error", "Unable to open the client.")
def setCurrentProgress(self, value):
self.CurrentProgress.setValue(value)
def setTotalProgress(self, value):
self.TotalProgress.setValue(value)
def setCurrentAction(self, text):
self.CurrentLabel.setText(text)
def setTotalAction(self, text):
self.TotalLabel.setText(text)
def disableButtons(self):
self.ButtonStart.hide()
def enableButtons(self):
self.ButtonStart.show()
def terminatePatcher(self):
self.patchRunning = False
self.patcherThread.terminate()
self.patcherThread.wait()
def centerWindow(self):
qr = self.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
def closeEvent(self, event):
if self.patchRunning:
reply = QMessageBox.question(self, 'ClassicMetin2',
"The Patch is still running!<br /><i>Are you sure you want to exit?</i>", QMessageBox.Yes |
QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
self.logger.info("User exit")
event.accept()
else:
event.ignore()
else:
self.logger.info("User exit")
event.accept()
offset = None
def mousePressEvent(self, event):
self.offset = event.pos()
def mouseReleaseEvent(self, event):
self.offset = None
def mouseMoveEvent(self, event):
if self.offset != None:
x=event.globalX()
y=event.globalY()
x_w = self.offset.x()
y_w = self.offset.y()
self.move(x-x_w, y-y_w) | StarcoderdataPython |
9713198 | """
Static RMQ
"""
# N: number of array elements, Q: number of queries (first input line).
N, Q = [int(x) for x in input().split()]
AS = [int(x) for x in input().split()]
# Sentinel larger than any element; returned for non-overlapping ranges.
INF = 10 ** 10
# Leaf count: 1 << N.bit_length() is a power of two >= N (possibly up to
# twice the minimal width when N itself is a power of two -- safe, just wider).
SEG_TREE_WIDTH = 1 << N.bit_length()
seg = [INF] * (SEG_TREE_WIDTH * 2) # 0-origin
def range_min_query(start, end, node=1, nstart=0, nend=SEG_TREE_WIDTH):
    """Minimum of AS[start..end] via the global segment tree `seg`.

    `end` appears to be inclusive (the no-overlap test uses `end < nstart`;
    the caller passes r-1). `node` covers the half-open leaf interval
    [nstart, nend).
    """
    # print("node", node, (nstart, nend))
    if end < nstart or nend <= start:
        # print("no overlap")
        return INF
    if node >= SEG_TREE_WIDTH:
        # print("leaf")
        return seg[node]
    if start <= nstart and nend < end:
        # print("perfect overlap")
        # NOTE(review): this containment test is stricter than the usual
        # `nend - 1 <= end`; the equality cases fall through to the children
        # and are resolved by the leaf base case, so results stay correct,
        # just with a few extra recursions.
        return seg[node]
    # print("to visit children", node * 2, node * 2 + 1)
    return min(
        range_min_query(start, end, node * 2, nstart, (nstart + nend) // 2),
        range_min_query(start, end, node * 2 + 1, (nstart + nend) // 2, nend),
    )
# Fill the leaves with the array values (remaining leaves keep INF)...
for i, x in enumerate(AS):
    seg[SEG_TREE_WIDTH + i] = x
# ...then build the internal nodes bottom-up. Index 0 is not a real tree
# node in this 1-origin layout; updating it is harmless.
for i in reversed(range(SEG_TREE_WIDTH)):
    seg[i] = min(seg[i * 2], seg[i * 2 + 1])
for q in range(Q):
    l, r = [int(x) for x in input().split()]
    # print(l, r)
    # Queries appear half-open [l, r); r-1 makes the end inclusive for the query.
    print(range_min_query(l, r-1))
| StarcoderdataPython |
4875706 | <gh_stars>1-10
from zmq import devices
import collections
import time
import operator
import memcache
def cluster(spec):
    """Resolve a cluster member list stored in memcache.

    `spec` has the form '<memcached address>/<key>'; the value stored under
    <key> is a comma-separated string, returned split into a list.
    """
    address, key = spec.split('/')
    raw_members = memcache.Client([address]).get(key)
    return raw_members.split(',')
def elect(client, name, candidate, ttl=60):
    """Simple leader-election-esque distributed selection via memcache.

    Every contender appends ',<candidate>' to the shared `name` key; the
    contender whose add() creates the key within the `ttl` window is the
    leader and gets True, everyone else gets False.

    Bug fix: the original built the payload as "',%' % cadidate" -- an
    invalid format string AND a misspelled variable -- so every call raised
    instead of electing anyone.
    """
    # append() succeeding means the key already exists: a leader was chosen.
    if client.append(name, ',%s' % candidate, time=ttl):
        return False
    # Key missing: whoever wins the add() race leads for `ttl` seconds.
    return bool(client.add(name, candidate, time=ttl))
class SampledRate(object):
    """Tool for pushing rate over time data"""

    def __init__(self, frequency=1, resolution=1, parent=None, callback=None, name=None):
        """ frequency: Rate update frequency in seconds
            resolution: Interval to average data over in seconds
            parent: Another SampledRate that ticks will propagate to
            callback: Optional callback when frequency is updated"""
        self.frequency = frequency
        self.resolution = resolution
        self.parent = parent
        self.callback = callback
        # Sample buckets keyed by int(time / resolution); missing keys read 0.
        self.samples = collections.defaultdict(int)
        self.ticks = 0
        self.last_start = None   # epoch second of the current window's start
        self.last_value = 0      # most recently computed rate
        # Inherit the parent's name unless one was supplied explicitly.
        if not name and parent:
            self.name = parent.name
        else:
            self.name = name

    def _update(self):
        # Roll the window once more than `frequency` seconds have elapsed.
        if self.last_start and int(time.time() - self.last_start) > self.frequency:
            # Add empty samples
            # NOTE(review): pads with `frequency - len(samples)` entries under
            # small-integer keys, which do not match the time-based bucket
            # keys -- verify this padding does what was intended.
            for x in range(self.frequency-len(self.samples)):
                self.samples[x] = 0
            # `reduce` here is the Python 2 builtin; sums all bucket counts.
            self.last_value = reduce(operator.add, self.samples.values()) / self.resolution / self.frequency
            self.last_start = int(time.time())
            if self.callback:
                # reactor.callLater(0, self.callback, self.last_value, self.ticks)
                self.callback(self.last_value, self.ticks)
            self.ticks = 0
            self.samples = collections.defaultdict(int)

    def tick(self, ticks=1):
        """Record `ticks` events now; propagates to the parent rate, if any."""
        if not self.last_start:
            self.last_start = int(time.time())
        self._update()
        if self.parent:
            self.parent.tick(ticks)
        self.samples[int(time.time() / self.resolution)] += ticks
        self.ticks += ticks
        return self

    def getvalue(self):
        """Return the current rate, rolling the window first if it expired."""
        self._update()
        return self.last_value

    def __int__(self):
        return self.getvalue()

    def __str__(self):
        # Okay, hardcoding 1 sec resolutions for now
        return "%i %s/sec" % (self.getvalue(), self.name or 'ticks')

    def __repr__(self):
        return "<SampledRate: %i avg/%is updated/%is>" % (self.getvalue(), self.frequency, self.resolution)
class Device(devices.ThreadDevice):
    """ThreadDevice variant that builds its sockets from an explicitly
    supplied zmq context instead of creating its own."""

    def __init__(self, type, in_type, out_type, ctx):
        self._context = ctx
        devices.ThreadDevice.__init__(self, type, in_type, out_type)

    def _setup_sockets(self):
        """Create, configure, bind and connect the device's socket pair."""
        ins = self._context.socket(self.in_type)
        # A negative out_type means a single shared socket for both directions.
        outs = ins if self.out_type < 0 else self._context.socket(self.out_type)
        # Socket options must be applied before bind/connect (e.g. zmq.IDENTITY).
        for option, value in self._in_sockopts:
            ins.setsockopt(option, value)
        for option, value in self._out_sockopts:
            outs.setsockopt(option, value)
        for endpoint in self._in_binds:
            ins.bind(endpoint)
        for endpoint in self._out_binds:
            outs.bind(endpoint)
        for endpoint in self._in_connects:
            ins.connect(endpoint)
        for endpoint in self._out_connects:
            outs.connect(endpoint)
        return ins, outs
266297 | """Create new objects of various types. Deprecated.
This module is no longer required except for backward compatibility.
Objects of most types can now be created by calling the type object.
"""
# Emit the Py3k-migration warning once on import, then drop the helper name.
from warnings import warnpy3k
warnpy3k("The 'new' module has been removed in Python 3.0; use the 'types' "
         "module instead.", stacklevel=2)
del warnpy3k

# Legacy aliases: the old 'new' module names map directly onto 'types'.
from types import ClassType as classobj
from types import FunctionType as function
from types import InstanceType as instance
from types import MethodType as instancemethod
from types import ModuleType as module
# CodeType is not accessible in restricted execution mode
try:
    from types import CodeType as code
except ImportError:
    pass
| StarcoderdataPython |
1680545 | # -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 <NAME>, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from .namespaces import NUMBERNS
from .element import Element
from .style import StyleElement
# Autogenerated
# Autogenerated factory helpers: each returns an ODF element in the number:
# namespace; *Style variants wrap StyleElement, the rest plain Element.
def AmPm(**args):
    return Element(qname = (NUMBERNS,'am-pm'), **args)

def Boolean(**args):
    return Element(qname = (NUMBERNS,'boolean'), **args)

def BooleanStyle(**args):
    return StyleElement(qname = (NUMBERNS,'boolean-style'), **args)

def CurrencyStyle(**args):
    return StyleElement(qname = (NUMBERNS,'currency-style'), **args)

def CurrencySymbol(**args):
    return Element(qname = (NUMBERNS,'currency-symbol'), **args)

def DateStyle(**args):
    return StyleElement(qname = (NUMBERNS,'date-style'), **args)

def Day(**args):
    return Element(qname = (NUMBERNS,'day'), **args)

def DayOfWeek(**args):
    return Element(qname = (NUMBERNS,'day-of-week'), **args)

def EmbeddedText(**args):
    return Element(qname = (NUMBERNS,'embedded-text'), **args)

def Era(**args):
    return Element(qname = (NUMBERNS,'era'), **args)

def Fraction(**args):
    return Element(qname = (NUMBERNS,'fraction'), **args)

def Hours(**args):
    return Element(qname = (NUMBERNS,'hours'), **args)

def Minutes(**args):
    return Element(qname = (NUMBERNS,'minutes'), **args)

def Month(**args):
    return Element(qname = (NUMBERNS,'month'), **args)

def Number(**args):
    return Element(qname = (NUMBERNS,'number'), **args)

def NumberStyle(**args):
    return StyleElement(qname = (NUMBERNS,'number-style'), **args)

def PercentageStyle(**args):
    return StyleElement(qname = (NUMBERNS,'percentage-style'), **args)

def Quarter(**args):
    return Element(qname = (NUMBERNS,'quarter'), **args)

def ScientificNumber(**args):
    return Element(qname = (NUMBERNS,'scientific-number'), **args)

def Seconds(**args):
    return Element(qname = (NUMBERNS,'seconds'), **args)

def Text(**args):
    return Element(qname = (NUMBERNS,'text'), **args)

def TextContent(**args):
    return Element(qname = (NUMBERNS,'text-content'), **args)

def TextStyle(**args):
    return StyleElement(qname = (NUMBERNS,'text-style'), **args)

def TimeStyle(**args):
    return StyleElement(qname = (NUMBERNS,'time-style'), **args)

def WeekOfYear(**args):
    return Element(qname = (NUMBERNS,'week-of-year'), **args)

def Year(**args):
    return Element(qname = (NUMBERNS,'year'), **args)
| StarcoderdataPython |
1667738 | #!/usr/bin/env python
import requests
import subprocess
import sys
import os
import lxml.html as lh
import logging
# Write DEBUG-and-above records to pyget.log, truncated on every run ('w').
logging.basicConfig(filename='pyget.log', filemode='w', \
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', \
    level=logging.DEBUG)
def main(url, dir_dest):
    """Download every file linked from a directory-listing page.

    Creates `dir_dest` if needed, chdirs into it, fetches `url`, and saves
    each linked file next to the current directory. Per-file failures are
    printed and logged, then skipped.

    Bug fix: file handles were opened with open(...).write(...) and never
    closed; a `with` block now guarantees closure even when the write fails.
    """
    print("## Start ##")
    if not os.path.isdir(dir_dest):
        print("Creating %s" % dir_dest)
        os.mkdir(dir_dest)
    os.chdir(dir_dest)
    page = requests.get(url)
    doc = lh.fromstring(page.content)
    a_elements = doc.xpath('//a')
    # Skip the first 5 anchors: directory listings typically start with
    # sort/parent-directory links. TODO(review): confirm for the target server.
    for a in range(5, len(a_elements)):
        file_name = a_elements[a].get("href")
        try:
            print("=====")
            print("file_name:", file_name)
            file_url = url + file_name
            print("file_url:", file_url)
            file_req = requests.get(file_url)
            with open(file_name, 'wb') as out_file:
                out_file.write(file_req.content)
        except Exception as e:
            print("File %s downloading error: %s" % (file_name, str(e)))
            logging.error(str(e))
if __name__=="__main__":
    # Usage: ./main.py <directory-listing URL> <destination directory>
    if len(sys.argv)!=3:
        print("usage:\n./main.py http://mywebsite.com/dir $PWD/tmp")
        exit(1)
    main(sys.argv[1], sys.argv[2])
| StarcoderdataPython |
6669522 | from django.shortcuts import redirect, HttpResponse
from django.views.generic.base import TemplateView
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from processing.base.decorators import group_required
from processing.base.managers import MessageManager
from processing.base.managers import Collect
from initialarticle.models import InitialArticle
from initialarticle.forms import InitialArticleCreateForm, InitialArticleEditForm
class CreateInitialArticle(TemplateView):
    """Staff-only view for creating an initial article.

    GET renders the creation form; POST saves it and forwards the author to
    the article-creation page for the new keywords.
    """
    template_name = 'initialarticle/create_initial_article.html'

    @method_decorator(login_required)
    @method_decorator(group_required('stuff'))
    def get(self, request, *args, **kwargs):
        """Render an empty creation form."""
        self.form = InitialArticleCreateForm(request = request)
        return super(CreateInitialArticle, self).get(request, *args, **kwargs)

    @method_decorator(login_required)
    @method_decorator(group_required('stuff'))
    def post(self, request, *args, **kwargs):
        """Validate and save; on duplicate keywords or invalid form, flash a
        message and bounce back to the referring page."""
        self.form = InitialArticleCreateForm(request.POST, request = request)
        if self.form.is_valid():
            # save() returns a falsy value when the keywords already exist.
            initial_article = self.form.save(request.user)
            if initial_article:
                return redirect('/article/stuff/create/'+ request.LANGUAGE_CODE +'/' + str(initial_article.keywords))
            MessageManager().makeMessage(request, message = 'keywords_exists')
            return redirect(request.META.get('HTTP_REFERER'))
        MessageManager().makeMessage(request, message = 'form_is_not_valid')
        return redirect(request.META.get('HTTP_REFERER'))

    def get_context_data(self, **kwargs):
        """Expose the bound form to the template."""
        context = super(CreateInitialArticle, self).get_context_data(**kwargs)
        context['create_initial_article_fileds'] = self.form
        return context
class EditInitialArticle(TemplateView):
    """Staff-only view for editing an existing initial article.

    Both GET and POST verify that the requester owns the article identified
    by kwargs['keywords'] before doing anything.
    """
    template_name = 'initialarticle/edit_initial_article.html'

    @method_decorator(login_required)
    @method_decorator(group_required('stuff'))
    def get(self, request, *args, **kwargs):
        """Render the edit form pre-filled with the article, owner only."""
        initial_article = InitialArticle.objects.get_by_keywords(keywords = kwargs['keywords'])
        if InitialArticle.objects.initial_article_owner_is_requester(request, initial_article):
            self.form = InitialArticleEditForm(instance = initial_article, request = request)
            return super(EditInitialArticle, self).get(request, *args, **kwargs)
        MessageManager().makeMessage(request, message = 'no_privileg_to_reach_this_page')
        return redirect(request.META.get('HTTP_REFERER'))

    @method_decorator(login_required)
    @method_decorator(group_required('stuff'))
    def post(self, request, *args, **kwargs):
        """Persist the edit (owner only) and return to the user's article list."""
        initial_article = InitialArticle.objects.get_by_keywords(keywords = kwargs['keywords'])
        if InitialArticle.objects.initial_article_owner_is_requester(request, initial_article):
            self.form = InitialArticleEditForm(request.POST, instance = initial_article, request = request)
            if self.form.is_valid():
                self.form.save_(article_keywords_object = initial_article, user = request.user)
                return redirect('/initialarticle/userarticles')
            MessageManager().makeMessage(request, message='form_is_not_valid')
            return redirect(request.META.get('HTTP_REFERER'))
        MessageManager().makeMessage(request, message = 'no_privileg_to_reach_this_page')
        return redirect(request.META.get('HTTP_REFERER'))

    def get_context_data(self, **kwargs):
        """Expose the bound form to the template."""
        context = super(EditInitialArticle, self).get_context_data(**kwargs)
        context['edit_initial_article_fileds'] = self.form
        return context
def removeInitialArticle(request, *args, **kwargs):
    """Delete the initial article identified by kwargs['keywords'] (GET only).

    Owners are bounced back to the referring page after deletion; anyone
    else is redirected to the current path. Non-GET requests receive a
    plain '501' body, mirroring the original handler's behaviour.
    """
    if request.method != 'GET':
        return HttpResponse('501')
    article = InitialArticle.objects.get(keywords = kwargs['keywords'])
    is_owner = InitialArticle.objects.initial_article_owner_is_requester(request, article)
    if is_owner:
        article.delete()
        return redirect(request.META.get('HTTP_REFERER'))
    return redirect(request.path_info)
class RequesterInitialarticles(TemplateView):
    """Staff-only listing of the initial articles created by the requester."""
    template_name = 'initialarticle/requester_initilarticles.html'

    @method_decorator(login_required)
    @method_decorator(group_required('stuff'))
    def get(self, request, *args, **kwargs):
        """Collect the requester's own articles before rendering."""
        self.initialarticles = InitialArticle.objects.filter(by = request.user)
        return super(RequesterInitialarticles, self).get(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        """Expose the queryset gathered in get() to the template."""
        context = super(RequesterInitialarticles, self).get_context_data(**kwargs)
        context['initialarticles'] = self.initialarticles
        return context
| StarcoderdataPython |
4851066 | <reponame>charliecalvert/CloudNotes
#! /usr/bin/python3
import shutil
def step01():
    """Duplicate the week 01-09 note files, renaming 2015 to 2016."""
    for week in range(1, 10):
        source = 'Isit322-Week0%d-2015.md' % week
        destination = 'Isit322-Week0%d-2016.md' % week
        shutil.copyfile(source, destination)
def step02():
    """Duplicate the week 10-11 note files, renaming 2015 to 2016."""
    for week in range(0, 2):
        source = 'Isit322-Week1%d-2015.md' % week
        destination = 'Isit322-Week1%d-2016.md' % week
        shutil.copyfile(source, destination)
# NOTE: step01() is left disabled -- presumably it was already run once.
#step01()
step02()
| StarcoderdataPython |
11381139 | <gh_stars>1-10
#! /usr/bin/env python
####################################################################################################
# SET UP
#
#
# TODO: Probabilistic Guess Function
# TODO: Account for likelihood that ships are not adjacent
# TODO: Minimize Entropy Model
import socket
import time
import util
from copy import copy
# Sample layout in the serialized format read_board() parses: ten
# 10-character rows, each terminated by a newline. '0' = empty water,
# letters = ship cells.
ascii_board = 'A00000000B\nA00000000B\nA00000000B\nA00000000B\nA000000000\n0000000000\n0000000000\n0000000000\nP000000000\nP00DDD0SSS\n'
# Partially revealed opponent view used for testing: '?' = unknown,
# ship letters mark known ship cells.
TEST_BOARD = [['?','?','?','?','?','?','?','?','?','?'],
              ['?','?','?','?','?','?','?','?','?','?'],
              ['?','?','?','?','?','?','?','?','?','?'],
              ['?','?','?','?','?','?','?','?','?','?'],
              ['?','?','?','?','P','?','?','?','?','?'],
              ['?','?','?','?','P','?','?','?','?','?'],
              ['?','?','?','?','?','?','?','?','?','?'],
              ['?','?','?','?','?','?','?','?','?','?'],
              ['?','?','?','?','?','?','?','?','?','?'],
              ['?','?','?','?','?','?','?','?','?','?']]
# Fresh all-unknown view of the opponent's board.
BOARD = [['?','?','?','?','?','?','?','?','?','?'],
         ['?','?','?','?','?','?','?','?','?','?'],
         ['?','?','?','?','?','?','?','?','?','?'],
         ['?','?','?','?','?','?','?','?','?','?'],
         ['?','?','?','?','?','?','?','?','?','?'],
         ['?','?','?','?','?','?','?','?','?','?'],
         ['?','?','?','?','?','?','?','?','?','?'],
         ['?','?','?','?','?','?','?','?','?','?'],
         ['?','?','?','?','?','?','?','?','?','?'],
         ['?','?','?','?','?','?','?','?','?','?']]
# Bundled AI opponents this bot can be matched against.
NON_HUMAN_OPPONENTS = ['players/hunter_parity.py',
                       'players/hunter.py',
                       'players/randguess.py',
                       'players/tile.py']
# Ship letters (presumably carrier/battleship/destroyer/patrol/submarine).
SHIPS = ['A', 'B', 'D', 'P', 'S']
# Length in cells of each ship type.
SHIP_SIZE = {'A':5, 'B':4, 'D':3, 'P':2, 'S':3}
####################################################################################################
# UTILITY FUNCTIONS
#
#
def read_board(ascii_board):
    """Parse a serialized board string into a list of 10-character rows.

    Every run of 10 characters becomes one row; the 11th character (normally
    the newline) is consumed as a row terminator and discarded. A trailing
    row without its terminator is dropped, matching the original parser.
    """
    board = []
    current_row = []
    for ch in ascii_board:
        if len(current_row) < 10:
            current_row.append(ch)
        else:
            # `ch` is the separator; discard it and close out the row.
            board.append(current_row)
            current_row = []
    return board
def print_board(board):
    """Pretty-print a board, left-padding single-digit ints to width 2.

    Bug fix: the original converted int cells to strings *in place*,
    permanently corrupting the caller's board as a side effect of printing.
    Formatting now happens on a per-row copy; the input is never modified.
    """
    for row in board:
        formatted = []
        for cell in row:
            if isinstance(cell, int) and cell < 10:
                formatted.append(' ' + str(cell))
            elif isinstance(cell, int):
                formatted.append(str(cell))
            else:
                formatted.append(cell)
        print(formatted)
def is_valid(row, col):
    """True when (row, col) lies inside the 10x10 board."""
    return 0 <= row <= 9 and 0 <= col <= 9
def copy_of(board):
    """Return a row-by-row shallow copy of `board`.

    Generalized: the original hard-coded a 10x10 scan and crashed on any
    other shape; this works for boards of any dimensions while producing an
    identical result for the standard 10x10 case. Cell values are shared
    (they are immutable strings/ints here), but rows are independent.
    """
    return [row[:] for row in board]
def generate_question_mark_board():
    """Return a fresh 10x10 board of '?' cells, each row independently allocated."""
    return [['?'] * 10 for _ in range(10)]
def generate_scoring_board():
    """Build placement weights for scoring your own ship layout.

    Cells where few placements fit (edges, corners) get high weights; the
    board centre -- cell (4, 4), assumed to have the maximal possibility
    count -- scores zero.
    """
    counts = board_possibility_counter(BOARD)
    max_possibilites = counts[4][4]
    return [[max_possibilites - cell for cell in row] for row in counts]
def generate_playing_board(duration):
    """
    Generates a random playing board
    """
    # Hill-climb for `duration` seconds: keep whichever random layout scores
    # best under the scoring board (higher = harder-to-hit cells used).
    timeout = time.time() + duration
    random_board_string = util.gen_random_board_str()
    max_score = score(read_board(random_board_string))
    while time.time() < timeout:
        new_random_board_string = util.gen_random_board_str()
        new_score = score(read_board(new_random_board_string))
        if new_score > max_score:
            random_board_string = new_random_board_string
            max_score = new_score
    # Python 2 print statement (debug output of the winning score).
    print max_score
    return random_board_string
def x_in_board(board):
    """Report whether the board holds any hit-but-unsunk cell ('X')."""
    return any(
        board[row][col] == 'X'
        for row in range(10)
        for col in range(10)
    )
def smallest_ship_size(ships):
    """Length of the shortest ship in `ships`.

    Mirrors the original fallback: an empty list yields the size of the
    largest known ship type instead.
    """
    if ships:
        return min(SHIP_SIZE[ship] for ship in ships)
    return max(SHIP_SIZE.values())
def unsunk_ships(board):
    """Return the list of ship letters not visible anywhere on `board`.

    Bug fix: the original did `ships = SHIPS`, aliasing the module-level
    SHIPS list, so every `.remove()` permanently deleted entries from the
    global roster and corrupted all later calls. A copy is taken instead.
    """
    ships = list(SHIPS)
    for row in range(10):
        for col in range(10):
            if(board[row][col] in ships):
                ships.remove(board[row][col])
    return ships
def surrounding_unsunk_hits(board, row, col):
    """Coordinates of adjacent hit-but-unsunk cells ('X').

    Neighbours are checked in the original order: up, down, left, right.
    """
    hits = []
    for r, c in ((row - 1, col), (row + 1, col), (row, col - 1), (row, col + 1)):
        # Bounds check inlined from is_valid().
        if 0 <= r <= 9 and 0 <= c <= 9 and board[r][c] == 'X':
            hits.append((r, c))
    return hits
def count_unknown_spaces(board, row, col, direction):
    """Count consecutive '?' cells from (row, col) in `direction`.

    `direction` is one of 'left', 'right', 'up', 'down'; anything else
    yields 0 (the original's behaviour). Counting starts at the neighbour
    of (row, col) and stops at the first non-'?' cell or the board edge --
    there is no cap (the original docstring's "up to 4" was inaccurate).

    Refactor: the four copy-pasted direction branches are collapsed into a
    single walk driven by a (row, col) delta table.
    """
    deltas = {'left': (0, -1), 'right': (0, 1), 'up': (-1, 0), 'down': (1, 0)}
    if direction not in deltas:
        return 0
    d_row, d_col = deltas[direction]
    unknown = 0
    r, c = row + d_row, col + d_col
    while 0 <= r <= 9 and 0 <= c <= 9 and board[r][c] == '?':
        unknown += 1
        r += d_row
        c += d_col
    return unknown
def sunken_ship_update(board, row, col, ship):
    """Relabel the hits belonging to a just-sunk ship.

    Starting at (row, col), scan left, right, up then down (the original
    search order) for SHIP_SIZE[ship] consecutive 'X' cells including the
    start cell; the first complete run found is rewritten with the ship's
    letter. A copy of the board is returned in every case -- the input
    board is never modified, matching the original.

    Refactor: the four near-identical direction scans are collapsed into a
    single loop over (row, col) deltas.
    """
    size = SHIP_SIZE[ship]
    # Deltas in the original check order: left, right, up, down.
    for d_row, d_col in ((0, -1), (0, 1), (-1, 0), (1, 0)):
        cells = [(row + d_row * shift, col + d_col * shift) for shift in range(size)]
        if all(is_valid(r, c) and board[r][c] == 'X' for r, c in cells):
            updated_board = copy_of(board)
            for r, c in cells:
                updated_board[r][c] = ship
            return updated_board
    # No direction held a full run of hits; return an unmodified copy.
    return copy_of(board)
####################################################################################################
# CALCULATION FUNCTIONS
#
#
def score(board):
    """Score a ship layout: sum the scoring-board weight of every occupied
    (non-'0') cell -- higher totals favour hard-to-hit placements."""
    weights = generate_scoring_board()
    return sum(
        weights[row][col]
        for row in range(10)
        for col in range(10)
        if board[row][col] != '0'
    )
def line_possibility_counter(spaces1, spaces2, given_spaces, ships):
    """Total ways to place any of `ships` along one line through a cell,
    given `spaces1`/`spaces2` open cells on either side and `given_spaces`
    cells already fixed."""
    return sum(
        single_ship_line_possibility_counter(spaces1, spaces2, given_spaces, ship)
        for ship in ships
    )
def single_ship_line_possibility_counter(spaces1, spaces2, given_spaces, ship):
    """Ways to place one `ship` along a line through a cell.

    `spaces1` and `spaces2` are the open cells on each side; `given_spaces`
    cells of the ship are already pinned down, leaving `free_spaces` cells
    to distribute between the two sides.
    """
    free_spaces = SHIP_SIZE[ship] - given_spaces
    # Each side can contribute at most `free_spaces` useful cells.
    usable1 = min(spaces1, free_spaces)
    usable2 = min(spaces2, free_spaces)
    return max(usable1 + usable2 - free_spaces + 1, 0)
def board_possibility_counter(board):
    """For every unknown ('?') cell, count the placements of the still-unsunk
    ships that could cover it (horizontal + vertical); known cells get 0.

    Perf fix: unsunk_ships(board) is loop-invariant, so it is computed once
    instead of once per unknown cell (up to 100 redundant board scans).
    """
    ships = unsunk_ships(board)
    counts_board = []
    for row in range(10):
        counts_row = []
        for col in range(10):
            count = 0
            if(board[row][col] == '?'):
                left_unknown = count_unknown_spaces(board, row, col, 'left')
                right_unknown = count_unknown_spaces(board, row, col, 'right')
                up_unknown = count_unknown_spaces(board, row, col, 'up')
                down_unknown = count_unknown_spaces(board, row, col, 'down')
                count += line_possibility_counter(left_unknown, right_unknown, 1, ships)
                count += line_possibility_counter(up_unknown, down_unknown, 1, ships)
            counts_row.append(count)
        counts_board.append(counts_row)
    return counts_board
####################################################################################################
# MOVE GUESSING FUNCTIONS
#
#
def guess(their_board):
    """Pick the highest-possibility cell on a parity grid keyed to the
    smallest remaining ship.

    Bug fix: the original called unsunk_ships on the *counts* board (a grid
    of ints) instead of the opponent's board, so the remaining-ship list --
    and therefore the parity spacing -- was computed from the wrong data.
    """
    counts = board_possibility_counter(their_board)
    ships_remaining = unsunk_ships(their_board)
    min_size = smallest_ship_size(ships_remaining)
    max_coords = (0, 0)
    for row in range(10):
        for col in range(10):
            # Only consider cells on the parity lattice: a ship of length
            # min_size must cross at least one such cell.
            if(((row + col) % min_size) == 0):
                if(counts[row][col] > counts[max_coords[0]][max_coords[1]]):
                    max_coords = (row, col)
    return max_coords
def target_helper(board, line_type, num, ships):
    """
    Return a dict {(row, col): score} of candidate shots that extend a run
    of hits along a single line.

    line_type selects the direction: 'row' scans row `num`, 'col' scans
    column `num`.  The two cells just outside the run of 'X' hits are
    scored by the number of ship placements that would cover the run, plus
    a large bonus when the span already covers 3+ cells.
    Only used before a sink (see target()).
    """
    possibilities_map = {}
    if(line_type == 'row'):
        # Columns of the candidate cells just outside the run of hits;
        # -1 means "no hit found / run touches the edge".
        left_hit_col = -1
        right_hit_col = -1
        for col in range(10):
            if(board[num][col] == 'X'):
                # Set start col of hits in row (if applicable)
                if(is_valid(num, col - 1)):
                    if(board[num][col - 1] != 'X'):
                        left_hit_col = col - 1
                # Set end col of hits in row (if applicable)
                if(is_valid(num, col + 1)):
                    if(board[num][col + 1] != 'X'):
                        right_hit_col = col + 1
        left_hit_ways = 0
        right_hit_ways = 0
        # Cells spanned between the two candidate end points (run length + 1).
        given_spaces = right_hit_col - left_hit_col
        in_a_row_bonus = 1
        if(given_spaces >= 3):
            # Strongly prefer finishing off an established run of hits.
            in_a_row_bonus = 100
        if(is_valid(num, left_hit_col)):
            if(board[num][left_hit_col] == '?'):
                left_unknown = count_unknown_spaces(board, num, left_hit_col, 'left')
                right_unknown = count_unknown_spaces(board, num, right_hit_col - 1, 'right')
                left_hit_ways = line_possibility_counter(left_unknown, right_unknown, given_spaces, ships)
                possibilities_map[(num, left_hit_col)] = in_a_row_bonus + left_hit_ways
        if(is_valid(num, right_hit_col)):
            if(board[num][right_hit_col] == '?'):
                left_unknown = count_unknown_spaces(board, num, left_hit_col + 1, 'left')
                right_unknown = count_unknown_spaces(board, num, right_hit_col, 'right')
                right_hit_ways = line_possibility_counter(left_unknown, right_unknown, given_spaces, ships)
                possibilities_map[(num, right_hit_col)] = in_a_row_bonus + right_hit_ways
    elif(line_type == 'col'):
        # Same procedure, scanning column `num` vertically.
        up_hit_row = -1
        down_hit_row = -1
        for row in range(10):
            if(board[row][num] == 'X'):
                # Set start row of hits in column (if applicable)
                if(is_valid(row - 1, num)):
                    if(board[row - 1][num] != 'X'):
                        up_hit_row = row - 1
                # Set end row of hits in column (if applicable)
                if(is_valid(row + 1, num)):
                    if(board[row + 1][num] != 'X'):
                        down_hit_row = row + 1
        up_hit_ways = 0
        down_hit_ways = 0
        given_spaces = down_hit_row - up_hit_row
        in_a_row_bonus = 1
        if(given_spaces >= 3):
            in_a_row_bonus = 100
        if(is_valid(up_hit_row, num)):
            if(board[up_hit_row][num] == '?'):
                up_unknown = count_unknown_spaces(board, up_hit_row, num, 'up')
                down_unknown = count_unknown_spaces(board, down_hit_row - 1, num, 'down')
                up_hit_ways = line_possibility_counter(up_unknown, down_unknown, given_spaces, ships)
                possibilities_map[(up_hit_row, num)] = in_a_row_bonus + up_hit_ways
        if(is_valid(down_hit_row, num)):
            if(board[down_hit_row][num] == '?'):
                up_unknown = count_unknown_spaces(board, up_hit_row + 1, num, 'up')
                down_unknown = count_unknown_spaces(board, down_hit_row, num, 'down')
                down_hit_ways = line_possibility_counter(up_unknown, down_unknown, given_spaces, ships)
                possibilities_map[(down_hit_row, num)] = in_a_row_bonus + down_hit_ways
    return possibilities_map
def target(board):
    """
    Return a targeted guess (row, col) given a board with hit but unsunk ships.

    Merges the per-row and per-column candidate maps from target_helper and
    picks the coordinate with the highest combined score.
    """
    # TODO: Have targeted guesses prefer a specific parity and/or squares with high guess index
    possibilities_map = {}
    ships = unsunk_ships(board)
    # Rows first, then columns -- same accumulation order as scanning each
    # direction separately.
    for line_type in ('row', 'col'):
        for num in range(10):
            line_map = target_helper(board, line_type, num, ships)
            for coords, ways in line_map.items():
                possibilities_map[coords] = possibilities_map.get(coords, 0) + ways
    return max(possibilities_map, key=possibilities_map.get)
def fire(board):
    """
    Return the move (row, col) for this turn: a targeted shot when the board
    has un-sunk hits, otherwise a probability-based guess.
    """
    return target(board) if x_in_board(board) else guess(board)
def update_board(board, row, col, last_fire_result):
    """
    Return a copy of board updated with the result of the last shot.

    'M' marks a miss ('0'); 'H' marks a hit ('X'); 'S<ship>' marks a hit
    that sank the named ship, which also triggers sunken_ship_update.
    Any other result leaves the copied board unchanged.
    """
    updated = copy_of(board)
    if last_fire_result == 'M':
        updated[row][col] = '0'
    elif last_fire_result == 'H' or last_fire_result.startswith('S'):
        updated[row][col] = 'X'
        if last_fire_result.startswith('S'):
            updated = sunken_ship_update(updated, row, col, last_fire_result[1])
    return updated
def play_game():
    """
    Plays a full game of battleship against an opponent.

    Protocol (over util.Communication): read "turn,opponent", send our board
    line by line, then alternate sending guesses and reading results until
    the socket closes.  NOTE: this module is Python 2 (print statement below).
    """
    board = copy_of(BOARD)
    comm = util.Communication()
    initstring = comm.readline()
    turn, opponent = initstring.split(",")
    opponent = opponent.strip()
    # Generate and send my board
    if opponent in NON_HUMAN_OPPONENTS:
        # Bots get the fixed layout; humans get a freshly generated one.
        genboard = ascii_board
    else:
        genboard = generate_playing_board(1.95)
    for line in genboard.splitlines():
        comm.sendline(line)
    if turn == "0":
        myturn = True
    else:
        myturn = False
    guesses = set()
    while True:
        try:
            if myturn:
                # Send a guess.
                # NOTE(review): this local deliberately shadows the
                # module-level guess() function inside the loop body.
                guess = fire(board)
                guessx, guessy = guess
                guesses.add(guess)
                comm.sendline("{},{}".format(guessx, guessy))
                # Read what happened
                data = comm.readline().strip()
                board = update_board(board, guessx, guessy, data)
                myturn = False
            else:
                # Read opponent's guess
                data = comm.readline()
                myturn = True
        except socket.error:
            # Game is over, we either won or lost.
            # NOTE(review): no break/return here -- the loop will re-enter
            # and hit the same error again; confirm intended.
            print "Game Finished"


play_game()
| StarcoderdataPython |
3439454 |
import unittest
from import_new_tournaments.process_hh_files.process.tournament.extract.price import price
class test(unittest.TestCase):
    def test_price(self):
        """price() should parse the total price (buy-in + rake) from a hand-history filename."""
        hh_filename = "HH20201217 SITGOID-G23140238T1 TN-$0{FULLSTOP}50 Hold'Em Turbo - On Demand GAMETYPE-Hold'em LIMIT-no CUR-REAL OND-T BUYIN-0.txt"
        expected = 0.55
        self.assertEqual(price(hh_filename), expected)
| StarcoderdataPython |
3452640 | <filename>proj_issues/issues/views.py
# Imports {{{
from __future__ import print_function, unicode_literals, division
from pprint import pprint
from difflib import Differ
from django.http import HttpResponse
from django.core.urlresolvers import reverse, reverse_lazy
from django.contrib.admin.views.decorators import staff_member_required
from django.forms import forms
from django.core.mail import send_mail
from django.template.defaultfilters import date
from django.contrib.auth.decorators import permission_required
from django.utils.decorators import method_decorator
from django.forms.formsets import formset_factory, BaseFormSet, all_valid
from django.forms.models import modelformset_factory
from django.db.models import Q
from shared.utils import *
from issues.models import *
from issues.forms import *
from mcbv.edit import CreateView, UpdateView, FormSetView, ModelFormSetView
from mcbv.base import TemplateView
from mcbv.detail import DetailView
from mcbv.list_custom import DetailListCreateView, ListView
# }}}
def context_processor(request):
    """Template context processor: expose the application display name."""
    return {"app_name": "MangoTrac"}
# add_issue delete_issue
@staff_member_required
def update_issue(request, pk, mode=None, action=None):
    """AJAX view: toggle closed on/off, set progress, or delete an issue.

    mode is one of:
      "delete"   -- delete the issue and redirect to the admin changelist;
      "progress" -- set the integer progress to int(action);
      "closed"   -- action "on" sets the 'done' status, anything else 'open',
                    and a notification email is sent.
    Returns an empty HttpResponse for the AJAX caller.
    """
    issue = Issue.obj.get(pk=pk)
    open_code = settings.SPECIAL_STATUS_CODES["open"]
    done_code = settings.SPECIAL_STATUS_CODES["done"]
    s_open = Status.obj.filter(status=open_code).first()
    s_done = Status.obj.filter(status=done_code).first()
    if mode == "delete":
        issue.delete()
        return redir("admin:issues_issue_changelist")
    else:
        if mode == "progress":
            val = int(action)
            setattr(issue, mode, val)
        elif mode == "closed":
            # Reuse `mode` as the actual model field name to set below.
            mode = "status"
            if action == "on":
                val = s_done
                status = "closed"
            else:
                val = s_open
                status = "opened"
            # title = "Issue %s %s" % (issue, status)
            msg_tpl = "Issue '%s' was " + status + " <%s%s>\n\n%s"
            NotificationMixin().send_notification(issue, msg_tpl, make_diff=False, show_descr=False, request=request)
        # NOTE(review): `val` is unbound if mode is neither "progress" nor
        # "closed" (NameError) -- callers must pass a valid mode; confirm.
        setattr(issue, mode, val)
        issue.save()
    return HttpResponse('')
@staff_member_required
def delete_comment(request, pk):
    """Delete a single comment, then bounce back to the referring page."""
    comment = Comment.obj.get(pk=pk)
    comment.delete()
    return redir(referer(request))
class NotificationMixin:
    """Mixin that builds change summaries and emails them to interested users."""

    def diff(self, oldobj, obj):
        """Create a diff of `obj` vs. `oldobj`; description is handled using difflib module."""
        difflist = []
        # Fields to ignore (description_html is derived from description).
        skip = "description_html".split()
        nl = '\n'
        for fld in obj._meta.fields:
            name = fld.name
            if name not in skip:
                oldval = getattr(oldobj, fld.name)
                val = getattr(obj, fld.name)
                if name == "description":
                    # splitlines(1) keeps line endings, which Differ needs.
                    olddesc = oldobj.description.splitlines(1)
                    desc = obj.description.splitlines(1)
                    # Normalize the final line ending so a missing trailing
                    # newline doesn't show up as a spurious diff.
                    if olddesc:
                        olddesc[-1] = olddesc[-1].strip() + '\r\n'
                    if desc:
                        desc[-1] = desc[-1].strip() + '\r\n'
                    d = Differ()
                    result = list(d.compare(olddesc, desc))
                    # note: Differ returns full(?) content when there are no changes!!!?
                    if olddesc != desc:
                        difflist.extend( [nl + "Description diff:" + nl] + result + [nl] )
                else:
                    if oldval != val:
                        difflist.append("%s: changed from '%s' to '%s'" % (fld.name, oldval, val) + nl)
        diff = ''.join(difflist)
        return diff

    def send_notification(self, obj, msg_tpl, comment_body='', show_descr=True, make_diff=True, request=None):
        """ Send notification to creator / new|old owner on issue change.
        For description, show a diff; for other fields, show what it changed from / to.

        msg_tpl must contain four %s slots: title, server root, url, body.
        """
        request = request or self.request
        # Fetch the pre-save DB copy to diff against (obj carries the new values).
        oldobj = Issue.obj.get(pk=obj.pk) if make_diff else None
        # Body priority: explicit comment > field diff > full description > empty.
        if comment_body:
            body = comment_body
        elif oldobj:
            body = self.diff(oldobj, obj)
        elif show_descr:
            body = obj.description
        else:
            body = ''
        # from_ = "<EMAIL>"
        old_owner = Issue.obj.get(pk=obj.pk).owner # if owner changed, we need to notify him
        from_ = settings.DEFAULT_FROM_EMAIL
        serv_root = request.META["HTTP_ORIGIN"]
        url = reverse2("issue", dpk=obj.pk)
        values = [obj.title, serv_root, url, body]
        msg = msg_tpl % tuple(values)
        send_to = set()
        title = "%s (%s) #%s: %s" % (old_owner, obj.status, obj.pk, obj.title)
        # Notify old owner, new owner and creator (a set removes duplicates).
        send_to.add(old_owner)
        send_to.add(obj.owner)
        send_to.add(obj.creator)
        if settings.TEST_NOTIFY:
            send_to = [u.email for u in send_to if u] # use for testing
        else:
            # Don't email the user who made the change.
            send_to = [u.email for u in send_to if u and u!=request.user]
        if obj.cc:
            # cc is a whitespace-separated list of extra addresses.
            send_to.extend(obj.cc.split())
        send_mail(title, msg, from_, send_to, fail_silently=False)
class ReportList(ListView):
    """List all saved reports."""
    list_model = Report
    template_name = "reports.html"
class CreateReport(CreateView):
    """Create a new report; stamps the current user as creator after save."""
    form_model = Report
    modelform_class = ReportForm
    template_name = "report_form.html"

    def modelform_valid(self, modelform):
        # Save first (assigns a pk), then record who created the report.
        resp = super(CreateReport, self).modelform_valid(modelform)
        self.modelform_object.update(creator=self.request.user)
        return resp
class UpdateReport(UpdateView):
    """Edit an existing report's settings."""
    form_model = Report
    modelform_class = ReportForm
    template_name = "report_form.html"
class DuplicateReport(DetailView):
    """Clone an existing report and jump to editing the copy."""
    detail_model = Report

    def get(self, request, *args, **kwargs):
        report = self.get_detail_object()
        # Clearing the pk makes save() INSERT a new row instead of updating.
        report.pk = None
        report.name += " copy"
        report.save()
        return redir("update_report", report.pk)
class IssuesMixin(object):
    """Shared context and success-redirect behavior for issue create/update views."""

    def add_context(self):
        # Labels the templates should render in bold.
        return dict(bold_labels=settings.BOLD_LABELS)

    def get_success_url(self):
        """Return to view issue page on success."""
        # return reverse("admin:issues_issue_changelist") + "?status__id__exact=1&o=5.-3"
        return reverse2("issue", self.modelform_object.pk)
class UpdateIssue(IssuesMixin, UpdateView, NotificationMixin):
    """Edit an issue; emails a field-diff notification when anything changed."""
    form_model = Issue
    modelform_class = IssueForm
    msg_tpl = "Issue '%s' was updated <%s%s>\n\n%s"
    template_name = "issue_form.html"

    def modelform_invalid(self, modelform):
        # Invalid also covers the "preview" button: render the markdown
        # preview of the description instead of saving.
        preview = None
        post = self.request.POST
        if "preview" in post:
            preview = markdown(post["description"])
        return self.get_context_data(modelform=modelform, preview=preview)

    def modelform_valid(self, modelform):
        """ If form was changed, send notification email the (new) issue owner.
        Note: at the start of the function, FK relationships are already updated in `self.object`.
        """
        if modelform.has_changed():
            # Must notify BEFORE super() saves, so the diff against the DB works.
            self.send_notification(self.modelform_object, self.msg_tpl)
        return super(UpdateIssue, self).modelform_valid(modelform)
class CreateIssue(IssuesMixin, CreateView, NotificationMixin):
    """Create a single issue; notifies owner/creator with the new description."""
    form_model = Issue
    modelform_class = IssueForm
    msg_tpl = "Issue '%s' was created <%s%s>\n\n%s"
    template_name = "issue_form.html"

    def modelform_invalid(self, modelform):
        # Invalid also covers the "preview" button: render the markdown
        # preview of the description instead of saving.
        preview = None
        post = self.request.POST
        if "preview" in post:
            preview = markdown(post["description"])
        return self.get_context_data(modelform=modelform, preview=preview)

    def modelform_valid(self, modelform):
        resp = super(CreateIssue, self).modelform_valid(modelform)
        self.modelform_object.update(creator=self.request.user)
        # make_diff=False: there is no previous version to diff against.
        self.send_notification(self.modelform_object, self.msg_tpl, make_diff=False)
        return resp
class UpdateComment(UpdateView):
    """Edit an existing comment, then return to its issue page."""
    form_model = Comment
    modelform_class = CommentForm
    template_name = "issues/comment_form.html"

    def get_success_url(self):
        return self.modelform_object.issue.get_absolute_url()
class ViewIssue(DetailListCreateView, NotificationMixin):
    """ View issue, comments and new comment form.
    When new comment is submitted, issue status / owner may also be updated.
    """
    detail_model = Issue
    list_model = Comment
    modelform_class = CommentForm
    related_name = "comments"
    fk_attr = "issue"
    msg_tpl = "Comment was added to the Issue '%s' <%s%s>\n\n%s"
    template_name = "issue.html"

    def modelform_get(self, request, *args, **kwargs):
        """Get issue modelform with two fields: owner and status; return both comment & issue modelforms."""
        modelform2 = OwnerStatusForm(instance=self.detail_object)
        return self.get_modelform_context_data( modelform=self.get_modelform(), modelform2=modelform2 )

    def add_context(self):
        """List of fields to display at the top of issue."""
        fields = "status owner cc project priority_code difficulty type version tags creator created updated".split()
        return dict(fields=fields)

    def modelform2_valid(self, modelform):
        """Update issue based on the small form with 2 fields."""
        if modelform.has_changed():
            issue = modelform.save(commit=False)
            # Notify before saving so the diff against the DB copy works.
            self.send_notification(issue, UpdateIssue.msg_tpl)
            issue.save()

    def modelform_valid(self, modelform):
        """Add a comment; send notification email to the issue owner."""
        if modelform.has_changed():
            resp = super(ViewIssue, self).modelform_valid(modelform)
            obj = self.modelform_object
            obj.update(creator=self.user)
            self.send_notification(obj.issue, self.msg_tpl, comment_body=obj.description)
        # The owner/status mini-form is processed on every POST, even when
        # no comment text was entered.
        self.modelform2_valid( OwnerStatusForm(instance=self.detail_object, data=self.request.POST) )
        return redir(self.detail_object.get_absolute_url())
class AddIssues(IssuesMixin, FormSetView, NotificationMixin):
    """Create several new issues at once via a formset."""
    formset_model = Issue
    formset_form_class = IssueForm
    msg_tpl = "New Issue '%s' was created <%s%s>\n\n%s"
    extra = 5
    template_name = "add_issues.html"

    def get_success_url(self):
        # can't redir to issue page because -- multiple issues
        return reverse("admin:issues_issue_changelist") + "?status__id__exact=1&o=5.-3"

    def process_form(self, form):
        # Called per filled-in form: save, stamp creator, notify.
        issue = form.save(commit=False)
        issue.update(creator=self.request.user)
        self.send_notification(issue, self.msg_tpl, make_diff=False)
class AttachmentsView(ModelFormSetView, DetailView):
    """Add or delete file attachments for a single issue (formset of files).

    (Original docstring said "Create new issues." -- copy-paste leftover.)
    """
    detail_model = Issue
    formset_model = Attachment
    formset_form_class = AttachmentForm
    msg_tpl = "New attachments '%s' were added <%s%s>\n\n%s"
    can_delete = True
    extra = 15
    template_name = "attachments.html"

    def get_success_url(self):
        return self.detail_object.get_absolute_url()

    def process_form(self, form):
        # Attach the uploaded file to the issue and record who uploaded it.
        file = form.save(commit=False)
        file.creator = self.request.user
        file.issue = self.detail_object
        file.save()

    def formset_valid(self, formset):
        """Handle deletion of attachments."""
        for form in formset:
            if form.cleaned_data.get("file"):
                if form.cleaned_data.get("DELETE"):
                    form.instance.delete()
                else:
                    self.process_form(form)
        return HttpResponseRedirect(self.get_success_url())
class ReportView(DetailView):
    """Render a saved Report: grouped, sorted, filtered table of issues."""
    detail_model = Report
    template_name = "report.html"

    def resolve_filter_relations(self, arg_filters, kw_filters):
        """ Resolve 1to1 or MtoM filter relations (also add __in and split list of values)
        Example:
        priority_code = 1, 2 ==>
        priority_code__priority__in=(1,2)

        Mutates kw_filters in place; arg_filters is currently unused here.
        """
        # Map of FK/M2M filter names to (model, lookup field on that model).
        relation_filters = dict(
            owner = (User, "username"),
            status = (Status, "status"),
            priority_code = (Priority, "priority"),
            project = (Project, "project"),
            type = (Type, "type"),
            version = (Version, "version"),
            tags = (Tag, "tag"),
        )
        for flt, vals in kw_filters.items():
            vals = [v.strip() for v in vals.split(',')]
            if flt in relation_filters:
                cls, fldname = relation_filters[flt]
                kw_filters["%s__%s__in" % (flt, fldname)] = vals
                del kw_filters[flt]
            else:
                if len(vals) > 1:
                    kw_filters["%s__in" % flt] = vals
                    del kw_filters[flt]
                else:
                    kw_filters[flt] = vals[0]

    def add_context(self):
        """ Create grouped and filtered rows of issues based on GET args.
        Grouped columns are moved to the left side.
        e.g. ?group=owner.project & closed=0 & priority__gt=0
        => group by owner, project; filter out closed and 0 priority issues
        """
        group_by = ()
        filters = {}
        report = self.detail_object
        # by default, use all cols
        cols = "title owner status priority_code difficulty project type version created progress tags".split()
        # get groups and filters (one entry per non-blank line of the report's config)
        group_by = [l.strip() for l in report.group_by.splitlines() if l.strip()]
        sort_by = [l.strip() for l in report.sort_by.splitlines() if l.strip()]
        columns = [l.strip() for l in report.columns.splitlines() if l.strip()]
        columns = columns or cols
        arg_filters = []
        # Parse "name = value" lines into a dict (split on the first '=').
        kw_filters = dict(
            [(k.strip(), v.strip()) for k, v in
             [l.split('=', 1) for l in report.filters.splitlines()
              if '=' in l]
             ])
        self.resolve_filter_relations(arg_filters, kw_filters)
        # move to front (or insert) group by columns
        issues = Issue.obj.all().filter(*arg_filters, **kw_filters)
        group_by_names = [x.strip('-') for x in group_by] # remove order reversal char
        for n in reversed(group_by_names):
            if n in columns:
                columns.remove(n)
            columns.insert(0, n)
        # make table rows
        issues = issues.order_by( *(group_by + sort_by) )
        rows = []
        last_row = None
        for issue in issues:
            row = []
            ref_row = [] # reference row, includes omitted values
            # when new group starts, subsequent columns need to show the value even if it hasn't changed
            reset_group = False
            # make row
            for n, col in enumerate(columns):
                border = col not in group_by_names # no border for groups to make them stand out visually
                val = use_val = getattr(issue, col)
                if hasattr(val, "all"):
                    # M2M manager (e.g. tags): render as a comma-joined list.
                    val = use_val = sjoin(val.all(), ', ')
                if last_row and col in group_by_names:
                    last = last_row[n]
                    # see note above about reset_group
                    if val != last:
                        use_val = val
                        reset_group = True
                    elif not reset_group:
                        # Same group as the previous row: blank out the cell.
                        use_val = ''
                if col in ("type", "version") and use_val is None:
                    use_val = ''
                if col == "title":
                    use_val = "<a href='%s'>%s</a>" % (reverse2("issue", issue.pk), use_val)
                if col=="created" or col=="updated":
                    use_val = date(use_val, "DATETIME_FORMAT")
                if col == "description":
                    use_val = issue.description_html
                row.append((use_val, border))
                ref_row.append(val)
            last_row = ref_row
            rows.append(row)
        headers = [Issue._meta.get_field(c).verbose_name for c in columns]
        return dict(headers=headers, rows=rows)
| StarcoderdataPython |
3454731 | import argparse
import io
import os
import math
import uuid
import tensorflow as tf
import hypergan as hg
import hyperchamber as hc
import json
from hypergan.generators import *
from hypergan.search.random_search import RandomSearch
from hypergan.viewer import GlobalViewer
from common import *
from PIL import Image
import plotly.graph_objs as go
# CLI for the 2d-distribution GAN test.  contour_size and sample_points must
# be multiples of the batch size because sampling runs in batch-sized chunks.
arg_parser = ArgumentParser("Test your gan vs a known distribution", require_directory=False)
arg_parser.parser.add_argument('--distribution', '-t', type=str, default='circle', help='what distribution to test, options are circle, modes')
arg_parser.parser.add_argument('--contour_size', '-cs', type=int, default=128, help='number of points to plot the discriminator contour with. must be a multiple of batch_size')
arg_parser.parser.add_argument('--sample_points', '-p', type=int, default=512, help='number of scatter points to plot. must be a multiple of batch_size')
args = arg_parser.parse_args()
import plotly.plotly as py
import plotly.graph_objs as go
import plotly.figure_factory as ff
import plotly.io as pio
class Custom2DDiscriminator(BaseGenerator):
    """Tiny 2-layer MLP discriminator over 2-D points (real x stacked on fake g)."""

    # NOTE(review): features/skip_connections/input are accepted for interface
    # compatibility but unused; the mutable [] defaults are never mutated here.
    def __init__(self, gan, config, g=None, x=None, name=None, input=None, reuse=None, features=[], skip_connections=[]):
        self.x = x
        self.g = g
        GANComponent.__init__(self, gan, config, name=name, reuse=reuse)

    def create(self):
        """Build the discriminator graph on [x; g] and cache it as self.sample."""
        gan = self.gan
        # Default to the GAN's own inputs/generator when not injected.
        if self.x is None:
            self.x = gan.inputs.x
        if self.g is None:
            self.g = gan.generator.sample
        net = tf.concat(axis=0, values=[self.x, self.g])
        net = self.build(net)
        self.sample = net
        return net

    def build(self, net):
        """Two 16-unit linear layers with 'bipolar' activation, then 1 logit.

        (Removed dead locals: unused gan/config aliases and end_features.)
        """
        ops = self.ops
        layers = 2
        for i in range(layers):
            net = ops.linear(net, 16)
            net = ops.lookup('bipolar')(net)
        net = ops.linear(net, 1)
        self.sample = net
        return net
class Custom2DGenerator(BaseGenerator):
    """Tiny 2-layer MLP generator mapping latent samples to points."""

    def create(self):
        """Build the generator graph from gan.latent.sample; returns the output tensor."""
        gan = self.gan
        config = self.config
        ops = self.ops
        # The 2d test overrides end_features to 2 via config; default is 1.
        end_features = config.end_features or 1
        ops.describe('custom_generator')
        net = gan.latent.sample
        for i in range(2):
            net = ops.linear(net, 16)
            net = ops.lookup('bipolar')(net)
        net = ops.linear(net, end_features)
        # Removed a leftover debug print of the output tensor.
        self.sample = net
        return net
class Custom2DInputDistribution:
    """Synthetic 2-D input distributions (circle, modes, sin, ...) for GAN tests."""

    def __init__(self, args):
        with tf.device(args.device):
            def circle(x):
                # Project gaussian samples onto the unit circle (normalize rows).
                spherenet = tf.square(x)
                spherenet = tf.reduce_sum(spherenet, 1)
                lam = tf.sqrt(spherenet)
                return x/tf.reshape(lam,[int(lam.get_shape()[0]), 1])

            def modes(x):
                # Snap samples onto a 0.5-spaced grid of modes.
                shape = x.get_shape()
                return tf.round(x*2)/2.0#+tf.random_normal(shape, 0, 0.04)

            if args.distribution == 'circle':
                x = tf.random_normal([args.batch_size, 2])
                x = circle(x)
            elif args.distribution == 'modes':
                x = tf.random_uniform([args.batch_size, 2], -1, 1)
                x = modes(x)
            elif args.distribution == 'modal-gaussian':
                # Integer grid points blurred with a small gaussian offset.
                x = tf.random_uniform([args.batch_size, 2], -1, 1)
                y = tf.random_normal([args.batch_size, 2], stddev=0.04, mean=0.15)
                x = tf.round(x) + y
            elif args.distribution == 'sin':
                # Noisy sine curve, scaled into roughly [-1, 1].
                x = tf.random_uniform((1, args.batch_size), -10.5, 10.5 )
                x = tf.transpose(x)
                r_data = tf.random_normal((args.batch_size,1), mean=0, stddev=0.1)
                xy = tf.sin(0.75*x)*7.0+x*0.5+r_data*1.0
                x = tf.concat([xy,x], 1)/16.0
            elif args.distribution == 'static-point':
                x = tf.ones([args.batch_size, 2])

            # NOTE(review): an unknown --distribution leaves `x` unbound here.
            self.x = x
            self.xy = tf.zeros_like(self.x)
# Cached real samples / latent vectors; populated lazily on the first
# Custom2DSampler.sample() call so every frame plots the same points.
x_v, z_v = None, None
class Custom2DSampler(BaseSampler):
    """Renders the discriminator contour plus real/fake scatter plots via plotly."""

    def __init__(self, gan):
        self.gan = gan
        # Snapshot copies of all trainable variables, plus ops to restore them.
        self.copy_vars = [tf.Variable(x) for x in self.gan.variables()]
        self.reset_vars = [y.assign(x) for y, x in zip(self.copy_vars, self.gan.variables())]

    def sample(self, filename, save_samples):
        """Write one frame (contour + scatter) to filename; returns sample metadata."""
        gan = self.gan
        generator = gan.generator.sample
        sess = gan.session
        config = gan.config
        contours = args.contour_size
        # Evaluate d_real over a grid covering [-1.5, 1.5]^2, column by column,
        # in batch-sized chunks (contour_size must divide by batch size).
        x,y = np.meshgrid(np.arange(-1.5, 1.5, 3/contours), np.arange(-1.5, 1.5, 3/contours))
        d = []
        for i in range(args.contour_size):
            _x = np.reshape(x[:,i], [-1])
            _y = np.reshape(y[:,i], [-1])
            for j in range(args.contour_size // gan.batch_size()):
                offset = j*gan.batch_size()
                endoffset = (j+1)*gan.batch_size()
                _x_sample = _x[offset:endoffset]
                _y_sample = _y[offset:endoffset]
                _d = gan.session.run(gan.loss.d_real, {gan.inputs.x: [[__x,__y] for __x, __y in zip(_x_sample, _y_sample)]})
                d.append(_d)
        contour = go.Contour(
            z = np.reshape(d, [-1]),
            x = np.reshape(x, [-1]),
            y = np.reshape(y, [-1]),
            opacity=0.5,
            showlegend=False,
            contours = dict(
                start=-0.5,
                end=0.5,
                size=0.03,
            )
        )
        print(np.shape(x), np.shape(y))
        #z = sess.run(gan.discriminator.sample,
        global x_v, z_v
        # Fix one set of real points and latent vectors on the first call so
        # successive frames are directly comparable.
        if x_v is None:
            x_v = []
            z_v = []
            for j in range(args.sample_points // gan.batch_size()):
                _x_v, _z_v = sess.run([gan.inputs.x, gan.latent.sample])
                x_v.append(_x_v)
                z_v.append( _z_v)
            x_v = np.reshape(x_v, [-1,gan.inputs.x.shape[1]])
            z_v = np.reshape(z_v, [-1,gan.latent.sample.shape[1]])
        # Generate fake points from the cached latents, batch by batch.
        sample = []
        for j in range(args.sample_points // gan.batch_size()):
            offset = j*gan.batch_size()
            endoffset = (j+1)*gan.batch_size()
            z_v_sample = z_v[offset:endoffset]
            x_v_sample = x_v[offset:endoffset]
            _sample = sess.run(generator, {gan.inputs.x: x_v_sample, gan.latent.sample: z_v_sample})
            sample.append(_sample)
        sample = np.reshape(sample, [-1, 2])
        points = go.Scatter(x=sample[:,0], y=sample[:,1],
                    mode='markers',
                    marker = dict(
                        size = 10,
                        color = 'rgba(0, 152, 0, .8)',
                        line = dict(
                            width = 2,
                            color = 'rgb(0, 0, 0)'
                        )),
                    name='fake')
        xpoints = go.Scatter(x=x_v[:,0], y=x_v[:,1],
                    mode='markers',
                    marker = dict(
                        size = 10,
                        color = 'rgba(255, 182, 193, .9)',
                        line = dict(
                            width = 2,
                            color = 'rgb(0, 0, 0)'
                        )),
                    name='real')
        layout = go.Layout(hovermode='closest',
                 xaxis=dict(range=[-1.5,1.5]),
                 yaxis=dict(range=[-1.5,1.5]),
                 width=1920,
                 showlegend=False,
                 height=1080
                )
        # Render the figure in-memory (no temp file) and hand it to the viewer.
        fig = go.Figure([contour, xpoints, points], layout=layout)
        data = pio.to_image(fig, format='png')
        #pio.write_image(fig,"sample.png")
        img = Image.open(io.BytesIO(data))
        #img = Image.open("sample.png").convert("RGB")
        #img.save("save.jpg")
        #plt.savefig(filename)
        self.plot(np.array(img), filename, save_samples, regularize=False)
        return [{'image': filename, 'label': '2d'}]
# Load the requested config; in 'search' mode, replace it with the random
# search template and randomize the rbbr trainer's optimizer hyperparameters.
config = lookup_config(args)
if args.action == 'search':
    config = hc.Config(json.loads(open(os.getcwd()+'/randomsearch.json', 'r').read()))
    config['trainer']['rbbr']['optimizer']['optimizer']['learn_rate'] = random.choice([0.1,0.01,0.001, 0.005, 0.0001])
    config['trainer']['rbbr']['optimizer']['optimizer']['beta1'] = random.choice([0.1, 0.0001, 0.5, 0.9, 0.999])
    config['trainer']['rbbr']['optimizer']['optimizer']['beta2'] = random.choice([0.1, 0.0001, 0.5, 0.9, 0.999])
    config['trainer']['rbbr']['optimizer']['beta'] = random.choice([0, 1, 0.5, 0.99, 0.1])
    config['trainer']['rbbr']['optimizer']['gamma'] = random.choice([0, 1, 0.5, 0.99, 0.1, 10])
    config['trainer']['rbbr']['optimizer']['rho'] = random.choice([0, 1, 0.5, 0.99, 0.1])
def train(config, args):
    """Train the 2d-test GAN; return summed accuracy metrics, or None on NaN/Inf."""
    title = "[hypergan] 2d-test " + args.config
    GlobalViewer.title = title
    GlobalViewer.enabled = args.viewer
    with tf.device(args.device):
        # Force our toy generator/discriminator classes into the config.
        config.generator['end_features'] = 2
        config.generator["class"]="class:__main__.Custom2DGenerator" # TODO
        config.discriminator["class"]="class:__main__.Custom2DDiscriminator" # TODO
        gan = hg.GAN(config, inputs = Custom2DInputDistribution(args))
        gan.name = args.config
        # Two-sided distribution accuracy between real and generated samples.
        accuracy_x_to_g=distribution_accuracy(gan.inputs.x, gan.generator.sample)
        accuracy_g_to_x=distribution_accuracy(gan.generator.sample, gan.inputs.x)
        sampler = Custom2DSampler(gan)
        gan.selected_sampler = sampler
        tf.train.start_queue_runners(sess=gan.session)
        samples = 0
        steps = args.steps
        sampler.sample("samples/000000.png", args.save_samples)
        metrics = [accuracy_x_to_g, accuracy_g_to_x]
        sum_metrics = [0 for metric in metrics]
        for i in range(steps):
            gan.step()
            if args.viewer and i % args.sample_every == 0:
                samples += 1
                print("Sampling "+str(samples), args.save_samples)
                sample_file="samples/%06d.png" % (samples)
                sampler.sample(sample_file, args.save_samples)
            # Accumulate accuracy only over the last 10% of training.
            if i > steps * 9.0/10:
                for k, metric in enumerate(gan.session.run(metrics)):
                    sum_metrics[k] += metric
            if i % 300 == 0:
                # NOTE(review): iterates metric NAMES here (keys()), running
                # each named metric; abort early on NaN/Inf values.
                for k, metric in enumerate(gan.metrics().keys()):
                    metric_value = gan.session.run(gan.metrics()[metric])
                    print("--", metric, metric_value)
                    if math.isnan(metric_value) or math.isinf(metric_value):
                        print("Breaking due to invalid metric")
                        return None
        tf.reset_default_graph()
        gan.session.close()
    return sum_metrics
# Entry dispatch: 'train' just reports metrics; 'search' additionally saves
# the sampled config and appends its metrics to a CSV for later comparison.
if args.action == 'train':
    metrics = train(config, args)
    print("Resulting metrics:", metrics)
elif args.action == 'search':
    metric_sum = train(config, args)
    if 'search_output' in args:
        search_output = args.search_output
    else:
        search_output = "2d-test-results.csv"
    # Persist the randomly drawn config under a unique name so the CSV row
    # can be traced back to it.
    config_filename = "2d-measure-accuracy-"+str(uuid.uuid4())+'.json'
    hc.Selector().save(config_filename, config)
    with open(search_output, "a") as myfile:
        total = sum(metric_sum)
        myfile.write(config_filename+","+",".join([str(x) for x in metric_sum])+","+str(total)+"\n")
else:
    print("Unknown action: "+args.action)
| StarcoderdataPython |
1704657 | # -*- coding: utf-8 -*-
# Copyright (c) 2022 by <NAME>
import os
import sys
import glob
import torch
import random
import timeit
import configs
import logging
import numpy as np
import pandas as pd
import transformers
from tqdm import tqdm, trange
from adapt_mrc_model import AdaptMRCModel
from data_generator import QADataset, qa_collate
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers import (
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
WEIGHTS_NAME,
AdamW,
AutoConfig,
AutoModelForQuestionAnswering,
AutoTokenizer,
get_linear_schedule_with_warmup,
squad_convert_examples_to_features,
)
from transformers.data.metrics.squad_metrics import (
compute_predictions_log_probs,
compute_predictions_logits,
squad_evaluate,
get_raw_scores,
)
from transformers.data.processors.squad import SquadResult, SquadV1Processor, SquadV2Processor
from transformers.trainer_utils import is_main_process
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
# Module-level logger for the training script.
logger = logging.getLogger(__name__)

os.environ["CUDA_VISIBLE_DEVICES"] = "0" # specify which GPU(s) to be used

# All transformers config classes that support question answering, and the
# corresponding model-type identifiers (for CLI validation / help text).
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def set_seed(configs):
    """Seed python, numpy and torch RNGs from configs.seed for reproducibility."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(configs.seed)
    if configs.n_gpu > 0:
        torch.cuda.manual_seed_all(configs.seed)
def to_list(tensor):
    """Convert a (possibly GPU-resident) tensor to a plain nested Python list."""
    detached = tensor.detach().cpu()
    return detached.tolist()
def train(model, tokenizer):
    """Train the model.

    Standard HuggingFace-style loop: builds the dataloader, AdamW optimizer
    and linear-warmup schedule, optionally resumes from a checkpoint, and runs
    gradient-accumulated training with periodic TensorBoard logging and
    checkpointing.

    Relies on the module-level ``configs`` namespace and ``logger``.
    Returns ``(global_step, tr_loss / global_step)``.
    """
    set_seed(configs)
    if configs.local_rank in [-1, 0]:
        # Only the main process writes TensorBoard summaries.
        tb_writer = SummaryWriter()
    configs.train_batch_size = configs.per_gpu_train_batch_size * max(1, configs.n_gpu)
    train_dataset = QADataset()
    train_dataloader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=configs.train_batch_size,
        shuffle=False,
        num_workers=configs.num_workers,
        collate_fn=qa_collate
    )
    if configs.max_steps > 0:
        # A fixed optimization-step budget overrides num_train_epochs.
        t_total = configs.max_steps
        configs.num_train_epochs = configs.max_steps // (
            len(train_dataloader) // configs.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // configs.gradient_accumulation_steps * configs.num_train_epochs
    # Prepare optimizer and schedule (linear warmup and decay)
    no_decay = ["bias", "LayerNorm.weight"]
    # check the parameters: weight decay is applied to all parameters except
    # biases and LayerNorm weights.
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": configs.weight_decay,
        },
        {
            "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0
        },
    ]
    optimizer = AdamW(
        optimizer_grouped_parameters,
        lr=configs.learning_rate,
        eps=configs.adam_epsilon
    )
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=configs.warmup_steps,
        # NOTE(review): stretching the horizon by lr_multiplier means the LR
        # does not decay to zero at t_total — presumably intentional; confirm.
        num_training_steps=t_total * configs.lr_multiplier
    )
    # Check if saved optimizer or scheduler states exist
    if (os.path.isfile(os.path.join(configs.pretrained_model_name_or_path, "optimizer.pt"))
            and os.path.isfile(os.path.join(configs.pretrained_model_name_or_path, "scheduler.pt"))):
        # Load in optimizer and scheduler states
        optimizer.load_state_dict(torch.load(os.path.join(configs.pretrained_model_name_or_path, "optimizer.pt")))
        scheduler.load_state_dict(torch.load(os.path.join(configs.pretrained_model_name_or_path, "scheduler.pt")))
    if configs.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=configs.fp16_opt_level)
    # multi-gpu training (should be after apex fp16 initialization)
    if configs.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training (should be after apex fp16 initialization)
    if configs.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[configs.local_rank], output_device=configs.local_rank, find_unused_parameters=True
        )
    # Train!
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_dataset))
    logger.info(" Num Epochs = %d", configs.num_train_epochs)
    logger.info(" Instantaneous batch size per GPU = %d", configs.per_gpu_train_batch_size)
    logger.info(
        " Total train batch size (w. parallel, distributed & accumulation) = %d",
        configs.train_batch_size
        * configs.gradient_accumulation_steps
        * (torch.distributed.get_world_size() if configs.local_rank != -1 else 1),
    )
    logger.info(" Gradient Accumulation steps = %d", configs.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)
    global_step = 0
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    # Check if continuing training from a checkpoint
    if os.path.exists(configs.pretrained_model_name_or_path):
        try:
            # set global_step to gobal_step of last saved checkpoint from model path
            # (checkpoint directories are expected to be named "...-<step>").
            checkpoint_suffix = configs.pretrained_model_name_or_path.split("-")[-1].split("/")[0]
            global_step = int(checkpoint_suffix)
            epochs_trained = global_step // (len(train_dataloader) // configs.gradient_accumulation_steps)
            steps_trained_in_current_epoch = global_step % (
                len(train_dataloader) // configs.gradient_accumulation_steps)
            logger.info(" Continuing training from checkpoint, will skip to saved global_step")
            logger.info(" Continuing training from epoch %d", epochs_trained)
            logger.info(" Continuing training from global step %d", global_step)
            logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
        except ValueError:
            # Path does not end in a step number: start fresh.
            logger.info(" Starting fine-tuning.")
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(
        epochs_trained, int(configs.num_train_epochs), desc="Epoch", disable=configs.local_rank not in [-1, 0]
    )
    # Added here for reproductibility
    set_seed(configs)
    ite = 0
    # NOTE(review): `patience` is assigned but never used in this function.
    patience = configs.patience_threshold
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=configs.local_rank not in [-1, 0])
        local_step = 0
        mean_loss = []
        for step, batch in enumerate(epoch_iterator):
            local_step += 1
            # Skip past any already trained steps if resuming training
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                continue
            model.train()
            outputs = model(batch)
            encodings, factoid_qa_outputs, aux_qa_outputs, \
                adv_loss, aux_qa_loss, original_qa_loss, loss = outputs
            if configs.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel (not distributed) training
            if configs.gradient_accumulation_steps > 1:
                loss = loss / configs.gradient_accumulation_steps
            if configs.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            # NOTE(review): this appends the loss *tensor* (not .item()), which
            # keeps graph references alive; mean_loss is never read afterwards.
            mean_loss.append(loss)
            if (step + 1) % configs.gradient_accumulation_steps == 0:
                # One optimizer step per gradient_accumulation_steps batches.
                if configs.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), configs.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), configs.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                # Log metrics
                if configs.local_rank in [-1, 0] and configs.logging_steps > 0 and global_step % configs.logging_steps == 0:
                    # Only evaluate when single GPU otherwise metrics may not average well
                    if configs.local_rank == -1 and configs.evaluate_during_training:
                        results = evaluate(model, tokenizer, None, in_domain=None, out_domain=None, evaluate_all=False,
                                           evaluate_domain_0=False)
                        for key, value in results.items():
                            tb_writer.add_scalar("eval_{}".format(key), value, global_step)
                    tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar("loss", (tr_loss - logging_loss) / configs.logging_steps, global_step)
                    logging_loss = tr_loss
                # Save model checkpoint
                if configs.local_rank in [-1, 0] and configs.save_steps > 0 and global_step % configs.save_steps == 0:
                    output_dir = os.path.join(configs.output_model_dir, "checkpoint-{}".format(global_step))
                    if not os.path.exists(output_dir) and configs.local_rank in [-1, 0]:
                        os.makedirs(output_dir)
                    # Take care of distributed/parallel training
                    model_to_save = model.module if hasattr(model, 'module') else model
                    torch.save(model_to_save.state_dict(), f'{output_dir}/model.pt')
                    tokenizer.save_pretrained(output_dir)
                    # torch.save(configs, os.path.join(output_dir, "training_configs.bin"))
                    logger.info("Saving model checkpoint to %s", output_dir)
                    torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                    torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                    logger.info("Saving optimizer and scheduler states to %s", output_dir)
            if configs.max_steps > 0 and global_step > configs.max_steps:
                epoch_iterator.close()
                break
        ite += 1
        if (ite % 10 == 0):
            # Every 10 epochs, increase the gradient-reversal lambda while it
            # is still below 0.04.
            if (configs.reverse_layer_lambda < 0.04):
                configs.reverse_layer_lambda = configs.reverse_layer_lambda + configs.lambda_delta
        if configs.max_steps > 0 and global_step > configs.max_steps:
            train_iterator.close()
            break
    if configs.local_rank in [-1, 0]:
        tb_writer.close()
    return global_step, tr_loss / global_step
def main():
    """Set up the environment, build model/tokenizer, train, and save the result.

    Everything is read from the module-level ``configs`` namespace: device and
    distributed settings, paths, and training switches. When
    ``configs.do_train`` is set, runs :func:`train` and saves the fine-tuned
    model and tokenizer under ``configs.output_model_dir``.
    """
    set_seed(configs)
    # Warn when the sliding-window stride cannot fit inside a feature window.
    if configs.doc_stride >= configs.max_seq_length - configs.max_query_length:
        logger.warning(
            "WARNING - You've set a doc stride which may be superior to the document length in some "
            "examples. This could result in errors when building features from the examples. Please reduce the doc "
            "stride or increase the maximum length to ensure the features are correctly built."
        )
    # Refuse to clobber an existing, non-empty output directory unless asked to.
    if (
        os.path.exists(configs.output_dir)
        and os.listdir(configs.output_dir)
        and configs.do_train
        and not configs.overwrite_output_dir
    ):
        raise ValueError(
            "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
                configs.output_dir
            )
        )
    # Setup distant debugging if needed
    if configs.server_ip and configs.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(configs.server_ip, configs.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup CUDA, GPU & distributed training
    if configs.local_rank == -1 or configs.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not configs.no_cuda else "cpu")
        configs.n_gpu = 0 if configs.no_cuda else torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(configs.local_rank)
        device = torch.device("cuda", configs.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        configs.n_gpu = 1
    configs.device = device
    # Setup logging: full logs on the main process, warnings elsewhere.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if configs.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        configs.local_rank,
        device,
        configs.n_gpu,
        bool(configs.local_rank != -1),
        configs.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(configs.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    # Set seed
    set_seed(configs)
    # Load pretrained model and tokenizer
    if configs.local_rank not in [-1, 0]:
        # Make sure only the first process in distributed training will download model & vocab
        torch.distributed.barrier()
    configs.model_type = configs.model_type.lower()
    tokenizer = AutoTokenizer.from_pretrained(
        configs.tokenizer_name if configs.tokenizer_name else configs.pretrained_model_name_or_path,
        do_lower_case=configs.do_lower_case,
        cache_dir=configs.cache_dir if configs.cache_dir else None,
        use_fast=False,  # SquadDataset is not compatible with Fast tokenizers which have a smarter overflow handeling
    )
    if configs.local_rank == 0:
        # First process finished downloading; release the other ranks.
        torch.distributed.barrier()
    model = AdaptMRCModel()
    model.to(configs.device)
    logger.info("Training/evaluation parameters %s", configs)
    # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if configs.fp16 is set.
    # Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will
    # remove the need for this code, but it is still valid.
    if configs.fp16:
        try:
            import apex
            apex.amp.register_half_function(torch, "einsum")
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
    # Training
    if configs.do_train:
        global_step, tr_loss = train(model, tokenizer)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
    # Save the trained model and the tokenizer (main process only).
    if configs.do_train and (configs.local_rank == -1 or torch.distributed.get_rank() == 0):
        if not os.path.exists(configs.output_model_dir):
            os.makedirs(configs.output_model_dir)
        logger.info("Saving model checkpoint to %s", configs.output_model_dir)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`.
        # Take care of distributed/parallel training.
        model_to_save = model.module if hasattr(model, 'module') else model
        save_folder_dir = f'{configs.output_model_dir}/adapt-mrc-mbbank'
        print(f"Save model to {save_folder_dir}")
        # BUG FIX: the original condition was inverted (`if os.path.exists(...)`),
        # so it raised FileExistsError on reruns and never created a missing
        # folder on the first run. Create the directory idempotently instead.
        os.makedirs(save_folder_dir, exist_ok=True)
        model_to_save.pretrained_model.save_pretrained(save_folder_dir)
        tokenizer.save_pretrained(save_folder_dir)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
# Metrics dataset of programming-language keywords that will be searched for
# in job postings. Each entry pairs a keyword (or a list of alias spellings)
# with a "seen" flag that starts out False.
_LANGUAGE_KEYWORDS = [
    'python',
    'c#',
    'c',
    'c++',
    'ruby',
    'java',
    ['javascript', 'js'],
    ['html', 'html5'],
    'css',
    'sql',
    'r',
    'assembly',
    'swift',
    'pascal',
    ['objective-c', 'objectivec'],
    'php',
    ['go', 'golang'],
    'perl',
    'f#',
    'scala',
    'apex',
    'kotlin',
    ['typescript', 'ts'],
]
pl = [[keyword, False] for keyword in _LANGUAGE_KEYWORDS]
# Framework/tool keywords searched for in job postings, in the same
# [keyword-or-aliases, seen-flag] shape as `pl` above.
_FRAMEWORK_KEYWORDS = [
    ['dotnet', '.net', 'asp.net', 'aspnet', 'net'],
    ['react', 'reactjs', 'react.js'],
    ['angular', 'angular.js', 'angularjs'],
    'django',
    'splunk',
    'spring',
    'rails',
    'redux',
    ['express', 'expressjs', 'express.js'],
    ['vue', 'vuejs', 'vue.js'],
    'flask',
    'laravel',
    'symfony',
    ['gatsby', 'gatsbyjs', 'gatsby.js'],
    'sinatra',
    'materialize',
    'bootstrap',
    'tailwind',
    'ionic',
    'xamarin',
    'phonegap',
    'native',
    'corona',
    'jquery',
    'flutter',
    'pytorch',
    'pandas',
    ['sci-kit', 'scikit'],
    ['ml.net', 'mlnet'],
    'chainer',
    'pytest',
    'jest',
    'mocha',
    'jasmine',
    'cypress',
    'scrapy',
    ['node', 'nodejs', 'npm'],
    ['git', 'github'],
    ['api', 'apis'],
    ['sdk', 'sdks'],
    ['postgres', 'postgresql', 'psql'],
    'mysql',
    'docker',
    'jenkins',
    'jira',
    ['rally', 'rallydev'],
    'azure',
    'kubernetes',
    'swagger',
    ['scrum', 'agile'],
]
f = [[keyword, False] for keyword in _FRAMEWORK_KEYWORDS]
5032126 | <reponame>CodingForVega/silverstrike
from datetime import date
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.urls import reverse, reverse_lazy
from django.views import generic
from silverstrike.forms import DepositForm, RecurringTransactionForm, TransferForm, WithdrawForm
from silverstrike.lib import last_day_of_month
from silverstrike.models import RecurringTransaction, Transaction
class RecurrenceCreateView(LoginRequiredMixin, generic.edit.CreateView):
    """Create a new recurring transaction; redirects to the recurrence list."""
    form_class = RecurringTransactionForm
    model = RecurringTransaction
    success_url = reverse_lazy('recurrences')
class ReccurrenceSetNextOccurence(LoginRequiredMixin, generic.View):
    """Advance due recurrences past transactions that were already booked.

    NOTE(review): the class name misspells "Recurrence"/"Occurrence"; renaming
    would break external references (URL config), so it is documented instead.
    """
    def post(self, request, *args, **kwargs):
        # For each recurrence due this month, push its date forward until it
        # is later than the linked transaction, then persist the change.
        for r in RecurringTransaction.objects.due_in_month():
            old = r.date
            # First transaction of the recurrence's related set — presumably
            # the most recently booked one; confirm the manager's ordering.
            t = r.recurrences.first()
            if not t:
                continue
            while t.date >= r.date:
                r.date = r.update_date()
            if old != r.date:
                r.save()
        return HttpResponseRedirect(reverse('recurrences'))
class RecurrenceDetailView(LoginRequiredMixin, generic.DetailView):
    """Read-only detail page for a single recurring transaction."""
    model = RecurringTransaction
    context_object_name = 'recurrence'
class RecurrenceUpdateView(LoginRequiredMixin, generic.edit.UpdateView):
    """Edit an existing recurring transaction; redirects to the recurrence list."""
    form_class = RecurringTransactionForm
    model = RecurringTransaction
    success_url = reverse_lazy('recurrences')
class RecurrenceTransactionCreateView(LoginRequiredMixin, generic.edit.CreateView):
    """Book a concrete transaction from a recurring-transaction template."""
    model = Transaction
    template_name = 'silverstrike/transaction_edit.html'

    def get_form(self, form_class=None):
        """Choose the form matching the recurrence's transaction type."""
        self.recurrence = get_object_or_404(RecurringTransaction, pk=self.kwargs['pk'])
        form_by_type = {
            Transaction.WITHDRAW: WithdrawForm,
            Transaction.DEPOSIT: DepositForm,
        }
        # Anything that is neither a withdrawal nor a deposit is a transfer.
        chosen_form = form_by_type.get(self.recurrence.transaction_type, TransferForm)
        return chosen_form(**self.get_form_kwargs())

    def get_initial(self):
        """Prefill the form fields from the recurrence template."""
        initial = super().get_initial()
        initial.update({
            'title': self.recurrence.title,
            'source_account': self.recurrence.src,
            'destination_account': self.recurrence.dst,
            'amount': self.recurrence.amount,
            'date': self.recurrence.date,
            'recurrence': self.recurrence.pk,
            'category': self.recurrence.category,
        })
        return initial

    def form_valid(self, form):
        """Save the transaction, link it to its recurrence and advance the recurrence date."""
        response = super().form_valid(form)
        self.object.recurrence = self.recurrence
        self.object.save()
        self.recurrence.update_date(save=True)
        return response
class RecurrenceDeleteView(LoginRequiredMixin, generic.edit.DeleteView):
    """Confirm-and-delete view for a recurring transaction."""
    model = RecurringTransaction
    success_url = reverse_lazy('recurrences')
class RecurringTransactionIndex(LoginRequiredMixin, generic.ListView):
    """Overview of all active (non-disabled) recurring transactions.

    Adds this month's projected income, expenses, their difference and the
    amount still pending before month end to the template context.
    """
    template_name = 'silverstrike/recurring_transactions.html'
    context_object_name = 'transactions'
    queryset = RecurringTransaction.objects.exclude(interval=RecurringTransaction.DISABLED)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['menu'] = 'recurrences'
        today = date.today()
        month_end = last_day_of_month(today)
        income = 0
        expenses = 0
        remaining = 0
        for transaction in context['transactions']:
            # Only monthly recurrences and annual recurrences that fall in the
            # current month contribute to this month's totals.
            counts_this_month = transaction.interval == RecurringTransaction.MONTHLY or (
                transaction.interval == RecurringTransaction.ANNUALLY and
                transaction.date.month == today.month and
                transaction.date.year == today.year)
            if not counts_this_month:
                continue
            if transaction.transaction_type == Transaction.WITHDRAW:
                expenses += transaction.amount
                if transaction.date <= month_end:
                    remaining -= transaction.amount
            elif transaction.transaction_type == Transaction.DEPOSIT:
                income += transaction.amount
                if transaction.date <= month_end:
                    remaining += transaction.amount
        context['expenses'] = expenses
        context['income'] = income
        context['total'] = income - expenses
        context['remaining'] = remaining
        return context
class DisabledRecurrencesView(LoginRequiredMixin, generic.ListView):
    """Paginated list of recurrences whose interval is set to DISABLED."""
    template_name = 'silverstrike/disabled_recurrences.html'
    queryset = RecurringTransaction.objects.filter(interval=RecurringTransaction.DISABLED)
    paginate_by = 20
| StarcoderdataPython |
8194895 | <reponame>panc-test/python-study
"""
退出程序:
exit()
quit()
"""
# for i in range(10):
#
# if i == 5:
# # print(quit())
# # quit()
# print(exit())
# exit()
#
# print(i)
print(type(range(10))) | StarcoderdataPython |
1684394 | """Constant learning rate."""
from optimus.types import LRMethod
class Constant(LRMethod):
    """Used when a single learning rate will be used for all steps."""

    def __init__(self, learning_rate: float):
        # The fixed learning rate returned for every step.
        self.learning_rate = learning_rate

    def __call__(self, *args, **kwargs) -> float:
        """Return the constant learning rate; any step arguments are ignored."""
        return self.learning_rate
| StarcoderdataPython |
3285346 | # (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# (c) 2017 Red Hat Inc.
from ansible.modules.cloud.amazon.s3_bucket import compare_policies
# Fixture: minimal bucket policy granting PutObjectAcl to two IAM users.
small_policy_one = {
    'Version': '2012-10-17',
    'Statement': [
        {
            'Action': 's3:PutObjectAcl',
            'Sid': 'AddCannedAcl2',
            'Resource': 'arn:aws:s3:::test_policy/*',
            'Effect': 'Allow',
            'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']}
        }
    ]
}

# The same as small_policy_one, except the single resource is in a list and the contents of Statement are jumbled
small_policy_two = {
    'Version': '2012-10-17',
    'Statement': [
        {
            'Effect': 'Allow',
            'Action': 's3:PutObjectAcl',
            'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']},
            'Resource': ['arn:aws:s3:::test_policy/*'],
            'Sid': 'AddCannedAcl2'
        }
    ]
}

# Fixture: two-statement policy (PutObjectAcl for two users, PutObject(+Acl) for one).
larger_policy_one = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "Test",
            "Effect": "Allow",
            "Principal": {
                "AWS": [
                    "arn:aws:iam::XXXXXXXXXXXX:user/testuser1",
                    "arn:aws:iam::XXXXXXXXXXXX:user/testuser2"
                ]
            },
            "Action": "s3:PutObjectAcl",
            "Resource": "arn:aws:s3:::test_policy/*"
        },
        {
            "Effect": "Allow",
            "Principal": {
                "AWS": "arn:aws:iam::XXXXXXXXXXXX:user/testuser2"
            },
            "Action": [
                "s3:PutObject",
                "s3:PutObjectAcl"
            ],
            "Resource": "arn:aws:s3:::test_policy/*"
        }
    ]
}

# The same as larger_policy_one, except having a list of length 1 and jumbled contents
larger_policy_two = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Principal": {
                "AWS": ["arn:aws:iam::XXXXXXXXXXXX:user/testuser2"]
            },
            "Effect": "Allow",
            "Resource": "arn:aws:s3:::test_policy/*",
            "Action": [
                "s3:PutObject",
                "s3:PutObjectAcl"
            ]
        },
        {
            "Action": "s3:PutObjectAcl",
            "Principal": {
                "AWS": [
                    "arn:aws:iam::XXXXXXXXXXXX:user/testuser1",
                    "arn:aws:iam::XXXXXXXXXXXX:user/testuser2"
                ]
            },
            "Sid": "Test",
            "Resource": "arn:aws:s3:::test_policy/*",
            "Effect": "Allow"
        }
    ]
}

# Different than larger_policy_two: a different principal is given (testuser3 instead of testuser2)
larger_policy_three = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Principal": {
                "AWS": ["arn:aws:iam::XXXXXXXXXXXX:user/testuser2"]
            },
            "Effect": "Allow",
            "Resource": "arn:aws:s3:::test_policy/*",
            "Action": [
                "s3:PutObject",
                "s3:PutObjectAcl"]
        },
        {
            "Action": "s3:PutObjectAcl",
            "Principal": {
                "AWS": [
                    "arn:aws:iam::XXXXXXXXXXXX:user/testuser1",
                    "arn:aws:iam::XXXXXXXXXXXX:user/testuser3"
                ]
            },
            "Sid": "Test",
            "Resource": "arn:aws:s3:::test_policy/*",
            "Effect": "Allow"
        }
    ]
}
def test_compare_small_policies_without_differences():
    """Two small policies must compare equal when they differ only in:
    * the ordering of keys inside the statement
    * one policy wrapping its single resource in a length-1 list
    """
    difference_found = compare_policies(small_policy_one, small_policy_two)
    assert difference_found is False
def test_compare_large_policies_without_differences():
    """Two larger policies must compare equal when they differ only in:
    * the ordering of statements and of keys inside each statement
    * one policy wrapping a single Principal in a length-1 list
    """
    difference_found = compare_policies(larger_policy_one, larger_policy_two)
    assert difference_found is False
def test_compare_larger_policies_with_difference():
    """Two otherwise-identical policies with one different principal must differ."""
    difference_found = compare_policies(larger_policy_two, larger_policy_three)
    assert difference_found
def test_compare_smaller_policy_with_larger():
    """Policies of different sizes must always compare as different."""
    difference_found = compare_policies(larger_policy_one, small_policy_one)
    assert difference_found
| StarcoderdataPython |
4879087 | # Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
import argparse
import numpy as np
import PIL.Image
import dnnlib
import dnnlib.tflib as tflib
import re
import sys
import time
import socket
import cv2
sys.path.append('Library')
import pretrained_networks
import SpoutSDK
import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
def msg_to_bytes(msg):
    """Encode a text message as UTF-8 bytes for sending over the UDP socket."""
    return bytes(msg, 'utf-8')
#----------------------------------------------------------------------------
def generate_images(network_pkl, truncation_psi):
    """Receive latent vectors via Spout, run the StyleGAN2 generator, send frames back.

    Endless loop: each iteration reads a 1024x1024 texture from the Spout
    receiver, decodes a 512-dim latent from the first pixel row, generates an
    image with the loaded network, draws it to the pygame/OpenGL window and
    publishes it through the Spout sender. UDP is used only for a one-shot
    "Ready" handshake back to the host (e.g. TouchDesigner).

    NOTE(review): indentation of this function was reconstructed; generation
    is gated on the control pixel (`update > 1`) — confirm against upstream.
    """
    # spout setup
    width = 1024
    height = 1024
    display = (width,height)
    senderName = "outputGAN"
    receiverName = "input"
    silent = True
    # window setup
    pygame.init()
    pygame.display.set_caption(senderName)
    pygame.display.set_mode(display, DOUBLEBUF|OPENGL)
    # OpenGL init
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    glOrtho(0,width,height,0,1,-1)
    glMatrixMode(GL_MODELVIEW)
    glDisable(GL_DEPTH_TEST)
    glClearColor(0.0,0.0,0.0,0.0)
    glEnable(GL_TEXTURE_2D)
    # init spout receiver
    spoutReceiverWidth = width
    spoutReceiverHeight = height
    # create spout receiver
    spoutReceiver = SpoutSDK.SpoutReceiver()
    # Its signature in c++ looks like this: bool pyCreateReceiver(const char* theName, unsigned int theWidth, unsigned int theHeight, bool bUseActive);
    spoutReceiver.pyCreateReceiver(receiverName,width,height, False)
    # create textures for spout receiver and spout sender
    textureReceiveID = glGenTextures(1)
    # initalise receiver texture
    glBindTexture(GL_TEXTURE_2D, textureReceiveID)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
    # copy data into texture
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, None )
    glBindTexture(GL_TEXTURE_2D, 0)
    spoutSender = SpoutSDK.SpoutSender()
    spoutSenderWidth = width
    spoutSenderHeight = height
    # Its signature in c++ looks like this: bool CreateSender(const char *Sendername, unsigned int width, unsigned int height, DWORD dwFormat = 0);
    spoutSender.CreateSender(senderName, spoutSenderWidth, spoutSenderHeight, 0)
    # create textures for spout receiver and spout sender
    textureSendID = glGenTextures(1)
    # setup UDP (send handshake to udp_port, listen on rec_port)
    udp_ip = "127.0.0.1"
    udp_port = 7000
    rec_port = 6000
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        print('Setting up UDP on ip={} and port={}'.format(udp_ip, udp_port))
    except:
        print('Failed to create socket')
        sys.exit()
    try:
        sock.bind(('', rec_port))
        print('Listening on ip={} and port={}'.format(udp_ip, rec_port))
    except:
        print('Bind failed')
        sys.exit()
    starting_msg = "Ready"
    sock.sendto( msg_to_bytes(starting_msg), (udp_ip, udp_port))
    # load nmmetwork and prepare to generate
    print('Loading networks from "%s"...' % network_pkl)
    _G, _D, Gs = pretrained_networks.load_networks(network_pkl)
    noise_vars = [var for name, var in Gs.components.synthesis.vars.items() if name.startswith('noise')]
    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    Gs_kwargs.randomize_noise = False
    print()
    print('LISTENING')
    while (True):
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sock.close()
                quit()
        #background = False
        # receive texture
        # Its signature in c++ looks like this: bool pyReceiveTexture(const char* theName, unsigned int theWidth, unsigned int theHeight, GLuint TextureID, GLuint TextureTarget, bool bInvert, GLuint HostFBO);
        spoutReceiver.pyReceiveTexture(receiverName, spoutReceiverWidth, spoutReceiverHeight, textureReceiveID.item(), GL_TEXTURE_2D, False, 0)
        glBindTexture(GL_TEXTURE_2D, textureReceiveID)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        # copy pixel byte array from received texture
        data = glGetTexImage(GL_TEXTURE_2D, 0, GL_RGB, GL_UNSIGNED_BYTE, outputType=None) #Using GL_RGB can use GL_RGBA
        glBindTexture(GL_TEXTURE_2D, 0)
        # swap width and height data around due to oddness with glGetTextImage. http://permalink.gmane.org/gmane.comp.python.opengl.user/2423
        data.shape = (data.shape[1], data.shape[0], data.shape[2])
        # First pixel's red channel acts as an update flag sent by the host.
        update = data[0,0,0]
        if update > 1:
            # Decode the latent: first 512 red values of row 0, mapped from
            # byte range [0,255] to roughly [-3.5, 3.5].
            z = data[0,:512,0]
            z = z / 255.0 * 7 - 3.5
            z = np.array(z)
            z = np.expand_dims(z, axis=0)
            if truncation_psi is not None:
                Gs_kwargs.truncation_psi = truncation_psi
            #print('Generating image for seed %d ...' % (seed))
            # Fixed seed so the synthesis noise maps are identical every frame.
            rnd = np.random.RandomState(0)
            #z = rnd.randn(1, *Gs.input_shape[1:]) # [minibatch, component]
            #print(z)
            #print(z.shape)
            tflib.set_vars({var: rnd.randn(*var.shape.as_list()) for var in noise_vars}) # [height, width]
            images = Gs.run(z, None, **Gs_kwargs) # [minibatch, height, width, channel]
            output = images[0]
            # setup the texture so we can load the output into it
            glBindTexture(GL_TEXTURE_2D, textureSendID);
            glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
            glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
            glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
            # copy output into texture
            glTexImage2D( GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, output )
            # setup window to draw to screen
            glActiveTexture(GL_TEXTURE0)
            # clean start
            glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT )
            # reset drawing perspective
            glLoadIdentity()
            # draw texture on screen
            glBegin(GL_QUADS)
            glTexCoord(0,0)
            glVertex2f(0,0)
            glTexCoord(1,0)
            glVertex2f(width,0)
            glTexCoord(1,1)
            glVertex2f(width,height)
            glTexCoord(0,1)
            glVertex2f(0,height)
            glEnd()
            if silent:
                pygame.display.iconify()
            # update window
            pygame.display.flip()
            spoutSender.SendTexture(textureSendID.item(), GL_TEXTURE_2D, spoutSenderWidth, spoutSenderHeight, False, 0)
    # Unreachable cleanup (loop exits only via quit() above); kept for clarity.
    #sock.sendto(msg_to_bytes(reply), (udp_ip, udp_port))
    sock.close()
    pygame.quit()
    quit()
#----------------------------------------------------------------------------
def _parse_num_range(s):
'''Accept either a comma separated list of numbers 'a,b,c' or a range 'a-c' and return as a list of ints.'''
range_re = re.compile(r'^(\d+)-(\d+)$')
m = range_re.match(s)
if m:
return range(int(m.group(1)), int(m.group(2))+1)
vals = s.split(',')
return [int(x) for x in vals]
#----------------------------------------------------------------------------
# Usage examples appended verbatim to the argparse --help epilog in main().
_examples = '''examples:
# Generate ffhq uncurated images (matches paper Figure 12)
python %(prog)s generate-images --network=gdrive:networks/stylegan2-ffhq-config-f.pkl --seeds=6600-6625 --truncation-psi=0.5
# Generate ffhq curated images (matches paper Figure 11)
python %(prog)s generate-images --network=gdrive:networks/stylegan2-ffhq-config-f.pkl --seeds=66,230,389,1518 --truncation-psi=1.0
# Generate uncurated car images (matches paper Figure 12)
python %(prog)s generate-images --network=gdrive:networks/stylegan2-car-config-f.pkl --seeds=6000-6025 --truncation-psi=0.5
# Generate style mixing example (matches style mixing video clip)
python %(prog)s style-mixing-example --network=gdrive:networks/stylegan2-ffhq-config-f.pkl --row-seeds=85,100,75,458,1500 --col-seeds=55,821,1789,293 --truncation-psi=1.0
'''
#----------------------------------------------------------------------------
def main():
    """CLI entry point: parse the subcommand and submit it through dnnlib's run system."""
    print()
    print()
    print()
    print('GENERATOR STARTED')
    parser = argparse.ArgumentParser(
        description='''StyleGAN2 generator.
Run 'python %(prog)s <subcommand> --help' for subcommand help.''',
        epilog=_examples,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    subparsers = parser.add_subparsers(help='Sub-commands', dest='command')
    parser_generate_images = subparsers.add_parser('generate-images', help='Generate images')
    parser_generate_images.add_argument('--network', help='Network pickle filename', dest='network_pkl', default='results/002332.pkl')
    parser_generate_images.add_argument('--truncation-psi', type=float, help='Truncation psi (default: %(default)s)', default=0.5)
    parser_generate_images.add_argument('--result-dir', help='Root directory for run results (default: %(default)s)', default='results', metavar='DIR')
    args = parser.parse_args()
    kwargs = vars(args)
    subcmd = kwargs.pop('command')
    if subcmd is None:
        print ('Error: missing subcommand. Re-run with --help for usage.')
        sys.exit(1)
    # Configure a local, single-GPU dnnlib submit run that reuses the source tree.
    sc = dnnlib.SubmitConfig()
    sc.num_gpus = 1
    sc.submit_target = dnnlib.SubmitTarget.LOCAL
    sc.local.do_not_copy_source_files = True
    sc.run_dir_root = kwargs.pop('result_dir')
    sc.run_desc = subcmd
    # Maps CLI subcommands to dotted function paths for dnnlib.submit_run.
    # NOTE(review): only 'generate-images' has a registered subparser, so the
    # 'style-mixing-example' entry is unreachable from this CLI — confirm intent.
    func_name_map = {
        'generate-images': 'TD_listen.generate_images',
        'style-mixing-example': 'TD_listen.style_mixing_example'
    }
    dnnlib.submit_run(sc, func_name_map[subcmd], **kwargs)

#----------------------------------------------------------------------------

if __name__ == "__main__":
    main()

#----------------------------------------------------------------------------
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.