hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a0c81bfd26f2a5645994b62a19327844a56c59f
| 3,943
|
py
|
Python
|
tests/benchmark_djangocache.py
|
allaudet/python-diskcache
|
2774689c60bac3ebd06246943bca2014779ee2c6
|
[
"Apache-2.0"
] | null | null | null |
tests/benchmark_djangocache.py
|
allaudet/python-diskcache
|
2774689c60bac3ebd06246943bca2014779ee2c6
|
[
"Apache-2.0"
] | null | null | null |
tests/benchmark_djangocache.py
|
allaudet/python-diskcache
|
2774689c60bac3ebd06246943bca2014779ee2c6
|
[
"Apache-2.0"
] | null | null | null |
"""Benchmark diskcache.DjangoCache
$ export PYTHONPATH=/Users/grantj/repos/python-diskcache
$ python tests/benchmark_djangocache.py > tests/timings_djangocache.txt
"""
from __future__ import print_function
import collections as co
import multiprocessing as mp
import os
import random
import shutil
import sys
import time
import warnings
if sys.hexversion < 0x03000000:
range = xrange
import cPickle as pickle
else:
import pickle
from utils import display
PROCS = 8           # worker processes spawned per cache backend
OPS = int(1e5)      # cache operations performed by each worker
RANGE = int(1.1e3)  # key space size; keys drawn uniformly from [0, RANGE)
WARMUP = int(1e3)   # operations at the start of each worker excluded from timings
def setup():
    """Point Django at the test settings and initialize it.

    Must run (in each process) before ``django.core.cache`` is imported,
    because the cache backends are resolved from the settings module.
    """
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tests.settings')
    import django
    django.setup()
def worker(num, name):
    """Hammer the named Django cache with a 90/9/1 get/set/delete mix.

    Per-action timings (post-warmup only) are pickled to ``output-<num>.pkl``
    so the dispatcher can merge them across processes.
    """
    setup()
    from django.core.cache import caches

    cache = caches[name]
    random.seed(num)  # deterministic per-process operation stream
    results = co.defaultdict(list)

    time.sleep(0.01)  # Let other processes start.

    for index in range(OPS):
        key = str(random.randrange(RANGE)).encode('utf-8')
        value = str(index).encode('utf-8') * random.randrange(1, 100)
        roll = random.random()

        if roll < 0.900:
            action = 'get'
            begin = time.time()
            outcome = cache.get(key)
            finish = time.time()
            missed = outcome is None
        elif roll < 0.990:
            action = 'set'
            begin = time.time()
            outcome = cache.set(key, value)
            finish = time.time()
            missed = outcome == False
        else:
            action = 'delete'
            begin = time.time()
            outcome = cache.delete(key)
            finish = time.time()
            missed = outcome == False

        if index > WARMUP:
            elapsed = finish - begin
            results[action].append(elapsed)
            if missed:
                results[action + '-miss'].append(elapsed)

    with open('output-%d.pkl' % num, 'wb') as writer:
        pickle.dump(results, writer, protocol=pickle.HIGHEST_PROTOCOL)
def prepare(name):
    """Pre-populate the named Django cache with every key in [0, RANGE).

    Arguments:
        name {str} -- cache alias from the Django CACHES setting
    """
    setup()
    from django.core.cache import caches
    obj = caches[name]

    for key in range(RANGE):
        key = str(key).encode('utf-8')
        obj.set(key, key)

    try:
        obj.close()
    except Exception:
        # Best-effort close: not every cache backend implements close().
        # (Was a bare ``except:``, which also swallowed KeyboardInterrupt
        # and SystemExit.)
        pass
def dispatch():
    """Benchmark each configured cache backend with PROCS concurrent workers."""
    setup()
    from django.core.cache import caches

    for name in ['locmem', 'memcached', 'redis', 'diskcache', 'filebased']:
        shutil.rmtree('tmp', ignore_errors=True)

        # Warm the cache from a separate process before timing starts.
        filler = mp.Process(target=prepare, args=(name,))
        filler.start()
        filler.join()

        workers = [
            mp.Process(target=worker, args=(num, name))
            for num in range(PROCS)
        ]
        for proc in workers:
            proc.start()
        for proc in workers:
            proc.join()

        # Merge the per-process timing dumps, removing each file once read.
        combined = co.defaultdict(list)
        for num in range(PROCS):
            filename = 'output-%d.pkl' % num
            with open(filename, 'rb') as reader:
                partial = pickle.load(reader)
            for action in partial:
                combined[action].extend(partial[action])
            os.remove(filename)

        display(name, combined)
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # (short flag, long flag, type, default, help text)
    options = [
        ('-p', '--processes', int, PROCS, 'Number of processes to start'),
        ('-n', '--operations', float, OPS, 'Number of operations to perform'),
        ('-r', '--range', int, RANGE, 'Range of keys'),
        ('-w', '--warmup', float, WARMUP, 'Number of warmup operations before timings'),
    ]
    for short, full, kind, default, text in options:
        parser.add_argument(short, full, type=kind, default=default, help=text)

    args = parser.parse_args()

    # Override the module-level benchmark parameters from the command line.
    PROCS = int(args.processes)
    OPS = int(args.operations)
    RANGE = int(args.range)
    WARMUP = int(args.warmup)

    dispatch()
| 22.66092
| 75
| 0.583566
|
4a0c82c30180fbcdcf5b0cd27c559054ce409fb5
| 11,030
|
py
|
Python
|
src/cluster/fine_tuning/custom_trainers.py
|
maximumSHOT-HSE/CurriculumLearning
|
bf5291812a9ec3feb083d3d84b579329781c8a6a
|
[
"MIT"
] | null | null | null |
src/cluster/fine_tuning/custom_trainers.py
|
maximumSHOT-HSE/CurriculumLearning
|
bf5291812a9ec3feb083d3d84b579329781c8a6a
|
[
"MIT"
] | null | null | null |
src/cluster/fine_tuning/custom_trainers.py
|
maximumSHOT-HSE/CurriculumLearning
|
bf5291812a9ec3feb083d3d84b579329781c8a6a
|
[
"MIT"
] | null | null | null |
import collections
from transformers import Trainer
from transformers import PreTrainedModel
from transformers.trainer_callback import TrainerState
import datasets
import os
import torch
from torch.utils.data import RandomSampler, Sampler, Dataset, DataLoader, SequentialSampler
from typing import Iterator, Optional, Sequence, List, TypeVar, Generic, Sized
import numpy as np
import math
from transformers.file_utils import is_torch_tpu_available
from transformers.trainer_pt_utils import get_tpu_sampler
class SequentialTrainer(Trainer):
    """Trainer that visits training examples in plain dataset order."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:
        dataset = self.train_dataset
        # Samplers need a sized, map-style dataset; otherwise let the
        # DataLoader handle iteration itself.
        unsized = not isinstance(dataset, collections.abc.Sized)
        if unsized or isinstance(dataset, torch.utils.data.IterableDataset):
            return None
        return SequentialSampler(dataset)
class ReverseSequentialSampler(Sampler):
    """Yield dataset indices from the last element down to the first."""

    def __init__(self, data_source):
        super().__init__(data_source)
        self.data_source = data_source

    def __iter__(self):
        return reversed(range(len(self.data_source)))

    def __len__(self) -> int:
        return len(self.data_source)
class ReverseSequentialTrainer(Trainer):
    """Trainer that visits training examples in reverse dataset order."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:
        dataset = self.train_dataset
        # Only sized, map-style datasets support index-based sampling.
        unsized = not isinstance(dataset, collections.abc.Sized)
        if unsized or isinstance(dataset, torch.utils.data.IterableDataset):
            return None
        return ReverseSequentialSampler(dataset)
class CurriculumSamplerHyperbole(Sampler):
    """Curriculum sampler favouring bins near a sweeping target position.

    The dataset is split into ``n_bins`` consecutive bins. A target ``t``
    sweeps from just before the first bin to just past the last one; at each
    position, bin ``b`` is drawn with probability proportional to
    ``1 / (|b - t| + 1) ** ro``, and a uniform offset inside the bin picks
    the concrete index. Optionally the easiest ``drop_ratio`` fraction of
    indices is dropped from the final order.
    """

    def __init__(
            self,
            data_source: Optional[Sized],
            state: TrainerState,
            n_bins: int,
            window_width: int,
            n_see: int,
            ro: float,
            drop: bool = False,
            drop_ratio: float = 0.1
    ):
        super().__init__(data_source)
        self.data_source = data_source
        self.state = state
        self.n_bins = n_bins
        self.size = len(self.data_source)
        self.window_width = window_width
        self.n_see = n_see
        self.bin_size = math.ceil(self.size / n_bins)
        self.ro = ro
        self.drop = drop
        self.drop_ratio = drop_ratio
        self.indices = self.build_indices()

    def build_indices(self):
        chunks = []
        bins = np.arange(self.n_bins)
        # Per-draw count; the target makes n_bins + 2*window_width - 2 stops.
        draw = math.ceil(self.size / (self.n_bins + 2 * self.window_width - 2))
        for target in range(-self.window_width + 1, self.n_bins + self.window_width - 1):
            # Hyperbolic weighting centred on the current target bin.
            weights = 1 / (np.abs(bins - target) + 1) ** self.ro
            weights = weights / weights.sum()
            for _ in range(self.n_see):
                picked = (np.random.choice(self.n_bins, draw, p=weights) * self.bin_size
                          + np.random.choice(self.bin_size, draw))
                chunks.append(picked[picked < self.size])
        flat = np.concatenate(chunks).tolist()
        if not self.drop:
            return flat
        cutoff = int(self.drop_ratio * self.size)
        return [index for index in flat if index > cutoff]

    def __iter__(self):
        return iter(self.indices)

    def __len__(self):
        return len(self.indices)
class CurriculumTrainerHyperbole(Trainer):
    """Trainer that wires ``CurriculumSamplerHyperbole`` as the train sampler."""

    def __init__(self, n_bins=10, window_width=3, n_see=3, ro=0.5, drop=False, drop_ratio=0.1, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Sampler hyper-parameters are kept on the trainer and forwarded
        # whenever a fresh sampler is requested.
        self.n_bins = n_bins
        self.window_width = window_width
        self.n_see = n_see
        self.ro = ro
        self.drop = drop
        self.drop_ratio = drop_ratio

    def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:
        return CurriculumSamplerHyperbole(
            data_source=self.train_dataset,
            state=self.state,
            n_bins=self.n_bins,
            window_width=self.window_width,
            n_see=self.n_see,
            ro=self.ro,
            drop=self.drop,
            drop_ratio=self.drop_ratio,
        )
class CurriculumSamplerDifficultyBiased(Sampler):
    """Sampler biased toward harder (higher-index) bins.

    On pass ``lowest`` it samples uniformly from bins ``[lowest, n_bins)``
    and repeats ``n_bins - lowest`` times, so the last bins participate in
    every pass while the first bin appears only in the first one.
    """

    def __init__(
            self,
            data_source: Optional[Sized],
            state: TrainerState,
            n_bins: int,
            n_see: int
    ):
        super().__init__(data_source)
        self.data_source = data_source
        self.state = state
        self.n_bins = n_bins
        self.n_see = n_see
        self.size = len(self.data_source)
        self.bin_size = math.ceil(self.size / n_bins)
        self.indices = self.build_indices()

    def build_indices(self):
        chunks = []
        # Per-draw count chosen so the expected total is about n_see * size
        # (there are n_bins * (n_bins + 1) / 2 draws overall).
        draw = math.ceil(self.n_see * self.size * 2 / self.n_bins / (self.n_bins + 1))
        for lowest in range(self.n_bins):
            weights = np.zeros(self.n_bins)
            weights[lowest:] = 1
            weights = weights / weights.sum()
            for _ in range(self.n_bins - lowest):
                picked = (np.random.choice(self.n_bins, draw, p=weights) * self.bin_size
                          + np.random.choice(self.bin_size, draw))
                chunks.append(picked[picked < self.size])
        return np.concatenate(chunks).tolist()

    def __iter__(self):
        return iter(self.indices)

    def __len__(self):
        return len(self.indices)
class CurriculumTrainerDifficultyBiased(Trainer):
    """Trainer wiring ``CurriculumSamplerDifficultyBiased`` as the train sampler."""

    def __init__(self, n_bins=10, n_see=3, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.n_bins = n_bins
        self.n_see = n_see

    def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:
        return CurriculumSamplerDifficultyBiased(
            data_source=self.train_dataset,
            state=self.state,
            n_bins=self.n_bins,
            n_see=self.n_see,
        )
class CurriculumSamplerCompetenceBased(Sampler):
    """Competence-based curriculum sampler.

    Each step draws a batch uniformly from a growing prefix of the dataset;
    the prefix fraction follows a 'sqrt' or 'linear' competence schedule that
    starts at ``c0`` and reaches 1 after ``T = curriculum_ratio * max_steps``
    steps.
    """

    def get_sqrt_competence(self):
        def competence(t):
            return min(1, math.sqrt(t * (1 - self.c0 ** 2) / self.T + self.c0 ** 2))
        return competence

    def get_linear_comptence(self):
        def competence(t):
            return min(1, t * (1 - self.c0) / self.T + self.c0)
        return competence

    def __init__(
            self,
            data_source: Optional[Sized],
            curriculum_ratio: float,
            max_steps: int,
            batch_size: int,
            c0: float = 0.2,
            type: str = 'sqrt'
    ):
        super().__init__(data_source)
        self.data_source = data_source
        self.curriculum_ratio = curriculum_ratio
        self.max_steps = max_steps
        self.batch_size = batch_size
        assert type in ['sqrt', 'linear']
        self.type = type
        self.c0 = c0
        self.T = int(curriculum_ratio * max_steps)
        if self.type == 'sqrt':
            self.competence = self.get_sqrt_competence()
        else:
            self.competence = self.get_linear_comptence()
        self.size = len(self.data_source)
        self.ps = []  # prefix size used at every step, kept for inspection

    def __iter__(self):
        for step in range(self.max_steps):
            bound = math.ceil(self.competence(step) * self.size)
            bound = max(1, min(bound, self.size))
            self.ps.append(bound)
            batch = np.random.choice(a=bound, size=self.batch_size, replace=True)
            for index in batch:
                yield int(index)

    def __len__(self):
        return self.max_steps * self.batch_size
class CurriculumTrainerCompetenceBased(Trainer):
    """Trainer wiring ``CurriculumSamplerCompetenceBased`` as the train sampler."""

    def __init__(self, curriculum_ratio=0.2, c0=0.2, type='sqrt', *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The competence schedule needs a fixed total number of steps.
        assert self.args.max_steps > 0
        self.curriculum_ratio = curriculum_ratio
        self.c0 = c0
        self.type = type

    def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:
        return CurriculumSamplerCompetenceBased(
            data_source=self.train_dataset,
            curriculum_ratio=self.curriculum_ratio,
            max_steps=self.args.max_steps,
            batch_size=self.args.train_batch_size,
            c0=self.c0,
            type=self.type,
        )
class FromFileSampler(Sampler):
    """Read the training index order from a text file, one index per line."""

    def __init__(self, data_source, file: str = None):
        super().__init__(data_source)
        self.data_source = data_source
        self.file = file

    def __iter__(self):
        with open(self.file, 'r') as handle:
            for raw in handle:
                # Strip whitespace plus stray NUL bytes around each index.
                yield int(raw.strip().strip('\x00'))

    def __len__(self) -> int:
        return len(self.data_source)
class FromFileTrainer(Trainer):
    """Trainer whose training order comes from an index file via FromFileSampler."""

    def __init__(self, file: str = None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.file = file

    def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:
        dataset = self.train_dataset
        # Only sized, map-style datasets support index-based sampling.
        unsized = not isinstance(dataset, collections.abc.Sized)
        if unsized or isinstance(dataset, torch.utils.data.IterableDataset):
            return None
        return FromFileSampler(dataset, self.file)
class LadderSampler(Sampler):
    """Staircase curriculum sampler.

    The curriculum phase (``curriculum_ratio * max_steps`` steps) is split
    into ``n_stairs`` stages; during stage ``s`` each batch is drawn
    uniformly from the first ``(s + 1) / n_stairs`` fraction of the dataset,
    so the full dataset is in play once the curriculum ends.
    """

    def __init__(
            self,
            data_source,
            curriculum_ratio: float,
            max_steps: int,
            n_stairs: int,
            batch_size: int,
    ):
        super().__init__(data_source)
        self.data_source = data_source
        self.curriculum_ratio = curriculum_ratio  # fraction of steps spent on curriculum
        self.max_steps = max_steps
        self.n_stairs = n_stairs
        self.batch_size = batch_size
        self.ps = []  # prefix size used at every step, kept for inspection

    def __iter__(self):
        curriculum_length = int(self.curriculum_ratio * self.max_steps)
        # BUG FIX: when curriculum_length < n_stairs the stair length was 0,
        # which raised ZeroDivisionError at ``t // stair_length`` below.
        stair_length = max(1, int(curriculum_length / self.n_stairs))
        for t in range(self.max_steps):
            stair_height = (t // stair_length + 1) / self.n_stairs
            stair_height = np.clip(stair_height, 0, 1)
            prefix_size = int(stair_height * len(self.data_source))
            prefix_size = np.clip(prefix_size, 1, len(self.data_source))
            self.ps.append(prefix_size)
            ids = np.random.choice(a=prefix_size, size=self.batch_size, replace=True).reshape(-1)
            for id_ in ids:
                yield int(id_)

    def __len__(self):
        return self.max_steps * self.batch_size
class LadderTrainer(Trainer):
    """Trainer wiring ``LadderSampler`` as the training sampler."""

    def __init__(
            self,
            curriculum_ratio: float = 0.7,
            n_stairs: int = 3,
            *args,
            **kwargs
    ):
        super().__init__(*args, **kwargs)
        # The sampler derives its schedule from a fixed args.max_steps.
        assert self.args.max_steps > 0
        self.curriculum_ratio = curriculum_ratio
        self.n_stairs = n_stairs

    def _get_train_sampler(self):
        return LadderSampler(
            data_source=self.train_dataset,
            curriculum_ratio=self.curriculum_ratio,
            max_steps=self.args.max_steps,
            n_stairs=self.n_stairs,
            batch_size=self.args.train_batch_size,
        )
| 32.063953
| 113
| 0.59447
|
4a0c8344aeb64b181bcadd144816892ed1c745ae
| 228
|
py
|
Python
|
metasearch/tests/unit_test/view/__init__.py
|
suzanagi/materials-researchactivity-uoa-2020-public-metasearch-mosaicsearch_publication
|
37553698e6f778b313922dca23c4ed40530d8f31
|
[
"MIT"
] | null | null | null |
metasearch/tests/unit_test/view/__init__.py
|
suzanagi/materials-researchactivity-uoa-2020-public-metasearch-mosaicsearch_publication
|
37553698e6f778b313922dca23c4ed40530d8f31
|
[
"MIT"
] | null | null | null |
metasearch/tests/unit_test/view/__init__.py
|
suzanagi/materials-researchactivity-uoa-2020-public-metasearch-mosaicsearch_publication
|
37553698e6f778b313922dca23c4ed40530d8f31
|
[
"MIT"
] | null | null | null |
from metasearch.tests.unit_test.view.development_utilities import DevelopmentUtilitiesTests
from metasearch.tests.unit_test.view.tests import MetasearchFunctionTests
from metasearch.tests.unit_test.view.scraping_modules import *
| 76
| 91
| 0.894737
|
4a0c83e1f766ba95924abd78dffab29df30806d6
| 883
|
py
|
Python
|
BotHandler.py
|
Krylovsentry/nature-morning-telegram-bot
|
a287f4b31eabb95d58a2b3733c390825817465b6
|
[
"MIT"
] | null | null | null |
BotHandler.py
|
Krylovsentry/nature-morning-telegram-bot
|
a287f4b31eabb95d58a2b3733c390825817465b6
|
[
"MIT"
] | null | null | null |
BotHandler.py
|
Krylovsentry/nature-morning-telegram-bot
|
a287f4b31eabb95d58a2b3733c390825817465b6
|
[
"MIT"
] | null | null | null |
import requests
class BotHandler:
    """Minimal Telegram Bot API client using long polling."""

    def __init__(self, token):
        """Store the bot token and build the API base URL."""
        self.token = token
        self.api_url = "https://api.telegram.org/bot{}/".format(token)

    def get_updates(self, offset=None, timeout=180):
        """Long-poll the getUpdates endpoint; return the list of updates."""
        method = 'getUpdates'
        params = {'timeout': timeout, 'offset': offset}
        resp = requests.get(self.api_url + method, params)
        result_json = resp.json()['result']
        return result_json

    def send_message(self, chat_id, text):
        """Send *text* to *chat_id*; return the raw HTTP response."""
        params = {'chat_id': chat_id, 'text': text}
        method = 'sendMessage'
        resp = requests.post(self.api_url + method, params)
        return resp

    def get_last_update(self):
        """Return the most recent update, or None when there are no updates.

        BUG FIX: the empty-result branch indexed
        ``get_result[len(get_result)]``, which is always out of range and
        raised IndexError; an empty result now yields None.
        """
        get_result = self.get_updates()
        if get_result:
            return get_result[-1]
        return None
| 29.433333
| 70
| 0.608154
|
4a0c84b5ec44680bf829b65c160c61aeb5592522
| 22
|
py
|
Python
|
src/simple_backup/__init__.py
|
Am6puk/Simple_Mysql_Backup
|
a9cfcfd421fbd09d5dd1a693456065bd24e6ee4f
|
[
"MIT"
] | 1
|
2021-06-08T09:45:47.000Z
|
2021-06-08T09:45:47.000Z
|
src/simple_backup/__init__.py
|
Am6puk/Simple_Mysql_Backup
|
a9cfcfd421fbd09d5dd1a693456065bd24e6ee4f
|
[
"MIT"
] | null | null | null |
src/simple_backup/__init__.py
|
Am6puk/Simple_Mysql_Backup
|
a9cfcfd421fbd09d5dd1a693456065bd24e6ee4f
|
[
"MIT"
] | null | null | null |
__author__ = 'Am6puk'
| 11
| 21
| 0.727273
|
4a0c8547703baecc438ff42074c6295224409465
| 14,004
|
py
|
Python
|
archai/common/metrics.py
|
cclauss/archai
|
a5fb8f937f7f1319e3204120803b2a045e9f768b
|
[
"MIT"
] | 1
|
2020-10-03T18:18:41.000Z
|
2020-10-03T18:18:41.000Z
|
archai/common/metrics.py
|
cclauss/archai
|
a5fb8f937f7f1319e3204120803b2a045e9f768b
|
[
"MIT"
] | null | null | null |
archai/common/metrics.py
|
cclauss/archai
|
a5fb8f937f7f1319e3204120803b2a045e9f768b
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import time
import copy
from typing import List, Mapping, Optional, Tuple
import pathlib
import math
import statistics
from collections import defaultdict
from torch import Tensor
import yaml
from . import utils, ml_utils
from .common import logger, get_tb_writer
from .apex_utils import ApexUtils
class Metrics:
    """Record top1, top5, loss metrics, track best so far.

    There are 3 levels of metrics:
    1. Run level - these for the one call of 'fit', example, best top1
    2. Epoch level - these are the averages maintained top1, top5, loss
    3. Step level - these are for every step in epoch

    The pre_run must be called before fit call which will reset all metrics. Similarly
    pre_epoch will reset running averages and pre_step will reset step level metrics like average step time.

    The post_step will simply update the running averages while post_epoch updates
    best we have seen for each epoch.
    """

    def __init__(self, title:str, apex:Optional[ApexUtils], logger_freq:int=50) -> None:
        """Create the metrics object to maintain epoch stats

        Arguments:
            title {str} -- descriptive name of the stage for which metrics are collected
            apex {Optional[ApexUtils]} -- distributed-training helper; None disables dist reductions

        Keyword Arguments:
            logger_freq {int} -- Must be > 0 for epoch level logging, the step level logging is decided by this number (default: {50})
        """
        self.logger_freq = logger_freq
        self.title = title
        self._apex = apex
        self._reset_run()

    def _reset_run(self)->None:
        # Fresh run state: new epoch list, step counter, and the logger path
        # captured now so tensorboard scalars land under this stage's section.
        self.run_metrics = RunMetrics()
        self.global_step = -1
        self._tb_path = logger.path()

    def pre_run(self)->None:
        """Reset all metrics; call once before the 'fit' loop starts."""
        self._reset_run()
        self.run_metrics.pre_run()

    def post_run(self)->None:
        """Close the run and log timing averages plus best train/val epochs."""
        self.run_metrics.post_run()

        # logging
        if self.logger_freq > 0:
            with logger.pushd('timings'):
                logger.info({'epoch':self.run_metrics.epoch_time_avg(),
                             'step': self.run_metrics.step_time_avg(),
                             'run': self.run_metrics.duration()})
                if self.is_dist():
                    # sums across workers for totals, mean for per-step time
                    logger.info({'dist_epoch_sum': self.reduce_sum(self.run_metrics.epoch_time_avg()),
                                 'dist_step': self.reduce_mean(self.run_metrics.step_time_avg()),
                                 'dist_run_sum': self.reduce_sum(self.run_metrics.duration())})

            best_train, best_val = self.run_metrics.best_epoch()

            with logger.pushd('best_train'):
                logger.info({'epoch': best_train.index,
                             'top1': best_train.top1.avg})
                if self.is_dist():
                    logger.info({'dist_epoch': self.reduce_mean(best_train.index),
                                 'dist_top1': self.reduce_mean(best_train.top1.avg)})

            if best_val:
                with logger.pushd('best_val'):
                    logger.info({'epoch': best_val.index,
                                 'top1': best_val.val_metrics.top1.avg})
                    if self.is_dist():
                        logger.info({'dist_epoch': self.reduce_mean(best_val.index),
                                     'dist_top1': self.reduce_mean(best_val.val_metrics.top1.avg)})

    def pre_step(self, x: Tensor, y: Tensor):
        """Start the step timer on the current epoch and advance global_step."""
        self.run_metrics.cur_epoch().pre_step()
        self.global_step += 1

    def post_step(self, x: Tensor, y: Tensor, logits: Tensor,
                  loss: Tensor, steps: int) -> None:
        """Update running averages from one batch; log every logger_freq steps."""
        assert len(x)==len(y) and len(y)==len(logits) and len(loss.shape)==0

        # update metrics after optimizer step
        batch_size = x.size(0)
        top1, top5 = ml_utils.accuracy(logits, y, topk=(1, 5))
        epoch = self.run_metrics.cur_epoch()
        epoch.post_step(top1.item(), top5.item(),
                        loss.item(), batch_size)

        if self.logger_freq > 0 and \
                ((epoch.step+1) % self.logger_freq == 0):
            logger.info({'top1': epoch.top1.avg,
                         'top5': epoch.top5.avg,
                         'loss': epoch.loss.avg,
                         'step_time': epoch.step_time.last})

            if self.is_dist():
                logger.info({'dist_top1': self.reduce_mean(epoch.top1.avg),
                             'dist_top5': self.reduce_mean(epoch.top5.avg),
                             'dist_loss': self.reduce_mean(epoch.loss.avg),
                             'dist_step_time': self.reduce_mean(epoch.step_time.last)})

        # NOTE: Tensorboard step-level logging is removed as it becomes exponentially expensive on Azure blobs

    def pre_epoch(self, lr:float=math.nan)->None:
        """Start a new epoch, recording (and optionally logging) the starting lr."""
        epoch = self.run_metrics.add_epoch()
        epoch.pre_epoch(lr)
        if lr is not None:
            writer = get_tb_writer()
            if writer is not None:
                if self.logger_freq > 0 and not math.isnan(lr):
                    logger.debug({'start_lr': lr})
                writer.add_scalar(f'{self._tb_path}/train_steps/lr',
                                  lr, self.global_step)

    def post_epoch(self, val_metrics:Optional['Metrics'], lr:float=math.nan):
        """Close the current epoch, attach validation metrics, log summaries."""
        epoch = self.run_metrics.cur_epoch()
        epoch.post_epoch(val_metrics, lr)

        test_epoch = None
        if val_metrics:
            # A validation Metrics runs for a single epoch; take that epoch.
            test_epoch = val_metrics.run_metrics.epochs_metrics[0]

        if self.logger_freq > 0:
            with logger.pushd('train'):
                logger.info({'top1': epoch.top1.avg,
                             'top5': epoch.top5.avg,
                             'loss': epoch.loss.avg,
                             'duration': epoch.duration(),
                             'step_time': epoch.step_time.avg,
                             'end_lr': lr})
                if self.is_dist():
                    logger.info({'dist_top1': self.reduce_mean(epoch.top1.avg),
                                 'dist_top5': self.reduce_mean(epoch.top5.avg),
                                 'dist_loss': self.reduce_mean(epoch.loss.avg),
                                 'dist_duration': self.reduce_mean(epoch.duration()),
                                 'dist_step_time': self.reduce_mean(epoch.step_time.avg),
                                 'dist_end_lr': self.reduce_mean(lr)})
            if test_epoch:
                with logger.pushd('val'):
                    logger.info({'top1': test_epoch.top1.avg,
                                 'top5': test_epoch.top5.avg,
                                 'loss': test_epoch.loss.avg,
                                 'duration': test_epoch.duration()})
                    if self.is_dist():
                        logger.info({'dist_top1': self.reduce_mean(test_epoch.top1.avg),
                                     'dist_top5': self.reduce_mean(test_epoch.top5.avg),
                                     'dist_loss': self.reduce_mean(test_epoch.loss.avg),
                                     'dist_duration': self.reduce_mean(test_epoch.duration())})

        # NOTE: Tensorboard epoch-level logging removed for the same cost
        # reason as the step-level logging in post_step above.

    def state_dict(self)->Mapping:
        """Serializable snapshot of all metrics (delegates to utils)."""
        return utils.state_dict(self)

    def load_state_dict(self, state_dict:dict)->None:
        """Restore metrics from a snapshot produced by state_dict()."""
        utils.load_state_dict(self, state_dict)

    def __getstate__(self):
        # Pickling support: _apex holds process-specific handles.
        state = self.__dict__.copy()
        if '_apex' in state:
            del state['_apex'] # cannot serialize this
        return state

    # no need to define __setstate__ because _apex should be set from constructor

    def save(self, filepath:str)->Optional[str]:
        """Dump self as YAML to filepath (if given); return the full path."""
        if filepath:
            filepath = utils.full_path(filepath)
            pathlib.Path(filepath).write_text(yaml.dump(self))
        return filepath

    def epochs(self)->int:
        """Returns epochs recorded so far"""
        return len(self.run_metrics.epochs_metrics)

    def cur_epoch(self)->'EpochMetrics':
        """Metrics for the epoch currently in progress."""
        return self.run_metrics.cur_epoch()

    def reduce_min(self, val):
        # All reduce_* helpers are identity in single-process (no apex) runs.
        if not self._apex:
            return val
        return self._apex.reduce(val, op='min')

    def reduce_max(self, val):
        if not self._apex:
            return val
        return self._apex.reduce(val, op='max')

    def reduce_sum(self, val):
        if not self._apex:
            return val
        return self._apex.reduce(val, op='sum')

    def reduce_mean(self, val):
        if not self._apex:
            return val
        return self._apex.reduce(val, op='mean')

    def is_dist(self)->bool:
        """True when running distributed (apex helper present and dist enabled)."""
        if not self._apex:
            return False
        return self._apex.is_dist()

    def best_train_top1(self)->float:
        """Best training top1 average across recorded epochs."""
        return self.run_metrics.best_epoch()[0].top1.avg

    def best_val_top1(self)->float:
        """Best validation top1 average, or NaN when no validation was run."""
        val_epoch_metrics = self.run_metrics.best_epoch()[1]
        return val_epoch_metrics.top1.avg if val_epoch_metrics is not None else math.nan
class Accumulator:
# TODO: replace this with Metrics class
def __init__(self):
self.metrics = defaultdict(lambda: 0.)
def add(self, key, value):
self.metrics[key] += value
def add_dict(self, dict):
for key, value in dict.items():
self.add(key, value)
def __getitem__(self, item):
return self.metrics[item]
def __setitem__(self, key, value):
self.metrics[key] = value
def get_dict(self):
return copy.deepcopy(dict(self.metrics))
def items(self):
return self.metrics.items()
def __str__(self):
return str(dict(self.metrics))
def __truediv__(self, other):
newone = Accumulator()
for key, value in self.items():
if isinstance(other, str):
if other != key:
newone[key] = value / self[other]
else:
newone[key] = value
else:
newone[key] = value / other
return newone
class EpochMetrics:
    """Per-epoch running averages (top1/top5/loss/step time) and timing."""

    def __init__(self, index:int) -> None:
        self.index = index  # 0-based epoch number within the run
        self.top1 = utils.AverageMeter()
        self.top5 = utils.AverageMeter()
        self.loss = utils.AverageMeter()
        self.step_time = utils.AverageMeter()
        self.start_time = math.nan
        self.end_time = math.nan
        self.step = -1  # last completed step index; -1 until pre_step runs
        self.start_lr = math.nan
        self.end_lr = math.nan
        self.val_metrics:Optional[EpochMetrics] = None

    def pre_step(self):
        # Capture the wall-clock start before bumping the step counter.
        self._step_start_time = time.time()
        self.step += 1

    def post_step(self, top1:float, top5:float, loss:float, batch:int):
        elapsed = time.time() - self._step_start_time
        self.step_time.update(elapsed)
        self.top1.update(top1, batch)
        self.top5.update(top5, batch)
        self.loss.update(loss, batch)

    def pre_epoch(self, lr:float):
        self.start_time = time.time()
        self.start_lr = lr

    def post_epoch(self, val_metrics:Optional[Metrics], lr:float):
        self.end_time = time.time()
        self.end_lr = lr
        if val_metrics is None:
            self.val_metrics = None
        else:
            # Keep only the final validation epoch for this training epoch.
            self.val_metrics = val_metrics.run_metrics.epochs_metrics[-1]

    def duration(self):
        return self.end_time - self.start_time
class RunMetrics:
    """Run-level metrics: wall-clock duration plus per-epoch EpochMetrics."""

    def __init__(self) -> None:
        self.epochs_metrics:List[EpochMetrics] = []
        self.start_time = math.nan
        self.end_time = math.nan
        self.epoch = -1  # index of the current epoch; -1 before add_epoch

    def pre_run(self):
        self.start_time = time.time()

    def post_run(self):
        self.end_time = time.time()

    def add_epoch(self)->EpochMetrics:
        self.epoch = len(self.epochs_metrics)
        new_metrics = EpochMetrics(self.epoch)
        self.epochs_metrics.append(new_metrics)
        return new_metrics

    def cur_epoch(self)->EpochMetrics:
        return self.epochs_metrics[self.epoch]

    def best_epoch(self)->Tuple[EpochMetrics, Optional[EpochMetrics]]:
        def val_top1(e):
            # Epochs without validation rank below any real top1 value.
            return e.val_metrics.top1.avg if e.val_metrics else -1

        best_train = max(self.epochs_metrics, key=lambda e: e.top1.avg)
        candidate = max(self.epochs_metrics, key=val_top1)
        best_val = candidate if candidate.val_metrics else None
        return best_train, best_val

    def epoch_time_avg(self):
        return statistics.mean(e.duration() for e in self.epochs_metrics)

    def step_time_avg(self):
        return statistics.mean(e.step_time.avg for e in self.epochs_metrics)

    def duration(self):
        return self.end_time - self.start_time
| 40.357349
| 135
| 0.56441
|
4a0c86a75414d2f85b7adc7f4559b975e5351f14
| 6,300
|
py
|
Python
|
examples/reinforcement_learning/TD3_model_evaluation.py
|
DavidMeda/gym-donkeycar
|
d162fd58d0369282e54dd2ee1ae4538d049c2575
|
[
"MIT"
] | null | null | null |
examples/reinforcement_learning/TD3_model_evaluation.py
|
DavidMeda/gym-donkeycar
|
d162fd58d0369282e54dd2ee1ae4538d049c2575
|
[
"MIT"
] | null | null | null |
examples/reinforcement_learning/TD3_model_evaluation.py
|
DavidMeda/gym-donkeycar
|
d162fd58d0369282e54dd2ee1ae4538d049c2575
|
[
"MIT"
] | null | null | null |
from tqdm import tqdm
import sys
# setting path
sys.path.append('../reinforcement_learning')
import os
import pandas as pd
from torch.utils.data import Dataset, DataLoader
import torch
from torch.nn import L1Loss, MSELoss
from torchvision import transforms
from PIL import Image
import numpy as np
import glob
import json
from sklearn.model_selection import train_test_split
import shutil
from stable_baselines3 import TD3
import warnings
from autoencoder import load_ae
warnings.filterwarnings('ignore')
def create_testset(path):
    """Create a stratified 10% hold-out set from the records in *path*.

    Reads every ``*.json`` record, bins the steering angles into 20 buckets,
    performs a stratified 90/10 train/test split, and copies each test image
    plus its json record into ``<path>/test_set``.

    Arguments:
        path -- directory containing ``record_*.json`` files and images

    Returns:
        (record_train, record_test, label_train, label_test) arrays of image
        file names and steering angles.
    """
    paths = glob.glob(os.path.join(path, "*.json"))
    img_name = []
    steering_values = []
    throttle_values = []
    for p in paths:
        # BUG FIX: previously ``json.load(open(p, "r"))`` leaked the handle.
        with open(p, "r") as fin:
            json_file = json.load(fin)
        img_name.append(json_file["cam/image_array"])
        steering_values.append(json_file["user/angle"])
        throttle_values.append(json_file["user/throttle"])
    dt = pd.DataFrame()
    dt['user/angle'] = steering_values
    dt['img_name'] = img_name
    # dt["user/throttle"] = throttle_values
    # Stratify on 20 equal-width steering-angle bins so the test set keeps
    # the same angle distribution as the training set.
    bins_index = pd.cut(dt['user/angle'], 20, labels=False)
    record_train, record_test, label_train, label_test = train_test_split(
        np.asarray(dt['img_name']), np.asarray(dt['user/angle']),
        stratify=bins_index, test_size=(1. - 0.9), random_state=444)
    os.makedirs(os.path.join(path, "test_set"), exist_ok=True)
    for f in record_test:
        num_file = f.split("_")[0]
        # copy image
        shutil.copy(os.path.join(path, f), os.path.join(path, "test_set"))
        # copy json
        shutil.copy(os.path.join(path, "record_" + num_file + ".json"), os.path.join(path, "test_set"))
    test_set_path = os.path.join(path, "test_set")
    print(len(glob.glob(os.path.join(test_set_path, "*.json"))))
    return record_train, record_test, label_train, label_test
class CustomImageDataset(Dataset):
    """Dataset of (image, [steering, throttle]) pairs read from json records.

    NOTE(review): ``__getitem__`` reads a module-level ``encoder`` global
    (set in ``__main__``); when it is not None, images are passed through
    ``encoder.encode_from_raw_image`` — confirm the global is assigned before
    the dataset is iterated.
    """

    def __init__(self, data_dir, labels_name_arr):
        self.data_dir = data_dir            # directory holding the image files
        self.label_list = labels_name_arr   # paths of the per-sample json records
        self.convert_tensor = transforms.ToTensor()

    def __len__(self):
        return len(self.label_list)

    def __getitem__(self, idx):
        # BUG FIX: previously ``json.load(open(...))`` leaked the file handle.
        with open(self.label_list[idx], "r") as fin:
            label_json = json.load(fin)
        img_name = label_json["cam/image_array"]
        image = Image.open(os.path.join(self.data_dir, img_name))
        if encoder is not None:
            # Encode the raw image to the autoencoder's flat latent vector.
            image = np.asarray(image)
            image = encoder.encode_from_raw_image(image).flatten()
        else:
            # Raw tensor in HWC layout.
            image = self.convert_tensor(image).permute(1, 2, 0)
        label = torch.tensor([label_json["user/angle"], label_json["user/throttle"]], dtype=torch.float32)
        return image, label
def myLoss(output, target):
    """Absolute error weighted by the exponential of the target magnitude.

    Computes ``(1 + e^|target|) * |target - output|`` so that errors on
    large target values are penalized more heavily.
    """
    weight = 1 + torch.exp(torch.abs(target))
    return weight * torch.abs(target - output)
def eval_model(model, test_set, loss_func1, loss_func2, encoder=None,):
    """Evaluate *model* on *test_set* with two loss functions.

    Returns the means of (loss1 steering, loss1 throttle, loss2 steering,
    loss2 throttle) over the whole set.
    """
    steering_a = []
    throttle_a = []
    steering_b = []
    throttle_b = []
    with torch.no_grad():
        progress = tqdm(total=len(test_set))
        for x_test, y_test in test_set:
            # model.predict returns a tuple (array[steering, throttle], None)
            prediction = torch.tensor(model.predict(x_test)[0])
            steering_a.append(loss_func1(prediction[0], y_test[0]))
            throttle_a.append(loss_func1(prediction[1], y_test[1]))
            steering_b.append(loss_func2(prediction[0], y_test[0]))
            throttle_b.append(loss_func2(prediction[1], y_test[1]))
            progress.update(1)
        progress.close()
    return np.mean(steering_a), np.mean(throttle_a), np.mean(steering_b), np.mean(throttle_b)
if __name__ == "__main__":
    # Windows-specific development paths.
    data_path = "C:\\Users\\david\\Documents\\project\\gym-donkeycar\\examples\\reinforcement_learning\\data\\right_lane_road_gen_test_set"
    label_list = glob.glob(os.path.join(data_path, "*.json"))
    dataset = CustomImageDataset(data_path, label_list)

    log_dir = "C:/Users/david/Documents/project/gym-donkeycar/examples/reinforcement_learning/models"
    name_model = "TD3_500k_encoder_best_model"
    model = TD3.load(os.path.join(log_dir, name_model))

    # Module-level global also read by CustomImageDataset.__getitem__.
    encoder = None
    name_encoder = "encoder_1000_transfer_best.pkl"
    encoder = load_ae(os.path.join(log_dir, name_encoder))

    loss1_steering, loss1_throttle, loss2_steering, loss2_throttle = \
        eval_model(model, dataset, MSELoss(), L1Loss(), encoder)

    print("name Model: ", name_model)
    print(
        f"MSE_steering={loss1_steering}, MSE_throttle={loss1_throttle}, \nMAE_steering={loss2_steering}, MAE_throttle={loss2_throttle}")
| 137
| 0.663968
|
4a0c87908cf08573cb7a426067ade2e91777051d
| 967
|
py
|
Python
|
model/contact.py
|
barancev/python_training_final
|
a479150fd7003f2dadf42df2e0348253a9013429
|
[
"Apache-2.0"
] | 3
|
2020-01-12T21:09:41.000Z
|
2022-01-13T00:20:28.000Z
|
model/contact.py
|
barancev/python_training_final
|
a479150fd7003f2dadf42df2e0348253a9013429
|
[
"Apache-2.0"
] | null | null | null |
model/contact.py
|
barancev/python_training_final
|
a479150fd7003f2dadf42df2e0348253a9013429
|
[
"Apache-2.0"
] | 1
|
2019-10-11T12:13:19.000Z
|
2019-10-11T12:13:19.000Z
|
__author__ = 'alexei'
from sys import maxsize
class Contact:
    """An address-book contact.

    Equality is name-based; ids only have to agree when both sides carry one,
    which lets objects scraped from the UI (no id) match database rows.
    """

    def __init__(self, firstname=None, lastname=None, id=None,
                 homephone=None, mobilephone=None, workphone=None, secondaryphone=None, all_phones_from_home_page=None):
        self.id = id
        self.firstname = firstname
        self.lastname = lastname
        # Individual phone fields plus the merged string shown on the home page.
        self.homephone = homephone
        self.mobilephone = mobilephone
        self.workphone = workphone
        self.secondaryphone = secondaryphone
        self.all_phones_from_home_page = all_phones_from_home_page

    def __repr__(self):
        return f"{self.id}:{self.firstname} {self.lastname}"

    def __eq__(self, other):
        # A missing id on either side is treated as a wildcard.
        ids_compatible = self.id is None or other.id is None or self.id == other.id
        return ids_compatible and self.firstname == other.firstname and self.lastname == other.lastname

    def id_or_max(self):
        """Numeric id for sorting; contacts without an id sort last."""
        return int(self.id) if self.id else maxsize
| 32.233333
| 120
| 0.644261
|
4a0c87a0d10bbd1181fc8034670dcde2f89918fb
| 19,263
|
py
|
Python
|
mozillians/mozspaces/migrations/0010_auto_20190109_0324.py
|
divyamoncy/mozillians
|
d53d1d05d1f05b74f8533541e37083dcb89b29a8
|
[
"BSD-3-Clause"
] | 202
|
2015-01-14T10:19:55.000Z
|
2021-12-11T06:04:16.000Z
|
mozillians/mozspaces/migrations/0010_auto_20190109_0324.py
|
divyamoncy/mozillians
|
d53d1d05d1f05b74f8533541e37083dcb89b29a8
|
[
"BSD-3-Clause"
] | 2,924
|
2015-01-07T11:27:32.000Z
|
2021-01-19T14:05:17.000Z
|
mozillians/mozspaces/migrations/0010_auto_20190109_0324.py
|
divyamoncy/mozillians
|
d53d1d05d1f05b74f8533541e37083dcb89b29a8
|
[
"BSD-3-Clause"
] | 270
|
2015-01-02T18:31:01.000Z
|
2021-02-17T20:57:44.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-01-09 11:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mozspaces', '0009_auto_20171016_0340'),
]
operations = [
migrations.AlterField(
model_name='mozspace',
name='timezone',
field=models.CharField(choices=[(b'Africa/Abidjan', b'Africa/Abidjan'), (b'Africa/Accra', b'Africa/Accra'), (b'Africa/Addis_Ababa', b'Africa/Addis_Ababa'), (b'Africa/Algiers', b'Africa/Algiers'), (b'Africa/Asmara', b'Africa/Asmara'), (b'Africa/Bamako', b'Africa/Bamako'), (b'Africa/Bangui', b'Africa/Bangui'), (b'Africa/Banjul', b'Africa/Banjul'), (b'Africa/Bissau', b'Africa/Bissau'), (b'Africa/Blantyre', b'Africa/Blantyre'), (b'Africa/Brazzaville', b'Africa/Brazzaville'), (b'Africa/Bujumbura', b'Africa/Bujumbura'), (b'Africa/Cairo', b'Africa/Cairo'), (b'Africa/Casablanca', b'Africa/Casablanca'), (b'Africa/Ceuta', b'Africa/Ceuta'), (b'Africa/Conakry', b'Africa/Conakry'), (b'Africa/Dakar', b'Africa/Dakar'), (b'Africa/Dar_es_Salaam', b'Africa/Dar_es_Salaam'), (b'Africa/Djibouti', b'Africa/Djibouti'), (b'Africa/Douala', b'Africa/Douala'), (b'Africa/El_Aaiun', b'Africa/El_Aaiun'), (b'Africa/Freetown', b'Africa/Freetown'), (b'Africa/Gaborone', b'Africa/Gaborone'), (b'Africa/Harare', b'Africa/Harare'), (b'Africa/Johannesburg', b'Africa/Johannesburg'), (b'Africa/Juba', b'Africa/Juba'), (b'Africa/Kampala', b'Africa/Kampala'), (b'Africa/Khartoum', b'Africa/Khartoum'), (b'Africa/Kigali', b'Africa/Kigali'), (b'Africa/Kinshasa', b'Africa/Kinshasa'), (b'Africa/Lagos', b'Africa/Lagos'), (b'Africa/Libreville', b'Africa/Libreville'), (b'Africa/Lome', b'Africa/Lome'), (b'Africa/Luanda', b'Africa/Luanda'), (b'Africa/Lubumbashi', b'Africa/Lubumbashi'), (b'Africa/Lusaka', b'Africa/Lusaka'), (b'Africa/Malabo', b'Africa/Malabo'), (b'Africa/Maputo', b'Africa/Maputo'), (b'Africa/Maseru', b'Africa/Maseru'), (b'Africa/Mbabane', b'Africa/Mbabane'), (b'Africa/Mogadishu', b'Africa/Mogadishu'), (b'Africa/Monrovia', b'Africa/Monrovia'), (b'Africa/Nairobi', b'Africa/Nairobi'), (b'Africa/Ndjamena', b'Africa/Ndjamena'), (b'Africa/Niamey', b'Africa/Niamey'), (b'Africa/Nouakchott', b'Africa/Nouakchott'), (b'Africa/Ouagadougou', b'Africa/Ouagadougou'), (b'Africa/Porto-Novo', b'Africa/Porto-Novo'), 
(b'Africa/Sao_Tome', b'Africa/Sao_Tome'), (b'Africa/Tripoli', b'Africa/Tripoli'), (b'Africa/Tunis', b'Africa/Tunis'), (b'Africa/Windhoek', b'Africa/Windhoek'), (b'America/Adak', b'America/Adak'), (b'America/Anchorage', b'America/Anchorage'), (b'America/Anguilla', b'America/Anguilla'), (b'America/Antigua', b'America/Antigua'), (b'America/Araguaina', b'America/Araguaina'), (b'America/Argentina/Buenos_Aires', b'America/Argentina/Buenos_Aires'), (b'America/Argentina/Catamarca', b'America/Argentina/Catamarca'), (b'America/Argentina/Cordoba', b'America/Argentina/Cordoba'), (b'America/Argentina/Jujuy', b'America/Argentina/Jujuy'), (b'America/Argentina/La_Rioja', b'America/Argentina/La_Rioja'), (b'America/Argentina/Mendoza', b'America/Argentina/Mendoza'), (b'America/Argentina/Rio_Gallegos', b'America/Argentina/Rio_Gallegos'), (b'America/Argentina/Salta', b'America/Argentina/Salta'), (b'America/Argentina/San_Juan', b'America/Argentina/San_Juan'), (b'America/Argentina/San_Luis', b'America/Argentina/San_Luis'), (b'America/Argentina/Tucuman', b'America/Argentina/Tucuman'), (b'America/Argentina/Ushuaia', b'America/Argentina/Ushuaia'), (b'America/Aruba', b'America/Aruba'), (b'America/Asuncion', b'America/Asuncion'), (b'America/Atikokan', b'America/Atikokan'), (b'America/Bahia', b'America/Bahia'), (b'America/Bahia_Banderas', b'America/Bahia_Banderas'), (b'America/Barbados', b'America/Barbados'), (b'America/Belem', b'America/Belem'), (b'America/Belize', b'America/Belize'), (b'America/Blanc-Sablon', b'America/Blanc-Sablon'), (b'America/Boa_Vista', b'America/Boa_Vista'), (b'America/Bogota', b'America/Bogota'), (b'America/Boise', b'America/Boise'), (b'America/Cambridge_Bay', b'America/Cambridge_Bay'), (b'America/Campo_Grande', b'America/Campo_Grande'), (b'America/Cancun', b'America/Cancun'), (b'America/Caracas', b'America/Caracas'), (b'America/Cayenne', b'America/Cayenne'), (b'America/Cayman', b'America/Cayman'), (b'America/Chicago', b'America/Chicago'), (b'America/Chihuahua', 
b'America/Chihuahua'), (b'America/Costa_Rica', b'America/Costa_Rica'), (b'America/Creston', b'America/Creston'), (b'America/Cuiaba', b'America/Cuiaba'), (b'America/Curacao', b'America/Curacao'), (b'America/Danmarkshavn', b'America/Danmarkshavn'), (b'America/Dawson', b'America/Dawson'), (b'America/Dawson_Creek', b'America/Dawson_Creek'), (b'America/Denver', b'America/Denver'), (b'America/Detroit', b'America/Detroit'), (b'America/Dominica', b'America/Dominica'), (b'America/Edmonton', b'America/Edmonton'), (b'America/Eirunepe', b'America/Eirunepe'), (b'America/El_Salvador', b'America/El_Salvador'), (b'America/Fort_Nelson', b'America/Fort_Nelson'), (b'America/Fortaleza', b'America/Fortaleza'), (b'America/Glace_Bay', b'America/Glace_Bay'), (b'America/Godthab', b'America/Godthab'), (b'America/Goose_Bay', b'America/Goose_Bay'), (b'America/Grand_Turk', b'America/Grand_Turk'), (b'America/Grenada', b'America/Grenada'), (b'America/Guadeloupe', b'America/Guadeloupe'), (b'America/Guatemala', b'America/Guatemala'), (b'America/Guayaquil', b'America/Guayaquil'), (b'America/Guyana', b'America/Guyana'), (b'America/Halifax', b'America/Halifax'), (b'America/Havana', b'America/Havana'), (b'America/Hermosillo', b'America/Hermosillo'), (b'America/Indiana/Indianapolis', b'America/Indiana/Indianapolis'), (b'America/Indiana/Knox', b'America/Indiana/Knox'), (b'America/Indiana/Marengo', b'America/Indiana/Marengo'), (b'America/Indiana/Petersburg', b'America/Indiana/Petersburg'), (b'America/Indiana/Tell_City', b'America/Indiana/Tell_City'), (b'America/Indiana/Vevay', b'America/Indiana/Vevay'), (b'America/Indiana/Vincennes', b'America/Indiana/Vincennes'), (b'America/Indiana/Winamac', b'America/Indiana/Winamac'), (b'America/Inuvik', b'America/Inuvik'), (b'America/Iqaluit', b'America/Iqaluit'), (b'America/Jamaica', b'America/Jamaica'), (b'America/Juneau', b'America/Juneau'), (b'America/Kentucky/Louisville', b'America/Kentucky/Louisville'), (b'America/Kentucky/Monticello', 
b'America/Kentucky/Monticello'), (b'America/Kralendijk', b'America/Kralendijk'), (b'America/La_Paz', b'America/La_Paz'), (b'America/Lima', b'America/Lima'), (b'America/Los_Angeles', b'America/Los_Angeles'), (b'America/Lower_Princes', b'America/Lower_Princes'), (b'America/Maceio', b'America/Maceio'), (b'America/Managua', b'America/Managua'), (b'America/Manaus', b'America/Manaus'), (b'America/Marigot', b'America/Marigot'), (b'America/Martinique', b'America/Martinique'), (b'America/Matamoros', b'America/Matamoros'), (b'America/Mazatlan', b'America/Mazatlan'), (b'America/Menominee', b'America/Menominee'), (b'America/Merida', b'America/Merida'), (b'America/Metlakatla', b'America/Metlakatla'), (b'America/Mexico_City', b'America/Mexico_City'), (b'America/Miquelon', b'America/Miquelon'), (b'America/Moncton', b'America/Moncton'), (b'America/Monterrey', b'America/Monterrey'), (b'America/Montevideo', b'America/Montevideo'), (b'America/Montserrat', b'America/Montserrat'), (b'America/Nassau', b'America/Nassau'), (b'America/New_York', b'America/New_York'), (b'America/Nipigon', b'America/Nipigon'), (b'America/Nome', b'America/Nome'), (b'America/Noronha', b'America/Noronha'), (b'America/North_Dakota/Beulah', b'America/North_Dakota/Beulah'), (b'America/North_Dakota/Center', b'America/North_Dakota/Center'), (b'America/North_Dakota/New_Salem', b'America/North_Dakota/New_Salem'), (b'America/Ojinaga', b'America/Ojinaga'), (b'America/Panama', b'America/Panama'), (b'America/Pangnirtung', b'America/Pangnirtung'), (b'America/Paramaribo', b'America/Paramaribo'), (b'America/Phoenix', b'America/Phoenix'), (b'America/Port-au-Prince', b'America/Port-au-Prince'), (b'America/Port_of_Spain', b'America/Port_of_Spain'), (b'America/Porto_Velho', b'America/Porto_Velho'), (b'America/Puerto_Rico', b'America/Puerto_Rico'), (b'America/Punta_Arenas', b'America/Punta_Arenas'), (b'America/Rainy_River', b'America/Rainy_River'), (b'America/Rankin_Inlet', b'America/Rankin_Inlet'), (b'America/Recife', 
b'America/Recife'), (b'America/Regina', b'America/Regina'), (b'America/Resolute', b'America/Resolute'), (b'America/Rio_Branco', b'America/Rio_Branco'), (b'America/Santarem', b'America/Santarem'), (b'America/Santiago', b'America/Santiago'), (b'America/Santo_Domingo', b'America/Santo_Domingo'), (b'America/Sao_Paulo', b'America/Sao_Paulo'), (b'America/Scoresbysund', b'America/Scoresbysund'), (b'America/Sitka', b'America/Sitka'), (b'America/St_Barthelemy', b'America/St_Barthelemy'), (b'America/St_Johns', b'America/St_Johns'), (b'America/St_Kitts', b'America/St_Kitts'), (b'America/St_Lucia', b'America/St_Lucia'), (b'America/St_Thomas', b'America/St_Thomas'), (b'America/St_Vincent', b'America/St_Vincent'), (b'America/Swift_Current', b'America/Swift_Current'), (b'America/Tegucigalpa', b'America/Tegucigalpa'), (b'America/Thule', b'America/Thule'), (b'America/Thunder_Bay', b'America/Thunder_Bay'), (b'America/Tijuana', b'America/Tijuana'), (b'America/Toronto', b'America/Toronto'), (b'America/Tortola', b'America/Tortola'), (b'America/Vancouver', b'America/Vancouver'), (b'America/Whitehorse', b'America/Whitehorse'), (b'America/Winnipeg', b'America/Winnipeg'), (b'America/Yakutat', b'America/Yakutat'), (b'America/Yellowknife', b'America/Yellowknife'), (b'Antarctica/Casey', b'Antarctica/Casey'), (b'Antarctica/Davis', b'Antarctica/Davis'), (b'Antarctica/DumontDUrville', b'Antarctica/DumontDUrville'), (b'Antarctica/Macquarie', b'Antarctica/Macquarie'), (b'Antarctica/Mawson', b'Antarctica/Mawson'), (b'Antarctica/McMurdo', b'Antarctica/McMurdo'), (b'Antarctica/Palmer', b'Antarctica/Palmer'), (b'Antarctica/Rothera', b'Antarctica/Rothera'), (b'Antarctica/Syowa', b'Antarctica/Syowa'), (b'Antarctica/Troll', b'Antarctica/Troll'), (b'Antarctica/Vostok', b'Antarctica/Vostok'), (b'Arctic/Longyearbyen', b'Arctic/Longyearbyen'), (b'Asia/Aden', b'Asia/Aden'), (b'Asia/Almaty', b'Asia/Almaty'), (b'Asia/Amman', b'Asia/Amman'), (b'Asia/Anadyr', b'Asia/Anadyr'), (b'Asia/Aqtau', b'Asia/Aqtau'), 
(b'Asia/Aqtobe', b'Asia/Aqtobe'), (b'Asia/Ashgabat', b'Asia/Ashgabat'), (b'Asia/Atyrau', b'Asia/Atyrau'), (b'Asia/Baghdad', b'Asia/Baghdad'), (b'Asia/Bahrain', b'Asia/Bahrain'), (b'Asia/Baku', b'Asia/Baku'), (b'Asia/Bangkok', b'Asia/Bangkok'), (b'Asia/Barnaul', b'Asia/Barnaul'), (b'Asia/Beirut', b'Asia/Beirut'), (b'Asia/Bishkek', b'Asia/Bishkek'), (b'Asia/Brunei', b'Asia/Brunei'), (b'Asia/Chita', b'Asia/Chita'), (b'Asia/Choibalsan', b'Asia/Choibalsan'), (b'Asia/Colombo', b'Asia/Colombo'), (b'Asia/Damascus', b'Asia/Damascus'), (b'Asia/Dhaka', b'Asia/Dhaka'), (b'Asia/Dili', b'Asia/Dili'), (b'Asia/Dubai', b'Asia/Dubai'), (b'Asia/Dushanbe', b'Asia/Dushanbe'), (b'Asia/Famagusta', b'Asia/Famagusta'), (b'Asia/Gaza', b'Asia/Gaza'), (b'Asia/Hebron', b'Asia/Hebron'), (b'Asia/Ho_Chi_Minh', b'Asia/Ho_Chi_Minh'), (b'Asia/Hong_Kong', b'Asia/Hong_Kong'), (b'Asia/Hovd', b'Asia/Hovd'), (b'Asia/Irkutsk', b'Asia/Irkutsk'), (b'Asia/Jakarta', b'Asia/Jakarta'), (b'Asia/Jayapura', b'Asia/Jayapura'), (b'Asia/Jerusalem', b'Asia/Jerusalem'), (b'Asia/Kabul', b'Asia/Kabul'), (b'Asia/Kamchatka', b'Asia/Kamchatka'), (b'Asia/Karachi', b'Asia/Karachi'), (b'Asia/Kathmandu', b'Asia/Kathmandu'), (b'Asia/Khandyga', b'Asia/Khandyga'), (b'Asia/Kolkata', b'Asia/Kolkata'), (b'Asia/Krasnoyarsk', b'Asia/Krasnoyarsk'), (b'Asia/Kuala_Lumpur', b'Asia/Kuala_Lumpur'), (b'Asia/Kuching', b'Asia/Kuching'), (b'Asia/Kuwait', b'Asia/Kuwait'), (b'Asia/Macau', b'Asia/Macau'), (b'Asia/Magadan', b'Asia/Magadan'), (b'Asia/Makassar', b'Asia/Makassar'), (b'Asia/Manila', b'Asia/Manila'), (b'Asia/Muscat', b'Asia/Muscat'), (b'Asia/Nicosia', b'Asia/Nicosia'), (b'Asia/Novokuznetsk', b'Asia/Novokuznetsk'), (b'Asia/Novosibirsk', b'Asia/Novosibirsk'), (b'Asia/Omsk', b'Asia/Omsk'), (b'Asia/Oral', b'Asia/Oral'), (b'Asia/Phnom_Penh', b'Asia/Phnom_Penh'), (b'Asia/Pontianak', b'Asia/Pontianak'), (b'Asia/Pyongyang', b'Asia/Pyongyang'), (b'Asia/Qatar', b'Asia/Qatar'), (b'Asia/Qostanay', b'Asia/Qostanay'), (b'Asia/Qyzylorda', 
b'Asia/Qyzylorda'), (b'Asia/Riyadh', b'Asia/Riyadh'), (b'Asia/Sakhalin', b'Asia/Sakhalin'), (b'Asia/Samarkand', b'Asia/Samarkand'), (b'Asia/Seoul', b'Asia/Seoul'), (b'Asia/Shanghai', b'Asia/Shanghai'), (b'Asia/Singapore', b'Asia/Singapore'), (b'Asia/Srednekolymsk', b'Asia/Srednekolymsk'), (b'Asia/Taipei', b'Asia/Taipei'), (b'Asia/Tashkent', b'Asia/Tashkent'), (b'Asia/Tbilisi', b'Asia/Tbilisi'), (b'Asia/Tehran', b'Asia/Tehran'), (b'Asia/Thimphu', b'Asia/Thimphu'), (b'Asia/Tokyo', b'Asia/Tokyo'), (b'Asia/Tomsk', b'Asia/Tomsk'), (b'Asia/Ulaanbaatar', b'Asia/Ulaanbaatar'), (b'Asia/Urumqi', b'Asia/Urumqi'), (b'Asia/Ust-Nera', b'Asia/Ust-Nera'), (b'Asia/Vientiane', b'Asia/Vientiane'), (b'Asia/Vladivostok', b'Asia/Vladivostok'), (b'Asia/Yakutsk', b'Asia/Yakutsk'), (b'Asia/Yangon', b'Asia/Yangon'), (b'Asia/Yekaterinburg', b'Asia/Yekaterinburg'), (b'Asia/Yerevan', b'Asia/Yerevan'), (b'Atlantic/Azores', b'Atlantic/Azores'), (b'Atlantic/Bermuda', b'Atlantic/Bermuda'), (b'Atlantic/Canary', b'Atlantic/Canary'), (b'Atlantic/Cape_Verde', b'Atlantic/Cape_Verde'), (b'Atlantic/Faroe', b'Atlantic/Faroe'), (b'Atlantic/Madeira', b'Atlantic/Madeira'), (b'Atlantic/Reykjavik', b'Atlantic/Reykjavik'), (b'Atlantic/South_Georgia', b'Atlantic/South_Georgia'), (b'Atlantic/St_Helena', b'Atlantic/St_Helena'), (b'Atlantic/Stanley', b'Atlantic/Stanley'), (b'Australia/Adelaide', b'Australia/Adelaide'), (b'Australia/Brisbane', b'Australia/Brisbane'), (b'Australia/Broken_Hill', b'Australia/Broken_Hill'), (b'Australia/Currie', b'Australia/Currie'), (b'Australia/Darwin', b'Australia/Darwin'), (b'Australia/Eucla', b'Australia/Eucla'), (b'Australia/Hobart', b'Australia/Hobart'), (b'Australia/Lindeman', b'Australia/Lindeman'), (b'Australia/Lord_Howe', b'Australia/Lord_Howe'), (b'Australia/Melbourne', b'Australia/Melbourne'), (b'Australia/Perth', b'Australia/Perth'), (b'Australia/Sydney', b'Australia/Sydney'), (b'Canada/Atlantic', b'Canada/Atlantic'), (b'Canada/Central', b'Canada/Central'), 
(b'Canada/Eastern', b'Canada/Eastern'), (b'Canada/Mountain', b'Canada/Mountain'), (b'Canada/Newfoundland', b'Canada/Newfoundland'), (b'Canada/Pacific', b'Canada/Pacific'), (b'Europe/Amsterdam', b'Europe/Amsterdam'), (b'Europe/Andorra', b'Europe/Andorra'), (b'Europe/Astrakhan', b'Europe/Astrakhan'), (b'Europe/Athens', b'Europe/Athens'), (b'Europe/Belgrade', b'Europe/Belgrade'), (b'Europe/Berlin', b'Europe/Berlin'), (b'Europe/Bratislava', b'Europe/Bratislava'), (b'Europe/Brussels', b'Europe/Brussels'), (b'Europe/Bucharest', b'Europe/Bucharest'), (b'Europe/Budapest', b'Europe/Budapest'), (b'Europe/Busingen', b'Europe/Busingen'), (b'Europe/Chisinau', b'Europe/Chisinau'), (b'Europe/Copenhagen', b'Europe/Copenhagen'), (b'Europe/Dublin', b'Europe/Dublin'), (b'Europe/Gibraltar', b'Europe/Gibraltar'), (b'Europe/Guernsey', b'Europe/Guernsey'), (b'Europe/Helsinki', b'Europe/Helsinki'), (b'Europe/Isle_of_Man', b'Europe/Isle_of_Man'), (b'Europe/Istanbul', b'Europe/Istanbul'), (b'Europe/Jersey', b'Europe/Jersey'), (b'Europe/Kaliningrad', b'Europe/Kaliningrad'), (b'Europe/Kiev', b'Europe/Kiev'), (b'Europe/Kirov', b'Europe/Kirov'), (b'Europe/Lisbon', b'Europe/Lisbon'), (b'Europe/Ljubljana', b'Europe/Ljubljana'), (b'Europe/London', b'Europe/London'), (b'Europe/Luxembourg', b'Europe/Luxembourg'), (b'Europe/Madrid', b'Europe/Madrid'), (b'Europe/Malta', b'Europe/Malta'), (b'Europe/Mariehamn', b'Europe/Mariehamn'), (b'Europe/Minsk', b'Europe/Minsk'), (b'Europe/Monaco', b'Europe/Monaco'), (b'Europe/Moscow', b'Europe/Moscow'), (b'Europe/Oslo', b'Europe/Oslo'), (b'Europe/Paris', b'Europe/Paris'), (b'Europe/Podgorica', b'Europe/Podgorica'), (b'Europe/Prague', b'Europe/Prague'), (b'Europe/Riga', b'Europe/Riga'), (b'Europe/Rome', b'Europe/Rome'), (b'Europe/Samara', b'Europe/Samara'), (b'Europe/San_Marino', b'Europe/San_Marino'), (b'Europe/Sarajevo', b'Europe/Sarajevo'), (b'Europe/Saratov', b'Europe/Saratov'), (b'Europe/Simferopol', b'Europe/Simferopol'), (b'Europe/Skopje', b'Europe/Skopje'), 
(b'Europe/Sofia', b'Europe/Sofia'), (b'Europe/Stockholm', b'Europe/Stockholm'), (b'Europe/Tallinn', b'Europe/Tallinn'), (b'Europe/Tirane', b'Europe/Tirane'), (b'Europe/Ulyanovsk', b'Europe/Ulyanovsk'), (b'Europe/Uzhgorod', b'Europe/Uzhgorod'), (b'Europe/Vaduz', b'Europe/Vaduz'), (b'Europe/Vatican', b'Europe/Vatican'), (b'Europe/Vienna', b'Europe/Vienna'), (b'Europe/Vilnius', b'Europe/Vilnius'), (b'Europe/Volgograd', b'Europe/Volgograd'), (b'Europe/Warsaw', b'Europe/Warsaw'), (b'Europe/Zagreb', b'Europe/Zagreb'), (b'Europe/Zaporozhye', b'Europe/Zaporozhye'), (b'Europe/Zurich', b'Europe/Zurich'), (b'GMT', b'GMT'), (b'Indian/Antananarivo', b'Indian/Antananarivo'), (b'Indian/Chagos', b'Indian/Chagos'), (b'Indian/Christmas', b'Indian/Christmas'), (b'Indian/Cocos', b'Indian/Cocos'), (b'Indian/Comoro', b'Indian/Comoro'), (b'Indian/Kerguelen', b'Indian/Kerguelen'), (b'Indian/Mahe', b'Indian/Mahe'), (b'Indian/Maldives', b'Indian/Maldives'), (b'Indian/Mauritius', b'Indian/Mauritius'), (b'Indian/Mayotte', b'Indian/Mayotte'), (b'Indian/Reunion', b'Indian/Reunion'), (b'Pacific/Apia', b'Pacific/Apia'), (b'Pacific/Auckland', b'Pacific/Auckland'), (b'Pacific/Bougainville', b'Pacific/Bougainville'), (b'Pacific/Chatham', b'Pacific/Chatham'), (b'Pacific/Chuuk', b'Pacific/Chuuk'), (b'Pacific/Easter', b'Pacific/Easter'), (b'Pacific/Efate', b'Pacific/Efate'), (b'Pacific/Enderbury', b'Pacific/Enderbury'), (b'Pacific/Fakaofo', b'Pacific/Fakaofo'), (b'Pacific/Fiji', b'Pacific/Fiji'), (b'Pacific/Funafuti', b'Pacific/Funafuti'), (b'Pacific/Galapagos', b'Pacific/Galapagos'), (b'Pacific/Gambier', b'Pacific/Gambier'), (b'Pacific/Guadalcanal', b'Pacific/Guadalcanal'), (b'Pacific/Guam', b'Pacific/Guam'), (b'Pacific/Honolulu', b'Pacific/Honolulu'), (b'Pacific/Kiritimati', b'Pacific/Kiritimati'), (b'Pacific/Kosrae', b'Pacific/Kosrae'), (b'Pacific/Kwajalein', b'Pacific/Kwajalein'), (b'Pacific/Majuro', b'Pacific/Majuro'), (b'Pacific/Marquesas', b'Pacific/Marquesas'), (b'Pacific/Midway', 
b'Pacific/Midway'), (b'Pacific/Nauru', b'Pacific/Nauru'), (b'Pacific/Niue', b'Pacific/Niue'), (b'Pacific/Norfolk', b'Pacific/Norfolk'), (b'Pacific/Noumea', b'Pacific/Noumea'), (b'Pacific/Pago_Pago', b'Pacific/Pago_Pago'), (b'Pacific/Palau', b'Pacific/Palau'), (b'Pacific/Pitcairn', b'Pacific/Pitcairn'), (b'Pacific/Pohnpei', b'Pacific/Pohnpei'), (b'Pacific/Port_Moresby', b'Pacific/Port_Moresby'), (b'Pacific/Rarotonga', b'Pacific/Rarotonga'), (b'Pacific/Saipan', b'Pacific/Saipan'), (b'Pacific/Tahiti', b'Pacific/Tahiti'), (b'Pacific/Tarawa', b'Pacific/Tarawa'), (b'Pacific/Tongatapu', b'Pacific/Tongatapu'), (b'Pacific/Wake', b'Pacific/Wake'), (b'Pacific/Wallis', b'Pacific/Wallis'), (b'US/Alaska', b'US/Alaska'), (b'US/Arizona', b'US/Arizona'), (b'US/Central', b'US/Central'), (b'US/Eastern', b'US/Eastern'), (b'US/Hawaii', b'US/Hawaii'), (b'US/Mountain', b'US/Mountain'), (b'US/Pacific', b'US/Pacific'), (b'UTC', b'UTC')], max_length=100),
),
]
| 917.285714
| 18,855
| 0.713856
|
4a0c8892216a601116094e01bcc39f4a616f4bd2
| 2,062
|
py
|
Python
|
code/deploy/score.py
|
andrwwong-uw/azure-github-actions-demo
|
5c25ac57729b81598e0ae02bdc9af0b16b57543a
|
[
"MIT"
] | null | null | null |
code/deploy/score.py
|
andrwwong-uw/azure-github-actions-demo
|
5c25ac57729b81598e0ae02bdc9af0b16b57543a
|
[
"MIT"
] | null | null | null |
code/deploy/score.py
|
andrwwong-uw/azure-github-actions-demo
|
5c25ac57729b81598e0ae02bdc9af0b16b57543a
|
[
"MIT"
] | null | null | null |
import os
import joblib
import numpy as np
from sklearn.svm import SVC
from azureml.core import Model
from azureml.monitoring import ModelDataCollector
from inference_schema.schema_decorators import input_schema, output_schema
from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType
from inference_schema.parameter_types.standard_py_parameter_type import StandardPythonParameterType
# The init() method is called once, when the web service starts up.
# Typically you would deserialize the model file, as shown here using joblib,
# and store it in a global variable so your run() method can access it later.
def init():
    """Web-service start-up hook: load the registered model and set up the
    Azure data collectors used by run()."""
    global model
    global inputs_dc, prediction_dc
    # Resolve the registered model's on-disk path and deserialize it once;
    # run() reuses the resulting global.
    registered_path = Model.get_model_path(model_name="mymodel")
    model = joblib.load(registered_path)
    # Collectors that persist request features and predictions to Azure Blob.
    inputs_dc = ModelDataCollector("sample-model", designation="inputs", feature_names=["feat1", "feat2", "feat3", "feat4"])
    prediction_dc = ModelDataCollector("sample-model", designation="predictions", feature_names=["prediction"])
# The run() method is called each time a request is made to the scoring API.
# Shown here are the optional input_schema and output_schema decorators
# from the inference-schema pip package. Using these decorators on your
# run() method parses and validates the incoming payload against
# the example input you provide here. This will also generate a Swagger
# API document for your web service.
@input_schema('data', NumpyParameterType(np.array([[0.1, 1.2, 2.3, 3.4]])))
@output_schema(StandardPythonParameterType({'predict': [['Iris-virginica']]}))
def run(data):
    """Scoring entry point: predict with the model loaded by init() and
    return a JSON-serializable payload."""
    prediction = model.predict(data)
    # Persist both the incoming features and the prediction to Azure Blob.
    inputs_dc.collect(data)
    prediction_dc.collect(prediction)
    return { "predict": prediction.tolist() }
| 46.863636
| 124
| 0.774976
|
4a0c897bf2ca5a1578b8ec833840f4e3f22b8415
| 1,812
|
py
|
Python
|
frontend/pages/utils.py
|
DaniilJSN/timeflow
|
9b61bdb207f994fc1088db9c9957455a107e6907
|
[
"MIT"
] | null | null | null |
frontend/pages/utils.py
|
DaniilJSN/timeflow
|
9b61bdb207f994fc1088db9c9957455a107e6907
|
[
"MIT"
] | null | null | null |
frontend/pages/utils.py
|
DaniilJSN/timeflow
|
9b61bdb207f994fc1088db9c9957455a107e6907
|
[
"MIT"
] | null | null | null |
# pages shared logic goes here
import requests
from config import base_url
from typing import TypedDict, Callable, List
from datetime import datetime
from idom import component
class Select(TypedDict):
    """Option entry for a select widget: the submitted value and its label."""
    value: str
    # NOTE(review): "dispay_value" looks like a typo for "display_value";
    # renaming the key would break existing callers, so it is left as-is.
    dispay_value: str
# fmt: off
# Calendar vocabularies shared by the scheduling pages.

# Every month of 2022 as "YYYY_MM".
year_month_list = [f"2022_{m:02d}" for m in range(1, 13)]

# First day of each month, "YYYY_MM_01".
month_start_list = [ym + "_01" for ym in year_month_list]

# Building blocks for quarter-hour time slots (07:00 .. 22:45).
hours = ["07", "08", "09", "10", "11", "12", "13", "14", "15", "16",
    "17", "18", "19", "20", "21", "22"]
quarters = ["00", "15", "30", "45"]
# fmt: on

# Day-number option lists (the manual append loops were replaced with
# list(range(...)), which yields the same values).
forecast_days_list = list(range(1, 30))    # 1..29
days_in_month_list = list(range(1, 32))    # 1..31
capacity_days_list = list(range(1, 21))    # 1..20
def days_list(days: int) -> List:
    """Return [1, 2, ..., days]; empty when days <= 0.

    The original shadowed its own name with a local list and appended in a
    loop; list(range(...)) is equivalent and clearer.
    """
    return list(range(1, days + 1))
# Quarter-hour labels "HH:MM" for every hour/quarter combination.
# NOTE: the original loop rebound the module-level ``hours`` list to its last
# slot string ("22:45") via the inner assignment; this comprehension produces
# the same hours_list while leaving ``hours`` intact, which is almost
# certainly the intended behaviour.
hours_list = [f"{h}:{q}" for h in hours for q in quarters]
def month_start_to_str(month_start):
    """Convert an underscore date "YYYY_MM_DD" into dashed "YYYY-MM-DD".

    Fixed-position slicing is kept deliberately: only the separators at
    positions 4 and 7 are replaced, regardless of their characters.
    """
    raw = month_start
    return "-".join((raw[:4], raw[5:7], raw[8:10]))
def date_str_to_date(date: str):
    """Parse a "YYYY-MM-DD" string into a datetime.date."""
    return datetime.strptime(date, "%Y-%m-%d").date()
# Sentinel "end of time" date used for open-ended validity ranges.
far_date = date_str_to_date("9999-12-31")
def switch_state(value: bool, set_value: Callable):
    """Toggle a boolean state through its setter callback.

    Calls ``set_value`` with the negation of *value* and always returns True
    (preserving the original function's unconditional return value).

    The original ``if value == True / elif value == False`` chain is
    equivalent to ``not value`` for the annotated bool input.
    """
    set_value(not value)
    return True
| 21.571429
| 70
| 0.651766
|
4a0c89b936f8e76b2a038c671af64b2395d40269
| 2,974
|
py
|
Python
|
ciscoisesdk/models/validators/v3_0_0/jsd_d011417d18d055ccb864c1dc2ae0456d.py
|
oianson/ciscoisesdk
|
c8fe9d80416048dd0ff2241209c4f78ab78c1a4a
|
[
"MIT"
] | null | null | null |
ciscoisesdk/models/validators/v3_0_0/jsd_d011417d18d055ccb864c1dc2ae0456d.py
|
oianson/ciscoisesdk
|
c8fe9d80416048dd0ff2241209c4f78ab78c1a4a
|
[
"MIT"
] | null | null | null |
ciscoisesdk/models/validators/v3_0_0/jsd_d011417d18d055ccb864c1dc2ae0456d.py
|
oianson/ciscoisesdk
|
c8fe9d80416048dd0ff2241209c4f78ab78c1a4a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Identity Services Engine leaveDomainWithAllNodes data model.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from ciscoisesdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorD011417D18D055CcB864C1Dc2Ae0456D(object):
    """leaveDomainWithAllNodes request schema definition."""
    def __init__(self):
        super(JSONSchemaValidatorD011417D18D055CcB864C1Dc2Ae0456D, self).__init__()
        # Compile the request schema once; validate() reuses the compiled
        # checker. The schema is given as a dict literal (key order preserved
        # from the original JSON text) instead of json.loads on a string —
        # json.loads would have produced this exact dict.
        self._validator = fastjsonschema.compile({
            "$schema": "http://json-schema.org/draft-04/schema#",
            "properties": {
                "OperationAdditionalData": {
                    "properties": {
                        "additionalData": {
                            "items": {
                                "properties": {
                                    "name": {
                                        "type": "string"
                                    },
                                    "value": {
                                        "type": "string"
                                    }
                                },
                                "required": [
                                    "name",
                                    "value"
                                ],
                                "type": "object"
                            },
                            "type": "array"
                        }
                    },
                    "required": [
                        "additionalData"
                    ],
                    "type": "object"
                }
            },
            "required": [
                "OperationAdditionalData"
            ],
            "type": "object"
        })

    def validate(self, request):
        """Raise MalformedRequest when *request* violates the schema."""
        try:
            self._validator(request)
        except fastjsonschema.exceptions.JsonSchemaException as e:
            raise MalformedRequest(
                '{} is invalid. Reason: {}'.format(request, e.message)
            )
| 33.41573
| 83
| 0.585407
|
4a0c8a77280747fd63e1c2894428fb57e18e82b5
| 1,546
|
py
|
Python
|
src/smtp_text.py
|
viraatdas/Twitter-Notifications
|
9bc3d97e71aeacdafba3a884c6536ade4d7566db
|
[
"MIT"
] | null | null | null |
src/smtp_text.py
|
viraatdas/Twitter-Notifications
|
9bc3d97e71aeacdafba3a884c6536ade4d7566db
|
[
"MIT"
] | 7
|
2021-01-14T06:23:43.000Z
|
2021-03-18T05:54:03.000Z
|
src/smtp_text.py
|
viraatdas/Twitter-Notifications
|
9bc3d97e71aeacdafba3a884c6536ade4d7566db
|
[
"MIT"
] | null | null | null |
import smtplib
import email
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.message import EmailMessage
class smtp_text:
    """Send SMS messages by emailing the major US carriers' SMS gateways
    through a Gmail SMTP session."""

    def __init__(self, GMAIL_EMAIL, GMAIL_PASSWORD):
        self.GMAIL_EMAIL = GMAIL_EMAIL
        self.GMAIL_PASSWORD = GMAIL_PASSWORD
        # Lazily created SMTP session; call reconnect() before sending.
        self.server = None
        # Email-to-SMS gateway domains, one per carrier.
        self.SmsGateways = [
            'tmomail.net',  # tmobile
            'mms.att.net',  # at&t
            'vtext.com',  # verizon
            'pm.sprint.com',  # sprint
            'sms.mycricket.com',  # cricket
            'sms.myboostmobile.com'  # boost
        ]

    def reconnect(self):
        """(Re)establish a TLS session with Gmail's SMTP server and log in."""
        self.server = smtplib.SMTP("smtp.gmail.com", 587)
        self.server.starttls()
        self.server.login(self.GMAIL_EMAIL, self.GMAIL_PASSWORD)

    def send_message(self, phone, message):
        """Relay *message* to *phone* via every known carrier gateway.

        The carrier behind a number is unknown (and acquisitions blur the
        lines), so the message is attempted through all gateways; individual
        failures are printed and skipped.
        """
        message += "\n\nTweet powered by VIRU coins"
        for domain in self.SmsGateways:
            recipient = f"{phone}@{domain}"
            payload = f"From: {self.GMAIL_EMAIL}\r\nTo: {recipient}\r\nSubject: \r\n\r\n{message}"
            try:
                self.server.sendmail(
                    self.GMAIL_EMAIL, recipient, payload)
            except Exception as e:
                print(e)
                continue
|
4a0c8c093d62b0a2ec857a640bfb651888ad56ae
| 14,850
|
py
|
Python
|
sdk/python/pulumi_azure_native/apimanagement/email_template.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/apimanagement/email_template.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/apimanagement/email_template.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['EmailTemplateArgs', 'EmailTemplate']
@pulumi.input_type
class EmailTemplateArgs:
    # NOTE: auto-generated by the Pulumi SDK Generator; the @pulumi.input_type
    # decorator introspects this signature, so keep names/order unchanged.
    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 service_name: pulumi.Input[str],
                 body: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 parameters: Optional[pulumi.Input[Sequence[pulumi.Input['EmailTemplateParametersContractPropertiesArgs']]]] = None,
                 subject: Optional[pulumi.Input[str]] = None,
                 template_name: Optional[pulumi.Input[str]] = None,
                 title: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a EmailTemplate resource.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[str] service_name: The name of the API Management service.
        :param pulumi.Input[str] body: Email Template Body. This should be a valid XDocument
        :param pulumi.Input[str] description: Description of the Email Template.
        :param pulumi.Input[Sequence[pulumi.Input['EmailTemplateParametersContractPropertiesArgs']]] parameters: Email Template Parameter values.
        :param pulumi.Input[str] subject: Subject of the Template.
        :param pulumi.Input[str] template_name: Email Template Name Identifier.
        :param pulumi.Input[str] title: Title of the Template.
        """
        # Required args are always recorded; optional args only when provided,
        # so unset values do not shadow provider-side defaults.
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "service_name", service_name)
        if body is not None:
            pulumi.set(__self__, "body", body)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if parameters is not None:
            pulumi.set(__self__, "parameters", parameters)
        if subject is not None:
            pulumi.set(__self__, "subject", subject)
        if template_name is not None:
            pulumi.set(__self__, "template_name", template_name)
        if title is not None:
            pulumi.set(__self__, "title", title)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="serviceName")
    def service_name(self) -> pulumi.Input[str]:
        """
        The name of the API Management service.
        """
        return pulumi.get(self, "service_name")

    @service_name.setter
    def service_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "service_name", value)

    @property
    @pulumi.getter
    def body(self) -> Optional[pulumi.Input[str]]:
        """
        Email Template Body. This should be a valid XDocument
        """
        return pulumi.get(self, "body")

    @body.setter
    def body(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "body", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Description of the Email Template.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def parameters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EmailTemplateParametersContractPropertiesArgs']]]]:
        """
        Email Template Parameter values.
        """
        return pulumi.get(self, "parameters")

    @parameters.setter
    def parameters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['EmailTemplateParametersContractPropertiesArgs']]]]):
        pulumi.set(self, "parameters", value)

    @property
    @pulumi.getter
    def subject(self) -> Optional[pulumi.Input[str]]:
        """
        Subject of the Template.
        """
        return pulumi.get(self, "subject")

    @subject.setter
    def subject(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subject", value)

    @property
    @pulumi.getter(name="templateName")
    def template_name(self) -> Optional[pulumi.Input[str]]:
        """
        Email Template Name Identifier.
        """
        return pulumi.get(self, "template_name")

    @template_name.setter
    def template_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "template_name", value)

    @property
    @pulumi.getter
    def title(self) -> Optional[pulumi.Input[str]]:
        """
        Title of the Template.
        """
        return pulumi.get(self, "title")

    @title.setter
    def title(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "title", value)
class EmailTemplate(pulumi.CustomResource):
    # NOTE: auto-generated Pulumi resource wrapper; the two @overload stubs
    # only describe the accepted call shapes — _internal_init does the work.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 body: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 parameters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EmailTemplateParametersContractPropertiesArgs']]]]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 service_name: Optional[pulumi.Input[str]] = None,
                 subject: Optional[pulumi.Input[str]] = None,
                 template_name: Optional[pulumi.Input[str]] = None,
                 title: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Email Template details.
        API Version: 2020-12-01.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] body: Email Template Body. This should be a valid XDocument
        :param pulumi.Input[str] description: Description of the Email Template.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EmailTemplateParametersContractPropertiesArgs']]]] parameters: Email Template Parameter values.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[str] service_name: The name of the API Management service.
        :param pulumi.Input[str] subject: Subject of the Template.
        :param pulumi.Input[str] template_name: Email Template Name Identifier.
        :param pulumi.Input[str] title: Title of the Template.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: EmailTemplateArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Email Template details.
        API Version: 2020-12-01.

        :param str resource_name: The name of the resource.
        :param EmailTemplateArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the (args-object) and (keyword-arguments) call forms.
        resource_args, opts = _utilities.get_resource_args_opts(EmailTemplateArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       body: Optional[pulumi.Input[str]] = None,
                       description: Optional[pulumi.Input[str]] = None,
                       parameters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['EmailTemplateParametersContractPropertiesArgs']]]]] = None,
                       resource_group_name: Optional[pulumi.Input[str]] = None,
                       service_name: Optional[pulumi.Input[str]] = None,
                       subject: Optional[pulumi.Input[str]] = None,
                       template_name: Optional[pulumi.Input[str]] = None,
                       title: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource (no id): validate and collect all inputs.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = EmailTemplateArgs.__new__(EmailTemplateArgs)
            __props__.__dict__["body"] = body
            __props__.__dict__["description"] = description
            __props__.__dict__["parameters"] = parameters
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            if service_name is None and not opts.urn:
                raise TypeError("Missing required property 'service_name'")
            __props__.__dict__["service_name"] = service_name
            __props__.__dict__["subject"] = subject
            __props__.__dict__["template_name"] = template_name
            __props__.__dict__["title"] = title
            # Output-only properties start as None and are filled by the engine.
            __props__.__dict__["is_default"] = None
            __props__.__dict__["name"] = None
            __props__.__dict__["type"] = None
        # Aliases keep state continuity across older provider/API versions.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:apimanagement:EmailTemplate"), pulumi.Alias(type_="azure-native:apimanagement/v20170301:EmailTemplate"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20170301:EmailTemplate"), pulumi.Alias(type_="azure-native:apimanagement/v20180101:EmailTemplate"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180101:EmailTemplate"), pulumi.Alias(type_="azure-native:apimanagement/v20180601preview:EmailTemplate"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20180601preview:EmailTemplate"), pulumi.Alias(type_="azure-native:apimanagement/v20190101:EmailTemplate"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20190101:EmailTemplate"), pulumi.Alias(type_="azure-native:apimanagement/v20191201:EmailTemplate"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201:EmailTemplate"), pulumi.Alias(type_="azure-native:apimanagement/v20191201preview:EmailTemplate"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20191201preview:EmailTemplate"), pulumi.Alias(type_="azure-native:apimanagement/v20200601preview:EmailTemplate"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20200601preview:EmailTemplate"), pulumi.Alias(type_="azure-native:apimanagement/v20201201:EmailTemplate"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20201201:EmailTemplate"), pulumi.Alias(type_="azure-native:apimanagement/v20210101preview:EmailTemplate"), pulumi.Alias(type_="azure-nextgen:apimanagement/v20210101preview:EmailTemplate")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(EmailTemplate, __self__).__init__(
            'azure-native:apimanagement:EmailTemplate',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'EmailTemplate':
        """
        Get an existing EmailTemplate resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # All properties are None here: the engine hydrates them from state.
        __props__ = EmailTemplateArgs.__new__(EmailTemplateArgs)

        __props__.__dict__["body"] = None
        __props__.__dict__["description"] = None
        __props__.__dict__["is_default"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["parameters"] = None
        __props__.__dict__["subject"] = None
        __props__.__dict__["title"] = None
        __props__.__dict__["type"] = None
        return EmailTemplate(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def body(self) -> pulumi.Output[str]:
        """
        Email Template Body. This should be a valid XDocument
        """
        return pulumi.get(self, "body")

    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        Description of the Email Template.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="isDefault")
    def is_default(self) -> pulumi.Output[bool]:
        """
        Whether the template is the default template provided by Api Management or has been edited.
        """
        return pulumi.get(self, "is_default")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def parameters(self) -> pulumi.Output[Optional[Sequence['outputs.EmailTemplateParametersContractPropertiesResponse']]]:
        """
        Email Template Parameter values.
        """
        return pulumi.get(self, "parameters")

    @property
    @pulumi.getter
    def subject(self) -> pulumi.Output[str]:
        """
        Subject of the Template.
        """
        return pulumi.get(self, "subject")

    @property
    @pulumi.getter
    def title(self) -> pulumi.Output[Optional[str]]:
        """
        Title of the Template.
        """
        return pulumi.get(self, "title")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type for API Management resource.
        """
        return pulumi.get(self, "type")
| 44.196429
| 1,515
| 0.654276
|
4a0c8c6fd9034fbdfd1212124652dc10c2aa4210
| 5,934
|
py
|
Python
|
kubernetes_asyncio/client/models/v2beta1_pods_metric_status.py
|
playground-julia/kubernetes_asyncio
|
91b2c41eedd282d9ebc059377fb7f207e220133d
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/models/v2beta1_pods_metric_status.py
|
playground-julia/kubernetes_asyncio
|
91b2c41eedd282d9ebc059377fb7f207e220133d
|
[
"Apache-2.0"
] | null | null | null |
kubernetes_asyncio/client/models/v2beta1_pods_metric_status.py
|
playground-julia/kubernetes_asyncio
|
91b2c41eedd282d9ebc059377fb7f207e220133d
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.15.9
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes_asyncio.client.configuration import Configuration
class V2beta1PodsMetricStatus(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> declared OpenAPI type.
    openapi_types = {
        'current_average_value': 'str',
        'metric_name': 'str',
        'selector': 'V1LabelSelector'
    }

    # Python attribute name -> JSON field name on the wire.
    attribute_map = {
        'current_average_value': 'currentAverageValue',
        'metric_name': 'metricName',
        'selector': 'selector'
    }

    def __init__(self, current_average_value=None, metric_name=None, selector=None, local_vars_configuration=None):  # noqa: E501
        """V2beta1PodsMetricStatus - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._current_average_value = None
        self._metric_name = None
        self._selector = None
        self.discriminator = None

        # Assign through the property setters so validation runs.
        self.current_average_value = current_average_value
        self.metric_name = metric_name
        if selector is not None:
            self.selector = selector

    @property
    def current_average_value(self):
        """Gets the current_average_value of this V2beta1PodsMetricStatus.  # noqa: E501

        currentAverageValue is the current value of the average of the metric across all relevant pods (as a quantity)  # noqa: E501

        :return: The current_average_value of this V2beta1PodsMetricStatus.  # noqa: E501
        :rtype: str
        """
        return self._current_average_value

    @current_average_value.setter
    def current_average_value(self, current_average_value):
        """Sets the current_average_value of this V2beta1PodsMetricStatus.

        currentAverageValue is the current value of the average of the metric across all relevant pods (as a quantity)  # noqa: E501

        :param current_average_value: The current_average_value of this V2beta1PodsMetricStatus.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and current_average_value is None:  # noqa: E501
            raise ValueError("Invalid value for `current_average_value`, must not be `None`")  # noqa: E501

        self._current_average_value = current_average_value

    @property
    def metric_name(self):
        """Gets the metric_name of this V2beta1PodsMetricStatus.  # noqa: E501

        metricName is the name of the metric in question  # noqa: E501

        :return: The metric_name of this V2beta1PodsMetricStatus.  # noqa: E501
        :rtype: str
        """
        return self._metric_name

    @metric_name.setter
    def metric_name(self, metric_name):
        """Sets the metric_name of this V2beta1PodsMetricStatus.

        metricName is the name of the metric in question  # noqa: E501

        :param metric_name: The metric_name of this V2beta1PodsMetricStatus.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and metric_name is None:  # noqa: E501
            raise ValueError("Invalid value for `metric_name`, must not be `None`")  # noqa: E501

        self._metric_name = metric_name

    @property
    def selector(self):
        """Gets the selector of this V2beta1PodsMetricStatus.  # noqa: E501


        :return: The selector of this V2beta1PodsMetricStatus.  # noqa: E501
        :rtype: V1LabelSelector
        """
        return self._selector

    @selector.setter
    def selector(self, selector):
        """Sets the selector of this V2beta1PodsMetricStatus.


        :param selector: The selector of this V2beta1PodsMetricStatus.  # noqa: E501
        :type: V1LabelSelector
        """

        self._selector = selector

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialise nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V2beta1PodsMetricStatus):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V2beta1PodsMetricStatus):
            return True

        return self.to_dict() != other.to_dict()
| 33.150838
| 132
| 0.634816
|
4a0c8d0855750753bb3bb747dd23037c86dea364
| 3,021
|
py
|
Python
|
wsgi-server.py
|
iboukris/webgss
|
0e4732e2f7b4de111eafc60b5876aedcc3577f97
|
[
"MIT"
] | 1
|
2021-12-26T22:17:24.000Z
|
2021-12-26T22:17:24.000Z
|
wsgi-server.py
|
iboukris/webgss
|
0e4732e2f7b4de111eafc60b5876aedcc3577f97
|
[
"MIT"
] | null | null | null |
wsgi-server.py
|
iboukris/webgss
|
0e4732e2f7b4de111eafc60b5876aedcc3577f97
|
[
"MIT"
] | null | null | null |
# MIT Licensed, see LICENSE file
# Copyright (c) 2021 Isaac Boukris <iboukris@gmail.com>
import os
import sys
import mimetypes
import kdcproxy
import base64
import gssapi
from wsgiref.simple_server import make_server
from wsgiref import util
# Run with: KRB5_KTNAME=keytab python ./wsgi-server.py
class gssAuth(object):
    """WSGI middleware enforcing HTTP Negotiate (SPNEGO/GSSAPI) authentication.

    On success, REMOTE_USER is set to the authenticated principal and the
    wrapped app's response gains a WWW-Authenticate header carrying the
    mutual-authentication reply token (when the mechanism produced one).
    """

    def __init__(self, app):
        self.app = app
        # Acceptor credentials are read from the keytab (see KRB5_KTNAME).
        self.creds = gssapi.Credentials(usage='accept')

    def __call__(self, environ, start_response):
        auth = environ.get('HTTP_AUTHORIZATION')
        if not auth or not auth.startswith('Negotiate '):
            # No usable token yet: challenge the client to negotiate.
            start_response('401 Unauthorized', [('WWW-Authenticate', 'Negotiate')])
            return [b'']
        req_token = base64.b64decode(auth[len('Negotiate '):])
        ctx = gssapi.SecurityContext(creds=self.creds, usage='accept')
        rep_token = ctx.step(req_token)
        if not ctx.complete:
            # A single round-trip is expected; anything else is a failure.
            # BUG FIX: PEP 3333 requires a response_headers list as the second
            # argument to start_response — the original call omitted it and
            # would raise TypeError on this path.
            start_response('500 Internal Server Error',
                           [('Content-Type', 'text/plain')])
            return [b'']
        environ['REMOTE_USER'] = str(ctx.initiator_name)

        def wrap_start_response(status, headers, exc_info=None):
            # Attach the mutual-auth reply token to whatever the app sends.
            if rep_token:
                val = 'Negotiate ' + base64.b64encode(rep_token).decode('ascii')
                headers.append(('WWW-Authenticate', val))
            return start_response(status, headers, exc_info)

        return self.app(environ, wrap_start_response)
class webGssApp(object):
    """Top-level WSGI router.

    /KdcProxy  -> the kdcproxy application
    /login/*   -> static files served from *logindir* (if configured)
    everything else -> the GSSAPI-protected application
    """

    def __init__(self, app, kdcproxy, logindir=None):
        # Wrap the inner app so it is only reachable after Negotiate auth.
        self.app = gssAuth(app)
        self.kproxy = kdcproxy
        self.logindir = logindir

    def __call__(self, environ, start_response):
        path = environ.get('PATH_INFO', '')
        if path == '/KdcProxy':
            return self.kproxy(environ, start_response)
        if self.logindir and path.startswith('/login/'):
            return self._serve_static(path[len('/login/'):], start_response)
        return self.app(environ, start_response)

    def _serve_static(self, rel_path, start_response):
        """Serve one file from logindir; extension-less names get index.html.

        SECURITY FIX: the requested path is resolved with realpath and
        confined to logindir, so '..' segments can no longer escape the
        static root (the original joined the raw client path directly).
        """
        base = os.path.realpath(self.logindir)
        fn = os.path.join(base, rel_path)
        # A final component without a dot is treated as a directory request.
        if '.' not in fn.split(os.path.sep)[-1]:
            fn = os.path.join(fn, 'index.html')
        fn = os.path.realpath(fn)
        inside = fn == base or fn.startswith(base + os.path.sep)
        if inside and os.path.exists(fn):
            mtype = mimetypes.guess_type(fn)[0] or 'text/plain'
            start_response('200 OK', [('Content-Type', mtype)])
            return util.FileWrapper(open(fn, "rb"))
        start_response('404 Not Found', [('Content-Type', 'text/plain')])
        return [b'not found']
class helloApp(object):
    """Minimal WSGI application that greets the authenticated user.

    Expects REMOTE_USER to have been set by the auth middleware.
    """

    def __call__(self, environ, start_response):
        start_response('200 OK', [('Content-type', 'text/plain')])
        user = environ.get('REMOTE_USER')
        greeting = 'Hello ' + user + '!\n'
        return [greeting.encode('utf8')]
if __name__ == '__main__':
    # Usage: python wsgi-server.py [hostname [port]]
    # Defaults below apply when arguments are omitted.
    hostname = 'localhost'
    port = 8080
    if len(sys.argv) > 1:
        hostname = sys.argv[1]
    if len(sys.argv) > 2:
        port = int(sys.argv[2])
    # Compose the stack: hello app behind GSS auth, plus kdcproxy, with the
    # static login directory rooted at the current working directory.
    wgss = webGssApp(helloApp(), kdcproxy.Application(), os.path.abspath(('.')))
    server = make_server(hostname, port, wgss)
    print('Server started')
    server.serve_forever()
| 29.910891
| 83
| 0.609401
|
4a0c8de977ca9f744fe1def0ad4fca532a94deb4
| 1,304
|
py
|
Python
|
ch12/MC_All_Paths/all_paths_12_nodes.py
|
yosais/Computer-Simulation-Book
|
87b2d83eded02f69cd7b8d839adfa554b25ae19d
|
[
"MIT"
] | 16
|
2017-10-26T02:35:31.000Z
|
2021-11-24T04:31:32.000Z
|
ch12/MC_All_Paths/all_paths_12_nodes.py
|
yosais/Computer-Simulation-Book
|
87b2d83eded02f69cd7b8d839adfa554b25ae19d
|
[
"MIT"
] | null | null | null |
ch12/MC_All_Paths/all_paths_12_nodes.py
|
yosais/Computer-Simulation-Book
|
87b2d83eded02f69cd7b8d839adfa554b25ae19d
|
[
"MIT"
] | 11
|
2018-02-04T05:10:11.000Z
|
2021-11-24T05:30:45.000Z
|
import timeit
def find_all_paths(graph, start, end, path=None):
    """Return every simple path from *start* to *end* in *graph* (DFS).

    :param graph: mapping from a node to an iterable of its neighbours.
    :param start: node each returned path must begin with.
    :param end: node each returned path must finish at.
    :param path: internal accumulator for the recursion; callers omit it.
    :return: list of paths, each a list of nodes; nodes never repeat
        within one path, so cycles are excluded.

    BUG FIX: the original declared a mutable default argument
    (``path=[]``); ``None`` is the safe idiom.  Nodes absent from *graph*
    now yield no paths instead of raising KeyError (the guard existed in
    the original but was commented out).
    """
    if path is None:
        path = []
    path = path + [start]  # copy so each recursive branch owns its path
    if start == end:
        return [path]
    if start not in graph:  # dead end: node has no adjacency entry
        return []
    paths = []
    for node in graph[start]:
        if node not in path:  # skip nodes already on this path (no cycles)
            for newpath in find_all_paths(graph, node, end, path):
                paths.append(newpath)
    return paths
# Dense 12-node graph as adjacency lists (most, but not all, pairs are
# connected), so the number of simple paths is combinatorially large.
graph = {
    "1": ("2","3","4","5","6","7","9","10","11","12"),
    "2": ("1","3","4","5","6","7","8","9","10","11","12"),
    "3": ("1","2","4","5","6","7","8","9","11","12"),
    "4": ("1","2","3","5","6","7","8","9","10","12"),
    "5": ("1","2","3","6","7","8","10","11","12"),
    "6": ("1","2","3","4","5","7","8","9","10","11"),
    "7": ("1","2","3","4","5","6","8","9","10","11","12"),
    "8": ("1","2","3","4","5","6","7","9","10","11","12"),
    "9": ("1","2","3","4","5","6","7","8","10","11","12"),
    "10": ("1","2","3","4","5","6","7","8","9","11"),
    "11": ("1","2","3","4","5","6","7","8","9","10","12"),
    "12": ("1","2","3","4","5","6","7","8","9","10","11"),
}
# Time the exhaustive enumeration of simple paths from node "1" to "12".
start = timeit.default_timer()
paths = find_all_paths(graph, "1", "12")
stop = timeit.default_timer()
print("Number of Paths = ", len(paths))
print("Runtime = ", round(stop - start, 2), " seconds")
| 31.047619
| 61
| 0.411043
|
4a0c8ee3449ff57355a3e148c02b42d43906fa0e
| 7,888
|
py
|
Python
|
victor_hardware_interface/scripts/xbox_control.py
|
MMintLab/kuka_iiwa_interface
|
0dd258641377263e7275bc63f37cf32eb12f3e56
|
[
"BSD-2-Clause"
] | 5
|
2021-01-11T09:00:26.000Z
|
2021-12-13T15:59:01.000Z
|
victor_hardware_interface/scripts/xbox_control.py
|
MMintLab/kuka_iiwa_interface
|
0dd258641377263e7275bc63f37cf32eb12f3e56
|
[
"BSD-2-Clause"
] | 35
|
2020-07-01T14:48:40.000Z
|
2021-07-13T18:38:53.000Z
|
victor_hardware_interface/scripts/xbox_control.py
|
MMintLab/kuka_iiwa_interface
|
0dd258641377263e7275bc63f37cf32eb12f3e56
|
[
"BSD-2-Clause"
] | 1
|
2021-01-08T23:39:17.000Z
|
2021-01-08T23:39:17.000Z
|
#! /usr/bin/env python
# ROS node to turn joystick msgs into Messages for Victor
import rospy
from arc_utilities.listener import Listener
from victor_hardware_interface_msgs.msg import Robotiq3FingerStatus, Robotiq3FingerCommand
from sensor_msgs.msg import Joy
from copy import deepcopy
from numpy import clip
from arc_utilities.ros_helpers import joy_to_xbox
class VictorJoystick:
    """Map xbox joystick input onto Robotiq 3-finger gripper commands.

    Triggers/bumpers drive the left/right gripper fingers; the A/B/X/Y
    buttons drive the scissor axes.  Releasing a control stops the motion
    near the current position rather than completing the last command.
    """

    def __init__(self):
        # Minimum seconds between repeated informational log lines.
        self.output_throttle_period = 5.0
        self.gripper_status = \
            {"right": Listener("right_arm/gripper_status", Robotiq3FingerStatus),
             "left": Listener("left_arm/gripper_status", Robotiq3FingerStatus)}
        self.gripper_command_publisher = \
            {"right": rospy.Publisher("right_arm/gripper_command", Robotiq3FingerCommand, queue_size=1),
             "left": rospy.Publisher("left_arm/gripper_command", Robotiq3FingerCommand, queue_size=1)}
        self.joy_sub = rospy.Subscriber("joy", Joy, self.joy_callback)
        # Previous controller state; used to detect button/axis transitions.
        self.prev_xbox_msg = None

    @staticmethod
    def minus(xbox_lhs, xbox_rhs):
        """
        Returns a new Xbox_msg object with values lhs - rhs
        """
        # Field-by-field difference; a non-zero entry marks a state change.
        xbox_diff = deepcopy(xbox_lhs)
        xbox_diff.A -= xbox_rhs.A
        xbox_diff.B -= xbox_rhs.B
        xbox_diff.X -= xbox_rhs.X
        xbox_diff.Y -= xbox_rhs.Y
        xbox_diff.LB -= xbox_rhs.LB
        xbox_diff.RB -= xbox_rhs.RB
        xbox_diff.back -= xbox_rhs.back
        xbox_diff.start -= xbox_rhs.start
        xbox_diff.power -= xbox_rhs.power
        xbox_diff.stick_button_left -= xbox_rhs.stick_button_left
        xbox_diff.stick_button_right -= xbox_rhs.stick_button_right
        xbox_diff.LH -= xbox_rhs.LH
        xbox_diff.LV -= xbox_rhs.LV
        xbox_diff.LT -= xbox_rhs.LT
        xbox_diff.RH -= xbox_rhs.RH
        xbox_diff.RV -= xbox_rhs.RV
        xbox_diff.RT -= xbox_rhs.RT
        xbox_diff.DH -= xbox_rhs.DH
        xbox_diff.DV -= xbox_rhs.DV
        return xbox_diff

    def joy_callback(self, joy_msg):
        """
        Assumes that we are using xboxdrv without mimic mode
        """
        xbox_msg = joy_to_xbox(joy_msg, xpad=False)
        if self.prev_xbox_msg is None:
            # First message: seed the previous state so diffs start at zero.
            self.prev_xbox_msg = xbox_msg
        # Controls can be enabled/disabled at runtime via private ROS params.
        enable_finger_open_close_control = rospy.get_param("~enable_finger_open_close_control", True)
        enable_scissor_open_close_control = rospy.get_param("~enable_scissor_open_close_control", True)
        rospy.loginfo_throttle(self.output_throttle_period,
                               "Finger open close control enabled: " + str(enable_finger_open_close_control))
        rospy.loginfo_throttle(self.output_throttle_period,
                               "Scissor open close control enabled: " + str(enable_scissor_open_close_control))
        if enable_finger_open_close_control:
            self.finger_open_close_callback(xbox_msg)
        if enable_scissor_open_close_control:
            self.scissor_open_close_callback(xbox_msg)
        self.prev_xbox_msg = xbox_msg

    def finger_open_close_callback(self, xbox_msg):
        """Open and close the gripper fingers"""
        xboxdiff = VictorJoystick.minus(xbox_msg, self.prev_xbox_msg)
        # Offset added to the current position when a control is released,
        # so the fingers coast to a stop instead of snapping back.
        gripper_stop_dist = 0.05
        # Left trigger closes / left bumper opens the left gripper.
        if xboxdiff.LT > 0:
            self.stop_gripper("left", gripper_stop_dist)
        elif xboxdiff.LB < 0:
            self.stop_gripper("left", -gripper_stop_dist)
        else:
            if xbox_msg.LT == -1:
                self.close_gripper("left")
            if xbox_msg.LB:
                self.open_gripper("left")

        # Right trigger closes / right bumper opens the right gripper.
        if xboxdiff.RT > 0:
            self.stop_gripper("right", gripper_stop_dist)
        elif xboxdiff.RB < 0:
            self.stop_gripper("right", -gripper_stop_dist)
        else:
            if xbox_msg.RT == -1:
                self.close_gripper("right")
            if xbox_msg.RB:
                self.open_gripper("right")

    def scissor_open_close_callback(self, xbox_msg):
        """Open and close the scissors on the gripper"""
        xboxdiff = VictorJoystick.minus(xbox_msg, self.prev_xbox_msg)
        # Offset applied on release so the scissor coasts to a stop.
        scissor_stop_dist = 0.05
        # X closes / Y opens the left scissor.
        if xboxdiff.X < 0:
            self.stop_scissor("left", scissor_stop_dist)
        elif xboxdiff.Y < 0:
            self.stop_scissor("left", -scissor_stop_dist)
        else:
            if xbox_msg.X:
                self.close_scissor("left")
            if xbox_msg.Y:
                self.open_scissor("left")

        # A closes / B opens the right scissor.
        if xboxdiff.A < 0:
            self.stop_scissor("right", scissor_stop_dist)
        elif xboxdiff.B < 0:
            self.stop_scissor("right", -scissor_stop_dist)
        else:
            if xbox_msg.A:
                self.close_scissor("right")
            if xbox_msg.B:
                self.open_scissor("right")

    def stop_gripper(self, gripper_name, motion=0.0):
        """stops gripper fingers in current position + motion to allow for delay"""
        cur = self.gripper_status[gripper_name].get()
        # Positions are commanded in the normalised [0, 1] range.
        finger_pos = [clip(cur.finger_a_status.position + motion, 0.0, 1.0),
                      clip(cur.finger_b_status.position + motion, 0.0, 1.0),
                      clip(cur.finger_c_status.position + motion, 0.0, 1.0)]
        self.set_gripper(gripper_name, finger_pos=finger_pos)

    def stop_scissor(self, gripper_name, motion=0.0):
        """stops gripper scissor in current position + motion to allow for delay"""
        cur = self.gripper_status[gripper_name].get()
        scissor_pos = cur.scissor_status.position + motion
        self.set_gripper(gripper_name, scissor_pos=scissor_pos)

    def close_gripper(self, gripper_name):
        """Command the named gripper's fingers fully closed."""
        self.set_gripper(gripper_name, finger_pos=(1.0, 1.0, 1.0))

    def open_gripper(self, gripper_name):
        """Command the named gripper's fingers fully open."""
        self.set_gripper(gripper_name, finger_pos=(0.0, 0.0, 0.0))

    def close_scissor(self, gripper_name):
        """Command the named gripper's scissor fully closed."""
        self.set_gripper(gripper_name, scissor_pos=1.0)

    def open_scissor(self, gripper_name):
        """Command the named gripper's scissor fully open."""
        self.set_gripper(gripper_name, scissor_pos=0.0)

    def set_gripper(self, gripper_name, finger_pos=None, scissor_pos=None):
        """
        Sets the gripper finger position, as well as the scissor

        Parameters:
        gripper_name string - "left" or "right"
        finger_pos   float[] - position values for fingers a,b,c. [0 to 1]
        scissor_pos  float - position values for scissor. [0 to 1]
        """
        cur = self.gripper_status[gripper_name].get()
        cmd = self.default_gripper_command()

        # Set the finger position if commanded
        if finger_pos is not None:
            cmd.finger_a_command.position = finger_pos[0]
            cmd.finger_b_command.position = finger_pos[1]
            cmd.finger_c_command.position = finger_pos[2]
        else:
            # Otherwise re-issue the last requested positions so the
            # unspecified axes hold their targets.
            cmd.finger_a_command.position = cur.finger_a_status.position_request
            cmd.finger_b_command.position = cur.finger_b_status.position_request
            cmd.finger_c_command.position = cur.finger_c_status.position_request

        # Set the scissor position if commanded
        if scissor_pos is not None:
            cmd.scissor_command.position = scissor_pos
        else:
            cmd.scissor_command.position = cur.scissor_status.position_request

        self.gripper_command_publisher[gripper_name].publish(cmd)

    def default_gripper_command(self):
        """Return a command template with moderate speed and full force."""
        cmd = Robotiq3FingerCommand()
        cmd.finger_a_command.speed = 0.5
        cmd.finger_b_command.speed = 0.5
        cmd.finger_c_command.speed = 0.5
        cmd.scissor_command.speed = 1.0
        cmd.finger_a_command.force = 1.0
        cmd.finger_b_command.force = 1.0
        cmd.finger_c_command.force = 1.0
        cmd.scissor_command.force = 1.0
        cmd.scissor_command.position = 1.0
        return cmd
def main():
    """Entry point: start the ROS node and process joystick messages until shutdown."""
    rospy.init_node('xbox_control')
    # Keep a reference so the subscriber/publishers stay alive while spinning.
    vj = VictorJoystick()
    rospy.spin()


if __name__ == "__main__":
    main()
| 35.372197
| 111
| 0.64427
|
4a0c8f82f50046e7a18aa4686ffb33bdbe74d216
| 2,207
|
py
|
Python
|
tests/test_reliable_executor.py
|
Kazzer/reliable_executor
|
e6c32711e80112cdff22bb592ddd636de59a4272
|
[
"WTFPL"
] | null | null | null |
tests/test_reliable_executor.py
|
Kazzer/reliable_executor
|
e6c32711e80112cdff22bb592ddd636de59a4272
|
[
"WTFPL"
] | null | null | null |
tests/test_reliable_executor.py
|
Kazzer/reliable_executor
|
e6c32711e80112cdff22bb592ddd636de59a4272
|
[
"WTFPL"
] | null | null | null |
#!/usr/bin/env python
"""Tests for the reliable_executor module"""
import functools
import random
import unittest
import reliable_executor
class ReliablyExecuteTest(unittest.TestCase):
    """Test Case for reliable_executor.reliably_execute()"""

    def setUp(self):
        """Pick a random failure count (1..4) used by the tests below."""
        self.number_of_fails = random.randint(1, 4)

    def intermittent_result(self, number_of_fails=0):
        """Yield `number_of_fails` failure markers, then a successful 0."""
        for _ in range(number_of_fails):
            yield UserWarning('Failing result')
        yield 0

    def intermittent_function(self, generator):
        """Pull the next value from `generator`; raise it when it is an exception."""
        outcome = next(generator)
        if isinstance(outcome, Exception):
            raise outcome
        return outcome

    def test_success_without_failures(self):
        """Tests that the correct result is returned if the function doesn't fail"""
        call = functools.partial(self.intermittent_function, self.intermittent_result())
        result = reliable_executor.reliably_execute(call, retry=0, wait=0)
        self.assertEqual(result, 0)

    def test_success_with_failures(self):
        """Tests that the correct result is returned if the function fails"""
        call = functools.partial(
            self.intermittent_function,
            self.intermittent_result(self.number_of_fails),
        )
        result = reliable_executor.reliably_execute(
            call,
            retry=self.number_of_fails,
            wait=0,
        )
        self.assertEqual(result, 0)

    def test_failure(self):
        """Tests that an exception is raised if the function doesn't succeed"""
        call = functools.partial(
            self.intermittent_function,
            self.intermittent_result(self.number_of_fails),
        )
        with self.assertRaises(RuntimeError):
            reliable_executor.reliably_execute(
                call,
                retry=(self.number_of_fails - 1),
                wait=0,
            )
| 30.232877
| 84
| 0.61169
|
4a0c90d56c79a77b28eff8bdd2c8692a958701b5
| 2,142
|
py
|
Python
|
setup.py
|
nepython/django-netjsongraph
|
ab8de783b71864e06cf0e0dc5d564559df3f21cd
|
[
"MIT"
] | null | null | null |
setup.py
|
nepython/django-netjsongraph
|
ab8de783b71864e06cf0e0dc5d564559df3f21cd
|
[
"MIT"
] | null | null | null |
setup.py
|
nepython/django-netjsongraph
|
ab8de783b71864e06cf0e0dc5d564559df3f21cd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
from django_netjsongraph import get_version
from setuptools import find_packages, setup
def get_install_requires():
    """
    Parse requirements.txt into a list of requirement strings.

    Skips comments, blank lines, and URL/VCS links (http/git prefixes).
    Returns stripped requirement specifiers suitable for `install_requires`.
    """
    requirements = []
    with open('requirements.txt') as reqs:
        for raw in reqs:
            # Strip first: raw lines keep their trailing '\n', so the old
            # `line == ''` blank-line check never matched and blank lines
            # leaked into the result.
            line = raw.strip()
            if not line or line.startswith(('#', 'http', 'git')):
                continue
            requirements.append(line)
    return requirements
# 'python setup.py publish' shortcut: clean bytecode caches, build sdist+wheel,
# sign-and-upload with twine, then remind the maintainer to tag the release.
if sys.argv[-1] == 'publish':
    # delete any *.pyc, *.pyo and __pycache__
    os.system('find . | grep -E "(__pycache__|\.pyc|\.pyo$)" | xargs rm -rf')
    os.system("python setup.py sdist bdist_wheel")
    os.system("twine upload -s dist/*")
    os.system("rm -rf dist build")
    args = {'version': get_version()}
    print("You probably want to also tag the version now:")
    print(" git tag -a %(version)s -m 'version %(version)s'" % args)
    print(" git push --tags")
    sys.exit()
# Package metadata and build configuration for django-netjsongraph.
setup(
    name='django-netjsongraph',
    version=get_version(),
    license='MIT',
    author='Federico Capoano',
    author_email='federico.capoano@gmail.com',
    description='Reusable django app for collecting and visualizing network topology',
    long_description=open('README.rst').read(),
    url='http://netjson.org',
    download_url='https://github.com/interop-dev/django-netjsongraph/releases',
    platforms=['Platform Independent'],
    keywords=['django', 'netjson', 'mesh', 'networking'],
    packages=find_packages(exclude=['tests']),
    install_requires=get_install_requires(),
    include_package_data=True,
    zip_safe=False,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Topic :: Internet :: WWW/HTTP',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Framework :: Django',
        'Topic :: System :: Networking',
        'Programming Language :: Python :: 3',
    ]
)
| 33.46875
| 99
| 0.644258
|
4a0c9361b851c727385ec38fc209d62634ee4d5e
| 4,288
|
py
|
Python
|
xmlschema/tests/check_memory.py
|
jayvdb/xmlschema
|
257ef230c4b4c42a3374d0a5bc620c68d579e0fd
|
[
"MIT"
] | null | null | null |
xmlschema/tests/check_memory.py
|
jayvdb/xmlschema
|
257ef230c4b4c42a3374d0a5bc620c68d579e0fd
|
[
"MIT"
] | null | null | null |
xmlschema/tests/check_memory.py
|
jayvdb/xmlschema
|
257ef230c4b4c42a3374d0a5bc620c68d579e0fd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c), 2016-2019, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
"""
Check xmlschema package memory usage.
Refs:
https://pypi.org/project/memory_profiler/
https://github.com/brunato/xmlschema/issues/32
"""
import argparse
from memory_profiler import profile
def test_choice_type(value):
if value not in (str(v) for v in range(1, 9)):
msg = "%r must be an integer between [1 ... 8]." % value
raise argparse.ArgumentTypeError(msg)
return int(value)
# Command-line interface: a mandatory test number (1-8) plus an optional XML file.
parser = argparse.ArgumentParser(add_help=True)
parser.usage = """%(prog)s TEST_NUM [XML_FILE]
Run memory tests:
1) Package import or schema build
2) Iterate XML file with parse
3) Iterate XML file with full iterparse
4) Iterate XML file with emptied iterparse
5) Decode XML file with xmlschema
6) Decode XML file with xmlschema in lazy mode
7) Validate XML file with xmlschema
8) Validate XML file with xmlschema in lazy mode
"""
parser.add_argument('test_num', metavar="TEST_NUM", type=test_choice_type, help="Test number to run")
parser.add_argument('xml_file', metavar='XML_FILE', nargs='?', help='Input XML file')
args = parser.parse_args()
# noinspection PyUnresolvedReferences
@profile
def import_package():
    """Measure the memory cost of importing xmlschema and its heavy dependencies."""
    # Imports of packages used by xmlschema that
    # have a significant memory usage impact.
    import decimal
    from urllib.error import URLError
    import lxml.etree
    import elementpath
    import xmlschema
    return xmlschema
@profile
def build_schema(source):
    """Measure memory while building an XMLSchema instance from *source*."""
    xs = xmlschema.XMLSchema(source)
    return xs
@profile
def etree_parse(source):
    """Baseline: parse the whole tree with ElementTree and walk every element."""
    xt = ElementTree.parse(source)
    for _ in xt.iter():
        pass
@profile
def etree_full_iterparse(source):
    """Iterparse baseline that keeps the full tree in memory (no clearing)."""
    context = ElementTree.iterparse(source, events=('start', 'end'))
    for event, elem in context:
        if event == 'start':
            pass  # deliberately does nothing: we only measure memory growth
@profile
def etree_emptied_iterparse(source):
    """Iterparse baseline that clears each element after its 'end' event."""
    context = ElementTree.iterparse(source, events=('start', 'end'))
    for event, elem in context:
        if event == 'end':
            elem.clear()  # drop children so memory stays bounded
@profile
def decode(source):
    """Decode *source* to a dict; XSD inputs use the meta-schema as decoder."""
    decoder = xmlschema.XMLSchema.meta_schema if source.endswith('.xsd') else xmlschema
    return decoder.to_dict(source)
@profile
def lazy_decode(source):
    """Decode *source* lazily (streaming XMLResource), discarding each result."""
    decoder = xmlschema.XMLSchema.meta_schema if source.endswith('.xsd') else xmlschema
    for result in decoder.to_dict(xmlschema.XMLResource(source, lazy=True), path='*'):
        del result  # free each decoded chunk immediately to keep peak memory low
@profile
def validate(source):
    """Validate *source*; XSD inputs are validated against the meta-schema."""
    validator = xmlschema.XMLSchema.meta_schema if source.endswith('.xsd') else xmlschema
    return validator.validate(source)
@profile
def lazy_validate(source):
    """Validate *source* lazily with a streaming XMLResource."""
    if source.endswith('.xsd'):
        validator, path = xmlschema.XMLSchema.meta_schema, '*'
    else:
        validator, path = xmlschema, None
    return validator.validate(xmlschema.XMLResource(source, lazy=True), path=path)
if __name__ == '__main__':
    # Dispatch on the selected test number. Heavy modules are imported lazily
    # inside each branch so only the measured code contributes to memory use.
    if args.test_num == 1:
        if args.xml_file is None:
            import_package()
        else:
            import xmlschema
            build_schema(args.xml_file)
    elif args.test_num == 2:
        import xml.etree.ElementTree as ElementTree
        etree_parse(args.xml_file)
    elif args.test_num == 3:
        import xml.etree.ElementTree as ElementTree
        etree_full_iterparse(args.xml_file)
    elif args.test_num == 4:
        import xml.etree.ElementTree as ElementTree
        etree_emptied_iterparse(args.xml_file)
    elif args.test_num == 5:
        import xmlschema
        xmlschema.XMLSchema.meta_schema.build()
        decode(args.xml_file)
    elif args.test_num == 6:
        import xmlschema
        xmlschema.XMLSchema.meta_schema.build()
        lazy_decode(args.xml_file)
    elif args.test_num == 7:
        import xmlschema
        xmlschema.XMLSchema.meta_schema.build()
        validate(args.xml_file)
    elif args.test_num == 8:
        import xmlschema
        xmlschema.XMLSchema.meta_schema.build()
        lazy_validate(args.xml_file)
| 28.026144
| 101
| 0.692631
|
4a0c950f7959613bea7d779bd0b96616dc1f3a1e
| 254
|
py
|
Python
|
ko_model/crawler.py
|
Ohjiwoo-lab/News-Articles-Recommendation
|
a0880fad62114c4ddda05ca5c41a1ceab2a923b4
|
[
"MIT"
] | null | null | null |
ko_model/crawler.py
|
Ohjiwoo-lab/News-Articles-Recommendation
|
a0880fad62114c4ddda05ca5c41a1ceab2a923b4
|
[
"MIT"
] | null | null | null |
ko_model/crawler.py
|
Ohjiwoo-lab/News-Articles-Recommendation
|
a0880fad62114c4ddda05ca5c41a1ceab2a923b4
|
[
"MIT"
] | 4
|
2021-08-23T15:02:11.000Z
|
2021-11-09T01:10:40.000Z
|
from korea_news_crawler.articlecrawler import ArticleCrawler

if __name__ == "__main__":
    # Crawl Naver news articles for the listed categories from 2017-01 to 2018-04.
    # NOTE(review): "economy" is English while the others are Korean category
    # names — confirm the crawler accepts both spellings.
    Crawler = ArticleCrawler()
    Crawler.set_category("정치", "IT과학", "economy", "사회", "생활문화")
    Crawler.set_date_range(2017, 1, 2018, 4)
    Crawler.start()
| 36.285714
| 65
| 0.692913
|
4a0c9606b23f3bae09fc55bfb41ce3c011036e75
| 9,829
|
py
|
Python
|
server/user.py
|
psylopunk/mpei-timetable-bot
|
be8e51676a4688c9a31ac5f837c030b9ea8ba190
|
[
"MIT"
] | 3
|
2021-05-06T00:20:49.000Z
|
2021-12-03T20:56:38.000Z
|
server/user.py
|
psylopunk/mpei-timetable-bot
|
be8e51676a4688c9a31ac5f837c030b9ea8ba190
|
[
"MIT"
] | null | null | null |
server/user.py
|
psylopunk/mpei-timetable-bot
|
be8e51676a4688c9a31ac5f837c030b9ea8ba190
|
[
"MIT"
] | 1
|
2020-12-31T18:33:21.000Z
|
2020-12-31T18:33:21.000Z
|
from storage import db
from config import TELEGRAM_API_KEY
from aiogram import Bot
from functions import log, get_default_inline_keyboard, get_keyboard, get_inline_keyboard, get_weekday_name, get_timetable_json
from datetime import datetime, timedelta
# Single shared aiogram Bot instance used by all User methods below.
bot = Bot(token=TELEGRAM_API_KEY)
class User:
    """A Telegram bot user backed by a MongoDB document in db.users.

    Wraps message sending/editing through the module-level `bot`, keeps the
    user's chat tidy (one main message plus a tracked history of extras), and
    persists state changes back to Mongo.
    """

    def log(self, text: str):
        """Write a log line prefixed with this user's repr."""
        log(f'[{self}] {text}')

    def __repr__(self):
        return f'User(telegram_id={self.telegram_id}, first_name={self.first_name}, last_name={self.last_name}, username={self.username})'

    def __init__(self, user_object):
        # Hydrate attributes from the raw Mongo document; optional profile
        # fields default to None when absent.
        self._id = user_object['_id'] if '_id' in user_object else None
        self.telegram_id = user_object['tid']
        self.balance = user_object['balance']
        self.message_id = user_object['message_id']
        self.username = user_object['username'] if 'username' in user_object else None
        self.first_name = user_object['first_name'] if 'first_name' in user_object else None
        self.last_name = user_object['last_name'] if 'last_name' in user_object else None
        self.group = user_object['group']
        self.group_id = user_object['group_id']
        self.history_messages_id = user_object['history_messages_id']
        self.settings = user_object['settings']
        self.last_update_id = 0
        # Touch the "last seen" timestamp every time the user is loaded.
        db.users.update_one({
            'tid': self.telegram_id
        }, {
            '$set': {
                'last_use': datetime.now()
            }
        })
        self.clear_action()

    async def send_welcome(self, message=None):
        """Reset state, wipe old messages, and (re)send the main menu message."""
        self.clear_action()
        await self.clear_messages()
        if self.message_id:
            await self.delete_message(self.message_id)
        GROUP_NOT_SETTED = '⚠️ <b>Группа не выбрана</b>\n<i>Найдите свою группу с помощью кнопки под сообщением для начала работы</i>'
        m = await self.send_message(
            f"""{message if message else '💎 <b>Привет, здесь ты можешь найти расписание групп МЭИ</b>'}
{f'👥 Ваша группа: <b>{self.group}</b>' if self.group else GROUP_NOT_SETTED}
Выбери пункт ниже 👇""",
            save=False,
            reply_markup=get_default_inline_keyboard(self)
        )
        if m:
            # Remember the new main message so later edits target it.
            self.message_id = m.message_id
            db.users.update_one({
                '_id': self._id
            }, {
                '$set': {
                    'message_id': self.message_id
                }
            })

    async def send_message(self, message, save=True, *args, **kwargs):
        """Send an HTML message; when save=True, track it for later cleanup."""
        try:
            r = await bot.send_message(
                self.telegram_id,
                message,
                parse_mode='html',
                *args,
                **kwargs
            )
            if save:
                self.save_message(r.message_id)
            return r
        except Exception as e:
            # Best-effort: delivery failures are logged, not raised.
            self.log(f'Error (cause.sendMessage): {e}')

    async def delete_message(self, message_id):
        """Delete one message, swallowing (but logging) Telegram errors."""
        try:
            await bot.delete_message(self.telegram_id, message_id)
        except Exception as e:
            self.log(f'Error (cause.deleteMessage): {e}')

    async def edit_message(self, text, *args, **kwargs):
        """Edit the main menu message in place; fall back to a fresh welcome
        message when the original no longer exists."""
        await self.clear_messages()
        try:
            return await bot.edit_message_text(
                chat_id=self.telegram_id,
                message_id=self.message_id,
                text=text,
                parse_mode='html',
                *args,
                **kwargs
            )
        except Exception as e:
            # aiogram reports a missing target via this exact message text.
            if f'{e}'.strip() == 'Message to edit not found':
                await self.send_welcome()
            self.log(f'Error (cause.editMessage): {e}')
            return False

    def clear_action(self):
        """Reset the pending conversational action and its scratch data."""
        self.action = None
        self.data = {}

    def upload_settings(self):
        """Persist the in-memory settings dict back to Mongo."""
        db.users.update_one({
            '_id': self._id
        }, {
            '$set': {
                'settings': self.settings
            }
        })

    def set_group(self, group, group_id):
        """Set and persist the user's study group (name uppercased) and its id."""
        self.group = group.upper()
        self.group_id = group_id
        db.users.update_one({
            '_id': self._id
        }, {
            '$set': {
                'group': self.group,
                'group_id': self.group_id
            }
        })

    async def clear_messages(self):
        """Delete all tracked auxiliary messages and reset the history list."""
        for message_id in self.history_messages_id:
            await self.delete_message(message_id)
        self.history_messages_id = []
        db.users.update_one({
            '_id': self._id
        }, {
            '$set': {
                'history_messages_id': []
            }
        })

    async def answer_callback(self, cd_id, text=None):
        """Acknowledge an inline-keyboard callback with a toast message."""
        try:
            await bot.answer_callback_query(
                callback_query_id=cd_id,
                text=(text or 'Выполнено'),
                show_alert=False
            )
        except Exception as e:
            self.log(f'Error (cause.answerCallback): {e}')

    async def send_settings(self):
        """Render the settings screen with toggle buttons for notifications."""
        await self.clear_messages()
        print(self._id)
        print(self.settings)
        await self.edit_message(
            """⚙️ <b>Настройки</b>
<b>Уведомления о приближении пары</b>
<i>Вы можете установить время, за сколько перед началом пары, Вам нужно будет прислать сообщение</i>
<b>Уведомления о начале пары</b>
<i>Включив эту настройку, Вы будете получать уведомления при начале каждой пары</i>""",
            reply_markup=get_inline_keyboard([
                [{
                    'text': f"""{'🟢' if self.settings['lesson_notification_previously']['enabled'] else '🔴'} Уведомления о приближении пары""",
                    'callback_data': 'setting_toggle_lessonNotification_previously'
                }],
                [{
                    'text': f"""{'🟢' if self.settings['lesson_notification_beginning']['enabled'] else '🔴'} Уведомления о начале пары""",
                    'callback_data': 'setting_toggle_lessonNotification_beginning'
                }],
                [{
                    'text': 'На главную ⌘',
                    'callback_data': 'home'
                }]
            ])
        )

    def save_message(self, message_id):
        """Append a message id to the cleanup history and persist it."""
        self.history_messages_id.append(message_id)
        db.users.update_one({
            '_id': self._id
        }, {
            '$set': {
                'history_messages_id': self.history_messages_id
            }
        })

    async def send_timetable(self, date_obj):
        """Render the timetable for *date_obj* with day/week navigation buttons.

        Status dots: white = not started, yellow = in progress, green = done.
        """
        day = await get_timetable_json(self, date_obj)
        lessons_message = ''
        # NOTE(review): fixed +3h offset looks like Moscow time derived from a
        # UTC clock — confirm server timezone assumption.
        time_now = datetime.now() + timedelta(hours=3)
        _two_endl = '\n\n'
        for lesson in day:
            if time_now < lesson['begin_lesson']:
                lessons_message += '⚪️ '
            elif time_now > lesson['begin_lesson'] and time_now < lesson['end_lesson']:
                lessons_message += '🟡 '
            elif time_now > lesson['end_lesson']:
                lessons_message += '🟢 '
            else:
                lessons_message += 'ERR!'
            lessons_message += f"""<b>{lesson['name']}</b>
<i>{lesson['begin_lesson'].strftime('%H:%M')} - {lesson['end_lesson'].strftime('%H:%M')}</i>
📍 {lesson['place']}
👨🏫 {lesson['lecturer'] if '!' not in lesson['lecturer'] else '<i>Нет информации</i>'}
<code>{lesson['type']}</code>
"""
        return await self.edit_message(
            f"""🔰 <b>Расписание на {date_obj.strftime('%d.%m')}, {get_weekday_name(date_obj)}</b>
<i>Информация обновлена {time_now.strftime('%H:%M')}</i>
{lessons_message if lessons_message else f'🌀 <b>В этот день нет занятий</b>{_two_endl}'}🟡 <b>Пара идет</b>
🟢 <b>Пара закончилась</b>""",
            reply_markup=get_inline_keyboard([
                [
                    {
                        'text': f'◀️ {(date_obj - timedelta(days=1)).strftime("%d.%m")}, {get_weekday_name(date_obj - timedelta(days=1))}',
                        'callback_data': f'timetable_mem_{int((date_obj - timedelta(days=1)).timestamp())}'
                    },
                    {
                        'text': f'Обновить',
                        'callback_data': f'timetable_mem_{int(date_obj.timestamp())}'
                    },
                    {
                        'text': f'{(date_obj + timedelta(days=1)).strftime("%d.%m")}, {get_weekday_name(date_obj + timedelta(days=1))} ▶️',
                        'callback_data': f'timetable_mem_{int((date_obj + timedelta(days=1)).timestamp())}'
                    }
                ],
                [
                    {
                        'text': f'⏪ {(date_obj - timedelta(days=7)).strftime("%d.%m")}, {get_weekday_name(date_obj - timedelta(days=7))}',
                        'callback_data': f'timetable_mem_{int((date_obj - timedelta(days=7)).timestamp())}'
                    },
                    # "Today" shortcut only shown when not already viewing today.
                    {
                        'text': f'Сегодня',
                        'callback_data': f'timetable_mem_{int(datetime.now().timestamp())}'
                    } if datetime.now().strftime('%d.%m.%Y') != date_obj.strftime('%d.%m.%Y') else {},
                    {
                        'text': f'{(date_obj + timedelta(days=7)).strftime("%d.%m")}, {get_weekday_name(date_obj + timedelta(days=7))} ⏩',
                        'callback_data': f'timetable_mem_{int((date_obj + timedelta(days=7)).timestamp())}'
                    }
                ],
                [{'text': 'На главную ⌘', 'callback_data': 'home'}]
            ])
        )

    @classmethod
    def from_tid(cls, tid):
        """Load a User by Telegram id; raises when no document exists."""
        user_object = db.users.find_one({
            'tid': int(tid)
        })
        if not user_object:
            raise Exception('User not found')
        return User(user_object)
| 37.949807
| 143
| 0.52223
|
4a0c977267f002a137154eccd9351924dc41288d
| 97
|
py
|
Python
|
stib_administraciones/personales/admin.py
|
nfheredia/stib-administraciones
|
05dae746d2fecf75c1d50c4ee679c02ddbf7208a
|
[
"BSD-3-Clause"
] | null | null | null |
stib_administraciones/personales/admin.py
|
nfheredia/stib-administraciones
|
05dae746d2fecf75c1d50c4ee679c02ddbf7208a
|
[
"BSD-3-Clause"
] | null | null | null |
stib_administraciones/personales/admin.py
|
nfheredia/stib-administraciones
|
05dae746d2fecf75c1d50c4ee679c02ddbf7208a
|
[
"BSD-3-Clause"
] | 1
|
2020-10-28T15:46:48.000Z
|
2020-10-28T15:46:48.000Z
|
from django.contrib import admin
from models import Personales

# Expose the Personales model in the Django admin with the default ModelAdmin.
admin.site.register(Personales)
| 16.166667
| 32
| 0.835052
|
4a0c97905370d484d687fd13369876008e48a472
| 2,180
|
py
|
Python
|
src/lp_detector/lp_detection.py
|
heroclass728/alpr_egyptian
|
a94e287245595233100b4f3683e68dcc25f8c11b
|
[
"CNRI-Python"
] | null | null | null |
src/lp_detector/lp_detection.py
|
heroclass728/alpr_egyptian
|
a94e287245595233100b4f3683e68dcc25f8c11b
|
[
"CNRI-Python"
] | null | null | null |
src/lp_detector/lp_detection.py
|
heroclass728/alpr_egyptian
|
a94e287245595233100b4f3683e68dcc25f8c11b
|
[
"CNRI-Python"
] | null | null | null |
import os
import time
import cv2
import numpy as np
from settings import LP_MODEL_DIR, LP_CONFIG_DIR, LP_NAMES_DIR
class LPDetector:
    """License-plate detector wrapping a tiny-YOLOv2 Darknet model via OpenCV DNN."""

    def __init__(self):
        # Paths to the Darknet weights/config and the class-name file.
        self.model_path = os.path.join(LP_MODEL_DIR, 'yolov2-tiny-custom_final.weights')
        self.config_path = os.path.join(LP_CONFIG_DIR, 'yolov2-tiny-custom.cfg')
        self.names_path = os.path.join(LP_NAMES_DIR, 'custom.names')

    def detect_lp_frame(self, frame_path):
        """Detect license plates in the image at *frame_path*.

        Returns a list of cropped plate images (numpy arrays). Also shows the
        annotated frame in a blocking OpenCV window until a key is pressed.
        """
        CONF_THRESH, NMS_THRESH = 0.5, 0.5  # NOTE(review): NMS_THRESH is unused — no NMS is applied
        # Load the network
        # NOTE(review): the model is re-loaded on every call; caching it in
        # __init__ would avoid repeated disk reads — confirm intent.
        net = cv2.dnn.readNetFromDarknet(self.config_path, self.model_path)
        net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
        net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
        # Get the output layer from YOLO
        layers = net.getLayerNames()
        # `i[0] - 1`: getUnconnectedOutLayers returns 1-based indices wrapped in
        # arrays in this OpenCV version; newer versions return plain ints.
        output_layers = [layers[i[0] - 1] for i in net.getUnconnectedOutLayers()]
        # Read and convert the image to blob and perform forward pass to get the bounding boxes with their confidence
        # scores
        st_time = time.time()
        img = cv2.imread(frame_path)
        height, width = img.shape[:2]
        blob = cv2.dnn.blobFromImage(img, 0.00392, (416, 416), swapRB=True, crop=False)
        net.setInput(blob)
        layer_outputs = net.forward(output_layers)
        lp_frames = []
        for output in layer_outputs:
            for detection in output:
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]
                if confidence > CONF_THRESH:
                    # YOLO emits normalized (cx, cy, w, h); scale back to pixels.
                    center_x, center_y, w, h = (detection[0:4] * np.array([width, height, width, height])).astype('int')
                    x = int(center_x - w / 2)
                    y = int(center_y - h / 2)
                    lp_frame = img[y:y+h, x:x+w]
                    lp_frames.append(lp_frame)
                    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
        end_time = time.time()
        time_interval = end_time - st_time
        print("time elapsed:", time_interval)
        cv2.imshow("image", img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        return lp_frames
| 32.537313
| 120
| 0.600459
|
4a0c979659dac99fdadac234683bec9699c15d75
| 118
|
py
|
Python
|
ML_NBD/analogs_search/urls.py
|
danielSoler93/DrugDiscoveryAI
|
74736e4807fefe254dd983afdcd90fa4640fd359
|
[
"MIT"
] | 2
|
2021-06-13T15:11:56.000Z
|
2022-01-09T11:20:10.000Z
|
ML_NBD/analogs_search/urls.py
|
AspirinCode/DrugDiscoveryAI
|
74736e4807fefe254dd983afdcd90fa4640fd359
|
[
"MIT"
] | 2
|
2020-06-06T00:50:29.000Z
|
2021-06-10T22:40:41.000Z
|
ML_NBD/analogs_search/urls.py
|
AspirinCode/DrugDiscoveryAI
|
74736e4807fefe254dd983afdcd90fa4640fd359
|
[
"MIT"
] | 2
|
2020-05-14T03:02:34.000Z
|
2020-05-22T16:36:06.000Z
|
from django.urls import path
from . import views

# URL routes for the analogs_search app: the root path maps to the home view.
urlpatterns = [
    path('', views.home, name='analogs-home')
]
| 16.857143
| 45
| 0.661017
|
4a0c9830d6a56e1604c511e4aada9e90caa803a8
| 2,243
|
py
|
Python
|
demo/tasks_app/migrations/0003_create_superuser_and_data.py
|
ociule/django-users-admin
|
817590dc7a6c1e3657f31202d4068fd25002a30a
|
[
"BSD-3-Clause"
] | 1
|
2019-03-30T15:28:36.000Z
|
2019-03-30T15:28:36.000Z
|
demo/tasks_app/migrations/0003_create_superuser_and_data.py
|
ociule/django-users-admin
|
817590dc7a6c1e3657f31202d4068fd25002a30a
|
[
"BSD-3-Clause"
] | null | null | null |
demo/tasks_app/migrations/0003_create_superuser_and_data.py
|
ociule/django-users-admin
|
817590dc7a6c1e3657f31202d4068fd25002a30a
|
[
"BSD-3-Clause"
] | null | null | null |
from django.db import migrations
from django.conf import settings
from django.contrib.auth.admin import User
def create_superuser(apps, schema_editor):
    """Forward step of the data migration: create the demo 'admin' superuser."""
    # NOTE(review): uses the live User model imported at module level rather
    # than apps.get_model('auth', 'User'); fine for this demo, but historical
    # models are the recommended pattern inside migrations — confirm intent.
    superuser = User()
    superuser.is_active = True
    superuser.is_superuser = True
    superuser.is_staff = True
    superuser.username = 'admin'
    superuser.email = 'admin@admin.net'
    superuser.set_password('djangoadmin')
    superuser.save()
def delete_superuser(apps, schema_editor):
    """Reverse step of the data migration: remove the demo 'admin' user."""
    User.objects.filter(username="admin").delete()
class Migration(migrations.Migration):
    """Seed data: demo superuser, one task list per demo user, sample tasks."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('tasks_app', '0002_create_users_and_group'),
    ]

    operations = [
        migrations.RunPython(create_superuser, delete_superuser),
        # Task lists are keyed to users created by migration 0002; the reverse
        # SQL wipes the whole table, so these operations are not independently
        # reversible.
        migrations.RunSQL("INSERT INTO tasks_app_tasklist (user_id, name, created_at) \
SELECT auth_user.id, 'Alice''s Task List', CURRENT_TIMESTAMP FROM auth_user WHERE \
auth_user.username = 'alice_non_staff';", "DELETE FROM tasks_app_tasklist"),
        migrations.RunSQL("INSERT INTO tasks_app_tasklist (user_id, name, created_at) \
SELECT auth_user.id, 'Bob''s Task List', CURRENT_TIMESTAMP FROM auth_user WHERE \
auth_user.username = 'bob_non_staff';", "DELETE FROM tasks_app_tasklist"),
        # Sample tasks all attached to list id 1 (Alice's list).
        migrations.RunSQL("INSERT INTO tasks_app_task (list_id, name, created_at) VALUES (1, 'M1', CURRENT_TIMESTAMP);", "DELETE FROM tasks_app_task"),
        migrations.RunSQL("INSERT INTO tasks_app_task (list_id, name, created_at) VALUES (1, 'M2', CURRENT_TIMESTAMP);", "DELETE FROM tasks_app_task"),
        migrations.RunSQL("INSERT INTO tasks_app_task (list_id, name, created_at) VALUES (1, 'M3', CURRENT_TIMESTAMP);", "DELETE FROM tasks_app_task"),
        migrations.RunSQL("INSERT INTO tasks_app_task (list_id, name, created_at) VALUES (1, 'D1', CURRENT_TIMESTAMP);", "DELETE FROM tasks_app_task"),
        migrations.RunSQL("INSERT INTO tasks_app_task (list_id, name, created_at) VALUES (1, 'D2', CURRENT_TIMESTAMP);", "DELETE FROM tasks_app_task"),
        migrations.RunSQL("INSERT INTO tasks_app_task (list_id, name, created_at) VALUES (1, 'R1', CURRENT_TIMESTAMP);", "DELETE FROM tasks_app_task"),
    ]
| 48.76087
| 151
| 0.719572
|
4a0c98ae0065805180bfea75449d728178c902f8
| 1,718
|
py
|
Python
|
objects/leg/foot.py
|
martin-helmich/spacicon
|
284a389fd75bdc8558ddde9d7fd30bb0f50fc6f3
|
[
"MIT"
] | 1
|
2020-10-16T18:49:54.000Z
|
2020-10-16T18:49:54.000Z
|
objects/leg/foot.py
|
martin-helmich/spacicon
|
284a389fd75bdc8558ddde9d7fd30bb0f50fc6f3
|
[
"MIT"
] | 3
|
2021-06-08T19:04:04.000Z
|
2022-03-11T23:19:09.000Z
|
objects/leg/foot.py
|
martin-helmich/spacicon
|
284a389fd75bdc8558ddde9d7fd30bb0f50fc6f3
|
[
"MIT"
] | null | null | null |
from svgwrite import Drawing
from svgwrite.container import Group
class LegWithFoot:
    """SVG renderer for a tapered leg with a boot, drawn as svgwrite paths.

    Coordinates are local: (0, 0) is the top of the thigh; y grows downward.
    """

    def __init__(self,
                 leg_length: float,
                 leg_color: str,
                 thickness_thigh: float = 50,
                 thickness_foot: float = 30,
                 boot_height: float = 20,
                 foot_length: float = 50,
                 foot_color: str = "#ffff00"
                 ) -> None:
        self.leg_length = leg_length
        self.leg_color = leg_color
        self.thickness_foot = thickness_foot
        self.thickness_thigh = thickness_thigh
        self.foot_color = foot_color
        self.boot_height = boot_height
        self.foot_length = foot_length

    def render(self, dwg: Drawing) -> Group:
        """Build and return an SVG group containing the leg and boot paths."""
        g = dwg.g()
        # Tapered quadrilateral: thigh width at the top, ankle width at the bottom.
        leg = dwg.path(fill=self.leg_color)
        leg.push("M 0 0")
        leg.push("L 0 %f" % self.leg_length)
        leg.push("l %f 0" % self.thickness_foot)
        leg.push("L %f 0" % self.thickness_thigh)
        leg.push("Z")
        g.add(leg)
        boot_start = .7  # boot shaft begins at 70% of the leg length
        boot_height = self.boot_height
        foot_length = self.foot_length
        boot = dwg.path(fill=self.foot_color)
        boot.push("M 0 %f" % (self.leg_length * boot_start))
        boot.push("L 0 %f" % (self.leg_length + boot_height))
        boot.push("l %f 0" % foot_length)
        # Rounded toe: arc radius capped so it never exceeds the foot overhang.
        boot.push("a %f %f 0 0 0 %f %f" % (min(boot_height, abs(foot_length - self.thickness_foot)), boot_height, -min(boot_height, foot_length - self.thickness_foot), - boot_height))
        # Close the shaft back to the leg's tapered edge (+1 avoids a hairline gap).
        boot.push("L %f %f" % (self.thickness_thigh - (self.thickness_thigh - self.thickness_foot) * boot_start + 1, self.leg_length * boot_start))
        g.add(boot)
        return g
| 35.791667
| 183
| 0.582072
|
4a0c9926d1be49cfd2f7dcb042f286ef6e0b72a4
| 4,485
|
py
|
Python
|
benchmark/startQiskit_Class1490.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_Class1490.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_Class1490.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=5
# total number=57
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the phase oracle Zf for the boolean function *f* on *n* qubits:
    flips the phase of every basis state whose bitstring f maps to "1"."""
    # implement the oracle O_f^\pm
    # NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
    # or multi_control_Z_gate (issue #127)
    controls = QuantumRegister(n, "ofc")
    oracle = QuantumCircuit(controls, name="Zf")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # X-conjugation maps |rep> to |1...1> so the multi-controlled
            # phase fires only on this basis state.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            # oracle.h(controls[n])
            if n >= 2:
                oracle.mcu1(pi, controls[1:], controls[0])
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the auto-generated Grover-style benchmark circuit on n qubits.

    The `# number=N` comments are generator bookkeeping tags — keep them.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    # Initial superposition plus generator-inserted extra gates.
    prog.h(input_qubit[0]) # number=3
    prog.h(input_qubit[1]) # number=4
    prog.h(input_qubit[2]) # number=5
    prog.h(input_qubit[3]) # number=6
    prog.h(input_qubit[0]) # number=38
    prog.cz(input_qubit[1],input_qubit[0]) # number=39
    prog.h(input_qubit[0]) # number=40
    prog.h(input_qubit[0]) # number=51
    prog.cz(input_qubit[1],input_qubit[0]) # number=52
    prog.h(input_qubit[0]) # number=53
    prog.z(input_qubit[1]) # number=49
    prog.cx(input_qubit[1],input_qubit[0]) # number=50
    prog.h(input_qubit[0]) # number=32
    prog.cz(input_qubit[1],input_qubit[0]) # number=33
    prog.h(input_qubit[0]) # number=34
    prog.h(input_qubit[4]) # number=21
    Zf = build_oracle(n, f)

    # Grover iterations: oracle followed by the diffusion operator.
    repeat = floor(sqrt(2 ** n) * pi / 4)
    for i in range(repeat):
        prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
        prog.h(input_qubit[0]) # number=1
        prog.h(input_qubit[1]) # number=2
        prog.h(input_qubit[2]) # number=7
        prog.h(input_qubit[3]) # number=8
        prog.h(input_qubit[0]) # number=54
        prog.cz(input_qubit[3],input_qubit[0]) # number=55
        prog.h(input_qubit[0]) # number=56
        prog.z(input_qubit[3]) # number=42
        prog.cx(input_qubit[3],input_qubit[0]) # number=43
        prog.cx(input_qubit[1],input_qubit[3]) # number=44
        prog.cx(input_qubit[3],input_qubit[2]) # number=45
        prog.x(input_qubit[0]) # number=9
        prog.x(input_qubit[1]) # number=10
        prog.x(input_qubit[2]) # number=11
        prog.cx(input_qubit[0],input_qubit[3]) # number=35
        prog.x(input_qubit[3]) # number=36
        prog.cx(input_qubit[0],input_qubit[3]) # number=37
        if n>=2:
            prog.mcu1(pi,input_qubit[1:],input_qubit[0])
        prog.cx(input_qubit[1],input_qubit[0]) # number=24
        prog.x(input_qubit[0]) # number=25
        prog.cx(input_qubit[1],input_qubit[0]) # number=26
        prog.x(input_qubit[1]) # number=14
        prog.x(input_qubit[2]) # number=15
        prog.x(input_qubit[3]) # number=16
        prog.x(input_qubit[3]) # number=46
        prog.y(input_qubit[1]) # number=47
        prog.h(input_qubit[0]) # number=17
        prog.h(input_qubit[1]) # number=18
        prog.h(input_qubit[2]) # number=19
        prog.h(input_qubit[3]) # number=20
        prog.x(input_qubit[1]) # number=22
        prog.x(input_qubit[1]) # number=23
    # circuit end
    return prog
if __name__ == '__main__':
    key = "00000"
    # Marking function for Grover search: "1" only on the all-zeros key.
    f = lambda rep: str(int(rep == key))
    prog = make_circuit(5,f)
    backend = BasicAer.get_backend('statevector_simulator')
    sample_shot =7924  # NOTE(review): unused with the statevector simulator
    info = execute(prog, backend=backend).result().get_statevector()
    qubits = round(log2(len(info)))
    # Convert amplitudes to per-bitstring probabilities rounded to 3 decimals.
    info = {
        np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
        for i in range(2 ** qubits)
    }
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    # Dump probabilities, depth, and the transpiled circuit diagram to CSV.
    writefile = open("../data/startQiskit_Class1490.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| 32.035714
| 80
| 0.612486
|
4a0c99fd03c9ecd9abd13b2fda770251f5862c05
| 1,838
|
py
|
Python
|
Intermediate/RandomWords/RandomWords.py
|
arunkgupta/PythonTrainingExercises
|
d260cf71298e34b2a18bd11a76f1764ef28677c7
|
[
"BSD-3-Clause"
] | 150
|
2015-11-27T14:19:15.000Z
|
2019-11-03T18:34:21.000Z
|
Intermediate/RandomWords/RandomWords.py
|
prmohanty/PythonTrainingExercises
|
00a2435649fcf53fdafede2d10b40f08463728fe
|
[
"BSD-3-Clause"
] | 1
|
2015-12-30T11:41:30.000Z
|
2015-12-30T11:41:30.000Z
|
Intermediate/RandomWords/RandomWords.py
|
prmohanty/PythonTrainingExercises
|
00a2435649fcf53fdafede2d10b40f08463728fe
|
[
"BSD-3-Clause"
] | 95
|
2015-12-01T18:44:13.000Z
|
2019-10-28T16:25:08.000Z
|
"""Write a function that randomises letters in words after the first N letters.
It will be called randomWords(words, n) where words is a list of words and n
the number of leading letters of each word to keep in order the remainder are
to be randomly jumbled up.
The code in main will call your function with different values of n to see
how much unscrambled text in each word you need to understand it.
Example:
Words please, <cr> to exit: Numeric is a hedge fund operating out of offices in Boston
0: imeNucr si a deghe dfnu rntpigeao out of ifosfce in otnBso
1: Nmuerci is a hedeg fudn oantrgiep out of ofsfcie in Bontos
2: Nucermi is a heedg fudn opertaign out of ofiscfe in Botson
3: Numrcei is a hedge fund operantgi out of offseic in Boston
4: Numeicr is a hedge fund operingat out of offisec in Bostno
5: Numeric is a hedge fund operaigtn out of offices in Boston
6: Numeric is a hedge fund operatngi out of offices in Boston
7: Numeric is a hedge fund operating out of offices in Boston
8: Numeric is a hedge fund operating out of offices in Boston
So you need about 4 or so letters to understand the sentence.
HINT: The random module is useful, it has a function shuffle(sequence) that
return the sequence randomly shuffled.
Created on 26 Feb 2015
@author: paulross
"""
import random
def randomWords(words, n):
    """Return a new list where each word keeps its first n letters in place
    and has the remaining letters randomly shuffled.

    words -- list of strings to jumble
    n     -- number of leading letters of each word to leave untouched
             (n >= len(word) leaves that word unchanged)
    """
    result = []
    for word in words:
        tail = list(word[n:])
        random.shuffle(tail)  # in-place shuffle of the jumbled portion
        result.append(word[:n] + ''.join(tail))
    return result
def main():
    """Read sentences from the user and print them jumbled for each n
    from 0 up to the longest word's length (Python 2 script)."""
    # Loop round until the user just gives a <cr> i.e. enter response.
    while True:
        line = raw_input('Words please, <cr> to exit: ')
        words = line.split()
        if len(words) == 0:
            # User wants to quit
            break
        max_len = max([len(w) for w in words])
        for n in range(max_len):
            print '%4d: %s' % (n, ' '.join(randomWords(words, n)))
if __name__ == '__main__':
    # Run the interactive loop; farewell message once the user exits.
    main()
    print 'Bye, bye.'
| 36.039216
| 86
| 0.702394
|
4a0c9a8fd6fad6e957743a5f25c27d243daed396
| 952
|
py
|
Python
|
mundo3/utilidadecv/dados/dados.py
|
KayanOkagawa/Cursoemvideo-Python3-Exercicios
|
10c8386102cc0928f8f090070eb3218deb3d60fe
|
[
"MIT"
] | null | null | null |
mundo3/utilidadecv/dados/dados.py
|
KayanOkagawa/Cursoemvideo-Python3-Exercicios
|
10c8386102cc0928f8f090070eb3218deb3d60fe
|
[
"MIT"
] | null | null | null |
mundo3/utilidadecv/dados/dados.py
|
KayanOkagawa/Cursoemvideo-Python3-Exercicios
|
10c8386102cc0928f8f090070eb3218deb3d60fe
|
[
"MIT"
] | null | null | null |
def leia_dinheiro(msg):
while True:
numero = input(f'{msg} ').strip()
if numero.isnumeric():
return float(numero)
elif numero.isalnum() or numero.isalpha():
print('\033[01;31mERRO! Digite um valor númerico.\033[m')
else:
if numero.find(',') >= 0 or numero.find('.') >= 0:
if numero.find(',') >= 0:
numero.replace(',', '.')
pos = numero.find('.')
if numero[0:pos].isnumeric() and numero[pos+1:].isnumeric():
num_int = int(numero[0:pos])
if len(numero[pos+1:]) == 2:
num_float = float(numero[pos+1:]) / 100
else:
num_float = float(numero[pos+1:] + '0') / 100
return num_int + num_float
else:
print('\033[01;31mERRO! Digite um valor númerico.\033[m')
| 43.272727
| 77
| 0.452731
|
4a0c9ab58e88c736c3e70d42b8a69a7c8af02300
| 20
|
py
|
Python
|
data/studio21_generated/introductory/3149/starter_code.py
|
vijaykumawat256/Prompt-Summarization
|
614f5911e2acd2933440d909de2b4f86653dc214
|
[
"Apache-2.0"
] | null | null | null |
data/studio21_generated/introductory/3149/starter_code.py
|
vijaykumawat256/Prompt-Summarization
|
614f5911e2acd2933440d909de2b4f86653dc214
|
[
"Apache-2.0"
] | null | null | null |
data/studio21_generated/introductory/3149/starter_code.py
|
vijaykumawat256/Prompt-Summarization
|
614f5911e2acd2933440d909de2b4f86653dc214
|
[
"Apache-2.0"
] | null | null | null |
def roof_fix(f,r):
    """Starter-code stub; the implementation is not present in this fragment."""
| 10
| 18
| 0.65
|
4a0c9b97c4df152fbfbec33b0eabdd3f7322f6db
| 698
|
py
|
Python
|
dpk_annotator/utils/__init__.py
|
jgraving/deepposekit-annotator
|
6115e47604f604e34d00189beacb54e4ed5457f1
|
[
"Apache-2.0"
] | 3
|
2019-05-22T16:35:21.000Z
|
2019-05-24T08:20:32.000Z
|
dpk_annotator/utils/__init__.py
|
jgraving/deepposekit-annotator
|
6115e47604f604e34d00189beacb54e4ed5457f1
|
[
"Apache-2.0"
] | 1
|
2019-10-22T01:23:48.000Z
|
2019-10-22T11:30:46.000Z
|
analysis/chrelli_annotator/dpk_annotator/utils/__init__.py
|
chrelli/3DDD_social_mouse_tracker
|
291d2ed90029628dd65db0ce3e8972b721159a15
|
[
"Apache-2.0"
] | 2
|
2021-08-17T06:50:51.000Z
|
2021-09-26T07:22:14.000Z
|
# -*- coding: utf-8 -*-
"""
Copyright 2018-2019 Jacob M. Graving <jgraving@gmail.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from . import image
from . import hotkeys
| 31.727273
| 72
| 0.772206
|
4a0c9b9c4f3a562ee6cef2bbe9f09eb1ca503278
| 3,183
|
py
|
Python
|
core/ip.py
|
richaSinha120/phython
|
12dd5a2abd8279d98416f3f5dcdd76e85ea9603e
|
[
"Apache-2.0"
] | 1
|
2021-08-29T05:34:02.000Z
|
2021-08-29T05:34:02.000Z
|
core/ip.py
|
zer0x0/Nettacker
|
52c5e39a3fd0ab9842c7cf1a8be2de9f560c5d53
|
[
"Apache-2.0"
] | 104
|
2018-04-30T03:59:58.000Z
|
2022-03-31T02:31:34.000Z
|
core/ip.py
|
pradeepjairamani/OWASP-Nettacker
|
988bd960d31e1982d422f6e58590b0f34d7e5215
|
[
"Apache-2.0"
] | 1
|
2021-07-23T23:38:19.000Z
|
2021-07-23T23:38:19.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import netaddr
import time
import sys
import requests
from core.alert import *
from core.compatible import version
from netaddr import iprange_to_cidrs
from netaddr import IPNetwork
from core.log import __log_into_file
def getIPRange(IP):
"""
get IPv4 range from RIPE online database
Args:
IP: IP address
Returns:
IP Range
"""
n = 0
while 1:
try:
data = requests.get(
'http://rest.db.ripe.net/search.json?query-string={0}&flags=no-filtering'.format(IP)).content
for line in data.rsplit('\n'):
line = line.rsplit('"')
for R in line:
if R.count('.') == 6 and R.count('-') == 1 and R.count(' ') == 2:
return R.replace(' ', '')
except:
n += 1
if n == 3:
return IP
time.sleep(0.1)
return data
def isIP(IP):
    """
    to check a value if its IPv4 address

    Args:
        IP: the value to check if its IPv4

    Returns:
        True if it's IPv4 otherwise False
    """
    # Coerce to str first: netaddr expects text, callers may pass objects.
    return netaddr.valid_ipv4(str(IP))
def IPRange(Range, range_temp, language):
    """
    IP range string to IPNetwork type

    Args:
        Range: IP range string
        range_temp: range_temp filename
        language: language

    Returns:
        an array of IP range in IPNetwork type
    """
    # range_temp holds one already-queued target per line; it is used to
    # skip duplicate targets within a scan session.
    myranges_now = open(range_temp).read().rsplit()
    if Range not in myranges_now:
        __log_into_file(range_temp, 'a', Range + '\n', language)
        # 'start-stop' form, e.g. '1.1.1.1-1.1.2.255': two IPv4 addresses
        # joined by '-' give 7 dot-separated pieces overall.
        if len(Range.rsplit('.')) == 7 and '-' in Range and '/' not in Range:
            if len(Range.rsplit('-')) == 2:
                start_ip, stop_ip = Range.rsplit('-')
                if isIP(start_ip) and isIP(stop_ip):
                    return iprange_to_cidrs(start_ip, stop_ip)
                else:
                    return []
            else:
                return []
        # CIDR form, e.g. '1.1.1.0/24'.
        elif len(Range.rsplit('.')) == 4 and '-' not in Range and '/' in Range:
            return IPNetwork(Range)
        else:
            return []
    else:
        warn(messages(language, "skip_duplicate_target"))
        return []
def _generate_IPRange(Range):
    """
    IP range to CIDR and IPNetwork type

    Args:
        Range: IP range

    Returns:
        an array with CIDRs ([] when the input is not a valid range)
    """
    dotted_parts = len(Range.rsplit('.'))
    has_dash = '-' in Range
    has_slash = '/' in Range
    # 'start-stop' form: two IPv4 addresses joined by '-' -> 7 dot-parts.
    if dotted_parts == 7 and has_dash and not has_slash:
        pieces = Range.rsplit('-')
        if len(pieces) != 2:
            return []
        start_ip, stop_ip = pieces
        if not (isIP(start_ip) and isIP(stop_ip)):
            return []
        return iprange_to_cidrs(start_ip, stop_ip)
    # CIDR form: a single IPv4 address plus '/prefix'.
    if dotted_parts == 4 and not has_dash and has_slash:
        return IPNetwork(Range)
    return []
def isIP6(IP):
    """
    to check a value if its IPv6 address

    Args:
        IP: the value to check if its IPv6

    Returns:
        True if it's IPv6 otherwise False
    """
    # Coerce to str first: netaddr expects text, callers may pass objects.
    return netaddr.valid_ipv6(str(IP))
| 24.867188
| 109
| 0.534087
|
4a0c9e5830f5b25ecc04f33f46beaf5d79f62e26
| 709
|
py
|
Python
|
interview/urls.py
|
JorgeScp/performance
|
b7de0498fdcf346ad0b44558ff62f4bf9cc1a986
|
[
"MIT"
] | 1
|
2022-01-05T19:52:35.000Z
|
2022-01-05T19:52:35.000Z
|
interview/urls.py
|
JorgeScp/performance
|
b7de0498fdcf346ad0b44558ff62f4bf9cc1a986
|
[
"MIT"
] | null | null | null |
interview/urls.py
|
JorgeScp/performance
|
b7de0498fdcf346ad0b44558ff62f4bf9cc1a986
|
[
"MIT"
] | null | null | null |
from django.urls import path, include
from . import views
# URL routes for the interview app: create/update/delete views, a list
# view, and a per-record PDF export.
urlpatterns = [
    #path('int_main_form/<int:pk>/', views.int_form,name='int_insert'), # get and post req. for insert operation
    path('int_add/<int:id>/<int:pk>/', views.int_form,name='int_insert'), # get and post req. for insert operation
    path('int_update/<int:id>/<int:pk>/', views.int_form,name='int_update'), # get and post req. for update operation
    path('int_delete/<int:id>/',views.int_delete,name='int_delete'),
    path('int_list/',views.int_list,name='int_list'), # get req. to retrieve and display all records
    path('export_pdf/<int:id>/',views.export_pdf,name='export_pdf'), # get req. to export one record as a PDF
]
| 59.083333
| 117
| 0.70945
|
4a0c9eec4c35e50df0ce7c06a1b830ac36985a21
| 944
|
py
|
Python
|
nemo/collections/nlp/modules/common/transformer/__init__.py
|
vadam5/NeMo
|
3c5db09539293c3c19a6bb7437011f91261119af
|
[
"Apache-2.0"
] | 10
|
2021-04-01T05:55:18.000Z
|
2022-02-15T01:41:41.000Z
|
nemo/collections/nlp/modules/common/transformer/__init__.py
|
vadam5/NeMo
|
3c5db09539293c3c19a6bb7437011f91261119af
|
[
"Apache-2.0"
] | null | null | null |
nemo/collections/nlp/modules/common/transformer/__init__.py
|
vadam5/NeMo
|
3c5db09539293c3c19a6bb7437011f91261119af
|
[
"Apache-2.0"
] | 12
|
2021-06-20T08:56:10.000Z
|
2022-03-16T19:07:10.000Z
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.modules.common.transformer.transformer_decoders import *
from nemo.collections.nlp.modules.common.transformer.transformer_encoders import *
from nemo.collections.nlp.modules.common.transformer.transformer_generators import *
from nemo.collections.nlp.modules.common.transformer.transformer_modules import *
| 49.684211
| 84
| 0.797669
|
4a0c9f0799c0d3fbb2b44cec8bc81ebc2dcbed7c
| 4,171
|
py
|
Python
|
ultracart/models/email_stat_postcard_summary_request.py
|
UltraCart/rest_api_v2_sdk_python
|
d734ea13fabc7a57872ff68bac06861edb8fd882
|
[
"Apache-2.0"
] | 1
|
2018-03-15T16:56:23.000Z
|
2018-03-15T16:56:23.000Z
|
ultracart/models/email_stat_postcard_summary_request.py
|
UltraCart/rest_api_v2_sdk_python
|
d734ea13fabc7a57872ff68bac06861edb8fd882
|
[
"Apache-2.0"
] | null | null | null |
ultracart/models/email_stat_postcard_summary_request.py
|
UltraCart/rest_api_v2_sdk_python
|
d734ea13fabc7a57872ff68bac06861edb8fd882
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2 # noqa: E501
OpenAPI spec version: 2.0.0
Contact: support@ultracart.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class EmailStatPostcardSummaryRequest(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> declared swagger type, used by to_dict().
    swagger_types = {
        'commseq_postcard_uuids': 'list[str]',
        'days': 'int'
    }
    # Python attribute name -> JSON key in the wire format.
    attribute_map = {
        'commseq_postcard_uuids': 'commseq_postcard_uuids',
        'days': 'days'
    }

    def __init__(self, commseq_postcard_uuids=None, days=None):  # noqa: E501
        """EmailStatPostcardSummaryRequest - a model defined in Swagger"""  # noqa: E501
        self._commseq_postcard_uuids = None
        self._days = None
        self.discriminator = None
        # Only set attributes the caller actually supplied, so unset fields
        # stay None and are distinguishable from explicit values.
        if commseq_postcard_uuids is not None:
            self.commseq_postcard_uuids = commseq_postcard_uuids
        if days is not None:
            self.days = days

    @property
    def commseq_postcard_uuids(self):
        """Gets the commseq_postcard_uuids of this EmailStatPostcardSummaryRequest.  # noqa: E501


        :return: The commseq_postcard_uuids of this EmailStatPostcardSummaryRequest.  # noqa: E501
        :rtype: list[str]
        """
        return self._commseq_postcard_uuids

    @commseq_postcard_uuids.setter
    def commseq_postcard_uuids(self, commseq_postcard_uuids):
        """Sets the commseq_postcard_uuids of this EmailStatPostcardSummaryRequest.


        :param commseq_postcard_uuids: The commseq_postcard_uuids of this EmailStatPostcardSummaryRequest.  # noqa: E501
        :type: list[str]
        """

        self._commseq_postcard_uuids = commseq_postcard_uuids

    @property
    def days(self):
        """Gets the days of this EmailStatPostcardSummaryRequest.  # noqa: E501


        :return: The days of this EmailStatPostcardSummaryRequest.  # noqa: E501
        :rtype: int
        """
        return self._days

    @days.setter
    def days(self, days):
        """Sets the days of this EmailStatPostcardSummaryRequest.


        :param days: The days of this EmailStatPostcardSummaryRequest.  # noqa: E501
        :type: int
        """

        self._days = days

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Walk the declared swagger attributes and recursively convert any
        # nested models (anything exposing to_dict) to plain dicts.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # Generated escape hatch: if this model ever subclasses dict, merge
        # its own items as well (never true for this class as generated).
        if issubclass(EmailStatPostcardSummaryRequest, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, EmailStatPostcardSummaryRequest):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 29.373239
| 120
| 0.603452
|
4a0ca04a85b3cfb28d49976a8c09d0f87d013abb
| 3,733
|
py
|
Python
|
deepdefacer/defacer_utils.py
|
chrisfilo/DeepDeface
|
c362eb6a0b563557ed726838ae64c4a3bd454a71
|
[
"MIT"
] | null | null | null |
deepdefacer/defacer_utils.py
|
chrisfilo/DeepDeface
|
c362eb6a0b563557ed726838ae64c4a3bd454a71
|
[
"MIT"
] | null | null | null |
deepdefacer/defacer_utils.py
|
chrisfilo/DeepDeface
|
c362eb6a0b563557ed726838ae64c4a3bd454a71
|
[
"MIT"
] | null | null | null |
import numpy as np
import nibabel as nib
import SimpleITK as sitk
import os
from termcolor import colored
# Importing Keras/TensorFlow can fail for many reasons (missing install,
# broken CUDA/NVIDIA drivers, ...); fail fast with a readable message.
try:
    from keras import backend as K
    from keras.models import *
    from tensorflow.python.client import device_lib
except Exception as e:
    import sys  # BUG FIX: 'sys' was never imported, so sys.exit() below raised NameError
    print(e)
    print('-' * 100)
    print(colored("""ERROR: Failed to initialize tensorflow and Keras.
If you are using deepdefacer[tf-gpu], please make
sure that your CUDA and NVIDIA drivers are properly
installed.""", 'red'))
    print('-' * 100)
    sys.exit(1)
def dice_coefficient(y_true, y_pred, smooth=1.):
    """Soft Dice coefficient between two (binary) masks.

    Both inputs are flattened; *smooth* avoids division by zero and
    smooths gradients for empty masks.
    """
    flat_true = K.flatten(y_true)
    flat_pred = K.flatten(y_pred)
    overlap = K.sum(flat_true * flat_pred)
    total = K.sum(flat_true) + K.sum(flat_pred)
    return (2. * overlap + smooth) / (total + smooth)
def resample_image(nifti_img, specified_shape, mask=False):
    """Resample a NIfTI volume to *specified_shape* voxels with SimpleITK.

    :param nifti_img: path to a NIfTI file, or (when ``mask=True``) an
        in-memory mask array that is first written to a temporary file.
    :param specified_shape: target grid size per axis, e.g. [160, 160, 160].
    :param mask: treat *nifti_img* as a raw mask array instead of a path.
    :return: numpy array of shape *specified_shape*.
    """
    if mask:
        # Masks arrive as arrays; round-trip through a temporary file so
        # SimpleITK can read them like any other image.
        revised_nifti = nib.Nifti1Image(nifti_img, np.eye(4))
        nib.save(revised_nifti, 'mask_intermediate.nii.gz')
        nifti_img = 'mask_intermediate.nii.gz'
    img = sitk.ReadImage(nifti_img)
    img_data = sitk.GetArrayFromImage(img)
    if len(img_data.shape) != 3:
        # Drop singleton dimensions (e.g. a 4D file with one volume) and
        # rewrite the file so a strictly 3D image is resampled.
        img_data = np.squeeze(img_data)
        revised_nifti = nib.Nifti1Image(img_data, nib.load(nifti_img).affine)
        nib.save(revised_nifti, nifti_img)
        img = sitk.ReadImage(nifti_img)
    dimension = img.GetDimension()
    # Physical extent covered by the input image along each axis.
    reference_physical_size = np.zeros(dimension)
    reference_physical_size[:] = [(sz-1)*spc if sz*spc>mx else mx for sz,spc,mx in
                                  zip(img.GetSize(), img.GetSpacing(),
                                      reference_physical_size)]
    # Reference image: the requested voxel grid spanning the same physical
    # extent, axis-aligned at the origin.
    reference_origin = np.zeros(dimension)
    reference_direction = np.identity(dimension).flatten()
    reference_size = specified_shape
    reference_spacing = [phys_sz/(sz-1) for sz,phys_sz in
                         zip(reference_size, reference_physical_size)]
    reference_image = sitk.Image(reference_size, img.GetPixelIDValue())
    reference_image.SetOrigin(reference_origin)
    reference_image.SetSpacing(reference_spacing)
    reference_image.SetDirection(reference_direction)
    reference_center = np.array(reference_image.TransformContinuousIndexToPhysicalPoint(np.array(reference_image.GetSize())/2.0))
    # Affine mapping the reference frame onto the input image frame ...
    transform = sitk.AffineTransform(dimension)
    transform.SetMatrix(img.GetDirection())
    transform.SetTranslation(np.array(img.GetOrigin()) - reference_origin)
    # ... composed with a translation that aligns the two image centres.
    centering_transform = sitk.TranslationTransform(dimension)
    img_center = np.array(img.TransformContinuousIndexToPhysicalPoint(np.array(img.GetSize())/2.0))
    centering_transform.SetOffset(np.array(transform.GetInverse().TransformPoint(img_center) - reference_center))
    centered_transform = sitk.Transform(transform)
    centered_transform.AddTransform(centering_transform)
    resampled_img_data = sitk.Resample(img, reference_image, centered_transform, sitk.sitkLinear, 0.0)
    # SimpleITK returns arrays with reversed axis order; swap first/last
    # axes back before returning.
    resampled_img_data = np.swapaxes(sitk.GetArrayFromImage(resampled_img_data), 0, -1)
    if mask:
        os.remove('mask_intermediate.nii.gz')
    return resampled_img_data
def pre_process_image(img_file):
    """Resample *img_file* to a 160^3 grid and min-max normalise it.

    Returns ``(norm_img_data, resamp_img)``: the first item carries two
    leading singleton dims (batch, channel) and is scaled to [0, 1]; the
    second is the raw resampled float32 volume.
    """
    target_shape = [160, 160, 160]
    resamp_img = resample_image(img_file, target_shape).astype(np.float32)
    batched = np.expand_dims(np.expand_dims(resamp_img, axis=0), axis=0)
    lo = np.min(batched)
    hi = np.max(batched)
    # Epsilon guards against division by zero on a constant volume.
    norm_img_data = (batched - lo) / (hi - lo + 1e-7)
    return norm_img_data, resamp_img
def get_available_gpus():
    """Return the names of GPU devices visible to TensorFlow."""
    devices = device_lib.list_local_devices()
    return [device.name for device in devices if device.device_type == 'GPU']
| 34.564815
| 129
| 0.710956
|
4a0ca085cdbf984239ab18d4fd513c5d37d143a1
| 20,403
|
py
|
Python
|
src/test/scenarios/managed-network/output/src/managed-network/azext_managed_network/vendored_sdks/managed-network/operations/_managed_network_peering_policy_operations.py
|
changlong-liu/autorest.az
|
d6a85324b2849f65ccfef872d0ecb44eb28e16a0
|
[
"MIT"
] | null | null | null |
src/test/scenarios/managed-network/output/src/managed-network/azext_managed_network/vendored_sdks/managed-network/operations/_managed_network_peering_policy_operations.py
|
changlong-liu/autorest.az
|
d6a85324b2849f65ccfef872d0ecb44eb28e16a0
|
[
"MIT"
] | null | null | null |
src/test/scenarios/managed-network/output/src/managed-network/azext_managed_network/vendored_sdks/managed-network/operations/_managed_network_peering_policy_operations.py
|
changlong-liu/autorest.az
|
d6a85324b2849f65ccfef872d0ecb44eb28e16a0
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
# ClsType: optional callback applied to (raw pipeline response,
# deserialized result, response headers) by every operation method.
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ManagedNetworkPeeringPolicyOperations(object):
    """ManagedNetworkPeeringPolicyOperations operations.

    You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~managed_network_management_client.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    # Class-level alias so callers can reach the model types via the
    # operation group (client.managed_network_peering_policy.models).
    models = models
    def __init__(self, client, config, serializer, deserializer):
        # Keep references to the shared pipeline client, configuration and
        # the (de)serializers supplied by the service client.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def get(
        self,
        resource_group_name, # type: str
        managed_network_name, # type: str
        managed_network_peering_policy_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "models.ManagedNetworkPeeringPolicy"
        """The Get ManagedNetworkPeeringPolicies operation gets a Managed Network Peering Policy resource, specified by the resource group, Managed Network name, and peering policy name.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param managed_network_name: The name of the Managed Network.
        :type managed_network_name: str
        :param managed_network_peering_policy_name: The name of the Managed Network Peering Policy.
        :type managed_network_peering_policy_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ManagedNetworkPeeringPolicy or the result of cls(response)
        :rtype: ~managed_network_management_client.models.ManagedNetworkPeeringPolicy
        :raises: ~managed_network_management_client.models.ErrorResponseException:
        """
        cls = kwargs.pop('cls', None )  # type: ClsType["models.ManagedNetworkPeeringPolicy"]
        error_map = kwargs.pop('error_map', {})
        api_version = "2019-06-01-preview"

        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'managedNetworkName': self._serialize.url("managed_network_name", managed_network_name, 'str'),
            'managedNetworkPeeringPolicyName': self._serialize.url("managed_network_peering_policy_name", managed_network_peering_policy_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 is the only success status for this GET.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise models.ErrorResponseException.from_response(response, self._deserialize)

        deserialized = self._deserialize('ManagedNetworkPeeringPolicy', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetwork/managedNetworks/{managedNetworkName}/managedNetworkPeeringPolicies/{managedNetworkPeeringPolicyName}'}
    def _create_or_update_initial(
        self,
        resource_group_name, # type: str
        managed_network_name, # type: str
        managed_network_peering_policy_name, # type: str
        location=None, # type: Optional[str]
        properties=None, # type: Optional["models.ManagedNetworkPeeringPolicyProperties"]
        **kwargs # type: Any
    ):
        # type: (...) -> "models.ManagedNetworkPeeringPolicy"
        # Initial PUT of the long-running create/update; begin_create_or_update
        # wraps this call with an LRO poller.
        cls = kwargs.pop('cls', None )  # type: ClsType["models.ManagedNetworkPeeringPolicy"]
        error_map = kwargs.pop('error_map', {})

        # Flattened parameters are folded back into the request model.
        managed_network_policy = models.ManagedNetworkPeeringPolicy(location=location, properties=properties)
        api_version = "2019-06-01-preview"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'managedNetworkName': self._serialize.url("managed_network_name", managed_network_name, 'str'),
            'managedNetworkPeeringPolicyName': self._serialize.url("managed_network_peering_policy_name", managed_network_peering_policy_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json'

        # Construct body
        body_content = self._serialize.body(managed_network_policy, 'ManagedNetworkPeeringPolicy')

        # Construct and send request
        request = self._client.put(url, query_parameters, header_parameters, body_content)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 = updated existing resource, 201 = created a new one.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise models.ErrorResponseException.from_response(response, self._deserialize)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ManagedNetworkPeeringPolicy', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('ManagedNetworkPeeringPolicy', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetwork/managedNetworks/{managedNetworkName}/managedNetworkPeeringPolicies/{managedNetworkPeeringPolicyName}'}
    def begin_create_or_update(
        self,
        resource_group_name, # type: str
        managed_network_name, # type: str
        managed_network_peering_policy_name, # type: str
        location=None, # type: Optional[str]
        properties=None, # type: Optional["models.ManagedNetworkPeeringPolicyProperties"]
        **kwargs # type: Any
    ):
        # type: (...) -> "models.ManagedNetworkPeeringPolicy"
        """The Put ManagedNetworkPeeringPolicies operation creates/updates a new Managed Network Peering Policy.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param managed_network_name: The name of the Managed Network.
        :type managed_network_name: str
        :param managed_network_peering_policy_name: The name of the Managed Network Peering Policy.
        :type managed_network_peering_policy_name: str
        :param location: The geo-location where the resource lives.
        :type location: str
        :param properties: Properties of a Managed Network Peering Policy.
        :type properties: ~managed_network_management_client.models.ManagedNetworkPeeringPolicyProperties
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :return: An instance of LROPoller that returns ManagedNetworkPeeringPolicy
        :rtype: ~azure.core.polling.LROPoller[~managed_network_management_client.models.ManagedNetworkPeeringPolicy]
        :raises ~managed_network_management_client.models.ErrorResponseException:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None )  # type: ClsType["models.ManagedNetworkPeeringPolicy"]
        # Fire the initial PUT; cls=lambda keeps the raw pipeline response
        # so the poller can inspect the LRO headers.
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            managed_network_name=managed_network_name,
            managed_network_peering_policy_name=managed_network_peering_policy_name,
            location=location,
            properties=properties,
            cls=lambda x,y,z: x,
            **kwargs
        )

        def get_long_running_output(pipeline_response):
            # Deserialize the final response once polling completes.
            deserialized = self._deserialize('ManagedNetworkPeeringPolicy', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        lro_delay = kwargs.get(
            'polling_interval',
            self._config.polling_interval
        )
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetwork/managedNetworks/{managedNetworkName}/managedNetworkPeeringPolicies/{managedNetworkPeeringPolicyName}'}
    def _delete_initial(
        self,
        resource_group_name, # type: str
        managed_network_name, # type: str
        managed_network_peering_policy_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> None
        # Initial DELETE of the long-running delete; begin_delete wraps this
        # call with an LRO poller.
        cls = kwargs.pop('cls', None )  # type: ClsType[None]
        error_map = kwargs.pop('error_map', {})
        api_version = "2019-06-01-preview"

        # Construct URL
        url = self._delete_initial.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'managedNetworkName': self._serialize.url("managed_network_name", managed_network_name, 'str'),
            'managedNetworkPeeringPolicyName': self._serialize.url("managed_network_peering_policy_name", managed_network_peering_policy_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}

        # Construct and send request
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200/202 = accepted (possibly asynchronous), 204 = already gone.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise models.ErrorResponseException.from_response(response, self._deserialize)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetwork/managedNetworks/{managedNetworkName}/managedNetworkPeeringPolicies/{managedNetworkPeeringPolicyName}'}
    def begin_delete(
        self,
        resource_group_name, # type: str
        managed_network_name, # type: str
        managed_network_peering_policy_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> None
        """The Delete ManagedNetworkPeeringPolicies operation deletes a Managed Network Peering Policy, specified by the resource group, Managed Network name, and peering policy name.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param managed_network_name: The name of the Managed Network.
        :type managed_network_name: str
        :param managed_network_peering_policy_name: The name of the Managed Network Peering Policy.
        :type managed_network_peering_policy_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :return: An instance of LROPoller that returns None
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~managed_network_management_client.models.ErrorResponseException:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None )  # type: ClsType[None]
        # Fire the initial DELETE; cls=lambda keeps the raw pipeline
        # response so the poller can inspect the LRO headers.
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            managed_network_name=managed_network_name,
            managed_network_peering_policy_name=managed_network_peering_policy_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

        def get_long_running_output(pipeline_response):
            # Delete has no body to deserialize; only invoke the callback.
            if cls:
                return cls(pipeline_response, None, {})

        lro_delay = kwargs.get(
            'polling_interval',
            self._config.polling_interval
        )
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetwork/managedNetworks/{managedNetworkName}/managedNetworkPeeringPolicies/{managedNetworkPeeringPolicyName}'}
def list_by_managed_network(
    self,
    resource_group_name,  # type: str
    managed_network_name,  # type: str
    top=None,  # type: Optional[int]
    skiptoken=None,  # type: Optional[str]
    **kwargs  # type: Any
):
    # type: (...) -> "models.ManagedNetworkPeeringPolicyListResult"
    """The ListByManagedNetwork PeeringPolicies operation retrieves all the Managed Network
    Peering Policies in a specified Managed Network, in a paginated format.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param managed_network_name: The name of the Managed Network.
    :type managed_network_name: str
    :param top: May be used to limit the number of results in a page for list queries.
    :type top: int
    :param skiptoken: Skiptoken is only used if a previous operation returned a partial result. If
     a previous response contains a nextLink element, the value of the nextLink element will include
     a skiptoken parameter that specifies a starting point to use for subsequent calls.
    :type skiptoken: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ManagedNetworkPeeringPolicyListResult or the result of cls(response)
    :rtype: ~managed_network_management_client.models.ManagedNetworkPeeringPolicyListResult
    :raises: ~managed_network_management_client.models.ErrorResponseException:
    """
    cls = kwargs.pop('cls', None )  # type: ClsType["models.ManagedNetworkPeeringPolicyListResult"]
    error_map = kwargs.pop('error_map', {})
    api_version = "2019-06-01-preview"

    def prepare_request(next_link=None):
        # First page is built from the URL template; subsequent pages reuse
        # the fully-formed nextLink returned by the service verbatim.
        if not next_link:
            # Construct URL
            url = self.list_by_managed_network.metadata['url']
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'managedNetworkName': self._serialize.url("managed_network_name", managed_network_name, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
        else:
            url = next_link
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if top is not None:
            # Service-enforced page size bounds: 1..20.
            query_parameters['$top'] = self._serialize.query("top", top, 'int', maximum=20, minimum=1)
        if skiptoken is not None:
            query_parameters['$skiptoken'] = self._serialize.query("skiptoken", skiptoken, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        return request

    def extract_data(pipeline_response):
        # Deserialize one page; return (continuation token, iterator of items).
        deserialized = self._deserialize('ManagedNetworkPeeringPolicyListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch a single page; raise the service error type on non-200 status.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise models.ErrorResponseException.from_response(response, self._deserialize)
        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list_by_managed_network.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetwork/managedNetworks/{managedNetworkName}/managedNetworkPeeringPolicies'}
| 51.263819
| 254
| 0.6923
|
4a0ca133b655f6451272eed072620dfdf9b651dc
| 400
|
py
|
Python
|
17/9.py
|
deadnotxaa/homework
|
c56b1298f0f3198ff1be919e369256c67f54915f
|
[
"MIT"
] | null | null | null |
17/9.py
|
deadnotxaa/homework
|
c56b1298f0f3198ff1be919e369256c67f54915f
|
[
"MIT"
] | null | null | null |
17/9.py
|
deadnotxaa/homework
|
c56b1298f0f3198ff1be919e369256c67f54915f
|
[
"MIT"
] | null | null | null |
if __name__ == "__main__":
path = input("Enter a path to file: ")
with open(path, "r", encoding="utf-8") as f:
a = [int(i) for i in f]
count = 0
max_sum = 0
for i in range(len(a) - 1):
for j in range(i + 1, len(a)):
if (a[i] * a[j]) % 26 == 0:
count += 1
max_sum = max(max_sum, a[i] + a[j])
print(count, max_sum)
| 25
| 51
| 0.4575
|
4a0ca23f7407aabc1e595046ea13e0ca4173caf9
| 3,176
|
py
|
Python
|
flask/main.py
|
RengeMiyauchi/vrchat-time-shaders
|
486f0a7064a6689bcfec79938ff2aa5ffec8a0f8
|
[
"CC0-1.0"
] | null | null | null |
flask/main.py
|
RengeMiyauchi/vrchat-time-shaders
|
486f0a7064a6689bcfec79938ff2aa5ffec8a0f8
|
[
"CC0-1.0"
] | null | null | null |
flask/main.py
|
RengeMiyauchi/vrchat-time-shaders
|
486f0a7064a6689bcfec79938ff2aa5ffec8a0f8
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
import io
import requests
from pendulum import timezone
from datetime import datetime
from timezonefinder import TimezoneFinder
from PIL import Image, ImageDraw
from flask_caching import Cache
from werkzeug.wsgi import FileWrapper
from flask import Flask, Response, request, redirect
# Flask-Caching configuration: simple in-process cache, entries kept 7 days.
config = {
    'CACHE_TYPE': 'simple',
    "CACHE_DEFAULT_TIMEOUT": 60*60*24*7
}
# Offline lat/lon -> IANA timezone-name resolver.
tf = TimezoneFinder()
app = Flask(__name__)
app.config.from_mapping(config)
cache = Cache(app)
@app.route('/')
def index():
    """Root endpoint; serves an empty body (acts as a liveness check)."""
    return ''
@app.route('/vrctime_test')
def vrc_time_test():
    """Debug endpoint: return the caller's IP and their local time as text."""
    client_ip = request.headers['x-appengine-user-ip']
    #client_ip = request.headers['X-Real-IP']
    local_time = get_current_time(client_ip)
    readable = local_time.strftime("%m/%d/%Y, %H:%M:%S")
    return "ip: {0}, time: {1}".format(client_ip, readable)
@app.route('/vrctime')
def vrc_time():
    """Serve the caller's local time encoded as a small PNG bitmap."""
    client_ip = request.headers['x-appengine-user-ip']
    #client_ip = request.headers['X-Real-IP']
    local_time = get_current_time(client_ip)
    png_buffer = generate_image(local_time)
    wrapper = FileWrapper(png_buffer)
    return Response(wrapper, mimetype="image/PNG", direct_passthrough=True)
@cache.memoize(timeout=60*60*24*7)
def get_geo_info(ip):
    """Look up approximate latitude/longitude for *ip* via ip-api.com.

    Results are memoized for 7 days. Raising on a bad response keeps
    Flask-Caching from storing a failed (rate-limited / unresolvable) lookup.

    :param ip: client IP address as a string.
    :return: dict with "lat" and "lon" keys.
    :raises Exception: when the API response carries no "lat" field.
    """
    URL = "http://ip-api.com/json/{0}?fields=lat,lon".format(ip)
    # Fix: requests has no default timeout, so a stalled upstream call
    # previously could hang the request handler indefinitely.
    result = requests.get(url=URL, timeout=5).json()
    if "lat" not in result:
        raise Exception("don't cache")
    return result
def get_current_time(ip):
    """Return the current time localized to *ip*'s timezone.

    Falls back to Asia/Tokyo when geolocation fails or the coordinates do
    not map to any timezone polygon (e.g. over the ocean).

    :param ip: client IP address as a string.
    :return: timezone-aware :class:`datetime.datetime`.
    """
    try:
        location = get_geo_info(ip)
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrow to Exception and keep the same fallback.
        return datetime.now().astimezone(timezone("Asia/Tokyo"))
    timezone_str = tf.timezone_at(lng=location["lon"], lat=location["lat"])
    if timezone_str is None:
        # timezone_at returns None for coordinates outside any timezone;
        # previously this crashed the request with timezone(None).
        return datetime.now().astimezone(timezone("Asia/Tokyo"))
    return datetime.now().astimezone(timezone(timezone_str))
def generate_image(now):
    """Render *now* into an 8x8-cell PNG where each cell encodes 3 bits.

    A cell's R/G/B channels act as bits 0/1/2, so each cell carries one
    octal digit; fields wider than 3 bits are split across two cells
    (low 3 bits first, remaining high bits second). Returns the PNG as a
    seekable in-memory BytesIO.
    """
    CELL = 8  # cell edge length in pixels; canvas is 8x8 cells = 64x64 px
    im = Image.new("RGB", (CELL*8, CELL*8), (0,0,0))
    dr = ImageDraw.Draw(im)
    def drawCell(x, y, v):
        # Paint cell (x, y): channel is 255 when the matching bit of v is set.
        x0 = x*CELL
        y0 = y*CELL
        x1 = (x+1)*CELL
        y1 = (y+1)*CELL
        r = 255 if ((v&(1<<0)) != 0) else 0
        g = 255 if ((v&(1<<1)) != 0) else 0
        b = 255 if ((v&(1<<2)) != 0) else 0
        dr.rectangle([x0, y0, x1, y1], fill=(r, g, b))
    year = now.year-1900   # years since 1900, encoded in 9 bits (3 cells)
    month = now.month-1    # zero-based month
    day = now.day
    hour = now.hour
    minute = now.minute
    second = now.second
    ms = int(now.microsecond/1000*64/1000)  # sub-second position in 1/64ths (0..63)
    weekday = now.isoweekday()%7            # 0 = Sunday .. 6 = Saturday
    # NOTE(review): looks like a simplified Metonic-cycle lunar-age
    # approximation (0..29) -- presumably adequate for a shader effect,
    # not astronomically exact; confirm against the shader that decodes it.
    moonAge = (((now.year-2009)%19)*11+(now.month+1)+(now.day+1)) % 30
    # Row 0: time of day -- hour, minute, second, 1/64ths (two cells each).
    drawCell(0, 0, hour&0b111)
    drawCell(1, 0, hour>>3)
    drawCell(2, 0, minute&0b111)
    drawCell(3, 0, minute>>3)
    drawCell(4, 0, second&0b111)
    drawCell(5, 0, second>>3)
    drawCell(6, 0, ms&0b111)
    drawCell(7, 0, ms>>3)
    # Row 1: date -- year (3 cells), month (2), day (2), weekday (1).
    drawCell(0, 1, year&0b111)
    drawCell(1, 1, (year>>3)&0b111)
    drawCell(2, 1, (year>>6)&0b111)
    drawCell(3, 1, month&0b111)
    drawCell(4, 1, month>>3)
    drawCell(5, 1, day&0b111)
    drawCell(6, 1, day>>3)
    drawCell(7, 1, weekday)
    # Row 2: moon age (two cells); remaining cells stay black.
    drawCell(0, 2, moonAge&0b111)
    drawCell(1, 2, moonAge>>3)
    file_object = io.BytesIO()
    im.save(file_object, "PNG")
    file_object.seek(0)  # rewind so the response streams from the start
    return file_object
if __name__ == "__main__":
    # Local development server only; in production the app is served via WSGI.
    app.run(host='127.0.0.1', port=8080, debug=True)
| 25.408
| 79
| 0.61335
|
4a0ca29ac1c621537787e633c7822d463cb218b0
| 10,416
|
py
|
Python
|
pytorch/function/evaluate_s3dis_dist.py
|
paulwong16/CloserLook3D
|
71a7f519e55354c96c579adc7322a26fe7779ce7
|
[
"MIT"
] | 215
|
2020-07-03T01:39:57.000Z
|
2022-03-21T09:05:55.000Z
|
pytorch/function/evaluate_s3dis_dist.py
|
eglrp/CloserLook3D
|
f640be8e7ec9fb99a563034616b7ef413f5bf58e
|
[
"MIT"
] | 35
|
2020-07-15T04:12:26.000Z
|
2022-01-15T13:38:36.000Z
|
pytorch/function/evaluate_s3dis_dist.py
|
eglrp/CloserLook3D
|
f640be8e7ec9fb99a563034616b7ef413f5bf58e
|
[
"MIT"
] | 39
|
2020-07-05T13:53:00.000Z
|
2022-03-14T01:53:50.000Z
|
"""
Distributed evaluating script for scene segmentation with S3DIS dataset
"""
import argparse
import os
import sys
import time
import json
import random
import numpy as np
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(ROOT_DIR)
import torch
import torch.nn as nn
from torchvision import transforms
import torch.distributed as dist
from torch.utils.tensorboard import SummaryWriter
from torch.nn.parallel import DistributedDataParallel
import datasets.data_utils as d_utils
from models import build_scene_segmentation
from datasets import S3DISSeg
from utils.util import AverageMeter, s3dis_metrics, sub_s3dis_metrics
from utils.logger import setup_logger
from utils.config import config, update_config
def parse_option():
    """Parse CLI arguments, fold them into the global config, and seed RNGs.

    Returns the parsed ``(args, config)`` pair.
    """
    parser = argparse.ArgumentParser('S3DIS scene-segmentation evaluating')
    parser.add_argument('--cfg', type=str, required=True, help='config file')
    parser.add_argument('--load_path', required=True, type=str, metavar='PATH',
                        help='path to latest checkpoint')
    parser.add_argument('--log_dir', type=str, default='log_eval', help='log dir [default: log_eval]')
    parser.add_argument('--data_root', type=str, default='data', help='root director of dataset')
    parser.add_argument('--num_workers', type=int, default=4, help='num of workers to use')
    parser.add_argument('--batch_size', type=int, help='batch_size')
    parser.add_argument('--num_points', type=int, help='num_points')
    parser.add_argument('--num_steps', type=int, help='num_steps')
    parser.add_argument("--local_rank", type=int, help='local rank for DistributedDataParallel')
    parser.add_argument("--rng_seed", type=int, default=0, help='manual seed')
    args, _unparsed = parser.parse_known_args()

    # Load the YAML config, then let CLI values win.
    update_config(args.cfg)
    config.data_root = args.data_root
    config.num_workers = args.num_workers
    config.load_path = args.load_path
    config.rng_seed = args.rng_seed
    config.local_rank = args.local_rank

    # Timestamped run directory named after the config file's stem.
    ddir_name = args.cfg.split('.')[-2].split('/')[-1]
    config.log_dir = os.path.join(args.log_dir, 's3dis', f'{ddir_name}_{int(time.time())}')

    # Optional overrides: only applied when passed on the command line.
    for field in ('batch_size', 'num_points', 'num_steps'):
        override = getattr(args, field)
        if override:
            setattr(config, field, override)

    print(args)
    print(config)

    # Seed every RNG in play for reproducible evaluation.
    for seed_fn in (torch.manual_seed, torch.cuda.manual_seed_all,
                    random.seed, np.random.seed):
        seed_fn(args.rng_seed)

    return args, config
def get_loader(config):
    """Build the distributed, non-shuffled validation DataLoader for S3DIS."""
    eval_transforms = transforms.Compose([d_utils.PointcloudToTensor()])
    dataset = S3DISSeg(input_features_dim=config.input_features_dim,
                       subsampling_parameter=config.sampleDl, color_drop=config.color_drop,
                       in_radius=config.in_radius, num_points=config.num_points,
                       num_steps=config.num_steps, num_epochs=20,
                       data_root=config.data_root, transforms=eval_transforms,
                       split='val')
    # Deterministic sharding across ranks; no shuffling during evaluation.
    sampler = torch.utils.data.distributed.DistributedSampler(dataset, shuffle=False)
    loader = torch.utils.data.DataLoader(dataset,
                                         batch_size=config.batch_size,
                                         shuffle=False,
                                         num_workers=config.num_workers,
                                         pin_memory=True,
                                         sampler=sampler,
                                         drop_last=False)
    return loader
def load_checkpoint(config, model):
    """Restore model weights from config.load_path and advance start_epoch."""
    logger.info("=> loading checkpoint '{}'".format(config.load_path))
    # Load onto CPU first so every rank can restore regardless of GPU mapping.
    state = torch.load(config.load_path, map_location='cpu')
    config.start_epoch = state['epoch'] + 1
    model.load_state_dict(state['model'])
    logger.info("=> loaded successfully '{}' (epoch {})".format(config.load_path, state['epoch']))
    # Drop the checkpoint dict and return its GPU memory to the allocator.
    del state
    torch.cuda.empty_cache()
def main(config):
    """Build data + model, restore the checkpoint, and run voted evaluation."""
    val_loader = get_loader(config)
    logger.info(f"length of validation dataset: {len(val_loader.dataset)}")

    model, criterion = build_scene_segmentation(config)
    model.cuda()
    criterion.cuda()
    model = DistributedDataParallel(model, device_ids=[config.local_rank], broadcast_buffers=False)

    # optionally resume from a checkpoint
    if config.load_path:
        assert os.path.isfile(config.load_path)
        load_checkpoint(config, model)
        logger.info("==> checking loaded ckpt")
        validate('resume', val_loader, model, criterion, config, num_votes=20)

    validate('Last', val_loader, model, criterion, config, num_votes=20)
def validate(epoch, test_loader, model, criterion, config, num_votes=10):
    """Run voted evaluation over the S3DIS validation clouds.

    Accumulates per-point logits over *num_votes* passes (vote 0 is
    unaugmented; later votes apply random rotation/scale/jitter), then
    reports subsampled-cloud and full-cloud IoU after every vote.

    Args:
        epoch: tag used only in log lines (e.g. 'resume', 'Last').
        test_loader: DataLoader over the subsampled validation clouds.
        model: segmentation network, called as ``model(points, mask, features)``.
        criterion: masked point-wise loss; used for monitoring only.
        config: global config (num_classes, augmentation ranges, print_freq, ...).
        num_votes: number of passes over the dataset.

    Returns:
        Full-cloud mIoU after the final vote.
    """
    # Per-cloud logit accumulators; the 1e-6 in the counts avoids division
    # by zero for points that are never sampled by any batch.
    vote_logits_sum = [np.zeros((config.num_classes, labels.shape[0]), dtype=np.float32)
                       for labels in test_loader.dataset.sub_clouds_points_labels]
    vote_logits = [np.zeros((config.num_classes, labels.shape[0]), dtype=np.float32)
                   for labels in test_loader.dataset.sub_clouds_points_labels]
    vote_counts = [np.zeros((1, labels.shape[0]), dtype=np.float32) + 1e-6
                   for labels in test_loader.dataset.sub_clouds_points_labels]

    validation_proj = test_loader.dataset.projections
    validation_labels = test_loader.dataset.clouds_points_labels

    # Class frequencies over the full clouds, used to reweight the
    # subsampled-cloud IoU estimate.
    val_proportions = np.zeros(config.num_classes, dtype=np.float32)
    for label_value in range(config.num_classes):
        val_proportions[label_value] = np.sum(
            [np.sum(labels == label_value) for labels in test_loader.dataset.clouds_points_labels])

    batch_time = AverageMeter()
    losses = AverageMeter()

    model.eval()
    with torch.no_grad():
        end = time.time()
        RT = d_utils.BatchPointcloudRandomRotate(x_range=config.x_angle_range, y_range=config.y_angle_range,
                                                 z_range=config.z_angle_range)
        TS = d_utils.BatchPointcloudScaleAndJitter(scale_low=config.scale_low, scale_high=config.scale_high,
                                                   std=config.noise_std, clip=config.noise_clip,
                                                   augment_symmetries=config.augment_symmetries)
        for v in range(num_votes):
            test_loader.dataset.epoch = v
            for idx, (points, mask, features, points_labels, cloud_label, input_inds) in enumerate(test_loader):
                # augment for voting; vote 0 evaluates the raw clouds
                if v > 0:
                    points = RT(points)
                    points = TS(points)
                    # Geometry changed, so rebuild the xyz channels of the features.
                    if config.input_features_dim <= 5:
                        pass
                    elif config.input_features_dim == 6:
                        color = features[:, :3, :]
                        features = torch.cat([color, points.transpose(1, 2).contiguous()], 1)
                    elif config.input_features_dim == 7:
                        color_h = features[:, :4, :]
                        features = torch.cat([color_h, points.transpose(1, 2).contiguous()], 1)
                    else:
                        raise NotImplementedError(
                            f"input_features_dim {config.input_features_dim} in voting not supported")

                # forward
                points = points.cuda(non_blocking=True)
                mask = mask.cuda(non_blocking=True)
                features = features.cuda(non_blocking=True)
                points_labels = points_labels.cuda(non_blocking=True)
                cloud_label = cloud_label.cuda(non_blocking=True)
                input_inds = input_inds.cuda(non_blocking=True)

                pred = model(points, mask, features)
                loss = criterion(pred, points_labels, mask)
                losses.update(loss.item(), points.size(0))

                # scatter this batch's logits back into the per-cloud accumulators
                bsz = points.shape[0]
                for ib in range(bsz):
                    # Fix: np.bool was deprecated in NumPy 1.20 and removed in
                    # 1.24; the builtin bool is the documented replacement.
                    mask_i = mask[ib].cpu().numpy().astype(bool)
                    logits = pred[ib].cpu().numpy()[:, mask_i]
                    inds = input_inds[ib].cpu().numpy()[mask_i]
                    c_i = cloud_label[ib].item()
                    vote_logits_sum[c_i][:, inds] = vote_logits_sum[c_i][:, inds] + logits
                    vote_counts[c_i][:, inds] += 1
                    vote_logits[c_i] = vote_logits_sum[c_i] / vote_counts[c_i]

                # measure elapsed time
                batch_time.update(time.time() - end)
                end = time.time()

                if idx % config.print_freq == 0:
                    logger.info(
                        f'Test: [{idx}/{len(test_loader)}]\t'
                        f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                        f'Loss {losses.val:.4f} ({losses.avg:.4f})')

            subIoUs, submIoU = sub_s3dis_metrics(config.num_classes, vote_logits,
                                                 test_loader.dataset.sub_clouds_points_labels, val_proportions)
            logger.info(f'E{epoch} V{v} * sub_mIoU {submIoU:.3%}')
            logger.info(f'E{epoch} V{v} * sub_msIoU {subIoUs}')

            IoUs, mIoU = s3dis_metrics(config.num_classes, vote_logits, validation_proj, validation_labels)
            logger.info(f'E{epoch} V{v} * mIoU {mIoU:.3%}')
            logger.info(f'E{epoch} V{v} * msIoU {IoUs}')
    return mIoU
if __name__ == "__main__":
    opt, config = parse_option()

    # Bind this process to its GPU before initializing the NCCL process group.
    torch.cuda.set_device(config.local_rank)
    torch.distributed.init_process_group(backend='nccl', init_method='env://')
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    torch.backends.cudnn.deterministic = True

    os.makedirs(opt.log_dir, exist_ok=True)
    os.environ["JOB_LOAD_DIR"] = os.path.dirname(config.load_path)
    logger = setup_logger(output=config.log_dir, distributed_rank=dist.get_rank(), name="s3dis_eval")
    # Only rank 0 persists the merged config next to the logs.
    if dist.get_rank() == 0:
        path = os.path.join(config.log_dir, "config.json")
        with open(path, 'w') as f:
            json.dump(vars(opt), f, indent=2)
            json.dump(vars(config), f, indent=2)
        os.system('cp %s %s' % (opt.cfg, config.log_dir))
        logger.info("Full config saved to {}".format(path))
    main(config)
| 42.864198
| 112
| 0.619816
|
4a0ca36225309a19df1b213ef8992816148da31e
| 11,638
|
py
|
Python
|
dali/test/python/test_torch_pipeline_rnnt.py
|
roclark/DALI
|
e44a212d89a5449bbe7f4bae3d0f55f11a262932
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-10-07T23:07:23.000Z
|
2020-10-07T23:07:23.000Z
|
dali/test/python/test_torch_pipeline_rnnt.py
|
MAKali4737/DALI
|
3b114c6ebee38ff3815a9b4a234402e4d1affaa0
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
dali/test/python/test_torch_pipeline_rnnt.py
|
MAKali4737/DALI
|
3b114c6ebee38ff3815a9b4a234402e4d1affaa0
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nvidia.dali
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import numpy as np
import os
import test_utils
import librosa
import torch
import math
dali_extra_path = test_utils.get_dali_extra_path()
class FilterbankFeatures():
    """Native PyTorch log-mel filterbank front-end used as the reference.

    Mirrors the RNN-T training pipeline: preemphasis -> STFT -> power
    spectrum -> mel filterbank -> log -> frame splicing -> normalization.
    The DALI pipeline's output is compared against this implementation.
    """

    def __init__(self, sample_rate=8000, window_size=0.02, window_stride=0.01,
                 window="hann", normalize="per_feature", n_fft=None,
                 preemph=0.97,
                 nfilt=64, lowfreq=0, highfreq=None, log=True, dither=.00001,
                 pad_to=8,
                 max_duration=16.7,
                 frame_splicing=1):
        torch_windows = {
            'hann': torch.hann_window,
            'hamming': torch.hamming_window,
            'blackman': torch.blackman_window,
            'bartlett': torch.bartlett_window,
            'none': None,
        }

        self.win_length = int(sample_rate * window_size)  # frame size
        self.hop_length = int(sample_rate * window_stride)
        # FFT size defaults to the next power of two >= window length.
        self.n_fft = n_fft or 2 ** math.ceil(math.log2(self.win_length))

        self.normalize = normalize
        self.log = log
        self.dither = dither
        self.frame_splicing = frame_splicing
        self.nfilt = nfilt
        self.preemph = preemph
        self.pad_to = pad_to
        # Upper mel band defaults to Nyquist.
        highfreq = highfreq or sample_rate / 2
        window_fn = torch_windows.get(window, None)
        window_tensor = window_fn(self.win_length,
                                  periodic=False) if window_fn else None
        # (1, nfilt, n_fft/2 + 1) mel filterbank matrix from librosa.
        filterbanks = torch.tensor(
            librosa.filters.mel(sample_rate, self.n_fft, n_mels=nfilt, fmin=lowfreq,
                                fmax=highfreq), dtype=torch.float).unsqueeze(0)
        self.fb = filterbanks
        self.window = window_tensor
        # self.register_buffer("fb", filterbanks)
        # self.register_buffer("window", window_tensor)

        # Calculate maximum sequence length (# frames)
        max_length = 1 + math.ceil(
            (max_duration * sample_rate - self.win_length) / self.hop_length
        )
        # Round the frame budget up to a multiple of 16.
        max_pad = 16 - (max_length % 16)
        self.max_length = max_length + max_pad

    @staticmethod
    def splice_frames(x, frame_splicing):
        """ Stacks frames together across feature dim

        input is batch_size, feature_dim, num_frames
        output is batch_size, feature_dim*frame_splicing, num_frames
        """
        seq = [x]
        for n in range(1, frame_splicing):
            # Version of x shifted left by n frames, zero-padded at the tail.
            tmp = torch.zeros_like(x)
            tmp[:, :, :-n] = x[:, :, n:]
            seq.append(tmp)
        # Stack along the feature axis, then keep every frame_splicing-th frame.
        return torch.cat(seq, dim=1)[:, :, ::frame_splicing]

    @staticmethod
    def normalize_batch(x, seq_len, normalize_type):
        """Normalize each sample using statistics over its valid frames only.

        'per_feature' computes one mean/std per mel band per sample;
        'all_features' computes a single mean/std per sample; any other
        value leaves *x* unchanged.
        """
        constant = 1e-5
        if normalize_type == "per_feature":
            x_mean = torch.zeros((seq_len.shape[0], x.shape[1]), dtype=x.dtype,
                                 device=x.device)
            x_std = torch.zeros((seq_len.shape[0], x.shape[1]), dtype=x.dtype,
                                device=x.device)
            for i in range(x.shape[0]):
                x_mean[i, :] = x[i, :, :seq_len[i]].mean(dim=1)
                x_std[i, :] = x[i, :, :seq_len[i]].std(dim=1)
            # make sure x_std is not zero
            x_std += constant
            return (x - x_mean.unsqueeze(2)) / x_std.unsqueeze(2)
        elif normalize_type == "all_features":
            x_mean = torch.zeros(seq_len.shape, dtype=x.dtype, device=x.device)
            x_std = torch.zeros(seq_len.shape, dtype=x.dtype, device=x.device)
            for i in range(x.shape[0]):
                x_mean[i] = x[i, :, :seq_len[i].item()].mean()
                x_std[i] = x[i, :, :seq_len[i].item()].std()
            # make sure x_std is not zero
            x_std += constant
            return (x - x_mean.view(-1, 1, 1)) / x_std.view(-1, 1, 1)
        else:
            return x

    def get_seq_len(self, seq_len):
        # Convert raw-sample lengths into STFT frame counts, accounting
        # for frame splicing.
        x = torch.ceil(seq_len.to(dtype=torch.float) / self.hop_length).to(
            dtype=torch.int)
        if self.frame_splicing > 1:
            x = torch.ceil(x.float() / self.frame_splicing).to(dtype=torch.int)
        return x

    def forward(self, inp, seq_len):
        """Compute features for a (batch, samples) waveform batch.

        Returns (batch, nfilt * frame_splicing, frames), trimmed to the
        longest valid frame count. Dither is intentionally not applied here
        so the output is deterministic for the comparison test.
        """
        x = inp
        dtype = x.dtype

        seq_len = self.get_seq_len(seq_len)

        # do preemphasis
        if self.preemph is not None:
            x = torch.cat((x[:, 0].unsqueeze(1), x[:, 1:] - self.preemph * x[:, :-1]),
                          dim=1)

        # do stft
        x = torch.stft(x, n_fft=self.n_fft, hop_length=self.hop_length,
                       win_length=self.win_length,
                       center=True, window=self.window.to(dtype=torch.float))

        # get power spectrum
        x = x.pow(2).sum(-1)

        # dot with filterbank energies
        x = torch.matmul(self.fb.to(x.dtype), x)

        # log features if required
        if self.log:
            x = torch.log(x + 1e-20)

        # frame splicing if required
        if self.frame_splicing > 1:
            x = self.splice_frames(x, self.frame_splicing)

        # normalize if required
        if self.normalize:
            x = self.normalize_batch(x, seq_len, normalize_type=self.normalize)

        x = x[:, :, :seq_len.max()]  # rnnt loss requires lengths to match
        return x.to(dtype)
class RnntTrainPipeline(nvidia.dali.pipeline.Pipeline):
    """DALI pipeline reproducing the RNN-T audio front-end on CPU.

    Stages: file read -> audio decode -> silence trim -> preemphasis ->
    spectrogram -> mel filterbank -> log -> frame splicing -> normalize.
    Random stages (dither, speed perturbation) are deliberately left out so
    the output can be compared against the native reference pipeline.
    """

    def __init__(self,
                 device_id,
                 n_devices,
                 file_root,
                 file_list,
                 batch_size,
                 sample_rate=16000,
                 window_size=.02,
                 window_stride=.01,
                 nfeatures=64,
                 nfft=512,
                 frame_splicing_factor=3,
                 silence_threshold=-80,
                 dither=.00001,
                 preemph_coeff=.97,
                 lowfreq=0.0,
                 highfreq=0.0,
                 num_threads=1):
        super().__init__(batch_size, num_threads, device_id, seed=42)

        self.dither = dither
        self.frame_splicing_factor = frame_splicing_factor

        # Sharded reader so each device processes a distinct dataset slice.
        self.read = ops.FileReader(file_root=file_root, file_list=file_list, device="cpu",
                                   shard_id=device_id, num_shards=n_devices)
        self.decode = ops.AudioDecoder(device="cpu", dtype=types.FLOAT, downmix=True)
        # NOTE(review): defined but never used in define_graph (dither is off).
        self.normal_distribution = ops.NormalDistribution(device="cpu")
        self.preemph = ops.PreemphasisFilter(preemph_coeff=preemph_coeff)
        self.spectrogram = ops.Spectrogram(device="cpu", nfft=nfft,
                                           window_length=window_size * sample_rate,
                                           window_step=window_stride * sample_rate)
        self.mel_fbank = ops.MelFilterBank(device="cpu", sample_rate=sample_rate, nfilter=nfeatures,
                                           normalize=True, freq_low=lowfreq, freq_high=highfreq)
        # multiplier=ln(10) rescales dB output to natural-log features.
        self.log_features = ops.ToDecibels(device="cpu", multiplier=np.log(10), reference=1.0,
                                           cutoff_db=-80)
        self.get_shape = ops.Shapes(device="cpu")
        self.normalize = ops.Normalize(axes=[0], device="cpu")
        # Helpers for the transpose/pad/reshape frame-splicing trick.
        self.splicing_transpose = ops.Transpose(device="cpu", perm=[1, 0])
        self.splicing_reshape = ops.Reshape(device="cpu", rel_shape=[-1, frame_splicing_factor])
        self.splicing_pad = ops.Pad(axes=[0], fill_value=0, align=frame_splicing_factor, shape=[1],
                                    device="cpu")
        self.get_nonsilent_region = ops.NonsilentRegion(device="cpu", cutoff_db=silence_threshold)
        self.trim_silence = ops.Slice(device="cpu", axes=[0])
        self.to_float = ops.Cast(dtype=types.FLOAT)

    @staticmethod
    def _div_ceil(dividend, divisor):
        # Integer ceiling division.
        return (dividend + divisor - 1) // divisor

    def _splice_frames(self, input):
        """
        Frame splicing is implemented by transposing the input, padding it and reshaping.
        Theoretically, to achieve the result now there should be one more transpose at the end,
        but it can be skipped as an optimization
        """
        out = self.splicing_transpose(input)
        # Because of the padding, we need to determine length of audio sample before it occurs
        audio_len = self._div_ceil(self.get_shape(out), self.frame_splicing_factor)
        out = self.splicing_pad(out)
        out = self.splicing_reshape(out)
        # Skipping transposing back
        return out, audio_len

    def remove_silence(self, input):
        # Crop leading/trailing regions quieter than the silence cutoff.
        begin, len = self.get_nonsilent_region(input)
        out = self.trim_silence(input, begin, len)
        return out

    def define_graph(self):
        # Declarative DALI graph: returns (features, valid frame count).
        input, label = self.read()
        decoded, sr = self.decode(input)
        audio = self.remove_silence(decoded)

        # DALI's preemph works a little bit different than the one in native code.
        # The difference occurs in first value in buffer.
        audio = self.preemph(audio)
        audio = self.spectrogram(audio)
        audio = self.mel_fbank(audio)
        audio = self.log_features(audio)
        audio, audio_sh = self._splice_frames(audio)

        # This normalization goes across ax=0, since
        # the frame splicing doesn't transpose the tensor back
        audio = self.normalize(audio)
        return audio, audio_sh
def test_rnnt_data_pipeline():
    """
    Test compares pre-calculated output of native data pipeline with an output
    from DALI data pipeline. There are few modification of native data pipeline
    comparing to the reference: random operations (i.e. dither and presampling
    aka "speed perturbation") are turned off
    """
    batch_size = 2
    ref_pipeline = FilterbankFeatures(sample_rate=16000, n_fft=512, highfreq=.0, dither=.00001,
                                      frame_splicing=3)
    data_path = os.path.join(dali_extra_path, "db", "audio", "rnnt_data_pipeline")
    rec_names = ["and_showed_itself_decoded.npy", "asked_her_father_decoded.npy"]
    recordings = [np.load(os.path.join(data_path, name)) for name in rec_names]

    pipe = RnntTrainPipeline(device_id=0, n_devices=1, file_root=data_path,
                             file_list=os.path.join(data_path, "file_list.txt"),
                             batch_size=batch_size)
    pipe.build()
    dali_out = pipe.run()

    seq_len = torch.tensor([rec.shape[0] for rec in recordings])
    # Reference features per recording, each of shape (1, features, frames).
    reference_data = [ref_pipeline.forward(torch.tensor([rec]), seq_len) for rec in recordings]

    for sample_idx in range(batch_size):
        output_data = dali_out[0].at(sample_idx)
        # DALI emits (frames, features); transpose to match the reference.
        output_data = np.transpose(output_data, (1, 0))
        audio_len = dali_out[1].at(sample_idx)[0]
        assert audio_len == reference_data[sample_idx].shape[2]
        assert reference_data[sample_idx].shape[1:] == output_data.shape
        # Fix: previously `reference_data[sample_idx][1:]` sliced away the
        # size-1 batch dimension, making `size` 0; np.int64 / 0 silently
        # evaluates to inf (RuntimeWarning only), so the threshold check
        # passed vacuously. Use the true element count instead.
        size = reference_data[sample_idx].flatten().shape[0]
        # Require >= 99% of feature values to match within abs tolerance 0.01.
        assert np.sum(
            np.isclose(reference_data[sample_idx], output_data, atol=.01, rtol=0)) / size > .99
| 39.585034
| 100
| 0.602509
|
4a0ca37878e99734984a91777a00f3174ba749de
| 3,876
|
py
|
Python
|
woocommerce/api.py
|
brumar/wc-api-python
|
7824e4410dca6950326fbd9d62ae9cfa4a886c72
|
[
"MIT"
] | null | null | null |
woocommerce/api.py
|
brumar/wc-api-python
|
7824e4410dca6950326fbd9d62ae9cfa4a886c72
|
[
"MIT"
] | null | null | null |
woocommerce/api.py
|
brumar/wc-api-python
|
7824e4410dca6950326fbd9d62ae9cfa4a886c72
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
WooCommerce API Class
"""
__title__ = "woocommerce-api"
__version__ = "2.1.0"
__author__ = "Claudio Sanches @ Automattic"
__license__ = "MIT"
from requests import request
from json import dumps as jsonencode
from time import time
from woocommerce.oauth import OAuth
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
class API(object):
    """ API Class

    Thin client for the WooCommerce REST API: builds endpoint URLs,
    authenticates via HTTP Basic auth (HTTPS stores) or OAuth 1.0a
    (plain-HTTP stores), and JSON-encodes request bodies.
    """

    def __init__(self, url, consumer_key, consumer_secret, **kwargs):
        # url: store base URL, e.g. "https://example.com".
        # Recognized kwargs: wp_api (bool, default True -> /wp-json prefix),
        # version (API version path segment), timeout (seconds),
        # verify_ssl (bool), query_string_auth (send credentials as query
        # params instead of Basic auth -- HTTPS only).
        self.url = url
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        self.wp_api = kwargs.get("wp_api", True)
        self.version = kwargs.get("version", "wc/v3")
        self.is_ssl = self.__is_ssl()
        self.timeout = kwargs.get("timeout", 5)
        self.verify_ssl = kwargs.get("verify_ssl", True)
        self.query_string_auth = kwargs.get("query_string_auth", False)

    def __is_ssl(self):
        """ Check if url use HTTPS """
        return self.url.startswith("https")

    def __get_url(self, endpoint):
        """ Get URL for requests """
        url = self.url
        api = "wc-api"  # legacy prefix, used when wp_api is False

        if url.endswith("/") is False:
            url = "%s/" % url

        if self.wp_api:
            api = "wp-json"

        return "%s%s/%s/%s" % (url, api, self.version, endpoint)

    def __get_oauth_url(self, url, method, **kwargs):
        """ Generate oAuth1.0a URL """
        oauth = OAuth(
            url=url,
            consumer_key=self.consumer_key,
            consumer_secret=self.consumer_secret,
            version=self.version,
            method=method,
            # oauth_timestamp override is mainly useful for testing.
            oauth_timestamp=kwargs.get("oauth_timestamp", int(time()))
        )

        return oauth.get_oauth_url()

    def __request(self, method, endpoint, data, params=None, **kwargs):
        """ Do requests """
        if params is None:
            params = {}
        url = self.__get_url(endpoint)
        auth = None
        headers = {
            "user-agent": "WooCommerce API Client-Python/%s" % __version__,
            "accept": "application/json"
        }
        # Caller-supplied headers win over the defaults above.
        new_headers = kwargs.pop("headers", {})
        headers.update(new_headers)

        if self.is_ssl is True and self.query_string_auth is False:
            # HTTPS: standard Basic auth with the consumer credentials.
            auth = (self.consumer_key, self.consumer_secret)
        elif self.is_ssl is True and self.query_string_auth is True:
            # HTTPS with credentials passed as query-string parameters.
            params.update({
                "consumer_key": self.consumer_key,
                "consumer_secret": self.consumer_secret
            })
        else:
            # Plain HTTP: query params must be baked into the URL *before*
            # computing the OAuth 1.0a signature.
            # NOTE(review): `params` is still passed to request() below, so
            # in this branch the query parameters appear to be sent twice
            # (signed-in-URL and again by requests) -- verify against the
            # upstream client before relying on OAuth with params.
            encoded_params = urlencode(params)
            url = "%s?%s" % (url, encoded_params)
            url = self.__get_oauth_url(url, method, **kwargs)

        if data is not None:
            # ensure_ascii=False keeps non-ASCII text intact; encode to UTF-8.
            data = jsonencode(data, ensure_ascii=False).encode('utf-8')
            headers["content-type"] = "application/json;charset=utf-8"

        return request(
            method=method,
            url=url,
            verify=self.verify_ssl,
            auth=auth,
            params=params,
            data=data,
            timeout=self.timeout,
            headers=headers,
            **kwargs
        )

    def get(self, endpoint, **kwargs):
        """ Get requests """
        return self.__request("GET", endpoint, None, **kwargs)

    def post(self, endpoint, data, **kwargs):
        """ POST requests """
        return self.__request("POST", endpoint, data, **kwargs)

    def put(self, endpoint, data, **kwargs):
        """ PUT requests """
        return self.__request("PUT", endpoint, data, **kwargs)

    def delete(self, endpoint, **kwargs):
        """ DELETE requests """
        return self.__request("DELETE", endpoint, None, **kwargs)

    def options(self, endpoint, **kwargs):
        """ OPTIONS requests """
        return self.__request("OPTIONS", endpoint, None, **kwargs)
| 30.28125
| 75
| 0.579205
|
4a0ca39a3e42dc10215109e6e441ccb062093f45
| 2,574
|
py
|
Python
|
udemy/Aula17_Conjuntos_Exercicios.py
|
estercardosotoja/curso_python
|
dc115bee4bbb66cd5224e80e13e295d30fb5473e
|
[
"MIT"
] | null | null | null |
udemy/Aula17_Conjuntos_Exercicios.py
|
estercardosotoja/curso_python
|
dc115bee4bbb66cd5224e80e13e295d30fb5473e
|
[
"MIT"
] | 1
|
2021-12-10T18:44:42.000Z
|
2021-12-10T18:44:42.000Z
|
udemy/Aula17_Conjuntos_Exercicios.py
|
estercardosotoja/curso_python
|
dc115bee4bbb66cd5224e80e13e295d30fb5473e
|
[
"MIT"
] | null | null | null |
"""
1) Sami e Dudu irão fazer uma competição de quem visita mais Estados no Brasil em um período de 6 meses,
até então Dudu já visitou o Espírito Santo e São Paulo, enquanto Sami visitou Rio de Janeiro e Bahia.
Crie dois conjuntos diferentes para simbolizar os estados que cada um foi. Após 6 meses Dudu
visitou Bahia, Acre, Santa Catarina e Sergipe, enquanto Sami visitou Bahia, Minas Gerais, Amazonas e Paraná,
atualize cada um dos conjuntos com os novos Estados. Responda:
"""
# My solution
# Starting sets: states each traveler has already visited.
Sami = {"Rio de Janeiro", "Bahia"}
Dudu = {"Espírito Santo", "São Paulo"}

# NOTE(review): the answer checks below are case-sensitive ('n' only);
# answering 'N' keeps the loop going -- confirm this is intended.
resposta = ' '
adicionar_Dudu = input(f"Adicionar mais estados para Dudu? s/n ")
if adicionar_Dudu != 'n':
    while resposta != 'n':
        estado_adional = input("Estado adicional Dudu")
        Dudu.add(estado_adional)
        resposta = input('Adicionar mais estado para Dudu? S/N')

resposta = ' '
adicionar_Sami = input(f"Adicionar mais estados para Sami? s/n ")
if adicionar_Sami != 'n':
    while resposta != 'n':
        estado_adional = input("Estado adicional Sami")
        Sami.add(estado_adional)
        resposta = input('Adicionar mais estado para Sami? S/N')

# Report each set, Dudu-only states, and the union of both.
print(f"\n Estados visitados por Dudu: {Dudu}"
      f"\n Estados visitados por Sami: {Sami}"
      f"\n Estados Dudu visitou que Sami não foi: {Dudu.difference(Sami)}"
      f"\n Estados Dudu e Sami visitaram: {Dudu.union(Sami)}")

# Percentage of Brazil's 27 federative units visited (integer division).
porcentagem_Sami = ((len(Sami)*100)//27)
porcentagem_Dudu = ((len(Dudu)*100)//27)

if porcentagem_Sami < porcentagem_Dudu:
    print(f"Dudu venceu com {porcentagem_Dudu}%")
elif porcentagem_Sami > porcentagem_Dudu:
    print(f"Sami venceu com {porcentagem_Sami}%")
else:
    print(f"Empate \n Dudu: {porcentagem_Dudu} \n Sami: {porcentagem_Sami}")
# Course's solution
# Initial situation at the start of the challenge
estados_sami = {'RJ','BA'}
estados_dudu = {'ES','SP'}

# Collect extra states until the user answers 'nao'.
sair = ''
while sair != 'nao':
    estados_sami.add(input('Qual Estado Sami visitou a mais? '))
    sair = input('Tem mais Estados a adicionar? ')

sair = ''
while sair != 'nao':
    estados_dudu.add(input('Qual Estado Dudu visitou a mais? '))
    sair = input('Tem mais Estados a adicionar? ')

# States only Dudu visited, then states both visited.
print(estados_dudu.difference(estados_sami))
print(estados_sami.intersection(estados_dudu))

# 27 = Brazil's federative units; whoever visited more states wins.
if len(estados_sami) > len(estados_dudu):
    print(f'Sami ganhou e visitou {len(estados_sami)*100 // 27} %')
elif len(estados_dudu) > len(estados_sami):
    print(f'Dudu ganhou e visitou {len(estados_dudu)*100 // 27} %')
else:
    print(f'Deu empate e ambos visitaram {len(estados_dudu)*100//27}%')
| 35.75
| 108
| 0.692696
|
4a0ca5630ac30e7aca65f496695528efc7ab1ca8
| 2,449
|
py
|
Python
|
src/arch/x86/isa/insts/general_purpose/flags/push_and_pop.py
|
sjl1826/gem5
|
47ab53ef35b03aad7a92439d268ac57f10a90612
|
[
"BSD-3-Clause"
] | 1
|
2022-03-25T13:18:26.000Z
|
2022-03-25T13:18:26.000Z
|
src/arch/x86/isa/insts/general_purpose/flags/push_and_pop.py
|
sjl1826/gem5
|
47ab53ef35b03aad7a92439d268ac57f10a90612
|
[
"BSD-3-Clause"
] | 1
|
2022-03-25T14:15:30.000Z
|
2022-03-25T14:15:30.000Z
|
src/arch/x86/isa/insts/general_purpose/flags/push_and_pop.py
|
ksco/gem5-xiangshan
|
0baf1b5229885d81d689a677102f0665aaac5514
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2007-2008 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
microcode = '''
def macroop PUSHF {
.adjust_env oszIn64Override
rflags t1
st t1, ss, [1, t0, rsp], "-env.dataSize", addressSize=ssz
subi rsp, rsp, dsz, dataSize=ssz
};
def macroop POPF {
.adjust_env oszIn64Override
ld t1, ss, [1, t0, rsp], addressSize=ssz
addi rsp, rsp, dsz, dataSize=ssz
wrflags t1, t0
};
'''
| 46.207547
| 72
| 0.775419
|
4a0ca644da5039a5c3f031f1e0bdbfef64929e31
| 3,993
|
py
|
Python
|
_old/HW6.py
|
haydenshively/AME-261
|
1cf686835e4eead3a5d23cae65dd7644515fe665
|
[
"MIT"
] | 1
|
2022-03-29T23:59:23.000Z
|
2022-03-29T23:59:23.000Z
|
_old/HW6.py
|
haydenshively/AME-261
|
1cf686835e4eead3a5d23cae65dd7644515fe665
|
[
"MIT"
] | null | null | null |
_old/HW6.py
|
haydenshively/AME-261
|
1cf686835e4eead3a5d23cae65dd7644515fe665
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append('..')
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
if __name__ == '__main__':
    from haydens_code.atmosphere import std_atm_earth
    from haydens_code.plane import Plane

    # Initialize Cessna CJ-1 plane object (kept for reference, not used)
    # plane = Plane(
    #     Cd_0=0.020,
    #     Em=16.9,
    #     e_w=0.81,
    #     chord=29.5 / 16.2,
    #     span=16.2,
    #     Cl_max=1.4,
    #     Lam=25.0,
    #     tc_max=0.11,
    #     W_0=88100,
    #     W_fuel=33200,
    #     cj=0.60,
    #     T_a_sl=32500,
    #     atmosphere=std_atm_earth()
    # )

    # Initialize MD-11 plane object.
    # NOTE(review): units appear to be SI (m, N) based on the /1000 -> kN
    # conversions below — confirm against the Plane class.
    plane = Plane(
        Cd_0=0.018,
        Em=None,
        e=0.85,
        chord=339 / 51.8,
        span=51.8,
        Cl_max=1.4,
        Lam=35.0,
        tc_max=0.12,
        W_0=2710000,
        W_fuel=None,
        cj=None,
        T_a_sl=800000,
        atmosphere=std_atm_earth()
    )

    # The bare triple-quoted strings below are inert expression statements
    # used as section markers, not docstrings.
    """QUESTION 1 PLOT"""
    plane.set_altitude(0)  # meters
    T_a = plane.jet_thrust_available()  # N
    V_limits = plane.speed_min_max(T_a)  # m/s
    V_range = np.arange(V_limits[0, 0], V_limits[1, 0] + 5, 5)
    # Incompressible drag coefficient, then add the compressibility correction.
    Cd_inc = plane.Cd(plane.Cd_i(plane.Cl(V_range)))
    Cd_com = Cd_inc + plane.Cd_c(plane.Cl(V_range), V_range)
    D_inc = plane.drag(Cd_inc, V_range)/1000  # kN
    D_com = plane.drag(Cd_com, V_range)/1000  # kN
    plt.plot(V_range, D_inc, '-r', label='Drag (incompressible)')
    plt.plot(V_range, D_com, '--r', label='Drag (compressible)')
    # Thrust available is constant over velocity, drawn as a horizontal line.
    plt.plot(V_range, [T_a/1000 for i in range(V_range.shape[0])], '-b', label='Thrust Available')
    plt.xlim(V_range.min() - 10, V_range.max() + 10)
    plt.ylim(D_inc.min() - 50, D_inc.max() + 10)
    plt.title('Drag vs Velocity for MD-11 at Sea Level')
    plt.xlabel('Velocity [m/s]')
    plt.ylabel('Drag, Thrust [kN]')
    plt.legend(loc='lower right')
    plt.show()

    """QUESTION 1 TABLE"""
    # Reformat numeric arrays as integer strings for the table cells.
    v_str = ['%d' % i for i in V_range]
    t_str = ['%d' % (T_a/1000) for i in V_range]
    di_str = ['%d' % i for i in D_inc]
    dc_str = ['%d' % i for i in D_com]
    data = [[v, t, di, dc] for v, t, di, dc in zip(v_str, t_str, di_str, dc_str)]
    fig, axs = plt.subplots(1, 1)
    fig.set_size_inches(11, 8.5)
    col_labels = ('V [m/s]', 'Thrust Available [kN]', 'Drag (incompressible) [kN]', 'Drag (compressible) [kN]')
    axs.axis('auto')
    axs.axis('off')
    table = axs.table(cellText=data, colLabels=col_labels, loc='center', cellLoc='center')
    # make headers bold (row 0 is the header row)
    for (row, col), cell in table.get_celld().items():
        if (row == 0) or (col == -1):
            cell.set_text_props(fontproperties=FontProperties(weight='bold'))
    # show plot
    plt.show()
    # save as PNG file
    fig.savefig('MD11DragTypes.png', dpi=200, bbox_inches='tight', pad_inches=0.5)

    """QUESTION 2"""
    # Compare drag/thrust at two altitudes over the same velocity range.
    plane.set_altitude(5000)  # meters
    T_a_5000 = plane.jet_thrust_available()  # N
    V_limits = plane.speed_min_max(T_a_5000)  # m/s
    V_range = np.arange(V_limits[0, 0], 300 + 2, 2)
    Cd_5000 = plane.Cd(plane.Cd_i(plane.Cl(V_range)), plane.Cd_c(plane.Cl(V_range), V_range))
    D_5000 = plane.drag(Cd_5000, V_range) / 1000
    plane.set_altitude(10000)  # meters
    T_a_10000 = plane.jet_thrust_available()  # N
    Cd_10000 = plane.Cd(plane.Cd_i(plane.Cl(V_range)), plane.Cd_c(plane.Cl(V_range), V_range))
    D_10000 = plane.drag(Cd_10000, V_range) / 1000
    plt.plot(V_range, D_5000, '-r', label='Drag @ h=5km')
    plt.plot(V_range, [T_a_5000/1000 for _ in range(V_range.shape[0])], '--r', label='Thrust Available @ h=5km')
    plt.plot(V_range, D_10000, '-b', label='Drag @ h=10km')
    plt.plot(V_range, [T_a_10000 / 1000 for _ in range(V_range.shape[0])], '--b', label='Thrust Available @ h=10km')
    plt.ylim(D_5000.min() - 50, 500)
    plt.title('Drag vs Velocity for MD-11')
    plt.xlabel('Velocity [m/s]')
    plt.ylabel('Drag, Thrust [kN]')
    plt.legend(loc='lower left')
    plt.show()
| 33
| 116
| 0.604057
|
4a0ca6bd07c02758b4056997815d2895b3d585ad
| 3,397
|
py
|
Python
|
service/tools/generate_sdk_configuration.py
|
lcrh/falken
|
7545431c7bfa34a9b45c2243cae40dbb58adefaa
|
[
"Apache-2.0"
] | 213
|
2021-06-11T01:15:16.000Z
|
2022-02-25T16:18:57.000Z
|
service/tools/generate_sdk_configuration.py
|
lcrh/falken
|
7545431c7bfa34a9b45c2243cae40dbb58adefaa
|
[
"Apache-2.0"
] | 32
|
2021-06-17T17:58:54.000Z
|
2022-02-02T05:58:10.000Z
|
service/tools/generate_sdk_configuration.py
|
lcrh/falken
|
7545431c7bfa34a9b45c2243cae40dbb58adefaa
|
[
"Apache-2.0"
] | 28
|
2021-06-17T17:34:21.000Z
|
2022-03-24T14:05:20.000Z
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Generates a SDK JSON configuration file to connect to a local service."""
import copy
import json
import os
from absl import app
from absl import flags
from absl import logging
FLAGS = flags.FLAGS

# Directory this script lives in; used as the default location both for the
# SSL certificate to read and for the generated JSON configuration.
THIS_DIR = os.path.dirname(os.path.abspath(__file__))

flags.DEFINE_string('ssl_dir', THIS_DIR,
                    'Path containing the SSL cert.')
flags.DEFINE_string('project_id', None, 'Falken project to use.')
flags.DEFINE_string('api_key', None,
                    'API key to authenticate the Falken project.')
flags.DEFINE_string(
    'config_json', os.path.join(THIS_DIR, 'falken_config.json'),
    'JSON configuration file to create for the client SDK to connect to the '
    'service.')
# Base configuration for a local connection.
_LOCAL_CONNECTION_CONFIG = {
    'service': {
        # Connect to localhost.
        'environment': 'local',
        'connection': {
            'address': '[::1]:50051',
        },
    },
}


def read_file_to_lines(filename):
    """Load a text file as a list of lines.

    Args:
      filename: Path of the file to read.

    Returns:
      The file's lines with all trailing whitespace (including the newline)
      removed from each line.
    """
    with open(filename, 'rt') as fileobj:
        return [line.rstrip() for line in fileobj]


def generate_configuration(cert_lines, project_id=None, api_key=None):
    """Build a client SDK configuration for a local service connection.

    Starts from _LOCAL_CONNECTION_CONFIG (deep-copied, so the template is
    never mutated) and fills in the optional pieces.

    Args:
      cert_lines: List of strings that make up the SSL certificate; skipped
        when empty or None.
      project_id: Optional ID of the project to use when connecting.
      api_key: Optional API key used to authenticate the project.

    Returns:
      Dict with a 'service' section describing the local endpoint, plus
      'project_id' / 'api_key' / 'ssl_certificate' entries only for the
      arguments that were provided and non-empty.
    """
    config = copy.deepcopy(_LOCAL_CONNECTION_CONFIG)
    if cert_lines:
        config['service']['connection']['ssl_certificate'] = cert_lines
    # Only attach optional credentials when they carry a truthy value.
    for key, value in (('project_id', project_id), ('api_key', api_key)):
        if value:
            config[key] = value
    return config
def main(unused_argv):
    """Generate the SDK configuration file named by --config_json.

    Reads the certificate from <ssl_dir>/cert.pem, builds the configuration
    dict and writes it as pretty-printed, key-sorted JSON.
    """
    cert_path = os.path.join(FLAGS.ssl_dir, 'cert.pem')
    configuration = generate_configuration(
        read_file_to_lines(cert_path),
        project_id=FLAGS.project_id,
        api_key=FLAGS.api_key)
    with open(FLAGS.config_json, 'wt') as config_json_file:
        config_json_file.write(
            json.dumps(configuration, sort_keys=True, indent=2))
    # Report the absolute location of the file we just wrote.
    full_path = os.path.join(os.getcwd(), FLAGS.config_json)
    logging.info('Wrote SDK configuration to %s', full_path)


if __name__ == '__main__':
    app.run(main)
| 29.284483
| 77
| 0.685899
|
4a0ca70ee4e8213b43782c218727693766c17eed
| 1,364
|
py
|
Python
|
Currency.py
|
michael-canaran/pybank-app
|
491885d3732c922dad4649cf56650a6fd721908c
|
[
"MIT"
] | null | null | null |
Currency.py
|
michael-canaran/pybank-app
|
491885d3732c922dad4649cf56650a6fd721908c
|
[
"MIT"
] | null | null | null |
Currency.py
|
michael-canaran/pybank-app
|
491885d3732c922dad4649cf56650a6fd721908c
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
class Currency(ABC):
    """Abstract factory class of Currency.

    Concrete subclasses provide a display prefix (e.g. "CA$") and an
    exchange rate to CAD.
    """

    def __init__(self):
        # Bug fix: the original assigned bare locals (`prefix = ""`,
        # `rate = None`), which silently discarded the values instead of
        # initializing the instance. Store them as attributes so subclasses
        # calling super().__init__() get sensible defaults.
        self.prefix = ""   # currency display prefix, e.g. "CA$"
        self.rate = None   # exchange rate to CAD; set by subclasses

    @abstractmethod
    def get_prefix(self) -> str:
        """
        Gets the string pre-fix of the currency.
        :return: String currency pre-fix.
        """

    @abstractmethod
    def get_rate(self) -> float:
        """
        Gets the exchange rate of the currency to CAD.
        :return: Float of the exchange rate to CAD.
        """
class CAD(Currency):
    """Canadian dollar — the reference currency (rate 1.0 to CAD)."""

    def __init__(self):
        # Prefix and rate assigned together for compactness.
        self.prefix, self.rate = "CA$", 1.00

    def get_prefix(self):
        """Return the display prefix for Canadian dollars."""
        return self.prefix

    def get_rate(self):
        """Return the exchange rate to CAD (identity: 1.0)."""
        return self.rate
class USD(Currency):
    """United States dollar, valued at 1.50 CAD per unit."""

    def __init__(self):
        # Prefix and rate assigned together for compactness.
        self.prefix, self.rate = "US$", 1.50

    def get_prefix(self):
        """Return the display prefix for US dollars."""
        return self.prefix

    def get_rate(self):
        """Return the exchange rate to CAD."""
        return self.rate
class EUR(Currency):
    """Euro, valued at 2.00 CAD per unit."""

    def __init__(self):
        # Prefix and rate assigned together for compactness.
        self.prefix, self.rate = "€", 2.00

    def get_prefix(self):
        """Return the display prefix for euros."""
        return self.prefix

    def get_rate(self):
        """Return the exchange rate to CAD."""
        return self.rate
| 18.432432
| 55
| 0.515396
|
4a0ca7bd73965ba49d781fa068f717ae72070eb3
| 2,429
|
py
|
Python
|
src/loader/transition_time.py
|
Sensors-in-Paradise/OpportunityML
|
a123b4842de45f735d517be6bcd96ca35171db91
|
[
"MIT"
] | 1
|
2022-03-25T16:00:36.000Z
|
2022-03-25T16:00:36.000Z
|
src/loader/transition_time.py
|
Sensors-in-Paradise/OpportunityML
|
a123b4842de45f735d517be6bcd96ca35171db91
|
[
"MIT"
] | 1
|
2022-03-28T13:50:28.000Z
|
2022-03-28T13:50:28.000Z
|
src/loader/transition_time.py
|
Sensors-in-Paradise/OpportunityML
|
a123b4842de45f735d517be6bcd96ca35171db91
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import utils.settings as settings
def transition_time_cut(recordings: "list[Recording]", seconds_from_start = 2, seconds_from_end = 0) -> "list[Recording]":
    """Trim activity-transition time from every recording, in place.

    For each run of identical activity labels, drops `seconds_from_start`
    seconds at its beginning and `seconds_from_end` seconds at its end, so
    windows built later contain cleaner, single-activity data. The
    timestep_frequency must be set in settings.DATA_CONFIG (e.g. 30 Hz for
    the Opportunity dataset). The input list is modified and returned (no
    deep copy, to keep RAM usage low); smooth transitions are removed.
    """
    frequency = settings.DATA_CONFIG.timestep_frequency
    start_offset = int(seconds_from_start * frequency)
    end_offset = int(seconds_from_end * frequency)

    for recording in recordings:
        labels = recording.activities.to_numpy()
        # Index where a new activity label begins (label differs from the
        # previous timestep), bracketed by the overall start and end.
        boundaries = np.where(labels[:-1] != labels[1:])[0] + 1
        edges = np.concatenate(
            (np.array([0]), boundaries, np.array([len(labels)]))
        )

        # One (start, end) span per activity run, shrunk by the offsets,
        # keeping only spans that still have positive length.
        spans = [
            (edges[i] + start_offset, edges[i + 1] - end_offset)
            for i in range(len(edges) - 1)
        ]
        spans = [(a, b) for a, b in spans if b - a > 0]

        def keep_spans(frame):
            # Concatenate the surviving slices into one re-indexed frame.
            parts = [frame.iloc[a:b] for a, b in spans]
            return pd.concat(parts).reset_index(drop=True)

        recording.time_frame = keep_spans(recording.time_frame)
        recording.activities = keep_spans(recording.activities)
        recording.sensor_frame = keep_spans(recording.sensor_frame)

    return recordings
| 45.830189
| 122
| 0.701112
|
4a0ca7fa0ed8567e362832662d5e017d4c84f754
| 797
|
py
|
Python
|
py/py_0504_square_on_the_inside.py
|
lcsm29/project-euler
|
fab794ece5aa7a11fc7c2177f26250f40a5b1447
|
[
"MIT"
] | null | null | null |
py/py_0504_square_on_the_inside.py
|
lcsm29/project-euler
|
fab794ece5aa7a11fc7c2177f26250f40a5b1447
|
[
"MIT"
] | null | null | null |
py/py_0504_square_on_the_inside.py
|
lcsm29/project-euler
|
fab794ece5aa7a11fc7c2177f26250f40a5b1447
|
[
"MIT"
] | null | null | null |
# Solution of;
# Project Euler Problem 504: Square on the Inside
# https://projecteuler.net/problem=504
#
# Let ABCD be a quadrilateral whose vertices are lattice points lying on the
# coordinate axes as follows:A(a, 0), B(0, b), C(−c, 0), D(0, −d), where 1 ≤
# a, b, c, d ≤ m and a, b, c, d, m are integers. It can be shown that for m =
# 4 there are exactly 256 valid ways to construct ABCD. Of these 256
# quadrilaterals, 42 of them strictly contain a square number of lattice
# points. How many quadrilaterals ABCD strictly contain a square number of
# lattice points for m = 100?
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
    """Placeholder for the Problem 504 solver; takes n and returns None."""
    return None
if __name__ == '__main__':
    # Benchmark harness: run `dummy` with input n for i iterations, tagged
    # with the Project Euler problem id.
    n = 1000
    i = 10000
    prob_id = 504
    timed.caller(dummy, n, i, prob_id)
| 30.653846
| 78
| 0.685069
|
4a0ca81ab2254ef9adec22ddf86b6a5e79bf5a1b
| 1,633
|
py
|
Python
|
cogs/Makeemoji.py
|
nerdguyahmad/discord-emoji-stealer
|
5e34b7edac0f14cae21de2bd43c573cee0f7631f
|
[
"MIT"
] | null | null | null |
cogs/Makeemoji.py
|
nerdguyahmad/discord-emoji-stealer
|
5e34b7edac0f14cae21de2bd43c573cee0f7631f
|
[
"MIT"
] | null | null | null |
cogs/Makeemoji.py
|
nerdguyahmad/discord-emoji-stealer
|
5e34b7edac0f14cae21de2bd43c573cee0f7631f
|
[
"MIT"
] | null | null | null |
from discord.ext import commands
import requests
import discord
class Makeemoji(commands.Cog):
    """Cog that creates a guild emoji from a URL or a message attachment."""

    def __init__(self, bot):
        self.bot = bot

    async def _create_emoji(self, ctx, name, image_url):
        """Download image_url and create a guild emoji, reporting the outcome.

        Shared by the URL and attachment paths (the original duplicated this
        logic). NOTE: requests.get blocks the event loop; consider an async
        HTTP client.
        """
        file_request = requests.get(image_url)
        try:
            emoji = await ctx.guild.create_custom_emoji(image=file_request.content, name=name)
        except discord.InvalidArgument:
            await ctx.send("You must attach an **image** or a **gif** for the emoji, not a different type of the file.")
            return
        await ctx.send(f"Emoji <:{emoji.name}:{emoji.id}> was created!")

    @commands.command()
    async def makeemoji(self, ctx, name, url=None):
        """Create an emoji called *name* from *url* or the message attachment."""
        if url:
            # Bug fix: the original fell through to the attachment path after
            # a successful URL-based creation, emitting a spurious
            # "You must attach an image" message. Stop here instead.
            await self._create_emoji(ctx, name, url)
            return
        try:
            attachment_url = ctx.message.attachments[0].url
        except IndexError:
            await ctx.send("You must attach an image or a gif for the emoji.")
            return
        await self._create_emoji(ctx, name, attachment_url)

    @makeemoji.error
    async def makeemoji_error(self, ctx, error):
        """Turn a missing-name usage error into a friendly hint; re-raise others."""
        if isinstance(error, commands.MissingRequiredArgument):
            await ctx.send("Specify a name for the emoji. Example: `makeemoji emoji1`")
            return
        raise error
def setup(bot):
    """Extension entry point: register the Makeemoji cog with the bot."""
    cog = Makeemoji(bot)
    bot.add_cog(cog)
| 39.829268
| 125
| 0.607471
|
4a0ca825652b4e62259cb63a4270f86b39495f3e
| 21,761
|
py
|
Python
|
test/functional/wallet_importmulti.py
|
bitstockproject/bitstock-core
|
5d18f0f2c922e8de2b1ff26fa3687ac3d28ec60c
|
[
"MIT"
] | null | null | null |
test/functional/wallet_importmulti.py
|
bitstockproject/bitstock-core
|
5d18f0f2c922e8de2b1ff26fa3687ac3d28ec60c
|
[
"MIT"
] | null | null | null |
test/functional/wallet_importmulti.py
|
bitstockproject/bitstock-core
|
5d18f0f2c922e8de2b1ff26fa3687ac3d28ec60c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the importmulti RPC."""
from test_framework.test_framework import BitstockTestFramework
from test_framework.util import *
class ImportMultiTest (BitstockTestFramework):
    """Functional test of the `importmulti` wallet RPC.

    Node 0 generates addresses and keys; node 1 performs the imports with
    every combination of scriptPubKey form (address vs raw hex), pubkeys,
    private keys and the internal/watchonly flags, checking both the RPC
    result and the resulting address state (iswatchonly/ismine/timestamp).
    """

    def set_test_params(self):
        # Two nodes on a fresh chain, forced to legacy addresses so the
        # imported scripts and keys behave deterministically.
        self.num_nodes = 2
        self.extra_args = [["-addresstype=legacy"], ["-addresstype=legacy"]]
        self.setup_clean_chain = True

    def setup_network(self):
        # The nodes are exercised independently; no P2P connections needed.
        self.setup_nodes()

    def run_test (self):
        """Drive importmulti through all supported request shapes."""
        self.log.info("Mining blocks...")
        self.nodes[0].generate(1)
        self.nodes[1].generate(1)
        timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']

        node0_address1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())

        # Check only one address
        assert_equal(node0_address1['ismine'], True)

        # Node 1 sync test
        assert_equal(self.nodes[1].getblockcount(),1)

        # Address Test - before import
        address_info = self.nodes[1].validateaddress(node0_address1['address'])
        assert_equal(address_info['iswatchonly'], False)
        assert_equal(address_info['ismine'], False)

        # RPC importmulti -----------------------------------------------

        # Bitcoin Address
        self.log.info("Should import an address")
        address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
        result = self.nodes[1].importmulti([{
            "scriptPubKey": {
                "address": address['address']
            },
            "timestamp": "now",
        }])
        assert_equal(result[0]['success'], True)
        address_assert = self.nodes[1].validateaddress(address['address'])
        assert_equal(address_assert['iswatchonly'], True)
        assert_equal(address_assert['ismine'], False)
        assert_equal(address_assert['timestamp'], timestamp)
        # Remembered for the timestamp-replacement check near the end.
        watchonly_address = address['address']
        watchonly_timestamp = timestamp

        self.log.info("Should not import an invalid address")
        result = self.nodes[1].importmulti([{
            "scriptPubKey": {
                "address": "not valid address",
            },
            "timestamp": "now",
        }])
        assert_equal(result[0]['success'], False)
        assert_equal(result[0]['error']['code'], -5)
        assert_equal(result[0]['error']['message'], 'Invalid address')

        # ScriptPubKey + internal
        self.log.info("Should import a scriptPubKey with internal flag")
        address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
        result = self.nodes[1].importmulti([{
            "scriptPubKey": address['scriptPubKey'],
            "timestamp": "now",
            "internal": True
        }])
        assert_equal(result[0]['success'], True)
        address_assert = self.nodes[1].validateaddress(address['address'])
        assert_equal(address_assert['iswatchonly'], True)
        assert_equal(address_assert['ismine'], False)
        assert_equal(address_assert['timestamp'], timestamp)

        # ScriptPubKey + !internal: raw hex scripts must be flagged internal.
        self.log.info("Should not import a scriptPubKey without internal flag")
        address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
        result = self.nodes[1].importmulti([{
            "scriptPubKey": address['scriptPubKey'],
            "timestamp": "now",
        }])
        assert_equal(result[0]['success'], False)
        assert_equal(result[0]['error']['code'], -8)
        assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
        address_assert = self.nodes[1].validateaddress(address['address'])
        assert_equal(address_assert['iswatchonly'], False)
        assert_equal(address_assert['ismine'], False)
        assert_equal('timestamp' in address_assert, False)

        # Address + Public key + !Internal
        self.log.info("Should import an address with public key")
        address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
        result = self.nodes[1].importmulti([{
            "scriptPubKey": {
                "address": address['address']
            },
            "timestamp": "now",
            "pubkeys": [ address['pubkey'] ]
        }])
        assert_equal(result[0]['success'], True)
        address_assert = self.nodes[1].validateaddress(address['address'])
        assert_equal(address_assert['iswatchonly'], True)
        assert_equal(address_assert['ismine'], False)
        assert_equal(address_assert['timestamp'], timestamp)

        # ScriptPubKey + Public key + internal
        self.log.info("Should import a scriptPubKey with internal and with public key")
        address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
        request = [{
            "scriptPubKey": address['scriptPubKey'],
            "timestamp": "now",
            "pubkeys": [ address['pubkey'] ],
            "internal": True
        }]
        result = self.nodes[1].importmulti(request)
        assert_equal(result[0]['success'], True)
        address_assert = self.nodes[1].validateaddress(address['address'])
        assert_equal(address_assert['iswatchonly'], True)
        assert_equal(address_assert['ismine'], False)
        assert_equal(address_assert['timestamp'], timestamp)

        # ScriptPubKey + Public key + !internal
        self.log.info("Should not import a scriptPubKey without internal and with public key")
        address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
        request = [{
            "scriptPubKey": address['scriptPubKey'],
            "timestamp": "now",
            "pubkeys": [ address['pubkey'] ]
        }]
        result = self.nodes[1].importmulti(request)
        assert_equal(result[0]['success'], False)
        assert_equal(result[0]['error']['code'], -8)
        assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
        address_assert = self.nodes[1].validateaddress(address['address'])
        assert_equal(address_assert['iswatchonly'], False)
        assert_equal(address_assert['ismine'], False)
        assert_equal('timestamp' in address_assert, False)

        # Address + Private key + !watchonly
        self.log.info("Should import an address with private key")
        address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
        result = self.nodes[1].importmulti([{
            "scriptPubKey": {
                "address": address['address']
            },
            "timestamp": "now",
            "keys": [ self.nodes[0].dumpprivkey(address['address']) ]
        }])
        assert_equal(result[0]['success'], True)
        address_assert = self.nodes[1].validateaddress(address['address'])
        assert_equal(address_assert['iswatchonly'], False)
        assert_equal(address_assert['ismine'], True)
        assert_equal(address_assert['timestamp'], timestamp)

        self.log.info("Should not import an address with private key if is already imported")
        result = self.nodes[1].importmulti([{
            "scriptPubKey": {
                "address": address['address']
            },
            "timestamp": "now",
            "keys": [ self.nodes[0].dumpprivkey(address['address']) ]
        }])
        assert_equal(result[0]['success'], False)
        assert_equal(result[0]['error']['code'], -4)
        assert_equal(result[0]['error']['message'], 'The wallet already contains the private key for this address or script')

        # Address + Private key + watchonly: mutually exclusive options.
        self.log.info("Should not import an address with private key and with watchonly")
        address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
        result = self.nodes[1].importmulti([{
            "scriptPubKey": {
                "address": address['address']
            },
            "timestamp": "now",
            "keys": [ self.nodes[0].dumpprivkey(address['address']) ],
            "watchonly": True
        }])
        assert_equal(result[0]['success'], False)
        assert_equal(result[0]['error']['code'], -8)
        assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys')
        address_assert = self.nodes[1].validateaddress(address['address'])
        assert_equal(address_assert['iswatchonly'], False)
        assert_equal(address_assert['ismine'], False)
        assert_equal('timestamp' in address_assert, False)

        # ScriptPubKey + Private key + internal
        self.log.info("Should import a scriptPubKey with internal and with private key")
        address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
        result = self.nodes[1].importmulti([{
            "scriptPubKey": address['scriptPubKey'],
            "timestamp": "now",
            "keys": [ self.nodes[0].dumpprivkey(address['address']) ],
            "internal": True
        }])
        assert_equal(result[0]['success'], True)
        address_assert = self.nodes[1].validateaddress(address['address'])
        assert_equal(address_assert['iswatchonly'], False)
        assert_equal(address_assert['ismine'], True)
        assert_equal(address_assert['timestamp'], timestamp)

        # ScriptPubKey + Private key + !internal
        self.log.info("Should not import a scriptPubKey without internal and with private key")
        address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
        result = self.nodes[1].importmulti([{
            "scriptPubKey": address['scriptPubKey'],
            "timestamp": "now",
            "keys": [ self.nodes[0].dumpprivkey(address['address']) ]
        }])
        assert_equal(result[0]['success'], False)
        assert_equal(result[0]['error']['code'], -8)
        assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
        address_assert = self.nodes[1].validateaddress(address['address'])
        assert_equal(address_assert['iswatchonly'], False)
        assert_equal(address_assert['ismine'], False)
        assert_equal('timestamp' in address_assert, False)

        # P2SH address: fund a fresh 2-of-3 multisig, then import by address.
        sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
        sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
        sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
        multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']])
        self.nodes[1].generate(100)
        transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
        self.nodes[1].generate(1)
        timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']

        self.log.info("Should import a p2sh")
        result = self.nodes[1].importmulti([{
            "scriptPubKey": {
                "address": multi_sig_script['address']
            },
            "timestamp": "now",
        }])
        assert_equal(result[0]['success'], True)
        address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
        assert_equal(address_assert['isscript'], True)
        assert_equal(address_assert['iswatchonly'], True)
        assert_equal(address_assert['timestamp'], timestamp)
        p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
        assert_equal(p2shunspent['spendable'], False)
        # Without the redeem script the wallet cannot solve the output.
        assert_equal(p2shunspent['solvable'], False)

        # P2SH + Redeem script
        sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
        sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
        sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
        multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']])
        self.nodes[1].generate(100)
        transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
        self.nodes[1].generate(1)
        timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']

        self.log.info("Should import a p2sh with respective redeem script")
        result = self.nodes[1].importmulti([{
            "scriptPubKey": {
                "address": multi_sig_script['address']
            },
            "timestamp": "now",
            "redeemscript": multi_sig_script['redeemScript']
        }])
        assert_equal(result[0]['success'], True)
        address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
        assert_equal(address_assert['timestamp'], timestamp)
        p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
        assert_equal(p2shunspent['spendable'], False)
        assert_equal(p2shunspent['solvable'], True)

        # P2SH + Redeem script + Private Keys + !Watchonly
        sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
        sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
        sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
        multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']])
        self.nodes[1].generate(100)
        transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
        self.nodes[1].generate(1)
        timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']

        self.log.info("Should import a p2sh with respective redeem script and private keys")
        result = self.nodes[1].importmulti([{
            "scriptPubKey": {
                "address": multi_sig_script['address']
            },
            "timestamp": "now",
            "redeemscript": multi_sig_script['redeemScript'],
            "keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])]
        }])
        assert_equal(result[0]['success'], True)
        address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
        assert_equal(address_assert['timestamp'], timestamp)
        p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
        assert_equal(p2shunspent['spendable'], False)
        assert_equal(p2shunspent['solvable'], True)

        # P2SH + Redeem script + Private Keys + Watchonly
        sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
        sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
        sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
        multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']])
        self.nodes[1].generate(100)
        transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
        self.nodes[1].generate(1)
        timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']

        self.log.info("Should import a p2sh with respective redeem script and private keys")
        result = self.nodes[1].importmulti([{
            "scriptPubKey": {
                "address": multi_sig_script['address']
            },
            "timestamp": "now",
            "redeemscript": multi_sig_script['redeemScript'],
            "keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])],
            "watchonly": True
        }])
        assert_equal(result[0]['success'], False)
        assert_equal(result[0]['error']['code'], -8)
        assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys')

        # Address + Public key + !Internal + Wrong pubkey
        self.log.info("Should not import an address with a wrong public key")
        address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
        address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
        result = self.nodes[1].importmulti([{
            "scriptPubKey": {
                "address": address['address']
            },
            "timestamp": "now",
            "pubkeys": [ address2['pubkey'] ]
        }])
        assert_equal(result[0]['success'], False)
        assert_equal(result[0]['error']['code'], -5)
        assert_equal(result[0]['error']['message'], 'Consistency check failed')
        address_assert = self.nodes[1].validateaddress(address['address'])
        assert_equal(address_assert['iswatchonly'], False)
        assert_equal(address_assert['ismine'], False)
        assert_equal('timestamp' in address_assert, False)

        # ScriptPubKey + Public key + internal + Wrong pubkey
        self.log.info("Should not import a scriptPubKey with internal and with a wrong public key")
        address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
        address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
        request = [{
            "scriptPubKey": address['scriptPubKey'],
            "timestamp": "now",
            "pubkeys": [ address2['pubkey'] ],
            "internal": True
        }]
        result = self.nodes[1].importmulti(request)
        assert_equal(result[0]['success'], False)
        assert_equal(result[0]['error']['code'], -5)
        assert_equal(result[0]['error']['message'], 'Consistency check failed')
        address_assert = self.nodes[1].validateaddress(address['address'])
        assert_equal(address_assert['iswatchonly'], False)
        assert_equal(address_assert['ismine'], False)
        assert_equal('timestamp' in address_assert, False)

        # Address + Private key + !watchonly + Wrong private key
        self.log.info("Should not import an address with a wrong private key")
        address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
        address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
        result = self.nodes[1].importmulti([{
            "scriptPubKey": {
                "address": address['address']
            },
            "timestamp": "now",
            "keys": [ self.nodes[0].dumpprivkey(address2['address']) ]
        }])
        assert_equal(result[0]['success'], False)
        assert_equal(result[0]['error']['code'], -5)
        assert_equal(result[0]['error']['message'], 'Consistency check failed')
        address_assert = self.nodes[1].validateaddress(address['address'])
        assert_equal(address_assert['iswatchonly'], False)
        assert_equal(address_assert['ismine'], False)
        assert_equal('timestamp' in address_assert, False)

        # ScriptPubKey + Private key + internal + Wrong private key
        self.log.info("Should not import a scriptPubKey with internal and with a wrong private key")
        address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
        address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
        result = self.nodes[1].importmulti([{
            "scriptPubKey": address['scriptPubKey'],
            "timestamp": "now",
            "keys": [ self.nodes[0].dumpprivkey(address2['address']) ],
            "internal": True
        }])
        assert_equal(result[0]['success'], False)
        assert_equal(result[0]['error']['code'], -5)
        assert_equal(result[0]['error']['message'], 'Consistency check failed')
        address_assert = self.nodes[1].validateaddress(address['address'])
        assert_equal(address_assert['iswatchonly'], False)
        assert_equal(address_assert['ismine'], False)
        assert_equal('timestamp' in address_assert, False)

        # Importing existing watch only address with new timestamp should replace saved timestamp.
        assert_greater_than(timestamp, watchonly_timestamp)
        self.log.info("Should replace previously saved watch only timestamp.")
        result = self.nodes[1].importmulti([{
            "scriptPubKey": {
                "address": watchonly_address,
            },
            "timestamp": "now",
        }])
        assert_equal(result[0]['success'], True)
        address_assert = self.nodes[1].validateaddress(watchonly_address)
        assert_equal(address_assert['iswatchonly'], True)
        assert_equal(address_assert['ismine'], False)
        assert_equal(address_assert['timestamp'], timestamp)
        watchonly_timestamp = timestamp

        # restart nodes to check for proper serialization/deserialization of watch only address
        self.stop_nodes()
        self.start_nodes()
        address_assert = self.nodes[1].validateaddress(watchonly_address)
        assert_equal(address_assert['iswatchonly'], True)
        assert_equal(address_assert['ismine'], False)
        assert_equal(address_assert['timestamp'], watchonly_timestamp)

        # Bad or missing timestamps
        self.log.info("Should throw on invalid or missing timestamp values")
        assert_raises_rpc_error(-3, 'Missing required timestamp field for key',
                                self.nodes[1].importmulti, [{
                                    "scriptPubKey": address['scriptPubKey'],
                                }])
        assert_raises_rpc_error(-3, 'Expected number or "now" timestamp value for key. got type string',
                                self.nodes[1].importmulti, [{
                                    "scriptPubKey": address['scriptPubKey'],
                                    "timestamp": "",
                                }])
if __name__ == '__main__':
    # Entry point when executed directly by the functional-test runner.
    ImportMultiTest ().main ()
| 48.143805
| 135
| 0.629842
|
4a0ca8e3fc5da55d6d6c4c79b36a17cddbfb11bc
| 760
|
py
|
Python
|
tools/mo/unit_tests/mo/front/kaldi/sigmoid_ext_test.py
|
ryanloney/openvino-1
|
4e0a740eb3ee31062ba0df88fcf438564f67edb7
|
[
"Apache-2.0"
] | 1,127
|
2018-10-15T14:36:58.000Z
|
2020-04-20T09:29:44.000Z
|
tools/mo/unit_tests/mo/front/kaldi/sigmoid_ext_test.py
|
ryanloney/openvino-1
|
4e0a740eb3ee31062ba0df88fcf438564f67edb7
|
[
"Apache-2.0"
] | 439
|
2018-10-20T04:40:35.000Z
|
2020-04-19T05:56:25.000Z
|
tools/mo/unit_tests/mo/front/kaldi/sigmoid_ext_test.py
|
ryanloney/openvino-1
|
4e0a740eb3ee31062ba0df88fcf438564f67edb7
|
[
"Apache-2.0"
] | 414
|
2018-10-17T05:53:46.000Z
|
2020-04-16T17:29:53.000Z
|
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.front.kaldi.sigmoid_ext import SigmoidFrontExtractor
from openvino.tools.mo.ops.activation_ops import Sigmoid
from unit_tests.mo.front.kaldi.extractors.common_ext_test import KaldiFrontExtractorTest
from openvino.tools.mo.ops.op import Op
class SigmoidFrontExtractorTest(KaldiFrontExtractorTest):
    """Unit tests for the Kaldi front-end Sigmoid extractor."""
    @classmethod
    def register_op(cls):
        # Make the Sigmoid operation resolvable by the extractor under test.
        Op.registered_ops['Sigmoid'] = Sigmoid
    def test_assertion(self):
        # Extracting from a missing node must fail loudly, not silently.
        self.assertRaises(AttributeError, SigmoidFrontExtractor.extract, None)
    def test_extracted_blobs_add_shift(self):
        SigmoidFrontExtractor.extract(self.test_node)
        # Bug fix: the original `assertTrue(self.test_node.op, 'Sigmoid')`
        # treated 'Sigmoid' as the failure *message* and only checked
        # truthiness, so the op name was never actually verified.
        self.assertEqual(self.test_node.op, 'Sigmoid')
| 36.190476
| 88
| 0.788158
|
4a0ca9264a8e3d18498aa6478bcff4f7ef6fa782
| 1,646
|
py
|
Python
|
samples/scripts/read_dataset.py
|
OkanoShogo0903/open3d_workspace
|
38386e9dd65a75ea62202487bd4d18712eab3f1e
|
[
"MIT"
] | null | null | null |
samples/scripts/read_dataset.py
|
OkanoShogo0903/open3d_workspace
|
38386e9dd65a75ea62202487bd4d18712eab3f1e
|
[
"MIT"
] | null | null | null |
samples/scripts/read_dataset.py
|
OkanoShogo0903/open3d_workspace
|
38386e9dd65a75ea62202487bd4d18712eab3f1e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# rgbd_redwood.py
#conda install pillow matplotlib
import types
#from py3d import *
from open3d import *
#from py3d import *
import matplotlib.pyplot as plt
def run():
    """Load one Redwood RGB-D frame, display it, and show its point cloud.

    Reads ./TestData/RGBD/color/00000.jpg and .../depth/00000.png, fuses
    them into an RGBD image, plots the color / grayscale / depth channels
    with matplotlib, then back-projects to a point cloud and renders it.
    """
    print("Read Redwood dataset")
    color_raw = read_image("./TestData/RGBD/color/00000.jpg")
    depth_raw = read_image("./TestData/RGBD/depth/00000.png")
    rgbd_image = create_rgbd_image_from_color_and_depth(
        color_raw, depth_raw)
    # Bug fix: the debug output below used Python 2 `print` statements,
    # which are syntax errors under Python 3 (the rest of this file
    # already uses print() calls).
    print(color_raw)
    print(depth_raw)
    print("*" * 30)
    print(type(rgbd_image.color))
    print("*" * 30)
    plt.figure(figsize=(12, 5))
    # subplot(rows, cols, index): lay several images out on one figure.
    plt.subplot(3, 3, 1)
    plt.title('Redwood original image')
    plt.imshow(color_raw)  # 640x480 RGB image
    plt.subplot(3, 3, 3)
    plt.title('Redwood grayscale image')
    plt.imshow(rgbd_image.color, cmap='gray')  # single-channel intensity
    plt.subplot(3, 3, 7)
    plt.title('Redwood depth image')
    plt.imshow(rgbd_image.depth)
    # plt.show()
    # get_prime_sense_default() is the current API; older py3d builds
    # exposed it as the attribute `prime_sense_default` instead.
    pcd = create_point_cloud_from_rgbd_image(
        rgbd_image, PinholeCameraIntrinsic.get_prime_sense_default())
    # Flip it, otherwise the pointcloud will be upside down
    pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
    draw_geometries([pcd])
if __name__ == "__main__":
    run()
| 33.591837
| 81
| 0.617861
|
4a0ca944d82b344c21981063d02bd497c6a82442
| 5,919
|
py
|
Python
|
pybind/slxos/v16r_1_00b/routing_system/interface/ve/ipv6/ipv6_nd_ra/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/routing_system/interface/ve/ipv6/ipv6_nd_ra/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/slxos/v16r_1_00b/routing_system/interface/ve/ipv6/ipv6_nd_ra/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import ipv6_intf_cmds
class ipv6_nd_ra(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module brocade-common-def - based on the path /routing-system/interface/ve/ipv6/ipv6-nd-ra. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.
  """
  __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__ipv6_intf_cmds',)
  _yang_name = 'ipv6-nd-ra'
  _rest_name = ''
  _pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    # Resolve the XPath helper: explicit kwarg wins, then the parent's
    # helper, otherwise disabled.
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
      self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
      self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
      path_helper_ = getattr(self._parent, "_path_helper", False)
      self._path_helper = path_helper_
    else:
      self._path_helper = False
    # Same resolution order for extension methods (explicit -> parent -> off).
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
      self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
      self._extmethods = extmethods
    elif hasattr(self, "_parent"):
      extmethods = getattr(self._parent, "_extmethods", None)
      self._extmethods = extmethods
    else:
      self._extmethods = False
    # Default (empty) child container for the ipv6-intf-cmds node.
    self.__ipv6_intf_cmds = YANGDynClass(base=ipv6_intf_cmds.ipv6_intf_cmds, is_container='container', presence=False, yang_name="ipv6-intf-cmds", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='container', is_config=True)
    load = kwargs.pop("load", None)
    # Copy-constructor path: a single positional argument must carry all
    # of this container's elements; changed elements are copied over.
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
  def _path(self):
    # YANG data path of this node (parent path + own name when attached).
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return [u'routing-system', u'interface', u've', u'ipv6', u'ipv6-nd-ra']
  def _rest_path(self):
    # REST path differs from the YANG path: nodes with an empty
    # _rest_name are dropped from the URL.
    if hasattr(self, "_parent"):
      if self._rest_name:
        return self._parent._rest_path()+[self._rest_name]
      else:
        return self._parent._rest_path()
    else:
      return [u'interface', u'Ve', u'ipv6']
  def _get_ipv6_intf_cmds(self):
    """
    Getter method for ipv6_intf_cmds, mapped from YANG variable /routing_system/interface/ve/ipv6/ipv6_nd_ra/ipv6_intf_cmds (container)
    """
    return self.__ipv6_intf_cmds
  def _set_ipv6_intf_cmds(self, v, load=False):
    """
    Setter method for ipv6_intf_cmds, mapped from YANG variable /routing_system/interface/ve/ipv6/ipv6_nd_ra/ipv6_intf_cmds (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_ipv6_intf_cmds is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_ipv6_intf_cmds() directly.
    """
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=ipv6_intf_cmds.ipv6_intf_cmds, is_container='container', presence=False, yang_name="ipv6-intf-cmds", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='container', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """ipv6_intf_cmds must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=ipv6_intf_cmds.ipv6_intf_cmds, is_container='container', presence=False, yang_name="ipv6-intf-cmds", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='container', is_config=True)""",
        })
    self.__ipv6_intf_cmds = t
    if hasattr(self, '_set'):
      self._set()
  def _unset_ipv6_intf_cmds(self):
    # Reset the child container back to its default (empty) instance.
    self.__ipv6_intf_cmds = YANGDynClass(base=ipv6_intf_cmds.ipv6_intf_cmds, is_container='container', presence=False, yang_name="ipv6-intf-cmds", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='container', is_config=True)
  # Public read/write property wrapping the private getter/setter pair.
  ipv6_intf_cmds = __builtin__.property(_get_ipv6_intf_cmds, _set_ipv6_intf_cmds)
  _pyangbind_elements = {'ipv6_intf_cmds': ipv6_intf_cmds, }
| 47.733871
| 452
| 0.718365
|
4a0ca96e977a0589542cdf85880ed2ca55ccb2fb
| 4,828
|
py
|
Python
|
core/dbt/context/parser.py
|
NiallRees/dbt
|
da7afd84ab961cd1b90775e900514f7379ba2e84
|
[
"Apache-2.0"
] | 1
|
2021-09-01T20:50:52.000Z
|
2021-09-01T20:50:52.000Z
|
core/dbt/context/parser.py
|
NiallRees/dbt
|
da7afd84ab961cd1b90775e900514f7379ba2e84
|
[
"Apache-2.0"
] | 1
|
2019-10-28T15:33:04.000Z
|
2019-10-28T15:33:04.000Z
|
core/dbt/context/parser.py
|
NiallRees/dbt
|
da7afd84ab961cd1b90775e900514f7379ba2e84
|
[
"Apache-2.0"
] | 2
|
2019-05-10T21:23:08.000Z
|
2021-06-09T01:28:37.000Z
|
import dbt.exceptions
import dbt.context.common
from dbt.adapters.factory import get_adapter
from dbt.contracts.graph.parsed import Docref
def docs(unparsed, docrefs, column_name=None):
    """Build the parse-time implementation of the jinja `doc()` function.

    The returned callable records each doc() reference into *docrefs*
    as a Docref; it always renders as the empty string, since nothing
    cares about doc() output during parsing.
    """
    def do_docs(*args):
        if len(args) not in (1, 2):
            dbt.exceptions.doc_invalid_args(unparsed, args)
        doc_name = args[0]
        doc_package_name = args[1] if len(args) == 2 else ''
        docrefs.append(Docref(
            documentation_package=doc_package_name,
            documentation_name=doc_name,
            column_name=column_name))
        # At parse time, nothing should care about what doc() returns.
        return ''
    return do_docs
class Config:
    """Parse-time implementation of the jinja `config()` context member.

    Calls record their settings on the node's source config; lookups
    (get/require) always yield the empty string during parsing.
    """
    def __init__(self, model, source_config):
        self.model = model
        self.source_config = source_config

    def _transform_config(self, config):
        # Normalize legacy underscore hook keys to their hyphenated form,
        # rejecting configs that supply both spellings at once.
        for oldkey in ('pre_hook', 'post_hook'):
            if oldkey not in config:
                continue
            newkey = oldkey.replace('_', '-')
            if newkey in config:
                dbt.exceptions.raise_compiler_error(
                    'Invalid config, has conflicting keys "{}" and "{}"'
                    .format(oldkey, newkey),
                    self.model
                )
            config[newkey] = config.pop(oldkey)
        return config

    def __call__(self, *args, **kwargs):
        # Accept either a single dict argument or keyword arguments.
        if len(args) == 1 and not kwargs:
            opts = args[0]
        elif not args and kwargs:
            opts = kwargs
        else:
            dbt.exceptions.raise_compiler_error(
                "Invalid inline model config",
                self.model)
        self.source_config.update_in_model_config(
            self._transform_config(opts))
        return ''

    def set(self, name, value):
        return self.__call__({name: value})

    def require(self, name, validator=None):
        # Parse time: pretend every required config resolves.
        return ''

    def get(self, name, validator=None, default=None):
        return ''
class DatabaseWrapper(dbt.context.common.BaseDatabaseWrapper):
    """Parser-side adapter wrapper.

    Exposes only the adapter's available methods, substituting any
    registered parse-time replacement for the real implementation.
    """
    def __getattr__(self, name):
        # Unknown names fail fast with a normal AttributeError.
        if name not in self.adapter._available_:
            raise AttributeError(
                "'{}' object has no attribute '{}'".format(
                    self.__class__.__name__, name
                )
            )
        # Prefer the explicit parse-time override when one exists.
        if name in self.adapter._parse_replacements_:
            return self.adapter._parse_replacements_[name]
        return getattr(self.adapter, name)
class Var(dbt.context.base.Var):
    """Parse-time var(): undefined variables are tolerated."""
    def get_missing_var(self, var_name):
        """Return None for any undefined variable instead of raising."""
        return None
class RefResolver(dbt.context.common.BaseResolver):
    """Parse-time ref(): records the dependency, returns a placeholder."""
    def __call__(self, *args):
        # ref('model') or ref('package', 'model'); anything else is an error.
        if len(args) not in (1, 2):
            dbt.exceptions.ref_invalid_args(self.model, args)
        else:
            self.model.refs.append(list(args))
        return self.Relation.create_from(self.config, self.model)
class SourceResolver(dbt.context.common.BaseResolver):
    """Parse-time source(): records the source dependency."""
    def __call__(self, *args):
        # source() always takes (source_name, table_name).
        if len(args) != 2:
            dbt.exceptions.raise_compiler_error(
                "source() takes exactly two arguments ({} given)"
                .format(len(args)), self.model)
        self.model.sources.append(list(args))
        return self.Relation.create_from(self.config, self.model)
class Provider(dbt.context.common.Provider):
    # Parse-time provider: wires the parse-specific implementations in this
    # module into the shared context machinery.
    execute = False  # parsing never executes SQL
    Config = Config
    DatabaseWrapper = DatabaseWrapper
    Var = Var
    ref = RefResolver
    source = SourceResolver
def generate(model, runtime_config, manifest, source_config):
    """Build the parse-time jinja context for *model*.

    Parsing itself doesn't strictly need a live connection, but some
    context members may use one, so the adapter connection is acquired
    for the duration. (Opening it lazily would allow connectionless
    parsing for many projects.)
    """
    adapter = get_adapter(runtime_config)
    with adapter.connection_for(model):
        return dbt.context.common.generate(
            model, runtime_config, manifest, Provider(), source_config
        )
def generate_macro(model, runtime_config, manifest):
    """Build the parse-time context for rendering generate_*_name macros."""
    # parser.generate_macro is called by the get_${attr}_func family of Parser
    # methods, which preparse and cache the generate_${attr}_name family of
    # macros for use during parsing
    return dbt.context.common.generate_execute_macro(
        model, runtime_config, manifest, Provider()
    )
| 31.97351
| 79
| 0.61599
|
4a0caa8cd3af62c7ccc69fd6d9143221a8972aa9
| 13,609
|
py
|
Python
|
barbican/tests/tasks/test_keystone_consumer.py
|
dmend/barbican
|
5ff7b4ca1474225acabc36acedcf70a41946e6d0
|
[
"Apache-2.0"
] | 177
|
2015-01-02T09:35:53.000Z
|
2022-02-26T01:43:55.000Z
|
barbican/tests/tasks/test_keystone_consumer.py
|
dmend/barbican
|
5ff7b4ca1474225acabc36acedcf70a41946e6d0
|
[
"Apache-2.0"
] | 3
|
2015-06-23T19:07:31.000Z
|
2017-08-19T04:38:11.000Z
|
barbican/tests/tasks/test_keystone_consumer.py
|
dmend/barbican
|
5ff7b4ca1474225acabc36acedcf70a41946e6d0
|
[
"Apache-2.0"
] | 87
|
2015-01-13T17:33:40.000Z
|
2021-11-09T05:30:36.000Z
|
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from oslo_utils import uuidutils
import sqlalchemy
from barbican.common import exception
from barbican.common import resources as c_resources
from barbican.model import models
from barbican.model import repositories as rep
from barbican.plugin.crypto import manager
from barbican.plugin import resources as plugin
from barbican.tasks import keystone_consumer as consumer
from barbican.tests import database_utils
class InitializeDatabaseMixin(object):
    """Shared fixture helpers: two projects and a secret factory."""
    def _init_memory_db_setup(self):
        """Create two projects (dashed and dashless ids) in the test DB."""
        # Force a refresh of the singleton plugin manager for each test.
        manager._PLUGIN_MANAGER = None
        manager.CONF.set_override('enabled_crypto_plugins',
                                  ['simple_crypto'],
                                  group='crypto')
        self.project_id1 = uuidutils.generate_uuid()
        self.project_id2 = uuidutils.generate_uuid(dashed=False)
        self.project1_data = c_resources.get_or_create_project(
            self.project_id1)
        self.assertIsNotNone(self.project1_data)
        self.project2_data = c_resources.get_or_create_project(
            self.project_id2)
        self.assertIsNotNone(self.project2_data)
    def _create_secret_for_project(self, project_data):
        """Generate and store one AES-256-CBC secret owned by *project_data*."""
        secret_info = {"name": uuidutils.generate_uuid(dashed=False),
                       "algorithm": "aes", "bit_length": 256, "mode": "cbc",
                       "payload_content_type": "application/octet-stream"}
        new_secret = plugin.generate_secret(
            secret_info, secret_info.get('payload_content_type'), project_data)
        return new_secret
class WhenUsingKeystoneEventConsumer(
        database_utils.RepositoryTestCase,
        InitializeDatabaseMixin):
    """Test all but the process() method on KeystoneEventConsumer class.
    For unit testing the process() method, use the
    WhenUsingKeystoneEventConsumerProcessMethod class.
    """
    def setUp(self):
        super(WhenUsingKeystoneEventConsumer, self).setUp()
        # Cache the repository singletons the tests below exercise.
        self.kek_repo = rep.get_kek_datum_repository()
        self.project_repo = rep.get_project_repository()
        self.secret_meta_repo = rep.get_secret_meta_repository()
        self.secret_repo = rep.get_secret_repository()
        self.transport_key_repo = rep.get_transport_key_repository()
    def test_get_project_entities_lookup_call(self):
        # Creating one secret should yield exactly one secret and one KEK
        # datum for that project; repos without project-entity support raise.
        self._init_memory_db_setup()
        secret = self._create_secret_for_project(self.project2_data)
        project2_id = self.project2_data.id
        self.assertIsNotNone(secret)
        db_secrets = self.secret_repo.get_project_entities(project2_id)
        self.assertEqual(1, len(db_secrets))
        self.assertEqual(secret.id, db_secrets[0].id)
        db_kek = self.kek_repo.get_project_entities(project2_id)
        self.assertEqual(1, len(db_kek))
        # secret_meta_repo does not implement function
        # _build_get_project_entities_query, so it should raise error
        self.assertRaises(NotImplementedError,
                          self.secret_meta_repo.get_project_entities,
                          project2_id)
        # transport_key_repo does not implement function
        # _build_get_project_entities_query, so it should raise error
        self.assertRaises(NotImplementedError,
                          self.transport_key_repo.get_project_entities,
                          project2_id)
    @mock.patch.object(models.Project, 'delete',
                       side_effect=sqlalchemy.exc.SQLAlchemyError)
    def test_delete_project_entities_alchemy_error_suppress_exception_true(
            self, mock_entity_delete):
        self._init_memory_db_setup()
        secret = self._create_secret_for_project(self.project1_data)
        self.assertIsNotNone(secret)
        project1_id = self.project1_data.id
        # sqlalchemy error is suppressed here
        no_error = self.project_repo.delete_project_entities(
            project1_id, suppress_exception=True)
        self.assertIsNone(no_error)
    @mock.patch.object(models.Project, 'delete',
                       side_effect=sqlalchemy.exc.SQLAlchemyError)
    def test_delete_project_entities_alchemy_error_suppress_exception_false(
            self, mock_entity_delete):
        self._init_memory_db_setup()
        secret = self._create_secret_for_project(self.project1_data)
        self.assertIsNotNone(secret)
        project1_id = self.project1_data.id
        # sqlalchemy error is not suppressed here
        self.assertRaises(exception.BarbicanException,
                          self.project_repo.delete_project_entities,
                          project1_id, suppress_exception=False)
    def test_delete_project_entities_not_impl_error_suppress_exception_true(
            self):
        self._init_memory_db_setup()
        secret = self._create_secret_for_project(self.project1_data)
        self.assertIsNotNone(secret)
        project1_id = self.project1_data.id
        # NotImplementedError is not suppressed regardless of related flag
        self.assertRaises(NotImplementedError,
                          self.secret_meta_repo.delete_project_entities,
                          project1_id, suppress_exception=True)
    def test_delete_project_entities_not_impl_error_suppress_exception_false(
            self):
        self._init_memory_db_setup()
        secret = self._create_secret_for_project(self.project1_data)
        self.assertIsNotNone(secret)
        project1_id = self.project1_data.id
        # NotImplementedError is not suppressed regardless of related flag
        self.assertRaises(NotImplementedError,
                          self.secret_meta_repo.delete_project_entities,
                          project1_id, suppress_exception=False)
    def test_invoke_handle_error(self):
        # Smoke test: handle_error should accept these arguments without
        # raising; no assertion on side effects is made here.
        task = consumer.KeystoneEventConsumer()
        project = mock.MagicMock()
        project.project_id = 'project_id'
        status = 'status'
        message = 'message'
        exception_test = ValueError('Abort!')
        resource_type = 'type'
        operation_type = 'operation'
        task.handle_error(
            project, status, message, exception_test, project_id=None,
            resource_type=resource_type, operation_type=operation_type)
class WhenUsingKeystoneEventConsumerProcessMethod(
        database_utils.RepositoryTestCase,
        InitializeDatabaseMixin):
    """Test only the process() method on KeystoneEventConsumer class.
    For unit testing all but the process() method, use the
    WhenUsingKeystoneEventConsumer class.
    """
    def setUp(self):
        super(WhenUsingKeystoneEventConsumerProcessMethod, self).setUp()
        # Override the database start function as repositories.start() is
        # already invoked by the RepositoryTestCase base class setUp().
        # Similarly, override the clear function.
        self.task = consumer.KeystoneEventConsumer(
            db_start=mock.MagicMock(),
            db_clear=mock.MagicMock()
        )
    def test_project_entities_cleanup_for_no_matching_barbican_project(self):
        # A keystone project with no barbican data: process() is a no-op.
        self._init_memory_db_setup()
        result = self.task.process(project_id=self.project_id1,
                                   resource_type='project',
                                   operation_type='deleted')
        self.assertIsNone(result, 'No return is expected as result')
    def test_project_entities_cleanup_for_missing_barbican_project(self):
        # A None project id must also be handled gracefully.
        self._init_memory_db_setup()
        result = self.task.process(project_id=None,
                                   resource_type='project',
                                   operation_type='deleted')
        self.assertIsNone(result, 'No return is expected as result')
    @mock.patch.object(consumer.KeystoneEventConsumer, 'handle_success')
    def test_existing_project_entities_cleanup_for_plain_secret(
            self, mock_handle_success):
        # Full happy path: a project-delete event removes the project's
        # secret, its store metadata, its KEK data and the project row.
        self._init_memory_db_setup()
        secret = self._create_secret_for_project(self.project1_data)
        self.assertIsNotNone(secret)
        secret_id = secret.id
        project1_id = self.project1_data.id
        secret_repo = rep.get_secret_repository()
        db_secrets = secret_repo.get_project_entities(project1_id)
        self.assertEqual(1, len(db_secrets))
        self.assertEqual(secret.id, db_secrets[0].id)
        # Get secret_store_metadata for related secret
        self.assertGreater(len(db_secrets[0].secret_store_metadata), 0)
        secret_metadata_id = list(db_secrets[0].
                                  secret_store_metadata.values())[0].id
        self.assertIsNotNone(secret_metadata_id)
        # Get db entry for secret_store_metadata by id to make sure its
        # presence before removing via delete project task
        secret_meta_repo = rep.get_secret_meta_repository()
        db_secret_store_meta = secret_meta_repo.get(
            entity_id=secret_metadata_id)
        self.assertIsNotNone(db_secret_store_meta)
        kek_repo = rep.get_kek_datum_repository()
        db_kek = kek_repo.get_project_entities(project1_id)
        self.assertEqual(1, len(db_kek))
        # task = consumer.KeystoneEventConsumer()
        result = self.task.process(project_id=self.project_id1,
                                   resource_type='project',
                                   operation_type='deleted')
        self.assertIsNone(result, 'No return is expected as result')
        mock_handle_success.assert_has_calls([])
        _, kwargs = mock_handle_success.call_args
        self.assertEqual(self.project_id1, kwargs['project_id'])
        self.assertEqual('project', kwargs['resource_type'])
        self.assertEqual('deleted', kwargs['operation_type'])
        # After project entities delete, make sure secret is not found
        ex = self.assertRaises(exception.NotFound, secret_repo.get,
                               entity_id=secret_id,
                               external_project_id=self.project_id1)
        self.assertIn(secret_id, str(ex))
        # After project entities delete, make sure kek data is not found
        entities = kek_repo.get_project_entities(project1_id)
        self.assertEqual(0, len(entities))
        project_repo = rep.get_project_repository()
        db_project = project_repo.get_project_entities(project1_id)
        self.assertEqual(0, len(db_project))
        # Should have deleted SecretStoreMetadatum via children delete
        self.assertRaises(exception.NotFound,
                          secret_meta_repo.get,
                          entity_id=secret_metadata_id)
    @mock.patch.object(consumer.KeystoneEventConsumer, 'handle_error')
    @mock.patch.object(rep.ProjectRepo, 'delete_project_entities',
                       side_effect=exception.BarbicanException)
    def test_rollback_with_error_during_project_cleanup(self, mock_delete,
                                                        mock_handle_error):
        # A failure mid-cleanup must roll the transaction back, leaving
        # all of the project's entities intact.
        self._init_memory_db_setup()
        secret = self._create_secret_for_project(self.project1_data)
        self.assertIsNotNone(secret)
        secret_id = secret.id
        project1_id = self.project1_data.id
        secret_repo = rep.get_secret_repository()
        db_secrets = secret_repo.get_project_entities(project1_id)
        self.assertEqual(1, len(db_secrets))
        self.assertEqual(secret.id, db_secrets[0].id)
        kek_repo = rep.get_kek_datum_repository()
        db_kek = kek_repo.get_project_entities(project1_id)
        self.assertEqual(1, len(db_kek))
        # Commit changes made so far before creating rollback scenario
        rep.commit()
        # NOTE(review): 'handler_error' looks like a typo for 'handle_error';
        # this assignment creates a dead attribute (handle_error is already
        # patched by the decorator above) — confirm and remove.
        handle_error_mock = mock.MagicMock()
        self.task.handler_error = handle_error_mock
        self.assertRaises(exception.BarbicanException,
                          self.task.process, project_id=self.project_id1,
                          resource_type='project', operation_type='deleted')
        mock_handle_error.assert_called_once_with(
            self.project1_data,
            500,
            mock.ANY,
            mock.ANY,
            operation_type='deleted',
            project_id=mock.ANY,
            resource_type='project',
        )
        args, kwargs = mock_handle_error.call_args
        self.assertEqual(500, args[1])
        self.assertEqual(self.project_id1, kwargs['project_id'])
        self.assertEqual('project', kwargs['resource_type'])
        self.assertEqual('deleted', kwargs['operation_type'])
        # Make sure entities are still present after rollback
        db_secrets = secret_repo.get_project_entities(project1_id)
        self.assertEqual(1, len(db_secrets))
        self.assertEqual(secret_id, db_secrets[0].id)
        db_kek = kek_repo.get_project_entities(project1_id)
        self.assertEqual(1, len(db_kek))
        project_repo = rep.get_project_repository()
        db_project = project_repo.get_project_entities(project1_id)
        self.assertEqual(1, len(db_project))
| 40.382789
| 79
| 0.67764
|
4a0cab360ca849a3b6562caa72fae083d0387dc7
| 2,313
|
py
|
Python
|
cryomem/tnminstruments/SR830.py
|
bebaek/cryomem
|
088fba2568d10451adda51a068c15c8c2a73d9ce
|
[
"MIT"
] | 1
|
2018-09-16T12:29:04.000Z
|
2018-09-16T12:29:04.000Z
|
cryomem/tnminstruments/SR830.py
|
bebaek/cryomem
|
088fba2568d10451adda51a068c15c8c2a73d9ce
|
[
"MIT"
] | null | null | null |
cryomem/tnminstruments/SR830.py
|
bebaek/cryomem
|
088fba2568d10451adda51a068c15c8c2a73d9ce
|
[
"MIT"
] | null | null | null |
from .base import Interface
class SR830(Interface):
    """SRS lock-in"""
    def __init__(self, interface="gpib0"):
        super().__init__(interface)
        # Sensitivity full-scale values in volts, indexed by the SENS? code.
        self.vsens = [2e-9, 5e-9, 10e-9, 2e-8, 5e-8, 10e-8, 2e-7, 5e-7, 10e-7,\
                      2e-6, 5e-6, 10e-6, 2e-5, 5e-5, 10e-5, 2e-4, 5e-4, 10e-4,\
                      2e-3, 5e-3, 10e-3, 2e-2, 5e-2, 10e-2, 2e-1, 5e-1, 10e-1]
    def get_x(self):
        """Read the X (in-phase) output in volts."""
        self.write('OUTP? 1')
        return float(self.read())
    # r = sqrt(x2+y2) in V
    def get_r(self):
        """Read the R (magnitude) output in volts."""
        self.write('OUTP? 3')
        return float(self.read())
    # needed in offset subtracted measurements
    def get_offset(self):
        """Return the R offset in volts (offset % scaled by sensitivity)."""
        self.write('SENS?') # get range
        msg = self.read()
        print(msg, flush=True)
        idx = int(msg)
        sens = self.vsens[idx]
        self.write('OEXP? 3') # get R offset (%)
        # OEXP? returns "offset,expand"; expand is unused here.
        off, exp = tuple(map(float, self.read().split(',')))
        return sens*off/100
    # use get_r instead
    def getdisp(self, ch):
        """Read the raw display value for channel *ch* (OUTR?)."""
        self.write('OUTR? %d'%ch)
        msg = self.read()
        return float(msg)
    # get r = sqrt(x**2 + y**2) calculated with offset and expand
    def get_r_(self, ch):
        # NOTE(review): off/exp are queried but never applied — the display
        # value is returned unchanged, contradicting the comment above.
        # Confirm whether the offset/expand correction was intended.
        disp = self.getdisp(ch) # get display
        self.write('OEXP? 3') # get offset/expand
        off, exp = tuple(map(float, self.read().split(',')))
        return disp
    def dacout(self, ch, voltage):
        """Set AUX output *ch* to *voltage* (echoes the command sent)."""
        self.write('AUXV %d,%7.3f'%(ch,voltage)) # 1 mV resolution
        print ('AUXV %d,%7.3f'%(ch,voltage), flush=True)
    # conform with yaml configs
    def set_auxvout(self, voltage, channel=-1):
        """Set an AUX output voltage; returns the formatted value written."""
        v_actual = "%7.3f"%(voltage)
        msg = 'AUXV %d,%s'%(channel, v_actual)
        self.write(msg)
        print(msg)
        return v_actual
    def get_auxvout(self, ch):
        """Read back the AUX output voltage for channel *ch*."""
        self.write('AUXV? %d'%ch)
        msg = self.read()
        return float(msg)
    def dacoff(self, ch):
        """Zero AUX output channel *ch*."""
        self.write('AUXV %d,%7.3f'%(ch,0)) # 1 mV resolution
    def sinelvl(self, v):
        """Set the sine output amplitude, clamped to the 4 mV minimum."""
        if v < 0.004:
            v = 0.004
            print('Setting to lower limit, %5.3f'%v, flush=True)
        msg = 'SLVL %7.3f'%v
        self.write(msg)
        print (msg, flush=True)
    def freq(self, v):
        """Set the reference frequency in Hz."""
        msg = 'FREQ %9.4f'%v
        self.write(msg)
        print (msg, flush=True)
| 29.653846
| 79
| 0.525724
|
4a0cab5869563f2d42b64d60c002d206d686b726
| 766
|
py
|
Python
|
project/globalnews/news/models.py
|
sam-littlefield/GlobalNews
|
217d0c030a1b028da99117600d48751300a81e15
|
[
"MIT"
] | null | null | null |
project/globalnews/news/models.py
|
sam-littlefield/GlobalNews
|
217d0c030a1b028da99117600d48751300a81e15
|
[
"MIT"
] | null | null | null |
project/globalnews/news/models.py
|
sam-littlefield/GlobalNews
|
217d0c030a1b028da99117600d48751300a81e15
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django.utils.timezone import now
class Article(models.Model):
    """A user-submitted news story pinned to a geographic location."""
    # NOTE(review): "sumbitter" is a typo for "submitter"; renaming the
    # field would require a schema migration, so it is flagged, not fixed.
    sumbitter = models.ForeignKey(User,on_delete=models.CASCADE)
    # Set at creation time; editable=False keeps it out of forms/admin.
    submitted_date = models.DateTimeField(default=now, editable=False)
    title = models.CharField(max_length=200)
    description = models.CharField(max_length=200)
    content = models.CharField(max_length=2000)
    # 9 digits / 6 decimal places covers the full coordinate range with
    # ~0.1 m precision.
    latitude = models.DecimalField(max_digits=9, decimal_places=6)
    longitude = models.DecimalField(max_digits=9, decimal_places=6)
    def __str__(self):
        return self.title
    class Meta:
        verbose_name = "Article"
        verbose_name_plural = "Articles"
| 33.304348
| 70
| 0.738903
|
4a0cac1bd732d208609f04bf7af09c982965fa8b
| 5,485
|
py
|
Python
|
noxfile.py
|
PaoloTo/renault-api
|
66976aa23dee1b4790363a610193b990acd0a345
|
[
"MIT"
] | null | null | null |
noxfile.py
|
PaoloTo/renault-api
|
66976aa23dee1b4790363a610193b990acd0a345
|
[
"MIT"
] | null | null | null |
noxfile.py
|
PaoloTo/renault-api
|
66976aa23dee1b4790363a610193b990acd0a345
|
[
"MIT"
] | null | null | null |
"""Nox sessions."""
import shutil
import sys
from pathlib import Path
from textwrap import dedent
import nox
import nox_poetry.patch
from nox.sessions import Session
# Import name of the package under test.
package = "renault_api"
# Interpreters exercised by the matrix sessions.
python_versions = ["3.9", "3.8", "3.7"]
# Sessions run by a bare `nox` invocation; docs live-serve and the
# coverage report are opt-in only.
nox.options.sessions = (
    "pre-commit",
    "safety",
    "mypy",
    "tests",
    "typeguard",
    "xdoctest",
    "docs-build",
)
def activate_virtualenv_in_precommit_hooks(session: Session) -> None:
    """Activate virtualenv in hooks installed by pre-commit.
    This function patches git hooks installed by pre-commit to activate the
    session's virtual environment. This allows pre-commit to locate hooks in
    that environment when invoked from git.
    Args:
        session: The Session object.
    """
    # Nothing to patch for sessions without their own virtualenv.
    if session.bin is None:
        return
    virtualenv = session.env.get("VIRTUAL_ENV")
    if virtualenv is None:
        return
    hookdir = Path(".git") / "hooks"
    if not hookdir.is_dir():
        return
    for hook in hookdir.iterdir():
        # Skip git's sample hooks and anything that isn't a regular file.
        if hook.name.endswith(".sample") or not hook.is_file():
            continue
        text = hook.read_text()
        bindir = repr(session.bin)[1:-1]  # strip quotes
        # Only patch hooks that reference this session's bindir.
        # Path("A") == Path("a") is True on case-insensitive path flavours
        # (Windows), where the comparison must also be case-folded.
        if not (
            Path("A") == Path("a") and bindir.lower() in text.lower() or bindir in text
        ):
            continue
        lines = text.splitlines()
        # Only patch Python shebang scripts; the env header below is Python.
        if not (lines[0].startswith("#!") and "python" in lines[0].lower()):
            continue
        header = dedent(
            f"""\
            import os
            os.environ["VIRTUAL_ENV"] = {virtualenv!r}
            os.environ["PATH"] = os.pathsep.join((
                {session.bin!r},
                os.environ.get("PATH", ""),
            ))
            """
        )
        # Insert the environment setup right after the shebang line.
        lines.insert(1, header)
        hook.write_text("\n".join(lines))
@nox.session(name="pre-commit", python="3.9")
def precommit(session: Session) -> None:
    """Lint the codebase with pre-commit and its hook tools."""
    args = session.posargs or ["run", "--all-files", "--show-diff-on-failure"]
    hook_tools = [
        "black",
        "darglint",
        "flake8",
        "flake8-bandit",
        "flake8-bugbear",
        "flake8-docstrings",
        "flake8-rst-docstrings",
        "pep8-naming",
        "pre-commit",
        "pre-commit-hooks",
        "reorder-python-imports",
    ]
    session.install(*hook_tools)
    session.run("pre-commit", *args)
    # When installing the hooks, patch them to use this session's venv.
    if args and args[0] == "install":
        activate_virtualenv_in_precommit_hooks(session)
@nox.session(python="3.9")
def safety(session: Session) -> None:
    """Scan the locked dependencies for known-insecure packages."""
    req_file = nox_poetry.export_requirements(session)
    session.install("safety")
    session.run("safety", "check", "--file={}".format(req_file), "--bare")
@nox.session(python=python_versions)
def mypy(session: Session) -> None:
    """Static type-check the package, tests and docs config with mypy."""
    targets = session.posargs or ["src", "tests", "docs/conf.py"]
    session.install(".")
    session.install("mypy", "pytest")
    session.run("mypy", *targets)
    if session.posargs:
        return
    # Also check this noxfile itself, against the session's interpreter.
    session.run("mypy", f"--python-executable={sys.executable}", "noxfile.py")
@nox.session(python=python_versions)
def tests(session: Session) -> None:
    """Run the pytest suite under coverage."""
    session.install(".[cli]")
    test_deps = (
        "coverage[toml]", "pytest", "pygments", "pytest-asyncio", "aioresponses"
    )
    session.install(*test_deps)
    cmd = ["coverage", "run", "--parallel", "-m", "pytest", *session.posargs]
    try:
        session.run(*cmd)
    finally:
        # Queue the coverage report even when tests fail (interactive only).
        if session.interactive:
            session.notify("coverage")
@nox.session
def coverage(session: Session) -> None:
    """Combine parallel coverage data and produce the report."""
    # Honour posargs only when coverage is the sole session being run.
    use_posargs = session.posargs and len(session._runner.manifest) == 1
    cov_args = session.posargs if use_posargs else ["report"]
    session.install("coverage[toml]")
    if not use_posargs and any(Path().glob(".coverage.*")):
        session.run("coverage", "combine")
    session.run("coverage", *cov_args)
@nox.session(python=python_versions)
def typeguard(session: Session) -> None:
"""Runtime type checking using Typeguard."""
session.install(".[cli]")
session.install("pytest", "typeguard", "pygments", "pytest-asyncio", "aioresponses")
session.run("pytest", f"--typeguard-packages={package}", *session.posargs)
@nox.session(python=python_versions)
def xdoctest(session: Session) -> None:
"""Run examples with xdoctest."""
args = session.posargs or ["all"]
session.install(".")
session.install("xdoctest[colors]")
session.run("python", "-m", "xdoctest", package, *args)
@nox.session(name="docs-build", python="3.8")
def docs_build(session: Session) -> None:
"""Build the documentation."""
args = session.posargs or ["docs", "docs/_build"]
session.install(".[cli]")
session.install("sphinx", "sphinx-click", "sphinx-rtd-theme")
build_dir = Path("docs", "_build")
if build_dir.exists():
shutil.rmtree(build_dir)
session.run("sphinx-build", *args)
@nox.session(python="3.8")
def docs(session: Session) -> None:
"""Build and serve the documentation with live reloading on file changes."""
args = session.posargs or ["--open-browser", "docs", "docs/_build"]
session.install(".")
session.install("sphinx", "sphinx-autobuild", "sphinx-click", "sphinx-rtd-theme")
build_dir = Path("docs", "_build")
if build_dir.exists():
shutil.rmtree(build_dir)
session.run("sphinx-autobuild", *args)
| 29.021164
| 88
| 0.618778
|
4a0cac5f3095a1a455c007ea83a16cec1848333f
| 131
|
py
|
Python
|
twitchBot/bot/cogs/__init__.py
|
IPLSplatoon/Radia-Productions
|
e7d27f3dd686557cee95340be56cee81a0d02c2f
|
[
"MIT"
] | null | null | null |
twitchBot/bot/cogs/__init__.py
|
IPLSplatoon/Radia-Productions
|
e7d27f3dd686557cee95340be56cee81a0d02c2f
|
[
"MIT"
] | 1
|
2021-07-17T11:40:46.000Z
|
2021-07-17T13:58:24.000Z
|
twitchBot/bot/cogs/__init__.py
|
IPLSplatoon/Radia-Productions
|
e7d27f3dd686557cee95340be56cee81a0d02c2f
|
[
"MIT"
] | 1
|
2021-05-19T21:26:17.000Z
|
2021-05-19T21:26:17.000Z
|
from .commentators import CommentatorsCog
from .information import InformationCog
names = [
"commentators",
"information"
]
| 16.375
| 41
| 0.763359
|
4a0cac9c73e3210e6fe321e3a2b4869ca8970f0c
| 1,022
|
py
|
Python
|
samples/lipanampesa.py
|
CleoMugs/mpesa
|
970b8fecf9b1b963495587c671328261c356acaf
|
[
"MIT"
] | null | null | null |
samples/lipanampesa.py
|
CleoMugs/mpesa
|
970b8fecf9b1b963495587c671328261c356acaf
|
[
"MIT"
] | null | null | null |
samples/lipanampesa.py
|
CleoMugs/mpesa
|
970b8fecf9b1b963495587c671328261c356acaf
|
[
"MIT"
] | null | null | null |
import requests
from requests.auth import HTTPBasicAuth
from access_token import generate_access_token
from encode import generate_password
from utils import get_timestamp
import keys
def lipa_na_mpesa():
formatted_time = get_timestamp()
decoded_password = generate_password(formatted_time)
access_token = generate_access_token()
api_url = "https://sandbox.safaricom.co.ke/mpesa/stkpush/v1/processrequest"
headers = { "Authorization": "Bearer %s" % access_token }
request = {
"BusinessShortCode": keys.business_shortCode,
"Password": decoded_password,
"Timestamp": formatted_time,
"TransactionType": "CustomerPayBillOnline",
"Amount": "1",
"PartyA": keys.phone_number,
"PartyB": keys.business_shortCode,
"PhoneNumber": keys.phone_number,
"CallBackURL": "https://fullstackdjango.com/lipanampesa/",
"AccountReference": "12345678",
"TransactionDesc": "Pay School Fees"
}
response = requests.post(api_url, json = request, headers=headers)
print (response.text)
lipa_na_mpesa()
| 27.621622
| 76
| 0.760274
|
4a0cad8dc0052f9f6466d36ee82a21a76aef1ccc
| 1,174
|
py
|
Python
|
examples/grad_descent.py
|
seba-1511/randopt
|
74cefcc734c6a38418151025b0a4d8b6cb41eb14
|
[
"Apache-2.0"
] | 115
|
2016-11-21T06:44:19.000Z
|
2022-01-21T22:21:27.000Z
|
examples/grad_descent.py
|
seba-1511/randopt
|
74cefcc734c6a38418151025b0a4d8b6cb41eb14
|
[
"Apache-2.0"
] | 26
|
2016-11-21T07:31:37.000Z
|
2019-01-16T14:13:23.000Z
|
examples/grad_descent.py
|
seba-1511/randopt
|
74cefcc734c6a38418151025b0a4d8b6cb41eb14
|
[
"Apache-2.0"
] | 9
|
2018-04-02T19:54:20.000Z
|
2020-02-11T09:12:41.000Z
|
#!/usr/bin/env python
import randopt as ro
def loss(x):
return x**2 + 2*x - 3
def dloss(x):
return 2*x + 2
def grad_descent(f, df, init, num_epochs, lr):
params = init
convergence = []
for epoch in range(num_epochs):
params = params - (lr * df(params))
convergence.append(f(params))
# Return final result + convergence array
return f(params), convergence
if __name__ == '__main__':
init = 10.0
num_runs = 100
exp = ro.Experiment('grad_descent', {
'learning_rate': ro.Gaussian(mean=0.01, std=0.01),
'num_epochs': ro.Truncated(
ro.Gaussian(mean=50, std=10, dtype='int'),
low=10,
high=100)
})
# Run the experiment a couple of time
for _ in range(num_runs):
exp.sample_all_params()
result, convergence = grad_descent(loss, dloss, init, exp.num_epochs,
exp.learning_rate)
exp.add_result(result, data={
'convergence': convergence
})
opt = exp.minimum()
print('Optimal result: ', opt.result, ', with convergence: ', opt.params['convergence'])
| 25.521739
| 92
| 0.572402
|
4a0caf4f27ac19245b4a216d2a6e8579b6b80c0d
| 1,982
|
py
|
Python
|
conf.py
|
phuang1024/tutorials
|
092f999431aa6ba5148c2a2f95ea26e913f3f7bb
|
[
"CC0-1.0"
] | 1
|
2021-11-23T19:58:37.000Z
|
2021-11-23T19:58:37.000Z
|
conf.py
|
phuang1024/tutorials
|
092f999431aa6ba5148c2a2f95ea26e913f3f7bb
|
[
"CC0-1.0"
] | null | null | null |
conf.py
|
phuang1024/tutorials
|
092f999431aa6ba5148c2a2f95ea26e913f3f7bb
|
[
"CC0-1.0"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Tutorials'
copyright = '2021, Patrick Huang'
author = 'Patrick Huang'
# The full version, including alpha/beta/rc tags
release = '0.0.1'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
import sphinx_rtd_theme
extensions = [
"sphinx_rtd_theme"
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 34.77193
| 79
| 0.668012
|
4a0cb0d97b69ec6757f5bdb7495cae00c014408e
| 17,484
|
py
|
Python
|
msgraph-cli-extensions/v1_0/identitydirmgt_v1_0/azext_identitydirmgt_v1_0/vendored_sdks/identitydirmgt/operations/_directory_roles_directory_role_operations.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | null | null | null |
msgraph-cli-extensions/v1_0/identitydirmgt_v1_0/azext_identitydirmgt_v1_0/vendored_sdks/identitydirmgt/operations/_directory_roles_directory_role_operations.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | 22
|
2022-03-29T22:54:37.000Z
|
2022-03-29T22:55:27.000Z
|
msgraph-cli-extensions/v1_0/identitydirmgt_v1_0/azext_identitydirmgt_v1_0/vendored_sdks/identitydirmgt/operations/_directory_roles_directory_role_operations.py
|
thewahome/msgraph-cli
|
33127d9efa23a0e5f5303c93242fbdbb73348671
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DirectoryRolesDirectoryRoleOperations(object):
"""DirectoryRolesDirectoryRoleOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~identity_directory_management.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_directory_role(
self,
orderby=None, # type: Optional[List[Union[str, "models.Enum54"]]]
select=None, # type: Optional[List[Union[str, "models.Enum55"]]]
expand=None, # type: Optional[List[Union[str, "models.Enum56"]]]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.CollectionOfDirectoryRole"]
"""Get entities from directoryRoles.
Get entities from directoryRoles.
:param orderby: Order items by property values.
:type orderby: list[str or ~identity_directory_management.models.Enum54]
:param select: Select properties to be returned.
:type select: list[str or ~identity_directory_management.models.Enum55]
:param expand: Expand related entities.
:type expand: list[str or ~identity_directory_management.models.Enum56]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CollectionOfDirectoryRole or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~identity_directory_management.models.CollectionOfDirectoryRole]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CollectionOfDirectoryRole"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_directory_role.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if self._config.top is not None:
query_parameters['$top'] = self._serialize.query("self._config.top", self._config.top, 'int', minimum=0)
if self._config.skip is not None:
query_parameters['$skip'] = self._serialize.query("self._config.skip", self._config.skip, 'int', minimum=0)
if self._config.search is not None:
query_parameters['$search'] = self._serialize.query("self._config.search", self._config.search, 'str')
if self._config.filter is not None:
query_parameters['$filter'] = self._serialize.query("self._config.filter", self._config.filter, 'str')
if self._config.count is not None:
query_parameters['$count'] = self._serialize.query("self._config.count", self._config.count, 'bool')
if orderby is not None:
query_parameters['$orderby'] = self._serialize.query("orderby", orderby, '[str]', div=',')
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('CollectionOfDirectoryRole', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.odata_next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.OdataError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_directory_role.metadata = {'url': '/directoryRoles'} # type: ignore
def create_directory_role(
self,
body, # type: "models.MicrosoftGraphDirectoryRole"
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphDirectoryRole"
"""Add new entity to directoryRoles.
Add new entity to directoryRoles.
:param body: New entity.
:type body: ~identity_directory_management.models.MicrosoftGraphDirectoryRole
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphDirectoryRole, or the result of cls(response)
:rtype: ~identity_directory_management.models.MicrosoftGraphDirectoryRole
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphDirectoryRole"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_directory_role.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphDirectoryRole')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphDirectoryRole', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_directory_role.metadata = {'url': '/directoryRoles'} # type: ignore
def get_directory_role(
self,
directory_role_id, # type: str
select=None, # type: Optional[List[Union[str, "models.Enum57"]]]
expand=None, # type: Optional[List[Union[str, "models.Enum58"]]]
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphDirectoryRole"
"""Get entity from directoryRoles by key.
Get entity from directoryRoles by key.
:param directory_role_id: key: id of directoryRole.
:type directory_role_id: str
:param select: Select properties to be returned.
:type select: list[str or ~identity_directory_management.models.Enum57]
:param expand: Expand related entities.
:type expand: list[str or ~identity_directory_management.models.Enum58]
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphDirectoryRole, or the result of cls(response)
:rtype: ~identity_directory_management.models.MicrosoftGraphDirectoryRole
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphDirectoryRole"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_directory_role.metadata['url'] # type: ignore
path_format_arguments = {
'directoryRole-id': self._serialize.url("directory_role_id", directory_role_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if select is not None:
query_parameters['$select'] = self._serialize.query("select", select, '[str]', div=',')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, '[str]', div=',')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphDirectoryRole', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_directory_role.metadata = {'url': '/directoryRoles/{directoryRole-id}'} # type: ignore
def update_directory_role(
self,
directory_role_id, # type: str
body, # type: "models.MicrosoftGraphDirectoryRole"
**kwargs # type: Any
):
# type: (...) -> None
"""Update entity in directoryRoles.
Update entity in directoryRoles.
:param directory_role_id: key: id of directoryRole.
:type directory_role_id: str
:param body: New property values.
:type body: ~identity_directory_management.models.MicrosoftGraphDirectoryRole
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_directory_role.metadata['url'] # type: ignore
path_format_arguments = {
'directoryRole-id': self._serialize.url("directory_role_id", directory_role_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'MicrosoftGraphDirectoryRole')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update_directory_role.metadata = {'url': '/directoryRoles/{directoryRole-id}'} # type: ignore
def delete_directory_role(
self,
directory_role_id, # type: str
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""Delete entity from directoryRoles.
Delete entity from directoryRoles.
:param directory_role_id: key: id of directoryRole.
:type directory_role_id: str
:param if_match: ETag.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.delete_directory_role.metadata['url'] # type: ignore
path_format_arguments = {
'directoryRole-id': self._serialize.url("directory_role_id", directory_role_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_directory_role.metadata = {'url': '/directoryRoles/{directoryRole-id}'} # type: ignore
| 46.748663
| 133
| 0.655857
|
4a0cb18d69e023ff658e8c0808e1c0ddeb395755
| 1,408
|
py
|
Python
|
plotly/validators/heatmap/colorbar/_tickfont.py
|
fcollonval/plotly.py
|
5c7f100db1af8c82bb740a38ef684955a8ed6d0e
|
[
"MIT"
] | 2
|
2020-03-24T11:41:14.000Z
|
2021-01-14T07:59:43.000Z
|
plotly/validators/heatmap/colorbar/_tickfont.py
|
fcollonval/plotly.py
|
5c7f100db1af8c82bb740a38ef684955a8ed6d0e
|
[
"MIT"
] | null | null | null |
plotly/validators/heatmap/colorbar/_tickfont.py
|
fcollonval/plotly.py
|
5c7f100db1af8c82bb740a38ef684955a8ed6d0e
|
[
"MIT"
] | 4
|
2019-06-03T14:49:12.000Z
|
2022-01-06T01:05:12.000Z
|
import _plotly_utils.basevalidators
class TickfontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name='tickfont', parent_name='heatmap.colorbar', **kwargs
):
super(TickfontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str='Tickfont',
data_docs="""
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
""",
**kwargs
)
| 38.054054
| 78
| 0.569602
|
4a0cb1ce3eadfbfc3895c39d26a96fd19d20f0f6
| 6,990
|
py
|
Python
|
tools/build/v2/tools/package.py
|
jmuskaan72/Boost
|
047e36c01841a8cd6a5c74d4e3034da46e327bc1
|
[
"BSL-1.0"
] | 198
|
2015-01-13T05:47:18.000Z
|
2022-03-09T04:46:46.000Z
|
libs/boost/tools/build/src/tools/package.py
|
flingone/frameworks_base_cmds_remoted
|
4509d9f0468137ed7fd8d100179160d167e7d943
|
[
"Apache-2.0"
] | 9
|
2015-01-28T16:33:19.000Z
|
2020-04-12T23:03:28.000Z
|
libs/boost/tools/build/src/tools/package.py
|
flingone/frameworks_base_cmds_remoted
|
4509d9f0468137ed7fd8d100179160d167e7d943
|
[
"Apache-2.0"
] | 139
|
2015-01-15T20:09:31.000Z
|
2022-01-31T15:21:16.000Z
|
# Status: ported
# Base revision: 64488
#
# Copyright (c) 2005, 2010 Vladimir Prus.
# Copyright 2006 Rene Rivera.
#
# Use, modification and distribution is subject to the Boost Software
# License Version 1.0. (See accompanying file LICENSE_1_0.txt or
# http://www.boost.org/LICENSE_1_0.txt)
# Provides mechanism for installing whole packages into a specific directory
# structure. This is opposed to the 'install' rule, that installs a number of
# targets to a single directory, and does not care about directory structure at
# all.
# Example usage:
#
# package.install boost : <properties>
# : <binaries>
# : <libraries>
# : <headers>
# ;
#
# This will install binaries, libraries and headers to the 'proper' location,
# given by command line options --prefix, --exec-prefix, --bindir, --libdir and
# --includedir.
#
# The rule is just a convenient wrapper, avoiding the need to define several
# 'install' targets.
#
# The only install-related feature is <install-source-root>. It will apply to
# headers only and if present, paths of headers relatively to source root will
# be retained after installing. If it is not specified, then "." is assumed, so
# relative paths in headers are always preserved.
import b2.build.feature as feature
import b2.build.property as property
import b2.util.option as option
import b2.tools.stage as stage
from b2.build.alias import alias
from b2.manager import get_manager
from b2.util import bjam_signature
from b2.util.utility import ungrist
import os
feature.feature("install-default-prefix", [], ["free", "incidental"])
@bjam_signature((["name", "package_name", "?"], ["requirements", "*"],
["binaries", "*"], ["libraries", "*"], ["headers", "*"]))
def install(name, package_name=None, requirements=[], binaries=[], libraries=[], headers=[]):
requirements = requirements[:]
binaries = binaries[:]
libraries
if not package_name:
package_name = name
if option.get("prefix"):
# If --prefix is explicitly specified on the command line,
# then we need wipe away any settings of libdir/includir that
# is specified via options in config files.
option.set("bindir", None)
option.set("libdir", None)
option.set("includedir", None)
# If <install-source-root> is not specified, all headers are installed to
# prefix/include, no matter what their relative path is. Sometimes that is
# what is needed.
install_source_root = property.select('install-source-root', requirements)
if install_source_root:
requirements = property.change(requirements, 'install-source-root', None)
install_header_subdir = property.select('install-header-subdir', requirements)
if install_header_subdir:
install_header_subdir = ungrist(install_header_subdir[0])
requirements = property.change(requirements, 'install-header-subdir', None)
# First, figure out all locations. Use the default if no prefix option
# given.
prefix = get_prefix(name, requirements)
# Architecture dependent files.
exec_locate = option.get("exec-prefix", prefix)
# Binaries.
bin_locate = option.get("bindir", os.path.join(prefix, "bin"))
# Object code libraries.
lib_locate = option.get("libdir", os.path.join(prefix, "lib"))
# Source header files.
include_locate = option.get("includedir", os.path.join(prefix, "include"))
stage.install(name + "-bin", binaries, requirements + ["<location>" + bin_locate])
alias(name + "-lib", [name + "-lib-shared", name + "-lib-static"])
# Since the install location of shared libraries differs on universe
# and cygwin, use target alternatives to make different targets.
# We should have used indirection conditioanl requirements, but it's
# awkward to pass bin-locate and lib-locate from there to another rule.
alias(name + "-lib-shared", [name + "-lib-shared-universe"])
alias(name + "-lib-shared", [name + "-lib-shared-cygwin"], ["<target-os>cygwin"])
# For shared libraries, we install both explicitly specified one and the
# shared libraries that the installed executables depend on.
stage.install(name + "-lib-shared-universe", binaries + libraries,
requirements + ["<location>" + lib_locate, "<install-dependencies>on",
"<install-type>SHARED_LIB"])
stage.install(name + "-lib-shared-cygwin", binaries + libraries,
requirements + ["<location>" + bin_locate, "<install-dependencies>on",
"<install-type>SHARED_LIB"])
# For static libraries, we do not care about executable dependencies, since
# static libraries are already incorporated into them.
stage.install(name + "-lib-static", libraries, requirements +
["<location>" + lib_locate, "<install-dependencies>on", "<install-type>STATIC_LIB"])
stage.install(name + "-headers", headers, requirements \
+ ["<location>" + os.path.join(include_locate, s) for s in install_header_subdir]
+ install_source_root)
alias(name, [name + "-bin", name + "-lib", name + "-headers"])
pt = get_manager().projects().current()
for subname in ["bin", "lib", "headers", "lib-shared", "lib-static", "lib-shared-universe", "lib-shared-cygwin"]:
pt.mark_targets_as_explicit([name + "-" + subname])
@bjam_signature((["target_name"], ["package_name"], ["data", "*"], ["requirements", "*"]))
def install_data(target_name, package_name, data, requirements):
if not package_name:
package_name = target_name
if option.get("prefix"):
# If --prefix is explicitly specified on the command line,
# then we need wipe away any settings of datarootdir
option.set("datarootdir", None)
prefix = get_prefix(package_name, requirements)
datadir = option.get("datarootdir", os.path.join(prefix, "share"))
stage.install(target_name, data,
requirements + ["<location>" + os.path.join(datadir, package_name)])
get_manager().projects().current().mark_targets_as_explicit([target_name])
def get_prefix(package_name, requirements):
specified = property.select("install-default-prefix", requirements)
if specified:
specified = ungrist(specified[0])
prefix = option.get("prefix", specified)
requirements = property.change(requirements, "install-default-prefix", None)
# Or some likely defaults if neither is given.
if not prefix:
if os.name == "nt":
prefix = "C:\\" + package_name
elif os.name == "posix":
prefix = "/usr/local"
return prefix
| 41.360947
| 118
| 0.646066
|
4a0cb1cfb325d70f415e502d5873a1b89f13a3ce
| 26,797
|
py
|
Python
|
tensorflow_text/python/ops/pointer_ops.py
|
unclepeddy/text
|
71e07fd898b8ceae26fd765b251d615ab06b2306
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_text/python/ops/pointer_ops.py
|
unclepeddy/text
|
71e07fd898b8ceae26fd765b251d615ab06b2306
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_text/python/ops/pointer_ops.py
|
unclepeddy/text
|
71e07fd898b8ceae26fd765b251d615ab06b2306
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2019 TF.Text Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ops that consume or generate index-based pointers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_gather_ops
from tensorflow.python.ops.ragged import ragged_math_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_where_op
from tensorflow.python.ops.ragged import segment_id_ops
def gather_with_default(params, indices, default, name=None, axis=0):
  """Gather slices with `indices=-1` mapped to `default`.

  This operation is similar to `tf.gather()`, except that any value of `-1`
  in `indices` will be mapped to `default`. Example:

  ```python
  >>> gather_with_default(['a', 'b', 'c', 'd'], [2, 0, -1, 2, -1], '_').eval()
  array(['c', 'a', '_', 'c', '_'], dtype=object)
  ```

  Args:
    params: The `Tensor` from which to gather values. Must be at least rank
      `axis + 1`.
    indices: The index `Tensor`. Must have dtype `int32` or `int64`, and values
      must be in the range `[-1, params.shape[axis])`.
    default: The value to use when `indices` is `-1`. `default.shape` must
      be equal to `params.shape[axis + 1:]`.
    name: A name for the operation (optional).
    axis: The axis in `params` to gather `indices` from. Must be a scalar
      `int32` or `int64`. Supports negative indices.

  Returns:
    A `Tensor` with the same type as `param`, and with shape
    `params.shape[:axis] + indices.shape + params.shape[axis + 1:]`.
  """
  # This implementation basically just concatenates the default value and
  # the params together, and then uses gather(default_plus_params, indices + 1)
  # to get the appropriate values. Most of the complexity below has to do
  # with properly handling cases where axis != 0, in which case we need to tile
  # the default before concatenating it.
  with ops.name_scope(name, 'GatherWithDefault',
                      [params, indices, default, axis]):
    # Convert inputs to tensors.
    indices = ops.convert_to_tensor(
        indices, name='indices', preferred_dtype=dtypes.int32)
    params = ops.convert_to_tensor(params, name='params')
    default = ops.convert_to_tensor(default, name='default', dtype=params.dtype)
    if axis == 0:
      # Fast path: the default simply becomes a new leading row; no tiling.
      tiled_default = array_ops.stack([default])
    else:
      # Get ranks & shapes of inputs.
      params_rank = array_ops.rank(params)
      params_shape = array_ops.shape(params)
      default_shape = array_ops.shape(default)
      outer_params_shape = params_shape[:axis]
      # This will equal `axis` if axis>=0.
      outer_params_rank = array_ops.shape(outer_params_shape)[0]
      # Add dimensions (with size=1) to default, so its rank matches params.
      # (+1 accounts for the size-1 "row" dimension along `axis` itself.)
      new_shape = array_ops.concat([
          array_ops.ones([outer_params_rank + 1], dtypes.int32), default_shape
      ],
                                   axis=0)
      reshaped_default = array_ops.reshape(default, new_shape)
      # Tile the default for any dimension dim<axis, so its size matches params.
      multiples = array_ops.concat([
          outer_params_shape,
          array_ops.ones(params_rank - outer_params_rank, dtypes.int32)
      ],
                                   axis=0)
      tiled_default = array_ops.tile(reshaped_default, multiples)
    # Prepend the default value to params (on the chosen axis). Thus, the
    # default value is at index 0, and all other values have their index
    # incremented by one.
    default_plus_params = array_ops.concat([tiled_default, params], axis=axis)
    return array_ops.gather(default_plus_params, indices + 1, axis=axis)
def span_overlaps(source_start,
                  source_limit,
                  target_start,
                  target_limit,
                  contains=False,
                  contained_by=False,
                  partial_overlap=False,
                  name=None):
  """Returns a boolean tensor indicating which source and target spans overlap.

  The source and target spans are specified using B+1 dimensional tensors,
  with `B>=0` batch dimensions followed by a final dimension that lists the
  span offsets for each span in the batch:

  * The `i`th source span in batch `b1...bB` starts at
    `source_start[b1...bB, i]` (inclusive), and extends to just before
    `source_limit[b1...bB, i]` (exclusive).
  * The `j`th target span in batch `b1...bB` starts at
    `target_start[b1...bB, j]` (inclusive), and extends to just before
    `target_limit[b1...bB, j]` (exclusive).

  `result[b1...bB, i, j]` is true if the `i`th source span overlaps with the
  `j`th target span in batch `b1...bB`, where a source span overlaps a target
  span if any of the following are true:

  * The spans are identical.
  * `contains` is true, and the source span contains the target span.
  * `contained_by` is true, and the source span is contained by the target
    span.
  * `partial_overlap` is true, and there is a non-zero overlap between the
    source span and the target span.

  Args:
    source_start: A B+1 dimensional potentially ragged tensor with shape
      `[D1...DB, source_size]`: the start offset of each source span.
    source_limit: A B+1 dimensional potentially ragged tensor with shape
      `[D1...DB, source_size]`: the limit offset of each source span.
    target_start: A B+1 dimensional potentially ragged tensor with shape
      `[D1...DB, target_size]`: the start offset of each target span.
    target_limit: A B+1 dimensional potentially ragged tensor with shape
      `[D1...DB, target_size]`: the limit offset of each target span.
    contains: If true, then a source span is considered to overlap a target span
      when the source span contains the target span.
    contained_by: If true, then a source span is considered to overlap a target
      span when the source span is contained by the target span.
    partial_overlap: If true, then a source span is considered to overlap a
      target span when the source span partially overlaps the target span.
    name: A name for the operation (optional).

  Returns:
    A B+2 dimensional potentially ragged boolean tensor with shape
    `[D1...DB, source_size, target_size]`.

  Raises:
    ValueError: If the span tensors are incompatible.

  #### Example:
    Given the following source and target spans (with no batch dimensions):

    ```python
    #         0    5    10   15   20   25   30   35   40
    #         |====|====|====|====|====|====|====|====|
    # Source: [-0-]     [-1-] [2] [-3-][-4-][-5-]
    # Target: [-0-][-1-]     [-2-] [3]   [-4-][-5-]
    #         |====|====|====|====|====|====|====|====|
    >>> source_start = [0, 10, 16, 20, 25, 30]
    >>> source_limit = [5, 15, 19, 25, 30, 35]
    >>> target_start = [0, 5, 15, 21, 27, 31]
    >>> target_limit = [5, 10, 20, 24, 32, 37]
    ```

    `result[i, j]` will be true at the following locations:

    * `[0, 0]` (always)
    * `[2, 2]` (if contained_by=True or partial_overlaps=True)
    * `[3, 3]` (if contains=True or partial_overlaps=True)
    * `[4, 4]` (if partial_overlaps=True)
    * `[5, 5]` (if partial_overlaps=True)
  """
  # The flag arguments must be genuine bools (a tensor here would silently
  # select the wrong comparison mode).
  _check_type(contains, 'contains', bool)
  _check_type(contained_by, 'contained_by', bool)
  _check_type(partial_overlap, 'partial_overlap', bool)
  scope_tensors = [source_start, source_limit, target_start, target_limit]
  with ops.name_scope(name, 'SpanOverlaps', scope_tensors):
    # Convert input tensors.
    source_start = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        source_start, name='source_start')
    source_limit = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        source_limit, name='source_limit')
    target_start = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        target_start, name='target_start')
    target_limit = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        target_limit, name='target_limit')
    span_tensors = [source_start, source_limit, target_start, target_limit]
    # Verify input tensor shapes and types.
    source_start.shape.assert_is_compatible_with(source_limit.shape)
    target_start.shape.assert_is_compatible_with(target_limit.shape)
    source_start.shape.assert_same_rank(target_start.shape)
    source_start.shape.assert_same_rank(target_limit.shape)
    source_limit.shape.assert_same_rank(target_start.shape)
    source_limit.shape.assert_same_rank(target_limit.shape)
    if not (source_start.dtype == target_start.dtype == source_limit.dtype ==
            target_limit.dtype):
      raise TypeError('source_start, source_limit, target_start, and '
                      'target_limit must all have the same dtype')
    ndims = set(
        [t.shape.ndims for t in span_tensors if t.shape.ndims is not None])
    assert len(ndims) <= 1  # because of assert_same_rank statements above.
    if all(not isinstance(t, ragged_tensor.RaggedTensor) for t in span_tensors):
      # All inputs dense: delegate straight to the elementwise implementation.
      return _span_overlaps(source_start, source_limit, target_start,
                            target_limit, contains, contained_by,
                            partial_overlap)
    elif all(isinstance(t, ragged_tensor.RaggedTensor) for t in span_tensors):
      if not ndims:
        raise ValueError('For ragged inputs, the shape.ndims of at least one '
                         'span tensor must be statically known.')
      if list(ndims)[0] == 2:
        # Single (ragged) batch dimension: handled by _span_overlaps directly.
        return _span_overlaps(source_start, source_limit, target_start,
                              target_limit, contains, contained_by,
                              partial_overlap)
      else:
        # Handle ragged batch dimension by recursion on values.
        row_splits = span_tensors[0].row_splits
        shape_checks = [
            check_ops.assert_equal(
                t.row_splits,
                row_splits,
                message='Mismatched ragged shapes for batch dimensions')
            for t in span_tensors[1:]
        ]
        with ops.control_dependencies(shape_checks):
          return ragged_tensor.RaggedTensor.from_row_splits(
              span_overlaps(source_start.values, source_limit.values,
                            target_start.values, target_limit.values, contains,
                            contained_by, partial_overlap), row_splits)
    else:
      # Mix of dense and ragged tensors.
      raise ValueError('Span tensors must all have the same ragged_rank')
def _span_overlaps(source_start, source_limit, target_start, target_limit,
                   contains, contained_by, partial_overlap):
  """Implementation of span_overlaps().

  If the inputs are ragged, then the source tensors must have exactly one
  batch dimension. (I.e., `B=1` in the param descriptions below.)

  Args:
    source_start: `<int>[D1...DB, source_size]`
    source_limit: `<int>[D1...DB, source_size]`
    target_start: `<int>[D1...DB, target_size]`
    target_limit: `<int>[D1...DB, target_size]`
    contains: `bool`
    contained_by: `bool`
    partial_overlap: `bool`

  Returns:
    `<bool>[D1...DB, source_size, target_size]`
  """
  if isinstance(source_start, ops.Tensor):
    # Reshape the source tensors to [D1...DB, source_size, 1] and the
    # target tensors to [D1...DB, 1, target_size], so we can use broadcasting.
    # In particular, elementwise_op(source_x, target_x) will have shape
    # [D1...DB, source_size, target_size].
    source_start = array_ops.expand_dims(source_start, -1)
    source_limit = array_ops.expand_dims(source_limit, -1)
    target_start = array_ops.expand_dims(target_start, -2)
    target_limit = array_ops.expand_dims(target_limit, -2)
    equal = math_ops.equal
    less_equal = math_ops.less_equal
    less = math_ops.less
    logical_and = math_ops.logical_and
    logical_or = math_ops.logical_or
  else:
    # Broadcast the source span indices to all have shape
    # [batch_size, (source_size), (target_size)].
    (source_start, source_limit) = _broadcast_ragged_sources_for_overlap(
        source_start, source_limit, target_start.row_splits)
    (target_start, target_limit) = _broadcast_ragged_targets_for_overlap(
        target_start, target_limit, source_start.row_splits)
    # Use map_flat_values to perform elementwise operations.
    equal = functools.partial(ragged_functional_ops.map_flat_values,
                              math_ops.equal)
    less_equal = functools.partial(ragged_functional_ops.map_flat_values,
                                   math_ops.less_equal)
    less = functools.partial(ragged_functional_ops.map_flat_values,
                             math_ops.less)
    logical_and = functools.partial(ragged_functional_ops.map_flat_values,
                                    math_ops.logical_and)
    logical_or = functools.partial(ragged_functional_ops.map_flat_values,
                                   math_ops.logical_or)
  # From here on, the dense and ragged cases share one comparison expression,
  # selected by the boolean mode flags (checked to be bools by the caller).
  if partial_overlap:
    # Non-empty intersection: one span's start falls inside the other span.
    return logical_or(
        logical_and(
            less_equal(source_start, target_start),
            less(target_start, source_limit)),
        logical_and(
            less_equal(target_start, source_start),
            less(source_start, target_limit)))
  elif contains and contained_by:
    # Either span fully encloses the other.
    return logical_or(
        logical_and(
            less_equal(source_start, target_start),
            less_equal(target_limit, source_limit)),
        logical_and(
            less_equal(target_start, source_start),
            less_equal(source_limit, target_limit)))
  elif contains:
    return logical_and(
        less_equal(source_start, target_start),
        less_equal(target_limit, source_limit))
  elif contained_by:
    return logical_and(
        less_equal(target_start, source_start),
        less_equal(source_limit, target_limit))
  else:
    # Default mode: only identical spans count as overlapping.
    return logical_and(
        equal(target_start, source_start), equal(source_limit, target_limit))
def _broadcast_ragged_targets_for_overlap(target_start, target_limit,
                                          source_splits):
  """Tiles the target span indices once per source span in the same batch.

  Args:
    target_start: `<int>[batch_size, (target_size)]`
    target_limit: `<int>[batch_size, (target_size)]`
    source_splits: `<int64>[batch_size, (source_size+1)]`

  Returns:
    A tuple `(tiled_target_start, tiled_target_limit)` of ragged tensors with
    shape `<int>[batch_size, (source_size), (target_size)]`, where:

    * `tiled_target_start[b, s, t] = target_start[b, t]`
    * `tiled_target_limit[b, s, t] = target_limit[b, t]`
  """
  # batch_ids[i] is the batch row that the i'th source span belongs to.
  batch_ids = segment_id_ops.row_splits_to_segment_ids(source_splits)

  def _tile_per_source(row_values):
    # Gather one copy of each batch's target row for every source span in that
    # batch, then regroup the copies so each source span owns one copy.
    return ragged_tensor.RaggedTensor.from_value_rowids(
        ragged_gather_ops.gather(row_values, batch_ids), batch_ids)

  return (_tile_per_source(target_start), _tile_per_source(target_limit))
def _broadcast_ragged_sources_for_overlap(source_start, source_limit,
                                          target_splits):
  """Repeats source indices for each target item in the same batch.

  Args:
    source_start: `<int>[batch_size, (source_size)]`
    source_limit: `<int>[batch_size, (source_size)]`
    target_splits: `<int64>[batch_size, (target_size+1)]`

  Returns:
    `<int>[batch_size, (source_size), (target_size)]`.
    A tuple of tensors `(tiled_source_start, tiled_source_limit)` where:

    * `tiled_target_start[b, s, t] = source_start[b, s]`
    * `tiled_target_limit[b, s, t] = source_limit[b, s]`
  """
  source_splits = source_start.row_splits
  # Number of target spans in each batch row.
  target_rowlens = target_splits[1:] - target_splits[:-1]
  source_batch_ids = segment_id_ops.row_splits_to_segment_ids(source_splits)
  # <int64>[sum(source_size[b] for b in range(batch_size))]
  # source_repeats[i] is the number of target spans in the batch that contains
  # source span i. We need to add a new ragged dimension that repeats each
  # source span this number of times.
  source_repeats = ragged_gather_ops.gather(target_rowlens, source_batch_ids)
  # <int64>[sum(source_size[b] for b in range(batch_size)) + 1]
  # The row_splits tensor for the inner ragged dimension of the result tensors.
  inner_splits = array_ops.concat([[0], math_ops.cumsum(source_repeats)],
                                  axis=0)
  # <int64>[sum(source_size[b] * target_size[b] for b in range(batch_size))]
  # Indices for gathering source indices.
  source_indices = segment_id_ops.row_splits_to_segment_ids(inner_splits)
  source_start = ragged_tensor.RaggedTensor.from_nested_row_splits(
      array_ops.gather(source_start.values, source_indices),
      [source_splits, inner_splits])
  source_limit = ragged_tensor.RaggedTensor.from_nested_row_splits(
      array_ops.gather(source_limit.values, source_indices),
      [source_splits, inner_splits])
  return source_start, source_limit
def span_alignment(source_start,
                   source_limit,
                   target_start,
                   target_limit,
                   contains=False,
                   contained_by=False,
                   partial_overlap=False,
                   multivalent_result=False,
                   name=None):
  """Return an alignment from a set of source spans to a set of target spans.

  The source and target spans are specified using B+1 dimensional tensors,
  with `B>=0` batch dimensions followed by a final dimension that lists the
  span offsets for each span in the batch:

  * The `i`th source span in batch `b1...bB` starts at
    `source_start[b1...bB, i]` (inclusive), and extends to just before
    `source_limit[b1...bB, i]` (exclusive).
  * The `j`th target span in batch `b1...bB` starts at
    `target_start[b1...bB, j]` (inclusive), and extends to just before
    `target_limit[b1...bB, j]` (exclusive).

  `result[b1...bB, i]` contains the index (or indices) of the target span that
  overlaps with the `i`th source span in batch `b1...bB`. The
  `multivalent_result` parameter indicates whether the result should contain
  a single span that aligns with the source span, or all spans that align with
  the source span.

  * If `multivalent_result` is false (the default), then `result[b1...bB, i]=j`
    indicates that the `j`th target span overlaps with the `i`th source span
    in batch `b1...bB`. If no target spans overlap with the `i`th target span,
    then `result[b1...bB, i]=-1`.
  * If `multivalent_result` is true, then `result[b1...bB, i, n]=j` indicates
    that the `j`th target span is the `n`th span that overlaps with the `i`th
    source span in in batch `b1...bB`.

  For a definition of span overlap, see the docstring for `span_overlaps()`.

  Args:
    source_start: A B+1 dimensional potentially ragged tensor with shape
      `[D1...DB, source_size]`: the start offset of each source span.
    source_limit: A B+1 dimensional potentially ragged tensor with shape
      `[D1...DB, source_size]`: the limit offset of each source span.
    target_start: A B+1 dimensional potentially ragged tensor with shape
      `[D1...DB, target_size]`: the start offset of each target span.
    target_limit: A B+1 dimensional potentially ragged tensor with shape
      `[D1...DB, target_size]`: the limit offset of each target span.
    contains: If true, then a source span is considered to overlap a target span
      when the source span contains the target span.
    contained_by: If true, then a source span is considered to overlap a target
      span when the source span is contained by the target span.
    partial_overlap: If true, then a source span is considered to overlap a
      target span when the source span partially overlaps the target span.
    multivalent_result: Whether the result should contain a single target span
      index (if `multivalent_result=False`) or a list of target span indices (if
      `multivalent_result=True`) for each source span.
    name: A name for the operation (optional).

  Returns:
    An int64 tensor with values in the range: `-1 <= result < target_size`.
    If `multivalent_result=False`, then the returned tensor has shape
    `[source_size]`, where `source_size` is the length of the `source_start`
    and `source_limit` input tensors. If `multivalent_result=True`, then the
    returned tensor has shape `[source_size, (num_aligned_target_spans)].

  #### Examples:
    Given the following source and target spans (with no batch dimensions):

    ```python
    >>> #         0    5    10   15   20   25   30   35   40   45   50   55   60
    >>> #         |====|====|====|====|====|====|====|====|====|====|====|====|
    >>> # Source: [-0-]     [-1-] [2] [3]    [4][-5-][-6-][-7-][-8-][-9-]
    >>> # Target: [-0-][-1-]     [-2-][-3-][-4-] [5] [6]  [7]  [-8-][-9-][10]
    >>> #         |====|====|====|====|====|====|====|====|====|====|====|====|
    >>> source_start=[0, 10, 16, 20, 27, 30, 35, 40, 45, 50]
    >>> source_limit=[5, 15, 19, 23, 30, 35, 40, 45, 50, 55]
    >>> target_start=[0, 5, 15, 20, 25, 31, 35, 42, 47, 52, 57]
    >>> target_limit=[5, 10, 20, 25, 30, 34, 38, 45, 52, 57, 61]
    >>> span_alignment_lists(source_starts, source_limits,
                             target_starts, target_limits)
    [0, -1, -1, -1, -1, -1, -1, -1, -1, -1]
    >>> span_alignment_lists(source_starts, source_limits,
    ...                      target_starts, target_limits,
    ...                      multivalent_result=True)
    [[0], [], [], [], [], [], [], [], [], []]
    >>> span_alignment_lists(source_starts, source_limits,
    ...                      target_starts, target_limits,
    ...                      contains=True)
    [ 0, -1, -1, -1, -1,  5,  6,  7, -1, -1]
    >>> span_alignment_lists(source_starts, source_limits,
    ...                      target_starts, target_limits,
    ...                      partial_overlap=True,
    ...                      multivalent_result=True)
    [[0], [], [2], [3], [4], [5], [6], [7], [8], [8, 9]]
  """
  scope_tensors = [source_start, source_limit, target_start, target_limit]
  with ops.name_scope(name, 'SpanAlignment', scope_tensors):
    source_start = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        source_start, name='source_start')
    source_limit = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        source_limit, name='source_limit')
    target_start = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        target_start, name='target_start')
    target_limit = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        target_limit, name='target_limit')
    # <bool>[D1...DB, source_size, target_size]
    # overlaps[b1...bB, i, j] is true if source span i overlaps target span j
    # (in batch b1...bB).
    overlaps = span_overlaps(source_start, source_limit, target_start,
                             target_limit, contains, contained_by,
                             partial_overlap)
    # <int64>[D1...DB, source_size, (num_aligned_spans)]
    # alignment[b1...bB, i, n]=j if target span j is the n'th target span
    # that aligns with source span i (in batch b1...bB).
    alignment = _multivalent_span_alignment(overlaps)
    if not multivalent_result:
      # <int64>[D1...DB, source_size]
      # alignment[b1...bB, i]=j if target span j is the last target span
      # that aligns with source span i, or -1 if no target spans align.
      # (reduce_max over an empty inner list yields the dtype minimum, which
      # the maximum(..., -1) clamps up to the documented -1 sentinel.)
      alignment = ragged_functional_ops.map_flat_values(
          math_ops.maximum, ragged_math_ops.reduce_max(alignment, axis=-1), -1)
    return alignment
def _multivalent_span_alignment(overlaps):
  """Returns the multivalent span alignment for a given overlaps tensor.

  Args:
    overlaps: `<int64>[D1...DB, source_size, target_size]`: `overlaps[b1...bB,
      i, j]` is true if source span `i` overlaps target span `j` (in batch
      `b1...bB`).

  Returns:
    `<int64>[D1...DB, source_size, (num_aligned_spans)]`:
    `result[b1...bB, i, n]=j` if target span `j` is the `n`'th target span
    that aligns with source span `i` (in batch `b1...bB`).
  """
  overlaps_ndims = overlaps.shape.ndims
  assert overlaps_ndims is not None  # guaranteed/checked by span_overlaps()
  assert overlaps_ndims >= 2
  # If there are multiple batch dimensions, then flatten them and recurse.
  if overlaps_ndims > 3:
    if not isinstance(overlaps, ragged_tensor.RaggedTensor):
      overlaps = ragged_tensor.RaggedTensor.from_tensor(
          overlaps, ragged_rank=overlaps.shape.ndims - 3)
    return overlaps.with_values(_multivalent_span_alignment(overlaps.values))
  elif overlaps_ndims == 2:  # no batch dimension
    assert not isinstance(overlaps, ragged_tensor.RaggedTensor)
    # where() yields [i, j] pairs of overlapping (source, target) positions;
    # grouping the j's by i gives each source span its list of targets.
    overlap_positions = array_ops.where(overlaps)
    return ragged_tensor.RaggedTensor.from_value_rowids(
        values=overlap_positions[:, 1],
        value_rowids=overlap_positions[:, 0],
        nrows=array_ops.shape(overlaps, out_type=dtypes.int64)[0])
  else:  # batch dimension
    if not isinstance(overlaps, ragged_tensor.RaggedTensor):
      overlaps = ragged_tensor.RaggedTensor.from_tensor(overlaps, ragged_rank=1)
    # Drop the batch dimension, compute per-source alignments on the values,
    # then restore the batch dimension with with_values().
    overlap_positions = ragged_where_op.where(overlaps.values)
    if isinstance(overlaps.values, ragged_tensor.RaggedTensor):
      overlaps_values_nrows = overlaps.values.nrows()
    else:
      overlaps_values_nrows = array_ops.shape(overlaps.values,
                                              out_type=dtypes.int64)[0]
    return overlaps.with_values(
        ragged_tensor.RaggedTensor.from_value_rowids(
            values=overlap_positions[:, 1],
            value_rowids=overlap_positions[:, 0],
            nrows=overlaps_values_nrows))
def _check_type(value, name, expected_type):
"""Raises TypeError if not isinstance(value, expected_type)."""
if not isinstance(value, expected_type):
raise TypeError('%s must be %s, not %s' % (name, expected_type.__name__,
type(value).__name__))
| 44.811037
| 80
| 0.667314
|
4a0cb208484377c0c963386c28c7c984bb200852
| 513
|
py
|
Python
|
server/djangoapp/admin.py
|
inzi95/agfzb-CloudAppDevelopment_Capstone
|
615f1021258f232c1cd4f1d28c571b77a401e32e
|
[
"Apache-2.0"
] | null | null | null |
server/djangoapp/admin.py
|
inzi95/agfzb-CloudAppDevelopment_Capstone
|
615f1021258f232c1cd4f1d28c571b77a401e32e
|
[
"Apache-2.0"
] | null | null | null |
server/djangoapp/admin.py
|
inzi95/agfzb-CloudAppDevelopment_Capstone
|
615f1021258f232c1cd4f1d28c571b77a401e32e
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from .models import CarMake, CarModel
# Register your models here.
# CarModelInline class
class CarModelInline(admin.StackedInline):
    # Lets CarModel rows be edited inline on the CarMake admin page
    # (wired up via CarMakeAdmin.inlines).
    model = CarModel
    extra = 2  # number of empty CarModel forms shown by default
# CarModelAdmin class
class CarModelAdmin(admin.ModelAdmin):
    # Standalone admin page for CarModel.
    # NOTE(review): assumes CarModel defines a `dealership_id` field — confirm
    # against models.py.
    list_filter = ['dealership_id']
    model = CarModel
class CarMakeAdmin(admin.ModelAdmin):
    # CarMake pages embed their CarModels via the stacked inline above.
    inlines = [CarModelInline]
# Register models here
admin.site.register(CarMake, CarMakeAdmin)
admin.site.register(CarModel, CarModelAdmin)
| 19
| 44
| 0.758285
|
4a0cb283699b1fade50ac2d6677e0e2385a92861
| 5,050
|
py
|
Python
|
py/abd/abdcmd_arcyd.py
|
aevri/phabricator-tools
|
ef7501bcaee83e98d168d16f64b3f73e744d3336
|
[
"Apache-2.0"
] | 150
|
2015-01-21T15:52:22.000Z
|
2021-11-09T05:53:36.000Z
|
py/abd/abdcmd_arcyd.py
|
aevri/phabricator-tools
|
ef7501bcaee83e98d168d16f64b3f73e744d3336
|
[
"Apache-2.0"
] | 72
|
2015-05-08T04:33:08.000Z
|
2017-01-27T09:37:36.000Z
|
py/abd/abdcmd_arcyd.py
|
aevri/phabricator-tools
|
ef7501bcaee83e98d168d16f64b3f73e744d3336
|
[
"Apache-2.0"
] | 38
|
2015-01-30T10:33:47.000Z
|
2021-11-09T05:53:30.000Z
|
"""Arcyd - daemon to watch git repos, create and land reviews automatically.
Intended to make it easy for large teams to start using Differential without
individual contributors needing to install and configure Arcanist.
Individual contributors are still free to use Arcanist if they wish, Arcyd
provides a zero-config layer over Git to get them started.
Arcyd does the following:
- watches for specially named branches and automatically creates revisions
- automatically updates revisions when the branch changes
- automatically lands revisions when they are approved
minimal user workflow:
$ git checkout feature/mywork
~ commit some work on the branch ~
$ git push origin feature/mywork:arcyd-review/mywork/master
.. Arcyd see's the 'arcyd-review' branch and creates a review ..
.. Reviewer accepts the change ..
.. Arcyd squashes the 'arcyd-review' branch onto master and deletes it ..
"""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# abdcmd_arcyd
#
# Public Functions:
# main
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import phlsys_subcommand
import abdcmd_addphabricator
import abdcmd_addrepo
import abdcmd_addrepohost
import abdcmd_fetch
import abdcmd_fsck
import abdcmd_init
import abdcmd_listrepos
import abdcmd_reload
import abdcmd_restart
import abdcmd_rmrepo
import abdcmd_start
import abdcmd_stop
_USAGE_EXAMPLES = """
usage example:
To setup arcyd using the example accounts baked into the 'phabricator-tools'
vagrant/puppet installation. (see ./README)
$ mkdir arcyd
$ cd arcyd
$ arcyd init --arcyd-email arcyd@localhost
$ arcyd add-phabricator \\
--name local \\
--instance-uri http://127.0.0.1/api/ \\
--review-url-format 'http://127.0.0.1/D{review}' \\
--admin-emails 'local-phab-admin@localhost' \\
--arcyd-user phab \\
--arcyd-cert \\
xnh5tpatpfh4pff4tpnvdv74mh74zkmsualo4l6mx7bb262zqr55vcachxgz7ru3lrvafgzqu\
zl3geyjxw426ujcyqdi2t4ktiv7gmrtlnc3hsy2eqsmhvgifn2vah2uidj6u6hhhxo2j3y2w6lcseh\
s2le4msd5xsn4f333udwvj6aowokq5l2llvfsl3efcucraawtvzw462q2sxmryg5y5rpicdk3lyr3u\
vot7fxrotwpi3ty2b2sa2kvlpf
$ arcyd add-repohost \\
--name local_repos \\
--repo-url-format '/path/to/repos/{}'
$ arcyd add-repo \\
--name local_1 \\
--repo-url local_1 \\
--repo-desc local_1 \\
--phabricator-name local \\
--repohost-name local_repos \\
--admin-emails 'local-repo1-admin@localhost'
$ arcyd start
run each command with the '--help' option for more information, e.g.:
$ arcyd init --help
"""
def main():
    """Parse the command line and dispatch to the chosen arcyd subcommand."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=__doc__,
        epilog=_USAGE_EXAMPLES)
    subparsers = parser.add_subparsers()

    # (subcommand name, implementing module) pairs, in help-listing order.
    subcommands = [
        ("init", abdcmd_init),
        ("list-repos", abdcmd_listrepos),
        ("add-phabricator", abdcmd_addphabricator),
        ("add-repohost", abdcmd_addrepohost),
        ("add-repo", abdcmd_addrepo),
        ("rm-repo", abdcmd_rmrepo),
        ("start", abdcmd_start),
        ("stop", abdcmd_stop),
        ("restart", abdcmd_restart),
        ("reload", abdcmd_reload),
        ("fsck", abdcmd_fsck),
        ("fetch", abdcmd_fetch),
    ]
    for command_name, command_module in subcommands:
        phlsys_subcommand.setup_parser(command_name, command_module, subparsers)

    args = parser.parse_args()
    # Each subcommand module registers its handler as args.func.
    return args.func(args)
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2015 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| 33.443709
| 79
| 0.665941
|
4a0cb2cb804b15dea33615a20817b48051bd9972
| 159
|
py
|
Python
|
tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_Lag1Trend_Seasonal_Hour_LSTM.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_Lag1Trend_Seasonal_Hour_LSTM.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | 1
|
2019-11-30T23:39:38.000Z
|
2019-12-01T04:34:35.000Z
|
tests/model_control/detailed/transf_BoxCox/model_control_one_enabled_BoxCox_Lag1Trend_Seasonal_Hour_LSTM.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
# Exercise the ozone model-control harness with a single enabled combination:
# BoxCox transform + Lag1Trend trend + hourly seasonality + LSTM AR model.
testmod.build_model( ['BoxCox'] , ['Lag1Trend'] , ['Seasonal_Hour'] , ['LSTM'] );
| 39.75
| 81
| 0.748428
|
4a0cb46e582566edae9f8806328df8ab47b4a519
| 14,599
|
py
|
Python
|
nnunet/network_architecture/custom_modules/conv_block_for_dense_residual_decoder.py
|
hasukmin12/nnUNet_MDD_UNet_with_Semi_Supervised
|
58c5665a5d89d1ad77038e5d6420be76fadab136
|
[
"Apache-2.0"
] | 3
|
2022-03-07T07:59:14.000Z
|
2022-03-17T08:50:42.000Z
|
nnunet/network_architecture/custom_modules/conv_block_for_dense_residual_decoder.py
|
hasukmin12/nnUNet_MDD_UNet_with_Semi_Supervised
|
58c5665a5d89d1ad77038e5d6420be76fadab136
|
[
"Apache-2.0"
] | 1
|
2022-03-07T13:21:42.000Z
|
2022-03-07T13:21:42.000Z
|
nnunet/network_architecture/custom_modules/conv_block_for_dense_residual_decoder.py
|
hasukmin12/nnUNet_MDD_UNet_with_Semi_Supervised
|
58c5665a5d89d1ad77038e5d6420be76fadab136
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from copy import deepcopy
from nnunet.network_architecture.custom_modules.helperModules import Identity
from torch import nn
class ConvDropoutNormReLU(nn.Module):
    """Conv -> (optional dropout) -> (optional norm) -> nonlinearity block."""

    def __init__(self, input_channels, output_channels, kernel_size, network_props):
        """Builds the block from a nnU-Net style properties dict.

        if network_props['dropout_op'] is None then no dropout
        if network_props['norm_op'] is None then no norm

        :param input_channels: number of channels of the incoming feature map
        :param output_channels: number of channels produced by the conv
        :param kernel_size: per-dimension kernel size (indexable)
        :param network_props: dict naming the conv/dropout/norm/nonlin ops and
            their kwargs (e.g. produced by nnU-Net's plans)
        """
        super(ConvDropoutNormReLU, self).__init__()

        # network_props is mutable; copy so callers' dicts are never modified.
        props = deepcopy(network_props)

        # 'same'-style padding for odd kernel sizes.
        padding = [(k - 1) // 2 for k in kernel_size]
        self.conv = props['conv_op'](input_channels, output_channels, kernel_size,
                                     padding=padding,
                                     **props['conv_op_kwargs'])

        if props['dropout_op'] is None:
            self.do = Identity()
        else:
            self.do = props['dropout_op'](**props['dropout_op_kwargs'])

        if props['norm_op'] is None:
            self.norm = Identity()
        else:
            self.norm = props['norm_op'](output_channels, **props['norm_op_kwargs'])

        self.nonlin = props['nonlin'](**props['nonlin_kwargs'])

        self.all = nn.Sequential(self.conv, self.do, self.norm, self.nonlin)

    def forward(self, x):
        return self.all(x)
class StackedConvLayers(nn.Module):
    """``num_convs`` ConvDropoutNormReLU blocks in sequence.

    Only the first block may change the channel count (and optionally the
    stride); the rest map output_channels -> output_channels.
    """

    def __init__(self, input_channels, output_channels, kernel_size, network_props, num_convs, first_stride=None):
        """
        :param network_props: dict of operator classes + kwargs
            (``dropout_op``/``norm_op`` set to None disable those steps)
        :param first_stride: stride applied by the first conv only, if given
        """
        super(StackedConvLayers, self).__init__()
        props = deepcopy(network_props)  # never mutate the caller's dict
        first_props = deepcopy(props)
        if first_stride is not None:
            first_props['conv_op_kwargs']['stride'] = first_stride

        stages = [ConvDropoutNormReLU(input_channels, output_channels, kernel_size, first_props)]
        stages.extend(ConvDropoutNormReLU(output_channels, output_channels, kernel_size, props)
                      for _ in range(num_convs - 1))
        self.convs = nn.Sequential(*stages)

    def forward(self, x):
        return self.convs(x)
class Attention_block(nn.Module):
    """Additive attention gate over three feature maps.

    x, y and z are each projected to F_int channels with a 1x1x1 conv + BN,
    summed, passed through LeakyReLU, reduced to a single-channel gate, and
    the gate multiplies g element-wise (broadcast over channels).
    """

    def __init__(self, F_x, F_y, F_z, F_int):
        super(Attention_block, self).__init__()

        def project(in_ch):
            # 1x1x1 conv + batch norm projecting a feature map to F_int channels
            return nn.Sequential(
                nn.Conv3d(in_ch, F_int, kernel_size=1, stride=1, padding=0, bias=True),
                nn.BatchNorm3d(F_int),
            )

        self.W_x = project(F_x)
        self.W_y = project(F_y)
        self.W_z = project(F_z)
        self.psi = nn.Sequential(
            nn.Conv3d(F_int, 1, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm3d(1),
            # the author deliberately swapped the usual Sigmoid for LeakyReLU
            # (see the surrounding experiment notes)
            nn.LeakyReLU(negative_slope=0.01, inplace=True),
        )
        self.relu = nn.LeakyReLU(negative_slope=0.01, inplace=True)

    def forward(self, g, x, y, z):
        fused = self.W_x(x) + self.W_y(y) + self.W_z(z)
        gate = self.psi(self.relu(fused))
        return g * gate
# sigmoid를 LeakyReLU로도 바꿔보고
# sum = 1.5로도 바꿔보면서 학습해보자
class DenseDownBlock_first(nn.Module):
    """First dense down-sampling block of the encoder: two convs linked by
    channel concatenation, followed by 2x2x2 max-pooling.

    forward() returns a 5-tuple (pooled, pre-pool, input, first concat,
    second concat) so later stages can reuse the intermediate feature maps.
    """

    def __init__(self, in_planes, out_planes, kernel_size, props, stride=None):
        """
        This is the conv bn nonlin conv bn nonlin kind of block
        :param in_planes: number of input channels
        :param out_planes: nominal output channels (bookkeeping only in this block)
        :param kernel_size: per-dimension conv kernel size (list/tuple)
        :param props: dict with conv/norm/nonlin classes and their kwargs
        :param stride: if not None, conv1 gets stride (1,1,1) via a copied kwargs dict
        """
        super().__init__()
        # if props['dropout_op_kwargs'] is None and props['dropout_op_kwargs'] > 0:
        #     raise NotImplementedError("ResidualBottleneckBlock does not yet support dropout!")
        self.kernel_size = kernel_size
        # NOTE(review): mutates the caller's props dict in place; the *Layer
        # wrappers deepcopy network_props before handing it to blocks.
        props['conv_op_kwargs']['stride'] = 1
        self.stride = stride
        self.props = props
        self.out_planes = out_planes
        self.in_planes = in_planes
        self.bottleneck_planes = out_planes // 4  # unused below; kept for parity with sibling blocks

        if stride is not None:
            kwargs_conv1 = deepcopy(props['conv_op_kwargs'])
            kwargs_conv1['stride'] = (1,1,1)
        else:
            kwargs_conv1 = props['conv_op_kwargs']

        # 2x2x2 max-pool applied at the end of forward()
        self.pool_op = nn.MaxPool3d(2,stride=2)

        # small version
        # conv1: C -> C, 'same' padding
        self.conv1 = props['conv_op'](in_planes, in_planes, kernel_size,
                                      padding=[(i - 1) // 2 for i in kernel_size],
                                      **kwargs_conv1)
        self.norm1 = props['norm_op'](in_planes, **props['norm_op_kwargs'])
        self.nonlin1 = props['nonlin'](**props['nonlin_kwargs'])

        # conv2: 2C -> C (consumes the concat of conv1's output and the input)
        self.conv2 = props['conv_op'](in_planes * 2, in_planes, kernel_size,
                                      padding=[(i - 1) // 2 for i in kernel_size],
                                      **props['conv_op_kwargs'])
        self.norm2 = props['norm_op'](in_planes, **props['norm_op_kwargs'])
        self.nonlin2 = props['nonlin'](**props['nonlin_kwargs'])

    def forward(self, x):
        # small version
        residual_1 = x  # C channels
        out_1 = self.nonlin1(self.norm1(self.conv1(x)))  # C
        residual_2 = out_1  # NOTE(review): assigned but never used afterwards
        concat_1 = torch.cat((out_1, residual_1), dim=1)  # 2C
        residual_out = self.nonlin2(self.norm2(self.conv2(concat_1)))  # C
        concat_2 = torch.cat((concat_1, residual_1), dim=1)  # 3C
        out = self.pool_op(residual_out)
        # (pooled, pre-pool, input, first concat, second concat)
        return out , residual_out, residual_1, concat_1, concat_2
# 여기서 DenseBlock 구현
class DenseDownBlock_2(nn.Module):
    """Dense down-sampling block: two convs with dense (concatenation) links,
    a 1x1x1 compression conv, then 2x2x2 max-pooling.

    forward() returns a 5-tuple (pooled, pre-pool, input, first concat,
    second concat) so the decoder can reuse the intermediate feature maps.
    """

    def __init__(self, in_planes, out_planes, kernel_size, props, stride=None):
        """
        This is the conv bn nonlin conv bn nonlin kind of block
        :param in_planes: number of input channels
        :param out_planes: nominal output channels (bookkeeping only in this block)
        :param kernel_size: per-dimension conv kernel size (list/tuple)
        :param props: dict with conv/norm/nonlin classes and their kwargs
        :param stride: if not None, conv1 gets stride (1,1,1) via a copied kwargs dict
        """
        super().__init__()
        # if props['dropout_op_kwargs'] is None and props['dropout_op_kwargs'] > 0:
        #     raise NotImplementedError("ResidualBottleneckBlock does not yet support dropout!")
        self.kernel_size = kernel_size
        # NOTE(review): mutates the caller's props dict in place; the *Layer
        # wrappers deepcopy network_props before handing it to blocks.
        props['conv_op_kwargs']['stride'] = 1
        self.stride = stride
        self.props = props
        self.out_planes = out_planes
        self.in_planes = in_planes
        self.bottleneck_planes = out_planes // 4  # unused below; kept for parity with sibling blocks

        if stride is not None:
            kwargs_conv1 = deepcopy(props['conv_op_kwargs'])
            kwargs_conv1['stride'] = (1,1,1)
        else:
            kwargs_conv1 = props['conv_op_kwargs']

        # 2x2x2 max-pool applied at the end of forward()
        self.pool_op = nn.MaxPool3d(2,stride=2)

        # conv1: C -> C, 'same' padding
        self.conv1 = props['conv_op'](in_planes, in_planes, kernel_size,
                                      padding=[(i - 1) // 2 for i in kernel_size],
                                      **kwargs_conv1)
        self.norm1 = props['norm_op'](in_planes, **props['norm_op_kwargs'])
        self.nonlin1 = props['nonlin'](**props['nonlin_kwargs'])

        # conv2: 2C -> 2C (after the first dense concat)
        self.conv2 = props['conv_op'](in_planes * 2, in_planes*2, kernel_size,
                                      padding=[(i - 1) // 2 for i in kernel_size],
                                      **props['conv_op_kwargs'])
        self.norm2 = props['norm_op'](in_planes*2, **props['norm_op_kwargs'])
        self.nonlin2 = props['nonlin'](**props['nonlin_kwargs'])

        # conv3: 1x1x1 compression conv, 4C -> 2C
        self.conv3 = props['conv_op'](in_planes * 4, in_planes * 2, [1 for _ in kernel_size],
                                      padding=[0 for i in kernel_size],
                                      **props['conv_op_kwargs'])
        self.norm3 = props['norm_op'](in_planes * 2, **props['norm_op_kwargs'])
        self.nonlin3 = props['nonlin'](**props['nonlin_kwargs'])

    def forward(self, x):
        residual_1 = x  # C channels
        out_1 = self.nonlin1(self.norm1(self.conv1(x)))  # C
        residual_2 = out_1
        concat_1 = torch.cat((out_1, residual_1), dim=1)  # 2C
        out = self.nonlin2(self.norm2(self.conv2(concat_1)))  # 2C
        concat_2 = torch.cat((out, residual_1), dim=1)  # 2C + C = 3C
        concat_2 = torch.cat((concat_2,residual_2), dim = 1)  # 3C + C = 4C
        residual_out = self.nonlin3(self.norm3(self.conv3(concat_2)))  # compressed to 2C
        out = self.pool_op(residual_out)
        # (pooled, pre-pool, input, first concat, second concat)
        return out ,residual_out, residual_1, concat_1, concat_2
class DenseDownLayer_2(nn.Module):
    """Sequence of ``num_blocks`` dense down-sampling blocks
    (DenseDownBlock_2 by default); only the first may change channels/stride."""

    def __init__(self, input_channels, output_channels, kernel_size, network_props, num_blocks, first_stride=None, block=DenseDownBlock_2):
        super().__init__()
        props = deepcopy(network_props)  # guard the caller's dict; blocks mutate it
        stages = [block(input_channels, output_channels, kernel_size, props, first_stride)]
        stages += [block(output_channels, output_channels, kernel_size, props)
                   for _ in range(num_blocks - 1)]
        self.convs = nn.Sequential(*stages)

    def forward(self, x):
        return self.convs(x)
class DenseDownLayer_first(nn.Module):
    """Sequence of ``num_blocks`` encoder-entry dense blocks
    (DenseDownBlock_first by default); only the first may change channels/stride."""

    def __init__(self, input_channels, output_channels, kernel_size, network_props, num_blocks, first_stride=None, block=DenseDownBlock_first):
        super().__init__()
        props = deepcopy(network_props)  # guard the caller's dict; blocks mutate it
        stages = [block(input_channels, output_channels, kernel_size, props, first_stride)]
        stages += [block(output_channels, output_channels, kernel_size, props)
                   for _ in range(num_blocks - 1)]
        self.convs = nn.Sequential(*stages)

    def forward(self, x):
        return self.convs(x)
# 여기는 Dense_Up_Block 구현하기
class ResidualUpBlock(nn.Module):
    """Decoder block: a 1x1 projection that halves the channel count,
    followed by two residual convs at the reduced width.

    forward() output has ``in_planes // 2`` channels at the input resolution.
    """

    def __init__(self, in_planes, out_planes, kernel_size, props, stride=None):
        """
        This is the conv bn nonlin conv bn nonlin kind of block
        :param in_planes: input channels (must be even; halved internally)
        :param out_planes: kept for interface parity (bookkeeping only)
        :param props: dict with conv/norm/nonlin classes and their kwargs
        :param stride: if not None, conv1 gets stride (1,1,1) via a copied kwargs dict
        """
        super().__init__()
        self.kernel_size = kernel_size
        # NOTE(review): mutates the caller's props dict in place, matching the
        # behaviour of the sibling blocks in this file.
        props['conv_op_kwargs']['stride'] = 1
        self.stride = stride
        self.props = props
        self.out_planes = out_planes
        self.in_planes = in_planes

        if stride is None:
            kwargs_conv1 = props['conv_op_kwargs']
        else:
            kwargs_conv1 = deepcopy(props['conv_op_kwargs'])
            kwargs_conv1['stride'] = (1,1,1)

        aim_planes = in_planes // 2
        same_pad = [(k - 1) // 2 for k in kernel_size]
        no_pad = [0 for _ in kernel_size]
        ones = [1 for _ in kernel_size]

        # 1x1 projection halving the channel count
        self.conv0 = props['conv_op'](in_planes, aim_planes, ones,
                                      padding=no_pad, **props['conv_op_kwargs'])
        self.norm0 = props['norm_op'](aim_planes, **props['norm_op_kwargs'])
        self.nonlin0 = props['nonlin'](**props['nonlin_kwargs'])

        self.conv1 = props['conv_op'](aim_planes, aim_planes, kernel_size,
                                      padding=same_pad, **kwargs_conv1)
        self.norm1 = props['norm_op'](aim_planes, **props['norm_op_kwargs'])
        self.nonlin1 = props['nonlin'](**props['nonlin_kwargs'])

        self.conv2 = props['conv_op'](aim_planes, aim_planes, kernel_size,
                                      padding=same_pad, **props['conv_op_kwargs'])
        self.norm2 = props['norm_op'](aim_planes, **props['norm_op_kwargs'])
        self.nonlin2 = props['nonlin'](**props['nonlin_kwargs'])

        # Built but not used by forward(); the final 1x1 path is disabled there.
        self.conv3 = props['conv_op'](aim_planes, aim_planes, ones,
                                      padding=no_pad, **props['conv_op_kwargs'])
        self.norm3 = props['norm_op'](aim_planes, **props['norm_op_kwargs'])
        self.nonlin3 = props['nonlin'](**props['nonlin_kwargs'])

    def forward(self, x):
        # project down to half the channels, then two residual conv stages
        h = self.nonlin0(self.norm0(self.conv0(x)))
        h = self.nonlin1(self.norm1(self.conv1(h))) + h
        return self.nonlin2(self.norm2(self.conv2(h))) + h
class ResidualUpLayer(nn.Module):
    """Stack of ``num_blocks`` decoder blocks (ResidualUpBlock by default);
    only the first may change channels/stride."""

    def __init__(self, input_channels, output_channels, kernel_size, network_props, num_blocks, first_stride=None, block=ResidualUpBlock):
        super().__init__()
        props = deepcopy(network_props)  # guard the caller's dict; blocks mutate it
        stages = [block(input_channels, output_channels, kernel_size, props, first_stride)]
        stages += [block(output_channels, output_channels, kernel_size, props)
                   for _ in range(num_blocks - 1)]
        self.convs = nn.Sequential(*stages)

    def forward(self, x):
        return self.convs(x)
| 32.156388
| 143
| 0.601069
|
4a0cb488d7b2f2a70ead84a2ff831d5c1c232921
| 144
|
py
|
Python
|
mayiutils/algorithm/algorithmset/__init__.py
|
mayi140611/mayiutils
|
5340d7bd4590e2a41afd5d02ffc569745d67c866
|
[
"Apache-2.0"
] | null | null | null |
mayiutils/algorithm/algorithmset/__init__.py
|
mayi140611/mayiutils
|
5340d7bd4590e2a41afd5d02ffc569745d67c866
|
[
"Apache-2.0"
] | null | null | null |
mayiutils/algorithm/algorithmset/__init__.py
|
mayi140611/mayiutils
|
5340d7bd4590e2a41afd5d02ffc569745d67c866
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# encoding: utf-8
"""
@author: Ian
@contact:yongguiluo@hotmail.com
@file: __init__.py.py
@time: 2019/3/5 15:19
牛逼的算法汇总
"""
| 11.076923
| 31
| 0.673611
|
4a0cb4907ac2b9fc8982109a0bbbd790a3cbab25
| 3,530
|
py
|
Python
|
vb_simulation_pkgs/pkg_vb_sim/scripts/node_merge_tf_tress.py
|
1arshan/Eyantra_Virgi-bot
|
30ebe99fec6a0d4767fe94468b21bc00091bc527
|
[
"MIT"
] | 1
|
2021-09-09T04:41:28.000Z
|
2021-09-09T04:41:28.000Z
|
vb_simulation_pkgs/pkg_vb_sim/scripts/node_merge_tf_tress.py
|
1arshan/Eyantra_Virgi-bot
|
30ebe99fec6a0d4767fe94468b21bc00091bc527
|
[
"MIT"
] | null | null | null |
vb_simulation_pkgs/pkg_vb_sim/scripts/node_merge_tf_tress.py
|
1arshan/Eyantra_Virgi-bot
|
30ebe99fec6a0d4767fe94468b21bc00091bc527
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2.7
import rospy
# Because of transformations
import tf_conversions
import tf2_ros
import geometry_msgs.msg
import turtlesim.msg
def handle_turtle_pose(msg, turtlename):
    # NOTE(review): unused leftover from the tf2 turtlesim tutorial -- it
    # builds a broadcaster and an empty TransformStamped but never fills or
    # sends them, and nothing in this script subscribes with this callback.
    br = tf2_ros.TransformBroadcaster()
    t = geometry_msgs.msg.TransformStamped()
if __name__ == '__main__':
    rospy.init_node('tf2_broadcaster_tree_merge')

    br = tf2_ros.TransformBroadcaster()
    t = geometry_msgs.msg.TransformStamped()

    rospy.loginfo("TF Broadcaster Started.")

    # Identity-rotation links hooking both UR5 TF trees onto the common
    # 'world' frame: (child frame, y offset in metres). Values match the
    # original hard-coded publishes.
    WORLD_LINKS = [
        ("ur5_2_tf/world", 0.0),
        ("ur5_1_tf/world", 7.0),
    ]

    # The rotation never changes, so compute the quaternion once.
    q = tf_conversions.transformations.quaternion_from_euler(0, 0, 0)

    # FIX: the original while-loop had no sleep, so it busy-spun at 100% CPU
    # and flooded /tf. 10 Hz is ample for re-broadcasting a static transform
    # (re-broadcast at all so late subscribers still receive the link).
    rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        now = rospy.Time.now()
        for child_frame, y_offset in WORLD_LINKS:
            t.header.stamp = now
            t.header.frame_id = "world"
            t.child_frame_id = child_frame
            t.transform.translation.x = 0.0
            t.transform.translation.y = y_offset
            t.transform.translation.z = 0.0
            t.transform.rotation.x = q[0]
            t.transform.rotation.y = q[1]
            t.transform.rotation.z = q[2]
            t.transform.rotation.w = q[3]
            br.sendTransform(t)
        rate.sleep()
    # No rospy.spin() needed: the loop above already blocks until shutdown.
| 31.238938
| 75
| 0.620397
|
4a0cb4d5c533fbe6bb88d248380cc65cd5e8d49f
| 2,306
|
py
|
Python
|
04-network/utils.py
|
rionbr/meionav
|
3e0cca6bea206023ea3a3b322c5bfdd2081e9842
|
[
"MIT"
] | 1
|
2019-08-15T10:50:38.000Z
|
2019-08-15T10:50:38.000Z
|
04-network/utils.py
|
rionbr/spermnet
|
3e0cca6bea206023ea3a3b322c5bfdd2081e9842
|
[
"MIT"
] | null | null | null |
04-network/utils.py
|
rionbr/spermnet
|
3e0cca6bea206023ea3a3b322c5bfdd2081e9842
|
[
"MIT"
] | null | null | null |
import os
import gzip
from io import StringIO
import pandas as pd
import networkx as nx
from collections import defaultdict, Counter
def transpose_variable_across_layers(G, variable, combination='sum'):
    """Copy a node attribute across 'cross' edges to the other layer.

    For every node i that carries `variable`, the value is pushed along each
    edge of i whose 'type' attribute is 'cross'; nodes receiving multiple
    values combine them by summing or by majority vote.

    :param G: networkx graph with 'cross'-typed inter-layer edges
    :param variable: node-attribute name to transpose
    :param combination: 'sum' or 'majority'
    :raises TypeError: if combination is not 'sum' or 'majority'
    :return: G, with the combined values set on the receiving nodes
    """
    # FIX: the original built TypeError(...) without `raise`, so an invalid
    # `combination` silently did nothing. Validate up front instead.
    if combination not in ('sum', 'majority'):
        raise TypeError("Combination must be either 'sum', or 'majority'.")

    dict_i_values = {i: d[variable] for i, d in G.nodes(data=True) if d.get(variable, None) is not None}
    dict_j_values = defaultdict(list)
    for i, v in dict_i_values.items():
        cross_edges = [j for _i, j, d in G.edges(i, data=True) if d.get('type', None) == 'cross']
        for j in cross_edges:
            dict_j_values[j].append(v)

    # Combine multiple incoming values per node
    if combination == 'sum':
        dict_j_values = {k: sum(vals) for k, vals in dict_j_values.items()}
    else:  # 'majority'
        dict_j_values = {k: Counter(vals).most_common()[0][0] for k, vals in dict_j_values.items()}

    # Set attributes on the receiving layer's nodes
    nx.set_node_attributes(G, values=dict_j_values, name=variable)
    return G
def get_network_layer(G, layer=''):
    """Return a copy of the subgraph of nodes whose 'layer' attribute equals `layer`."""
    nodes = [n for n, data in G.nodes(data=True) if data.get('layer') == layer]
    return G.subgraph(nodes).copy()
def get_network_by_attribute(G, attribute='', value=''):
    """Return a copy of the subgraph of nodes whose `attribute` equals `value`."""
    nodes = [n for n, data in G.nodes(data=True) if data.get(attribute) == value]
    return G.subgraph(nodes).copy()
def get_network_largest_connected_component(G):
    """Return a copy of G restricted to its largest connected component."""
    biggest = max(nx.connected_components(G), key=len)
    return G.subgraph(biggest).copy()
def open_undefined_last_column_files(filepath, skiprows=0, n_fixed_cols=None, sep='\t', *args, **kwargs):
    """Load a gzipped StringDB-style file whose column count varies per line.

    Each line is reduced to its first `n_fixed_cols` fields plus its final
    field, joined by tabs, and the result is parsed with pandas.read_csv.

    :param filepath: path to a gzip-compressed text file
    :param skiprows: number of leading lines to discard before parsing
    :param n_fixed_cols: how many leading columns to keep from each line
    :param sep: field separator used to split the raw lines
    :param args, kwargs: forwarded to pandas.read_csv
    :return: pandas DataFrame
    """
    lines = []
    with gzip.open(filepath, 'rt') as f:
        # Skip header
        for _ in range(skiprows):
            f.readline()
        # Keep the fixed leading columns plus the last field of each line
        # (the last field retains its trailing newline, terminating the row).
        for line in f:
            fields = line.split(sep)
            lines.append(u'\t'.join(fields[:n_fixed_cols]) + u'\t' + fields[-1])
    # FIX: join once instead of the original O(n^2) `ios += ...` accumulation.
    return pd.read_csv(StringIO(u''.join(lines)), sep='\t', encoding='utf-8', *args, **kwargs)
def ensurePathExists(path):
    """Create the parent directory of `path` if it does not already exist.

    FIX: the original called os.makedirs('') for bare filenames (raising
    FileNotFoundError) and had a check-then-create race between exists() and
    makedirs(); `exist_ok=True` plus the truthiness guard handles both.
    """
    dirname = os.path.dirname(path)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
| 37.803279
| 124
| 0.658283
|
4a0cb5b22453e773504a7a994ecc2a1f0e406554
| 4,744
|
py
|
Python
|
CORDEX_netcdf2csv.py
|
lorincmeszaros/stochastic-climate-generator
|
5d5d6af2cbcca1eb5caba50f7e6188609aa32c31
|
[
"MIT"
] | null | null | null |
CORDEX_netcdf2csv.py
|
lorincmeszaros/stochastic-climate-generator
|
5d5d6af2cbcca1eb5caba50f7e6188609aa32c31
|
[
"MIT"
] | null | null | null |
CORDEX_netcdf2csv.py
|
lorincmeszaros/stochastic-climate-generator
|
5d5d6af2cbcca1eb5caba50f7e6188609aa32c31
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Read and extracting data in NetCDF 2D array Grids Files
For product: Cordex EUR-11
Note: Check information of nc files, because it relates to #Root_name
Items need to be addressed:
1 - path_folder, data_name
2 - site_name, site_name_ful, lati, loni in Lookup_WZsite.xlsx
5 - out_path
Function: Using numpy and KD-trees with netCDF data, by @author: unidata
URL: r'https://github.com/Unidata/unidata-python-workshop/blob/master/notebooks/netCDF/netcdf-by-coordinates.ipynb'
"""
#%%
import numpy as np
import pandas as pd
import netCDF4
from math import pi
from numpy import cos,sin
from scipy.spatial import cKDTree
import glob
import os
###===================================================================
## 1- Monitoring stations [Manual Definition]
Lookup_WZsite = pd.read_excel(r'Lookup_WZsite.xlsx')
site_name = Lookup_WZsite['site_name'][0:11]
site_name_ful = Lookup_WZsite['site_name_ful'][0:11]
lati = Lookup_WZsite['lati'][0:11]
loni = Lookup_WZsite['loni'][0:11]
# FIX: materialise as a list -- the extraction loop below indexes
# location[i], which a one-shot zip iterator does not support in Python 3.
location = list(zip(lati, loni))
# FIX: the trailing note was bare code ("... - double check locations"),
# a SyntaxError that prevented the whole script from parsing.
print(list(zip(site_name, location)))  # double-check locations
#[Finish manual definition]
###===================================================================
## 2- Function: Looking up array indices using KD-Tree
def kdtree_fast(latvar, lonvar, lat0, lon0):
    """Return the (iy, ix) indices of the curvilinear-grid cell nearest to (lat0, lon0).

    Every grid point is mapped onto the unit sphere; the nearest neighbour in
    3-D Euclidean distance is then also the nearest in great-circle distance,
    so a cKDTree over the (x, y, z) triples answers the query in O(log n).
    """
    deg2rad = pi / 180.0  # trigonometry needs angles in radians
    lat_rad = latvar[:] * deg2rad
    lon_rad = lonvar[:] * deg2rad

    # Unit-sphere Cartesian coordinates of every grid point, one row each.
    xyz = np.column_stack((np.ravel(cos(lat_rad) * cos(lon_rad)),
                           np.ravel(cos(lat_rad) * sin(lon_rad)),
                           np.ravel(sin(lat_rad))))
    tree = cKDTree(xyz)

    qlat = lat0 * deg2rad
    qlon = lon0 * deg2rad
    _dist_sq, flat_index = tree.query([cos(qlat) * cos(qlon),
                                       cos(qlat) * sin(qlon),
                                       sin(qlat)])
    # Convert the flat index back to (row, column) on the 2-D grid.
    return np.unravel_index(flat_index, lat_rad.shape)
###===================================================================
#%%
## 3 - READ NC FILES
# [Manual define]=====================================================
path_folder= r'\CORDEX\ncf'
data_name= [#'pr',
            'rsds',
            'tas',
            'uas',
            'vas',
            'clt',
            'hurs',
            'ps']
c=0
for dname in data_name:
    data_path= os.path.join(path_folder, dname)
    # One driving GCM at a time; swap the pattern for the alternatives below.
    ncf= glob.glob(data_path + "_IPSL-IPSL-CM5A-MR_*.nc")
    #_CNRM-CERFACS-CNRM-CM5_
    #_ICHEC-EC-EARTH_
    #_IPSL-IPSL-CM5A-MR_
    #_MOHC-HadGEM2-ES_
    #_MPI-M-MPI-ESM-LR_
    ###===================================================================
    ## 4 - Import data from files
    for nc_i in ncf:
        # NOTE(review): rebinds the loop variable from a path string to an
        # open Dataset; the files are never explicitly closed.
        nc_i= netCDF4.Dataset(nc_i,'r')
        latvar= nc_i.variables['lat']
        lonvar= nc_i.variables['lon']
        lat0= latvar[0]
        lon0= lonvar[0]
        # Time variables
        time_var= nc_i.variables['time']
        time_range= nc_i.variables['time'][:]  # NOTE(review): unused below
        time_date= netCDF4.num2date(time_var[:],time_var.units)
        print ("Reading file ", nc_i.driving_experiment)
        print ('Extracting variable:', dname)
        print ('Start date - End date:', time_date[0],time_date[-1])
        print (' ')
        ###===================================================================
        ## 5 - EXTRACTING DATA
        Root_name= dname+'_'+nc_i.driving_model_id+'_'+nc_i.experiment_id
        out_path= r'\CORDEX\TS'
        for i in range(len(site_name)):
            out_name= (Root_name+'_'+ site_name_ful[i]+'.csv')
            # Nearest grid cell to the station via the unit-sphere kd-tree.
            # NOTE(review): location[i] requires `location` to be a list;
            # it is created with zip() in section 1 -- confirm it was
            # materialised, otherwise this fails under Python 3.
            iy,ix = kdtree_fast(latvar, lonvar, location[i][0], location[i][1])
            #print ('Exact Location lat-lon:', location[i])
            #print ('Closest lat-lon:', latvar[iy,ix], lonvar[iy,ix])
            #print ('Array indices [iy,ix]=', iy, ix)
            data_i= nc_i.variables[dname][:,iy,ix]
            data_ts= pd.Series(data_i, index= time_date, name= dname )
            print ('Writing location ',site_name_ful[i])
            print ('NC file extracting (%) ', (i+1)*100/len(site_name ))
            ###===================================================================
            ## 6 - PRINTING to *.CSV
            data_ts.to_csv(os.path.join(out_path,out_name))
    #Print overall progress
    c=c+1
    print ('Progress (%): ', (c)*100/len(data_name))
###===================================================================
| 38.258065
| 116
| 0.537099
|
4a0cb6832a8fe0d070e7d4d2f1ccc12cc43e538a
| 2,510
|
py
|
Python
|
conceptnet5/vectors/miniaturize.py
|
MattCurryCom/conceptnet5
|
a16d94e635aee3d35a22aa04fcad7bb87ce927d8
|
[
"Apache-2.0"
] | 1
|
2018-11-27T17:00:57.000Z
|
2018-11-27T17:00:57.000Z
|
conceptnet5/vectors/miniaturize.py
|
MattCurryCom/conceptnet5
|
a16d94e635aee3d35a22aa04fcad7bb87ce927d8
|
[
"Apache-2.0"
] | null | null | null |
conceptnet5/vectors/miniaturize.py
|
MattCurryCom/conceptnet5
|
a16d94e635aee3d35a22aa04fcad7bb87ce927d8
|
[
"Apache-2.0"
] | null | null | null |
import wordfreq
import numpy as np
import pandas as pd
from conceptnet5.uri import split_uri
from conceptnet5.languages import CORE_LANGUAGES
from .debias import de_bias_frame
def term_freq(term):
    """
    Get an estimate of the frequency of this term from the 'wordfreq' library.
    When miniaturizing, we use this as a cutoff for which words to include
    in the vocabulary.

    Because we have the most data for English, we allow lower word frequencies
    in English (by reading in the 'large' list, whose frequencies can go
    below 1e-6).
    """
    _c, lang, text = split_uri(term)[:3]
    if lang == 'en':
        return wordfreq.word_frequency(text, 'en', 'large')
    if lang in CORE_LANGUAGES:
        return wordfreq.word_frequency(text, lang)
    # Terms outside the core languages are treated as unseen.
    return 0.
def miniaturize(frame, other_vocab=None, k=300, debias=True):
    """
    Produce a small matrix with good coverage of English and reasonable
    coverage of the other 'core languages' in ConceptNet. Three things that
    make the matrix smaller are:

    - Vocabulary pruning
    - Dimensionality reduction (if k < 300)
    - Quantization to 8-bit ints

    With `debias=True` (the default), this will run the de-biasing process
    after dimensionality reduction and before quantization. This is more
    effective than running it entirely before or after miniaturization.
    """
    # In practice, wordfreq doesn't even have single words with frequencies
    # below 1e-8, so this could just as well say 'term_freq(term) > 0'.
    # But this cutoff is clearer and adjustable.
    #
    # Non-English languages use terms with frequency 1e-6 or greater, because
    # only that much of the list has been loaded.
    # Base vocabulary: single words (no '_') above the frequency cutoff.
    vocab1 = [term for term in frame.index if '_' not in term
              and term_freq(term) >= 1e-8]
    vocab_set = set(vocab1)
    if other_vocab is not None:
        # Multi-word terms are kept only if the caller-supplied vocabulary
        # also contains them, capped at the first 20000 matches.
        extra_vocab = [term for term in other_vocab if '_' in term and
                       term in frame.index and term not in vocab_set]
        extra_vocab = extra_vocab[:20000]
    else:
        extra_vocab = []

    vocab = vocab1 + extra_vocab
    smaller = frame.loc[vocab]
    U, _S, _Vt = np.linalg.svd(smaller, full_matrices=False)
    # Free large intermediates promptly -- presumably to keep peak memory
    # down while the SVD results are still live.
    del smaller, _S, _Vt, vocab1, extra_vocab, vocab_set
    # Keep only the first k left-singular vectors as the reduced embedding.
    redecomposed = pd.DataFrame(U[:, :k], index=vocab, dtype='f')
    del U, vocab
    if debias:
        de_bias_frame(redecomposed)
    # Quantize: scale by 64, then truncate into int8.
    mini = (redecomposed * 64).astype(np.int8)
    mini.sort_index(inplace=True)
    return mini
| 35.857143
| 78
| 0.684462
|
4a0cb93fd97a8e38ed67fbfc91bbfe48189978a3
| 576
|
py
|
Python
|
openprescribing/frontend/migrations/0022_auto_20170324_1458.py
|
annapowellsmith/openpresc
|
cfa9fb07d6fc2ee304159c04fcc132cefcf78745
|
[
"MIT"
] | 91
|
2015-10-14T09:10:32.000Z
|
2022-03-10T22:09:21.000Z
|
openprescribing/frontend/migrations/0022_auto_20170324_1458.py
|
annapowellsmith/openpresc
|
cfa9fb07d6fc2ee304159c04fcc132cefcf78745
|
[
"MIT"
] | 1,828
|
2015-12-04T14:52:27.000Z
|
2022-03-31T08:51:14.000Z
|
openprescribing/frontend/migrations/0022_auto_20170324_1458.py
|
HDRUK/openprescribing
|
510e8c07e841cd42284c109774d1730b6463f376
|
[
"MIT"
] | 27
|
2015-12-03T18:26:56.000Z
|
2021-01-09T21:58:53.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2017-03-24 14:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: add an ``is_current`` boolean flag
    (default True) to both the Product and Section models."""

    dependencies = [
        # Must run after the migration that added Chemical.is_current.
        ('frontend', '0021_chemical_is_current'),
    ]

    operations = [
        migrations.AddField(
            model_name='product',
            name='is_current',
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name='section',
            name='is_current',
            field=models.BooleanField(default=True),
        ),
    ]
| 22.153846
| 52
| 0.574653
|
4a0cba44ec4d2ed792304c5eb99448014f2f9fea
| 6,752
|
py
|
Python
|
indicators/urls.py
|
mikael19/activity
|
3932de42d9b423bff5739f7e06520035df213fc6
|
[
"Apache-2.0"
] | null | null | null |
indicators/urls.py
|
mikael19/activity
|
3932de42d9b423bff5739f7e06520035df213fc6
|
[
"Apache-2.0"
] | null | null | null |
indicators/urls.py
|
mikael19/activity
|
3932de42d9b423bff5739f7e06520035df213fc6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
from django.urls import path, re_path
from .views import (
IndicatorList, add_indicator, indicator_create, IndicatorCreate,
IndicatorUpdate, IndicatorDelete, PeriodicTargetDeleteView,
PeriodicTargetView, CollectedDataReportData, CollectedDataCreate, CollectedDataDelete,
CollectedDataList, CollectedDataUpdate, CollectedDataAdd, CollectedDataEdit,
CollectedDataDeleteVue, collecteddata_import, indicator_report,
TVAReport, TVAPrint, DisaggregationReport, DisaggregationPrint, IndicatorReport,
program_indicator_report, indicator_data_report, IndicatorExport, service_json,
collected_data_json, program_indicators_json, IndicatorReportData, IndicatorDataExport, ObjectiveView, objectives_list, objectives_tree,
LevelView, DisaggregationTypeDeleteView, DisaggregationLabelDeleteView,
IndicatorTarget, IndicatorTypeView, DataCollectionFrequencyView, PeriodicTargetCreateView)
urlpatterns = [
    # INDICATOR PLANING TOOL
    # Home
    path('home/<int:program>/<int:indicator>/<int:type>/',
         IndicatorList.as_view(), name='indicator_list'),
    path('add-indicator', add_indicator, name='add-indicator'),

    # Indicator Form
    path('indicator_list/<int:pk>/',
         IndicatorList.as_view(), name='indicator_list'),
    path('indicator_create/<int:id>/',
         indicator_create, name='indicator_create'),
    path('indicator_add/<int:id>/',
         IndicatorCreate.as_view(), name='indicator_add'),
    path('indicator_update/<int:pk>/',
         IndicatorUpdate.as_view(), name='indicator_update'),
    path('indicator_delete/<int:pk>/',
         IndicatorDelete.as_view(), name='indicator_delete'),
    path('periodic_target_delete/<int:pk>/',
         PeriodicTargetDeleteView.as_view(), name='pt_delete'),
    path('periodic_target_generate/<int:indicator>/',
         PeriodicTargetView.as_view(), name='pt_generate'),
    path('periodic_target_deleteall/<int:indicator>/<slug:deleteall>/',
         PeriodicTargetView.as_view(), name='pt_deleteall'),

    # Collected Data List
    path('collecteddata/<slug:program>/<int:indicator>/<int:type>/',
         CollectedDataList.as_view(), name='collecteddata_list'),
    path('collecteddata_add/<program>/<indicator>/',
         CollectedDataCreate.as_view(), name='collecteddata_add'),
    path('collecteddata/add',
         CollectedDataAdd.as_view(), name='add-collected-data'),
    path('collecteddata_import/', collecteddata_import,
         name='collecteddata_import'),
    path('collecteddata_update/<int:pk>/',
         CollectedDataUpdate.as_view(), name='collecteddata_update'),
    path('collecteddata_delete/<int:pk>/',
         CollectedDataDelete.as_view(), name='collecteddata_delete'),
    path('collecteddata_export/<program>/<indicator>/',
         CollectedDataList.as_view(), name='collecteddata_list'),
    path('collected_data/edit/<int:id>',
         CollectedDataEdit.as_view(), name='edit-collected-data'),
    path('collected_data/delete/<int:id>',
         CollectedDataDeleteVue.as_view(), name='delete-collected-data'),

    # Indicator Report
    path('report/<program>/<indicator>/<type>/',
         indicator_report, name='indicator_report'),
    path('tvareport/', TVAReport.as_view(), name='tvareport'),
    path('tvaprint/<program>/',
         TVAPrint.as_view(), name='tvaprint'),
    path('disrep/<program>/',
         DisaggregationReport.as_view(), name='disrep'),
    path('disrepprint/<program>/',
         DisaggregationPrint.as_view(), name='disrepprint'),
    path('report_table/<program>/<indicator>/<type>/',
         IndicatorReport.as_view(), name='indicator_table'),
    path('program_report/<program>/',
         program_indicator_report, name='program_indicator_report'),

    # Indicator Data Report
    path('data/<id>/<program>/<type>/',
         indicator_data_report, name='indicator_data_report'),
    path('data/<id>/<program>/<type>/map/',
         indicator_data_report, name='indicator_data_report'),
    path('data/<id>/<program>/<type>/graph/',
         indicator_data_report, name='indicator_data_report'),
    path('data/<id>/<program>/<type>/table/',
         indicator_data_report, name='indicator_data_report'),
    path('data/<id>/<program>/',
         indicator_data_report, name='indicator_data_report'),
    path('data/<id>/', indicator_data_report,
         name='indicator_data_report'),
    path('export/<id>/<program>/<indicator_type>/',
         IndicatorExport.as_view(), name='indicator_export'),

    # ajax calls
    path('service/<service>/service_json/',
         service_json, name='service_json'),
    path('collected_data_table/<indicator>/<program>/',
         collected_data_json, name='collected_data_json'),
    # FIX: route read 'program_indicators/program>/...' (missing '<'), which
    # made 'program>' literal URL text instead of a captured parameter.
    path('program_indicators/<program>/<indicator>/<type>/',
         program_indicators_json, name='program_indicators_json'),
    path('report_data/<int:id>/<program>/<type>/',
         IndicatorReportData.as_view(), name='indicator_report_data'),
    path('report_data/<int:id>/<program>/<indicator_type>/export/',
         IndicatorExport.as_view(), name='indicator_export'),
    # FIX: stray ')' characters after converters in the next two routes made
    # a literal ')' part of the URL; removed so the converters match cleanly.
    path('collecteddata_report_data/<program>/<indicator>/<type>/',
         CollectedDataReportData.as_view(), name='collecteddata_report_data'),
    path('collecteddata_report_data/<program>/<indicator>/<type>/export/',
         IndicatorDataExport.as_view(), name='collecteddata_report_data'),
    path('get_target/<int:indicator_id>/', IndicatorTarget.as_view(),
         name='indicator-targets'),

    # Objectives
    re_path(
        r'objective/(?P<pk>.*)',
        ObjectiveView.as_view(),
        name='objective_list'
    ),
    path('objectives', objectives_list, name='objectives'),
    path('objectives/tree', objectives_tree, name='objectives-tree'),
    path('disaggregation_type/delete/<int:pk>/',
         DisaggregationTypeDeleteView.as_view(),
         name='disaggregation_type_delete'),
    path(
        'disaggregation_label/delete/<int:pk>/',
        DisaggregationLabelDeleteView.as_view(),
        name='disaggregation_label_delete'),

    # Levels Urls
    re_path(
        r'level/(?P<pk>.*)',
        LevelView.as_view(),
        name='Level_list'
    ),

    # Indicator Types Urls
    re_path(
        r'indicator_types/(?P<pk>.*)',
        IndicatorTypeView.as_view(),
        name='indicator_type_list'
    ),
    re_path(
        r'data_collection_frequency/(?P<pk>.*)',
        DataCollectionFrequencyView.as_view(),
        name='data_collection_frequency_list'
    ),

    # Periodic Target view
    re_path(
        r'periodic_target/(?P<id>.*)',
        PeriodicTargetCreateView.as_view(),
        name='periodic_target_view'
    ),
]
| 42.465409
| 141
| 0.682168
|
4a0cbab0430ab666d0bf06195d243c679991d2a7
| 761
|
py
|
Python
|
advanced/image_processing/examples/plot_synthetic_data.py
|
junghun73/Learning
|
8b5a295c42f142a3b2f5fa13fc75434a2ea9235a
|
[
"CC-BY-4.0"
] | 419
|
2016-03-05T08:50:48.000Z
|
2022-03-24T15:16:46.000Z
|
advanced/image_processing/examples/plot_synthetic_data.py
|
techeye220/scipy-lecture-notes-zh-CN
|
cc87204fcc4bd2f4702f7c29c83cb8ed5c94b7d6
|
[
"CC-BY-4.0"
] | 5
|
2016-05-21T14:21:12.000Z
|
2017-10-06T11:09:48.000Z
|
advanced/image_processing/examples/plot_synthetic_data.py
|
techeye220/scipy-lecture-notes-zh-CN
|
cc87204fcc4bd2f4702f7c29c83cb8ed5c94b7d6
|
[
"CC-BY-4.0"
] | 233
|
2016-02-13T09:22:57.000Z
|
2021-11-11T17:58:44.000Z
|
"""
Synthetic data
===============
The example generates and displays simple synthetic data.
"""
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt
np.random.seed(1)
n = 10
l = 256
im = np.zeros((l, l))
points = l*np.random.random((2, n**2))
im[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1
im = ndimage.gaussian_filter(im, sigma=l/(4.*n))
mask = im > im.mean()
label_im, nb_labels = ndimage.label(mask)
plt.figure(figsize=(9,3))
plt.subplot(131)
plt.imshow(im)
plt.axis('off')
plt.subplot(132)
plt.imshow(mask, cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(133)
plt.imshow(label_im, cmap=plt.cm.spectral)
plt.axis('off')
plt.subplots_adjust(wspace=0.02, hspace=0.02, top=1, bottom=0, left=0, right=1)
plt.show()
| 20.026316
| 79
| 0.68594
|
4a0cbac0c684041aa7380ff3fd608e4c5aded66d
| 11,014
|
py
|
Python
|
Audio/Adaptive_Voice_Conversion/model.py
|
LiuHaolan/models
|
1639b3039237c3997c51ff87f0b6113bb2e8d236
|
[
"Apache-2.0"
] | 43
|
2021-06-03T09:07:08.000Z
|
2022-03-31T15:21:48.000Z
|
Audio/Adaptive_Voice_Conversion/model.py
|
LiuHaolan/models
|
1639b3039237c3997c51ff87f0b6113bb2e8d236
|
[
"Apache-2.0"
] | 64
|
2021-05-31T10:34:06.000Z
|
2022-01-17T03:44:58.000Z
|
Audio/Adaptive_Voice_Conversion/model.py
|
LiuHaolan/models
|
1639b3039237c3997c51ff87f0b6113bb2e8d236
|
[
"Apache-2.0"
] | 37
|
2021-07-04T03:13:18.000Z
|
2022-03-25T07:30:47.000Z
|
import oneflow as flow
import oneflow.nn as nn
import oneflow.nn.functional as F
def pad_layer(inp, layer):
    """Apply a 1-D conv ``layer`` after reflection-padding so output length
    equals input length. Even kernels get one extra column on the left."""
    k = layer.kernel_size[0]
    left = k // 2
    right = left - 1 if k % 2 == 0 else left
    # ReflectionPad2d needs a 4-D tensor; temporarily add a leading dim.
    padded = nn.ReflectionPad2d((left, right, 0, 0))(inp.unsqueeze(0)).squeeze(0)
    return layer(padded)
def pad_layer_2d(inp, layer, pad_type="reflect"):
kernel_size = layer.kernel_size
if kernel_size[0] % 2 == 0:
pad_lr = [kernel_size[0] // 2, kernel_size[0] // 2 - 1]
else:
pad_lr = [kernel_size[0] // 2, kernel_size[0] // 2]
if kernel_size[1] % 2 == 0:
pad_ud = [kernel_size[1] // 2, kernel_size[1] // 2 - 1]
else:
pad_ud = [kernel_size[1] // 2, kernel_size[1] // 2]
pad = tuple(pad_lr + pad_ud)
inp = F.pad(inp, pad=pad, mode=pad_type)
out = layer(inp)
return out
def pixel_shuffle_1d(inp, scale_factor=2):
    """1-D pixel shuffle: [B, C, W] -> [B, C // s, W * s].

    Channel groups are interleaved along the width axis.
    """
    b, c, w = inp.size()
    c_out = c // scale_factor
    reshaped = inp.contiguous().view(b, c_out, scale_factor, w)
    interleaved = reshaped.permute(0, 1, 3, 2).contiguous()
    return interleaved.view(b, c_out, w * scale_factor)
def upsample(x, scale_factor=2):
    """Nearest-neighbour upsampling along the temporal axis."""
    return F.interpolate(x, scale_factor=scale_factor, mode="nearest")
def flatten(x):
    """Collapse the trailing two dims of a [B, C, W] tensor into one."""
    batch = x.size(0)
    return x.contiguous().view(batch, x.size(1) * x.size(2))
def concat_cond(x, cond):
    """Broadcast ``cond`` over time and concatenate channel-wise.

    x:    [batch_size, x_channels, length]
    cond: [batch_size, c_channels]
    """
    tiled = cond.unsqueeze(dim=2)
    tiled = tiled.expand(*tiled.size()[:-1], x.size(-1))
    return flow.cat([x, tiled], dim=1)
def append_cond(x, cond):
    """Conditional affine transform along the channel axis.

    x:    [batch_size, x_channels, length]
    cond: [batch_size, x_channels * 2] -- first half mean, second half std.
    """
    half = cond.size(1) // 2
    mean = cond[:, :half].unsqueeze(dim=2)
    std = cond[:, half:].unsqueeze(dim=2)
    return x * std + mean
def conv_bank(x, module_list, act):
    """Run ``x`` through every conv of the bank, activate each branch, and
    concatenate all branch outputs plus the raw input along channels."""
    branch_outputs = [act(pad_layer(x, conv)) for conv in module_list]
    # Append the unprocessed input so no information is lost.
    return flow.cat(branch_outputs + [x], dim=1)
def get_act(act):
if act == "relu":
return nn.ReLU()
elif act == "lrelu":
return nn.LeakyReLU()
else:
return nn.ReLU()
class SpeakerEncoder(nn.Module):
    """Encodes an utterance into a fixed-size speaker embedding.

    Pipeline: conv bank -> 1x1 reduction -> residual conv blocks (with
    optional temporal subsampling) -> global average pooling -> residual
    dense blocks -> linear output of size ``c_out``.
    """
    def __init__(
        self,
        c_in,
        c_h,
        c_out,
        kernel_size,
        bank_size,
        bank_scale,
        c_bank,
        n_conv_blocks,
        n_dense_blocks,
        subsample,
        act,
        dropout_rate,
    ):
        super(SpeakerEncoder, self).__init__()
        self.c_in = c_in
        self.c_h = c_h
        self.c_out = c_out
        self.kernel_size = kernel_size
        self.n_conv_blocks = n_conv_blocks
        self.n_dense_blocks = n_dense_blocks
        self.subsample = subsample
        self.act = get_act(act)
        # Bank of convs with kernel sizes bank_scale, 2*bank_scale, ..., bank_size.
        self.conv_bank = nn.ModuleList(
            [
                nn.Conv1d(c_in, c_bank, kernel_size=k)
                for k in range(bank_scale, bank_size + 1, bank_scale)
            ]
        )
        # conv_bank concatenates every branch output plus the raw input.
        in_channels = c_bank * (bank_size // bank_scale) + c_in
        self.in_conv_layer = nn.Conv1d(in_channels, c_h, kernel_size=1)
        self.first_conv_layers = nn.ModuleList(
            [nn.Conv1d(c_h, c_h, kernel_size=kernel_size) for _ in range(n_conv_blocks)]
        )
        # Strided convs implement the per-block temporal subsampling.
        self.second_conv_layers = nn.ModuleList(
            [
                nn.Conv1d(c_h, c_h, kernel_size=kernel_size, stride=sub)
                for sub, _ in zip(subsample, range(n_conv_blocks))
            ]
        )
        self.pooling_layer = nn.AdaptiveAvgPool1d(1)
        self.first_dense_layers = nn.ModuleList(
            [nn.Linear(c_h, c_h) for _ in range(n_dense_blocks)]
        )
        self.second_dense_layers = nn.ModuleList(
            [nn.Linear(c_h, c_h) for _ in range(n_dense_blocks)]
        )
        self.output_layer = nn.Linear(c_h, c_out)
        self.dropout_layer = nn.Dropout(p=dropout_rate)
    def conv_blocks(self, inp):
        """Residual conv blocks; the skip path is avg-pooled when subsampling."""
        out = inp
        # convolution blocks
        for l in range(self.n_conv_blocks):
            y = pad_layer(out, self.first_conv_layers[l])
            y = self.act(y)
            y = self.dropout_layer(y)
            y = pad_layer(y, self.second_conv_layers[l])
            y = self.act(y)
            y = self.dropout_layer(y)
            if self.subsample[l] > 1:
                # Downsample the residual path to match the strided conv output.
                out = F.avg_pool1d(out, kernel_size=self.subsample[l], ceil_mode=True)
            out = y + out
        return out
    def dense_blocks(self, inp):
        """Residual fully-connected blocks applied after pooling."""
        out = inp
        # dense layers
        for l in range(self.n_dense_blocks):
            y = self.first_dense_layers[l](out)
            y = self.act(y)
            y = self.dropout_layer(y)
            y = self.second_dense_layers[l](y)
            y = self.act(y)
            y = self.dropout_layer(y)
            out = y + out
        return out
    def forward(self, x):
        """Map [B, c_in, T] features to a [B, c_out] speaker embedding."""
        out = conv_bank(x, self.conv_bank, act=self.act)
        # dimension reduction layer
        out = pad_layer(out, self.in_conv_layer)
        out = self.act(out)
        # conv blocks
        out = self.conv_blocks(out)
        # avg pooling
        out = self.pooling_layer(out).squeeze(2)
        # dense blocks
        out = self.dense_blocks(out)
        out = self.output_layer(out)
        return out
class ContentEncoder(nn.Module):
    """Encodes content as a per-timestep Gaussian (mu, log_sigma).

    Instance normalisation after every conv strips per-utterance (speaker)
    statistics, so the latent carries mostly linguistic content.
    """
    def __init__(
        self,
        c_in,
        c_h,
        c_out,
        kernel_size,
        bank_size,
        bank_scale,
        c_bank,
        n_conv_blocks,
        subsample,
        act,
        dropout_rate,
    ):
        super(ContentEncoder, self).__init__()
        self.n_conv_blocks = n_conv_blocks
        self.subsample = subsample
        self.act = get_act(act)
        self.conv_bank = nn.ModuleList(
            [
                nn.Conv1d(c_in, c_bank, kernel_size=k)
                for k in range(bank_scale, bank_size + 1, bank_scale)
            ]
        )
        # conv_bank output: one branch per kernel size plus the raw input.
        in_channels = c_bank * (bank_size // bank_scale) + c_in
        self.in_conv_layer = nn.Conv1d(in_channels, c_h, kernel_size=1)
        self.first_conv_layers = nn.ModuleList(
            [nn.Conv1d(c_h, c_h, kernel_size=kernel_size) for _ in range(n_conv_blocks)]
        )
        self.second_conv_layers = nn.ModuleList(
            [
                nn.Conv1d(c_h, c_h, kernel_size=kernel_size, stride=sub)
                for sub, _ in zip(subsample, range(n_conv_blocks))
            ]
        )
        self.norm_layer = nn.InstanceNorm1d(c_h, affine=False)
        # Two 1x1 heads emit the posterior mean and log-sigma.
        self.mean_layer = nn.Conv1d(c_h, c_out, kernel_size=1)
        self.std_layer = nn.Conv1d(c_h, c_out, kernel_size=1)
        self.dropout_layer = nn.Dropout(p=dropout_rate)
    def forward(self, x):
        """Return (mu, log_sigma), each [B, c_out, T'] (T' after subsampling)."""
        out = conv_bank(x, self.conv_bank, act=self.act)
        # dimension reduction layer
        out = pad_layer(out, self.in_conv_layer)
        out = self.norm_layer(out)
        out = self.act(out)
        out = self.dropout_layer(out)
        # convolution blocks
        for l in range(self.n_conv_blocks):
            y = pad_layer(out, self.first_conv_layers[l])
            y = self.norm_layer(y)
            y = self.act(y)
            y = self.dropout_layer(y)
            y = pad_layer(y, self.second_conv_layers[l])
            y = self.norm_layer(y)
            y = self.act(y)
            y = self.dropout_layer(y)
            if self.subsample[l] > 1:
                # Keep the residual path's length in sync with the strided conv.
                out = F.avg_pool1d(out, kernel_size=self.subsample[l], ceil_mode=True)
            out = y + out
        mu = pad_layer(out, self.mean_layer)
        log_sigma = pad_layer(out, self.std_layer)
        return mu, log_sigma
class Decoder(nn.Module):
    """Decodes content latents into features, conditioned on a speaker embedding.

    Conditioning is injected per block via affine transforms (append_cond);
    temporal upsampling uses 1-D pixel shuffle on the conv outputs.
    """
    def __init__(
        self,
        c_in,
        c_cond,
        c_h,
        c_out,
        kernel_size,
        n_conv_blocks,
        upsample,
        act,
        sn,
        dropout_rate,
    ):
        super(Decoder, self).__init__()
        self.n_conv_blocks = n_conv_blocks
        self.upsample = upsample
        self.act = get_act(act)
        # NOTE(review): `f` is an identity wrapper and `sn` is unused here --
        # presumably a spectral-norm hook that was stripped; confirm upstream.
        f = lambda x: x
        self.in_conv_layer = f(nn.Conv1d(c_in, c_h, kernel_size=1))
        self.first_conv_layers = nn.ModuleList(
            [
                f(nn.Conv1d(c_h, c_h, kernel_size=kernel_size))
                for _ in range(n_conv_blocks)
            ]
        )
        # c_h * up output channels feed the 1-D pixel-shuffle upsampling.
        self.second_conv_layers = nn.ModuleList(
            [
                f(nn.Conv1d(c_h, c_h * up, kernel_size=kernel_size))
                for _, up in zip(range(n_conv_blocks), self.upsample)
            ]
        )
        self.norm_layer = nn.InstanceNorm1d(c_h, affine=False)
        # Each affine layer maps the condition to (mean, std), c_h values each.
        self.conv_affine_layers = nn.ModuleList(
            [f(nn.Linear(c_cond, c_h * 2)) for _ in range(n_conv_blocks * 2)]
        )
        self.out_conv_layer = f(nn.Conv1d(c_h, c_out, kernel_size=1))
        self.dropout_layer = nn.Dropout(p=dropout_rate)
    def forward(self, z, cond):
        """Decode latent ``z`` [B, c_in, T] given speaker ``cond`` [B, c_cond]."""
        out = pad_layer(z, self.in_conv_layer)
        out = self.norm_layer(out)
        out = self.act(out)
        out = self.dropout_layer(out)
        # convolution blocks
        for l in range(self.n_conv_blocks):
            y = pad_layer(out, self.first_conv_layers[l])
            y = self.norm_layer(y)
            y = append_cond(y, self.conv_affine_layers[l * 2](cond))
            y = self.act(y)
            y = self.dropout_layer(y)
            y = pad_layer(y, self.second_conv_layers[l])
            if self.upsample[l] > 1:
                y = pixel_shuffle_1d(y, scale_factor=self.upsample[l])
            y = self.norm_layer(y)
            y = append_cond(y, self.conv_affine_layers[l * 2 + 1](cond))
            y = self.act(y)
            y = self.dropout_layer(y)
            if self.upsample[l] > 1:
                # Upsample the residual path to match the shuffled output length.
                out = y + upsample(out, scale_factor=self.upsample[l])
            else:
                out = y + out
        out = pad_layer(out, self.out_conv_layer)
        return out
class AE(nn.Module):
    """Auto-encoder: speaker encoder + VAE-style content encoder + decoder."""
    def __init__(self, config):
        super(AE, self).__init__()
        self.speaker_encoder = SpeakerEncoder(**config["SpeakerEncoder"])
        self.content_encoder = ContentEncoder(**config["ContentEncoder"])
        self.decoder = Decoder(**config["Decoder"])
    def forward(self, x):
        """Encode ``x`` and decode a reparameterised content sample
        conditioned on the speaker embedding."""
        speaker_emb = self.speaker_encoder(x)
        mu, log_sigma = self.content_encoder(x)
        # Reparameterisation trick: sample eps ~ N(0, 1).
        noise = log_sigma.new_ones(tuple([*log_sigma.size()])).normal_(0, 1)
        recon = self.decoder(mu + flow.exp(log_sigma / 2) * noise, speaker_emb)
        return mu, log_sigma, speaker_emb, recon
    def inference(self, x, x_cond):
        """Voice conversion: content of ``x`` rendered with the speaker of
        ``x_cond`` (uses the posterior mean, no sampling)."""
        speaker_emb = self.speaker_encoder(x_cond)
        content_mu, _ = self.content_encoder(x)
        return self.decoder(content_mu, speaker_emb)
    def get_speaker_embeddings(self, x):
        """Return only the speaker embedding of ``x``."""
        return self.speaker_encoder(x)
| 32.017442
| 88
| 0.571182
|
4a0cbcdf3d6fa43bd3f6574f0b2e012204bae6b1
| 22,729
|
py
|
Python
|
pcdet/datasets/waymo/waymo_utils.py
|
xiangruhuang/OpenPCDet
|
d82d9594a0629ffed0c457aedc304e0805e93221
|
[
"Apache-2.0"
] | null | null | null |
pcdet/datasets/waymo/waymo_utils.py
|
xiangruhuang/OpenPCDet
|
d82d9594a0629ffed0c457aedc304e0805e93221
|
[
"Apache-2.0"
] | null | null | null |
pcdet/datasets/waymo/waymo_utils.py
|
xiangruhuang/OpenPCDet
|
d82d9594a0629ffed0c457aedc304e0805e93221
|
[
"Apache-2.0"
] | null | null | null |
# OpenPCDet PyTorch Dataloader and Evaluation Tools for Waymo Open Dataset
# Reference https://github.com/open-mmlab/OpenPCDet
# Written by Shaoshuai Shi, Chaoxu Guo
# All Rights Reserved 2019-2020.
import os
import pickle
import numpy as np
import torch
from ...utils import common_utils
import tensorflow as tf
from waymo_open_dataset.utils import frame_utils, transform_utils, range_image_utils
from waymo_open_dataset import dataset_pb2
from pcdet.ops.roiaware_pool3d.roiaware_pool3d_utils import (
points_in_boxes_cpu
)
try:
tf.enable_eager_execution()
except:
pass
WAYMO_CLASSES = ['unknown', 'Vehicle', 'Pedestrian', 'Sign', 'Cyclist']
def convert_range_image_to_point_cloud_labels(frame,
                                              range_images,
                                              segmentation_labels,
                                              ri_index=0):
    """Convert segmentation labels from range images to point clouds.

    Args:
      frame: open dataset frame.
      range_images: dict {laser_name: [first_return, second_return]}.
      segmentation_labels: dict {laser_name: [first_return, second_return]}.
      ri_index: 0 for the first return, 1 for the second return.

    Returns:
      point_labels: list of [N, 2] arrays of per-point segmentation labels
        (0 for unlabeled points); only lasers present in
        ``segmentation_labels`` contribute an entry.
    """
    # Iterate lasers in a fixed (name-sorted) order so the output lines up
    # with the point clouds built by convert_range_image_to_point_cloud.
    calibrations = sorted(frame.context.laser_calibrations, key=lambda c: c.name)
    point_labels = []
    for c in calibrations:
        range_image = range_images[c.name][ri_index]
        range_image_tensor = tf.reshape(
            tf.convert_to_tensor(range_image.data), range_image.shape.dims)
        # A non-positive range marks an empty return; only valid pixels count.
        range_image_mask = range_image_tensor[..., 0] > 0
        if c.name in segmentation_labels:
            # Only the top lidar carries segmentation labels in this dataset.
            assert c.name == dataset_pb2.LaserName.TOP
            sl = segmentation_labels[c.name][ri_index]
            sl_tensor = tf.reshape(tf.convert_to_tensor(sl.data), sl.shape.dims)
            sl_points_tensor = tf.gather_nd(sl_tensor, tf.where(range_image_mask))
            point_labels.append(sl_points_tensor.numpy())
    return point_labels
def generate_labels(frame):
    """Extract 3-D box annotations from a Waymo frame into numpy arrays.

    Boxes follow the unified OpenPCDet layout [x, y, z, l, w, h, heading];
    objects of class 'unknown' are dropped before the boxes are assembled.
    """
    names, difficulties, dims, centers, headings = [], [], [], [], []
    track_difficulties, ids, num_points = [], [], []
    for label in frame.laser_labels:
        box = label.box
        names.append(WAYMO_CLASSES[label.type])
        difficulties.append(label.detection_difficulty_level)
        track_difficulties.append(label.tracking_difficulty_level)
        headings.append(box.heading)
        # lwh ordering matches OpenPCDet's unified box definition.
        dims.append([box.length, box.width, box.height])
        centers.append([box.center_x, box.center_y, box.center_z])
        ids.append(label.id)
        num_points.append(label.num_lidar_points_in_box)
    annotations = {
        'name': np.array(names),
        'difficulty': np.array(difficulties),
        'dimensions': np.array(dims),
        'location': np.array(centers),
        'heading_angles': np.array(headings),
        'obj_ids': np.array(ids),
        'tracking_difficulty': np.array(track_difficulties),
        'num_points_in_gt': np.array(num_points),
    }
    annotations = common_utils.drop_info_with_name(annotations, name='unknown')
    if len(annotations['name']) > 0:
        gt_boxes_lidar = np.concatenate([
            annotations['location'], annotations['dimensions'],
            annotations['heading_angles'][..., np.newaxis]], axis=1)
    else:
        gt_boxes_lidar = np.zeros((0, 7))
    annotations['gt_boxes_lidar'] = gt_boxes_lidar
    return annotations
def convert_range_image_to_point_cloud(frame,
                                       range_images,
                                       camera_projections,
                                       range_image_top_pose,
                                       ri_index=0,
                                       keep_polar_features=False):
    """Convert range images to point clouds.

    Args:
      frame: open dataset frame.
      range_images: dict {laser_name: [first_return, second_return]}.
      camera_projections: dict {laser_name: [cp_first_return,
        cp_second_return]}.
      range_image_top_pose: range image pixel pose for the top lidar.
      ri_index: 0 for the first return, 1 for the second return.
      keep_polar_features: if True, prepend (range, intensity, elongation)
        to the cartesian coordinates.

    Returns:
      points: list (one entry per lidar) of per-point feature arrays.
        NOTE(review): unlike the upstream Waymo helper, this variant appends
        two extra columns -- the normalized (x, y) range-image coordinates --
        so rows are [.., cart_xyz, img_x, img_y] ([N, 5] or [N, 8] with
        polar features), not [N, 3].
      cp_points: list of [N, 6] camera-projection arrays.
    """
    calibrations = sorted(frame.context.laser_calibrations, key=lambda c: c.name)
    points = []
    cp_points = []
    cartesian_range_images = frame_utils.convert_range_image_to_cartesian(
        frame, range_images, range_image_top_pose, ri_index, keep_polar_features)
    for c in calibrations:
        range_image = range_images[c.name][ri_index]
        range_image_tensor = tf.reshape(
            tf.convert_to_tensor(value=range_image.data), range_image.shape.dims)
        # Valid returns have a positive range in channel 0.
        range_image_mask = range_image_tensor[..., 0] > 0
        # Normalized column (x) / row (y) coordinates of every range-image pixel.
        x, y = tf.meshgrid(tf.range(range_image_mask.shape[1], dtype=tf.float32),
                           tf.range(range_image_mask.shape[0], dtype=tf.float32))
        x = x / range_image_mask.shape[1]
        y = y / range_image_mask.shape[0]
        xy = tf.stack([x, y], axis=-1)
        range_image_cartesian = cartesian_range_images[c.name]
        range_image_cartesian = tf.concat([range_image_cartesian, xy], axis=-1)
        points_tensor = tf.gather_nd(range_image_cartesian,
                                     tf.compat.v1.where(range_image_mask))
        cp = camera_projections[c.name][ri_index]
        cp_tensor = tf.reshape(tf.convert_to_tensor(value=cp.data), cp.shape.dims)
        cp_points_tensor = tf.gather_nd(cp_tensor,
                                        tf.compat.v1.where(range_image_mask))
        points.append(points_tensor.numpy())
        cp_points.append(cp_points_tensor.numpy())
    return points, cp_points
def save_lidar_points(frame, cur_save_path, use_two_returns=True, seg_labels=True):
    """Extract the frame's point cloud (and optional seg labels) to .npy files.

    Saves points as [x, y, z, intensity, elongation, range, img_x, img_y]
    float32 rows at ``cur_save_path``; segmentation labels (when present)
    go to the same path with a ``_seg`` suffix.

    Returns:
      num_points_of_each_lidar: per-lidar point counts, and -- when
        ``seg_labels`` is True -- also the seg label path (or None).
    """
    if seg_labels:
        (range_images, camera_projections, segmentation_labels,
         range_image_top_pose) = frame_utils.parse_range_image_and_camera_projection(
            frame)
    else:
        (range_images, camera_projections, range_image_top_pose) = \
            frame_utils.parse_range_image_and_camera_projection(frame)
    points, cp_points = convert_range_image_to_point_cloud(
        frame, range_images, camera_projections, range_image_top_pose,
        keep_polar_features=True)
    if use_two_returns:
        # Merge first- and second-return points per lidar.
        points_ri2, cp_points_ri2 = convert_range_image_to_point_cloud(
            frame, range_images, camera_projections, range_image_top_pose,
            ri_index=1, keep_polar_features=True)
        points = [np.concatenate([p1, p2], axis=0) \
                  for p1, p2 in zip(points, points_ri2)]
    num_points_of_each_lidar = [point.shape[0] for point in points]
    points = np.concatenate(points, axis=0)
    # Reorder columns from [range, intensity, elongation, x, y, z, img_x, img_y].
    points = points[:, [3,4,5,1,2,0,6,7]]  # [x, y, z, intensity, elongation, range, w, h]
    points = points.astype(np.float32)
    np.save(cur_save_path, points)
    if seg_labels:
        # load segmentation labels
        if frame.lasers[0].ri_return1.segmentation_label_compressed:
            assert frame.lasers[0].ri_return2.segmentation_label_compressed
            point_labels = convert_range_image_to_point_cloud_labels(
                frame, range_images, segmentation_labels)
            point_labels = np.concatenate(point_labels, axis=0)
            if use_two_returns:
                point_labels_ri2 = convert_range_image_to_point_cloud_labels(
                    frame, range_images, segmentation_labels, ri_index=1)
                point_labels_ri2 = np.concatenate(point_labels_ri2, axis=0)
                point_labels = np.concatenate([point_labels, point_labels_ri2],
                                              axis=0)
            seg_label_path = str(cur_save_path).replace('.npy', '_seg.npy')
            np.save(seg_label_path, point_labels)
        else:
            seg_label_path = None
        return num_points_of_each_lidar, seg_label_path
    else:
        return num_points_of_each_lidar
def process_single_sequence(sequence_file, save_path, sampled_interval,
                            has_label=True, use_two_returns=True,
                            seg_only=False):
    """Extract per-frame infos and lidar point files from one Waymo record.

    Args:
        sequence_file: Path to the .tfrecord segment.
        save_path: Root directory; a sub-directory named after the sequence
            is created for the per-frame .npy files and the info .pkl.
        sampled_interval: Keep every k-th frame only.
        has_label: Also extract box annotations into info['annos'].
        use_two_returns: Merge first and second lidar returns.
        seg_only: Skip frames without 3-D segmentation labels.

    Returns:
        List of per-frame info dicts (also pickled next to the point files).
    """
    sequence_name = os.path.splitext(os.path.basename(sequence_file))[0]
    cur_save_dir = save_path / sequence_name
    pkl_file = cur_save_dir / ('%s.pkl' % sequence_name)

    sequence_infos = []
    if pkl_file.exists():
        sequence_infos = pickle.load(open(pkl_file, 'rb'))
        print('Skip sequence since it has been processed before: %s' % pkl_file)
        return sequence_infos

    if not sequence_file.exists():
        print('NotFoundError: %s' % sequence_file)
        return []

    dataset = tf.data.TFRecordDataset(str(sequence_file), compression_type='')
    cur_save_dir.mkdir(parents=True, exist_ok=True)

    for cnt, data in enumerate(dataset):
        if cnt % sampled_interval != 0:
            continue
        frame = dataset_pb2.Frame()
        frame.ParseFromString(bytearray(data.numpy()))
        if seg_only and not frame.lasers[0].ri_return1.segmentation_label_compressed:
            continue

        info = {}
        pc_info = {'num_features': 8, 'lidar_sequence': sequence_name, 'sample_idx': cnt}
        info['point_cloud'] = pc_info

        # NOTE(review): despite the name, this collects the extrinsics of
        # every laser, not only the top one -- kept for compatibility.
        top_lidar_pose = []
        for calibration in frame.context.laser_calibrations:
            top_lidar_pose.append(
                np.array(calibration.extrinsic.transform).astype(np.float32).reshape(-1)
            )

        info['frame_id'] = sequence_name + ('_%03d' % cnt)
        info['metadata'] = {
            'context_name': frame.context.name,
            'timestamp_micros': frame.timestamp_micros,
            'top_lidar_pose': top_lidar_pose
        }

        image_info = {}
        for j in range(5):
            width = frame.context.camera_calibrations[j].width
            height = frame.context.camera_calibrations[j].height
            image_info.update({'image_shape_%d' % j: (height, width)})
        info['image'] = image_info

        pose = np.array(frame.pose.transform, dtype=np.float32).reshape(4, 4)
        info['pose'] = pose

        num_points_of_each_lidar, seg_label_path = save_lidar_points(
            frame, cur_save_dir / ('%04d.npy' % cnt), use_two_returns=use_two_returns,
            seg_labels=True
        )
        info['num_points_of_each_lidar'] = num_points_of_each_lidar

        if has_label:
            # Bug fix: annotations were previously assigned twice in a row;
            # assemble and attach them exactly once.
            annotations = generate_labels(frame)
            annotations['seg_label_path'] = seg_label_path
            info['annos'] = annotations

        sequence_infos.append(info)

    with open(pkl_file, 'wb') as f:
        pickle.dump(sequence_infos, f)

    print('Infos are saved to (sampled_interval=%d): %s' % (sampled_interval, pkl_file))
    return sequence_infos
def split_by_seg_label(points, labels):
    """Split a labeled point cloud into road / sidewalk / everything-else.

    Args:
        points: [N, >=3] lidar points; only the top-lidar prefix is labeled.
        labels: [N_top, 2] per-point labels, channels [instance, segment].

    Returns:
        road:      [N_road, 3]  points with segment class == 10
        sidewalk:  [N_side, 3]  points with segment class == 11
        other_obj: [N_other, 3] all remaining labeled points
        labels:    [N_other, 2] labels of the remaining points

    NOTE(review): the original docstring claimed classes {18, 19} for road
    and {17, 20, 21, 22} for sidewalk, but the code has always tested 10/11;
    the docstring now matches the code -- confirm 10/11 against the
    dataset's segmentation enum.
    """
    # Drop points from the other lidar sensors (they carry no seg label).
    points = points[:labels.shape[0]]
    seg_labels = labels[:, 1]
    road_mask = seg_labels == 10
    sidewalk_mask = seg_labels == 11
    other_obj_mask = ~road_mask & ~sidewalk_mask
    road = points[road_mask, :3]
    sidewalk = points[sidewalk_mask, :3]
    other_obj = points[other_obj_mask, :3]
    return road, sidewalk, other_obj, labels[other_obj_mask, :]
def find_box_instance_label(overlap, instance_labels):
    """For each box, take the median instance label of its member points.

    NOTE(review): if ``overlap`` arrives as an int 0/1 array rather than
    bool, the row indexes ``instance_labels`` instead of masking it --
    callers pass ``points_in_boxes_cpu`` output unconverted; confirm intent.
    """
    medians = [int(np.median(instance_labels[row])) for row in overlap]
    return np.array(medians, dtype=np.int32)
def check_box_interaction(boxes, radius, other_obj, seg_labels):
    """Flag boxes whose dilated extent touches points of a foreign instance.

    Args:
        boxes: [M, 7] gt boxes [x, y, z, l, w, h, heading].
        radius: dilation added to each box's l/w/h to form the boundary.
        other_obj: [N, 3] non-road, non-sidewalk points.
        seg_labels: [N, 2] per-point [instance, segment] labels.

    Returns:
        Boolean [M] mask; True where a box interacts with points that are
        neither inside it nor of its own instance.
    """
    # Points genuinely inside each (undilated) box are "expected" overlap.
    expected_overlap = points_in_boxes_cpu(other_obj, boxes)
    box_instance_labels = find_box_instance_label(expected_overlap,
                                                  seg_labels[:, 0])
    boxes_as_boundary = np.copy(boxes)
    boxes_as_boundary[:, 3:6] += radius
    # compute point-box interaction
    interaction = points_in_boxes_cpu(other_obj,
                                      boxes_as_boundary).astype(bool)
    # box interacting points with it is allowed
    interaction[np.where(expected_overlap)] = False
    box_index, point_index = np.where(interaction)
    # box interacting with points within the same instance is allowed
    mask = box_instance_labels[box_index] == seg_labels[point_index, 0]
    interaction[(box_index[mask], point_index[mask])] = False
    # others are not allowed
    box_is_interacting = interaction.any(1)
    return box_is_interacting
def compute_interaction_index_for_frame(dataset, info, radius_list):
    """Annotate ``info`` with per-radius box/point interaction masks.

    For each radius in ``radius_list`` a boolean mask over the frame's gt
    boxes is stored under info['annos']['interaction_index'][str(radius)].
    Frames without gt boxes are returned unchanged.
    """
    points = dataset.get_lidar(info['point_cloud']['lidar_sequence'],
                               info['point_cloud']['sample_idx'])
    annos = info['annos']
    boxes = annos['gt_boxes_lidar']
    if boxes.shape[0] > 0:
        seg_labels = dataset.get_seg_label(info['point_cloud']['lidar_sequence'],
                                           info['point_cloud']['sample_idx'])
        # Road/sidewalk points never count as interacting objects.
        road, walkable, other_obj, seg_labels = split_by_seg_label(points, seg_labels)
        box_interaction = {}
        for radius in radius_list:
            box_is_interacting = check_box_interaction(
                boxes, radius,
                other_obj, seg_labels)
            box_interaction[f'{radius}'] = box_is_interacting
        info['annos']['interaction_index'] = box_interaction
    return info
def extract_foreground_pointcloud(dataset, top_lidar_only, database_save_path, info, db_info_save_path):
    """Carve labeled foreground instances out of one frame and save them.

    For every foreground segmentation class, points are grouped into
    instances (by instance label or by greedy radius clustering, per the
    class strategy in ``dataset.strategies``), matched to a supporting
    surface class, optionally attached to a gt box and merged with companion
    classes, then written as per-instance .npy files plus a pickled dict of
    records keyed by class id at ``db_info_save_path``.

    NOTE(review): requires CUDA (tensors are moved to the GPU); classes
    0/6/14/15 are deliberately subsampled via the ``inst_count`` modulo
    checks below.
    """
    frame_id = info['frame_id']
    sample_idx = int(frame_id[-3:])
    sequence_name = frame_id[:-4]
    seg_labels = dataset.get_seg_label(sequence_name, sample_idx)
    annos = info['annos']
    gt_boxes = annos['gt_boxes_lidar']
    pc_info = info['point_cloud']
    sequence_name = pc_info['lidar_sequence']
    sample_idx = pc_info['sample_idx']
    points = dataset.get_lidar(sequence_name, sample_idx)
    if top_lidar_only:
        # Only the top-lidar prefix of `points` carries segmentation labels.
        points = points[:seg_labels.shape[0]]
    seg_inst_labels, seg_cls_labels = seg_labels.T
    #vis.clear()
    #vis_dict = dict(
    #    points=torch.from_numpy(points),
    #    seg_inst_labels=torch.from_numpy(seg_inst_labels),
    #    seg_cls_labels=torch.from_numpy(seg_cls_labels),
    #    batch_idx=torch.zeros(points.shape[0], 1).long(),
    #    batch_size=1,
    #)
    #vis(vis_dict)
    foreground_class = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 15]
    instance_dict = {i: [] for i in foreground_class}
    instance_count = {i: 0 for i in foreground_class}
    points = torch.from_numpy(points).cuda()
    seg_cls_labels = torch.from_numpy(seg_cls_labels).cuda()
    seg_inst_labels = torch.from_numpy(seg_inst_labels).cuda()
    for fg_idx, fg_cls in enumerate(foreground_class):
        #print('foreground class', fg_cls)
        # Per-class extraction strategy (support surfaces, radii, grouping...).
        strategy = dataset.strategies[fg_idx]
        support = strategy['support']
        radius = strategy.get('radius', None)
        group_radius = strategy.get('group_radius', None)
        min_num_point = strategy.get('min_num_points', 5)
        use_inst_label = strategy.get('use_inst_label', False)
        attach_box = strategy.get('attach_box', False)
        group_with = strategy.get('group_with', [])
        cls_mask = seg_cls_labels == fg_cls
        cls_points = points[cls_mask]
        inst_labels = seg_inst_labels[cls_mask]
        # Greedily peel instances off this class's point set.
        while cls_points.shape[0] > min_num_point:
            #print(f'cls={fg_cls}, #points', cls_points.shape[0])
            if use_inst_label:
                # Trust the dataset's instance ids.
                inst_label = inst_labels.unique()[0]
                instance_pc = cls_points[inst_labels == inst_label]
                cls_points = cls_points[inst_labels != inst_label]
                inst_labels = inst_labels[inst_labels != inst_label]
            else:
                # Cluster: everything within `radius` (in xy) of a seed point.
                center = cls_points[0]
                dist = (cls_points - center)[:, :2].norm(p=2, dim=-1)
                inst_mask = dist < radius
                instance_pc = cls_points[inst_mask]
                cls_points = cls_points[inst_mask == False]
            if instance_pc.shape[0] > min_num_point:
                # find box that covers it
                if attach_box:
                    point_masks = points_in_boxes_cpu(instance_pc[:, :3].cpu().numpy(), gt_boxes)
                    average = point_masks.mean(1)
                    if average.max() > 0.9:
                        box_index = average.argmax()
                        attaching_box = gt_boxes[box_index]
                    else:
                        attaching_box = None
                else:
                    attaching_box = None
                # group with other classes
                if len(group_with) > 0:
                    center = instance_pc.mean(0)
                    offsets = [0]
                    sizes = [instance_pc.shape[0]]
                    classes = [fg_cls]
                    success = False
                    for g in group_with:
                        g_mask = seg_cls_labels == g
                        if not g_mask.any():
                            continue
                        g_points = points[g_mask]
                        g_dist = (g_points - center)[:, :2].norm(p=2, dim=-1)
                        if not (g_dist < radius).any():
                            continue
                        success = True
                        grouped_points = g_points[g_dist < radius]
                        classes.append(g)
                        offsets.append(offsets[-1]+sizes[-1])
                        sizes.append(grouped_points.shape[0])
                        instance_pc = torch.cat([instance_pc, grouped_points], dim=0)
                    if success:
                        grouping = dict(
                            cls=classes,
                            offsets=offsets,
                            sizes=sizes,
                        )
                    else:
                        grouping = None
                else:
                    grouping = None
                # Lowest point of the instance anchors the support search.
                low = instance_pc[instance_pc[:, 2].argmin()]
                # find support of this
                for support_cls in support:
                    support_mask = seg_cls_labels == support_cls
                    if not support_mask.any():
                        continue
                    support_points = points[support_mask]
                    support_dist = (support_points - low)[:, :3].norm(p=2, dim=-1)
                    if not use_inst_label and (support_dist.min() > radius):
                        continue
                    # Vertical offset from the instance to its support surface.
                    trans = (support_points[support_dist.argmin()] - low)[2]
                    inst_count = instance_count[fg_cls]
                    instance_count[fg_cls] += 1
                    # Subsample over-represented classes (keep 1/4 or 1/2).
                    if (fg_cls == 0) and (inst_count % 4 != 0):
                        break
                    if (fg_cls == 6) and (inst_count % 2 != 0):
                        break
                    if (fg_cls == 14) and (inst_count % 2 != 0):
                        break
                    if (fg_cls == 15) and (inst_count % 2 != 0):
                        break
                    #if support_cls in group_with:
                    #    grouped_points = support_points[support_dist < radius]
                    #    grouping = dict(
                    #        cls=[fg_cls, support_cls],
                    #        offsets=[0, instance_pc.shape[0]],
                    #        sizes=[instance_pc.shape[0], grouped_points.shape[0]]
                    #    )
                    #    instance_pc = np.concatenate([instance_pc, grouped_points], axis=0)
                    #else:
                    #    grouping = None
                    inst_save_path = database_save_path / f'{frame_id}_class_{fg_cls:02d}_inst_{inst_count:06d}.npy'
                    np.save(inst_save_path, instance_pc.detach().cpu().numpy())
                    #vis.pointcloud(f'cls-{fg_cls}-inst-{inst_count}-support-{support_cls}',
                    #               torch.from_numpy(instance_pc[:, :3]), None, None, radius=3e-4,
                    #               color=vis._shared_color['seg-class-color'][fg_cls])
                    record = dict(
                        trans_z=trans.detach().cpu().numpy(),
                        grouping=grouping,
                        support=support_cls,
                        path=inst_save_path,
                        obj_class=fg_cls,
                        sample_idx=sample_idx,
                        sequence_name=sequence_name,
                        num_points=instance_pc.shape[0],
                        box3d=attaching_box,
                    )
                    instance_dict[fg_cls].append(record)
                    # Only the first matching support class is used.
                    break
    with open(db_info_save_path, 'wb') as f:
        pickle.dump(instance_dict, f)
    return instance_dict
| 41.704587
| 116
| 0.602006
|
4a0cbcfbb1426966d107a4206383a1494f885afb
| 18,798
|
py
|
Python
|
samples/openapi3/client/petstore/python/petstore_api/models/format_test.py
|
therockstorm/openapi-generator
|
01d0b5d4780ebe2d6025e2b443ec136c6ce16c45
|
[
"Apache-2.0"
] | 3
|
2021-04-09T01:04:32.000Z
|
2022-02-02T11:02:22.000Z
|
samples/openapi3/client/petstore/python/petstore_api/models/format_test.py
|
therockstorm/openapi-generator
|
01d0b5d4780ebe2d6025e2b443ec136c6ce16c45
|
[
"Apache-2.0"
] | 3
|
2021-05-11T23:55:26.000Z
|
2022-02-27T11:17:21.000Z
|
samples/openapi3/client/petstore/python/petstore_api/models/format_test.py
|
therockstorm/openapi-generator
|
01d0b5d4780ebe2d6025e2b443ec136c6ce16c45
|
[
"Apache-2.0"
] | 4
|
2020-12-07T02:43:58.000Z
|
2020-12-07T10:23:39.000Z
|
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import inspect
import pprint
import re # noqa: F401
import six
from petstore_api.configuration import Configuration
class FormatTest(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'integer': 'int',
'int32': 'int',
'int64': 'int',
'number': 'float',
'float': 'float',
'double': 'float',
'decimal': 'Decimal',
'string': 'str',
'byte': 'str',
'binary': 'file',
'date': 'date',
'date_time': 'datetime',
'uuid': 'str',
'password': 'str',
'pattern_with_digits': 'str',
'pattern_with_digits_and_delimiter': 'str'
}
attribute_map = {
'integer': 'integer',
'int32': 'int32',
'int64': 'int64',
'number': 'number',
'float': 'float',
'double': 'double',
'decimal': 'decimal',
'string': 'string',
'byte': 'byte',
'binary': 'binary',
'date': 'date',
'date_time': 'dateTime',
'uuid': 'uuid',
'password': 'password',
'pattern_with_digits': 'pattern_with_digits',
'pattern_with_digits_and_delimiter': 'pattern_with_digits_and_delimiter'
}
def __init__(self, integer=None, int32=None, int64=None, number=None, float=None, double=None, decimal=None, string=None, byte=None, binary=None, date=None, date_time=None, uuid=None, password=None, pattern_with_digits=None, pattern_with_digits_and_delimiter=None, local_vars_configuration=None): # noqa: E501
"""FormatTest - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._integer = None
self._int32 = None
self._int64 = None
self._number = None
self._float = None
self._double = None
self._decimal = None
self._string = None
self._byte = None
self._binary = None
self._date = None
self._date_time = None
self._uuid = None
self._password = None
self._pattern_with_digits = None
self._pattern_with_digits_and_delimiter = None
self.discriminator = None
if integer is not None:
self.integer = integer
if int32 is not None:
self.int32 = int32
if int64 is not None:
self.int64 = int64
self.number = number
if float is not None:
self.float = float
if double is not None:
self.double = double
if decimal is not None:
self.decimal = decimal
if string is not None:
self.string = string
self.byte = byte
if binary is not None:
self.binary = binary
self.date = date
if date_time is not None:
self.date_time = date_time
if uuid is not None:
self.uuid = uuid
self.password = password
if pattern_with_digits is not None:
self.pattern_with_digits = pattern_with_digits
if pattern_with_digits_and_delimiter is not None:
self.pattern_with_digits_and_delimiter = pattern_with_digits_and_delimiter
    # Generated accessor pairs: each setter enforces the OpenAPI schema
    # constraints, but only when client-side validation is enabled.
    @property
    def integer(self):
        """Gets the integer of this FormatTest. # noqa: E501
        :return: The integer of this FormatTest. # noqa: E501
        :rtype: int
        """
        return self._integer
    @integer.setter
    def integer(self, integer):
        """Sets the integer of this FormatTest.
        :param integer: The integer of this FormatTest. # noqa: E501
        :type integer: int
        """
        # OpenAPI constraint: 10 <= integer <= 100.
        if (self.local_vars_configuration.client_side_validation and
                integer is not None and integer > 100): # noqa: E501
            raise ValueError("Invalid value for `integer`, must be a value less than or equal to `100`") # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                integer is not None and integer < 10): # noqa: E501
            raise ValueError("Invalid value for `integer`, must be a value greater than or equal to `10`") # noqa: E501
        self._integer = integer
    @property
    def int32(self):
        """Gets the int32 of this FormatTest. # noqa: E501
        :return: The int32 of this FormatTest. # noqa: E501
        :rtype: int
        """
        return self._int32
    @int32.setter
    def int32(self, int32):
        """Sets the int32 of this FormatTest.
        :param int32: The int32 of this FormatTest. # noqa: E501
        :type int32: int
        """
        # OpenAPI constraint: 20 <= int32 <= 200.
        if (self.local_vars_configuration.client_side_validation and
                int32 is not None and int32 > 200): # noqa: E501
            raise ValueError("Invalid value for `int32`, must be a value less than or equal to `200`") # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                int32 is not None and int32 < 20): # noqa: E501
            raise ValueError("Invalid value for `int32`, must be a value greater than or equal to `20`") # noqa: E501
        self._int32 = int32
    @property
    def int64(self):
        """Gets the int64 of this FormatTest. # noqa: E501
        :return: The int64 of this FormatTest. # noqa: E501
        :rtype: int
        """
        return self._int64
    @int64.setter
    def int64(self, int64):
        """Sets the int64 of this FormatTest.
        :param int64: The int64 of this FormatTest. # noqa: E501
        :type int64: int
        """
        # No schema constraints on int64.
        self._int64 = int64
    @property
    def number(self):
        """Gets the number of this FormatTest. # noqa: E501
        :return: The number of this FormatTest. # noqa: E501
        :rtype: float
        """
        return self._number
    @number.setter
    def number(self, number):
        """Sets the number of this FormatTest.
        :param number: The number of this FormatTest. # noqa: E501
        :type number: float
        """
        # Required field: None is rejected when validating.
        if self.local_vars_configuration.client_side_validation and number is None: # noqa: E501
            raise ValueError("Invalid value for `number`, must not be `None`") # noqa: E501
        # OpenAPI constraint: 32.1 <= number <= 543.2.
        if (self.local_vars_configuration.client_side_validation and
                number is not None and number > 543.2): # noqa: E501
            raise ValueError("Invalid value for `number`, must be a value less than or equal to `543.2`") # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                number is not None and number < 32.1): # noqa: E501
            raise ValueError("Invalid value for `number`, must be a value greater than or equal to `32.1`") # noqa: E501
        self._number = number
    @property
    def float(self):
        """Gets the float of this FormatTest. # noqa: E501
        :return: The float of this FormatTest. # noqa: E501
        :rtype: float
        """
        return self._float
    @float.setter
    def float(self, float):
        """Sets the float of this FormatTest.
        :param float: The float of this FormatTest. # noqa: E501
        :type float: float
        """
        # OpenAPI constraint: 54.3 <= float <= 987.6.
        if (self.local_vars_configuration.client_side_validation and
                float is not None and float > 987.6): # noqa: E501
            raise ValueError("Invalid value for `float`, must be a value less than or equal to `987.6`") # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                float is not None and float < 54.3): # noqa: E501
            raise ValueError("Invalid value for `float`, must be a value greater than or equal to `54.3`") # noqa: E501
        self._float = float
    @property
    def double(self):
        """Gets the double of this FormatTest. # noqa: E501
        :return: The double of this FormatTest. # noqa: E501
        :rtype: float
        """
        return self._double
    @double.setter
    def double(self, double):
        """Sets the double of this FormatTest.
        :param double: The double of this FormatTest. # noqa: E501
        :type double: float
        """
        # OpenAPI constraint: 67.8 <= double <= 123.4.
        if (self.local_vars_configuration.client_side_validation and
                double is not None and double > 123.4): # noqa: E501
            raise ValueError("Invalid value for `double`, must be a value less than or equal to `123.4`") # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                double is not None and double < 67.8): # noqa: E501
            raise ValueError("Invalid value for `double`, must be a value greater than or equal to `67.8`") # noqa: E501
        self._double = double
    @property
    def decimal(self):
        """Gets the decimal of this FormatTest. # noqa: E501
        :return: The decimal of this FormatTest. # noqa: E501
        :rtype: Decimal
        """
        return self._decimal
    @decimal.setter
    def decimal(self, decimal):
        """Sets the decimal of this FormatTest.
        :param decimal: The decimal of this FormatTest. # noqa: E501
        :type decimal: Decimal
        """
        # No schema constraints on decimal.
        self._decimal = decimal
    @property
    def string(self):
        """Gets the string of this FormatTest. # noqa: E501
        :return: The string of this FormatTest. # noqa: E501
        :rtype: str
        """
        return self._string
    @string.setter
    def string(self, string):
        """Sets the string of this FormatTest.
        :param string: The string of this FormatTest. # noqa: E501
        :type string: str
        """
        # OpenAPI pattern: must contain a letter (case-insensitive /[a-z]/i).
        if (self.local_vars_configuration.client_side_validation and
                string is not None and not re.search(r'[a-z]', string, flags=re.IGNORECASE)): # noqa: E501
            raise ValueError(r"Invalid value for `string`, must be a follow pattern or equal to `/[a-z]/i`") # noqa: E501
        self._string = string
    @property
    def byte(self):
        """Gets the byte of this FormatTest. # noqa: E501
        :return: The byte of this FormatTest. # noqa: E501
        :rtype: str
        """
        return self._byte
    @byte.setter
    def byte(self, byte):
        """Sets the byte of this FormatTest.
        :param byte: The byte of this FormatTest. # noqa: E501
        :type byte: str
        """
        # Required field: None is rejected when validating.
        if self.local_vars_configuration.client_side_validation and byte is None: # noqa: E501
            raise ValueError("Invalid value for `byte`, must not be `None`") # noqa: E501
        self._byte = byte
    @property
    def binary(self):
        """Gets the binary of this FormatTest. # noqa: E501
        :return: The binary of this FormatTest. # noqa: E501
        :rtype: file
        """
        return self._binary
    @binary.setter
    def binary(self, binary):
        """Sets the binary of this FormatTest.
        :param binary: The binary of this FormatTest. # noqa: E501
        :type binary: file
        """
        # No schema constraints on binary.
        self._binary = binary
    @property
    def date(self):
        """Gets the date of this FormatTest. # noqa: E501
        :return: The date of this FormatTest. # noqa: E501
        :rtype: date
        """
        return self._date
    @date.setter
    def date(self, date):
        """Sets the date of this FormatTest.
        :param date: The date of this FormatTest. # noqa: E501
        :type date: date
        """
        # Required field: None is rejected when validating.
        if self.local_vars_configuration.client_side_validation and date is None: # noqa: E501
            raise ValueError("Invalid value for `date`, must not be `None`") # noqa: E501
        self._date = date
    @property
    def date_time(self):
        """Gets the date_time of this FormatTest. # noqa: E501
        :return: The date_time of this FormatTest. # noqa: E501
        :rtype: datetime
        """
        return self._date_time
    @date_time.setter
    def date_time(self, date_time):
        """Sets the date_time of this FormatTest.
        :param date_time: The date_time of this FormatTest. # noqa: E501
        :type date_time: datetime
        """
        # No schema constraints on date_time.
        self._date_time = date_time
    @property
    def uuid(self):
        """Gets the uuid of this FormatTest. # noqa: E501
        :return: The uuid of this FormatTest. # noqa: E501
        :rtype: str
        """
        return self._uuid
    @uuid.setter
    def uuid(self, uuid):
        """Sets the uuid of this FormatTest.
        :param uuid: The uuid of this FormatTest. # noqa: E501
        :type uuid: str
        """
        # No schema constraints on uuid (format is not validated client-side).
        self._uuid = uuid
    @property
    def password(self):
        """Gets the password of this FormatTest. # noqa: E501
        :return: The password of this FormatTest. # noqa: E501
        :rtype: str
        """
        return self._password
    @password.setter
    def password(self, password):
        """Sets the password of this FormatTest.
        :param password: The password of this FormatTest. # noqa: E501
        :type password: str
        """
        # Required field with length constraint: 10 <= len(password) <= 64.
        if self.local_vars_configuration.client_side_validation and password is None: # noqa: E501
            raise ValueError("Invalid value for `password`, must not be `None`") # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                password is not None and len(password) > 64):
            raise ValueError("Invalid value for `password`, length must be less than or equal to `64`") # noqa: E501
        if (self.local_vars_configuration.client_side_validation and
                password is not None and len(password) < 10):
            raise ValueError("Invalid value for `password`, length must be greater than or equal to `10`") # noqa: E501
        self._password = password
    @property
    def pattern_with_digits(self):
        """Gets the pattern_with_digits of this FormatTest. # noqa: E501
        A string that is a 10 digit number. Can have leading zeros. # noqa: E501
        :return: The pattern_with_digits of this FormatTest. # noqa: E501
        :rtype: str
        """
        return self._pattern_with_digits
    @pattern_with_digits.setter
    def pattern_with_digits(self, pattern_with_digits):
        """Sets the pattern_with_digits of this FormatTest.
        A string that is a 10 digit number. Can have leading zeros. # noqa: E501
        :param pattern_with_digits: The pattern_with_digits of this FormatTest. # noqa: E501
        :type pattern_with_digits: str
        """
        # OpenAPI pattern: exactly ten digits.
        if (self.local_vars_configuration.client_side_validation and
                pattern_with_digits is not None and not re.search(r'^\d{10}$', pattern_with_digits)): # noqa: E501
            raise ValueError(r"Invalid value for `pattern_with_digits`, must be a follow pattern or equal to `/^\d{10}$/`") # noqa: E501
        self._pattern_with_digits = pattern_with_digits
    @property
    def pattern_with_digits_and_delimiter(self):
        """Gets the pattern_with_digits_and_delimiter of this FormatTest. # noqa: E501
        A string starting with 'image_' (case insensitive) and one to three digits following i.e. Image_01. # noqa: E501
        :return: The pattern_with_digits_and_delimiter of this FormatTest. # noqa: E501
        :rtype: str
        """
        return self._pattern_with_digits_and_delimiter
    @pattern_with_digits_and_delimiter.setter
    def pattern_with_digits_and_delimiter(self, pattern_with_digits_and_delimiter):
        """Sets the pattern_with_digits_and_delimiter of this FormatTest.
        A string starting with 'image_' (case insensitive) and one to three digits following i.e. Image_01. # noqa: E501
        :param pattern_with_digits_and_delimiter: The pattern_with_digits_and_delimiter of this FormatTest. # noqa: E501
        :type pattern_with_digits_and_delimiter: str
        """
        # OpenAPI pattern: 'image_' prefix (case-insensitive) plus 1-3 digits.
        if (self.local_vars_configuration.client_side_validation and
                pattern_with_digits_and_delimiter is not None and not re.search(r'^image_\d{1,3}$', pattern_with_digits_and_delimiter, flags=re.IGNORECASE)): # noqa: E501
            raise ValueError(r"Invalid value for `pattern_with_digits_and_delimiter`, must be a follow pattern or equal to `/^image_\d{1,3}$/i`") # noqa: E501
        self._pattern_with_digits_and_delimiter = pattern_with_digits_and_delimiter
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = inspect.getargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FormatTest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, FormatTest):
return True
return self.to_dict() != other.to_dict()
| 32.863636
| 314
| 0.603468
|
4a0cbd057adf70501146be9be6a7cb3b5901b311
| 2,331
|
py
|
Python
|
src/datamgr/datamanager/lifecycle/metrics/sort_metric.py
|
Chromico/bk-base
|
be822d9bbee544a958bed4831348185a75604791
|
[
"MIT"
] | 84
|
2021-06-30T06:20:23.000Z
|
2022-03-22T03:05:49.000Z
|
src/datamgr/datamanager/lifecycle/metrics/sort_metric.py
|
Chromico/bk-base
|
be822d9bbee544a958bed4831348185a75604791
|
[
"MIT"
] | 7
|
2021-06-30T06:21:16.000Z
|
2022-03-29T07:36:13.000Z
|
src/datamgr/datamanager/lifecycle/metrics/sort_metric.py
|
Chromico/bk-base
|
be822d9bbee544a958bed4831348185a75604791
|
[
"MIT"
] | 40
|
2021-06-30T06:21:26.000Z
|
2022-03-29T12:42:26.000Z
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import, print_function, unicode_literals
import pandas as pd
def score_ranking(score_dict):
    """Rank datasets by score using pandas.

    :param score_dict: mapping of dataset id to score, e.g.
        ``{'591_sum_test_0601': 13.1, '591_b_tpg7': 13.1, '591_tdw_ltpg6': 14.14}``
    :return: DataFrame with rows ordered by ascending score, e.g.
        pd.DataFrame([['591_sum_test_0601', 13.10, 2.0, 0.6667],
                      ['591_b_tpg7', 13.10, 2.0, 0.6667],
                      ['591_tdw_ltpg6', 14.14, 3.0, 1.0]],
                     columns=['dataset_id', 'score', 'ranking', 'ranking_perct'])
        ``ranking`` uses the "max" method (ties share the highest rank) and
        ``ranking_perct`` is the rank divided by the number of datasets.
    """
    # Sort ascending by score; ties keep insertion order (sorted is stable).
    sorted_pairs = sorted(score_dict.items(), key=lambda item: item[1])
    if sorted_pairs:
        # Unzip the (id, score) pairs into two parallel columns.
        dataset_id_list, score_list = (list(col) for col in zip(*sorted_pairs))
    else:
        # zip(*[]) cannot be unpacked, so handle the empty input explicitly.
        dataset_id_list, score_list = [], []
    df = pd.DataFrame(data={"dataset_id": dataset_id_list, "score": score_list})
    df["ranking"] = df["score"].rank(method="max")
    df["ranking_perct"] = df["ranking"] / len(df)
    return df
| 49.595745
| 111
| 0.716002
|
4a0cbdc43d5fc212d5f89ac52a4ee85d0f4b5a16
| 2,662
|
py
|
Python
|
model_compiler/src/model_compiler/compilers/paddle_model_file_to_onnx_model.py
|
yuanliya/Adlik
|
602074b44064002fc0bb054e17a989a5bcf22e92
|
[
"Apache-2.0"
] | 548
|
2019-09-27T07:37:47.000Z
|
2022-03-31T05:12:38.000Z
|
model_compiler/src/model_compiler/compilers/paddle_model_file_to_onnx_model.py
|
yuanliya/Adlik
|
602074b44064002fc0bb054e17a989a5bcf22e92
|
[
"Apache-2.0"
] | 533
|
2019-09-27T06:30:41.000Z
|
2022-03-29T07:34:08.000Z
|
model_compiler/src/model_compiler/compilers/paddle_model_file_to_onnx_model.py
|
yuanliya/Adlik
|
602074b44064002fc0bb054e17a989a5bcf22e92
|
[
"Apache-2.0"
] | 54
|
2019-10-10T02:19:31.000Z
|
2021-12-28T03:37:45.000Z
|
# Copyright 2019 ZTE corporation. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from typing import Any, Mapping, NamedTuple, Optional, Sequence
from tempfile import NamedTemporaryFile
import onnx
import onnx.utils
from . import repository
from .. import utilities
from ..models.data_format import DataFormat
from ..models.irs.onnx_model import OnnxModel
from ..models.sources.paddle_model_file import PaddlePaddleModelFile
class Config(NamedTuple):
    """Settings for compiling a PaddlePaddle model file to ONNX.

    Attributes:
        input_formats: optional data format hint per model input.
        model_filename: model file name inside the model directory.
        params_filename: parameters file name inside the model directory.
        opset_version: ONNX opset to export with (default: 9).
        enable_onnx_checker: whether paddle2onnx should run the ONNX checker
            (default: False).
    """

    input_formats: Sequence[Optional[DataFormat]]
    model_filename: Optional[str]
    params_filename: Optional[str]
    opset_version: int
    enable_onnx_checker: bool

    @staticmethod
    def from_json(value: Mapping[str, Any]) -> 'Config':
        """Build a `Config` from a parsed-JSON mapping."""
        return Config(input_formats=utilities.get_data_formats(value.get('input_formats')),
                      model_filename=value.get('model_filename'),
                      params_filename=value.get('params_filename'),
                      # Missing/None/0 all fall back to the default opset 9.
                      opset_version=int(value.get('opset_version') or 9),
                      # JSON carries a real boolean; absent/None/False -> False.
                      enable_onnx_checker=bool(value.get('enable_onnx_checker')))

    @staticmethod
    def from_env(env: Mapping[str, str]) -> 'Config':
        """Build a `Config` from environment variables (values are strings)."""
        # FIX: the flag must be parsed from its text.  The previous
        # ``bool(env['ENABLE_ONNX_CHECKER'])`` was True for ANY non-empty
        # string, including 'False' and '0'.
        checker_flag = (env.get('ENABLE_ONNX_CHECKER') or '').strip().lower()
        return Config(input_formats=utilities.get_data_formats(utilities.split_by(env.get('INPUT_FORMATS'), ',')),
                      model_filename=env.get('MODEL_FILENAME'),
                      params_filename=env.get('PARAMS_FILENAME'),
                      opset_version=int(env.get('OPSET_VERSION') or 9),
                      enable_onnx_checker=checker_flag in ('1', 'true', 'yes', 'on'))
@repository.REPOSITORY.register(source_type=PaddlePaddleModelFile, target_type=OnnxModel, config_type=Config)
def compile_source(source: PaddlePaddleModelFile, config: Config) -> OnnxModel:
    """Convert a PaddlePaddle model directory into an in-memory ONNX model."""
    # Imported lazily so importing this module does not require paddle2onnx.
    from paddle2onnx.command import program2onnx # pylint: disable=import-outside-toplevel
    # Export to a temporary .onnx file, then load it back as a model proto.
    # NOTE(review): reopening a NamedTemporaryFile by name while it is open is
    # not portable to Windows -- presumably this only runs on POSIX; confirm.
    # NOTE(review): config.enable_onnx_checker is not forwarded to
    # program2onnx here -- verify whether that is intentional.
    with NamedTemporaryFile(suffix='.onnx') as onnx_file:
        program2onnx(model_dir=source.model_path,
                     save_file=onnx_file.name,
                     model_filename=config.model_filename,
                     params_filename=config.params_filename,
                     opset_version=config.opset_version)
        onnx_model = onnx.load(onnx_file.name)
    graph = onnx_model.graph # pylint: disable=no-member
    return OnnxModel(model_proto=onnx_model,
                     input_data_formats=utilities.get_onnx_model_input_data_formats(graph, config.input_formats))
| 46.701754
| 119
| 0.684823
|
4a0cbe3b0cbfe412c0dc69911a57b4921a86aee6
| 6,865
|
py
|
Python
|
homeassistant/components/overkiz/cover_entities/generic_cover.py
|
mtarjoianu/core
|
44e9146463ac505eb3d1c0651ad126cb25c28a54
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/overkiz/cover_entities/generic_cover.py
|
mtarjoianu/core
|
44e9146463ac505eb3d1c0651ad126cb25c28a54
|
[
"Apache-2.0"
] | 12
|
2021-12-16T06:18:49.000Z
|
2022-03-31T06:25:54.000Z
|
homeassistant/components/overkiz/cover_entities/generic_cover.py
|
mtarjoianu/core
|
44e9146463ac505eb3d1c0651ad126cb25c28a54
|
[
"Apache-2.0"
] | 1
|
2021-12-10T10:33:28.000Z
|
2021-12-10T10:33:28.000Z
|
"""Base class for Overkiz covers, shutters, awnings, etc."""
from __future__ import annotations
from collections.abc import Mapping
from typing import Any, cast
from pyoverkiz.enums import OverkizCommand, OverkizCommandParam, OverkizState
from homeassistant.components.cover import (
ATTR_TILT_POSITION,
CoverEntity,
CoverEntityFeature,
)
from homeassistant.components.overkiz.entity import OverkizEntity
# State attribute consumed by HomeKit for obstruction detection.
ATTR_OBSTRUCTION_DETECTED = "obstruction-detected"
# Candidate commands per action; the first one a device supports is used.
# Commands that halt cover movement (MY also serves as a stop here).
COMMANDS_STOP: list[OverkizCommand] = [
    OverkizCommand.STOP,
    OverkizCommand.MY,
]
COMMANDS_STOP_TILT: list[OverkizCommand] = [
    OverkizCommand.STOP,
    OverkizCommand.MY,
]
# CYCLE appears in both open and close lists: it toggles between states.
COMMANDS_OPEN: list[OverkizCommand] = [
    OverkizCommand.OPEN,
    OverkizCommand.UP,
    OverkizCommand.CYCLE,
]
COMMANDS_OPEN_TILT: list[OverkizCommand] = [OverkizCommand.OPEN_SLATS]
COMMANDS_CLOSE: list[OverkizCommand] = [
    OverkizCommand.CLOSE,
    OverkizCommand.DOWN,
    OverkizCommand.CYCLE,
]
COMMANDS_CLOSE_TILT: list[OverkizCommand] = [OverkizCommand.CLOSE_SLATS]
COMMANDS_SET_TILT_POSITION: list[OverkizCommand] = [OverkizCommand.SET_ORIENTATION]
class OverkizGenericCover(OverkizEntity, CoverEntity):
    """Representation of an Overkiz Cover."""
    @property
    def current_cover_tilt_position(self) -> int | None:
        """Return current position of cover tilt.
        None is unknown, 0 is closed, 100 is fully open.
        """
        position = self.executor.select_state(
            OverkizState.CORE_SLATS_ORIENTATION, OverkizState.CORE_SLATE_ORIENTATION
        )
        if position is not None:
            # Overkiz orientation runs opposite to Home Assistant's scale,
            # hence the inversion.
            return 100 - cast(int, position)
        return None
    async def async_set_cover_tilt_position(self, **kwargs: Any) -> None:
        """Move the cover tilt to a specific position."""
        # No-op when the device supports none of the set-orientation commands.
        if command := self.executor.select_command(*COMMANDS_SET_TILT_POSITION):
            await self.executor.async_execute_command(
                command,
                # Invert back from HA's scale to Overkiz orientation.
                100 - kwargs[ATTR_TILT_POSITION],
            )
    @property
    def is_closed(self) -> bool | None:
        """Return if the cover is closed."""
        # Prefer an explicit open/closed state if the device reports one.
        state = self.executor.select_state(
            OverkizState.CORE_OPEN_CLOSED,
            OverkizState.CORE_SLATS_OPEN_CLOSED,
            OverkizState.CORE_OPEN_CLOSED_PARTIAL,
            OverkizState.CORE_OPEN_CLOSED_PEDESTRIAN,
            OverkizState.CORE_OPEN_CLOSED_UNKNOWN,
            OverkizState.MYFOX_SHUTTER_STATUS,
        )
        if state is not None:
            return state == OverkizCommandParam.CLOSED
        # Keep this condition after the previous one. Some device like the pedestrian gate, always return 50 as position.
        if self.current_cover_position is not None:
            return self.current_cover_position == 0
        # Fall back to the tilt position when no linear position exists.
        if self.current_cover_tilt_position is not None:
            return self.current_cover_tilt_position == 0
        return None
    async def async_open_cover_tilt(self, **kwargs: Any) -> None:
        """Open the cover tilt."""
        if command := self.executor.select_command(*COMMANDS_OPEN_TILT):
            await self.executor.async_execute_command(command)
    async def async_close_cover_tilt(self, **kwargs: Any) -> None:
        """Close the cover tilt."""
        if command := self.executor.select_command(*COMMANDS_CLOSE_TILT):
            await self.executor.async_execute_command(command)
    async def async_stop_cover(self, **kwargs: Any) -> None:
        """Stop the cover."""
        if command := self.executor.select_command(*COMMANDS_STOP):
            await self.executor.async_execute_command(command)
    async def async_stop_cover_tilt(self, **kwargs: Any) -> None:
        """Stop the cover tilt."""
        if command := self.executor.select_command(*COMMANDS_STOP_TILT):
            await self.executor.async_execute_command(command)
    @property
    def is_opening(self) -> bool | None:
        """Return if the cover is opening or not."""
        if self.assumed_state:
            return None
        # Check if cover movement execution is currently running
        if any(
            execution.get("device_url") == self.device.device_url
            and execution.get("command_name") in COMMANDS_OPEN + COMMANDS_OPEN_TILT
            for execution in self.coordinator.executions.values()
        ):
            return True
        # Check if cover is moving based on current state
        is_moving = self.device.states.get(OverkizState.CORE_MOVING)
        current_closure = self.device.states.get(OverkizState.CORE_CLOSURE)
        target_closure = self.device.states.get(OverkizState.CORE_TARGET_CLOSURE)
        # Unknown when any of the three states is missing/falsy.
        if not is_moving or not current_closure or not target_closure:
            return None
        # Closure decreasing toward its target means the cover is opening.
        return cast(int, current_closure.value) > cast(int, target_closure.value)
    @property
    def is_closing(self) -> bool | None:
        """Return if the cover is closing or not."""
        if self.assumed_state:
            return None
        # Check if cover movement execution is currently running
        if any(
            execution.get("device_url") == self.device.device_url
            and execution.get("command_name") in COMMANDS_CLOSE + COMMANDS_CLOSE_TILT
            for execution in self.coordinator.executions.values()
        ):
            return True
        # Check if cover is moving based on current state
        is_moving = self.device.states.get(OverkizState.CORE_MOVING)
        current_closure = self.device.states.get(OverkizState.CORE_CLOSURE)
        target_closure = self.device.states.get(OverkizState.CORE_TARGET_CLOSURE)
        if not is_moving or not current_closure or not target_closure:
            return None
        # Closure increasing toward its target means the cover is closing.
        return cast(int, current_closure.value) < cast(int, target_closure.value)
    @property
    def extra_state_attributes(self) -> Mapping[str, Any] | None:
        """Return the device state attributes."""
        attr = super().extra_state_attributes or {}
        # Obstruction Detected attribute is used by HomeKit
        if self.executor.has_state(OverkizState.IO_PRIORITY_LOCK_LEVEL):
            return {**attr, **{ATTR_OBSTRUCTION_DETECTED: True}}
        return attr
    @property
    def supported_features(self) -> int:
        """Flag supported features."""
        # Only tilt-related features are derived here; other features
        # (open/close/position) are presumably contributed by subclasses.
        supported_features = 0
        if self.executor.has_command(*COMMANDS_OPEN_TILT):
            supported_features |= CoverEntityFeature.OPEN_TILT
            if self.executor.has_command(*COMMANDS_STOP_TILT):
                supported_features |= CoverEntityFeature.STOP_TILT
        if self.executor.has_command(*COMMANDS_CLOSE_TILT):
            supported_features |= CoverEntityFeature.CLOSE_TILT
        if self.executor.has_command(*COMMANDS_SET_TILT_POSITION):
            supported_features |= CoverEntityFeature.SET_TILT_POSITION
        return supported_features
| 36.131579
| 121
| 0.682739
|
4a0cbf13218ef7ca5d7c98eab9cb765802096efc
| 20,686
|
py
|
Python
|
nengo/dists.py
|
HugoChateauLaurent/nengo
|
749893186ee09aa6c621a40da3ffd3878114db9c
|
[
"BSD-2-Clause"
] | null | null | null |
nengo/dists.py
|
HugoChateauLaurent/nengo
|
749893186ee09aa6c621a40da3ffd3878114db9c
|
[
"BSD-2-Clause"
] | null | null | null |
nengo/dists.py
|
HugoChateauLaurent/nengo
|
749893186ee09aa6c621a40da3ffd3878114db9c
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import absolute_import
import warnings
import numpy as np
from nengo.exceptions import ValidationError
from nengo.params import (BoolParam, IntParam, NdarrayParam, NumberParam,
Parameter, Unconfigurable, FrozenObject)
import nengo.utils.numpy as npext
class Distribution(FrozenObject):
    """A base class for probability distributions.
    The only thing that a probability distribution needs to define is a
    `.Distribution.sample` method. This base class ensures that all
    distributions accept the same arguments for the sample function.
    """
    def _sample_shape(self, n, d=None):
        """Return the output shape for the sample method: (n,) or (n, d)."""
        return (n,) if d is None else (n, d)
    def sample(self, n, d=None, rng=np.random):
        """Samples the distribution.
        Parameters
        ----------
        n : int
            Number samples to take.
        d : int or None, optional (Default: None)
            The number of dimensions to return. If this is an int, the return
            value will be of shape ``(n, d)``. If None, the return
            value will be of shape ``(n,)``.
        rng : `numpy.random.RandomState`, optional
            Random number generator state.
        Returns
        -------
        samples : (n,) or (n, d) array_like
            Samples as a 1d or 2d array depending on ``d``. The second
            dimension enumerates the dimensions of the process.
        """
        # Abstract: every concrete distribution must override this.
        raise NotImplementedError("Distributions should implement sample.")
class PDF(Distribution):
    """An arbitrary distribution from a PDF.
    Parameters
    ----------
    x : vector_like (n,)
        Values of the points to sample from (interpolated).
    p : vector_like (n,)
        Probabilities of the ``x`` points.
    """
    x = NdarrayParam('x', shape='*')
    p = NdarrayParam('p', shape='*')
    def __init__(self, x, p):
        super(PDF, self).__init__()
        # The probabilities must form a valid PDF (sum to 1 within tolerance).
        psum = np.sum(p)
        if np.abs(psum - 1) > 1e-8:
            raise ValidationError(
                "PDF must sum to one (sums to %f)" % psum, attr='p', obj=self)
        self.x = x
        self.p = p
        if len(self.x) != len(self.p):
            raise ValidationError(
                "`x` and `p` must be the same length", attr='p', obj=self)
        # Build a midpoint CDF: equivalent to prepending 0 to cumsum(p) and
        # taking cdf = 0.5 * (cumsum[:-1] + cumsum[1:]), done in place so each
        # x point sits at the centre of its probability mass for interpolation.
        cumsum = np.cumsum(p)
        cumsum *= 0.5
        cumsum[1:] = cumsum[:-1] + cumsum[1:]
        self.cdf = cumsum
    def __repr__(self):
        return "PDF(x=%r, p=%r)" % (self.x, self.p)
    def sample(self, n, d=None, rng=np.random):
        # Inverse-transform sampling: draw uniforms and map them back through
        # the (interpolated) CDF.
        shape = self._sample_shape(n, d)
        return np.interp(rng.uniform(size=shape), self.cdf, self.x)
class Uniform(Distribution):
    """A uniform distribution over ``[low, high)``.

    Every scalar in the half-open interval is equally likely.  The order of
    the bounds does not matter: ``low`` is always the closed end and ``high``
    the open end, even when ``low > high``.

    Parameters
    ----------
    low : Number
        The closed lower bound of the uniform distribution; samples >= low
    high : Number
        The open upper bound of the uniform distribution; samples < high
    integer : boolean, optional (Default: False)
        If true, sample from a uniform distribution of integers. In this case,
        low and high should be integers.
    """
    low = NumberParam('low')
    high = NumberParam('high')
    integer = BoolParam('integer')

    def __init__(self, low, high, integer=False):
        super(Uniform, self).__init__()
        self.low = low
        self.high = high
        self.integer = integer

    def __repr__(self):
        # Only show ``integer`` when it differs from the default.
        suffix = ", integer=True" if self.integer else ""
        return "Uniform(low=%r, high=%r%s)" % (self.low, self.high, suffix)

    def sample(self, n, d=None, rng=np.random):
        """Draw ``n`` (or ``n x d``) uniformly distributed samples."""
        shape = self._sample_shape(n, d)
        # Integer and continuous samplers share the same bound arguments.
        draw = rng.randint if self.integer else rng.uniform
        return draw(low=self.low, high=self.high, size=shape)
class Gaussian(Distribution):
    """A Gaussian (normal) distribution.

    A bell curve centred at ``mean`` with spread given by the standard
    deviation ``std``.

    Parameters
    ----------
    mean : Number
        The mean of the Gaussian.
    std : Number
        The standard deviation of the Gaussian.

    Raises
    ------
    ValidationError if std is <= 0
    """
    mean = NumberParam('mean')
    std = NumberParam('std', low=0, low_open=True)

    def __init__(self, mean, std):
        super(Gaussian, self).__init__()
        self.mean = mean
        self.std = std

    def __repr__(self):
        return "Gaussian(mean=%r, std=%r)" % (self.mean, self.std)

    def sample(self, n, d=None, rng=np.random):
        """Draw ``n`` (or ``n x d``) normally distributed samples."""
        return rng.normal(loc=self.mean, scale=self.std,
                          size=self._sample_shape(n, d))
class Exponential(Distribution):
    """An exponential distribution (optionally with high values clipped).
    If ``high`` is left to its default value of infinity, this is a standard
    exponential distribution. If ``high`` is set, then any sampled values at
    or above ``high`` will be clipped so they are slightly below ``high``.
    This is useful for thresholding and, by extension,
    `.networks.AssociativeMemory`.
    The probability distribution function (PDF) is given by::
               |  0                                 if x < shift
        p(x) = | 1/scale * exp(-(x - shift)/scale)  if x >= shift and x < high
               |  n                                 if x == high - eps
               |  0                                 if x >= high
    where ``n`` is such that the PDF integrates to one, and ``eps`` is an
    infintesimally small number such that samples of ``x`` are strictly less
    than ``high`` (in practice, ``eps`` depends on floating point precision).
    Parameters
    ----------
    scale : float
        The scale parameter (inverse of the rate parameter lambda). Larger
        values make the distribution narrower (sharper peak).
    shift : float, optional (Default: 0)
        Amount to shift the distribution by. There will be no values smaller
        than this shift when sampling from the distribution.
    high : float, optional (Default: np.inf)
        All values larger than or equal to this value will be clipped to
        slightly less than this value.
    """
    scale = NumberParam('scale', low=0, low_open=True)
    shift = NumberParam('shift')
    high = NumberParam('high')
    def __init__(self, scale, shift=0., high=np.inf):
        super(Exponential, self).__init__()
        self.scale = scale
        self.shift = shift
        self.high = high
    def sample(self, n, d=None, rng=np.random):
        shape = self._sample_shape(n, d)
        x = rng.exponential(self.scale, shape) + self.shift
        # nextafter gives the largest float strictly below ``high`` (in x's
        # dtype), so clipped samples stay strictly less than ``high``.
        high = np.nextafter(self.high, np.asarray(-np.inf, dtype=x.dtype))
        return np.clip(x, self.shift, high)
class UniformHypersphere(Distribution):
    """Uniform distribution on or in an n-dimensional unit hypersphere.
    Sample points are uniformly distributed across the volume (default) or
    surface of an n-dimensional unit hypersphere.
    Parameters
    ----------
    surface : bool, optional (Default: False)
        Whether sample points should be distributed uniformly
        over the surface of the hyperphere (True),
        or within the hypersphere (False).
    min_magnitude : Number, optional (Default: 0)
        Lower bound on the returned vector magnitudes (such that they are in
        the range ``[min_magnitude, 1]``). Must be in the range [0, 1).
        Ignored if ``surface`` is ``True``.
    """
    surface = BoolParam('surface')
    min_magnitude = NumberParam('min_magnitude', low=0, high=1, high_open=True)
    def __init__(self, surface=False, min_magnitude=0):
        super(UniformHypersphere, self).__init__()
        # min_magnitude has no effect on surface sampling; warn rather than fail.
        if surface and min_magnitude > 0:
            warnings.warn("min_magnitude ignored because surface is True")
        self.surface = surface
        self.min_magnitude = min_magnitude
    def __repr__(self):
        # Only include non-default arguments in the repr.
        args = []
        if self.surface:
            args.append("surface=%s" % self.surface)
        if self.min_magnitude > 0:
            args.append("min_magnitude=%r" % self.min_magnitude)
        return "%s(%s)" % (type(self).__name__, ', '.join(args))
    def sample(self, n, d=None, rng=np.random):
        if d is None or d < 1:  # check this, since other dists allow d = None
            raise ValidationError("Dimensions must be a positive integer", 'd')
        # Normalized Gaussian vectors are uniformly distributed on the sphere.
        samples = rng.randn(n, d)
        samples /= npext.norm(samples, axis=1, keepdims=True)
        if self.surface:
            return samples
        # Generate magnitudes for vectors from uniform distribution.
        # The (1 / d) exponent ensures that samples are uniformly distributed
        # in n-space and not all bunched up at the centre of the sphere.
        samples *= rng.uniform(
            low=self.min_magnitude**d, high=1, size=(n, 1)) ** (1. / d)
        return samples
class Choice(Distribution):
    """Discrete distribution across a set of possible values.

    The same as `numpy.random.choice`, except can take vector or matrix values
    for the choices.

    Parameters
    ----------
    options : (N, ...) array_like
        The options (choices) to choose between. The choice is always done
        along the first axis, so if ``options`` is a matrix, the options are
        the rows of that matrix.
    weights : (N,) array_like, optional (Default: None)
        Weights controlling the probability of selecting each option. Will
        automatically be normalized. If None, weights are uniformly
        distributed.
    """
    options = NdarrayParam('options', shape=('*', '...'))
    # BUG FIX: the shape spec must be the one-element tuple ('*',); the
    # original ``('*')`` is just the string '*', not a tuple.
    weights = NdarrayParam('weights', shape=('*',), optional=True)
    def __init__(self, options, weights=None):
        super(Choice, self).__init__()
        self.options = options
        self.weights = weights
        # Fall back to uniform weights when none were supplied.
        weights = (np.ones(len(self.options)) if self.weights is None else
                   self.weights)
        if len(weights) != len(self.options):
            raise ValidationError(
                "Number of weights (%d) must match number of options (%d)"
                % (len(weights), len(self.options)), attr='weights', obj=self)
        if not all(weights >= 0):
            raise ValidationError("All weights must be non-negative",
                                  attr='weights', obj=self)
        total = float(weights.sum())
        if total <= 0:
            raise ValidationError("Sum of weights must be positive (got %f)"
                                  % total, attr='weights', obj=self)
        # Normalized selection probabilities used by ``sample``.
        self.p = weights / total
    def __repr__(self):
        return "Choice(options=%r%s)" % (
            self.options,
            "" if self.weights is None else ", weights=%r" % self.weights)
    @property
    def dimensions(self):
        # Flattened size of a single option.
        return np.prod(self.options.shape[1:])
    def sample(self, n, d=None, rng=np.random):
        if d is not None and self.dimensions != d:
            raise ValidationError("Options must be of dimensionality %d "
                                  "(got %d)" % (d, self.dimensions),
                                  attr='options', obj=self)
        # Inverse-CDF sampling: map uniform draws through the cumulative
        # probabilities to pick option indices.
        i = np.searchsorted(np.cumsum(self.p), rng.rand(n))
        return self.options[i]
class Samples(Distribution):
    """A set of samples.
    This class is a subclass of `.Distribution` so that it can be used in any
    situation that calls for a `.Distribution`. However, the call to
    `.Distribution.sample` must match the dimensions of the samples or
    a `.ValidationError` will be raised.
    Parameters
    ----------
    samples : (n, d) array_like
        ``n`` and ``d`` must match what is eventually passed to
        `.Distribution.sample`.
    """
    samples = NdarrayParam('samples', shape=('...',))
    def __init__(self, samples):
        super(Samples, self).__init__()
        self.samples = samples
    def __repr__(self):
        return "Samples(samples=%r)" % (self.samples,)
    def sample(self, n, d=None, rng=np.random):
        """Return the stored samples, validating the requested (n, d) shape.

        ``rng`` is accepted for interface compatibility but never used: the
        returned values are always the stored samples.
        """
        samples = np.array(self.samples)
        shape = (n,) if d is None else (n, d)
        # With no dimensionality requested, drop singleton axes so e.g. a
        # stored (n, 1) column still satisfies a 1-D request.
        if d is None:
            samples = samples.squeeze()
        # A 1-D store with an explicit d is promoted to (n, 1).
        if d is not None and samples.ndim == 1:
            samples = samples[..., np.newaxis]
        if samples.shape[0] != shape[0]:
            raise ValidationError("Wrong number of samples requested; got "
                                  "%d, should be %d" % (n, samples.shape[0]),
                                  attr='samples', obj=self)
        elif d is None and len(samples.shape) != 1:
            raise ValidationError("Wrong sample dimensionality requested; got "
                                  "'None', should be %d" % (samples.shape[1],),
                                  attr='samples', obj=self)
        elif d is not None and samples.shape[1] != shape[1]:
            raise ValidationError("Wrong sample dimensionality requested; got "
                                  "%d, should be %d" % (d, samples.shape[1]),
                                  attr='samples', obj=self)
        return samples
class SqrtBeta(Distribution):
    """Distribution of the square root of a Beta distributed random variable.
    Given ``n + m`` dimensional random unit vectors, the length of subvectors
    with ``m`` elements will be distributed according to this distribution.
    Parameters
    ----------
    n: int
        Number of subvectors.
    m: int, optional (Default: 1)
        Length of each subvector.
    See also
    --------
    nengo.dists.SubvectorLength
    """
    n = IntParam('n', low=0)
    m = IntParam('m', low=0)
    def __init__(self, n, m=1):
        super(SqrtBeta, self).__init__()
        self.n = n
        self.m = m
    def __repr__(self):
        return "%s(n=%r, m=%r)" % (type(self).__name__, self.n, self.m)
    def sample(self, num, d=None, rng=np.random):
        """Draw samples as the square root of Beta(m/2, n/2) variates."""
        shape = self._sample_shape(num, d)
        return np.sqrt(rng.beta(self.m / 2.0, self.n / 2.0, size=shape))
    def cdf(self, x):
        """Cumulative distribution function.
        .. note:: Requires SciPy.
        Parameters
        ----------
        x : array_like
            Evaluation points in [0, 1].
        Returns
        -------
        cdf : array_like
            Probability that ``X <= x``.
        """
        from scipy.special import betainc
        sq_x = x * x
        # Values with x**2 >= 1 are past the support, so the CDF saturates
        # at 1 there.
        return np.where(
            sq_x < 1., betainc(self.m / 2.0, self.n / 2.0, sq_x),
            np.ones_like(x))
    def pdf(self, x):
        """Probability distribution function.
        .. note:: Requires SciPy.
        Parameters
        ----------
        x : array_like
            Evaluation points in [0, 1].
        Returns
        -------
        pdf : array_like
            Probability density at ``x``.
        """
        from scipy.special import beta
        # Density of sqrt(Beta(m/2, n/2)) obtained by change of variables.
        return (2 / beta(0.5 * self.m, 0.5 * self.n) * x ** (self.m - 1)
                * (1 - x * x) ** (0.5 * self.n - 1))
    def ppf(self, y):
        """Percent point function (inverse cumulative distribution).
        .. note:: Requires SciPy.
        Parameters
        ----------
        y : array_like
            Cumulative probabilities in [0, 1].
        Returns
        -------
        ppf : array_like
            Evaluation points ``x`` in [0, 1] such that ``P(X <= x) = y``.
        """
        from scipy.special import betaincinv
        # Invert the Beta CDF, then undo the square from the sqrt transform.
        sq_x = betaincinv(self.m / 2.0, self.n / 2.0, y)
        return np.sqrt(sq_x)
class SubvectorLength(SqrtBeta):
    """Distribution of the length of a subvectors of a unit vector.
    Parameters
    ----------
    dimensions : int
        Dimensionality of the complete unit vector.
    subdimensions : int, optional (Default: 1)
        Dimensionality of the subvector.
    See also
    --------
    nengo.dists.SqrtBeta
    """
    def __init__(self, dimensions, subdimensions=1):
        # SqrtBeta(n, m) with n = dimensions outside the subvector and
        # m = subvector size.
        super(SubvectorLength, self).__init__(
            dimensions - subdimensions, subdimensions)
    def __repr__(self):
        # n + m reconstructs the original ``dimensions`` argument.
        return "%s(%r, subdimensions=%r)" % (
            type(self).__name__, self.n + self.m, self.m)
class CosineSimilarity(SubvectorLength):
    """Distribution of the cosine of the angle between two random vectors.
    The "cosine similarity" is the cosine of the angle between two vectors,
    which is equal to the dot product of the vectors, divided by the L2-norms
    of the individual vectors. When these vectors are unit length, this is then
    simply the distribution of their dot product.
    This is also equivalent to the distribution of a single coefficient from a
    unit vector (a single dimension of ``UniformHypersphere(surface=True)``).
    Furthermore, ``CosineSimilarity(d+2)`` is equivalent to the distribution of
    a single coordinate from points uniformly sampled from the d-dimensional
    unit ball (a single dimension of
    ``UniformHypersphere(surface=False).sample(n, d)``). These relationships
    have been detailed in [Voelker2017]_.
    This can be used to calculate an intercept ``c = ppf(1 - p)`` such that
    ``dot(u, v) >= c`` with probability ``p``, for random unit vectors ``u``
    and ``v``. In other words, a neuron with intercept ``ppf(1 - p)`` will
    fire with probability ``p`` for a random unit length input.
    .. [Voelker2017]
       `Aaron R. Voelker, Jan Gosmann, and Terrence C. Stewart.
       Efficiently sampling vectors and coordinates from the n-sphere and
       n-ball. Technical Report, Centre for Theoretical Neuroscience,
       Waterloo, ON, 2017
       <http://compneuro.uwaterloo.ca/publications/voelker2017.html>`_
    Parameters
    ----------
    dimensions: int
        Dimensionality of the complete unit vector.
    See also
    --------
    nengo.dists.SqrtBeta
    """
    def __init__(self, dimensions):
        super(CosineSimilarity, self).__init__(dimensions)
    def sample(self, num, d=None, rng=np.random):
        shape = self._sample_shape(num, d)
        # SubvectorLength samples are non-negative lengths; attaching a
        # random sign makes the distribution symmetric about zero.
        sign = Choice((1, -1)).sample(np.prod(shape), rng=rng).reshape(*shape)
        return sign * super(CosineSimilarity, self).sample(num, d, rng=rng)
    def cdf(self, x):
        # Fold the one-sided length CDF into a symmetric CDF on [-1, 1].
        return (super(CosineSimilarity, self).cdf(x) * np.sign(x) + 1) / 2.0
    def pdf(self, x):
        # Half the one-sided density, since mass is split across both signs.
        return super(CosineSimilarity, self).pdf(x) / 2.0
    def ppf(self, y):
        # Map the symmetric quantile back through the one-sided inverse CDF,
        # restoring the sign from which half of [0, 1] the probability is in.
        x = super(CosineSimilarity, self).ppf(abs(y*2 - 1))
        return np.where(y > 0.5, x, -x)
class DistributionParam(Parameter):
    """A Distribution."""
    equatable = True
    def coerce(self, instance, dist):
        # Reject non-Distribution values before the generic Parameter
        # coercion runs.
        self.check_type(instance, dist, Distribution)
        return super(DistributionParam, self).coerce(instance, dist)
class DistOrArrayParam(NdarrayParam):
    """Can be a Distribution or samples from a distribution."""
    def __init__(self, name, default=Unconfigurable, sample_shape=None,
                 optional=False, readonly=None):
        super(DistOrArrayParam, self).__init__(
            name, default, sample_shape, optional, readonly)
    def coerce(self, instance, distorarray):
        # Distributions skip NdarrayParam's array validation and go straight
        # through the base Parameter coercion; anything else is validated as
        # an ndarray of samples.
        if isinstance(distorarray, Distribution):
            return Parameter.coerce(self, instance, distorarray)
        return super(DistOrArrayParam, self).coerce(instance, distorarray)
def get_samples(dist_or_samples, n, d=None, rng=np.random):
    """Sample a distribution, or pass pre-drawn samples through unchanged.

    Use this in code that accepts either a `.Distribution` or an
    ``array_like`` of already-drawn samples.

    Parameters
    ----------
    dist_or_samples : `.Distribution` or (n, d) array_like
        Source of the samples to be returned.
    n : int
        Number samples to take.
    d : int or None, optional (Default: None)
        The number of dimensions to return.
    rng : RandomState, optional (Default: np.random)
        Random number generator.

    Returns
    -------
    samples : (n, d) array_like
    """
    if not isinstance(dist_or_samples, Distribution):
        # Already concrete samples; just coerce to an ndarray.
        return np.array(dist_or_samples)
    return dist_or_samples.sample(n, d=d, rng=rng)
| 33.745514
| 79
| 0.598617
|
4a0cbf89cfe1776aea1b37aab4b9989990d3cef0
| 13,533
|
py
|
Python
|
src/fever_doc_retri/fever_retrieval_v0.py
|
ethanjperez/semanticRetrievalMRS
|
765e00d6e7693e0eaba20ef1407fad0be4a7a92b
|
[
"MIT"
] | 61
|
2019-09-19T03:04:32.000Z
|
2022-03-08T03:59:28.000Z
|
src/fever_doc_retri/fever_retrieval_v0.py
|
ethanjperez/semanticRetrievalMRS
|
765e00d6e7693e0eaba20ef1407fad0be4a7a92b
|
[
"MIT"
] | 13
|
2019-09-19T12:11:01.000Z
|
2020-12-28T17:51:43.000Z
|
src/fever_doc_retri/fever_retrieval_v0.py
|
ethanjperez/semanticRetrievalMRS
|
765e00d6e7693e0eaba20ef1407fad0be4a7a92b
|
[
"MIT"
] | 10
|
2019-09-20T05:07:28.000Z
|
2022-01-12T08:12:08.000Z
|
import collections
from flashtext import KeywordProcessor
import wiki_util
from build_rindex.build_rvindex import load_from_file
from build_rindex.rvindex_scoring import get_query_ngrams
from fever_utils.fever_db import reverse_convert_brc
from hotpot_doc_retri.hotpot_doc_retri_v0 import filter_word, filter_document_id, get_kw_matching_results
from hotpot_doc_retri.retrieval_utils import RetrievedSet, RetrievedItem
from utils import common
import config
from utils import list_dict_data_tool
from tqdm import tqdm
from evaluation import fever_scorer
from wiki_util.title_entities_set import get_title_entity_set
import numpy as np
# Lightweight record attached to each keyword in the flashtext processors:
# ``matched_key_word`` is the literal span that matched in the claim text;
# ``matched_keywords_info`` maps candidate document titles to the match
# method that produced them ('kwm' or 'kwm_disamb').
_MatchedObject = collections.namedtuple(  # pylint: disable=invalid-name
    "MatchedObject", ["matched_key_word", "matched_keywords_info"])
# Extracted key word is the key word in the database, matched word is the word in the input question.
def item_resorting(d_list, top_k=None):
    """Rank each item's candidate documents into ``predicted_docids`` in place.

    Candidates whose parenthetical ('-LRB-') title literally appears in the
    claim text are boosted to a score of 5.0 and listed first; the remaining
    candidates follow ordered by (score descending, doc-id ascending).
    ``top_k`` truncates the final list when given.
    """
    for entry in d_list:
        claim_text = ' '.join(entry['claim_tokens'])
        ranked = []
        entry['predicted_docids'] = ranked
        # Boost exact parenthetical-title matches against the claim.
        for idx, cand in enumerate(entry['prioritized_docids']):
            doc_id = cand[0]
            if '-LRB-' in doc_id and common.doc_id_to_tokenized_text(doc_id) in claim_text:
                entry['prioritized_docids'][idx] = [doc_id, 5.0]
                ranked.append(doc_id)
        # Append the rest in deterministic (score desc, doc-id asc) order.
        for cand in sorted(entry['prioritized_docids'], key=lambda c: (-c[1], c[0])):
            if cand[0] not in ranked:
                ranked.append(cand[0])
        if top_k is not None and len(ranked) > top_k:
            entry['predicted_docids'] = ranked[:top_k]
def fever_retrieval_v0(term_retrieval_top_k=3, match_filtering_k=2, tag='dev'):
    """Run keyword + tf-idf document retrieval over a FEVER split.

    For each claim, candidate documents come from two sources: (1) exact
    keyword matching of wiki titles (with disambiguation handling) and
    (2) a precomputed tf-idf ranking.  Results are saved to a jsonl file
    and scored with ``fever_scorer``.

    Parameters
    ----------
    term_retrieval_top_k : int
        Maximum number of tf-idf candidates added per claim.
    match_filtering_k : int
        Filtering parameter forwarded to ``get_kw_matching_results``.
    tag : str
        Dataset split: 'dev', 'train' or 'test'.
    """
    # term_retrieval_top_k = 20
    # term_retrieval_top_k = 20
    # term_retrieval_top_k = 3
    # match_filtering_k = 2
    if tag == 'dev':
        d_list = common.load_jsonl(config.FEVER_DEV)
    elif tag == 'train':
        d_list = common.load_jsonl(config.FEVER_TRAIN)
    elif tag == 'test':
        d_list = common.load_jsonl(config.FEVER_TEST)
    else:
        raise ValueError(f"Tag:{tag} not supported.")
    # Precomputed per-claim tf-idf document rankings, indexed by claim id.
    d_tf_idf = common.load_jsonl(config.RESULT_PATH /
                                 f"doc_retri_results/term_based_methods_results/fever_tf_idf_{tag}.jsonl")
    tf_idf_dict = list_dict_data_tool.list_to_dict(d_tf_idf, 'id')
    r_list = []
    ner_set = get_title_entity_set()
    # Global term scores used to validate query n-grams.
    g_score_dict = dict()
    load_from_file(g_score_dict,
                   config.PDATA_ROOT / "reverse_indexing/abs_rindexdb/scored_db/default-tf-idf.score.txt")
    keyword_processor = KeywordProcessor(case_sensitive=True)
    keyword_processor_disamb = KeywordProcessor(case_sensitive=True)
    print("Build Processor")
    for kw in tqdm(ner_set):
        if filter_word(kw) or filter_document_id(kw):
            continue  # if the keyword is filtered by above function or is stopwords
        else:
            # matched_key_word is the original matched span. we need to save it for group ordering.
            matched_obj = _MatchedObject(matched_key_word=kw, matched_keywords_info={kw: 'kwm'})
            keyword_processor.add_keyword(kw, matched_obj)
    for kw in wiki_util.title_entities_set.disambiguation_group:
        if filter_word(kw) or filter_document_id(kw):
            continue  # if the keyword is filtered by above function or is stopwords
        else:
            if kw in keyword_processor:
                # if the kw existed in the kw_processor, we update its dict to add more disamb items
                existing_matched_obj: _MatchedObject = keyword_processor.get_keyword(kw)
                for disamb_kw in wiki_util.title_entities_set.disambiguation_group[kw]:
                    if filter_document_id(disamb_kw):
                        continue
                    if disamb_kw not in existing_matched_obj.matched_keywords_info:
                        existing_matched_obj.matched_keywords_info[disamb_kw] = 'kwm_disamb'
            else:   # If not we add it to the keyword_processor_disamb, which is set to be lower priority
                # new_dict = dict()
                matched_obj = _MatchedObject(matched_key_word=kw, matched_keywords_info=dict())
                for disamb_kw in wiki_util.title_entities_set.disambiguation_group[kw]:
                    if filter_document_id(disamb_kw):
                        continue
                    matched_obj.matched_keywords_info[disamb_kw] = 'kwm_disamb'
                    # new_dict[disamb_kw] = 'kwm_disamb'
                keyword_processor_disamb.add_keyword(kw, matched_obj)
    for item in tqdm(d_list):
        cur_id = str(item['id'])
        query = item['claim']
        # Only keep query n-grams that have a known tf-idf score.
        query_terms = get_query_ngrams(query)
        valid_query_terms = [term for term in query_terms if term in g_score_dict]
        retrieved_set = RetrievedSet()
        # print(tf_idf_doc_list)
        get_kw_matching_results(query, valid_query_terms, retrieved_set, match_filtering_k,
                                g_score_dict, keyword_processor, keyword_processor_disamb)
        tf_idf_doc_list = tf_idf_dict[cur_id]['retrieved_list']
        # Take up to term_retrieval_top_k tf-idf candidates, scanning a few
        # extra to allow for titles rejected by the filters below.
        added_count = 0
        for score, title in sorted(
                tf_idf_doc_list, key=lambda x: x[0], reverse=True)[:term_retrieval_top_k + 3]:
            if not filter_word(title) and not filter_document_id(title) and not title.startswith('List of '):
                retrieved_set.add_item(RetrievedItem(title, 'tf-idf'))
                added_count += 1
            if term_retrieval_top_k is not None and added_count >= term_retrieval_top_k:
                break
        predicted_docids = retrieved_set.to_id_list()
        # print(retrieved_set)
        # print(item['claim'], predicted_docids)
        r_item = dict()
        r_item['id'] = int(cur_id)
        r_item['claim'] = item['claim']
        r_item['predicted_docids'] = predicted_docids
        if tag != 'test':
            r_item['label'] = item['label']
        r_list.append(r_item)
    # r_list = common.load_jsonl('dev-debug.jsonl')
    # We need to modify the existing retrieved document for naming consistency
    for i, item in enumerate(r_list):
        predicted_docids = item['predicted_docids']
        modified_docids = []
        for docid in predicted_docids:
            docid = docid.replace(' ', '_')
            docid = reverse_convert_brc(docid)
            modified_docids.append(docid)
        item['predicted_docids'] = modified_docids
    # Modify finished
    # print(r_list[0:10])
    # Report statistics of the number of retrieved documents per claim.
    len_list = []
    for rset in r_list:
        len_list.append(len(rset['predicted_docids']))
    print(collections.Counter(len_list).most_common(10000))
    print(np.mean(len_list))
    print(np.std(len_list))
    print(np.max(len_list))
    print(np.min(len_list))
    common.save_jsonl(r_list, f'fever_term_based_retri_results_'
                              f'{tag}_term_topk:{term_retrieval_top_k}_match_filtering_k:{match_filtering_k}.jsonl')
    mode = {'standard': False, 'check_doc_id_correct': True}
    # fever_scorer.fever_score_analysis(r_list, d_list, mode=mode, max_evidence=None)
    fever_scorer.fever_score(r_list, d_list, mode=mode, max_evidence=None)
def merge_results_with_haonao_module(term_retrieval_top_k=3, match_filtering_k=2, haonan_topk=10, tag='dev',
                                     save=False):
    """Union term-based retrieval results with Haonan's retrieval results.

    Loads both result files for the given split, normalizes doc ids,
    merges the two candidate sets per claim (dropping 'List_of_' titles),
    scores the merged results (except on 'test'), and optionally saves them.

    Parameters
    ----------
    term_retrieval_top_k, match_filtering_k : int
        Identify which term-based result file to load.
    haonan_topk : int
        Truncation applied when re-sorting Haonan's results.
    tag : str
        Dataset split: 'dev', 'train' or 'test'.
    save : bool
        Whether to write the merged results to disk.
    """
    if tag == 'dev':
        d_list = common.load_jsonl(config.FEVER_DEV)
        task_name = 'shared_task_dev'
    elif tag == 'train':
        d_list = common.load_jsonl(config.FEVER_TRAIN)
        task_name = 'train'
    elif tag == 'test':
        d_list = common.load_jsonl(config.FEVER_TEST)
        task_name = 'shared_task_test'
    else:
        raise ValueError(f"Tag:{tag} not supported.")
    # r_list = common.load_jsonl(config.RESULT_PATH / f'doc_retri_results/fever_results/standard_term_based_results/'
    # f'fever_term_based_retri_results_{tag}_term_topk:{term_retrieval_top_k}_match_filtering_k:{match_filtering_k}.jsonl')
    r_list = common.load_jsonl(config.RESULT_PATH / f'doc_retri_results/fever_results/standard_term_based_results/'
    f'fever_term_based_retri_results_{tag}_term_topk:{term_retrieval_top_k}_match_filtering_k:{match_filtering_k}.jsonl')
    old_result_list = common.load_jsonl(config.RESULT_PATH /
                                        f"doc_retri_results/fever_results/haonans_results/dr_{tag}.jsonl")
    item_resorting(old_result_list, top_k=haonan_topk)
    old_result_dict = list_dict_data_tool.list_to_dict(old_result_list, 'id')
    # Normalize doc ids in the term-based results to underscore/bracket form.
    for i, item in enumerate(r_list):
        predicted_docids = item['predicted_docids']
        modified_docids = []
        for docid in predicted_docids:
            docid = docid.replace(' ', '_')
            docid = reverse_convert_brc(docid)
            modified_docids.append(docid)
        item['predicted_docids'] = modified_docids
        # item['predicted_docids'] = []
    merged_result_list = []
    # Per claim: union the two candidate sets, dropping list pages.
    for item in tqdm(r_list):
        cur_id = int(item['id'])
        old_retrieval_doc = old_result_dict[cur_id]['predicted_docids']
        new_retrieval_doc = item['predicted_docids']
        m_predicted_docids = set.union(set(old_retrieval_doc), set(new_retrieval_doc))
        # print(m_predicted_docids)
        m_predicted_docids = [docid for docid in m_predicted_docids if not docid.startswith('List_of_')]
        item['predicted_docids'] = list(m_predicted_docids)
        # print(item['predicted_docids'])
    mode = {'standard': False, 'check_doc_id_correct': True}
    if tag != 'test':
        fever_scorer.fever_score_analysis(r_list, d_list, mode=mode, max_evidence=None)
    if save:
        print("Saved to:")
        common.save_jsonl(r_list, config.RESULT_PATH /
                          f"doc_retri_results/fever_results/merged_doc_results/m_doc_{tag}.jsonl")
    # States information.
    len_list = []
    for rset in r_list:
        len_list.append(len(rset['predicted_docids']))
    print(collections.Counter(len_list).most_common(10000))
    print(np.mean(len_list))
    print(np.std(len_list))
    print(np.max(len_list))
    print(np.min(len_list))
if __name__ == '__main__':
    # Entry point: merges term-based and Haonan's retrieval results for the
    # 'train' split. The commented-out lines below are earlier experiment
    # invocations kept for reference.
    # fever_retrieval_v0(tag='test', term_retrieval_top_k=3, match_filtering_k=1)
    #
    merge_results_with_haonao_module(tag='train', term_retrieval_top_k=3, match_filtering_k=2, haonan_topk=10)
    # merge_results_with_haonao_module(term_retrieval_top_k=3, match_filtering_k=2, haonan_topk=10)
    # d_list = common.load_jsonl(config.FEVER_DEV)
    # r_list = common.load_jsonl(config.RESULT_PATH /
    #                            f"doc_retri_results/fever_results/haonans_results/doc_retr_1_shared_task_dev.jsonl")
    # item_resorting(r_list, top_k=10)
    # print(old_result_list)
    # merge_results_with_haonao_module(tag='dev', save=True)
    # merge_results_with_haonao_module(tag='train', save=True)
    # old_result_list = common.load_jsonl(config.RESULT_PATH /
    #                                     f"doc_retri_results/fever_results/haonans_results/dr_test.jsonl")
    # item_resorting(old_result_list, top_k=5)
    # common.save_jsonl(old_result_list, "n_test_prediction.jsonl")
    # print(fever_scorer.fever_doc_only(old_result_list, d_list, max_evidence=5))
    # print()
    # r_list = common.load_jsonl('dev-debug-3-2.jsonl')
    #
    # old_result_list = common.load_jsonl(
    #     "/Users/yixin/projects/extinguishHotpot/results/doc_retri_results/old_fever_retrieval/doc_retr_1_shared_task_dev.jsonl")
    # # for item in old_results_list:
    # #     print(item['predicted_docids'])
    # old_result_dict = list_dict_data_tool.list_to_dict(old_result_list, 'id')
    #
    # for i, item in enumerate(r_list):
    #     predicted_docids = item['predicted_docids']
    #     modified_docids = []
    #     for docid in predicted_docids:
    #         docid = docid.replace(' ', '_')
    #         docid = reverse_convert_brc(docid)
    #         modified_docids.append(docid)
    #     item['predicted_docids'] = modified_docids
    #     # item['predicted_docids'] = []
    #
    # merged_result_list = []
    # for item in tqdm(r_list):
    #     cur_id = int(item['id'])
    #     old_retrieval_doc = old_result_dict[cur_id]['predicted_docids']
    #     new_retrieval_doc = item['predicted_docids']
    #     m_predicted_docids = set.union(set(old_retrieval_doc), set(new_retrieval_doc))
    #     # print(m_predicted_docids)
    #     m_predicted_docids = [docid for docid in m_predicted_docids if not docid.startswith('List_of_')]
    #     item['predicted_docids'] = list(m_predicted_docids)
    #     # print(item['predicted_docids'])
    #
    # mode = {'standard': False, 'check_doc_id_correct': True}
    # fever_scorer.fever_score(r_list, d_list, mode=mode, max_evidence=None)
    # fever_scorer.fever_doc_only(r_list, d_list, max_evidence=None)
    #
    # # States information.
    # len_list = []
    # for rset in r_list:
    #     len_list.append(len(rset['predicted_docids']))
    #
    # print(collections.Counter(len_list).most_common(10000))
    #
    # print(np.mean(len_list))
    # print(np.std(len_list))
    # print(np.max(len_list))
    # print(np.min(len_list))
| 42.290625
| 130
| 0.672283
|
4a0cc048d1d689fd7b9e4379cb61c4c66003011e
| 7,650
|
py
|
Python
|
tools/generate-vbinary-test.py
|
shi510/XNNPACK
|
ea2088b668b760cdc5c67df7d2854320ee34aeb8
|
[
"BSD-3-Clause"
] | null | null | null |
tools/generate-vbinary-test.py
|
shi510/XNNPACK
|
ea2088b668b760cdc5c67df7d2854320ee34aeb8
|
[
"BSD-3-Clause"
] | null | null | null |
tools/generate-vbinary-test.py
|
shi510/XNNPACK
|
ea2088b668b760cdc5c67df7d2854320ee34aeb8
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2019 Google LLC
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import codecs
import math
import os
import re
import sys
import yaml
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
import xngen
import xnncommon
# Command-line interface: a YAML spec listing micro-kernel names in, a
# generated C++ gtest source file out.
parser = argparse.ArgumentParser(
  description='Vector binary operation microkernel test generator')
parser.add_argument("-s", "--spec", metavar="FILE", required=True,
                    help="Specification (YAML) file")
parser.add_argument("-o", "--output", metavar="FILE", required=True,
                    help='Output (C++ source) file')
parser.set_defaults(defines=list())
def split_ukernel_name(name):
  """Parse a vbinary micro-kernel symbol name into its components.

  Returns a tuple ``(op_type, activation_type, batch_tile, arch, isa)``.
  Raises ValueError when the name does not match the expected pattern.
  """
  match = re.match(r"^xnn_(f16|f32)_v(add|div|max|min|mul|sqrdiff|sub|addc|divc|rdivc|maxc|minc|mulc|sqrdiffc|rsqrdiffc|subc|rsubc)(_(minmax))?_ukernel__(.+)_x(\d+)$", name)
  if match is None:
    raise ValueError("Unexpected microkernel name: " + name)
  # Map the lowercase op token to the C++ OpType enumerator name.
  op_names = {
    "add": "Add",
    "div": "Div",
    "max": "Max",
    "min": "Min",
    "mul": "Mul",
    "sqrdiff": "SqrDiff",
    "sub": "Sub",
    "addc": "AddC",
    "divc": "DivC",
    "rdivc": "RDivC",
    "maxc": "MaxC",
    "minc": "MinC",
    "mulc": "MulC",
    "sqrdiffc": "SqrDiffC",
    "rsqrdiffc": "RSqrDiffC",
    "subc": "SubC",
    "rsubc": "RSubC",
  }
  op_type = op_names[match.group(2)]
  batch_tile = int(match.group(6))
  # Missing activation suffix means a linear (no clamping) kernel.
  activation = match.group(4)
  activation_type = "LINEAR" if activation is None else activation.upper()
  arch, isa = xnncommon.parse_target_name(target_name=match.group(5))
  return op_type, activation_type, batch_tile, arch, isa
BINOP_TEST_TEMPLATE = """\
TEST(${TEST_NAME}, batch_eq_${BATCH_TILE}) {
$if ISA_CHECK:
${ISA_CHECK};
${TESTER}()
.batch_size(${BATCH_TILE})
.Test(${", ".join(TEST_ARGS)});
}
$if BATCH_TILE > 1:
TEST(${TEST_NAME}, batch_div_${BATCH_TILE}) {
$if ISA_CHECK:
${ISA_CHECK};
for (size_t batch_size = ${BATCH_TILE*2}; batch_size < ${BATCH_TILE*10}; batch_size += ${BATCH_TILE}) {
${TESTER}()
.batch_size(batch_size)
.Test(${", ".join(TEST_ARGS)});
}
}
TEST(${TEST_NAME}, batch_lt_${BATCH_TILE}) {
$if ISA_CHECK:
${ISA_CHECK};
for (size_t batch_size = 1; batch_size < ${BATCH_TILE}; batch_size++) {
${TESTER}()
.batch_size(batch_size)
.Test(${", ".join(TEST_ARGS)});
}
}
TEST(${TEST_NAME}, batch_gt_${BATCH_TILE}) {
$if ISA_CHECK:
${ISA_CHECK};
for (size_t batch_size = ${BATCH_TILE+1}; batch_size < ${10 if BATCH_TILE == 1 else BATCH_TILE*2}; batch_size++) {
${TESTER}()
.batch_size(batch_size)
.Test(${", ".join(TEST_ARGS)});
}
}
$if TESTER == "VBinOpCMicrokernelTester":
TEST(${TEST_NAME}, inplace) {
$if ISA_CHECK:
${ISA_CHECK};
for (size_t batch_size = 1; batch_size <= ${BATCH_TILE*5}; batch_size += ${max(1, BATCH_TILE-1)}) {
${TESTER}()
.batch_size(batch_size)
.inplace(true)
.Test(${", ".join(TEST_ARGS)});
}
}
$else:
TEST(${TEST_NAME}, inplace_a) {
$if ISA_CHECK:
${ISA_CHECK};
for (size_t batch_size = 1; batch_size <= ${BATCH_TILE*5}; batch_size += ${max(1, BATCH_TILE-1)}) {
${TESTER}()
.batch_size(batch_size)
.inplace_a(true)
.Test(${", ".join(TEST_ARGS)});
}
}
TEST(${TEST_NAME}, inplace_b) {
$if ISA_CHECK:
${ISA_CHECK};
for (size_t batch_size = 1; batch_size <= ${BATCH_TILE*5}; batch_size += ${max(1, BATCH_TILE-1)}) {
${TESTER}()
.batch_size(batch_size)
.inplace_b(true)
.Test(${", ".join(TEST_ARGS)});
}
}
TEST(${TEST_NAME}, inplace_a_and_b) {
$if ISA_CHECK:
${ISA_CHECK};
for (size_t batch_size = 1; batch_size <= ${BATCH_TILE*5}; batch_size += ${max(1, BATCH_TILE-1)}) {
${TESTER}()
.batch_size(batch_size)
.inplace_a(true)
.inplace_b(true)
.Test(${", ".join(TEST_ARGS)});
}
}
$if ACTIVATION_TYPE == "MINMAX":
TEST(${TEST_NAME}, qmin) {
$if ISA_CHECK:
${ISA_CHECK};
for (size_t batch_size = 1; batch_size <= ${BATCH_TILE*5}; batch_size += ${max(1, BATCH_TILE-1)}) {
${TESTER}()
.batch_size(batch_size)
.qmin(128)
.Test(${", ".join(TEST_ARGS)});
}
}
TEST(${TEST_NAME}, qmax) {
$if ISA_CHECK:
${ISA_CHECK};
for (size_t batch_size = 1; batch_size <= ${BATCH_TILE*5}; batch_size += ${max(1, BATCH_TILE-1)}) {
${TESTER}()
.batch_size(batch_size)
.qmax(128)
.Test(${", ".join(TEST_ARGS)});
}
}
"""
def generate_test_cases(ukernel, op_type, activation_type, batch_tile, isa):
  """Generates all tests cases for a Vector Binary Operation micro-kernel.

  Args:
    ukernel: C name of the micro-kernel function.
    op_type: Operation type (ADD/MUL/SUB/etc).
    activation_type: Activation type (LINEAR/MINMAX/RELU).
    batch_tile: Number of batch elements processed per one iteration of the
                inner loop of the micro-kernel.
    isa: instruction set required to run the micro-kernel. Generated unit test
         will skip execution if the host processor doesn't support this ISA.

  Returns:
    Code for the test case.
  """
  # Name layout: xnn_<datatype>_<rest>; the test name drops the leading prefix.
  test_name = ukernel.split("_", 1)[1]
  datatype = ukernel.split("_", 2)[1]
  # Op names ending in "C" (vector-vs-constant) use the "C" tester variant.
  suffix = "C" if op_type.endswith("C") else ""
  tester = "VBinOp%sMicrokernelTester" % suffix
  test_args = [
    ukernel,
    "%s::OpType::%s" % (tester, op_type),
  ]
  # Generic (no-ISA) and psimd kernels are exercised through the scalar
  # tester variant.
  if not isa or isa == "psimd":
    test_args.append("%s::Variant::Scalar" % tester)
  return xngen.preprocess(BINOP_TEST_TEMPLATE, {
      "TEST_NAME": test_name.upper().replace("UKERNEL_", ""),
      "TEST_ARGS": test_args,
      "TESTER": tester,
      "DATATYPE": datatype,
      "BATCH_TILE": batch_tile,
      "OP_TYPE": op_type,
      "ACTIVATION_TYPE": activation_type,
      "ISA_CHECK": xnncommon.generate_isa_check_macro(isa),
    })
def main(args):
  """Read the YAML spec, generate all test cases, and write the C++ file."""
  options = parser.parse_args(args)
  with codecs.open(options.spec, "r", encoding="utf-8") as spec_file:
    spec_yaml = yaml.safe_load(spec_file)
    if not isinstance(spec_yaml, list):
      raise ValueError("expected a list of micro-kernels in the spec")
    # The spec filename (e.g. "f32-vaddc") determines which tester header
    # the generated file includes.
    spec_name = os.path.splitext(os.path.split(options.spec)[1])[0]
    opname = spec_name.split("-")[1]
    if opname.endswith("c"):
      header = "vbinaryc-microkernel-tester.h"
    else:
      header = "vbinary-microkernel-tester.h"
    tests = """\
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
//
// Auto-generated file. Do not edit!
// Specification: {specification}
// Generator: {generator}
#include <gtest/gtest.h>
#include <xnnpack/common.h>
#include <xnnpack/isa-checks.h>
#include <xnnpack/vbinary.h>
#include "{header}"
""".format(specification=options.spec, generator=sys.argv[0], header=header)
    for ukernel_spec in spec_yaml:
      name = ukernel_spec["name"]
      op_type, activation_type, batch_tile, arch, isa = split_ukernel_name(name)
      # specification can override architecture
      arch = ukernel_spec.get("arch", arch)
      test_case = generate_test_cases(name, op_type, activation_type,
                                      batch_tile, isa)
      tests += "\n\n" + xnncommon.postprocess_test_case(test_case, arch, isa)
    with codecs.open(options.output, "w", encoding="utf-8") as output_file:
      output_file.write(tests)
if __name__ == "__main__":
  main(sys.argv[1:])
| 29.198473
| 173
| 0.625621
|
4a0cc0f32258e60fa34371d5b63dac92ac48653f
| 892
|
py
|
Python
|
leetcode/0033_Search_in_Rotated_Sorted_Array/result.py
|
theck17/notes
|
f32f0f4b8f821b1ed38d173ef0913efddd094b91
|
[
"MIT"
] | null | null | null |
leetcode/0033_Search_in_Rotated_Sorted_Array/result.py
|
theck17/notes
|
f32f0f4b8f821b1ed38d173ef0913efddd094b91
|
[
"MIT"
] | null | null | null |
leetcode/0033_Search_in_Rotated_Sorted_Array/result.py
|
theck17/notes
|
f32f0f4b8f821b1ed38d173ef0913efddd094b91
|
[
"MIT"
] | null | null | null |
# !/usr/bin/env python3
# Author: C.K
# Email: theck17@163.com
# DateTime:2021-03-21 17:53:33
# Description: LeetCode 33 - Search in Rotated Sorted Array.
import os
import sys  # BUG FIX: was the syntax error "improt sys"
from typing import List  # BUG FIX: List was used in annotations but never imported


class Solution:
    def search(self, nums: List[int], target: int) -> int:
        """Binary search for ``target`` in a rotated sorted array.

        Returns the index of ``target``, or -1 if absent. Runs in O(log n):
        at each step one half of the window is guaranteed to be sorted, so we
        check whether the target lies in that half and discard the other.
        """
        left = 0
        right = len(nums) - 1
        while left <= right:
            mid = left + (right - left) // 2
            if nums[mid] == target:
                return mid
            elif nums[left] <= nums[mid]:
                # Left half [left, mid] is sorted.
                if target < nums[mid] and target >= nums[left]:
                    right = mid - 1
                else:
                    left = mid + 1
            elif nums[mid] <= nums[right]:
                # Right half [mid, right] is sorted.
                if target > nums[mid] and target <= nums[right]:
                    left = mid + 1
                else:
                    right = mid - 1
        return -1


if __name__ == "__main__":
    pass
| 21.756098
| 64
| 0.436099
|
4a0cc1befe04434a1f5cf5e46fb6ce19e0e0026d
| 21,293
|
py
|
Python
|
datawrapper/__main__.py
|
elanals/Datawrapper
|
8dc3920944e2bb4901ab6ea3c0e01d93b2db1adb
|
[
"MIT"
] | null | null | null |
datawrapper/__main__.py
|
elanals/Datawrapper
|
8dc3920944e2bb4901ab6ea3c0e01d93b2db1adb
|
[
"MIT"
] | null | null | null |
datawrapper/__main__.py
|
elanals/Datawrapper
|
8dc3920944e2bb4901ab6ea3c0e01d93b2db1adb
|
[
"MIT"
] | null | null | null |
"""Access Datawrapper's API to create, update, delete charts.
Datawrapper API lets you programatically interface with your charts.
It lets you create and edit charts, update your account information and many more things to come.
This package is a light-weight wrapper around Datawrapper's API.
Typical usage example:
dw = Datawrapper(access_token = <YOUR_ACCESS_TOKEN_HERE>)
dw.account_info()
"""
from typing import Any, Dict, Iterable, List, Union
import json
import os
from pathlib import Path
import IPython
import pandas as pd
import requests as r
from IPython.display import HTML, Image
class Datawrapper:
    """Handles connecting with Datawrapper's API.
    Handles access to your Datawrapper's account, create, delete and move charts, tables or maps.
    Will attempt to read environment variable DATAWRAPPER_ACCESS_TOKEN by default.
    Args:
        access_token: A personal access token to use the API. See app.datawrapper.de/account/api-tokens.
    """
    # API endpoint roots shared by all methods.
    _BASE_URL = "https://api.datawrapper.de"
    _CHARTS_URL = _BASE_URL + "/v3/charts"
    _PUBLISH_URL = _BASE_URL + "/charts"
    _FOLDERS_URL = _BASE_URL + "/folders"
    # Default token, read from the environment once at import time.
    _ACCESS_TOKEN = os.getenv("DATAWRAPPER_ACCESS_TOKEN")
    def __init__(self, access_token=_ACCESS_TOKEN):
        """To create a token head to app.datawrapper.de/account/api-tokens.
        By default this will look for DATAWRAPPER_ACCESS_TOKEN environment variable.
        Parameters
        ----------
        access_token : str, optional
            Personal API access token; defaults to the value of the
            DATAWRAPPER_ACCESS_TOKEN environment variable.
        """
        self._access_token = access_token
        # Authorization header reused (and shared) by every request method.
        self._auth_header = {"Authorization": f"Bearer {access_token}"}
def account_info(self) -> Union[Dict[Any, Any], None, Any]:
"""Access your account information.
Returns
-------
dict
A dictionary containing your account information.
"""
account_info_response = r.get(
url=self._BASE_URL + "/v3/me", headers=self._auth_header
)
if account_info_response.status_code == 200:
return account_info_response.json()
else:
print(
"Couldn't find account. Make sure your credentials (access_code) are correct."
)
return None
def add_data(self, chart_id: str, data: pd.DataFrame) -> r.Response:
"""Add data to a specified chart.
Parameters
----------
chart_id : str
ID of chart, table or map to add data to.
data : pd.DataFrame
A pandas dataframe containing the data to be added.
Returns
-------
requests.Response
A requests.Response
"""
_header = self._auth_header
_header["content-type"] = "text/csv"
_data = data.to_csv(index=False, encoding="utf-8")
return r.put(
url=f"{self._CHARTS_URL}/{chart_id}/data",
headers=_header,
data=_data.encode("utf-8"),
)
def refresh_data(self, chart_id: str) -> r.Response:
"""Fetch configured external data and add it to the chart.
Parameters
----------
chart_id : str
ID of chart, table or map to add data to.
Returns
-------
requests.Response
A requests.Response
"""
_header = self._auth_header
_header["accept"] = "*/*"
return r.post(
url=f"{self._CHARTS_URL}/{chart_id}/data/refresh",
headers=_header,
)
def create_chart(
self,
title: str = "New Chart",
chart_type: str = "d3-bars-stacked",
data: Union[pd.DataFrame, None] = None,
folder_id: str = "",
) -> Union[Dict[Any, Any], None, Any]:
"""Creates a new Datawrapper chart, table or map.
You can pass a pandas DataFrame as a `data` argument to upload data.
Returns the created chart's information.
Parameters
----------
title : str, optional
Title for new chart, table or map, by default "New Chart"
chart_type : str, optional
Chart type to be created. See https://developer.datawrapper.de/docs/chart-types, by default "d3-bars-stacked"
data : [type], optional
A pandas DataFrame containing the data to be added, by default None
folder_id : str, optional
ID of folder in Datawrapper.de for the chart, table or map to be created in, by default ""
Returns
-------
dict
A dictionary containing the created chart's information.
"""
_header = self._auth_header
_header["content-type"] = "application/json"
_data = {"title": title, "type": chart_type, "folderId": folder_id}
new_chart_response = r.post(
url=self._CHARTS_URL, headers=_header, data=json.dumps(_data)
)
if (
chart_type == "d3-maps-choropleth"
or chart_type == "d3-maps-symbols"
or chart_type == "locator-map"
):
print(
"\nNOTE: Maps need a valid basemap, set in properties -> visualize"
)
print(
"Full list of valid maps can be retrieved with\n\ncurl --request GET --url https://api.datawrapper.de/plugin/basemap\n"
)
if new_chart_response.status_code <= 201:
chart_info = new_chart_response.json()
print(f"New chart {chart_info['type']} created!")
else:
print(
"Chart could not be created, check your authorization credentials (access token)"
)
if data is not None:
self.add_data(chart_id=chart_info["id"], data=data)
return chart_info
def update_description(
self,
chart_id: str,
source_name: str = "",
source_url: str = "",
intro: str = "",
byline: str = "",
) -> Union[Any, None]:
"""Update a chart's description.
Parameters
----------
chart_id : str
ID of chart, table or map.
source_name : str, optional
Source of data, by default ""
source_url : str, optional
URL of source of data, by default ""
intro : str, optional
Introduction of your chart, table or map, by default ""
byline : str, optional
Who made this?, by default ""
"""
_header = self._auth_header
_header["content-type"] = "application/json"
_data = {
"metadata": {
"describe": {
"source-name": source_name,
"source-url": source_url,
"intro": intro,
"byline": byline,
}
}
}
update_description_response = r.patch(
url=self._CHARTS_URL + f"/{chart_id}",
headers=_header,
data=json.dumps(_data),
)
if update_description_response.status_code == 200:
print("Chart updated!")
else:
print(
"Error. Status code: ", update_description_response.status_code
)
print("Couldn't update chart.")
return None
def publish_chart(
self, chart_id: str, display: bool = True
) -> Union[Any, None]:
"""Publishes a chart, table or map.
Parameters
----------
chart_id : str
ID of chart, table or map.
display : bool, optional
Display the published chart as output in notebook cell, by default True
"""
publish_chart_response = r.post(
url=f"{self._PUBLISH_URL}/{chart_id}/publish",
headers=self._auth_header,
)
if publish_chart_response.status_code <= 201:
# print(f"Chart published at {publish_chart_info[]}")
if display:
publish_chart_info = publish_chart_response.json()
iframe_code = publish_chart_info["data"]["metadata"]["publish"][
"embed-codes"
]["embed-method-iframe"]
# iframe_width = publish_chart_info['data']['metadata']['publish']['embed-width']
# iframe_height = publish_chart_info['data']['metadata']['publish']['embed-height']
return HTML(iframe_code)
else:
return None
else:
print("Chart couldn't be published at this time.")
return None
def chart_properties(
self, chart_id: str
) -> Union[Dict[Any, Any], None, Any, Iterable[Any]]:
"""Retrieve information of a specific chart, table or map.
Parameters
----------
chart_id : str
ID of chart, table, or map.
Returns
-------
dict
A dictionary containing the information of the chart, table, or map.
"""
chart_properties_response = r.get(
url=self._CHARTS_URL + f"/{chart_id}",
headers=self._auth_header,
)
if chart_properties_response.status_code == 200:
return chart_properties_response.json()
else:
print(
"Make sure you have the right id and authorization credentials (access_token)."
)
return None
def update_metadata(
self, chart_id: str, properties: Dict[Any, Any]
) -> Union[Any, None]:
"""Update a chart, table, or map's metadata.
Example: https://developer.datawrapper.de/docs/creating-a-chart-new#edit-colors
Parameters
----------
chart_id : str
ID of chart, table, or map.
properties : dict
A python dictionary of properties to update.
"""
_header = self._auth_header
_header["content-type"] = "application/json"
_data = {"metadata": properties}
update_properties_response = r.patch(
url=self._CHARTS_URL + f"/{chart_id}",
headers=_header,
data=json.dumps(_data),
)
if update_properties_response.status_code == 200:
print("Chart's metadata updated!")
# return update_properties_response.json()
else:
print(
"Error. Status code: ", update_properties_response.status_code
)
x = update_properties_response.text
y = json.loads(x)
print("Message: ", y["message"])
print("Chart could not be updated.")
return None
def update_chart(
self,
chart_id: str,
title: str = "",
theme: str = "",
chart_type: str = "",
language: str = "",
folder_id: str = "",
organization_id: str = "",
) -> Union[Any, None]:
"""Updates a chart's title, theme, type, language, or location (folder/organization).
Parameters
----------
chart_id : str
ID Of chart, table, or map.
title : str, optional
New title, by default ""
theme : str, optional
New theme, by default ""
chart_type : str, optional
New chart type. See https://developer.datawrapper.de/docs/chart-types, by default ""
language : str, optional
New language, by default ""
folder_id : str, optional
New folder's ID, by default ""
organization_id : str, optional
New organization's ID, by default ""
"""
_header = self._auth_header
_header["accept"] = "*/*"
_header["content-type"] = "application/json"
_query = {}
if title:
_query["title"] = title
if theme:
_query["theme"] = theme
if chart_type:
_query["type"] = chart_type
if language:
_query["language"] = language
if folder_id:
_query["folderId"] = folder_id
if organization_id:
_query["organizationId"] = organization_id
update_chart_response = r.patch(
url=self._CHARTS_URL + f"/{chart_id}",
headers=_header,
data=json.dumps(_query),
)
if update_chart_response.status_code == 200:
print(f"Chart with id {chart_id} updated!")
return self.publish_chart(chart_id)
else:
print("Chart could not be updated at the time.")
return None
def display_chart(self, chart_id: str) -> IPython.display.HTML:
"""Displays a datawrapper chart.
Parameters
----------
chart_id : str
ID of chart, table, or map.
Returns
-------
IPython.display.HTML
HTML displaying the chart.
"""
_chart_properties = self.chart_properties(chart_id)
_iframe_code = _chart_properties["metadata"]["publish"]["embed-codes"][ # type: ignore
"embed-method-iframe"
]
return HTML(_iframe_code)
def get_iframe_code(
self, chart_id: str, responsive: bool = False
) -> Union[str, Any]:
"""Returns a chart, table, or map's iframe embed code.
Parameters
----------
chart_id : str
ID of chart, table, or map.
responsive : bool, optional
Whether to return a responsive iframe embed code., by default False
Returns
-------
str
iframe embed code.
"""
_chart_properties = self.chart_properties(chart_id)
if responsive:
iframe_code = _chart_properties["metadata"]["publish"][ # type: ignore
"embed-codes"
][
"embed-method-responsive"
]
else:
iframe_code = _chart_properties["metadata"]["publish"][ # type: ignore
"embed-codes"
][
"embed-method-iframe"
]
return iframe_code
def export_chart(
self,
chart_id: str,
unit: str = "px",
mode: str = "rgb",
width: int = 100,
plain: bool = False,
zoom: int = 2,
scale: int = 1,
border_width: int = 20,
output: str = "png",
filepath: str = "./image.png",
display: bool = False,
) -> Union[Any, None]:
"""Exports a chart, table, or map.
Parameters
----------
chart_id : str
ID of chart, table, or map.
unit : str, optional
One of px, mm, inch. Defines the unit in which the borderwidth, height, and width will be measured in, by default "px"
mode : str, optional
One of rgb or cmyk. Which color mode the output should be in, by default "rgb"
width : int, optional
Width of visualization. If not specified, it takes the chart width, by default None
plain : bool, optional
Defines if only the visualization should be exported (True), or if it should include header and footer as well (False), by default False
zoom : int, optional
Defines the multiplier for the png size, by default 2
scale : int, optional
Defines the multiplier for the pdf size, by default 1
border_width : int, optional
Margin arouund the visualization, by default 20
output : str, optional
One of png, pdf, or svg, by default "png"
filepath : str, optional
Name/filepath to save output in, by default "./image.png"
display : bool, optional
Whether to display the exported image as output in the notebook cell, by default False
Returns
-------
IPython.display.Image
If display is True, it returns an Image.
"""
_export_url = f"{self._CHARTS_URL}/{chart_id}/export/{output}"
_filepath = Path(filepath)
_filepath = _filepath.with_suffix(f".{output}")
_plain = "true" if plain else "false"
querystring = {
"unit": unit,
"mode": mode,
"width": width,
"plain": _plain,
"zoom": zoom,
"scale": scale,
"borderWidth": border_width,
}
_header = self._auth_header
_header["accept"] = "*/*"
export_chart_response = r.get(
url=_export_url, headers=_header, params=querystring # type: ignore
)
if export_chart_response.status_code == 200:
with open(_filepath, "wb") as response:
response.write(export_chart_response.content)
if display:
return Image(_filepath)
else:
print(f"File exported at {_filepath}")
elif export_chart_response.status_code == 403:
print("You don't have access to the requested code.")
elif export_chart_response.status_code == 401:
print("You couldn't be authenticated.")
else:
print("Couldn't export at this time.")
return None
def get_folders(self) -> Union[Dict[Any, Any], None, Any]:
"""Get a list of folders in your Datawrapper account.
Returns
-------
dict
A dictionary containing the folders in your Datawrapper account and their information.
"""
get_folders_response = r.get(
url=self._FOLDERS_URL,
headers=self._auth_header,
)
if get_folders_response.status_code == 200:
return get_folders_response.json()
else:
print(
"Couldn't retrieve folders in account. Make sure you have the rigth authorization credentials (access token)."
)
return None
def move_chart(self, chart_id: str, folder_id: str) -> Union[Any, None]:
"""Moves a chart, table, or map to a specified folder.
Parameters
----------
chart_id : str
ID of chart, table, or map.
folder_id : str
ID of folder to move visualization to.
"""
_header = self._auth_header
_header["content-type"] = "application/json"
_data = {"folderId": folder_id}
move_chart_response = r.patch(
url=self._CHARTS_URL + f"/{chart_id}",
headers=_header,
data=json.dumps(_data),
)
if move_chart_response.status_code == 200:
print(f"Chart moved to folder {folder_id}")
else:
print("Chart could not be moved at the moment.")
return None
def delete_chart(self, chart_id: str) -> r.Response.content: # type: ignore
"""Deletes a specified chart, table or map.
Parameters
----------
chart_id : str
ID of chart, table, or map.
Returns
-------
r.Response.content
The content of the requests.delete
"""
delete_chart_response = r.delete(
url=self._CHARTS_URL + f"/{chart_id}", headers=self._auth_header
)
if delete_chart_response.content:
return delete_chart_response.content
else:
print(f"Successfully deleted chart with id {chart_id}")
return None
def get_charts(
self,
user_id: str = "",
published: str = "true",
search: str = "",
order: str = "DESC",
order_by: str = "createdAt",
limit: int = 25,
) -> Union[None, List[Any]]:
"""Retrieves a list of charts by User
Parameters
----------
user_id : str, optional
ID of the user to fetch charts for, by default ""
published : str, optional
Flag to filter resutls by publish status, by default "true"
search : str, optional
Search for charts with a specific title, by default ""
order : str, optional
Result order (ascending or descending), by default "DESC"
order_by : str, optional
Attribute to order by. One of createdAt, email, id, or name, by default "createdAt"
limit : int, optional
Maximum items to fetch, by default 25
Returns
-------
list
List of charts.
"""
_url = self._CHARTS_URL
_header = self._auth_header
_header["accept"] = "*/*"
_query = {}
if user_id:
_query["userId"] = user_id
if published:
_query["published"] = published
if search:
_query["search"] = search
if order:
_query["order"] = order
if order_by:
_query["orderBy"] = order_by
if limit:
_query["limit"] = str(limit)
get_charts_response = r.get(url=_url, headers=_header, params=_query)
if get_charts_response.status_code == 200:
return get_charts_response.json()["list"] # type: ignore
else:
print("Could not retrieve charts at this moment.")
return None
| 32.607963
| 148
| 0.554736
|
4a0cc1c59c199c115fa56e512499651cdeaaa900
| 2,997
|
py
|
Python
|
aws_workspace_utils.py
|
zulily/aws_workspace_maker
|
bcdd157023ee6e28016d10158f82fb02beea76b9
|
[
"Apache-2.0"
] | null | null | null |
aws_workspace_utils.py
|
zulily/aws_workspace_maker
|
bcdd157023ee6e28016d10158f82fb02beea76b9
|
[
"Apache-2.0"
] | null | null | null |
aws_workspace_utils.py
|
zulily/aws_workspace_maker
|
bcdd157023ee6e28016d10158f82fb02beea76b9
|
[
"Apache-2.0"
] | 1
|
2021-11-05T21:17:14.000Z
|
2021-11-05T21:17:14.000Z
|
"""
Helper function for all things workspaces
"""
import boto3
class WorkSpaceClient():
'''
A class that abstracts AWS Workspace boto client
'''
def __init__(self, region):
'''
Create a client to interact with WorkSpaces in a region
'''
self.ws_client = boto3.client('workspaces', region_name=region)
def create_workspace(self, workspace_config):
'''
Create a WorkSpace in the given region
'''
response = self.ws_client.create_workspaces(Workspaces=[workspace_config])
return response
def delete_bundle(self, bundle_id):
'''
Delete a Bundle in the given region
'''
response = self.ws_client.delete_workspace_bundle(BundleId=bundle_id)
return response
def delete_image(self, image_id):
'''
Delete an Image in the given region
'''
response = self.ws_client.delete_workspace_image(ImageId=image_id)
return response
def get_current_bundles(self):
'''
Retrieve all bundles in the current region, used for deriving config
'''
bundles = None
response = self.ws_client.describe_workspace_bundles()
bundles = {i['Name']:i for i in response['Bundles']}
return bundles
def get_current_directories(self):
'''
Retrieve all directories in the current region, used for deriving config
'''
directories = None
response = self.ws_client.describe_workspace_directories()
directories = {i['Alias']:i for i in response['Directories']}
return directories
def get_current_images(self):
'''
Retrieve all images in the current region, used for deriving config
'''
images = None
response = self.ws_client.describe_workspace_images()
images = {i['Name']:i for i in response['Images']}
return images
def get_current_workspaces(self):
'''
Retrieve all workspaces in the current region, used for creating the list for creation
'''
workspaces = None
response = self.ws_client.describe_workspaces()
workspaces = response['Workspaces']
return workspaces
def get_tags(self, resource_id):
'''
Given a ResourceId, retrieve the tags associated with it, used for determining ownership
'''
tags = None
response = self.ws_client.describe_tags(ResourceId=resource_id)
tags = response.get('TagList')
return tags
def migrate_workspace(self, workspace_id, bundle_id):
'''
Migrate a WorkSpace in the given region to the given bundle_id.
See https://docs.aws.amazon.com/workspaces/latest/adminguide/migrate-workspaces.html
'''
response = self.ws_client.migrate_workspace(SourceWorkspaceId=workspace_id,
BundleId=bundle_id)
return response
| 29.97
| 96
| 0.627961
|
4a0cc2fc79ad6f0e637641c6a24a71bd26caa6d3
| 4,488
|
py
|
Python
|
tclCommands/TclCommandCncjob.py
|
JuanoVenegas/flatcam
|
f3b7f7205a530a553e5a7b33e264cc5e681a76e9
|
[
"MIT"
] | 1
|
2021-09-11T10:59:08.000Z
|
2021-09-11T10:59:08.000Z
|
tclCommands/TclCommandCncjob.py
|
JuanoVenegas/flatcam
|
f3b7f7205a530a553e5a7b33e264cc5e681a76e9
|
[
"MIT"
] | null | null | null |
tclCommands/TclCommandCncjob.py
|
JuanoVenegas/flatcam
|
f3b7f7205a530a553e5a7b33e264cc5e681a76e9
|
[
"MIT"
] | 2
|
2020-05-17T10:51:03.000Z
|
2022-02-04T14:35:38.000Z
|
from ObjectCollection import *
from tclCommands.TclCommand import TclCommandSignaled
class TclCommandCncjob(TclCommandSignaled):
"""
Tcl shell command to Generates a CNC Job from a Geometry Object.
example:
set_sys units MM
new
open_gerber tests/gerber_files/simple1.gbr -outname margin
isolate margin -dia 3
cncjob margin_iso
"""
# array of all command aliases, to be able use old names for backward compatibility (add_poly, add_polygon)
aliases = ['cncjob']
# dictionary of types from Tcl command, needs to be ordered
arg_names = collections.OrderedDict([
('name', str)
])
# dictionary of types from Tcl command, needs to be ordered , this is for options like -optionname value
option_types = collections.OrderedDict([
('z_cut', float),
('z_move', float),
('feedrate', float),
('feedrate_rapid', float),
('tooldia', float),
('spindlespeed', int),
('multidepth', bool),
('extracut', bool),
('depthperpass', float),
('endz', float),
('ppname_g', str),
('outname', str)
])
# array of mandatory options for current Tcl command: required = {'name','outname'}
required = ['name']
# structured help for current command, args needs to be ordered
help = {
'main': "Generates a CNC Job from a Geometry Object.",
'args': collections.OrderedDict([
('name', 'Name of the source object.'),
('tooldia', 'Tool diameter to show on screen.'),
('z_cut', 'Z-axis cutting position.'),
('z_move', 'Z-axis moving position.'),
('feedrate', 'Moving speed when cutting.'),
('feedrate_rapid', 'Rapid moving at speed when cutting.'),
('spindlespeed', 'Speed of the spindle in rpm (example: 4000).'),
('multidepth', 'Use or not multidepth cnccut. (True or False)'),
('depthperpass', 'Height of one layer for multidepth.'),
('extracut', 'Use or not an extra cnccut over the first point in path,in the job end (example: True)'),
('endz', 'Height where the last move will park.'),
('outname', 'Name of the resulting Geometry object.'),
('ppname_g', 'Name of the Geometry postprocessor. No quotes, case sensitive')
]),
'examples': []
}
def execute(self, args, unnamed_args):
"""
execute current TCL shell command
:param args: array of known named arguments and options
:param unnamed_args: array of other values which were passed into command
without -somename and we do not have them in known arg_names
:return: None or exception
"""
name = args['name']
if 'outname' not in args:
args['outname'] = str(name) + "_cnc"
obj = self.app.collection.get_by_name(str(name), isCaseSensitive=False)
if obj is None:
self.raise_tcl_error("Object not found: %s" % str(name))
if not isinstance(obj, FlatCAMGeometry):
self.raise_tcl_error('Expected FlatCAMGeometry, got %s %s.' % (str(name), type(obj)))
args["z_cut"] = args["z_cut"] if "z_cut" in args else obj.options["cutz"]
args["z_move"] = args["z_move"] if "z_move" in args else obj.options["travelz"]
args["feedrate"] = args["feedrate"] if "feedrate" in args else obj.options["feedrate"]
args["feedrate_rapid"] = args["feedrate_rapid"] if "feedrate_rapid" in args else obj.options["feedrate_rapid"]
args["spindlespeed"] = args["spindlespeed"] if "spindlespeed" in args else None
args["tooldia"] = args["tooldia"] if "tooldia" in args else obj.options["cnctooldia"]
args["multidepth"] = args["multidepth"] if "multidepth" in args else obj.options["multidepth"]
args["depthperpass"] = args["depthperpass"] if "depthperpass" in args else obj.options["depthperpass"]
args["extracut"] = args["extracut"] if "extracut" in args else obj.options["extracut"]
args["endz"]= args["endz"] if "endz" in args else obj.options["endz"]
args["ppname_g"] = args["ppname_g"] if "ppname_g" in args else obj.options["ppname_g"]
del args['name']
# HACK !!! Should be solved elsewhere!!!
# default option for multidepth is False
obj.options['multidepth'] = False
obj.generatecncjob(use_thread=False, **args)
| 42.339623
| 118
| 0.615196
|
4a0cc377643d9a70b6e8626d4dac654abffcf1d3
| 3,106
|
py
|
Python
|
container.py
|
Higert/ride-sharing-simulation-01
|
826f7f7e8cf12f1423aa3d1bc0dc7847267ff985
|
[
"MIT"
] | 1
|
2019-01-24T01:55:41.000Z
|
2019-01-24T01:55:41.000Z
|
container.py
|
jellycsc/Uber-ride-sharing-simulation
|
826f7f7e8cf12f1423aa3d1bc0dc7847267ff985
|
[
"MIT"
] | null | null | null |
container.py
|
jellycsc/Uber-ride-sharing-simulation
|
826f7f7e8cf12f1423aa3d1bc0dc7847267ff985
|
[
"MIT"
] | null | null | null |
class Container:
"""A container that holds objects.
This is an abstract class. Only child classes should be instantiated.
"""
def add(self, item):
"""Add <item> to this Container.
@type self: Container
@type item: Object
@rtype: None
"""
raise NotImplementedError("Implemented in a subclass")
def remove(self):
"""Remove and return a single item from this Container.
@type self: Container
@rtype: Object
"""
raise NotImplementedError("Implemented in a subclass")
def is_empty(self):
"""Return True iff this Container is empty.
@type self: Container
@rtype: bool
"""
raise NotImplementedError("Implemented in a subclass")
class PriorityQueue(Container):
"""A queue of items that operates in priority order.
Items are removed from the queue according to priority; the item with the
highest priority is removed first. Ties are resolved in FIFO order,
meaning the item which was inserted *earlier* is the first one to be
removed.
Priority is defined by the rich comparison methods for the objects in the
container (__lt__, __le__, __gt__, __ge__).
If x < y, then x has a *HIGHER* priority than y.
All objects in the container must be of the same type.
"""
# === Private Attributes ===
# @type _items: list
# The items stored in the priority queue.
#
# === Representation Invariants ===
# _items is a sorted list, where the first item in the queue is the
# item with the highest priority.
def __init__(self):
"""Initialize an empty PriorityQueue.
@type self: PriorityQueue
@rtype: None
"""
self._items = []
def remove(self):
"""Remove and return the next item from this PriorityQueue.
Precondition: <self> should not be empty.
@type self: PriorityQueue
@rtype: object
>>> pq = PriorityQueue()
>>> pq.add("red")
>>> pq.add("blue")
>>> pq.add("yellow")
>>> pq.add("green")
>>> pq.remove()
'blue'
>>> pq.remove()
'green'
>>> pq.remove()
'red'
>>> pq.remove()
'yellow'
"""
return self._items.pop(0)
def is_empty(self):
"""
Return true iff this PriorityQueue is empty.
@type self: PriorityQueue
@rtype: bool
>>> pq = PriorityQueue()
>>> pq.is_empty()
True
>>> pq.add("thing")
>>> pq.is_empty()
False
"""
return len(self._items) == 0
def add(self, item):
"""Add <item> to this PriorityQueue.
@type self: PriorityQueue
@type item: object
@rtype: None
>>> pq = PriorityQueue()
>>> pq.add("yellow")
>>> pq.add("blue")
>>> pq.add("red")
>>> pq.add("green")
>>> pq._items
['blue', 'green', 'red', 'yellow']
"""
self._items.append(item)
self._items.sort()
| 25.459016
| 77
| 0.556986
|
4a0cc3b38a3186ceba30aca132491f9a7fa3c1b2
| 1,624
|
py
|
Python
|
iOSIconPrep.py
|
NViday/PrepAppIconIOS-Android
|
168b1388d3129b527bae4da2e7d1a6e7754062e7
|
[
"MIT"
] | null | null | null |
iOSIconPrep.py
|
NViday/PrepAppIconIOS-Android
|
168b1388d3129b527bae4da2e7d1a6e7754062e7
|
[
"MIT"
] | null | null | null |
iOSIconPrep.py
|
NViday/PrepAppIconIOS-Android
|
168b1388d3129b527bae4da2e7d1a6e7754062e7
|
[
"MIT"
] | null | null | null |
# Goal : Resize an high-resolution Image to fit iOS Icon standard
# Follwoing Android Stnadard
# @ 1x
# @ 2x
# @ 3x
#!/bin/python
import os
import errno
from resizeImage import *
path = os.getcwd()
print(path)
def createIOSFolder(folderPath):
try :
os.makedirs(folderPath + "/iOS/")
except FileExistError:
pass
return folderPath
def iOSPrepLauncher( filename) :
iconType = "launcher"
# create an icon folder
folder = createAndroidFolder(iconType)
print (folder)
resolutions = ["@2x","@2x", "@3x", "@2x", "@3x", "@3x", "@2x", "@2x", "@3x", "AppStore"]
sizes = ["40x40", "58x58", "60x60","80x80","87x87", "120x120", "152x152", "167x167","180x180", "1024x1024"]
PrepIcons(folder, filename, iconType, sizes, resolutions)
def iOSPrepTabIcon(filename, name) :
iconType = "tab-"+name
folder = createAndroidFolder(iconType)
print(folder)
resolutions = ["square-comp@2x","circle-comp@2x", "square-reg@2x", "circle-reg@2x", "square-comp@3x", "circle-comp@3x", "square-reg@3x", "circle-reg@3x" ]
sizes = ["34x34","36x36", "46x46", "50x50", "51x51" , "54x54", "69x69", "75x75"]
PrepIcons(folder, filename, iconType, sizes, resolutions)
def iOSPrepContextualIcon(filename, name):
iconType = "context-"+ name
folder = createAndroidFolder(iconType)
resolutions = ["square-comp@2x","circle-comp@2x", "square-reg@2x", "circle-reg@2x", "square-comp@3x", "circle-comp@3x", "square-reg@3x", "circle-reg@3x" ]
sizes = ["34x34","36x36", "46x46", "50x50", "51x51" , "54x54", "69x69", "75x75"]
PrepIcons(folder, filename, iconType, sizes, resolutions)
| 20.3
| 157
| 0.656404
|
4a0cc41f0c9a9888ca40bd0895b6ea016765174e
| 1,551
|
py
|
Python
|
gridgui/simplepanel.py
|
joetjo/jopLauncher
|
ff8496fd8416727a6f6feb5d8575d53cd9ebaeb6
|
[
"Apache-2.0"
] | null | null | null |
gridgui/simplepanel.py
|
joetjo/jopLauncher
|
ff8496fd8416727a6f6feb5d8575d53cd9ebaeb6
|
[
"Apache-2.0"
] | null | null | null |
gridgui/simplepanel.py
|
joetjo/jopLauncher
|
ff8496fd8416727a6f6feb5d8575d53cd9ebaeb6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 joetjo https://github.com/joetjo/MarkdownHelper
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tkinter import Frame
from gridgui.gridbehaviour import GhGridBehaviour
class GhSimplePanel(GhGridBehaviour):
def __init__(self, parent, row=0, col=0, colspan=1, sticky="nsew",
border_color=None, border_width=0):
super().__init__(row, col)
if border_color is None:
content_parent = parent
else:
content_parent = Frame(parent, bg=border_color, padx=border_width, pady=border_width)
content_parent.grid(row=row, column=col, columnspan=colspan, sticky=sticky)
self.content = Frame(content_parent, bg=parent.cget('bg'), padx=0, pady=0)
self.content.grid(row=row, column=col, columnspan=colspan, sticky=sticky)
"""
highlightbackground="black" and highlightthickness=1
"""
def grid_remove(self):
self.content.grid_remove()
def grid(self):
self.content.grid()
| 33.717391
| 97
| 0.687299
|
4a0cc491438354120386609a3f1fe2db63b528ca
| 3,695
|
py
|
Python
|
examples/middleware_and_antiflood.py
|
setazer/aiogram
|
1ea76cd902cfc19986389a52adda70a8bb4555db
|
[
"MIT"
] | 1
|
2020-02-27T02:46:51.000Z
|
2020-02-27T02:46:51.000Z
|
examples/middleware_and_antiflood.py
|
setazer/aiogram
|
1ea76cd902cfc19986389a52adda70a8bb4555db
|
[
"MIT"
] | null | null | null |
examples/middleware_and_antiflood.py
|
setazer/aiogram
|
1ea76cd902cfc19986389a52adda70a8bb4555db
|
[
"MIT"
] | null | null | null |
import asyncio
from aiogram import Bot, Dispatcher, executor, types
from aiogram.contrib.fsm_storage.redis import RedisStorage2
from aiogram.dispatcher import DEFAULT_RATE_LIMIT
from aiogram.dispatcher.handler import CancelHandler, current_handler
from aiogram.dispatcher.middlewares import BaseMiddleware
from aiogram.utils.exceptions import Throttled
TOKEN = 'BOT TOKEN HERE'
# In this example Redis storage is used
storage = RedisStorage2(db=5)
bot = Bot(token=TOKEN)
dp = Dispatcher(bot, storage=storage)
def rate_limit(limit: int, key=None):
"""
Decorator for configuring rate limit and key in different functions.
:param limit:
:param key:
:return:
"""
def decorator(func):
setattr(func, 'throttling_rate_limit', limit)
if key:
setattr(func, 'throttling_key', key)
return func
return decorator
class ThrottlingMiddleware(BaseMiddleware):
"""
Simple middleware
"""
def __init__(self, limit=DEFAULT_RATE_LIMIT, key_prefix='antiflood_'):
self.rate_limit = limit
self.prefix = key_prefix
super(ThrottlingMiddleware, self).__init__()
async def on_process_message(self, message: types.Message, data: dict):
"""
This handler is called when dispatcher receives a message
:param message:
"""
# Get current handler
handler = current_handler.get()
# Get dispatcher from context
dispatcher = Dispatcher.get_current()
# If handler was configured, get rate limit and key from handler
if handler:
limit = getattr(handler, 'throttling_rate_limit', self.rate_limit)
key = getattr(handler, 'throttling_key', f"{self.prefix}_{handler.__name__}")
else:
limit = self.rate_limit
key = f"{self.prefix}_message"
# Use Dispatcher.throttle method.
try:
await dispatcher.throttle(key, rate=limit)
except Throttled as t:
# Execute action
await self.message_throttled(message, t)
# Cancel current handler
raise CancelHandler()
async def message_throttled(self, message: types.Message, throttled: Throttled):
"""
Notify user only on first exceed and notify about unlocking only on last exceed
:param message:
:param throttled:
"""
handler = current_handler.get()
dispatcher = Dispatcher.get_current()
if handler:
key = getattr(handler, 'throttling_key', f"{self.prefix}_{handler.__name__}")
else:
key = f"{self.prefix}_message"
# Calculate how many time is left till the block ends
delta = throttled.rate - throttled.delta
# Prevent flooding
if throttled.exceeded_count <= 2:
await message.reply('Too many requests! ')
# Sleep.
await asyncio.sleep(delta)
# Check lock status
thr = await dispatcher.check_key(key)
# If current message is not last with current key - do not send message
if thr.exceeded_count == throttled.exceeded_count:
await message.reply('Unlocked.')
@dp.message_handler(commands=['start'])
@rate_limit(5, 'start')  # optional: configures the throttling manager for this handler
async def cmd_test(message: types.Message):
    """Reply to /start; throttled so it can only be used once every 5 seconds."""
    await message.reply('Test passed! You can use this command every 5 seconds.')
if __name__ == '__main__':
    # Register the anti-flood middleware on the dispatcher before polling starts.
    dp.middleware.setup(ThrottlingMiddleware())
    # Start long-polling (blocks until the process is stopped).
    executor.start_polling(dp)
| 30.53719
| 117
| 0.660893
|
4a0cc4f1ac707f22d6558ca19a99fe839264f5dd
| 9,006
|
py
|
Python
|
sdk/python/pulumi_azure_native/network/v20200501/get_firewall_policy.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20200501/get_firewall_policy.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20200501/get_firewall_policy.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetFirewallPolicyResult',
'AwaitableGetFirewallPolicyResult',
'get_firewall_policy',
]
@pulumi.output_type
class GetFirewallPolicyResult:
    """
    FirewallPolicy Resource.
    """
    # NOTE: emitted by the Pulumi SDK Generator (see the file header warning);
    # regenerate rather than editing this class by hand.
    def __init__(__self__, base_policy=None, child_policies=None, dns_settings=None, etag=None, firewalls=None, id=None, location=None, name=None, provisioning_state=None, rule_collection_groups=None, tags=None, threat_intel_mode=None, threat_intel_whitelist=None, type=None):
        # Each argument is type-checked and stored via pulumi.set so that
        # @pulumi.output_type can expose it through the matching @property below.
        if base_policy and not isinstance(base_policy, dict):
            raise TypeError("Expected argument 'base_policy' to be a dict")
        pulumi.set(__self__, "base_policy", base_policy)
        if child_policies and not isinstance(child_policies, list):
            raise TypeError("Expected argument 'child_policies' to be a list")
        pulumi.set(__self__, "child_policies", child_policies)
        if dns_settings and not isinstance(dns_settings, dict):
            raise TypeError("Expected argument 'dns_settings' to be a dict")
        pulumi.set(__self__, "dns_settings", dns_settings)
        if etag and not isinstance(etag, str):
            raise TypeError("Expected argument 'etag' to be a str")
        pulumi.set(__self__, "etag", etag)
        if firewalls and not isinstance(firewalls, list):
            raise TypeError("Expected argument 'firewalls' to be a list")
        pulumi.set(__self__, "firewalls", firewalls)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if rule_collection_groups and not isinstance(rule_collection_groups, list):
            raise TypeError("Expected argument 'rule_collection_groups' to be a list")
        pulumi.set(__self__, "rule_collection_groups", rule_collection_groups)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if threat_intel_mode and not isinstance(threat_intel_mode, str):
            raise TypeError("Expected argument 'threat_intel_mode' to be a str")
        pulumi.set(__self__, "threat_intel_mode", threat_intel_mode)
        if threat_intel_whitelist and not isinstance(threat_intel_whitelist, dict):
            raise TypeError("Expected argument 'threat_intel_whitelist' to be a dict")
        pulumi.set(__self__, "threat_intel_whitelist", threat_intel_whitelist)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="basePolicy")
    def base_policy(self) -> Optional['outputs.SubResourceResponse']:
        """
        The parent firewall policy from which rules are inherited.
        """
        return pulumi.get(self, "base_policy")

    @property
    @pulumi.getter(name="childPolicies")
    def child_policies(self) -> Sequence['outputs.SubResourceResponse']:
        """
        List of references to Child Firewall Policies.
        """
        return pulumi.get(self, "child_policies")

    @property
    @pulumi.getter(name="dnsSettings")
    def dns_settings(self) -> Optional['outputs.DnsSettingsResponse']:
        """
        DNS Proxy Settings definition.
        """
        return pulumi.get(self, "dns_settings")

    @property
    @pulumi.getter
    def etag(self) -> str:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter
    def firewalls(self) -> Sequence['outputs.SubResourceResponse']:
        """
        List of references to Azure Firewalls that this Firewall Policy is associated with.
        """
        return pulumi.get(self, "firewalls")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        The provisioning state of the firewall policy resource.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="ruleCollectionGroups")
    def rule_collection_groups(self) -> Sequence['outputs.SubResourceResponse']:
        """
        List of references to FirewallPolicyRuleCollectionGroups.
        """
        return pulumi.get(self, "rule_collection_groups")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags.
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter(name="threatIntelMode")
    def threat_intel_mode(self) -> Optional[str]:
        """
        The operation mode for Threat Intelligence.
        """
        return pulumi.get(self, "threat_intel_mode")

    @property
    @pulumi.getter(name="threatIntelWhitelist")
    def threat_intel_whitelist(self) -> Optional['outputs.FirewallPolicyThreatIntelWhitelistResponse']:
        """
        ThreatIntel Whitelist for Firewall Policy.
        """
        return pulumi.get(self, "threat_intel_whitelist")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetFirewallPolicyResult(GetFirewallPolicyResult):
    # Wrapper that makes the (already resolved) result awaitable: the
    # unreachable `yield` marks __await__ as a generator, which immediately
    # returns a plain GetFirewallPolicyResult copy when awaited.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetFirewallPolicyResult(
            base_policy=self.base_policy,
            child_policies=self.child_policies,
            dns_settings=self.dns_settings,
            etag=self.etag,
            firewalls=self.firewalls,
            id=self.id,
            location=self.location,
            name=self.name,
            provisioning_state=self.provisioning_state,
            rule_collection_groups=self.rule_collection_groups,
            tags=self.tags,
            threat_intel_mode=self.threat_intel_mode,
            threat_intel_whitelist=self.threat_intel_whitelist,
            type=self.type)
def get_firewall_policy(expand: Optional[str] = None,
                        firewall_policy_name: Optional[str] = None,
                        resource_group_name: Optional[str] = None,
                        opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetFirewallPolicyResult:
    """
    FirewallPolicy Resource.


    :param str expand: Expands referenced resources.
    :param str firewall_policy_name: The name of the Firewall Policy.
    :param str resource_group_name: The name of the resource group.
    :param pulumi.InvokeOptions opts: Options controlling the invoke (optional).
    """
    __args__ = dict()
    __args__['expand'] = expand
    __args__['firewallPolicyName'] = firewall_policy_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to this SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    # Invoke the provider function; .value is the resolved plain result.
    __ret__ = pulumi.runtime.invoke('azure-native:network/v20200501:getFirewallPolicy', __args__, opts=opts, typ=GetFirewallPolicyResult).value

    return AwaitableGetFirewallPolicyResult(
        base_policy=__ret__.base_policy,
        child_policies=__ret__.child_policies,
        dns_settings=__ret__.dns_settings,
        etag=__ret__.etag,
        firewalls=__ret__.firewalls,
        id=__ret__.id,
        location=__ret__.location,
        name=__ret__.name,
        provisioning_state=__ret__.provisioning_state,
        rule_collection_groups=__ret__.rule_collection_groups,
        tags=__ret__.tags,
        threat_intel_mode=__ret__.threat_intel_mode,
        threat_intel_whitelist=__ret__.threat_intel_whitelist,
        type=__ret__.type)
| 37.682008
| 276
| 0.658339
|
4a0cc63f7869d2bfaf5d43d56eca4c64309328b1
| 6,452
|
py
|
Python
|
pygmt/__init__.py
|
jbusecke/pygmt
|
9ef6338dbb9bdd4c31dda94da6d4126852a6cd85
|
[
"BSD-3-Clause"
] | 1
|
2021-11-16T01:29:59.000Z
|
2021-11-16T01:29:59.000Z
|
pygmt/__init__.py
|
jbusecke/pygmt
|
9ef6338dbb9bdd4c31dda94da6d4126852a6cd85
|
[
"BSD-3-Clause"
] | 20
|
2021-04-02T14:27:45.000Z
|
2022-03-29T11:05:24.000Z
|
pygmt/__init__.py
|
jbusecke/pygmt
|
9ef6338dbb9bdd4c31dda94da6d4126852a6cd85
|
[
"BSD-3-Clause"
] | null | null | null |
"""
PyGMT is a library for processing geospatial and geophysical data and making
publication-quality maps and figures. It provides a Pythonic interface for the
Generic Mapping Tools (GMT), a command-line program widely used in the Earth
Sciences. Besides making GMT more accessible to new users, PyGMT aims to
provide integration with the PyData ecosystem as well as support for rich
display in Jupyter notebooks.
Main Features
-------------
Here are just a few of the things that PyGMT does well:
- Easy handling of individual types of data like Cartesian, geographic, or
time-series data.
- Processing of (geo)spatial data including gridding, filtering, and masking
- Allows plotting of a large spectrum of objects on figures including
lines, vectors, polygons, and symbols (pre-defined and customized)
- Generate publication-quality illustrations and make animations
"""
import atexit as _atexit
from pkg_resources import get_distribution
# Import modules to make the high-level GMT Python API
from pygmt import datasets
from pygmt.accessors import GMTDataArrayAccessor
from pygmt.figure import Figure, set_display
from pygmt.io import load_dataarray
from pygmt.session_management import begin as _begin
from pygmt.session_management import end as _end
from pygmt.src import (
blockmean,
blockmedian,
blockmode,
config,
grd2cpt,
grd2xyz,
grdclip,
grdcut,
grdfill,
grdfilter,
grdgradient,
grdinfo,
grdlandmask,
grdproject,
grdsample,
grdtrack,
grdvolume,
info,
makecpt,
nearneighbor,
project,
select,
sph2grd,
sphdistance,
sphinterpolate,
surface,
which,
x2sys_cross,
x2sys_init,
xyz2grd,
)
# Get the semantic version string through setuptools-scm.
__version__ = f'v{get_distribution("pygmt").version}'  # e.g. v0.1.2.dev3+g0ab3cd78
# Trailing commit hash after "+g", empty when the version has no "+g" suffix.
__commit__ = __version__.split("+g")[-1] if "+g" in __version__ else ""  # 0ab3cd78

# Start our global modern mode session
_begin()
# Tell Python to run _end when shutting down
_atexit.register(_end)
def print_clib_info():
    """
    Print information about the GMT shared library that we can find.

    Includes the GMT version, default values for parameters, the path to the
    ``libgmt`` shared library, and GMT directories.
    """
    from pygmt.clib import Session

    report = ["GMT library information:"]
    with Session() as session:
        report.extend(
            f"  {name}: {session.info[name]}" for name in sorted(session.info)
        )
    print("\n".join(report))
def show_versions():
    """
    Print dependency versions that are useful when submitting bug reports.

    Reports, in order:

    - PyGMT itself
    - System information (Python version, Operating System)
    - Core dependency versions (Numpy, Pandas, Xarray, etc)
    - GMT library information
    """
    import importlib
    import platform
    import subprocess
    import sys

    def _module_version(name):
        """Version string of an importable module, or None if unavailable."""
        try:
            if name in sys.modules:
                module = sys.modules[name]
            else:
                module = importlib.import_module(name)
            try:
                return module.__version__
            except AttributeError:
                return module.version
        except ImportError:
            return None

    def _ghostscript_version():
        """Version reported by the ghostscript executable, or None."""
        platform_name = sys.platform
        if platform_name.startswith(("linux", "freebsd", "darwin")):
            candidates = ["gs"]
        elif platform_name == "win32":
            # Try the 64-bit console binary first, then the 32-bit one.
            candidates = ["gswin64c.exe", "gswin32c.exe"]
        else:
            return None
        for candidate in candidates:
            try:
                return subprocess.check_output(
                    [candidate, "--version"], universal_newlines=True
                ).strip()
            except FileNotFoundError:
                continue
        return None

    def _gmt_version():
        """Version reported by the ``gmt`` executable, or None."""
        try:
            return subprocess.check_output(
                ["gmt", "--version"], universal_newlines=True
            ).strip()
        except FileNotFoundError:
            return None

    print("PyGMT information:")
    print(f"  version: {__version__}")

    print("System information:")
    for label, value in (
        ("python", sys.version.replace("\n", " ")),
        ("executable", sys.executable),
        ("machine", platform.platform()),
    ):
        print(f"  {label}: {value}")

    print("Dependency information:")
    for name in ("numpy", "pandas", "xarray", "netCDF4", "packaging"):
        print(f"  {name}: {_module_version(name)}")
    print(f"  ghostscript: {_ghostscript_version()}")
    print(f"  gmt: {_gmt_version()}")

    print_clib_info()
def test(doctest=True, verbose=True, coverage=False, figures=True):
    """
    Run the test suite.

    Uses `pytest <http://pytest.org/>`__ to discover and run the tests. If you
    haven't already, you can install it with `conda
    <http://conda.pydata.org/>`__ or `pip <https://pip.pypa.io/en/stable/>`__.

    Parameters
    ----------
    doctest : bool
        If ``True``, will run the doctests as well (code examples that start
        with a ``>>>`` in the docs).
    verbose : bool
        If ``True``, will print extra information during the test run.
    coverage : bool
        If ``True``, will run test coverage analysis on the code as well.
        Requires ``pytest-cov``.
    figures : bool
        If ``True``, will test generated figures against saved baseline
        figures. Requires ``pytest-mpl`` and ``matplotlib``.

    Raises
    ------
    AssertionError
        If pytest returns a non-zero error code indicating that some tests have
        failed.
    """
    import pytest

    # Print environment/dependency details first to aid bug reports.
    show_versions()

    package = __name__
    pytest_args = []
    if verbose:
        pytest_args.append("-vv")
    if coverage:
        pytest_args += [f"--cov={package}", "--cov-report=term-missing"]
    if doctest:
        pytest_args.append("--doctest-modules")
    if figures:
        pytest_args.append("--mpl")
    pytest_args += ["--pyargs", package]
    exit_code = pytest.main(pytest_args)
    assert exit_code == 0, "Some tests have failed."
| 27.810345
| 83
| 0.629727
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.