| column | type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | int64 |
| qsc_code_cate_encoded_data | null |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
0eec9d3074b439e55c9718c0b6f3f23b0eb54adb
| 1,906
|
py
|
Python
|
autocnet/matcher/cuda_matcher.py
|
gsn9/autocnet
|
ddcca3ce3a6b59f720804bb3da03857efa4ff534
|
[
"CC0-1.0"
] | null | null | null |
autocnet/matcher/cuda_matcher.py
|
gsn9/autocnet
|
ddcca3ce3a6b59f720804bb3da03857efa4ff534
|
[
"CC0-1.0"
] | 1
|
2018-09-13T16:03:53.000Z
|
2018-09-13T16:03:53.000Z
|
autocnet/matcher/cuda_matcher.py
|
gsn9/autocnet
|
ddcca3ce3a6b59f720804bb3da03857efa4ff534
|
[
"CC0-1.0"
] | 1
|
2018-09-13T15:12:51.000Z
|
2018-09-13T15:12:51.000Z
|
import warnings
try:
import cudasift as cs
except Exception:  # cudasift is optional; fall back to None if it cannot be imported
    cs = None
import numpy as np
import pandas as pd
def match(edge, aidx=None, bidx=None, **kwargs):
"""
Apply a composite CUDA matcher and ratio check. If this method is used,
no additional ratio check is necessary and no symmetry check is required.
The ratio check is embedded on the cuda side and returned as an
ambiguity value. In testing symmetry is not required as it is expensive
without significant gain in accuracy when using this implementation.
"""
source_kps = edge.source.get_keypoints(index=aidx)
source_des = edge.source.descriptors[aidx]
source_map = {k:v for k, v in enumerate(source_kps.index)}
destin_kps = edge.destination.get_keypoints(index=bidx)
destin_des = edge.destination.descriptors[bidx]
destin_map = {k:v for k, v in enumerate(destin_kps.index)}
s_siftdata = cs.PySiftData.from_data_frame(source_kps, source_des)
d_siftdata = cs.PySiftData.from_data_frame(destin_kps, destin_des)
cs.PyMatchSiftData(s_siftdata, d_siftdata)
matches, _ = s_siftdata.to_data_frame()
    # Matches are reindexed 0-n, but need to be remapped to the source_kps,
    # destin_kps indices; the source_map/destin_map lookups below fix that mismatch.
source = np.empty(len(matches))
source[:] = edge.source['node_id']
destination = np.empty(len(matches))
destination[:] = edge.destination['node_id']
df = pd.concat([pd.Series(source), pd.Series(matches.index),
pd.Series(destination), matches.match,
matches.score, matches.ambiguity], axis=1)
df.columns = ['source_image', 'source_idx', 'destination_image',
'destination_idx', 'score', 'ambiguity']
df.source_idx = df.source_idx.map(source_map)
df.destination_idx = df.destination_idx.map(destin_map)
# Set the matches and set the 'ratio' (ambiguity) mask
edge.matches = df
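The docstring above notes that the ratio test runs on the CUDA side and comes back as the `ambiguity` column of `edge.matches`. Below is a minimal sketch of turning that column into a mask, assuming the DataFrame produced by `match()` above; the 0.8 threshold is illustrative, not taken from the source.

```python
import pandas as pd

def ambiguity_mask(matches: pd.DataFrame, threshold: float = 0.8) -> pd.Series:
    # Keep matches whose CUDA-side ratio (ambiguity) is below the chosen threshold.
    # threshold=0.8 is a placeholder value for illustration only.
    return matches['ambiguity'] < threshold

# Usage sketch, assuming edge.matches was populated by match() above:
# good_matches = edge.matches[ambiguity_mask(edge.matches)]
```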
| 35.962264
| 77
| 0.707765
| 275
| 1,906
| 4.76
| 0.381818
| 0.027502
| 0.018335
| 0.012223
| 0.082506
| 0.082506
| 0.032086
| 0.032086
| 0
| 0
| 0
| 0.001303
| 0.194648
| 1,906
| 52
| 78
| 36.653846
| 0.851466
| 0.272823
| 0
| 0
| 0
| 0
| 0.060651
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0.133333
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0eed2f6be467201cff2adf42d27d251ad3cba2b3
| 1,339
|
py
|
Python
|
tests/test_core.py
|
Kantouzin/brainfuck
|
812834320b080e2317d3fac377db64782057c8f4
|
[
"WTFPL"
] | null | null | null |
tests/test_core.py
|
Kantouzin/brainfuck
|
812834320b080e2317d3fac377db64782057c8f4
|
[
"WTFPL"
] | null | null | null |
tests/test_core.py
|
Kantouzin/brainfuck
|
812834320b080e2317d3fac377db64782057c8f4
|
[
"WTFPL"
] | null | null | null |
# coding: utf-8
import unittest
from test.support import captured_stdout
from brainfuck import BrainFuck
class TestCore(unittest.TestCase):
def test_hello_world(self):
bf = BrainFuck()
with captured_stdout() as stdout:
bf.run()
self.assertEqual(stdout.getvalue(), "Hello, world!\n")
def test_fizzbuzz(self):
bf = BrainFuck()
bf.load_file("./tests/fizz_buzz.txt")
with captured_stdout() as stdout:
bf.run()
fizzbuzz_list = list()
for i in range(1, 101):
if i % 15 == 0:
fizzbuzz_list.append("FizzBuzz")
elif i % 3 == 0:
fizzbuzz_list.append("Fizz")
elif i % 5 == 0:
fizzbuzz_list.append("Buzz")
else:
fizzbuzz_list.append(str(i))
fizzbuzz_list.append("\n")
self.assertEqual(stdout.getvalue(), " ".join(fizzbuzz_list))
def test_set_command(self):
bf = BrainFuck()
bf.set_command("にゃにゃ", "にゃー", "にゃっ", "にゃん",
"にゃ。", "にゃ、", "「", "」")
bf.load_file("./tests/hello_world_nya.txt")
with captured_stdout() as stdout:
bf.run()
self.assertEqual(stdout.getvalue(), "Hello, world!\n")
if __name__ == "__main__":
unittest.main()
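The tests above lean on `test.support.captured_stdout` to compare interpreter output against expected strings. A standalone illustration of that capture pattern, assuming a CPython installation that ships the `test` package:

```python
from test.support import captured_stdout

with captured_stdout() as stdout:
    print("Hello, world!")

assert stdout.getvalue() == "Hello, world!\n"
```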
| 24.345455
| 68
| 0.546677
| 153
| 1,339
| 4.588235
| 0.385621
| 0.119658
| 0.128205
| 0.08547
| 0.254986
| 0.254986
| 0.254986
| 0.254986
| 0.202279
| 0.202279
| 0
| 0.013129
| 0.317401
| 1,339
| 54
| 69
| 24.796296
| 0.754923
| 0.009709
| 0
| 0.305556
| 0
| 0
| 0.095166
| 0.036254
| 0
| 0
| 0
| 0
| 0.083333
| 1
| 0.083333
| false
| 0
| 0.083333
| 0
| 0.194444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0eed82297822ff3d19f2f5807a4ad2a8d7e8d1d9
| 4,531
|
py
|
Python
|
examples/references/segmentation/pascal_voc2012/code/dataflow/dataloaders.py
|
kagrze/ignite
|
18708a76f86623545311d35bc48673eac9e55591
|
[
"BSD-3-Clause"
] | null | null | null |
examples/references/segmentation/pascal_voc2012/code/dataflow/dataloaders.py
|
kagrze/ignite
|
18708a76f86623545311d35bc48673eac9e55591
|
[
"BSD-3-Clause"
] | null | null | null |
examples/references/segmentation/pascal_voc2012/code/dataflow/dataloaders.py
|
kagrze/ignite
|
18708a76f86623545311d35bc48673eac9e55591
|
[
"BSD-3-Clause"
] | null | null | null |
from typing import Callable, Optional, Tuple, Union
import numpy as np
from torch.utils.data import DataLoader, Sampler
from torch.utils.data.dataset import Subset, ConcatDataset
import torch.utils.data.distributed as data_dist
from dataflow.datasets import get_train_dataset, get_val_dataset, TransformedDataset, get_train_noval_sbdataset
def get_train_val_loaders(root_path: str,
train_transforms: Callable,
val_transforms: Callable,
batch_size: int = 16,
num_workers: int = 8,
val_batch_size: Optional[int] = None,
pin_memory: bool = True,
random_seed: Optional[int] = None,
train_sampler: Optional[Union[Sampler, str]] = None,
val_sampler: Optional[Union[Sampler, str]] = None,
with_sbd: Optional[str] = None,
limit_train_num_samples: Optional[int] = None,
limit_val_num_samples: Optional[int] = None) -> Tuple[DataLoader, DataLoader, DataLoader]:
train_ds = get_train_dataset(root_path)
val_ds = get_val_dataset(root_path)
if with_sbd is not None:
sbd_train_ds = get_train_noval_sbdataset(with_sbd)
train_ds = ConcatDataset([train_ds, sbd_train_ds])
if random_seed is not None:
np.random.seed(random_seed)
if limit_train_num_samples is not None:
train_indices = np.random.permutation(len(train_ds))[:limit_train_num_samples]
train_ds = Subset(train_ds, train_indices)
if limit_val_num_samples is not None:
val_indices = np.random.permutation(len(val_ds))[:limit_val_num_samples]
val_ds = Subset(val_ds, val_indices)
# random samples for evaluation on training dataset
if len(val_ds) < len(train_ds):
train_eval_indices = np.random.permutation(len(train_ds))[:len(val_ds)]
train_eval_ds = Subset(train_ds, train_eval_indices)
else:
train_eval_ds = train_ds
train_ds = TransformedDataset(train_ds, transform_fn=train_transforms)
val_ds = TransformedDataset(val_ds, transform_fn=val_transforms)
train_eval_ds = TransformedDataset(train_eval_ds, transform_fn=val_transforms)
if isinstance(train_sampler, str):
assert train_sampler == 'distributed'
train_sampler = data_dist.DistributedSampler(train_ds)
if isinstance(val_sampler, str):
assert val_sampler == 'distributed'
val_sampler = data_dist.DistributedSampler(val_ds, shuffle=False)
train_loader = DataLoader(train_ds, shuffle=train_sampler is None,
batch_size=batch_size, num_workers=num_workers,
sampler=train_sampler,
pin_memory=pin_memory, drop_last=True)
val_batch_size = batch_size * 4 if val_batch_size is None else val_batch_size
val_loader = DataLoader(val_ds, shuffle=False, sampler=val_sampler,
batch_size=val_batch_size, num_workers=num_workers,
pin_memory=pin_memory, drop_last=False)
train_eval_loader = DataLoader(train_eval_ds, shuffle=False, sampler=val_sampler,
batch_size=val_batch_size, num_workers=num_workers,
pin_memory=pin_memory, drop_last=False)
return train_loader, val_loader, train_eval_loader
def get_inference_dataloader(root_path: str,
mode: str,
transforms: Callable,
batch_size: int = 16,
num_workers: int = 8,
pin_memory: bool = True,
limit_num_samples: Optional[int] = None) -> DataLoader:
assert mode in ('train', 'test'), "Mode should be 'train' or 'test'"
get_dataset_fn = get_train_dataset if mode == "train" else get_val_dataset
dataset = get_dataset_fn(root_path, return_meta=True)
if limit_num_samples is not None:
indices = np.random.permutation(len(dataset))[:limit_num_samples]
dataset = Subset(dataset, indices)
dataset = TransformedDataset(dataset, transform_fn=transforms)
loader = DataLoader(dataset, shuffle=False,
batch_size=batch_size, num_workers=num_workers,
pin_memory=pin_memory, drop_last=False)
return loader
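`get_train_val_loaders` limits dataset size by permuting indices with NumPy and wrapping the dataset in `Subset`. A self-contained sketch of that pattern on a toy `TensorDataset`; the dataset, seed, and sample limit are placeholders:

```python
import numpy as np
import torch
from torch.utils.data import TensorDataset
from torch.utils.data.dataset import Subset

full_ds = TensorDataset(torch.arange(100).float().unsqueeze(1))
limit_num_samples = 10          # illustrative limit
np.random.seed(12)              # mirrors the optional random_seed handling above

indices = np.random.permutation(len(full_ds))[:limit_num_samples]
limited_ds = Subset(full_ds, indices)
assert len(limited_ds) == limit_num_samples
```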
| 43.990291
| 116
| 0.637387
| 546
| 4,531
| 4.948718
| 0.155678
| 0.041451
| 0.026647
| 0.03849
| 0.330126
| 0.215766
| 0.180977
| 0.15433
| 0.136936
| 0.136936
| 0
| 0.002177
| 0.290223
| 4,531
| 102
| 117
| 44.421569
| 0.837998
| 0.010814
| 0
| 0.171053
| 0
| 0
| 0.015179
| 0
| 0
| 0
| 0
| 0
| 0.039474
| 1
| 0.026316
| false
| 0
| 0.078947
| 0
| 0.131579
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0eef441f20577a797d6570e849cc35b3e4804f14
| 6,309
|
py
|
Python
|
saleor/core/jwt.py
|
autobotasia/saleor
|
e03e9f6ab1bddac308a6609d6b576a87e90ae655
|
[
"CC-BY-4.0"
] | 1
|
2022-02-19T13:27:40.000Z
|
2022-02-19T13:27:40.000Z
|
saleor/core/jwt.py
|
autobotasia/saleor
|
e03e9f6ab1bddac308a6609d6b576a87e90ae655
|
[
"CC-BY-4.0"
] | null | null | null |
saleor/core/jwt.py
|
autobotasia/saleor
|
e03e9f6ab1bddac308a6609d6b576a87e90ae655
|
[
"CC-BY-4.0"
] | 2
|
2021-12-03T16:59:37.000Z
|
2022-02-19T13:05:42.000Z
|
from datetime import datetime, timedelta
from typing import Any, Dict, Optional
import graphene
import jwt
from django.conf import settings
from django.core.handlers.wsgi import WSGIRequest
from ..account.models import User
from ..app.models import App
from .permissions import (
get_permission_names,
get_permissions_from_codenames,
get_permissions_from_names,
)
JWT_ALGORITHM = "HS256"
SALEOR_AUTH_HEADER = "HTTP_AUTHORIZATION_BEARER"
DEFAULT_AUTH_HEADER = "HTTP_AUTHORIZATION"
AUTH_HEADER_PREFIXES = ["JWT", "BEARER"]
JWT_ACCESS_TYPE = "access"
JWT_REFRESH_TYPE = "refresh"
JWT_THIRDPARTY_ACCESS_TYPE = "thirdparty"
JWT_REFRESH_TOKEN_COOKIE_NAME = "refreshToken"
PERMISSIONS_FIELD = "permissions"
JWT_SALEOR_OWNER_NAME = "saleor"
JWT_OWNER_FIELD = "owner"
def jwt_base_payload(
exp_delta: Optional[timedelta], token_owner: str
) -> Dict[str, Any]:
utc_now = datetime.utcnow()
payload = {"iat": utc_now, JWT_OWNER_FIELD: token_owner}
if exp_delta:
payload["exp"] = utc_now + exp_delta
return payload
def jwt_user_payload(
user: User,
token_type: str,
exp_delta: Optional[timedelta],
additional_payload: Optional[Dict[str, Any]] = None,
token_owner: str = JWT_SALEOR_OWNER_NAME,
) -> Dict[str, Any]:
payload = jwt_base_payload(exp_delta, token_owner)
payload.update(
{
"token": user.jwt_token_key,
"email": user.email,
"type": token_type,
"user_id": graphene.Node.to_global_id("User", user.id),
"is_staff": user.is_staff,
"is_supplier": user.is_supplier,
}
)
if additional_payload:
payload.update(additional_payload)
return payload
def jwt_encode(payload: Dict[str, Any]) -> str:
return jwt.encode(
payload,
settings.SECRET_KEY, # type: ignore
JWT_ALGORITHM,
)
def jwt_decode_with_exception_handler(
token: str, verify_expiration=settings.JWT_EXPIRE
) -> Optional[Dict[str, Any]]:
try:
return jwt_decode(token, verify_expiration=verify_expiration)
except jwt.PyJWTError:
return None
def jwt_decode(token: str, verify_expiration=settings.JWT_EXPIRE) -> Dict[str, Any]:
return jwt.decode(
token,
settings.SECRET_KEY, # type: ignore
algorithms=[JWT_ALGORITHM],
options={"verify_exp": verify_expiration},
)
def create_token(payload: Dict[str, Any], exp_delta: timedelta) -> str:
payload.update(jwt_base_payload(exp_delta, token_owner=JWT_SALEOR_OWNER_NAME))
return jwt_encode(payload)
def create_access_token(
user: User, additional_payload: Optional[Dict[str, Any]] = None
) -> str:
payload = jwt_user_payload(
user, JWT_ACCESS_TYPE, settings.JWT_TTL_ACCESS, additional_payload
)
return jwt_encode(payload)
def create_refresh_token(
user: User, additional_payload: Optional[Dict[str, Any]] = None
) -> str:
payload = jwt_user_payload(
user,
JWT_REFRESH_TYPE,
settings.JWT_TTL_REFRESH,
additional_payload,
)
return jwt_encode(payload)
def get_token_from_request(request: WSGIRequest) -> Optional[str]:
auth_token = request.META.get(SALEOR_AUTH_HEADER)
if not auth_token:
auth = request.META.get(DEFAULT_AUTH_HEADER, "").split(maxsplit=1)
if len(auth) == 2 and auth[0].upper() in AUTH_HEADER_PREFIXES:
auth_token = auth[1]
return auth_token
def get_user_from_payload(payload: Dict[str, Any]) -> Optional[User]:
user = User.objects.filter(email=payload["email"], is_active=True).first()
user_jwt_token = payload.get("token")
if not user_jwt_token or not user:
raise jwt.InvalidTokenError(
"Invalid token. Create new one by using tokenCreate mutation."
)
if user.jwt_token_key != user_jwt_token:
raise jwt.InvalidTokenError(
"Invalid token. Create new one by using tokenCreate mutation."
)
return user
def is_saleor_token(token: str) -> bool:
"""Confirm that token was generated by Saleor not by plugin."""
try:
payload = jwt.decode(token, options={"verify_signature": False})
except jwt.PyJWTError:
return False
owner = payload.get(JWT_OWNER_FIELD)
if not owner or owner != JWT_SALEOR_OWNER_NAME:
return False
return True
def get_user_from_access_token(token: str) -> Optional[User]:
if not is_saleor_token(token):
return None
payload = jwt_decode(token)
return get_user_from_access_payload(payload)
def get_user_from_access_payload(payload: dict) -> Optional[User]:
jwt_type = payload.get("type")
if jwt_type not in [JWT_ACCESS_TYPE, JWT_THIRDPARTY_ACCESS_TYPE]:
raise jwt.InvalidTokenError(
"Invalid token. Create new one by using tokenCreate mutation."
)
permissions = payload.get(PERMISSIONS_FIELD, None)
user = get_user_from_payload(payload)
if user and permissions is not None:
token_permissions = get_permissions_from_names(permissions)
token_codenames = [perm.codename for perm in token_permissions]
user.effective_permissions = get_permissions_from_codenames(token_codenames)
user.is_staff = True if user.effective_permissions else False
return user
def create_access_token_for_app(app: "App", user: "User"):
"""Create access token for app.
    The app can use the user's JWT token to perform a given operation on the Saleor side.
    The token usable by the app carries an additional field defining the permissions
    assigned to it. That permission set is the intersection of the user's permissions and
    the app's permissions.
"""
app_permissions = app.permissions.all()
app_permission_enums = get_permission_names(app_permissions)
permissions = user.effective_permissions
user_permission_enums = get_permission_names(permissions)
app_id = graphene.Node.to_global_id("App", app.id)
additional_payload = {
"app": app_id,
PERMISSIONS_FIELD: list(app_permission_enums & user_permission_enums),
}
payload = jwt_user_payload(
user,
JWT_THIRDPARTY_ACCESS_TYPE,
exp_delta=settings.JWT_TTL_APP_ACCESS,
additional_payload=additional_payload,
)
return jwt_encode(payload)
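`is_saleor_token` distinguishes Saleor-issued tokens from plugin tokens purely by the `owner` claim, inspected without signature verification. A standalone sketch of the same check with PyJWT, using a throwaway secret rather than the project's `SECRET_KEY`:

```python
import jwt

SECRET = "example-secret"  # placeholder, not the Django SECRET_KEY
token = jwt.encode({"owner": "saleor", "type": "access"}, SECRET, algorithm="HS256")

# Mirrors is_saleor_token(): read the owner claim without verifying the signature.
payload = jwt.decode(token, options={"verify_signature": False})
assert payload.get("owner") == "saleor"
```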
| 30.926471
| 84
| 0.703598
| 815
| 6,309
| 5.161963
| 0.181595
| 0.016639
| 0.02377
| 0.026147
| 0.282387
| 0.231281
| 0.156881
| 0.09294
| 0.09294
| 0.09294
| 0
| 0.001401
| 0.208274
| 6,309
| 203
| 85
| 31.078818
| 0.840841
| 0.058012
| 0
| 0.22293
| 0
| 0
| 0.067174
| 0.00423
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089172
| false
| 0
| 0.057325
| 0.012739
| 0.261147
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0eef6d139660d7b5753e9bf6938554e0499dccc1
| 3,513
|
py
|
Python
|
locust/configuration.py
|
pancaprima/locust
|
dba803fcdd13ff2fada4e8b8ee37a163aa519a48
|
[
"MIT"
] | 1
|
2018-09-03T10:05:55.000Z
|
2018-09-03T10:05:55.000Z
|
locust/configuration.py
|
pancaprima/locust
|
dba803fcdd13ff2fada4e8b8ee37a163aa519a48
|
[
"MIT"
] | 14
|
2017-09-20T11:01:44.000Z
|
2020-02-21T18:37:58.000Z
|
locust/configuration.py
|
erlanggakrisnamukti/locust
|
dba803fcdd13ff2fada4e8b8ee37a163aa519a48
|
[
"MIT"
] | 3
|
2018-01-24T09:39:56.000Z
|
2018-08-24T06:30:23.000Z
|
import os, json, logging, jsonpath_rw_ext, jsonpath_rw
from jsonpath_rw import jsonpath, parse
from . import events
from ast import literal_eval
from flask import make_response
logger = logging.getLogger(__name__)
CONFIG_PATH = '/tests/settings/config.json'
class ClientConfiguration:
"""
    This class handles configuration data stored in a JSON structure.
"""
def __init__(self):
self.config_data = None
def read_json(self, path=None):
"""
        Return the configuration data as JSON.
        The configuration file is read only once.
"""
if self.config_data is None:
if path is None:
path = CONFIG_PATH
else :
if path.startswith('./') :
path = path[1:]
elif not path.startswith('/'):
path = '/%s' % (path)
try:
with open((os.environ['PYTHONPATH'].split(os.pathsep))[-1] + path, "r") as data_file:
self.config_data = json.load(data_file)
except Exception as err:
logger.info(err)
                self.config_data = {}  # fall back to an empty configuration
return self.config_data
def update_json_config(self, json_added, json_path, options, list_column, config_text):
"""
Write JSON file configuration
"""
data = literal_eval(config_text)
if(options != "replace"):
json_target = jsonpath_rw_ext.match(json_path, data)
if isinstance(json_target[0], dict):
if len(list_column)==1:
json_target[0][list_column[0]] = json_added
json_final = json_target[0]
else:
return False, json.dumps(data, indent=4)
else:
for json_target_value in json_target[0]:
json_added.append(json_target_value)
json_final = json_added
else:
json_final = json_added
jsonpath_expr = parse(json_path)
matches = jsonpath_expr.find(data)
if len(matches)==0:
return make_response(json.dumps({'success':False, 'message':'JSON path not found.'}))
for match in matches:
data = ClientConfiguration.update_json(data, ClientConfiguration.get_path(match), json_final)
return make_response(json.dumps({'success':True, 'data':json.dumps(data, indent=4)}))
@classmethod
def get_path(self, match):
"""
Return an iterator based upon MATCH.PATH. Each item is a path component,
start from outer most item.
"""
if match.context is not None:
for path_element in ClientConfiguration.get_path(match.context):
yield path_element
yield str(match.path)
@classmethod
def update_json(self, json, path, value):
"""
Update JSON dictionary PATH with VALUE. Return updated JSON
"""
try:
first = next(path)
# check if item is an array
if (first.startswith('[') and first.endswith(']')) or (first.startswith('{') and first.endswith('}')):
try:
first = int(first[1:-1])
except ValueError:
pass
json[first] = ClientConfiguration.update_json(json[first], path, value)
return json
except StopIteration:
return value
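`update_json` consumes a generator of path components from `get_path` and rebuilds nested entries recursively. A minimal self-contained sketch of the same idea without `jsonpath_rw`, driving the recursion with a plain iterator of keys:

```python
def update_by_path(doc, path_iter, value):
    # Descend doc along path_iter; when the path is exhausted, replace the leaf.
    try:
        key = next(path_iter)
    except StopIteration:
        return value
    doc[key] = update_by_path(doc[key], path_iter, value)
    return doc

data = {"settings": {"retries": 1}}
assert update_by_path(data, iter(["settings", "retries"]), 5) == {"settings": {"retries": 5}}
```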
| 34.782178
| 114
| 0.562767
| 397
| 3,513
| 4.811083
| 0.314861
| 0.036649
| 0.036649
| 0.018848
| 0.112042
| 0.035602
| 0
| 0
| 0
| 0
| 0
| 0.00563
| 0.342727
| 3,513
| 100
| 115
| 35.13
| 0.821568
| 0.105038
| 0
| 0.161765
| 0
| 0
| 0.033124
| 0.008943
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073529
| false
| 0.014706
| 0.073529
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ef004434fa16f22e39f1c30a252704c35a2362e
| 2,034
|
py
|
Python
|
compute_pi.py
|
jakobkogler/pi_memorize
|
c82c24f26407f1728ad1e73851b72dea9bf779f6
|
[
"MIT"
] | null | null | null |
compute_pi.py
|
jakobkogler/pi_memorize
|
c82c24f26407f1728ad1e73851b72dea9bf779f6
|
[
"MIT"
] | null | null | null |
compute_pi.py
|
jakobkogler/pi_memorize
|
c82c24f26407f1728ad1e73851b72dea9bf779f6
|
[
"MIT"
] | null | null | null |
"""Compute pi."""
from decimal import Decimal, getcontext
import argparse
import itertools
class ComputePi:
"""Compute pi to a specific precision using multiple algorithms."""
@staticmethod
def BBP(precision):
"""Compute pi using the Bailey-Borwein-Plouffe formula."""
getcontext().prec = precision + 20
pi = Decimal(0)
for k in itertools.count():
term = (Decimal(4)/(8*k+1) - Decimal(2)/(8*k+4) - Decimal(1)/(8*k+5) - Decimal(1)/(8*k+6))
term /= Decimal(16)**k
pi += term
if term < Decimal(10)**(-precision-10):
break
pi = str(pi)[:-19]
return pi
@staticmethod
def arctan_euler(x, one=1000000):
"""Calculate arctan(1/x) using euler's accelerated formula.
Based on http://www.craig-wood.com/nick/articles/pi-machin/"""
x_squared = x * x
x_squared_plus_1 = x_squared + 1
term = (x * one) // x_squared_plus_1
total = term
two_n = 2
while 1:
divisor = (two_n+1) * x_squared_plus_1
term *= two_n
term += divisor // 2 # round the division
term = term // divisor
if term == 0:
break
total += term
two_n += 2
return total
@staticmethod
def machin_euler(digits):
"""Compute pi using Machin's formula.
Based on http://www.craig-wood.com/nick/articles/pi-machin/"""
one = 10**(digits + 20)
pi = 4*(4*ComputePi.arctan_euler(5, one) - ComputePi.arctan_euler(239, one))
pi //= 10**20
return '3.{}'.format(str(pi)[1:])
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Calculates pi.')
parser.add_argument('--precision', type=int, default=100,
help='The desired precision of pi (default: 100 digits)')
args = parser.parse_args()
pi_computer = ComputePi()
print(pi_computer.machin_euler(args.precision))
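Because the class exposes two independent algorithms, a quick cross-check of their output prefixes makes a natural usage example. This assumes the `ComputePi` class above is available in the session; the precision and compared prefix length are arbitrary:

```python
# Both algorithms should agree on the leading digits of pi.
bbp = ComputePi.BBP(30)
machin = ComputePi.machin_euler(30)
assert bbp[:15] == machin[:15] == "3.1415926535897"
```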
| 29.478261
| 102
| 0.564897
| 257
| 2,034
| 4.346304
| 0.373541
| 0.03581
| 0.032229
| 0.034915
| 0.119964
| 0.094897
| 0.094897
| 0.094897
| 0.094897
| 0.094897
| 0
| 0.04446
| 0.303343
| 2,034
| 68
| 103
| 29.911765
| 0.743825
| 0.176991
| 0
| 0.108696
| 0
| 0
| 0.052696
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065217
| false
| 0
| 0.065217
| 0
| 0.217391
| 0.021739
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ef0299af0be6f4403ddbf6bc9801b26ba188122
| 1,657
|
py
|
Python
|
scripts/01_deploy_data_types.py
|
LaMemeBete/nodys-smart-contract
|
f67b88d98ebf7063b72f46cb2b014d5de96eb56d
|
[
"MIT",
"Unlicense"
] | null | null | null |
scripts/01_deploy_data_types.py
|
LaMemeBete/nodys-smart-contract
|
f67b88d98ebf7063b72f46cb2b014d5de96eb56d
|
[
"MIT",
"Unlicense"
] | null | null | null |
scripts/01_deploy_data_types.py
|
LaMemeBete/nodys-smart-contract
|
f67b88d98ebf7063b72f46cb2b014d5de96eb56d
|
[
"MIT",
"Unlicense"
] | null | null | null |
#!/usr/bin/python3
import time
from brownie import (
DataTypes,
TransparentUpgradeableProxy,
ProxyAdmin,
config,
network,
Contract,
)
from scripts.helpful_scripts import get_account, encode_function_data
def main():
account = get_account()
print(config["networks"][network.show_active()])
print(f"Deploying to {network.show_active()}")
data_types = DataTypes.deploy(
{"from": account},
publish_source=config["networks"][network.show_active()]["verify"],
)
# Optional, deploy the ProxyAdmin and use that as the admin contract
proxy_admin = ProxyAdmin.deploy(
{"from": account},
publish_source=config["networks"][network.show_active()]["verify"],
)
    # If we want an initializer function we can add
# `initializer=box.store, 1`
# to simulate the initializer being the `store` function
# with a `newValue` of 1
# data_types_encoded_initializer_function = encode_function_data(data_types.setDataTypes)
data_types_encoded_initializer_function = encode_function_data(
data_types.setDataTypes, 10
)
proxy = TransparentUpgradeableProxy.deploy(
data_types.address,
proxy_admin.address,
data_types_encoded_initializer_function,
        # gas limit removed for an issue that is not very clear
# {"from": account, "gas_limit": 100000000000},
{"from": account},
publish_source=config["networks"][network.show_active()]["verify"],
)
print(f"Proxy deployed to {proxy} ! You can now upgrade it to dataTypesV2!")
proxy_data_types = Contract.from_abi("DataTypes", proxy.address, DataTypes.abi)
| 35.255319
| 93
| 0.692818
| 192
| 1,657
| 5.776042
| 0.411458
| 0.064923
| 0.076646
| 0.090171
| 0.368801
| 0.309288
| 0.309288
| 0.309288
| 0.309288
| 0.309288
| 0
| 0.013595
| 0.200966
| 1,657
| 46
| 94
| 36.021739
| 0.824018
| 0.250453
| 0
| 0.176471
| 0
| 0
| 0.140308
| 0.018654
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.088235
| 0
| 0.117647
| 0.088235
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ef0869b952bf4b7857333b5caa682157e430b0a
| 659
|
py
|
Python
|
modules/BidirectionalLSTM.py
|
omni-us/pytorch-retinanet
|
8d3ee38d50df0afec2ab4dfa0eabb8219eb399f5
|
[
"Apache-2.0"
] | 12
|
2019-08-14T13:32:30.000Z
|
2022-03-09T15:25:33.000Z
|
modules/BidirectionalLSTM.py
|
omni-us/pytorch-retinanet
|
8d3ee38d50df0afec2ab4dfa0eabb8219eb399f5
|
[
"Apache-2.0"
] | 2
|
2019-12-29T21:15:00.000Z
|
2020-01-14T13:51:54.000Z
|
modules/BidirectionalLSTM.py
|
omni-us/pytorch-retinanet
|
8d3ee38d50df0afec2ab4dfa0eabb8219eb399f5
|
[
"Apache-2.0"
] | 6
|
2019-08-03T16:22:41.000Z
|
2020-09-27T16:55:40.000Z
|
import torch.nn as nn
class BidirectionalLSTM(nn.Module):
# Module to extract BLSTM features from convolutional feature map
def __init__(self, nIn, nHidden, nOut):
super(BidirectionalLSTM, self).__init__()
self.rnn = nn.LSTM(nIn, nHidden, bidirectional=True)
self.embedding = nn.Linear(nHidden * 2, nOut)
self.rnn.cuda()
self.embedding.cuda()
def forward(self, input):
recurrent, _ = self.rnn(input)
T, b, h = recurrent.size()
t_rec = recurrent.view(T * b, h)
output = self.embedding(t_rec) # [T * b, nOut]
output = output.view(T, b, -1)
return output
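The module flattens the (T, b, nHidden*2) recurrent output to apply a single linear embedding, then restores the sequence shape. A shape-check sketch, assuming the class above is defined and a CUDA device is available (its layers are moved to CUDA in `__init__`); the sizes are arbitrary:

```python
import torch

if torch.cuda.is_available():
    blstm = BidirectionalLSTM(nIn=256, nHidden=128, nOut=37)   # illustrative sizes
    feats = torch.randn(26, 4, 256, device="cuda")             # (T, b, nIn)
    out = blstm(feats)
    assert out.shape == (26, 4, 37)                            # (T, b, nOut)
```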
| 28.652174
| 69
| 0.614568
| 85
| 659
| 4.635294
| 0.494118
| 0.020305
| 0.015228
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004141
| 0.267071
| 659
| 22
| 70
| 29.954545
| 0.811594
| 0.116844
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.066667
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ef1f7b69f59398c929a14885bdad0d62cb19dca
| 5,173
|
py
|
Python
|
main.py
|
JaekwangCha/my_pytorch_templet
|
7b6b67116e9d69abd64631d90b38fedc79be6c8c
|
[
"MIT"
] | null | null | null |
main.py
|
JaekwangCha/my_pytorch_templet
|
7b6b67116e9d69abd64631d90b38fedc79be6c8c
|
[
"MIT"
] | null | null | null |
main.py
|
JaekwangCha/my_pytorch_templet
|
7b6b67116e9d69abd64631d90b38fedc79be6c8c
|
[
"MIT"
] | null | null | null |
# written by Jaekwang Cha
# version 0.1
# ================== IMPORT CUSTOM LEARNING LIBRARIES ===================== #
from customs.train import train, test
from customs.dataset import load_dataset
from customs.model import load_model
# ================== TRAINING SETTINGS ================== #
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument('--train_method', default='supervised', type=str, help='type of training: supervised(default), unsupervised, reinforce')
parser.add_argument('--task', default='classification', type=str, help='task of training: classification(default), regression')
parser.add_argument('--dataset', default='mnist', type=str, help='dataset to use')
parser.add_argument('--model', default='CNN', type=str, help='model to use')
parser.add_argument('--seed', default=42, type=int, help='random seed (default: 42)')
parser.add_argument('--num_worker', default=1, type=int, help='number of dataloader worker')
parser.add_argument('--no_cuda', action='store_true', default=False, help='disables CUDA training')
parser.add_argument('--gpu', default=0, type=str, help='GPU-id for GPU to use')
parser.add_argument('--multi_gpu', default=0, type=str, help='GPU-ids for multi-GPU usage')
parser.add_argument('--pin_memory', default=True, type=bool, help='pin memory option selector')
parser.add_argument('--save_model', action='store_true', default=False, help='For Saving the current Model')
parser.add_argument('--save_path', default=os.getcwd()+'/weights', type=str, help='Where to save weights')
parser.add_argument('--log_path', default=os.getcwd()+'/Logs', type=str, help='Where to save Logs')
# data setting
parser.add_argument('--val_rate', default=0.2, type=float, help='split rate for the validation data')
parser.add_argument('--transform', default='default', type=str, help='choose the data transform type')
# training parameter setting
parser.add_argument('--n_epoch', default=10, type=int, help='number of total training iteration')
parser.add_argument('--batch_size', default=32, type=int, help='size of minibatch')
parser.add_argument('--test_batch_size', default=32, type=int, help='size of test-minibatch')
# optimizer & scheduler setting
parser.add_argument('--lr', default=0.03, type=float, help='training learning rate')
parser.add_argument('--optimizer', default='adam', type=str, help='optimizer select')
parser.add_argument('--scheduler', default='steplr', type=str, help='scheduler select')
opt = parser.parse_args()
# ===================== IMPORT PYTORCH LIBRARIES ================== #
import torch
from torch.utils.data import DataLoader
torch.manual_seed(opt.seed)
# ================== GPU SETTINGS ================== #
def gpu_setup(opt):
use_cuda = not opt.no_cuda and torch.cuda.is_available()
os.environ["CUDA_DEVICE_ORDER"] ="PCI_BUS_ID"
if opt.multi_gpu != 0:
print()
print('Activating multi-gpu training mode')
print(opt.multi_gpu)
os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.multi_gpu)
opt.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
else:
print()
print('Activating single-gpu training mode')
os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.gpu)
opt.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using gpu number ' + str(opt.gpu))
return use_cuda
# ======================= MAIN SCRIPT ============================= #
def main(opt):
use_cuda = gpu_setup(opt)
dataset_train, dataset_validation = load_dataset(opt, train=True)
print('training data size: {}'.format(len(dataset_train)))
print('validation data size: {}'.format(len(dataset_validation)))
dataset_test = load_dataset(opt, train=False)
print('test data size: {}'.format(len(dataset_test)))
print()
kwargs = {'num_workers': opt.num_worker, 'pin_memory': opt.pin_memory} if use_cuda else {}
train_dataloader = DataLoader(dataset_train, batch_size=opt.batch_size, shuffle=True, **kwargs)
validation_dataloader = DataLoader(dataset_validation, batch_size=opt.batch_size, shuffle=True, **kwargs)
test_dataloader = DataLoader(dataset_test, batch_size=opt.test_batch_size, shuffle=True, **kwargs)
model = load_model(opt)
if opt.multi_gpu != 0:
model = torch.nn.DataParallel(model)
model.to(opt.device)
train(opt, model, train_dataloader, validation_dataloader)
test(opt, model, test_dataloader)
if __name__ == '__main__':
main(opt)
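The loader setup in `main()` only passes `num_workers`/`pin_memory` when CUDA is in use. A self-contained sketch of that conditional-kwargs pattern on a toy dataset; the worker count and batch size are placeholders:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

use_cuda = torch.cuda.is_available()
# Extra DataLoader kwargs only when training on GPU, as in main() above.
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

toy_ds = TensorDataset(torch.arange(10).float())
toy_loader = DataLoader(toy_ds, batch_size=4, shuffle=True, **kwargs)
```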
| 52.785714
| 159
| 0.605838
| 606
| 5,173
| 5.008251
| 0.237624
| 0.062273
| 0.117628
| 0.013839
| 0.233937
| 0.158155
| 0.123229
| 0.085008
| 0.059967
| 0.036903
| 0
| 0.005554
| 0.234293
| 5,173
| 97
| 160
| 53.329897
| 0.760667
| 0.081577
| 0
| 0.104478
| 0
| 0
| 0.236791
| 0.005176
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029851
| false
| 0
| 0.104478
| 0
| 0.149254
| 0.149254
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ef37debe8fbb6d99817c5ad659e3ff1f210c644
| 4,812
|
py
|
Python
|
test/core/024-sc4-gridftp-http/Rosetta.py
|
ahnitz/pegasus
|
e269b460f4d87eb3f3a7e91cd82e2c28fdb55573
|
[
"Apache-2.0"
] | 127
|
2015-01-28T19:19:13.000Z
|
2022-03-31T05:57:40.000Z
|
test/core/024-sc4-gridftp-http/Rosetta.py
|
ahnitz/pegasus
|
e269b460f4d87eb3f3a7e91cd82e2c28fdb55573
|
[
"Apache-2.0"
] | 14
|
2015-04-15T17:44:20.000Z
|
2022-02-22T22:48:49.000Z
|
test/core/024-sc4-gridftp-http/Rosetta.py
|
ahnitz/pegasus
|
e269b460f4d87eb3f3a7e91cd82e2c28fdb55573
|
[
"Apache-2.0"
] | 70
|
2015-01-22T15:20:32.000Z
|
2022-02-21T22:50:23.000Z
|
#!/usr/bin/env python3
import logging
import sys
import subprocess
from pathlib import Path
from datetime import datetime
from Pegasus.api import *
logging.basicConfig(level=logging.DEBUG)
# --- Work Dir Setup -----------------------------------------------------------
RUN_ID = "024-sc4-gridftp-http-" + datetime.now().strftime("%s")
TOP_DIR = Path.cwd()
WORK_DIR = TOP_DIR / "work"
try:
Path.mkdir(WORK_DIR)
except FileExistsError:
pass
# --- Configuration ------------------------------------------------------------
print("Generating pegasus.properties at: {}".format(TOP_DIR / "pegasus.properties"))
props = Properties()
props["pegasus.dir.useTimestamp"] = "true"
props["pegasus.dir.storage.deep"] = "false"
props["pegasus.data.configuration"] = "nonsharedfs"
with (TOP_DIR / "pegasus.properties").open(mode="w") as f:
props.write(f)
# --- Sites --------------------------------------------------------------------
print("Generating site catalog at: sites.yml")
LOCAL = "local"
CONDOR_POOL = "condorpool"
STAGING_SITE = "staging_site"
try:
pegasus_config = subprocess.run(
["pegasus-config", "--bin"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
except FileNotFoundError as e:
print("Unable to find pegasus-config")
assert pegasus_config.returncode == 0
PEGASUS_BIN_DIR = pegasus_config.stdout.decode().strip()
sites = """
pegasus: "5.0"
sites:
-
name: "condor_pool"
arch: "x86_64"
os.type: "linux"
profiles:
condor:
universe: "vanilla"
pegasus:
style: "condor"
-
name: "staging_site"
arch: "x86_64"
os.type: "linux"
directories:
-
type: "sharedScratch"
path: "/lizard/scratch-90-days/http-scratch/ptesting"
fileServers:
-
operation: "get"
url: "http://workflow.isi.edu/shared-scratch/ptesting"
-
operation: "put"
url: "gsiftp://workflow.isi.edu/lizard/scratch-90-days/http-scratch/ptesting"
-
name: "local"
arch: "x86_64"
os.type: "linux"
os.release: "rhel"
os.version: "7"
directories:
-
type: "sharedScratch"
path: "{work_dir}/scratch"
fileServers:
-
operation: "all"
url: "file://{work_dir}/scratch"
-
type: "localStorage"
path: "{work_dir}/outputs"
fileServers:
-
operation: "all"
url: "file://{work_dir}/outputs"
profiles:
env:
PEGASUS_BIN_DIR: "{pegasus_bin_dir}"
""".format(
work_dir=str(WORK_DIR), pegasus_bin_dir=PEGASUS_BIN_DIR
)
with (TOP_DIR / "sites.yml").open(mode="w") as f:
f.write(sites)
# --- Transformations ----------------------------------------------------------
rosetta_exe = Transformation(
"rosetta.exe",
arch=Arch.X86_64,
os_type=OS.LINUX,
site="local",
pfn="file://" + str(TOP_DIR / "rosetta.exe"),
is_stageable=True,
).add_pegasus_profile(clusters_size=3)
tc = TransformationCatalog().add_transformations(rosetta_exe)
# --- Replicas & Workflow ------------------------------------------------------
rc = ReplicaCatalog()
# add all files in minirosetta_database
inputs = list()
def get_files(d: Path) -> None:
for p in d.iterdir():
if p.is_file():
f = File(str(p))
inputs.append(f)
rc.add_replica(LOCAL, str(p), str(p.resolve()))
else:
get_files(p)
get_files(Path("minirosetta_database"))
f1 = File("design.resfile")
inputs.append(f1)
rc.add_replica(LOCAL, f1, str(Path("design.resfile").resolve()))
f2 = File("repack.resfile")
inputs.append(f2)
rc.add_replica(LOCAL, f2, str(Path("repack.resfile").resolve()))
wf = Workflow("rosetta")
pdb_files = list(Path("pdbs").iterdir())
for i in range(10):
current_file = pdb_files[i]
if current_file.is_file():
job = (
Job(rosetta_exe, _id=current_file.name.replace(".pdb", ""))
.add_inputs(File(current_file.name), *inputs)
.add_outputs(File(current_file.name + ".score.sc"), register_replica=True)
.add_args(
"-in:file:s",
current_file.name,
"-out:prefix " + current_file.name + ".",
"-database ./minirosetta_database",
"-linmem_ig 10",
"-nstruct 1",
"-pert_num 2",
"-inner_num 1",
"-jd2::ntrials 1",
)
)
rc.add_replica("local", current_file.name, str(current_file.resolve()))
wf.add_jobs(job)
# write rc to separate file for registration jobs
with (TOP_DIR / "replicas.yml").open("w") as f:
rc.write(f)
wf.add_transformation_catalog(tc)
try:
wf.plan(
dir=str(WORK_DIR),
verbose=5,
sites=[CONDOR_POOL],
staging_sites={CONDOR_POOL: STAGING_SITE},
)
except PegasusClientError as e:
print(e.output)
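`get_files` walks a directory tree with `pathlib` and registers every file it finds as a replica. A minimal standalone sketch of the same recursive walk, decoupled from the Pegasus catalog objects; the directory name is illustrative:

```python
from pathlib import Path
from typing import List

def list_files(d: Path) -> List[Path]:
    # Recursively collect plain files under d, mirroring get_files() above.
    found = []
    for p in d.iterdir():
        if p.is_file():
            found.append(p)
        else:
            found.extend(list_files(p))
    return found

# Usage sketch: list_files(Path("minirosetta_database"))
```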
| 24.932642
| 86
| 0.588113
| 566
| 4,812
| 4.848057
| 0.333922
| 0.02551
| 0.032799
| 0.016035
| 0.109694
| 0.095481
| 0.054665
| 0
| 0
| 0
| 0
| 0.01207
| 0.208022
| 4,812
| 192
| 87
| 25.0625
| 0.707951
| 0.104323
| 0
| 0.133333
| 0
| 0.006667
| 0.374012
| 0.067411
| 0
| 0
| 0
| 0
| 0.006667
| 1
| 0.006667
| false
| 0.006667
| 0.04
| 0
| 0.046667
| 0.026667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ef3a6ff8273269894257cdbba761bebf9bbfde6
| 5,787
|
py
|
Python
|
qiskit/visualization/pulse_v2/device_info.py
|
godspeed5/qiskit-terra
|
a5d87c3e4a663ab962704585fba0caef15061246
|
[
"Apache-2.0"
] | 15
|
2020-06-29T08:33:39.000Z
|
2022-02-12T00:28:51.000Z
|
qiskit/visualization/pulse_v2/device_info.py
|
godspeed5/qiskit-terra
|
a5d87c3e4a663ab962704585fba0caef15061246
|
[
"Apache-2.0"
] | 4
|
2020-11-27T09:34:13.000Z
|
2021-04-30T21:13:41.000Z
|
qiskit/visualization/pulse_v2/device_info.py
|
godspeed5/qiskit-terra
|
a5d87c3e4a663ab962704585fba0caef15061246
|
[
"Apache-2.0"
] | 11
|
2020-06-29T08:40:24.000Z
|
2022-02-24T17:39:16.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""A collection of backend information formatted to generate drawing data.
This instance will be provided to generator functions. The module provides an abstract
class :py:class:``DrawerBackendInfo`` with necessary methods to generate drawing objects.
Because the data structure of the backend class may depend on the provider, this abstract class
has an abstract factory method `create_from_backend`. Each subclass should provide
a factory method that conforms to the associated provider. By default we provide the
:py:class:``OpenPulseBackendInfo`` class, whose factory method takes backends
satisfying the OpenPulse specification [1].
This class can also be initialized without the factory method by manually specifying the
required information. This may be convenient for visualizing a pulse program for a simulator
backend that only has device Hamiltonian information. This requires two mapping objects
for channel/qubit and channel/frequency along with the system cycle time.
If that information is not provided, the class is initialized with a set of
empty data and the drawer illustrates a pulse program without any specific information.
Reference:
- [1] Qiskit Backend Specifications for OpenQASM and OpenPulse Experiments,
https://arxiv.org/abs/1809.03452
"""
from abc import ABC, abstractmethod
from collections import defaultdict
from typing import Dict, List, Union, Optional
from qiskit import pulse
from qiskit.providers import BaseBackend, BackendConfigurationError
class DrawerBackendInfo(ABC):
"""Backend information to be used for the drawing data generation."""
def __init__(self,
name: Optional[str] = None,
dt: Optional[float] = None,
channel_frequency_map: Optional[Dict[pulse.channels.Channel, float]] = None,
qubit_channel_map: Optional[Dict[int, List[pulse.channels.Channel]]] = None):
"""Create new backend information.
Args:
name: Name of the backend.
dt: System cycle time.
channel_frequency_map: Mapping of channel and associated frequency.
qubit_channel_map: Mapping of qubit and associated channels.
"""
self.backend_name = name or 'no-backend'
self._dt = dt
self._chan_freq_map = channel_frequency_map or dict()
self._qubit_channel_map = qubit_channel_map or dict()
@classmethod
@abstractmethod
def create_from_backend(cls, backend: BaseBackend):
"""Initialize a class with backend information provided by provider.
Args:
backend: Backend object.
"""
raise NotImplementedError
@property
def dt(self):
"""Return cycle time."""
return self._dt
def get_qubit_index(self, chan: pulse.channels.Channel) -> Union[int, None]:
"""Get associated qubit index of given channel object."""
for qind, chans in self._qubit_channel_map.items():
if chan in chans:
return qind
return chan.index
def get_channel_frequency(self, chan: pulse.channels.Channel) -> Union[float, None]:
"""Get frequency of given channel object."""
return self._chan_freq_map.get(chan, None)
class OpenPulseBackendInfo(DrawerBackendInfo):
"""Drawing information of backend that conforms to OpenPulse specification."""
@classmethod
def create_from_backend(cls, backend: BaseBackend):
"""Initialize a class with backend information provided by provider.
Args:
backend: Backend object.
Returns:
OpenPulseBackendInfo: New configured instance.
"""
configuration = backend.configuration()
defaults = backend.defaults()
# load name
name = backend.name()
# load cycle time
dt = configuration.dt
# load frequencies
chan_freqs = dict()
chan_freqs.update({pulse.DriveChannel(qind): freq
for qind, freq in enumerate(defaults.qubit_freq_est)})
chan_freqs.update({pulse.MeasureChannel(qind): freq
for qind, freq in enumerate(defaults.meas_freq_est)})
for qind, u_lo_mappers in enumerate(configuration.u_channel_lo):
temp_val = .0 + .0j
for u_lo_mapper in u_lo_mappers:
temp_val += defaults.qubit_freq_est[u_lo_mapper.q] * complex(*u_lo_mapper.scale)
chan_freqs[pulse.ControlChannel(qind)] = temp_val.real
# load qubit channel mapping
qubit_channel_map = defaultdict(list)
for qind in range(configuration.n_qubits):
qubit_channel_map[qind].append(configuration.drive(qubit=qind))
qubit_channel_map[qind].append(configuration.measure(qubit=qind))
for tind in range(configuration.n_qubits):
try:
qubit_channel_map[qind].extend(configuration.control(qubits=(qind, tind)))
except BackendConfigurationError:
pass
return OpenPulseBackendInfo(name=name,
dt=dt,
channel_frequency_map=chan_freqs,
qubit_channel_map=qubit_channel_map)
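The module docstring notes that the class can also be built without the backend factory by passing the channel/frequency and qubit/channel maps directly. A sketch of that manual path, assuming the classes above are importable and using placeholder dt and frequency values for a one-qubit device:

```python
from qiskit import pulse

info = OpenPulseBackendInfo(
    name="simulator-backend",            # placeholder name
    dt=0.2222e-9,                        # placeholder cycle time
    channel_frequency_map={pulse.DriveChannel(0): 5.0e9,
                           pulse.MeasureChannel(0): 7.0e9},
    qubit_channel_map={0: [pulse.DriveChannel(0), pulse.MeasureChannel(0)]},
)
assert info.get_qubit_index(pulse.DriveChannel(0)) == 0
assert info.get_channel_frequency(pulse.MeasureChannel(0)) == 7.0e9
```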
| 39.910345
| 96
| 0.681355
| 708
| 5,787
| 5.451977
| 0.333333
| 0.037306
| 0.042746
| 0.014767
| 0.148705
| 0.134715
| 0.082383
| 0.082383
| 0.062694
| 0.062694
| 0
| 0.004832
| 0.249006
| 5,787
| 144
| 97
| 40.1875
| 0.883341
| 0.444272
| 0
| 0.067797
| 0
| 0
| 0.003291
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.101695
| false
| 0.016949
| 0.084746
| 0
| 0.305085
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ef3c67e54e013586a797d3526f9d748c2da9ba4
| 8,401
|
py
|
Python
|
django_gotolong/mfund/views.py
|
ParikhKadam/gotolong
|
839beb8aa37055a2078eaa289b8ae05b62e8905e
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 15
|
2019-12-06T16:19:45.000Z
|
2021-08-20T13:22:22.000Z
|
django_gotolong/mfund/views.py
|
ParikhKadam/gotolong
|
839beb8aa37055a2078eaa289b8ae05b62e8905e
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 14
|
2020-12-08T10:45:05.000Z
|
2021-09-21T17:23:45.000Z
|
django_gotolong/mfund/views.py
|
ParikhKadam/gotolong
|
839beb8aa37055a2078eaa289b8ae05b62e8905e
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 9
|
2020-01-01T03:04:29.000Z
|
2021-04-18T08:42:30.000Z
|
# Create your views here.
from .models import Mfund
import plotly.graph_objects as go
from plotly.offline import plot
from plotly.tools import make_subplots
from django.db.models import Q
from django.conf import settings
from django.shortcuts import redirect
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.views.generic.list import ListView
from django.views import View
from django.db.models import OuterRef, Subquery, Count, Sum, Max, Min
from django.db.models.functions import Trim, Lower, Round
import pandas as pd
import csv, io
import openpyxl
from django.contrib import messages
from django.urls import reverse
from django.http import HttpResponseRedirect
from django_gotolong.lastrefd.models import Lastrefd, lastrefd_update
from django_gotolong.broker.icidir.imf.models import BrokerIcidirMf
def Mfund_url():
return "unused-mfund-refresh-url"
class MfundListView(ListView):
model = Mfund
# if pagination is desired
# paginate_by = 300
# filter_backends = [filters.OrderingFilter,]
# ordering_fields = ['sno', 'nse_symbol']
def get_queryset(self):
queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id)
return queryset
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(MfundListView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
refresh_url = Mfund_url()
context["refresh_url"] = refresh_url
return context
class MfundListView_Amount(ListView):
model = Mfund
def get_queryset(self):
queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id).order_by('-mf_nav_value')
return queryset
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
refresh_url = Mfund_url()
context["refresh_url"] = refresh_url
return context
class MfundListView_AMC(ListView):
model = Mfund
def get_queryset(self):
queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id). \
order_by('mf_amc', 'mf_category', 'mf_subcat', '-mf_nav_value')
return queryset
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
refresh_url = Mfund_url()
context["refresh_url"] = refresh_url
return context
class MfundListView_AMC_Amount(ListView):
model = Mfund
def get_queryset(self):
self.queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id). \
values('mf_amc').annotate(scheme_sum=Sum('mf_nav_value')). \
exclude(scheme_sum=0.0).order_by('-scheme_sum')
print('hi ', self.queryset)
return self.queryset
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
labels = []
values = []
labels_values_dict = {}
sum_total = 0
for q_row in self.queryset:
sum_total += q_row['scheme_sum']
labels_values_dict[q_row['mf_amc']] = q_row['scheme_sum']
context['sum_total'] = int(sum_total)
print('labels values dict', labels_values_dict)
for k, v in sorted(labels_values_dict.items(), key=lambda item: item[1]):
labels.append(k)
values.append(v)
print('labels ', labels)
print('values ', values)
fig = go.Figure(data=[go.Pie(labels=labels, values=values)])
fig.update_traces(textposition='inside', textinfo='percent+label')
# fig.show()
plot_div_1 = plot(fig, output_type='div', include_plotlyjs=False)
context['plot_div_1'] = plot_div_1
return context
class MfundListView_Category(ListView):
model = Mfund
def get_queryset(self):
queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id). \
order_by('mf_category', 'mf_subcat', '-mf_nav_value')
return queryset
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
refresh_url = Mfund_url()
context["refresh_url"] = refresh_url
return context
class MfundListView_Subcat(ListView):
model = Mfund
def get_queryset(self):
queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id). \
order_by('mf_subcat', '-mf_nav_value')
return queryset
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
refresh_url = Mfund_url()
context["refresh_url"] = refresh_url
return context
class MfundListView_Reco(ListView):
model = Mfund
def get_queryset(self):
queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id). \
order_by('mf_research_reco', '-mf_rating')
return queryset
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
refresh_url = Mfund_url()
context["refresh_url"] = refresh_url
return context
class MfundListView_SubcatAmount(ListView):
model = Mfund
def get_queryset(self):
self.queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id). \
values('mf_subcat').annotate(scheme_sum=Sum('mf_nav_value')). \
exclude(scheme_sum=0.0).order_by('-scheme_sum')
return self.queryset
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
labels = []
values = []
labels_values_dict = {}
sum_total = 0
for q_row in self.queryset:
sum_total += q_row['scheme_sum']
labels_values_dict[q_row['mf_subcat']] = q_row['scheme_sum']
context['sum_total'] = int(sum_total)
print('labels values dict', labels_values_dict)
for k, v in sorted(labels_values_dict.items(), key=lambda item: item[1]):
labels.append(k)
values.append(v)
print('labels ', labels)
print('values ', values)
fig = go.Figure(data=[go.Pie(labels=labels, values=values)])
fig.update_traces(textposition='inside', textinfo='percent+label')
# fig.show()
plot_div_1 = plot(fig, output_type='div', include_plotlyjs=False)
context['plot_div_1'] = plot_div_1
return context
class MfundRefreshView(View):
debug_level = 1
def get(self, request):
self.mfund_refresh(request)
return HttpResponseRedirect(reverse("mfund-list"))
def __init__(self):
super(MfundRefreshView, self).__init__()
def mfund_refresh(self, request):
debug_level = 1
# declaring template
# first delete all existing mfund objects
Mfund.objects.all().filter(mf_user_id=request.user.id).delete()
max_id_instances = Mfund.objects.aggregate(max_id=Max('mf_id'))
max_mf_id = max_id_instances['max_id']
print('DS: found max id ', max_mf_id)
if max_mf_id is None:
max_mf_id = 0
print('max_mf_id ', max_mf_id)
unique_id = max_mf_id
for brec in BrokerIcidirMf.objects.all().filter(bim_user_id=request.user.id):
unique_id += 1
print(brec.bim_amc, brec.bim_name, brec.bim_category, brec.bim_subcat)
print(brec.bim_rating, brec.bim_units, brec.bim_cost_value, brec.bim_nav_value)
print(brec.bim_research_reco)
# skip 0 units
if int(float(brec.bim_units)) != 0:
_, created = Mfund.objects.update_or_create(
mf_id=unique_id,
mf_user_id=request.user.id,
mf_broker='icidir',
mf_amc=brec.bim_amc,
mf_name=brec.bim_name,
mf_category=brec.bim_category,
mf_subcat=brec.bim_subcat,
mf_rating=brec.bim_rating,
mf_cost_value=brec.bim_cost_value,
mf_nav_value=brec.bim_nav_value,
mf_research_reco=brec.bim_research_reco
)
# breakpoint()
# import pdb
# pdb.set_trace()
# Updated Gfundareco objects
lastrefd_update("mfund")
| 31.464419
| 104
| 0.644209
| 1,067
| 8,401
| 4.811621
| 0.170572
| 0.025711
| 0.043631
| 0.036813
| 0.625438
| 0.600701
| 0.588625
| 0.582976
| 0.580639
| 0.580639
| 0
| 0.003638
| 0.247352
| 8,401
| 266
| 105
| 31.582707
| 0.808319
| 0.037019
| 0
| 0.565217
| 0
| 0
| 0.06774
| 0.002972
| 0
| 0
| 0
| 0
| 0
| 1
| 0.11413
| false
| 0
| 0.11413
| 0.01087
| 0.429348
| 0.065217
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ef63c39ffdfa491eb48d1233a4ab5b8fb12a49a
| 5,444
|
py
|
Python
|
m3u8.py
|
akria00/m3u8-Downloader-master
|
37bf4683b0390998a819d0bb5b8af18ffb2166f6
|
[
"Apache-2.0"
] | 2
|
2020-01-10T20:31:12.000Z
|
2020-03-04T19:34:15.000Z
|
m3u8.py
|
akria00/m3u8-Downloader-master
|
37bf4683b0390998a819d0bb5b8af18ffb2166f6
|
[
"Apache-2.0"
] | null | null | null |
m3u8.py
|
akria00/m3u8-Downloader-master
|
37bf4683b0390998a819d0bb5b8af18ffb2166f6
|
[
"Apache-2.0"
] | 1
|
2019-04-19T08:04:05.000Z
|
2019-04-19T08:04:05.000Z
|
#coding: utf-8
from gevent import monkey
monkey.patch_all()
from gevent.pool import Pool
import gevent
import requests
import urllib
import os
import time
import re
import ssl
class Downloader:
    def __init__(self, pool_size, retry=3):
        self.pool = Pool(pool_size)
        self.session = self._get_http_session(pool_size, pool_size, retry)
        self.retry = retry
        self.dir = ''
        self.succed = {}
        self.failed = []
        self.ts_total = 0

    def _get_http_session(self, pool_connections, pool_maxsize, max_retries):
        session = requests.Session()
        adapter = requests.adapters.HTTPAdapter(pool_connections=pool_connections, pool_maxsize=pool_maxsize, max_retries=max_retries)
        session.mount('http://', adapter)
        session.mount('https://', adapter)
        return session

    def run(self, m3u8_url, dir='', moreTs=False):
        self.dir = dir
        if self.dir and not os.path.isdir(self.dir):
            os.makedirs(self.dir)
        r = self.session.get(m3u8_url, timeout=10)
        if r.ok:
            body = r.content
            if body:
                ssl._create_default_https_context = ssl._create_unverified_context
                ts_list = [urllib.parse.urljoin(m3u8_url, n.strip()) for n in str(body, encoding="utf8").split('\n') if n and not n.startswith("#")]
                if moreTs:
                    ts_list = self.getMoreTsList(ts_list)
                ts_list = list(zip(ts_list, [n for n in range(len(list(ts_list)))]))
                if ts_list:
                    self.ts_total = len(ts_list)
                    print(self.ts_total)
                    g1 = gevent.spawn(self._join_file)
                    self._download(ts_list)
                    g1.join()
        else:
            print(r.status_code)

    def _download(self, ts_list):
        self.pool.map(self._worker, ts_list)
        if self.failed:
            ts_list = self.failed
            self.failed = []
            self._download(ts_list)

    def _worker(self, ts_tuple):
        url = ts_tuple[0]
        index = ts_tuple[1]
        retry = self.retry
        while retry:
            try:
                r = self.session.get(url, timeout=20)
                if r.ok:
                    file_name = url.split('/')[-1].split('?')[0]
                    print(file_name)
                    with open(os.path.join(self.dir, file_name), 'wb') as f:
                        f.write(r.content)
                    self.succed[index] = file_name
                    return
            except:
                retry -= 1
        print('[FAIL]%s' % url)
        self.failed.append((url, index))

    def _join_file(self):
        index = 0
        outfile = ''
        while index < self.ts_total:
            file_name = self.succed.get(index, '')
            if file_name:
                infile = open(os.path.join(self.dir, file_name), 'rb')
                if not outfile:
                    outfile = open(os.path.join(self.dir, file_name.split('.')[0] + '_all.' + file_name.split('.')[-1]), 'wb')
                outfile.write(infile.read())
                infile.close()
                os.remove(os.path.join(self.dir, file_name))
                index += 1
            else:
                time.sleep(1)
        if outfile:
            outfile.close()

    def getMoreTsList(self, ts_list):
        headers = {'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1',
                   'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
                   'accept-encoding': 'gzip, deflate, br',
                   'accept-language': 'zh-CN,zh;q=0.9',
                   'upgrade-insecure-requests': 1,
                   'scheme': 'https'
                   }
        retry = self.retry
        isOk = False
        lastTs = ts_list[-1]
        pattern = re.compile(r'(\d+\.?\d)\.ts')
        tsNum = '{:0>3}'.format(int(pattern.findall(lastTs)[0]) + 1)
        nextTs = re.sub(pattern, str(tsNum), lastTs, 1) + ".ts"
        req = urllib.request.Request(url=nextTs, headers=headers, method='GET')
        l = r = int(tsNum)
        maxTs = 0
        while retry or isOk:
            try:
                isOk = urllib.request.urlopen(req).status == 200
                if isOk:
                    retry = 3
                    l = r + 1
                    r = l + 100 if maxTs < r else maxTs - int((maxTs - l) / 2)
                    nextTs = re.sub(pattern, '{:0>3}'.format(r), lastTs, 1) + ".ts"
                    req = urllib.request.Request(url=nextTs, headers=headers, method='GET')
                else:
                    r = r - int((r - l) / 2)
            except:
                if int((r - l) / 2) == 0:
                    for i in range(int(tsNum), r):
                        ts_list.append(re.sub(pattern, '{:0>3}'.format(i), lastTs, 1) + ".ts")
                    return ts_list
                maxTs = r
                r = r - int((r - l) / 2)
                nextTs = re.sub(pattern, '{:0>3}'.format(r), lastTs, 1) + ".ts"
                req = urllib.request.Request(url=nextTs, headers=headers, method='GET')
                retry -= 1
                isOk = False
        return ts_list


if __name__ == '__main__':
    downloader = Downloader(5)
    downloader.run('https://www.xiaodianying.com/filets/2069/dp.m3u8', './video', True)
| 38.609929
| 171
| 0.517083
| 675
| 5,444
| 4.038519
| 0.275556
| 0.039618
| 0.016141
| 0.020543
| 0.145268
| 0.145268
| 0.132795
| 0.123624
| 0.091709
| 0.091709
| 0
| 0.026323
| 0.351029
| 5,444
| 140
| 172
| 38.885714
| 0.745259
| 0.002388
| 0
| 0.215385
| 0
| 0.015385
| 0.092081
| 0.020258
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053846
| false
| 0
| 0.069231
| 0
| 0.161538
| 0.030769
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ef7742a3f6f5d085c7065159824fcf2edcb86c7
| 5,910
|
py
|
Python
|
src/dsrlib/ui/utils.py
|
fraca7/dsremap
|
fb8f4fb13e74b512ed0cac05387fbe9694faebcf
|
[
"MIT"
] | 8
|
2020-09-06T02:15:10.000Z
|
2022-01-12T22:49:20.000Z
|
src/dsrlib/ui/utils.py
|
fraca7/dsremap
|
fb8f4fb13e74b512ed0cac05387fbe9694faebcf
|
[
"MIT"
] | 5
|
2021-03-29T20:37:46.000Z
|
2021-09-19T13:20:24.000Z
|
src/dsrlib/ui/utils.py
|
fraca7/dsremap
|
fb8f4fb13e74b512ed0cac05387fbe9694faebcf
|
[
"MIT"
] | 2
|
2020-09-16T01:45:49.000Z
|
2021-06-12T12:38:15.000Z
|
#!/usr/bin/env python3
import os
import contextlib
from PyQt5 import QtCore, QtWidgets
from dsrlib.settings import Settings
class LayoutBuilder:
def __init__(self, target):
self.target = target
self._stack = []
@contextlib.contextmanager
def _layout(self, cls, *args, **kwargs):
layout = cls()
self._stack.append(layout)
try:
yield layout
finally:
self._pop(*args, **kwargs)
def _pop(self, *args, **kwargs):
layout = self._stack.pop()
if self._stack:
parent = self._stack[-1]
if isinstance(layout, QtWidgets.QSplitter):
parent.addWidget(layout)
else:
if isinstance(parent, QtWidgets.QSplitter):
container = QtWidgets.QWidget(parent)
container.setLayout(layout)
parent.addWidget(container)
else:
parent.addLayout(layout, *args, **kwargs)
elif isinstance(self.target, QtWidgets.QMainWindow):
if isinstance(layout, QtWidgets.QSplitter):
self.target.setCentralWidget(layout)
else:
container = QtWidgets.QWidget(self.target)
container.setLayout(layout)
self.target.setCentralWidget(container)
else:
if isinstance(layout, QtWidgets.QSplitter):
layout2 = QtWidgets.QHBoxLayout()
layout2.setContentsMargins(0, 0, 0, 0)
layout2.addWidget(layout)
self.target.setLayout(layout2)
else:
self.target.setLayout(layout)
@contextlib.contextmanager
def hbox(self, *args, **kwargs): # pragma: no cover
with self._layout(QtWidgets.QHBoxLayout, *args, **kwargs) as layout:
layout.setContentsMargins(1, 1, 1, 1)
layout.setSpacing(1)
yield layout
@contextlib.contextmanager
def vbox(self, *args, **kwargs): # pragma: no cover
with self._layout(QtWidgets.QVBoxLayout, *args, **kwargs) as layout:
layout.setContentsMargins(1, 1, 1, 1)
layout.setSpacing(1)
yield layout
def stack(self, *args, **kwargs): # pragma: no cover
return self._layout(QtWidgets.QStackedLayout, *args, **kwargs)
def form(self, *args, **kwargs):
class _FormLayout(QtWidgets.QFormLayout):
def addLayout(self, layout):
self.addRow(layout)
def addRow(self, label, widget=None): # pylint: disable=C0111
if isinstance(label, str):
label = QtWidgets.QLabel(label)
label.setSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)
label.setAlignment(QtCore.Qt.AlignVCenter)
if widget is None:
super().addRow(label)
else:
super().addRow(label, widget)
return self._layout(_FormLayout, *args, **kwargs)
def split(self, *args, **kwargs): # pragma: no cover
return self._layout(QtWidgets.QSplitter, *args, **kwargs)
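
# Editorial usage sketch, not part of the original module: how LayoutBuilder is
# typically driven from a widget's __init__. ExampleWidget and the button labels
# are hypothetical; LayoutBuilder and QtWidgets come from this file's imports.
class ExampleWidget(QtWidgets.QWidget):
    def __init__(self, parent=None):
        super().__init__(parent)
        builder = LayoutBuilder(self)
        with builder.vbox() as vbox:
            vbox.addWidget(QtWidgets.QLabel('Title'))
            with builder.hbox() as hbox:
                hbox.addWidget(QtWidgets.QPushButton('Left'))
                hbox.addWidget(QtWidgets.QPushButton('Right'))
        # when the outer context exits, the assembled layout is installed on the widget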
def getSaveFilename(parent, domain, extension):
with Settings().grouped('Paths') as settings:
path = QtCore.QStandardPaths.writableLocation(QtCore.QStandardPaths.DocumentsLocation)
sname = 'save_%s' % domain
if settings.contains(sname):
path = settings.value(sname)
while True:
name, dummy = QtWidgets.QFileDialog.getSaveFileName(parent, _('Save'), path, '*.%s' % extension, options=QtWidgets.QFileDialog.DontConfirmOverwrite)
if not name:
return None
if not name.endswith('.%s' % extension):
name = '%s.%s' % (name, extension)
if os.path.exists(name):
resp = QtWidgets.QMessageBox.question(parent,
_('Overwrite file?'),
_('This file already exists. Overwrite?'),
QtWidgets.QMessageBox.Yes|QtWidgets.QMessageBox.No|QtWidgets.QMessageBox.Cancel)
if resp == QtWidgets.QMessageBox.Yes:
settings.setValue(sname, os.path.dirname(name))
return name
if resp == QtWidgets.QMessageBox.No:
continue
return None
settings.setValue(sname, os.path.dirname(name))
return name
def getOpenFilename(parent, domain, extension):
with Settings().grouped('Paths') as settings:
path = QtCore.QStandardPaths.writableLocation(QtCore.QStandardPaths.DocumentsLocation)
sname = 'open_%s' % domain
if settings.contains(sname):
path = settings.value(sname)
name, dummy = QtWidgets.QFileDialog.getOpenFileName(parent, _('Open file'), path, '*.%s' % extension if extension else '')
if name:
settings.setValue(sname, os.path.dirname(name))
return name
return None
class EnumComboBox(QtWidgets.QComboBox):
valueChanged = QtCore.pyqtSignal(object)
def __init__(self, *args, enum, value=None, **kwargs):
super().__init__(*args, **kwargs)
self._enum = enum
for item in enum:
self.addItem(enum.label(item), item)
if value is not None:
self.setValue(value)
self.currentIndexChanged.connect(self._emit)
def setValue(self, value):
for index, item in enumerate(self._enum):
if value == item:
self.setCurrentIndex(index)
break
else:
raise ValueError('Value "%s" not found in enum' % str(value))
def _emit(self, _):
self.valueChanged.emit(self.currentData())
| 37.884615
| 160
| 0.57445
| 565
| 5,910
| 5.941593
| 0.247788
| 0.044683
| 0.025022
| 0.023831
| 0.292523
| 0.260352
| 0.260352
| 0.260352
| 0.260352
| 0.217456
| 0
| 0.006248
| 0.323012
| 5,910
| 155
| 161
| 38.129032
| 0.832792
| 0.018782
| 0
| 0.302326
| 0
| 0
| 0.022786
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.116279
| false
| 0
| 0.031008
| 0.015504
| 0.248062
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ef7cab0d5cd63afd5bc70bd0539a8ffbacf39c0
| 37,201
|
py
|
Python
|
src/tiden/tidenrunner.py
|
mshonichev/example_pkg
|
556a703fe8ea4a7737b8cae9c5d4d19c1397a70b
|
[
"Apache-2.0"
] | null | null | null |
src/tiden/tidenrunner.py
|
mshonichev/example_pkg
|
556a703fe8ea4a7737b8cae9c5d4d19c1397a70b
|
[
"Apache-2.0"
] | null | null | null |
src/tiden/tidenrunner.py
|
mshonichev/example_pkg
|
556a703fe8ea4a7737b8cae9c5d4d19c1397a70b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
#
# Copyright 2017-2020 GridGain Systems.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .tidenpluginmanager import PluginManager
from .report.steps import step, InnerReportConfig, Step, add_attachment, AttachmentType
from .util import log_print, unix_path, call_method, create_case, kill_stalled_java, exec_time
from .result import Result
from .util import write_yaml_file, should_be_skipped
from .logger import *
from .runner import get_test_modules, get_long_path_len, get_class_from_module, known_issue_str
from .priority_decorator import get_priority_key
from .sshpool import SshPool
from uuid import uuid4
from traceback import format_exc
from .runner import set_configuration_options, get_configuration_representation, get_actual_configuration
from importlib import import_module
from os import path, mkdir
from time import time
from shutil import copyfile
from os.path import join, basename
from glob import glob
import traceback
class TidenTestPlan:
all_tests = None
skipped_tests = None
tests_to_execute = None
def __init__(self):
self.all_tests = {}
self.skipped_tests = []
self.tests_to_execute = []
def update(self, other):
self.all_tests.update(other.all_tests)
self.skipped_tests.extend(other.skipped_tests)
self.tests_to_execute.extend(other.tests_to_execute)
class TidenRunner:
# {
# '<suite_name>.<test_file_name>': {
# 'path': <full-path-to-test-file>,
# 'module_short_name': <test_file_name>,
# }
# }
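# An illustrative instance of that shape (module and path names are hypothetical,
# not taken from a real run):
# {
#     'mysuite.test_example': {
#         'path': '/home/user/tiden/suites/mysuite/test_example.py',
#         'module_short_name': 'test_example',
#     },
# }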
modules = None
# Tiden config dictionary
config = None
# Tiden SshPool instance
ssh_pool = None
# Tiden PluginManager instance
pm = None
# longest length of the test name
long_path_len = 0
# instance of Result class
result = None
# current test module, a key to self.modules dictionary
test_module = None
# == TidenTestPlan for all modules:
total = None
# dictionary of TidenTestPlan indexed by test module name
test_plan = {}
# == for current test module:
# a short name of test module, e.g. test module file name without .py extension
module_short_name = None
# a name of module' test class
test_class_name = None
# instance of current module' test case class
test_class = None
# == for current test within module:
# test name, with all configuration options
current_test_name = None
# test method name only
current_test_method = None
def __init__(self, config, **kwargs):
if kwargs.get('modules', None) is not None:
self.modules = kwargs.get('modules')
else:
self.modules = get_test_modules(config, collect_only=kwargs.get('collect_only'))
self.config = config
self.long_path_len = get_long_path_len(self.modules)
xunit_path_var = None
if kwargs.get('xunit_path'):
xunit_path_var = kwargs.get('xunit_path')
elif config.get('var_dir') and config.get('xunit_file'):
xunit_path_var = join(config.get('var_dir'), config.get('xunit_file'))
self.result = Result(xunit_path=xunit_path_var)
self.ssh_pool: SshPool = kwargs.get('ssh_pool')
self.pm: PluginManager = kwargs.get('plugin_manager')
def collect_tests(self):
"""
Collect tests from all modules.
"""
log_print("*** Collecting tests ***", color='blue')
long_path_len = get_long_path_len(self.modules)
from tiden.sshpool import AbstractSshPool
self.ssh_pool = AbstractSshPool({'hosts': []})
def empty_init(self, config, ssh_pool):
self.config = config
self.ssh = ssh_pool
self.__prepare_session_vars()
for test_module in sorted(self.modules.keys()):
# cleanup instance vars
self.test_plan[test_module] = TidenTestPlan()
self.__prepare_module_vars(test_module, fake_init=empty_init)
self.__print_current_module_name()
test_method_names = sorted(list(self.gen_tests(self.test_class)))
self.create_test_module_attr_yaml(test_method_names)
self.collect_tests0(test_method_names)
self.total.update(self.test_plan[test_module])
log_print("*** Found %s tests. %s skipped. Going to 'run' %s tests ***" % (
len(self.total.all_tests),
len(self.total.skipped_tests),
len(self.total.tests_to_execute)
), color='blue')
test_cnt = 0
# Skipped tests do not hit collect report
# Now generate results for 'executed' tests
for test_module in sorted(self.modules.keys()):
self.__prepare_module_vars(test_module, fake_init=empty_init)
test_plan = self.test_plan[self.test_module]
for test_name in sorted(test_plan.tests_to_execute):
test_param = test_plan.all_tests[test_name]
self.__prepare_test_vars(**test_param)
test_cnt = test_cnt + 1
self.result.start_testcase(self.test_class, self.current_test_name)
self.__print_found_test_method_to_execute(long_path_len, test_cnt, test_module)
self.result.stop_testcase('pass')
def process_tests(self):
"""
Run all tests
:return:
"""
log_print("*** Tests ***", color='blue')
self.__prepare_session_vars()
# Check requirements for applications
for test_module in sorted(self.modules.keys()):
module = import_module("suites.%s" % test_module)
test_class_name = get_class_from_module(self.modules[test_module]['module_short_name'])
test_class = getattr(module, test_class_name)(self.config, self.ssh_pool)
if hasattr(test_class, 'check_requirements'):
test_class.check_requirements()
for test_module in sorted(self.modules.keys()):
# cleanup instance vars
self.test_plan[test_module] = TidenTestPlan()
self.__prepare_module_vars(test_module)
# find test methods:
if hasattr(self.test_class, '__configurations__'):
cfg_options = getattr(self.test_class, '__configuration_options__')
configuration = get_actual_configuration(self.config, cfg_options)
log_print("Configuration options for %s:\n%s" % (self.test_class.__class__.__name__,
'\n'.join([
'\t' + cfg_option_name + '=' + str(
configuration[i])
for i, cfg_option_name in enumerate(cfg_options)
])),
color='blue')
else:
cfg_options = None
configuration = None
test_method_names = list(self.gen_tests(self.test_class))
self.collect_tests1(test_method_names, common_test_param={
'configuration': configuration,
'cfg_options': cfg_options,
})
test_plan = self.test_plan[self.test_module]
if len(test_plan.skipped_tests) > 0:
self._skip_tests()
if len(test_plan.tests_to_execute) > 0:
tests_to_execute = sorted(test_plan.tests_to_execute, key=get_priority_key(self.test_class))
log_print("*** Found %s tests in %s. %s skipped. Going to run %s tests ***\n%s" % (
len(test_plan.all_tests), self.test_class_name, len(test_plan.skipped_tests),
len(test_plan.tests_to_execute),
'\n'.join([
test_plan.all_tests[test_name]['test_method_name']
for test_name in tests_to_execute
])),
color='blue')
# Execute module setup
setup_passed = self.__call_module_setup_teardown('setup')
if setup_passed:
self._run_tests(tests_to_execute)
# Execute module teardown
self.__call_module_setup_teardown('teardown')
# this is for correct fail in Jenkins
if not setup_passed:
exit(1)
def create_test_module_attr_yaml(self, test_method_names):
# create attr.yaml
for current_test_name in test_method_names:
test_function = getattr(self.test_class, current_test_name)
create_case(test_function)
def __prepare_session_vars(self):
self.test_plan = {}
self.total = TidenTestPlan()
def __prepare_module_vars(self, module_name, fake_init=None):
"""
Prepare per-module initialization of internal variables:
Expects self.test_module be set to proper full name of module under 'suites' directory
sets up
self.test_class_name
self.module_short_name
self.test_class - creates instance of test case class
resets
self.all_tests, self.tests_to_execute, self.skipped_tests
config
fills in config['rt'], config['rt']['remote']
Creates test module working local and remote directories.
Copies resources from suite directory to local test module working directory.
:param module_name: name of the module to prepare
:param fake_init: do not init module
:return:
"""
self.test_module = module_name
# fill new module vars
self.module_short_name = self.modules[self.test_module]['module_short_name']
test_module_dir = "%s/%s" % (self.config['suite_var_dir'], self.module_short_name)
remote_test_module_dir = "%s/%s" % (self.config['remote']['suite_var_dir'], self.module_short_name)
self.test_class_name = get_class_from_module(self.module_short_name)
# Update Tiden config
self.config['rt'] = {
'test_class': self.test_class_name,
'test_method': None,
'test_module': self.test_module,
'test_module_name': self.module_short_name,
'test_module_dir': test_module_dir,
'remote': {
'test_module_dir': remote_test_module_dir,
}
}
module = import_module("suites.%s" % self.test_module)
# used for collect_only
if fake_init:
self.test_class = getattr(module, self.test_class_name)
self.test_class.__init__ = fake_init
self.test_class = getattr(module, self.test_class_name)(self.config, self.ssh_pool)
else:
# for process tests - prepare test directory and resources
self.__create_test_module_directory(remote_test_module_dir, test_module_dir)
self.test_class = getattr(module, self.test_class_name)(self.config, self.ssh_pool)
if hasattr(self.test_class, 'tiden'):
self.__copy_resources_to_local_test_module_directory()
# Set ssh and config apps model classes
self.test_class.tiden.config = self.config
self.test_class.tiden.ssh = self.ssh_pool
self.test_class.config = self.config
self.test_class.ssh = self.ssh_pool
self._save_config()
def __prepare_test_vars(self, test_method_name=None, configuration=None, cfg_options=None, **kwargs):
if not test_method_name:
return
self.test_iteration = 1
self.current_test_method = test_method_name
if hasattr(self.test_class, '__configurations__'):
if cfg_options is None:
cfg_options = getattr(self.test_class, '__configuration_options__')
if configuration is None:
configuration = get_actual_configuration(self.config, cfg_options)
configuration_representation = get_configuration_representation(cfg_options, configuration)
self.current_test_name = self.current_test_method + configuration_representation
else:
self.current_test_name = self.current_test_method
def collect_test0(self):
# collect test params
test_params = {
'test_name': self.current_test_name,
}
test_function = getattr(self.test_class, self.current_test_method)
# first setup fixture
if hasattr(test_function, "__setup__"):
setup_fixture = getattr(test_function, "__setup__")
if type(setup_fixture) == type(''):
setup_method = getattr(self.test_class, setup_fixture)
else:
setup_method = setup_fixture
test_params['setup_test_params'] = True
test_params['setup_test_method'] = setup_method
# next, teardown fixture
if hasattr(test_function, "__teardown__"):
teardown_fixture = getattr(test_function, "__teardown__")
teardown_method = getattr(self.test_class, teardown_fixture)
test_params['teardown_test_method'] = teardown_method
# don't forget known issues
if hasattr(test_function, "__known_issues__"):
known_issue = getattr(test_function, "__known_issues__")
test_params['known_issue'] = known_issue
# test by default runs only once,
# unless repeated_test_count set explicitly by decorator or framework option
repeat_count = 1
# here, we check --to=repeated_test=N and --to=repeated_test.test_name=N options
# and decorate test with @repeated_test automagically if that's required
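# (Hypothetical examples for illustration: --to=repeated_test=3 repeats every
# collected test 3 times, while --to=repeated_test.test_rebalance=5 repeats only
# test_rebalance; a value of 0 causes the test to be skipped in collect_tests1.)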
if self.config.get('repeated_test'):
repeated_test_option = self.config['repeated_test']
re_decorate = False
if type({}) != type(repeated_test_option):
# if option was given as --to=repeated_test=N, re-decorate all tests
re_decorate = True
repeat_count = int(repeated_test_option)
elif self.current_test_method in repeated_test_option.keys():
# otherwise re-decorate only if test name matches given option
re_decorate = True
repeat_count = int(repeated_test_option[self.current_test_method])
if re_decorate:
from tiden.util import repeated_test
original_test = test_function
if hasattr(original_test, 'repeated_test_name'):
# that test was previously decorated by @repeated_test, extract original test_names
original_names = original_test.repeated_test_name
decorated_test = repeated_test(repeat_count,
test_names=original_names)(original_test.__func__)
else:
# that's a brand new decoration
decorated_test = repeated_test(repeat_count)(original_test.__func__)
# this magic required to convert decorated test function to method of a test class
from types import MethodType
setattr(self.test_class, self.current_test_method, MethodType(decorated_test, self.test_class))
test_function = getattr(self.test_class, self.current_test_method)
if hasattr(test_function, 'repeated_test_count'):
repeat_count = test_function.repeated_test_count
repeated_test_name = test_function.repeated_test_name
test_params['repeated_test_count'] = repeat_count
test_params['repeated_test_name'] = repeated_test_name
test_params['continue_on_fail'] = self.config.get('repeated_test_continue_on_fail', False)
return test_params
def _skip_tests(self):
test_plan = self.test_plan[self.test_module]
skipped_tests = sorted(test_plan.skipped_tests)
try:
for current_test in skipped_tests:
test_param = test_plan.all_tests[current_test]
self.__prepare_test_vars(**test_param)
pad_string = self.__get_pad_string(msg=self.current_test_method)
self.result.skip_testcase_no_start(self.test_class, self.current_test_name,
skip_message=test_param['skip_msg'],
skip_no_start=test_param['skip_no_start'])
self.result.update_xunit()
log_print("%s %s" % (pad_string, test_param['skip_msg']), color='yellow')
finally:
self.current_test_name = None
self.current_test_method = None
def _run_tests(self, tests_to_execute):
test_plan = self.test_plan[self.test_module]
try:
for test_cnt, current_test in enumerate(tests_to_execute, start=1):
test_param = test_plan.all_tests[current_test]
self.__prepare_test_vars(**test_param)
repeated_test_count = test_param.get('repeated_test_count', 1)
repeated_test_continue_on_fail = test_param.get('continue_on_fail')
test_with_iterations = True if repeated_test_count > 1 else False
pad_string = self.__get_pad_string()
log_print("%s started (%s from %s)" % (pad_string, test_cnt, len(tests_to_execute)), color='yellow')
for self.test_iteration in range(repeated_test_count):
if test_with_iterations:
log_print("{} started (iteration {} from {})".format(pad_string,
self.test_iteration + 1,
repeated_test_count), color='yellow')
test_status = self._run_test()
if test_with_iterations and test_status != 'pass' and not repeated_test_continue_on_fail:
self.result.update_test_name('{}_iteration_{}'.format(current_test, self.test_iteration + 1))
break
finally:
self.current_test_name = None
self.current_test_method = None
def _run_test(self):
setattr(self, '_secret_report_storage', InnerReportConfig())
test_exception = None
tb_msg = None
test_status = 'pass'
pad_string = self.__get_pad_string()
started = int(time())
known_issue = self.test_plan[self.test_module].all_tests[self.current_test_name].get('known_issue')
setattr(self.test_class, '_secret_report_storage', InnerReportConfig())
try:
self.pm.do("before_test_method",
test_module=self.test_module,
test_name=self.current_test_name,
artifacts=self.config.get('artifacts', {}))
self.result.start_testcase(self.test_class, self.current_test_name)
self.__update_config_and_save(current_method_name=self.current_test_name)
# Execute test setup method
self.__call_test_setup_teardown('setup')
# self.__print_with_format()
with Step(self, 'Execution'):
try:
call_method(self.test_class, self.current_test_method)
finally:
self.__set_child_steps_to_parent()
self.__save_logs()
log_print(f"{pad_string} passed {exec_time(started)}", color='green')
except (AssertionError, TidenException) as e:
test_status = 'fail'
test_exception = e
tb_msg = traceback.format_exc()
except Exception as e:
test_status = 'error'
test_exception = e
tb_msg = traceback.format_exc()
finally:
if test_status != 'pass':
log_print(tb_msg, color='red')
log_print("{} {} {}{}".format(pad_string,
test_status,
exec_time(started),
known_issue_str(known_issue)),
color='red')
self.result.stop_testcase(
test_status,
e=test_exception,
tb=tb_msg,
known_issue=known_issue,
run_info=self.test_class.get_run_info() if hasattr(self.test_class, 'get_run_info') else None
)
# Execute test teardown method
self.__call_test_setup_teardown('teardown')
self.pm.do('after_test_method',
test_status=test_status,
exception=test_exception,
stacktrace=tb_msg,
known_issue=known_issue,
description=getattr(self.test_class, self.current_test_method, lambda: None).__doc__,
inner_report_config=getattr(self, '_secret_report_storage'))
# Kill java process if teardown function didn't kill nodes
if not hasattr(self.test_class, 'keep_ignite_between_tests'):
kill_stalled_java(self.ssh_pool)
return test_status
@step('logs')
def __save_logs(self):
test_dir = self.config.get('rt', {}).get('remote', {}).get('test_dir')
if 'WardReport' in self.config.get('plugins', []):
report_config = self.config['plugins']['WardReport']
files_receiver_url = report_config['files_url']
upload_logs = report_config['upload_logs']
else:
return
if test_dir:
try:
for host_ip, output_lines in self.ssh_pool.exec([f"ls {test_dir}"]).items():
with Step(self, host_ip):
for line in output_lines:
file_name: str
for file_name in line.split('\n'):
if file_name and file_name.endswith('.log'):
send_file_name = f'{uuid4()}_{file_name}'
add_attachment(self, file_name, send_file_name, AttachmentType.FILE)
if upload_logs:
cmd = f'cd {test_dir}; ' \
f'curl -H "filename: {send_file_name}" ' \
f'-F "file=@{file_name};filename={file_name}" ' \
f'{files_receiver_url}/files/add'
self.ssh_pool.exec_on_host(host_ip, [cmd])
except:
log_print(f'Failed to send report. \n{format_exc()}', color='pink')
def __copy_resources_to_local_test_module_directory(self):
"""
Copy resources in test resource directory
:return:
"""
test_resource_dir = "%s/res" % self.config['rt']['test_module_dir']
if not path.exists(test_resource_dir):
mkdir(test_resource_dir)
self.config['rt']['resource_dir'] = "%s/res/%s" % (self.config['suite_dir'], self.module_short_name[5:])
for file in glob("%s/*" % self.config['rt']['resource_dir']):
if path.isfile(file):
copyfile(file, f"{test_resource_dir}/{basename(file)}")
self.config['rt']['test_resource_dir'] = unix_path(test_resource_dir)
def __create_test_module_directory(self, remote_test_module_dir, test_module_dir):
mkdir(test_module_dir)
self.ssh_pool.exec([f'mkdir -p {remote_test_module_dir}'])
@step('{method_name}')
def __call_test_setup_teardown(self, method_name):
method_to_execute = None
try:
self._call_plugin_manager(f'before_test_method_{method_name}')
all_tests = self.test_plan[self.test_module].all_tests
if all_tests[self.current_test_name].get(f'{method_name}_test_method'):
method_to_execute = all_tests[self.current_test_name].get(f'{method_name}_test_method')
self.__print_with_format(msg=str(method_to_execute.__name__))
try:
if all_tests[self.current_test_name].get(f'{method_name}_test_params'):
method_to_execute(self.test_class)
else:
method_to_execute()
except Exception as e:
log_print(f'!!! Exception in {method_name} code !!!', color='red')
log_print(traceback.format_exc())
try:
self.__save_logs()
except:
log_print(f'Failed to get logs\n{traceback.format_exc()}', color='pink')
# if exception in setup method then re-raise the exception as we should fail the test
if method_name == 'setup':
raise e
finally:
self.__set_child_steps_to_parent()
self._call_plugin_manager(f'after_test_method_{method_name}')
def __set_child_steps_to_parent(self):
exec_report: InnerReportConfig = getattr(self.test_class, '_secret_report_storage', None)
test_report: InnerReportConfig = getattr(self, '_secret_report_storage')
idx_to_add = None
for idx, test_step in enumerate(test_report.steps):
if test_step['status'] is None:
idx_to_add = idx
break
test_report.steps[idx_to_add]['children'] = exec_report.steps + test_report.steps[idx_to_add].get('children', [])
title = getattr(getattr(self.test_class, self.current_test_method), '__report_title__', None)
suites = getattr(getattr(self.test_class, self.current_test_method), '__report_suites__', None)
if title:
test_report.title = title
test_report.suites = suites
setattr(self, '_secret_report_storage', test_report)
setattr(self.test_class, '_secret_report_storage', InnerReportConfig())
def __call_module_setup_teardown(self, fixture_name):
"""
Execute test module setup/teardown fixture.
:param fixture_name: either 'setup' or 'teardown'
:return:
"""
self._call_plugin_manager('before_test_class_%s' % fixture_name)
fixture_passed = True
try:
if hasattr(self.test_class, fixture_name):
started = time()
try:
self.__print_with_format('started', current_method_name=fixture_name)
self.__update_config_and_save(current_method_name=fixture_name)
# Execute setup or teardown method
call_method(self.test_class, fixture_name)
self.__print_with_format('finished in %s sec' % (int(time() - started)),
current_method_name=fixture_name)
# except (AssertionError, TidenException) as e:
except Exception as e:
fixture_passed = False
self.__print_with_format('failed in %s sec' % (int(time() - started)),
current_method_name=fixture_name)
log_print('Exception in %s.%s.%s: %s\n%s' %
(self.test_module, self.test_class_name, fixture_name,
str(e), str(traceback.format_exc())), color='red')
finally:
self._call_plugin_manager('after_test_class_%s' % fixture_name)
return fixture_passed
def _call_plugin_manager(self, execution_point):
args = [self.test_module, self.test_class]
if self.current_test_method:
args.append(self.current_test_method)
self.pm.do(execution_point, *args)
def __update_config_and_save(self, current_method_name=None):
test_method = current_method_name if current_method_name else self.current_test_method
test_method_name = test_method.split('(')[0] if '(' in test_method else test_method
test_dir_name = test_method_name
all_tests = self.test_plan[self.test_module].all_tests
# cause of repeated_tests decorator
if all_tests.get(test_method) and all_tests[test_method].get('repeated_test_name'):
test_dir_name = '{}_{}'.format(
test_method_name,
all_tests[test_method].get('repeated_test_name')[self.test_iteration])
self.config['rt']['test_method'] = test_method_name
self.config['rt']['remote']['test_dir'] = "{}/{}/{}".format(
self.config['rt']['remote']['test_module_dir'],
self.config['rt']['test_class'],
test_dir_name
)
self.config['rt']['test_dir'] = "{}/{}/{}".format(
self.config['rt']['test_module_dir'], self.config['rt']['test_class'], test_dir_name)
try:
create_remote_dir = [
'mkdir -p %s/%s/%s' % (self.config['rt']['remote']['test_module_dir'],
self.test_class_name, str(test_dir_name)),
'ln -sfn %s %s/current_test_directory' % (self.config['rt']['remote']['test_module_dir'],
self.config['environment']['home'])
]
self.ssh_pool.exec(create_remote_dir)
except Exception:
log_print("Can't create symlink to current test", color='red')
self._save_config()
def _check_test_for_skip(self):
attribs = []
skip_test = False
skip_msg = None
skip_no_start = False
test_function = getattr(self.test_class, self.current_test_method)
if hasattr(test_function, "__attrib__"):
attribs = getattr(test_function, "__attrib__")
attribs.append(str(self.current_test_method))
# if attr is passed to runner and test is not marked with one of the attribute
# then skip it.
if 'mute' in attribs:
skip_msg = 'skipped cause test is MUTED'
known_issue = None
if hasattr(test_function, "__known_issues__"):
known_issue = getattr(test_function, "__known_issues__")
if known_issue:
skip_msg = '{} cause of {}'.format(skip_msg, known_issue)
skip_test = True
skip_no_start = True
elif self.config.get('attrib') and should_be_skipped(self.config.get('attrib'), attribs,
self.config.get('attr_match', 'any')):
skip_msg = 'skipped cause of attrib mismatch'
skip_test = True
skip_no_start = True
if hasattr(test_function, "__skipped__"):
skip_msg = 'skipped cause of %s' % test_function.__skipped_message__
skip_test = True
if hasattr(test_function, "__skip_cond__"):
skip_condition = getattr(test_function, "__skip_cond__")
conditions_met, skip_message = skip_condition(self.config)
if not conditions_met:
skip_msg = 'skipped cause of %s' % skip_message
skip_test = True
if hasattr(test_function, "__skip_conds__") and \
len(test_function.__skip_conds__) > 0:
skip_conditions = test_function.__skip_conds__
for skip_condition in skip_conditions:
conditions_met, skip_message = skip_condition(self.test_class)
if not conditions_met:
skip_msg = 'skipped cause of %s' % skip_message
skip_test = True
return skip_test, skip_msg, skip_no_start
def get_tests_results(self):
return self.result
def _save_config(self):
write_yaml_file(self.config['config_path'], self.config)
@staticmethod
def gen_tests(test_class):
"""
Generates all test method of given test class
:param test_class:
:return:
"""
for class_attr in dir(test_class):
if class_attr.startswith('test_'):
yield class_attr
def collect_tests0(self, test_method_names):
"""
Collect given set of tests from test module for all configurations
:param test_method_names:
:return:
"""
if not hasattr(self.test_class, '__configurations__'):
self.collect_tests1(test_method_names)
else:
cfg_options = getattr(self.test_class, '__configuration_options__').copy()
configurations = getattr(self.test_class, '__configurations__').copy()
for configuration in configurations:
# set configuration options from given configuration to Tiden config,
# so that test can check options and skip itself
set_configuration_options(cfg_options, self.config, configuration)
self.collect_tests1(test_method_names, common_test_param={
'configuration': configuration,
'cfg_options': cfg_options,
})
def collect_tests1(self, test_method_names, common_test_param={}):
"""
Collect given tests from current test module
:param test_method_names:
:param common_test_param:
:return:
"""
try:
test_plan = self.test_plan[self.test_module]
for test_method_name in test_method_names:
self.__prepare_test_vars(test_method_name, **common_test_param)
test_param = {
'test_method_name': test_method_name,
}
is_skipped, skip_msg, skip_no_start = self._check_test_for_skip()
test_param.update(self.collect_test0())
repeat_count = test_param.get('repeated_test_count', 1)
if repeat_count > 0:
if repeat_count == 1:
# don't rename tests when only one iteration requested
test_param['repeated_test_name'] = []
else:
# rare case, skip by --to=repeated_test.test_name=0
is_skipped = True
skip_msg = 'skipped due to repeated_test iterations <= 0'
skip_no_start = False
if is_skipped:
test_param.update({
'skip_msg': skip_msg,
'skip_no_start': skip_no_start,
})
test_plan.skipped_tests.append(self.current_test_name)
else:
if common_test_param:
test_param.update(common_test_param)
test_plan.tests_to_execute.append(self.current_test_name)
test_plan.all_tests[self.current_test_name] = test_param.copy()
finally:
self.current_test_method = None
self.current_test_name = None
def __print_found_test_method_to_execute(self, long_path_len, test_cnt, test_module):
method_long_name = "%s.%s.%s " % (test_module, self.test_class_name, self.current_test_name)
pad_string = method_long_name.ljust(long_path_len, '.')
log_print("%s found (%s from %s)" % (pad_string, test_cnt, len(self.total.tests_to_execute)), color='yellow')
def __print_with_format(self, msg='', current_method_name=''):
if not current_method_name:
if self.current_test_method:
current_method_name = self.current_test_method
else:
current_method_name = ''
log_print("[{}][.{}.{}] {}".format(
datetime.now().isoformat()[11:-7],
self.test_class_name,
current_method_name,
msg))
def __print_current_module_name(self):
log_print("[%s][%s]" % (
datetime.now().isoformat()[11:-7], self.test_module))
def __get_pad_string(self, msg=None):
return ("%s.%s.%s " % (
self.test_module, self.test_class_name, msg if msg else self.current_test_method)) \
.ljust(self.long_path_len, '.')
| 42.858295
| 121
| 0.597403
| 4,315
| 37,201
| 4.774508
| 0.099421
| 0.038831
| 0.037229
| 0.024464
| 0.414135
| 0.308951
| 0.230172
| 0.186875
| 0.125133
| 0.107077
| 0
| 0.001962
| 0.314938
| 37,201
| 867
| 122
| 42.907728
| 0.806435
| 0.110938
| 0
| 0.234401
| 0
| 0.001686
| 0.098218
| 0.019257
| 0
| 0
| 0
| 0
| 0.001686
| 1
| 0.053963
| false
| 0.01855
| 0.040472
| 0.003373
| 0.139966
| 0.05059
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ef87bb853368bafa20ca953ac321175f6e8c5af
| 5,425
|
py
|
Python
|
ludwig/data/cache/manager.py
|
ludwig-ai/ludw
|
b9d95bbdb474bc22260269de1bc094bc5455f37c
|
[
"Apache-2.0"
] | 970
|
2020-12-17T15:09:20.000Z
|
2022-03-31T22:58:03.000Z
|
ludwig/data/cache/manager.py
|
ludwig-ai/ludw
|
b9d95bbdb474bc22260269de1bc094bc5455f37c
|
[
"Apache-2.0"
] | 503
|
2020-12-16T21:44:40.000Z
|
2022-03-31T18:21:52.000Z
|
ludwig/data/cache/manager.py
|
ludwig-ai/ludw
|
b9d95bbdb474bc22260269de1bc094bc5455f37c
|
[
"Apache-2.0"
] | 145
|
2020-12-18T07:38:30.000Z
|
2022-03-29T19:05:08.000Z
|
import logging
import os
import re
import uuid
from pathlib import Path
from ludwig.constants import CHECKSUM, META, TEST, TRAINING, VALIDATION
from ludwig.data.cache.util import calculate_checksum
from ludwig.utils import data_utils
from ludwig.utils.fs_utils import delete, path_exists
logger = logging.getLogger(__name__)
def alphanum(v):
"""Filters a string to only its alphanumeric characters."""
return re.sub(r"\W+", "", v)
class DatasetCache:
def __init__(self, config, checksum, cache_map, dataset_manager):
self.config = config
self.checksum = checksum
self.cache_map = cache_map
self.dataset_manager = dataset_manager
def get(self):
training_set_metadata_fp = self.cache_map[META]
if not path_exists(training_set_metadata_fp):
return None
cache_training_set_metadata = data_utils.load_json(training_set_metadata_fp)
cached_training_set = self.cache_map[TRAINING] if path_exists(self.cache_map[TRAINING]) else None
cached_test_set = self.cache_map[TEST] if path_exists(self.cache_map[TEST]) else None
cached_validation_set = self.cache_map[VALIDATION] if path_exists(self.cache_map[VALIDATION]) else None
valid = self.checksum == cache_training_set_metadata.get(CHECKSUM) and cached_training_set is not None
return valid, cache_training_set_metadata, cached_training_set, cached_test_set, cached_validation_set
def put(self, training_set, test_set, validation_set, training_set_metadata):
logger.info("Writing preprocessed training set cache")
training_set = self.dataset_manager.save(
self.cache_map[TRAINING],
training_set,
self.config,
training_set_metadata,
TRAINING,
)
if test_set is not None:
logger.info("Writing preprocessed test set cache")
test_set = self.dataset_manager.save(
self.cache_map[TEST],
test_set,
self.config,
training_set_metadata,
TEST,
)
if validation_set is not None:
logger.info("Writing preprocessed validation set cache")
validation_set = self.dataset_manager.save(
self.cache_map[VALIDATION],
validation_set,
self.config,
training_set_metadata,
VALIDATION,
)
logger.info("Writing train set metadata")
data_utils.save_json(self.cache_map[META], training_set_metadata)
return training_set, test_set, validation_set, training_set_metadata
def delete(self):
for fname in self.cache_map.values():
if path_exists(fname):
delete(fname)
class CacheManager:
def __init__(self, dataset_manager, cache_dir=None):
self._dataset_manager = dataset_manager
self._cache_dir = cache_dir
def get_dataset_cache(self, config, dataset=None, training_set=None, test_set=None, validation_set=None):
if dataset is not None:
key = self.get_cache_key(dataset, config)
cache_map = {
META: self.get_cache_path(dataset, key, META, "json"),
TRAINING: self.get_cache_path(dataset, key, TRAINING),
TEST: self.get_cache_path(dataset, key, TEST),
VALIDATION: self.get_cache_path(dataset, key, VALIDATION),
}
return DatasetCache(config, key, cache_map, self._dataset_manager)
else:
key = self.get_cache_key(training_set, config)
cache_map = {
META: self.get_cache_path(training_set, key, META, "json"),
TRAINING: self.get_cache_path(training_set, key, TRAINING),
TEST: self.get_cache_path(test_set, key, TEST),
VALIDATION: self.get_cache_path(validation_set, key, VALIDATION),
}
return DatasetCache(config, key, cache_map, self._dataset_manager)
def get_cache_key(self, dataset, config):
if not isinstance(dataset, str):
# TODO(travis): could try hashing the in-memory dataset, but this is tricky for Dask
return str(uuid.uuid1())
return calculate_checksum(dataset, config)
def get_cache_path(self, dataset, key, tag, ext=None):
if not isinstance(dataset, str):
dataset = None
if self._cache_dir is None and dataset is not None:
# Use the input dataset filename (minus the extension) as the cache path
stem = Path(dataset).stem
else:
# To avoid collisions across different directories, we use the unique checksum
# as the cache path
stem = alphanum(key)
ext = ext or self.data_format
cache_fname = f"{stem}.{tag}.{ext}"
return os.path.join(self.get_cache_directory(dataset), cache_fname)
def get_cache_directory(self, input_fname):
if self._cache_dir is None:
if input_fname is not None:
return os.path.dirname(input_fname)
return "."
return self._cache_dir
def can_cache(self, skip_save_processed_input):
return self._dataset_manager.can_cache(skip_save_processed_input)
@property
def data_format(self):
return self._dataset_manager.data_format
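
# Editorial usage sketch, not part of ludwig itself (the dataset manager object and
# file name below are hypothetical):
#
#   manager = CacheManager(some_dataset_manager, cache_dir="/tmp/ludwig_cache")
#   cache = manager.get_dataset_cache(config, dataset="train.csv")
#   hit = cache.get()  # None on a cold cache
#   if hit is None:
#       # ...preprocess, then...
#       cache.put(training_set, test_set, validation_set, training_set_metadata)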
| 37.413793
| 111
| 0.650323
| 677
| 5,425
| 4.937962
| 0.171344
| 0.078971
| 0.046665
| 0.038289
| 0.341908
| 0.283877
| 0.213281
| 0.168412
| 0.069399
| 0.039485
| 0
| 0.000254
| 0.272995
| 5,425
| 144
| 112
| 37.673611
| 0.847363
| 0.055853
| 0
| 0.127273
| 0
| 0
| 0.033444
| 0
| 0
| 0
| 0
| 0.006944
| 0
| 1
| 0.109091
| false
| 0
| 0.081818
| 0.018182
| 0.336364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ef896d76fe90ca7521ad1e92767789c5b227b40
| 2,629
|
py
|
Python
|
test_calc_base.py
|
kshshkim/factorioCalcPy
|
2a7c6ca567a3bf0d2b19f3cf0bc05274f83d4205
|
[
"MIT"
] | 1
|
2021-09-21T01:42:05.000Z
|
2021-09-21T01:42:05.000Z
|
test_calc_base.py
|
kshshkim/factorioCalcPy
|
2a7c6ca567a3bf0d2b19f3cf0bc05274f83d4205
|
[
"MIT"
] | null | null | null |
test_calc_base.py
|
kshshkim/factorioCalcPy
|
2a7c6ca567a3bf0d2b19f3cf0bc05274f83d4205
|
[
"MIT"
] | null | null | null |
import pprint
from FactorioCalcBase.data.binary import sorted_recipe_list, production_machine_category_list_dict
from FactorioCalcBase.recipe import Recipe
from FactorioCalcBase.calculator_base import CalculatorBase
from FactorioCalcBase.dependency_dict_common_function import dict_add_number
import time


def test_change_machine(test_obj: CalculatorBase, target_recipe, failed_dict):
    recipe_obj = Recipe(recipe_name=target_recipe)
    cat = recipe_obj.get_category()
    available_machine_list = production_machine_category_list_dict.get(cat)
    failed_dict['method_failed']['change_machine_failed'] = {}
    if len(available_machine_list) > 1:
        for machine in available_machine_list:
            test_obj.change_machine_to_specific_block(recipe_name=target_recipe,
                                                      machine_name=machine)
            if test_obj.block_obj_dict['recipe']['machine_name'] != machine:
                # raising a bare string is a TypeError in Python 3; raise a real exception
                raise RuntimeError('MachineNotChanged')


def test_calculator_base_methods(test_obj: CalculatorBase, failed_dict: dict):
    recipe_list = list(test_obj.block_obj_dict['recipe'].keys())
    for recipe in recipe_list:
        try:
            test_change_machine(test_obj, recipe, failed_dict)
        except:
            dict_add_number(failed_dict['method_failed']['change_machine_failed'], recipe, 1)


def test_calculator_base(failed_dict):
    mrms = [0, 0.3]
    pm = [None, ["assembling-machine-2", "stone-furnace", "burner-mining-drill"]]
    uk = [True, False]
    am = [1, 101.5]
    failed_dict['init_failed'] = {}
    failed_dict['method_failed'] = {
        'change_machine_failed': {
        }
    }
    for recipe in sorted_recipe_list:
        for mining_research_modifier in mrms:
            for preferred_machines in pm:
                for use_kovarex in uk:
                    for amount in am:
                        try:
                            test_obj = CalculatorBase(recipe_name=recipe, amount=amount,
                                                      preferred_machine_list=preferred_machines,
                                                      use_kovarex=use_kovarex,
                                                      mining_research_modifier=mining_research_modifier)
                        except:
                            dict_add_number(failed_dict['init_failed'], key=recipe, val=1)
                            # construction failed, so there is nothing to exercise further
                            continue
                        test_calculator_base_methods(test_obj, failed_dict)
    pprint.pp(failed_dict)
    return failed_dict


def run_test():
    start_time = time.time()
    test_calculator_base({})
    print(f'finished in {time.time()-start_time}')
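
# The module defines run_test() but never invokes it; a hypothetical entry point
# (not in the original file) would be:
#
# if __name__ == '__main__':
#     run_test()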
| 40.446154
| 104
| 0.637885
| 297
| 2,629
| 5.276094
| 0.276094
| 0.076579
| 0.045948
| 0.042119
| 0.259732
| 0.2291
| 0.078494
| 0
| 0
| 0
| 0
| 0.006369
| 0.283378
| 2,629
| 64
| 105
| 41.078125
| 0.825372
| 0
| 0
| 0.075472
| 0
| 0
| 0.096234
| 0.033092
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075472
| false
| 0
| 0.113208
| 0
| 0.207547
| 0.056604
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0ef9be0b4faecf741290076154fb3c5bae164853
| 6,546
|
py
|
Python
|
engine.py
|
nyumaya/wake-word-benchmark
|
d2f7ac091d31403f3398bc3ef2e2de4876a4629e
|
[
"Apache-2.0"
] | null | null | null |
engine.py
|
nyumaya/wake-word-benchmark
|
d2f7ac091d31403f3398bc3ef2e2de4876a4629e
|
[
"Apache-2.0"
] | null | null | null |
engine.py
|
nyumaya/wake-word-benchmark
|
d2f7ac091d31403f3398bc3ef2e2de4876a4629e
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2018 Picovoice Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from collections import namedtuple
from enum import Enum
import numpy as np
from pocketsphinx import get_model_path
from pocketsphinx.pocketsphinx import Decoder
from engines import Porcupine
from engines import snowboydetect
from engines import AudioRecognition, FeatureExtractor
class Engines(Enum):
POCKET_SPHINX = 'PocketSphinx'
PORCUPINE = 'Porcupine'
SNOWBOY = 'Snowboy'
NYUMAYA = 'Nyumaya'
SensitivityInfo = namedtuple('SensitivityInfo', 'min, max, step')
class Engine(object):
def process(self, pcm):
raise NotImplementedError()
def release(self):
raise NotImplementedError()
def __str__(self):
raise NotImplementedError()
@staticmethod
def frame_length(engine_type):
if engine_type is Engines.NYUMAYA:
return 1600
else:
return 512
@staticmethod
def sensitivity_info(engine_type):
if engine_type is Engines.POCKET_SPHINX:
return SensitivityInfo(-21, 15, 3)
elif engine_type is Engines.PORCUPINE:
return SensitivityInfo(0, 1, 0.1)
elif engine_type is Engines.SNOWBOY:
return SensitivityInfo(0, 1, 0.05)
elif engine_type is Engines.NYUMAYA:
return SensitivityInfo(0, 1, 0.1)
else:
raise ValueError("no sensitivity range for '%s'", engine_type.value)
@staticmethod
def create(engine, keyword, sensitivity):
if engine is Engines.POCKET_SPHINX:
return PocketSphinxEngine(keyword, sensitivity)
elif engine is Engines.PORCUPINE:
return PorcupineEngine(keyword, sensitivity)
elif engine is Engines.SNOWBOY:
return SnowboyEngine(keyword, sensitivity)
elif engine is Engines.NYUMAYA:
return NyumayaEngine(keyword, sensitivity)
else:
raise ValueError("cannot create engine of type '%s'", engine.value)
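
# Editorial usage sketch, not part of the benchmark code (keyword and sensitivity
# values below are illustrative):
#
#   engine = Engine.create(Engines.PORCUPINE, 'porcupine', 0.5)
#   frame_length = Engine.frame_length(Engines.PORCUPINE)
#   # feed consecutive np.int16 PCM frames of `frame_length` samples:
#   #   detected = engine.process(pcm_frame)
#   engine.release()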
class PocketSphinxEngine(Engine):
def __init__(self, keyword, sensitivity):
config = Decoder.default_config()
config.set_string('-logfn', '/dev/null')
config.set_string('-hmm', os.path.join(get_model_path(), 'en-us'))
config.set_string('-dict', os.path.join(get_model_path(), 'cmudict-en-us.dict'))
config.set_string('-keyphrase', keyword if keyword != 'snowboy' else 'snow boy')
config.set_float('-kws_threshold', 10 ** -sensitivity)
self._decoder = Decoder(config)
self._decoder.start_utt()
def process(self, pcm):
assert pcm.dtype == np.int16
self._decoder.process_raw(pcm.tobytes(), False, False)
detected = self._decoder.hyp()
if detected:
self._decoder.end_utt()
self._decoder.start_utt()
return detected
def release(self):
self._decoder.end_utt()
def __str__(self):
return 'PocketSphinx'
class PorcupineEngine(Engine):
def __init__(self, keyword, sensitivity):
self._porcupine = Porcupine(
library_path=os.path.join(self._repo_path, 'lib/linux/x86_64/libpv_porcupine.so'),
model_path=os.path.join(self._repo_path, 'lib/common/porcupine_params.pv'),
keyword_paths=[os.path.join(self._repo_path, 'resources/keyword_files/linux/%s_linux.ppn' % keyword.lower())],
sensitivities=[sensitivity])
def process(self, pcm):
assert pcm.dtype == np.int16
return self._porcupine.process(pcm) == 0
def release(self):
self._porcupine.delete()
def __str__(self):
return 'Porcupine'
@property
def _repo_path(self):
return os.path.join(os.path.dirname(__file__), 'engines/porcupine')
class SnowboyEngine(Engine):
def __init__(self, keyword, sensitivity):
keyword = keyword.lower()
if keyword == 'alexa':
model_relative_path = 'engines/snowboy/resources/alexa/alexa-avs-sample-app/alexa.umdl'
else:
model_relative_path = 'engines/snowboy/resources/models/%s.umdl' % keyword.replace(' ', '_')
model_str = os.path.join(os.path.dirname(__file__), model_relative_path).encode()
resource_filename = os.path.join(os.path.dirname(__file__), 'engines/snowboy/resources/common.res').encode()
self._snowboy = snowboydetect.SnowboyDetect(resource_filename=resource_filename, model_str=model_str)
# https://github.com/Kitt-AI/snowboy#pretrained-universal-models
if keyword == 'jarvis':
self._snowboy.SetSensitivity(('%f,%f' % (sensitivity, sensitivity)).encode())
else:
self._snowboy.SetSensitivity(str(sensitivity).encode())
if keyword in {'alexa', 'computer', 'jarvis', 'view glass'}:
self._snowboy.ApplyFrontend(True)
else:
self._snowboy.ApplyFrontend(False)
def process(self, pcm):
assert pcm.dtype == np.int16
return self._snowboy.RunDetection(pcm.tobytes()) == 1
def release(self):
pass
def __str__(self):
return 'Snowboy'
class NyumayaEngine(Engine):
def __init__(self, keyword, sensitivity):
#logging.info("INIT NYUMAYA")
keyword = keyword.lower()
model_relative_path = 'engines/nyumaya_audio_recognition/models/Hotword/%s_v1.0.0.premium' % keyword
model_str = os.path.join(os.path.dirname(__file__), model_relative_path)
libpath="engines/nyumaya_audio_recognition/lib/linux_x86_64/libnyumaya_premium.so.1.0.0"
self._extractor = FeatureExtractor(libpath)
self._detector = AudioRecognition(libpath)
keywordId = self._detector.addModel(model_str,sensitivity)
def process(self, pcm):
assert pcm.dtype == np.int16
#logging.info(len(pcm))
features = self._extractor.signalToMel(pcm.tobytes(),1.0)
return self._detector.runDetection(features) == 1
def release(self):
pass
def __str__(self):
return 'Nyumaya'
| 33.397959
| 122
| 0.669111
| 770
| 6,546
| 5.497403
| 0.292208
| 0.018427
| 0.021262
| 0.02008
| 0.274274
| 0.246161
| 0.131349
| 0.116702
| 0.086936
| 0.061422
| 0
| 0.012224
| 0.225176
| 6,546
| 195
| 123
| 33.569231
| 0.822358
| 0.101283
| 0
| 0.338346
| 0
| 0
| 0.119563
| 0.066519
| 0
| 0
| 0
| 0
| 0.030075
| 1
| 0.172932
| false
| 0.015038
| 0.067669
| 0.037594
| 0.458647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0efb8a4758e96798acb51aad7950963bd5e398c7
| 1,549
|
py
|
Python
|
objO_and_ctxMgr/harakiri.py
|
thirschbuechler/didactic-barnacles
|
88d0a2b572aacb2cb45e68bb4f05fa5273224439
|
[
"MIT"
] | null | null | null |
objO_and_ctxMgr/harakiri.py
|
thirschbuechler/didactic-barnacles
|
88d0a2b572aacb2cb45e68bb4f05fa5273224439
|
[
"MIT"
] | null | null | null |
objO_and_ctxMgr/harakiri.py
|
thirschbuechler/didactic-barnacles
|
88d0a2b572aacb2cb45e68bb4f05fa5273224439
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 20 22:18:58 2020

@author: https://stackoverflow.com/questions/293431/python-object-deleting-itself
@editor: thirschbuechler

This is probably overkill as an alternative way of exiting a with-context (rather
than by raising an exception), but it may be needed, or related to getting rid of
the visa-handle within thvisa.

# for some reason, __enter__ does not work in the with-context
"""

# NOTE: This is Python 3 code; it should work with Python 2, but I haven't tested it.
import weakref  # https://docs.python.org/3/library/weakref.html


class InsaneClass(object):
    _alive = []

    def __new__(cls):  # __new__ receives the class (cls) rather than an instance (self)
        self = super().__new__(cls)
        InsaneClass._alive.append(self)
        return weakref.proxy(self)

    def commit_suicide(self):
        self._alive.remove(self)

    def __enter__(self):
        print("enter says hello")
        return self

    def __init__(self):
        pass

    def __exit__(self, exc_type, exc_value, tb):  # "with" context exit
        print("bye")


if __name__ == '__main__':  # test if called as executable, not as library
    instance = InsaneClass()
    instance.__enter__()
    instance.commit_suicide()
    # print(instance)
    print(InsaneClass)  # the class object
    print(InsaneClass().__enter__())  # a (proxied) instance

    print("now, something completely different!")
    with InsaneClass() as i:
        i.commit_suicide()
        print(i)
| 29.226415
| 92
| 0.666882
| 209
| 1,549
| 4.712919
| 0.61244
| 0.033503
| 0.036548
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019247
| 0.228535
| 1,549
| 53
| 93
| 29.226415
| 0.805021
| 0.475791
| 0
| 0
| 0
| 0
| 0.079146
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.192308
| false
| 0.038462
| 0.038462
| 0
| 0.384615
| 0.230769
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0efbf67a5c5c854b7696ec4d515b55094ea51fb7
| 6,593
|
py
|
Python
|
chapter2/gestures.py
|
srimani-programmer/Opencv-with-Python-Blueprints-second-Edition
|
8762022a58a379229f02d7250d8344087d98516d
|
[
"MIT"
] | 39
|
2019-11-25T21:30:14.000Z
|
2022-03-29T05:12:43.000Z
|
chapter2/gestures.py
|
srimani-programmer/Opencv-with-Python-Blueprints-second-Edition
|
8762022a58a379229f02d7250d8344087d98516d
|
[
"MIT"
] | 2
|
2020-04-19T20:38:15.000Z
|
2021-09-29T05:02:48.000Z
|
chapter2/gestures.py
|
srimani-programmer/Opencv-with-Python-Blueprints-second-Edition
|
8762022a58a379229f02d7250d8344087d98516d
|
[
"MIT"
] | 29
|
2019-12-22T15:18:18.000Z
|
2021-12-25T13:52:44.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""A module containing an algorithm for hand gesture recognition"""
import numpy as np
import cv2
from typing import Tuple
__author__ = "Michael Beyeler"
__license__ = "GNU GPL 3.0 or later"
def recognize(img_gray):
"""Recognizes hand gesture in a single-channel depth image
This method estimates the number of extended fingers based on
a single-channel depth image showing a hand and arm region.
:param img_gray: single-channel depth image
:returns: (num_fingers, img_draw) The estimated number of
extended fingers and an annotated RGB image
"""
# segment arm region
segment = segment_arm(img_gray)
# find the hull of the segmented area, and based on that find the
# convexity defects
(contour, defects) = find_hull_defects(segment)
# detect the number of fingers depending on the contours and convexity
# defects, then draw defects that belong to fingers green, others red
img_draw = cv2.cvtColor(segment, cv2.COLOR_GRAY2RGB)
(num_fingers, img_draw) = detect_num_fingers(contour,
defects, img_draw)
return (num_fingers, img_draw)
def segment_arm(frame: np.ndarray, abs_depth_dev: int = 14) -> np.ndarray:
"""Segments arm region
This method accepts a single-channel depth image of an arm and
hand region and extracts the segmented arm region.
It is assumed that the hand is placed in the center of the image.
:param frame: single-channel depth image
:returns: binary image (mask) of segmented arm region, where
arm=255, else=0
"""
height, width = frame.shape
# find center (21x21 pixel) region of the image frame
center_half = 10 # half-width of 21 is 21/2-1
center = frame[height // 2 - center_half:height // 2 + center_half,
width // 2 - center_half:width // 2 + center_half]
# find median depth value of center region
med_val = np.median(center)
# keep only pixels whose depth is within abs_depth_dev of the median (mark them 128)
frame = np.where(abs(frame - med_val) <= abs_depth_dev,
128, 0).astype(np.uint8)
# morphological closing to smooth the segmentation mask
kernel = np.ones((3, 3), np.uint8)
frame = cv2.morphologyEx(frame, cv2.MORPH_CLOSE, kernel)
# connected component analysis: flood fill outward from the image center
small_kernel = 3
frame[height // 2 - small_kernel:height // 2 + small_kernel,
width // 2 - small_kernel:width // 2 + small_kernel] = 128
mask = np.zeros((height + 2, width + 2), np.uint8)
flood = frame.copy()
cv2.floodFill(flood, mask, (width // 2, height // 2), 255,
flags=4 | (255 << 8))
ret, flooded = cv2.threshold(flood, 129, 255, cv2.THRESH_BINARY)
return flooded
def find_hull_defects(segment: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Find hull defects
This method finds all defects in the hull of a segmented arm
region.
:param segment: a binary image (mask) of a segmented arm region,
where arm=255, else=0
:returns: (max_contour, defects) the largest contour in the image
and all corresponding defects
"""
contours, hierarchy = cv2.findContours(segment, cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)
# find largest area contour
max_contour = max(contours, key=cv2.contourArea)
epsilon = 0.01 * cv2.arcLength(max_contour, True)
max_contour = cv2.approxPolyDP(max_contour, epsilon, True)
# find convexity hull and defects
hull = cv2.convexHull(max_contour, returnPoints=False)
defects = cv2.convexityDefects(max_contour, hull)
return max_contour, defects
def detect_num_fingers(contour: np.ndarray, defects: np.ndarray,
img_draw: np.ndarray, thresh_deg: float = 80.0) -> Tuple[int, np.ndarray]:
"""Detects the number of extended fingers
This method determines the number of extended fingers based on a
contour and convexity defects.
It will annotate an RGB color image of the segmented arm region
with all relevant defect points and the hull.
:param contours: a list of contours
:param defects: a list of convexity defects
:param img_draw: an RGB color image to be annotated
:returns: (num_fingers, img_draw) the estimated number of extended
fingers and an annotated RGB color image
"""
# if there are no convexity defects, possibly no hull found or no
# fingers extended
if defects is None:
return 0, img_draw
# we assume the wrist will generate two convexity defects (one on each
# side), so if there are no additional defect points, there are no
# fingers extended
if len(defects) <= 2:
return 0, img_draw
# if there is a sufficient amount of convexity defects, we will find a
# defect point between two fingers so to get the number of fingers,
# start counting at 1
num_fingers = 1
# Defects are of shape (num_defects,1,4)
for defect in defects[:, 0, :]:
# Each defect is an array of four integers: the first three are
# indexes of the start, end and furthest points, respectively
# contour is of shape (num_points,1,2) - 2 for point coordinates
start, end, far = [contour[i][0] for i in defect[:3]]
# draw the hull
cv2.line(img_draw, tuple(start), tuple(end), (0, 255, 0), 2)
# if angle is below a threshold, defect point belongs to two
# extended fingers
if angle_rad(start - far, end - far) < deg2rad(thresh_deg):
# increment number of fingers
num_fingers += 1
# draw point as green
cv2.circle(img_draw, tuple(far), 5, (0, 255, 0), -1)
else:
# draw point as red
cv2.circle(img_draw, tuple(far), 5, (0, 0, 255), -1)
# make sure we cap the number of fingers
return min(5, num_fingers), img_draw
def angle_rad(v1, v2):
"""Angle in radians between two vectors
This method returns the angle (in radians) between two array-like
vectors using the cross-product method, which is more accurate for
small angles than the dot-product-acos method.
"""
return np.arctan2(np.linalg.norm(np.cross(v1, v2)), np.dot(v1, v2))
def deg2rad(angle_deg):
"""Convert degrees to radians
This method converts an angle given in degrees e[0,360) into radians
e[0,2*np.pi)
"""
return angle_deg / 180.0 * np.pi
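A minimal usage sketch (not part of the original module; the synthetic frame and blob values are assumptions): it builds a single-channel depth frame with a filled circle at the image center and runs it through recognize().
# --- usage sketch: feed a synthetic depth frame through the pipeline ---
if __name__ == '__main__':
    # fake 8-bit "depth" image: a filled circle centred in the frame
    demo_frame = np.zeros((480, 640), np.uint8)
    cv2.circle(demo_frame, (320, 240), 100, 200, -1)
    n_fingers, annotated = recognize(demo_frame)
    # a plain round blob has no finger-like defects, so this should print 0
    print("estimated extended fingers:", n_fingers)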
| 36.425414
| 97
| 0.64341
| 928
| 6,593
| 4.484914
| 0.279095
| 0.023546
| 0.015858
| 0.027631
| 0.166026
| 0.103075
| 0.103075
| 0.081691
| 0.036521
| 0.036521
| 0
| 0.030094
| 0.27423
| 6,593
| 180
| 98
| 36.627778
| 0.839707
| 0.477931
| 0
| 0.033333
| 0
| 0
| 0.011312
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.05
| 0
| 0.283333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0efc1162d67d89e44bbe7d9f3dc36378c583e84a
| 688
|
py
|
Python
|
xlab/cli.py
|
csalcedo001/xlab
|
8c51f035a870dd57339ff0208a3ab27ef6b8b41f
|
[
"Apache-2.0"
] | 1
|
2022-03-23T23:44:14.000Z
|
2022-03-23T23:44:14.000Z
|
xlab/cli.py
|
csalcedo001/xlab
|
8c51f035a870dd57339ff0208a3ab27ef6b8b41f
|
[
"Apache-2.0"
] | null | null | null |
xlab/cli.py
|
csalcedo001/xlab
|
8c51f035a870dd57339ff0208a3ab27ef6b8b41f
|
[
"Apache-2.0"
] | null | null | null |
import sys
import os
from . import filesys
MAIN_USAGE_MESSAGE = """
usage: xlab command ...
Options:
positional arguments:
command
project
"""
def project(args):
if len(args) != 1:
print("error: Invalid arguments.")
exit()
if args[0] == 'init':
root = os.getcwd()
dirs = filesys.Directories()
dirs.set_root(root)
def main():
if len(sys.argv) <= 1:
print(MAIN_USAGE_MESSAGE)
exit()
command = sys.argv[1]
args = sys.argv[2:]
if command == 'project':
exe = project
else:
print("error: No command 'xlab {}'.".format(command))
exit()
exe(args)
| 16.380952
| 61
| 0.543605
| 80
| 688
| 4.6125
| 0.45
| 0.056911
| 0.086721
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010684
| 0.319767
| 688
| 42
| 62
| 16.380952
| 0.777778
| 0
| 0
| 0.1
| 0
| 0
| 0.208999
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.1
| 0
| 0.166667
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0efc40d3300b3d6d0a1fa06e980fe71072140597
| 16,294
|
py
|
Python
|
python/paddle/optimizer/adamw.py
|
jzhang533/Paddle
|
3227b2c401a80104e0c01dedcef2061ffa1ebbed
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/optimizer/adamw.py
|
jzhang533/Paddle
|
3227b2c401a80104e0c01dedcef2061ffa1ebbed
|
[
"Apache-2.0"
] | 1
|
2021-09-07T10:31:38.000Z
|
2021-09-08T09:18:20.000Z
|
python/paddle/optimizer/adamw.py
|
jzhang533/Paddle
|
3227b2c401a80104e0c01dedcef2061ffa1ebbed
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .optimizer import Optimizer
from .adam import Adam
from ..fluid import core
from ..fluid import framework
from ..fluid.framework import Variable
from ..fluid.dygraph import base as imperative_base
from collections.abc import Callable
import paddle
_C_ops = core.ops
__all__ = []
class AdamW(Adam):
r"""
The AdamW optimizer is implemented based on the AdamW Optimization
in paper `DECOUPLED WEIGHT DECAY REGULARIZATION <https://arxiv.org/pdf/1711.05101.pdf>`_.
It resolves the problem of L2 regularization failure in the Adam optimizer.
.. math::
t & = t + 1
moment\_1\_out & = {\beta}_1 * moment\_1 + (1 - {\beta}_1) * grad
moment\_2\_out & = {\beta}_2 * moment\_2 + (1 - {\beta}_2) * grad * grad
learning\_rate & = learning\_rate *
\frac{\sqrt{1 - {\beta}_2^t}}{1 - {\beta}_1^t}
param\_out & = param - learning\_rate * (\frac{moment\_1}{\sqrt{moment\_2} + \epsilon} + \lambda * param)
Args:
learning_rate (float|LRScheduler, optional): The learning rate used to update ``Parameter``.
It can be a float value or a LRScheduler. The default value is 0.001.
parameters (list|tuple, optional): List/Tuple of ``Tensor`` names to update to minimize ``loss``. \
This parameter is required in dygraph mode. And you can specify different options for \
different parameter groups such as the learning rate, weight decay, etc, \
then the parameters are a list of dicts. Note that the learning_rate in parameter groups \
represents the scale of base learning_rate. \
The default value is None in static mode, at this time all parameters will be updated.
beta1 (float|Tensor, optional): The exponential decay rate for the 1st moment estimates.
It should be a float number or a Tensor with shape [1] and data type as float32.
The default value is 0.9.
beta2 (float|Tensor, optional): The exponential decay rate for the 2nd moment estimates.
It should be a float number or a Tensor with shape [1] and data type as float32.
The default value is 0.999.
epsilon (float, optional): A small float value for numerical stability.
The default value is 1e-08.
weight_decay (float|Tensor, optional): The weight decay coefficient, it can be float or Tensor. The default value is 0.01.
lr_ratio (function|None, optional): If it is not None,
the learning rate will be updated with layerwise learning rate ratio.
Otherwise, the learning rate is the original.
Default: None.
apply_decay_param_fun (function|None, optional): If it is not None,
only tensors that makes apply_decay_param_fun(Tensor.name)==True
will be updated with weight decay. It only works when we want to specify tensors.
Default: None.
grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of
some derived class of ``GradientClipBase`` . There are three clipping strategies
( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` ,
:ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
lazy_mode (bool, optional): The official Adam algorithm has two moving-average accumulators.
The accumulators are updated at every step. Every element of the two moving averages
is updated in both dense mode and sparse mode. If the parameter is very large,
the update may be very slow. The lazy mode only updates the elements that have a
gradient in the current mini-batch, so it will be much faster. But this mode has
different semantics from the original Adam algorithm and may lead to different results.
The default value is False.
multi_precision (bool, optional): Whether to use multi-precision during weight updating. Default is false.
name (str, optional): Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`.
The default value is None.
**Notes**:
**Currently, AdamW doesn't support sparse parameter optimization.**
Examples:
.. code-block:: python
import paddle
linear = paddle.nn.Linear(10, 10)
inp = paddle.rand([10,10], dtype="float32")
out = linear(inp)
loss = paddle.mean(out)
beta1 = paddle.to_tensor([0.9], dtype="float32")
beta2 = paddle.to_tensor([0.99], dtype="float32")
adam = paddle.optimizer.AdamW(learning_rate=0.1,
parameters=linear.parameters(),
beta1=beta1,
beta2=beta2,
weight_decay=0.01)
out.backward()
adam.step()
adam.clear_grad()
#Note that the learning_rate of linear_2 is 0.01.
linear_1 = paddle.nn.Linear(10, 10)
linear_2 = paddle.nn.Linear(10, 10)
inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
out = linear_1(inp)
out = linear_2(out)
loss = paddle.mean(out)
adam = paddle.optimizer.AdamW(
learning_rate=0.1,
parameters=[{
'params': linear_1.parameters()
}, {
'params': linear_2.parameters(),
'weight_decay': 0.001,
'learning_rate': 0.1,
'beta1': 0.8
}],
weight_decay=0.01,
beta1=0.9)
out.backward()
adam.step()
adam.clear_grad()
"""
def __init__(self,
learning_rate=0.001,
beta1=0.9,
beta2=0.999,
epsilon=1e-8,
parameters=None,
weight_decay=0.01,
lr_ratio=None,
apply_decay_param_fun=None,
grad_clip=None,
lazy_mode=False,
multi_precision=False,
name=None):
assert learning_rate is not None
assert beta1 is not None
assert beta2 is not None
assert epsilon is not None
if not 0 <= beta1 < 1:
raise ValueError("Invaild value of beta1, expect beta1 in [0,1).")
if not 0 <= beta2 < 1:
raise ValueError("Invaild value of beta2, expect beta2 in [0,1).")
if not 0 <= epsilon:
raise ValueError("Invaild value of epsilon, expect epsilon >= 0.")
coeff = weight_decay
if not isinstance(coeff, float) and \
not isinstance(coeff, framework.Variable):
raise TypeError("coeff should be float or Tensor.")
self._params_name = set()
self._apply_decay_param_fun = apply_decay_param_fun
self._coeff = coeff
self._lr_to_coeff = dict()
if lr_ratio is not None:
assert isinstance(lr_ratio, Callable)
if core.is_compiled_with_xpu() or core.is_compiled_with_npu():
raise NotImplementedError(
"'lr_ratio' is unimplemented in XPU and NPU")
self._lr_ratio = lr_ratio
super(AdamW, self).__init__(
learning_rate=learning_rate,
parameters=parameters,
beta1=beta1,
beta2=beta2,
epsilon=epsilon,
grad_clip=grad_clip,
name=name,
lazy_mode=lazy_mode,
multi_precision=multi_precision)
self._default_dict = {'coeff': coeff}
self.type = "adamw"
if core.is_compiled_with_xpu():
self.type = "adam"
# Use _auxiliary_vars together with _set_auxiliary_var/_get_auxiliary_var to pass auxiliary state (e.g. found_inf) into the op.
self._auxiliary_vars = dict()
def _set_auxiliary_var(self, key, val):
self._auxiliary_vars[key] = val
def _get_auxiliary_var(self, key):
if key in self._auxiliary_vars:
return self._auxiliary_vars[key]
else:
return None
def _append_decoupled_weight_decay(self, block, param_and_grad):
"""
Add decoupled weight decay op.
parameter = parameter - parameter * coeff * lr
Args:
block: block in which variable is to be created
param_and_grad: (parameters, gradients) pairs,
the parameters need to decay.
Raises:
Exception: The type of coeff and parameter is not consistent.
"""
if isinstance(param_and_grad, dict):
param_and_grad = self._update_param_group(param_and_grad)
param, grad = param_and_grad
if self._apply_decay_param_fun is not None \
and not self._apply_decay_param_fun(param.name):
return
if isinstance(self._learning_rate, float):
learning_rate = self._learning_rate
else:
# NOTE. We add this function to the _append_optimize_op(),
# for we must make sure _create_param_lr() be called after
# optimizer._create_global_learning_rate().
learning_rate = self._create_param_lr(param_and_grad)
with block.program._optimized_guard(
[param, grad]), framework.name_scope('weight decay'):
self._params_name.add(param.name)
# If it has been calculated, the result will be reused.
# NOTE(wangxi): In dygraph mode, apply_gradient will be executed
# every step, so need clear _lr_to_coeff every step,
# we do this in _create_optimization_pass
decay_coeff = self._lr_to_coeff.get(learning_rate, None)
if decay_coeff is None:
# NOTE(wangxi): for pipeline to set device:all
with paddle.static.device_guard(None):
decay_coeff = 1.0 - learning_rate * self._coeff
self._lr_to_coeff[learning_rate] = decay_coeff
find_master = (self._multi_precision and
param.dtype == core.VarDesc.VarType.FP16)
if find_master:
master_weight = self._master_weights[param.name]
scaled_param = master_weight * decay_coeff
paddle.fluid.layers.assign(
input=scaled_param, output=master_weight)
else:
scaled_param = param * decay_coeff
paddle.fluid.layers.assign(input=scaled_param, output=param)
def _append_optimize_op(self, block, param_and_grad):
if paddle.is_compiled_with_xpu():
self._append_decoupled_weight_decay(block, param_and_grad)
return super(AdamW, self)._append_optimize_op(block, param_and_grad)
assert isinstance(block, framework.Block)
if isinstance(param_and_grad, dict):
param_and_grad = self._update_param_group(param_and_grad)
param, grad = param_and_grad
# Whether we should do weight decay for the parameter.
with_decay = True
if self._apply_decay_param_fun is not None \
and not self._apply_decay_param_fun(param.name):
with_decay = False
moment1 = self._get_accumulator(self._moment1_acc_str,
param_and_grad[0])
moment2 = self._get_accumulator(self._moment2_acc_str,
param_and_grad[0])
beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,
param_and_grad[0])
beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str,
param_and_grad[0])
find_master = self._multi_precision and param_and_grad[
0].dtype == core.VarDesc.VarType.FP16
master_weight = (self._master_weights[param_and_grad[0].name]
if find_master else None)
lr = self._create_param_lr(param_and_grad)
# create the adamw optimize op
if framework.in_dygraph_mode():
lr_ratio_ = 1. if self._lr_ratio is None else self._lr_ratio(
param_and_grad[0])
_beta1 = self._beta1 if not isinstance(
self._beta1, Variable) else self._beta1.numpy().item(0)
_beta2 = self._beta2 if not isinstance(
self._beta2, Variable) else self._beta2.numpy().item(0)
_, _, _, _, _ = _C_ops.adamw(
param_and_grad[0], param_and_grad[1], lr, moment1, moment2,
beta1_pow_acc, beta2_pow_acc, param_and_grad[0], moment1,
moment2, beta1_pow_acc, beta2_pow_acc, 'epsilon', self._epsilon,
'lazy_mode', self._lazy_mode, 'min_row_size_to_use_multithread',
1000, 'beta1', _beta1, 'beta2', _beta2, 'coeff', self._coeff,
"lr_ratio", lr_ratio_)
return None
inputs = {
"Param": [param_and_grad[0]],
"Grad": [param_and_grad[1]],
"LearningRate": [lr],
"Moment1": [moment1],
"Moment2": [moment2],
"Beta1Pow": [beta1_pow_acc],
"Beta2Pow": [beta2_pow_acc],
}
# Pass found_inf to adamw so the update is skipped not only for param, but also for momentum and beta_pow
found_inf = self._get_auxiliary_var('found_inf')
if found_inf:
inputs['SkipUpdate'] = found_inf
outputs = {
"ParamOut": [param_and_grad[0]],
"Moment1Out": [moment1],
"Moment2Out": [moment2],
"Beta1PowOut": [beta1_pow_acc],
"Beta2PowOut": [beta2_pow_acc],
}
attrs = {
"lazy_mode": self._lazy_mode,
"min_row_size_to_use_multithread": 1000,
"multi_precision": find_master,
"with_decay": with_decay,
"coeff": self._coeff,
"lr_ratio": 1.
if self._lr_ratio is None else self._lr_ratio(param_and_grad[0])
}
if isinstance(self._beta1, Variable):
inputs['Beta1Tensor'] = self._beta1
else:
attrs['beta1'] = self._beta1
if isinstance(self._beta2, Variable):
inputs['Beta2Tensor'] = self._beta2
else:
attrs['beta2'] = self._beta2
if isinstance(self._epsilon, Variable):
inputs['EpsilonTensor'] = self._epsilon
else:
attrs['epsilon'] = self._epsilon
if find_master:
inputs["MasterParam"] = master_weight
outputs["MasterParamOut"] = master_weight
adamw_op = block.append_op(
type=self.type,
inputs=inputs,
outputs=outputs,
attrs=attrs,
stop_gradient=True)
return adamw_op
def _create_optimization_pass(self, parameters_and_grads):
optimize_ops = super(
AdamW, self)._create_optimization_pass(parameters_and_grads)
# In dygraph mode, clear _lr_to_coeff after applied gradient
self._lr_to_coeff = dict()
return optimize_ops
def __str__(self):
return " ".join(["Weight Decay, params:", ",".join(self._params_name)])
def _update_param_group(self, parameters):
self._coeff = parameters.get('coeff', self._default_dict['coeff'])
parameters = parameters.get('params')
return parameters
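A hedged NumPy sketch of the update equations in the class docstring above (illustrative only; the real update runs inside Paddle's fused adamw kernel, and the helper name here is made up):
import numpy as np

def adamw_step(param, grad, m1, m2, t, lr=0.001, beta1=0.9, beta2=0.999,
               eps=1e-8, coeff=0.01):
    # moment updates, exactly as in the docstring math
    m1 = beta1 * m1 + (1 - beta1) * grad
    m2 = beta2 * m2 + (1 - beta2) * grad * grad
    # bias-corrected step size
    lr_t = lr * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)
    # decoupled weight decay: coeff plays the role of lambda (weight_decay)
    param = param - lr_t * (m1 / (np.sqrt(m2) + eps) + coeff * param)
    return param, m1, m2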
| 42.543081
| 130
| 0.603044
| 2,004
| 16,294
| 4.668164
| 0.19511
| 0.0248
| 0.037199
| 0.016676
| 0.243934
| 0.186424
| 0.155639
| 0.124425
| 0.109674
| 0.08915
| 0
| 0.023185
| 0.31441
| 16,294
| 382
| 131
| 42.65445
| 0.814251
| 0.419541
| 0
| 0.134328
| 0
| 0
| 0.069296
| 0.006907
| 0
| 0
| 0
| 0
| 0.029851
| 1
| 0.039801
| false
| 0.00995
| 0.039801
| 0.004975
| 0.129353
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0efde6b5a9c1239ffa852e70caccc25e5c41c1dd
| 1,880
|
py
|
Python
|
tests/resources/test_interactions.py
|
VinLau/BAR_API
|
0719a5fbc08872f667590b27347af9bfed669bca
|
[
"MIT"
] | 1
|
2020-07-06T20:12:25.000Z
|
2020-07-06T20:12:25.000Z
|
tests/resources/test_interactions.py
|
VinLau/BAR_API
|
0719a5fbc08872f667590b27347af9bfed669bca
|
[
"MIT"
] | 37
|
2020-06-27T02:58:23.000Z
|
2022-03-29T00:35:28.000Z
|
tests/resources/test_interactions.py
|
VinLau/BAR_API
|
0719a5fbc08872f667590b27347af9bfed669bca
|
[
"MIT"
] | 9
|
2020-06-26T23:09:16.000Z
|
2022-01-26T21:20:46.000Z
|
from api import app
from unittest import TestCase
class TestIntegrations(TestCase):
maxDiff = None
def setUp(self):
self.app_client = app.test_client()
def test_get_itrns(self):
"""
This function tests retrieving protein interactions for various species' genes.
"""
# Valid request rice
response = self.app_client.get("/interactions/rice/LOC_Os01g52560")
expected = {
"wasSuccessful": True,
"data": [
{
"protein_1": "LOC_Os01g01080",
"protein_2": "LOC_Os01g52560",
"total_hits": 1,
"Num_species": 1,
"Quality": 1,
"pcc": 0.65,
},
{
"protein_1": "LOC_Os01g52560",
"protein_2": "LOC_Os01g73310",
"total_hits": 1,
"Num_species": 1,
"Quality": 1,
"pcc": -0.116,
},
],
}
self.assertEqual(response.json, expected)
# Invalid species
response = self.app_client.get("/interactions/poplar/abc")
expected = {"wasSuccessful": False, "error": "Invalid species or gene ID"}
self.assertEqual(response.json, expected)
# Invalid gene id
response = self.app_client.get("/interactions/rice/abc")
expected = {"wasSuccessful": False, "error": "Invalid species or gene ID"}
self.assertEqual(response.json, expected)
# Gene does not exist
response = self.app_client.get("/interactions/rice/LOC_Os01g52565")
expected = {
"wasSuccessful": False,
"error": "There are no data found for the given gene",
}
self.assertEqual(response.json, expected)
| 31.864407
| 86
| 0.519149
| 176
| 1,880
| 5.420455
| 0.386364
| 0.036688
| 0.068134
| 0.08805
| 0.51782
| 0.481132
| 0.392034
| 0.350105
| 0.259958
| 0.259958
| 0
| 0.050085
| 0.373404
| 1,880
| 58
| 87
| 32.413793
| 0.759762
| 0.079787
| 0
| 0.333333
| 0
| 0
| 0.253083
| 0.065766
| 0
| 0
| 0
| 0
| 0.095238
| 1
| 0.047619
| false
| 0
| 0.047619
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0efdfc79a9eea6c3e7cf614d63469062b5917d5a
| 2,261
|
py
|
Python
|
src/dialogflow-java-client-master/samples/clients/VirtualTradingAssistant/src/main/java/ai/examples/scraper/historicalScrape.py
|
16kozlowskim/Group-20-SE
|
ceb8c319643964a3f478772d8f10090962df567c
|
[
"MIT"
] | null | null | null |
src/dialogflow-java-client-master/samples/clients/VirtualTradingAssistant/src/main/java/ai/examples/scraper/historicalScrape.py
|
16kozlowskim/Group-20-SE
|
ceb8c319643964a3f478772d8f10090962df567c
|
[
"MIT"
] | null | null | null |
src/dialogflow-java-client-master/samples/clients/VirtualTradingAssistant/src/main/java/ai/examples/scraper/historicalScrape.py
|
16kozlowskim/Group-20-SE
|
ceb8c319643964a3f478772d8f10090962df567c
|
[
"MIT"
] | null | null | null |
# install BeautifulSoup4 before running
#
# prints out historical data in csv format:
#
# [date, open, high, low, close, volume]
#
import re, csv, sys, urllib2
from bs4 import BeautifulSoup
# If the start date and end date are the same, only one value will be returned;
# if not, multiple values are returned, which can be used to make calculations
#
# ticker (company symbol)
# interval (d (daily), m (monthly), q (quarterly), y (yearly))
# start_date (YYYYMMDD)
# end_date (YYYYMMDD)
def get_historical_data(ticker, interval, start_date, end_date):
#pathToCSV = '/Users/Michal/Downloads/dialogflow-java-client-master2/samples/clients/VirtualTradingAssistant/src/main/java/ai/api/examples/fileStore/file.csv'
#pathToCSV = 'C:\\Users\\ojwoo\\Documents\\Warwick\\CS261\\Coursework\\dialogflow-java-client-master\\samples\\clients\\VirtualTradingAssistant\\src\\main\\java\\ai\\api\\examples\\fileStore\\file.csv'
#pathToCSV = '/Users/Michal/Desktop/apache-tomcat-8.5.28/bin/misc/file.csv'
pathToCSV = 'C:\\apache-tomcat-8.5.28\\bin\\misc\\file.csv'
url_builder = []
url_builder.append('https://stooq.com/q/d/?s=')
url_builder.append(ticker)
url_builder.append('&c=0&d1=')
url_builder.append(start_date)
url_builder.append('&d2=')
url_builder.append(end_date)
url_builder.append('&i=')
url_builder.append(interval)
url = ''.join(url_builder)
page = urllib2.urlopen(url)
soup = BeautifulSoup(page, 'html.parser')
link = soup.findAll('a', href=re.compile('^q/d/l/'))
link = re.search('"(.*)"', str(link))
try:
link = link.group(1)
except AttributeError:
with open(pathToCSV, 'w') as csvfile:
wr = csv.writer(csvfile, delimiter='@', quotechar='#')
wr.writerow('')
exit()
link = link.replace('amp;', '')
arr = []
arr.append('https://stooq.com/')
arr.append(link)
link = ''.join(arr)
response = urllib2.urlopen(link)
cr = csv.reader(response)
with open(pathToCSV, 'w') as csvfile:
wr = csv.writer(csvfile, delimiter='@', quotechar='#')
wr.writerows(cr)
def main():
args = sys.argv
get_historical_data(args[1], args[2], args[3], args[4])
if __name__ == '__main__':
main()
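A small offline sketch of the stooq.com query URL that get_historical_data() assembles (the ticker and dates are made-up examples; no network access needed):
# --- illustration of the URL built above ---
ticker, interval, start_date, end_date = 'aapl.us', 'd', '20180101', '20180131'
url = ('https://stooq.com/q/d/?s=' + ticker + '&c=0&d1=' + start_date +
       '&d2=' + end_date + '&i=' + interval)
print(url)  # the CSV download link is then scraped from this page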
| 29.75
| 205
| 0.658116
| 302
| 2,261
| 4.834437
| 0.486755
| 0.068493
| 0.087671
| 0.054795
| 0.247945
| 0.247945
| 0.247945
| 0.247945
| 0.247945
| 0.206849
| 0
| 0.013441
| 0.177355
| 2,261
| 75
| 206
| 30.146667
| 0.771505
| 0.361345
| 0
| 0.1
| 0
| 0
| 0.102313
| 0.031535
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.05
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0eff08358676f71813cab0fd67b31eed87ddaad4
| 5,460
|
py
|
Python
|
client/client.py
|
odontomachus/hotbox
|
d42c48d7f056f2b1f7bd707ad674e737a3c2fe08
|
[
"MIT"
] | null | null | null |
client/client.py
|
odontomachus/hotbox
|
d42c48d7f056f2b1f7bd707ad674e737a3c2fe08
|
[
"MIT"
] | null | null | null |
client/client.py
|
odontomachus/hotbox
|
d42c48d7f056f2b1f7bd707ad674e737a3c2fe08
|
[
"MIT"
] | null | null | null |
import sys
import io
from collections import defaultdict
import struct
from time import sleep
import queue
import threading
import serial
from serial import SerialException
RUN_LABELS = ('Time left', 'Temp 1', 'Temp 2', 'Off Goal', 'Temp Change', 'Duty cycle (/30)', 'Heating', 'Cycle', 'Total time', 'Goal temp')
MSG_RUN_STATUS = 1
MSG_CONFIG = 2
MSG_STATUS = 3
MSG_LENGTHS = {MSG_RUN_STATUS: 20, MSG_CONFIG: 9, MSG_STATUS: 5}
STATE_START = 1
STATE_ACTIVE = 2
STATE_READY = 3
STATE_BOOT = 4
STATE_INIT = 5
STATE_DISCONNECTED = 127 # can't connect to serial
HB_CYCLE = 30
class RunStatus:
__slots__ = ('countdown', 't1', 't2', 'dg', 'dt', 'part', 'state', 'cycle', 'time', 'goal')
def __init__(self, message):
(self.t1,
self.t2,
self.countdown,
self.part,
self.cycle,
self.state,
self.dg,
self.dt,
self.time,
self.goal,
) = struct.unpack('=BBLBB?bbLB', message)
def __str__(self):
return "\t".join(
map(str,
(self.countdown,
self.t1,
self.t2,
self.dg,
self.dt,
self.part,
"On" if self.state else "Off",
self.state,
self.cycle,
self.time,
self.goal,
)
))
class OvenConfig:
__slots__ = ('temp', 'time')
def __init__(self, message):
(self.time,
self.temp) = struct.unpack('=LB', message)
class OvenStatus:
__slots__ = ('status',)
def __init__(self, message):
self.status = message[0]
def check_connection(fun):
def inner(self, *args, **kwargs):
if self.state == "connected":
try:
fun(self, *args, **kwargs)
except SerialException:
self.disconnect()
# workaround for bug in pyserial
# http://sourceforge.net/p/pyserial/patches/37/
except TypeError as e:
self.disconnect()
return inner
class Client(threading.Thread):
""" Client class for hotbox serial connection """
parsers = {
MSG_STATUS: OvenStatus,
MSG_RUN_STATUS: RunStatus,
MSG_CONFIG: OvenConfig,
}
def __init__(self):
super().__init__()
self.state = 'disconnected'
self.msg_queue = {MSG_STATUS: queue.Queue(),
MSG_CONFIG: queue.Queue(),
MSG_RUN_STATUS: queue.Queue(),
}
def connect(self, port):
try:
self.conn = serial.Serial(port, 9600, timeout=0.05)
# empty buffer
while len(self.conn.read(1)) > 0:
pass
self.state = 'connected'
sleep(0.01)
self.oven_query_config()
sleep(0.2)
self.oven_status()
except SerialException:
self.disconnect()
# workaround for bug in pyserial
# http://sourceforge.net/p/pyserial/patches/37/
except TypeError as e:
self.disconnect()
finally:
self.start_message = 0
def run(self):
self.running = 1
parsed_length = 0
mtype = 0
msg_length = 0
while self.running:
# Don't do anything if disconnected
if (self.state == 'disconnected'):
sleep(0.1)
continue
try:
c = self.conn.read(1)
except SerialException:
self.disconnect()
continue
# workaround for bug in pyserial
# http://sourceforge.net/p/pyserial/patches/37/
except TypeError as e:
self.disconnect()
continue
# wait for message
if not c:
continue
# this is the message type byte
if parsed_length == 3:
parsed_length += 1
if c[0] == 0:
continue
mtype = c[0]
msg_length = MSG_LENGTHS[mtype]
buffer = bytes()
continue
if parsed_length < 3:
# Abort if not a null byte
if c[0]:
parsed_length = 0
continue
# otherwise increment parsed length
parsed_length += 1
continue
# in any other case this is a data byte
parsed_length += 1
buffer += c
if parsed_length == msg_length:
data = self.parsers[mtype](buffer)
self.msg_queue[mtype].put(data)
parsed_length = 0
mtype = 0
msg_length = 0
@check_connection
def oven_configure(self, ctime, temp):
self.conn.write(b'c'+struct.pack('=LB', ctime, temp))
@check_connection
def oven_start(self):
self.conn.write(b's')
@check_connection
def oven_stop(self):
self.conn.write(b't')
@check_connection
def oven_status(self):
self.conn.write(b'r')
@check_connection
def oven_query_config(self):
self.conn.write(b'q')
def disconnect(self):
self.state = 'disconnected'
self.msg_queue[MSG_STATUS].put(OvenStatus((STATE_DISCONNECTED,)))
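A hedged sketch of the serial framing that run() expects: three null bytes, one message-type byte, then a fixed-length payload (lengths come from MSG_LENGTHS). The payload values below are made up; no serial port is needed:
# --- framing sketch: build a fake run-status frame and parse it back ---
payload = struct.pack('=BBLBB?bbLB', 80, 82, 120, 10, 3, True, -2, 1, 600, 85)
frame = b'\x00\x00\x00' + bytes([MSG_RUN_STATUS]) + payload
status = RunStatus(frame[4:])  # run() strips the 3 null bytes + the type byte
print(RUN_LABELS)
print(status)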
| 26.25
| 140
| 0.512637
| 590
| 5,460
| 4.581356
| 0.262712
| 0.044395
| 0.033296
| 0.040696
| 0.254532
| 0.18276
| 0.18276
| 0.18276
| 0.130226
| 0.130226
| 0
| 0.020659
| 0.388278
| 5,460
| 207
| 141
| 26.376812
| 0.788623
| 0.08956
| 0
| 0.373418
| 0
| 0
| 0.045051
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.094937
| false
| 0.006329
| 0.056962
| 0.006329
| 0.21519
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0eff0ae716a4c5a7fc1773362d577d2a440094dc
| 2,549
|
py
|
Python
|
test/functional/abc-sync-chain.py
|
ComputerCraftr/devault
|
546b54df85e3392f85e7ea5fcd4ea9b395ba8f4c
|
[
"MIT"
] | 35
|
2019-02-23T06:21:13.000Z
|
2021-11-15T11:35:13.000Z
|
test/functional/abc-sync-chain.py
|
ComputerCraftr/devault
|
546b54df85e3392f85e7ea5fcd4ea9b395ba8f4c
|
[
"MIT"
] | 60
|
2019-02-25T18:17:03.000Z
|
2021-07-13T00:14:00.000Z
|
test/functional/abc-sync-chain.py
|
ComputerCraftr/devault
|
546b54df85e3392f85e7ea5fcd4ea9b395ba8f4c
|
[
"MIT"
] | 24
|
2019-02-20T05:37:02.000Z
|
2021-10-29T18:42:10.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test that a node receiving many (potentially out of order) blocks exits
initial block download (IBD; this occurs once it has passed minimumchainwork)
and continues to sync without seizing.
"""
import random
from test_framework.blocktools import create_block, create_coinbase
from test_framework.mininode import (CBlockHeader,
network_thread_start,
P2PInterface,
msg_block,
msg_headers)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import wait_until, p2p_port
NUM_IBD_BLOCKS = 50
class BaseNode(P2PInterface):
def send_header(self, block):
msg = msg_headers()
msg.headers = [CBlockHeader(block)]
self.send_message(msg)
def send_block(self, block):
self.send_message(msg_block(block))
class SyncChainTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
# Setting minimumchainwork makes sure we test IBD as well as post-IBD
self.extra_args = [
["-minimumchainwork={:#x}".format(202 + 2 * NUM_IBD_BLOCKS)]]
def run_test(self):
node0conn = BaseNode()
node0conn.peer_connect('127.0.0.1', p2p_port(0))
network_thread_start()
node0conn.wait_for_verack()
node0 = self.nodes[0]
tip = int(node0.getbestblockhash(), 16)
height = node0.getblockcount() + 1
time = node0.getblock(node0.getbestblockhash())['time'] + 1
blocks = []
for i in range(NUM_IBD_BLOCKS * 2):
block = create_block(tip, create_coinbase(height), time)
block.solve()
blocks.append(block)
tip = block.sha256
height += 1
time += 1
# Headers need to be sent in-order
for b in blocks:
node0conn.send_header(b)
# Send blocks in some random order
for b in random.sample(blocks, len(blocks)):
node0conn.send_block(b)
# The node should eventually, completely sync without getting stuck
def node_synced():
return node0.getbestblockhash() == blocks[-1].hash
wait_until(node_synced)
if __name__ == '__main__':
SyncChainTest().main()
| 31.085366
| 77
| 0.629266
| 301
| 2,549
| 5.159468
| 0.478405
| 0.041854
| 0.043786
| 0.025757
| 0.02962
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025192
| 0.283641
| 2,549
| 81
| 78
| 31.469136
| 0.825301
| 0.229502
| 0
| 0
| 0
| 0
| 0.022576
| 0.011801
| 0
| 0
| 0
| 0
| 0
| 1
| 0.104167
| false
| 0
| 0.104167
| 0.020833
| 0.270833
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
16004b3ebbf7944e6af5eebfe55aa2baa0c582bb
| 1,325
|
py
|
Python
|
djangostagram/posts/models.py
|
hongsemy/InstagramWithDjango
|
18cb273668809fb48d829e1ac11438c51505623a
|
[
"MIT"
] | null | null | null |
djangostagram/posts/models.py
|
hongsemy/InstagramWithDjango
|
18cb273668809fb48d829e1ac11438c51505623a
|
[
"MIT"
] | null | null | null |
djangostagram/posts/models.py
|
hongsemy/InstagramWithDjango
|
18cb273668809fb48d829e1ac11438c51505623a
|
[
"MIT"
] | null | null | null |
from django.db import models
from djangostagram.users import models as user_model
# Create your models here.
# This class is used by other models through inheritance.
# An often-used pattern
class TimeStamedModel(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
# An option that prevents this model from creating its own table in the database
class Meta:
abstract = True
class Posts(TimeStamedModel):
author = models.ForeignKey(
user_model.User,
null = True,
on_delete = models.CASCADE,
related_name = "post_author"
)
caption = models.TextField(blank=True)
image = models.ImageField(blank=True)
image_likes = models.ManyToManyField(user_model.User, related_name='post_image_likes')
class Comments(TimeStamedModel):
author = models.ForeignKey(
user_model.User,
null = True,
on_delete = models.CASCADE,
related_name = "comment_author"
)
posts = models.ForeignKey(
Posts,
null = True,
on_delete = models.CASCADE,
related_name = "comment_post"
)
contents = models.TextField(blank=True)
| 30.813953
| 90
| 0.627925
| 148
| 1,325
| 5.466216
| 0.432432
| 0.044499
| 0.048208
| 0.059333
| 0.375773
| 0.375773
| 0.375773
| 0.289246
| 0.289246
| 0.222497
| 0
| 0
| 0.299623
| 1,325
| 42
| 91
| 31.547619
| 0.871767
| 0.130566
| 0
| 0.322581
| 0
| 0
| 0.046208
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.064516
| 0
| 0.483871
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
160140a1d069dde69b115daae82f3d8b2a6cf9c6
| 497
|
py
|
Python
|
guillotina/contrib/workflows/events.py
|
rboixaderg/guillotina
|
fcae65c2185222272f3b8fee4bc2754e81e0e983
|
[
"BSD-2-Clause"
] | 173
|
2017-03-10T18:26:12.000Z
|
2022-03-03T06:48:56.000Z
|
guillotina/contrib/workflows/events.py
|
rboixaderg/guillotina
|
fcae65c2185222272f3b8fee4bc2754e81e0e983
|
[
"BSD-2-Clause"
] | 921
|
2017-03-08T14:04:43.000Z
|
2022-03-30T10:28:56.000Z
|
guillotina/contrib/workflows/events.py
|
rboixaderg/guillotina
|
fcae65c2185222272f3b8fee4bc2754e81e0e983
|
[
"BSD-2-Clause"
] | 60
|
2017-03-16T19:59:44.000Z
|
2022-03-03T06:48:59.000Z
|
from guillotina.contrib.workflows.interfaces import IWorkflowChangedEvent
from guillotina.events import ObjectEvent
from zope.interface import implementer
@implementer(IWorkflowChangedEvent)
class WorkflowChangedEvent(ObjectEvent):
"""An object has been moved"""
def __init__(self, object, workflow, action, comments):
ObjectEvent.__init__(self, object)
self.object = object
self.workflow = workflow
self.action = action
self.comments = comments
| 31.0625
| 73
| 0.744467
| 50
| 497
| 7.24
| 0.5
| 0.082873
| 0.077348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183099
| 497
| 15
| 74
| 33.133333
| 0.891626
| 0.04829
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.272727
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1601ac11a20c04fcd9a8cadea05debe08ac71228
| 6,340
|
py
|
Python
|
data_steward/cdr_cleaner/cleaning_rules/covid_ehr_vaccine_concept_suppression.py
|
lrwb-aou/curation
|
e80447e56d269dc2c9c8bc79e78218d4b0dc504c
|
[
"MIT"
] | 16
|
2017-06-30T20:05:05.000Z
|
2022-03-08T21:03:19.000Z
|
data_steward/cdr_cleaner/cleaning_rules/covid_ehr_vaccine_concept_suppression.py
|
lrwb-aou/curation
|
e80447e56d269dc2c9c8bc79e78218d4b0dc504c
|
[
"MIT"
] | 342
|
2017-06-23T21:37:40.000Z
|
2022-03-30T16:44:16.000Z
|
data_steward/cdr_cleaner/cleaning_rules/covid_ehr_vaccine_concept_suppression.py
|
lrwb-aou/curation
|
e80447e56d269dc2c9c8bc79e78218d4b0dc504c
|
[
"MIT"
] | 33
|
2017-07-01T00:12:20.000Z
|
2022-01-26T18:06:53.000Z
|
"""
Suppress COVID EHR vaccine concepts.
Original Issues: DC-1692
"""
# Python imports
import logging
# Project imports
from cdr_cleaner.cleaning_rules.deid.concept_suppression import AbstractBqLookupTableConceptSuppression
from constants.cdr_cleaner import clean_cdr as cdr_consts
from common import JINJA_ENV, CDM_TABLES
from utils import pipeline_logging
# Third party imports
from google.cloud.exceptions import GoogleCloudError
LOGGER = logging.getLogger(__name__)
SUPPRESSION_RULE_CONCEPT_TABLE = 'covid_vaccine_concepts'
COVID_VACCINE_CONCEPT_QUERY = JINJA_ENV.from_string("""
CREATE OR REPLACE TABLE `{{project_id}}.{{sandbox_id}}.{{concept_suppression_lookup_table}}` AS
with covid_vacc as (
SELECT *
FROM `{{project_id}}.{{dataset_id}}.concept`
WHERE (
-- done by name and vocab --
REGEXP_CONTAINS(concept_name, r'(?i)(COVID)') AND
REGEXP_CONTAINS(concept_name, r'(?i)(VAC)') AND
vocabulary_id not in ('PPI')
) OR (
-- done by code and vocab --
REGEXP_CONTAINS(concept_code, r'(207)|(208)|(210)|(211)|(212)')
and vocabulary_id = 'CVX'
) OR (
-- done by code and vocab --
REGEXP_CONTAINS(concept_code, r'(91300)|(91301)|(91302)|(91303)|(91304)')
and vocabulary_id = 'CPT4'
)
),
concepts_via_cr as (
select distinct c.*
from `{{project_id}}.{{dataset_id}}.concept`as c
left join `{{project_id}}.{{dataset_id}}.concept_relationship`
on c.concept_id = concept_id_1
where concept_id_2 in (select concept_id from covid_vacc)
# and concept_id_1 not in (select concept_id from covid_vacc)
and (
relationship_id not in ('Subsumes', 'RxNorm dose form of', 'Dose form group of', 'RxNorm - SPL') OR
(relationship_id = 'RxNorm - SPL' and REGEXP_CONTAINS(concept_name, r'(?i)(COVID)'))
)
),
concepts_via_ca as (
select c.*
from `{{project_id}}.{{dataset_id}}.concept`as c
left join `{{project_id}}.{{dataset_id}}.concept_ancestor` as ca
on c.concept_id = ca.descendant_concept_id
where ca.ancestor_concept_id in (select concept_id from covid_vacc)
)
select distinct * from covid_vacc
union distinct
select distinct * from concepts_via_ca
union distinct
select distinct * from concepts_via_cr
""")
class CovidEHRVaccineConceptSuppression(AbstractBqLookupTableConceptSuppression
):
def __init__(self,
project_id,
dataset_id,
sandbox_dataset_id,
table_namer=None):
"""
Initialize the class with proper information.
Set the issue numbers, description and affected datasets. As other tickets may affect
this SQL, append them to the list of Jira Issues.
DO NOT REMOVE ORIGINAL JIRA ISSUE NUMBERS!
"""
desc = "Suppress COVID EHR vaccine concepts."
super().__init__(
issue_numbers=['DC1692'],
description=desc,
affected_datasets=[cdr_consts.REGISTERED_TIER_DEID],
affected_tables=CDM_TABLES,
project_id=project_id,
dataset_id=dataset_id,
sandbox_dataset_id=sandbox_dataset_id,
concept_suppression_lookup_table=SUPPRESSION_RULE_CONCEPT_TABLE,
table_namer=table_namer)
def create_suppression_lookup_table(self, client):
concept_suppression_lookup_query = COVID_VACCINE_CONCEPT_QUERY.render(
project_id=self.project_id,
dataset_id=self.dataset_id,
sandbox_id=self.sandbox_dataset_id,
concept_suppression_lookup_table=self.
concept_suppression_lookup_table)
query_job = client.query(concept_suppression_lookup_query)
result = query_job.result()
if hasattr(result, 'errors') and result.errors:
LOGGER.error(f"Error running job {result.job_id}: {result.errors}")
raise GoogleCloudError(
f"Error running job {result.job_id}: {result.errors}")
def validate_rule(self, client, *args, **keyword_args):
"""
Validates the cleaning rule which deletes or updates the data from the tables.
Method to run validation on cleaning rules that will be updating the values.
For example:
if your class updates all the datetime fields, you should implement
validation that checks that the datetime values needing an update no
longer exist in the table.
if your class deletes a subset of rows in the tables, you should implement
validation that checks that the final row counts plus the deleted rows
equal the initial row counts of the affected tables.
Raises RunTimeError if the validation fails.
"""
raise NotImplementedError("Please fix me.")
def setup_validation(self, client, *args, **keyword_args):
"""
Run required steps for validation setup
Method to run to set up validation on cleaning rules that will be updating or deleting values.
For example:
if your class updates all the datetime fields, you should implement
logic to get the initial list of values which adhere to the condition we are looking for.
if your class deletes a subset of rows in the tables, you should implement
logic to get the row counts of the tables prior to applying the cleaning rule.
"""
raise NotImplementedError("Please fix me.")
if __name__ == '__main__':
import cdr_cleaner.args_parser as parser
import cdr_cleaner.clean_cdr_engine as clean_engine
ARGS = parser.parse_args()
pipeline_logging.configure(level=logging.DEBUG, add_console_handler=True)
if ARGS.list_queries:
clean_engine.add_console_logging()
query_list = clean_engine.get_query_list(
ARGS.project_id, ARGS.dataset_id, ARGS.sandbox_dataset_id,
[(CovidEHRVaccineConceptSuppression,)])
for query in query_list:
LOGGER.info(query)
else:
clean_engine.add_console_logging(ARGS.console_log)
clean_engine.clean_dataset(ARGS.project_id, ARGS.dataset_id,
ARGS.sandbox_dataset_id,
[(CovidEHRVaccineConceptSuppression,)])
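A rough, hypothetical Python equivalent of the base name/code filters in COVID_VACCINE_CONCEPT_QUERY (the real rule runs the Jinja-rendered SQL in BigQuery and also expands matches via concept_relationship and concept_ancestor; this only illustrates the first CTE's matching logic, and the function name is made up):
import re

def looks_like_covid_vaccine(concept_name, concept_code, vocabulary_id):
    by_name = (re.search(r'(?i)COVID', concept_name)
               and re.search(r'(?i)VAC', concept_name)
               and vocabulary_id not in ('PPI',))
    by_cvx = (vocabulary_id == 'CVX'
              and re.search(r'(207)|(208)|(210)|(211)|(212)', concept_code))
    by_cpt = (vocabulary_id == 'CPT4'
              and re.search(r'(91300)|(91301)|(91302)|(91303)|(91304)', concept_code))
    return bool(by_name or by_cvx or by_cpt)

print(looks_like_covid_vaccine('SARS-CoV-2 (COVID-19) vaccine, mRNA', '91300', 'CPT4'))  # True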
| 37.964072
| 108
| 0.681388
| 805
| 6,340
| 5.126708
| 0.284472
| 0.039254
| 0.023988
| 0.034892
| 0.408045
| 0.332929
| 0.312818
| 0.239884
| 0.203053
| 0.177853
| 0
| 0.010829
| 0.242587
| 6,340
| 166
| 109
| 38.192771
| 0.848605
| 0.226025
| 0
| 0.133333
| 0
| 0.009524
| 0.398463
| 0.123425
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038095
| false
| 0
| 0.07619
| 0
| 0.12381
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1603becbcb60a137e24357b35d07d2dd6b8de743
| 809
|
py
|
Python
|
test_calcscore.py
|
BrandonLeiran/bracket-scoring
|
a099e9a56ee3083c3a9db7d085b11b1dc7fe77f8
|
[
"MIT"
] | null | null | null |
test_calcscore.py
|
BrandonLeiran/bracket-scoring
|
a099e9a56ee3083c3a9db7d085b11b1dc7fe77f8
|
[
"MIT"
] | null | null | null |
test_calcscore.py
|
BrandonLeiran/bracket-scoring
|
a099e9a56ee3083c3a9db7d085b11b1dc7fe77f8
|
[
"MIT"
] | null | null | null |
import pytest
from calcscore import round_score
# you'll be picking what teams make it to the next round
# - so picking 32, then 16, then 8, 4, 2, 1...i.e. round 1-6 winners
# teams will have a name & a seed
# seed doesn't change, so maybe make that not passed around w/ results
def test_round_score_invalid_round():
with pytest.raises(ValueError, match=r".*range*"):
round_score(0)
with pytest.raises(ValueError, match=r".*range*"):
round_score(7)
def test_round_score_invalid_winner():
VALID_ROUND = 1
all_teams = []
round_winners = []
picked_winners = ["picked team"]
with pytest.raises(ValueError, match=r".*invalid winner"):
round_score(VALID_ROUND, all_teams, round_winners, picked_winners)
# score = round_score(0)
# assert score == 0
| 31.115385
| 74
| 0.68974
| 123
| 809
| 4.365854
| 0.495935
| 0.130354
| 0.089385
| 0.145251
| 0.446927
| 0.357542
| 0.175047
| 0.175047
| 0.175047
| 0
| 0
| 0.023256
| 0.202719
| 809
| 25
| 75
| 32.36
| 0.809302
| 0.327565
| 0
| 0.142857
| 0
| 0
| 0.080074
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.142857
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
16045e96f3ff12b08a6e4885879fa2b0a083c578
| 4,803
|
py
|
Python
|
tests/test_get.py
|
bgyori/pyobo
|
f199f62f65fc7faff307b56f979a369202c8ad33
|
[
"MIT"
] | null | null | null |
tests/test_get.py
|
bgyori/pyobo
|
f199f62f65fc7faff307b56f979a369202c8ad33
|
[
"MIT"
] | null | null | null |
tests/test_get.py
|
bgyori/pyobo
|
f199f62f65fc7faff307b56f979a369202c8ad33
|
[
"MIT"
] | null | null | null |
import unittest
from operator import attrgetter
import obonet
from pyobo import SynonymTypeDef, get
from pyobo.struct import Reference
from pyobo.struct.struct import (
iterate_graph_synonym_typedefs, iterate_graph_typedefs, iterate_node_parents, iterate_node_properties,
iterate_node_relationships, iterate_node_synonyms, iterate_node_xrefs,
)
from tests.constants import TEST_CHEBI_OBO_PATH
class TestParseObonet(unittest.TestCase):
""""""
@classmethod
def setUpClass(cls) -> None:
cls.graph = obonet.read_obo(TEST_CHEBI_OBO_PATH)
def test_get_graph_typedefs(self):
"""Test getting type definitions from an :mod:`obonet` graph."""
pairs = {
(typedef.prefix, typedef.identifier)
for typedef in iterate_graph_typedefs(self.graph)
}
self.assertIn(('chebi', 'has_part'), pairs)
def test_get_graph_synonym_typedefs(self):
"""Test getting synonym type definitions from an :mod:`obonet` graph."""
synonym_typedefs = sorted(iterate_graph_synonym_typedefs(self.graph), key=attrgetter('id'))
self.assertEqual(
sorted([
SynonymTypeDef(id='IUPAC_NAME', name='IUPAC NAME'),
SynonymTypeDef(id='BRAND_NAME', name='BRAND NAME'),
SynonymTypeDef(id='INN', name='INN'),
], key=attrgetter('id')),
synonym_typedefs,
)
def test_get_node_synonyms(self):
"""Test getting synonyms from a node in a :mod:`obonet` graph."""
data = self.graph.nodes['CHEBI:51990']
synonyms = list(iterate_node_synonyms(data))
self.assertEqual(1, len(synonyms))
synonym = synonyms[0]
self.assertEqual('N,N,N-tributylbutan-1-aminium fluoride', synonym.name, msg='name parsing failed')
self.assertEqual('EXACT', synonym.specificity, msg='specificity parsing failed')
# TODO implement
# self.assertEqual(SynonymTypeDef(id='IUPAC_NAME', name='IUPAC NAME'), synonym.type)
def test_get_node_properties(self):
"""Test getting properties from a node in a :mod:`obonet` graph."""
data = self.graph.nodes['CHEBI:51990']
properties = list(iterate_node_properties(data))
t_prop = 'http://purl.obolibrary.org/obo/chebi/monoisotopicmass'
self.assertIn(t_prop, {prop for prop, value in properties})
self.assertEqual(1, sum(prop == t_prop for prop, value in properties))
value = [value for prop, value in properties if prop == t_prop][0]
self.assertEqual('261.28318', value)
def test_get_node_parents(self):
"""Test getting parents from a node in a :mod:`obonet` graph."""
data = self.graph.nodes['CHEBI:51990']
parents = list(iterate_node_parents(data))
self.assertEqual(2, len(parents))
self.assertEqual({'24060', '51992'}, {
parent.identifier
for parent in parents
})
self.assertEqual({'chebi'}, {
parent.prefix
for parent in parents
})
def test_get_node_xrefs(self):
"""Test getting parents from a node in a :mod:`obonet` graph."""
data = self.graph.nodes['CHEBI:51990']
xrefs = list(iterate_node_xrefs(data))
self.assertEqual(7, len(xrefs))
# NOTE the prefixes are remapped by PyOBO
self.assertEqual({'pubmed', 'cas', 'beilstein', 'reaxys'}, {
xref.prefix
for xref in xrefs
})
self.assertEqual(
{
('reaxys', '3570522'), ('beilstein', '3570522'), ('cas', '429-41-4'),
('pubmed', '21142041'), ('pubmed', '21517057'), ('pubmed', '22229781'), ('pubmed', '15074950'),
},
{(xref.prefix, xref.identifier) for xref in xrefs}
)
def test_get_node_relations(self):
"""Test getting relations from a node in a :mod:`obonet` graph."""
data = self.graph.nodes['CHEBI:17051']
relations = list(iterate_node_relationships(data, 'chebi'))
self.assertEqual(1, len(relations))
typedef, target = relations[0]
self.assertIsNotNone(target)
self.assertIsInstance(target, Reference)
self.assertEqual('chebi', target.prefix)
self.assertEqual('29228', target.identifier)
self.assertIsNotNone(typedef)
self.assertIsInstance(typedef, Reference)
self.assertEqual('chebi', typedef.prefix)
self.assertEqual('is_conjugate_base_of', typedef.identifier)
class TestGet(unittest.TestCase):
"""Test generation of OBO objects."""
def test_get_obo(self):
"""Test getting an OBO document."""
obo = get('chebi', url=TEST_CHEBI_OBO_PATH, local=True)
terms = list(obo)
self.assertEqual(18, len(terms))
| 39.694215
| 111
| 0.636269
| 557
| 4,803
| 5.348294
| 0.238779
| 0.09567
| 0.026855
| 0.023498
| 0.17959
| 0.171534
| 0.152736
| 0.103726
| 0.103726
| 0.103726
| 0
| 0.030411
| 0.240058
| 4,803
| 120
| 112
| 40.025
| 0.785753
| 0.130543
| 0
| 0.122222
| 0
| 0
| 0.108685
| 0.007035
| 0
| 0
| 0
| 0.008333
| 0.266667
| 1
| 0.1
| false
| 0
| 0.077778
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
16054aa866f43fe130ae74a4adb86263728710d3
| 2,676
|
py
|
Python
|
src/commons.py
|
ymontilla/WebScrapingCatastro
|
a184b5c92199305e28ca7346c01d1e78e0a92c13
|
[
"MIT"
] | null | null | null |
src/commons.py
|
ymontilla/WebScrapingCatastro
|
a184b5c92199305e28ca7346c01d1e78e0a92c13
|
[
"MIT"
] | null | null | null |
src/commons.py
|
ymontilla/WebScrapingCatastro
|
a184b5c92199305e28ca7346c01d1e78e0a92c13
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# +
## Common utilities shared between places and OSM.
# +
import csv
import ast
import codecs
from math import cos, asin, sqrt
# +
def read_csv_with_encoding(filename, delimiter="|", encoding="iso-8859-1"):
with codecs.open(filename, encoding=encoding) as fp:
reader = csv.reader(fp, delimiter=delimiter)
csvFile = list(reader)
return pd.DataFrame(csvFile[1:], columns=csvFile[0])
def read_json_with_encoding(filename, encoding="iso-8859-1"):
with codecs.open(filename, encoding=encoding) as a:
l = a.read()
json_file = ast.literal_eval(l)
return json_file
# -
import pandas as pd
def distance(lat1, lon1, lat2, lon2):
"""
The result of the distance measurement is in kilometers.
"""
p = 0.017453292519943295 #Pi/180
a = 0.5 - cos((lat2 - lat1) * p)/2 + cos(lat1 * p) * cos(lat2 * p) * (1 - cos((lon2 - lon1) * p)) / 2
return 12742 * asin(sqrt(a))
def build_center_point(df):
lat = df["latitude"].mean()
lon = df["longitude"].mean()
return pd.DataFrame({'fid': [777], 'latitude': [lat], 'longitude': [lon]})
"""
The process is very heavy and it is not possible to run the analysis with all of Bogotá's data; the number of records is
too large to fit in memory. The correct usage is to filter the data before doing the cross join.
"""
def compute_cross_distances(location_df, interest_points_df=None):
condition_latitude = ~location_df["latitude"].isna()
condition_longitude = ~location_df["longitude"].isna()
location_df_complete = location_df.loc[condition_latitude & condition_longitude]
results = []
for i in location_df_complete.index:
for j in interest_points_df.index:
results.append([
location_df_complete.loc[i, "fid"],
distance(location_df_complete.loc[i, "latitude"],
location_df_complete.loc[i, "longitude"],
float(interest_points_df.loc[j, "lat"]), float(interest_points_df.loc[j, "lon"])),
location_df_complete.loc[i, "latitude"],
location_df_complete.loc[i, "longitude"],
interest_points_df.loc[j, "lat"],
interest_points_df.loc[j, "lon"],
interest_points_df.loc[j, "amenity"],
interest_points_df.loc[j, "name"]
])
final = list(zip(*results))
return pd.DataFrame({'fid': final[0], 'distance': final[1], 'p_lat': final[2],
'p_lon': final[3], 'i_lat': final[4], 'i_lon': final[5],
'amenity': final[6], 'name': final[7]})
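A quick usage sketch for distance() and build_center_point() (the coordinates are illustrative Bogotá points, not taken from the project's data):
# --- usage sketch ---
km = distance(4.5981, -74.0760, 4.6583, -74.0936)
print(round(km, 2), "km")  # great-circle distance, roughly 7 km

demo = pd.DataFrame({'fid': [1, 2], 'latitude': [4.5981, 4.6583],
                     'longitude': [-74.0760, -74.0936]})
print(build_center_point(demo))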
| 36.162162
| 114
| 0.612855
| 354
| 2,676
| 4.477401
| 0.367232
| 0.069401
| 0.080757
| 0.071924
| 0.251104
| 0.211987
| 0.147634
| 0.147634
| 0.147634
| 0.147634
| 0
| 0.032918
| 0.250747
| 2,676
| 73
| 115
| 36.657534
| 0.757606
| 0.050822
| 0
| 0.043478
| 0
| 0
| 0.077326
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108696
| false
| 0
| 0.108696
| 0
| 0.326087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
160586a7f083f1efa16456b4bf747dcafc4be695
| 7,851
|
py
|
Python
|
GamesGetter.py
|
JamescMcE/BasketBet
|
f87719ac793ea50822e8c52fc23191dba9ad6418
|
[
"CC0-1.0"
] | null | null | null |
GamesGetter.py
|
JamescMcE/BasketBet
|
f87719ac793ea50822e8c52fc23191dba9ad6418
|
[
"CC0-1.0"
] | null | null | null |
GamesGetter.py
|
JamescMcE/BasketBet
|
f87719ac793ea50822e8c52fc23191dba9ad6418
|
[
"CC0-1.0"
] | null | null | null |
# This script fetches game data from ESPN and odds from the ODDS-API, and then imports them into a MySQL table; example in Workbench here: https://puu.sh/HOKCj/ce199eec8e.png
import mysql.connector
import requests
import json
import datetime
import time
#Connection to the MYSQL Server.
mydb = mysql.connector.connect(
host="",
user="",
password="",
database="basketbet_data"
)
mycursor = mydb.cursor()
#Games List.
allGames=[]
#Gets the game Data from ESPN API given the link.
def newGetter(gameDay):
# JSON response for the given game day.
response = requests.get(gameDay).json()
gameData = response["events"]
#Loop through to collect GameDay data.
a=0
while a < len(gameData):
game = str(gameData[a]['name'])
game_ID = str(gameData[a]['id'])
game_Date = str(gameData[a]['date'][:-7])
game_Time = str(gameData[a]['date'][11:-1])
game_Period = str(gameData[a]['status']['period'])
game_Status = str(gameData[a]['status']['type']['description'])
home_Score = str(gameData[a]['competitions'][0]['competitors'][0]['score'])
away_Score = str(gameData[a]['competitions'][0]['competitors'][1]['score'])
#Quick fix to change Clippers Name from LA Clippers to Los Angeles Clippers.
if str(gameData[a]['competitions'][0]['competitors'][0]['team']['displayName']) == 'LA Clippers':
home_Team = 'Los Angeles Clippers'
else:
home_Team = str(gameData[a]['competitions'][0]['competitors'][0]['team']['displayName'])
if str(gameData[a]['competitions'][0]['competitors'][1]['team']['displayName']) == 'LA Clippers':
away_Team = 'Los Angeles Clippers'
else:
away_Team = str(gameData[a]['competitions'][0]['competitors'][1]['team']['displayName'])
#Appends the Game Data to the list.
allGames.append((game_ID, game, home_Team, home_Score, away_Team, away_Score, game_Date, game_Time, game_Period, game_Status))
a+=1
#Gets the Odds from the ODDS-API.
def oddsGetter():
#Parameters for Odds Api.
parameters = {
"sport" : "basketball_nba",
"region" : "uk",
"mkt" : "h2h",
"apiKey" : "",
}
#JSON Response.
response = requests.get("https://api.the-odds-api.com/v3/odds/", params=parameters)
data = response.json()['data']
team0OddsInfo=[]
team1OddsInfo=[]
team0_odds = ''
team1_odds = ''
#Appends the odds info to a list as strings.
for game in data:
for site in game['sites']:
if site['site_key'] == "paddypower":
team0_odds = str(site['odds']['h2h'][0])
team1_odds = str(site['odds']['h2h'][1])
if team0_odds == '':
team0_odds = 0
if team1_odds == '':
team1_odds = 0
team0 = str(game['teams'][0])
team1 = str(game['teams'][1])
startTime = game['commence_time']
gameDate = str(datetime.datetime.utcfromtimestamp(startTime).strftime('%Y-%m-%d %H:%M:%S'))[:-9]
team0OddsInfo.append((team0, team0_odds, gameDate))
team1OddsInfo.append((team1, team1_odds, gameDate))
a=0
#As both lists have the same length, loop through one and update the tables where needed.
while a < len(team0OddsInfo):
query_string = 'SELECT * FROM basketbet_data.all_games WHERE Game_Date = %s'
gameDate = (str(team0OddsInfo[a][2]),)
mycursor.execute(query_string, gameDate)
matchedGames = mycursor.fetchall()
b=0
while b < len(matchedGames):
if matchedGames[b][2] == team0OddsInfo[a][0]:
query_list = [team0OddsInfo[a][1], team1OddsInfo[a][1], matchedGames[b][0]]
query_string = 'UPDATE all_games SET Home_Odds = %s, Away_Odds = %s WHERE (Game_ID = %s)'
mycursor.execute(query_string, query_list)
elif matchedGames[b][5] == team0OddsInfo[a][0]:
query_list = [team0OddsInfo[a][1], team1OddsInfo[a][1], matchedGames[b][0]]
query_string = 'UPDATE all_games SET Away_Odds = %s, Home_Odds = %s WHERE (Game_ID = %s)'
mycursor.execute(query_string, query_list)
b+=1
a+=1
#For the console to show when odds were updated.
mydb.commit()
time = datetime.datetime.utcnow()
print('\n' + 'ODDS UPDATE AT: ' + str(time))
print('--------------------------------')
print('--------------------------------')
print(len(team0OddsInfo), "GAME ODDS inserted.")
print('REMAINING REQUESTS:', response.headers['x-requests-remaining'])
print('USED REQUESTS:', response.headers['x-requests-used'])
print('--------------------------------')
print('--------------------------------')
#Main loop: fetch games every 5 minutes (sleep 300 s); the counter starts at 72 so odds are fetched immediately, then every 72 iterations (~6 hours).
counter=72
startTime = time.time()
while True:
#Today, Yesterday and Tomorrow.
today = datetime.date.today()
yesterday = today + datetime.timedelta(days=-1)
tomorrow = today + datetime.timedelta(days=1)
#Removing the - from the dates for the URLs, then making the URLs.
todayShort = str(today).replace('-', '')
yesterdayShort = str(yesterday).replace('-', '')
tomorrowShort = str(tomorrow).replace('-', '')
yesterdayUrl = "http://site.api.espn.com/apis/site/v2/sports/basketball/nba/scoreboard?dates=" + yesterdayShort + '-' + yesterdayShort
todayUrl = "http://site.api.espn.com/apis/site/v2/sports/basketball/nba/scoreboard?dates=" + todayShort + '-' + todayShort
tomorrowUrl = "http://site.api.espn.com/apis/site/v2/sports/basketball/nba/scoreboard?dates=" + tomorrowShort + '-' + tomorrowShort
newGetter(yesterdayUrl)
newGetter(todayUrl)
newGetter(tomorrowUrl)
#Inserting or updating the table in MYSQL with the games.
c=0
updateCount=0
newGameCount=0
while c < len(allGames):
query_string = 'SELECT * FROM basketbet_data.all_games WHERE Game_ID = %s'
gameID = (str(allGames[c][0]),)
mycursor.execute(query_string, gameID)
if mycursor.fetchone():
updateCount+=1
query_list = [allGames[c][1], allGames[c][2], allGames[c][4], allGames[c][5], allGames[c][3], allGames[c][6], allGames[c][7], allGames[c][8], allGames[c][9], allGames[c][0]]
query_string = 'UPDATE all_games SET Game_Name = %s, Home_Team = %s, Away_Team = %s, Away_Score = %s, Home_Score = %s, Game_Date = %s, Game_Time = %s, Game_Period = %s, Game_Status = %s WHERE (Game_ID = %s)'
mycursor.execute(query_string, query_list)
mydb.commit()
else:
newGameCount+=1
query_string = "INSERT INTO basketbet_data.all_games (Game_ID, Game_Name, Home_Team, Home_Odds, Home_Score, Away_Team, Away_Odds, Away_Score, Game_Date, Game_Time, Game_Period, Game_Status) VALUES (%s, %s, %s, 0, %s, %s, 0, %s, %s, %s, %s, %s)"
mycursor.execute(query_string, allGames[c])
mydb.commit()
c+=1
#Prints to console what games were updated and what new games were inserted.
print('----------------------------------------')
print(str(updateCount) + ' GAMES UPDATED, and ' + str(newGameCount) + ' NEW GAMES inserted.')
print('----------------------------------------')
allGames=[]
#Counter for the Odds script.
if counter==72:
oddsGetter()
counter=0
else:
counter+=1
print('\n')
time.sleep(300 - ((time.time() - startTime) % 300))
| 42.668478
| 257
| 0.584639
| 944
| 7,851
| 4.76589
| 0.233051
| 0.02934
| 0.032007
| 0.032007
| 0.304512
| 0.234497
| 0.234497
| 0.206713
| 0.206713
| 0.16048
| 0
| 0.019993
| 0.248249
| 7,851
| 184
| 258
| 42.668478
| 0.742291
| 0.135015
| 0
| 0.175182
| 0
| 0.036496
| 0.267567
| 0.042495
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014599
| false
| 0.007299
| 0.036496
| 0
| 0.051095
| 0.087591
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
16071d9e180a990b1f3b40b4034a6c704c0e2258
| 4,302
|
py
|
Python
|
neurodocker/tests/test_neurodocker.py
|
effigies/neurodocker
|
4b0f32d2915b8b0308e3e391d534e05eb29b8d09
|
[
"Apache-2.0"
] | 1
|
2021-01-27T06:00:35.000Z
|
2021-01-27T06:00:35.000Z
|
neurodocker/tests/test_neurodocker.py
|
giovtorres/neurodocker
|
65575f5e44f2c5ef96a5da51d0df54b1af80bb79
|
[
"Apache-2.0"
] | null | null | null |
neurodocker/tests/test_neurodocker.py
|
giovtorres/neurodocker
|
65575f5e44f2c5ef96a5da51d0df54b1af80bb79
|
[
"Apache-2.0"
] | null | null | null |
"""Tests for neurodocker.main"""
# Author: Jakub Kaczmarzyk <jakubk@mit.edu>
from __future__ import absolute_import, unicode_literals
import sys
import pytest
from neurodocker.neurodocker import create_parser, parse_args, main
def test_generate():
args = ("generate -b ubuntu:17.04 -p apt"
" --arg FOO=BAR BAZ"
" --afni version=latest"
" --ants version=2.2.0"
" --freesurfer version=6.0.0"
" --fsl version=5.0.10"
" --user=neuro"
" --miniconda env_name=neuro conda_install=python=3.6.2"
" --user=root"
" --mrtrix3"
" --neurodebian os_codename=zesty download_server=usa-nh"
" --spm version=12 matlab_version=R2017a"
" --no-check-urls"
" --expose 1234 9000"
" --volume /var /usr/bin"
" --label FOO=BAR BAZ=CAT"
" --copy relpath/to/file.txt /tmp/file.txt"
" --add relpath/to/file2.txt /tmp/file2.txt"
" --cmd '--arg1' '--arg2'"
" --workdir /home"
" --install git"
" --user=neuro"
)
main(args.split())
with pytest.raises(SystemExit):
args = "-b ubuntu"
main(args.split())
with pytest.raises(SystemExit):
args = "-p apt"
main(args.split())
with pytest.raises(SystemExit):
main()
args = "generate -b ubuntu -p apt --ants option=value"
with pytest.raises(ValueError):
main(args.split())
def test_generate_opts(capsys):
args = "generate -b ubuntu:17.04 -p apt --no-check-urls {}"
main(args.format('--user=neuro').split())
out, _ = capsys.readouterr()
assert "USER neuro" in out
main(args.format('--add path/to/file.txt /tmp/file.txt').split())
out, _ = capsys.readouterr()
assert 'ADD ["path/to/file.txt", "/tmp/file.txt"]' in out
main(args.format('--copy path/to/file.txt /tmp/file.txt').split())
out, _ = capsys.readouterr()
assert 'COPY ["path/to/file.txt", "/tmp/file.txt"]' in out
main(args.format('--env KEY=VAL KEY2=VAL').split())
out, _ = capsys.readouterr()
assert 'ENV KEY="VAL" \\' in out
assert ' KEY2="VAL"' in out
main(args.format('--expose 1230 1231').split())
out, _ = capsys.readouterr()
assert "EXPOSE 1230 1231" in out
main(args.format('--workdir /home').split())
out, _ = capsys.readouterr()
assert "WORKDIR /home" in out
main(args.format('--install vi').split())
out, _ = capsys.readouterr()
assert "vi" in out
main(args.format('--instruction RUNecho').split())
out, _ = capsys.readouterr()
assert "RUNecho" in out
def test_generate_from_json(capsys, tmpdir):
import json
cmd = "generate -b debian:stretch -p apt --c3d version=1.0.0"
main(cmd.split())
true, _ = capsys.readouterr()
specs = {'check_urls': True,
'generation_timestamp': '2017-08-31 21:49:04',
'instructions': [['base', 'debian:stretch'],
['c3d', {'version': '1.0.0'}]],
'neurodocker_version': '0.2.0-18-g9227b17',
'pkg_manager': 'apt'}
str_specs = json.dumps(specs)
filepath = tmpdir.join("specs.json")
filepath.write(str_specs)
gen_cmd = "generate --file {}".format(filepath)
main(gen_cmd.split())
test, _ = capsys.readouterr()
# These indices chop off the header (with timestamp) and the layer that
# saves to JSON (with timestamp).
sl = slice(8, -19)
assert true.split('\n')[sl] == test.split('\n')[sl]
def test_generate_no_print(capsys):
args = ['generate', '-b', 'ubuntu:17.04', '-p', 'apt', '--no-check-urls']
main(args)
out, _ = capsys.readouterr()
assert "FROM" in out and "RUN" in out
args.append('--no-print-df')
main(args)
out, _ = capsys.readouterr()
assert not out
def test_generate_save(tmpdir):
outfile = tmpdir.join("test.txt")
args = ['generate', '-b', 'ubuntu:17.04', '-p', 'apt', '--mrtrix3',
'use_binaries=false', '--no-print-df', '-o', outfile.strpath,
'--no-check-urls']
main(args)
assert outfile.read(), "saved Dockerfile is empty"
assert "git clone https://github.com/MRtrix3/mrtrix3.git" in outfile.read()
| 31.173913
| 79
| 0.58066
| 547
| 4,302
| 4.484461
| 0.330896
| 0.052181
| 0.077456
| 0.101916
| 0.360375
| 0.229923
| 0.198532
| 0.182634
| 0.119853
| 0.119853
| 0
| 0.032548
| 0.250116
| 4,302
| 137
| 80
| 31.40146
| 0.727836
| 0.039749
| 0
| 0.213592
| 0
| 0
| 0.352413
| 0.016735
| 0
| 0
| 0
| 0
| 0.135922
| 1
| 0.048544
| false
| 0
| 0.048544
| 0
| 0.097087
| 0.029126
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1607f8c0c3d6768327bf886d9e6092523f205171
| 2,778
|
py
|
Python
|
fuzzers/011-cle-ffconfig/generate.py
|
tmichalak/prjuray
|
53f3c94b58ffc6d405ac20a3b340ae726717ed47
|
[
"0BSD"
] | 39
|
2020-07-17T19:43:40.000Z
|
2022-01-07T02:05:48.000Z
|
fuzzers/011-cle-ffconfig/generate.py
|
tmichalak/prjuray
|
53f3c94b58ffc6d405ac20a3b340ae726717ed47
|
[
"0BSD"
] | 24
|
2020-07-17T20:15:54.000Z
|
2022-01-21T08:29:51.000Z
|
fuzzers/011-cle-ffconfig/generate.py
|
tmichalak/prjuray
|
53f3c94b58ffc6d405ac20a3b340ae726717ed47
|
[
"0BSD"
] | 11
|
2020-07-17T19:43:45.000Z
|
2022-02-09T08:43:23.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The Project U-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
'''
FDCE Primitive: D Flip-Flop with Clock Enable and Asynchronous Clear
FDPE Primitive: D Flip-Flop with Clock Enable and Asynchronous Preset
FDRE Primitive: D Flip-Flop with Clock Enable and Synchronous Reset
FDSE Primitive: D Flip-Flop with Clock Enable and Synchronous Set
LDCE Primitive: Transparent Data Latch with Asynchronous Clear and Gate Enable
LDPE Primitive: Transparent Data Latch with Asynchronous Preset and Gate Enable
'''
from prims import isff, isl
from utils.segmaker import Segmaker
segmk = Segmaker("design.bits", bits_per_word=16)
def loadtop():
'''
i,prim,loc,bel
0,FDPE,SLICE_X12Y100,C5FF
1,FDPE,SLICE_X15Y100,A5FF
2,FDPE_1,SLICE_X16Y100,B5FF
3,LDCE_1,SLICE_X17Y100,BFF
'''
f = open('top.txt', 'r')
f.readline()
ret = {}
for l in f:
i, prim, loc, bel, init = l.split(",")
i = int(i)
init = int(init)
ret[loc] = (i, prim, loc, bel, init)
return ret
top = loadtop()
def vs2i(s):
return {"1'b0": 0, "1'b1": 1}[s]
print("Loading tags from design.txt")
with open("design.txt", "r") as f:
for line in f:
'''
puts $fp "$type $tile $grid_x $grid_y $ff $bel_type $used $usedstr"
CLEM CLEM_X10Y137 30 13 SLICE_X13Y137/AFF REG_INIT 1 FDRE
CLEM CLEM_X10Y137 30 13 SLICE_X12Y137/D2FF FF_INIT 0
'''
line = line.split()
tile_type = line[0]
tile_name = line[1]
grid_x = line[2]
grid_y = line[3]
# Other code uses BEL name
# SLICE_X12Y137/D2FF
site_ff_name = line[4]
site, ff_name = site_ff_name.split('/')
ff_type = line[5]
used = int(line[6])
cel_prim = None
cel_name = None
if used:
cel_name = line[7]
cel_prim = line[8]
cinv = int(line[9])
init = vs2i(line[10])
# A B C D E F G H
which = ff_name[0]
# LUT6 vs LUT5 FF
is2 = '2' in ff_name
if used:
segmk.add_site_tag(site, "%s.ZINI" % ff_name, 1 ^ init)
'''
On name:
The primitives you listed have a control input to set the FF value to zero (clear/reset),
the other three primitives have a control input that sets the FF value to one.
Z => inversion
'''
segmk.add_site_tag(site, "%s.ZRST" % ff_name,
cel_prim in ('FDRE', 'FDCE', 'LDCE'))
segmk.compile()
segmk.write()
| 28.060606
| 101
| 0.596472
| 418
| 2,778
| 3.866029
| 0.435407
| 0.02599
| 0.034653
| 0.044554
| 0.246287
| 0.227723
| 0.117574
| 0.117574
| 0.117574
| 0
| 0
| 0.051151
| 0.296256
| 2,778
| 98
| 102
| 28.346939
| 0.775448
| 0.319654
| 0
| 0.046512
| 0
| 0
| 0.069904
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046512
| false
| 0
| 0.046512
| 0.023256
| 0.139535
| 0.023256
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1608246c353096fff06ae6f3c3c9e80955bceb92
| 2,697
|
py
|
Python
|
hmc/integrators/states/riemannian_leapfrog_state.py
|
JamesBrofos/Thresholds-in-Hamiltonian-Monte-Carlo
|
7ee1b530db0eb536666dbc872fbf8200e53dd49b
|
[
"MIT"
] | 1
|
2021-11-23T15:40:07.000Z
|
2021-11-23T15:40:07.000Z
|
hmc/integrators/states/riemannian_leapfrog_state.py
|
JamesBrofos/Thresholds-in-Hamiltonian-Monte-Carlo
|
7ee1b530db0eb536666dbc872fbf8200e53dd49b
|
[
"MIT"
] | null | null | null |
hmc/integrators/states/riemannian_leapfrog_state.py
|
JamesBrofos/Thresholds-in-Hamiltonian-Monte-Carlo
|
7ee1b530db0eb536666dbc872fbf8200e53dd49b
|
[
"MIT"
] | null | null | null |
from typing import Callable
import numpy as np
from hmc.integrators.states.leapfrog_state import LeapfrogState
from hmc.integrators.fields import riemannian
from hmc.linalg import solve_psd
class RiemannianLeapfrogState(LeapfrogState):
"""The Riemannian leapfrog state uses the Fisher information matrix to provide
a position-dependent Riemannian metric. As such, computing the gradients of
the Hamiltonian requires higher derivatives of the metric, which vanish in
the Euclidean case.
"""
def __init__(self,
position: np.ndarray,
momentum: np.ndarray):
super().__init__(position, momentum)
self._jac_metric: np.ndarray
self._grad_logdet_metric: np.ndarray
@property
def requires_update(self) -> bool:
o = self.log_posterior is None or \
self.grad_log_posterior is None or \
self.metric is None or \
self.inv_metric is None or \
self.jac_metric is None or \
self.grad_logdet_metric is None
return o
@property
def jac_metric(self):
return self._jac_metric
@jac_metric.setter
def jac_metric(self, value):
self._jac_metric = value
@jac_metric.deleter
def jac_metric(self):
del self._jac_metric
@property
def grad_logdet_metric(self):
return self._grad_logdet_metric
@grad_logdet_metric.setter
def grad_logdet_metric(self, value):
self._grad_logdet_metric = value
@grad_logdet_metric.deleter
def grad_logdet_metric(self):
del self._grad_logdet_metric
def update(self, auxiliaries: Callable):
num_dims = len(self.position)
log_posterior, grad_log_posterior, metric, jac_metric = auxiliaries(self.position)
jac_metric = np.swapaxes(jac_metric, 0, -1)
inv_metric, sqrtm_metric = solve_psd(metric, return_chol=True)
grad_logdet_metric = riemannian.grad_logdet(inv_metric, jac_metric, num_dims)
self.log_posterior = log_posterior
self.grad_log_posterior = grad_log_posterior
self.metric = metric
self.sqrtm_metric = sqrtm_metric
self.inv_metric = inv_metric
self.jac_metric = jac_metric
self.grad_logdet_metric = grad_logdet_metric
self.velocity = riemannian.velocity(inv_metric, self.momentum)
self.force = riemannian.force(self.velocity, grad_log_posterior, jac_metric, grad_logdet_metric)
def clear(self):
super().clear()
del self.jac_metric
del self.grad_logdet_metric
del self.metric
del self.inv_metric
del self.logdet_metric
del self.sqrtm_metric
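A hedged usage sketch (not part of the original module): it assumes the hmc package from this repository is importable and relies only on the contract visible in update() above, i.e. auxiliaries(position) returns (log_posterior, grad_log_posterior, metric, jac_metric). The standard-normal target is illustrative; with an identity metric the metric Jacobian is zero.
import numpy as np
from hmc.integrators.states.riemannian_leapfrog_state import RiemannianLeapfrogState

def gaussian_auxiliaries(q):
    # Standard-normal target: constant identity metric, zero metric Jacobian.
    d = len(q)
    log_posterior = -0.5 * float(q @ q)
    grad_log_posterior = -q
    metric = np.eye(d)
    jac_metric = np.zeros((d, d, d))  # shape as expected before the swapaxes in update()
    return log_posterior, grad_log_posterior, metric, jac_metric

state = RiemannianLeapfrogState(position=np.zeros(3), momentum=np.ones(3))
state.update(gaussian_auxiliaries)
print(state.velocity, state.force)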
| 32.890244
| 104
| 0.68743
| 347
| 2,697
| 5.060519
| 0.233429
| 0.092255
| 0.136674
| 0.079727
| 0.226082
| 0.068337
| 0.041002
| 0
| 0
| 0
| 0
| 0.000985
| 0.247312
| 2,697
| 81
| 105
| 33.296296
| 0.864039
| 0.091212
| 0
| 0.112903
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16129
| false
| 0
| 0.080645
| 0.032258
| 0.306452
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
160b335422855d4c69636103d3682d2f66956533
| 821
|
py
|
Python
|
tools/chrome_proxy/integration_tests/chrome_proxy_pagesets/html5test.py
|
google-ar/chromium
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2,151
|
2020-04-18T07:31:17.000Z
|
2022-03-31T08:39:18.000Z
|
tools/chrome_proxy/integration_tests/chrome_proxy_pagesets/html5test.py
|
harrymarkovskiy/WebARonARCore
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 395
|
2020-04-18T08:22:18.000Z
|
2021-12-08T13:04:49.000Z
|
tools/chrome_proxy/integration_tests/chrome_proxy_pagesets/html5test.py
|
harrymarkovskiy/WebARonARCore
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 338
|
2020-04-18T08:03:10.000Z
|
2022-03-29T12:33:22.000Z
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from common.chrome_proxy_shared_page_state import ChromeProxySharedPageState
from telemetry.page import page as page_module
from telemetry import story
class HTML5TestPage(page_module.Page):
def __init__(self, url, page_set):
super(HTML5TestPage, self).__init__(url=url, page_set=page_set,
shared_page_state_class=ChromeProxySharedPageState)
class HTML5TestStorySet(story.StorySet):
""" Chrome proxy test page for traffic over https. """
def __init__(self):
super(HTML5TestStorySet, self).__init__()
urls_list = [
'http://html5test.com/',
]
for url in urls_list:
self.AddStory(HTML5TestPage(url, self))
| 27.366667
| 76
| 0.751523
| 109
| 821
| 5.385321
| 0.550459
| 0.035775
| 0.051107
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01462
| 0.16687
| 821
| 29
| 77
| 28.310345
| 0.843567
| 0.248477
| 0
| 0
| 0
| 0
| 0.034539
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.2
| 0
| 0.466667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
161021c6a14b006c767d40fee4f27d3f18827442
| 744
|
py
|
Python
|
BizPy/openpyxl/20200513/horizontal_chart.py
|
t2y/python-study
|
52a132ea600d4696164e540d8a8f8f5fc58e097a
|
[
"Apache-2.0"
] | 18
|
2016-08-15T00:24:44.000Z
|
2020-11-30T15:11:52.000Z
|
BizPy/openpyxl/20200513/horizontal_chart.py
|
t2y/python-study
|
52a132ea600d4696164e540d8a8f8f5fc58e097a
|
[
"Apache-2.0"
] | null | null | null |
BizPy/openpyxl/20200513/horizontal_chart.py
|
t2y/python-study
|
52a132ea600d4696164e540d8a8f8f5fc58e097a
|
[
"Apache-2.0"
] | 6
|
2016-09-28T10:47:03.000Z
|
2020-10-14T10:20:06.000Z
|
import pandas as pd
from openpyxl import Workbook
from openpyxl.chart import BarChart, Reference
wb = Workbook()
ws = wb.active
df = pd.read_csv('population.csv')
ws.append(df.columns.tolist())
for row in df.values:
ws.append(list(row))
row_length = 1 + len(df.values)
values = Reference(ws, min_col=2, max_col=2, min_row=1, max_row=row_length)
categories = Reference(ws, min_col=1, min_row=2, max_row=row_length)
chart = BarChart()
chart.type = 'bar'
chart.style = 11
chart.shape = 4
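# The chart title and axis labels below are Japanese data strings, kept verbatim:
# '都道府県別の人口' = 'Population by prefecture', '都道府県' = 'Prefecture', '人口' = 'Population'.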
chart.title = '都道府県別の人口'
chart.x_axis.title = '都道府県'
chart.y_axis.title = '人口'
chart.add_data(values, titles_from_data=True)
chart.set_categories(categories)
ws.add_chart(chart, 'A9')
wb.save('population_horizontal.xlsx')
| 25.655172
| 76
| 0.72043
| 120
| 744
| 4.308333
| 0.458333
| 0.034816
| 0.069633
| 0.065764
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015773
| 0.147849
| 744
| 28
| 77
| 26.571429
| 0.799685
| 0
| 0
| 0
| 0
| 0
| 0.082402
| 0.036313
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.130435
| 0
| 0.130435
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
161068852c112b7ab6b2bbf31d699217b497ca00
| 462
|
py
|
Python
|
changes/api/serializer/models/logsource.py
|
alex/changes
|
69a17b4c639e7082a75d037384ccb68ead3a0b4b
|
[
"Apache-2.0"
] | 1
|
2015-11-08T13:00:44.000Z
|
2015-11-08T13:00:44.000Z
|
changes/api/serializer/models/logsource.py
|
alex/changes
|
69a17b4c639e7082a75d037384ccb68ead3a0b4b
|
[
"Apache-2.0"
] | null | null | null |
changes/api/serializer/models/logsource.py
|
alex/changes
|
69a17b4c639e7082a75d037384ccb68ead3a0b4b
|
[
"Apache-2.0"
] | null | null | null |
from changes.api.serializer import Serializer, register
from changes.models.log import LogSource
@register(LogSource)
class LogSourceSerializer(Serializer):
def serialize(self, instance, attrs):
return {
'id': instance.id.hex,
'job': {
'id': instance.job_id.hex,
},
'name': instance.name,
'step': instance.step,
'dateCreated': instance.date_created,
}
| 27.176471
| 55
| 0.582251
| 44
| 462
| 6.068182
| 0.568182
| 0.082397
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.307359
| 462
| 16
| 56
| 28.875
| 0.834375
| 0
| 0
| 0
| 0
| 0
| 0.056277
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.142857
| 0.071429
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
161139c53368ea4186cb4cad223d2c35a3e06750
| 1,246
|
py
|
Python
|
examples/prostate/data_preparation/utils/nrrd_to_nifti.py
|
IsaacYangSLA/NVFlare
|
8c6582894c9a8431f64479bc9f472fefcd71e5a7
|
[
"Apache-2.0"
] | null | null | null |
examples/prostate/data_preparation/utils/nrrd_to_nifti.py
|
IsaacYangSLA/NVFlare
|
8c6582894c9a8431f64479bc9f472fefcd71e5a7
|
[
"Apache-2.0"
] | null | null | null |
examples/prostate/data_preparation/utils/nrrd_to_nifti.py
|
IsaacYangSLA/NVFlare
|
8c6582894c9a8431f64479bc9f472fefcd71e5a7
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import nibabel as nib
import nrrd
import numpy as np
parser = argparse.ArgumentParser("Convert nrrd label to nifti with reference image file for affine")
parser.add_argument("--input_path", help="Input nrrd path", type=str)
parser.add_argument("--reference_path", help="Reference image path", type=str)
parser.add_argument("--output_path", help="Output nifti path", type=str)
args = parser.parse_args()
img = nib.load(args.reference_path)
img_affine = img.affine
# Avoid shadowing the nrrd module with the (data, header) tuple returned by nrrd.read.
nrrd_data, _nrrd_header = nrrd.read(args.input_path)
data = np.flip(nrrd_data, axis=1)
nft_img = nib.Nifti1Image(data, img_affine)
nib.save(nft_img, args.output_path)
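A hypothetical invocation for reference, built only from the argparse flags defined above; the file names are placeholders.
import subprocess

# Hypothetical example; replace the placeholder paths with real files.
subprocess.run([
    "python", "nrrd_to_nifti.py",
    "--input_path", "label.nrrd",
    "--reference_path", "reference_image.nii.gz",
    "--output_path", "label.nii.gz",
], check=True)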
| 35.6
| 100
| 0.764045
| 195
| 1,246
| 4.810256
| 0.528205
| 0.063966
| 0.054371
| 0.034115
| 0.059701
| 0.059701
| 0
| 0
| 0
| 0
| 0
| 0.013966
| 0.138042
| 1,246
| 34
| 101
| 36.647059
| 0.859404
| 0.47191
| 0
| 0
| 0
| 0
| 0.243789
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.266667
| 0
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
16117ea75b817e23fa127a364786f0a599ad09cc
| 1,570
|
py
|
Python
|
setup.py
|
jszakmeister/rst2ctags
|
22f4035d9ea1e43a07b91f806014d318b3dc5097
|
[
"BSD-3-Clause"
] | 23
|
2015-03-05T14:12:08.000Z
|
2022-01-08T00:21:39.000Z
|
setup.py
|
jszakmeister/rst2ctags
|
22f4035d9ea1e43a07b91f806014d318b3dc5097
|
[
"BSD-3-Clause"
] | 8
|
2015-03-05T14:15:44.000Z
|
2020-10-02T00:16:55.000Z
|
setup.py
|
jszakmeister/rst2ctags
|
22f4035d9ea1e43a07b91f806014d318b3dc5097
|
[
"BSD-3-Clause"
] | 12
|
2015-03-05T15:12:22.000Z
|
2021-11-09T21:29:55.000Z
|
from setuptools import setup
import io
import os
import re
version_re = re.compile(r'^__version__ = "([^"]*)"$')
# Find the version number (the for/else below raises if no matching line is found).
with open('rst2ctags.py', 'r') as f:
for line in f:
line = line.rstrip()
m = version_re.match(line)
if m:
version = m.group(1)
break
else:
raise RuntimeError("Couldn't find version string in rst2ctags.py")
# Load the description.
readme_path = os.path.join(os.path.dirname(__file__), 'README.rst')
with io.open(readme_path, encoding='utf-8') as f:
long_description = f.read()
setup(
name='rst2ctags',
description='Generates ctags-compatible output for the sections of a '
'reStructuredText document.',
long_description=long_description,
license='BSD',
author='John Szakmeister',
author_email='john@szakmeister.net',
url='https://github.com/jszakmeister/rst2ctags',
version=version,
py_modules=['rst2ctags'],
zip_safe=True,
entry_points={
'console_scripts': [
'rst2ctags = rst2ctags:cli_main',
],
},
classifiers=[
'License :: OSI Approved :: BSD License',
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Software Development',
'Topic :: Text Processing',
'Topic :: Text Processing :: Indexing',
'Topic :: Utilities',
]
)
| 26.610169
| 74
| 0.610191
| 174
| 1,570
| 5.385057
| 0.597701
| 0.048026
| 0.053362
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011159
| 0.257962
| 1,570
| 58
| 75
| 27.068966
| 0.793133
| 0.029299
| 0
| 0
| 0
| 0
| 0.419461
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.085106
| 0
| 0.085106
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
161220d89127fbd24716ad1fd95c0f68eb787901
| 50,986
|
py
|
Python
|
py-ws/hardshare/cli.py
|
rerobots/hardshare
|
456e7d1d1eb21d03efc3cd1f7960a1729b62527b
|
[
"Apache-2.0"
] | 8
|
2020-04-14T17:19:57.000Z
|
2022-03-03T08:55:34.000Z
|
py-ws/hardshare/cli.py
|
rerobots/hardshare
|
456e7d1d1eb21d03efc3cd1f7960a1729b62527b
|
[
"Apache-2.0"
] | 11
|
2020-04-01T15:13:37.000Z
|
2021-06-15T22:10:31.000Z
|
py-ws/hardshare/cli.py
|
rerobots/hardshare
|
456e7d1d1eb21d03efc3cd1f7960a1729b62527b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright (C) 2018 rerobots, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line interface
"""
import argparse
import json
import logging
import logging.handlers
import os
import os.path
import subprocess
import sys
import uuid
import yaml
from aiohttp.client_exceptions import ClientConnectorError as ConnectionError
from .core import WorkspaceInstance
from .mgmt import get_local_config, add_key, add_ssh_path, list_local_keys
from .mgmt import find_wd, modify_local, rm_wd
from .api import HSAPIClient
from .err import Error as HSError
from .addons import camera_main, stop_cameras
from .addons import add_cmdsh, rm_cmdsh, add_vnc, rm_vnc, add_mistyproxy, rm_mistyproxy
def get_config_with_index(id_prefix=None):
try:
config = get_local_config()
except:
print('error loading configuration data. does it exist?')
return None, None, 1
if len(config['wdeployments']) == 0:
print(('ERROR: no workspace deployment in local configuration.'))
return config, None, 1
if isinstance(id_prefix, list):
if len(id_prefix) == 0:
if len(config['wdeployments']) > 1:
print('ERROR: ambiguous command: more than 1 workspace deployment defined.')
return config, None, 1
index = [0]
else:
indices = []
for idp in id_prefix:
index = find_wd(config, idp)
if index is None:
print('ERROR: given prefix does not match precisely 1 workspace deployment')
return config, None, 1
indices.append(index)
index = indices
elif id_prefix:
index = find_wd(config, id_prefix)
if index is None:
print('ERROR: given prefix does not match precisely 1 workspace deployment')
return config, None, 1
else:
if len(config['wdeployments']) > 1:
print('ERROR: ambiguous command: more than 1 workspace deployment defined.')
return config, None, 1
index = 0
return config, index, 0
def main(argv=None):
pkglogger = logging.getLogger('hardshare')
pkglogger.setLevel(logging.WARNING)
loghandler = logging.handlers.WatchedFileHandler(filename='hardshare_client.log', mode='a', delay=True)
loghandler.setLevel(logging.DEBUG)
loghandler.setFormatter(logging.Formatter('%(name)s.%(funcName)s (%(levelname)s) (pid: {});'
' %(asctime)s ; %(message)s'
.format(os.getpid())))
pkglogger.addHandler(loghandler)
if argv is None:
argv = sys.argv[1:]
argparser = argparse.ArgumentParser(description=('Command-line interface'
' for the hardshare client'), add_help=False)
argparser.add_argument('-h', '--help', dest='print_help',
action='store_true', default=False,
help='print this help message and exit')
argparser.add_argument('-V', '--version', action='store_true', default=False,
help='print version of hardshare (this) package.',
dest='print_version')
argparser.add_argument('-v', '--verbose', action='store_true', default=False,
help='print verbose messages about actions by the hardshare client',
dest='verbose')
argparser.add_argument('--format', metavar='FORMAT',
default=None, type=str,
help=('special output formatting (default is no special formatting); '
'options: YAML , JSON'),
dest='output_format')
subparsers = argparser.add_subparsers(dest='command')
subparsers.add_parser('version', help='print version number and exit.')
help_parser = subparsers.add_parser('help', help='print this help message and exit')
help_parser.add_argument('help_target_command', metavar='COMMAND', type=str, nargs='?')
config_commanddesc = 'manage local and remote configuration'
config_parser = subparsers.add_parser('config',
description=config_commanddesc,
help=config_commanddesc)
config_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None,
help=('id of workspace deployment for configuration changes'
' (can be unique prefix); '
'this argument is not required '
'if there is only 1 workspace deployment'))
config_parser.add_argument('-c', '--create', action='store_true', default=False,
dest='create_config',
help='if no local configuration is found, then create one')
config_parser.add_argument('--add-terminate-prog', metavar='PATH',
dest='add_terminate_prog', default=None,
help='add program to list of commands to execute')
config_parser.add_argument('--rm-terminate-prog', metavar='PATH',
dest='rm_terminate_prog', default=None,
help=('remove program from list of commands to execute; '
'for example, '
'copy-and-paste value shown in `hardshare config -l` here'))
config_parser.add_argument('--add-key', metavar='FILE',
dest='new_api_token',
help='add new account key')
config_parser.add_argument('--add-ssh-path', metavar='PATH',
dest='new_ssh_path',
help='add path to SSH key pair (does NOT copy the key)')
config_parser.add_argument('--add-raw-device', metavar='PATH', type=str,
dest='raw_device_path', default=None,
help='add device file to present in container')
config_parser.add_argument('--cprovider', metavar='CPROVIDER', type=str,
dest='cprovider', default=None,
help='select a container provider: docker, podman, proxy')
config_parser.add_argument('--assign-image', metavar='IMG', type=str,
dest='cprovider_img', default=None,
help='assign image for cprovider to use (advanced option)')
config_parser.add_argument('--rm-raw-device', metavar='PATH', type=str,
dest='remove_raw_device_path', default=None,
help='remove device previously marked for inclusion in container')
config_parser.add_argument('--add-init-inside', metavar='CMD', type=str,
dest='add_init_inside', default=None,
help='add command to be executed inside container')
config_parser.add_argument('--rm-init-inside', action='store_true', default=False,
dest='rm_init_inside',
help='remove (empty) list of commands for inside initialization')
config_parser.add_argument('-p', '--prune', action='store_true', default=False,
dest='prune_err_keys',
help=('delete files in local key directory that'
' are not valid; to get list of'
' files with errors, try `--list`'))
config_parser.add_argument('-l', '--list', action='store_true', default=False,
dest='list_config',
help='list configuration')
config_parser.add_argument('--local', action='store_true', default=False,
dest='only_local_config',
help='only show local configuration data')
config_parser.add_argument('--include-dissolved', action='store_true', default=False,
dest='include_dissolved',
help='include configuration data of dissolved workspace deployments')
config_parser.add_argument('--declare', metavar='ID',
dest='declared_wdeployment_id', default=None,
help=('declare that workspace deployment is'
' hosted here. (this only works if it'
' has been previously registered under'
' the same user account.)'))
rules_commanddesc = 'modify access rules (also known as capabilities or permissions)'
rules_parser = subparsers.add_parser('rules',
description=rules_commanddesc,
help=rules_commanddesc)
rules_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None,
help=('id of target workspace deployment'
' (can be unique prefix); '
'this argument is not required '
'if there is only 1 workspace deployment'))
rules_parser.add_argument('-l', '--list', action='store_true', default=False,
dest='list_rules',
help='list all rules')
rules_parser.add_argument('--permit-me', action='store_true', default=False,
dest='add_rule_permit_me',
help='permit instantiations by you (the owner)')
rules_parser.add_argument('--drop-all', action='store_true', default=False,
dest='drop_all_rules',
help=('remove all access rules; '
'note that access is denied by default, '
'including to you (the owner)'))
rules_parser.add_argument('--permit-all', action='store_true', default=False,
dest='add_rule_permit_all',
help='permit instantiations by anyone')
register_commanddesc = 'register new workspace deployment'
register_parser = subparsers.add_parser('register',
description=register_commanddesc,
help=register_commanddesc)
register_parser.add_argument('--permit-more', action='store_false', default=True,
dest='register_at_most_one',
help=('permit registration of more than 1 wdeployment; '
'default is to fail if local configuration already '
'has wdeployment declared'))
check_commanddesc = 'check registration of this workspace deployment'
check_parser = subparsers.add_parser('check',
description=check_commanddesc,
help=check_commanddesc)
check_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None,
help=('id of workspace deployment to check'
' (can be unique prefix)'))
dissolve_commanddesc = ('dissolve this workspace deployment, making it'
' unavailable for any future use'
' (THIS CANNOT BE UNDONE)')
dissolve_parser = subparsers.add_parser('dissolve',
description=dissolve_commanddesc,
help=dissolve_commanddesc)
dissolve_parser.add_argument('wdid', metavar='ID', nargs='?', default=None,
help='id of workspace deployment to dissolve')
status_commanddesc = 'get status of local instances and daemon'
status_parser = subparsers.add_parser('status',
description=status_commanddesc,
help=status_commanddesc)
status_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None,
help=('id of target workspace deployment'
' (can be unique prefix)'))
advertise_commanddesc = 'advertise availability, accept new instances'
advertise_parser = subparsers.add_parser('ad',
description=advertise_commanddesc,
help=advertise_commanddesc)
advertise_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None,
help=('id of workspace deployment to advertise'
' (can be unique prefix); '
'this argument is not required '
'if there is only 1 workspace deployment'))
advertise_parser.add_argument('-d', '--daemon', action='store_true', default=False,
help='detach from invoking terminal (i.e., run as daemon)',
dest='become_daemon')
attach_camera_commanddesc = 'attach camera stream to workspace deployments'
attach_camera_parser = subparsers.add_parser('attach-camera',
description=attach_camera_commanddesc,
help=attach_camera_commanddesc)
attach_camera_parser.add_argument('camera', default=0,
type=int,
help=('on Linux, 0 typically implies /dev/video0; '
'if you only have one camera, then try 0'))
attach_camera_parser.add_argument('id_prefix', metavar='ID', nargs='*', default=None,
help=('id of workspace deployment on which to attach'
' (can be unique prefix); '
'this argument is not required '
'if there is only 1 workspace deployment'))
attach_camera_parser.add_argument('--width-height', metavar='W,H', type=str,
dest='attach_camera_res', default=None,
help=('width and height of captured images; '
'default depends on the supporting drivers'))
attach_camera_parser.add_argument('--crop', metavar='CROPCONFIG', type=str,
dest='attach_camera_crop_config', default=None,
help=('image crop configuration; '
'default: all wdeployments get full images'))
attach_camera_parser.add_argument('-d', '--daemon', action='store_true', default=False,
help='detach from invoking terminal (i.e., run as daemon)',
dest='become_daemon')
stop_cameras_commanddesc = 'stop camera streams previously started by attach-camera'
stop_cameras_parser = subparsers.add_parser('stop-cameras',
description=stop_cameras_commanddesc,
help=stop_cameras_commanddesc)
stop_cameras_parser.add_argument('-a', '--all', action='store_true', default=False,
help=('stop all attached cameras associated with this '
'user account, whether or not started on this host'),
dest='all_cameras')
addon_cmdsh_commanddesc = 'manage add-on cmdsh for your workspace deployments'
addon_cmdsh_parser = subparsers.add_parser('addon-cmdsh',
description=addon_cmdsh_commanddesc,
help=addon_cmdsh_commanddesc)
addon_cmdsh_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None,
help=('id of workspace deployment'
' (can be unique prefix); '
'this argument is not required '
'if there is only 1 workspace deployment'))
addon_cmdsh_parser.add_argument('--add', action='store_true', default=False,
help='add add-on cmdsh to enable terminal access via WebSockets',
dest='add_addon_cmdsh')
addon_cmdsh_parser.add_argument('--rm', action='store_true', default=False,
help='remove add-on cmdsh',
dest='rm_addon_cmdsh')
addon_vnc_commanddesc = 'manage add-on vnc for your workspace deployments'
addon_vnc_parser = subparsers.add_parser('addon-vnc',
description=addon_vnc_commanddesc,
help=addon_vnc_commanddesc)
addon_vnc_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None,
help=('id of workspace deployment'
' (can be unique prefix); '
'this argument is not required '
'if there is only 1 workspace deployment'))
addon_vnc_parser.add_argument('--add', action='store_true', default=False,
help='add add-on vnc to enable VNC via rerobots.net',
dest='add_addon_vnc')
addon_vnc_parser.add_argument('--rm', action='store_true', default=False,
help='remove add-on vnc',
dest='rm_addon_vnc')
addon_mistyproxy_commanddesc = 'manage add-on mistyproxy for your workspace deployments'
addon_mistyproxy_parser = subparsers.add_parser('addon-mistyproxy',
description=addon_mistyproxy_commanddesc,
help=addon_mistyproxy_commanddesc)
addon_mistyproxy_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None,
help=('id of workspace deployment'
' (can be unique prefix); '
'this argument is not required '
'if there is only 1 workspace deployment'))
addon_mistyproxy_parser.add_argument('--add', action='store_true', default=False,
help='add add-on mistyproxy to allow HTTP proxy to Misty robots',
dest='add_addon_mistyproxy')
addon_mistyproxy_parser.add_argument('--ip', metavar='ADDRESS', default=None,
help='IP address of the Misty robot',
dest='targetaddr')
addon_mistyproxy_parser.add_argument('--rm', action='store_true', default=False,
help='remove add-on mistyproxy',
dest='rm_addon_mistyproxy')
terminate_commanddesc = 'mark as unavailable; optionally wait for current instance to finish'
terminate_parser = subparsers.add_parser('stop-ad',
description=terminate_commanddesc,
help=terminate_commanddesc)
terminate_parser.add_argument('id_prefix', metavar='ID', nargs='?', default=None,
help=('id of target workspace deployment'
' (can be unique prefix)'))
terminate_parser.add_argument('-f', '--force', action='store_true', default=False,
help=('if there is an active instance, then'
' stop it without waiting'),
dest='force_terminate')
help_message_purge = ('if the server indicates that an instance is active,'
' but there is not one or it is otherwise in a'
' non-recoverable state, then mark it remotely as'
' terminated and attempt local clean-up; this'
' command is a last resort. First, try `hardshare'
' terminate` without --purge.')
terminate_parser.add_argument('--purge', action='store_true', default=False,
help=help_message_purge,
dest='purge_supposed_instance')
argv_parsed = argparser.parse_args(argv)
if argv_parsed.print_version or argv_parsed.command == 'version':
from . import __version__ as hardshare_pkg_version
print(hardshare_pkg_version)
return 0
elif argv_parsed.command is None or argv_parsed.command == 'help':
if hasattr(argv_parsed, 'help_target_command') and argv_parsed.help_target_command is not None:
if argv_parsed.help_target_command == 'config':
config_parser.print_help()
elif argv_parsed.help_target_command == 'rules':
rules_parser.print_help()
elif argv_parsed.help_target_command == 'register':
register_parser.print_help()
elif argv_parsed.help_target_command == 'check':
check_parser.print_help()
elif argv_parsed.help_target_command == 'dissolve':
dissolve_parser.print_help()
elif argv_parsed.help_target_command == 'status':
status_parser.print_help()
elif argv_parsed.help_target_command == 'attach-camera':
attach_camera_parser.print_help()
elif argv_parsed.help_target_command == 'stop-cameras':
stop_cameras_parser.print_help()
elif argv_parsed.help_target_command == 'addon-cmdsh':
addon_cmdsh_parser.print_help()
elif argv_parsed.help_target_command == 'addon-vnc':
addon_vnc_parser.print_help()
elif argv_parsed.help_target_command == 'addon-mistyproxy':
addon_mistyproxy_parser.print_help()
elif argv_parsed.help_target_command == 'ad':
advertise_parser.print_help()
elif argv_parsed.help_target_command == 'stop-ad':
terminate_parser.print_help()
else:
argparser.print_help()
else:
argparser.print_help()
return 0
if argv_parsed.verbose:
pkglogger.setLevel(logging.DEBUG)
if argv_parsed.output_format is not None:
output_format = argv_parsed.output_format.lower()
if output_format not in ['yaml', 'json']:
print('output format unrecognized: {}'.format(argv_parsed.output_format))
return 1
else:
output_format = None
try:
ac = HSAPIClient()
except:
ac = None
if argv_parsed.command == 'status':
try:
config = get_local_config()
except:
print('error loading configuration data. does it exist?')
return 1
if argv_parsed.id_prefix is None:
if len(config['wdeployments']) == 0:
findings = [WorkspaceInstance.inspect_instance()]
else:
findings = []
for wd in config['wdeployments']:
findings.append(WorkspaceInstance.inspect_instance(wdeployment=wd))
else:
findings = []
for m in find_wd(config, argv_parsed.id_prefix, one_or_none=False):
findings.append(WorkspaceInstance.inspect_instance(wdeployment=config['wdeployments'][m]))
if output_format == 'json':
print(json.dumps(findings))
else: # output_format == 'yaml'
print(yaml.dump(findings, default_flow_style=False))
elif argv_parsed.command == 'attach-camera':
config, indices, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
wdeployments = [config['wdeployments'][jj]['id'] for jj in indices]
local_keys = list_local_keys()
if len(local_keys) < 1:
print('No valid keys available. Check: `hardshare config -l`')
return 1
with open(local_keys[0], 'rt') as fp:
tok = fp.read().strip()
if argv_parsed.attach_camera_res:
width, height = [int(x) for x in argv_parsed.attach_camera_res.split(',')]
if width < 1 or height < 1:
print('Width, height must be positive')
return 1
else:
width, height = None, None
if argv_parsed.attach_camera_crop_config:
crop = json.loads(argv_parsed.attach_camera_crop_config)
else:
crop = None
if argv_parsed.become_daemon:
if os.fork() != 0:
return 0
os.close(0)
os.close(1)
os.close(2)
try:
camera_main(wdeployments, tok=tok, dev=argv_parsed.camera, width=width, height=height, crop=crop)
except ConnectionError:
if not argv_parsed.become_daemon:
print('ERROR: failed to reach server. Are you connected to the Internet?')
return 1
elif argv_parsed.command == 'stop-cameras':
local_keys = list_local_keys()
if len(local_keys) < 1:
print('No valid keys available. Check: `hardshare config -l`')
return 1
with open(local_keys[0], 'rt') as fp:
tok = fp.read().strip()
try:
stop_cameras(tok, allcam=argv_parsed.all_cameras)
except ConnectionError:
print('ERROR: failed to reach server. Are you connected to the Internet?')
return 1
elif argv_parsed.command == 'addon-cmdsh':
if ac is None:
print('cannot register without initial local configuration.'
' (try `hardshare config --create`)')
return 1
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
wdeployment_id = config['wdeployments'][index]['id']
local_keys = list_local_keys()
if len(local_keys) < 1:
print('No valid keys available. Check: `hardshare config -l`')
return 1
with open(local_keys[0], 'rt') as fp:
tok = fp.read().strip()
try:
if argv_parsed.add_addon_cmdsh:
add_cmdsh(wdeployment_id, tok)
elif argv_parsed.rm_addon_cmdsh:
rm_cmdsh(wdeployment_id, tok)
else:
print('Use `hardshare addon-cmdsh` with a switch.')
print('To get a help message, enter\n\n hardshare help addon-cmdsh')
return 1
except ValueError as err:
print('ERROR: {}'.format(err))
return 1
elif argv_parsed.command == 'addon-vnc':
if ac is None:
print('cannot register without initial local configuration.'
' (try `hardshare config --create`)')
return 1
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
wdeployment_id = config['wdeployments'][index]['id']
local_keys = list_local_keys()
if len(local_keys) < 1:
print('No valid keys available. Check: `hardshare config -l`')
return 1
with open(local_keys[0], 'rt') as fp:
tok = fp.read().strip()
try:
if argv_parsed.add_addon_vnc:
add_vnc(wdeployment_id, tok)
elif argv_parsed.rm_addon_vnc:
rm_vnc(wdeployment_id, tok)
else:
print('Use `hardshare addon-vnc` with a switch.')
print('To get a help message, enter\n\n hardshare help addon-vnc')
return 1
except ValueError as err:
print('ERROR: {}'.format(err))
return 1
elif argv_parsed.command == 'addon-mistyproxy':
if ac is None:
print('cannot register without initial local configuration.'
' (try `hardshare config --create`)')
return 1
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
wdeployment_id = config['wdeployments'][index]['id']
local_keys = list_local_keys()
if len(local_keys) < 1:
print('No valid keys available. Check: `hardshare config -l`')
return 1
with open(local_keys[0], 'rt') as fp:
tok = fp.read().strip()
try:
if argv_parsed.add_addon_mistyproxy:
if argv_parsed.targetaddr is None:
print('--ip is required with --add')
return 1
add_mistyproxy(wdeployment_id, tok, argv_parsed.targetaddr)
elif argv_parsed.rm_addon_mistyproxy:
rm_mistyproxy(wdeployment_id, tok)
else:
print('Use `hardshare addon-mistyproxy` with a switch.')
print('To get a help message, enter\n\n hardshare help addon-mistyproxy')
return 1
except ValueError as err:
print('ERROR: {}'.format(err))
return 1
elif argv_parsed.command == 'ad':
if ac is None:
print('cannot register without initial local configuration.'
' (try `hardshare config --create`)')
return 1
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
if 'ssh_key' not in config or config['ssh_key'] is None:
print('WARNING: local configuration does not declare SSH key.\n'
'Instances with connection type sshtun cannot launch.')
pkglogger.removeHandler(loghandler)
if argv_parsed.become_daemon:
if os.fork() != 0:
return 0
os.close(0)
os.close(1)
os.close(2)
else:
pkglogger.addHandler(logging.StreamHandler())
logfname = 'hardshare_client.{}.log'.format(config['wdeployments'][index]['id'])
loghandler = logging.FileHandler(filename=logfname, mode='a', delay=True)
loghandler.setLevel(logging.DEBUG)
loghandler.setFormatter(logging.Formatter('%(name)s.%(funcName)s (%(levelname)s) (pid: {});'
' %(asctime)s ; %(message)s'
.format(os.getpid())))
pkglogger.addHandler(loghandler)
return ac.run_sync(config['wdeployments'][index]['id'])
elif argv_parsed.command == 'stop-ad':
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
if argv_parsed.purge_supposed_instance:
cprovider = config['wdeployments'][index]['cprovider']
if cprovider == 'proxy':
print('--purge not supported for cprovider `proxy`')
return 1
elif cprovider not in ['docker', 'podman']:
print('unknown cprovider: {}'.format(cprovider))
return 1
findings = WorkspaceInstance.inspect_instance(wdeployment=config['wdeployments'][index])
if 'container' in findings:
try:
subprocess.check_call([cprovider, 'rm', '-f',
findings['container']['name']],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
except:
print('failed to stop container `{}`'.format(findings['container']['name']))
return 1
return 0
else:
print('failed to detect local instance')
return 1
else:
if ac is None:
print('cannot terminate without valid API client')
return 1
try:
ac.terminate(config['wdeployments'][index]['id'])
except FileNotFoundError:
print('ERROR: cannot reach daemon. Does it exist? (Try `hardshare status`)')
return 1
return 0
elif argv_parsed.command == 'register':
if ac is None:
print('cannot register without initial local configuration.'
' (try `hardshare config --create`)')
return 1
try:
print(ac.register_new(at_most_one=argv_parsed.register_at_most_one))
except HSError as err:
print('ERROR: {}'.format(err))
return 1
except ConnectionError:
print('ERROR: failed to reach server. Are you connected to the Internet?')
return 1
elif argv_parsed.command == 'rules':
if ac is None:
print('no local configuration found. (try `hardshare config -h`)')
return 1
if argv_parsed.id_prefix is None:
wdid = None
else:
try:
wdid = str(uuid.UUID(argv_parsed.id_prefix))
except:
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
print('The given ID does not appear to be valid.')
return 1
wdid = config['wdeployments'][index]['id']
if argv_parsed.list_rules:
try:
res = ac.get_access_rules(wdid)
except Exception as err:
print('{}'.format(err))
return 1
if 'err' in res:
if res['err'] == 'wrong authorization token':
print('wrong API token. Did it expire?')
else:
print(res['err'])
return 1
res['comments'] = [
'Access is denied unless a rule explicitly permits it.',
]
if output_format == 'json':
print(json.dumps(res))
else: # output_format == 'yaml'
print(yaml.dump(res, default_flow_style=False))
elif argv_parsed.drop_all_rules or argv_parsed.add_rule_permit_me:
try:
if argv_parsed.drop_all_rules:
ac.drop_access_rules(wdid)
elif argv_parsed.add_rule_permit_me:
ac.add_access_rule(wdid)
except Exception as err:
print('{}'.format(err))
return 1
elif argv_parsed.add_rule_permit_all:
ui_input = None
while ui_input not in ('y', 'yes'):
print('Do you want to permit access by anyone? [y/N] ', end='')
ui_input = input().lower()
if ui_input in ('n', 'no', ''):
return 1
try:
ac.add_access_rule(wdid, to_user='*')
except Exception as err:
print('{}'.format(err))
return 1
else:
print('Use `hardshare rules` with a switch. For example, `hardshare rules -l`')
print('or to get a help message, enter\n\n hardshare help rules')
return 1
elif argv_parsed.command == 'check':
if ac is None:
print('no local configuration found. (try `hardshare config -h`)')
return 1
try:
res = ac.check_registration(argv_parsed.id_prefix)
except:
print('Error occurred while contacting remote server '
'at {}'.format(ac.base_uri))
return 1
if 'err' in res:
if res['err'] == 'not found':
print('not found: workspace deployment with id prefix {}'
.format(res['id_prefix']))
elif res['err'] == 'wrong authorization token':
print('wrong API token. Did it expire?')
else:
print(res['err'])
return 1
else:
print('summary of workspace deployment {}'.format(res['id']))
print('\tcreated: {}'.format(res['date_created']))
print('\torigin (address) of registration: {}'.format(res['origin']))
if 'date_dissolved' in res:
print('\tdissolved: {}'.format(res['date_dissolved']))
elif argv_parsed.command == 'dissolve':
if ac is None:
print('no local configuration found. (try `hardshare config -h`)')
return 1
try:
wdid = str(uuid.UUID(argv_parsed.wdid))
except:
print('The given ID does not appear to be valid.')
return 1
ui_input = None
while ui_input not in ('y', 'yes'):
print(('Do you want to dissolve {}? This action cannot be undone. '
'[y/N] ').format(wdid), end='')
ui_input = input().lower()
if ui_input in ('n', 'no', ''):
return 1
try:
res = ac.dissolve_registration(wdid)
except:
print('Error occurred while contacting remote server '
'at {}'.format(ac.base_uri))
return 1
if 'err' in res:
if res['err'] == 'not found':
print('not found: workspace deployment with id prefix {}'
.format(res['id_prefix']))
elif res['err'] == 'wrong authorization token':
print('wrong API token. Did it expire?')
else:
print(res['err'])
return 1
# Remove from local configuration, if present
rm_wd(get_local_config(), wdid, save=True)
elif argv_parsed.command == 'config':
if argv_parsed.list_config:
try:
config = get_local_config(create_if_empty=argv_parsed.create_config,
collect_errors=True)
except:
print('error loading configuration data.'
' does it exist? is it broken?')
return 1
if not argv_parsed.only_local_config:
# Try to get remote config, given possibly new local config
try:
assert ac is not None
remote_config = ac.get_remote_config(include_dissolved=argv_parsed.include_dissolved)
except HSError as err:
print('Error: {}'.format(err))
return 1
except:
print('Error occurred while contacting rerobots servers')
print('Try config -l --local to only get local information')
return 1
config = {
'local': config,
'remote': remote_config,
}
if 'local' in config:
ref = config['local']['wdeployments']
else:
ref = config['wdeployments']
for jj, wdeployment in enumerate(ref):
ref[jj]['url'] = 'https://rerobots.net/workspace/{}'.format(wdeployment['id'])
if output_format == 'json':
print(json.dumps(config))
elif output_format == 'yaml':
print(yaml.dump(config, default_flow_style=False))
else:
if 'local' not in config:
config = {
'local': config,
'remote': None,
}
print('workspace deployments defined in local configuration:')
if len(config['local']['wdeployments']) == 0:
print('\t(none)')
else:
for wdeployment in config['local']['wdeployments']:
print('{}\n\turl: {}\n\towner: {}\n\tcprovider: {}\n\tcargs: {}'.format(
wdeployment['id'],
wdeployment['url'],
wdeployment['owner'],
wdeployment['cprovider'],
wdeployment['cargs'],
))
if wdeployment['cprovider'] in ['docker', 'podman']:
print('\timg: {}'.format(wdeployment['image']))
if wdeployment['terminate']:
print('\tterminate:')
for terminate_p in wdeployment['terminate']:
print('\t\t{}'.format(terminate_p))
print('\nfound keys:')
if len(config['local']['keys']) == 0:
print('\t(none)')
else:
print('\t' + '\n\t'.join(config['local']['keys']))
if 'err_keys' in config['local'] and len(config['local']['err_keys']) > 0:
print('found possible keys with errors:')
for err_key_path, err in config['local']['err_keys'].items():
print('\t {}: {}'.format(err, err_key_path))
if config['remote']:
if 'err' in config['remote']:
print('Error occurred while contacting remote server.')
if config['remote']['err'] == 'wrong authorization token':
print('wrong API token. Did it expire?')
else:
print(config['remote']['err'])
return 1
if len(config['remote']['deployments']) == 0:
print('\nno registered workspace deployments with this user account')
else:
print('\nregistered workspace deployments with this user account:')
for wd in config['remote']['deployments']:
print('{}'.format(wd['id']))
print('\tcreated: {}'.format(wd['date_created']))
if wd['desc'] is not None:
print('\tdesc: {}'.format(wd['desc']))
print('\torigin (address) of registration: {}'
.format(wd['origin']))
if wd['dissolved']:
print('\tdissolved: {}'.format(wd['dissolved']))
elif argv_parsed.prune_err_keys:
_, errored_keys = list_local_keys(collect_errors=True)
for err_key_path, err in errored_keys.items():
print('deleting {}...'.format(err_key_path))
os.unlink(err_key_path)
elif argv_parsed.new_api_token:
try:
add_key(argv_parsed.new_api_token)
except:
print('failed to add key')
return 1
elif argv_parsed.new_ssh_path:
try:
add_ssh_path(argv_parsed.new_ssh_path)
except:
print('ERROR: {} or {} does not exist or '
'has the wrong permissions.'.format(
argv_parsed.new_ssh_path,
argv_parsed.new_ssh_path + '.pub'
))
return 1
elif argv_parsed.create_config:
get_local_config(create_if_empty=True)
elif argv_parsed.declared_wdeployment_id is not None:
assert ac is not None
ac.declare_existing(argv_parsed.declared_wdeployment_id)
ac.sync_config()
elif argv_parsed.raw_device_path is not None:
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
cprovider = config['wdeployments'][index]['cprovider']
if cprovider == 'proxy':
print('--add-raw-device not supported for cprovider `proxy`')
return 1
elif cprovider not in ['docker', 'podman']:
print('unknown cprovider: {}'.format(cprovider))
return 1
if not os.path.exists(argv_parsed.raw_device_path):
print('ERROR: given device file does not exist')
return 1
carg = '--device={D}:{D}'.format(D=argv_parsed.raw_device_path)
config['wdeployments'][index]['cargs'].append(carg)
modify_local(config)
elif argv_parsed.remove_raw_device_path is not None:
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
carg = '--device={D}:{D}'.format(D=argv_parsed.remove_raw_device_path)
config['wdeployments'][index]['cargs'].remove(carg)
modify_local(config)
elif argv_parsed.add_init_inside is not None:
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
cprovider = config['wdeployments'][index]['cprovider']
if cprovider == 'proxy':
print('--add-init-inside not supported for cprovider `proxy`')
return 1
elif cprovider not in ['docker', 'podman']:
print('unknown cprovider: {}'.format(cprovider))
return 1
config['wdeployments'][index]['init_inside'].append(argv_parsed.add_init_inside)
modify_local(config)
elif argv_parsed.rm_init_inside:
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
cprovider = config['wdeployments'][index]['cprovider']
if cprovider == 'proxy':
print('--rm-init-inside not supported for cprovider `proxy`')
return 1
elif cprovider not in ['docker', 'podman']:
print('unknown cprovider: {}'.format(cprovider))
return 1
config['wdeployments'][index]['init_inside'] = []
modify_local(config)
elif argv_parsed.cprovider is not None:
selected_cprovider = argv_parsed.cprovider.lower()
if selected_cprovider not in ['docker', 'podman', 'proxy']:
print('ERROR: cprovider must be one of the following: docker, podman, proxy')
return 1
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
config['wdeployments'][index]['cprovider'] = selected_cprovider
if selected_cprovider == 'proxy':
config['wdeployments'][index]['image'] = None
else: # selected_cprovider \in {docker, podman}
if config['wdeployments'][index]['image'] is None:
config['wdeployments'][index]['image'] = 'rerobots/hs-generic'
modify_local(config)
elif argv_parsed.cprovider_img is not None:
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
cprovider = config['wdeployments'][index]['cprovider']
if cprovider not in ['docker', 'podman', 'proxy']:
print('unknown cprovider: {}'.format(cprovider))
return 1
if cprovider == 'podman':
cp_images = subprocess.run([cprovider, 'image', 'exists', argv_parsed.cprovider_img])
if cp_images.returncode != 0:
print('ERROR: given image name is not recognized by cprovider')
return 1
elif cprovider == 'docker':
cp_images = subprocess.run([cprovider, 'image', 'inspect', argv_parsed.cprovider_img],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
if cp_images.returncode != 0:
print('ERROR: given image name is not recognized by cprovider')
return 1
else: # cprovider == 'proxy'
print('ERROR: --assign-image not supported for cprovider `proxy`')
return 1
config['wdeployments'][index]['image'] = argv_parsed.cprovider_img
modify_local(config)
elif argv_parsed.add_terminate_prog is not None:
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
normalized_path = os.path.abspath(argv_parsed.add_terminate_prog)
if not os.path.exists(normalized_path):
print('ERROR: given path does not exist')
return 1
config['wdeployments'][index]['terminate'].append(normalized_path)
modify_local(config)
elif argv_parsed.rm_terminate_prog is not None:
config, index, rc = get_config_with_index(argv_parsed.id_prefix)
if rc != 0:
return rc
config['wdeployments'][index]['terminate'].remove(argv_parsed.rm_terminate_prog)
modify_local(config)
else:
print('Use `hardshare config` with a switch. For example, `hardshare config -l`')
print('or to get a help message, enter\n\n hardshare help config')
return 1
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
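# Illustrative CLI invocations (added for clarity; not part of the original
# module). The switch spellings are inferred from the branches and error
# messages handled above, and the id prefix is hypothetical:
#
#   hardshare config -l                        # list local and remote configuration
#   hardshare config --add-raw-device /dev/ttyUSB0
#   hardshare check 2d6039bc                   # summarize a workspace deployment
#   hardshare dissolve <full-deployment-uuid>  # permanently dissolve a registration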
| 47.784442
| 109
| 0.530459
| 5,236
| 50,986
| 4.998472
| 0.099694
| 0.04394
| 0.031178
| 0.020174
| 0.544551
| 0.473254
| 0.417737
| 0.373338
| 0.358933
| 0.3349
| 0
| 0.004822
| 0.373573
| 50,986
| 1,066
| 110
| 47.829268
| 0.814615
| 0.015906
| 0
| 0.438634
| 0
| 0.001067
| 0.244312
| 0.003151
| 0
| 0
| 0
| 0
| 0.002134
| 1
| 0.002134
| false
| 0
| 0.020277
| 0
| 0.123799
| 0.149413
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1612e716ac963ff1c93e60be69cd7a089a9ba5ac
| 3,870
|
py
|
Python
|
app/realty.py
|
JenBanks8585/Labs_CitySpireDS
|
4755bd5ce718ee2f65f6a53a5918bd0cf18b2ddf
|
[
"MIT"
] | null | null | null |
app/realty.py
|
JenBanks8585/Labs_CitySpireDS
|
4755bd5ce718ee2f65f6a53a5918bd0cf18b2ddf
|
[
"MIT"
] | null | null | null |
app/realty.py
|
JenBanks8585/Labs_CitySpireDS
|
4755bd5ce718ee2f65f6a53a5918bd0cf18b2ddf
|
[
"MIT"
] | null | null | null |
"""Realty Info"""
import os
import requests
from dotenv import load_dotenv
from fastapi import APIRouter, Depends
import sqlalchemy
from pydantic import BaseModel, SecretStr
from app import config
from app.walk_score import *
load_dotenv()
router = APIRouter()
headers = {'x-rapidapi-key': os.getenv('api_key'),
'x-rapidapi-host': os.getenv('host') }
@router.get('/streamlined_rent_list')
async def streamlined_rent_list(api_key = config.settings.api_key,
city: str = "New York City",
state: str= "NY",
prop_type: str = "condo",
limit: int = 4):
"""
Parameters:
api_key
city: str
state: str
prop_type: str ('condo', 'single_family', 'multi_family')
limit: int number of results to populate
Returns:
information about properties for rent
"""
url = os.getenv('url_list_for_rent')
querystring = {"city": city,
"state_code": state,
"limit": limit,
"offset": "0",
"sort":"relevance",
"prop_type": prop_type}
response_for_rent = requests.request("GET", url, params = querystring, headers = headers,)
response = response_for_rent.json()['properties']
rental_list = []
    for i in range(min(limit, len(response))):  # guard against the API returning fewer results than requested
line = response[i]['address']['line']
city = response[i]['address']['city']
state = response[i]['address']['state']
lat = response[i]['address']['lat']
lon = response[i]['address']['lon']
photos = response[i]['photos']
address = line +" "+ city + " "+ state
walk_score = just_walk_score(address, lat, lon)
element = {'address': address,
'lat': lat,
'lon': lon,
'city':city,
'state':state,
'photos': photos,
'walk_score': walk_score}
rental_list.append(element)
return rental_list
@router.get('/for_rent_list')
async def for_rent_list(api_key = config.settings.api_key,
city: str = "New York City",
state: str= "NY",
prop_type: str = "condo",
limit: int = 4):
"""
Parameters:
api_key
city: str
state: str
prop_type: str ('condo', 'single_family', 'multi_family')
limit: int number of results to populate
Returns:
information about properties for rent
"""
url = os.getenv('url_list_for_rent')
querystring = {"city": city,
"state_code": state,
"limit": limit,
"offset": "0",
"sort":"relevance",
"prop_type": prop_type}
response_for_rent = requests.request("GET", url, params = querystring, headers = headers,)
return response_for_rent.json()['properties']
@router.get('/for_rent_list/{property_id}')
async def property_detail(property_id: str = "O3599084026"):
"""
Parameters:
property_id
Returns:
detailed information about the property
"""
url = os.getenv('url_property_detail')
querystring = {"property_id":property_id}
response_prop_detail = requests.request("GET", url, headers=headers, params=querystring)
return response_prop_detail.json()['properties']
@router.get('/for_sale_list')
async def for_sale_list(api_key = config.settings.api_key,
city = "New York City",
state= "NY",
limit = 4):
url = os.getenv('url_list_for_sale')
querystring = {"city": city ,"limit": limit,"offset":"0","state_code": state,"sort":"relevance"}
response_for_sale = requests.request("GET", url, headers=headers, params=querystring)
return response_for_sale.json()['properties']
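# Illustrative usage sketch (added for clarity; not part of the original
# module). Once this router is mounted on the FastAPI app, the endpoints can
# be exercised roughly as follows; the host, port, and route prefix are
# assumptions, and the query parameters mirror the function signatures above.
#
#   import requests
#   r = requests.get('http://localhost:8000/streamlined_rent_list',
#                    params={'city': 'Denver', 'state': 'CO',
#                            'prop_type': 'condo', 'limit': 2})
#   print(r.json())  # list of dicts: address, lat, lon, city, state, photos, walk_score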
| 28.880597
| 100
| 0.575969
| 432
| 3,870
| 4.969907
| 0.210648
| 0.035864
| 0.023288
| 0.02422
| 0.518398
| 0.460643
| 0.450862
| 0.450862
| 0.435026
| 0.435026
| 0
| 0.005865
| 0.29509
| 3,870
| 133
| 101
| 29.097744
| 0.781158
| 0.002842
| 0
| 0.315789
| 0
| 0
| 0.170951
| 0.015485
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.105263
| 0
| 0.157895
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
16156ec4833837e6239f5128828011fb974363b0
| 5,868
|
py
|
Python
|
fast_lemon_api_test.py
|
a6502/fast_lemon_api
|
09a5b6eec3e84d1d006f927e502a7071a28739cc
|
[
"Unlicense"
] | null | null | null |
fast_lemon_api_test.py
|
a6502/fast_lemon_api
|
09a5b6eec3e84d1d006f927e502a7071a28739cc
|
[
"Unlicense"
] | null | null | null |
fast_lemon_api_test.py
|
a6502/fast_lemon_api
|
09a5b6eec3e84d1d006f927e502a7071a28739cc
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env pytest-3
from fastapi.testclient import TestClient
from fast_lemon_api import app
client = TestClient(app)
def test_get_root():
response = client.get("/")
assert response.status_code == 200
assert response.text == "Welcome to the fast-lemon-api!\n"
neworder = {
"isin": "blablablabla",
"limit_price": 0.2,
"side": "buy",
"quantity": 1,
"valid_until": 1996943663,
"status": "open"
}
order_id = None
def test_post_orders1():
response = client.post('/orders/',
json={
"isin": "blablablabla",
"limit_price": 0.2,
"side": "buy",
"quantity": 1,
"valid_until": 1996943663,
})
assert response.status_code == 201
j = response.json()
#print(repr(j))
order_id = j.pop('uuid')
assert j == neworder
#assert 0
def test_post_orders2():
response = client.post('/orders/',
json={
"isin": "blablabla",
"limit_price": 0.2,
"side": "buy",
"quantity": 1,
"valid_until": 1996950863
})
assert response.status_code == 422
assert response.json() == {
'detail': [{
'loc': ['body', 'isin'],
'msg': 'ensure this value has at least 12 characters',
'type': 'value_error.any_str.min_length',
'ctx': {
'limit_value': 12
}
}]
}
def test_post_orders3():
response = client.post('/orders/',
json={
"isin": "blablablablabla",
"limit_price": 0.2,
"side": "buy",
"quantity": 1,
"valid_until": 1996950863
})
assert response.status_code == 422
assert response.json() == {
'detail': [{
'ctx': {
'limit_value': 12
},
'loc': ['body', 'isin'],
'msg': 'ensure this value has at most 12 characters',
'type': 'value_error.any_str.max_length'
}]
}
def test_post_orders4():
response = client.post('/orders/',
json={
"isin": "blablablabla",
"limit_price": -1,
"side": "buy",
"quantity": 1,
"valid_until": 1996950863
})
assert response.status_code == 422
assert response.json() == {
'detail': [{
'ctx': {
'limit_value': 0
},
'loc': ['body', 'limit_price'],
'msg': 'ensure this value is greater than 0',
'type': 'value_error.number.not_gt'
}]
}
def test_post_orders5():
response = client.post('/orders/',
json={
"isin": "blablablabla",
"limit_price": 0.2,
"side": "BUY!",
"quantity": 1,
"valid_until": 1996950863
})
assert response.status_code == 422
assert response.json() == {
'detail': [{
'ctx': {
'enum_values': ['buy', 'sell']
},
'loc': ['body', 'side'],
'msg':
"value is not a valid enumeration member; permitted: 'buy', 'sell'",
'type': 'type_error.enum'
}]
}
def test_post_orders6():
response = client.post('/orders/',
json={
"isin": "blablablabla",
"limit_price": 0.33333,
"side": "SELL",
"quantity": 0,
"valid_until": 1996950863
})
assert response.status_code == 422
assert response.json() == {
'detail': [{
'ctx': {
'limit_value': 0
},
'loc': ['body', 'quantity'],
'msg': 'ensure this value is greater than 0',
'type': 'value_error.number.not_gt'
}]
}
def test_post_orders8():
response = client.post('/orders/',
json={
"isin": "blablablabla",
"limit_price": 0.2,
"side": "SELL",
"quantity": 1.1,
"valid_until": 1996950863
})
assert response.status_code == 422
assert response.json() == {
'detail': [{
'loc': ['body', 'quantity'],
'msg': 'value is not a valid integer',
'type': 'type_error.integer'
}]
}
def test_post_orders7():
response = client.post('/orders/',
json={
"isin": "blablablabla",
"limit_price": 0.2,
"side": "SELL",
"quantity": 2,
"valid_until": 1996
})
assert response.status_code == 422
assert response.json() == {
'detail': [{
'loc': ['body', 'valid_until'],
'msg': 'valid_until cannot be in the past',
'type': 'value_error'
}]
}
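# Note (added for clarity; not part of the original test module): the
# module-level `order_id` is never actually updated, because the assignment in
# test_post_orders1 creates a local variable rather than rebinding the global;
# a `global order_id` statement would be needed if later tests were meant to
# reuse the created order's uuid.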
| 30.5625
| 80
| 0.387014
| 461
| 5,868
| 4.774403
| 0.234273
| 0.108133
| 0.081781
| 0.098137
| 0.669241
| 0.669241
| 0.622899
| 0.593821
| 0.593821
| 0.541572
| 0
| 0.054395
| 0.486196
| 5,868
| 191
| 81
| 30.722513
| 0.675622
| 0.007498
| 0
| 0.640244
| 0
| 0
| 0.222642
| 0.018897
| 0
| 0
| 0
| 0
| 0.109756
| 1
| 0.054878
| false
| 0
| 0.012195
| 0
| 0.067073
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1616161b4c2c7495b51d0bf323d5ee79ad27b64f
| 4,999
|
py
|
Python
|
tests/regenerate_credentials.py
|
andrewkozlik/pam-u2f
|
5b504783c9af972c790bdcb506867bad7df5e333
|
[
"BSD-2-Clause"
] | null | null | null |
tests/regenerate_credentials.py
|
andrewkozlik/pam-u2f
|
5b504783c9af972c790bdcb506867bad7df5e333
|
[
"BSD-2-Clause"
] | null | null | null |
tests/regenerate_credentials.py
|
andrewkozlik/pam-u2f
|
5b504783c9af972c790bdcb506867bad7df5e333
|
[
"BSD-2-Clause"
] | null | null | null |
#!/bin/python2
import collections
import re
import subprocess
import sys
PUC = "../pamu2fcfg/pamu2fcfg"
resident = ["", "-r"]
presence = ["", "-P"]
pin = ["", "-N"]
verification = ["", "-V"]
Credential = collections.namedtuple("Credential", "keyhandle pubkey attributes oldformat")
sshformat = 0
def print_test_case(filename, sshformat, credentials):
start = """
cfg.auth_file = "{authfile}";
cfg.sshformat = {ssh};
rc = get_devices_from_authfile(&cfg, username, dev, &n_devs);
assert(rc == 1);
assert(n_devs == {devices});
"""
checks = """
assert(strcmp(dev[{i}].coseType, "es256") == 0);
assert(strcmp(dev[{i}].keyHandle, "{kh}") == 0);
assert(strcmp(dev[{i}].publicKey, "{pk}") == 0);
assert(strcmp(dev[{i}].attributes, "{attr}") == 0);
assert(dev[{i}].old_format == {old});
"""
free = """
free(dev[{i}].coseType);
free(dev[{i}].attributes);
free(dev[{i}].keyHandle);
free(dev[{i}].publicKey);
"""
end = """
memset(dev, 0, sizeof(dev_t) * {devices});
"""
code = ""
free_block = ""
code += start.format(authfile = filename, ssh = sshformat, devices = len(credentials))
for c, v in enumerate(credentials):
code += checks.format(i = c, kh = v.keyhandle, pk = v.pubkey, attr = v.attributes, old = v.oldformat)
free_block += free.format(i = c)
code += free_block + end.format(devices = len(credentials))
print(code)
# Single credentials
print >> sys.stderr, "Generating single credentials"
for r in resident:
for p in presence:
for n in pin:
for v in verification:
filename = "credentials/new_" + r + p + v + n
print >> sys.stderr, "Generating " + filename + ".templ"
line = subprocess.check_output([PUC, "-u@USERNAME@", r, p, v, n])
matches = re.match(r'^.*?:(.*?),(.*?),es256,(.*)', line, re.M)
with open(filename + ".templ", "w") as outfile:
outfile.write(line)
credentials = [Credential(keyhandle = matches.group(1),
pubkey = matches.group(2),
attributes = matches.group(3),
oldformat = 0)]
print_test_case(filename + ".cred", sshformat, credentials)
# Double credentials
print >> sys.stderr, "Generating double credentials"
for r in resident:
for p in presence:
for n in pin:
for v in verification:
filename = "credentials/new_double_" + r + p + v + n
print >> sys.stderr, "Generating " + filename + ".templ"
line = subprocess.check_output([PUC, "-u@USERNAME@", r, p, v, n])
matches = re.match(r'^.*?:(.*?),(.*?),es256,(.*)', line, re.M)
with open(filename + ".templ", "w") as outfile:
outfile.write(line)
credentials = [Credential(keyhandle = matches.group(1),
pubkey = matches.group(2),
attributes = matches.group(3),
oldformat = 0)]
line = subprocess.check_output([PUC, "-n", r, p, v, n])
matches = re.match(r'^.*?:(.*?),(.*?),es256,(.*)', line, re.M)
with open(filename + ".templ", "a") as outfile:
outfile.write(line)
credentials += [Credential(keyhandle = matches.group(1),
pubkey = matches.group(2),
attributes = matches.group(3),
oldformat = 0)]
print_test_case(filename + ".cred", sshformat, credentials)
# Mixed credentials
print >> sys.stderr, "Mixed double credentials"
options = [("", ""), ("", "-P"), ("-P", ""), ("-P", "-P")]
for p1, p2 in options:
filename = "credentials/new_mixed_" + p1 +"1" + p2 + "2"
print >> sys.stderr, "Generating " + filename + ".templ"
line = subprocess.check_output([PUC, "-u@USERNAME@", p1])
matches = re.match(r'^.*?:(.*?),(.*?),es256,(.*)', line, re.M)
with open(filename + ".templ", "w") as outfile:
outfile.write(line)
credentials = [Credential(keyhandle = matches.group(1),
pubkey = matches.group(2),
attributes = matches.group(3),
oldformat = 0)]
line = subprocess.check_output([PUC, "-n", p2])
matches = re.match(r'^.*?:(.*?),(.*?),es256,(.*)', line, re.M)
with open(filename + ".templ", "a") as outfile:
outfile.write(line)
credentials += [Credential(keyhandle = matches.group(1),
pubkey = matches.group(2),
attributes = matches.group(3),
oldformat = 0)]
print_test_case(filename + ".cred", sshformat, credentials)
| 34.475862
| 109
| 0.509302
| 520
| 4,999
| 4.840385
| 0.186538
| 0.071514
| 0.033373
| 0.047676
| 0.62654
| 0.578466
| 0.578466
| 0.578466
| 0.578466
| 0.578466
| 0
| 0.016485
| 0.320464
| 4,999
| 144
| 110
| 34.715278
| 0.724463
| 0.013803
| 0
| 0.52381
| 0
| 0
| 0.22026
| 0.099675
| 0
| 0
| 0
| 0
| 0.066667
| 1
| 0.009524
| false
| 0
| 0.038095
| 0
| 0.047619
| 0.104762
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
16173a166fd943413345036df12245c2a4ab8343
| 5,807
|
py
|
Python
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/affine_scalar_test.py
|
zhangyujing/tensorflow
|
c7a04561fb8972fb64907acc5f10f3c6d4cef9f2
|
[
"Apache-2.0"
] | 13
|
2018-07-23T18:53:35.000Z
|
2021-11-18T19:56:45.000Z
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/affine_scalar_test.py
|
zhangyujing/tensorflow
|
c7a04561fb8972fb64907acc5f10f3c6d4cef9f2
|
[
"Apache-2.0"
] | 6
|
2020-04-21T20:38:18.000Z
|
2020-06-16T01:00:15.000Z
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/affine_scalar_test.py
|
zhangyujing/tensorflow
|
c7a04561fb8972fb64907acc5f10f3c6d4cef9f2
|
[
"Apache-2.0"
] | 13
|
2018-09-07T13:28:38.000Z
|
2020-07-17T15:06:24.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Affine Scalar Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.affine_scalar import AffineScalar
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions.bijector_test_util import assert_scalar_congruency
from tensorflow.python.platform import test
class AffineScalarBijectorTest(test.TestCase):
"""Tests correctness of the Y = scale @ x + shift transformation."""
def testProperties(self):
with self.test_session():
mu = -1.
# scale corresponds to 1.
bijector = AffineScalar(shift=mu)
self.assertEqual("affine_scalar", bijector.name)
def testNoBatchScalar(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = -1.
# Corresponds to scale = 2
bijector = AffineScalar(shift=mu, scale=2.)
x = [1., 2, 3] # Three scalar samples (no batches).
self.assertAllClose([1., 3, 5], run(bijector.forward, x))
self.assertAllClose([1., 1.5, 2.], run(bijector.inverse, x))
self.assertAllClose([-np.log(2.)] * 3,
run(bijector.inverse_log_det_jacobian, x))
def testOneBatchScalarViaIdentityIn64BitUserProvidesShiftOnly(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value).astype(np.float64)
x = array_ops.placeholder(dtypes.float64, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = np.float64([1.])
# One batch, scalar.
# Corresponds to scale = 1.
bijector = AffineScalar(shift=mu)
        x = np.float64([1.])  # One sample from one batch.
self.assertAllClose([2.], run(bijector.forward, x))
self.assertAllClose([0.], run(bijector.inverse, x))
self.assertAllClose([0.], run(bijector.inverse_log_det_jacobian, x))
def testOneBatchScalarViaIdentityIn64BitUserProvidesScaleOnly(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value).astype(np.float64)
x = array_ops.placeholder(dtypes.float64, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
multiplier = np.float64([2.])
# One batch, scalar.
# Corresponds to scale = 2, shift = 0.
bijector = AffineScalar(scale=multiplier)
        x = np.float64([1.])  # One sample from one batch.
self.assertAllClose([2.], run(bijector.forward, x))
self.assertAllClose([0.5], run(bijector.inverse, x))
self.assertAllClose([np.log(0.5)],
run(bijector.inverse_log_det_jacobian, x))
def testTwoBatchScalarIdentityViaIdentity(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = [1., -1]
# Univariate, two batches.
# Corresponds to scale = 1.
bijector = AffineScalar(shift=mu)
x = [1., 1] # One sample from each of two batches.
self.assertAllClose([2., 0], run(bijector.forward, x))
self.assertAllClose([0., 2], run(bijector.inverse, x))
self.assertAllClose([0., 0.], run(bijector.inverse_log_det_jacobian, x))
def testTwoBatchScalarIdentityViaScale(self):
with self.test_session() as sess:
def static_run(fun, x):
return fun(x).eval()
def dynamic_run(fun, x_value):
x_value = np.array(x_value)
x = array_ops.placeholder(dtypes.float32, name="x")
return sess.run(fun(x), feed_dict={x: x_value})
for run in (static_run, dynamic_run):
mu = [1., -1]
# Univariate, two batches.
# Corresponds to scale = 1.
bijector = AffineScalar(shift=mu, scale=[2., 1])
x = [1., 1] # One sample from each of two batches.
self.assertAllClose([3., 0], run(bijector.forward, x))
self.assertAllClose([0., 2], run(bijector.inverse, x))
self.assertAllClose(
[-np.log(2), 0.], run(bijector.inverse_log_det_jacobian, x))
def testScalarCongruency(self):
with self.test_session():
bijector = AffineScalar(shift=3.6, scale=0.42)
assert_scalar_congruency(bijector, lower_x=-2., upper_x=2.)
if __name__ == "__main__":
test.main()
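# Note (added for clarity; not part of the original test file): for the scalar
# affine bijector Y = scale * X + shift, the inverse is (Y - shift) / scale and
# the inverse log-det-Jacobian is -log|scale| per sample. This is what the
# assertAllClose checks above verify, e.g. shift=-1, scale=2 maps x=[1, 2, 3]
# forward to [1, 3, 5], inverse to [1, 1.5, 2], with -log(2) for each sample.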
| 37.707792
| 92
| 0.646633
| 766
| 5,807
| 4.765013
| 0.203655
| 0.021918
| 0.028767
| 0.030685
| 0.590137
| 0.569041
| 0.535616
| 0.525205
| 0.493973
| 0.450959
| 0
| 0.021432
| 0.220596
| 5,807
| 153
| 93
| 37.954248
| 0.78502
| 0.200448
| 0
| 0.59596
| 0
| 0
| 0.005647
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 1
| 0.171717
| false
| 0
| 0.090909
| 0.050505
| 0.373737
| 0.010101
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
161805dd743777711d517821e54c4fec5cc46ec8
| 7,634
|
py
|
Python
|
mule/util/algorand_util.py
|
bricerisingalgorand/mule
|
721b73f691076e5c3e2ebb8a79313da486fb0f96
|
[
"MIT"
] | null | null | null |
mule/util/algorand_util.py
|
bricerisingalgorand/mule
|
721b73f691076e5c3e2ebb8a79313da486fb0f96
|
[
"MIT"
] | null | null | null |
mule/util/algorand_util.py
|
bricerisingalgorand/mule
|
721b73f691076e5c3e2ebb8a79313da486fb0f96
|
[
"MIT"
] | null | null | null |
import os
import subprocess
import json
import urllib.request
from mule.util import os_util
from mule.util import file_util
from mule.util import time_util
from mule.util import s3_util
from mule.util import semver_util
import platform
def build_algo_release_url(package_type, channel, os_type, cpu_arch_type, package_version):
return f"https://algorand-releases.s3.amazonaws.com/channel/{channel}/{package_type}_{channel}_{os_type}-{cpu_arch_type}_{package_version}.tar.gz"
def get_latest_package_version(package_type, channel, os_type, cpu_arch_type):
os_type = os_util.get_os_type()
cpu_arch_type = os_util.get_cpu_arch_type()
package_keys = list(s3_util.get_matching_s3_keys(
'algorand-releases',
f"channel/{channel}/{package_type}_{channel}_{os_type}-{cpu_arch_type}_",
'tar.gz',
s3_auth=False
))
package_versions = list(map(semver_util.parse_version, package_keys))
latest_version = semver_util.get_highest_version(package_versions)
print(f"Found latest version of package type {package_type} for channel {channel}: {latest_version}")
return latest_version
def install_node(data_dir, bin_dir, channel, node_package_version='latest'):
"""
Download and install algod.
"""
node_package_dir = file_util.ensure_folder(f"/tmp/algod-pkg-{time_util.get_timestamp()}")
data_dir = file_util.ensure_folder(data_dir)
bin_dir = file_util.ensure_folder(bin_dir)
os_type = os_util.get_os_type()
cpu_arch_type = os_util.get_cpu_arch_type()
if node_package_version == 'latest':
if channel == 'test':
node_package_version = get_latest_package_version('node', 'stable', os_type, cpu_arch_type)
else:
node_package_version = get_latest_package_version('node', channel, os_type, cpu_arch_type)
print(f"Installing {channel} node package version {node_package_version} to:\n\tbin_dir: {bin_dir}\n\tdata_dir: {data_dir}")
node_package_url = build_algo_release_url('node', channel, os_type, cpu_arch_type, node_package_version)
if channel == 'test':
node_package_url = build_algo_release_url('node', 'stable', os_type, cpu_arch_type, node_package_version)
node_package_tar_path = f"{node_package_dir}/node_package.tar.gz"
_ = urllib.request.urlretrieve(node_package_url, node_package_tar_path)
file_util.decompressTarfile(node_package_tar_path, f"{node_package_dir}")
file_util.mv_folder_contents(f"{node_package_dir}/data", data_dir)
file_util.mv_folder_contents(f"{node_package_dir}/bin", bin_dir)
if channel == 'stable':
file_util.copy_file(
os.path.join(node_package_dir, "genesis/mainnet/genesis.json"),
os.path.join(data_dir, 'genesis.json')
)
else:
file_util.copy_file(
os.path.join(node_package_dir, f"genesis/{channel}net/genesis.json"),
os.path.join(data_dir, 'genesis.json')
)
def show_node_configs(data_dir, kmd_dir):
data_dir = file_util.ensure_folder(data_dir)
kmd_dir = file_util.ensure_folder(kmd_dir)
node_config_path = f"{data_dir}/config.json"
kmd_config_path = f"{kmd_dir}/kmd_config.json"
file_util.ensure_file(node_config_path, '{}')
file_util.ensure_file(kmd_config_path, '{}')
current_node_config = file_util.read_json_file(node_config_path)
current_kmd_config = file_util.read_json_file(kmd_config_path)
print(f"Showing node configs at {node_config_path} with:\n{json.dumps(current_node_config, sort_keys=True, indent=4)}")
print(f"Showing node configs at {kmd_config_path} with:\n{json.dumps(current_kmd_config, sort_keys=True, indent=4)}")
def configure_node(data_dir, kmd_dir, node_config, kmd_config):
data_dir = file_util.ensure_folder(data_dir)
kmd_dir = file_util.ensure_folder(kmd_dir)
node_config_path = f"{data_dir}/config.json"
kmd_config_path = f"{kmd_dir}/kmd_config.json"
file_util.ensure_file(node_config_path, '{}')
file_util.ensure_file(kmd_config_path, '{}')
current_node_config = file_util.read_json_file(node_config_path)
current_kmd_config = file_util.read_json_file(kmd_config_path)
current_node_config.update(node_config)
current_kmd_config.update(kmd_config)
print(f"Updating node configs at {node_config_path} with:\n{json.dumps(node_config, sort_keys=True, indent=4)}")
print(f"Updating node configs at {kmd_config_path} with:\n{json.dumps(kmd_config, sort_keys=True, indent=4)}")
file_util.write_json_file(node_config_path, current_node_config)
file_util.write_json_file(kmd_config_path, current_kmd_config)
def start_node(data_dir, kmd_dir, bin_dir=None):
goal_args = [
'node',
'start',
]
print(f"Starting node with:\n\tdata_dir: {data_dir}\n\tkmd_dir: {kmd_dir}")
goal(data_dir, kmd_dir, goal_args, bin_dir)
def stop_node(data_dir, kmd_dir, bin_dir=None):
goal_args = [
'node',
'stop',
]
print(f"Stopping node with:\n\tdata_dir: {data_dir}\n\tkmd_dir: {kmd_dir}")
goal(data_dir, kmd_dir, goal_args, bin_dir)
def restart_node(data_dir, kmd_dir, bin_dir=None):
goal_args = [
'node',
'restart',
]
print(f"Restarting node with:\n\tdata_dir: {data_dir}\n\tkmd_dir: {kmd_dir}")
goal(data_dir, kmd_dir, goal_args, bin_dir)
def status_node(data_dir, kmd_dir, bin_dir=None):
goal_args = [
'node',
'status',
]
print(f"Status of node with:\n\tdata_dir: {data_dir}\n\tkmd_dir: {kmd_dir}")
goal(data_dir, kmd_dir, goal_args, bin_dir)
def goal(data_dir, kmd_dir, args, bin_dir=None):
goal_command = ['goal']
if not bin_dir is None:
goal_command = [f"{bin_dir}/goal"]
goal_command.extend([
'-d', data_dir,
'-k', kmd_dir,
])
goal_command.extend(args)
subprocess.run(goal_command, check=True)
def algorand_indexer(args, bin_dir=None, log_file_name=None):
algorand_indexer_command = ['algorand-indexer']
if not bin_dir is None:
algorand_indexer_command = [f"{bin_dir}/algorand-indexer"]
if log_file_name is None:
log_file_name = f"indexer-{time_util.get_timestamp()}.log"
algorand_indexer_command.extend(args)
log_file = open(log_file_name, 'w')
subprocess.Popen(algorand_indexer_command, stdout=log_file, stderr=log_file)
def start_indexer_local_node(node, postgres, bin_dir=None, pid_file=None, log_file_name=None):
algorand_indexer_args = ['daemon']
algorand_indexer_args.extend([
'-d', node['data'],
'--postgres', build_indexer_postgress_connection_string(postgres)
])
if not pid_file is None:
algorand_indexer_args.extend([
'--pidfile', pid_file
])
algorand_indexer(algorand_indexer_args, bin_dir, log_file_name)
def start_indexer_remote_node(node, postgres, bin_dir=None, pid_file=None, log_file_name=None):
algorand_indexer_args = ['daemon']
algorand_indexer_args.extend([
'--algod-net', f"{node['host']}:{node['port']}",
'--algod-token', node['token'],
'--genesis', node['genesis'],
'--postgres', build_indexer_postgress_connection_string(postgres)
])
if not pid_file is None:
algorand_indexer_args.extend([
'--pidfile', pid_file
])
algorand_indexer(algorand_indexer_args, bin_dir, log_file_name)
def build_indexer_postgress_connection_string(postgres):
postgress_connection_string = []
for field in postgres.items():
postgress_connection_string.append(f"{field[0]}={field[1]}")
return ' '.join(postgress_connection_string)
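# Illustrative sketch (added for clarity; not part of the original module):
# the postgres settings dict maps directly to a libpq-style "key=value"
# connection string. The keys below are hypothetical examples.
if __name__ == '__main__':
    _example_postgres = {'host': 'localhost', 'port': 5432,
                         'user': 'algorand', 'dbname': 'indexer'}
    # Prints: host=localhost port=5432 user=algorand dbname=indexer
    print(build_indexer_postgress_connection_string(_example_postgres))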
| 38.361809
| 150
| 0.716793
| 1,129
| 7,634
| 4.451727
| 0.127547
| 0.04039
| 0.030442
| 0.033625
| 0.666136
| 0.606049
| 0.574015
| 0.531039
| 0.458217
| 0.42877
| 0
| 0.001726
| 0.165051
| 7,634
| 198
| 151
| 38.555556
| 0.78679
| 0.003537
| 0
| 0.394904
| 0
| 0.038217
| 0.236069
| 0.096693
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089172
| false
| 0
| 0.063694
| 0.006369
| 0.171975
| 0.063694
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
161931efe310b9554c601df989d24d47e0bdfff9
| 2,490
|
py
|
Python
|
examples/showcase/src/demos_panels/scrollPanel.py
|
allbuttonspressed/pyjs
|
c726fdead530eb63ee4763ae15daaa58d84cd58f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
examples/showcase/src/demos_panels/scrollPanel.py
|
allbuttonspressed/pyjs
|
c726fdead530eb63ee4763ae15daaa58d84cd58f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
examples/showcase/src/demos_panels/scrollPanel.py
|
allbuttonspressed/pyjs
|
c726fdead530eb63ee4763ae15daaa58d84cd58f
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2019-11-18T14:17:59.000Z
|
2019-11-18T14:17:59.000Z
|
"""
The ``ui.ScrollPanel`` class implements a panel that scrolls its contents.
If you want the scroll bars to be always visible, call
``setAlwaysShowScrollBars(True)``. You can also change the current scrolling
position programmatically by calling ``setScrollPosition(vPos)`` and
``setScrollHorizontalPosition(hPos)`` to change the vertical and horizontal
scrolling position, respectively.
A ScrollPanel will not work if you give it a relative size, which makes it
tricky to use where it should fill out a parent widget of unknown size.
To avoid this problem, wrap its content in a SimplePanel and then use CSS
overflow to control its behaviour, as shown in the second example:
"container" represents the parent widget that could be any absolute or relative size and
the superscrollpanel will fill it out and apply vertical scrollbars if needed.
"""
from pyjamas.ui.SimplePanel import SimplePanel
from pyjamas.ui.ScrollPanel import ScrollPanel
from pyjamas.ui.HTML import HTML
from pyjamas.ui.VerticalPanel import VerticalPanel
class ScrollPanelDemo(SimplePanel):
def __init__(self):
SimplePanel.__init__(self)
vert = VerticalPanel()
vert.setSpacing("10px")
self.add(vert)
panel = ScrollPanel(Size=("300px", "100px"))
contents = HTML("<b>Tao Te Ching, Chapter One</b><p>" +
"The Way that can be told of is not an unvarying " +
"way;<p>The names that can be named are not " +
"unvarying names.<p>It was from the Nameless that " +
"Heaven and Earth sprang;<p>The named is but the " +
"mother that rears the ten thousand creatures, " +
"each after its kind.")
panel.add(contents)
vert.add(panel)
container = SimplePanel(Width="400px", Height="200px")
contents2 = HTML(50*"Dont forget to grab the css for SuperScrollPanel in Showcase.css! ")
panel2 = SuperScrollPanel(contents2)
container.add(panel2)
vert.add(container)
class SuperScrollPanel(ScrollPanel):
def __init__(self, panel):
ScrollPanel.__init__(self)
self.setHeight("100%")
self.setStyleName("SuperScrollPanelOuter")
self.inner = SimplePanel(Height="100%")
self.add(self.inner)
self.inner.setStyleName("SuperScrollPanelInner")
self.inner.add(panel)
| 42.20339
| 97
| 0.677912
| 319
| 2,490
| 5.241379
| 0.460815
| 0.026316
| 0.0311
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013778
| 0.242169
| 2,490
| 58
| 98
| 42.931034
| 0.872284
| 0.360241
| 0
| 0
| 0
| 0
| 0.270833
| 0.026515
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.117647
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1619ba2c67e7c086f7e9ae9363f2ebb460f2febc
| 772
|
py
|
Python
|
psdn.py
|
xiongchiamiov/phone-suitable-domain-name
|
da8d28c5783415f406e19b8ef2cde4c790a4c95d
|
[
"WTFPL"
] | 3
|
2017-10-23T18:31:24.000Z
|
2021-02-01T21:22:24.000Z
|
psdn.py
|
xiongchiamiov/phone-suitable-domain-name
|
da8d28c5783415f406e19b8ef2cde4c790a4c95d
|
[
"WTFPL"
] | null | null | null |
psdn.py
|
xiongchiamiov/phone-suitable-domain-name
|
da8d28c5783415f406e19b8ef2cde4c790a4c95d
|
[
"WTFPL"
] | 1
|
2016-10-14T10:47:41.000Z
|
2016-10-14T10:47:41.000Z
|
#!/usr/bin/env python3
# May you recognize your weaknesses and share your strengths.
# May you share freely, never taking more than you give.
# May you find love and love everyone you find.
import re
import time
import whois
phone_spellable = re.compile(r'^[filoqrsuwxy]+$')
candidate_words = []
with open('/usr/share/dict/words') as f:
for word in f:
word = word.strip()
if phone_spellable.match(word):
candidate_words.append((len(word), word))
candidate_words.sort()
for word in candidate_words:
query = False
while query is False:
try:
query = whois.query('%s.com' % word[1])
except:
print("Sleeping five seconds...")
time.sleep(5)
if not query:
print(word)
| 23.393939
| 61
| 0.634715
| 107
| 772
| 4.523364
| 0.579439
| 0.115702
| 0.03719
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005226
| 0.256477
| 772
| 32
| 62
| 24.125
| 0.837979
| 0.235751
| 0
| 0
| 0
| 0
| 0.114334
| 0.035836
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0.095238
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
161a66975b57933d5f14b6a51378ceceb0ae3ebd
| 1,725
|
py
|
Python
|
cart/views.py
|
pmaigutyak/mp-cart
|
53adbbdeea7f8f8b2d432b103f7347d89adf3e30
|
[
"0BSD"
] | 1
|
2021-09-25T14:31:48.000Z
|
2021-09-25T14:31:48.000Z
|
cart/views.py
|
pmaigutyak/mp-cart
|
53adbbdeea7f8f8b2d432b103f7347d89adf3e30
|
[
"0BSD"
] | null | null | null |
cart/views.py
|
pmaigutyak/mp-cart
|
53adbbdeea7f8f8b2d432b103f7347d89adf3e30
|
[
"0BSD"
] | 1
|
2021-04-10T18:50:47.000Z
|
2021-04-10T18:50:47.000Z
|
from django.utils.translation import ugettext
from django.views.decorators.http import require_POST
from django.http import JsonResponse
from django.shortcuts import render
from django.core.exceptions import ValidationError
from django.views.decorators.csrf import csrf_exempt
from cart.lib import get_cart
from cart.forms import SelectProductForm, SetQtyForm
@require_POST
def _cart_action_view(request, action_factory, form_class, message):
form = form_class(data=request.POST)
if not form.is_valid():
return JsonResponse({'message': form.errors.as_json()}, status=403)
cart = get_cart(request)
try:
result = action_factory(cart, form.cleaned_data)
except ValidationError as e:
return JsonResponse({'message': ', '.join(e.messages)}, status=403)
return JsonResponse({
'message': message,
'result': result,
'total': cart.printable_total
})
def add(request):
return _cart_action_view(
request,
action_factory=lambda cart, data: cart.add(**data),
form_class=SelectProductForm,
message=ugettext('Product added to cart')
)
def remove(request):
return _cart_action_view(
request,
action_factory=lambda cart, data: cart.remove(**data),
form_class=SelectProductForm,
message=ugettext('Product removed from cart')
)
def get_modal(request):
cart = get_cart(request)
return render(request, 'cart/modal.html', {'cart': cart})
@csrf_exempt
def set_qty(request):
return _cart_action_view(
request,
action_factory=lambda cart, data: cart.set_qty(**data),
form_class=SetQtyForm,
message=ugettext('Quantity updated')
)
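# Illustrative wiring sketch (added for clarity; not part of the original
# module). These views would typically be exposed through a urls.py along the
# lines below; the URL paths and names are assumptions, not taken from the
# mp-cart package itself.
#
#   from django.urls import path
#   from cart import views
#
#   urlpatterns = [
#       path('cart/add/', views.add, name='cart-add'),
#       path('cart/remove/', views.remove, name='cart-remove'),
#       path('cart/set-qty/', views.set_qty, name='cart-set-qty'),
#       path('cart/modal/', views.get_modal, name='cart-modal'),
#   ]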
| 26.136364
| 75
| 0.697391
| 209
| 1,725
| 5.583732
| 0.315789
| 0.051414
| 0.047986
| 0.071979
| 0.285347
| 0.285347
| 0.256213
| 0.167095
| 0.167095
| 0.167095
| 0
| 0.004364
| 0.202899
| 1,725
| 65
| 76
| 26.538462
| 0.844364
| 0
| 0
| 0.208333
| 0
| 0
| 0.066705
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.104167
| false
| 0
| 0.166667
| 0.0625
| 0.416667
| 0.020833
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
161a6fecb9358040e2c0bfdcfac12240bdf3bc16
| 2,089
|
py
|
Python
|
ChessAI/src/const.py
|
darius-luca-tech/AI_Projects
|
3cff26878807121e077375e5dbef39390fea0189
|
[
"MIT"
] | 2
|
2020-07-11T14:48:27.000Z
|
2020-08-04T11:24:58.000Z
|
ChessAI/src/const.py
|
darius-luca-tech/AI_Projects
|
3cff26878807121e077375e5dbef39390fea0189
|
[
"MIT"
] | null | null | null |
ChessAI/src/const.py
|
darius-luca-tech/AI_Projects
|
3cff26878807121e077375e5dbef39390fea0189
|
[
"MIT"
] | null | null | null |
#------ game constants -----#
#players
WHITE = 0
BLACK = 1
BOTH = 2
#color for onTurnLabel
PLAYER_COLOR = ["white", "black"]
#figures
PAWN = 1
KNIGHT = 2
BISHOP = 3
ROOK = 4
QUEEN = 5
KING = 6
FIGURE_NAME = [ "", "pawn", "knight", "bishop", "rook", "queen", "king" ]
#used in the 32-bit move encoding for the promotion figure: prom_figure = figure - 2
PROM_KNIGHT = 0
PROM_BISHOP = 1
PROM_ROOK = 2
PROM_QUEEN = 3
#all lines
A, B, C, D, E, F, G, H = range(8)
#all squares
A1, B1, C1, D1, E1, F1, G1, H1, \
A2, B2, C2, D2, E2, F2, G2, H2, \
A3, B3, C3, D3, E3, F3, G3, H3, \
A4, B4, C4, D4, E4, F4, G4, H4, \
A5, B5, C5, D5, E5, F5, G5, H5, \
A6, B6, C6, D6, E6, F6, G6, H6, \
A7, B7, C7, D7, E7, F7, G7, H7, \
A8, B8, C8, D8, E8, F8, G8, H8 = range(64)
#----- game display constants -----#
DEFAULTBORDERWIDTH = 20
DEFAULTTILEWIDTH = 45
DEFAULTFONTSIZE = (7, 15)
COLORS = { "bg":"#EDC08C",
"border":"#B55602",
"tiles":("#FC9235", "#FFB87A") }
#----- move types -----#
NORMAL_MOVE, CAPTURE, PROMOTION, DOUBLE_STEP, ENPASSANT_CAPTURE, CASTLING, KING_CAPTURE = range(7)
#----- move 32bit reservation -----#
# a single move is stored in 32 bit as follows
# xxxxxxxx xx x xxx xxx xxxxxx xxxxxx xxx
#          G  F  E   D    C      B     A
#
# A: move type (0-6)
# B: start sq (0-63)
# C: destination sq (0-63)
# D: start figure (1-6)
# E: captured figure (1-6)
# F: color of moved piece (0-1)
# G: promotion figure (0-3)
#NAME = (start_bit, length)
MOVE_TYPE = (0, 3)
MOVE_START = (3, 6)
MOVE_DEST = (9, 6)
MOVE_FIG_START = (15, 3)
MOVE_FIG_CAPTURE = (18, 3)
MOVE_COLOR = (21, 1)
MOVE_PROM = (22, 2)
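# Illustrative helpers (added for clarity; not part of the original module):
# how a move could be packed into and unpacked from the 32-bit layout above,
# using the (start_bit, length) convention of the MOVE_* tuples.
def _encode_field(move, field, value):
    start_bit, length = field
    return move | ((value & ((1 << length) - 1)) << start_bit)
def _decode_field(move, field):
    start_bit, length = field
    return (move >> start_bit) & ((1 << length) - 1)
if __name__ == '__main__':
    # Example: white pawn captures a knight from E4 to D5.
    _move = 0
    _move = _encode_field(_move, MOVE_TYPE, CAPTURE)
    _move = _encode_field(_move, MOVE_START, E4)
    _move = _encode_field(_move, MOVE_DEST, D5)
    _move = _encode_field(_move, MOVE_FIG_START, PAWN)
    _move = _encode_field(_move, MOVE_FIG_CAPTURE, KNIGHT)
    _move = _encode_field(_move, MOVE_COLOR, WHITE)
    assert _decode_field(_move, MOVE_DEST) == D5
    assert _decode_field(_move, MOVE_FIG_CAPTURE) == KNIGHT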
#----- castling -----#
CASTLING_LEFT = 0
CASTLING_RIGHT = 1
#----- player status -----#
IDELING = 0
PICKING = 1
INF = 1000000
ASCII_FIG = [[],[]]
ASCII_FIG[WHITE] = [ 'x', chr(9817), chr(9816), chr(9815), chr(9814), chr(9813), chr(9812)]
ASCII_FIG[BLACK] = [ 'x', chr(9823), chr(9822), chr(9821), chr(9820), chr(9819), chr(9818)]
#AI constants
CASTLING_RIGHT_LOSS_PENALTY = -40
| 22.706522
| 99
| 0.567736
| 328
| 2,089
| 3.527439
| 0.570122
| 0.012965
| 0.015557
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128807
| 0.245572
| 2,089
| 91
| 100
| 22.956044
| 0.60533
| 0.291527
| 0
| 0
| 0
| 0
| 0.060427
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.021277
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
161b1a291b36fd8f7983e45a6a229f8f666d35f1
| 392
|
py
|
Python
|
agent.py
|
kapzlok2408/Pokemon-Showdown-Node-Bot
|
c759eb9106fd2a3da3ebe4692a6730c37b2e5ee3
|
[
"MIT"
] | null | null | null |
agent.py
|
kapzlok2408/Pokemon-Showdown-Node-Bot
|
c759eb9106fd2a3da3ebe4692a6730c37b2e5ee3
|
[
"MIT"
] | null | null | null |
agent.py
|
kapzlok2408/Pokemon-Showdown-Node-Bot
|
c759eb9106fd2a3da3ebe4692a6730c37b2e5ee3
|
[
"MIT"
] | null | null | null |
import gym
import gym_pokemon
import random
if __name__ == "__main__":
env = gym.make("Pokemon-v0")
total_reward = 0.0
total_steps = 0
obs = env.reset()
while True:
action = random.randint(-1,8)
obs, reward, done, _ = env.step(action)
total_reward += reward
total_steps += 1
print("Currently %d steps, total reward of %.2f" % (total_steps, total_reward))
if done:
break
| 20.631579
| 81
| 0.683673
| 59
| 392
| 4.271186
| 0.508475
| 0.174603
| 0.126984
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025157
| 0.188776
| 392
| 18
| 82
| 21.777778
| 0.767296
| 0
| 0
| 0
| 0
| 0
| 0.147959
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1875
| 0
| 0.1875
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
161b52cb8725f9e857d4d9abd90c6be8f1cb0dec
| 964
|
py
|
Python
|
setup.py
|
danjjl/ipyfilechooser
|
19d2e906207b2c3426675eda7889267f5956b182
|
[
"MIT"
] | null | null | null |
setup.py
|
danjjl/ipyfilechooser
|
19d2e906207b2c3426675eda7889267f5956b182
|
[
"MIT"
] | null | null | null |
setup.py
|
danjjl/ipyfilechooser
|
19d2e906207b2c3426675eda7889267f5956b182
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
def read(fname):
"""Open files relative to package."""
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='ipyfilechooser',
version='0.3.1',
author='Thomas Bouve (@crahan)',
author_email='crahan@n00.be',
description=(
'Python file chooser widget for use in '
'Jupyter/IPython in conjunction with ipywidgets'
),
long_description=read('README.md'),
long_description_content_type='text/markdown',
url='https://github.com/crahan/ipyfilechooser',
license='MIT',
packages=find_packages(),
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
],
install_requires=[
'ipywidgets'
]
)
| 26.777778
| 70
| 0.637967
| 107
| 964
| 5.635514
| 0.728972
| 0.039801
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009358
| 0.224066
| 964
| 35
| 71
| 27.542857
| 0.796791
| 0.053942
| 0
| 0
| 0
| 0
| 0.42053
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.068966
| 0
| 0.137931
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1620270422616b41ca7180a5b9004dcde020933a
| 1,590
|
py
|
Python
|
keras2onnx/proto/tfcompat.py
|
CNugteren/keras-onnx
|
b3d6b6486fe56640c48c62dd098e9405e35b4e9f
|
[
"MIT"
] | 1
|
2021-04-15T16:35:54.000Z
|
2021-04-15T16:35:54.000Z
|
keras2onnx/proto/tfcompat.py
|
CNugteren/keras-onnx
|
b3d6b6486fe56640c48c62dd098e9405e35b4e9f
|
[
"MIT"
] | null | null | null |
keras2onnx/proto/tfcompat.py
|
CNugteren/keras-onnx
|
b3d6b6486fe56640c48c62dd098e9405e35b4e9f
|
[
"MIT"
] | null | null | null |
###############################################################################
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
###############################################################################
import os
import tensorflow as _tf
from distutils.version import StrictVersion
is_tf2 = StrictVersion(_tf.__version__.split('-')[0]) >= StrictVersion('2.0.0')
def normalize_tensor_shape(tensor_shape):
if is_tf2:
return [d for d in tensor_shape]
else:
return [d.value for d in tensor_shape]
def dump_graph_into_tensorboard(tf_graph):
# type: (_tf.Graph) -> None
_tb_log_dir = os.environ.get('TB_LOG_DIR')
if _tb_log_dir:
if is_tf2:
from tensorflow.python.ops.summary_ops_v2 import graph as write_graph
pb_visual_writer = _tf.summary.create_file_writer(_tb_log_dir)
with pb_visual_writer.as_default():
write_graph(tf_graph)
else:
from tensorflow.python.summary import summary
pb_visual_writer = summary.FileWriter(_tb_log_dir)
pb_visual_writer.add_graph(tf_graph)
if is_tf2:
tensorflow = _tf.compat.v1
def is_subclassed(layer):
"""Returns True if the object is a subclassed layer or subclassed model."""
return (layer.__module__.find('keras.engine') == -1 and
layer.__module__.find('keras.layers') == -1)
else:
tensorflow = _tf
def is_subclassed(layer):
return False
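# Illustrative usage sketch (added for clarity; not part of the original
# module). `normalize_tensor_shape` yields plain ints (or None) for both TF1
# and TF2 TensorShape objects, and `dump_graph_into_tensorboard` only writes
# anything when the TB_LOG_DIR environment variable is set.
if __name__ == '__main__':
    _x = _tf.keras.layers.Input(shape=(28, 28, 3))
    print(normalize_tensor_shape(_x.shape))  # e.g. [None, 28, 28, 3]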
| 31.8
| 83
| 0.610692
| 197
| 1,590
| 4.598985
| 0.431472
| 0.027594
| 0.04415
| 0.02649
| 0.037528
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009569
| 0.211321
| 1,590
| 49
| 84
| 32.44898
| 0.712919
| 0.155346
| 0
| 0.266667
| 0
| 0
| 0.034014
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.166667
| 0.033333
| 0.433333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
16205a78e576c7488204d92806cb7a59f5ca5566
| 11,588
|
py
|
Python
|
back2back/httpmulticlient.py
|
excentis/ByteBlower_python_examples
|
0e082e17413abf5e25f6d14b85e50e7f73e7f965
|
[
"BSD-3-Clause"
] | 2
|
2018-10-04T10:55:55.000Z
|
2018-11-29T08:51:38.000Z
|
back2back/httpmulticlient.py
|
excentis/ByteBlower_python_examples
|
0e082e17413abf5e25f6d14b85e50e7f73e7f965
|
[
"BSD-3-Clause"
] | null | null | null |
back2back/httpmulticlient.py
|
excentis/ByteBlower_python_examples
|
0e082e17413abf5e25f6d14b85e50e7f73e7f965
|
[
"BSD-3-Clause"
] | 3
|
2018-10-04T10:56:29.000Z
|
2019-10-28T10:19:40.000Z
|
"""
HTTP MultiServer/MultiClient for the ByteBlower Python API.
All examples are guaranteed to work with Python 2.7 and above
Copyright 2018, Excentis N.V.
"""
# Needed for python2 / python3 print function compatibility
from __future__ import print_function
# import the ByteBlower module
import byteblowerll.byteblower as byteblower
import time
configuration = {
# Address (IP or FQDN) of the ByteBlower server to use
'server_address': 'byteblower-tp-1300.lab.byteblower.excentis.com',
# Configuration for the first ByteBlower port.
# Will be used as HTTP server.
'port_1_config': {
'interface': 'trunk-1-13',
'mac': '00:bb:01:00:00:01',
# IP configuration for the ByteBlower Port.
# Options are 'DHCPv4', 'DHCPv6', 'SLAAC', 'static'
# if DHCPv4, use "dhcpv4"
'ip': 'dhcpv4',
# if DHCPv6, use "dhcpv6"
# 'ip': 'dhcpv6',
# if SLAAC, use "slaac"
# 'ip': 'slaac',
# if staticv4, use ["ipaddress", netmask, gateway]
# 'ip': ['192.168.0.2', "255.255.255.0", "192.168.0.1"],
# if staticv6, use ["ipaddress", prefixlength]
# 'ip': ['3000:3128::24', '64'],
# TCP port number to be used by the HTTP connection.
# On the HTTP server, this will be the port on which the server
# listens.
'tcp_port': 4096
},
# Configuration for the second ByteBlower port.
# Will be used as HTTP client.
'port_2_config': {
'interface': 'trunk-1-25',
'mac': '00:bb:01:00:00:02',
# IP configuration for the ByteBlower Port.
# Options are 'DHCPv4', 'DHCPv6', 'SLAAC', 'static'
# if DHCPv4, use "dhcpv4"
'ip': 'dhcpv4',
# if DHCPv6, use "dhcpv6"
        # 'ip': 'dhcpv6',
# if SLAAC, use "slaac"
# 'ip': 'slaac',
# if staticv4, use ["ipaddress", netmask, gateway]
# 'ip': ['192.168.0.2', "255.255.255.0", "192.168.0.1"],
# if staticv6, use ["ipaddress", prefixlength]
# 'ip': ['3000:3128::24', '64'],
# TCP port range the HTTP Clients will use to connect with
# the HTTP server
'tcp_port_min': 32000,
'tcp_port_max': 50000
},
# HTTP Method
# HTTP Method can be GET or PUT
# - GET: Standard HTTP download, we retrieve data from the web server
# - PUT: Standard HTTP upload, the wireless endpoint will push data to the
# webserver
'http_method': 'GET',
# 'http_method': 'PUT',
# total duration, in nanoseconds.
# This is the duration of the flow. When this duration expires,
# all sessions will be stopped.
'duration': 10000000000,
# session duration, in nanoseconds
# Duration of the individual sessions
# 'session_duration': 1500000000,
'session_duration': None,
# session size, in bytes
# The number of bytes transmitted by a session
'session_size': 1 * 1000 * 1000,
# 'session_size': None,
# max concurrent sessions
# Maximum number of sessions that will be running simultaneously
'max_concurrent_sessions': 100,
# maximum number of sessions
# No more than this number of sessions will be created
# 0 means no limit
'max_total_sessions': 0,
# TOS value to use on the HTTP client (and server)
'tos': 0
}
class Example:
def __init__(self, **kwargs):
self.server_address = kwargs['server_address']
self.port_1_config = kwargs['port_1_config']
self.port_2_config = kwargs['port_2_config']
# Helper function, we can use this to parse the HTTP Method to the
# enumeration used by the API
from byteblowerll.byteblower import ParseHTTPRequestMethodFromString
http_method_arg = kwargs['http_method']
self.http_method = ParseHTTPRequestMethodFromString(http_method_arg)
self.duration = kwargs['duration']
self.session_duration = kwargs['session_duration']
self.session_size = kwargs['session_size']
self.max_concurrent_sessions = kwargs['max_concurrent_sessions']
self.max_total_sessions = kwargs['max_total_sessions']
self.tos = kwargs['tos']
self.server = None
self.port_1 = None
self.port_2 = None
def cleanup(self):
"""Clean up the created objects"""
byteblower_instance = byteblower.ByteBlower.InstanceGet()
if self.port_1:
self.server.PortDestroy(self.port_1)
self.port_1 = None
if self.port_2:
self.server.PortDestroy(self.port_2)
self.port_2 = None
if self.server is not None:
byteblower_instance.ServerRemove(self.server)
self.server = None
def run(self):
byteblower_instance = byteblower.ByteBlower.InstanceGet()
print("Connecting to ByteBlower server %s..." % self.server_address)
self.server = byteblower_instance.ServerAdd(self.server_address)
# Create the port which will be the HTTP server (port_1)
print("Creating HTTP Server port")
self.port_1 = self.provision_port(self.port_1_config)
print("Creating HTTP Client port")
# Create the port which will be the HTTP client (port_2)
self.port_2 = self.provision_port(self.port_2_config)
http_server_ip_address = self.port_1_config['ip_address']
# create a HTTP server
http_server = self.port_1.ProtocolHttpMultiServerAdd()
server_tcp_port = self.port_1_config['tcp_port']
if server_tcp_port is not None:
http_server.PortSet(server_tcp_port)
else:
server_tcp_port = http_server.PortGet()
# create a HTTP Client
http_client = self.port_2.ProtocolHttpMultiClientAdd()
# - remote endpoint
http_client.RemoteAddressSet(http_server_ip_address)
http_client.RemotePortSet(server_tcp_port)
# - local endpoint
http_client.LocalPortRangeSet(self.port_2_config['tcp_port_min'],
self.port_2_config['tcp_port_max'])
# Configure the direction.
# If the HTTP Method is GET,
# traffic will flow from the HTTP server to the HTTP client
# If the HTTP Method is PUT,
# traffic will flow from the HTTP client to the HTTP server
http_client.HttpMethodSet(self.http_method)
print("Server port:", self.port_1.DescriptionGet())
print("Client port:", self.port_2.DescriptionGet())
# let the HTTP server listen for requests
http_server.Start()
# - total duration of all sessions
http_client.DurationSet(self.duration)
# - how many connections can be created?
http_client.CumulativeConnectionLimitSet(self.max_total_sessions)
# - how many connections can be running at the same time
http_client.MaximumConcurrentRequestsSet(self.max_concurrent_sessions)
# - individual duration, can be size-based or time-based
if self.session_duration is not None:
# let the HTTP Client request a page of a specific duration
# to download...
http_client.SessionDurationSet(self.session_duration)
elif self.session_size is not None:
# let the HTTP Client request a page of a specific size...
http_client.SessionSizeSet(self.session_size)
else:
raise ValueError("Either duration or request_size must be configured")
print("Starting the HTTP client")
http_client.Start()
http_client_result = http_client.ResultGet()
for iteration in range(10):
time.sleep(1)
http_client_result.Refresh()
print("-" * 10)
print("Iteration", iteration+1)
print(" connections attempted", http_client_result.ConnectionsAttemptedGet())
print(" connections established", http_client_result.ConnectionsEstablishedGet())
print(" connections aborted", http_client_result.ConnectionsAbortedGet())
print(" connections refused", http_client_result.ConnectionsRefusedGet())
print("-" * 10)
http_client.Stop()
http_server.Stop()
print("Stopped the HTTP client")
request_status_value = http_client.StatusGet()
request_status_string = byteblower.ConvertHTTPMultiClientStatusToString(request_status_value)
http_client_result.Refresh()
tx_bytes = http_client_result.TcpTxByteCountGet()
tx_speed = http_client_result.TcpTxSpeedGet()
rx_bytes = http_client_result.TcpRxByteCountGet()
rx_speed = http_client_result.TcpRxSpeedGet()
http_server_result = http_server.ResultGet()
http_server_result.Refresh()
print("Requested Duration : {} nanoseconds".format(self.duration))
print("Status : {}".format(request_status_string))
print("Client Result data : {}".format(http_client_result.DescriptionGet()))
print("Server Result data : {}".format(http_server_result.DescriptionGet()))
return [
self.duration,
self.session_duration,
self.session_size,
self.max_total_sessions,
self.max_concurrent_sessions,
tx_bytes, rx_bytes,
tx_speed, rx_speed,
request_status_value
]
def provision_port(self, config):
port = self.server.PortCreate(config['interface'])
port_l2 = port.Layer2EthIISet()
port_l2.MacSet(config['mac'])
ip_config = config['ip']
if not isinstance(ip_config, list):
# Config is not static, DHCP or slaac
if ip_config.lower() == "dhcpv4":
port_l3 = port.Layer3IPv4Set()
port_l3.ProtocolDhcpGet().Perform()
config['ip_address'] = port_l3.IpGet()
elif ip_config.lower() == "dhcpv6":
port_l3 = port.Layer3IPv6Set()
port_l3.ProtocolDhcpGet().Perform()
config['ip_address'] = port_l3.IpDhcpGet()
elif ip_config.lower() == "slaac":
port_l3 = port.Layer3IPv6Set()
port_l3.StatelessAutoconfiguration()
config['ip_address'] = port_l3.IpStatelessGet()
else:
# Static configuration
if len(ip_config) == 3:
# IPv4
port_l3 = port.Layer3IPv4Set()
port_l3.IpSet(ip_config[0])
port_l3.NetmaskSet(ip_config[1])
port_l3.GatewaySet(ip_config[2])
config['ip_address'] = port_l3.IpGet()
elif len(ip_config) == 2:
port_l3 = port.Layer3IPv6Set()
# IPv6
address = ip_config[0]
prefix_length = ip_config[1]
ip = "{}/{}".format(address, prefix_length)
port_l3.IpManualAdd(ip)
config['ip_address'] = ip_config[0]
if not isinstance(config['ip_address'], str):
ip = config['ip_address'][0]
if '/' in ip:
config['ip_address'] = ip.split('/')[0]
print("Created port", port.DescriptionGet())
return port
# When this Python module is run stand-alone, the run function must be
# called explicitly. This approach makes it possible to include the example
# in a larger series of examples.
if __name__ == "__main__":
example = Example(**configuration)
try:
example.run()
finally:
example.cleanup()
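# Illustrative sketch (added, not part of the original example): how a caller might
# consume the list returned by Example.run(). The index layout follows the return
# statement above; converting the speed counters to Mbit/s assumes they are
# reported in bytes per second.
#
#   results = Example(**configuration).run()
#   tx_bytes, rx_bytes = results[5], results[6]
#   tx_speed, rx_speed = results[7], results[8]
#   print("average TX speed: %.2f Mbit/s" % (tx_speed * 8 / 1e6))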
| 36.440252
| 101
| 0.621764
| 1,361
| 11,588
| 5.110948
| 0.213079
| 0.053191
| 0.027602
| 0.008626
| 0.243243
| 0.157993
| 0.125503
| 0.111271
| 0.102358
| 0.088269
| 0
| 0.031557
| 0.280808
| 11,588
| 317
| 102
| 36.555205
| 0.803096
| 0.278909
| 0
| 0.152941
| 0
| 0
| 0.125651
| 0.011137
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023529
| false
| 0
| 0.023529
| 0
| 0.064706
| 0.117647
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1621aa767e78100c7f16f615ddf74780115c4b1d
| 9,106
|
py
|
Python
|
rastervision/plugin.py
|
carderne/raster-vision
|
915fbcd3263d8f2193e65c2cd0eb53e050a47a01
|
[
"Apache-2.0"
] | 3
|
2020-07-05T04:04:18.000Z
|
2021-02-05T16:19:55.000Z
|
rastervision/plugin.py
|
carderne/raster-vision
|
915fbcd3263d8f2193e65c2cd0eb53e050a47a01
|
[
"Apache-2.0"
] | null | null | null |
rastervision/plugin.py
|
carderne/raster-vision
|
915fbcd3263d8f2193e65c2cd0eb53e050a47a01
|
[
"Apache-2.0"
] | 1
|
2020-04-27T15:21:53.000Z
|
2020-04-27T15:21:53.000Z
|
import os
import json
import importlib
from pluginbase import PluginBase
import rastervision as rv
from rastervision.protos.plugin_pb2 import PluginConfig as PluginConfigMsg
from rastervision.utils.files import download_if_needed
class PluginError(Exception):
pass
def load_conf_list(s):
"""Loads a list of items from the config.
Lists should be comma separated.
This takes into account that previous versions of Raster Vision
allowed for a `[ "module" ]` like syntax, even though that didn't
work for multi-value lists.
"""
try:
        # A bracketed list may arrive as a list-like string that uses single
        # quotes instead of double quotes. Replacing single quotes with double
        # quotes lets us parse it as a JSON list.
return json.loads(s.replace("'", '"'))
except json.JSONDecodeError:
return list(map(lambda x: x.strip(), s.split(',')))
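# Illustration (added, not part of the original module) of the two accepted
# syntaxes described in the docstring above:
#   load_conf_list('["my_plugin.module"]')    -> ['my_plugin.module']
#   load_conf_list('foo.plugin, bar.plugin')  -> ['foo.plugin', 'bar.plugin']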
class PluginRegistry:
@staticmethod
def get_instance():
return rv._registry._get_plugin_registry()
def __init__(self, plugin_config, rv_home):
"""Initializes this plugin registry.
A plugin registry is passed to plugins in a call
to their "register_plugin" method.
Args:
plugin_config - the everett ConfigManager for the plugin
section of the application configuration.
"""
self.plugin_root_dir = os.path.join(rv_home, 'plugins')
self.config_builders = {}
self.command_config_builders = {}
self.commands = []
self.aux_command_classes = {}
self.default_raster_sources = []
self.default_vector_sources = []
self.default_label_sources = []
self.default_label_stores = []
self.default_evaluators = []
self.experiment_runners = {}
self.filesystems = []
plugin_files = load_conf_list(plugin_config('files', default='[]'))
self._load_from_files(plugin_files)
self.plugin_files = plugin_files
plugin_modules = load_conf_list(plugin_config('modules', default='[]'))
self._load_from_modules(plugin_modules)
self.plugin_modules = plugin_modules
def _load_plugin(self, plugin, identifier):
# Check the plugin is valid
if not hasattr(plugin, 'register_plugin'):
raise PluginError('Plugin at {} does not have '
'"register_plugin" method.'.format(identifier))
register_method = getattr(plugin, 'register_plugin')
if not callable(register_method):
raise PluginError('Plugin at {} has a '
'"register_plugin" attribute, '
'but it is not callable'.format(identifier))
# TODO: Log loading plugin.
register_method(self)
def _load_from_files(self, plugin_paths):
if not plugin_paths:
return
self.plugin_sources = []
plugin_base = PluginBase(package='rastervision.plugins')
for uri in plugin_paths:
plugin_name = os.path.splitext(os.path.basename(uri))[0]
plugin_path = os.path.join(self.plugin_root_dir, plugin_name)
fs = rv._registry.get_file_system(uri, search_plugins=False)
local_path = download_if_needed(uri, plugin_path, fs=fs)
local_dir = os.path.dirname(local_path)
plugin_source = plugin_base.make_plugin_source(
searchpath=[local_dir])
# We're required to hang onto the source
# to keep it from getting GC'd.
self.plugin_sources.append(plugin_source)
self._load_plugin(plugin_source.load_plugin(plugin_name), uri)
def _load_from_modules(self, plugin_modules):
if not plugin_modules:
return
for module in plugin_modules:
plugin = importlib.import_module(module)
self._load_plugin(plugin, module)
def add_plugins_from_proto(self, plugin_msg):
new_plugin_files = list(
set(plugin_msg.plugin_uris) - set(self.plugin_files))
self._load_from_files(new_plugin_files)
self.plugin_files.extend(new_plugin_files)
new_plugin_modules = list(
set(plugin_msg.plugin_modules) - set(self.plugin_modules))
self._load_from_modules(new_plugin_modules)
self.plugin_modules.extend(new_plugin_modules)
def to_proto(self):
"""Returns a protobuf message that records the
plugin sources for plugins that are currently loaded
in the registry.
"""
return PluginConfigMsg(
plugin_uris=self.plugin_files, plugin_modules=self.plugin_modules)
def register_config_builder(self, group, key, builder_class):
"""Registers a ConfigBuilder as a plugin.
Args:
group - The Config group, e.g. rv.BACKEND, rv.TASK.
key - The key used for this plugin. This will be used to
construct the builder in a ".builder(key)" call.
builder_class - The subclass of ConfigBuilder that builds
the Config for this plugin.
"""
if (group, key) in self.config_builders:
raise PluginError('ConfigBuilder already registered for group '
'{} and key {}'.format(group, key))
self.config_builders[(group, key)] = builder_class
def register_command_config_builder(self, command_type, builder_class):
"""Registers a ConfigBuilder as a plugin.
Args:
command_type - The key used for this plugin. This will be used to
construct the builder in a ".builder(key)" call.
builder_class - The subclass of CommandConfigBuilder that builds
the CommandConfig for this plugin.
"""
if command_type in self.command_config_builders:
raise PluginError(
                'CommandConfigBuilder already registered for command '
                'with type {}'.format(command_type))
self.command_config_builders[command_type] = builder_class
self.commands.append(command_type)
def register_aux_command(self, command_type, command_class):
"""Registers a custom AuxCommand as a plugin.
Args:
command_type - The key used for this plugin. This will be used to
construct the builder in a ".builder(key)" call.
command_class - The subclass of AuxCommand subclass to register.
"""
if command_type in self.command_config_builders:
raise PluginError(
                'CommandConfigBuilder is already registered for command '
                'with type {}'.format(command_type))
if command_type in self.aux_command_classes:
            raise PluginError('AuxCommand is already registered for command '
                              'with type {}'.format(command_type))
self.aux_command_classes[command_type] = command_class
if command_class.options.include_by_default:
self.commands.append(command_type)
def register_default_raster_source(self, provider_class):
"""Registers a RasterSourceDefaultProvider for use as a plugin."""
self.default_raster_sources.append(provider_class)
def register_default_vector_source(self, provider_class):
"""Registers a VectorSourceDefaultProvider for use as a plugin."""
self.default_vector_sources.append(provider_class)
def register_default_label_source(self, provider_class):
"""Registers a LabelSourceDefaultProvider for use as a plugin."""
self.default_label_sources.append(provider_class)
def register_default_label_store(self, provider_class):
"""Registers a LabelStoreDefaultProvider for use as a plugin."""
self.default_label_stores.append(provider_class)
def register_default_evaluator(self, provider_class):
"""Registers an EvaluatorDefaultProvider for use as a plugin."""
self.default_evaluators.append(provider_class)
def register_experiment_runner(self, runner_key, runner_class):
"""Registers an ExperimentRunner as a plugin.
Args:
runner_key - The key used to reference this plugin runner.
This is a string that will match the command line
argument used to reference this runner; e.g. if the
key is "FOO_RUNNER", then users can use the runner
by issuing a "rastervision run foo_runner ..." command.
runner_class - The class of the ExperimentRunner plugin.
"""
if runner_key in self.experiment_runners:
raise PluginError('ExperimentRunner already registered for '
'key {}'.format(runner_key))
self.experiment_runners[runner_key] = runner_class
def register_filesystem(self, filesystem_class):
"""Registers a FileSystem as a plugin."""
self.filesystems.append(filesystem_class)
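# Illustrative sketch (assumption, not part of plugin.py): the shape of a plugin
# module that this registry can load. All names below are hypothetical.
#
#   import rastervision as rv
#
#   def register_plugin(plugin_registry):
#       plugin_registry.register_config_builder(
#           rv.BACKEND, 'MY_BACKEND', MyBackendConfigBuilder)
#       plugin_registry.register_experiment_runner('my_runner', MyRunner)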
| 40.471111
| 80
| 0.648913
| 1,073
| 9,106
| 5.28425
| 0.219012
| 0.029982
| 0.015873
| 0.013757
| 0.270547
| 0.215873
| 0.191887
| 0.156261
| 0.128042
| 0.113404
| 0
| 0.000303
| 0.276082
| 9,106
| 224
| 81
| 40.651786
| 0.85983
| 0.281243
| 0
| 0.091667
| 0
| 0
| 0.078657
| 0
| 0
| 0
| 0
| 0.004464
| 0
| 1
| 0.15
| false
| 0.008333
| 0.066667
| 0.008333
| 0.283333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1621ccd669a0abec2dea3abc64d60feca57f3bfe
| 2,134
|
py
|
Python
|
acsm/nnutils/resunet.py
|
eldar/acsm
|
04069e8bb4c12185473dc10c3355e5367fa98968
|
[
"Apache-2.0"
] | 52
|
2020-04-02T12:35:55.000Z
|
2022-03-11T07:47:30.000Z
|
acsm/nnutils/resunet.py
|
eldar/acsm
|
04069e8bb4c12185473dc10c3355e5367fa98968
|
[
"Apache-2.0"
] | 8
|
2020-06-04T07:34:34.000Z
|
2021-09-18T21:17:26.000Z
|
acsm/nnutils/resunet.py
|
eldar/acsm
|
04069e8bb4c12185473dc10c3355e5367fa98968
|
[
"Apache-2.0"
] | 6
|
2020-07-12T02:12:18.000Z
|
2021-03-06T05:03:33.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import os
import os.path as osp
import numpy as np
import torch
import torchvision
import torch.nn as nn
from torch.autograd import Variable
import functools
from . import net_blocks as nb
import pdb
class ResNetConcatGenerator(nn.Module):
def __init__(self, input_nc, output_nc, n_blocks=3, ngf=64,):
super(ResNetConcatGenerator, self).__init__()
self.encoder = ResnetEncoder(n_blocks=n_blocks)
self.n_blocks = n_blocks
decoder = []
if n_blocks == 3:
inner_nc = 256
nlayers = 4
        elif n_blocks == 4:
            inner_nc = 512
            nlayers = 5
        else:
            raise ValueError('n_blocks must be 3 or 4, got {}'.format(n_blocks))
for lx in range(nlayers):
outnc = max(inner_nc // 2, 16)
up = nb.upconv2d(inner_nc, outnc)
decoder.append(up)
inner_nc = outnc
up = nn.Conv2d(
inner_nc, output_nc, kernel_size=3, stride=1, padding=1, bias=True)
decoder.append(up)
self.decoder = nn.Sequential(*decoder)
nb.net_init(self.decoder)
return
def forward(self, input):
img_enc = self.encoder(input)
img_dec = self.decoder(img_enc)
return img_dec
def reinit_weights(self, ):
self.encoder = ResnetEncoder(n_blocks=self.n_blocks)
nb.net_init(self.decoder)
class ResnetEncoder(nn.Module):
def __init__(self, n_blocks):
super(ResnetEncoder, self).__init__()
self.resnet = torchvision.models.resnet18(pretrained=True)
self.n_blocks = n_blocks
def forward(self, x):
n_blocks = self.n_blocks
x = self.resnet.conv1(x)
x = self.resnet.bn1(x)
x = self.resnet.relu(x)
x = self.resnet.maxpool(x)
if n_blocks >= 1:
x = self.resnet.layer1(x)
if n_blocks >= 2:
x = self.resnet.layer2(x)
if n_blocks >= 3:
x = self.resnet.layer3(x)
if n_blocks >= 4:
x = self.resnet.layer4(x)
return x
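# Minimal usage sketch (assumption, not part of the original file). The exact
# output resolution depends on the upconv blocks defined in net_blocks, so this
# only shows the intended call pattern.
#
#   net = ResNetConcatGenerator(input_nc=3, output_nc=3, n_blocks=3)
#   with torch.no_grad():
#       out = net(torch.randn(1, 3, 256, 256))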
| 27.358974
| 79
| 0.612933
| 288
| 2,134
| 4.3125
| 0.315972
| 0.101449
| 0.070853
| 0.032206
| 0.170692
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021956
| 0.295689
| 2,134
| 77
| 80
| 27.714286
| 0.804391
| 0
| 0
| 0.092308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.230769
| 0
| 0.384615
| 0.015385
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1626ca15f81c599021a7770317db1230752e7b3f
| 4,282
|
py
|
Python
|
scrapers/covid_scraper.py
|
ZachGeo/covidGR_API
|
2f316337dda65bd33ac895df336481c3c2abe2c6
|
[
"MIT"
] | null | null | null |
scrapers/covid_scraper.py
|
ZachGeo/covidGR_API
|
2f316337dda65bd33ac895df336481c3c2abe2c6
|
[
"MIT"
] | null | null | null |
scrapers/covid_scraper.py
|
ZachGeo/covidGR_API
|
2f316337dda65bd33ac895df336481c3c2abe2c6
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup
from datetime import date
from lxml import html
import requests
import re
import json
class CovidScraper:
def __init__(self):
self.api_url = 'http://127.0.0.1:5000/covidgr'
self.api_sum_url = 'http://127.0.0.1:5000/summary/covidgr'
self.api_test_url = 'http://127.0.0.1:5000/covidgr/tests'
self.scrape_url = 'https://www.worldometers.info/coronavirus/country/greece/'
self.scrape_tests_url = 'https://github.com/owid/covid-19-data/blob/master/public/data/testing/covid-testing-latest-data-source-details.csv'
self.today = ''
self.covid_data = []
        self.summary_data = []
def scrape_data(self):
data = []
self.today = str(date.today())
soup = self.scrape_page_content()
soup_test_page = self.scrape_page_content_contains_tests()
if soup:
self.get_daily_data(soup)
self.get_summary_data(soup)
if self.summary_data and self.covid_data:
post_daily_and_sum_covid_data = self.call_api_put_data(
self.today, self.covid_data, self.summary_data)
data.append(post_daily_and_sum_covid_data)
if soup_test_page:
tests_data = self.get_tests_per_day(soup_test_page)
            if tests_data and tests_data[0]:
post_daily_tests_covid_data = self.call_api_post_tested_covid_data(
tests_data[0], tests_data[1])
data.append(post_daily_tests_covid_data)
return data
def scrape_page_content(self):
page = requests.get(self.scrape_url)
soup = BeautifulSoup(page.content, 'html.parser')
return soup
def scrape_page_content_contains_tests(self):
page = requests.get(self.scrape_tests_url)
soup = BeautifulSoup(page.content, 'html.parser')
return soup
def get_daily_data(self, soup):
covid_data = []
daily_covidgr_html_content = soup.find('li', class_='news_li')
get_daily_covidgr_text = daily_covidgr_html_content.text
for elem in get_daily_covidgr_text.split():
            regex = r'\d*(.|)\d+'
match = re.findall(regex, elem)
if match:
covid_data.append(elem)
self.covid_data = covid_data
def get_summary_data(self, soup):
summary_data = []
all_cases_covidgr_html_content = soup.find_all(
'div', class_='maincounter-number')
for item in range(len(all_cases_covidgr_html_content)):
regex = r'(\n)|\s'
all_cases_data = re.sub(
regex, '', all_cases_covidgr_html_content[item].text)
summary_data.append(all_cases_data)
self.summary_data = summary_data
def get_tests_per_day(self, tree):
html_content = tree.find('tr', id='LC34').find_all('td')
country_code = html_content[1]
date_test = html_content[3].text
if country_code.text == 'GRC':
today_tests = html_content[10].text
total_tests = html_content[8].text
return [date_test, today_tests]
def call_api_post_tested_covid_data(self, today, tests):
headers = {
'Content-type': 'application/json',
}
data = json.dumps({"date": today, "daily_test": tests})
response_tests = requests.post(
self.api_test_url, headers=headers, data=data)
return response_tests.json()
def call_api_put_data(self, today, covid_data, summary_data):
headers = {
'Content-type': 'application/json',
}
data = json.dumps(
{"date": today, "cases": covid_data[0], "deaths": covid_data[1]})
sum_data = json.dumps(
{"sum_cases": summary_data[0], "sum_deaths": summary_data[1], "sum_recovered": summary_data[2]})
response = requests.post(self.api_url, headers=headers, data=data)
response_sum = requests.put(
self.api_sum_url, headers=headers, data=sum_data)
return [response.json(), response_sum.json()]
if __name__ == '__main__':
cs = CovidScraper()
results = cs.scrape_data()
print(results)
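# Note (added): get_daily_data() keeps only the whitespace-separated tokens that
# contain digits, so a headline such as "Greece reports 230 new cases and 5 deaths"
# would yield ['230', '5']. The sentence is illustrative, not actual scraped output.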
| 32.439394
| 148
| 0.615834
| 544
| 4,282
| 4.523897
| 0.215074
| 0.058513
| 0.026412
| 0.013409
| 0.336449
| 0.224299
| 0.145063
| 0.138155
| 0.088582
| 0.088582
| 0
| 0.015479
| 0.275806
| 4,282
| 131
| 149
| 32.687023
| 0.778136
| 0
| 0
| 0.084211
| 0
| 0.010526
| 0.111397
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.094737
| false
| 0
| 0.063158
| 0
| 0.231579
| 0.010526
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
162894b73abedfff0ad797772b95e5e53cb507ab
| 2,412
|
py
|
Python
|
setup.py
|
Oli2/presto-python-client
|
11a89c2528a35d5af6916e9c9175cb3e1f84160b
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
Oli2/presto-python-client
|
11a89c2528a35d5af6916e9c9175cb3e1f84160b
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
Oli2/presto-python-client
|
11a89c2528a35d5af6916e9c9175cb3e1f84160b
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import re
from setuptools import setup
import textwrap
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('prestodb/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
setup(
name='presto-python-client',
author='Presto Team',
author_email='presto-users@googlegroups.com',
version=version,
url='https://github.com/prestodb/presto-python-client',
packages=['prestodb'],
package_data={'': ['LICENSE', 'README.md']},
description='Client for the Presto distributed SQL Engine',
long_description=textwrap.dedent("""
Client for Presto (https://prestodb.io), a distributed SQL engine for
interactive and batch big data processing. Provides a low-level client and
a DBAPI 2.0 implementation.
"""),
license='Apache 2.0',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Database :: Front-Ends',
],
install_requires=[
'click',
'future',
'ipaddress',
'requests',
'requests_kerberos',
'six',
'typing',
],
    extras_require={'tests': [
'httpretty',
'pytest',
'pytest-runner',
]}
)
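# Note (added, illustrative): the regex plus ast.literal_eval above lifts the
# version straight out of prestodb/__init__.py, so a line such as
#   __version__ = '0.7.0'   # value is hypothetical
# makes `version` the string '0.7.0' without importing the package.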
| 33.041096
| 78
| 0.641376
| 281
| 2,412
| 5.437722
| 0.572954
| 0.087042
| 0.114529
| 0.051047
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009683
| 0.22927
| 2,412
| 72
| 79
| 33.5
| 0.812265
| 0.216003
| 0
| 0.037037
| 0
| 0
| 0.556679
| 0.027142
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.074074
| 0
| 0.074074
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
162b50aea1cc09a5257abec74537cee83cae39dc
| 368
|
py
|
Python
|
Graphs/Pie Chart.py
|
TausifAnsari/PyHub
|
f6c949dc6a3974f57d7d146708443d0ceeb4418f
|
[
"MIT"
] | 1
|
2020-09-30T19:31:20.000Z
|
2020-09-30T19:31:20.000Z
|
Graphs/Pie Chart.py
|
TanviSutar/PyHub
|
6281e9f515674fb51f0d0862c26ec18020fa7d83
|
[
"MIT"
] | null | null | null |
Graphs/Pie Chart.py
|
TanviSutar/PyHub
|
6281e9f515674fb51f0d0862c26ec18020fa7d83
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as graph
subject = ["Probability", "Calculas", "Discrete Mathematics", "Adv Engineering Mathematics",
"Linear Algebra", "Cryptography"]
weightage = [250,900,850,1200,290,345]
seperator = [0.05,0,0,0,0.05,0.05]
graph.title("Mathematics Topic Weightage")
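# Note (added comment): autopct="%0.1f%%" labels each wedge with its percentage to
# one decimal place, and explode=separator nudges the wedges with non-zero offsets
# (Probability, Linear Algebra, Cryptography) slightly away from the centre.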
graph.pie(weightage, labels=subject, autopct="%0.1f%%", explode=separator)
graph.show()
| 30.666667
| 93
| 0.741848
| 50
| 368
| 5.46
| 0.66
| 0.032967
| 0.029304
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098214
| 0.086957
| 368
| 12
| 94
| 30.666667
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0.341463
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
162b6c04231d6cc1d5159da7ca51127039c4295e
| 6,252
|
py
|
Python
|
exercises/perform_model_selection.py
|
noavilk/IML.HUJI
|
35aa4e6fbe489239e4fe72bf38c0dba3e6c81f37
|
[
"MIT"
] | null | null | null |
exercises/perform_model_selection.py
|
noavilk/IML.HUJI
|
35aa4e6fbe489239e4fe72bf38c0dba3e6c81f37
|
[
"MIT"
] | null | null | null |
exercises/perform_model_selection.py
|
noavilk/IML.HUJI
|
35aa4e6fbe489239e4fe72bf38c0dba3e6c81f37
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import numpy as np
import pandas as pd
from sklearn import datasets
from IMLearn.metrics import mean_square_error
from IMLearn.utils import split_train_test
from IMLearn.model_selection import cross_validate
from IMLearn.learners.regressors import PolynomialFitting, LinearRegression, RidgeRegression
from sklearn.linear_model import Lasso
from utils import *
import plotnine as gg
def select_polynomial_degree(n_samples: int = 100, noise: float = 5):
"""
Simulate data from a polynomial model and use cross-validation to select the best fitting degree
Parameters
----------
n_samples: int, default=100
Number of samples to generate
noise: float, default = 5
Noise level to simulate in responses
"""
# Question 1 - Generate dataset for model f(x)=(x+3)(x+2)(x+1)(x-1)(x-2) + eps for eps Gaussian noise
# and split into training- and testing portions
def f(x):
return (x + 3) * (x + 2) * (x + 1) * (x - 1) * (x - 2)
X = np.linspace(-1.2, 2, n_samples)
y = f(X) + np.random.normal(0, noise, n_samples)
train_X, train_y, test_X, test_y = split_train_test(pd.DataFrame(X), pd.Series(y), train_proportion=(2 / 3))
df_train = pd.DataFrame({"x": train_X.squeeze(), "y": train_y, "type": "Train"})
df_test = pd.DataFrame({"x": test_X.squeeze(), "y": test_y, "type": "test"})
x_stat = np.linspace(-1.4, 2, 100)
df_stat = pd.DataFrame({"x": x_stat, "y": f(x_stat), "type": "Model"})
df = pd.concat([df_test, df_train])
title = f"f(x) = (x+3)(x+2)(x+1)(x-1)(x-2) + Gaussian noise ~ N(0,{noise})"
p = gg.ggplot() + \
gg.geom_point(df, gg.aes("x", "y", color="type")) + \
gg.geom_line(df_stat, gg.aes("x", "y")) + \
gg.theme_bw() + \
gg.ggtitle(title)
# print(p)
gg.ggsave(filename=f'../../IML/ex5/plots/{title}.png', plot=p, verbose=False)
# Question 2 - Perform CV for polynomial fitting with degrees 0,1,...,10
train_err = []
validation_err = []
for k in range(11):
pf = PolynomialFitting(k)
train_score, validation_score = cross_validate(pf, train_X.to_numpy(), train_y.to_numpy(), mean_square_error)
train_err.append(train_score)
validation_err.append(validation_score)
df1 = pd.DataFrame({"k": range(11), "avg error": train_err, "type": "train error"})
df2 = pd.DataFrame({"k": range(11), "avg error": validation_err, "type": "validation error"})
df = pd.concat([df1, df2])
title = f" Cross Validation for Polynomial Fitting Over Different Degrees k"
p = gg.ggplot(df, gg.aes("k", "avg error", color="type")) + \
gg.geom_point() + \
gg.theme_bw() + gg.scale_x_continuous(breaks=range(11)) + \
gg.labs(y="Average training and validation errors",
title=f"{title} \nWith Noise: {noise}, Num of samples: {n_samples}")
gg.ggsave(filename=f'../../IML/ex5/plots/{title} {noise} {n_samples}.png', plot=p, verbose=False)
# Question 3 - Using best value of k, fit a k-degree polynomial model and report test error
best_k = np.argmin(np.array(validation_err))
pf = PolynomialFitting(int(best_k))
pf.fit(train_X.to_numpy(), train_y.to_numpy())
y_pred = pf.predict(test_X.to_numpy())
print("best k =", best_k)
print("Test = ", round(mean_square_error(test_y.to_numpy(), y_pred), 2))
print("Validation = ", round(validation_err[best_k], 2))
def select_regularization_parameter(n_samples: int = 50, n_evaluations: int = 500):
"""
Using sklearn's diabetes dataset use cross-validation to select the best fitting regularization parameter
values for Ridge and Lasso regressions
Parameters
----------
n_samples: int, default=50
Number of samples to generate
n_evaluations: int, default = 500
Number of regularization parameter values to evaluate for each of the algorithms
"""
# Question 6 - Load diabetes dataset and split into training and testing portions
X, y = datasets.load_diabetes(return_X_y=True, as_frame=True)
train_X, train_y, test_X, test_y = X.iloc[:50, :], y[:50], X.iloc[50:, ], y[50:]
# Question 7 - Perform CV for different values of the regularization parameter for Ridge and Lasso regressions
for name, learner, ran in [("Ridge", RidgeRegression, np.linspace(0.001, 0.05, 500)),
("Lasso", Lasso, np.linspace(0.001, 0.5, 500))]:
train_err = []
validation_err = []
for lam in ran:
rg = learner(lam)
train_score, validation_score = cross_validate(rg, train_X.to_numpy(), train_y.to_numpy(),
mean_square_error)
train_err.append(train_score)
validation_err.append(validation_score)
df1 = pd.DataFrame({"lambda": ran, "avg error": train_err, "type": "train error"})
df2 = pd.DataFrame({"lambda": ran, "avg error": validation_err, "type": "validation error"})
df = pd.concat([df1, df2])
title = f"{name} Regularization Cross Validate Over Different Lambda"
p = gg.ggplot(df, gg.aes("lambda", "avg error", color="type")) + \
gg.geom_line() + \
gg.theme_bw() + gg.labs(y="Average training and validation errors", title=title)
gg.ggsave(filename=f'../../IML/ex5/plots/{title}.png', plot=p, verbose=False)
# Question 8 - Compare best Ridge model, best Lasso model and Least Squares model
best_lam = np.argmin(np.array(validation_err))
rg = learner(ran[best_lam])
rg.fit(train_X.to_numpy(), train_y.to_numpy())
y_pred = rg.predict(test_X.to_numpy())
print(f"best lambda {name} = {round(ran[best_lam], 3)}")
print(f"Test MSE {name} = {round(mean_square_error(test_y.to_numpy(), y_pred), 2)}")
lr = LinearRegression()
lr.fit(train_X.to_numpy(), train_y.to_numpy())
print("Linear Regression Loss = ", lr.loss(test_X.to_numpy(), test_y.to_numpy()))
if __name__ == '__main__':
np.random.seed(0)
select_polynomial_degree()
select_polynomial_degree(noise=0)
select_polynomial_degree(n_samples=1500, noise=10)
select_regularization_parameter()
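# Illustrative sketch (assumption, not part of the exercise): the k-fold logic that
# a cross_validate helper like the one imported above is typically built on.
# Signature and fold count are hypothetical.
#
#   def kfold_cv(estimator, X, y, scoring, cv=5):
#       folds = np.array_split(np.arange(len(X)), cv)
#       train_scores, val_scores = [], []
#       for fold in folds:
#           mask = np.ones(len(X), dtype=bool)
#           mask[fold] = False
#           estimator.fit(X[mask], y[mask])
#           train_scores.append(scoring(y[mask], estimator.predict(X[mask])))
#           val_scores.append(scoring(y[fold], estimator.predict(X[fold])))
#       return np.mean(train_scores), np.mean(val_scores)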
| 45.304348
| 117
| 0.644274
| 913
| 6,252
| 4.242059
| 0.202629
| 0.028918
| 0.016525
| 0.016783
| 0.452621
| 0.364575
| 0.286083
| 0.262329
| 0.221792
| 0.19055
| 0
| 0.023185
| 0.213532
| 6,252
| 137
| 118
| 45.635037
| 0.764491
| 0.189379
| 0
| 0.134831
| 0
| 0.011236
| 0.164323
| 0.035757
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033708
| false
| 0
| 0.123596
| 0.011236
| 0.168539
| 0.067416
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
162c0bbced3e06420246b7de0d2ad6e3745c54ef
| 9,001
|
py
|
Python
|
libraries/tools/media_utils.py
|
unfoldingWord-dev/d43-catalog
|
6c36f59b9b326e0ead45739c09631ef1e57c4932
|
[
"MIT"
] | 1
|
2017-05-18T22:18:31.000Z
|
2017-05-18T22:18:31.000Z
|
libraries/tools/media_utils.py
|
unfoldingWord-dev/d43-catalog
|
6c36f59b9b326e0ead45739c09631ef1e57c4932
|
[
"MIT"
] | 54
|
2016-11-07T03:07:03.000Z
|
2021-04-14T21:24:04.000Z
|
libraries/tools/media_utils.py
|
unfoldingWord-dev/d43-catalog
|
6c36f59b9b326e0ead45739c09631ef1e57c4932
|
[
"MIT"
] | 7
|
2016-10-26T18:15:14.000Z
|
2018-06-01T18:37:32.000Z
|
import re
import copy
def parse_media(media, content_version, project_chapters):
"""
Converts a media object into formats usable in the catalog
:param media: the media object
:type media: dict
:param content_version: the current version of the source content
:type content_version: string
:param project_chapters: a dictionary of project chapters
:type project_chapters: dict
:return: resource_formats, project_formats a list of resource formats and dictionary of project formats
"""
resource_formats = []
project_formats = {}
if 'resource' in media:
resource_formats = _parse_resource(media['resource'], content_version)
if 'projects' in media:
for project in media['projects']:
project_id = project['identifier']
chapters = []
if project_id == 'obs':
# TRICKY: obs projects always have 50 chapters
# This allows empty projects to still publish media.
for x in range(1, 51): # chapters 1..50
chapters.append(str(x).zfill(2))
if project_id in project_chapters:
chapters = project_chapters[project_id]
project_formats[project_id] = _parse_project(project, content_version, chapters)
return resource_formats, project_formats
def _parse_resource(resource, content_version):
"""
Converts a resource media object into formats usable in the catalog
:param resource: the media object
:type resource: dict
:param content_version: the current version of the source content
:type content_version: string
:return: a list of formats
"""
source_version = _expand_keys(resource['version'], {'latest': content_version})
formats = []
if 'media' in resource:
for media in resource['media']:
media_version = _expand_keys(media['version'], {'latest': content_version})
expansion_vars = _make_expansion_variables(media, content_version)
if 'quality' in media and len(media['quality']) > 0:
# build format for each quality
for quality in media['quality']:
expansion_vars['quality'] = quality
format = _make_format(source_version=source_version,
media_version=media_version,
quality=quality,
media=media,
expansion_vars=expansion_vars)
formats.append(format)
else:
# build a single format
format = _make_format(source_version=source_version,
media_version=media_version,
quality=None,
media=media,
expansion_vars=expansion_vars)
formats.append(format)
return formats
def _make_format(source_version, media_version, quality, media, expansion_vars):
format = {
'format': '',
'modified': '',
'size': 0,
'source_version': '{}'.format(source_version),
'version': '{}'.format(media_version),
'contributor': media['contributor'],
'url': _expand_keys(media['url'], expansion_vars),
'signature': '',
'build_rules': [
'signing.sign_given_url'
]
}
if quality:
format['quality'] = quality
return format
def _parse_project(project, content_version, chapters_ids):
"""
Converts a project media object into formats usable in the catalog
:param project: the media object
:type project: dict
:param content_version: the current version of the source content
:type content_version: string
:param chapters_ids: a list of chapter identifiers in the project
:type chapters_ids: list
:return: a list of formats
"""
source_version = _expand_keys(project['version'], {'latest': content_version})
formats = []
if 'media' in project:
for media in project['media']:
media_version = _expand_keys(media['version'], {'latest': content_version})
expansion_vars = _make_expansion_variables(media, content_version)
if 'quality' in media and len(media['quality']) > 0:
# build format for each quality
for quality in media['quality']:
expansion_vars['quality'] = quality
format = _make_format(source_version=source_version,
media_version=media_version,
quality=quality,
media=media,
expansion_vars=expansion_vars)
chapters = _prepare_chapter_formats(media, chapters_ids, expansion_vars)
if chapters:
format['chapters'] = chapters
formats.append(format)
else:
# build single format
format = _make_format(source_version=source_version,
media_version=media_version,
quality=None,
media=media,
expansion_vars=expansion_vars)
chapters = _prepare_chapter_formats(media, chapters_ids, expansion_vars)
if chapters:
format['chapters'] = chapters
formats.append(format)
return formats
def _prepare_chapter_formats(media, chapters, expansion_vars):
"""
This is a wrapper around the method `_parse_project_chapter`.
Since we routinely conditionally prepare chapters in multiple places
this handles it in one place
:param media: the media object to inspect
:param chapters: a list of chapter ids
:param expansion_vars: a dictionary of variables that may be expanded in the chapter url
:return:
"""
if 'chapter_url' in media:
chapter_url = _expand_keys(media['chapter_url'], expansion_vars)
chapters = _parse_project_chapter(chapter_url, chapters)
if chapters:
return chapters
return None
def _parse_project_chapter(chapter_url, chapters):
"""
Generates chapter formats for use in the catalog
:param chapter_url: the url template that will be used in the formats
:param chapters: a list of chapter ids
:type chapters: list
:return:
"""
# TODO: this requires that we give a well formatted list of chapter ids and check if the Rc is a book
# only book RCs can have chapter formats
formats = []
for chapter_id in chapters:
format = {
'size': 0,
'length': 0,
'modified': '',
'identifier': chapter_id,
'url': _expand_keys(chapter_url, {'chapter': chapter_id}),
'signature': '',
'build_rules': [
'signing.sign_given_url'
]
}
formats.append(format)
return formats
def _make_expansion_variables(media_block, content_version):
"""
Creates a dictionary of expansion variables for media items.
:param media_block:
:param content_version:
:return:
"""
vars = copy.copy(media_block)
# strip black listed keys
black_list = ['url', 'chapter_url']
for key in black_list:
if key in vars:
del vars[key]
# TRICKY: using `latest` as an expansion variable in urls is not explicitly stated in the spec,
# but it's a common misunderstanding so we allow it.
vars['latest'] = '{}'.format(content_version)
return vars
def _expand_keys(target, replacements):
"""
Replaces all the dict keys found in the string with the dict values.
Keys in the string must be delimited by brackets {}
:param target:
:param replacements:
:return:
"""
    # `basestring` existed only on Python 2; plain `str` keeps this check working on Python 3.
    if isinstance(target, str):
result = target
if not isinstance(replacements, dict):
raise Exception('Expected dictionary of replacements but received {}'.format(type(replacements)))
for key in replacements:
if not isinstance(replacements[key], list):
                result = re.sub(r'{\s*' + key + r'\s*}', '{}'.format(replacements[key]), result)
return result
elif isinstance(target, int):
return target
else:
raise Exception('Invalid replacement target "{}". Expected string but received {}'.format(target, type(target)))
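# Illustration (added, not part of the original module): _expand_keys substitutes
# bracketed keys in a URL template. The URL and values below are hypothetical.
#   _expand_keys('https://cdn.example.com/{quality}/{chapter}.mp3',
#                {'quality': '64kbps', 'chapter': '01'})
#   -> 'https://cdn.example.com/64kbps/01.mp3'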
| 39.47807
| 120
| 0.579602
| 940
| 9,001
| 5.368085
| 0.179787
| 0.05549
| 0.033888
| 0.02279
| 0.452041
| 0.419342
| 0.385256
| 0.346611
| 0.329568
| 0.27824
| 0
| 0.002364
| 0.342184
| 9,001
| 227
| 121
| 39.651982
| 0.849856
| 0.257527
| 0
| 0.473684
| 0
| 0
| 0.086943
| 0.006856
| 0
| 0
| 0
| 0.004405
| 0
| 1
| 0.06015
| false
| 0
| 0.015038
| 0
| 0.150376
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
162cd54c3b760abba50c342688a1d04f0b1b3010
| 631
|
py
|
Python
|
BST.py
|
boristown/leetcode
|
2e510b7913653da75cd9d10f1adce4c466e74768
|
[
"MIT"
] | 1
|
2021-10-04T03:09:51.000Z
|
2021-10-04T03:09:51.000Z
|
BST.py
|
boristown/leetcode
|
2e510b7913653da75cd9d10f1adce4c466e74768
|
[
"MIT"
] | null | null | null |
BST.py
|
boristown/leetcode
|
2e510b7913653da75cd9d10f1adce4c466e74768
|
[
"MIT"
] | null | null | null |
class BST:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
@staticmethod
def array2BST(array):
'''
array:sorted array
'''
n = len(array)
if n == 0: return None
m = n//2
left,root,right = array[:m],array[m],array[m+1:]
return BST(root,BST.array2BST(left),BST.array2BST(right))
@staticmethod
def BST2array(node):
'''
node:BST node
'''
if not node: return []
return BST.BST2array(node.left)+[node.val]+BST.BST2array(node.right)
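# Round-trip check (added example, not part of the original file): a sorted array
# builds a balanced BST whose inorder flattening reproduces the input.
if __name__ == '__main__':
    data = [1, 2, 3, 5, 8, 13, 21]
    root = BST.array2BST(data)
    print(BST.BST2array(root))  # [1, 2, 3, 5, 8, 13, 21]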
| 26.291667
| 76
| 0.534073
| 79
| 631
| 4.21519
| 0.316456
| 0.054054
| 0.12012
| 0.072072
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023585
| 0.328051
| 631
| 24
| 76
| 26.291667
| 0.761792
| 0.050713
| 0
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1875
| false
| 0
| 0
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
162ffe7bb753d133521ad38601ddfbb5cb83a226
| 4,192
|
py
|
Python
|
readme_metrics/MetricsMiddleware.py
|
readmeio/metrics-sdks-python
|
02bc6e486260641f1a62760d20370157a4928af6
|
[
"0BSD"
] | 2
|
2020-09-23T04:44:22.000Z
|
2021-07-06T18:14:11.000Z
|
readme_metrics/MetricsMiddleware.py
|
readmeio/metrics-sdks-python
|
02bc6e486260641f1a62760d20370157a4928af6
|
[
"0BSD"
] | null | null | null |
readme_metrics/MetricsMiddleware.py
|
readmeio/metrics-sdks-python
|
02bc6e486260641f1a62760d20370157a4928af6
|
[
"0BSD"
] | 1
|
2020-09-23T04:44:25.000Z
|
2020-09-23T04:44:25.000Z
|
import io
import time
import datetime
from readme_metrics.Metrics import Metrics
from readme_metrics.MetricsApiConfig import MetricsApiConfig
from readme_metrics.ResponseInfoWrapper import ResponseInfoWrapper
from werkzeug import Request
class MetricsMiddleware:
"""Core middleware class for ReadMe Metrics
Attributes:
config (MetricsApiConfig): Contains the configuration settings for the running
middleware instance
"""
def __init__(self, wsgi_app_reference, config: MetricsApiConfig):
"""
Constructs and initializes MetricsMiddleware WSGI middleware to be passed into
the currently running WSGI web server.
Args:
wsgi_app_reference ([type]): Reference to the current WSGI application,
which will be wrapped
config (MetricsApiConfig): Instance of MetricsApiConfig object
"""
self.config = config
self.app = wsgi_app_reference
self.metrics_core = Metrics(config)
def __call__(self, environ, start_response):
"""Method that is called by the running WSGI server.
You should NOT be calling this method yourself under normal circumstances.
"""
response_headers = {}
response_status = 0
iterable = None
req = Request(environ)
def _start_response(_status, _response_headers, *args):
write = start_response(_status, _response_headers, *args)
# Populate response info (headers & status)
nonlocal response_headers, response_status
response_headers = _response_headers
response_status = _status
return write
try:
req.rm_start_dt = str(datetime.datetime.utcnow())
req.rm_start_ts = int(time.time() * 1000)
if req.method == "POST":
# The next 4 lines are a workaround for a serious shortcoming in the
# WSGI spec.
#
# The data can only be read once, after which the socket is exhausted
# and cannot be read again. As such, we read the data and then
# repopulate the variable so that it can be used by other code down the
# pipeline.
#
# For more info: https://stackoverflow.com/a/13106009/643951
# the environment variable CONTENT_LENGTH may be empty or missing
try:
content_length = int(environ.get("CONTENT_LENGTH", 0))
                except ValueError:
content_length = 0
content_body = environ["wsgi.input"].read(content_length)
# guarding check to close stream
if hasattr(environ["CONTENT_LENGTH"], "close"):
environ["wsgi.input"].close()
environ["wsgi.input"] = io.BytesIO(content_body)
req.rm_content_length = content_length
req.rm_body = content_body
iterable = self.app(environ, _start_response)
for data in iterable:
res_ctype = ""
res_clength = 0
htype = next(
(h for h in response_headers if h[0] == "Content-Type"), None
)
hlength = next(
(h for h in response_headers if h[0] == "Content-Length"), None
)
if htype and hlength:
res_ctype = htype[1]
res_clength = int(hlength[1])
# Populate response body
res = ResponseInfoWrapper(
response_headers,
response_status,
res_ctype,
res_clength,
data.decode("utf-8"),
)
# Send off data to be queued (and processed) by ReadMe if allowed
self.metrics_core.process(req, res)
yield data
finally:
# Undocumented in WSGI spec but the iterable has to be closed
if hasattr(iterable, "close"):
iterable.close()
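# Wiring sketch (assumption, not part of the original module): how the middleware
# is typically attached to a WSGI application such as Flask. The MetricsApiConfig
# arguments shown are placeholders.
#
#   app.wsgi_app = MetricsMiddleware(
#       app.wsgi_app,
#       MetricsApiConfig("<README_API_KEY>", lambda req: {"id": "anonymous"}),
#   )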
| 34.933333
| 87
| 0.569656
| 437
| 4,192
| 5.311213
| 0.382151
| 0.058165
| 0.049548
| 0.049978
| 0.064627
| 0.064627
| 0.031883
| 0.031883
| 0.031883
| 0.031883
| 0
| 0.010554
| 0.367128
| 4,192
| 119
| 88
| 35.226891
| 0.864305
| 0.290792
| 0
| 0.032258
| 0
| 0
| 0.036128
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048387
| false
| 0
| 0.112903
| 0
| 0.193548
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
16312fcb11ab7937c366343185da9dd102a4e745
| 4,048
|
py
|
Python
|
kbrl.py
|
deekshaarya4/gymexperiments
|
2d503ba14fcfba41339de25dd78d649bd12693e6
|
[
"MIT"
] | null | null | null |
kbrl.py
|
deekshaarya4/gymexperiments
|
2d503ba14fcfba41339de25dd78d649bd12693e6
|
[
"MIT"
] | null | null | null |
kbrl.py
|
deekshaarya4/gymexperiments
|
2d503ba14fcfba41339de25dd78d649bd12693e6
|
[
"MIT"
] | null | null | null |
import numpy as np
import gym
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser(description='KBRL with KNN')
parser.add_argument('--episodes', nargs='?', type=int, default=500)
parser.add_argument('--max_timesteps', nargs='?', type=int, default=200)
parser.add_argument('environment')
args = parser.parse_args()
env = gym.make(args.environment).env
action_space = env.action_space
# hyperparameters:
epsilon = 1.0
exploration_decay = 0.98
k = 500 # number of nearest neighbors
minimum_num_iters = 500 # number of iterations used for training
num_iter = 0
max_iters = 0
gamma = 0.95
max_state_size = 15000 # because we don't know the state space size in continuous environments
# learning-related variables
states = None
actions = {}
rewards = {}
values = {}
# episode-related variables
episode_beginning = 0
def make_move(observation, reward, done):
global states, actions, values, rewards, num_iter, episode_beginning, max_iters, epsilon
if states is None:
# first state observed
states = np.zeros((max_state_size, observation.size))
if num_iter > minimum_num_iters and np.random.rand() > epsilon and values:
        # if amount of data is sufficient and values is populated (at least one episode has been run)
# testing phase: exploitation
# Uses k=500 nearest neighbors to pick the action which has the highest reward
nbrs = NearestNeighbors(n_neighbors=min(k,max_iters)).fit(states[:max_iters])
        distances, indices = nbrs.kneighbors(observation.reshape(1, -1))
# find the best action
action_list = {}
freq_list = {}
for i in indices[0]:
v = values[i]
a = actions[i]
vnew = action_list.get(a, 0) + v
action_list[a] = vnew
freq_list[a] = freq_list.get(a, 0) + 1
# normalize by number of times action occured and take action with highest value
for act in action_list:
action_list[act] = action_list[act] / freq_list[act]
sorted_list = [(y,x) for x,y in action_list.items()]
sorted_list.sort(reverse=True)
take_action = sorted_list[0][1]
else:
# training phase: exploration randomly picks an action
take_action = action_space.sample()
# populate the state present, action taken and reward obtained
if num_iter < max_state_size:
states[num_iter] = observation # save the state
actions[num_iter] = take_action # and the action we took
rewards[num_iter-1] = reward # and the reward we obtained last time step
values[num_iter-1] = 0
num_iter += 1
if done:
# end of episode: calculate the value function for this episode
val = 0
for t in reversed(range(episode_beginning, num_iter)):
val = gamma * val + rewards.get(t,0)
values[t] = val
episode_beginning = num_iter
max_iters = min(max(max_iters, num_iter), max_state_size)
# decay exploration probability
epsilon *= exploration_decay
# do not decay below 0
epsilon = max(epsilon, 0)
return take_action
# Ignore sklearn warnings
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
reward = 0
episode_reward = 0
done = False
cumulative_reward_list = []
for i in range(args.episodes):
observation = env.reset()
sum_reward = 0
for j in range(args.max_timesteps):
env.render()
action = make_move(observation, reward, done)
observation, reward, done, _ = env.step(action)
sum_reward += reward
if done:
break
episode_reward = episode_reward * 0.95 + sum_reward * 0.05
print('Reward for episode '+ str(i)+' : '+str(episode_reward))
cumulative_reward_list.append(episode_reward)
# env.render()
plt.plot(range(args.episodes), cumulative_reward_list, linewidth=2)
plt.xlabel("Episodes")
plt.ylabel("Cumulative Reward")
plt.title("Performance")
plt.show()
plt.close()
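# Usage note (added): the script expects a Gym environment id on the command line,
# e.g. `python kbrl.py MountainCar-v0` (the environment name is illustrative);
# episode count and horizon can be overridden with --episodes / --max_timesteps.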
| 30.900763
| 100
| 0.673913
| 555
| 4,048
| 4.769369
| 0.345946
| 0.031734
| 0.018134
| 0.014356
| 0.036267
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019008
| 0.233202
| 4,048
| 130
| 101
| 31.138462
| 0.833763
| 0.21418
| 0
| 0.022222
| 0
| 0
| 0.034516
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022222
| false
| 0.011111
| 0.066667
| 0
| 0.1
| 0.011111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
16320687d82ed5fd57ef5ebf44c1b6e925a208e1
| 12,169
|
py
|
Python
|
deepchem/models/atomic_conv.py
|
cjgalvin/deepchem
|
64993a129e7f0f78fed9500298b1828ac8a0757a
|
[
"MIT"
] | 3
|
2019-05-29T19:18:25.000Z
|
2021-01-25T05:44:05.000Z
|
deepchem/models/atomic_conv.py
|
cjgalvin/deepchem
|
64993a129e7f0f78fed9500298b1828ac8a0757a
|
[
"MIT"
] | 10
|
2017-02-23T19:39:22.000Z
|
2017-08-31T22:21:18.000Z
|
deepchem/models/atomic_conv.py
|
cjgalvin/deepchem
|
64993a129e7f0f78fed9500298b1828ac8a0757a
|
[
"MIT"
] | 1
|
2018-09-22T00:53:53.000Z
|
2018-09-22T00:53:53.000Z
|
__author__ = "Joseph Gomes"
__copyright__ = "Copyright 2017, Stanford University"
__license__ = "MIT"
import sys
from deepchem.models import KerasModel
from deepchem.models.layers import AtomicConvolution
from deepchem.models.losses import L2Loss
from tensorflow.keras.layers import Input, Layer
import numpy as np
import tensorflow as tf
import itertools
def initializeWeightsBiases(prev_layer_size,
size,
weights=None,
biases=None,
name=None):
"""Initializes weights and biases to be used in a fully-connected layer.
Parameters
----------
prev_layer_size: int
Number of features in previous layer.
size: int
Number of nodes in this layer.
weights: tf.Tensor, optional (Default None)
Weight tensor.
biases: tf.Tensor, optional (Default None)
Bias tensor.
name: str
Name for this op, optional (Defaults to 'fully_connected' if None)
Returns
-------
weights: tf.Variable
Initialized weights.
biases: tf.Variable
Initialized biases.
"""
if weights is None:
weights = tf.random.truncated_normal([prev_layer_size, size], stddev=0.01)
if biases is None:
biases = tf.zeros([size])
w = tf.Variable(weights, name='w')
b = tf.Variable(biases, name='b')
return w, b
class AtomicConvScore(Layer):
"""The scoring function used by the atomic convolution models."""
def __init__(self, atom_types, layer_sizes, **kwargs):
super(AtomicConvScore, self).__init__(**kwargs)
self.atom_types = atom_types
self.layer_sizes = layer_sizes
def build(self, input_shape):
self.type_weights = []
self.type_biases = []
self.output_weights = []
self.output_biases = []
n_features = int(input_shape[0][-1])
layer_sizes = self.layer_sizes
num_layers = len(layer_sizes)
weight_init_stddevs = [1 / np.sqrt(x) for x in layer_sizes]
bias_init_consts = [0.0] * num_layers
for ind, atomtype in enumerate(self.atom_types):
prev_layer_size = n_features
self.type_weights.append([])
self.type_biases.append([])
self.output_weights.append([])
self.output_biases.append([])
for i in range(num_layers):
weight, bias = initializeWeightsBiases(
prev_layer_size=prev_layer_size,
size=layer_sizes[i],
weights=tf.random.truncated_normal(
shape=[prev_layer_size, layer_sizes[i]],
stddev=weight_init_stddevs[i]),
biases=tf.constant(
value=bias_init_consts[i], shape=[layer_sizes[i]]))
self.type_weights[ind].append(weight)
self.type_biases[ind].append(bias)
prev_layer_size = layer_sizes[i]
weight, bias = initializeWeightsBiases(prev_layer_size, 1)
self.output_weights[ind].append(weight)
self.output_biases[ind].append(bias)
def call(self, inputs):
frag1_layer, frag2_layer, complex_layer, frag1_z, frag2_z, complex_z = inputs
atom_types = self.atom_types
num_layers = len(self.layer_sizes)
def atomnet(current_input, atomtype):
prev_layer = current_input
for i in range(num_layers):
layer = tf.nn.bias_add(
tf.matmul(prev_layer, self.type_weights[atomtype][i]),
self.type_biases[atomtype][i])
layer = tf.nn.relu(layer)
prev_layer = layer
output_layer = tf.squeeze(
tf.nn.bias_add(
tf.matmul(prev_layer, self.output_weights[atomtype][0]),
self.output_biases[atomtype][0]))
return output_layer
frag1_zeros = tf.zeros_like(frag1_z, dtype=tf.float32)
frag2_zeros = tf.zeros_like(frag2_z, dtype=tf.float32)
complex_zeros = tf.zeros_like(complex_z, dtype=tf.float32)
frag1_atomtype_energy = []
frag2_atomtype_energy = []
complex_atomtype_energy = []
for ind, atomtype in enumerate(atom_types):
frag1_outputs = tf.map_fn(lambda x: atomnet(x, ind), frag1_layer)
frag2_outputs = tf.map_fn(lambda x: atomnet(x, ind), frag2_layer)
complex_outputs = tf.map_fn(lambda x: atomnet(x, ind), complex_layer)
cond = tf.equal(frag1_z, atomtype)
frag1_atomtype_energy.append(tf.where(cond, frag1_outputs, frag1_zeros))
cond = tf.equal(frag2_z, atomtype)
frag2_atomtype_energy.append(tf.where(cond, frag2_outputs, frag2_zeros))
cond = tf.equal(complex_z, atomtype)
complex_atomtype_energy.append(
tf.where(cond, complex_outputs, complex_zeros))
frag1_outputs = tf.add_n(frag1_atomtype_energy)
frag2_outputs = tf.add_n(frag2_atomtype_energy)
complex_outputs = tf.add_n(complex_atomtype_energy)
frag1_energy = tf.reduce_sum(frag1_outputs, 1)
frag2_energy = tf.reduce_sum(frag2_outputs, 1)
complex_energy = tf.reduce_sum(complex_outputs, 1)
binding_energy = complex_energy - (frag1_energy + frag2_energy)
return tf.expand_dims(binding_energy, axis=1)
class AtomicConvModel(KerasModel):
"""Implements an Atomic Convolution Model.
Implements the atomic convolutional networks as introduced in
Gomes, Joseph, et al. "Atomic convolutional networks for predicting protein-ligand binding affinity." arXiv preprint arXiv:1703.10603 (2017).
The atomic convolutional networks function as a variant of
graph convolutions. The difference is that the "graph" here is
the nearest neighbors graph in 3D space. The AtomicConvModel
leverages these connections in 3D space to train models that
learn to predict energetic state starting from the spatial
geometry of the model.
"""
def __init__(self,
frag1_num_atoms=70,
frag2_num_atoms=634,
complex_num_atoms=701,
max_num_neighbors=12,
batch_size=24,
atom_types=[
6, 7., 8., 9., 11., 12., 15., 16., 17., 20., 25., 30., 35.,
53., -1.
],
radial=[[
1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0,
7.5, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0, 11.5, 12.0
], [0.0, 4.0, 8.0], [0.4]],
layer_sizes=[32, 32, 16],
learning_rate=0.001,
**kwargs):
"""
Parameters
----------
frag1_num_atoms: int
Number of atoms in first fragment
frag2_num_atoms: int
      Number of atoms in second fragment
max_num_neighbors: int
Maximum number of neighbors possible for an atom. Recall neighbors
are spatial neighbors.
atom_types: list
List of atoms recognized by model. Atoms are indicated by their
nuclear numbers.
radial: list
TODO: add description
layer_sizes: list
TODO: add description
learning_rate: float
Learning rate for the model.
"""
# TODO: Turning off queue for now. Safe to re-activate?
self.complex_num_atoms = complex_num_atoms
self.frag1_num_atoms = frag1_num_atoms
self.frag2_num_atoms = frag2_num_atoms
self.max_num_neighbors = max_num_neighbors
self.batch_size = batch_size
self.atom_types = atom_types
rp = [x for x in itertools.product(*radial)]
frag1_X = Input(shape=(frag1_num_atoms, 3))
frag1_nbrs = Input(shape=(frag1_num_atoms, max_num_neighbors))
frag1_nbrs_z = Input(shape=(frag1_num_atoms, max_num_neighbors))
frag1_z = Input(shape=(frag1_num_atoms,))
frag2_X = Input(shape=(frag2_num_atoms, 3))
frag2_nbrs = Input(shape=(frag2_num_atoms, max_num_neighbors))
frag2_nbrs_z = Input(shape=(frag2_num_atoms, max_num_neighbors))
frag2_z = Input(shape=(frag2_num_atoms,))
complex_X = Input(shape=(complex_num_atoms, 3))
complex_nbrs = Input(shape=(complex_num_atoms, max_num_neighbors))
complex_nbrs_z = Input(shape=(complex_num_atoms, max_num_neighbors))
complex_z = Input(shape=(complex_num_atoms,))
self._frag1_conv = AtomicConvolution(
atom_types=self.atom_types, radial_params=rp,
boxsize=None)([frag1_X, frag1_nbrs, frag1_nbrs_z])
self._frag2_conv = AtomicConvolution(
atom_types=self.atom_types, radial_params=rp,
boxsize=None)([frag2_X, frag2_nbrs, frag2_nbrs_z])
self._complex_conv = AtomicConvolution(
atom_types=self.atom_types, radial_params=rp,
boxsize=None)([complex_X, complex_nbrs, complex_nbrs_z])
score = AtomicConvScore(self.atom_types, layer_sizes)([
self._frag1_conv, self._frag2_conv, self._complex_conv, frag1_z,
frag2_z, complex_z
])
model = tf.keras.Model(
inputs=[
frag1_X, frag1_nbrs, frag1_nbrs_z, frag1_z, frag2_X, frag2_nbrs,
frag2_nbrs_z, frag2_z, complex_X, complex_nbrs, complex_nbrs_z,
complex_z
],
outputs=score)
super(AtomicConvModel, self).__init__(
model, L2Loss(), batch_size=batch_size, **kwargs)
def default_generator(self,
dataset,
epochs=1,
mode='fit',
deterministic=True,
pad_batches=True):
batch_size = self.batch_size
def replace_atom_types(z):
def place_holder(i):
if i in self.atom_types:
return i
return -1
return np.array([place_holder(x) for x in z])
for epoch in range(epochs):
for ind, (F_b, y_b, w_b, ids_b) in enumerate(
dataset.iterbatches(
batch_size, deterministic=True, pad_batches=pad_batches)):
N = self.complex_num_atoms
N_1 = self.frag1_num_atoms
N_2 = self.frag2_num_atoms
M = self.max_num_neighbors
batch_size = F_b.shape[0]
num_features = F_b[0][0].shape[1]
frag1_X_b = np.zeros((batch_size, N_1, num_features))
for i in range(batch_size):
frag1_X_b[i] = F_b[i][0]
frag2_X_b = np.zeros((batch_size, N_2, num_features))
for i in range(batch_size):
frag2_X_b[i] = F_b[i][3]
complex_X_b = np.zeros((batch_size, N, num_features))
for i in range(batch_size):
complex_X_b[i] = F_b[i][6]
frag1_Nbrs = np.zeros((batch_size, N_1, M))
frag1_Z_b = np.zeros((batch_size, N_1))
for i in range(batch_size):
z = replace_atom_types(F_b[i][2])
frag1_Z_b[i] = z
frag1_Nbrs_Z = np.zeros((batch_size, N_1, M))
for atom in range(N_1):
for i in range(batch_size):
atom_nbrs = F_b[i][1].get(atom, "")
frag1_Nbrs[i, atom, :len(atom_nbrs)] = np.array(atom_nbrs)
for j, atom_j in enumerate(atom_nbrs):
frag1_Nbrs_Z[i, atom, j] = frag1_Z_b[i, atom_j]
frag2_Nbrs = np.zeros((batch_size, N_2, M))
frag2_Z_b = np.zeros((batch_size, N_2))
for i in range(batch_size):
z = replace_atom_types(F_b[i][5])
frag2_Z_b[i] = z
frag2_Nbrs_Z = np.zeros((batch_size, N_2, M))
for atom in range(N_2):
for i in range(batch_size):
atom_nbrs = F_b[i][4].get(atom, "")
frag2_Nbrs[i, atom, :len(atom_nbrs)] = np.array(atom_nbrs)
for j, atom_j in enumerate(atom_nbrs):
frag2_Nbrs_Z[i, atom, j] = frag2_Z_b[i, atom_j]
complex_Nbrs = np.zeros((batch_size, N, M))
complex_Z_b = np.zeros((batch_size, N))
for i in range(batch_size):
z = replace_atom_types(F_b[i][8])
complex_Z_b[i] = z
complex_Nbrs_Z = np.zeros((batch_size, N, M))
for atom in range(N):
for i in range(batch_size):
atom_nbrs = F_b[i][7].get(atom, "")
complex_Nbrs[i, atom, :len(atom_nbrs)] = np.array(atom_nbrs)
for j, atom_j in enumerate(atom_nbrs):
complex_Nbrs_Z[i, atom, j] = complex_Z_b[i, atom_j]
inputs = [
frag1_X_b, frag1_Nbrs, frag1_Nbrs_Z, frag1_Z_b, frag2_X_b,
frag2_Nbrs, frag2_Nbrs_Z, frag2_Z_b, complex_X_b, complex_Nbrs,
complex_Nbrs_Z, complex_Z_b
]
y_b = np.reshape(y_b, newshape=(batch_size, 1))
yield (inputs, [y_b], [w_b])
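# --- Hedged illustration (not part of the original model code) ---
# A minimal, self-contained sketch of the padding pattern used by
# default_generator above: a per-atom neighbor dict is written into
# fixed-size (N, M) index and atomic-number arrays so every sample has the
# same shape. All names and values below are made up for illustration.
import numpy as np

N, M = 4, 3                                         # atoms, max neighbors (toy sizes)
neighbor_dict = {0: [1, 2], 1: [0], 3: [0, 1, 2]}   # hypothetical neighbor lists
z = np.array([6, 1, 1, 8])                          # hypothetical atomic numbers

nbrs = np.zeros((N, M))
nbrs_z = np.zeros((N, M))
for atom in range(N):
    atom_nbrs = neighbor_dict.get(atom, [])
    nbrs[atom, :len(atom_nbrs)] = atom_nbrs
    nbrs_z[atom, :len(atom_nbrs)] = z[atom_nbrs]
# nbrs and nbrs_z now have the fixed (N, M) shapes the Keras Inputs expect.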
| 36.109792
| 143
| 0.639494
| 1,741
| 12,169
| 4.182079
| 0.152211
| 0.038319
| 0.019778
| 0.02637
| 0.368219
| 0.295564
| 0.229501
| 0.15561
| 0.141739
| 0.08323
| 0
| 0.031029
| 0.255814
| 12,169
| 336
| 144
| 36.217262
| 0.772968
| 0.136412
| 0
| 0.097872
| 0
| 0
| 0.005314
| 0
| 0
| 0
| 0
| 0.008929
| 0
| 1
| 0.038298
| false
| 0
| 0.034043
| 0
| 0.106383
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1632af4d460f191002d145c0aa53f5434243e662
| 5,717
|
py
|
Python
|
setup.py
|
DivoK/mystery
|
b656eebe678c64864b2a5762765f36bddd540933
|
[
"MIT"
] | 8
|
2019-05-31T19:46:49.000Z
|
2020-05-14T22:21:35.000Z
|
setup.py
|
DivoK/mystery
|
b656eebe678c64864b2a5762765f36bddd540933
|
[
"MIT"
] | 4
|
2019-06-04T15:24:22.000Z
|
2021-06-01T23:53:37.000Z
|
setup.py
|
DivoK/mystery
|
b656eebe678c64864b2a5762765f36bddd540933
|
[
"MIT"
] | 4
|
2019-06-04T15:08:46.000Z
|
2020-04-25T15:52:00.000Z
|
"""
Core business logic for `mystery`.
This code will run when the package is being built and installed.
"""
import json
import pathlib
import random
import tempfile
import urllib.request
import typing
import setuptools
from setuptools.command.sdist import sdist
# Load the configuration file.
CONFIG_PATH = pathlib.Path('config.json')
CONFIG = json.load(CONFIG_PATH.open('r'))
def _get_lockfile_path() -> pathlib.Path:
"""
Assemble the lockfile's path.
:return: lockfile path.
:rtype: pathlib.Path
"""
return pathlib.Path(tempfile.gettempdir()).joinpath(CONFIG['lockfile_name'])
class SDistCommand(sdist):
"""
Will be registered as a replacement for pip's 'sdist' command.
"""
def run(self):
dep_lock_path = _get_lockfile_path()
try:
dep_lock_path.unlink()
except FileNotFoundError:
pass
super().run()
def _get_package_list() -> typing.List[str]:
"""
Get a list of possible packages.
:return: list of package names.
:rtype: typing.List[str]
"""
try:
# Get the top PyPI packages and use one of them.
response = urllib.request.urlopen(CONFIG['top_pypi_packages_link'])
possible_packages_raw = response.read()
except urllib.request.URLError:
# Use the offline backup file.
with open(CONFIG['top_pypi_packages_offline_backup'], 'r') as backup_file:
possible_packages_raw = backup_file.read()
return json.loads(possible_packages_raw)['rows'][: CONFIG['top_x_packages']]
def _choose_mystery_package() -> str:
"""
Choose the underlying mysterious package and handle the lockfile's state.
:return: mystery package name.
:rtype: str
"""
# To keep the chosen dependency consistent in between setup.py runs, 'mystery' uses a temporary lockfile.
dep_lock_path = _get_lockfile_path()
if dep_lock_path.exists():
# Use the locked package and unlink the lockfile.
chosen_package = dep_lock_path.read_text().strip()
dep_lock_path.unlink()
else:
# Choose a package and create the lockfile.
possible_packages = _get_package_list()
chosen_package = random.choice(
[package['project'] for package in possible_packages]
)
dep_lock_path.write_text(chosen_package) # Lock the chosen package of course.
return chosen_package
def _fix_package_name(package_name: str) -> str:
"""
Fix the package name so it could be placed in the __init__.py file.
:param package_name: mystery package name.
:type package_name: str
:return: fixed mystery package name.
:rtype: str
"""
# Transform to eligible package name.
fixed_package_name = package_name.replace('-', '_')
# Special case for the 'backports' modules.
if fixed_package_name.startswith('backports_'):
fixed_package_name = fixed_package_name.replace('_', '.', 1)
return fixed_package_name
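# Illustration (not part of the original file):
#   _fix_package_name('requests-oauthlib')              -> 'requests_oauthlib'
#   _fix_package_name('backports-functools-lru-cache')  -> 'backports.functools_lru_cache'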
def _write_init_py(package_name: str) -> None:
"""
Dynamically write the __init__.py for the package using the chosen package.
:param package_name: mystery package name.
:type package_name: str
:rtype: None
"""
package_name = _fix_package_name(package_name)
init_py_path = pathlib.Path('mystery')
init_py_path.mkdir(exist_ok=True)
init_py_path = init_py_path / '__init__.py'
init_py_path.write_text(
f'''
# Here we're trying to import the mystery package (it's "{package_name}" this time).
# If it exists, overwrite 'mystery' in 'sys.modules'. Else, print there was an error.
import sys
try:
import {package_name}
except ImportError as error:
print('Internal error:', error)
print("The mystery package wasn't playing nice. Sorry!")
print('Hint: you can always try to reinstall mystery and get a different package!')
sorry = 'try reinstalling mystery and get a different package!'
else:
sys.modules['mystery'] = {package_name}
sys.modules['mystery'].__mystery_init_py__ = __file__
sys.modules['mystery'].__mystery_package_name__ = '{package_name}'
del sys # We care about this only when mystery fails (and even that's inconsequential).
'''
)
def _get_long_description_data() -> typing.Tuple[str, str]:
"""
Get data regarding the long description of the package.
:return: tuple of the README.md text and the long_description type.
:rtype: typing.Tuple[str, str]
"""
with open('README.md', 'r') as readme:
return (readme.read(), 'text/markdown')
CHOSEN_PACKAGE = _choose_mystery_package()
_write_init_py(CHOSEN_PACKAGE)
LONG_DESCRIPTION, LONG_DESCRIPTION_CONTENT_TYPE = _get_long_description_data()
setuptools.setup(
name='mystery',
version='1.0.2',
description='It is a riddle, wrapped in a mystery, inside an enigma.',
url='https://github.com/DivoK/mystery',
author='Divo Kaplan',
author_email='divokaplan@gmail.com',
packages=setuptools.find_packages(),
install_requires=[CHOSEN_PACKAGE],
cmdclass={'sdist': SDistCommand},
python_requires='>=3.6',
include_package_data=True,
long_description=LONG_DESCRIPTION,
long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE,
keywords='mystery setuptools fun python-packages random',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Intended Audience :: Other Audience',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| 32.117978
| 109
| 0.688473
| 735
| 5,717
| 5.12517
| 0.319728
| 0.070082
| 0.020441
| 0.023361
| 0.085479
| 0.0515
| 0
| 0
| 0
| 0
| 0
| 0.002866
| 0.206577
| 5,717
| 177
| 110
| 32.299435
| 0.827601
| 0.238412
| 0
| 0.089109
| 0
| 0.019802
| 0.340762
| 0.03954
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069307
| false
| 0.009901
| 0.118812
| 0
| 0.247525
| 0.039604
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1632cc5107307be666384111255532a74d2d121a
| 1,665
|
py
|
Python
|
ADMM_primal.py
|
CrazyIvanPro/Optimal_Transport
|
aa782820a5ca5a01909ed3c32acbada43f6cfa0f
|
[
"MIT"
] | 2
|
2020-11-09T10:37:19.000Z
|
2021-07-06T09:24:30.000Z
|
ADMM_primal.py
|
CrazyIvanPro/Optimal_Transport
|
aa782820a5ca5a01909ed3c32acbada43f6cfa0f
|
[
"MIT"
] | null | null | null |
ADMM_primal.py
|
CrazyIvanPro/Optimal_Transport
|
aa782820a5ca5a01909ed3c32acbada43f6cfa0f
|
[
"MIT"
] | 1
|
2021-06-03T17:07:01.000Z
|
2021-06-03T17:07:01.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =======================================
# File Name: ADMM_primal.py
# Purpose : implementation for ADMM method
# for solving primal problem
# =======================================
from utils import get_params
import numpy as np
import sys
def ADMM_primal(mu, nu, c, iters=10000, rho=1024, alpha=1.618):
"""ADMM_primal
"""
# initialize
m, n = c.shape
pi = np.zeros((m, n))
pi_dag = np.zeros((m, n))
w = np.zeros((m, n))
u = np.zeros(m)
v = np.zeros(n)
rho_tilde = rho * 32
while rho_tilde >= rho:
for _ in range(iters):
r = ((-w + u.reshape((m, 1)) + v.reshape((1, n)) - c) / rho +
mu.reshape((m, 1)) + nu.reshape((1, n)) + pi_dag)
pi = (r - ((r.sum(axis=1) - r.sum() / (m + n + 1)) / (n + 1)).reshape((m, 1))
- ((r.sum(axis=0) - r.sum() / (m + n + 1)) / (m + 1)).reshape((1, n)))
pi_dag = np.maximum(pi + w / rho, 0.0)
u = u + alpha * rho * (mu - pi.sum(axis=1))
v = v + alpha * rho * (nu - pi.sum(axis=0))
w = w + alpha * rho * (pi - pi_dag)
rho_tilde = rho_tilde / 2
print('error_mu = %.5e' % np.linalg.norm(pi_dag.sum(axis=1) - mu, 1))
print('error_nu = %.5e' % np.linalg.norm(pi_dag.sum(axis=0) - nu, 1))
print('fval = %.5e' % (c * pi_dag).sum())
if __name__ == '__main__':
try:
print("Test...")
_mu, _nu, _c = get_params(64, 'random')
ADMM_primal(_mu, _nu, _c)
except KeyboardInterrupt:
print("Ctrl+C pressed...")
sys.exit(1)
| 29.732143
| 89
| 0.465465
| 241
| 1,665
| 3.074689
| 0.319502
| 0.047233
| 0.043185
| 0.036437
| 0.167341
| 0.070175
| 0.070175
| 0.070175
| 0
| 0
| 0
| 0.037197
| 0.305706
| 1,665
| 55
| 90
| 30.272727
| 0.603806
| 0.153754
| 0
| 0
| 0
| 0
| 0.060932
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030303
| false
| 0
| 0.090909
| 0
| 0.121212
| 0.151515
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
163306f757b2b46fb97912f794d0169c24de2f36
| 1,117
|
py
|
Python
|
misc_scripts/CleanVCFparams.py
|
pombase/legacy-eg-loader
|
1a324121325ffc3b9a4c15922f7a12756a9c3206
|
[
"Apache-2.0"
] | null | null | null |
misc_scripts/CleanVCFparams.py
|
pombase/legacy-eg-loader
|
1a324121325ffc3b9a4c15922f7a12756a9c3206
|
[
"Apache-2.0"
] | null | null | null |
misc_scripts/CleanVCFparams.py
|
pombase/legacy-eg-loader
|
1a324121325ffc3b9a4c15922f7a12756a9c3206
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import os
import sys
import pprint
import argparse
parser = argparse.ArgumentParser(description='Clean up the data for a given parameter')
parser.add_argument('--infile', help="Path to the VCF file", default='test.vcf')
parser.add_argument('--outfile', help="Path to the new VCF file", default='test.out.vcf')
parser.add_argument('--param', help="Parameter to clean", default='PL')
args = parser.parse_args()
fi = open(args.infile, 'r')
#fo = open('Spombe.2013-01-02.filt3c.nr57-final.snps.anno-snpeff3.cleaned3.AB325691.vcf', 'w')
fo = open(args.outfile, 'w')
for line in fi:
if len(line) == 0:
continue
if line[0] == '#':
fo.write(line)
continue
line = line.rstrip()
v = line.split('\t')
params = v[8].split(':')
out = v[0:8]
try:
paramIndex = params.index(args.param)
del params[paramIndex]
out.append(':'.join(params))
for d in v[9:]:
dv = d.split(':')
del dv[paramIndex]
out.append(':'.join(dv))
except ValueError:
out.append(':'.join(params))
out += v[9:]
fo.write("\t".join(out) + "\n")
fi.close()
fo.close()
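# --- Hedged illustration (not part of the original script) ---
# What the loop above does to one made-up record when --param PL is given:
# the PL entry is removed from the FORMAT column and from every sample column.
fmt = "GT:PL:DP".split(':')
sample = "0/1:20,0,30:15".split(':')
idx = fmt.index('PL')
del fmt[idx]
del sample[idx]
print(':'.join(fmt), ':'.join(sample))  # -> GT:DP 0/1:15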
| 25.386364
| 94
| 0.637422
| 169
| 1,117
| 4.189349
| 0.467456
| 0.038136
| 0.072034
| 0.036723
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028078
| 0.170994
| 1,117
| 43
| 95
| 25.976744
| 0.736501
| 0.097583
| 0
| 0.114286
| 0
| 0
| 0.16004
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.114286
| 0
| 0.114286
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1633a9fb3de8a2d02c1b973e0da5225da5fdee84
| 25,426
|
py
|
Python
|
create_coherency_dataset.py
|
UKPLab/acl20-dialogue-coherence-assessment
|
328b888855dc833b4b0c05c259ee7115f4219dbe
|
[
"MIT"
] | 12
|
2020-05-03T12:41:53.000Z
|
2021-11-19T06:45:56.000Z
|
create_coherency_dataset.py
|
UKPLab/acl20-dialogue-coherence-assessment
|
328b888855dc833b4b0c05c259ee7115f4219dbe
|
[
"MIT"
] | 2
|
2020-07-02T08:19:19.000Z
|
2021-12-03T16:58:02.000Z
|
create_coherency_dataset.py
|
UKPLab/acl20-dialogue-coherence-assessment
|
328b888855dc833b4b0c05c259ee7115f4219dbe
|
[
"MIT"
] | 4
|
2020-08-27T08:36:55.000Z
|
2021-08-19T21:53:31.000Z
|
import math
import os
from copy import deepcopy
from ast import literal_eval
import pandas as pd
from math import factorial
import random
from collections import Counter, defaultdict
import sys
from nltk import word_tokenize
from tqdm import tqdm, trange
import argparse
import numpy as np
import re
import csv
from sklearn.model_selection import train_test_split
from swda.swda import CorpusReader, Transcript, Utterance
act2word = {1:"inform",2:"question", 3:"directive", 4:"commissive"}
def permute(sents, sent_DAs, amount):
""" return a list of different! permuted sentences and their respective dialog acts """
""" if amount is greater than the possible amount of permutations, only the uniquely possible ones are returned """
assert len(sents) == len(sent_DAs), "length of permuted sentences and list of DAs must be equal"
if amount == 0:
return []
permutations = [list(range(len(sents)))]
amount = min(amount, factorial(len(sents))-1)
for i in range(amount):
permutation = np.random.permutation(len(sents))
while permutation.tolist() in permutations:
permutation = np.random.permutation(len(sents))
permutations.append(permutation.tolist())
return permutations[1:]  # drop the first entry: the original ordering, kept only so it is never re-generated
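# Illustration (values made up; the actual output depends on the random state):
# for a 3-utterance dialogue, permute(sents, DAs, amount=2) might return
# [[2, 0, 1], [1, 2, 0]] -- index orderings that all differ from the original [0, 1, 2].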
def draw_rand_sent(act_utt_df, sent_len, amount):
""" df is supposed to be a pandas dataframe with colums 'act' and 'utt' (utterance),
with act being a number from 1 to 4 and utt being a sentence """
permutations = []
for _ in range(amount):
(utt, da, name, ix) = draw_rand_sent_from_df(act_utt_df)
sent_insert_ix = random.randint(0, sent_len-1)
permutations.append((utt, da, name, ix, sent_insert_ix))
return permutations
def draw_rand_sent_from_df(df):
ix = random.randint(0, len(df['utt'])-1)
return literal_eval(df['utt'][ix]), df['act'][ix], df['dialogue'][ix], df['ix'][ix]
def half_perturb(sents, sent_DAs, amount):
assert len(sents) == len(sent_DAs), "length of permuted sentences and list of DAs must be equal"
permutations = [list(range(len(sents)))]
for _ in range(amount):
while True:
speaker = random.randint(0,1) # choose one of the speakers
speaker_ix = list(filter(lambda x: (x-speaker) % 2 == 0, range(len(sents))))
permuted_speaker_ix = np.random.permutation(speaker_ix)
new_sents = list(range(len(sents)))
for (i_to, i_from) in zip(speaker_ix, permuted_speaker_ix):
new_sents[i_to] = i_from
if (not new_sents == permutations[0]) and (
not new_sents in permutations or len(permutations) > math.factorial(len(speaker_ix))):
permutations.append(new_sents)
break
return permutations[1:]
def utterance_insertions(length, amount):
possible_permutations = []
original = list(range(length))
for ix in original:
for y in range(length):
if ix == y: continue
ix_removed = original[0:ix] + ([] if ix == length-1 else original[ix+1:])
ix_removed.insert(y, ix)
possible_permutations.append(deepcopy(ix_removed))
permutations = []
for _ in range(amount):
i = random.randint(0, len(possible_permutations)-1)
permutations.append(possible_permutations[i])
return permutations
class DailyDialogConverter:
def __init__(self, data_dir, tokenizer, word2id, task='', ranking_dataset = True):
self.data_dir = data_dir
self.act_utt_file = os.path.join(data_dir, 'act_utt_name.txt')
self.tokenizer = tokenizer
self.word2id = word2id
self.output_file = None
self.task = task
self.ranking_dataset = ranking_dataset
self.perturbation_statistics = 0
self.setname = os.path.split(data_dir)[1]
assert self.setname == 'train' or self.setname == 'validation' or self.setname == 'test', "wrong data dir name"
def create_act_utt(self):
dial_file = os.path.join(self.data_dir, "dialogues_{}.txt".format(self.setname))
act_file = os.path.join(self.data_dir, "dialogues_act_{}.txt".format(self.setname))
output_file = os.path.join(self.data_dir, 'act_utt_name.txt')
df = open(dial_file, 'r')
af = open(act_file, 'r')
of = open(output_file, 'w')
csv_writer = csv.writer(of, delimiter='|')
for line_count, (dial, act) in tqdm(enumerate(zip(df, af)), total=11118):
seqs = dial.split('__eou__')
seqs = seqs[:-1]
if len(seqs) < 5:
continue
tok_seqs = [self.tokenizer(seq) for seq in seqs]
tok_seqs = [[w.lower() for w in utt] for utt in tok_seqs]
tok_seqs = [self.word2id(seq) for seq in tok_seqs]
acts = act.split(' ')
acts = acts[:-1]
acts = [int(act) for act in acts]
for utt_i, (act, utt) in enumerate(zip(acts, tok_seqs)):
dialog_name = "{}_{}".format(self.setname, line_count)
row = (act, utt, dialog_name,utt_i)
csv_writer.writerow(row)
def convert_dset(self, amounts):
# data_dir is supposed to be the dir with the respective train/test/val-dataset files
print("Creating {} perturbations for task {}".format(amounts, self.task))
dial_file = os.path.join(self.data_dir, "dialogues_{}.txt".format(self.setname))
act_file = os.path.join(self.data_dir, "dialogues_act_{}.txt".format(self.setname))
self.output_file = os.path.join(self.data_dir, 'coherency_dset_{}.txt'.format(self.task))
root_data_dir = os.path.split(self.data_dir)[0]
shuffled_path = os.path.join(root_data_dir, "shuffled_{}".format(self.task))
if not os.path.isdir(shuffled_path):
os.mkdir(shuffled_path)
assert os.path.isfile(dial_file) and os.path.isfile(act_file), "could not find input files"
assert os.path.isfile(self.act_utt_file), "missing act_utt.txt in data_dir"
with open(self.act_utt_file, 'r') as f:
act_utt_df = pd.read_csv(f, sep='|', names=['act','utt','dialogue','ix'])
rand_generator = lambda: draw_rand_sent_from_df(act_utt_df)
df = open(dial_file, 'r')
af = open(act_file, 'r')
of = open(self.output_file, 'w')
discarded = 0
for line_count, (dial, act) in tqdm(enumerate(zip(df, af)), total=11118):
seqs = dial.split('__eou__')
seqs = seqs[:-1]
if len(seqs) < 5:
discarded += 1
continue
tok_seqs = [self.tokenizer(seq) for seq in seqs]
tok_seqs = [[w.lower() for w in utt] for utt in tok_seqs]
tok_seqs = [self.word2id(seq) for seq in tok_seqs]
acts = act.split(' ')
acts = acts[:-1]
acts = [int(act) for act in acts]
if self.task == 'up':
permuted_ixs = permute(tok_seqs, acts, amounts)
elif self.task == 'us':
permuted_ixs = draw_rand_sent(act_utt_df, len(tok_seqs), amounts)
elif self.task == 'hup':
permuted_ixs = half_perturb(tok_seqs, acts, amounts)
elif self.task == 'ui':
permuted_ixs = utterance_insertions(len(tok_seqs), amounts)
shuffle_file = os.path.join(shuffled_path, "{}_{}.csv".format(self.setname, line_count))
with open(shuffle_file, "w") as f:
csv_writer = csv.writer(f)
for perm in permuted_ixs:
if self.task == 'us':
(utt, da, name, ix, insert_ix) = perm
row = [name, ix,insert_ix]
csv_writer.writerow(row)
else:
csv_writer.writerow(perm)
self.perturbation_statistics += len(permuted_ixs)
if self.task == 'us':
for p in permuted_ixs:
(insert_sent, insert_da, name, ix, insert_ix) = p
a = " ".join([str(a) for a in acts])
u = str(tok_seqs)
p_a = deepcopy(acts)
p_a[insert_ix] = insert_da
pa = " ".join([str(a) for a in p_a])
p_u = deepcopy(tok_seqs)
p_u[insert_ix] = self.word2id(insert_sent)
of.write("{}|{}|{}|{}|{}\n".format("0",a,u,pa,p_u))
of.write("{}|{}|{}|{}|{}\n".format("1",pa,p_u,a,u))
else:
for p in permuted_ixs:
a = " ".join([str(a) for a in acts])
u = str(tok_seqs)
pa = [acts[i] for i in p]
p_a = " ".join([str(a) for a in pa])
pu = [tok_seqs[i] for i in p]
p_u = str(pu)
of.write("{}|{}|{}|{}|{}\n".format("0",a,u,p_a,p_u))
of.write("{}|{}|{}|{}|{}\n".format("1",p_a,p_u,a,u))
print(discarded)
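# Format of each line written to coherency_dset_<task>.txt above (the
# Switchboard converter below writes the same format):
#   label|acts_1|utterances_1|acts_2|utterances_2
# where label 0 means the original (acts, utterances) pair comes first and
# label 1 means the perturbed pair comes first.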
class SwitchboardConverter:
def __init__(self, data_dir, tokenizer, word2id, task='', seed=42):
self.corpus = CorpusReader(data_dir)
self.data_dir = data_dir
self.tokenizer = tokenizer
self.word2id = word2id
self.task = task
self.utt_num = 0
for utt in self.corpus.iter_utterances():
self.utt_num += 1
self.trans_num = 0
for trans in self.corpus.iter_transcripts():
self.trans_num += 1
self.da2num = switchboard_da_mapping()
# CAUTION: make sure that for each task the seed is the same s.t. the splits will be the same!
train_ixs, val_ixs = train_test_split(range(self.trans_num), shuffle=True, train_size=0.8, random_state=seed)
val_ixs, test_ixs = train_test_split(val_ixs, shuffle=True, train_size=0.5, random_state=seed)
self.train_ixs, self.val_ixs, self.test_ixs = train_ixs, val_ixs, test_ixs
self.utt_da_pairs = []
prev_da = "%"
for i, utt in enumerate(self.corpus.iter_utterances()):
sentence = re.sub(r"([+/\}\[\]]|\{\w)", "",
utt.text)
sentence = self.word2id(self.tokenizer(sentence))
act = utt.damsl_act_tag()
if act is None: act = "%"
if act == "+": act = prev_da
_, swda_name = os.path.split(utt.swda_filename)
swda_name = swda_name[:-4] if swda_name.endswith('.csv') else swda_name
ix = utt.utterance_index
self.utt_da_pairs.append((sentence, act, swda_name, ix))
def draw_rand_sent(self):
r = random.randint(0, len(self.utt_da_pairs)-1)
return self.utt_da_pairs[r]
def create_vocab(self):
print("Creating Vocab file for Switchboard")
cnt = Counter()
for utt in self.corpus.iter_utterances():
sentence = re.sub(r"([+/\}\[\]]|\{\w)", "",
utt.text)
sentence = self.tokenizer(sentence)
for w in sentence:
cnt[w] += 1
itos_file = os.path.join(self.data_dir, "itos.txt")
itosf = open(itos_file, "w")
for (word, _) in cnt.most_common(25000):
itosf.write("{}\n".format(word))
#getKeysByValue
def swda_permute(self, sents, amount, speaker_ixs):
if amount == 0:
return []
permutations = [list(range(len(sents)))]
segment_permutations = []
amount = min(amount, factorial(len(sents))-1)
segm_ixs = self.speaker_segment_ixs(speaker_ixs)
segments = list(set(segm_ixs.values()))
for i in range(amount):
while True:
permutation = []
segm_perm = np.random.permutation(len(segments))
segment_permutations.append(segm_perm)
for segm_ix in segm_perm:
utt_ixs = sorted(getKeysByValue(segm_ixs, segm_ix))
permutation = permutation + utt_ixs
if permutation not in permutations:
break
permutations.append(permutation)
return permutations[1:], segment_permutations  # drop the first entry: the original ordering, kept only so it is never re-generated
def speaker_segment_ixs(self, speaker_ixs):
i = 0
segment_indices = dict()
prev_speaker = speaker_ixs[0]
for j,speaker in enumerate(speaker_ixs):
if speaker != prev_speaker:
prev_speaker = speaker
i += 1
segment_indices[j] = i
return segment_indices
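# Illustration (not part of the original file): contiguous same-speaker runs
# are numbered left to right, e.g.
#   speaker_ixs = [0, 0, 1, 1, 1, 0]  ->  {0: 0, 1: 0, 2: 1, 3: 1, 4: 1, 5: 2}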
def swda_half_perturb(self, amount, speaker_ixs):
segm_ixs = self.speaker_segment_ixs(speaker_ixs)
segments = list(set(segm_ixs.values()))
segment_permutations = []
permutations = [list(segm_ixs.keys())]
for _ in range(amount):
speaker = random.randint(0,1) # choose one of the speakers
speaker_to_perm = list(filter(lambda x: (x-speaker) % 2 == 0, segments))
speaker_orig = list(filter(lambda x: (x-speaker) % 2 != 0, segments))
#TODO: rename either speaker_ix or speaker_ixs, they are something different, but the names are too close
if len(speaker_to_perm) < 2:
return []
while True:
permuted_speaker_ix = np.random.permutation(speaker_to_perm).tolist()
new_segments = [None]*(len(speaker_orig)+len(permuted_speaker_ix))
if speaker == 0 :
new_segments[::2] = permuted_speaker_ix
new_segments[1::2] = speaker_orig
else:
new_segments[1::2] = permuted_speaker_ix
new_segments[::2] = speaker_orig
segment_permutations.append(new_segments)
permutation = []
for segm_ix in new_segments:
utt_ixs = sorted(getKeysByValue(segm_ixs, segm_ix))
permutation = permutation + utt_ixs
if not permutation in permutations:
permutations.append(permutation)
break
return permutations[1:], segment_permutations
def swda_utterance_insertion(self, speaker_ixs, amounts):
segment_ixs = self.speaker_segment_ixs(speaker_ixs)
segments = list(set(segment_ixs.values()))
segment_permutations = []
permutations = []
i = 0
for _ in range(amounts):
while True: # actually: do ... while permutation not in permutations
i_from = random.randint(0, len(segments)-1)
i_to = random.randint(0, len(segments)-2)
segm_perm = deepcopy(segments)
rem_elem = segments[i_from]
segm_perm = segm_perm[0:i_from] + segm_perm[i_from+1:]
segm_perm = segm_perm[0:i_to] + [rem_elem] + segm_perm[i_to:]
permutation = []
for segm_ix in segm_perm:
utt_ixs = sorted(getKeysByValue(segment_ixs, segm_ix))
permutation = permutation + utt_ixs
if permutation not in permutations:
permutations.append(permutation)
segment_permutations.append(segm_perm)
break
return permutations, segment_permutations
def swda_utterance_sampling(self, speaker_ixs, amount):
segm_ixs = self.speaker_segment_ixs(speaker_ixs)
segments = list(set(segm_ixs.values()))
permutations = []
for i in range(amount):
(sentence, act, swda_name, ix) = self.draw_rand_sent()
insert_ix = random.choice(segments)
permutations.append((sentence, act, swda_name, ix, insert_ix))
return permutations
def convert_dset(self, amounts):
# create distinct train/validation/test files. they'll correspond to the created
# splits from the constructor
train_output_file = os.path.join(self.data_dir, 'train', 'coherency_dset_{}.txt'.format(self.task))
val_output_file = os.path.join(self.data_dir, 'validation', 'coherency_dset_{}.txt'.format(self.task))
test_output_file = os.path.join(self.data_dir, 'test', 'coherency_dset_{}.txt'.format(self.task))
if not os.path.exists(os.path.join(self.data_dir, 'train')):
os.makedirs(os.path.join(self.data_dir, 'train'))
if not os.path.exists(os.path.join(self.data_dir, 'validation')):
os.makedirs(os.path.join(self.data_dir, 'validation'))
if not os.path.exists(os.path.join(self.data_dir, 'test')):
os.makedirs(os.path.join(self.data_dir, 'test'))
trainfile = open(train_output_file, 'w')
valfile = open(val_output_file, 'w')
testfile = open(test_output_file, 'w')
shuffled_path = os.path.join(self.data_dir, "shuffled_{}".format(self.task))
if not os.path.isdir(shuffled_path):
os.mkdir(shuffled_path)
for i,trans in enumerate(tqdm(self.corpus.iter_transcripts(display_progress=False), total=1155)):
utterances = []
acts = []
speaker_ixs = []
prev_act = "%"
for utt in trans.utterances:
sentence = re.sub(r"([+/\}\[\]]|\{\w)", "",
utt.text)
sentence = self.word2id(self.tokenizer(sentence))
utterances.append(sentence)
act = utt.damsl_act_tag()
if act is None: act = "%"
if act == "+": act = prev_act
acts.append(self.da2num[act])
prev_act = act
if "A" in utt.caller:
speaker_ixs.append(0)
else:
speaker_ixs.append(1)
if self.task == 'up':
permuted_ixs , segment_perms = self.swda_permute(utterances, amounts, speaker_ixs)
elif self.task == 'us':
permuted_ixs = self.swda_utterance_sampling(speaker_ixs, amounts)
elif self.task == 'hup':
permuted_ixs , segment_perms = self.swda_half_perturb(amounts, speaker_ixs)
elif self.task == 'ui':
permuted_ixs, segment_perms = self.swda_utterance_insertion(speaker_ixs, amounts)
swda_fname = os.path.split(trans.swda_filename)[1]
shuffle_file = os.path.join(shuffled_path, swda_fname) # [:-4]
with open(shuffle_file, "w") as f:
csv_writer = csv.writer(f)
if self.task == 'us':
for perm in permuted_ixs:
(utt, da, name, ix, insert_ix) = perm
row = [name, ix,insert_ix]
csv_writer.writerow(row)
else:
for perm in segment_perms:
csv_writer.writerow(perm)
if self.task == 'us':
for p in permuted_ixs:
a = " ".join([str(x) for x in acts])
u = str(utterances)
insert_sent, insert_da, name, ix, insert_ix = p
insert_da = self.da2num[insert_da]
p_a = deepcopy(acts)
p_a[insert_ix] = insert_da
pa = " ".join([str(x) for x in p_a])
p_u = deepcopy(utterances)
p_u[insert_ix] = insert_sent
if i in self.train_ixs:
trainfile.write("{}|{}|{}|{}|{}\n".format("0",a,u,pa,p_u))
trainfile.write("{}|{}|{}|{}|{}\n".format("1",pa,p_u,a,u))
if i in self.val_ixs:
valfile.write("{}|{}|{}|{}|{}\n".format("0",a,u,pa,p_u))
valfile.write("{}|{}|{}|{}|{}\n".format("1",pa,p_u,a,u))
if i in self.test_ixs:
testfile.write("{}|{}|{}|{}|{}\n".format("0",a,u,pa,p_u))
testfile.write("{}|{}|{}|{}|{}\n".format("1",pa,p_u,a,u))
else:
for p in permuted_ixs:
a = " ".join([str(x) for x in acts])
u = str(utterances)
pa = [acts[i] for i in p]
p_a = " ".join([str(x) for x in pa])
pu = [utterances[i] for i in p]
p_u = str(pu)
if i in self.train_ixs:
trainfile.write("{}|{}|{}|{}|{}\n".format("0",a,u,p_a,p_u))
trainfile.write("{}|{}|{}|{}|{}\n".format("1",p_a,p_u,a,u))
if i in self.val_ixs:
valfile.write("{}|{}|{}|{}|{}\n".format("0",a,u,p_a,p_u))
valfile.write("{}|{}|{}|{}|{}\n".format("1",p_a,p_u,a,u))
if i in self.test_ixs:
testfile.write("{}|{}|{}|{}|{}\n".format("0",a,u,p_a,p_u))
testfile.write("{}|{}|{}|{}|{}\n".format("1",p_a,p_u,a,u))
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--datadir",
required=True,
type=str,
help="""The input directory where the files of the corpus
are located. """)
parser.add_argument("--corpus",
required=True,
type=str,
help="""the name of the corpus to use, currently either 'DailyDialog' or 'Switchboard' """)
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--amount',
type=int,
default=20,
help="number of perturbations to create per dialogue")
parser.add_argument('--word2id',
action='store_true',
help="convert the words to ids")
parser.add_argument('--task',
required=True,
type=str,
default="up",
help="""for which task the dataset should be created.
alternatives: up (utterance permutation)
us (utterance sampling)
hup (half utterance perturbation)
ui (utterance insertion, nothing directly added!)""")
args = parser.parse_args()
random.seed(args.seed)
np.random.seed(args.seed)
if args.word2id:
f = open(os.path.join(args.datadir, "itos.txt"), "r")
word2id_dict = dict()
for i, word in enumerate(f):
word2id_dict[word[:-1].lower()] = i
word2id = lambda x: [word2id_dict[y] for y in x] # don't convert words to ids (yet). It gets done in the glove wrapper of mtl_coherence.py
else:
word2id = lambda x: x
tokenizer = word_tokenize
if args.corpus == 'DailyDialog':
converter = DailyDialogConverter(args.datadir, tokenizer, word2id, task=args.task)
converter.create_act_utt()
elif args.corpus == 'Switchboard':
converter = SwitchboardConverter(args.datadir, tokenizer, word2id, args.task, args.seed)
converter.create_vocab()
converter.convert_dset(amounts=args.amount)
def getKeysByValue(dictOfElements, valueToFind):
listOfKeys = list()
for item in dictOfElements.items():
if item[1] == valueToFind:
listOfKeys.append(item[0])
return listOfKeys
def switchboard_da_mapping():
mapping_dict = dict({
"sd": 1,
"b": 2,
"sv": 3,
"aa": 4,
"%-": 5,
"ba": 6,
"qy": 7,
"x": 8,
"ny": 9,
"fc": 10,
"%": 11,
"qw": 12,
"nn": 13,
"bk": 14,
"h": 15,
"qy^d": 16,
"o": 17,
"bh": 18,
"^q": 19,
"bf": 20,
"na": 21,
"ny^e": 22,
"ad": 23,
"^2": 24,
"b^m": 25,
"qo": 26,
"qh": 27,
"^h": 28,
"ar": 29,
"ng": 30,
"nn^e": 31,
"br": 32,
"no": 33,
"fp": 34,
"qrr": 35,
"arp": 36,
"nd": 37,
"t3": 38,
"oo": 39,
"co": 40,
"cc": 41,
"t1": 42,
"bd": 43,
"aap": 44,
"am": 45,
"^g": 46,
"qw^d": 47,
"fa": 48,
"ft":49
})
d = defaultdict(lambda: 11)
for (k, v) in mapping_dict.items():
d[k] = v
return d
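# Illustration (not part of the original script): the returned defaultdict maps
# known DAMSL tags to their ids and any unseen tag to 11, the id of "%", e.g.
#   switchboard_da_mapping()["sd"]        -> 1
#   switchboard_da_mapping()["not-a-tag"] -> 11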
if __name__ == "__main__":
main()
| 39.977987
| 146
| 0.532801
| 3,085
| 25,426
| 4.211994
| 0.142626
| 0.0157
| 0.018624
| 0.018316
| 0.492689
| 0.421733
| 0.360397
| 0.311836
| 0.272895
| 0.247807
| 0
| 0.014494
| 0.343349
| 25,426
| 635
| 147
| 40.040945
| 0.763776
| 0.038386
| 0
| 0.380769
| 0
| 0
| 0.076496
| 0.003457
| 0
| 0
| 0
| 0.001575
| 0.009615
| 1
| 0.038462
| false
| 0
| 0.032692
| 0
| 0.105769
| 0.005769
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
16369f4689956af64363c246df723fffbf5f3a5e
| 7,164
|
py
|
Python
|
downloadParagraph.py
|
icadot86/bert
|
42070209183dab3b5ff59b0dea1398a9538960f3
|
[
"Apache-2.0"
] | null | null | null |
downloadParagraph.py
|
icadot86/bert
|
42070209183dab3b5ff59b0dea1398a9538960f3
|
[
"Apache-2.0"
] | null | null | null |
downloadParagraph.py
|
icadot86/bert
|
42070209183dab3b5ff59b0dea1398a9538960f3
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
import sys, getopt
import urllib
import requests
import requests_cache
import re
import time
from bs4 import BeautifulSoup
from requests import Session
sys.path.append("/home/taejoon1kim/BERT/my_bert")
from utils.cacheUtils import cacheExist, writeCache, readCache, getDownloadCachePath
from utils.path import BERT_INPUT_JSON, BERT_SEARCH_JSON
def preprocessor(text):
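# Truncates a Korean search query at the first question keyword so that only the
# entity remains. Rough keyword glosses: 감독 = director, 등장인물 = characters,
# 누구야 = who is, 알려줘 = tell me, 보여줘 = show me, 찾아줘 = find,
# 언제야 = when is, 어디 = where, 뭐야 = what is.
# e.g. preprocessor("기생충 감독이 누구야") -> "기생충 " (everything before "감독").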
if "감독" in text:
return text[0:text.find("감독")]
elif "등장인물" in text:
return text[0:text.find("등장인물")]
elif "누구야" in text:
return text[0:text.find("누구야")]
elif "알려줘" in text:
return text[0:text.find("알려줘")]
elif "보여줘" in text:
return text[0:text.find("보여줘")]
elif "찾아줘" in text:
return text[0:text.find("찾아줘")]
elif "언제야" in text:
return text[0:text.find("언제")]
elif "어디" in text:
return text[0:text.find("어디")]
elif "뭐야" in text:
return text[0:text.find("뭐야")]
else :
return text
def checkQType(text):
global Q_TYPE
if "감독" in text or "어디서" in text or "언제" in text or "뭐야" in text:
Q_TYPE = 2
elif "누구야" in text:
Q_TYPE = 1
else:
Q_TYPE = 3
SEARCH_RESULT['Q_TYPE'] = Q_TYPE
print("QUESTION TYPE : ", Q_TYPE)
WIKI_URL = "wikipedia.org"
YOUTUBE_URL = "youtube.com/channel"
NO_RESULT = "no_result"
SEARCH_RESULT = {
"WIKI" : {"title" : f"{NO_RESULT}", "link" : f"{NO_RESULT}"},
"FIRST" : {"title" : f"{NO_RESULT}", "link" : f"{NO_RESULT}"},
"YOUTUBE" : {"title" : f"{NO_RESULT}", "link" : f"{NO_RESULT}"},
"test_input.json" : f"{NO_RESULT}",
"search_result.json" : f"{NO_RESULT}",
"Q_TYPE" : f"{NO_RESULT}"
}
def downloadURL(URL):
# desktop user-agent
USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:65.0) Gecko/20100101 Firefox/65.0"
# mobile user-agent
MOBILE_USER_AGENT = "Mozilla/5.0 (Linux; Android 7.0; SM-G930V Build/NRD90M) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.125 Mobile Safari/537.36"
headers = {"user-agent" : USER_AGENT}
#headers = {"user-agent" : USER_AGENT, "cache-control" : "public,max-age=3600"}
#headers = {"user-agent" : USER_AGENT, "cache-control" : "no-cache"}
#s = Session()
#s.headers.update(headers)
resp = requests.get(URL, headers=headers)
#resp = s.get(URL)
results = [{"title" : f"{NO_RESULT}", "link" : f"{NO_RESULT}"}]
print(resp.status_code)
if resp.status_code == 200:
soup = BeautifulSoup(resp.content, "lxml")
results = []
for g in soup.find_all('div', class_='r'):
anchors = g.find_all('a')
if anchors:
link = anchors[0]['href']
title = g.find('h3').text
item = {
"title": title,
"link": link
}
results.append(item)
#print(link)
global SEARCH_RESULT
if link.find(WIKI_URL) != -1 and SEARCH_RESULT['WIKI']['link'] == NO_RESULT:
SEARCH_RESULT['WIKI']['title'] = title
SEARCH_RESULT['WIKI']['link'] = link
elif link.find(YOUTUBE_URL) != -1 and SEARCH_RESULT['YOUTUBE']['link'] == NO_RESULT:
SEARCH_RESULT['YOUTUBE']['title'] = title
SEARCH_RESULT['YOUTUBE']['link'] = link
if SEARCH_RESULT['WIKI']['link'] != NO_RESULT and SEARCH_RESULT['YOUTUBE']['link'] != NO_RESULT:
break
SEARCH_RESULT['FIRST']['title'] = results[0].get('title')
SEARCH_RESULT['FIRST']['link'] = results[0].get('link')
else:
SEARCH_RESULT['FIRST']['title'] = f"resp.status_code {resp.status_code}"
return results
def download(text):
global cache
cache = getDownloadCachePath(text)
global start, Q_TYPE
init_start = time.time()
start = time.time()
requests_cache.install_cache('/home/taejoon1kim/BERT/my_bert/download_cache')
#if cacheExist(cache) == False:
if True:
checkQType(text)
query_text = preprocessor(text)
## 1st SEARCH
query = query_text
query = query.replace(' ', '+')
if Q_TYPE <= 2:
URL = f"https://google.com/search?q={query} site:wikipedia.org"
else :
URL = f"https://google.com/search?q={query}"
print(URL)
downloadURL(URL)
printTime("1st Search Time")
pWithoutTag = f"{NO_RESULT}"
imgTag = f"{NO_RESULT}"
## 2nd SEARCH
if SEARCH_RESULT['WIKI']['title'] == NO_RESULT and Q_TYPE > 2:
URL = f"https://google.com/search?q={query} site:wikipedia.org"
downloadURL(URL)
if SEARCH_RESULT['WIKI']['title'] == NO_RESULT:
pWithoutTag = "위키피디아가 없네요. 링크를 열어보세요"  # "No Wikipedia page found; open the link instead."
else:
resp = requests.get(SEARCH_RESULT['WIKI']['link'])
if resp.status_code == 200:
soup = BeautifulSoup(resp.content, "lxml")
p = soup.find('p')
pWithoutTag = re.sub('<.+?>', '', str(p), 0).strip()
pWithoutTag = re.sub('"', '', str(pWithoutTag), 0).strip()
pWithoutTag = re.sub('\n', ' ', str(pWithoutTag), 0).strip()
imgTag = "http:" + soup.find('a', {'class':'image'}).find('img')['src']
## GENERATE BERT INPUT
JSON_1 = "{\"version\":\"mytest_dev\",\"data\":[{\"paragraphs\":[{\"qas\":[{\"answers\":[{\"text\":\"테스트\",\"answer_start\":0}],\"id\":\"1-1\",\"question\":\"테스트\"}],\"context\":\""
JSON_2 = "\"}],\"title\":\"테스트\"}]}"
FULL_JSON = JSON_1 + pWithoutTag + JSON_2
writeJson(FULL_JSON, BERT_INPUT_JSON)
printTime("2nd Search Time")
SEARCH_RESULT['test_input.json'] = FULL_JSON
## GENERATE SEARCH RESULT
FULL_JSON = "{\"google\":[{\"title\":\"" + SEARCH_RESULT['FIRST']['title'] + "\",\"link\":\"" + SEARCH_RESULT['FIRST']['link'] + "\"}],\"wiki\":[{\"title\":\"" + SEARCH_RESULT['WIKI']['title'] + "\",\"link\":\"" + SEARCH_RESULT['WIKI']['link'] + "\"}],\"youtube\":[{\"title\":\"" + SEARCH_RESULT['YOUTUBE']['title'] + "\",\"link\":\"" + SEARCH_RESULT['YOUTUBE']['link'] + "\"}],\"Q_TYPE\":\"" + str(Q_TYPE) + "\",\"IMG_SRC\":\"" + str(imgTag) + "\"}"
writeJson(FULL_JSON, BERT_SEARCH_JSON)
SEARCH_RESULT['search_result.json'] = FULL_JSON
writeCache(cache, SEARCH_RESULT)
else:
CACHE_RESULT = readCache(cache)
writeJson(CACHE_RESULT['test_input.json'], BERT_INPUT_JSON)
writeJson(CACHE_RESULT['search_result.json'], BERT_SEARCH_JSON)
Q_TYPE = CACHE_RESULT['Q_TYPE']
print(f"[SEARCH] Total time : {format(time.time() - init_start, '0.5f')}")
return Q_TYPE
def writeJson(json, filePath):
f = open(filePath, 'w')
f.write(json)
f.close()
def printTime(text):
global start
print(f"[SEARCH] {text} : {format(time.time() - start, '0.5f')}")
start = time.time()
def main(argv):
download(argv[1])
if __name__ == "__main__":
main(sys.argv)
| 35.82
| 458
| 0.564768
| 889
| 7,164
| 4.40045
| 0.219348
| 0.092025
| 0.029908
| 0.03681
| 0.268916
| 0.223415
| 0.197597
| 0.087935
| 0.052658
| 0.052658
| 0
| 0.019151
| 0.256561
| 7,164
| 199
| 459
| 36
| 0.715359
| 0.049553
| 0
| 0.119205
| 0
| 0.02649
| 0.243741
| 0.038292
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046358
| false
| 0
| 0.066225
| 0
| 0.192053
| 0.05298
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1637357f64028a6c4c7d59c4294f21b8d56010e2
| 2,861
|
py
|
Python
|
data_io.py
|
LucasChenLC/courseManager2
|
3f91ea72dbc0a3f3afcc88c7f0959edb6c33adf9
|
[
"MIT"
] | null | null | null |
data_io.py
|
LucasChenLC/courseManager2
|
3f91ea72dbc0a3f3afcc88c7f0959edb6c33adf9
|
[
"MIT"
] | null | null | null |
data_io.py
|
LucasChenLC/courseManager2
|
3f91ea72dbc0a3f3afcc88c7f0959edb6c33adf9
|
[
"MIT"
] | null | null | null |
from xml.dom.minidom import Document, parse
class InfoBatch:
def __init__(self, title, pre_node_titles):
self.title = title
self.pre_node_titles = pre_node_titles
def save_data_xml(course_list, file_path):
doc = Document()
courses = doc.createElement('course_list')
doc.appendChild(courses)
for course in course_list:
single_course = doc.createElement('course')
courses.appendChild(single_course)
single_course_name = doc.createElement('course_name')
course_name = doc.createTextNode(course.name)
single_course.appendChild(single_course_name)
single_course_name.appendChild(course_name)
pre_course = doc.createElement('pre_course')
pre_course_name = ','.join(course.pre_course)
course_name = doc.createTextNode(pre_course_name)
single_course.appendChild(pre_course)
pre_course.appendChild(course_name)
after_course = doc.createElement('after_course')
after_course_name = ','.join(course.after_course)
course_name = doc.createTextNode(after_course_name)
single_course.appendChild(after_course)
after_course.appendChild(course_name)
with open(file_path, 'wb+') as f:
f.write(doc.toprettyxml(indent='\t', encoding='utf-8'))
def load_data_xml(file_path):
info_list = []
doc = parse(file_path)
courses = doc.getElementsByTagName("course")
for course in courses:
title = course.getElementsByTagName("course_name")[0].childNodes[0].data
try:
pre_node_titles = course.getElementsByTagName("pre_node_titles")[0].childNodes[0].data
pre_node_titles = pre_node_titles.split(',')
info_list.append(InfoBatch(title, pre_node_titles))
except IndexError:
info_list.append(InfoBatch(title, []))
return info_list
'''
course_list = []
course_list.append(Course('Advance Math'))
course_list.append(Course('Linear Algebra'))
course_list.append(Course('Procedure Oriented Programming'))
course_list.append(Course('Object Oriented Programming'))
course_list[-1].add_pre_course(course_list, ['Procedure Oriented Programming'])
course_list.append(Course('College Physics'))
course_list[-1].add_pre_course(course_list, ['Advance Math'])
course_list.append(Course('Digital Logic'))
course_list[-1].add_pre_course(course_list, ['Procedure Oriented Programming'])
course_list.append(Course('Computer Organization'))
course_list[-1].add_pre_course(course_list, ['Advance Math', 'Procedure Oriented Programming', 'Digital Logic'])
course_list.append(Course('Computer Architecture'))
course_list[-1].add_pre_course(course_list,
['Advance Math', 'Procedure Oriented Programming', 'Digital Logic', 'Computer Organization'])
save_data_xml(course_list, 'resource/data/data.xml')
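# Reading it back (same hypothetical path as the save above):
info_list = load_data_xml('resource/data/data.xml')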
'''
| 37.644737
| 124
| 0.71828
| 351
| 2,861
| 5.558405
| 0.210826
| 0.117888
| 0.053306
| 0.09021
| 0.428498
| 0.274218
| 0.219375
| 0.193747
| 0.193747
| 0.193747
| 0
| 0.004189
| 0.165676
| 2,861
| 75
| 125
| 38.146667
| 0.813155
| 0
| 0
| 0
| 0
| 0
| 0.050586
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073171
| false
| 0
| 0.02439
| 0
| 0.146341
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
163841fc5da39772ff971e9eff1ba89827ff6817
| 1,003
|
py
|
Python
|
tests/rules/test_git_rm_local_modifications.py
|
jlandrum/theheck
|
d2c008b6ca14220504be95f887253ddd9f5e9f72
|
[
"MIT"
] | null | null | null |
tests/rules/test_git_rm_local_modifications.py
|
jlandrum/theheck
|
d2c008b6ca14220504be95f887253ddd9f5e9f72
|
[
"MIT"
] | null | null | null |
tests/rules/test_git_rm_local_modifications.py
|
jlandrum/theheck
|
d2c008b6ca14220504be95f887253ddd9f5e9f72
|
[
"MIT"
] | null | null | null |
import pytest
from theheck.rules.git_rm_local_modifications import match, get_new_command
from theheck.types import Command
@pytest.fixture
def output(target):
return ('error: the following file has local modifications:\n {}\n(use '
'--cached to keep the file, or -f to force removal)').format(target)
@pytest.mark.parametrize('script, target', [
('git rm foo', 'foo'),
('git rm foo bar', 'bar')])
def test_match(output, script, target):
assert match(Command(script, output))
@pytest.mark.parametrize('script', ['git rm foo', 'git rm foo bar', 'git rm'])
def test_not_match(script):
assert not match(Command(script, ''))
@pytest.mark.parametrize('script, target, new_command', [
('git rm foo', 'foo', ['git rm --cached foo', 'git rm -f foo']),
('git rm foo bar', 'bar', ['git rm --cached foo bar', 'git rm -f foo bar'])])
def test_get_new_command(output, script, target, new_command):
assert get_new_command(Command(script, output)) == new_command
| 34.586207
| 81
| 0.67996
| 148
| 1,003
| 4.5
| 0.283784
| 0.09009
| 0.072072
| 0.121622
| 0.207207
| 0.087087
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164506
| 1,003
| 28
| 82
| 35.821429
| 0.794749
| 0
| 0
| 0
| 0
| 0
| 0.323031
| 0
| 0
| 0
| 0
| 0
| 0.15
| 1
| 0.2
| false
| 0
| 0.15
| 0.05
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
16384fd421a05dbe791af899ad03aaf8e20b6076
| 6,078
|
py
|
Python
|
application.py
|
statisticsnorway/microdata-data-service
|
d477b7b75589d4c977771122558c948c040a1106
|
[
"Apache-2.0"
] | null | null | null |
application.py
|
statisticsnorway/microdata-data-service
|
d477b7b75589d4c977771122558c948c040a1106
|
[
"Apache-2.0"
] | 7
|
2021-10-08T13:40:33.000Z
|
2022-02-04T10:37:55.000Z
|
application.py
|
statisticsnorway/microdata-data-service
|
d477b7b75589d4c977771122558c948c040a1106
|
[
"Apache-2.0"
] | null | null | null |
import logging
import json_logging
import tomlkit
import uvicorn
from fastapi import FastAPI, status
from fastapi.encoders import jsonable_encoder
from fastapi.openapi.docs import (
get_redoc_html,
get_swagger_ui_html,
get_swagger_ui_oauth2_redirect_html,
)
from fastapi.responses import JSONResponse
from fastapi.staticfiles import StaticFiles
from starlette.responses import PlainTextResponse, Response
from data_service.api.data_api import data_router
from data_service.api.observability_api import observability_router
from data_service.config import config
from data_service.core.processor import NotFoundException
from data_service.core.filters import EmptyResultSetException
"""
Self-hosting JavaScript and CSS for docs
https://fastapi.tiangolo.com/advanced/extending-openapi/#self-hosting-javascript-and-css-for-docs
"""
data_service_app = FastAPI(docs_url=None, redoc_url=None)
data_service_app.mount("/static", StaticFiles(directory="static"), name="static")
data_service_app.include_router(data_router)
data_service_app.include_router(observability_router)
@data_service_app.get("/docs", include_in_schema=False)
async def custom_swagger_ui_html():
return get_swagger_ui_html(
openapi_url=data_service_app.openapi_url,
title=data_service_app.title + " - Swagger UI",
oauth2_redirect_url=data_service_app.swagger_ui_oauth2_redirect_url,
swagger_js_url="/static/swagger-ui-bundle.js",
swagger_css_url="/static/swagger-ui.css",
)
@data_service_app.get(data_service_app.swagger_ui_oauth2_redirect_url, include_in_schema=False)
async def swagger_ui_redirect():
return get_swagger_ui_oauth2_redirect_html()
@data_service_app.get("/redoc", include_in_schema=False)
async def redoc_html():
return get_redoc_html(
openapi_url=data_service_app.openapi_url,
title=data_service_app.title + " - ReDoc",
redoc_js_url="/static/redoc.standalone.js",
)
def _get_project_meta():
with open('./pyproject.toml') as pyproject:
file_contents = pyproject.read()
return tomlkit.parse(file_contents)['tool']['poetry']
pkg_meta = _get_project_meta()
class CustomJSONLog(json_logging.JSONLogFormatter):
"""
Customized application logger
"""
def _format_log_object(self, record, request_util):
json_log_object = super(CustomJSONLog, self)._format_log_object(record, request_util)
json_log_object.update({
"message": record.getMessage()
})
if "exc_info" in json_log_object:
json_log_object["error.stack"] = json_log_object.pop('exc_info')
del json_log_object['filename']
json_log_object["@timestamp"] = json_log_object.pop('written_at')
json_log_object["loggerName"] = json_log_object.pop('logger')
json_log_object["levelName"] = json_log_object.pop('level')
json_log_object["schemaVersion"] = "v3"
json_log_object["serviceVersion"] = str(pkg_meta['version'])
json_log_object["serviceName"] = "data-service"
del json_log_object['written_ts']
del json_log_object['type']
del json_log_object['msg']
del json_log_object['module']
del json_log_object['line_no']
return json_log_object
class CustomJSONRequestLogFormatter(json_logging.JSONRequestLogFormatter):
"""
Customized request logger
"""
def _format_log_object(self, record, request_util):
json_log_object = super(CustomJSONRequestLogFormatter, self)._format_log_object(record, request_util)
json_log_object.update({
"message": record.getMessage()
})
json_log_object["@timestamp"] = json_log_object.pop('written_at')
json_log_object["xRequestId"] = json_log_object.pop('correlation_id')
json_log_object["url"] = json_log_object.pop('request')
json_log_object["source_host"] = json_log_object.pop('remote_host')
json_log_object["responseTime"] = json_log_object.pop('response_time_ms')
json_log_object["statusCode"] = json_log_object.pop('response_status')
del json_log_object['written_ts']
del json_log_object['type']
del json_log_object['remote_user']
del json_log_object['referer']
del json_log_object['x_forwarded_for']
del json_log_object['protocol']
del json_log_object['remote_ip']
del json_log_object['request_size_b']
del json_log_object['remote_port']
del json_log_object['request_received_at']
del json_log_object['response_size_b']
del json_log_object['response_content_type']
del json_log_object['response_sent_at']
return json_log_object
@data_service_app.exception_handler(EmptyResultSetException)
async def empty_result_set_exception_handler(request, exc):
log = logging.getLogger(__name__)
log.exception(exc)
return Response(
status_code=status.HTTP_204_NO_CONTENT
)
@data_service_app.exception_handler(NotFoundException)
async def not_found_exception_handler(request, exc):
log = logging.getLogger(__name__)
log.exception(exc)
return JSONResponse(
status_code=status.HTTP_404_NOT_FOUND,
content=jsonable_encoder({"detail": "No such datastructure"})
)
@data_service_app.exception_handler(Exception)
async def unknown_exception_handler(request, exc):
log = logging.getLogger(__name__)
log.exception(exc)
return PlainTextResponse("Internal Server Error", status_code=500)
@data_service_app.on_event("startup")
def startup_event():
json_logging.init_fastapi(enable_json=True, custom_formatter=CustomJSONLog)
json_logging.init_request_instrument(data_service_app, custom_formatter=CustomJSONRequestLogFormatter)
logging.basicConfig(level=logging.INFO)
json_logging.config_root_logger()
log = logging.getLogger(__name__)
log.info('Started data-service')
log.info(config.get_settings().print())
if __name__ == "__main__":
uvicorn.run(data_service_app, host="0.0.0.0", port=8000)
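# --- Hedged usage sketch (not part of the original module) ---
# Run the service locally and open the self-hosted Swagger UI wired up above:
#   python application.py            # starts uvicorn on 0.0.0.0:8000
#   curl http://localhost:8000/docs  # served by custom_swagger_ui_html()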
| 33.766667
| 109
| 0.74054
| 781
| 6,078
| 5.352113
| 0.234315
| 0.114115
| 0.152392
| 0.072727
| 0.377033
| 0.285167
| 0.24067
| 0.224402
| 0.205263
| 0.205263
| 0
| 0.004509
| 0.160744
| 6,078
| 179
| 110
| 33.955307
| 0.814938
| 0.009049
| 0
| 0.2
| 0
| 0
| 0.123501
| 0.016787
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032
| false
| 0
| 0.12
| 0
| 0.24
| 0.008
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
16386e8f49ac83e2f9c436adbc056266858401ad
| 18,764
|
py
|
Python
|
graspologic/embed/n2v.py
|
dtborders/graspologic
|
8ea9a47cabe35ad28ec9d381e525358c2027f619
|
[
"MIT"
] | null | null | null |
graspologic/embed/n2v.py
|
dtborders/graspologic
|
8ea9a47cabe35ad28ec9d381e525358c2027f619
|
[
"MIT"
] | null | null | null |
graspologic/embed/n2v.py
|
dtborders/graspologic
|
8ea9a47cabe35ad28ec9d381e525358c2027f619
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.
import logging
import math
import time
from typing import Any, List, Optional, Tuple, Union
import networkx as nx
import numpy as np
from ..utils import remap_node_ids
def node2vec_embed(
graph: Union[nx.Graph, nx.DiGraph],
num_walks: int = 10,
walk_length: int = 80,
return_hyperparameter: float = 1.0,
inout_hyperparameter: float = 1.0,
dimensions: int = 128,
window_size: int = 10,
workers: int = 8,
iterations: int = 1,
interpolate_walk_lengths_by_node_degree: bool = True,
random_seed: Optional[int] = None,
) -> Tuple[np.array, List[Any]]:
"""
Generates a node2vec embedding from a given graph. Will follow the word2vec algorithm to create the embedding.
Parameters
----------
graph: Union[nx.Graph, nx.DiGraph]
A networkx graph or digraph. A multigraph should be turned into a non-multigraph so that the calling user
properly handles the multi-edges (i.e. aggregate weights or take last edge weight).
If the graph is unweighted, the weight of each edge will default to 1.
num_walks : int
Number of walks per source. Default is 10.
walk_length: int
Length of walk per source. Default is 80.
return_hyperparameter : float
Return hyperparameter (p). Default is 1.0
inout_hyperparameter : float
Inout hyperparameter (q). Default is 1.0
dimensions : int
Dimensionality of the word vectors. Default is 128.
window_size : int
Maximum distance between the current and predicted word within a sentence. Default is 10.
workers : int
Use these many worker threads to train the model. Default is 8.
iterations : int
Number of epochs in stochastic gradient descent (SGD)
interpolate_walk_lengths_by_node_degree : bool
Use a dynamic walk length that corresponds to each node's
degree. If the node is in the bottom 20 percentile, default to a walk length of 1. If it is in the top 10
percentile, use ``walk_length``. If it is in the 20-80 percentiles, linearly interpolate between 1 and ``walk_length``.
This keeps lower degree nodes from biasing your resulting embedding. If a low degree node has the same
number of walks as a high degree node (which it will if this setting is not on), then the lower degree node's
walks cover a smaller breadth of the graph than the high degree node's walks, and the resulting
embedding is dominated by the walks of the lower degree nodes.
random_seed : int
Seed to be used for reproducible results. Default is None and will produce a random output. Note that for a fully
deterministically-reproducible run, you must also limit to a single worker thread (`workers=1`), to eliminate
ordering jitter from OS thread scheduling. In addition the environment variable ``PYTHONHASHSEED`` must be set
to control hash randomization.
Returns
-------
Tuple[np.array, List[Any]]
A tuple containing a matrix, with each row index corresponding to the embedding for each node. The tuple
also contains a vector containing the corresponding vertex labels for each row in the matrix.
The matrix and vector are positionally correlated.
Notes
-----
The original reference implementation of node2vec comes from Aditya Grover from
https://github.com/aditya-grover/node2vec/.
Further details on the Alias Method used in this functionality can be found at
https://lips.cs.princeton.edu/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
References
----------
.. [1] Aditya Grover and Jure Leskovec "node2vec: Scalable Feature Learning for Networks."
Knowledge Discovery and Data Mining, 2016.
"""
_preconditions(
graph,
num_walks,
walk_length,
return_hyperparameter,
inout_hyperparameter,
dimensions,
window_size,
workers,
iterations,
interpolate_walk_lengths_by_node_degree,
)
random_state = np.random.RandomState(seed=random_seed)
node2vec_graph = _Node2VecGraph(
graph, return_hyperparameter, inout_hyperparameter, random_state
)
logging.info(
f"Starting preprocessing of transition probabilities on graph with {str(len(graph.nodes()))} nodes and "
f"{str(len(graph.edges()))} edges"
)
start = time.time()
logging.info(f"Starting at time {str(start)}")
node2vec_graph._preprocess_transition_probabilities()
logging.info(f"Simulating walks on graph at time {str(time.time())}")
walks = node2vec_graph._simulate_walks(
num_walks, walk_length, interpolate_walk_lengths_by_node_degree
)
logging.info(f"Learning embeddings at time {str(time.time())}")
model = _learn_embeddings(
walks, dimensions, window_size, workers, iterations, random_seed
)
end = time.time()
logging.info(
f"Completed. Ending time is {str(end)} Elapsed time is {str(start - end)}"
)
labels = node2vec_graph.original_graph.nodes()
remapped_labels = node2vec_graph.label_map_to_string
return (
np.array([model.wv.get_vector(remapped_labels[node]) for node in labels]),
labels,
)
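# --- Hedged usage sketch (not part of the original module) ---
# Assuming the package exposes this function as graspologic.embed.node2vec_embed:
#   import networkx as nx
#   from graspologic.embed import node2vec_embed
#   g = nx.erdos_renyi_graph(50, 0.1, seed=1)
#   embedding, labels = node2vec_embed(g, dimensions=32, random_seed=1)
#   # embedding.shape == (50, 32); labels[i] is the vertex label for row i.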
def _assert_is_positive_int(name: str, value: int):
if not isinstance(value, int):
raise TypeError(f"{name} must be an int")
if value <= 0:
raise ValueError(f"{name} must be > 0")
def _assert_is_nonnegative_float(name: str, value: float):
if not isinstance(value, float):
raise TypeError(f"{name} must be a float")
if value < 0.0:
raise ValueError(f"{name} must be >= 0.0")
def _preconditions(
graph: Union[nx.Graph, nx.DiGraph],
num_walks: int,
walk_length: int,
return_hyperparameter: float,
inout_hyperparameter: float,
dimensions: int,
window_size: int,
workers: int,
iterations: int,
interpolate_walk_lengths_by_node_degree: bool,
):
if not isinstance(graph, nx.Graph):
raise TypeError("graph must be a networkx Graph or DiGraph")
if graph.is_multigraph():
raise ValueError(
"This function does not work on multigraphs - because there are two reasonable ways to treat a "
"multigraph with different behaviors, we insist that the caller create an appropriate Graph or "
"DiGraph that represents the manner in which they'd like the multigraph to be treated for the "
"purposes of this embedding"
)
_assert_is_positive_int("num_walks", num_walks)
_assert_is_positive_int("walk_length", walk_length)
_assert_is_nonnegative_float("return_hyperparameter", return_hyperparameter)
_assert_is_nonnegative_float("inout_hyperparameter", inout_hyperparameter)
_assert_is_positive_int("dimensions", dimensions)
_assert_is_positive_int("window_size", window_size)
_assert_is_positive_int("workers", workers)
_assert_is_positive_int("iterations", iterations)
if not isinstance(interpolate_walk_lengths_by_node_degree, bool):
raise TypeError("interpolate_walk_lengths_by_node_degree must be a bool")
def _learn_embeddings(
walks: List[Any],
dimensions: int,
window_size: int,
workers: int,
iterations: int,
random_seed: Optional[int],
):
"""
Learn embeddings by optimizing the skip-gram objective using SGD.
"""
from gensim.models import Word2Vec
walks = [list(map(str, walk)) for walk in walks]
# Documentation - https://radimrehurek.com/gensim/models/word2vec.html
model = Word2Vec(
walks,
size=dimensions,
window=window_size,
min_count=0,
sg=1, # Training algorithm: 1 for skip-gram; otherwise CBOW
workers=workers,
iter=iterations,
seed=random_seed,
)
return model
class _Node2VecGraph:
"""
Temporary inner state object for constructing the random walks
Parameters
----------
graph: nx.Graph
A networkx graph
return_hyperparameter : float
Return hyperparameter
inout_hyperparameter : float
Inout hyperparameter
random_state : np.random.RandomState
Random State for reproducible results. Default is None and will produce random
results
"""
def __init__(
self,
graph: nx.Graph,
return_hyperparameter: float,
inout_hyperparameter: float,
random_state: Optional[np.random.RandomState] = None,
):
self.original_graph: nx.Graph = graph
graph_with_new_ids, new_id_map = remap_node_ids(graph=graph)
self.graph = graph_with_new_ids
self.label_map_to_string = new_id_map
self.is_directed = self.graph.is_directed()
self.p = return_hyperparameter
self.q = inout_hyperparameter
self.random_state = random_state
def node2vec_walk(
self,
walk_length: int,
start_node: Any,
degree_percentiles: Optional[np.ndarray],
):
"""
Simulate a random walk starting from start node.
"""
graph = self.graph
alias_nodes = self.alias_nodes
alias_edges = self.alias_edges
walk = [start_node]
# Percentiles will be provided if we are using the 'interpolate_walk_lengths_by_node_degree' feature.
# The intent of the code is to default the bottom 20% of nodes to a minimal walk length, default the top 10% to a
# maximum walk length, and interpolate the inner 70% linearly from min to max.
# This is to avoid having your random walks be dominated by low degree nodes. If the low degree nodes have the
# same number of walks as the high degree nodes, the low degree nodes will take a smaller breadth of paths
# (due to there being fewer nodes to choose from) and will bias your resulting Word2Vec embedding.
if degree_percentiles is not None:
degree = nx.degree(graph, start_node)
walk_length = self._get_walk_length_interpolated(
degree, degree_percentiles, walk_length
)
while len(walk) < walk_length:
current = walk[-1]
current_neighbors = sorted(graph.neighbors(current))
if len(current_neighbors) > 0:
if len(walk) == 1:
walk.append(
current_neighbors[
_alias_draw(
alias_nodes[current][0],
alias_nodes[current][1],
self.random_state,
)
]
)
else:
prev = walk[-2]
next_node = current_neighbors[
_alias_draw(
alias_edges[(prev, current)][0],
alias_edges[(prev, current)][1],
self.random_state,
)
]
walk.append(next_node)
else:
break
return walk
@staticmethod
def _get_walk_length_interpolated(
degree: int, percentiles: list, max_walk_length: int
):
"""
Given a node's degree, determine the length of a walk that should be used. If the degree is less than the
first element of the percentiles list, default the walk length to 1. Otherwise, if the degree is greater
than the last element of the list, default it to the max_walk_length. If it falls in the middle, do a linear
interpolation to decide the length of the walk.
"""
new_walk_length = None
for i, percentile in enumerate(percentiles):
# if we are below the first percentile in the list, default to a walk length of 1
if i == 0 and degree < percentile:
return 1
# otherwise, find which bucket we are going to be in.
if degree <= percentile:
new_walk_length = max_walk_length * ((i * 0.1) + 0.2)
break
# the degree is above the last percentile
if new_walk_length is None:
new_walk_length = max_walk_length
# a walk length of 0 is invalid but can happen depending on the percentiles used
if new_walk_length < 1:
new_walk_length = 1
return math.floor(new_walk_length)
def _simulate_walks(
self,
num_walks: int,
walk_length: int,
interpolate_walk_lengths_by_node_degree: bool = False,
):
"""
Repeatedly simulate random walks from each node.
"""
graph = self.graph
walks = []
nodes = list(graph.nodes())
degree_percentiles: Optional[np.ndarray] = None
if interpolate_walk_lengths_by_node_degree:
degree_percentiles = np.percentile(
[degree for _, degree in graph.degree()], [x for x in range(20, 90, 10)]
)
for walk_iteration in range(num_walks):
logging.info(
"Walk iteration: " + str(walk_iteration + 1) + "/" + str(num_walks)
)
self.random_state.shuffle(nodes)
for node in nodes:
walks.append(
self.node2vec_walk(
walk_length=walk_length,
start_node=node,
degree_percentiles=degree_percentiles,
)
)
return walks
def _get_alias_edge(self, source: Any, destination: Any):
"""
Get the alias edge setup lists for a given edge.
"""
graph = self.graph
p = self.p
q = self.q
unnormalized_probs = []
for destination_neighbor in sorted(graph.neighbors(destination)):
if destination_neighbor == source:
unnormalized_probs.append(
graph[destination][destination_neighbor].get("weight", 1) / p
)
elif graph.has_edge(destination_neighbor, source):
unnormalized_probs.append(
graph[destination][destination_neighbor].get("weight", 1)
)
else:
unnormalized_probs.append(
graph[destination][destination_neighbor].get("weight", 1) / q
)
norm_const = sum(unnormalized_probs)
normalized_probs = [float(u_prob) / norm_const for u_prob in unnormalized_probs]
return _alias_setup(normalized_probs)
def _preprocess_transition_probabilities(self, weight_default: float = 1.0):
"""
Preprocessing of transition probabilities for guiding the random walks.
"""
graph = self.graph
is_directed = self.is_directed
alias_nodes = {}
total_nodes = len(graph.nodes())
bucket = 0
current_node = 0
quotient = int(total_nodes / 10)
logging.info(
f"Beginning preprocessing of transition probabilities for {total_nodes} vertices"
)
for node in graph.nodes():
current_node += 1
if current_node > bucket * quotient:
bucket += 1
logging.info(f"Completed {current_node} / {total_nodes} vertices")
unnormalized_probs = [
graph[node][nbr].get("weight", weight_default)
for nbr in sorted(graph.neighbors(node))
]
norm_const = sum(unnormalized_probs)
normalized_probs = [
float(u_prob) / norm_const for u_prob in unnormalized_probs
]
alias_nodes[node] = _alias_setup(normalized_probs)
logging.info(
f"Completed preprocessing of transition probabilities for vertices"
)
alias_edges = {}
total_edges = len(graph.edges())
bucket = 0
current_edge = 0
quotient = int(total_edges / 10)
logging.info(
f"Beginning preprocessing of transition probabilities for {total_edges} edges"
)
if is_directed:
for edge in graph.edges():
current_edge += 1
if current_edge > bucket * quotient:
bucket += 1
logging.info(f"Completed {current_edge} / {total_edges} edges")
alias_edges[edge] = self._get_alias_edge(edge[0], edge[1])
else:
for edge in graph.edges():
current_edge += 1
if current_edge > bucket * quotient:
bucket += 1
logging.info(f"Completed {current_edge} / {total_edges} edges")
alias_edges[edge] = self._get_alias_edge(edge[0], edge[1])
alias_edges[(edge[1], edge[0])] = self._get_alias_edge(edge[1], edge[0])
logging.info(f"Completed preprocessing of transition probabilities for edges")
self.alias_nodes = alias_nodes
self.alias_edges = alias_edges
return
def _alias_setup(probabilities: List[float]):
"""
Compute utility lists for non-uniform sampling from discrete distributions.
Refer to
https://lips.cs.princeton.edu/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
for details
"""
number_of_outcomes = len(probabilities)
# acceptance_probabilities[i] is the probability of keeping outcome i when column i
# is drawn; alias_table[i] is the backup outcome returned on rejection.
acceptance_probabilities = np.zeros(number_of_outcomes)
alias_table = np.zeros(number_of_outcomes, dtype=int)
smaller = []
larger = []
for i, prob in enumerate(probabilities):
acceptance_probabilities[i] = number_of_outcomes * prob
if acceptance_probabilities[i] < 1.0:
smaller.append(i)
else:
larger.append(i)
while len(smaller) > 0 and len(larger) > 0:
small = smaller.pop()
large = larger.pop()
alias_table[small] = large
acceptance_probabilities[large] = (
acceptance_probabilities[large] + acceptance_probabilities[small] - 1.0
)
if acceptance_probabilities[large] < 1.0:
smaller.append(large)
else:
larger.append(large)
return alias_table, acceptance_probabilities
def _alias_draw(
alias_table: List[int], acceptance_probabilities: List[float], random_state: np.random.RandomState
):
"""
Draw sample from a non-uniform discrete distribution using alias sampling.
"""
number_of_outcomes = len(alias_table)
# pick a column uniformly, then either keep it or fall back to its alias outcome
random_index = int(np.floor(random_state.rand() * number_of_outcomes))
if random_state.rand() < acceptance_probabilities[random_index]:
return random_index
else:
return alias_table[random_index]
| 35.537879
| 127
| 0.627052
| 2,281
| 18,764
| 4.989917
| 0.188075
| 0.031629
| 0.012652
| 0.021086
| 0.295115
| 0.203831
| 0.162186
| 0.15217
| 0.129591
| 0.090669
| 0
| 0.0107
| 0.297698
| 18,764
| 527
| 128
| 35.605313
| 0.853013
| 0.283948
| 0
| 0.222892
| 0
| 0
| 0.10604
| 0.008496
| 0
| 0
| 0
| 0
| 0.03012
| 1
| 0.039157
| false
| 0
| 0.024096
| 0
| 0.099398
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1638d587cabcf4138e331d614308389b13e85fb7
| 8,421
|
py
|
Python
|
bot.py
|
NotBlizzard/blizzybot
|
41a6f07e4d3bb97772b07aa9d6a3af935b78fb9a
|
[
"MIT"
] | null | null | null |
bot.py
|
NotBlizzard/blizzybot
|
41a6f07e4d3bb97772b07aa9d6a3af935b78fb9a
|
[
"MIT"
] | null | null | null |
bot.py
|
NotBlizzard/blizzybot
|
41a6f07e4d3bb97772b07aa9d6a3af935b78fb9a
|
[
"MIT"
] | null | null | null |
# bot.py
# TODO:
# organize imports
# organize
from websocket import create_connection
from threading import Thread
from battle import Battle
import commands
import traceback
import requests
import inspect
import json
from fractions import Fraction
import random
import time
import sys
import re
import os
from learn import Learn
class Bot:
pokedex = json.loads(open(os.path.join(os.path.dirname(__file__), "./data/pokedex.json"), "r").read())
pokemon_teams = json.loads(open(os.path.join(os.path.dirname(__file__), "./data/pokemon_teams.json"), "r").read())
def __init__(self, username, password, server, admins, rooms, symbol, avatar, plugins, log):
self.start_time = float(time.time())
self.commands = []
self.last_message = {}
self.i = 0
self.url = "http://play.pokemonshowdown.com/action.php"
self.room = ""
self.username = username
self.password = password
self.joined_all_rooms = False
self.avatar = avatar
self.server = server
self.admins = admins
self.rooms = rooms
self.symbol = symbol
self.battles = []
self.plugins = plugins
self.rooms_joined = []
self.log = log
self.tiers = ["randombattle", "ou", "ubers", "uu", "ru", "nu", "pu", "lc", "anythinggoes", "battlespotsingles"]
def __str__(self):
return "<Bot:{}>".format(self.username)
def join(self, room):
self.ws.send("|/join {}".format(room))
def current_battle(self):
return [i for i in self.battles if i.room == self.room][0]
def battle(self, message):
message[1] = re.sub(r'[^A-Za-z0-9]', '', message[1])
if message[1] == "turn" or message[1] == "start":
getattr(self.current_battle()[self.room], "decide")()
else:
getattr(self.current_battle()[self.room], message[1])(message)
def plugin(self, room, plugin, message):
self.ws.send("{}|{}".format(room, plugin.run(message, self.last_message[self.room])))
def command(self, message, room, user):
cmd = message[4].split(self.symbol)[1].split(" ")[0]
try:
if " " in message[4]:
args = message[4].split("{} ".format(cmd))[1]
else:
args = []
command = getattr(commands, "command_{}".format(cmd), __name__)(args, room.strip().lower(), user.lower(), self)
self.ws.send("{}|{}".format(room, command))
except (IndexError, TypeError):
traceback.print_exc()
self.ws.send("{}|Luffy: so it's a mystery command! (\"{}\" is not recognized)".format(room, cmd))
except:
traceback.print_exc()
self.ws.send("{}|Something went wrong.".format(room))
def login(self, message):
key = message[2]
challenge = message[3]
if self.password == "":
data = { "act": "getassertion", "userid": self.username, "challengekeyid": key, "challenge": challenge }
data = requests.get(self.url, data=data)
self.ws.send("|/trn {},0,{}".format(self.username, data.text))
else:
data = { "act": "login", "name": self.username, "pass": self.password, "challengekeyid": key, "challenge": challenge }
data = requests.post(self.url, data=data)
data = json.loads(data.text.split("]")[1])
self.ws.send("|/trn {},0,{}".format(self.username, data["assertion"]))
def disconnect(self):
self.ws = None
sys.exit()
def start(self):
try:
self.connect()
except SystemExit:
return sys.exit()
def message(self, messages):
timestamp = int(messages[2])
user = messages[3]
print(self.room)
print(self.rooms_joined)
match_line = [x for x in self.plugins if re.match(x.match_line, messages[4], flags=re.IGNORECASE)]
if len(match_line) > 0 and self.room in self.rooms_joined:
plugin = [x for x in self.plugins if x == match_line[0]][0]
if self.room == "lobby":
self.room = ""
self.commands.append(Thread(target=self.plugin, args=(self.room, plugin, messages)).start())
if self.room in self.rooms_joined and messages[4][0] == self.symbol:
if self.room == "lobby":
self.room = ""
self.commands.append(Thread(target=self.command, args=(messages, self.room, user)).start())
def battle_message(self, messages):
user = re.sub(r'[^A-Za-z0-9]', '', messages[2])
if messages[3][0] == self.symbol:
messages = [""] + messages # now the list has five elements.
self.commands.append(Thread(target=self.command, args=(messages, self.room, " " + user)).start())
def raw(self, messages):
if self.rooms[self.i] not in self.rooms_joined and "infobox" in messages[2]:
if self.rooms[self.i] == "lobby":
self.rooms[self.i] = ""
self.rooms_joined.append(self.rooms[self.i])
if len(self.rooms) > self.i + 1:
self.i += 1
def update(self):
[self.join(room) for room in self.rooms]
def request(self, messages):
data = [x for x in self.battles if self.room in str(x)]
battle_tier = re.search(r"battle-(.+)-(\d+)", self.room).group(1)
if len(data) == 0: # new battle
self.battles.append(Battle(battle_tier, self.room, self))
print("NEW BATTLE")
self.battles[-1].run(messages)
else:
pass
def update_battle(self, messages):
data = json.loads(messages[2])
if len(data["challengesFrom"].keys()) > 0:
who = list(data["challengesFrom"].keys())[0]
tier = data["challengesFrom"][who]
if tier in self.tiers:
if "random" not in tier:
team = Bot.pokemon_teams[tier][random.choice(list(Bot.pokemon_teams[tier].keys()))]
self.ws.send("|/utm {}".format(team))
self.ws.send("|/accept {}".format(who))
def connect(self):
self.ws = create_connection("ws://{}/showdown/websocket".format(self.server))
while True:
messages = [x for x in self.ws.recv().split("\n")]
for message in messages:
print("it is ")
print(self.rooms_joined)
if self.log:
print(message.encode("utf-8", "ignore"))
try:
if ">" in self.last_message:
self.room = message[1:]
except:
self.room = "" # lobby
message = message.split("|")
# battles
if self.room in [x.room for x in self.battles] and len(message) > 1:
battle = [i for i in self.battles if i.room == self.room][0]
battle.run(message)
if len(message) > 1:
if message[1] == "c:":
self.message(message)
self.last_message[self.room] = message
elif message[1] == "title":
room = re.sub(r' ', '', message[2].lower())
self.rooms_joined.append(room)
elif message[1] == "raw":
self.raw(message)
elif message[1] == "c":
self.battle_message(message)
elif message[1] == "challstr":
self.login(message)
elif message[1] == "updateuser":
if not self.joined_all_rooms:
for room in self.rooms:
self.join(room)
self.joined_all_rooms = True
elif message[1] == "request":
self.request(message)
elif message[1] == "updatechallenges":
self.update_battle(message)
else:
pass
| 36.141631
| 131
| 0.517278
| 957
| 8,421
| 4.487983
| 0.205852
| 0.046566
| 0.020955
| 0.011641
| 0.262398
| 0.205588
| 0.129919
| 0.105704
| 0.105704
| 0.088941
| 0
| 0.010106
| 0.342002
| 8,421
| 232
| 132
| 36.297414
| 0.765024
| 0.011281
| 0
| 0.124294
| 0
| 0
| 0.078967
| 0.006303
| 0
| 0
| 0
| 0.00431
| 0.011299
| 1
| 0.096045
| false
| 0.033898
| 0.084746
| 0.011299
| 0.214689
| 0.045198
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
16391df203c1efac2e1f8b82d3e69209d5e07f18
| 10,758
|
py
|
Python
|
stRT/tdr/widgets/changes.py
|
Yao-14/stAnalysis
|
d08483ce581f5b03cfcad8be500aaa64b0293f74
|
[
"BSD-3-Clause"
] | null | null | null |
stRT/tdr/widgets/changes.py
|
Yao-14/stAnalysis
|
d08483ce581f5b03cfcad8be500aaa64b0293f74
|
[
"BSD-3-Clause"
] | null | null | null |
stRT/tdr/widgets/changes.py
|
Yao-14/stAnalysis
|
d08483ce581f5b03cfcad8be500aaa64b0293f74
|
[
"BSD-3-Clause"
] | null | null | null |
from typing import Optional, Tuple, Union
import numpy as np
import pandas as pd
import pyvista as pv
from pyvista import DataSet, MultiBlock, PolyData, UnstructuredGrid
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
from .ddrtree import DDRTree, cal_ncenter
from .slice import euclidean_distance, three_d_slice
####################################
# Changes along a vector direction #
####################################
def changes_along_line(
model: Union[PolyData, UnstructuredGrid],
key: Union[str, list] = None,
n_points: int = 100,
vec: Union[tuple, list] = (1, 0, 0),
center: Union[tuple, list] = None,
) -> Tuple[np.ndarray, np.ndarray, MultiBlock, MultiBlock]:
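"""
Compute how the values stored under ``key`` change along a straight line through the model.
The model is sliced ``n_points`` times along the direction ``vec`` (via ``three_d_slice``);
the values under ``key`` are summed within each slice, and the x axis accumulates the
Euclidean distance between consecutive points along the slicing line.
Returns the x coordinates, the summed values per slice, the slices, and the slicing line.
"""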
slices, line_points, line = three_d_slice(
model=model, method="line", n_slices=n_points, vec=vec, center=center
)
x, y = [], []
x_length = 0
for single_slice, (point_i, point) in zip(slices, enumerate(line_points)):
change_value = np.asarray(single_slice[key]).sum()
y.append(change_value)
if point_i == 0:
x.append(0)
else:
point1 = line_points[point_i - 1].points.flatten()
point2 = line_points[point_i].points.flatten()
ed = euclidean_distance(instance1=point1, instance2=point2, dimension=3)
x_length += ed
x.append(x_length)
return np.asarray(x), np.asarray(y), slices, line
#################################
# Changes along the model shape #
#################################
def changes_along_shape(
model: Union[PolyData, UnstructuredGrid],
spatial_key: Optional[str] = None,
key_added: Optional[str] = "rd_spatial",
dim: int = 2,
inplace: bool = False,
**kwargs,
):
model = model.copy() if not inplace else model
X = model.points if spatial_key is None else model[spatial_key]
DDRTree_kwargs = {
"maxIter": 10,
"sigma": 0.001,
"gamma": 10,
"eps": 0,
"dim": dim,
"Lambda": 5 * X.shape[1],
"ncenter": cal_ncenter(X.shape[1]),
}
DDRTree_kwargs.update(kwargs)
Z, Y, stree, R, W, Q, C, objs = DDRTree(X, **DDRTree_kwargs)
# Obtain the real part of the complex argument
model[key_added] = np.real(W).astype(np.float64)
return model if not inplace else None
##############################
# Changes along the branches #
##############################
def ElPiGraph_tree(
X: np.ndarray,
NumNodes: int = 50,
**kwargs,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Generate a principal elastic tree.
Reference: Albergante et al. (2020), Robust and Scalable Learning of Complex Intrinsic Dataset Geometry via ElPiGraph.
Args:
X: DxN, data matrix list.
NumNodes: The number of nodes of the principal graph. Use a range of 10 to 100 for ElPiGraph approach.
**kwargs: Other parameters used in elpigraph.computeElasticPrincipalTree. For details, please see:
https://github.com/j-bac/elpigraph-python/blob/master/elpigraph/_topologies.py
Returns:
nodes: The nodes in the principal tree.
edges: The edges between nodes in the principal tree.
"""
try:
import elpigraph
except ImportError:
raise ImportError(
"You need to install the package `elpigraph-python`."
"\nInstall elpigraph-python via `pip install git+https://github.com/j-bac/elpigraph-python.git`."
)
ElPiGraph_kwargs = {
"alpha": 0.01,
"FinalEnergy": "Penalized",
"StoreGraphEvolution": True,
"GPU": False,
}
ElPiGraph_kwargs.update(kwargs)
if ElPiGraph_kwargs["GPU"] is True:
try:
import cupy
except ImportError:
raise ImportError(
"You need to install the package `cupy`."
"\nInstall cupy via `pip install cupy-cuda113`."
)
elpi_tree = elpigraph.computeElasticPrincipalTree(
X=np.asarray(X), NumNodes=NumNodes, **ElPiGraph_kwargs
)
nodes = elpi_tree[0]["NodePositions"] # ['AllNodePositions'][k]
matrix_edges_weights = elpi_tree[0]["ElasticMatrix"] # ['AllElasticMatrices'][k]
matrix_edges_weights = np.triu(matrix_edges_weights, 1)
edges = np.array(np.nonzero(matrix_edges_weights), dtype=int).transpose()
return nodes, edges
def SimplePPT_tree(
X: np.ndarray,
NumNodes: int = 50,
**kwargs,
) -> Tuple[np.ndarray, np.ndarray]:
"""
Generate a simple principal tree.
Reference: Mao et al. (2015), SimplePPT: A simple principal tree algorithm, SIAM International Conference on Data Mining.
Args:
X: DxN, data matrix list.
NumNodes: The number of nodes of the principal graph. Use a range of 100 to 2000 for PPT approach.
**kwargs: Other parameters used in simpleppt.ppt. For details, please see:
https://github.com/LouisFaure/simpleppt/blob/main/simpleppt/ppt.py
Returns:
nodes: The nodes in the principal tree.
edges: The edges between nodes in the principal tree.
"""
try:
import igraph
import simpleppt
except ImportError:
raise ImportError(
"You need to install the package `simpleppt` and `igraph`."
"\nInstall simpleppt via `pip install -U simpleppt`."
"\nInstall igraph via `pip install -U igraph`"
)
SimplePPT_kwargs = {
"seed": 1,
"lam": 10,
}
SimplePPT_kwargs.update(kwargs)
X = np.asarray(X)
ppt_tree = simpleppt.ppt(X=X, Nodes=NumNodes, **SimplePPT_kwargs)
R = ppt_tree.R
nodes = (np.dot(X.T, R) / R.sum(axis=0)).T
B = ppt_tree.B
edges = np.array(
igraph.Graph.Adjacency((B > 0).tolist(), mode="undirected").get_edgelist()
)
return nodes, edges
def map_points_to_branch(
model: Union[PolyData, UnstructuredGrid],
nodes: np.ndarray,
spatial_key: Optional[str] = None,
key_added: Optional[str] = "nodes",
inplace: bool = False,
**kwargs,
):
"""
Find the closest principal tree node to any point in the model through KDTree.
Args:
model: A reconstructed model.
nodes: The nodes in the principal tree.
spatial_key: The key that corresponds to the coordinates of the point in the model. If spatial_key is None,
the coordinates are model.points.
key_added: The key under which to add the nodes labels.
inplace: Updates model in-place.
kwargs: Other parameters used in scipy.spatial.KDTree.
Returns:
A model, which contains the following properties:
`model.point_data[key_added]`, the nodes labels array.
"""
from scipy.spatial import KDTree
model = model.copy() if not inplace else model
X = model.points if spatial_key is None else model[spatial_key]
nodes_kdtree = KDTree(np.asarray(nodes), **kwargs)
_, ii = nodes_kdtree.query(np.asarray(X), k=1)
model.point_data[key_added] = ii
return model if not inplace else None
def map_gene_to_branch(
model: Union[PolyData, UnstructuredGrid],
tree: PolyData,
key: Union[str, list],
nodes_key: Optional[str] = "nodes",
inplace: bool = False,
):
"""
Map gene expression from the model onto the principal tree by summing expression over the points assigned to each tree node.
Args:
model: A reconstructed model that contains the gene expression labels.
tree: A three-dims principal tree model that contains the node labels.
key: The key that corresponds to the gene expression.
nodes_key: The key that corresponds to the coordinates of the nodes in the tree.
inplace: Updates tree model in-place.
Returns:
A tree, which contains the following properties:
`tree.point_data[key]`, the gene expression array.
"""
model = model.copy()
model_data = pd.DataFrame(model[nodes_key], columns=["nodes_id"])
key = [key] if isinstance(key, str) else key
for sub_key in key:
model_data[sub_key] = np.asarray(model[sub_key])
model_data = model_data.groupby(by="nodes_id").sum()
model_data["nodes_id"] = model_data.index
model_data.index = range(len(model_data.index))
tree = tree.copy() if not inplace else tree
tree_data = pd.DataFrame(tree[nodes_key], columns=["nodes_id"])
tree_data = pd.merge(tree_data, model_data, how="outer", on="nodes_id")
tree_data.fillna(value=0, inplace=True)
for sub_key in key:
tree.point_data[sub_key] = tree_data[sub_key].values
return tree if not inplace else None
def construct_tree_model(
nodes: np.ndarray,
edges: np.ndarray,
key_added: Optional[str] = "nodes",
) -> PolyData:
"""
Construct a principal tree model.
Args:
nodes: The nodes in the principal tree.
edges: The edges between nodes in the principal tree.
key_added: The key under which to add the nodes labels.
Returns:
A three-dims principal tree model, which contains the following properties:
`tree_model.point_data[key_added]`, the nodes labels array.
"""
padding = np.full(edges.shape[0], 2, dtype=int)
edges_w_padding = np.vstack((padding, edges.T)).T
tree_model = pv.PolyData(nodes, edges_w_padding)
tree_model.point_data[key_added] = np.arange(0, len(nodes), 1)
return tree_model
def changes_along_branch(
model: Union[PolyData, UnstructuredGrid],
spatial_key: Optional[str] = None,
map_key: Union[str, list] = None,
key_added: Optional[str] = "nodes",
rd_method: Literal["ElPiGraph", "SimplePPT"] = "ElPiGraph",
NumNodes: int = 50,
inplace: bool = False,
**kwargs,
) -> Tuple[Union[DataSet, PolyData, UnstructuredGrid], PolyData]:
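"""
Construct a principal tree for the model via ElPiGraph or SimplePPT, label every point
in the model with its closest tree node (under ``key_added``), and optionally aggregate
the expression values in ``map_key`` onto the tree nodes.
Returns the updated model (or None when ``inplace=True``) together with the principal
tree model.
"""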
model = model.copy() if not inplace else model
X = model.points if spatial_key is None else model[spatial_key]
if rd_method == "ElPiGraph":
nodes, edges = ElPiGraph_tree(X=X, NumNodes=NumNodes, **kwargs)
elif rd_method == "SimplePPT":
nodes, edges = SimplePPT_tree(X=X, NumNodes=NumNodes, **kwargs)
else:
raise ValueError(
"`rd_method` value is wrong."
"\nAvailable `rd_method` are: `'ElPiGraph'`, `'SimplePPT'`."
)
map_points_to_branch(
model=model,
nodes=nodes,
spatial_key=spatial_key,
key_added=key_added,
inplace=True,
)
tree_model = construct_tree_model(nodes=nodes, edges=edges)
if map_key is not None:
map_gene_to_branch(
model=model, tree=tree_model, key=map_key, nodes_key=key_added, inplace=True
)
return model if not inplace else None, tree_model
| 31.734513
| 125
| 0.635899
| 1,391
| 10,758
| 4.800863
| 0.198418
| 0.016771
| 0.014376
| 0.019167
| 0.401318
| 0.355795
| 0.278377
| 0.237796
| 0.237796
| 0.198413
| 0
| 0.009722
| 0.244655
| 10,758
| 338
| 126
| 31.828402
| 0.812085
| 0.266035
| 0
| 0.280788
| 0
| 0.004926
| 0.096987
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039409
| false
| 0
| 0.103448
| 0
| 0.182266
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
16394617ff3197501b57f08cd314d25d52093a16
| 842
|
py
|
Python
|
test/test_add_group.py
|
nkoshkina/Python_Training3
|
e917440d37883dbcaa527a0700bcfa1478a1c1ce
|
[
"Apache-2.0"
] | null | null | null |
test/test_add_group.py
|
nkoshkina/Python_Training3
|
e917440d37883dbcaa527a0700bcfa1478a1c1ce
|
[
"Apache-2.0"
] | null | null | null |
test/test_add_group.py
|
nkoshkina/Python_Training3
|
e917440d37883dbcaa527a0700bcfa1478a1c1ce
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from model.group import Group
import pytest
import allure_pytest
def test_add_group(app, db, check_ui, json_groups):
group0 = json_groups
#with pytest.allure.step("Given a group list"):
old_groups = db.get_group_list()
#with pytest.allure.step("When I add a group %s to the list" % group0):
app.group.create(group0)
#assert app.group.count() == len(old_groups) + 1
#with pytest.allure.step("When the new groups list is equal old list with added group"):
new_groups = db.get_group_list()
old_groups.append(group0)
assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
if check_ui:
print("CHECK_UI")
assert sorted(new_groups, key=Group.id_or_max) == \
sorted(app.group.get_groups_list(), key=Group.id_or_max)
| 36.608696
| 93
| 0.693587
| 135
| 842
| 4.111111
| 0.355556
| 0.064865
| 0.072072
| 0.086486
| 0.342342
| 0.156757
| 0.156757
| 0.108108
| 0
| 0
| 0
| 0.008746
| 0.185273
| 842
| 22
| 94
| 38.272727
| 0.800292
| 0.321853
| 0
| 0
| 0
| 0
| 0.01421
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 1
| 0.071429
| false
| 0
| 0.214286
| 0
| 0.285714
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
163c66ec8f6a6a9ebf21f694414728829c5d030d
| 7,851
|
py
|
Python
|
src/otp_yubikey/models.py
|
moggers87/django-otp-yubikey
|
2d7cf9dc91ba57b65aa62254532997cc1e6261dd
|
[
"BSD-2-Clause"
] | null | null | null |
src/otp_yubikey/models.py
|
moggers87/django-otp-yubikey
|
2d7cf9dc91ba57b65aa62254532997cc1e6261dd
|
[
"BSD-2-Clause"
] | null | null | null |
src/otp_yubikey/models.py
|
moggers87/django-otp-yubikey
|
2d7cf9dc91ba57b65aa62254532997cc1e6261dd
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import absolute_import, division, print_function, unicode_literals
from base64 import b64decode
from binascii import hexlify, unhexlify
from struct import pack
import six
from django.db import models
from django.utils.encoding import force_text
from django_otp.models import Device
from django_otp.util import hex_validator, random_hex
from yubiotp.client import YubiClient10, YubiClient11, YubiClient20
from yubiotp.modhex import modhex
from yubiotp.otp import decode_otp
def default_id():
return force_text(random_hex(6))
def id_validator(value):
return hex_validator(6)(value)
def default_key():
return force_text(random_hex(16))
def key_validator(value):
return hex_validator(16)(value)
class YubikeyDevice(Device):
"""
Represents a locally-verified YubiKey OTP
:class:`~django_otp.models.Device`.
.. attribute:: private_id
*CharField*: The 6-byte private ID (hex-encoded).
.. attribute:: key
*CharField*: The 16-byte AES key shared with this YubiKey
(hex-encoded).
.. attribute:: session
*PositiveIntegerField*: The non-volatile session counter most recently
used by this device.
.. attribute:: counter
*PositiveIntegerField*: The volatile session usage counter most
recently used by this device.
"""
private_id = models.CharField(
max_length=12,
validators=[id_validator],
default=default_id,
verbose_name="Private ID",
help_text="The 6-byte private ID (hex-encoded)."
)
key = models.CharField(
max_length=32,
validators=[key_validator],
default=default_key,
help_text="The 16-byte AES key shared with this YubiKey (hex-encoded)."
)
session = models.PositiveIntegerField(
default=0,
help_text="The non-volatile session counter most recently used by this device."
)
counter = models.PositiveIntegerField(
default=0,
help_text="The volatile session usage counter most recently used by this device."
)
class Meta(Device.Meta):
verbose_name = "Local YubiKey device"
def public_id(self):
"""
The public ID of this device is the four-byte, big-endian,
modhex-encoded primary key.
"""
return modhex(pack('>I', self.id))
public_id.short_description = 'Public Identity'
public_id.admin_order_field = 'id'
@property
def bin_key(self):
return unhexlify(self.key.encode())
def verify_token(self, token):
if isinstance(token, six.text_type):
token = token.encode('utf-8')
try:
public_id, otp = decode_otp(token, self.bin_key)
except Exception:
return False
if public_id != self.public_id():
return False
if hexlify(otp.uid) != self.private_id.encode():
return False
if otp.session < self.session:
return False
if (otp.session == self.session) and (otp.counter <= self.counter):
return False
# All tests pass. Update the counters and return the good news.
self.session = otp.session
self.counter = otp.counter
self.save()
return True
class ValidationService(models.Model):
"""
Represents a YubiKey validation web service. By default, this will point to
Yubico's official hosted service, which you can customize. You can also
create instances to point at any other service implementing the same
protocol.
.. attribute:: name
*CharField*: The name of this validation service.
.. attribute:: api_id
*IntegerField*: Your API ID. The server needs this to sign responses.
(Default: 1)
.. attribute:: api_key
*CharField*: Your base64-encoded API key, used to sign requests. This
is optional but strongly recommended. (Default: ``''``)
.. attribute:: base_url
*URLField*: The base URL of the verification service. Defaults to
Yubico's hosted API.
.. attribute:: api_version
*CharField*: The version of the validation API to use: '1.0', '1.1', or
'2.0'. (Default: '2.0')
.. attribute:: use_ssl
*BooleanField*: If ``True``, we'll use the HTTPS versions of the
default URLs. Because :mod:`urllib2` does not verify certificates, this
provides little benefit. (Default: ``False``).
.. attribute:: param_sl
*CharField*: The level of syncing required. See
:class:`~yubiotp.client.YubiClient20`.
.. attribute:: param_timeout
*CharField*: The time to allow for syncing. See
:class:`~yubiotp.client.YubiClient20`.
"""
API_VERSIONS = ['1.0', '1.1', '2.0']
name = models.CharField(
max_length=32,
help_text="The name of this validation service."
)
api_id = models.IntegerField(
default=1,
verbose_name="API ID",
help_text="Your API ID."
)
api_key = models.CharField(
max_length=64,
blank=True,
default='',
verbose_name="API key",
help_text="Your base64-encoded API key."
)
base_url = models.URLField(
blank=True,
default='',
verbose_name="Base URL",
help_text="The base URL of the verification service. Defaults to Yubico's hosted API."
)
api_version = models.CharField(
max_length=8,
choices=list(zip(API_VERSIONS, API_VERSIONS)),
default='2.0',
help_text="The version of the validation api to use."
)
use_ssl = models.BooleanField(
default=False,
verbose_name="Use SSL",
help_text="Use HTTPS API URLs by default?"
)
param_sl = models.CharField(
max_length=16,
blank=True,
default=None,
verbose_name="SL",
help_text="The level of syncing required."
)
param_timeout = models.CharField(
max_length=16,
blank=True,
default=None,
verbose_name="Timeout",
help_text="The time to allow for syncing."
)
class Meta(object):
verbose_name = "YubiKey validation service"
def __unicode__(self):
return self.name
def get_client(self):
api_key = b64decode(self.api_key.encode()) or None
if self.api_version == '2.0':
client = YubiClient20(self.api_id, api_key, self.use_ssl, False, self.param_sl or None, self.param_timeout or None)
elif self.api_version == '1.1':
client = YubiClient11(self.api_id, api_key, self.use_ssl)
else:
client = YubiClient10(self.api_id, api_key, self.use_ssl)
if self.base_url:
client.base_url = self.base_url
return client
class RemoteYubikeyDevice(Device):
"""
Represents a YubiKey device that is to be verified with a remote validation
service. In order create these devices, you must have at least one
:class:`~otp_yubikey.models.ValidationService` in the database.
.. attribute:: service
*ForeignKey*: The validation service to use for this device.
.. attribute:: public_id
*CharField*: The public identity of the YubiKey (modhex-encoded).
"""
service = models.ForeignKey(ValidationService, on_delete=models.CASCADE)
public_id = models.CharField(max_length=32, verbose_name="Public ID", help_text="The public identity of the YubiKey (modhex-encoded).")
class Meta(Device.Meta):
verbose_name = "Remote YubiKey device"
def verify_token(self, token):
verified = False
if token[:-32] == self.public_id:
client = self.service.get_client()
response = client.verify(token)
verified = response.is_ok()
return verified
| 27.644366
| 139
| 0.640683
| 979
| 7,851
| 5.010215
| 0.228805
| 0.021203
| 0.022426
| 0.039144
| 0.339042
| 0.235066
| 0.200815
| 0.158818
| 0.112946
| 0.112946
| 0
| 0.014016
| 0.263915
| 7,851
| 283
| 140
| 27.742049
| 0.834747
| 0.300217
| 0
| 0.157534
| 0
| 0
| 0.139202
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068493
| false
| 0
| 0.082192
| 0.041096
| 0.39726
| 0.006849
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
163d64f557e7427d0b9ba345ed63cc3b52a618e5
| 14,278
|
py
|
Python
|
glue/core/tests/test_state_objects.py
|
HPLegion/glue
|
1843787ccb4de852dfe103ff58473da13faccf5f
|
[
"BSD-3-Clause"
] | null | null | null |
glue/core/tests/test_state_objects.py
|
HPLegion/glue
|
1843787ccb4de852dfe103ff58473da13faccf5f
|
[
"BSD-3-Clause"
] | null | null | null |
glue/core/tests/test_state_objects.py
|
HPLegion/glue
|
1843787ccb4de852dfe103ff58473da13faccf5f
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from numpy.testing import assert_allclose
from echo import CallbackProperty, ListCallbackProperty
from glue.core import Data, DataCollection
from .test_state import clone
from ..state_objects import (State, StateAttributeLimitsHelper,
StateAttributeSingleValueHelper,
StateAttributeHistogramHelper)
class SimpleTestState(State):
a = CallbackProperty()
b = CallbackProperty()
flat = ListCallbackProperty()
nested = ListCallbackProperty()
def test_state_serialization():
state1 = SimpleTestState()
state1.a = 2
state1.b = 'hello'
state1.flat = [1, 3, 4]
sub_state = SimpleTestState()
sub_state.a = 3
sub_state.b = 'blah'
sub_state.flat = [1, 2]
sub_state.nested = []
state1.nested = [1, 3, sub_state]
state2 = clone(state1)
assert state2.a == 2
assert state2.b == 'hello'
assert state2.flat == [1, 3, 4]
assert state2.nested[0:2] == [1, 3]
assert state2.nested[2].a == 3
assert state2.nested[2].b == 'blah'
assert state2.nested[2].flat == [1, 2]
assert state2.nested[2].nested == []
EXPECTED_STR = """
a: 2
b: hello
flat: <CallbackList with 3 elements>
nested: <CallbackList with 3 elements>
"""
EXPECTED_REPR = """
<SimpleTestState
a: 2
b: hello
flat: <CallbackList with 3 elements>
nested: <CallbackList with 3 elements>
>
"""
def test_state_str_repr():
state1 = SimpleTestState()
state1.a = 2
state1.b = 'hello'
state1.flat = [1, 3, 4]
sub_state = SimpleTestState()
state1.nested = [1, 3, sub_state]
assert str(state1) == EXPECTED_STR.strip()
assert repr(state1) == EXPECTED_REPR.strip()
class TestStateAttributeLimitsHelper():
def setup_method(self, method):
self.data = Data(x=np.linspace(-100, 100, 10000),
y=np.linspace(2, 3, 10000), label='test_data')
self.data_collection = DataCollection([self.data])
class SimpleState(State):
layer = CallbackProperty()
comp = CallbackProperty()
lower = CallbackProperty()
upper = CallbackProperty()
log = CallbackProperty(False)
scale = CallbackProperty(100)
self.state = SimpleState()
self.helper = StateAttributeLimitsHelper(self.state, attribute='comp',
lower='lower', upper='upper',
percentile='scale', log='log')
self.state.data = self.data
self.state.comp = self.data.id['x']
self.x_id = self.data.main_components[0]
self.y_id = self.data.main_components[1]
def test_minmax(self):
assert self.helper.lower == -100
assert self.helper.upper == +100
def test_change_attribute(self):
self.helper.attribute = self.y_id
assert self.helper.lower == 2
assert self.helper.upper == 3
self.helper.attribute = self.x_id
assert self.helper.lower == -100
assert self.helper.upper == +100
def test_change_percentile(self):
# Changing scale mode updates the limits
self.helper.percentile = 99.5
assert_allclose(self.helper.lower, -99.5)
assert_allclose(self.helper.upper, +99.5)
self.helper.percentile = 99
assert_allclose(self.helper.lower, -99)
assert_allclose(self.helper.upper, +99)
self.helper.percentile = 90
assert_allclose(self.helper.lower, -90)
assert_allclose(self.helper.upper, +90)
# When switching to custom, the last limits are retained
self.helper.percentile = "Custom"
assert_allclose(self.helper.lower, -90)
assert_allclose(self.helper.upper, +90)
def test_percentile_cached(self):
# Make sure that if we change scale and change attribute, the scale
# modes are cached on a per-attribute basis.
self.helper.percentile = 99.5
self.state.comp = self.y_id
assert self.helper.percentile == 100
self.helper.percentile = 99
self.state.comp = self.x_id
assert self.helper.percentile == 99.5
self.state.comp = self.y_id
assert self.helper.percentile == 99
def test_flip_button(self):
self.helper.flip_limits()
assert self.helper.lower == +100
assert self.helper.upper == -100
# Make sure that values were re-cached when flipping
self.state.comp = self.y_id
assert self.helper.lower == 2
assert self.helper.upper == 3
self.state.comp = self.x_id
assert self.helper.lower == +100
assert self.helper.upper == -100
def test_manual_edit(self):
# Make sure that values are re-cached when edited manually
self.helper.percentile = "Custom"
self.state.lower = -122
self.state.upper = 234
self.helper.log = True
assert self.helper.lower == -122
assert self.helper.upper == 234
assert self.helper.log
self.state.comp = self.y_id
assert self.helper.lower == 2
assert self.helper.upper == 3
assert not self.helper.log
self.state.comp = self.x_id
assert self.helper.lower == -122
assert self.helper.upper == 234
assert self.helper.log
class TestStateAttributeSingleValueHelper():
def setup_method(self, method):
self.data = Data(x=np.linspace(-100, 30, 9999),
y=np.linspace(2, 3, 9999), label='test_data')
self.data_collection = DataCollection([self.data])
class SimpleState(State):
layer = CallbackProperty()
comp = CallbackProperty()
val = CallbackProperty()
self.state = SimpleState()
self.helper = StateAttributeSingleValueHelper(self.state, attribute='comp',
function=np.nanmedian, value='val')
self.state.data = self.data
self.state.comp = self.data.id['x']
self.x_id = self.data.main_components[0]
self.y_id = self.data.main_components[1]
def test_value(self):
assert self.helper.value == -35.
def test_change_attribute(self):
self.helper.attribute = self.y_id
assert self.helper.value == 2.5
self.helper.attribute = self.x_id
assert self.helper.value == -35
def test_manual_edit(self):
self.state.val = 42.
assert self.helper.value == 42
self.state.comp = self.y_id
assert self.helper.value == 2.5
self.state.comp = self.x_id
assert self.helper.value == 42
class TestStateAttributeHistogramHelper():
def setup_method(self, method):
self.data = Data(x=[-3.2, 4.3, 2.2, 5.4, 7.2, -1.1, 2.3],
y=['a', 'f', 'd', 'e', 'f', 'f', 'a'], label='test_data')
self.data_collection = DataCollection([self.data])
class SimpleState(State):
layer = CallbackProperty()
comp = CallbackProperty()
x_min = CallbackProperty()
x_max = CallbackProperty()
n_bin = CallbackProperty()
self.state = SimpleState()
self.helper = StateAttributeHistogramHelper(self.state, attribute='comp',
lower='x_min', upper='x_max', n_bin='n_bin')
self.state.data = self.data
def test_default_numerical(self):
self.state.comp = self.data.id['x']
assert self.state.x_min == -3.2
assert self.state.x_max == 7.2
assert self.state.n_bin == 15
def test_default_categorical(self):
self.state.comp = self.data.id['y']
assert self.state.x_min == -0.5
assert self.state.x_max == 3.5
assert self.state.n_bin == 4
def test_hitting_limits(self):
# FIXME: here we modify the internal defaults rather than making a new
# state helper, but this could be improved
self.helper._default_n_bin = 4
self.helper._max_n_bin = 3
self.state.comp = self.data.id['x']
assert self.state.x_min == -3.2
assert self.state.x_max == 7.2
assert self.state.n_bin == 4
self.state.comp = self.data.id['y']
assert self.state.x_min == -0.5
assert self.state.x_max == 3.5
assert self.state.n_bin == 3
def test_caching(self):
self.state.comp = self.data.id['x']
self.state.x_min = 2
self.state.x_max = 7
self.state.n_bin = 8
self.state.comp = self.data.id['y']
self.state.x_min = 1.5
self.state.x_max = 3.5
self.state.n_bin = 3
self.state.comp = self.data.id['x']
assert self.state.x_min == 2
assert self.state.x_max == 7
assert self.state.n_bin == 8
self.state.comp = self.data.id['y']
assert self.state.x_min == 1.5
assert self.state.x_max == 3.5
assert self.state.n_bin == 3
def test_histogram_helper_common_n_bin():
data = Data(x=[-3.2, 4.3, 2.2],
y=['a', 'f', 'd'],
z=[1.1, 2.3, 1.2],
label='test_data')
class SimpleState(State):
layer = CallbackProperty()
comp = CallbackProperty()
x_min = CallbackProperty()
x_max = CallbackProperty()
n_bin = CallbackProperty()
common = CallbackProperty()
state = SimpleState()
helper = StateAttributeHistogramHelper(state, attribute='comp',
lower='x_min', upper='x_max', n_bin='n_bin',
common_n_bin='common')
state.data = data
state.comp = data.id['x']
state.n_bin = 9
state.comp = data.id['y']
assert state.n_bin == 3
state.comp = data.id['z']
assert state.n_bin == 15
state.n_bin = 12
state.common = True
state.comp = data.id['x']
assert state.n_bin == 12
state.n_bin = 11
state.comp = data.id['y']
assert state.n_bin == 3
state.comp = data.id['z']
assert state.n_bin == 11
state.common = False
state.n_bin = 13
state.comp = data.id['x']
assert state.n_bin == 11
def test_histogram_helper_common_n_bin_active():
# Make sure that common_n_bin works as expected if True from start
data = Data(x=[-3.2, 4.3, 2.2],
y=['a', 'f', 'd'],
z=[1.1, 2.3, 1.2],
label='test_data')
class SimpleState(State):
layer = CallbackProperty()
comp = CallbackProperty()
x_min = CallbackProperty()
x_max = CallbackProperty()
n_bin = CallbackProperty()
common = CallbackProperty(True)
state = SimpleState()
helper = StateAttributeHistogramHelper(state, attribute='comp',
lower='x_min', upper='x_max', n_bin='n_bin',
common_n_bin='common')
state.data = data
state.comp = data.id['x']
state.n_bin = 9
state.comp = data.id['z']
assert state.n_bin == 9
state.n_bin = 12
state.common = True
state.comp = data.id['x']
assert state.n_bin == 12
state.n_bin = 11
state.comp = data.id['y']
assert state.n_bin == 3
state.comp = data.id['z']
assert state.n_bin == 11
state.common = False
state.n_bin = 13
state.comp = data.id['x']
assert state.n_bin == 11
def test_limits_helper_initial_values():
# Regression test for a bug that occurred if the limits cache was empty
# but some attributes were set to values - in this case we don't want to
# override the existing values.
data = Data(x=np.linspace(-100, 100, 10000),
y=np.linspace(2, 3, 10000), label='test_data')
class SimpleState(State):
layer = CallbackProperty()
comp = CallbackProperty()
lower = CallbackProperty()
upper = CallbackProperty()
state = SimpleState()
state.lower = 1
state.upper = 2
state.comp = data.id['x']
helper = StateAttributeLimitsHelper(state, attribute='comp',
lower='lower', upper='upper')
assert helper.lower == 1
assert helper.upper == 2
class DatetimeState(State):
a = CallbackProperty()
def test_state_serialization_datetime64():
state1 = DatetimeState()
state1.a = np.datetime64(100, 'D')
state2 = clone(state1)
assert state2.a == np.datetime64(100, 'D')
def test_nan_inf_minmax():
data = Data(x=[3, 1, -2, np.inf, np.nan], label='test_data')
class SimpleState(State):
layer = CallbackProperty()
comp = CallbackProperty()
lower = CallbackProperty()
upper = CallbackProperty()
percentile = CallbackProperty()
log = CallbackProperty()
state = SimpleState()
helper = StateAttributeLimitsHelper(state, attribute='comp', # noqa
lower='lower', upper='upper',
percentile='percentile', log='log')
state.data = data
state.comp = data.id['x']
assert state.lower == -2
assert state.upper == +3
state.log = True
assert state.lower == +1
assert state.upper == +3
state.log = False
state.percentile = 99
assert_allclose(state.lower, -1.97)
assert_allclose(state.upper, +2.98)
def test_percentile_no_log():
# Regression test for a bug that caused a crash if the state class had a
# percentile attribute but no log.
data = Data(x=np.linspace(-100, 100, 10000),
y=np.linspace(2, 3, 10000), label='test_data')
class SimpleState(State):
layer = CallbackProperty()
comp = CallbackProperty()
lower = CallbackProperty()
upper = CallbackProperty()
scale = CallbackProperty()
state = SimpleState()
state.comp = data.id['x']
state.lower = 2
state.upper = 4
helper = StateAttributeLimitsHelper(state, attribute='comp',
lower='lower', upper='upper',
percentile='scale')
state.scale = 90
| 27.832359
| 96
| 0.588178
| 1,772
| 14,278
| 4.632619
| 0.108352
| 0.068218
| 0.056523
| 0.039347
| 0.692654
| 0.649044
| 0.59252
| 0.575344
| 0.567426
| 0.519795
| 0
| 0.038075
| 0.29549
| 14,278
| 512
| 97
| 27.886719
| 0.77801
| 0.053579
| 0
| 0.652422
| 0
| 0
| 0.038157
| 0
| 0
| 0
| 0
| 0.001953
| 0.247863
| 1
| 0.068376
| false
| 0
| 0.017094
| 0
| 0.213675
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
163f5e0eb3de89d92ad7d61128630ed72fcd3690
| 1,079
|
py
|
Python
|
code/scripts/GeneratePNG_Preview_AsIs.py
|
dgrechka/bengaliai-cv19
|
9ef15c5b140628337ae6efe0d76e7ec5d291dc17
|
[
"MIT"
] | null | null | null |
code/scripts/GeneratePNG_Preview_AsIs.py
|
dgrechka/bengaliai-cv19
|
9ef15c5b140628337ae6efe0d76e7ec5d291dc17
|
[
"MIT"
] | null | null | null |
code/scripts/GeneratePNG_Preview_AsIs.py
|
dgrechka/bengaliai-cv19
|
9ef15c5b140628337ae6efe0d76e7ec5d291dc17
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import sys
import os
from glob import glob
import png
sys.path.append(os.path.join(__file__,'..','..'))
from tfDataIngest import tfDataSetParquet as tfDsParquet
inputDataDir = sys.argv[1]
outputDir = sys.argv[2]
# test app
if __name__ == "__main__":
files = glob(os.path.join(inputDataDir,"train*.parquet"))
print("Found {0} parquet files in input dir {1}".format(len(files),inputDataDir))
print("First is {0}".format(files[0]))
ds = tfDsParquet.create_parquet_dataset([files[0]])
for element in ds.as_numpy_iterator():
#print("Iterating...")
sampleId,pixels = element
sampleId = sampleId.decode("utf-8")
fileName = os.path.join(outputDir,"{0}.png".format(sampleId))
png.from_array(pixels, mode="L").save(fileName)
#print(element)
#print("sample name is {0}".format(sampleId))
#print(sampleIds.shape)
#print(pixels.shape)
# a += 1
# if a > 10:
# break
print("Done")
#print("{0} elements in the dataset".format(len(ds.)))
| 29.972222
| 85
| 0.636701
| 141
| 1,079
| 4.751773
| 0.468085
| 0.026866
| 0.044776
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016393
| 0.208526
| 1,079
| 36
| 86
| 29.972222
| 0.76815
| 0.193698
| 0
| 0
| 0
| 0
| 0.110465
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.3
| 0
| 0.3
| 0.15
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1640d2033b3fc61dda0183c87b5baa9f8cbed3bd
| 2,763
|
py
|
Python
|
widgets/datepicker_ctrl/codegen.py
|
RSabet/wxGlade
|
8b62eb8397308e60977857455b2765727b1b940f
|
[
"MIT"
] | 225
|
2018-03-26T11:23:22.000Z
|
2022-03-24T09:44:08.000Z
|
widgets/datepicker_ctrl/codegen.py
|
RSabet/wxGlade
|
8b62eb8397308e60977857455b2765727b1b940f
|
[
"MIT"
] | 403
|
2018-01-03T19:47:28.000Z
|
2018-03-23T17:43:39.000Z
|
widgets/datepicker_ctrl/codegen.py
|
DietmarSchwertberger/wxGlade
|
8e78cdc509d458cc896d47315e19f3daa6c09213
|
[
"MIT"
] | 47
|
2018-04-08T16:48:38.000Z
|
2021-12-21T20:08:44.000Z
|
"""\
Code generator functions for wxDatePickerCtrl objects
@copyright: 2002-2007 Alberto Griggio
@copyright: 2014-2016 Carsten Grohmann
@copyright: 2016-2021 Dietmar Schwertberger
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
import common, compat
import wcodegen
class PythonDatePickerCtrlGenerator(wcodegen.PythonWidgetCodeWriter):
tmpl = '%(name)s = %(klass)s(%(parent)s, %(id)s%(style)s)\n'
# XXX the following needs to depend on the code generator when Phoenix is about to be supported fully:
if compat.IS_PHOENIX:
import_modules = ['import wx.adv\n']
if compat.IS_PHOENIX:
def cn(self, name):
# don't process already formatted items again
if name.startswith('wx.'):
return name
if name.startswith('wx'):
return 'wx.adv.' + name[2:]
elif name.startswith('EVT_'):
return 'wx.adv.' + name
return name
def _prepare_tmpl_content(self, obj):
wcodegen.PythonWidgetCodeWriter._prepare_tmpl_content(self, obj)
self.has_setdefault = int(obj.properties.get('default', 0))
return
class CppDatePickerCtrlGenerator(wcodegen.CppWidgetCodeWriter):
import_modules = ['<wx/datectrl.h>']
tmpl = '%(name)s = new %(klass)s(%(parent)s, %(id)s, ' \
'wxDefaultDateTime, wxDefaultPosition, wxDefaultSize, ' \
'%(style)s);\n'
prefix_style = False
set_default_style = True
def _prepare_tmpl_content(self, obj):
wcodegen.CppWidgetCodeWriter._prepare_tmpl_content(self, obj)
self.has_setdefault = int(obj.properties.get('default', 0))
return
def xrc_code_generator(obj):
xrcgen = common.code_writers['XRC']
class DatePickerCtrlXrcObject(xrcgen.DefaultXrcObject):
def write_property(self, name, val, output, tabs):
if name == 'label':
# translate & into _ as accelerator marker
val2 = val.replace('&', '_')
if val.count('&&') > 0:
while True:
index = val.find('&&')
if index < 0:
break
val = val2[:index] + '&&' + val2[index+2:]
else:
val = val2
xrcgen.DefaultXrcObject.write_property(self, name, val, output, tabs)
return DatePickerCtrlXrcObject(obj)
def initialize():
klass = 'wxDatePickerCtrl'
common.class_names['EditDatePickerCtrl'] = klass
common.register('python', klass, PythonDatePickerCtrlGenerator(klass))
common.register('C++', klass, CppDatePickerCtrlGenerator(klass))
common.register('XRC', klass, xrc_code_generator)
| 33.695122
| 106
| 0.615635
| 294
| 2,763
| 5.676871
| 0.435374
| 0.031156
| 0.04314
| 0.052726
| 0.22169
| 0.19293
| 0.173757
| 0.089874
| 0.089874
| 0.089874
| 0
| 0.01689
| 0.271444
| 2,763
| 81
| 107
| 34.111111
| 0.812221
| 0.156352
| 0
| 0.192308
| 0
| 0.019231
| 0.125431
| 0.018103
| 0
| 0
| 0
| 0
| 0
| 1
| 0.115385
| false
| 0
| 0.076923
| 0
| 0.480769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1642121cd961a12c79b579c9fabd08e8a6ce9bc8
| 3,960
|
py
|
Python
|
train.py
|
lck1201/simple-effective-3Dpose-baseline
|
790a185b44e48a9cc619f52b6615aae729bff76b
|
[
"MIT"
] | 20
|
2019-03-29T12:20:10.000Z
|
2021-02-07T08:32:18.000Z
|
train.py
|
motokimura/simple-effective-3Dpose-baseline
|
790a185b44e48a9cc619f52b6615aae729bff76b
|
[
"MIT"
] | 10
|
2019-04-03T15:25:00.000Z
|
2021-03-26T16:23:33.000Z
|
train.py
|
motokimura/simple-effective-3Dpose-baseline
|
790a185b44e48a9cc619f52b6615aae729bff76b
|
[
"MIT"
] | 7
|
2019-06-02T13:25:27.000Z
|
2020-12-17T06:07:17.000Z
|
import pprint
import mxnet as mx
from mxnet import gluon
from mxnet import init
from lib.core.get_optimizer import *
from lib.core.metric import MPJPEMetric
from lib.core.loss import MeanSquareLoss
from lib.core.loader import JointsDataIter
from lib.network import get_net
from lib.net_module import *
from lib.utils import *
from lib.dataset.hm36 import hm36
from config import config, gen_config, update_config_from_args, s_args
config = update_config_from_args(config, s_args)
def main():
# Parse config and mkdir output
logger, final_Model_path = create_logger(config)
config.final_Model_path = final_Model_path
gen_config(os.path.join(final_Model_path, 'hyperParams.yaml'))
logger.info('Training config:{}\n'.format(pprint.pformat(config)))
# define context
if config.useGPU:
ctx = [mx.gpu(int(i)) for i in config.gpu.split(',')]
else:
ctx = mx.cpu()
logger.info("Using context:", ctx)
# dataset, generate trainset/ validation set
train_imdbs = []
valid_imdbs = []
for i in range(len(config.DATASET.train_image_set)):
logger.info("Construct Dataset:", config.DATASET.dbname[i], ", Dataset Path:", config.DATASET.dataset_path[i])
train_imdbs.append(eval(config.DATASET.dbname[i])(config.DATASET.train_image_set[i],
config.DATASET.root_path[i],
config.DATASET.dataset_path[i]))
valid_imdbs.append(eval(config.DATASET.dbname[i])(config.DATASET.valid_image_set[i],
config.DATASET.root_path[i],
config.DATASET.dataset_path[i],
config.final_Model_path))
data_names = ['hm36data']
label_names = ['hm36label']
train_data_iter = JointsDataIter(train_imdbs[0], runmode=0,
data_names = data_names, label_names=label_names,
shuffle=config.TRAIN.SHUFFLE, batch_size=len(ctx)*config.TRAIN.batchsize, logger=logger)
valid_data_iter = JointsDataIter(valid_imdbs[0], runmode=1,
data_names = data_names, label_names=label_names,
shuffle=False, batch_size=len(ctx)*config.TEST.batchsize, logger=logger)
assert train_data_iter.get_meanstd()['mean3d'].all() == valid_data_iter.get_meanstd()['mean3d'].all()
# network
net = get_net(config)
if config.resume:
ckp_path = os.path.join(config.resumeckp)
net.collect_params().load(ckp_path, ctx=ctx)
else:
net.initialize(init=init.MSRAPrelu(), ctx=ctx)
if config.NETWORK.hybrid:
net.hybridize()
logger.info(net)
# define loss and metric
mean3d = train_data_iter.get_meanstd()['mean3d']
std3d = train_data_iter.get_meanstd()['std3d']
train_metric = MPJPEMetric('train_metric', mean3d, std3d)
eval_metric = MPJPEMetric('valid_metric', mean3d, std3d)
loss = MeanSquareLoss()
# optimizer
optimizer, optimizer_params = get_optimizer(config, ctx)
# train and valid
TrainDBsize = train_data_iter.get_size()
ValidDBsize = valid_data_iter.get_size()
logger.info("Train DB size:", TrainDBsize, "Valid DB size:",ValidDBsize)
if not isinstance(train_data_iter, mx.io.PrefetchingIter):
train_data_iter = mx.io.PrefetchingIter(train_data_iter)
trainer = gluon.Trainer(net.collect_params(), optimizer, optimizer_params)
for epoch in range(config.TRAIN.begin_epoch, config.TRAIN.end_epoch):
trainNet(net, trainer, train_data_iter, loss, train_metric, epoch, config, logger=logger, ctx=ctx)
validNet(net, valid_data_iter, loss, eval_metric, epoch, config, logger=logger, ctx=ctx)
logger.kill()
if __name__ == '__main__':
main()
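A minimal sketch, assuming MXNet is installed, of the GPU/CPU context selection that main() performs from config.gpu; the helper name and the example id string are hypothetical.
import mxnet as mx

def build_context(gpu_ids, use_gpu=True):
    # Hypothetical helper mirroring the context selection above: a
    # comma-separated GPU id string becomes a list of mx.gpu contexts,
    # otherwise a single-element CPU list so that len(ctx) still works.
    if use_gpu:
        return [mx.gpu(int(i)) for i in gpu_ids.split(',')]
    return [mx.cpu()]

print(build_context('0,1'))              # [gpu(0), gpu(1)]
print(build_context('', use_gpu=False))  # [cpu(0)]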
| 41.684211
| 124
| 0.646212
| 490
| 3,960
| 5
| 0.25102
| 0.042449
| 0.047755
| 0.026122
| 0.288163
| 0.217551
| 0.181633
| 0.153061
| 0.153061
| 0.045714
| 0
| 0.007395
| 0.248737
| 3,960
| 95
| 125
| 41.684211
| 0.816134
| 0.036364
| 0
| 0.084507
| 0
| 0
| 0.048307
| 0
| 0
| 0
| 0
| 0
| 0.014085
| 1
| 0.014085
| false
| 0
| 0.183099
| 0
| 0.197183
| 0.028169
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1643d3915575e537c0423b05a3b3b1e3b7eb7865
| 6,789
|
py
|
Python
|
FastLinear/generate_memory_bank.py
|
WangFeng18/dino
|
1a4e49bd0e99d7e205338b14994a1d57c3084cfe
|
[
"Apache-2.0"
] | null | null | null |
FastLinear/generate_memory_bank.py
|
WangFeng18/dino
|
1a4e49bd0e99d7e205338b14994a1d57c3084cfe
|
[
"Apache-2.0"
] | null | null | null |
FastLinear/generate_memory_bank.py
|
WangFeng18/dino
|
1a4e49bd0e99d7e205338b14994a1d57c3084cfe
|
[
"Apache-2.0"
] | null | null | null |
import os
from tqdm import tqdm
import torch.backends.cudnn as cudnn
import torch
from datasets import ImageNetInstance, ImageNetInstanceLMDB
from torchvision import transforms
import argparse
from BaseTaskModel.task_network import get_moco_network, get_swav_network, get_selfboost_network, get_minmaxent_network, get_simclr_network, get_sup_network, get_dino_network
from torch.utils.data import DataLoader
from PIL import ImageFile, Image
import torch.distributed as dist
from lars import *
ImageFile.LOAD_TRUNCATED_IMAGES = True
import warnings
warnings.filterwarnings('ignore')
def concat_all_gather(tensor):
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
tensors_gather = [torch.ones_like(tensor)
for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
output = torch.cat(tensors_gather, dim=0)
return output
def main():
parser = argparse.ArgumentParser("The first stage of BoostrapSelfSup")
parser.add_argument('--local_rank', default=-1, type=int, help='node rank for distributed parallel')
parser.add_argument("--task", type=str, default="moco", help="the pretraining models")
parser.add_argument("--pretrained_path", type=str, default="", help="the pretraining models")
parser.add_argument("--save_path", type=str, default="", help="where to save the memory_bank")
parser.add_argument("--backbone", type=str, default="resnet50")
parser.add_argument("--data_path", type=str, default="~/ILSVRC2012/", help="the data path")
parser.add_argument("--batch_size", type=int, default=32, help="batch size")
parser.add_argument("--img_size", type=int, default=224, help="image size")
parser.add_argument("--feat_dim", type=int, default=128, help="feat dimension")
parser.add_argument("--feature_layer", type=str, default='lowdim', help="feature layer")
parser.add_argument('--use-lmdb', action='store_true')
args = parser.parse_args()
pretrained_path = os.path.expanduser(args.pretrained_path)
save_path = os.path.expanduser(args.save_path)
data_path = os.path.expanduser(args.data_path)
batch_size = args.batch_size
feat_dim = args.feat_dim
dist.init_process_group(backend='nccl')
torch.cuda.set_device(args.local_rank)
# network = ResNet(50, frozen_stages=4)
if args.task == 'moco':
network = get_moco_network(pretrained_path, feature_layer=args.feature_layer)
elif args.task == 'swav':
network = get_swav_network(pretrained_path, feature_layer=args.feature_layer)
elif args.task == 'selfboost':
network = get_selfboost_network(pretrained_path, feature_layer=args.feature_layer)
elif args.task == 'minmaxent':
network = get_minmaxent_network(args.backbone, pretrained_path, feature_layer=args.feature_layer)
elif args.task == 'dino':
network = get_dino_network(args.backbone, pretrained_path, feature_layer=args.feature_layer)
elif args.task == 'simclr':
network = get_simclr_network(args.backbone, pretrained_path, feature_layer=args.feature_layer)
elif args.task == 'sup':
network = get_sup_network(args.backbone, pretrained_path, feature_layer=args.feature_layer)
else:
raise NotImplementedError
network.cuda(args.local_rank)
network = torch.nn.parallel.DistributedDataParallel(network, device_ids=[args.local_rank])
cudnn.benchmark = True
augmentation = transforms.Compose([
transforms.Resize(int(256*args.img_size/224), interpolation=Image.BICUBIC),
transforms.CenterCrop(args.img_size),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
if args.use_lmdb:
train_dataset = ImageNetInstanceLMDB(root=data_path, list_file='train.lmdb', transform=augmentation)
val_dataset = ImageNetInstanceLMDB(root=data_path, list_file='val.lmdb', transform=augmentation)
else:
train_dataset = ImageNetInstance(root=os.path.join(data_path, 'train'), transform=augmentation)
val_dataset = ImageNetInstance(root=os.path.join(data_path, 'val'), transform=augmentation)
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=False, rank=args.local_rank)
val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False, rank=args.local_rank)
n_train_points = len(train_dataset)
n_val_points = len(val_dataset)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, sampler=train_sampler, pin_memory=True, num_workers=4)
val_dataloader = DataLoader(val_dataset, batch_size=batch_size, sampler=val_sampler, pin_memory=True, num_workers=4)
print("Initializing train memory bank: {} points.".format(n_train_points))
train_memory_bank = torch.zeros(n_train_points, feat_dim).to("cpu").detach()
print("Initializing val memory bank: {} points.".format(n_val_points))
val_memory_bank = torch.zeros(n_val_points, feat_dim).to("cpu").detach()
network.eval()
train_sampler.set_epoch(0)
val_sampler.set_epoch(0)
for data in tqdm(train_dataloader):
idx, img, _ = data
idx = idx.cuda(args.local_rank, non_blocking=True)
img = img.cuda(args.local_rank, non_blocking=True)
if True: #args.backbone.startswith('resnet'):
feature = network(img)
else:
feature = network.module.get_intermediate_layers(img, 4)
feature = [x[:, 0] for x in feature]
feature = torch.cat(feature, dim=-1)
feature = concat_all_gather(feature.contiguous())
idx = concat_all_gather(idx)
with torch.no_grad():
train_memory_bank[idx,:] = feature.detach().cpu()
for data in tqdm(val_dataloader):
idx, img, _ = data
idx = idx.cuda(args.local_rank, non_blocking=True)
img = img.cuda(args.local_rank, non_blocking=True)
if True: #args.backbone.startswith('resnet'):
feature = network(img)
else:
feature = network.module.get_intermediate_layers(img, 4)
feature = [x[:, 0] for x in feature]
feature = torch.cat(feature, dim=-1)
feature = concat_all_gather(feature.contiguous())
idx = concat_all_gather(idx)
with torch.no_grad():
val_memory_bank[idx,:] = feature.detach().cpu()
if args.local_rank == 0:
torch.save(
{'train_memory_bank': train_memory_bank,
'val_memory_bank': val_memory_bank
},
save_path  # use the expanded path computed above
)
if __name__ == '__main__':
main()
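A minimal sketch, assuming the script above has already been run on rank 0, of how the saved memory banks could be read back; the file path is a placeholder.
import torch

# The dictionary keys match what the script saves above; the path is hypothetical.
banks = torch.load('memory_bank.pth', map_location='cpu')
train_bank = banks['train_memory_bank']   # shape: (n_train_points, feat_dim)
val_bank = banks['val_memory_bank']       # shape: (n_val_points, feat_dim)
print(train_bank.shape, val_bank.shape)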
| 44.664474
| 174
| 0.705259
| 879
| 6,789
| 5.208191
| 0.220705
| 0.04194
| 0.040848
| 0.039755
| 0.438401
| 0.38685
| 0.327872
| 0.260157
| 0.240498
| 0.240498
| 0
| 0.010757
| 0.178377
| 6,789
| 151
| 175
| 44.960265
| 0.809968
| 0.033289
| 0
| 0.211382
| 0
| 0
| 0.08699
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.01626
| false
| 0
| 0.105691
| 0
| 0.130081
| 0.01626
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
16447f2400735bc0538f6c77d41578715bdd08b9
| 2,489
|
py
|
Python
|
tests/utils/test_mercator.py
|
anuragtr/fabric8-analytics-rudra
|
13fb15539d195fcb89ced02b205d034ec0c18e00
|
[
"Apache-2.0"
] | 1
|
2019-05-13T09:31:19.000Z
|
2019-05-13T09:31:19.000Z
|
tests/utils/test_mercator.py
|
anuragtr/fabric8-analytics-rudra
|
13fb15539d195fcb89ced02b205d034ec0c18e00
|
[
"Apache-2.0"
] | null | null | null |
tests/utils/test_mercator.py
|
anuragtr/fabric8-analytics-rudra
|
13fb15539d195fcb89ced02b205d034ec0c18e00
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from rudra.utils.mercator import SimpleMercator
class TestSimpleMercator:
pom_xml_content = """
<project>
<dependencies>
<dependency>
<groupId>grp1.id</groupId>
<artifactId>art1.id</artifactId>
</dependency>
<dependency>
<groupId>grp2.id</groupId>
<artifactId>art2.id</artifactId>
</dependency>
<dependency>
<groupId>grp3.id</groupId>
<artifactId>art3.id</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
</project>
"""
def test_get_dependencies(self):
client = SimpleMercator(self.pom_xml_content)
deps = client.get_dependencies()
assert len(deps) == 3
artifact_ids = [d.artifact_id for d in deps]
assert not {'art1.id', 'art2.id', 'art3.id'}.difference(set(artifact_ids))
group_ids = [d.group_id for d in deps]
assert not {'grp1.id', 'grp2.id', 'grp3.id'}.difference(set(group_ids))
scopes = [d.scope for d in deps]
assert not {'compile', 'test'}.difference(set(scopes))
def test_get_dependencies_with_no_dependencies(self):
client = SimpleMercator('<project></project>'.encode())
deps = client.get_dependencies()
assert len(deps) == 0
def test_get_dependencies_with_no_content(self):
with pytest.raises(ValueError, match='Empty Content .*'):
SimpleMercator('')
def test_find_data_corrupt_pom(self):
content = """
</project>
</project>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>grp1.id</groupId>
<artifactId>art1.id</artifactId>
</dependency>
</dependencies>
</dependencyManagement>
<dependencies>
<dependency>
<groupId>grp1.id</groupId>
<artifactId>art1.id</artifactId>
</dependency>
</dependencies>
</project>
"""
client = SimpleMercator(content)
deps = client.get_dependencies()
assert len(deps) == 1
artifact_ids = [d.artifact_id for d in deps]
assert 'art1.id' in artifact_ids
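A minimal usage sketch mirroring what the tests above assert, assuming rudra is installed; the POM snippet is illustrative.
from rudra.utils.mercator import SimpleMercator

pom = """
<project>
  <dependencies>
    <dependency>
      <groupId>grp1.id</groupId>
      <artifactId>art1.id</artifactId>
    </dependency>
  </dependencies>
</project>
"""
deps = SimpleMercator(pom).get_dependencies()
print([(d.group_id, d.artifact_id, d.scope) for d in deps])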
| 34.09589
| 82
| 0.526718
| 225
| 2,489
| 5.684444
| 0.257778
| 0.070367
| 0.074277
| 0.031274
| 0.513683
| 0.469898
| 0.411259
| 0.362783
| 0.292416
| 0.292416
| 0
| 0.012587
| 0.361591
| 2,489
| 72
| 83
| 34.569444
| 0.792322
| 0
| 0
| 0.515625
| 0
| 0
| 0.527923
| 0.134592
| 0
| 0
| 0
| 0
| 0.109375
| 1
| 0.0625
| false
| 0
| 0.03125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
16477f8a306c6c85422ce092acee78844c0cd611
| 4,037
|
py
|
Python
|
django_airbrake/utils/client.py
|
Captricity/airbrake-django
|
2ea126653883732a13f1a80c9e567b7076601620
|
[
"BSD-3-Clause"
] | null | null | null |
django_airbrake/utils/client.py
|
Captricity/airbrake-django
|
2ea126653883732a13f1a80c9e567b7076601620
|
[
"BSD-3-Clause"
] | 2
|
2016-07-12T15:44:02.000Z
|
2016-08-19T20:31:49.000Z
|
django_airbrake/utils/client.py
|
Captricity/airbrake-django
|
2ea126653883732a13f1a80c9e567b7076601620
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
import traceback
from django.conf import settings
from django.urls import resolve
from lxml import etree
from six.moves.urllib.request import urlopen, Request
class Client(object):
API_URL = '%s://airbrake.io/notifier_api/v2/notices'
ERRORS = {
403: "Cannot use SSL",
422: "Invalid XML sent to Airbrake",
500: "Airbrake has braked too hard",
}
DEFAULTS = {
'TIMEOUT': 5,
'USE_SSL': False,
}
@property
def url(self):
scheme = 'http'
if self.settings['USE_SSL']:
scheme = 'https'
return Client.API_URL % scheme
@property
def settings(self):
if getattr(self, '_settings', None):
return self._settings
self._settings = Client.DEFAULTS
self._settings.update(getattr(settings, 'AIRBRAKE', {}))
return self._settings
def notify(self, exception=None, request=None):
headers = {
'Content-Type': 'text/xml'
}
payload = self._generate_xml(exception=exception, request=request)
req = Request(self.url, payload, headers)
resp = urlopen(req, timeout=self.settings['TIMEOUT'])
status = resp.getcode()
if status == 200:
return True
elif status in Client.ERRORS:
raise Exception(Client.ERRORS[status])
def _generate_xml(self, exception=None, request=None):
_, _, trace = sys.exc_info()
notice_em = etree.Element('notice', version='2.0')
tb = traceback.extract_tb(trace)
api_key = etree.SubElement(notice_em, 'api-key').text = self.settings['API_KEY']
notifier_em = etree.SubElement(notice_em, 'notifier')
etree.SubElement(notifier_em, 'name').text = 'django-airbrake'
etree.SubElement(notifier_em, 'version').text = '0.0.4'
url_el = etree.SubElement(notifier_em, 'url')
url_el.text = 'http://example.com'
if request:
request_em = etree.SubElement(notice_em, 'request')
if request.is_secure():
scheme = 'https'
else:
scheme = 'http'
url = '%s://%s%s' % (scheme, request.get_host(),
request.get_full_path())
etree.SubElement(request_em, 'url').text = str(url)
url_el.text = url
cb, _, _ = resolve(request.path)
etree.SubElement(request_em, 'component').text = str(cb.__module__)
etree.SubElement(request_em, 'action').text = str(cb.__name__)
if 'context' in self.settings:
cgi_em = etree.SubElement(request_em, 'cgi-data')
for key, val in list(self.settings['context'].items()):
var = etree.SubElement(cgi_em, 'var')
var.set('key', str(key))
var.text = str(val)
session = list(request.session.items())
if len(session):
session_em = etree.SubElement(request_em, 'session')
for key, val in session:
var = etree.SubElement(session_em, 'var')
var.set('key', str(key))
var.text = str(val)
if exception:
error_em = etree.SubElement(notice_em, 'error')
etree.SubElement(error_em, 'class').text = str(exception.__class__.__name__)
etree.SubElement(error_em, 'message').text = str(exception)
backtrace_em = etree.SubElement(error_em, 'backtrace')
for line in tb:
etree.SubElement(backtrace_em, 'line',
file=str(line[0]),
number=str(line[1]),
method=str(line[2]))
env_em = etree.SubElement(notice_em, 'server-environment')
etree.SubElement(env_em, 'environment-name').text = self.settings.get('ENVIRONMENT', 'development')
return '<?xml version="1.0" encoding="UTF-8"?>%s' % etree.tostring(notice_em)
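A minimal sketch of how this client might be called from a Django view's exception handler, assuming an AIRBRAKE dict with at least API_KEY is present in Django settings; the view itself is hypothetical.
from django_airbrake.utils.client import Client

def my_view(request):
    # Hypothetical view: report the active exception to Airbrake, then re-raise.
    try:
        1 / 0                      # stand-in for the real work that failed
    except ZeroDivisionError as exc:
        Client().notify(exception=exc, request=request)
        raise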
| 34.211864
| 107
| 0.566757
| 454
| 4,037
| 4.876652
| 0.288546
| 0.135501
| 0.053749
| 0.051942
| 0.149051
| 0.02981
| 0.02981
| 0.02981
| 0.02981
| 0.02981
| 0
| 0.008964
| 0.30914
| 4,037
| 117
| 108
| 34.504274
| 0.784869
| 0
| 0
| 0.131868
| 0
| 0
| 0.118157
| 0.009908
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043956
| false
| 0
| 0.065934
| 0
| 0.208791
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1649638736a414c6fde2874636d2e6f9fe9164e4
| 2,912
|
py
|
Python
|
docs/tutorial/context/app.py
|
theasylum/wired
|
6b6a3e83702b18ebb41ca1f94e957bdf7e44986d
|
[
"MIT"
] | 12
|
2018-07-22T15:40:35.000Z
|
2020-12-27T21:39:18.000Z
|
docs/tutorial/context/app.py
|
theasylum/wired
|
6b6a3e83702b18ebb41ca1f94e957bdf7e44986d
|
[
"MIT"
] | 36
|
2019-03-23T13:47:25.000Z
|
2020-11-28T18:08:14.000Z
|
docs/tutorial/context/app.py
|
theasylum/wired
|
6b6a3e83702b18ebb41ca1f94e957bdf7e44986d
|
[
"MIT"
] | 6
|
2019-03-23T20:08:57.000Z
|
2021-06-03T16:52:06.000Z
|
"""
A customer walks into a store. Do the steps to interact with them:
- Get *a* (not *the*) greeter
- Interact with them
Simple wired application:
- Settings that say what punctuation to use
- Registry
- Two factories that say hello, one for the FrenchCustomer context
- A default Customer and FrenchCustomer
"""
from dataclasses import dataclass
from wired import ServiceRegistry
@dataclass
class Customer:
name: str
@dataclass
class FrenchCustomer(Customer):
pass
@dataclass
class Settings:
punctuation: str
@dataclass
class Greeter:
punctuation: str
greeting: str = 'Hello'
def __call__(self, customer: Customer) -> str:
return f'{self.greeting} {customer.name} {self.punctuation}'
@dataclass
class FrenchGreeter(Greeter):
greeting: str = 'Bonjour'
def __call__(self, customer: Customer) -> str:
return f'{self.greeting} {customer.name} {self.punctuation}'
def setup(settings: Settings) -> ServiceRegistry:
# Make the registry
registry = ServiceRegistry()
# Make the greeter factories, using punctuation from settings
punctuation = settings.punctuation
# First the default greeter, no context
def default_greeter_factory(container) -> Greeter:
# Use the dataclass default for greeting
return Greeter(punctuation=punctuation)
# Register it as a factory using its class for the "key"
registry.register_factory(default_greeter_factory, Greeter)
# Now the French greeter, using context of FrenchCustomer
def french_greeter_factory(container) -> Greeter:
# Use the dataclass default for greeting
return FrenchGreeter(punctuation=punctuation)
# Register it as a factory using its class for the "key", but
# this time register with a "context"
registry.register_factory(
french_greeter_factory, Greeter, context=FrenchCustomer
)
return registry
def greet_customer(registry: ServiceRegistry, customer: Customer) -> str:
# A customer comes in, handle the steps in the greeting
# as a container.
container = registry.create_container()
# Get a Greeter using the customer as context. Use the Customer when
# generating the greeting.
greeter: Greeter = container.get(Greeter, context=customer)
greeting = greeter(customer)
return greeting
def main():
settings = Settings(punctuation='!!')
registry = setup(settings)
# *** Default Customer
# Make a Customer, pass into the "greet_customer" interaction,
# then test the result.
customer = Customer(name='Mary')
assert 'Hello Mary !!' == greet_customer(registry, customer)
# *** French Customer
# Make a FrenchCustomer, pass into the "greet_customer" interaction,
# then test the result.
french_customer = FrenchCustomer(name='Henri')
assert 'Bonjour Henri !!' == greet_customer(registry, french_customer)
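A minimal sketch, reusing the Customer and Greeter dataclasses defined above, of how a third customer type could get its own greeter; the German names are hypothetical, and the registration line is commented out because it needs the registry returned by setup().
@dataclass
class GermanCustomer(Customer):
    pass

@dataclass
class GermanGreeter(Greeter):
    greeting: str = 'Hallo'

def german_greeter_factory(container) -> Greeter:
    # Hypothetical factory mirroring french_greeter_factory above.
    return GermanGreeter(punctuation='!!')

# registry.register_factory(german_greeter_factory, Greeter, context=GermanCustomer)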
| 25.54386
| 74
| 0.712569
| 341
| 2,912
| 6.008798
| 0.249267
| 0.034163
| 0.027818
| 0.018546
| 0.254758
| 0.254758
| 0.254758
| 0.254758
| 0.254758
| 0.254758
| 0
| 0
| 0.206731
| 2,912
| 113
| 75
| 25.769912
| 0.887013
| 0.374313
| 0
| 0.23913
| 0
| 0
| 0.084727
| 0
| 0
| 0
| 0
| 0
| 0.043478
| 1
| 0.152174
| false
| 0.021739
| 0.043478
| 0.086957
| 0.543478
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1649bff1d5c282f752cad12fddde82da77d3b6ea
| 3,133
|
py
|
Python
|
feast/DetectionModules/ldar_program.py
|
GeoSensorWebLab/FEAST_PtE
|
63ff8b7925873d756666f3c0c4b9f0f84abd5eb2
|
[
"MIT"
] | 10
|
2020-03-26T20:12:19.000Z
|
2022-02-14T22:47:01.000Z
|
feast/DetectionModules/ldar_program.py
|
GeoSensorWebLab/FEAST_PtE
|
63ff8b7925873d756666f3c0c4b9f0f84abd5eb2
|
[
"MIT"
] | 1
|
2021-07-14T21:14:12.000Z
|
2021-07-14T21:14:12.000Z
|
feast/DetectionModules/ldar_program.py
|
GeoSensorWebLab/FEAST_PtE
|
63ff8b7925873d756666f3c0c4b9f0f84abd5eb2
|
[
"MIT"
] | 9
|
2020-03-27T22:57:31.000Z
|
2021-09-29T17:29:35.000Z
|
"""
This module defines the LDARProgram class.
"""
import numpy as np
import copy
from .repair import Repair
from ..EmissionSimModules.result_classes import ResultDiscrete, ResultContinuous
class LDARProgram:
"""
An LDAR program contains one or more detection methods and one or more repair methods. Each LDAR program records
the find and repair costs associated with all detection and repair methods in the program. The LDAR program
runs the action methods of each detection and repair method contained in the program. The detection and
repair methods determine their own behavior at each time step.
"""
def __init__(self, gas_field, tech_dict):
"""
:param gas_field: a GasField object
:param tech_dict: a dict containing all of the detection methods to be employed by the LDAR program. The dict
must have the form {"name": DetectionMethod}. All of the relationships between detection methods and between
detection methods and repair methods must be defined by the dispatch_objects specified for each method.
"""
self.emissions = copy.deepcopy(gas_field.emissions)
self.emissions_timeseries = []
self.vents_timeseries = []
#self.emissions_results = ResultContinuous(units='g/s')
#self.vents_results = ResultContinuous(units='g/s')
self.tech_dict = tech_dict
self.repair = {}
self.repair_cost = ResultDiscrete(units='USD')
for tech_name, tech in tech_dict.items():
if type(tech.dispatch_object) is Repair:
self.repair[tech_name + ' ' + tech.dispatch_object.name] = tech.dispatch_object
def action(self, time, gas_field):
"""
Runs the detect method for every tech in tech_dict and runs the repair method
:param time: the simulation time object
:param gas_field: the simulation gas_field object
:return:
"""
for i, tech in enumerate(self.tech_dict.values()):
if hasattr(tech, 'survey_interval') and tech.survey_interval \
and np.mod(time.current_time, tech.survey_interval) < time.delta_t:
tech.action(list(np.linspace(0, gas_field.n_sites - 1, gas_field.n_sites, dtype=int)))
tech.detect(time, gas_field, self.emissions.get_current_emissions(time))
for rep in self.repair.values():
rep.repair(time, self.emissions)
def calc_rep_costs(self, time):
"""
Calculates the total repair costs up to time.current_time, assuming that all reparable emissions that have a
max end_time less than time.current_time have been repaired.
:param time: a FEAST time object
:return: None
"""
for em in self.emissions.emissions.index.unique():
empdf_temp = self.emissions.emissions.loc[[em]]
max_row = empdf_temp[empdf_temp.end_time == empdf_temp.end_time.max()].iloc[0]
if max_row.reparable & (max_row.end_time < time.current_time):
self.repair_cost.append_entry([max_row.end_time, max_row.repair_cost])
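A minimal standalone sketch of the survey-scheduling test used in LDARProgram.action above; the function name is hypothetical.
import numpy as np

def survey_due(current_time, survey_interval, delta_t):
    # Mirrors the check above: a survey runs when the simulation time,
    # modulo the survey interval, falls within one time step of zero.
    return bool(survey_interval) and np.mod(current_time, survey_interval) < delta_t

print(survey_due(60.0, 30, 1.0))   # True  (on a 30-day boundary)
print(survey_due(75.0, 30, 1.0))   # False (mid-interval)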
| 48.2
| 120
| 0.679221
| 422
| 3,133
| 4.893365
| 0.331754
| 0.034867
| 0.029056
| 0.024213
| 0.03293
| 0.03293
| 0
| 0
| 0
| 0
| 0
| 0.001262
| 0.241302
| 3,133
| 64
| 121
| 48.953125
| 0.86748
| 0.416534
| 0
| 0
| 0
| 0
| 0.011508
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103448
| false
| 0
| 0.137931
| 0
| 0.275862
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
164cf23737de25e42e24acaa15cc12f759dc3323
| 12,783
|
py
|
Python
|
src/CycleGAN.py
|
sjmoran/SIDGAN
|
169bd69974bbb7f5760c28a00c231a856017e51c
|
[
"0BSD"
] | 25
|
2020-09-17T06:29:41.000Z
|
2022-03-22T06:38:37.000Z
|
src/CycleGAN.py
|
sjmoran/SIDGAN
|
169bd69974bbb7f5760c28a00c231a856017e51c
|
[
"0BSD"
] | 2
|
2021-05-30T09:00:46.000Z
|
2021-11-24T08:34:26.000Z
|
src/CycleGAN.py
|
sjmoran/SIDGAN
|
169bd69974bbb7f5760c28a00c231a856017e51c
|
[
"0BSD"
] | 5
|
2020-10-16T00:44:10.000Z
|
2021-11-04T15:59:55.000Z
|
#Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#This program is free software; you can redistribute it and/or modify it under the terms of the BSD 0-Clause License.
#This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the BSD 0-Clause License for more details.
from keras.optimizers import Adam
from models.ICCV_architectures import *
from models.unet import *
from keras.engine.topology import Network
import sys
import tensorflow as tf
from utilities.data_loader import *
class CycleGAN():
def __init__(self,
opt,
image_shape=(256 * 1, 256 * 1, 3),
load_training_data=True,
normalization=InstanceNormalization,
):
self.task = opt.task
self.im_w = opt.im_w
self.im_h = opt.im_h
self.data_root = opt.data_root
self.img_shape = image_shape
self.channels = self.img_shape[-1]
# Fetch data during training instead of pre caching all images
self.use_data_generator = True
self.generator_architecture = opt.generator_architecture
self.use_norm = opt.use_norm
self.add_extra_conv = opt.add_extra_conv
self.image_shapeA = (opt.im_w * 1, opt.im_h * 1, 3)
self.image_shapeA_in = (None, None, 3)
if self.task == 'Long2Short_raw':
self.image_shapeB = (opt.im_w * 1, opt.im_h * 1, 1)
self.image_shapeB_in = (None, None, 3)
else:
self.image_shapeB = (opt.im_w * 1, opt.im_h * 1, 3)
self.image_shapeB_in = (None, None, 3)
# Identity loss - sometimes send images from B to G_A2B (and the opposite) to teach identity mappings
self.use_identity_learning = opt.use_identity_learning
self.identity_mapping_modulus = opt.identity_mapping_modulus  # Identity mapping runs whenever the iteration number is divisible by this number
# PatchGAN - if false the discriminator learning rate should be decreased
self.use_patchgan = opt.use_patchgan
self.normalization = normalization
# Loss hyperparameters
self.lambda_1 = opt.lambda_1 # Cyclic loss weight A_2_B
self.lambda_2 = opt.lambda_2 # Cyclic loss weight B_2_A
self.lambda_D = opt.lambda_D # Weight for loss from discriminator guess on synthetic images
# Learning rates
self.learning_rate_D = opt.lr_D
self.learning_rate_G = opt.lr_G
self.beta_1 = opt.beta_1
self.beta_2 = opt.beta_2
self.batch_size = 1
self.clipvalue = opt.clipvalue
self.epsilon_norm = opt.epsilon_norm
# self.crop_res = opt.crop_res
# Resize convolution - instead of transpose convolution in deconvolution layers (uk) - can reduce checkerboard artifacts but the blurring might affect the cycle-consistency
self.use_resize_convolution = opt.use_resize_convolution
# Supervised learning part
self.use_supervised_learning = opt.use_supervised_learning
self.supervised_weight = opt.supervised_weight
self.supervised_loss = opt.supervised_loss
# optimizer
if opt.clipvalue is not None:
self.opt_D = Adam(self.learning_rate_D, self.beta_1, self.beta_2, clipvalue=self.clipvalue)
self.opt_G = Adam(self.learning_rate_G, self.beta_1, self.beta_2, clipvalue=self.clipvalue)
else:
self.opt_D = Adam(self.learning_rate_D, self.beta_1, self.beta_2)
self.opt_G = Adam(self.learning_rate_G, self.beta_1, self.beta_2)
# # ======= Discriminator model ==========
if self.generator_architecture == 'ICCV':
D_A = modelDiscriminator(self.image_shapeA, use_patchgan=self.use_patchgan,
disc_use_4_layers=True)
D_B = modelDiscriminator(self.image_shapeB, use_patchgan=self.use_patchgan,
disc_use_4_layers=True)
loss_weights_D = [0.5] # 0.5 since we train on real and synthetic images
elif self.generator_architecture == 'unet_mini':
D_A = unet_discriminator_mini(self.image_shapeA, use_norm=self.use_norm, epsilon=self.epsilon_norm,
use_patchgan=self.use_patchgan)
D_B = unet_discriminator_mini(self.image_shapeB, use_norm=self.use_norm, epsilon=self.epsilon_norm,
use_patchgan=self.use_patchgan)
loss_weights_D = [0.5] # 0.5 since we train on real and synthetic images
# Discriminator builds
image_A = Input(self.image_shapeA)
image_B = Input(self.image_shapeB)
guess_A = D_A(image_A)
guess_B = D_B(image_B)
self.D_A = Model(inputs=image_A, outputs=guess_A, name='D_A_model')
self.D_B = Model(inputs=image_B, outputs=guess_B, name='D_B_model')
if self.use_patchgan:
self.D_A.compile(optimizer=self.opt_D,
loss=self.lse,
loss_weights=loss_weights_D)
self.D_B.compile(optimizer=self.opt_D,
loss=self.lse,
loss_weights=loss_weights_D)
else:
self.D_A.compile(optimizer=self.opt_D,
loss='binary_crossentropy',
loss_weights=loss_weights_D)
self.D_B.compile(optimizer=self.opt_D,
loss='binary_crossentropy',
loss_weights=loss_weights_D)
# Use Network instances to avoid a spurious Keras error about weight discrepancies
self.D_A_static = Network(inputs=image_A, outputs=guess_A, name='D_A_static_model')
self.D_B_static = Network(inputs=image_B, outputs=guess_B, name='D_B_static_model')
# ============= Generator models =======================
# Do not update discriminator weights during generator training
self.D_A_static.trainable = False
self.D_B_static.trainable = False
# Generators
if self.generator_architecture == 'ICCV':
self.G_A2B = modelGenerator(conv_kernel_c7Ak=7,
use_resize_convolution=self.use_resize_convolution, input=self.image_shapeA,
output=self.image_shapeB, name='G_A2B_model')
self.G_B2A = modelGenerator(conv_kernel_c7Ak=7,
use_resize_convolution=self.use_resize_convolution, input=self.image_shapeB,
output=self.image_shapeA, name='G_B2A_model')
elif self.generator_architecture == 'unet_mini':
self.G_A2B = unet_generator_mini(input=self.image_shapeA,
output=self.image_shapeB,
normalization=normalization,
epsilon=self.epsilon_norm,
use_norm=self.use_norm,
add_extra_conv=self.add_extra_conv,
use_resize_convolution=self.use_resize_convolution,
name='G_A2B_model')
self.G_B2A = unet_generator_mini(input=self.image_shapeB,
output=self.image_shapeA,
normalization=normalization,
epsilon=self.epsilon_norm,
use_norm=self.use_norm,
add_extra_conv=self.add_extra_conv,
use_resize_convolution=self.use_resize_convolution,
name='G_B2A_model')
if self.use_identity_learning:
self.G_A2B.compile(optimizer=self.opt_G, loss='MAE')
self.G_B2A.compile(optimizer=self.opt_G, loss='MAE')
# Generator builds
real_A = Input(shape=self.image_shapeA, name='real_A')
real_B = Input(shape=self.image_shapeB, name='real_B')
synthetic_B = self.G_A2B(real_A)
synthetic_A = self.G_B2A(real_B)
dA_guess_synthetic = self.D_A_static(synthetic_A)
dB_guess_synthetic = self.D_B_static(synthetic_B)
reconstructed_A = self.G_B2A(synthetic_B)
reconstructed_B = self.G_A2B(synthetic_A)
model_outputs = [reconstructed_A, reconstructed_B]
compile_losses = [self.cycle_loss, self.cycle_loss, self.lse, self.lse]
compile_weights = [self.lambda_1, self.lambda_2, self.lambda_D, self.lambda_D]
model_outputs.append(dA_guess_synthetic)
model_outputs.append(dB_guess_synthetic)
if self.use_supervised_learning:
model_outputs.append(synthetic_A)
model_outputs.append(synthetic_B)
if self.supervised_loss == 'MAE':
compile_losses.append('MAE')
compile_losses.append('MAE')
compile_weights.append(self.supervised_weight)
compile_weights.append(self.supervised_weight)
self.G_model = Model(inputs=[real_A, real_B],
outputs=model_outputs,
name='G_model')
self.G_model.compile(optimizer=self.opt_G,
loss=compile_losses,
loss_weights=compile_weights)
# ======= Data ==========
# Use 'None' to fetch all available images
nr_A_test_imgs = 1000
nr_B_test_imgs = 1000
if self.use_data_generator:
print('--- Using dataloader during training ---')
else:
print('--- Caching data ---')
sys.stdout.flush()
if load_training_data:
if self.use_data_generator:
self.data_generator = load_data(task=self.task, root=self.data_root, batch_size=self.batch_size,
crop_size=self.im_w, generator=True)
# Only store test images
if opt.task == 'Vimeo2Long_SID':
self.A_test, self.B_test, test_A_image_names, test_B_image_names = get_test_data(nr_A_test_imgs,
nr_B_test_imgs)
else:
self.A_test = []
self.B_test = []
self.A_train = []
self.B_train = []
if not self.use_data_generator:
print('Data has been loaded')
def load_model_and_weights(self, model, weights_path, iteration, by_name):
name = model.name + '_weights_epoch_' + str(iteration)
final_path = os.path.join(root, weights_path, '{}.hdf5'.format(name))
model.load_weights(final_path, by_name=by_name)
def print_info(self):
print('Initializing Cycle GAN with parameters ...')
print('task: ', self.task)
print('generator architecture: ', self.generator_architecture)
print('image width: ', self.im_w)
print('image height: ', self.im_h)
print('learning date G: ', self.learning_rate_G)
print('learning date D: ', self.learning_rate_D)
print('use patchGAN: ', self.use_patchgan)
print('use_identity_learning: ', self.use_identity_learning)
print('normalization: ', self.normalization)
print('identity_mapping_modulus: ', self.identity_mapping_modulus)
print('lambda_1: ', self.lambda_1)
print('lambda_2: ', self.lambda_2)
print('lambda_D: ', self.lambda_D)
print('beta_1: ', self.beta_1)
print('beta_2: ', self.beta_2)
print('use_supervised_learning: ', self.use_supervised_learning)
print('supervised_weight: ', self.supervised_weight)
print('supervised_loss: ', self.supervised_loss)
def lse(self, y_true, y_pred):
loss = tf.reduce_mean(tf.squared_difference(y_pred, y_true))
return loss
def cycle_loss(self, y_true, y_pred):
loss = tf.reduce_mean(tf.abs(y_pred - y_true))
return loss
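A minimal NumPy sketch of the two loss terms defined at the end of the class above, useful as a reference outside a TensorFlow session.
import numpy as np

def lse(y_true, y_pred):
    # Mean squared error, mirroring CycleGAN.lse above.
    return np.mean(np.square(y_pred - y_true))

def cycle_loss(y_true, y_pred):
    # Mean absolute error, mirroring CycleGAN.cycle_loss above.
    return np.mean(np.abs(y_pred - y_true))

print(lse(np.zeros(3), np.ones(3)), cycle_loss(np.zeros(3), np.ones(3)))  # 1.0 1.0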
| 46.824176
| 181
| 0.586013
| 1,539
| 12,783
| 4.573749
| 0.17154
| 0.02685
| 0.025572
| 0.022873
| 0.362978
| 0.318653
| 0.270493
| 0.25032
| 0.230999
| 0.205427
| 0
| 0.011807
| 0.33083
| 12,783
| 272
| 182
| 46.996324
| 0.811083
| 0.127044
| 0
| 0.242424
| 0
| 0
| 0.059046
| 0.00654
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025253
| false
| 0
| 0.035354
| 0
| 0.075758
| 0.116162
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
164e763a74e067d7e8c03c1d5ec3635ec5b33a02
| 876
|
py
|
Python
|
application/fastapi/main.py
|
edson-dev/neoway
|
f792e16c0f627e8b94b54f001e87e076f36311ab
|
[
"MIT"
] | null | null | null |
application/fastapi/main.py
|
edson-dev/neoway
|
f792e16c0f627e8b94b54f001e87e076f36311ab
|
[
"MIT"
] | null | null | null |
application/fastapi/main.py
|
edson-dev/neoway
|
f792e16c0f627e8b94b54f001e87e076f36311ab
|
[
"MIT"
] | null | null | null |
import uvicorn
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from routes import doc, api
from fastapi.templating import Jinja2Templates
from starlette.requests import Request
# configure static files and Jinja2 templates
app = FastAPI(
title=f"Technical Case",
description=f"endpoint para subir planilhas para banco de dados relacional Postgres.",
version=f"0.0.1",
static_directory="static"
)
app.mount("/static", StaticFiles(directory="static"), name="static")
# import factory builders and initialize the route modules
doc.init_app(app)
api.init_app(app, "/api")
#
templates = Jinja2Templates(directory="templates")
#views
@app.get("/", tags=["/view"])
async def index(request: Request):
return templates.TemplateResponse("index.html", {"request": request})
if __name__ == "__main__":
uvicorn.run("main:app", host="0.0.0.0", port=8080)
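A minimal sketch, assuming FastAPI and its test-client dependency (httpx) are installed, of how a route can be exercised without starting uvicorn; the app and route here are stand-ins rather than the application above, which needs its static/ and templates/ directories on disk.
from fastapi import FastAPI
from fastapi.testclient import TestClient

demo = FastAPI()

@demo.get("/ping")
async def ping():
    # Stand-in route; the real app serves templates/index.html at "/".
    return {"ok": True}

client = TestClient(demo)
assert client.get("/ping").json() == {"ok": True}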
| 28.258065
| 90
| 0.745434
| 117
| 876
| 5.487179
| 0.529915
| 0.012461
| 0.031153
| 0.040498
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018325
| 0.127854
| 876
| 30
| 91
| 29.2
| 0.82199
| 0.099315
| 0
| 0
| 0
| 0
| 0.220663
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.285714
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
164f24393208739c6bb0a99eb1b2e8ed9fcd90d3
| 58,056
|
py
|
Python
|
civis/io/_tables.py
|
jsfalk/civis-python
|
39b6498b2d67d838d720d9631d74f3d3d43f7c1a
|
[
"BSD-3-Clause"
] | null | null | null |
civis/io/_tables.py
|
jsfalk/civis-python
|
39b6498b2d67d838d720d9631d74f3d3d43f7c1a
|
[
"BSD-3-Clause"
] | null | null | null |
civis/io/_tables.py
|
jsfalk/civis-python
|
39b6498b2d67d838d720d9631d74f3d3d43f7c1a
|
[
"BSD-3-Clause"
] | null | null | null |
import json
import concurrent.futures
import csv
from os import path
import io
import logging
import os
import shutil
from tempfile import TemporaryDirectory
import warnings
import zlib
import gzip
import zipfile
from civis import APIClient
from civis._utils import maybe_get_random_name
from civis.base import EmptyResultError, CivisImportError
from civis.futures import CivisFuture
from civis.io import civis_to_file, file_to_civis, query_civis
from civis.utils import run_job
from civis._deprecation import deprecate_param
import requests
try:
from io import StringIO
except ImportError:
from cStringIO import StringIO
try:
import pandas as pd
NO_PANDAS = False
except ImportError:
NO_PANDAS = True
CHUNK_SIZE = 32 * 1024
log = logging.getLogger(__name__)
__all__ = ['read_civis', 'read_civis_sql', 'civis_to_csv',
'civis_to_multifile_csv', 'dataframe_to_civis', 'csv_to_civis',
'civis_file_to_table', 'split_schema_tablename',
'export_to_civis_file']
DELIMITERS = {
',': 'comma',
'\t': 'tab',
'|': 'pipe',
}
@deprecate_param('v2.0.0', 'api_key')
def read_civis(table, database, columns=None, use_pandas=False,
job_name=None, api_key=None, client=None, credential_id=None,
polling_interval=None, archive=False, hidden=True, **kwargs):
"""Read data from a Civis table.
Parameters
----------
table : str
Name of table, including schema, in the database. E.g.
``'my_schema.my_table'``. Schemas or tablenames with periods must
be double quoted, e.g. ``'my_schema."my.table"'``.
database : str or int
Read data from this database. Can be the database name or ID.
columns : list, optional
A list of column names. Column SQL transformations are possible.
If omitted, all columns are exported.
use_pandas : bool, optional
If ``True``, return a :class:`pandas:pandas.DataFrame`. Otherwise,
return a list of results from :func:`python:csv.reader`.
job_name : str, optional
A name to give the job. If omitted, a random job name will be
used.
api_key : DEPRECATED str, optional
Your Civis API key. If not given, the :envvar:`CIVIS_API_KEY`
environment variable will be used.
client : :class:`civis.APIClient`, optional
If not provided, an :class:`civis.APIClient` object will be
created from the :envvar:`CIVIS_API_KEY`.
credential_id : str or int, optional
The database credential ID. If ``None``, the default credential
will be used.
polling_interval : int or float, optional
Number of seconds to wait between checks for query completion.
archive : bool, optional (deprecated)
If ``True``, archive the import job as soon as it completes.
hidden : bool, optional
If ``True`` (the default), this job will not appear in the Civis UI.
**kwargs : kwargs
Extra keyword arguments are passed into
:func:`pandas:pandas.read_csv` if `use_pandas` is ``True`` or
passed into :func:`python:csv.reader` if `use_pandas` is
``False``.
Returns
-------
data : :class:`pandas:pandas.DataFrame` or list
A list of rows (with header as first row) if `use_pandas` is
``False``, otherwise a `pandas` `DataFrame`. Note that if
`use_pandas` is ``False``, no parsing of types is performed and
each row will be a list of strings.
Raises
------
ImportError
If `use_pandas` is ``True`` and `pandas` is not installed.
Examples
--------
>>> table = "schema.table"
>>> database = "my_data"
>>> columns = ["column_a", "ROW_NUMBER() OVER(ORDER BY date) AS order"]
>>> data = read_civis(table, database, columns=columns)
>>> columns = data.pop(0)
>>> col_a_index = columns.index("column_a")
>>> col_a = [row[col_a_index] for row in data]
>>> df = read_civis("schema.table", "my_data", use_pandas=True)
>>> col_a = df["column_a"]
See Also
--------
civis.io.read_civis_sql : Read directly into memory using SQL.
civis.io.civis_to_csv : Write directly to csv.
civis.io.export_to_civis_file : Store a SQL query's results in a Civis file
"""
if use_pandas and NO_PANDAS:
raise ImportError("use_pandas is True but pandas is not installed.")
if archive:
warnings.warn("`archive` is deprecated and will be removed in v2.0.0. "
"Use `hidden` instead.", FutureWarning)
if client is None:
# Instantiate client here in case users provide a (deprecated) api_key
client = APIClient(api_key=api_key)
sql = _get_sql_select(table, columns)
data = read_civis_sql(sql=sql, database=database, use_pandas=use_pandas,
job_name=job_name, client=client,
credential_id=credential_id,
polling_interval=polling_interval,
archive=archive, hidden=hidden, **kwargs)
return data
def export_to_civis_file(sql, database, job_name=None, client=None,
credential_id=None, polling_interval=None,
hidden=True, csv_settings=None):
"""Store results of a query to a Civis file
Parameters
----------
sql : str
The SQL select string to be executed.
database : str or int
Execute the query against this database. Can be the database name
or ID.
job_name : str, optional
A name to give the job. If omitted, a random job name will be
used.
client : :class:`civis.APIClient`, optional
If not provided, an :class:`civis.APIClient` object will be
created from the :envvar:`CIVIS_API_KEY`.
credential_id : str or int, optional
The database credential ID. If ``None``, the default credential
will be used.
polling_interval : int or float, optional
Number of seconds to wait between checks for query completion.
hidden : bool, optional
If ``True`` (the default), this job will not appear in the Civis UI.
csv_settings : dict, optional
A dictionary of csv_settings to pass to
:func:`civis.APIClient.scripts.post_sql`.
Returns
-------
fut : :class:`~civis.futures.CivisFuture`
A future which returns the response from
:func:`civis.APIClient.scripts.get_sql_runs` after the sql query
has completed and the result has been stored as a Civis file.
Examples
--------
>>> sql = "SELECT * FROM schema.table"
>>> fut = export_to_civis_file(sql, "my_database")
>>> file_id = fut.result()['output'][0]["file_id"]
See Also
--------
civis.io.read_civis : Read directly into memory without SQL.
civis.io.read_civis_sql : Read results of a SQL query into memory.
civis.io.civis_to_csv : Write directly to a CSV file.
civis.io.civis_file_to_table : Upload a Civis file to a Civis table
"""
client = client or APIClient()
script_id, run_id = _sql_script(client=client,
sql=sql,
database=database,
job_name=job_name,
credential_id=credential_id,
csv_settings=csv_settings,
hidden=hidden)
fut = CivisFuture(client.scripts.get_sql_runs, (script_id, run_id),
polling_interval=polling_interval, client=client,
poll_on_creation=False)
return fut
@deprecate_param('v2.0.0', 'api_key')
def read_civis_sql(sql, database, use_pandas=False, job_name=None,
api_key=None, client=None, credential_id=None,
polling_interval=None, archive=False,
hidden=True, **kwargs):
"""Read data from Civis using a custom SQL string.
The custom SQL string will be executed twice; once to attempt to
retrieve headers and once to retrieve the data. This is done to
use a more performant method for retrieving the data. The first
execution of the custom SQL is controlled such that changes in
state cannot occur (e.g., INSERT, UPDATE, DELETE, etc.).
Parameters
----------
sql : str
The SQL select string to be executed.
database : str or int
Execute the query against this database. Can be the database name
or ID.
use_pandas : bool, optional
If ``True``, return a :class:`pandas:pandas.DataFrame`. Otherwise,
return a list of results from :func:`python:csv.reader`.
job_name : str, optional
A name to give the job. If omitted, a random job name will be
used.
api_key : DEPRECATED str, optional
Your Civis API key. If not given, the :envvar:`CIVIS_API_KEY`
environment variable will be used.
client : :class:`civis.APIClient`, optional
If not provided, an :class:`civis.APIClient` object will be
created from the :envvar:`CIVIS_API_KEY`.
credential_id : str or int, optional
The database credential ID. If ``None``, the default credential
will be used.
polling_interval : int or float, optional
Number of seconds to wait between checks for query completion.
archive : bool, optional (deprecated)
If ``True``, archive the import job as soon as it completes.
hidden : bool, optional
If ``True`` (the default), this job will not appear in the Civis UI.
**kwargs : kwargs
Extra keyword arguments are passed into
:func:`pandas:pandas.read_csv` if `use_pandas` is ``True`` or
passed into :func:`python:csv.reader` if `use_pandas` is
``False``.
Returns
-------
data : :class:`pandas:pandas.DataFrame` or list
A list of rows (with header as first row) if `use_pandas` is
``False``, otherwise a `pandas` `DataFrame`. Note that if
`use_pandas` is ``False``, no parsing of types is performed and
each row will be a list of strings.
Raises
------
ImportError
If `use_pandas` is ``True`` and `pandas` is not installed.
Examples
--------
>>> sql = "SELECT * FROM schema.table"
>>> df = read_civis_sql(sql, "my_database", use_pandas=True)
>>> col_a = df["column_a"]
>>> data = read_civis_sql(sql, "my_database")
>>> columns = data.pop(0)
>>> col_a_index = columns.index("column_a")
>>> col_a = [row[col_a_index] for row in data]
Notes
-----
This reads the data into memory.
See Also
--------
civis.io.read_civis : Read directly into memory without SQL.
civis.io.civis_to_csv : Write directly to a CSV file.
"""
if client is None:
client = APIClient(api_key=api_key)
if use_pandas and NO_PANDAS:
raise ImportError("use_pandas is True but pandas is not installed.")
if archive:
warnings.warn("`archive` is deprecated and will be removed in v2.0.0. "
"Use `hidden` instead.", FutureWarning)
db_id = client.get_database_id(database)
credential_id = credential_id or client.default_credential
# Try to get headers separately. In most scenarios this will greatly
# reduce the work that Platform does to provide a single output file
# with headers prepended to it due to how distributed databases export
# data at scale.
headers = _get_headers(client, sql, db_id, credential_id, polling_interval)
# include_header defaults to True in the API.
include_header = True if headers is None else False
csv_settings = dict(include_header=include_header,
compression='gzip')
script_id, run_id = _sql_script(client, sql, db_id,
job_name, credential_id,
csv_settings=csv_settings,
hidden=hidden)
fut = CivisFuture(client.scripts.get_sql_runs, (script_id, run_id),
polling_interval=polling_interval, client=client,
poll_on_creation=False)
if archive:
def f(x):
return client.scripts.put_sql_archive(script_id, True)
fut.add_done_callback(f)
fut.result()
outputs = client.scripts.get_sql_runs(script_id, run_id)["output"]
if not outputs:
raise EmptyResultError("Query {} returned no output."
.format(script_id))
url = outputs[0]["path"]
file_id = outputs[0]["file_id"]
log.debug('Exported results to Civis file %s (%s)',
outputs[0]["output_name"], file_id)
if use_pandas:
# allows users to enter their own names parameter
_kwargs = {'names': headers}
_kwargs.update(kwargs)
_kwargs['compression'] = 'gzip'
data = pd.read_csv(url, **_kwargs)
else:
response = requests.get(url, stream=True)
response.raise_for_status()
with StringIO() as buf:
if headers:
buf.write(','.join(headers) + '\n')
_decompress_stream(response, buf, write_bytes=False)
buf.seek(0)
data = list(csv.reader(buf, **kwargs))
return data
@deprecate_param('v2.0.0', 'api_key')
def civis_to_csv(filename, sql, database, job_name=None, api_key=None,
client=None, credential_id=None, include_header=True,
compression='none', delimiter=',', unquoted=False,
archive=False, hidden=True, polling_interval=None):
"""Export data from Civis to a local CSV file.
The custom SQL string will be executed twice; once to attempt to
retrieve headers and once to retrieve the data. This is done to
use a more performant method for retrieving the data. The first
execution of the custom SQL is controlled such that changes in
state cannot occur (e.g., INSERT, UPDATE, DELETE, etc.).
Parameters
----------
filename : str
Download exported data into this file.
sql : str
The SQL select string to be executed.
database : str or int
Export data from this database. Can be the database name or ID.
job_name : str, optional
A name to give the job. If omitted, a random job name will be
used.
api_key : DEPRECATED str, optional
Your Civis API key. If not given, the :envvar:`CIVIS_API_KEY`
environment variable will be used.
client : :class:`civis.APIClient`, optional
If not provided, an :class:`civis.APIClient` object will be
created from the :envvar:`CIVIS_API_KEY`.
credential_id : str or int, optional
The ID of the database credential. If ``None``, the default
credential will be used.
include_header: bool, optional
If ``True``, the first line of the CSV will be headers.
Default: ``True``.
compression: str, optional
Type of compression to use, if any. One of ``'none'``, ``'zip'``, or
``'gzip'``. Default ``'none'``. ``'gzip'`` currently returns a file
with no compression unless include_header is set to False. In a
future release, a ``'gzip'`` compressed file will be returned for
all cases.
delimiter: str, optional
Which delimiter to use, if any. One of ``','``, ``'\t'``, or
``'|'``. Default: ``','``.
unquoted: bool, optional
Whether or not to quote fields. Default: ``False``.
polling_interval : int or float, optional
Number of seconds to wait between checks for query completion.
archive : bool, optional (deprecated)
If ``True``, archive the import job as soon as it completes.
hidden : bool, optional
If ``True`` (the default), this job will not appear in the Civis UI.
Returns
-------
results : :class:`~civis.futures.CivisFuture`
A `CivisFuture` object.
Examples
--------
>>> sql = "SELECT * FROM schema.table"
>>> fut = civis_to_csv("file.csv", sql, "my_database")
>>> fut.result() # Wait for job to complete
See Also
--------
civis.io.read_civis : Read table contents into memory.
civis.io.read_civis_sql : Read results of a SQL query into memory.
civis.io.export_to_civis_file : Store a SQL query's results in a Civis file
"""
if archive:
warnings.warn("`archive` is deprecated and will be removed in v2.0.0. "
"Use `hidden` instead.", FutureWarning)
if client is None:
client = APIClient(api_key=api_key)
db_id = client.get_database_id(database)
credential_id = credential_id or client.default_credential
# don't fix bug that would cause breaking change for now
# when gzip compression is requested, a gzip file is not actually returned
# instead the gzip file is decompressed during download
if compression == 'gzip' and include_header:
compression = 'none'
# don't support parallel unload; the output format
# is different which would introduce a breaking change
headers = b''
delimiter = DELIMITERS.get(delimiter)
if not delimiter:
raise ValueError("delimiter must be one of {}"
.format(DELIMITERS.keys()))
# always set compression to gzip to reduce I/O
csv_settings = dict(include_header=include_header,
compression='gzip',
column_delimiter=delimiter,
unquoted=unquoted,
filename_prefix=None,
force_multifile=False)
script_id, run_id = _sql_script(client, sql, db_id, job_name,
credential_id, hidden=hidden,
csv_settings=csv_settings)
fut = CivisFuture(client.scripts.get_sql_runs, (script_id, run_id),
polling_interval=polling_interval, client=client,
poll_on_creation=False)
download = _download_callback(script_id, run_id, filename,
headers, compression)
fut.add_done_callback(download)
if archive:
def f(x):
return client.scripts.put_sql_archive(script_id, True)
fut.add_done_callback(f)
return fut
@deprecate_param('v2.0.0', 'api_key')
def civis_to_multifile_csv(sql, database, job_name=None, api_key=None,
client=None, credential_id=None,
include_header=True,
compression='none', delimiter='|',
max_file_size=None,
unquoted=False, prefix=None,
polling_interval=None, hidden=True):
"""Unload the result of SQL query and return presigned urls.
This function is intended for unloading large queries/tables from Redshift,
as it uses a 'PARALLEL ON' S3 unload. It returns a manifest file similar to
that of conventional S3 UNLOAD statements, except that the CSV parts are
accessible via both files endpoint IDs and presigned S3 URLs.
Parameters
----------
sql : str
The SQL select string to be executed.
database : str or int
Execute the query against this database. Can be the database name
or ID.
job_name : str, optional
A name to give the job. If omitted, a random job name will be
used.
api_key : DEPRECATED str, optional
Your Civis API key. If not given, the :envvar:`CIVIS_API_KEY`
environment variable will be used.
client : :class:`civis.APIClient`, optional
If not provided, an :class:`civis.APIClient` object will be
created from the :envvar:`CIVIS_API_KEY`.
credential_id : str or int, optional
The database credential ID. If ``None``, the default credential
will be used.
include_header: bool, optional
If ``True`` include a key in the returned dictionary containing a list
of column names. Default: ``True``.
compression: str, optional
Type of compression to use, if any. One of ``'none'``, ``'zip'``, or
``'gzip'``. Default ``'none'``.
delimiter: str, optional
Which delimiter to use, if any. One of ``','``, ``'\t'``, or
``'|'``. Default: ``'|'``.
max_file_size: int, optional
Maximum number of Megabytes each created file will be.
unquoted: bool, optional
Whether or not to quote fields. Default: ``False``.
prefix: str, optional
A user specified filename prefix for the output file to have. Default:
``None``.
polling_interval : int or float, optional
Number of seconds to wait between checks for query completion.
hidden : bool, optional
If ``True`` (the default), this job will not appear in the Civis UI.
Returns
-------
unload_manifest: dict
A dictionary resembling an AWS manifest file. Has the following keys:
'query': str
The query.
'header': list of str
The columns from the query.
'entries': list of dict
Each dict has the following keys:
'id': int
File ID
'name': str
Filename
'size': int
File size in bytes
'url': str
Unsigned S3 URL ('s3://...')
'url_signed': str
Signed S3 URL ('https://...')
'unquoted': bool
Whether the cells are quoted.
'compression': str
Type of compression used.
'delimiter': str
Delimiter that separates the cells.
Examples
--------
>>> sql = "SELECT * FROM schema.my_big_table"
>>> database = "my_database"
>>> delimiter = "|"
>>> manifest = civis_to_multifile_csv(sql, database, delimiter=delimiter)
>>> ids = [entry['id'] for entry in manifest['entries']]
>>> buf = BytesIO()
>>> civis_to_file(ids[0], buf)
>>> buf.seek(0)
>>> df = pd.read_csv(buf, delimiter=delimiter)
See Also
--------
civis.APIClient.scripts.post_sql
"""
if client is None:
client = APIClient(api_key=api_key)
delimiter = DELIMITERS.get(delimiter)
assert delimiter, "delimiter must be one of {}".format(DELIMITERS.keys())
csv_settings = dict(include_header=include_header,
compression=compression,
column_delimiter=delimiter,
unquoted=unquoted,
filename_prefix=prefix,
force_multifile=True,
max_file_size=max_file_size)
script_id, run_id = _sql_script(client, sql, database, job_name,
credential_id, hidden,
csv_settings=csv_settings)
fut = CivisFuture(client.scripts.get_sql_runs, (script_id, run_id),
polling_interval=polling_interval, client=client,
poll_on_creation=False)
outputs = fut.result()["output"]
if not outputs:
raise EmptyResultError("Unload query {} returned no manifest."
.format(script_id))
buf = io.BytesIO()
civis_to_file(outputs[0]['file_id'], buf, client=client)
txt = io.TextIOWrapper(buf, encoding='utf-8')
txt.seek(0)
unload_manifest = json.load(txt)
return unload_manifest
@deprecate_param('v2.0.0', 'api_key', 'headers')
def dataframe_to_civis(df, database, table, api_key=None, client=None,
max_errors=None, existing_table_rows="fail",
diststyle=None, distkey=None,
sortkey1=None, sortkey2=None,
table_columns=None,
headers=None, credential_id=None,
primary_keys=None, last_modified_keys=None,
execution="immediate",
delimiter=None, polling_interval=None,
archive=False, hidden=True, **kwargs):
"""Upload a `pandas` `DataFrame` into a Civis table.
The `DataFrame`'s index will not be included. To store the index
along with the other values, use `df.reset_index()` instead
of `df` as the first argument to this function.
Parameters
----------
df : :class:`pandas:pandas.DataFrame`
The `DataFrame` to upload to Civis.
database : str or int
Upload data into this database. Can be the database name or ID.
table : str
The schema and table you want to upload to. E.g.,
``'scratch.table'``. Schemas or tablenames with periods must
be double quoted, e.g. ``'scratch."my.table"'``.
api_key : DEPRECATED str, optional
Your Civis API key. If not given, the :envvar:`CIVIS_API_KEY`
environment variable will be used.
client : :class:`civis.APIClient`, optional
If not provided, an :class:`civis.APIClient` object will be
created from the :envvar:`CIVIS_API_KEY`.
max_errors : int, optional
The maximum number of rows with errors to remove from the import
before failing.
existing_table_rows : str, optional
The behaviour if a table with the requested name already exists.
One of ``'fail'``, ``'truncate'``, ``'append'``, ``'drop'``, or
``'upsert'``. Defaults to ``'fail'``.
diststyle : str, optional
The distribution style for the table.
One of ``'even'``, ``'all'`` or ``'key'``.
distkey : str, optional
The column to use as the distkey for the table.
sortkey1 : str, optional
The column to use as the sortkey for the table.
sortkey2 : str, optional
The second column in a compound sortkey for the table.
table_columns : list[Dict[str, str]], optional
A list of dictionaries corresponding to the columns in
the source file. Each dictionary should have keys
        for column "name" and "sqlType". The import will only copy these
        columns, regardless of whether there are more columns in the table.
headers : bool, optional [DEPRECATED]
Whether or not the first row of the file should be treated as
headers. The default, ``None``, attempts to autodetect whether
or not the first row contains headers.
This parameter has no effect in versions >= 1.11 and will be
removed in v2.0. Tables will always be written with column
names read from the DataFrame. Use the `header` parameter
(which will be passed directly to :func:`~pandas.DataFrame.to_csv`)
to modify the column names in the Civis Table.
credential_id : str or int, optional
The ID of the database credential. If ``None``, the default
credential will be used.
primary_keys: list[str], optional
A list of the primary key column(s) of the destination table that
uniquely identify a record. If existing_table_rows is "upsert", this
field is required. Note that this is true regardless of whether the
destination database itself requires a primary key.
last_modified_keys: list[str], optional
A list of the columns indicating a record has been updated. If
existing_table_rows is "upsert", this field is required.
execution: string, optional, default "immediate"
One of "delayed" or "immediate". If "immediate", refresh column
statistics as part of the run. If "delayed", flag the table for a
deferred statistics update; column statistics may not be available
for up to 24 hours. In addition, if existing_table_rows is "upsert",
delayed executions move data from staging table to final table after a
brief delay, in order to accommodate multiple concurrent imports to the
same destination table.
polling_interval : int or float, optional
Number of seconds to wait between checks for job completion.
archive : bool, optional (deprecated)
If ``True``, archive the import job as soon as it completes.
hidden : bool, optional
If ``True`` (the default), this job will not appear in the Civis UI.
**kwargs : kwargs
Extra keyword arguments will be passed to
:meth:`pandas:pandas.DataFrame.to_csv`.
Returns
-------
fut : :class:`~civis.futures.CivisFuture`
A `CivisFuture` object.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
>>> fut = civis.io.dataframe_to_civis(df, 'my-database',
... 'scratch.df_table')
>>> fut.result()
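    A sketch of an upsert, assuming the hypothetical columns ``id`` (primary
    key) and ``updated_at`` (last-modified marker) exist in `df`:
    >>> fut = civis.io.dataframe_to_civis(df, 'my-database',
    ...                                    'scratch.df_table',
    ...                                    existing_table_rows='upsert',
    ...                                    primary_keys=['id'],
    ...                                    last_modified_keys=['updated_at'])
    >>> fut.result()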
See Also
--------
:func:`~pandas.DataFrame.to_csv`
"""
if client is None:
client = APIClient(api_key=api_key)
if archive:
warnings.warn("`archive` is deprecated and will be removed in v2.0.0. "
"Use `hidden` instead.", FutureWarning)
headers = False if kwargs.get('header') is False else True
with TemporaryDirectory() as tmp_dir:
tmp_path = os.path.join(tmp_dir, 'dataframe_to_civis.csv')
to_csv_kwargs = {'encoding': 'utf-8', 'index': False}
to_csv_kwargs.update(kwargs)
df.to_csv(tmp_path, **to_csv_kwargs)
_, name = split_schema_tablename(table)
file_id = file_to_civis(tmp_path, name, client=client)
delimiter = ','
fut = civis_file_to_table(file_id, database, table,
client=client, max_errors=max_errors,
existing_table_rows=existing_table_rows,
diststyle=diststyle, distkey=distkey,
sortkey1=sortkey1, sortkey2=sortkey2,
table_columns=table_columns,
delimiter=delimiter, headers=headers,
credential_id=credential_id,
primary_keys=primary_keys,
last_modified_keys=last_modified_keys,
escaped=False, execution=execution,
polling_interval=polling_interval,
hidden=hidden)
return fut
@deprecate_param('v2.0.0', 'api_key')
def csv_to_civis(filename, database, table, api_key=None, client=None,
max_errors=None, existing_table_rows="fail",
diststyle=None, distkey=None,
sortkey1=None, sortkey2=None,
table_columns=None,
delimiter=",", headers=None,
primary_keys=None, last_modified_keys=None,
escaped=False, execution="immediate",
credential_id=None, polling_interval=None, archive=False,
hidden=True):
"""Upload the contents of a local CSV file to Civis.
Parameters
----------
filename : str
Upload the contents of this file.
database : str or int
Upload data into this database. Can be the database name or ID.
table : str
The schema and table you want to upload to. E.g.,
``'scratch.table'``.
api_key : DEPRECATED str, optional
Your Civis API key. If not given, the :envvar:`CIVIS_API_KEY`
environment variable will be used.
client : :class:`civis.APIClient`, optional
If not provided, an :class:`civis.APIClient` object will be
created from the :envvar:`CIVIS_API_KEY`.
max_errors : int, optional
The maximum number of rows with errors to remove from the import
before failing.
existing_table_rows : str, optional
The behaviour if a table with the requested name already exists.
One of ``'fail'``, ``'truncate'``, ``'append'``, ``'drop'``, or
``'upsert'``. Defaults to ``'fail'``.
diststyle : str, optional
The distribution style for the table.
One of ``'even'``, ``'all'`` or ``'key'``.
distkey : str, optional
The column to use as the distkey for the table.
sortkey1 : str, optional
The column to use as the sortkey for the table.
sortkey2 : str, optional
The second column in a compound sortkey for the table.
table_columns : list[Dict[str, str]], optional
A list of dictionaries corresponding to the columns in
the source file. Each dictionary should have keys
        for column "name" and "sqlType". The import will only copy these
        columns, regardless of whether there are more columns in the table.
delimiter : string, optional
The column delimiter. One of ``','``, ``'\\t'`` or ``'|'``.
headers : bool, optional
Whether or not the first row of the file should be treated as
headers. The default, ``None``, attempts to autodetect whether
or not the first row contains headers.
primary_keys: list[str], optional
A list of the primary key column(s) of the destination table that
uniquely identify a record. If existing_table_rows is "upsert", this
field is required. Note that this is true regardless of whether the
destination database itself requires a primary key.
last_modified_keys: list[str], optional
A list of the columns indicating a record has been updated. If
existing_table_rows is "upsert", this field is required.
escaped: bool, optional
A boolean value indicating whether or not the source file has quotes
escaped with a backslash. Defaults to false.
execution: string, optional, default "immediate"
One of "delayed" or "immediate". If "immediate", refresh column
statistics as part of the run. If "delayed", flag the table for a
deferred statistics update; column statistics may not be available
for up to 24 hours. In addition, if existing_table_rows is "upsert",
delayed executions move data from staging table to final table after a
brief delay, in order to accommodate multiple concurrent imports to the
same destination table.
credential_id : str or int, optional
The ID of the database credential. If ``None``, the default
credential will be used.
polling_interval : int or float, optional
Number of seconds to wait between checks for job completion.
archive : bool, optional (deprecated)
If ``True``, archive the import job as soon as it completes.
hidden : bool, optional
If ``True`` (the default), this job will not appear in the Civis UI.
Returns
-------
results : :class:`~civis.futures.CivisFuture`
A `CivisFuture` object.
Notes
-----
This reads the contents of `filename` into memory.
Examples
--------
>>> with open('input_file.csv', 'w') as _input:
... _input.write('a,b,c\\n1,2,3')
>>> fut = civis.io.csv_to_civis('input_file.csv',
... 'my-database',
... 'scratch.my_data')
>>> fut.result()
"""
if client is None:
client = APIClient(api_key=api_key)
if archive:
warnings.warn("`archive` is deprecated and will be removed in v2.0.0. "
"Use `hidden` instead.", FutureWarning)
name = path.basename(filename)
with open(filename, "rb") as data:
file_id = file_to_civis(data, name, client=client)
log.debug('Uploaded file %s to Civis file %s', filename, file_id)
fut = civis_file_to_table(file_id, database, table,
client=client, max_errors=max_errors,
existing_table_rows=existing_table_rows,
diststyle=diststyle, distkey=distkey,
sortkey1=sortkey1, sortkey2=sortkey2,
table_columns=table_columns,
delimiter=delimiter, headers=headers,
credential_id=credential_id,
primary_keys=primary_keys,
last_modified_keys=last_modified_keys,
escaped=escaped, execution=execution,
polling_interval=polling_interval,
hidden=hidden)
return fut
@deprecate_param('v2.0.0', 'file_id')
def civis_file_to_table(file_id, database, table, client=None,
max_errors=None, existing_table_rows="fail",
diststyle=None, distkey=None,
sortkey1=None, sortkey2=None,
table_columns=None,
primary_keys=None, last_modified_keys=None,
escaped=False, execution="immediate",
delimiter=None, headers=None,
credential_id=None, polling_interval=None,
hidden=True):
"""Upload the contents of one or more Civis files to a Civis table.
    All provided files will be loaded in parallel as a single atomic unit;
    they should share the same columns in the same order and be in the same
    format.
Parameters
----------
file_id : int or list[int]
Civis file ID or a list of Civis file IDs. Reference by name to this
argument is deprecated, as the name will change in v2.0.0.
database : str or int
Upload data into this database. Can be the database name or ID.
table : str
The schema and table you want to upload to. E.g.,
``'scratch.table'``.
client : :class:`civis.APIClient`, optional
If not provided, an :class:`civis.APIClient` object will be
created from the :envvar:`CIVIS_API_KEY`.
max_errors : int, optional
The maximum number of rows with errors to remove from the import
before failing. If multiple files are provided, this limit applies
across all files combined.
existing_table_rows : str, optional
The behaviour if a table with the requested name already exists.
One of ``'fail'``, ``'truncate'``, ``'append'``, ``'drop'``, or
``'upsert'``. Defaults to ``'fail'``.
diststyle : str, optional
The distribution style for the table.
One of ``'even'``, ``'all'`` or ``'key'``.
distkey : str, optional
The column to use as the distkey for the table.
sortkey1 : str, optional
The column to use as the sortkey for the table.
sortkey2 : str, optional
The second column in a compound sortkey for the table.
table_columns : list[Dict[str, str]], optional
A list of dictionaries corresponding to the columns in
the source file. Each dictionary should have keys
        for column "name" and "sqlType". The import will only copy these
        columns, regardless of whether there are more columns in the table.
primary_keys: list[str], optional
A list of the primary key column(s) of the destination table that
uniquely identify a record. If existing_table_rows is "upsert", this
field is required. Note that this is true regardless of whether the
destination database itself requires a primary key.
last_modified_keys: list[str], optional
A list of the columns indicating a record has been updated. If
existing_table_rows is "upsert", this field is required.
escaped: bool, optional
A boolean value indicating whether or not the source file(s) escape
quotes with a backslash. Defaults to false.
execution: string, optional, default "immediate"
One of "delayed" or "immediate". If "immediate", refresh column
statistics as part of the run. If "delayed", flag the table for a
deferred statistics update; column statistics may not be available
for up to 24 hours. In addition, if existing_table_rows is "upsert",
delayed executions move data from staging table to final table after a
brief delay, in order to accommodate multiple concurrent imports to the
same destination table.
delimiter : string, optional
The column delimiter. One of ``','``, ``'\\t'`` or ``'|'``. If not
provided, will attempt to auto-detect.
headers : bool, optional
Whether or not the first row of the file should be treated as
headers. The default, ``None``, attempts to autodetect whether
or not the first row contains headers.
credential_id : str or int, optional
The ID of the database credential. If ``None``, the default
credential will be used.
polling_interval : int or float, optional
Number of seconds to wait between checks for job completion.
hidden : bool, optional
If ``True`` (the default), this job will not appear in the Civis UI.
Returns
-------
results : :class:`~civis.futures.CivisFuture`
A `CivisFuture` object.
Raises
------
CivisImportError
If multiple files are given and determined to be incompatible for
import. This may be the case if their columns have different types,
their delimiters are different, headers are present in some but not
others, or compressions do not match.
Examples
--------
>>> file_id = 100
>>> fut = civis.io.civis_file_to_table(file_id,
... 'my-database',
... 'scratch.my_data')
>>> fut.result()
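    Several files that share the same layout can be loaded as one atomic
    unit (the file IDs below are illustrative):
    >>> fut = civis.io.civis_file_to_table([100, 101],
    ...                                    'my-database',
    ...                                    'scratch.my_data')
    >>> fut.result()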
"""
if client is None:
client = APIClient()
schema, table = split_schema_tablename(table)
if isinstance(file_id, int):
file_id = [file_id]
if schema is None:
raise ValueError("Provide a schema as part of the `table` input.")
db_id = client.get_database_id(database)
cred_id = credential_id or client.default_credential
if delimiter is not None: # i.e. it was provided as an argument
delimiter = DELIMITERS.get(delimiter)
assert delimiter, "delimiter must be one of {}".format(
DELIMITERS.keys()
)
try:
client.get_table_id(table, database)
log.debug('Table {table} already exists - skipping column '
'detection'.format(table=table))
table_exists = True
except ValueError:
table_exists = False
# Use Preprocess endpoint to get the table columns as needed
# and perform necessary file cleaning
need_table_columns = ((not table_exists or existing_table_rows == 'drop')
and table_columns is None)
cleaning_futures = _run_cleaning(file_id, client, need_table_columns,
headers, delimiter, hidden)
(cleaned_file_ids, headers, compression, delimiter,
cleaned_table_columns) = _process_cleaning_results(
cleaning_futures, client, headers, need_table_columns, delimiter
)
table_columns = table_columns or cleaned_table_columns
source = dict(file_ids=cleaned_file_ids)
destination = dict(schema=schema, table=table, remote_host_id=db_id,
credential_id=cred_id, primary_keys=primary_keys,
last_modified_keys=last_modified_keys)
redshift_options = dict(distkey=distkey, sortkeys=[sortkey1, sortkey2],
diststyle=diststyle)
# If multiple files are being imported, there might be differences in
# their precisions/lengths - setting this option will allow the Civis API
# to increase these values for the data types provided, and decreases the
# risk of a length-related import failure
loosen_types = len(file_id) > 1
import_name = 'CSV import to {}.{}'.format(schema, table)
import_job = client.imports.post_files_csv(
source,
destination,
headers,
name=import_name,
max_errors=max_errors,
existing_table_rows=existing_table_rows,
column_delimiter=delimiter,
compression=compression,
escaped=escaped,
execution=execution,
loosen_types=loosen_types,
table_columns=table_columns,
redshift_destination_options=redshift_options,
hidden=hidden
)
fut = run_job(import_job.id, client=client,
polling_interval=polling_interval)
log.debug('Started run %d for import %d', fut.run_id, import_job.id)
return fut
def _sql_script(client, sql, database, job_name, credential_id, hidden=False,
csv_settings=None):
job_name = maybe_get_random_name(job_name)
db_id = client.get_database_id(database)
credential_id = credential_id or client.default_credential
csv_settings = csv_settings or {}
export_job = client.scripts.post_sql(job_name,
remote_host_id=db_id,
credential_id=credential_id,
sql=sql,
hidden=hidden,
csv_settings=csv_settings)
run_job = client.scripts.post_sql_runs(export_job.id)
log.debug('Started run %d of SQL script %d', run_job.id, export_job.id)
return export_job.id, run_job.id
def _get_sql_select(table, columns=None):
if columns and not isinstance(columns, (list, tuple)):
raise TypeError("columns must be a list, tuple or None")
select = ", ".join(columns) if columns is not None else "*"
sql = "select {} from {}".format(select, table)
return sql
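# For example (illustrative inputs):
#   _get_sql_select("schema.my_table", ["a", "b"]) -> 'select a, b from schema.my_table'
#   _get_sql_select("schema.my_table") -> 'select * from schema.my_table'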
def _get_headers(client, sql, database, credential_id, polling_interval=None):
headers = None
try:
# use 'begin read only;' to ensure we can't change state
sql = 'begin read only; select * from ({}) limit 1'.format(sql)
fut = query_civis(sql, database, client=client,
credential_id=credential_id,
polling_interval=polling_interval)
headers = fut.result()['result_columns']
except Exception as exc: # NOQA
log.debug("Failed to retrieve headers due to %s", str(exc))
return headers
def _decompress_stream(response, buf, write_bytes=True):
    # Use response.raw for a more consistent approach: if content-encoding
    # is specified in the headers, response.iter_content will decompress the
    # stream, but our use of content-encoding is inconsistent, so decompress
    # explicitly here.
chunk = response.raw.read(CHUNK_SIZE)
d = zlib.decompressobj(zlib.MAX_WBITS | 32)
while chunk or d.unused_data:
if d.unused_data:
to_decompress = d.unused_data + chunk
d = zlib.decompressobj(zlib.MAX_WBITS | 32)
else:
to_decompress = d.unconsumed_tail + chunk
if write_bytes:
buf.write(d.decompress(to_decompress))
else:
buf.write(d.decompress(to_decompress).decode('utf-8'))
chunk = response.raw.read(CHUNK_SIZE)
def _download_file(url, local_path, headers, compression):
response = requests.get(url, stream=True)
response.raise_for_status()
# gzipped buffers can be concatenated so write headers as gzip
if compression == 'gzip':
with gzip.open(local_path, 'wb') as fout:
fout.write(headers)
with open(local_path, 'ab') as fout:
shutil.copyfileobj(response.raw, fout, CHUNK_SIZE)
# write headers and decompress the stream
elif compression == 'none':
with open(local_path, 'wb') as fout:
fout.write(headers)
_decompress_stream(response, fout)
# decompress the stream, write headers, and zip the file
elif compression == 'zip':
with TemporaryDirectory() as tmp_dir:
tmp_path = path.join(tmp_dir, 'civis_to_csv.csv')
with open(tmp_path, 'wb') as tmp_file:
tmp_file.write(headers)
_decompress_stream(response, tmp_file)
with zipfile.ZipFile(local_path, 'w') as fout:
arcname = path.basename(local_path)
if arcname.split('.')[-1] == 'zip':
arcname = arcname.split('.')[0] + '.csv'
fout.write(tmp_path, arcname, zipfile.ZIP_DEFLATED)
def _download_callback(job_id, run_id, filename, headers, compression):
def callback(future):
if not future.succeeded():
return
outputs = future.result().get("output")
if not outputs:
warnings.warn("Job %s, run %s does not have any output to "
"download. Not creating file %s."
% (job_id, run_id, filename),
RuntimeWarning)
return
else:
url = outputs[0]["path"]
file_id = outputs[0]["file_id"]
log.debug('Exported results to Civis file %s', file_id)
return _download_file(url, filename, headers, compression)
return callback
def split_schema_tablename(table):
"""Split a Redshift 'schema.tablename' string
Remember that special characters (such as '.') can only
be included in a schema or table name if delimited by double-quotes.
Parameters
----------
table: str
Either a Redshift schema and table name combined
with a ".", or else a single table name.
Returns
-------
schema, tablename
A 2-tuple of strings. The ``schema`` may be None if the input
is only a table name, but the ``tablename`` will always be filled.
Raises
------
ValueError
If the input ``table`` is not separable into a schema and
table name.
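    Examples
    --------
    Illustrative inputs and outputs:
    >>> split_schema_tablename('scratch.my_table')
    ('scratch', 'my_table')
    >>> split_schema_tablename('my_table')
    (None, 'my_table')
    >>> split_schema_tablename('scratch."my.table"')
    ('scratch', 'my.table')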
"""
reader = csv.reader(StringIO(str(table)),
delimiter=".",
doublequote=True,
quotechar='"')
schema_name_tup = next(reader)
if len(schema_name_tup) == 1:
schema_name_tup = (None, schema_name_tup[0])
if len(schema_name_tup) != 2:
raise ValueError("Cannot parse schema and table. "
"Does '{}' follow the pattern 'schema.table'?"
.format(table))
return tuple(schema_name_tup)
def _replace_null_column_names(column_list):
"""Replace null names in columns from file cleaning with an appropriately
    generated placeholder name (``column_<index>``).
Parameters
----------
column_list: list[dict]
the list of columns from file cleaning.
Returns
--------
column_list: list[dict]
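    Examples
    --------
    A minimal illustration; keys other than ``'name'`` pass through
    unchanged:
    >>> _replace_null_column_names([{'name': None}, {'name': 'b'}])
    [{'name': 'column_0'}, {'name': 'b'}]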
"""
new_cols = []
for i, col in enumerate(column_list):
# Avoid mutating input arguments
new_col = dict(col)
if new_col.get('name') is None:
new_col['name'] = 'column_{}'.format(i)
new_cols.append(new_col)
return new_cols
def _run_cleaning(file_ids, client, need_table_columns, headers, delimiter,
hidden, polling_interval=None):
cleaning_futures = []
for fid in file_ids:
cleaner_job = client.files.post_preprocess_csv(
file_id=fid,
in_place=False,
detect_table_columns=need_table_columns,
force_character_set_conversion=True,
include_header=headers,
column_delimiter=delimiter,
hidden=hidden
)
cleaning_futures.append(run_job(cleaner_job.id, client=client,
polling_interval=polling_interval))
return cleaning_futures
def _check_all_detected_info(detected_info, headers, delimiter,
compression, output_file_id):
"""Check a single round of cleaning results as compared to provided values.
Parameters
----------
detected_info: Dict[str, Any]
The detected info of the file as returned by the Civis API.
headers: bool
        The provided value for whether or not the file contains headers.
delimiter: str
The provided value for the file delimiter.
compression: str
The provided value for the file compression.
output_file_id: int
The cleaned file's Civis ID. Used for debugging.
Raises
------
CivisImportError
If the values detected on the file do not match their expected
attributes.
"""
if headers != detected_info['includeHeader']:
raise CivisImportError('Mismatch between detected headers - '
'please ensure all imported files either '
'have a header or do not.')
if delimiter != detected_info['columnDelimiter']:
raise CivisImportError('Provided delimiter "{}" does not match '
'detected delimiter for {}: "{}"'.format(
delimiter,
output_file_id,
detected_info["columnDelimiter"])
)
if compression != detected_info['compression']:
raise CivisImportError('Mismatch between detected and provided '
'compressions - provided compression was {}'
' but detected compression {}. Please '
'ensure all imported files have the same '
'compression.'.format(
compression,
detected_info['compression'])
)
def _process_cleaning_results(cleaning_futures, client, headers,
need_table_columns, delimiter):
cleaned_file_ids = []
done, still_going = concurrent.futures.wait(
cleaning_futures, return_when=concurrent.futures.FIRST_COMPLETED
)
# Set values from first completed file cleaning - other files will be
# compared to this one. If inconsistencies are detected, raise an error.
first_completed = done.pop()
output_file = client.jobs.list_runs_outputs(
first_completed.job_id,
first_completed.run_id
)[0]
detected_info = client.files.get(output_file.object_id).detected_info
table_columns = (detected_info['tableColumns'] if need_table_columns
else None)
if headers is None:
headers = detected_info['includeHeader']
if delimiter is None:
delimiter = detected_info['columnDelimiter']
compression = detected_info['compression']
_check_all_detected_info(detected_info, headers, delimiter, compression,
output_file.object_id)
cleaned_file_ids.append(output_file.object_id)
    # Ensure that all results from files are correctly accounted for:
    # concurrent.futures.wait returns two sets, so ``done`` may already
    # contain more than one completed future. Account for those completed
    # cleaning runs while waiting on the ones that are still running.
for result in concurrent.futures.as_completed(done | still_going):
output_file = client.jobs.list_runs_outputs(
result.job_id,
result.run_id
)[0]
detected_info = client.files.get(output_file.object_id).detected_info
if need_table_columns:
file_columns = detected_info['tableColumns']
_check_column_types(table_columns, file_columns,
output_file.object_id)
_check_all_detected_info(detected_info, headers, delimiter,
compression, output_file.object_id)
cleaned_file_ids.append(output_file.object_id)
if need_table_columns:
table_columns = _replace_null_column_names(table_columns)
return cleaned_file_ids, headers, compression, delimiter, table_columns
def _check_column_types(table_columns, file_columns, output_obj_id):
"""Check that base column types match those current defined for the table.
Parameters
----------
table_columns: List[Dict[str, str]]
The columns for the table to be created.
file_columns: List[Dict[str, str]]
The columns detected by the Civis API for the file.
output_obj_id: int
The file ID under consideration; used for error messaging.
Raises
------
CivisImportError
If the table columns and the file columns have a type mismatch, or
differ in count.
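    Examples
    --------
    A sketch of the base-type comparison (precision and length are ignored):
    >>> _check_column_types(
    ...     [{'name': 'a', 'sql_type': 'VARCHAR(42)'}],
    ...     [{'name': 'a', 'sql_type': 'VARCHAR(256)'}],
    ...     output_obj_id=123)  # no error; both base types are VARCHAR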
"""
if len(table_columns) != len(file_columns):
raise CivisImportError('All files should have the same number of '
'columns. Expected {} columns but file {} '
'has {} columns'.format(
len(table_columns),
output_obj_id,
len(file_columns))
)
error_msgs = []
for idx, (tcol, fcol) in enumerate(zip(table_columns, file_columns)):
# for the purposes of type checking, we care only that the types
        # share a base type (e.g. INT, VARCHAR, DECIMAL) rather than that
        # they have the same precision and length
        # (e.g. VARCHAR(42), DECIMAL(8, 10))
tcol_base_type = tcol['sql_type'].split('(', 1)[0]
fcol_base_type = fcol['sql_type'].split('(', 1)[0]
if tcol_base_type != fcol_base_type:
error_msgs.append(
'Column {}: File base type was {}, but expected {}'.format(
idx,
fcol_base_type,
tcol_base_type
)
)
if error_msgs:
raise CivisImportError(
'Encountered the following errors for file {}:\n\t{}'.format(
output_obj_id,
'\n\t'.join(error_msgs)
)
)
| 40.798313
| 79
| 0.617111
| 7,259
| 58,056
| 4.80011
| 0.093815
| 0.008782
| 0.010734
| 0.00683
| 0.620566
| 0.596946
| 0.573356
| 0.548645
| 0.53596
| 0.523017
| 0
| 0.003304
| 0.296248
| 58,056
| 1,422
| 80
| 40.827004
| 0.849524
| 0.49888
| 0
| 0.355596
| 0
| 0
| 0.091897
| 0.0025
| 0
| 0
| 0
| 0
| 0.00361
| 1
| 0.041516
| false
| 0
| 0.072202
| 0.00361
| 0.151625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
164f6ae0c583900eea5f44762f6006a785208240
| 2,218
|
py
|
Python
|
tests/unit/small_text/integrations/pytorch/test_strategies.py
|
chschroeder/small-text
|
ef28e91ba0c94fe938dde4f16253aa8695ea13b7
|
[
"MIT"
] | 218
|
2021-05-26T16:38:53.000Z
|
2022-03-30T09:48:54.000Z
|
tests/unit/small_text/integrations/pytorch/test_strategies.py
|
chschroeder/small-text
|
ef28e91ba0c94fe938dde4f16253aa8695ea13b7
|
[
"MIT"
] | 9
|
2021-10-16T23:23:02.000Z
|
2022-02-22T15:23:11.000Z
|
tests/unit/small_text/integrations/pytorch/test_strategies.py
|
chschroeder/small-text
|
ef28e91ba0c94fe938dde4f16253aa8695ea13b7
|
[
"MIT"
] | 21
|
2021-06-24T11:19:44.000Z
|
2022-03-12T16:29:53.000Z
|
import unittest
import pytest
from small_text.integrations.pytorch.exceptions import PytorchNotFoundError
try:
from small_text.integrations.pytorch.query_strategies import (
BADGE,
ExpectedGradientLength,
ExpectedGradientLengthMaxWord)
except PytorchNotFoundError:
pass
@pytest.mark.pytorch
class BADGETest(unittest.TestCase):
def test_init_default(self):
strategy = BADGE(2)
self.assertEqual(2, strategy.num_classes)
def test_init(self):
strategy = BADGE(4)
self.assertEqual(4, strategy.num_classes)
def test_badge_str(self):
strategy = BADGE(2)
expected_str = 'BADGE(num_classes=2)'
self.assertEqual(expected_str, str(strategy))
@pytest.mark.pytorch
class ExpectedGradientLengthTest(unittest.TestCase):
def test_init_default(self):
strategy = ExpectedGradientLength(2)
self.assertEqual(2, strategy.num_classes)
self.assertEqual(50, strategy.batch_size)
self.assertEqual('cuda', strategy.device)
def test_init(self):
strategy = ExpectedGradientLength(4, batch_size=100, device='cpu')
self.assertEqual(4, strategy.num_classes)
self.assertEqual(100, strategy.batch_size)
self.assertEqual('cpu', strategy.device)
def test_expected_gradient_length_str(self):
strategy = ExpectedGradientLength(2)
expected_str = 'ExpectedGradientLength()'
self.assertEqual(expected_str, str(strategy))
@pytest.mark.pytorch
class ExpectedGradientLengthMaxWordTest(unittest.TestCase):
def test_init_default(self):
strategy = ExpectedGradientLengthMaxWord(2, 'embedding')
self.assertEqual(2, strategy.num_classes)
self.assertEqual(50, strategy.batch_size)
self.assertEqual('cuda', strategy.device)
self.assertEqual('embedding', strategy.layer_name)
def test_init(self):
strategy = ExpectedGradientLengthMaxWord(4, 'embedding', batch_size=100, device='cpu')
self.assertEqual(4, strategy.num_classes)
self.assertEqual(100, strategy.batch_size)
self.assertEqual('cpu', strategy.device)
self.assertEqual('embedding', strategy.layer_name)
| 30.383562
| 94
| 0.712353
| 236
| 2,218
| 6.538136
| 0.207627
| 0.174984
| 0.042774
| 0.057032
| 0.6442
| 0.553467
| 0.531432
| 0.508101
| 0.365522
| 0.365522
| 0
| 0.01728
| 0.191163
| 2,218
| 72
| 95
| 30.805556
| 0.842809
| 0
| 0
| 0.596154
| 0
| 0
| 0.045086
| 0.010821
| 0
| 0
| 0
| 0
| 0.346154
| 1
| 0.153846
| false
| 0.019231
| 0.076923
| 0
| 0.288462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
16517f3c2ccf47bb7eb0759cee7e8d2e4ec1a86f
| 3,553
|
py
|
Python
|
src/adsb/sbs/server.py
|
claws/adsb
|
4a7d35880dece6baaf24370fab445e2571fc19e9
|
[
"MIT"
] | 7
|
2018-07-11T00:50:47.000Z
|
2021-09-29T10:36:44.000Z
|
src/adsb/sbs/server.py
|
claws/adsb
|
4a7d35880dece6baaf24370fab445e2571fc19e9
|
[
"MIT"
] | 3
|
2020-06-13T23:27:42.000Z
|
2020-07-22T03:06:16.000Z
|
src/adsb/sbs/server.py
|
claws/adsb
|
4a7d35880dece6baaf24370fab445e2571fc19e9
|
[
"MIT"
] | 3
|
2020-01-08T19:05:42.000Z
|
2022-02-11T02:22:23.000Z
|
import asyncio
import datetime
import logging
import socket
from . import protocol
from typing import Tuple
from asyncio import AbstractEventLoop
logger = logging.getLogger(__name__)
class Server(object):
def __init__(
self,
host: str = "localhost",
port: int = 30003,
backlog=100,
loop: AbstractEventLoop = None,
) -> None:
self.loop = loop or asyncio.get_event_loop()
self.host = host
self._requested_port = port
self.port = None
self.backlog = backlog
self.listener = None
self.protocols = {}
async def start(self) -> None:
""" Start the server """
try:
self.listener = await self.loop.create_server(
lambda: protocol.SBSServerProtocol(self),
self.host,
self._requested_port,
family=socket.AF_INET,
backlog=self.backlog,
) # type: asyncio.Server
# Fetch actual port in use. This can be different from the
# specified port if the port was passed as 0 which means use
# an ephemeral port.
assert len(self.listener.sockets) == 1
_, self.port = self.listener.sockets[0].getsockname()
except asyncio.CancelledError:
logger.exception("Connection waiter Future was cancelled")
except Exception:
logger.exception("An error occurred in start")
async def stop(self) -> None:
""" Stop the server """
if self.listener:
            # Avoid iterating over the protocols dict, which may change size
            # while it is being iterated over.
peers = list(self.protocols)
for peer in peers:
prot = self.protocols.get(peer)
if prot:
prot.close()
self.listener.close()
def register_protocol(
self, peer: Tuple[str, int], prot: "SBSServerProtocol"
) -> None:
""" Register a protocol instance with the server.
:param peer: Tuple of (host:str, port:int).
:param prot: a SBSServerProtocol instance.
"""
self.protocols[peer] = prot
def deregister_protocol(self, peer: Tuple[str, int]) -> None:
""" De-register a protocol instance from the server.
This peer will no longer receive messages.
:param peer: Tuple of (host:str, port:int).
"""
del self.protocols[peer]
def send_message(self, msg: bytes, peer: Tuple[str, int] = None) -> None:
""" Send a message.
:param msg: A bytes object representing the SBS format message to
send to peers. The message is assumed to include the end of
message delimiter.
:param peer: A specific peer to send the message to. Peer is a
Tuple of (host:str, port:int). If not specified then the message
is broadcast to all peers.
"""
if self.protocols:
if peer:
prot = self.protocols.get(peer)
if prot:
prot.send_message(msg)
else:
raise Exception(
                        f"Server can't send msg to non-existent peer: {peer}"
)
else:
# broadcast message to all peers
for peer, prot in self.protocols.items():
prot.send_message(msg)
else:
raise Exception("Server can't send msg, no peers available")
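if __name__ == "__main__":  # pragma: no cover
    # Minimal usage sketch (illustrative; not part of the original API):
    # bind to an ephemeral port, report the port chosen by the OS, then
    # shut the server down again.
    async def _demo() -> None:
        server = Server(host="localhost", port=0,
                        loop=asyncio.get_running_loop())
        await server.start()
        logger.info("SBS server bound to %s:%s", server.host, server.port)
        await server.stop()

    asyncio.run(_demo())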
| 32.59633
| 77
| 0.565156
| 406
| 3,553
| 4.891626
| 0.334975
| 0.052367
| 0.018127
| 0.022659
| 0.165156
| 0.136455
| 0.098691
| 0.064451
| 0
| 0
| 0
| 0.004787
| 0.353223
| 3,553
| 108
| 78
| 32.898148
| 0.859443
| 0.247678
| 0
| 0.164179
| 0
| 0
| 0.072603
| 0
| 0
| 0
| 0
| 0
| 0.014925
| 1
| 0.059701
| false
| 0
| 0.104478
| 0
| 0.179104
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
16557fb191c1ea62849d52d444fde47864d855b9
| 43,651
|
py
|
Python
|
lantz/drivers/sacher/Sacher_EPOS.py
|
mtsolmn/lantz-drivers
|
f48caf9000ddd08f2abb837d832e341410af4788
|
[
"BSD-3-Clause"
] | 4
|
2019-05-04T00:10:53.000Z
|
2020-10-22T18:08:40.000Z
|
lantz/drivers/sacher/Sacher_EPOS.py
|
mtsolmn/lantz-drivers
|
f48caf9000ddd08f2abb837d832e341410af4788
|
[
"BSD-3-Clause"
] | 3
|
2019-07-12T13:44:17.000Z
|
2020-10-22T19:32:08.000Z
|
lantz/drivers/sacher/Sacher_EPOS.py
|
mtsolmn/lantz-drivers
|
f48caf9000ddd08f2abb837d832e341410af4788
|
[
"BSD-3-Clause"
] | 9
|
2019-04-03T17:07:03.000Z
|
2021-02-15T21:53:55.000Z
|
# sacher_epos.py, python wrapper for sacher epos motor
# David Christle <christle@uchicago.edu>, August 2014
#
"""
Possibly Maxon EPOS now
"""
"""
This is the actual version that works
But only in the lab32 virtual environment
"""
# from instrument import Instrument
# import qt
import ctypes
import ctypes.wintypes
import logging
import time
# from instrument import Instrument
from ctypes.wintypes import DWORD, WORD
import numpy as np
"""
ctypes is used below to load and call the vendor's EPOS command DLL
(EposCmd.dll).
"""
# from subprocess import Popen, PIPE
# from multiprocessing.managers import BaseManager
# import atexit
# import os
# python32_dir = "C:\\Users\\Alex\\Miniconda3\\envs\\lab32"
# assert os.path.isdir(python32_dir)
# os.chdir(python32_dir)
# derp = "C:\\Users\\Alex\\Documents\\wow_such_code"
# assert os.path.isdir(derp)
# os.chdir(derp)
# p = Popen([python32_dir + "\\python.exe", derp + "\\delegate.py"], stdout=PIPE, cwd=derp)
# atexit.register(p.terminate)
# port = int(p.stdout.readline())
# authkey = p.stdout.read()
# print(port, authkey)
# m = BaseManager(address=("localhost", port), authkey=authkey)
# m.connect()
# tell manager to expect an attribute called LibC
# m.register("SacherLasaTeknique")
# access and use libc
# libc = m.SacherLasaTeknique()
# print(libc.vcs())
# eposlib = ctypes.windll.eposcmd
eposlib = ctypes.windll.LoadLibrary('C:\\Users\\Carbro\\Desktop\\Charmander\\EposCmd.dll')
DeviceName = b'EPOS'
ProtocolStackName = b'MAXON_RS232'
InterfaceName = b'RS232'
"""
ctypes is what talks to the EPOS DLL; the constants above name the device
('EPOS') and the RS232 protocol stack/interface used to open it.
"""
HISTCHAN = 65536
TTREADMAX = 131072
RANGES = 8
MODE_HIST = 0
MODE_T2 = 2
MODE_T3 = 3
FLAG_OVERFLOW = 0x0040
FLAG_FIFOFULL = 0x0003
# in mV
ZCMIN = 0
ZCMAX = 20
DISCRMIN = 0
DISCRMAX = 800
# in ps
OFFSETMIN = 0
OFFSETMAX = 1000000000
# in ms
ACQTMIN = 1
ACQTMAX = 10 * 60 * 60 * 1000
# in mV
PHR800LVMIN = -1600
PHR800LVMAX = 2400
"""
The constants above are mode flags and limit values carried over from the
original driver; units are given in the inline comments (mV, ps, ms).
"""
class Sacher_EPOS():
"""
    Driver wrapper for the Sacher EPOS motor controller.
    The methods defined on this class are:
__init__(self, name, address, reset=False)
__del__(self)
get_bit(self, byteval,idx)
_u32todouble(self, uinput)
open(self)
close(self)
get_offset(self)
fine_tuning_steps(self, steps)
set_new_offset(self, new_offset)
get_motor_position(self)
set_target_position(self, target, absolute, immediately)
do_get_wavelength(self)
do_set_wavelength(self, wavelength)
is_open(self)
clear_fault(self)
initialize(self)
    Each method is described briefly where it is defined below.
"""
def __init__(self, name, address, reset=False):
# Instrument.__init__(self, name, tags=['physical'])
# self._port_name = str(address)
self._port_name = address
self._is_open = False
self._HPM = True
# self.add_parameter('wavelength',
# flags = Instrument.FLAG_GETSET,
# type = types.FloatType,
# units = 'nm',
# minval=1070.0,maxval=1180.0)
# self.add_function('open')
# self.add_function('close')
# self.add_function('fine_tuning_steps')
# self.add_function('get_motor_position')
# self.add_function('set_target_position')
# try:
self.open()
self.initialize()
# except:
# logging.error('Error loading Sacher EPOS motor. In use?')
"""
    __init__ stores the port name and flags, then opens the device and calls
    initialize() to configure the controller.
"""
def __del__(self):
# execute disconnect
self.close()
return
"""
    __del__ closes the device connection when the object is deleted.
"""
@staticmethod
def get_bit(byteval, idx):
# def get_bit(self, byteval,idx):
return ((byteval & (1 << idx)) != 0)
"""
    get_bit returns True if bit ``idx`` of ``byteval`` is set
    (e.g. bit 1 of 0b1010 is set; bit 2 is not).
"""
@staticmethod
def _u32todouble(uinput):
# def _u32todouble(self, uinput):
# this function implements the really weird/non-standard U32 to
# floating point conversion in the sacher VIs
# get sign of number
sign = Sacher_EPOS.get_bit(uinput, 31)
if sign == False:
mantissa_sign = 1
elif sign == True:
mantissa_sign = -1
exp_mask = 0b111111
# print 'uin u is %d' % uinput
# print 'type uin %s' % type(uinput)
# print 'binary input is %s' % bin(long(uinput))
# get sign of exponent
if Sacher_EPOS.get_bit(uinput, 7) == False:
exp_sign = 1
elif Sacher_EPOS.get_bit(uinput, 7) == True:
exp_sign = -1
# print 'exp extract %s' % bin(int(uinput & exp_mask))
# print 'exp conv %s' % (exp_sign*int(uinput & exp_mask))
# print 'sign of exponent %s' % self.get_bit(uinput,7)
# print 'binary constant is %s' % bin(int(0b10000000000000000000000000000000))
mantissa_mask = 0b01111111111111111111111100000000
# mantissa_mask = 0b0111111111111111111111110000000
# print 'mantissa extract is %s' % bin((uinput & mantissa_mask) >> 8)
mantissa = 1.0 / 1000000.0 * float(mantissa_sign) * float((uinput & mantissa_mask) >> 8)
# print 'mantissa is %.12f' % mantissa
# print(1 if Sacher_EPOS.get_bit(uinput,31) else 0, mantissa, 1 if Sacher_EPOS.get_bit(uinput,7) else 0, uinput & exp_mask)
output = mantissa * 2.0 ** (float(exp_sign) * float(int(uinput & exp_mask)))
# print 'output is %s' % output
return output
"""
    _u32todouble implements the non-standard conversion used by the Sacher
    VIs from an unsigned 32-bit integer (U32) to a float: bit 31 carries the
    mantissa sign, bits 8-30 carry the mantissa scaled by 1e6, bit 7 carries
    the exponent sign, and bits 0-5 carry the exponent magnitude.
"""
@staticmethod
def _doubletou32(dinput):
mantissa_bit = 0 if int(dinput / abs(dinput)) > 0 else 1
exp_bit = 1 if -1 < dinput < 1 else 0
b = np.ceil(np.log10(abs(dinput)))
a = dinput / 10 ** b
if dinput < 0:
a = -a
# print('a:\t{}\tb:\t{}'.format(a, b))
d = np.log2(10) * b
d_ = np.ceil(d)
c = a * 2 ** (d - d_)
# print('c:\t{}\td_:{}\toriginal:\t{}'.format(c, d_, c * 2 ** d_))
return (int(mantissa_bit) << 31) + (int(c * 1e6) << 8) + (int(exp_bit) << 7) + int(abs(d_))
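    # Illustrative round trip between the two helpers above: values whose
    # encoding is exact, e.g. 0.25, satisfy
    # _u32todouble(_doubletou32(0.25)) == 0.25; other values round-trip
    # only approximately because the mantissa is truncated.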
def open(self):
eposlib.VCS_OpenDevice.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p,
ctypes.POINTER(DWORD)]
eposlib.VCS_OpenDevice.restype = ctypes.wintypes.HANDLE
buf = ctypes.pointer(DWORD(0))
ret = ctypes.wintypes.HANDLE()
# print 'types are all %s %s %s %s %s' % (type(DeviceName), type(ProtocolStackName), type(InterfaceName), type(self._port_name), type(buf))
ret = eposlib.VCS_OpenDevice(DeviceName, ProtocolStackName, InterfaceName, self._port_name, buf)
self._keyhandle = ret
# print 'keyhandle is %s' % self._keyhandle
# print 'open device ret %s' % buf
# print 'printing'
# print buf.contents.value
# print 'done printer'
if int(buf.contents.value) >= 0:
self._is_open = True
self._keyhandle = ret
return
"""
    open() calls VCS_OpenDevice with the configured device, protocol stack,
    interface, and port names, stores the returned handle, and marks the
    connection as open on success.
"""
def close(self):
print('closing EPOS motor.')
eposlib.VCS_CloseDevice.argtypes = [ctypes.wintypes.HANDLE, ctypes.POINTER(DWORD)]
eposlib.VCS_CloseDevice.restype = ctypes.wintypes.BOOL
buf = ctypes.pointer(DWORD(0))
ret = ctypes.wintypes.BOOL()
ret = eposlib.VCS_CloseDevice(self._keyhandle, buf)
# print 'close device returned %s' % buf
if int(buf.contents.value) >= 0:
self._is_open = False
else:
logging.error(__name__ + ' did not close Sacher EPOS motor correctly.')
return
"""
    close() calls VCS_CloseDevice on the stored handle and clears the open
    flag on success.
"""
def get_motor_current(self):
nodeID = ctypes.wintypes.WORD(0)
eposlib.VCS_GetCurrentIs.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
ctypes.POINTER(ctypes.c_uint8), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetCurrentIs.restype = ctypes.wintypes.BOOL
motorCurrent = ctypes.c_uint8(0)
buf = ctypes.wintypes.DWORD(0)
ret = eposlib.VCS_GetCurrentIs(self._keyhandle, nodeID, ctypes.byref(motorCurrent), ctypes.byref(buf))
return motorCurrent.value
"""
    Reads the present motor current via VCS_GetCurrentIs and returns it.
"""
def find_home(self):
nodeID = ctypes.wintypes.WORD(0)
eposlib.VCS_FindHome.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.c_uint8,
ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_FindHome.restype = ctypes.wintypes.BOOL
buf = ctypes.wintypes.DWORD(0)
ret = eposlib.VCS_FindHome(self._keyhandle, nodeID, ctypes.c_uint8(35), ctypes.byref(buf))
print('Homing: {}'.format(ret))
return ret
"""
    Issues a homing move via VCS_FindHome (homing method 35) and returns the
    result of the call.
"""
def restore(self):
nodeID = ctypes.wintypes.WORD(0)
eposlib.VCS_FindHome.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_FindHome.restype = ctypes.wintypes.BOOL
buf = ctypes.wintypes.DWORD(0)
ret = eposlib.VCS_Restore(self._keyhandle, nodeID, ctypes.byref(buf))
print('Restore: {}'.format(ret))
return ret
"""
    Calls VCS_Restore for the node and returns the result of the call.
"""
def get_offset(self):
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD,
ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL
# These are hardcoded values I got from the LabVIEW program -- I don't think
# any documentation exists on particular object indices
StoredPositionObject = ctypes.wintypes.WORD(8321)
StoredPositionObjectSubindex = ctypes.c_uint8(0)
StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
ObjectData = ctypes.c_void_p()
ObjectDataArray = (ctypes.c_uint32 * 1)()
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_int32))
StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToRead, StoredPositionNbBytesRead,
ctypes.byref(buf))
# Cast the object data to uint32
CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_int32))
if ret == 0:
logging.error(__name__ + ' Could not read stored position from Sacher EPOS motor')
return CastedObjectData[0]
"""
    Reads the stored position offset (object index 8321, subindex 0) via
    VCS_GetObject and returns it as a signed 32-bit value.
"""
def fine_tuning_steps(self, steps):
current_motor_pos = self.get_motor_position()
self._offset = self.get_offset()
self.set_target_position(steps, False, True)
new_motor_pos = self.get_motor_position()
# print('New motor position is %s' % new_motor_pos)
# print 'new offset is %s' % (new_motor_pos-current_motor_pos+self._offset)
self.set_new_offset(new_motor_pos - current_motor_pos + self._offset)
"""
    Moves the motor by ``steps`` relative to the current position and
    updates the stored offset accordingly.
"""
def set_new_offset(self, new_offset):
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
eposlib.VCS_SetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
ctypes.c_uint8, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.DWORD,
ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_SetObject.restype = ctypes.wintypes.BOOL
# print 'setting new offset'
StoredPositionObject = ctypes.wintypes.WORD(8321)
StoredPositionObjectSubindex = ctypes.c_uint8(0)
StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4)
ObjectDataArray = (ctypes.c_uint32 * 1)(new_offset)
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
StoredPositionNbBytesWritten = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_SetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToWrite, StoredPositionNbBytesWritten,
ctypes.byref(buf))
if ret == 0:
logging.error(__name__ + ' Could not write stored position from Sacher EPOS motor')
return
"""
    Writes ``new_offset`` to the stored position object (index 8321,
    subindex 0) via VCS_SetObject.
"""
def set_coeffs(self, a, b, c, min_wl, max_wl):
print('')
print("setting coefficients...")
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
eposlib.VCS_SetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
ctypes.c_uint8, ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.wintypes.DWORD,
ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_SetObject.restype = ctypes.wintypes.BOOL
# print 'setting new offset'
d = (min_wl << 16) + max_wl
StoredPositionObject = ctypes.wintypes.WORD(8204)
for subidx, coeff in enumerate([a, b, c]):
print(subidx, coeff)
StoredPositionObjectSubindex = ctypes.c_uint8(subidx + 1)
StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4)
ObjectDataArray = (ctypes.c_uint32 * 1)(self._doubletou32(coeff))
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
StoredPositionNbBytesWritten = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_SetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToWrite, StoredPositionNbBytesWritten,
ctypes.byref(buf))
StoredPositionObjectSubindex = ctypes.c_uint8(4)
StoredPositionNbBytesToWrite = ctypes.wintypes.DWORD(4)
ObjectDataArray = (ctypes.c_uint32 * 1)(d)
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
StoredPositionNbBytesWritten = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_SetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToWrite, StoredPositionNbBytesWritten,
ctypes.byref(buf))
print('Coefficients are %s %s %s' % (self._doubleA, self._doubleB, self._doubleC))
if ret == 0:
logging.error(__name__ + ' Could not write stored position from Sacher EPOS motor')
return
"""
    Writes the calibration coefficients a, b, c (object index 8204,
    subindices 1-3) and the packed min/max wavelength word (subindex 4)
    via VCS_SetObject.
"""
def get_motor_position(self):
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
pPosition = ctypes.pointer(ctypes.c_long())
eposlib.VCS_GetPositionIs.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
ctypes.POINTER(ctypes.c_long), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetPositionIs.restype = ctypes.wintypes.BOOL
ret = eposlib.VCS_GetPositionIs(self._keyhandle, nodeID, pPosition, ctypes.byref(buf))
# print 'get motor position ret %s' % ret
# print 'get motor position buf %s' % buf.value
# print 'get motor position value %s' % pPosition.contents.value
return pPosition.contents.value
# print('getting motor position...')
# print(ret)
# return print(pPosition.contents.value)
"""
    Returns the current motor position via VCS_GetPositionIs.
"""
def set_target_position(self, target, absolute, immediately):
# print('check #1')
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
# First, set enabled state
# print('#5 Motor current: {}'.format(self.get_motor_current()))
# print('#5 Motor current: {}'.format(self.get_motor_current()))
# print('#5 Motor current: {}'.format(self.get_motor_current()))
# print('#5 Motor current: {}'.format(self.get_motor_current()))
# print('#5 Motor current: {}'.format(self.get_motor_current()))
ret = eposlib.VCS_SetEnableState(self._keyhandle, nodeID, ctypes.byref(buf))
# print('Enable state ret %s buf %s' % (ret, buf.value))
# print('#6 Motor current: {}'.format(self.get_motor_current()))
# print('#6 Motor current: {}'.format(self.get_motor_current()))
# print('#6 Motor current: {}'.format(self.get_motor_current()))
# print('#6 Motor current: {}'.format(self.get_motor_current()))
# print('#6 Motor current: {}'.format(self.get_motor_current()))
pTarget = ctypes.c_long(target)
pAbsolute = ctypes.wintypes.BOOL(absolute)
pImmediately = ctypes.wintypes.BOOL(immediately)
eposlib.VCS_MoveToPosition.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.c_long,
ctypes.wintypes.BOOL, ctypes.wintypes.BOOL,
ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_MoveToPosition.restype = ctypes.wintypes.BOOL
# print('check #2')
# print('About to set motor position')
# print('Current motor position is %d' % (self.get_motor_position()))
ret = eposlib.VCS_MoveToPosition(self._keyhandle, nodeID, pTarget, pAbsolute, pImmediately, ctypes.byref(buf))
# print('#7 Motor current: {}'.format(self.get_motor_current()))
# print('#7 Motor current: {}'.format(self.get_motor_current()))
# print('#7 Motor current: {}'.format(self.get_motor_current()))
# print('#7 Motor current: {}'.format(self.get_motor_current()))
# print('#7 Motor current: {}'.format(self.get_motor_current()))
# print('set motor position ret %s' % ret)
# print('set motor position buf %s' % buf.value)
steps_per_second = 14494.0 # hardcoded, estimated roughly, unused now
nchecks = 0
# print('check #3')
while nchecks < 1000:
# get the movement state. a movement state of 1 indicates the motor
# is done moving
# print('')
# print('check #4')
# print('Motor current: {}'.format(self.get_motor_current()))
print('Motor position: {}'.format(self.get_motor_position()))
# print('Motor offset: {}'.format(self.get_offset()))
self._offset = self.get_offset()
# print('Motor offset is %s' % self._offset)
pMovementState = ctypes.pointer(ctypes.wintypes.BOOL())
# print(pMovementState.contents.value)
eposlib.VCS_GetMovementState.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
ctypes.POINTER(ctypes.wintypes.BOOL),
ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetMovementState.restype = ctypes.wintypes.BOOL
# print('Getting movement state')
ret = eposlib.VCS_GetMovementState(self._keyhandle, nodeID, pMovementState, ctypes.byref(buf))
# print('set motor position ret %s' % ret)
# print('set motor position buf %s' % buf.value)
# print('Movement state is %s' % pMovementState.contents.value)
if pMovementState.contents.value == 1:
break
nchecks = nchecks + 1
# print('Current motor position is %d' % self.get_motor_position())
# print('check #5')
# print(nchecks)
# print('')
time.sleep(0.01)
# Now set disabled state
ret = eposlib.VCS_SetDisableState(self._keyhandle, nodeID, ctypes.byref(buf))
# print('check #6')
# print('Disable state ret %s buf %s' % (ret, buf.value))
# print('Final motor position is %d' % (self.get_motor_position()))
# print('check #7')
return ret
"""
    Enables the drive, issues VCS_MoveToPosition with the given target,
    absolute, and immediately flags, polls VCS_GetMovementState until the
    move finishes (up to ~1000 checks), then disables the drive.
"""
def fuck_my_life(self, wavelength):
print('goddamn this piece of shit')
print('')
print('Coefficients are %s %s %s' % (self._doubleA, self._doubleB, self._doubleC))
# print('#3 Motor current: {}'.format(self.get_motor_current()))
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
# Step 1: Get the actual motor position
# print('Getting motor position')
current_motor_pos = self.get_motor_position()
# Step 2: Get the motor offset
self._offset = self.get_offset()
# print('Motor offset is %s' % self._offset)
# Step 3: Convert the desired wavelength into a position
# Check sign of position-to-wavelength
pos0 = self._doubleA * (0.0) ** 2.0 + self._doubleB * 0.0 + self._doubleC
pos5000 = self._doubleA * (5000.0) ** 2.0 + self._doubleB * 5000.0 + self._doubleC
# logging.error(__name__ + ' Sacher wavelength calibration polynomials indicated a wrong wavelength direction')
# If that's OK, use the quadratic formula to calculate the roots
b2a = -1.0 * self._doubleB / (2.0 * self._doubleA)
sqrtarg = self._doubleB ** 2.0 / (4.0 * self._doubleA ** 2.0) - (self._doubleC - wavelength) / self._doubleA
# print('wut da fuuuu')
# print(b2a)
# print(sqrtarg)
# print(pos0)
# print(pos5000)
if sqrtarg < 0.0:
logging.error(__name__ + ' Negative value under square root sign -- something is wrong')
if pos0 > pos5000:
# Take the + square root solution
x = b2a - np.sqrt(sqrtarg)
elif pos0 < pos5000:
x = b2a + np.sqrt(sqrtarg)
print(b2a)
print(np.sqrt(sqrtarg))
# print('Position is %s' % x)
wavelength_to_pos = int(round(x))
# Step 4: Calculate difference between the output position and the stored offset
# print('Step 4...')
diff_wavelength_offset = wavelength_to_pos - int(self._offset)
print('wavelength_to_pos: {}'.format(wavelength_to_pos))
print('diff_wavelength_offset: {}'.format(diff_wavelength_offset))
print('self._offset: {}'.format(int(self._offset)))
"""
    Debugging helper: computes the motor position corresponding to
    ``wavelength`` from the calibration polynomial and prints the
    intermediate values; it does not move the motor.
"""
def do_get_wavelength(self):
self._offset = self.get_offset()
# self._currentwl = self._doubleA*(self._offset)**2.0 + self._doubleB*self._offset + self._doubleC
self._currentwl = self._doubleA * (
self.get_motor_position()) ** 2.0 + self._doubleB * self.get_motor_position() + self._doubleC
print('Current wavelength: %.3f nm' % self._currentwl)
return self._currentwl
"""
    Evaluates the calibration polynomial at the current motor position and
    returns the corresponding wavelength.
"""
def do_set_wavelength(self, wavelength):
print('setting wavelength...')
print('')
# print('Coefficients are %s %s %s' % (self._doubleA, self._doubleB, self._doubleC))
# print('#3 Motor current: {}'.format(self.get_motor_current()))
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
# Step 1: Get the actual motor position
# print('Getting motor position')
current_motor_pos = self.get_motor_position()
# Step 2: Get the motor offset
self._offset = self.get_offset()
# print('Motor offset is %s' % self._offset)
# Step 3: Convert the desired wavelength into a position
# Check sign of position-to-wavelength
pos0 = self._doubleA * (0.0) ** 2.0 + self._doubleB * 0.0 + self._doubleC
pos5000 = self._doubleA * (5000.0) ** 2.0 + self._doubleB * 5000.0 + self._doubleC
# logging.error(__name__ + ' Sacher wavelength calibration polynomials indicated a wrong wavelength direction')
# If that's OK, use the quadratic formula to calculate the roots
b2a = -1.0 * self._doubleB / (2.0 * self._doubleA)
sqrtarg = self._doubleB ** 2.0 / (4.0 * self._doubleA ** 2.0) - (self._doubleC - wavelength) / self._doubleA
# print('wut da fuuuu')
# print(b2a)
# print(sqrtarg)
# print(pos0)
# print(pos5000)
if sqrtarg < 0.0:
logging.error(__name__ + ' Negative value under square root sign -- something is wrong')
if pos0 > pos5000:
# Take the + square root solution
x = b2a - np.sqrt(sqrtarg)
elif pos0 < pos5000:
x = b2a + np.sqrt(sqrtarg)
# x is what the motor position should be
# print('Position is %s' % x)
wavelength_to_pos = int(round(x))
# Step 4: Calculate difference between the output position and the stored offset
# print('Step 4...')
diff_wavelength_offset = wavelength_to_pos - int(self._offset)
# print('Diff wavelength offset %s' % diff_wavelength_offset)
        # Step 5: The original logic overshot the move by 10,000 steps when HPM was
        # active and the wavelength was being lowered, then approached the target
        # from below; that branch is disabled here (diff_wavelength_offset above was
        # only needed by it).
        # Step 6: Set the target position. The position must be commanded as an
        # absolute position, hence the second argument is True.
        self.set_target_position(wavelength_to_pos, True, True)
# Step 7: Get the actual motor position
new_motor_pos = self.get_motor_position()
# print('New motor position is %s' % new_motor_pos)
# print('new offset is %s' % (new_motor_pos-current_motor_pos+self._offset))
self.set_new_offset(new_motor_pos - current_motor_pos + self._offset)
# Step 8, get and print current wavelength
# print('Current wavelength is %.3f' % self.do_get_wavelength())
# print('setting wavelength done')
return
"""
Not sure what this is doing yet
"""
def is_open(self):
return self._is_open
"""
Not sure what this is doing yet
"""
def clear_fault(self):
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
ret = eposlib.VCS_ClearFault(self._keyhandle, nodeID, ctypes.byref(buf))
print('clear fault buf %s, ret %s' % (buf, ret))
if ret == 0:
errbuf = ctypes.create_string_buffer(64)
eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64))
raise ValueError(errbuf.value)
"""
Not sure what this is doing yet
"""
def initialize(self):
nodeID = ctypes.wintypes.WORD(0)
buf = ctypes.wintypes.DWORD(0)
BaudRate = DWORD(38400)
Timeout = DWORD(100)
ret = eposlib.VCS_SetProtocolStackSettings(self._keyhandle, BaudRate, Timeout, ctypes.byref(buf))
# print 'set protocol buf %s ret %s' % (buf, ret)
if ret == 0:
errbuf = ctypes.create_string_buffer(64)
# eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64))
raise ValueError(errbuf.value)
buf = ctypes.wintypes.DWORD(0)
ret = eposlib.VCS_ClearFault(self._keyhandle, nodeID, ctypes.byref(buf))
# print 'clear fault buf %s, ret %s' % (buf, ret)
if ret == 0:
errbuf = ctypes.create_string_buffer(64)
eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64))
raise ValueError(errbuf.value)
buf = ctypes.wintypes.DWORD(0)
plsenabled = ctypes.wintypes.DWORD(0)
ret = eposlib.VCS_GetEnableState(self._keyhandle, nodeID, ctypes.byref(plsenabled), ctypes.byref(buf))
# print 'get enable state buf %s ret %s and en %s' % (buf, ret, plsenabled)
if ret == 0:
errbuf = ctypes.create_string_buffer(64)
eposlib.VCS_GetErrorInfo(buf, errbuf, WORD(64))
raise ValueError(errbuf.value)
if int(plsenabled.value) != 0:
logging.warning(__name__ + ' EPOS motor enabled, disabling before proceeding.')
ret = eposlib.VCS_SetDisableState(self._keyhandle, nodeID, ctypes.byref(buf))
if int(ret) != 0:
logging.warning(__name__ + ' EPOS motor successfully disabled, proceeding')
else:
logging.error(__name__ + ' EPOS motor was not successfully disabled!')
buf = ctypes.wintypes.DWORD(0)
Counts = WORD(512) # incremental encoder counts in pulses per turn
PositionSensorType = WORD(4)
ret = eposlib.VCS_SetEncoderParameter(self._keyhandle, nodeID, Counts, PositionSensorType, ctypes.byref(buf))
## if ret == int(0):
## print 'errr'
## errbuf = ctypes.create_string_buffer(64)
## print 'sending'
## eposlib.VCS_GetErrorInfo.restype = ctypes.wintypes.BOOL
## print 'boolerrorinfo'
## eposlib.VCS_GetErrorInfo.argtypes = [ctypes.wintypes.DWORD, ctypes.c_char_p, ctypes.wintypes.WORD]
## print 'arg'
##
## ret = eposlib.VCS_GetErrorInfo(buf, ctypes.byref(errbuf), WORD(64))
## print 'err'
## raise ValueError(errbuf.value)
# For some reason, it appears normal in the LabVIEW code that this
# function actually returns an error, i.e. the return value is zero
# and the buffer has a non-zero error code in it; the LabVIEW code
# doesn't check it.
# Also, it appears that in the 2005 version of this DLL, the function
# VCS_GetErrorInfo doesn't exist!
# Get operation mode, check if it's 1 -- this is "profile position mode"
buf = ctypes.wintypes.DWORD(0)
pMode = ctypes.pointer(ctypes.c_int8())
eposlib.VCS_GetOperationMode.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
ctypes.POINTER(ctypes.c_int8), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetOperationMode.restype = ctypes.wintypes.BOOL
ret = eposlib.VCS_GetOperationMode(self._keyhandle, nodeID, pMode, ctypes.byref(buf))
# if mode is not 1, make it 1
if pMode.contents.value != 1:
eposlib.VCS_SetOperationMode.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.c_int8,
ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_SetOperationMode.restype = ctypes.wintypes.BOOL
pMode_setting = ctypes.c_int8(1)
ret = eposlib.VCS_SetOperationMode(self._keyhandle, nodeID, pMode_setting, ctypes.byref(buf))
eposlib.VCS_GetPositionProfile.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
ctypes.POINTER(ctypes.wintypes.DWORD),
ctypes.POINTER(ctypes.wintypes.DWORD),
ctypes.POINTER(ctypes.wintypes.DWORD),
ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetPositionProfile.restype = ctypes.wintypes.BOOL
pProfileVelocity = ctypes.pointer(ctypes.wintypes.DWORD())
pProfileAcceleration = ctypes.pointer(ctypes.wintypes.DWORD())
pProfileDeceleration = ctypes.pointer(ctypes.wintypes.DWORD())
ret = eposlib.VCS_GetPositionProfile(self._keyhandle, nodeID, pProfileVelocity, pProfileAcceleration,
pProfileDeceleration, ctypes.byref(buf))
print(pProfileVelocity.contents.value, pProfileAcceleration.contents.value, pProfileDeceleration.contents.value)
if (int(pProfileVelocity.contents.value) > int(11400) or int(pProfileAcceleration.contents.value) > int(
60000) or int(pProfileDeceleration.contents.value) > int(60000)):
            eposlib.VCS_SetPositionProfile.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD,
                                                       ctypes.wintypes.DWORD, ctypes.wintypes.DWORD,
                                                       ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD)]
            eposlib.VCS_SetPositionProfile.restype = ctypes.wintypes.BOOL
pProfileVelocity = ctypes.wintypes.DWORD(429)
pProfileAcceleration = ctypes.wintypes.DWORD(429)
pProfileDeceleration = ctypes.wintypes.DWORD(429)
logging.warning(__name__ + ' GetPositionProfile out of bounds, resetting...')
ret = eposlib.VCS_SetPositionProfile(self._keyhandle, nodeID, pProfileVelocity, pProfileAcceleration,
pProfileDeceleration, ctypes.byref(buf))
# Now get the motor position (stored position offset)
# from the device's "homposition" object
self._offset = self.get_offset()
# Now read the stored 'calculation parameters'
eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD,
ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL
        # More hardcoded values from the LabVIEW program: read calibration
        # coefficient A (object 8204, subindex 1)
StoredPositionObject = ctypes.wintypes.WORD(8204)
StoredPositionObjectSubindex = ctypes.c_uint8(1)
StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
ObjectData = ctypes.c_void_p()
ObjectDataArray = (ctypes.c_uint32 * 1)()
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToRead, StoredPositionNbBytesRead,
ctypes.byref(buf))
# Cast the object data to uint32
CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32))
self._coefA = CastedObjectData[0]
eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD,
ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL
# Get coefficient B
StoredPositionObject = ctypes.wintypes.WORD(8204)
StoredPositionObjectSubindex = ctypes.c_uint8(2)
StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
ObjectData = ctypes.c_void_p()
ObjectDataArray = (ctypes.c_uint32 * 1)()
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToRead, StoredPositionNbBytesRead,
ctypes.byref(buf))
# Cast the object data to uint32
CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32))
self._coefB = CastedObjectData[0]
eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD,
ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL
# These are hardcoded values I got from the LabVIEW program -- I don't think
# any documentation exists on particular object indices
StoredPositionObject = ctypes.wintypes.WORD(8204)
StoredPositionObjectSubindex = ctypes.c_uint8(3)
StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
ObjectData = ctypes.c_void_p()
ObjectDataArray = (ctypes.c_uint32 * 1)()
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToRead, StoredPositionNbBytesRead,
ctypes.byref(buf))
# Cast the object data to uint32
CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32))
self._coefC = CastedObjectData[0]
# Get coefficient D
eposlib.VCS_GetObject.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD, ctypes.wintypes.WORD,
ctypes.c_uint8, ctypes.c_void_p, ctypes.wintypes.DWORD,
ctypes.POINTER(ctypes.wintypes.DWORD), ctypes.POINTER(ctypes.wintypes.DWORD)]
eposlib.VCS_GetObject.restype = ctypes.wintypes.BOOL
# These are hardcoded values I got from the LabVIEW program -- I don't think
# any documentation exists on particular object indices
StoredPositionObject = ctypes.wintypes.WORD(8204)
StoredPositionObjectSubindex = ctypes.c_uint8(4)
StoredPositionNbBytesToRead = ctypes.wintypes.DWORD(4)
ObjectData = ctypes.c_void_p()
ObjectDataArray = (ctypes.c_uint32 * 1)()
ObjectData = ctypes.cast(ObjectDataArray, ctypes.POINTER(ctypes.c_uint32))
StoredPositionNbBytesRead = ctypes.pointer(ctypes.wintypes.DWORD(0))
ret = eposlib.VCS_GetObject(self._keyhandle, nodeID, StoredPositionObject, StoredPositionObjectSubindex,
ObjectData, StoredPositionNbBytesToRead, StoredPositionNbBytesRead,
ctypes.byref(buf))
# Cast the object data to uint32
CastedObjectData = ctypes.cast(ObjectData, ctypes.POINTER(ctypes.c_uint32))
self._coefD = CastedObjectData[0]
# print 'coefficients are %s %s %s %s' % (self._coefA, self._coefB, self._coefC, self._coefD)
self._doubleA = self._u32todouble(self._coefA)
self._doubleB = self._u32todouble(self._coefB)
self._doubleC = self._u32todouble(self._coefC)
firstHalf = np.int16(self._coefD >> 16)
secondHalf = np.int16(self._coefD & 0xffff)
# Set the minimum and maximum wavelengths for the motor
self._minwl = float(firstHalf) / 10.0
self._maxwl = float(secondHalf) / 10.0
# print 'first %s second %s' % (firstHalf, secondHalf)
# This returns '10871' and '11859' for the Sacher, which are the correct
# wavelength ranges in Angstroms
# print 'Now calculate the current wavelength position:'
self._currentwl = self._doubleA * (self._offset) ** 2.0 + self._doubleB * self._offset + self._doubleC
print('Current wavelength: %.3f nm' % self._currentwl)
print('initializing done')
return True
"""
Not sure what this is doing yet
"""
"""
Also we're done with the Sacher_EPOS() class at this point
"""
if __name__ == '__main__':
epos = Sacher_EPOS(None, b'COM3')
# epos.set_coeffs(8.34529e-12,8.49218e-5,1081.92,10840,11860)
# epos.do_get_wavelength()
# print('#1 Motor current: {}'.format(epos.get_motor_current()))
# epos.do_get_wavelength()
# print('motor position is...')
# current_pos = epos.get_motor_position()
# print('current position is {}'.format(current_pos))
# new_pos = current_pos + 10000
# epos.set_target_position(new_pos, True, True)
# print(epos.get_motor_position())
# print('#2 Motor current: {}'.format(epos.get_motor_current()))
# epos.find_home()
# epos.restore()
# time.sleep(7)
epos.do_set_wavelength(1151.5)
# epos.do_get_wavelength()
print('Motor current: {}'.format(epos.get_motor_current()))
print('Motor position: {}'.format(epos.get_motor_position()))
"""
OTHER MISC. NOTES:
increasing wavelength:
causes the square to rotate left
causes base to move to the left when square is stuck in
causes screw to loosen
causes large gold base to tighten
decreasing wavelength:
there's an overshoot when lowering wavelength
causes the square to rotate right
causes base to move to the right when square is stuck in
causes screw to tighten
causes large gold base to loosen, and also unplug the motor
Also you don't need to explicitly run epos.initialize() because there's an __init__ function which contains epos.initialize()
"""
# womp the end
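# --------------------------------------------------------------------------
# Hedged illustration (not part of the original driver): a minimal standalone
# sketch of the wavelength <-> position conversion used by do_get_wavelength()
# and do_set_wavelength() above, plus the coefficient-D unpacking done in
# initialize(). The function names are made up; A, B, C and coefD are assumed
# to be the calibration values read from the device, as in initialize().
def _wavelength_to_position_sketch(wavelength_nm, A, B, C):
    """Invert wl(pos) = A*pos**2 + B*pos + C to find the motor position."""
    vertex = -B / (2.0 * A)
    disc = B ** 2 / (4.0 * A ** 2) - (C - wavelength_nm) / A
    if disc < 0.0:
        raise ValueError('requested wavelength is outside the calibrated range')
    wl0 = C                                      # wavelength at position 0
    wl5000 = A * 5000.0 ** 2 + B * 5000.0 + C    # wavelength at position 5000
    if wl0 > wl5000:
        # Wavelength decreases with position: take the root below the vertex
        return int(round(vertex - disc ** 0.5))
    # Wavelength increases with position: take the root above the vertex
    return int(round(vertex + disc ** 0.5))

def _wavelength_range_sketch(coefD):
    """Unpack coefficient D: its two 16-bit halves hold the wavelength range in Angstroms."""
    # initialize() uses np.int16 for the halves; plain masking is equivalent for the
    # positive values seen in practice (e.g. 10871 and 11859 -> 1087.1 nm, 1185.9 nm).
    minwl = ((coefD >> 16) & 0xffff) / 10.0
    maxwl = (coefD & 0xffff) / 10.0
    return minwl, maxwl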
| 41.532826 | 147 | 0.625644 | … (remaining per-file quality-signal columns) |
165616f6329f47d7fc22c8cc1eb0970f40d768d9 | 1,652 | py | Python | tools/generate_lst.py | haotianliu001/HRNet-Lesion | 9dae108879456e084b2200e39d7e58c1c08c2b16 | ["MIT"] | null | null | null | tools/generate_lst.py | haotianliu001/HRNet-Lesion | 9dae108879456e084b2200e39d7e58c1c08c2b16 | ["MIT"] | null | null | null | tools/generate_lst.py | haotianliu001/HRNet-Lesion | 9dae108879456e084b2200e39d7e58c1c08c2b16 | ["MIT"] | null | null | null |
import argparse
import os
image_dir = 'image'
label_dir = 'label'
splits = ['train', 'val', 'test']
image_dirs = [
'image/{}',
'image/{}_crop'
]
label_dirs = [
'label/{}/annotations',
'label/{}/annotations_crop',
]
def generate(root):
assert len(image_dirs) == len(label_dirs)
for split in splits:
for image_path, label_path in zip(image_dirs, label_dirs):
image_path = image_path.format(split)
label_path = label_path.format(split)
if split != 'train' and image_path.endswith('_crop'):
label_path = label_path.replace('_crop', '')
if not os.path.exists(os.path.join(root, label_path)):
continue
lines = []
for label in os.listdir(os.path.join(root, label_path)):
image = label.replace('.png', '.jpg')
if os.path.exists(os.path.join(root, image_path, image)):
lines.append('{} {}\n'.format(os.path.join(image_path, image), os.path.join(label_path, label)))
else:
print('not found: {}'.format(os.path.join(root, image_path, image)))
print(image_path, label_path, len(lines))
output_file = '{}.lst'.format(image_path.split('/')[1])
with open(os.path.join(root, output_file), 'w') as f:
f.writelines(lines)
print(f'Save to {os.path.join(root, output_file)}\n')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('root', type=str, help='path of dataset root')
args = parser.parse_args()
generate(args.root)
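# --------------------------------------------------------------------------
# Hedged illustration (not part of the original script): a minimal smoke test
# for generate(). It builds a one-image dummy tree in a temporary directory and
# runs the generator against it; the file names and the helper name are made up.
def _smoke_test_generate():
    import tempfile
    root = tempfile.mkdtemp()
    os.makedirs(os.path.join(root, 'image', 'train'))
    os.makedirs(os.path.join(root, 'label', 'train', 'annotations'))
    open(os.path.join(root, 'image', 'train', '0001.jpg'), 'w').close()
    open(os.path.join(root, 'label', 'train', 'annotations', '0001.png'), 'w').close()
    # Writes <root>/train.lst containing the single line
    # "image/train/0001.jpg label/train/annotations/0001.png"
    generate(root)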
| 30.036364 | 116 | 0.579903 | … (remaining per-file quality-signal columns) |
1658161ce6f6978b51d0a1fdd4a0ce93c2160124 | 897 | py | Python | examples/example.py | f-dangel/unfoldNd | 63e9abc4867d8678c2ac00da567dc106e9f6f2c7 | ["MIT"] | 21 | 2021-03-04T04:56:20.000Z | 2022-03-31T11:15:28.000Z | examples/example.py | f-dangel/unfoldNd | 63e9abc4867d8678c2ac00da567dc106e9f6f2c7 | ["MIT"] | 12 | 2021-02-16T16:16:23.000Z | 2021-05-28T06:00:41.000Z | examples/example.py | f-dangel/unfoldNd | 63e9abc4867d8678c2ac00da567dc106e9f6f2c7 | ["MIT"] | 1 | 2021-11-04T12:52:19.000Z | 2021-11-04T12:52:19.000Z |
"""How to use ``unfoldNd``. A comparison with ``torch.nn.Unfold``."""
# imports, make this example deterministic
import torch
import unfoldNd
torch.manual_seed(0)
# random batched RGB 32x32 image-shaped input tensor of batch size 64
inputs = torch.randn((64, 3, 32, 32))
# module hyperparameters
kernel_size = 3
dilation = 1
padding = 1
stride = 2
# both modules accept the same arguments and perform the same operation
torch_module = torch.nn.Unfold(
kernel_size, dilation=dilation, padding=padding, stride=stride
)
lib_module = unfoldNd.UnfoldNd(
kernel_size, dilation=dilation, padding=padding, stride=stride
)
# forward pass
torch_outputs = torch_module(inputs)
lib_outputs = lib_module(inputs)
# check
if torch.allclose(torch_outputs, lib_outputs):
print("✔ Outputs of torch.nn.Unfold and unfoldNd.UnfoldNd match.")
else:
raise AssertionError("❌ Outputs don't match")
| 24.916667 | 71 | 0.753623 | … (remaining per-file quality-signal columns) |
1658fa9a24f0d70843df0f950d0081f1ffadc11b | 797 | py | Python | src/pretix/helpers/escapejson.py | NicsTr/pretix | e6d2380d9ed1836cc64a688b2be20d00a8500eab | ["ECL-2.0", "Apache-2.0"] | 1 | 2020-04-25T00:11:00.000Z | 2020-04-25T00:11:00.000Z | src/pretix/helpers/escapejson.py | NicsTr/pretix | e6d2380d9ed1836cc64a688b2be20d00a8500eab | ["ECL-2.0", "Apache-2.0"] | null | null | null | src/pretix/helpers/escapejson.py | NicsTr/pretix | e6d2380d9ed1836cc64a688b2be20d00a8500eab | ["ECL-2.0", "Apache-2.0"] | null | null | null |
from django.utils.encoding import force_str
from django.utils.functional import keep_lazy
from django.utils.safestring import SafeText, mark_safe
_json_escapes = {
ord('>'): '\\u003E',
ord('<'): '\\u003C',
ord('&'): '\\u0026',
}
_json_escapes_attr = {
ord('>'): '\\u003E',
ord('<'): '\\u003C',
ord('&'): '\\u0026',
ord('"'): '"',
ord("'"): ''',
ord("="): '=',
}
@keep_lazy(str, SafeText)
def escapejson(value):
"""Hex encodes characters for use in a application/json type script."""
return mark_safe(force_str(value).translate(_json_escapes))
@keep_lazy(str, SafeText)
def escapejson_attr(value):
"""Hex encodes characters for use in a html attributw script."""
return mark_safe(force_str(value).translate(_json_escapes_attr))
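# --------------------------------------------------------------------------
# Hedged illustration (not part of the original module): a minimal usage sketch
# showing what the two helpers produce. The sample strings are made up.
if __name__ == "__main__":
    # For values embedded in an inline <script type="application/json"> block:
    print(escapejson('</script><script>alert(1)</script>'))
    # prints: \u003C/script\u003E\u003Cscript\u003Ealert(1)\u003C/script\u003E
    # For values embedded in an HTML attribute:
    print(escapejson_attr('a="b"'))
    # prints: a&#61;&#34;b&#34;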
| 25.709677 | 75 | 0.6399 | … (remaining per-file quality-signal columns) |
1659ed45e2efb246708ee177c0a31eb71473cb9b | 1,813 | py | Python | pyxley/charts/plotly/base.py | snowind/pyxley | cff9e50b8d80b9794c6907355e541f166959cd6c | ["MIT"] | 2,536 | 2015-06-26T20:12:30.000Z | 2022-03-01T07:26:44.000Z | pyxley/charts/plotly/base.py | zhiaozhou/pyxley | 2dab00022d977d986169cd8a629b3a2f91be893f | ["MIT"] | 51 | 2015-07-17T14:16:43.000Z | 2021-07-09T21:34:36.000Z | pyxley/charts/plotly/base.py | zhiaozhou/pyxley | 2dab00022d977d986169cd8a629b3a2f91be893f | ["MIT"] | 335 | 2015-07-16T20:22:00.000Z | 2022-02-25T07:18:15.000Z |
from ..charts import Chart
from flask import jsonify, request
_BASE_CONFIG = {
"showLink": False,
"displaylogo": False,
"modeBarButtonsToRemove": ["sendDataToCloud"]
}
class PlotlyAPI(Chart):
""" Base class for Plotly.js API
This class is used to create charts using the plotly.js api
To keep this general, this chart does not have a default
method of transmitting data. Instead the user must supply
a route_func method.
"""
def __init__(self, chart_id, url, route_func, init_params={}):
options = {
"chartid": chart_id,
"url": url,
"params": init_params
}
super(PlotlyAPI, self).__init__("PlotlyAPI", options, route_func)
@staticmethod
def line_plot(df, xypairs, mode, layout={}, config=_BASE_CONFIG):
""" basic line plot
dataframe to json for a line plot
Args:
df (pandas.DataFrame): input dataframe
xypairs (list): list of tuples containing column names
mode (str): plotly.js mode (e.g. lines)
layout (dict): layout parameters
config (dict): config parameters
"""
if df.empty:
return {
"x": [],
"y": [],
"mode": mode
}
_data = []
for x, y in xypairs:
if (x in df.columns) and (y in df.columns):
_data.append(
{
"x": df[x].values.tolist(),
"y": df[y].values.tolist(),
"mode": mode
}
)
return {
"data": _data,
"layout": layout,
"config": config
}
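# --------------------------------------------------------------------------
# Hedged illustration (not part of the original module): a minimal sketch of
# building a line-plot payload with the static helper above. The DataFrame
# contents and column names are made up.
if __name__ == "__main__":
    import pandas as pd
    df = pd.DataFrame({"t": [0, 1, 2, 3], "y": [0, 1, 4, 9]})
    payload = PlotlyAPI.line_plot(df, [("t", "y")], "lines")
    # payload == {"data": [{"x": [0, 1, 2, 3], "y": [0, 1, 4, 9], "mode": "lines"}],
    #             "layout": {}, "config": _BASE_CONFIG}
    print(payload["data"][0])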
| 27.059701 | 73 | 0.492554 | … (remaining per-file quality-signal columns) |
165b5afa3e28ca226423cdaac8f6894170030430 | 576 | py | Python | pyqt/getting_started/close_window.py | CospanDesign/python | 9f911509aae7abd9237c14a4635294c7719c9129 | ["MIT"] | 5 | 2015-12-12T20:16:45.000Z | 2020-02-21T19:50:31.000Z | pyqt/getting_started/close_window.py | CospanDesign/python | 9f911509aae7abd9237c14a4635294c7719c9129 | ["MIT"] | null | null | null | pyqt/getting_started/close_window.py | CospanDesign/python | 9f911509aae7abd9237c14a4635294c7719c9129 | ["MIT"] | 2 | 2020-06-01T06:27:06.000Z | 2022-03-10T13:21:03.000Z |
#!/usr/bin/python
import sys
from PyQt4 import QtGui
from PyQt4 import QtCore
class Example(QtGui.QWidget):
def __init__(self):
super(Example, self).__init__()
self.initUI()
def initUI(self):
qbtn = QtGui.QPushButton('Quit', self)
qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit)
qbtn.resize(qbtn.sizeHint())
self.setGeometry(300, 300, 250, 150)
self.setWindowTitle('Quit Button')
self.show()
def main():
app = QtGui.QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
| 19.2 | 65 | 0.682292 | … (remaining per-file quality-signal columns) |
165bdb25d95d9e2ecf502312358485ebe1274976 | 1,948 | py | Python | generator/contact.py | rizzak/python_training | 38bbe5d7e38892e8dcc28caeae1481b98cce7356 | ["Apache-2.0"] | null | null | null | generator/contact.py | rizzak/python_training | 38bbe5d7e38892e8dcc28caeae1481b98cce7356 | ["Apache-2.0"] | null | null | null | generator/contact.py | rizzak/python_training | 38bbe5d7e38892e8dcc28caeae1481b98cce7356 | ["Apache-2.0"] | null | null | null |
import jsonpickle
import random
import string
from model.contact import Contact
import os.path
import getopt
import sys
try:
opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of contacts", "file"])
except getopt.GetoptError as err:
    print(err)  # getopt has no usage() helper; print the parse error
sys.exit(2)
n = 5
f = "data/contacts.json"
for o, a in opts:
if o == "-n":
n = int(a)
elif o == "-f":
f = a
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits + " "*10
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
testdata = [Contact(first_name="", middle_name="", last_name="", nickname="", title="", company="", address="",
home_tel="", mobile_tel="", work_tel="", fax="", email="", homepage="", birthday="",
anniversary="", secondary_address="", secondary_tel="", notes="")] + [
Contact(first_name=random_string('first_name', 10), middle_name=random_string('middle_name', 10), last_name=random_string('last_name', 10),
            nickname=random_string('nickname', 10), title=random_string('title', 10), company=random_string('company', 10),
address=random_string('address', 10), home_tel=random_string('home_tel', 10), mobile_tel=random_string('mobile_tel', 10),
work_tel=random_string('work_tel', 10), fax=random_string('fax', 10), email=random_string('email', 10),
homepage=random_string('homepage', 10), birthday=random_string('birthday', 10), anniversary=random_string('anniversary', 10),
secondary_address=random_string('secondary_address', 10), secondary_tel=random_string('secondary_tel', 10), notes=random_string('notes', 10))
for i in range(5)
]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file , "w") as out:
jsonpickle.set_encoder_options("json", indent=2)
out.write(jsonpickle.encode(testdata))
| 40.583333 | 153 | 0.664271 | … (remaining per-file quality-signal columns) |