Dataset schema (column: type)

- hexsha: string | size: int64 | ext: string | lang: string
- Repository metadata, one block each for max_stars, max_issues, and max_forks: *_repo_path: string | *_repo_name: string | *_repo_head_hexsha: string | *_repo_licenses: list | *_count: int64 | *_event_min_datetime: string | *_event_max_datetime: string
- content: string
- avg_line_length: float64 | max_line_length: int64 | alphanum_fraction: float64
- 41 quality-signal columns carrying the `_quality_signal` suffix, all float64 except `qsc_codepython_cate_var_zero_quality_signal` (bool): qsc_code_{num_words, num_chars, mean_word_length, frac_words_unique, frac_chars_top_2grams, frac_chars_top_3grams, frac_chars_top_4grams, frac_chars_dupe_5grams through frac_chars_dupe_10grams, frac_chars_replacement_symbols, frac_chars_digital, frac_chars_whitespace, size_file_byte, num_lines, num_chars_line_max, num_chars_line_mean, frac_chars_alphabet, frac_chars_comments, cate_xml_start, frac_lines_dupe_lines, cate_autogen, frac_lines_long_string, frac_chars_string_length, frac_chars_long_word_length, frac_lines_string_concat, cate_encoded_data, frac_chars_hex_words, frac_lines_prompt_comments, frac_lines_assert} plus qsc_codepython_{cate_ast, frac_lines_func_ratio, cate_var_zero, frac_lines_pass, frac_lines_import, frac_lines_simplefunc, score_lines_no_logic, frac_lines_print}
- The same 41 names repeated without the `_quality_signal` suffix, typed int64, except `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat` (null)
- effective: string | hits: int64

One record per file follows; each file's quality-signal values are listed after its content.
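The records below can be loaded and filtered directly on these columns. A minimal pandas sketch; the parquet file name is a placeholder for whatever shard this dump came from, not part of the dataset:

```python
import pandas as pd

# Placeholder path: substitute the actual shard file.
df = pd.read_parquet("code_shard.parquet")

# Keep Python files that parse (cate_ast == 1) and are not dominated
# by duplicated n-grams or comments.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_codepython_cate_ast_quality_signal"] == 1)
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.1)
    & (df["qsc_code_frac_chars_comments_quality_signal"] < 0.5)
)
print(df.loc[mask, ["max_stars_repo_name", "size", "avg_line_length"]].head())
```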

---
hexsha: 8d0131fd0dd77260df8028fcae8b87ef1c39cda9 | size: 1,175 | ext: py | lang: Python
path: tests/integration_tests/test_suassystem_interop_client.py | repo (identical across the stars/issues/forks blocks): liyu711/SUAS @ 2f6592fc2ab316475eeabe2f4828e5ba5c1a4b0b | licenses: ["MIT"]
stars, issues, and forks: counts and event datetimes all null
content:
import unittest
import interop
from SUASSystem import InteropClientConverter
from SUASSystem import Location
class SDATestCase(unittest.TestCase):
def setUp(self):
self.interop_client = InteropClientConverter()
def test_submit_target(self):
compiled_target_info = {
"latitude" : 38,
"longitude" : -77,
"orientation" : "s",
"shape" : "circle",
"background_color" : "white",
"alphanumeric" : "ABC",
"alphanumeric_color" : "black",
}
target_image_relative_path = "tests/images/target.PNG"
target_id = self.interop_client.post_standard_target(compiled_target_info, target_image_relative_path)
def test_submit_position(self):
"""
Test POST of position data
"""
self.interop_client.post_telemetry(Location(38, 76, 100), 350.0)
def test_get_obstacles(self):
"""
Test GET of obstacles
"""
self.interop_client.get_obstacles()
def test_get_active_mission(self):
"""
Test GET of active mission
"""
self.interop_client.get_active_mission()

stats: avg_line_length 27.97619 | max_line_length 110 | alphanum_fraction 0.621277
qsc_code_*_quality_signal: num_words 122 | num_chars 1,175 | mean_word_length 5.713115 | frac_words_unique 0.442623 | frac_chars_top_2grams 0.07891 | frac_chars_top_3grams 0.121951 | frac_chars_top_4grams 0.065997 | frac_chars_dupe_5grams 0 | frac_chars_dupe_6grams 0 | frac_chars_dupe_7grams 0 | frac_chars_dupe_8grams 0 | frac_chars_dupe_9grams 0 | frac_chars_dupe_10grams 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.017794 | frac_chars_whitespace 0.282553 | size_file_byte 1,175 | num_lines 41 | num_chars_line_max 111 | num_chars_line_mean 28.658537 | frac_chars_alphabet 0.809015 | frac_chars_comments 0.06383 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.118447 | frac_chars_long_word_length 0.02233 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.2 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.16 | frac_lines_simplefunc 0 | score_lines_no_logic 0.4 | frac_lines_print 0
unsuffixed qsc_* duplicate columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective: 1 | hits: 0

---
hexsha: 8d061cbc92ba43ecf28dbdbaa3d14137c3609085 | size: 3,244 | ext: py | lang: Python
path: xlremed/dataset/docred.py | repo (identical across the stars/issues/forks blocks): osainz59/XLREMed @ d7fb0c322126676fe95f71ced0014076cb7a9e4d | licenses: ["Apache-2.0"]
max_stars_count: 4 (2020-10-01T16:58:24.000Z to 2021-11-30T09:29:03.000Z) | issues and forks: counts and event datetimes null
content:
import os
import json
#import fire
from collections import defaultdict
from pprint import pprint
from itertools import product
from .dataset import Dataset
class DocRED(Dataset):
def __init__(self, path):
super(DocRED, self).__init__(name='DocRED')
self.path = path
self._init()
self.train_data = None
self.val_data = None
self.test_data = None
def _init(self):
self.rel_info = json.load(open(os.path.join(self.path, 'rel_info.json')))
self.rel2id = {v: i for i, v in enumerate(self.rel_info.keys())}
self.train_path = os.path.join(self.path, 'train_annotated.json')
self.train_dist_path = os.path.join(self.path, 'train_distant.json')
self.dev_path = os.path.join(self.path, 'dev.json')
self.test_path = os.path.join(self.path, 'test.json')
def _read_instances(self, path, labels=False):
with open(path, 'rt') as in_file:
data = json.load(in_file)
output = []
for i, instance in enumerate(data):
text = ""
            sentences_lengths = []
l = 0
for sent in instance['sents']:
                sentences_lengths.append(l)
l += len(sent)
text += " " + " ".join(sent)
entities = []
ent2id = defaultdict(list)
for i, ent in enumerate(instance['vertexSet']):
idx = f"#{i}"
for elem in ent:
                    entities.append( (idx, elem['name'], sentences_lengths[elem['sent_id']] + elem['pos'][0],
                                      sentences_lengths[elem['sent_id']] + elem['pos'][1], elem['type']) )
ent2id[f"{elem['sent_id']}#{i}"].append(len(entities) - 1)
if labels:
relation_facts = []
for label in instance['labels']:
heads, tails = [], []
for evidence in label['evidence']:
for h in ent2id.get(f"{evidence}#{label['h']}", []):
heads.append(h)
for t in ent2id.get(f"{evidence}#{label['t']}", []):
tails.append(t)
for head, tail in product(heads, tails):
relation_facts.append( (self.rel2id[label['r']], head, tail) )
text = self.tokenizer.encode(text)
output.append( (text, entities) if not labels else (text, entities, relation_facts) )
return output
def get_train(self):
if not self.train_data:
self.train_data = self._read_instances(self.train_path, labels=True)
return self.train_data
def get_val(self):
if not self.val_data:
self.val_data = self._read_instances(self.dev_path, labels=True)
return self.val_data
def get_test(self):
if not self.test_data:
self.test_data = self._read_instances(self.test_path, labels=False)
return self.test_data
def test():
dataset = DocRED('data/DocRED')
for instance in dataset.get_train():
pprint(instance)
break
if __name__ == "__main__":
#fire.Fire(test)
test()

stats: avg_line_length 33.791667 | max_line_length 109 | alphanum_fraction 0.544698
qsc_code_*_quality_signal: num_words 391 | num_chars 3,244 | mean_word_length 4.347826 | frac_words_unique 0.235294 | frac_chars_top_2grams 0.037647 | frac_chars_top_3grams 0.029412 | frac_chars_top_4grams 0.041176 | frac_chars_dupe_5grams 0.208824 | frac_chars_dupe_6grams 0.125882 | frac_chars_dupe_7grams 0.070588 | frac_chars_dupe_8grams 0 | frac_chars_dupe_9grams 0 | frac_chars_dupe_10grams 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.004606 | frac_chars_whitespace 0.330764 | size_file_byte 3,244 | num_lines 96 | num_chars_line_max 110 | num_chars_line_mean 33.791667 | frac_chars_alphabet 0.778443 | frac_chars_comments 0.008015 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.069941 | frac_chars_long_word_length 0.020827 | frac_lines_string_concat 0.013514 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.094595 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.081081 | frac_lines_simplefunc 0 | score_lines_no_logic 0.243243 | frac_lines_print 0.027027
unsuffixed qsc_* duplicate columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective: 1 | hits: 0

---
hexsha: 8d06a575c43d8f20bdee54e0f28161929a56cf8b | size: 7,955 | ext: py | lang: Python
path: swyft/networks/classifier.py | licenses: ["MIT"]
max_stars: undark-lab/swyft @ 50aa524e2f3a2b3d1354543178ff72bc7f055a35 | count: 104 | 2020-11-26T09:46:03.000Z to 2022-03-18T06:22:03.000Z
max_issues: cweniger/swyft @ 2c0ed514622a37e8ec4e406b99a8327ecafb7ab4 | count: 83 | 2021-03-02T15:54:26.000Z to 2022-03-10T08:09:05.000Z
max_forks: undark-lab/swyft @ 50aa524e2f3a2b3d1354543178ff72bc7f055a35 | count: 10 | 2021-02-04T14:27:36.000Z to 2022-03-31T17:39:34.000Z
content:
from abc import ABC, abstractmethod
from typing import Dict, Hashable, Tuple
import torch
import torch.nn as nn
import swyft
import swyft.utils
from swyft.networks.channelized import ResidualNetWithChannel
from swyft.networks.standardization import (
OnlineDictStandardizingLayer,
OnlineStandardizingLayer,
)
from swyft.types import Array, MarginalIndex, ObsShapeType
class HeadTailClassifier(ABC):
"""Abstract class which ensures that child classifier networks will function with swyft"""
@abstractmethod
def head(self, observation: Dict[Hashable, torch.Tensor]) -> torch.Tensor:
"""convert the observation into a tensor of features
Args:
observation: observation type
Returns:
a tensor of features which can be utilized by tail
"""
pass
@abstractmethod
def tail(self, features: torch.Tensor, parameters: torch.Tensor) -> torch.Tensor:
"""finish the forward pass using features computed by head
Args:
features: output of head
parameters: the parameters normally given to forward pass
Returns:
the same output as `forward(observation, parameters)`
"""
pass
class ObservationTransform(nn.Module):
def __init__(
self,
observation_key: Hashable,
observation_shapes: ObsShapeType,
online_z_score: bool,
) -> None:
super().__init__()
self.observation_key = observation_key
self.observation_shapes = observation_shapes
self.flatten = nn.Flatten()
if online_z_score:
self.online_z_score = OnlineDictStandardizingLayer(self.observation_shapes)
else:
self.online_z_score = nn.Identity()
def forward(self, observation: Dict[Hashable, torch.Tensor]) -> torch.Tensor:
z_scored_observation = self.online_z_score(observation)
return self.flatten(z_scored_observation[self.observation_key]) # B, O
@property
def n_features(self) -> int:
with torch.no_grad():
fabricated_observation = {
key: torch.rand(2, *shape)
for key, shape in self.observation_shapes.items()
}
_, n_features = self.forward(fabricated_observation).shape
return n_features
class ParameterTransform(nn.Module):
def __init__(
self, n_parameters: int, marginal_indices: MarginalIndex, online_z_score: bool
) -> None:
super().__init__()
self.register_buffer(
"marginal_indices",
torch.tensor(swyft.utils.tupleize_marginal_indices(marginal_indices)),
)
self.n_parameters = torch.Size([n_parameters])
if online_z_score:
self.online_z_score = OnlineStandardizingLayer(self.n_parameters)
else:
self.online_z_score = nn.Identity()
def forward(self, parameters: torch.Tensor) -> torch.Tensor:
parameters = self.online_z_score(parameters)
return self.get_marginal_block(parameters, self.marginal_indices) # B, M, P
@property
def marginal_block_shape(self) -> Tuple[int, int]:
return self.get_marginal_block_shape(self.marginal_indices)
@staticmethod
def is_marginal_block_possible(marginal_indices: MarginalIndex) -> bool:
marginal_indices = swyft.utils.tupleize_marginal_indices(marginal_indices)
        # A valid marginal block requires every tuple to have the same length.
        return all(len(marginal_indices[0]) == len(mi) for mi in marginal_indices)
@classmethod
def get_marginal_block_shape(
cls, marginal_indices: MarginalIndex
) -> Tuple[int, int]:
marginal_indices = swyft.utils.tupleize_marginal_indices(marginal_indices)
assert cls.is_marginal_block_possible(
marginal_indices
), f"Each tuple in {marginal_indices} must have the same length."
return len(marginal_indices), len(marginal_indices[0])
@classmethod
def get_marginal_block(
cls, parameters: Array, marginal_indices: MarginalIndex
) -> torch.Tensor:
depth = swyft.utils.depth(marginal_indices)
tuple_marginal_indices = swyft.utils.tupleize_marginal_indices(marginal_indices)
assert cls.is_marginal_block_possible(
tuple_marginal_indices
), f"Each tuple in {tuple_marginal_indices} must have the same length."
if depth in [0, 1, 2]:
return torch.stack(
[parameters[..., mi] for mi in tuple_marginal_indices], dim=1
)
else:
raise ValueError(
f"{marginal_indices} must be of the form (a) 2, (b) [2, 3], (c) [2, [1, 3]], or (d) [[0, 1], [1, 2]]."
)
class MarginalClassifier(nn.Module):
def __init__(
self,
n_marginals: int,
n_combined_features: int,
hidden_features: int,
num_blocks: int,
dropout_probability: float = 0.0,
use_batch_norm: bool = True,
) -> None:
super().__init__()
self.n_marginals = n_marginals
self.n_combined_features = n_combined_features
self.net = ResidualNetWithChannel(
channels=self.n_marginals,
in_features=self.n_combined_features,
out_features=1,
hidden_features=hidden_features,
num_blocks=num_blocks,
dropout_probability=dropout_probability,
use_batch_norm=use_batch_norm,
)
def forward(
self, features: torch.Tensor, marginal_block: torch.Tensor
) -> torch.Tensor:
fb = features.unsqueeze(1).expand(-1, self.n_marginals, -1) # B, M, O
combined = torch.cat([fb, marginal_block], dim=2) # B, M, O + P
return self.net(combined).squeeze(-1) # B, M
class Network(nn.Module, HeadTailClassifier):
def __init__(
self,
observation_transform: nn.Module,
parameter_transform: nn.Module,
marginal_classifier: nn.Module,
) -> None:
super().__init__()
self.observation_transform = observation_transform
self.parameter_transform = parameter_transform
self.marginal_classifier = marginal_classifier
def forward(
self, observation: Dict[Hashable, torch.Tensor], parameters: torch.Tensor
) -> torch.Tensor:
features = self.observation_transform(observation) # B, O
marginal_block = self.parameter_transform(parameters) # B, M, P
return self.marginal_classifier(features, marginal_block) # B, M
def head(self, observation: Dict[Hashable, torch.Tensor]) -> torch.Tensor:
return self.observation_transform(observation) # B, O
def tail(self, features: torch.Tensor, parameters: torch.Tensor) -> torch.Tensor:
marginal_block = self.parameter_transform(parameters) # B, M, P
return self.marginal_classifier(features, marginal_block) # B, M
def get_marginal_classifier(
observation_key: Hashable,
marginal_indices: MarginalIndex,
observation_shapes: ObsShapeType,
n_parameters: int,
hidden_features: int,
num_blocks: int,
observation_online_z_score: bool = True,
parameter_online_z_score: bool = True,
) -> nn.Module:
observation_transform = ObservationTransform(
observation_key, observation_shapes, online_z_score=observation_online_z_score
)
n_observation_features = observation_transform.n_features
parameter_transform = ParameterTransform(
n_parameters, marginal_indices, online_z_score=parameter_online_z_score
)
n_marginals, n_block_parameters = parameter_transform.marginal_block_shape
marginal_classifier = MarginalClassifier(
n_marginals,
n_observation_features + n_block_parameters,
hidden_features=hidden_features,
num_blocks=num_blocks,
)
return Network(
observation_transform,
parameter_transform,
marginal_classifier,
)
if __name__ == "__main__":
pass

stats: avg_line_length 34.737991 | max_line_length 118 | alphanum_fraction 0.669893
qsc_code_*_quality_signal: num_words 890 | num_chars 7,955 | mean_word_length 5.710112 | frac_words_unique 0.176404 | frac_chars_top_2grams 0.091499 | frac_chars_top_3grams 0.03778 | frac_chars_top_4grams 0.034632 | frac_chars_dupe_5grams 0.344943 | frac_chars_dupe_6grams 0.297324 | frac_chars_dupe_7grams 0.256789 | frac_chars_dupe_8grams 0.214286 | frac_chars_dupe_9grams 0.153089 | frac_chars_dupe_10grams 0.140693 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.004169 | frac_chars_whitespace 0.246135 | size_file_byte 7,955 | num_lines 228 | num_chars_line_max 119 | num_chars_line_mean 34.890351 | frac_chars_alphabet 0.843255 | frac_chars_comments 0.067505 | cate_xml_start 0 | frac_lines_dupe_lines 0.338983 | cate_autogen 0 | frac_lines_long_string 0.00565 | frac_chars_string_length 0.033826 | frac_chars_long_word_length 0.003287 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0.011299
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.101695 | cate_var_zero false | frac_lines_pass 0.016949 | frac_lines_import 0.050847 | frac_lines_simplefunc 0.011299 | score_lines_no_logic 0.248588 | frac_lines_print 0
unsuffixed qsc_* duplicate columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective: 1 | hits: 0

---
hexsha: 8d09e1e56a5c8747b2ab32acaa6c0733d2870eb2 | size: 12,386 | ext: py | lang: Python
path: mstrio/distribution_services/contact/contact.py | repo (identical across the stars/issues/forks blocks): czyzq/mstrio-py @ b25fd19936b659d503a7eaaa96c8d0b4e118cb7c | licenses: ["Apache-2.0"]
max_stars_count: 1 (2022-02-15T13:18:04.000Z, min and max identical) | issues and forks: counts and event datetimes null
content:
from collections import defaultdict
from enum import auto
from typing import Iterable, List, Optional, TYPE_CHECKING, Union
from mstrio import config
from mstrio.api import contacts
from mstrio.distribution_services.contact_group import ContactGroup
from mstrio.distribution_services.device import Device
from mstrio.utils.entity import auto_match_args_entity, DeleteMixin, EntityBase
from mstrio.utils.enum_helper import AutoName
from mstrio.utils.helper import (
camel_to_snake, delete_none_values, Dictable, fetch_objects, get_objects_id
)
from mstrio.users_and_groups.user import User
if TYPE_CHECKING:
from mstrio.connection import Connection
class ContactDeliveryType(AutoName):
EMAIL = auto()
FILE = auto()
PRINTER = auto()
FTP = auto()
MOBILE_ANDROID = auto()
MOBILE_IPHONE = auto()
MOBILE_IPAD = auto()
UNSUPPORTED = auto()
class ContactAddress(Dictable):
"""Representation of contact address object
Attributes:
id: id of contact address, optional
name: contact address' name
physical_address: physical address of contact
delivery_type: object of type ContactDeliveryType
is_default: specifies if address is default, optional,
default value: False
device: instance of Device or string (containing device's id),
if device is a string, connection is required
connection: instance of Connection, optional,
is required if device is string
"""
_FROM_DICT_MAP = {'delivery_type': ContactDeliveryType, 'device': Device.from_dict}
def __init__(self, name: str, physical_address: str,
delivery_type: Union[ContactDeliveryType, str], device: Union['Device', str],
id: Optional[str] = None, is_default: bool = False,
connection: Optional['Connection'] = None):
self.id = id
self.name = name
self.physical_address = physical_address
self.is_default = is_default
self.delivery_type = delivery_type if isinstance(
delivery_type, ContactDeliveryType) else ContactDeliveryType(delivery_type)
if isinstance(device, Device):
self.device = device
else:
if not connection:
raise ValueError('Argument: connection is required if device is a string')
self.device = Device(connection, id=device)
def __repr__(self) -> str:
param_dict = auto_match_args_entity(self.__init__, self, exclude=['self'],
include_defaults=False)
params = [
f"{param}={self.delivery_type}"
if param == 'delivery_type' else f'{param}={repr(value)}'
for param, value in param_dict.items()
]
formatted_params = ', '.join(params)
return f'ContactAddress({formatted_params})'
def to_dict(self, camel_case=True) -> dict:
result = {
'name': self.name,
'id': self.id,
'physicalAddress': self.physical_address,
'deliveryType': self.delivery_type.value,
'deviceId': self.device.id,
'deviceName': self.device.name,
'isDefault': self.is_default
}
return result if camel_case else camel_to_snake(result)
@classmethod
def from_dict(cls, source, connection, to_snake_case=True) -> 'ContactAddress':
source = source.copy()
device_id = source.pop('deviceId')
device_name = source.pop('deviceName')
source['device'] = {'id': device_id, 'name': device_name}
return super().from_dict(source, connection, to_snake_case)
def list_contacts(connection: 'Connection', to_dictionary: bool = False,
limit: Optional[int] = None, **filters) -> Union[List['Contact'], List[dict]]:
"""Get all contacts as list of Contact objects or dictionaries.
Optionally filter the contacts by specifying filters.
Args:
connection: MicroStrategy connection object
to_dictionary: If True returns a list of contact dicts,
otherwise returns a list of contact objects
limit: limit the number of elements returned. If `None` (default), all
objects are returned.
**filters: Available filter parameters:
['id', 'name', 'description', 'enabled']
"""
return Contact._list_contacts(
connection=connection,
to_dictionary=to_dictionary,
limit=limit,
**filters
)
class Contact(EntityBase, DeleteMixin):
"""Object representation of Microstrategy Contact object
Attributes:
name: contact's name
id: contact's id
description: contact's description
enabled: specifies if a contact is enabled
linked_user: user linked to contact, instance of User
contact_addresses: list of contact's addresses,
instances of ContactAddress
memberships: list of Contact Groups that the contact belongs to
connection: instance of Connection class, represents connection
to MicroStrategy Intelligence Server
"""
_FROM_DICT_MAP = {
**EntityBase._FROM_DICT_MAP,
'linked_user': User.from_dict,
'contact_addresses': [ContactAddress.from_dict],
'memberships': [ContactGroup.from_dict],
}
_API_GETTERS = {
('id', 'name', 'description', 'enabled', 'linked_user', 'memberships',
'contact_addresses'): contacts.get_contact
}
_API_DELETE = staticmethod(contacts.delete_contact)
_API_PATCH = {
('name', 'description', 'enabled', 'linked_user', 'contact_addresses', 'memberships'):
(contacts.update_contact, 'put')
}
_PATCH_PATH_TYPES = {
'name': str,
'description': str,
'enabled': bool,
'linked_user': dict,
'contact_addresses': list,
'memberships': list
}
def __init__(self, connection: 'Connection', id: Optional[str] = None,
name: Optional[str] = None):
"""Initialize Contact object by passing id or name.
When `id` is provided, name is omitted.
Args:
connection: MicroStrategy connection object
id: ID of Contact
name: name of Contact
"""
if id is None and name is None:
raise ValueError("Please specify either 'id' or 'name' parameter in the constructor.")
if id is None:
result = self._list_contacts(
connection=connection,
name=name,
to_dictionary=True
)
if result:
object_data = result[0]
object_data['connection'] = connection
self._init_variables(**object_data)
else:
raise ValueError(f"There is no Contact named: '{name}'")
else:
super().__init__(connection, id)
def _init_variables(self, **kwargs) -> None:
super()._init_variables(**kwargs)
self.description = kwargs.get('description')
self.enabled = kwargs.get('enabled')
linked_user = kwargs.get("linked_user")
self.linked_user = User.from_dict(linked_user, self.connection) if linked_user else None
addresses = kwargs.get('contact_addresses')
self.contact_addresses = [
ContactAddress.from_dict(address, self.connection)
for address in addresses
] if addresses else None
memberships = kwargs.get('memberships')
self.memberships = [
ContactGroup.from_dict(m, self.connection)
for m in memberships
] if memberships else None
@classmethod
def create(cls, connection: 'Connection', name: str, linked_user: Union['User', str],
contact_addresses: Iterable[Union['ContactAddress', dict]],
description: Optional[str] = None, enabled: bool = True) -> 'Contact':
"""Create a new contact.
Args:
connection: MicroStrategy connection object
returned by `connection.Connection()`
name: contact name
linked_user: user linked to contact
contact_addresses: list of contact addresses
description: description of contact
enabled: specifies if contact should be enabled
Returns:
Contact object
"""
body = {
'name': name,
'description': description,
'enabled': enabled,
'linkedUser': {
'id': get_objects_id(linked_user, User)
},
'contactAddresses': [
address.to_dict() if isinstance(address, ContactAddress) else address
for address in contact_addresses
],
}
body = delete_none_values(body)
response = contacts.create_contact(connection, body).json()
if config.verbose:
print(
f"Successfully created contact named: '{name}' with ID: '{response['id']}'"
)
return cls.from_dict(source=response, connection=connection)
def alter(self, name: Optional[str] = None, description: Optional[str] = None,
enabled: Optional[bool] = None, linked_user: Optional[Union['User', str]] = None,
contact_addresses: Optional[Iterable[Union['ContactAddress', dict]]] = None):
"""Update properties of a contact
Args:
name: name of a contact
description: description of a contact
enabled: specifies if a contact is enabled
linked_user: an object of class User linked to the contact
contact_addresses: list of contact addresses
"""
linked_user = {'id': get_objects_id(linked_user, User)} if linked_user else None
func = self.alter
args = func.__code__.co_varnames[:func.__code__.co_argcount]
defaults = func.__defaults__ # type: ignore
defaults_dict = dict(zip(args[-len(defaults):], defaults)) if defaults else {}
local = locals()
properties = defaultdict(dict)
for property_key in defaults_dict:
if local[property_key] is not None:
properties[property_key] = local[property_key]
self._alter_properties(**properties)
@classmethod
def _list_contacts(cls, connection: 'Connection', to_dictionary: bool = False,
limit: Optional[int] = None, **filters
) -> Union[List['Contact'], List[dict]]:
"""Get all contacts as list of Contact objects or dictionaries.
Optionally filter the contacts by specifying filters.
Args:
connection: MicroStrategy connection object
to_dictionary: If True returns a list of contact dicts,
otherwise returns a list of contact objects
limit: limit the number of elements returned. If `None` (default),
all objects are returned.
**filters: Available filter parameters:
['id', 'name', 'description', 'enabled']
"""
objects = fetch_objects(
connection=connection,
api=contacts.get_contacts,
limit=limit,
filters=filters,
dict_unpack_value='contacts'
)
if to_dictionary:
return objects
return [
cls.from_dict(source=obj, connection=connection)
for obj in objects
]
def add_to_contact_group(self, contact_group: Union['ContactGroup', str]):
"""Add to ContactGroup
Args:
contact_group: contact group to which add this contact
"""
if isinstance(contact_group, str):
contact_group = ContactGroup(self.connection, id=contact_group)
contact_group.add_members([self])
self.fetch()
def remove_from_contact_group(self, contact_group: Union['ContactGroup', str]):
"""Remove from ContactGroup
Args:
contact_group: contact group from which to remove this contact
"""
if isinstance(contact_group, str):
contact_group = ContactGroup(self.connection, id=contact_group)
contact_group.remove_members([self])
self.fetch()

stats: avg_line_length 35.797688 | max_line_length 98 | alphanum_fraction 0.619086
qsc_code_*_quality_signal: num_words 1,321 | num_chars 12,386 | mean_word_length 5.636639 | frac_words_unique 0.161998 | frac_chars_top_2grams 0.024174 | frac_chars_top_3grams 0.017459 | frac_chars_top_4grams 0.010744 | frac_chars_dupe_5grams 0.290089 | frac_chars_dupe_6grams 0.213672 | frac_chars_dupe_7grams 0.189901 | frac_chars_dupe_8grams 0.170293 | frac_chars_dupe_9grams 0.1574 | frac_chars_dupe_10grams 0.145313 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.000114 | frac_chars_whitespace 0.292588 | size_file_byte 12,386 | num_lines 345 | num_chars_line_max 99 | num_chars_line_mean 35.901449 | frac_chars_alphabet 0.849692 | frac_chars_comments 0.244389 | cate_xml_start 0 | frac_lines_dupe_lines 0.085 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.106578 | frac_chars_long_word_length 0.009381 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.06 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.06 | frac_lines_simplefunc 0 | score_lines_no_logic 0.24 | frac_lines_print 0.005
unsuffixed qsc_* duplicate columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective: 1 | hits: 0

---
hexsha: 8d0a2cafd43bfa146f864191d3b8493254f59ce2 | size: 330 | ext: py | lang: Python
path: sources/t04/t04ej15.py | repo (identical across the stars/issues/forks blocks): workready/pythonbasic @ 59bd82caf99244f5e711124e1f6f4dec8de22141 | licenses: ["MIT"]
stars, issues, and forks: counts and event datetimes all null
content:
# Iterator built from a generator function
def fib():
prev, curr = 0, 1
while True:
yield curr
prev, curr = curr, prev + curr
f = fib()
# Walk our iterator by calling next(). Inside the for loop, iter(f) is called automatically
print(0, end=' ')
for n in range(16):
print(next(f), end=' ')
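
A note on the comment above: a generator object is its own iterator, which is why the for loop can consume f directly. A quick check, assuming the fib() definition from this file:

```python
g = fib()
assert iter(g) is g                              # generators are their own iterators
assert [next(g) for _ in range(5)] == [1, 1, 2, 3, 5]
```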

stats: avg_line_length 25.384615 | max_line_length 99 | alphanum_fraction 0.630303
qsc_code_*_quality_signal: num_words 51 | num_chars 330 | mean_word_length 4.078431 | frac_words_unique 0.666667 | frac_chars_top_2grams 0.115385 | frac_chars_top_3grams 0.115385 | frac_chars_top_4grams 0 | frac_chars_dupe_5grams 0 | frac_chars_dupe_6grams 0 | frac_chars_dupe_7grams 0 | frac_chars_dupe_8grams 0 | frac_chars_dupe_9grams 0 | frac_chars_dupe_10grams 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.020243 | frac_chars_whitespace 0.251515 | size_file_byte 330 | num_lines 13 | num_chars_line_max 100 | num_chars_line_mean 25.384615 | frac_chars_alphabet 0.821862 | frac_chars_comments 0.427273 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.010695 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.111111 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0 | frac_lines_simplefunc 0 | score_lines_no_logic 0.111111 | frac_lines_print 0.222222
unsuffixed qsc_* duplicate columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective: 1 | hits: 0

---
hexsha: 8d0bafb0014137256cbe170eed5e636723c3e6ff | size: 1,166 | ext: py | lang: Python
path: setup.py | repo (identical across the stars/issues/forks blocks): snower/sevent @ a11e4488e500a4008b07f4391119ad3288a1f07f | licenses: ["MIT"]
max_stars_count: 14 (2018-02-16T10:33:04.000Z to 2021-09-28T02:04:50.000Z) | max_issues: counts and event datetimes null | max_forks_count: 1 (2021-09-26T06:09:38.000Z, min and max identical)
content:
#!/usr/bin/env python
import os
import sys
import platform
from setuptools import setup, Extension
if platform.system() != 'Windows' and platform.python_implementation() == "CPython":
ext_modules = [Extension('sevent/cbuffer', sources=['sevent/cbuffer.c'])]
else:
ext_modules = []
if os.path.exists("README.md"):
if sys.version_info[0] >= 3:
with open("README.md", encoding="utf-8") as fp:
long_description = fp.read()
else:
with open("README.md") as fp:
long_description = fp.read()
else:
long_description = ''
setup(
name='sevent',
version='0.4.6',
packages=['sevent', 'sevent.impl', 'sevent.coroutines', 'sevent.helpers'],
ext_modules=ext_modules,
package_data={
'': ['README.md'],
},
install_requires=[
'dnslib>=0.9.7',
'greenlet>=0.4.2',
],
author='snower',
author_email='sujian199@gmail.com',
url='https://github.com/snower/sevent',
license='MIT',
description='lightweight event loop',
long_description=long_description,
long_description_content_type="text/markdown",
)

stats: avg_line_length 27.116279 | max_line_length 85 | alphanum_fraction 0.610635
qsc_code_*_quality_signal: num_words 137 | num_chars 1,166 | mean_word_length 5.072993 | frac_words_unique 0.562044 | frac_chars_top_2grams 0.129496 | frac_chars_top_3grams 0.040288 | frac_chars_top_4grams 0.046043 | frac_chars_dupe_5grams 0.083453 | frac_chars_dupe_6grams 0.083453 | frac_chars_dupe_7grams 0.083453 | frac_chars_dupe_8grams 0 | frac_chars_dupe_9grams 0 | frac_chars_dupe_10grams 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.01676 | frac_chars_whitespace 0.232419 | size_file_byte 1,166 | num_lines 42 | num_chars_line_max 86 | num_chars_line_mean 27.761905 | frac_chars_alphabet 0.759777 | frac_chars_comments 0.017153 | cate_xml_start 0 | frac_lines_dupe_lines 0.135135 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.242067 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.108108 | frac_lines_simplefunc 0 | score_lines_no_logic 0.108108 | frac_lines_print 0
unsuffixed qsc_* duplicate columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective: 1 | hits: 0

---
hexsha: 2386c477dd9e245a8edc63bb8d5ff08d06c30f67 | size: 1,202 | ext: py | lang: Python
path: core/systems/cart_pole.py | repo (identical across the stars/issues/forks blocks): ivandariojr/core @ c4dec054a3e80355ed3812d48ca2bba286584a67 | licenses: ["MIT"]
max_stars_count: 6 (2021-01-26T21:00:24.000Z to 2022-02-28T23:57:50.000Z) | max_issues_count: 15 (2020-01-28T22:49:18.000Z to 2021-12-14T08:34:39.000Z) | max_forks_count: 6 (2019-06-07T21:31:20.000Z to 2021-12-13T01:00:02.000Z)
content:
from torch import cat, cos, float64, sin, stack, tensor
from torch.nn import Module, Parameter
from core.dynamics import RoboticDynamics
class CartPole(RoboticDynamics, Module):
def __init__(self, m_c, m_p, l, g=9.81):
RoboticDynamics.__init__(self, 2, 1)
Module.__init__(self)
self.params = Parameter(tensor([m_c, m_p, l, g], dtype=float64))
def D(self, q):
m_c, m_p, l, _ = self.params
_, theta = q
return stack(
(stack([m_c + m_p, m_p * l * cos(theta)]),
stack([m_p * l * cos(theta), m_p * (l ** 2)])))
def C(self, q, q_dot):
_, m_p, l, _ = self.params
z = tensor(0, dtype=float64)
_, theta = q
_, theta_dot = q_dot
return stack((stack([z, -m_p * l * theta_dot * sin(theta)]),
stack([z, z])))
def U(self, q):
_, m_p, l, g = self.params
_, theta = q
return m_p * g * l * cos(theta)
def G(self, q):
_, m_p, l, g = self.params
_, theta = q
z = tensor(0, dtype=float64)
return stack([z, -m_p * g * l * sin(theta)])
def B(self, q):
return tensor([[1], [0]], dtype=float64)
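
A usage sketch for the class above; it assumes the core package this file belongs to is importable, and the numeric parameters are arbitrary illustration values:

```python
from torch import tensor, float64

cp = CartPole(m_c=1.0, m_p=0.1, l=0.5)   # hypothetical masses and pole length
q = tensor([0.0, 0.3], dtype=float64)    # cart position, pole angle
q_dot = tensor([0.0, 0.0], dtype=float64)
print(cp.D(q))        # 2x2 inertia matrix
print(cp.C(q, q_dot)) # Coriolis terms
print(cp.G(q))        # gravity vector
```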

stats: avg_line_length 29.317073 | max_line_length 72 | alphanum_fraction 0.526622
qsc_code_*_quality_signal: num_words 181 | num_chars 1,202 | mean_word_length 3.259669 | frac_words_unique 0.220994 | frac_chars_top_2grams 0.044068 | frac_chars_top_3grams 0.050847 | frac_chars_top_4grams 0.027119 | frac_chars_dupe_5grams 0.288136 | frac_chars_dupe_6grams 0.105085 | frac_chars_dupe_7grams 0.084746 | frac_chars_dupe_8grams 0.084746 | frac_chars_dupe_9grams 0.084746 | frac_chars_dupe_10grams 0.084746 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.0246 | frac_chars_whitespace 0.323627 | size_file_byte 1,202 | num_lines 40 | num_chars_line_max 73 | num_chars_line_mean 30.05 | frac_chars_alphabet 0.701107 | frac_chars_comments 0 | cate_xml_start 0 | frac_lines_dupe_lines 0.25 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.1875 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.09375 | frac_lines_simplefunc 0.03125 | score_lines_no_logic 0.46875 | frac_lines_print 0
unsuffixed qsc_* duplicate columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective: 1 | hits: 0

---
hexsha: 2389a78f5b884ed87e17e07d946941aa063ef130 | size: 2,674 | ext: py | lang: Python
path: export.py | repo (identical across the stars/issues/forks blocks): nicolasm/lastfm-export @ 43456161c7083b490d09a1d2638a38c9771e1b3f | licenses: ["MIT"]
max_stars_count: 7 (2015-11-17T20:40:29.000Z to 2022-02-03T17:55:23.000Z) | max_issues_count: 2 (2021-12-30T22:32:11.000Z to 2021-12-30T22:32:12.000Z) | max_forks_count: 1 (2020-08-03T09:19:11.000Z, min and max identical)
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#######################################################################
# This script imports your Last.fm listening history #
# inside a MySQL or Sqlite database. #
# #
# Copyright (c) 2015-2020, Nicolas Meier #
#######################################################################
import json
import logging
import sys
from lfmconf.lfmconf import get_lastfm_conf
from lfmdb import lfmdb
from stats.stats import LastfmStats, recent_tracks, \
retrieve_total_json_tracks_from_db
from queries.inserts import get_query_insert_json_track
logging.basicConfig(
level=logging.INFO,
    format='%(asctime)s %(levelname)s %(message)s'
)
conf = get_lastfm_conf()
user = conf['lastfm']['service']['username']
api_key = conf['lastfm']['service']['apiKey']
lastfm_stats = LastfmStats.get_lastfm_stats(user, api_key)
total_pages = lastfm_stats.nb_delta_pages()
total_plays_in_db = lastfm_stats.nb_json_tracks_in_db
logging.info('Nb page to get: %d' % total_pages)
if total_pages == 0:
logging.info('Nothing to update!')
sys.exit(1)
all_pages = []
for page_num in range(total_pages, 0, -1):
logging.info('Page %d of %d' % (page_num, total_pages))
page = recent_tracks(user, api_key, page_num)
while page.get('recenttracks') is None:
logging.info('has no tracks. Retrying!')
page = recent_tracks(user, api_key, page_num)
all_pages.append(page)
# Iterate through all pages
num_pages = len(all_pages)
for page_num, page in enumerate(all_pages):
logging.info('Page %d of %d' % (page_num + 1, num_pages))
tracks = page['recenttracks']['track']
# Remove the "nowplaying" track if found.
if tracks[0].get('@attr'):
if tracks[0]['@attr']['nowplaying'] == 'true':
tracks.pop(0)
# Get only the missing tracks.
if page_num == 0:
        logging.info('First page')
nb_plays = lastfm_stats.nb_plays_for_first_page()
tracks = tracks[0: nb_plays]
logging.info('Getting %d plays' % nb_plays)
# On each page, iterate through all tracks
num_tracks = len(tracks)
json_tracks = []
for track_num, track in enumerate(reversed(tracks)):
logging.info('Track %d of %d' % (track_num + 1, num_tracks))
json_tracks.append(json.dumps(track))
try:
lfmdb.insert_many(get_query_insert_json_track(), json_tracks)
except Exception:
sys.exit(1)
logging.info('Done! %d rows in table json_track.' % retrieve_total_json_tracks_from_db())

stats: avg_line_length 31.093023 | max_line_length 89 | alphanum_fraction 0.613687
qsc_code_*_quality_signal: num_words 350 | num_chars 2,674 | mean_word_length 4.465714 | frac_words_unique 0.337143 | frac_chars_top_2grams 0.070377 | frac_chars_top_3grams 0.019194 | frac_chars_top_4grams 0.029431 | frac_chars_dupe_5grams 0.165067 | frac_chars_dupe_6grams 0.112604 | frac_chars_dupe_7grams 0.075496 | frac_chars_dupe_8grams 0.075496 | frac_chars_dupe_9grams 0 | frac_chars_dupe_10grams 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.010572 | frac_chars_whitespace 0.221765 | size_file_byte 2,674 | num_lines 85 | num_chars_line_max 90 | num_chars_line_mean 31.458824 | frac_chars_alphabet 0.740509 | frac_chars_comments 0.170157 | cate_xml_start 0 | frac_lines_dupe_lines 0.076923 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.140087 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.134615 | frac_lines_simplefunc 0 | score_lines_no_logic 0.134615 | frac_lines_print 0
unsuffixed qsc_* duplicate columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective: 1 | hits: 0

---
hexsha: 238ac0373309554dc94b4fa70cb2b5d3ddfb8bbc | size: 2,617 | ext: py | lang: Python
path: final_project/code/src/data_attention.py | repo (identical across the stars/issues/forks blocks): jschmidtnj/cs584 @ d1d4d485d1fac8743cdbbc2996792db249dcf389 | licenses: ["MIT"]
stars, issues, and forks: counts and event datetimes all null
content:
#!/usr/bin/env python3
"""
data file
read in data
"""
from typing import Tuple, Any
import pandas as pd
import tensorflow as tf
from loguru import logger
from utils import file_path_relative
import numpy as np
from transformers import DistilBertTokenizer
NUM_ROWS_TRAIN: int = 15000
TEST_RATIO: float = 0.2
def _run_encode(texts: np.array, tokenizer: Any, maxlen: int = 512):
"""
Encoder for encoding the text into sequence of integers for transformer Input
"""
logger.info('encode')
encodings = tokenizer(
texts.tolist(),
return_token_type_ids=False,
padding='max_length',
truncation=True,
max_length=maxlen
)
return np.array(encodings['input_ids'])
def read_data_attention(strategy: tf.distribute.TPUStrategy,
max_len: int,
) -> Tuple[np.array, np.array, np.array, np.array, tf.data.Dataset, tf.data.Dataset, tf.data.Dataset, int]:
"""
read data from attention models
"""
logger.info('reading data for attention models')
# batch with number of tpu's
batch_size = 16 * strategy.num_replicas_in_sync
auto = tf.data.experimental.AUTOTUNE
# First load the tokenizer
tokenizer = DistilBertTokenizer.from_pretrained(
'distilbert-base-multilingual-cased')
train = pd.read_csv(file_path_relative('jigsaw-toxic-comment-train.csv'))
valid = pd.read_csv(file_path_relative('validation.csv'))
test = pd.read_csv(file_path_relative('test.csv'))
x_train = _run_encode(train['comment_text'].astype(str),
tokenizer, maxlen=max_len)
x_valid = _run_encode(valid['comment_text'].astype(str),
tokenizer, maxlen=max_len)
x_test = _run_encode(test['content'].astype(
str), tokenizer, maxlen=max_len)
y_train = train['toxic'].values
y_valid = valid['toxic'].values
train_dataset = (
tf.data.Dataset
.from_tensor_slices((x_train, y_train))
.repeat()
.shuffle(2048)
.batch(batch_size)
.prefetch(auto)
)
valid_dataset = (
tf.data.Dataset
.from_tensor_slices((x_valid, y_valid))
.batch(batch_size)
.cache()
.prefetch(auto)
)
test_dataset = (
tf.data.Dataset
.from_tensor_slices(x_test)
.batch(batch_size)
)
# return all datasets
return x_train, x_valid, y_train, y_valid, train_dataset, valid_dataset, \
test_dataset, batch_size
if __name__ == '__main__':
raise RuntimeError('cannot run data attention on its own')

stats: avg_line_length 27.260417 | max_line_length 131 | alphanum_fraction 0.649599
qsc_code_*_quality_signal: num_words 333 | num_chars 2,617 | mean_word_length 4.867868 | frac_words_unique 0.378378 | frac_chars_top_2grams 0.02591 | frac_chars_top_3grams 0.048118 | frac_chars_top_4grams 0.06169 | frac_chars_dupe_5grams 0.226403 | frac_chars_dupe_6grams 0.226403 | frac_chars_dupe_7grams 0.120296 | frac_chars_dupe_8grams 0.120296 | frac_chars_dupe_9grams 0.05182 | frac_chars_dupe_10grams 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.008612 | frac_chars_whitespace 0.245701 | size_file_byte 2,617 | num_lines 95 | num_chars_line_max 132 | num_chars_line_mean 27.547368 | frac_chars_alphabet 0.812563 | frac_chars_comments 0.086741 | cate_xml_start 0 | frac_lines_dupe_lines 0.16129 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.097488 | frac_chars_long_word_length 0.027246 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.032258 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.112903 | frac_lines_simplefunc 0 | score_lines_no_logic 0.177419 | frac_lines_print 0
unsuffixed qsc_* duplicate columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective: 1 | hits: 0

---
hexsha: 2390d6b6f9cc47e9e8fe109965cc9769d5444907 | size: 2,875 | ext: py | lang: Python
path: applications/WindEngineeringApplication/tests/test_WindEngineeringApplication.py | repo (identical across the stars/issues/forks blocks): clazaro/Kratos @ b947b82c90dfcbf13d60511427f85990d36b90be | licenses: ["BSD-4-Clause"]
stars, issues, and forks: counts and event datetimes all null
content:
# Kratos imports
import KratosMultiphysics
import KratosMultiphysics.KratosUnittest as UnitTest
from KratosMultiphysics.WindEngineeringApplication.test_suite import SuiteFlags, TestSuite
import run_cpp_tests
# STL imports
import pathlib
class TestLoader(UnitTest.TestLoader):
@property
def suiteClass(self):
return TestSuite
def AssembleTestSuites(enable_mpi=False):
""" Populates the test suites to run.
    Populates the test suites to run. At least, it should populate the suites:
    "small", "nightly" and "all"
Return
------
suites: A dictionary of suites
The set of suites with its test_cases added.
"""
static_suites = UnitTest.KratosSuites
# Test cases will be organized into lists first, then loaded into their
# corresponding suites all at once
local_cases = {}
for key in static_suites.keys():
local_cases[key] = []
# Glob all test cases in this application
this_directory = pathlib.Path(__file__).absolute().parent
test_loader = TestLoader()
all_tests = test_loader.discover(this_directory)
# Sort globbed test cases into lists based on their suite flags
# flags correspond to entries in KratosUnittest.TestSuites
# (small, nightly, all, validation)
#
# Cases with the 'mpi' flag are added to mpi suites as well as their corresponding normal suites.
# Cases with the 'mpi_only' flag are not added to normal suites.
for test_case in all_tests:
suite_flags = set(test_case.suite_flags)
# Check whether the test case has a flag for mpi
mpi = SuiteFlags.MPI in suite_flags
mpi_only = SuiteFlags.MPI_ONLY in suite_flags
# Don't add the test if its mpi-exclusive and mpi is not enabled
if (not enable_mpi) and mpi_only:
continue
# Remove mpi flags
if mpi:
suite_flags.remove(SuiteFlags.MPI)
if mpi_only:
suite_flags.remove(SuiteFlags.MPI_ONLY)
# Add case to the corresponding suites
for suite_flag in suite_flags:
local_cases[suite_flag.name.lower()].append(test_case)
if mpi or mpi_only:
local_cases["mpi_" + suite_flag.name.lower()].append(test_case)
# Put test in 'all' if it isn't already there
if not (SuiteFlags.ALL in suite_flags):
if not mpi_only:
local_cases["all"].append(test_case)
if mpi or mpi_only:
local_cases["mpi_all"].append(test_case)
# Load all sorted cases into the global suites
for suite_name, test_cases in local_cases.items():
static_suites[suite_name].addTests(test_cases)
return static_suites
def Run(enable_mpi=False):
UnitTest.runTests(AssembleTestSuites(enable_mpi=enable_mpi))
if __name__ == "__main__":
Run(enable_mpi=False)

stats: avg_line_length 31.25 | max_line_length 103 | alphanum_fraction 0.681391
qsc_code_*_quality_signal: num_words 384 | num_chars 2,875 | mean_word_length 4.916667 | frac_words_unique 0.317708 | frac_chars_top_2grams 0.047669 | frac_chars_top_3grams 0.025424 | frac_chars_top_4grams 0.027013 | frac_chars_dupe_5grams 0.129237 | frac_chars_dupe_6grams 0.098517 | frac_chars_dupe_7grams 0.069915 | frac_chars_dupe_8grams 0.043432 | frac_chars_dupe_9grams 0.043432 | frac_chars_dupe_10grams 0.043432 | frac_chars_replacement_symbols 0 | frac_chars_digital 0 | frac_chars_whitespace 0.249739 | size_file_byte 2,875 | num_lines 92 | num_chars_line_max 104 | num_chars_line_mean 31.25 | frac_chars_alphabet 0.87529 | frac_chars_comments 0.340174 | cate_xml_start 0 | frac_lines_dupe_lines 0.046512 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.011918 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.069767 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.116279 | frac_lines_simplefunc 0.023256 | score_lines_no_logic 0.255814 | frac_lines_print 0
unsuffixed qsc_* duplicate columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective: 1 | hits: 0

---
hexsha: 2392d8b12f4d624279784bb31763afce245b714a | size: 5,739 | ext: py | lang: Python
path: src/cadorsfeed/fpr.py | repo (identical across the stars/issues/forks blocks): kurtraschke/cadors-parse @ 67ac398ef318562dcbd7c60ef7c0d91e7980111a | licenses: ["MIT"]
max_stars_count: 1 (2018-01-05T12:54:13.000Z, min and max identical) | issues and forks: counts and event datetimes null
content:
import re
import uuid
from copy import deepcopy
from datetime import datetime
from lxml import etree
from lxml.html import xhtml_to_html
from geoalchemy import WKTSpatialElement
from geolucidate.functions import _cleanup, _convert
from geolucidate.parser import parser_re
from cadorsfeed import db
from cadorsfeed.models import DailyReport, CadorsReport, ReportCategory
from cadorsfeed.models import Aircraft, NarrativePart, Location, LocationRef
from cadorsfeed.cadorslib.xpath_functions import extensions
from cadorsfeed.cadorslib.narrative import process_narrative, normalize_ns
from cadorsfeed.cadorslib.locations import LocationStore
from cadorsfeed.aerodb import aerodromes_re, lookup
NSMAP = {'h': 'http://www.w3.org/1999/xhtml',
'pyf': 'urn:uuid:fb23f64b-3c54-4009-b64d-cc411bd446dd',
'a': 'http://www.w3.org/2005/Atom',
'geo': 'http://www.w3.org/2003/01/geo/wgs84_pos#',
'aero':'urn:uuid:1469bf5a-50a9-4c9b-813c-af19f9d6824d'}
def make_datetime(date, time):
if time is None:
time = "0000 Z"
return datetime.strptime(date + " " + time, "%Y-%m-%d %H%M Z")
def clean_html(tree):
mytree = deepcopy(tree)
for elem in mytree.iter():
for attr, val in elem.attrib.iteritems():
if attr.startswith('{'):
del elem.attrib[attr]
xhtml_to_html(mytree)
return etree.tostring(normalize_ns(mytree), method="html",
encoding=unicode)
def format_parsed_report(parsed_report):
report = CadorsReport.query.get(
parsed_report['cadors_number']) or CadorsReport(uuid=uuid.uuid4())
parsed_report['timestamp'] = make_datetime(parsed_report['date'],
parsed_report['time'])
del parsed_report['date']
del parsed_report['time']
primary_locations = set()
other_locations = set()
if parsed_report['tclid'] != '':
#try to do a db lookup
data = lookup(parsed_report['tclid'])
if data is not None:
primary_locations.add(data)
if parsed_report['location'] != '':
location = parsed_report['location']
#Apply geolucidate and the aerodromes RE
match = aerodromes_re.get_icao_re.search(location)
if match:
data = lookup(match.group())
primary_locations.add(data)
match = parser_re.search(location)
if match:
(latitude, longitude) = _convert(*_cleanup(match.groupdict()))
location = make_location(latitude, longitude)
location.name = match.group()
primary_locations.add(location)
for narrative_part in parsed_report['narrative']:
narrative_tree = process_narrative(narrative_part['narrative_text'])
narrative_part['narrative_html'] = clean_html(narrative_tree)
narrative_part['narrative_xml'] = etree.tostring(narrative_tree,
method="xml",
encoding=unicode)
#do the location extraction here
#parse out geolinks
elements = narrative_tree.xpath(
"//*[@class='geolink' and @geo:lat and @geo:long]",
namespaces=NSMAP)
for element in elements:
longitude = element.attrib[
'{http://www.w3.org/2003/01/geo/wgs84_pos#}long']
latitude = element.attrib[
'{http://www.w3.org/2003/01/geo/wgs84_pos#}lat']
name = element.attrib['title']
location = make_location(latitude, longitude)
location.name = name
other_locations.add(location)
#parse out aerodrome links
elements = narrative_tree.xpath(
"//*[@class='aerolink' and @aero:code]",
namespaces=NSMAP)
for element in elements:
code = element.attrib[
'{urn:uuid:1469bf5a-50a9-4c9b-813c-af19f9d6824d}code']
other_locations.add(lookup(code))
for aircraft_part in parsed_report['aircraft']:
if aircraft_part['flight_number'] is not None:
match = re.match("([A-Z]{2,4})([0-9]{1,4})M?",
aircraft_part['flight_number'])
if match:
aircraft_part['flight_number_operator'] = match.group(1)
aircraft_part['flight_number_flight'] = match.group(2)
report.categories = []
report.aircraft = []
report.narrative_parts = []
report.locations = []
for category in parsed_report['categories']:
report.categories.append(ReportCategory(text=category))
del parsed_report['categories']
for aircraft_part in parsed_report['aircraft']:
report.aircraft.append(Aircraft(**aircraft_part))
del parsed_report['aircraft']
for narrative_part in parsed_report['narrative']:
report.narrative_parts.append(NarrativePart(**narrative_part))
del parsed_report['narrative']
for location in primary_locations:
locref = LocationRef(report=report, location=location,
primary=True)
db.session.add(locref)
other_locations -= primary_locations
for location in other_locations:
locref = LocationRef(report=report, location=location,
primary=False)
db.session.add(locref)
for key, value in parsed_report.iteritems():
setattr(report, key, value)
return report
def make_location(latitude, longitude):
wkt = "POINT(%s %s)" % (longitude,
latitude)
point = WKTSpatialElement(wkt)
location = Location(location=point)
return location

stats: avg_line_length 34.781818 | max_line_length 76 | alphanum_fraction 0.626067
qsc_code_*_quality_signal: num_words 635 | num_chars 5,739 | mean_word_length 5.513386 | frac_words_unique 0.269291 | frac_chars_top_2grams 0.071979 | frac_chars_top_3grams 0.023993 | frac_chars_top_4grams 0.017138 | frac_chars_dupe_5grams 0.228221 | frac_chars_dupe_6grams 0.180805 | frac_chars_dupe_7grams 0.160811 | frac_chars_dupe_8grams 0.067124 | frac_chars_dupe_9grams 0.032276 | frac_chars_dupe_10grams 0.023993 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.025946 | frac_chars_whitespace 0.267991 | size_file_byte 5,739 | num_lines 164 | num_chars_line_max 77 | num_chars_line_mean 34.993902 | frac_chars_alphabet 0.807427 | frac_chars_comments 0.024743 | cate_xml_start 0 | frac_lines_dupe_lines 0.188525 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.132511 | frac_chars_long_word_length 0.037554 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.032787 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.131148 | frac_lines_simplefunc 0 | score_lines_no_logic 0.196721 | frac_lines_print 0
unsuffixed qsc_* duplicate columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective: 1 | hits: 0

---
hexsha: 2393234a05bc91425b68a33c2343314b4950796f | size: 1,016 | ext: py | lang: Python
path: sstcam_sandbox/d191122_dc_tf/generate_pedestal.py | repo (identical across the stars/issues/forks blocks): watsonjj/CHECLabPySB @ 91330d3a6f510a392f635bd7f4abd2f77871322c | licenses: ["BSD-3-Clause"]
stars and issues: counts and event datetimes null | max_forks_count: 1 (2021-03-30T09:46:56.000Z, min and max identical)
content:
from sstcam_sandbox import get_checs
from TargetCalibSB.pedestal import PedestalTargetCalib
from TargetCalibSB import get_cell_ids_for_waveform
from CHECLabPy.core.io import TIOReader
from tqdm import tqdm
from glob import glob
def process(path):
pedestal_path = path.replace("_r0.tio", "_ped.tcal")
reader = TIOReader(path)
pedestal = PedestalTargetCalib(
reader.n_pixels, reader.n_samples-32, reader.n_cells
)
desc = "Generating pedestal"
for wfs in tqdm(reader, total=reader.n_events, desc=desc):
if wfs.missing_packets:
continue
cells = get_cell_ids_for_waveform(wfs.first_cell_id, reader.n_samples, reader.n_cells)
wfs = wfs[:, 32:]
wfs.first_cell_id = cells[32]
pedestal.add_to_pedestal(wfs, wfs.first_cell_id)
pedestal.save_tcal(pedestal_path)
def main():
input_paths = glob(get_checs("d181203_erlangen/pedestal/*.tio"))
for path in input_paths:
process(path)
if __name__ == '__main__':
main()

stats: avg_line_length 29.028571 | max_line_length 94 | alphanum_fraction 0.714567
qsc_code_*_quality_signal: num_words 139 | num_chars 1,016 | mean_word_length 4.920863 | frac_words_unique 0.388489 | frac_chars_top_2grams 0.061404 | frac_chars_top_3grams 0.052632 | frac_chars_top_4grams 0.061404 | frac_chars_dupe_5grams 0.061404 | frac_chars_dupe_6grams 0 | frac_chars_dupe_7grams 0 | frac_chars_dupe_8grams 0 | frac_chars_dupe_9grams 0 | frac_chars_dupe_10grams 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.015892 | frac_chars_whitespace 0.194882 | size_file_byte 1,016 | num_lines 34 | num_chars_line_max 95 | num_chars_line_mean 29.882353 | frac_chars_alphabet 0.820293 | frac_chars_comments 0 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.072835 | frac_chars_long_word_length 0.030512 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.074074 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.222222 | frac_lines_simplefunc 0 | score_lines_no_logic 0.296296 | frac_lines_print 0
unsuffixed qsc_* duplicate columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective: 1 | hits: 0

---
hexsha: 2395f7861779117a4a46333dc411993e7c87448d | size: 2,723 | ext: py | lang: Python
path: algorithm/leetcode/2018-03-25.py | repo (identical across the stars/issues/forks blocks): mhoonjeon/problemsolving @ f47ff41b03ce406b26ea36be602c0aa14ac7ccf1 | licenses: ["MIT"]
stars, issues, and forks: counts and event datetimes all null
content:
# 804. Unique Morse Code Words
class Solution:
def __init__(self):
self.morse_code = [".-", "-...", "-.-.", "-..", ".", "..-.", "--.",
"....", "..", ".---", "-.-", ".-..", "--", "-.",
"---", ".--.", "--.-", ".-.", "...", "-", "..-",
"...-", ".--", "-..-", "-.--", "--.."]
self.alphabets = "abcdefghijklmnopqrstuvwxyz"
self.alpha_morse = dict(zip(self.alphabets, self.morse_code))
def uniqueMorseRepresentations(self, words):
"""
:type words: List[str]
:rtype: int
"""
word_set = []
for word in words:
s = ""
for ch in word:
s += self.alpha_morse[ch]
word_set.append(s)
return len(list(set(word_set)))
""" https://leetcode.com/problems/unique-morse-code-words/discuss/120675/\
Easy-and-Concise-Solution-C++JavaPython
def uniqueMorseRepresentations(self, words):
d = [".-", "-...", "-.-.", "-..", ".", "..-.", "--.", "....", "..", ".---",
"-.-", ".-..", "--", "-.", "---", ".--.", "--.-", ".-.", "...", "-",
"..-", "...-", ".--", "-..-", "-.--", "--.."]
return len({''.join(d[ord(i) - ord('a')] for i in w) for w in words})
"""
# 771. Jewels and Stones, 98.33%
# https://leetcode.com/problems/jewels-and-stones/description/
def numJewelsInStones(self, J, S):
"""
:type J: str
:type S: str
:rtype: int
"""
count = 0
for jewel in J:
for stone in S:
if jewel == stone:
count += 1
return count
""" https://leetcode.com/problems/jewels-and-stones/discuss/113553/\
Easy-and-Concise-Solution-using-hash-set-C++JavaPython
def numJewelsInStones(self, J, S):
setJ = set(J)
return sum(s in setJ for s in S)
"""
# 806. Number of Lines To Write String
# https://leetcode.com/problems/number-of-lines-to-write-string/
def numberOfLines(self, widths, S):
"""
:type widths: List[int]
:type S: str
:rtype: List[int]
"""
lines = 1
line_width = 0
for ch in S:
index = ord(ch) - ord('a')
if line_width + widths[index] <= 100:
line_width += widths[index]
else:
lines += 1
line_width = widths[index]
return [lines, line_width]
""" https://leetcode.com/problems/number-of-lines-to-write-string/discuss/\
120666/Easy-Solution-6-lines-C++JavaPython
def numberOfcurs(self, widths, S):
res, cur = 1, 0
for i in S:
width = widths[ord(i) - ord('a')]
res += 1 if cur + width > 100 else 0
cur = width if cur + width > 100 else cur + width
return [res, cur]
"""

stats: avg_line_length 27.785714 | max_line_length 79 | alphanum_fraction 0.480353
qsc_code_*_quality_signal: num_words 302 | num_chars 2,723 | mean_word_length 4.278146 | frac_words_unique 0.281457 | frac_chars_top_2grams 0.05031 | frac_chars_top_3grams 0.06192 | frac_chars_top_4grams 0.092879 | frac_chars_dupe_5grams 0.224458 | frac_chars_dupe_6grams 0.157895 | frac_chars_dupe_7grams 0.137771 | frac_chars_dupe_8grams 0.077399 | frac_chars_dupe_9grams 0.077399 | frac_chars_dupe_10grams 0.077399 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.0258 | frac_chars_whitespace 0.288285 | size_file_byte 2,723 | num_lines 97 | num_chars_line_max 80 | num_chars_line_mean 28.072165 | frac_chars_alphabet 0.640867 | frac_chars_comments 0.128167 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.087621 | frac_chars_long_word_length 0.0209 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.117647 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0 | frac_lines_simplefunc 0 | score_lines_no_logic 0.235294 | frac_lines_print 0
unsuffixed qsc_* duplicate columns: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null) | effective: 1 | hits: 0

---
hexsha: 2396d1a675b3960ca8025853ba1b4a50d69159c9 | size: 19,779 | ext: py | lang: Python
path: pygna/block_model.py | repo (identical across the stars/issues/forks blocks): Gee-3/pygna @ 61f2128e918e423fef73d810e0c3af5761933096 | licenses: ["MIT"]
max_stars_count: 32 (2019-07-11T22:58:14.000Z to 2022-03-04T19:34:55.000Z) | max_issues_count: 3 (2021-05-24T14:03:13.000Z to 2022-01-07T03:47:32.000Z) | max_forks_count: 5 (2019-07-24T09:38:07.000Z to 2021-12-30T09:20:20.000Z)
content:
import networkx as nx
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import numpy as np
import logging
from pygna import output
from pygna.utils import YamlConfig
import pandas as pd
import random
import string
import seaborn as sns
import pygna.output as output
class BlockModel(object):
def __init__(self, block_model_matrix, n_nodes: int = 10, nodes_percentage: list = None):
"""
        This class implements block-model reading and elaboration methods
:param block_model_matrix: the matrix to be used as block model
:param n_nodes: the number of nodes
        :param nodes_percentage: the percentage of nodes to use for the calculations, passed as a list, for example [0.5, 0.5]
"""
self.n_nodes = n_nodes
self.nodes = ["N" + str(i) for i in range(n_nodes)]
self.n_clusters = block_model_matrix.shape[0]
self.graph = nx.Graph()
self.bm = block_model_matrix
self.nodes_in_block = False
self.nodes_percentage = nodes_percentage
self.cluster_dict = {}
def set_nodes(self, nodes_names: list) -> None:
"""
Set the nodes name of the block model
:param nodes_names: the names list
Example
_______
>>> p = 0.5
>>> n_nodes = 1000
>>> matrix = np.array([[1, 2], [3, 4]])
>>> bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p])
>>> nodes = ["A", "B", "C"]
>>> bm.set_nodes(nodes)
"""
self.nodes = nodes_names
self.n_nodes = len(nodes_names)
def set_bm(self, block_model_matrix: pd.DataFrame) -> None:
"""
Change block model matrix used in the class
:param block_model_matrix: the block model matrix
Example
_______
>>> p = 0.5
>>> n_nodes = 1000
>>> matrix = np.array([[1, 2], [3, 4]])
>>> bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p])
>>> bmm = pd.DataFrame(mydata_matrix)
>>> bm.set_bm(bmm)
"""
if block_model_matrix.shape[0] == self.n_clusters:
self.bm = block_model_matrix
else:
logging.error("the block model is supposed to have %d clusters" % (self.n_clusters))
def set_nodes_in_block_percentage(self, nodes_percentage: list) -> None:
"""
Pass the percentage of nodes in each block as a list, for example [0.5, 0.5]
:param nodes_percentage: percentage of the nodes
Example
_______
>>> p = 0.5
>>> n_nodes = 1000
>>> matrix = np.array([[1, 2], [3, 4]])
>>> bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p])
>>> bm.set_nodes_in_block_percentage([0.5, 0.5])
"""
self.nodes_percentage = nodes_percentage
def set_nodes_in_block(self, nodes_in_block: int) -> None:
"""
Set the nodes number in the block model
:param nodes_in_block: the number of nodes in the block list
Example
_______
>>> p = 0.5
>>> n_nodes = 1000
>>> matrix = np.array([[1, 2], [3, 4]])
>>> bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p])
>>> bm.set_nodes_in_block(1000)
"""
self.nodes_in_block = nodes_in_block
def create_graph(self) -> None:
"""
Create a graph from the parameters passed in the constructor of the class
Example
_______
>>> bm = BlockModel(np.array(config["BlockModel"]["matrix"]), n_nodes=config["BlockModel"]["n_nodes"], nodes_percentage=config["BlockModel"]["nodes_percentage"])
>>> bm.create_graph()
"""
reject = True
logging.info('Reject=' + str(reject))
while reject:
graph = generate_graph_from_sm(self.n_nodes, self.bm, self.nodes_in_block, self.nodes,
self.nodes_percentage)
LCC = max(nx.connected_components(graph), key=len)
reject = (len(LCC) != self.n_nodes)
logging.info('Reject=' + str(reject))
logging.info('Nodes: %d, in LCC: %d' % (self.n_nodes, len(LCC)))
self.graph = graph
def plot_graph(self, output_folder: str) -> None:
"""
Plot the block model graph
:param output_folder: the folder where to save the result
Example
_______
>>> p = 0.5
>>> n_nodes = 1000
>>> matrix = np.array([[1, 2], [3, 4]])
>>> bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p])
>>> bm.plot_graph("block_model_path.pdf")
"""
plot_bm_graph(self.graph, self.bm, output_folder=output_folder)
def write_network(self, output_file: str) -> None:
"""
Save the network on a given file
:param output_file: the output path where to save the results
Example
_______
>>> p = 0.5
>>> n_nodes = 1000
>>> matrix = np.array([[1, 2], [3, 4]])
>>> bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p])
>>> bm.write_network("network.tsv")
"""
self.network_file = output_file
logging.info("Network written on %s" % (output_file))
if output_file.endswith(".tsv"):
nx.write_edgelist(self.graph, output_file, data=False, delimiter="\t")
else:
logging.error("output file format unknown")
def write_cluster_genelist(self, output_file: str) -> None:
"""
Save the gene list to a GMT file
:param output_file: the output path where to save the results
Example
_______
>>> p = 0.5
>>> n_nodes = 1000
>>> matrix = np.array([[1, 2], [3, 4]])
>>> bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p])
>>> bm.write_cluster_genelist("genes.gmt")
"""
self.genelist_file = output_file
clusters = nx.get_node_attributes(self.graph, "cluster")
for i in set(clusters.values()):
c = "cluster_" + str(i)
self.cluster_dict[c] = {}
self.cluster_dict[c]["descriptor"] = "cluster"
self.cluster_dict[c]["genes"] = [str(j) for j in clusters.keys() if clusters[j] == i]
if output_file.endswith(".gmt"):
output.print_GMT(self.cluster_dict, self.genelist_file)
else:
logging.error("output file format unknown")
def generate_graph_from_sm(n_nodes: int, block_model: pd.DataFrame, nodes_in_block: list = False,
node_names: list = None, nodes_percentage: list = None) -> nx.Graph:
"""
This function creates a graph with n_nodes vertices and a matrix block_model that describes the intra- and inter-block connectivity.
The optional nodes_in_block list controls the number of nodes in each cluster.
:param n_nodes: the number of nodes in the block model
:param block_model: the block model to elaborate
:param nodes_in_block: the list of nodes in the block model
:param node_names: the list of names in the block model
:param nodes_percentage: the percentage of nodes to use for the calculations, passed through a list for example [0.5, 0.5]
Example
_______
>>> bm = pd.DataFrame(mydata_matrix)
>>> nodes = ["A", "B", "C"]
>>> graph = generate_graph_from_sm(n_nodes, bm, nodes_in_block, nodes, nodes_percentage)
"""
if not node_names:
node_names = range(n_nodes)
edges = []
G = nx.Graph()
if nodes_percentage:
cluster = np.random.choice(block_model.shape[0], size=n_nodes, p=nodes_percentage)
np.random.shuffle(cluster)
elif nodes_in_block:
list_temp = [nodes_in_block[i] * [i] for i in range(len(nodes_in_block))]
cluster = np.array([val for sublist in list_temp for val in sublist])
np.random.shuffle(cluster)
else:
# cluster is an array of random numbers corresponding to the cluster of each node
cluster = np.random.randint(block_model.shape[0], size=n_nodes)
for i in range(n_nodes):
G.add_node(node_names[i], cluster=cluster[i])
for i in range(n_nodes):
for j in range(i + 1, n_nodes):
if np.random.rand() < block_model[cluster[i], cluster[j]]:
edges.append((node_names[i], node_names[j]))
G.add_edges_from(edges)
return G
def plot_bm_graph(graph: nx.Graph, block_model: pd.DataFrame, output_folder: str = None) -> None:
"""
Save the graph on a file
:param graph: the graph with name of the nodes
:param block_model: the block model
:param output_folder: the folder where to save the file
Example
_______
>>> bm = pd.DataFrame(mydata_matrix)
>>> graph = nx.complete_graph(100)
>>> plot_bm_graph(graph, bm, output_folder="./results/")
"""
nodes = graph.nodes()
colors = ['#b15928', '#1f78b4', '#6a3d9a', '#33a02c', '#ff7f00']
cluster = nx.get_node_attributes(graph, 'cluster')
labels = [colors[cluster[n]] for n in nodes]
layout = nx.spring_layout(graph)
plt.figure(figsize=(13.5, 5))
plt.subplot(1, 3, 1)
nx.draw(graph, nodelist=nodes, pos=layout, node_color='#636363', node_size=50, edge_color='#bdbdbd')
plt.title("Observed network")
plt.subplot(1, 3, 2)
plt.imshow(block_model, cmap='OrRd', interpolation='nearest')
plt.title("Stochastic block matrix")
plt.subplot(1, 3, 3)
legend = []
for ix, c in enumerate(colors):
legend.append(mpatches.Patch(color=c, label='C%d' % ix))
nx.draw(graph, nodelist=nodes, pos=layout, node_color=labels, node_size=50, edge_color='#bdbdbd')
plt.legend(handles=legend, ncol=len(colors), mode="expand", borderaxespad=0)
plt.title("SB clustering")
plt.savefig(output_folder + 'block_model.pdf', bbox_inches='tight')
def generate_sbm_network(input_file: "yaml configuration file") -> None:
"""
This function generates a simulated network, using the block model matrix given as input and saves both the network and the cluster nodes.
All parameters must be specified in a yaml file.
This function can create a network and geneset for any type of SBM.
"""
ym = YamlConfig()
config = ym.load_config(input_file)
print(config)
bm = BlockModel(np.array(config["BlockModel"]["matrix"]), n_nodes=config["BlockModel"]["n_nodes"],
nodes_percentage=config["BlockModel"]["nodes_percentage"])
outpath = config["Simulations"]["output_folder"]
suffix = config["Simulations"]["suffix"]
for i in range(config["Simulations"]["n_simulated"]):
bm.create_graph()
bm.write_network(outpath + suffix + "_s_" + str(i) + "_network.tsv")
bm.write_cluster_genelist(outpath + suffix + "_s_" + str(i) + "_genes.gmt")
# bm.plot_graph(outpath+suffix+"_s_"+str(i))
def generate_sbm2_network(output_folder: 'folder where the simulations are saved',
prefix: 'prefix for the simulations' = 'sbm',
n_nodes: 'nodes in the network' = 1000,
theta0: 'probability of connection in the cluster' = '0.9,0.7,0.5,0.2',
percentage: 'percentage of nodes in cluster 0, use ratio 0.1 = 10 percent' = '0.1',
density: 'multiplicative parameter used to define network density' = '0.06,0.1,0.2',
n_simulations: 'number of simulated networks for each configuration' = 3
):
"""
This function generates the simulated networks and genesets using the stochastic block model with 2 BLOCKS as described in the paper. The output names are prefix_t_<theta0>_p_<percentage>_d_<density>_s_<n_simulation>_network.tsv or _genes.gmt.
There is one densely connected cluster, while the rest of the network has the same probability of connection: SBM = d * [[theta0, 1-theta0], [1-theta0, 1-theta0]].
The simulator checks the connectedness of the generated network; if the generated net is not connected, a new simulation is generated.
"""
teta_ii = [float(i) for i in theta0.replace(' ', '').split(',')]
percentages = [float(i) for i in percentage.replace(' ', '').split(',')]
density = [float(i) for i in density.replace(' ', '').split(',')]
n_simulated = int(n_simulations)
n_nodes = int(n_nodes)
for p in percentages:
for t in teta_ii:
for d in density:
matrix = np.array([[d * t, d * (1 - t)], [d * (1 - t), d * (1 - t)]])
bm = BlockModel(matrix, n_nodes=n_nodes, nodes_percentage=[p, 1 - p])
for i in range(n_simulated):
name = output_folder + prefix + "_t_" + str(t) + "_p_" + str(p) + "_d_" + str(d) + "_s_" + str(i)
bm.create_graph()
bm.write_network(name + "_network.tsv")
bm.write_cluster_genelist(name + "_genes.gmt")
def write_network(network, output_file):
network_file = output_file
logging.info("Network written on %s" % (output_file))
if output_file.endswith(".tsv"):
nx.write_edgelist(network, output_file, data=False, delimiter="\t")
else:
logging.error("output file format unknown")
def get_mix_genesets(gmt_diz,
tups = [('positive_0', 'positive_1'),
('positive_2', 'positive_3'),
('null_4', 'null_5'),
('null_6', 'null_7')],
perc = [4,6,10,12,88,90,94,96]):
diz = {}
for t in tups:
a = gmt_diz[t[0]]['genes']
b = gmt_diz[t[1]]['genes']
for p in perc:
name = t[0]+'_'+str(int(p))+'_'+t[1]+'_'+str(int(100-p))
aa = np.random.choice(a, int(len(a)/100*p), replace = False)
bb = np.random.choice(b, int(len(a)/100*int(100-p)), replace = False)
tot = []
for i in aa:
tot.append(i)
for i in bb:
tot.append(i)
diz[name]=tot
return diz
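# Illustrative shape of the gmt_diz argument for get_mix_genesets (the gene
# lists here are placeholders, not real identifiers):
#   gmt_diz = {'positive_0': {'genes': [...]}, 'positive_1': {'genes': [...]}, ...}
#   mixes = get_mix_genesets(gmt_diz)  # keys like 'positive_0_4_positive_1_96'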
#########################################################################
####### COMMAND LINE FUNCTIONS ##########################################
#########################################################################
def generate_gna_sbm( output_tsv: 'output_network',
output_gmt: 'output geneset filename, this contains only the blocks',
output_gmt2: 'mixture output geneset filename, this contains the mixture blocks'=None,
N:'number of nodes in the network' = 1000,
block_size:'size of the first 8 blocks' = 50,
d:'baseline probability of connection, p0 in the paper' = 0.06,
fc_cis:'positive within-block scaling factor for the probability of connection, Mii = fc_cis * d (alpha parameter in the paper)' = 2.,
fc_trans:'positive between-block scaling factor for the probability of connection, (beta parameter in the paper)' = .5,
pi : 'percentage of block-i nodes for the genesets made of block-i and block-j. Use symmetrical values (5,95),use string comma separated' = '4,6,10,12,88,90,94,96',
descriptor='crosstalk_sbm',
sbm_matrix_figure: 'shows the blockmodel matrix' = None):
"""
This function generates benchmark network and geneset to test
the crosstalk between two blocks.
This function generates 4 blocks with d*fold change probability
and other 4 blocks with d probability.
The crosstalk is set both between pairs of the first 4 blocks and between pairs of the other 4.
Make sure that 8*cluster_size < N
"""
clusters = 8
lc = N - (block_size*clusters)
if lc < 1:
logging.error('nodes are less than cluster groups')
d =float(d)
sizes = clusters*[block_size]
sizes.append(lc)
print(sizes)
probs = d*np.ones((9,9))
#pp = np.tril(d/100*(1+np.random.randn(ncluster+1,ncluster+1)))
A = fc_cis*d
B = d + fc_trans*(d*(fc_cis-1))
probs[0,1] = B
probs[2,3] = B
probs[1,0] = B
probs[3,2] = B
probs[4,5] = B
probs[6,7] = B
probs[5,4] = B
probs[7,6] = B
probs[0,0] = A
probs[1,1] = A
probs[2,2] = A
probs[3,3] = A
if type(sbm_matrix_figure)==str:
f,ax = plt.subplots(1)
sns.heatmap(probs, ax = ax, cmap = 'YlOrRd', annot=True)
f.savefig(sbm_matrix_figure)
ncycle = 0
k = 0
while (k<N):
g = nx.stochastic_block_model(sizes, probs)
g = g.subgraph(max(nx.connected_components(g), key=len)).copy()  # keep the largest connected component
k = len(g)
ncycle +=1
if ncycle > 20:
logging.error('density is too low')
H = nx.relabel_nodes(g, lambda x:'n'+str(x))
gmt_diz = {}
nodes = list(H.nodes)
for p,l in enumerate(H.graph['partition'][:-1]):
if p<4:
name = 'positive_'+str(p)
else:
name = 'null_'+str(p)
ll = [nodes[i] for i in l]
gmt_diz[name]={}
gmt_diz[name]['genes']=ll
gmt_diz[name]['descriptor']=descriptor
if type(output_gmt2)==str:
perc = [float(i) for i in pi.split(',')]
logging.info('Generating mixes with perc = %s', perc)
gmt_diz2={}
mix_dix = get_mix_genesets(gmt_diz, perc = perc)
for name,i in mix_dix.items():
gmt_diz2[name]={}
gmt_diz2[name]['genes']=i
gmt_diz2[name]['descriptor']=descriptor
output.print_GMT(gmt_diz2, output_gmt2)
write_network(H, output_tsv)
output.print_GMT(gmt_diz, output_gmt)
print('Generated ' + output_tsv)
def generate_gnt_sbm( output_tsv: 'output network filename',
output_gmt: 'output geneset filename, this contains only the blocks',
N:'number of nodes in the network' = 1000,
block_size: 'size of the first 6 blocks'= 50,
d: 'baseline probability of connection, p0 in the paper' = 0.06,
fold_change:'positive within-block scaling factor for the probability of connection, Mii = fold_change * d (alpha parameter in the paper)' = 2.,
descriptor:'descriptor for the gmt file'='mixed_sbm'):
"""
This function generates 3 blocks with d*fold_change probability
and other 3 blocks with d probability.
Make sure that 6*cluster_size < N
"""
lc = N - (block_size*6)
if lc < 1:
logging.error('nodes are less than cluster groups')
d =float(d)
sizes = 6*[block_size]
sizes.append(lc)
print(sizes)
probs = d*np.ones((7,7))
#pp = np.tril(d/100*(1+np.random.randn(ncluster+1,ncluster+1)))
probs[0,0]=fold_change*d
probs[1,1]=fold_change*d
probs[2,2]=fold_change*d
ncycle = 0
k = 0
while (k<N):
g = nx.stochastic_block_model(sizes, probs)
g = g.subgraph(max(nx.connected_components(g), key=len)).copy()  # keep the largest connected component
k = len(g)
ncycle +=1
if ncycle > 20:
logging.error('density is too low')
H = nx.relabel_nodes(g, lambda x:'n'+str(x))
gmt_diz = {}
nodes = list(H.nodes)
for p,l in enumerate(H.graph['partition'][:-1]):
if p<3:
name = 'positive_'+str(p)
else:
name = 'null_'+str(p)
ll = [nodes[i] for i in l]
gmt_diz[name]={}
gmt_diz[name]['genes']=ll
gmt_diz[name]['descriptor']=descriptor
write_network(H, output_tsv)
output.print_GMT(gmt_diz, output_gmt)
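if __name__ == "__main__":
    # Minimal usage sketch (parameters are illustrative): a 2-block model with
    # dense within-block and sparse between-block connection probabilities.
    demo_matrix = np.array([[0.5, 0.05], [0.05, 0.5]])
    demo_bm = BlockModel(demo_matrix, n_nodes=40, nodes_percentage=[0.5, 0.5])
    demo_bm.create_graph()  # retries until the realization is connected
    print(demo_bm.graph.number_of_nodes(), demo_bm.graph.number_of_edges())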
| 36.027322
| 258
| 0.590323
| 2,706
| 19,779
| 4.13969
| 0.13969
| 0.026245
| 0.018211
| 0.006249
| 0.441707
| 0.377076
| 0.342082
| 0.307356
| 0.288074
| 0.273433
| 0
| 0.024883
| 0.278679
| 19,779
| 548
| 259
| 36.093066
| 0.760286
| 0.265433
| 0
| 0.300699
| 0
| 0.01049
| 0.169069
| 0.001568
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059441
| false
| 0
| 0.041958
| 0
| 0.108392
| 0.027972
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 | 2398b8c755adf06d3f7f1e5cae4d4aedb1f1899b | 443 | py | Python | class/lect/Lect-17/pd1.py | MikenzieAlasca/F21-1010 | a7c15b8d9bf84f316aa6921f6d8a588c513a22b8 | ["MIT"] | 5 | 2021-09-09T21:08:14.000Z | 2021-12-14T02:30:52.000Z | class/lect/Lect-17/pd1.py | MikenzieAlasca/F21-1010 | a7c15b8d9bf84f316aa6921f6d8a588c513a22b8 | ["MIT"] | null | null | null | class/lect/Lect-17/pd1.py | MikenzieAlasca/F21-1010 | a7c15b8d9bf84f316aa6921f6d8a588c513a22b8 | ["MIT"] | 8 | 2021-09-09T17:46:07.000Z | 2022-02-08T22:41:35.000Z |
import pandas as pd
people_dict = {
"weight": pd.Series([145, 182, 191],index=["joan", "bob", "mike"]),
"birthyear": pd.Series([2002, 2000, 1999], index=["bob", "joan", "mike"], name="year"),
"children": pd.Series([1, 2], index=["mike", "bob"]),
"hobby": pd.Series(["Rock Climbing", "Scuba Diving", "Sailing"], index=["joan", "bob", "mike"]),
}
people = pd.DataFrame(people_dict)
print(people)
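# A couple of follow-on selections (illustrative, continuing the demo above):
print(people["hobby"])    # one column, as a Series
print(people.loc["bob"])  # one row, by index label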
| 31.642857
| 104
| 0.557562
| 55
| 443
| 4.454545
| 0.581818
| 0.130612
| 0.097959
| 0.130612
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064972
| 0.200903
| 443
| 13
| 105
| 34.076923
| 0.627119
| 0
| 0
| 0
| 0
| 0
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 | 239ed9095bc55c203b6c4b8328d5c14492d59001 | 6,762 | py | Python | test/phagesExperiment/runTableCases.py | edsaac/bioparticle | 67e191329ef191fc539b290069524b42fbaf7e21 | ["MIT"] | null | null | null | test/phagesExperiment/runTableCases.py | edsaac/bioparticle | 67e191329ef191fc539b290069524b42fbaf7e21 | ["MIT"] | 1 | 2020-09-25T23:31:21.000Z | 2020-09-25T23:31:21.000Z | test/phagesExperiment/runTableCases.py | edsaac/VirusTransport_RxSandbox | 67e191329ef191fc539b290069524b42fbaf7e21 | ["MIT"] | 1 | 2021-09-30T05:00:58.000Z | 2021-09-30T05:00:58.000Z |
###############################################################
# _ _ _ _ _
# | |__ (_) ___ _ __ __ _ _ __| |_(_) ___| | ___
# | '_ \| |/ _ \| '_ \ / _` | '__| __| |/ __| |/ _ \
# | |_) | | (_) | |_) | (_| | | | |_| | (__| | __/
# |_.__/|_|\___/| .__/ \__,_|_| \__|_|\___|_|\___|
# |_|
#
###############################################################
#
# $ python3 runTableCases.py [CASES.CSV] [TEMPLATE.IN] -run
#
# Where:
# - [CASES.CSV] path to csv file with the list of
# parameters and the corresponding tags
# - [TEMPLATE.IN] input file template for PFLOTRAN and
# the corresponding tags
# - [shouldRunPFLOTRAN = "-run"]
#
###############################################################
import numpy as np
import matplotlib.pyplot as plt
from pandas import read_csv
from os import system
import sys
## Global variables
ColumnLenght = 50.0
ConcentrationAtInlet = 1.66E-16
## Non-dimensional numbers
def DaII(K,A,U,L=ColumnLenght):
return (L*L*K)/(A*U)
def Peclet(A,L=ColumnLenght):
return L/A
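## Worked example for the dimensionless numbers above (values illustrative):
## with K = 1 h^-1, A = 0.1 cm, U = 1 cm/h and the default L = 50 cm,
## DaII = (50*50*1)/(0.1*1) = 25000 and Peclet = 50/0.1 = 500.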
def plotResults(U,pH,IS,PV,kATT,kDET,dAq,dIm,alpha):
FILE = current_folder+"/pflotran-obs-0.tec"
textBoxpH = "pH = {:n}".format(pH)\
+ "\nIS = {:n}".format(IS)
textBoxKin = \
"$k_{\\rm att}$"+" = {:.4f}".format(kATT) + " h$^{-1}$" +"\n" + \
"$k_{\\rm det}$"+" = {:.4f}".format(kDET) + " h$^{-1}$" +"\n" + \
"$\lambda_{\\rm aq}$"+" = {:.4f}".format(dAq)+ " h$^{-1}$" +"\n" + \
"$\lambda_{\\rm im}$"+" = {:.4f}".format(dIm)+ " h$^{-1}$" +"\n" + \
"$\\alpha_{\\rm L}$"+" = {:.4f}".format(alpha)+ " cm "
textBoxDimensionless = "Damköhler(II) = $\\dfrac{\\rm reaction}{\\rm dispersion}$"+"\n" +\
"Da$^{\\rm att}$"+" = {:.1E}".format(DaII(kATT,alpha,U)) +"\n" +\
"Da$^{\\rm det}$"+" = {:.1E}".format(DaII(kDET,alpha,U)) +"\n" +\
"Da$^{\\rm λaq}$"+" = {:.1E}".format(DaII(dAq, alpha,U)) +"\n" +\
"Da$^{\\rm λim}$"+" = {:.1E}".format(DaII(dIm, alpha,U)) +"\n\n" +\
"Péclet = $\\dfrac{\\rm advection}{\\rm dispersion}$"+"\n" +\
"P$_{\\rm é}$"+" = {:.1E}".format(Peclet(alpha))
system("./miscellaneous/PFT2CSV.sh " + FILE)
#system("rm " + current_folder +"/*.out")
ObservationPoint = np.loadtxt(FILE,delimiter=",",skiprows=1)
Cnorm = ObservationPoint[:,1]/ConcentrationAtInlet
TimeInPoreVolumes = ObservationPoint[:,0] * U*24./(ColumnLenght)
Legend=["$\\dfrac{[V_{(aq)}]}{[V_{(aq)}]_0}$"]
plt.figure(figsize=(10,4),facecolor="white")
## Plot log-scale
ax1 = plt.subplot(1,2,1)
ax1.plot(TimeInPoreVolumes,Cnorm,c="purple",lw=3)
ax1.set_yscale("symlog",\
linthresh=1.0E-6,subs=[1,2,3,4,5,6,7,8,9])
ax1.set_ylim([-1.0E-7,1.15])
ax1.set_xlim([0,10])
ax1.set_xlabel("Pore Volume [$-$]",fontsize="large")
ax1.axvline(x=PV,ls="dotted",c="gray",lw=1)
ax1.axhspan(ymin=-1.0E-7,ymax=1.0E-6,facecolor="pink",alpha=0.2)
## Rate values
ax1.text(9.5,5.0E-5,textBoxKin,\
bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.5),\
horizontalalignment='right')
## Case pH/IS
ax1.text(9.0,1.0E-1,textBoxpH,\
bbox=dict(boxstyle='round', facecolor='purple', alpha=0.15),\
horizontalalignment='right')
## Plot linear-scale
ax2 = plt.subplot(1,2,2)
ax2.plot(TimeInPoreVolumes,Cnorm,c="purple",lw=3,label=Legend[0])
ax2.set_ylim([-1.0E-2,1.02])
ax2.set_xlim([0,10])
ax2.set_xlabel("Pore Volume [$-$]",fontsize="large")
ax2.axvline(x=PV,ls="dotted",c="gray",lw=1)
ax2.legend(fontsize="large",loc="upper right")
## Péclet and Damköhler numbers
ax2.text(9.5,0.1,textBoxDimensionless,\
bbox=dict(boxstyle='round', facecolor='purple', alpha=0.15),\
horizontalalignment='right')
plt.tight_layout()
FIGPATH = current_folder + "/" + "CASE_" + current_folder[7:10] + ".png"
#plt.show()
plt.savefig(FIGPATH,transparent=False)
## Tags dictionary for variables in input file
tagsReplaceable = {
"Porosity" : "<porosity>",
"DarcyVel" : "<darcyVel>", # q = u*porosity
"CleanTime" : "<elutionTime>", # t @ C0 = 0
"FinalTime" : "<endTime>", # @ 10 pore volumes
"AttachRate": "<katt>",
"DetachRate": "<kdet>",
"DecayAq" : "<decayAq>",
"DecayIm" : "<decayIm>",
"LongDisp" : "<longDisp>"
}
## Tags dictionary for other parameters
tagsAccesory = {
"FlowVel" : "poreWaterVel",
"PoreVol" : "poreVolume",
"pH" : "pH",
"IonicStr" : "IS"
}
## Path to PFLOTRAN executable
PFLOTRAN_path = "$PFLOTRAN_DIR/src/pflotran/pflotran "
## Table with the set of parameters
try:
parameters_file = str(sys.argv[1])
except IndexError:
sys.exit("Parameters file not defined :(")
setParameters = read_csv(parameters_file)
total_rows = setParameters.shape[0]
## Template for the PFLOTRAN input file
try:
template_file = str(sys.argv[2])
except IndexError:
sys.exit("Template file not found :(")
## Run cases?
try:
shouldRunPFLOTRAN = "-run" in str(sys.argv[3])
except IndexError:
shouldRunPFLOTRAN = False
## Delete previous cases
system("rm -rf CASE*")
## Row in the set of parameters table = case to be run
for i in range(total_rows):
#for i in range(1):
## Create a folder for the case
current_folder = "./CASE_" + "{0:03}".format(i+1)
system("mkdir " + current_folder)
## Copy template input file to folder
system("cp " + template_file + " " + current_folder+"/pflotran.in")
current_file = current_folder + "/pflotran.in"
## Replace tags for values in case
for current_tag in tagsReplaceable:
COMM = "sed -i 's/" + tagsReplaceable[current_tag] + "/"\
+'{:.3E}'.format(setParameters.loc[i,tagsReplaceable[current_tag]])\
+ "/g' " + current_file
system(COMM)
## Run PFLOTRAN in that case
if shouldRunPFLOTRAN:
#print(PFLOTRAN_path + "-pflotranin " + current_file)
system(PFLOTRAN_path + "-pflotranin " + current_file)
#system("python3 ./miscellaneous/organizeResults.py " + current_folder + "/pflotran-obs-0.tec -clean")
current_U = setParameters.loc[i,tagsAccesory["FlowVel"]]
current_pH = setParameters.loc[i,tagsAccesory["pH"]]
current_IS = setParameters.loc[i,tagsAccesory["IonicStr"]]
current_PV = setParameters.loc[i,tagsAccesory["PoreVol"]]
#Porosity = setParameters.loc[i,tagsReplaceable["Porosity"]]
#input("Press Enter to continue...")
plotResults(current_U,current_pH,current_IS,current_PV,\
setParameters.loc[i,tagsReplaceable["AttachRate"]],\
setParameters.loc[i,tagsReplaceable["DetachRate"]],\
setParameters.loc[i,tagsReplaceable["DecayAq"]],\
setParameters.loc[i,tagsReplaceable["DecayIm"]],\
setParameters.loc[i,tagsReplaceable["LongDisp"]])
#input("Press Enter to continue...")
system("rm -r pictures ; mkdir pictures")
system("cp CASE**/*.png ./pictures/")
| 34.676923
| 106
| 0.603963
| 831
| 6,762
| 4.761733
| 0.302046
| 0.044478
| 0.047258
| 0.056609
| 0.176649
| 0.115744
| 0.065706
| 0.047511
| 0.047511
| 0.034369
| 0
| 0.025119
| 0.164005
| 6,762
| 195
| 107
| 34.676923
| 0.674863
| 0.213842
| 0
| 0.092437
| 0
| 0
| 0.237549
| 0.018972
| 0.016807
| 0
| 0
| 0
| 0
| 1
| 0.02521
| false
| 0
| 0.042017
| 0.016807
| 0.084034
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 | 239f83a7c0d314a200223629c25572a463600e23 | 593 | py | Python | mongo_list_temp.py | ScottStanton/mqtt_temp_mongo_web | 76d59910f132fea9724b86aebfcef04b61789b8d | ["Unlicense"] | null | null | null | mongo_list_temp.py | ScottStanton/mqtt_temp_mongo_web | 76d59910f132fea9724b86aebfcef04b61789b8d | ["Unlicense"] | null | null | null | mongo_list_temp.py | ScottStanton/mqtt_temp_mongo_web | 76d59910f132fea9724b86aebfcef04b61789b8d | ["Unlicense"] | null | null | null |
#!/usr/bin/python3
#
# This software is covered by The Unlicense license
#
import os, pymongo, sys
def print_mongo():
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["cpu_temperature"]
mycol = mydb["temps"]
#print(myclient.list_database_names())
for x in mycol.find():
print(x)
myclient.close()
def main():
print_mongo()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('Interrupted')
try:
sys.exit(0)
except SystemExit:
os._exit(0)
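# Illustrative note (not part of the original script): documents are expected
# to be written into this collection elsewhere in the project; a manual insert
# would look like mycol.insert_one({"sensor": "cpu", "temperature": 47.2}).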
| 17.969697
| 64
| 0.60371
| 67
| 593
| 5.134328
| 0.686567
| 0.05814
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018476
| 0.269815
| 593
| 32
| 65
| 18.53125
| 0.775982
| 0.175379
| 0
| 0.105263
| 0
| 0
| 0.134298
| 0.053719
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.052632
| 0
| 0.157895
| 0.210526
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 | 23a2a97bb6db12d817c114dd0b13665cae319c12 | 2,185 | py | Python | second/pytorch/models/fusion.py | RickOnEarth/pointpillars_based_CLOCs | c6d4576a151540200dac2354b00dc4ecce6ee72d | ["MIT"] | 2 | 2022-01-05T08:41:38.000Z | 2022-02-14T01:30:08.000Z | second/pytorch/models/fusion.py | RickOnEarth/pointpillars_based_CLOCs | c6d4576a151540200dac2354b00dc4ecce6ee72d | ["MIT"] | 1 | 2022-03-28T03:23:36.000Z | 2022-03-28T03:23:36.000Z | second/pytorch/models/fusion.py | RickOnEarth/pointpillars_based_CLOCs | c6d4576a151540200dac2354b00dc4ecce6ee72d | ["MIT"] | 2 | 2022-01-07T05:56:43.000Z | 2022-02-16T13:26:13.000Z |
import time
import torch
from torch import nn
from torch.nn import functional as F
#import spconv
import torchplus
from torchplus.nn import Empty, GroupNorm, Sequential
from torchplus.ops.array_ops import gather_nd, scatter_nd
from torchplus.tools import change_default_args
import sys
if '/opt/ros/kinetic/lib/python2.7/dist-packages' in sys.path:
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
class fusion(nn.Module):
def __init__(self):
super(fusion, self).__init__()
self._total_time = 0.0
self._total_count = 0
self.name = 'fusion_layer'
self.corner_points_feature = Sequential(
nn.Conv2d(24,48,1),
nn.ReLU(),
nn.Conv2d(48,96,1),
nn.ReLU(),
nn.Conv2d(96,96,1),
nn.ReLU(),
nn.Conv2d(96,4,1),
)
self.fuse_2d_3d = Sequential(
nn.Conv2d(4,18,1),
nn.ReLU(),
nn.Conv2d(18,36,1),
nn.ReLU(),
nn.Conv2d(36,36,1),
nn.ReLU(),
nn.Conv2d(36,1,1),
)
self.maxpool = Sequential(
nn.MaxPool2d([200,1],1),
)
def forward(self,input_1,tensor_index):
torch.cuda.synchronize()
t1 = time.time()
flag = -1
if tensor_index[0,0] == -1: #tensor_index[0,0]=0
out_1 = torch.zeros(1,200,107136,dtype = input_1.dtype,device = input_1.device)
out_1[:,:,:] = -9999999
flag = 0
else:
x = self.fuse_2d_3d(input_1) # input example: [1, 4, 1, 193283], 4 channels, 1*193283
out_1 = torch.zeros(1,200,107136,dtype = input_1.dtype,device = input_1.device)
out_1[:,:,:] = -9999999
out_1[:,tensor_index[:,0],tensor_index[:,1]] = x[0,:,0,:]
flag = 1
x = self.maxpool(out_1)
#x, _ = torch.max(out_1,1)
x = x.squeeze().reshape(1,-1,1)
torch.cuda.synchronize()
self._total_time += time.time() - t1
self._total_count += 1 #batch size = 1
#print("avg fusion time:", self._total_time/self._total_count*1000)
return x, flag
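# Illustrative shapes for fusion.forward (comment sketch only, since the
# torch.cuda.synchronize() calls require a GPU):
#   net = fusion()
#   input_1: [1, 4, 1, K] fused 2D/3D detection features
#   tensor_index: [K, 2] (row, column) scatter targets in the 1 x 200 x 107136 map
#   x, flag = net(input_1, tensor_index)  # x: [1, 107136, 1] after max-pooling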
| 33.106061
| 92
| 0.556522
| 304
| 2,185
| 3.832237
| 0.305921
| 0.054936
| 0.036052
| 0.046352
| 0.271245
| 0.245494
| 0.245494
| 0.180258
| 0.118455
| 0.118455
| 0
| 0.099277
| 0.30389
| 2,185
| 65
| 93
| 33.615385
| 0.666667
| 0.08238
| 0
| 0.206897
| 0
| 0
| 0.050025
| 0.044022
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.155172
| 0
| 0.224138
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 | 23a456677b9384e5a17f6de8dcdc1e93e2a745f9 | 3,001 | py | Python | pdf_lines_gluer.py | serge-sotnyk/pdf-lines-gluer | b44284a28e4bce377d683ab8d6f820e704c630cb | ["MIT"] | 1 | 2021-04-16T13:05:20.000Z | 2021-04-16T13:05:20.000Z | pdf_lines_gluer.py | serge-sotnyk/pdf-lines-gluer | b44284a28e4bce377d683ab8d6f820e704c630cb | ["MIT"] | null | null | null | pdf_lines_gluer.py | serge-sotnyk/pdf-lines-gluer | b44284a28e4bce377d683ab8d6f820e704c630cb | ["MIT"] | 2 | 2019-06-24T06:45:46.000Z | 2019-06-28T19:43:20.000Z |
import string
from typing import List, Dict
# inject code here #
def _mean_in_window(lines, i) -> float:
start = max(i - 5, 0)
finish = min(i + 5, len(lines) - 1)
sm, count = 0, 0
for n in range(start, finish):
sm += len(lines[n]) - 1 # minus one-char prefix
count += 1
return sm / max(count, 1)
def _last_char(line: str) -> str:
return ' ' if len(line) < 1 else line[-1]
def _last_char_features(l_char: str) -> Dict[str, object]:
res = {
'isalpha': l_char.isalpha(),
'isdigit': l_char.isdigit(),
'islower': l_char.islower(),
'punct': l_char if l_char in string.punctuation else ' ',
}
return res
def _first_chars(line: str) -> str:
if len(line) < 1:
chars = ' '
elif len(line) < 2:
chars = line[0]
else:
chars = line[:2]
res = []
for c in chars:
if c.isdigit():
res.append('0')
elif c.isalpha():
res.append('a' if c.islower() else 'A')
else:
res.append(c)
return ''.join(res)
def _line_to_features(line: str, i: int, lines: List[str], annotated: bool) -> Dict[str, object]:
features = {}
this_len = len(line)
mean_len = _mean_in_window(lines, i)
if i > 0:
prev_len = len(lines[i-1]) - (1 if annotated else 0)
l_char = _last_char(lines[i-1])
else:
prev_len = 0
l_char = ' '
features.update(
{
'this_len': this_len,
'mean_len': mean_len,
'prev_len': prev_len,
'first_chars': _first_chars(line),
})
features.update(_last_char_features(l_char))
return features
def _featurize_text_with_annotation(text: str) -> (List[object], List[bool]):
lines = text.strip().splitlines()
x, y = [], []
for i, line in enumerate(lines):
y.append(line[0] == '+') # True, if line should be glued with previous
line = line[1:]
x.append(_line_to_features(line, i, lines, True))
return x, y
_HYPHEN_CHARS = {
'\u002D', # HYPHEN-MINUS
'\u00AD', # SOFT HYPHEN
'\u2010', # HYPHEN
'\u2011', # NON-BREAKING HYPHEN
}
def _preprocess_pdf(text: str, clf, v) -> str:
lines = [s.strip() for s in text.strip().splitlines()]
x = []
for i, line in enumerate(lines):
x.append(_line_to_features(line, i, lines, False))
if not x:
return ''
x_features = v.transform(x)
y_pred = clf.predict(x_features)
corrected_acc = []
for i, line in enumerate(lines):
line = line.strip()
if i == 0 or not y_pred[i]:
corrected_acc.append(line)
else:
prev_line = corrected_acc[-1]
if prev_line != '' and prev_line[-1] in _HYPHEN_CHARS:
corrected_acc[-1] = prev_line[:-1]
else:
corrected_acc[-1] += ' '
corrected_acc[-1] += line
corrected = '\n'.join(corrected_acc)
return corrected
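if __name__ == "__main__":
    # Minimal end-to-end sketch (assumes scikit-learn is installed; the tiny
    # training sample and the '.'/'+' prefixes are illustrative only, using
    # the one-char annotation convention above where '+' marks a glued line).
    from sklearn.feature_extraction import DictVectorizer
    from sklearn.linear_model import LogisticRegression

    annotated = (".First sentence that was\n"
                 "+wrapped by the extractor.\n"
                 ".A second, unwrapped line.\n")
    x, y = _featurize_text_with_annotation(annotated)
    v = DictVectorizer()
    clf = LogisticRegression().fit(v.fit_transform(x), y)
    # On such a tiny sample the prediction is not meaningful, but the call
    # shape is the real one:
    print(_preprocess_pdf("Another sentence that was\nwrapped too.", clf, v))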
| 26.557522
| 97
| 0.55115
| 410
| 3,001
| 3.853659
| 0.241463
| 0.028481
| 0.032911
| 0.034177
| 0.134177
| 0.08481
| 0.039241
| 0.039241
| 0
| 0
| 0
| 0.021184
| 0.307897
| 3,001
| 112
| 98
| 26.794643
| 0.739528
| 0.044985
| 0
| 0.086957
| 0
| 0
| 0.033602
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076087
| false
| 0
| 0.021739
| 0.01087
| 0.184783
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 | 23a5398ab784fc5aa194816a75732cc159a8849f | 1,241 | py | Python | backend/thing/urls.py | thuong-lino/thing | e45d8f197896f4ab9b52dec0a85169396fff629a | ["MIT"] | null | null | null | backend/thing/urls.py | thuong-lino/thing | e45d8f197896f4ab9b52dec0a85169396fff629a | ["MIT"] | null | null | null | backend/thing/urls.py | thuong-lino/thing | e45d8f197896f4ab9b52dec0a85169396fff629a | ["MIT"] | null | null | null |
from django.conf.urls import include
from django.urls import path
from django.contrib import admin
from users.views import FacebookLogin
import django_js_reverse.views
from rest_framework.routers import DefaultRouter
from common.routes import routes as common_routes
router = DefaultRouter()
routes = common_routes
for route in routes:
router.register(route['regex'], route['viewset'],
basename=route['basename'])
urlpatterns = [
path("", include("common.urls"), name="common"),
path("assignments/", include("assignments.urls"), name='assignments'),
path('api-auth/', include('rest_framework.urls')),
path('rest-auth/', include('rest_auth.urls')),
path('rest-auth/registration/', include('rest_auth.registration.urls')),
path('rest-auth/facebook/', FacebookLogin.as_view(), name='fb_login'),
path("admin/", admin.site.urls, name="admin"),
path("jsreverse/", django_js_reverse.views.urls_js, name="js_reverse"),
path("api/", include(router.urls), name="api"),
path("api/assignments/", include("assignments.api.assignment.urls")),
path("api/grade-assignment/", include("assignments.api.graded-assignment.urls")),
path("api/", include("users.urls"), name="user"),
]
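# For reference: each entry of `common_routes` is registered above via
# router.register(regex, viewset, basename), so an entry is expected to look
# roughly like {'regex': r'things', 'viewset': ThingViewSet, 'basename': 'thing'}
# (illustrative shape only; the real entries live in common/routes.py).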
| 37.606061
| 85
| 0.706688
| 155
| 1,241
| 5.567742
| 0.290323
| 0.04635
| 0.041715
| 0.05562
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.124093
| 1,241
| 32
| 86
| 38.78125
| 0.793928
| 0
| 0
| 0
| 0
| 0
| 0.295729
| 0.112812
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.269231
| 0
| 0.269231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 | 23a5e45f9981098530b74e9239812e4a0d27fb21 | 7,302 | py | Python | core/dataset/data_loader.py | thuzhaowang/idn-solver | 7da29ce0b0bd7e76023e1cae56e3d186b324a394 | ["MIT"] | 22 | 2021-10-11T02:31:52.000Z | 2022-02-23T08:06:14.000Z | core/dataset/data_loader.py | xubin1994/idn-solver | 6b5dcfd94f35cc118c5dee0f98401e4848e670e3 | ["MIT"] | 4 | 2021-12-02T02:36:30.000Z | 2022-03-16T01:04:47.000Z | core/dataset/data_loader.py | xubin1994/idn-solver | 6b5dcfd94f35cc118c5dee0f98401e4848e670e3 | ["MIT"] | 4 | 2022-01-20T03:12:23.000Z | 2022-03-16T00:08:54.000Z |
import numpy as np
from path import Path
import random
import pickle
import torch
import os
import cv2
def load_as_float(path):
"""Loads image"""
im = cv2.imread(path)
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB).astype(np.float32)
return im
class SequenceFolder(torch.utils.data.Dataset):
"""Creates a pickle file for ScanNet scene loading, and corresponding dataloader"""
def __init__(self, root, ttype, seed=None, seq_length=3, seq_gap=20, transform=None):
np.random.seed(seed)
random.seed(seed)
self.root = Path(root)
scene_list_path = ttype
self.scene_list_path = scene_list_path[:-4]
fold_root = 'scans_test_sample' if 'test' in ttype else 'scannet_nas'
#fold_root = 'scannet_nas'
scenes = [self.root/fold_root/folder[:-1] for folder in open(scene_list_path)]
self.ttype = ttype
self.scenes = sorted(scenes)
self.seq_gap = seq_gap
self.seq_length = seq_length
self.transform = transform
file_pickle = self.scene_list_path+ '_len_'+str(self.seq_length)+ '_gap_'+str(self.seq_gap)+'.pickle'
if os.path.exists(file_pickle):
with open(file_pickle, 'rb') as handle:
sequence_set = pickle.load(handle)
self.samples = sequence_set
else:
self.crawl_folders()
def crawl_folders(self):
sequence_set = []
isc = 0
cnt = 0
for scene in self.scenes:
#print(isc, len(self.scenes))
isc += 1
frames = os.listdir(os.path.join(scene, "color"))
frames = [int(os.path.splitext(frame)[0]) for frame in frames]
frames = sorted(frames)
intrinsics = np.genfromtxt(os.path.join(scene, "intrinsic", "intrinsic_depth.txt")).astype(np.float32).reshape((4, 4))[:3,:3]
# The index from scannet nas is already sampled
if len(frames) < (self.seq_gap // 20) * self.seq_length:
continue
cnt += len(frames)
end_idx = len(frames) * 20
path_split = scene.split('/')
for i in range(len(frames)):
idx = frames[i]
img = os.path.join(scene, "color", "%04d.jpg" % idx)
if 'test' in self.ttype:
depth = os.path.join(scene, "depth", "%04d.png" % idx)
# do not require normal when test
normal = ""
else:
depth = os.path.join(scene, "depth", "%04d.npy" % idx)
normal = os.path.join(scene, "normal", "%04d_normal.npy" % idx)
pose_tgt = np.loadtxt(os.path.join(scene, "pose", "%04d.txt" % idx))
do_nan_tgt = False
nan_pose_tgt = np.sum(np.isnan(pose_tgt) | np.isinf(pose_tgt))
if nan_pose_tgt>0:
do_nan_tgt = True
sample = {'intrinsics': intrinsics, 'tgt': img, 'tgt_depth': depth, 'tgt_normal': normal, 'ref_depths': [], 'ref_imgs': [], 'ref_poses': [], 'path': []}
sample['path'] = os.path.join(scene , img[:-4])
if idx < self.seq_gap:
shifts = list(range(idx,idx+(self.seq_length-1)*self.seq_gap+1,self.seq_gap))
shifts.remove(idx) #.pop(i)
elif idx >= end_idx - self.seq_gap:
shifts = list(range(idx-(self.seq_length-1)*self.seq_gap,idx+1,self.seq_gap))  # window ending at idx
shifts.remove(idx)
else:
if self.seq_length%2 == 1:
demi_length = self.seq_length//2
if (idx>=demi_length*self.seq_gap) and (idx<end_idx- demi_length*self.seq_gap):
shifts = list(range(idx- (demi_length)*self.seq_gap, idx+(demi_length)*self.seq_gap+1,self.seq_gap))
elif idx<demi_length*self.seq_gap:
diff_demi = (demi_length-idx//self.seq_gap)
shifts = list(range(idx- (demi_length-diff_demi)*self.seq_gap, idx+(demi_length+diff_demi)*self.seq_gap+1,self.seq_gap))
elif idx>=end_idx- demi_length*self.seq_gap:
diff_demi = (demi_length-(end_idx-idx-1)//self.seq_gap)
shifts = list(range(idx- (demi_length+diff_demi)*self.seq_gap, idx+(demi_length-diff_demi)*self.seq_gap+1,self.seq_gap))
else:
print('Error')
shifts.remove(idx)
else:
#2 scenarios
demi_length = self.seq_length//2
if (idx >= demi_length*self.seq_gap) and (idx < end_idx- demi_length*self.seq_gap):
shifts = list(range(idx - demi_length*self.seq_gap, idx + (demi_length-1)*self.seq_gap+1, self.seq_gap))
elif idx < demi_length*self.seq_gap:
diff_demi = (demi_length-idx//self.seq_gap)
shifts = list(range(idx- (demi_length-diff_demi)*self.seq_gap, idx+(demi_length+diff_demi-1)*self.seq_gap+1,self.seq_gap))
elif idx>=end_idx- demi_length*self.seq_gap:
diff_demi = (demi_length-(end_idx-idx-1)//self.seq_gap)
shifts = list(range(idx- (demi_length+diff_demi-1)*self.seq_gap, idx+(demi_length-diff_demi)*self.seq_gap+1,self.seq_gap))
else:
print('Error')
shifts.remove(idx)
do_nan = False
try:
for j in shifts:
pose_src = np.loadtxt(os.path.join(scene, "pose", "%04d.txt" % j))
pose_rel = np.linalg.inv(pose_src) @ pose_tgt
pose = pose_rel[:3,:].reshape((1,3,4)).astype(np.float32)
sample['ref_poses'].append(pose)
sample['ref_imgs'].append(os.path.join(scene, "color", "%04d.jpg" % j))
if 'test' in self.ttype:
sample['ref_depths'].append(os.path.join(scene, "depth", "%04d.png" % j))
else:
sample['ref_depths'].append(os.path.join(scene, "depth", "%04d.npy" % j))
nan_pose = np.sum(np.isnan(pose)) + np.sum(np.isinf(pose))
if nan_pose>0:
do_nan = True
if not do_nan_tgt and not do_nan:
sequence_set.append(sample)
except:
continue
file_pickle = self.scene_list_path+ '_len_'+str(self.seq_length)+ '_gap_'+str(self.seq_gap)+'.pickle'
with open(file_pickle, 'wb') as handle:
pickle.dump(sequence_set, handle, protocol=pickle.HIGHEST_PROTOCOL)
self.samples = sequence_set
def __getitem__(self, index):
sample = self.samples[index]
tgt_img = load_as_float(sample['tgt'])
if 'test' in self.ttype:
tgt_depth = cv2.imread(sample['tgt_depth'],-1).astype(np.float32) / 1000.0
tgt_normal = np.tile(np.expand_dims(np.ones_like(tgt_depth), -1), (1,1,3))
else:
tgt_depth = np.load(sample['tgt_depth']).astype(np.float32) / 1000.0
tgt_normal = np.load(sample['tgt_normal']).astype(np.float32)
tgt_normal = 1.0 - tgt_normal * 2.0 # [-1, 1]
tgt_normal[:,:,2] = np.abs(tgt_normal[:,:,2]) * -1.0
ref_poses = sample['ref_poses']
ref_imgs = [load_as_float(ref_img) for ref_img in sample['ref_imgs']]
if 'test' in self.ttype:
ref_depths = [cv2.imread(depth_img,-1).astype(np.float32)/1000.0 for depth_img in sample['ref_depths']]
else:
ref_depths = [np.load(depth_img).astype(np.float32)/1000.0 for depth_img in sample['ref_depths']]
if self.transform is not None:
imgs, depths, normals, intrinsics = self.transform([tgt_img] + ref_imgs, [tgt_depth] + ref_depths, [tgt_normal], np.copy(sample['intrinsics']))
tgt_img = imgs[0]
tgt_depth = depths[0]
tgt_normal = normals[0]
ref_imgs = imgs[1:]
ref_depths = depths[1:]
else:
intrinsics = np.copy(sample['intrinsics'])
intrinsics_inv = np.linalg.inv(intrinsics)
return tgt_img, ref_imgs, tgt_normal, ref_poses, intrinsics, intrinsics_inv, tgt_depth, ref_depths
def __len__(self):
return len(self.samples)
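# Illustrative usage (paths are placeholders; requires a ScanNet-style layout
# on disk, so this is left as a comment sketch):
#   dataset = SequenceFolder('/data/scannet', 'train.txt', seed=0,
#                            seq_length=3, seq_gap=20)
#   tgt_img, ref_imgs, tgt_normal, ref_poses, K, K_inv, tgt_depth, ref_depths = dataset[0]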
| 38.840426
| 161
| 0.65037
| 1,129
| 7,302
| 3.991143
| 0.139947
| 0.077674
| 0.09099
| 0.036618
| 0.43431
| 0.389925
| 0.388815
| 0.353085
| 0.3249
| 0.303373
| 0
| 0.021527
| 0.198439
| 7,302
| 187
| 162
| 39.048128
| 0.748334
| 0.033826
| 0
| 0.253333
| 0
| 0
| 0.064954
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0.046667
| 0.006667
| 0.106667
| 0.013333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 | 23a7e7e53ed3f920173ee73d17e3e8afad1d765f | 3,813 | py | Python | glue.py | mkechagia/android-survey | a1649c0fb9476fcc9fdf586ecde9da9a9a0138aa | ["Apache-2.0"] | 1 | 2022-01-26T08:14:24.000Z | 2022-01-26T08:14:24.000Z | glue.py | mkechagia/android-survey-tool | a1649c0fb9476fcc9fdf586ecde9da9a9a0138aa | ["Apache-2.0"] | null | null | null | glue.py | mkechagia/android-survey-tool | a1649c0fb9476fcc9fdf586ecde9da9a9a0138aa | ["Apache-2.0"] | null | null | null |
import re
import copy
from collections import defaultdict
from string import Template
# initialize the dictionary for the methods with checked exceptions such as {fake method: real method}
method_dict_checked = {'deleteRecord' : 'delete', \
'editText' : 'setText_new', \
'insertData' : 'insert_new', \
'setLayout' : 'setContentView_new', \
'findViewId' : 'findViewById_new', \
'changeTextColor' : 'setTextColor_new', \
'getCursorString' : 'getString', \
'queryData' : 'query_new', \
'updateRecord' : 'update', \
'drawTxt' : 'drawText_new'}
# initialize the dictionary for the methods with unchecked exceptions such as {fake method: real method}
method_dict_unchecked = {'deleteRecord' : 'delete', \
'editText' : 'setText', \
'insertData' : 'insert', \
'setLayout' : 'setContentView', \
'findViewId' : 'findViewById', \
'changeTextColor' : 'setTextColor', \
'getCursorString' : 'getString', \
'queryData' : 'query', \
'updateRecord' : 'update', \
'drawTxt' : 'drawText'}
# answer_block is a dict of user's answers,
# i.e. answer_block = {'answer_1' : fake_answer}
# survey type refers to the different surveys
# (methods with checked exceptions Vs. methods with unchecked exceptions--documented and undocumented)
def glue_answer(filepath, answers, survey_type, email):
method_dict = set_dict(survey_type)
# open the file
filein = open(filepath)
# read it
src = Template(filein.read())
result = src.substitute(answers)
with open('static/%s-NoteEditor.java' % (email), 'w') as f:
f.write("%s" % result)
# dictionary for answers with real Android's API methods
real_answers = bind_method(answers, method_dict)
#do the substitution
result = src.substitute(real_answers)
return result
# Bind the answers' methods to the real Android's API methods
# answers is a dict, i.e. answers = {'answer_1' : fake_answer}
# This function returns a dict of answers with real Android's
# API methods, i.e. real_answers = {'answer_1' : real_answer}
def bind_method(answers, method_dict):
real_answers = {}
a_keys = list(answers.keys())
m_keys = list(method_dict.keys())
# for each user answer
for k, l in enumerate(a_keys):
# get the value of the answer
an = answers.get(a_keys[k])
# for each fake method
for m, n in enumerate(m_keys):
# search for fake method in the answer
fake = m_keys[m]
if (re.search(fake, an)):
#print ("find fake :" + fake)
# get real method
real = method_dict.get(fake)
if (a_keys[k] not in list(real_answers.keys())):
real_answers[a_keys[k]] = re.sub(fake+'\(', real+'(', an)
break
# check if finally there exists fake method in user's answer
for d, f in enumerate(a_keys):
if (a_keys[d] not in list(real_answers.keys())):
real_answers[a_keys[d]] = answers.get(a_keys[d])
return real_answers
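# Illustrative behavior of bind_method with the checked-exceptions dictionary
# (the answer snippet is a made-up example):
#   bind_method({'answer_1': 'db.deleteRecord(id);'}, method_dict_checked)
#   -> {'answer_1': 'db.delete(id);'}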
def replace_methods(compiler_output, survey_type):
method_dict = set_dict(survey_type)
for fake, real in method_dict.items():
#compiler_output = compiler_output.replace(fake, real)
compiler_output = re.sub(real, fake, compiler_output)
if re.search("\bsetTextColor\b\(\bcolors\b\)", compiler_output):
compiler_output = re.sub("\bsetTextColor\b\(\bcolors\b\)", "changeTextColor(colors)", replace_output)
# check for line numbers
#comp_output = remove_line_numbers(compiler_output)
return compiler_output
# dict depending on the survey type
def set_dict(survey_type):
if (survey_type == 'unchecked'):
return method_dict_unchecked
elif (survey_type == 'checked'):
return method_dict_checked
# replace line numbers with spaces
def remove_line_numbers(output):
out = ''
#.java:118
print ("Here is the output.")
print (output)
#if re.seach('.java:/d+', output):
# print ("OKK")
out = re.sub(':[0-9]+', '', output)
return out
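# Worked example (apart from the debug prints):
# remove_line_numbers("NoteEditor.java:118: error") strips the ":118" and
# returns "NoteEditor.java: error".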
# vim: tabstop=8 noexpandtab shiftwidth=8 softtabstop=0
| 34.981651
| 104
| 0.710464
| 528
| 3,813
| 4.979167
| 0.276515
| 0.041841
| 0.018258
| 0.019399
| 0.170407
| 0.141499
| 0.120959
| 0.065424
| 0.065424
| 0.03043
| 0
| 0.003427
| 0.158143
| 3,813
| 108
| 105
| 35.305556
| 0.815576
| 0.337792
| 0
| 0.086957
| 0
| 0
| 0.227747
| 0.043304
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072464
| false
| 0
| 0.057971
| 0
| 0.217391
| 0.028986
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 | 23abc12980cb0a7128b692a9097ad4b745fb655b | 756 | py | Python | python/torch_helpers/trace2jit.py | zhaohb/Forward | 08c7622090ce0cdd32fe5d0b462cb63258ce0a75 | ["BSD-3-Clause"] | 1 | 2021-03-24T11:49:35.000Z | 2021-03-24T11:49:35.000Z | python/torch_helpers/trace2jit.py | zhaohb/Forward | 08c7622090ce0cdd32fe5d0b462cb63258ce0a75 | ["BSD-3-Clause"] | null | null | null | python/torch_helpers/trace2jit.py | zhaohb/Forward | 08c7622090ce0cdd32fe5d0b462cb63258ce0a75 | ["BSD-3-Clause"] | null | null | null |
import torch
import torchvision.models as models
'''
Description:
    convert a torch module to a JIT TracedModule.
'''
def TracedModelFactory(file_name, traced_model):
traced_model.save(file_name)
traced_model = torch.jit.load(file_name)
print("filename : ", file_name)
print(traced_model.graph)
if __name__ == "__main__":
dummy_input = torch.randn(1, 3, 224, 224) # dummy_input is customized by user
model = models.resnet18(pretrained=True) # model is customized by user
model = model.cpu().eval()
traced_model = torch.jit.trace(model, dummy_input)
model_name = 'model_name' # model_name is customized by user
TracedModelFactory(model_name + '.pth', traced_model)
| 28
| 81
| 0.718254
| 100
| 756
| 5.18
| 0.44
| 0.127413
| 0.081081
| 0.104247
| 0.088803
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016181
| 0.18254
| 756
| 26
| 82
| 29.076923
| 0.822006
| 0.124339
| 0
| 0
| 0
| 0
| 0.059891
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.142857
| 0
| 0.214286
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 | 23ad1135866d4f8277494a12a0ed3be2f1311aa3 | 9,739 | py | Python | CppSimShared/Python/cppsimdata.py | silicon-vlsi-org/eda-sue2Plus | 83a2afa9c80308d5afe07a3fa0214d8412addb6d | ["MIT"] | 1 | 2021-05-30T13:27:33.000Z | 2021-05-30T13:27:33.000Z | CppSimShared/Python/cppsimdata.py | silicon-vlsi-org/eda-sue2Plus | 83a2afa9c80308d5afe07a3fa0214d8412addb6d | ["MIT"] | null | null | null | CppSimShared/Python/cppsimdata.py | silicon-vlsi-org/eda-sue2Plus | 83a2afa9c80308d5afe07a3fa0214d8412addb6d | ["MIT"] | null | null | null |
# cppsimdata.py
# written by Michael H. Perrott
# with minor modifications by Doug Pastorello to work with both Python 2.7 and Python 3.4
# available at www.cppsim.com as part of the CppSim package
# Copyright (c) 2013-2017 by Michael H. Perrott
# This file is disributed under the MIT license (see Copying file)
import ctypes as ct
import numpy as np
import sys
import os
import platform
import subprocess as sp
import contextlib
from scipy.signal import lfilter,welch
class CPPSIM_STORAGE_INFO(ct.Structure):
_fields_ = [
('filename',ct.c_char_p),
('num_sigs',ct.c_int),
('num_samples',ct.c_int)
]
class CppSimData(object):
def __init__(self, filename=None):
if filename != None:
self.storage_info = CPPSIM_STORAGE_INFO(filename.encode('UTF-8'),0,0)
else:
self.storage_info = CPPSIM_STORAGE_INFO('None'.encode('UTF-8'),0,0)
self.err_msg = ct.create_string_buffer(1000)
self.cur_sig_name = ct.create_string_buffer(1000)
if sys.platform == 'darwin':
home_dir = os.getenv("HOME")
arch_val = platform.architecture()[0]
cppsimdata_lib_file = home_dir + '/CppSim/CppSimShared/Python/macosx/cppsimdata_lib.so'
elif sys.platform == 'win32':
cppsimsharedhome = os.getenv("CPPSIMSHAREDHOME")
if sys.maxsize == 2147483647:
cppsimdata_lib_file = cppsimsharedhome + '/Python/win32/cppsimdata_lib.dll'
else:
cppsimdata_lib_file = cppsimsharedhome + '/Python/win64/cppsimdata_lib.dll'
else:
cppsimsharedhome = os.getenv("CPPSIMSHAREDHOME")
arch_val = platform.architecture()[0]
if arch_val == '64bit':
cppsimdata_lib_file = cppsimsharedhome + '/Python/glnxa64/cppsimdata_lib.so'
else:
cppsimdata_lib_file = cppsimsharedhome + '/Python/glnx86/cppsimdata_lib.so'
self.cppsimdata_lib = ct.CDLL(cppsimdata_lib_file)
self.cppsimdata_lib.loadsig.argtypes = [ct.POINTER(CPPSIM_STORAGE_INFO), ct.c_char_p]
self.cppsimdata_lib.lssig.argtypes = [ct.POINTER(CPPSIM_STORAGE_INFO), ct.c_char_p, ct.c_char_p]
self.cppsimdata_lib.evalsig.argtypes = [ct.POINTER(CPPSIM_STORAGE_INFO), ct.c_char_p, ct.POINTER(ct.c_double), ct.c_char_p]
self.cppsimdata_lib.initialize()
if filename != None:
error_flag = self.cppsimdata_lib.loadsig(ct.byref(self.storage_info),self.err_msg)
if error_flag == 1:
print(self.err_msg.value.decode('UTF-8'))
sys.exit()
def __repr__(self):
return "File: '%s', num_samples = %d, num_sigs = %d"%(self.storage_info.filename, self.storage_info.num_samples, self.storage_info.num_sigs)
def loadsig(self,filename):
self.storage_info.filename = filename
error_flag = self.cppsimdata_lib.loadsig(ct.byref(self.storage_info),self.err_msg)
if error_flag == 1:
print(self.err_msg.value.decode('UTF-8'))
sys.exit()
def get_num_samples(self):
return self.storage_info.num_samples
def get_num_sigs(self):
return self.storage_info.num_sigs
def get_filename(self):
return self.storage_info.filename
def lssig(self,print_str_flag=None):
sig_list = []
self.cppsimdata_lib.reset_cur_sig_count()
for i in range(self.storage_info.num_sigs):
error_flag = self.cppsimdata_lib.lssig(ct.byref(self.storage_info),self.cur_sig_name, self.err_msg)
if error_flag == 1:
print(self.err_msg.value.decode('UTF-8'))
sys.exit()
if print_str_flag == 'print':
print('%d: %s' % (i,self.cur_sig_name.value.decode('UTF-8')))
sig_list.append(self.cur_sig_name.value.decode('UTF-8'))
return sig_list
def evalsig(self,sig_name):
# If the signal name is a string, convert it to a byte array for the interface
if (type(sig_name) is str):
sig_name = str.encode(sig_name)
sig_data = np.zeros(self.storage_info.num_samples)
error_flag = self.cppsimdata_lib.evalsig(ct.byref(self.storage_info), sig_name, sig_data.ctypes.data_as(ct.POINTER(ct.c_double)),self.err_msg)
if error_flag == 1:
print(self.err_msg.value.decode('UTF-8'))
sys.exit()
return sig_data
def cppsim_unbuffer_for_print(status, stream='stdout'):
newline_chars = ['\r', '\n', '\r\n']
stream = getattr(status, stream)
with contextlib.closing(stream):
while True:
out = []
last = stream.read(1)
if last == '' and status.poll() is not None:
break
while last not in newline_chars:
if last == '' and status.poll() is not None:
break
out.append(last)
last = stream.read(1)
out = ''.join(out)
yield out
def cppsim(sim_file="test.par"):
if sim_file.find('.par') < 0:
sim_file = sim_file + '.par'
cppsim_home = os.getenv('CppSimHome')
if cppsim_home == None:
cppsim_home = os.getenv('CPPSIMHOME')
if cppsim_home == None:
home = os.getenv('HOME')
if sys.platform == 'win32':
default_cppsim_home = "%s\\CppSim" % (home)
else:
default_cppsim_home = "%s/CppSim" % (home)
if os.path.isdir(default_cppsim_home):
cppsim_home = default_cppsim_home
else:
print('Error running cppsim from Python: environment variable')
print(' CPPSIMHOME is undefined')
cppsimshared_home = os.getenv('CppSimSharedHome')
if cppsimshared_home == None:
cppsimshared_home = os.getenv('CPPSIMSHAREDHOME')
if cppsimshared_home == None:
if sys.platform == 'win32':
default_cppsimshared_home = "%s\\CppSimShared" % (cppsim_home)
else:
default_cppsimshared_home = "%s/CppSimShared" % (cppsim_home)
if os.path.isdir(default_cppsimshared_home):
cppsimshared_home = default_cppsimshared_home
else:
print('Error running cppsim: environment variable')
print(' CPPSIMSHAREDHOME is undefined')
# print('cppsimhome: %s' % cppsim_home)
# print('cppsimsharedhome: %s' % cppsimshared_home)
cur_dir = os.getcwd()
if sys.platform == 'win32':
i = cur_dir.lower().find('\\simruns\\')
else:
i = cur_dir.lower().find('/simruns/')
if i < 0:
print('Error running cppsim: you need to run this Python script')
print(' in a directory of form:')
if sys.platform == 'win32':
print(' .....\\SimRuns\\Library_name\\Module_name')
else:
print(' ...../SimRuns/Library_name/Module_name')
print(' -> in this case, you ran in directory:')
print(' %s' % cur_dir)
sys.exit()
library_cell = cur_dir[i+9:1000]
if sys.platform == 'win32':
i = library_cell.find('\\')
else:
i = library_cell.find('/')
if i < 0:
print('Error running cppsim: you need to run this Python script')
print(' in a directory of form:')
print(' ...../SimRuns/Library_name/Module_name')
print(' -> in this case, you ran in directory:')
print(' %s' % cur_dir)
sys.exit()
library_name = library_cell[0:i]
cell_name = library_cell[i+1:1000]
print("Running CppSim on module '%s' (Lib:'%s'):" % (cell_name, library_name))
print("\n... netlisting ...\n")
if sys.platform == 'win32':
rp_base = '%s/Sue2/bin/win32/sue_cppsim_netlister' % (cppsimshared_home)
else:
rp_base = '%s/Sue2/bin/sue_cppsim_netlister' % (cppsimshared_home)
rp_arg1 = cell_name
rp_arg2 = '%s/Sue2/sue.lib' % (cppsim_home)
rp_arg3 = '%s/Netlist/netlist.cppsim' % (cppsim_home)
rp = [rp_base, rp_arg1, rp_arg2, rp_arg3]
status = sp.Popen(rp, stdout=sp.PIPE, stderr=sp.STDOUT, universal_newlines=True)
for line in cppsim_unbuffer_for_print(status):
print(line)
if status.returncode != 0:
print('************** ERROR: exited CppSim run prematurely! ****************')
sys.exit()
print('\n... running net2code ...\n')
if sys.platform == 'win32':
rp_base = '%s/bin/win32/net2code' % (cppsimshared_home)
else:
rp_base = '%s/bin/net2code' % (cppsimshared_home)
rp_arg1 = '-cpp'
rp_arg2 = sim_file
rp = [rp_base, rp_arg1, rp_arg2]
status = sp.Popen(rp, stdout=sp.PIPE, stderr=sp.STDOUT, universal_newlines=True)
for line in cppsim_unbuffer_for_print(status):
print(line)
if status.returncode != 0:
print('************** ERROR: exited CppSim run prematurely! ****************')
sys.exit()
print('... compiling ...\n')
if sys.platform == 'win32':
rp_base = '%s/msys/bin/make' % (cppsimshared_home)
else:
rp_base = 'make'
rp = [rp_base]
status = sp.Popen(rp, stdout=sp.PIPE, stderr=sp.STDOUT, universal_newlines=True)
for line in cppsim_unbuffer_for_print(status):
print(line)
if status.returncode != 0:
print('************** ERROR: exited CppSim run prematurely! ****************')
sys.exit()
# calculate phase noise: returns frequency (Hz) and specral density (dBc/Hz)
def calc_pll_phasenoise(noiseout,Ts):
num_segments = 20
window_length = np.floor(noiseout.size/num_segments)
Kv = 1.0
phase = lfilter([Ts*2*np.pi*Kv],[1,-1],noiseout-np.mean(noiseout))
# calculate L(f)
f, Pxx = welch(phase, fs=1/Ts, window='hann', nperseg=2**16, noverlap=None, nfft=None, detrend='constant', return_onesided=False, scaling='density', axis=-1)  # 'hann' replaces the old 'hanning' alias removed from scipy
# In Matlab:
# [Pxx,f] = pwelch(phase,window_length,[],[],1/Ts,'twosided');
# [Pxx,f] = psd(sqrt(Ts)*phase,2^16,1/Ts,2^16,'mean');
Pxx_db = 10.0*np.log10(Pxx)
return f, Pxx_db
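A minimal usage sketch for calc_pll_phasenoise, with a fabricated white-noise input purely for illustration; it assumes numpy and scipy.signal (lfilter, welch) are imported at the top of the script, as the function body above requires.

import numpy as np
from scipy.signal import lfilter, welch

Ts = 1e-9                          # sample period in seconds (assumed)
noiseout = np.random.randn(2**18)  # placeholder VCO noise waveform
f, Pxx_db = calc_pll_phasenoise(noiseout, Ts)
print('bins: %d, peak density: %.1f dBc/Hz' % (f.size, Pxx_db.max()))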
| 38.800797
| 149
| 0.629736
| 1,305
| 9,739
| 4.500383
| 0.201533
| 0.039333
| 0.038311
| 0.024519
| 0.530734
| 0.426698
| 0.340712
| 0.298655
| 0.275498
| 0.228163
| 0
| 0.018712
| 0.231749
| 9,739
| 250
| 150
| 38.956
| 0.766239
| 0.069822
| 0
| 0.401914
| 0
| 0
| 0.177355
| 0.045776
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.038278
| 0.019139
| 0.138756
| 0.172249
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23b3590bb9d68aac5032da0773011d5e1741a6b6
| 5,977
|
py
|
Python
|
notify/handlers.py
|
marzocchi/iterm-notify
|
5e587213ca89c0361a39c785fa4560fda275052f
|
[
"MIT"
] | 28
|
2019-12-01T21:45:28.000Z
|
2021-05-05T17:46:09.000Z
|
notify/handlers.py
|
marzocchi/iterm-notify
|
5e587213ca89c0361a39c785fa4560fda275052f
|
[
"MIT"
] | null | null | null |
notify/handlers.py
|
marzocchi/iterm-notify
|
5e587213ca89c0361a39c785fa4560fda275052f
|
[
"MIT"
] | 2
|
2020-08-04T12:55:04.000Z
|
2020-12-20T22:23:47.000Z
|
import logging
from datetime import datetime
from typing import List
from notify.backends import BackendFactory
from notify.commands import Command
from notify.config import Config, Stack
from notify.notifications import Factory, Notification
from notify.strategies import StrategyFactory
class MaintainConfig:
def __init__(self, stack: Stack,
success_template: Notification,
failure_template: Notification,
logger: logging.Logger):
self.__success_template = success_template
self.__failure_template = failure_template
self.__logger = logger
self.__configuration_stack = stack
self.__configuration_stack.on_pop += self.__apply_on_pop
self.__apply_config(self.__configuration_stack.current)
def __apply_config(self, cfg: Config):
self.notifications_backend_handler(cfg.notifications_backend.name, *cfg.notifications_backend.args)
self.success_title_handler(cfg.success_title)
self.success_message_handler(cfg.success_message)
self.success_icon_handler(cfg.success_icon)
self.success_sound_handler(cfg.success_sound)
self.failure_title_handler(cfg.failure_title)
self.failure_message_handler(cfg.failure_message)
self.failure_icon_handler(cfg.failure_icon)
self.failure_sound_handler(cfg.failure_sound)
self.command_complete_timeout_handler(*cfg.notifications_strategy.args)
self.logging_name_handler(cfg.logger_name)
self.logging_level_handler(cfg.logger_level)
def __apply_on_pop(self):
self.__apply_config(self.__configuration_stack.current)
def notifications_backend_handler(self, name: str, *args):
selected_backend = self.__configuration_stack.notifications_backend.with_name(name, *args)
self.__configuration_stack.notifications_backend = selected_backend
def command_complete_timeout_handler(self, t: str):
selected_strategy = self.__configuration_stack.notifications_strategy.with_args(int(t))
self.__configuration_stack.notifications_strategy = selected_strategy
def success_title_handler(self, title: str):
self.__success_template = self.__success_template.with_title(title)
self.__configuration_stack.success_title = self.__success_template.title
def success_message_handler(self, message: str):
self.__success_template = self.__success_template.with_message(message)
self.__configuration_stack.success_message = self.__success_template.message
def success_icon_handler(self, icon: str):
self.__success_template = self.__success_template.with_icon(icon if icon != "" else None)
self.__configuration_stack.success_icon = self.__success_template.icon
def success_sound_handler(self, sound: str):
self.__success_template = self.__success_template.with_sound(sound if sound != "" else None)
self.__configuration_stack.success_sound = self.__success_template.sound
def failure_title_handler(self, title: str):
self.__failure_template = self.__failure_template.with_title(title)
self.__configuration_stack.failure_title = self.__failure_template.title
def failure_message_handler(self, message: str):
self.__failure_template = self.__failure_template.with_message(message)
self.__configuration_stack.failure_message = self.__failure_template.message
def failure_icon_handler(self, icon: str):
self.__failure_template = self.__failure_template.with_icon(icon if icon != "" else None)
self.__configuration_stack.failure_icon = self.__failure_template.icon
def failure_sound_handler(self, sound: str):
self.__failure_template = self.__failure_template.with_sound(sound if sound != "" else None)
self.__configuration_stack.failure_sound = self.__failure_template.sound
def logging_name_handler(self, new_name: str):
self.__logger.name = new_name
self.__configuration_stack.logger_name = self.__logger.name
def logging_level_handler(self, new_level: str):
self.__logger.setLevel(new_level)
self.__configuration_stack.logger_level = self.__logger.level
class Notify:
def __init__(self, stack: Stack,
notification_factory: Factory,
backend_factory: BackendFactory):
self.__stack = stack
self.__notification_factory = notification_factory
self.__backend_factory = backend_factory
self.__commands: list = []
def notify(self, message: str, title: str):
n = self.__notification_factory.create(message=message, title=title, success=True)
self.__backend_factory.create(self.__stack.notifications_backend).notify(n)
class NotifyCommandComplete:
def __init__(self, stack: Stack,
strategy_factory: StrategyFactory,
notification_factory: Factory,
backend_factory: BackendFactory):
self.__stack = stack
self.__strategy_factory = strategy_factory
self.__notification_factory = notification_factory
self.__backend_factory = backend_factory
self.__commands: List[Command] = []
def before_command(self, command_line: str):
self.__stack.push()
self.__commands.append(Command(datetime.now(), command_line))
def after_command(self, exit_code: str):
exit_code = int(exit_code)
if len(self.__commands) == 0:
raise RuntimeError("after_command without a command")
cmd = self.__commands.pop()
complete_cmd = cmd.complete(exit_code, datetime.now())
if self.__strategy_factory.create(self.__stack.current.notifications_strategy).should_notify(complete_cmd):
n = self.__notification_factory.from_command(complete_cmd)
self.__backend_factory.create(self.__stack.current.notifications_backend).notify(n)
self.__stack.pop()
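A minimal sketch of the configuration-stack pattern used above, with a hypothetical stand-in for the real Stack class from notify.config; it only illustrates how on_pop re-applies the previous configuration after a pop, mirroring __apply_on_pop.

class _Event:
    def __init__(self):
        self._handlers = []
    def __iadd__(self, fn):
        self._handlers.append(fn)
        return self
    def fire(self):
        for fn in self._handlers:
            fn()

class _DemoStack:
    """Hypothetical stand-in for notify.config.Stack."""
    def __init__(self, initial):
        self._items = [initial]
        self.on_pop = _Event()
    @property
    def current(self):
        return self._items[-1]
    def push(self):
        self._items.append(dict(self.current))  # copy-on-push
    def pop(self):
        self._items.pop()
        self.on_pop.fire()  # lets listeners re-apply the restored config

stack = _DemoStack({'logger_level': 'INFO'})
stack.on_pop += lambda: print('re-apply', stack.current)
stack.push()
stack.current['logger_level'] = 'DEBUG'
stack.pop()  # prints: re-apply {'logger_level': 'INFO'}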
| 42.091549
| 115
| 0.736657
| 690
| 5,977
| 5.871014
| 0.108696
| 0.075537
| 0.097754
| 0.033325
| 0.408047
| 0.350531
| 0.269563
| 0.235991
| 0.135769
| 0.135769
| 0
| 0.000205
| 0.185545
| 5,977
| 141
| 116
| 42.390071
| 0.831964
| 0
| 0
| 0.144231
| 0
| 0
| 0.005187
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.192308
| false
| 0
| 0.076923
| 0
| 0.298077
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23ba2ecb3b446799d3bd04447ada1a6c88421c82
| 8,113
|
py
|
Python
|
sdk/python/feast/loaders/ingest.py
|
wzpy/feast
|
06fe09b7047fe370cbf63555cec1ba820f1e7267
|
[
"Apache-2.0"
] | 1
|
2019-12-12T13:21:56.000Z
|
2019-12-12T13:21:56.000Z
|
sdk/python/feast/loaders/ingest.py
|
wzpy/feast
|
06fe09b7047fe370cbf63555cec1ba820f1e7267
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/feast/loaders/ingest.py
|
wzpy/feast
|
06fe09b7047fe370cbf63555cec1ba820f1e7267
|
[
"Apache-2.0"
] | null | null | null |
import logging
import multiprocessing
import os
import time
from functools import partial
from multiprocessing import Process, Queue, Pool
from typing import Iterable
import pandas as pd
import pyarrow as pa
from feast.feature_set import FeatureSet
from feast.type_map import convert_dict_to_proto_values
from feast.types.FeatureRow_pb2 import FeatureRow
from kafka import KafkaProducer
from tqdm import tqdm
from feast.constants import DATETIME_COLUMN
_logger = logging.getLogger(__name__)
GRPC_CONNECTION_TIMEOUT_DEFAULT = 3 # type: int
GRPC_CONNECTION_TIMEOUT_APPLY = 300 # type: int
FEAST_SERVING_URL_ENV_KEY = "FEAST_SERVING_URL" # type: str
FEAST_CORE_URL_ENV_KEY = "FEAST_CORE_URL" # type: str
BATCH_FEATURE_REQUEST_WAIT_TIME_SECONDS = 300
CPU_COUNT = os.cpu_count() # type: int
KAFKA_CHUNK_PRODUCTION_TIMEOUT = 120 # type: int
def _kafka_feature_row_producer(
feature_row_queue: Queue, row_count: int, brokers, topic, ctx: dict, pbar: tqdm
):
"""
Pushes Feature Rows to Kafka. Reads rows from a queue. Function will run
until total row_count is reached.
Args:
feature_row_queue: Queue containing feature rows.
row_count: Total row count to process
brokers: Broker to push to
topic: Topic to push to
ctx: Context dict used to communicate with primary process
pbar: Progress bar object
"""
# Callback for failed production to Kafka
def on_error(e):
# Save last exception
ctx["last_exception"] = e
# Increment error count
if "error_count" in ctx:
ctx["error_count"] += 1
else:
ctx["error_count"] = 1
# Callback for succeeded production to Kafka
def on_success(meta):
pbar.update()
producer = KafkaProducer(bootstrap_servers=brokers)
processed_rows = 0
# Loop through feature rows until all rows are processed
while processed_rows < row_count:
# Wait if queue is empty
if feature_row_queue.empty():
time.sleep(1)
producer.flush(timeout=KAFKA_CHUNK_PRODUCTION_TIMEOUT)
else:
while not feature_row_queue.empty():
row = feature_row_queue.get()
if row is not None:
# Push row to Kafka
producer.send(topic, row.SerializeToString()).add_callback(
on_success
).add_errback(on_error)
processed_rows += 1
# Force an occasional flush
if processed_rows % 10000 == 0:
producer.flush(timeout=KAFKA_CHUNK_PRODUCTION_TIMEOUT)
del row
pbar.refresh()
# Ensure that all rows are pushed
producer.flush(timeout=KAFKA_CHUNK_PRODUCTION_TIMEOUT)
# Using progress bar as counter is much faster than incrementing dict
ctx["success_count"] = pbar.n
pbar.close()
def _encode_pa_chunks(
tbl: pa.lib.Table,
fs: FeatureSet,
max_workers: int,
df_datetime_dtype: pd.DataFrame.dtypes,
chunk_size: int = 5000,
) -> Iterable[FeatureRow]:
"""
Generator function to encode rows in PyArrow table to FeatureRows by
breaking up the table into batches.
Each batch will have its rows spread across a pool of workers to be
transformed into FeatureRow objects.
Args:
tbl: PyArrow table to be processed.
fs: FeatureSet describing PyArrow table.
max_workers: Maximum number of workers.
df_datetime_dtype: Pandas dtype of datetime column.
chunk_size: Maximum size of each chunk when PyArrow table is batched.
Returns:
Iterable FeatureRow object.
"""
pool = Pool(max_workers)
# Create a partial function with static non-iterable arguments
func = partial(
convert_dict_to_proto_values,
df_datetime_dtype=df_datetime_dtype,
feature_set=fs,
)
for batch in tbl.to_batches(max_chunksize=chunk_size):
m_df = batch.to_pandas()
results = pool.map_async(func, m_df.to_dict("records"))
yield from results.get()
pool.close()
pool.join()
return
def ingest_table_to_kafka(
feature_set: FeatureSet,
table: pa.lib.Table,
max_workers: int,
chunk_size: int = 5000,
disable_pbar: bool = False,
timeout: int = None,
) -> None:
"""
Ingest a PyArrow Table into a Kafka topic for a given Feature Set
Args:
feature_set: FeatureSet describing PyArrow table.
table: PyArrow table to be processed.
max_workers: Maximum number of workers.
chunk_size: Maximum size of each chunk when PyArrow table is batched.
disable_pbar: Flag to indicate if tqdm progress bar should be disabled.
timeout: Maximum time before method times out
"""
pbar = tqdm(unit="rows", total=table.num_rows, disable=disable_pbar)
# Use a small DataFrame to validate feature set schema
ref_df = table.to_batches(max_chunksize=100)[0].to_pandas()
df_datetime_dtype = ref_df[DATETIME_COLUMN].dtype
# Validate feature set schema
_validate_dataframe(ref_df, feature_set)
# Create queue through which encoding and production will coordinate
row_queue = Queue()
# Create a context object to send and receive information across processes
ctx = multiprocessing.Manager().dict(
{"success_count": 0, "error_count": 0, "last_exception": ""}
)
# Create producer to push feature rows to Kafka
ingestion_process = Process(
target=_kafka_feature_row_producer,
args=(
row_queue,
table.num_rows,
feature_set.get_kafka_source_brokers(),
feature_set.get_kafka_source_topic(),
ctx,
pbar,
),
)
try:
# Start ingestion process
print(
f"\n(ingest table to kafka) Ingestion started for {feature_set.name}:{feature_set.version}"
)
ingestion_process.start()
# Iterate over chunks in the table and return feature rows
for row in _encode_pa_chunks(
tbl=table,
fs=feature_set,
max_workers=max_workers,
chunk_size=chunk_size,
df_datetime_dtype=df_datetime_dtype,
):
# Push rows onto a queue for the production process to pick up
row_queue.put(row)
while row_queue.qsize() > chunk_size:
time.sleep(0.1)
row_queue.put(None)
except Exception as ex:
_logger.error(f"Exception occurred: {ex}")
finally:
# Wait for the Kafka production to complete
ingestion_process.join(timeout=timeout)
failed_message = (
""
if ctx["error_count"] == 0
else f"\nFail: {ctx['error_count']}/{table.num_rows}"
)
last_exception_message = (
""
if ctx["last_exception"] == ""
else f"\nLast exception:\n{ctx['last_exception']}"
)
print(
f"\nIngestion statistics:"
f"\nSuccess: {ctx['success_count']}/{table.num_rows}"
f"{failed_message}"
f"{last_exception_message}"
)
def _validate_dataframe(dataframe: pd.DataFrame, feature_set: FeatureSet):
"""
Validates a Pandas dataframe based on a feature set
Args:
dataframe: Pandas dataframe
feature_set: Feature Set instance
"""
if "datetime" not in dataframe.columns:
raise ValueError(
f'Dataframe does not contain entity "datetime" in columns {dataframe.columns}'
)
for entity in feature_set.entities:
if entity.name not in dataframe.columns:
raise ValueError(
f"Dataframe does not contain entity {entity.name} in columns {dataframe.columns}"
)
for feature in feature_set.features:
if feature.name not in dataframe.columns:
raise ValueError(
f"Dataframe does not contain feature {feature.name} in columns {dataframe.columns}"
)
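A hypothetical invocation sketch for ingest_table_to_kafka; the FeatureSet construction is elided because its constructor is not shown here, and the column names are assumptions.

import pandas as pd
import pyarrow as pa

df = pd.DataFrame({
    "datetime": pd.to_datetime(["2019-01-01", "2019-01-02"]),
    "entity_id": [1, 2],      # assumed entity column
    "feature_a": [0.1, 0.2],  # assumed feature column
})
table = pa.Table.from_pandas(df)
# ingest_table_to_kafka(feature_set=my_feature_set, table=table,
#                       max_workers=os.cpu_count(), chunk_size=5000)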
| 31.815686
| 103
| 0.646986
| 1,013
| 8,113
| 4.987167
| 0.260612
| 0.037609
| 0.020784
| 0.021378
| 0.169438
| 0.114212
| 0.089667
| 0.061758
| 0.061758
| 0.061758
| 0
| 0.006695
| 0.282017
| 8,113
| 254
| 104
| 31.940945
| 0.860601
| 0.288796
| 0
| 0.130719
| 0
| 0
| 0.128951
| 0.03143
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039216
| false
| 0
| 0.098039
| 0
| 0.143791
| 0.013072
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23bb7ae2de638bcc64e1ae2469bf78db888b942c
| 389
|
py
|
Python
|
1stRound/Easy/389 Find the Difference/Counter.py
|
ericchen12377/Leetcode-Algorithm-Python
|
eb58cd4f01d9b8006b7d1a725fc48910aad7f192
|
[
"MIT"
] | 2
|
2020-04-24T18:36:52.000Z
|
2020-04-25T00:15:57.000Z
|
1stRound/Easy/389 Find the Difference/Counter.py
|
ericchen12377/Leetcode-Algorithm-Python
|
eb58cd4f01d9b8006b7d1a725fc48910aad7f192
|
[
"MIT"
] | null | null | null |
1stRound/Easy/389 Find the Difference/Counter.py
|
ericchen12377/Leetcode-Algorithm-Python
|
eb58cd4f01d9b8006b7d1a725fc48910aad7f192
|
[
"MIT"
] | null | null | null |
import collections
class Solution:
def findTheDifference(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
"""
scount, tcount = collections.Counter(s), collections.Counter(t)
for ch in tcount:  # avoid shadowing the parameter t
if tcount[ch] > scount[ch]:
return ch
s = "abcd"
t = "abcde"
p = Solution()
print(p.findTheDifference(s,t))
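An alternative sketch of the same idea using Counter subtraction, which removes the explicit loop; shown only as an equivalent approach, not as the original author's solution.

import collections

def find_the_difference(s: str, t: str) -> str:
    # Counter subtraction keeps only the surplus character present in t
    diff = collections.Counter(t) - collections.Counter(s)
    return next(iter(diff))

assert find_the_difference("abcd", "abcde") == "e"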
| 24.3125
| 71
| 0.539846
| 47
| 389
| 4.468085
| 0.489362
| 0.019048
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.33419
| 389
| 16
| 72
| 24.3125
| 0.810811
| 0.095116
| 0
| 0
| 0
| 0
| 0.028754
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.090909
| 0
| 0.363636
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23bd05e550888fff887e56ad22915b9704444c37
| 4,136
|
py
|
Python
|
submission.py
|
Amar1729/Liked-Saved-Image-Downloader
|
48c17d8cb0cdce3bf7ebab16729510be11f51013
|
[
"MIT"
] | 60
|
2015-12-04T20:11:23.000Z
|
2019-03-17T20:00:56.000Z
|
submission.py
|
Amar1729/Liked-Saved-Image-Downloader
|
48c17d8cb0cdce3bf7ebab16729510be11f51013
|
[
"MIT"
] | 68
|
2019-03-22T01:07:32.000Z
|
2021-07-02T04:48:57.000Z
|
submission.py
|
Amar1729/Liked-Saved-Image-Downloader
|
48c17d8cb0cdce3bf7ebab16729510be11f51013
|
[
"MIT"
] | 19
|
2015-09-15T17:30:29.000Z
|
2019-03-17T18:05:30.000Z
|
# -*- coding: utf-8 -*-
import pickle
import os
# third-party imports
import jsonpickle
class Submission:
def __init__(self):
# Source is either Tumblr or Reddit
self.source = u''
self.title = u''
self.author = u''
self.subreddit = u''
self.subredditTitle = u''
self.body = u''
self.bodyUrl = u''
self.postUrl = u''
def getXML(self):
baseString = (u'\t<source>' + self.source + u'</source>\n'
+ u'\t<title>' + self.title + u'</title>\n'
+ u'\t<author>' + self.author + u'</author>\n'
+ u'\t<subreddit>' + self.subreddit + u'</subreddit>\n'
+ u'\t<subredditTitle>' + self.subredditTitle + u'</subredditTitle>\n'
+ u'\t<body>' + self.body + u'</body>\n'
+ u'\t<bodyUrl>' + self.bodyUrl + u'</bodyUrl>\n'
+ u'\t<postUrl>' + self.postUrl + u'</postUrl>\n')
return str(baseString)
def getHtml(self):
baseString = (u'\t<p>' + self.source + u'</p>\n'
+ u'\t<h2>' + self.title + u'</h2>\n'
+ u'\t<h3>' + self.author + u'</h3>\n'
+ u'\t<h4>' + self.subreddit + u'</h4>\n'
+ u'\t<h4>' + self.subredditTitle + u'</h4>\n'
+ u'\t<p>' + self.body + u'</p>\n'
# + u'\t<p>' + self.bodyUrl + u'</p>\n'
+ u'\t<a href=' + self.postUrl + u'/>Link</a><br /><br />\n')
return baseString
def getJson(self):
jsonpickle.set_preferred_backend('json')
jsonpickle.set_encoder_options('json', ensure_ascii=False, indent=4, separators=(',', ': '))
return jsonpickle.encode(self)
def getAsList(self):
return [self.source, self.title, self.author,
self.subreddit, self.subredditTitle,
self.body, self.bodyUrl, self.postUrl]
def initFromDict(self, dictEntry):
self.source = dictEntry['source']
self.title = dictEntry['title']
self.author = dictEntry['author']
self.subreddit = dictEntry['subreddit']
self.subredditTitle = dictEntry['subredditTitle']
self.body = dictEntry['body']
self.bodyUrl = dictEntry['bodyUrl']
self.postUrl = dictEntry['postUrl']
def getAsList_generator(submissions):
for submission in submissions:
yield submission.getAsList()
def writeOutSubmissionsAsJson(redditList, file):
file.write('{\n'.encode('utf8'))
for submission in redditList:
outputString = submission.getJson() + u',\n'
file.write(outputString.encode('utf8'))
file.write('}'.encode('utf8'))
def saveSubmissionsAsJson(submissions, fileName):
outputFile = open(fileName, 'wb')
writeOutSubmissionsAsJson(submissions, outputFile)
outputFile.close()
def writeOutSubmissionsAsHtml(redditList, file):
submissionsStr = ""
for submission in redditList:
submissionsStr += submission.getHtml() + u'\n'
htmlStructure = u"""<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Reddit Saved Comments</title>
</head>
<body>
{0}
</body>
</html>
""".format(submissionsStr)
file.write(htmlStructure.encode('utf8'))
def saveSubmissionsAsHtml(submissions, fileName):
outputFile = open(fileName, 'wb')
writeOutSubmissionsAsHtml(submissions, outputFile)
outputFile.close()
def writeOutSubmissionsAsXML(redditList, file):
for submission in redditList:
outputString = u'<submission>\n' + submission.getXML() + u'</submission>\n'
file.write(outputString.encode('utf8'))
def saveSubmissionsAsXML(submissions, fileName):
outputFile = open(fileName, 'wb')
writeOutSubmissionsAsXML(submissions, outputFile)
outputFile.close()
def writeCacheSubmissions(submissions, cacheFileName):
cacheFile = open(cacheFileName, 'wb')
pickle.dump(submissions, cacheFile)
cacheFile.close()
def readCacheSubmissions(cacheFileName):
if os.path.exists(cacheFileName):
cacheFile = open(cacheFileName, 'rb')
submissions = pickle.load(cacheFile)
cacheFile.close()
return submissions
else:
return []
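A hypothetical round-trip sketch using the helpers above; all field values are invented for illustration.

sub = Submission()
sub.source = u'Reddit'
sub.title = u'Example post'
sub.author = u'someone'
sub.postUrl = u'https://example.com/post'

saveSubmissionsAsJson([sub], 'out.json')
saveSubmissionsAsHtml([sub], 'out.html')
writeCacheSubmissions([sub], 'cache.bin')
assert readCacheSubmissions('cache.bin')[0].title == u'Example post'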
| 30.637037
| 100
| 0.604691
| 445
| 4,136
| 5.597753
| 0.238202
| 0.012846
| 0.016861
| 0.00843
| 0.17503
| 0.077479
| 0
| 0
| 0
| 0
| 0
| 0.005402
| 0.23912
| 4,136
| 134
| 101
| 30.865672
| 0.786146
| 0.027321
| 0
| 0.11
| 0
| 0
| 0.144102
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15
| false
| 0
| 0.03
| 0.01
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23be77dcebe4a2a83f67827319e9327e25df75de
| 1,699
|
py
|
Python
|
exp/noise_features/models.py
|
WilliamCCHuang/GraphLIME
|
0f89bd67865c0b4b5a93becbc03273e55c15fc68
|
[
"MIT"
] | 38
|
2020-06-07T14:44:11.000Z
|
2022-03-08T06:19:49.000Z
|
exp/noise_features/models.py
|
WilliamCCHuang/GraphLIME
|
0f89bd67865c0b4b5a93becbc03273e55c15fc68
|
[
"MIT"
] | 9
|
2020-10-22T02:38:01.000Z
|
2022-03-15T09:53:30.000Z
|
exp/noise_features/models.py
|
WilliamCCHuang/GraphLIME
|
0f89bd67865c0b4b5a93becbc03273e55c15fc68
|
[
"MIT"
] | 6
|
2021-03-04T21:32:34.000Z
|
2021-12-24T05:58:35.000Z
|
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv, GATConv
class GCN(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim, dropout=0.5):
super(GCN, self).__init__()
self.dropout = dropout
self.conv1 = GCNConv(input_dim, hidden_dim)
self.conv2 = GCNConv(hidden_dim, output_dim)
def forward(self, x, edge_index):
x = self.conv1(x, edge_index)
x = F.relu(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.conv2(x, edge_index)
return F.log_softmax(x, dim=1)
class GAT(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim, heads_1=8, heads_2=1, att_dropout=0.6, input_dropout=0.6):
super(GAT, self).__init__()
self.att_dropout = att_dropout
self.input_dropout = input_dropout
self.conv1 = GATConv(in_channels=input_dim,
out_channels=hidden_dim // heads_1,
heads=heads_1,
concat=True,
dropout=att_dropout)
self.conv2 = GATConv(in_channels=hidden_dim,
out_channels=output_dim,
heads=heads_2,
concat=False,
dropout=att_dropout)
def forward(self, x, edge_index):
x = F.dropout(x, p=self.input_dropout, training=self.training)
x = self.conv1(x, edge_index)
x = F.elu(x)
x = F.dropout(x, p=self.input_dropout, training=self.training)
x = self.conv2(x, edge_index)
return F.log_softmax(x, dim=1)
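A minimal smoke-test sketch for the two models; the two-edge graph below is fabricated and the dimensions are arbitrary.

import torch

x = torch.randn(4, 16)                     # 4 nodes, 16 input features
edge_index = torch.tensor([[0, 1, 2, 3],
                           [1, 0, 3, 2]])  # two undirected edges as ordered pairs

model = GCN(input_dim=16, hidden_dim=32, output_dim=3)
out = model(x, edge_index)
print(out.shape)  # torch.Size([4, 3]) -- per-node log-probabilities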
| 33.313725
| 116
| 0.575633
| 226
| 1,699
| 4.070796
| 0.221239
| 0.058696
| 0.065217
| 0.047826
| 0.43913
| 0.43913
| 0.43913
| 0.368478
| 0.326087
| 0.326087
| 0
| 0.02
| 0.323131
| 1,699
| 50
| 117
| 33.98
| 0.78
| 0
| 0
| 0.324324
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108108
| false
| 0
| 0.081081
| 0
| 0.297297
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23c09d19f8336af168a12e16ec8d400bf72a904d
| 7,740
|
py
|
Python
|
nscl/nn/scene_graph/scene_graph.py
|
OolongQian/NSCL-PyTorch-Release
|
4cf0a633ceeaa9d221d66e066ef7892c04cdf9eb
|
[
"MIT"
] | null | null | null |
nscl/nn/scene_graph/scene_graph.py
|
OolongQian/NSCL-PyTorch-Release
|
4cf0a633ceeaa9d221d66e066ef7892c04cdf9eb
|
[
"MIT"
] | null | null | null |
nscl/nn/scene_graph/scene_graph.py
|
OolongQian/NSCL-PyTorch-Release
|
4cf0a633ceeaa9d221d66e066ef7892c04cdf9eb
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : scene_graph.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 07/19/2018
#
# This file is part of NSCL-PyTorch.
# Distributed under terms of the MIT license.
"""
Scene Graph generation.
"""
import os
import torch
import torch.nn as nn
import jactorch
import jactorch.nn as jacnn
from . import functional
DEBUG = bool(int(os.getenv('DEBUG_SCENE_GRAPH', 0)))
__all__ = ['SceneGraph']
class SceneGraph(nn.Module):
def __init__(self, feature_dim, output_dims, downsample_rate):
super().__init__()
self.pool_size = 7
self.feature_dim = feature_dim
self.output_dims = output_dims
self.downsample_rate = downsample_rate
self.object_roi_pool = jacnn.PrRoIPool2D(self.pool_size, self.pool_size, 1.0 / downsample_rate)
self.context_roi_pool = jacnn.PrRoIPool2D(self.pool_size, self.pool_size, 1.0 / downsample_rate)
self.relation_roi_pool = jacnn.PrRoIPool2D(self.pool_size, self.pool_size, 1.0 / downsample_rate)
if not DEBUG:
self.context_feature_extract = nn.Conv2d(feature_dim, feature_dim, 1)
self.relation_feature_extract = nn.Conv2d(feature_dim, feature_dim // 2 * 3, 1)
self.object_feature_fuse = nn.Conv2d(feature_dim * 2, output_dims[1], 1)
self.relation_feature_fuse = nn.Conv2d(feature_dim // 2 * 3 + output_dims[1] * 2, output_dims[2], 1)
self.object_feature_fc = nn.Sequential(nn.ReLU(True),
nn.Linear(output_dims[1] * self.pool_size ** 2, output_dims[1]))
self.relation_feature_fc = nn.Sequential(nn.ReLU(True),
nn.Linear(output_dims[2] * self.pool_size ** 2, output_dims[2]))
self.reset_parameters()
else:
def gen_replicate(n):
def rep(x):
return torch.cat([x for _ in range(n)], dim=1)
return rep
self.pool_size = 32
self.object_roi_pool = jacnn.PrRoIPool2D(32, 32, 1.0 / downsample_rate)
self.context_roi_pool = jacnn.PrRoIPool2D(32, 32, 1.0 / downsample_rate)
self.relation_roi_pool = jacnn.PrRoIPool2D(32, 32, 1.0 / downsample_rate)
self.context_feature_extract = gen_replicate(2)
self.relation_feature_extract = gen_replicate(3)
self.object_feature_fuse = jacnn.Identity()
self.relation_feature_fuse = jacnn.Identity()
def reset_parameters(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight.data)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight.data)
m.bias.data.zero_()
def forward(self, input, objects, objects_length):
"""qian: to thoroughly understand the meanings of object_features, context_features, relation_features,
i mean, the semantic meaning, i'd better go back to the paper itself."""
object_features = input # qian: (32, 256, 16, 24)
context_features = self.context_feature_extract(input) # qian: (32, 256, 16, 24)
relation_features = self.relation_feature_extract(input) # qian: (32, 384, 16, 24)
outputs = list()
objects_index = 0
for i in range(input.size(0)):
"""qian: iterate through every instance in the input batch."""
box = objects[objects_index:objects_index + objects_length[i].item()] # qian: (3, 4) [3 objects, 4 for bb].
objects_index += objects_length[i].item()
with torch.no_grad():
batch_ind = i + torch.zeros(box.size(0), 1, dtype=box.dtype, device=box.device)
# generate a "full-image" bounding box
image_h, image_w = input.size(2) * self.downsample_rate, input.size(3) * self.downsample_rate
image_box = torch.cat([
torch.zeros(box.size(0), 1, dtype=box.dtype, device=box.device),
torch.zeros(box.size(0), 1, dtype=box.dtype, device=box.device),
image_w + torch.zeros(box.size(0), 1, dtype=box.dtype, device=box.device),
image_h + torch.zeros(box.size(0), 1, dtype=box.dtype, device=box.device)
], dim=-1) # qian: this box contains the entire image.
# meshgrid to obtain the subject and object bounding boxes
"""qian: i don't perfectly understand the meaning of meshgrid,
but the idea is to obtain all the combinations of multiple bounding boxes (here is 2)."""
sub_id, obj_id = jactorch.meshgrid(torch.arange(box.size(0), dtype=torch.int64, device=box.device),
dim=0)
sub_id, obj_id = sub_id.contiguous().view(-1), obj_id.contiguous().view(-1)
sub_box, obj_box = jactorch.meshgrid(box, dim=0)
sub_box = sub_box.contiguous().view(box.size(0) ** 2, 4)
obj_box = obj_box.contiguous().view(box.size(0) ** 2, 4)
# union box
"""qian: union_box (9, 4), including all 9 possible bounding box pairs' union.
The union means the set union operation."""
union_box = functional.generate_union_box(sub_box, obj_box)
rel_batch_ind = i + torch.zeros(union_box.size(0), 1, dtype=box.dtype, device=box.device)
# intersection maps
# qian: (3, 1, 7, 7). crop the object ROI.
box_context_imap = functional.generate_intersection_map(box, image_box, self.pool_size)
# qian: (9, 1, 7, 7). crop ordered object ROI in each pair.
sub_union_imap = functional.generate_intersection_map(sub_box, union_box, self.pool_size)
# qian: (9, 1, 7, 7). crop ordered object ROI in each pair.
obj_union_imap = functional.generate_intersection_map(obj_box, union_box, self.pool_size)
this_context_features = self.context_roi_pool(context_features, torch.cat([batch_ind, image_box], dim=-1))
x, y = this_context_features.chunk(2, dim=1)
this_object_features = self.object_feature_fuse(torch.cat([
self.object_roi_pool(object_features, torch.cat([batch_ind, box], dim=-1)),
x, y * box_context_imap
], dim=1))
this_relation_features = self.relation_roi_pool(relation_features,
torch.cat([rel_batch_ind, union_box], dim=-1))
x, y, z = this_relation_features.chunk(3, dim=1)
this_relation_features = self.relation_feature_fuse(torch.cat([
this_object_features[sub_id], this_object_features[obj_id],
x, y * sub_union_imap, z * obj_union_imap
], dim=1))
if DEBUG:
outputs.append([
None,
this_object_features,
this_relation_features
])
else:
outputs.append([
None,
self._norm(self.object_feature_fc(this_object_features.view(box.size(0), -1))),
self._norm(
self.relation_feature_fc(this_relation_features.view(box.size(0) * box.size(0), -1)).view(
box.size(0), box.size(0), -1))
])
return outputs
def _norm(self, x):
return x / x.norm(2, dim=-1, keepdim=True)
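A standalone sketch of the meshgrid pairing trick used in forward(): for n boxes it yields all n*n ordered (subject, object) pairs. Plain torch stand-ins replace jactorch.meshgrid, which is assumed to behave like this.

import torch

def pairwise_boxes(box):
    # box: (n, 4) -> sub_box, obj_box: (n*n, 4), covering every ordered pair
    n = box.size(0)
    sub_box = box.unsqueeze(1).expand(n, n, 4).reshape(n * n, 4)
    obj_box = box.unsqueeze(0).expand(n, n, 4).reshape(n * n, 4)
    return sub_box, obj_box

box = torch.tensor([[0., 0., 1., 1.],
                    [2., 2., 3., 3.],
                    [4., 4., 5., 5.]])
sub, obj = pairwise_boxes(box)
print(sub.shape, obj.shape)  # torch.Size([9, 4]) torch.Size([9, 4])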
| 47.484663
| 120
| 0.588889
| 1,004
| 7,740
| 4.318725
| 0.204183
| 0.017297
| 0.02583
| 0.018681
| 0.410055
| 0.354244
| 0.29036
| 0.259917
| 0.218404
| 0.218404
| 0
| 0.030966
| 0.30323
| 7,740
| 162
| 121
| 47.777778
| 0.773039
| 0.108786
| 0
| 0.148148
| 0
| 0
| 0.004169
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.055556
| 0.018519
| 0.157407
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23c12f3233981fe353e6b9f38266cba0ebd64146
| 789
|
py
|
Python
|
roomcreator.py
|
ajaxalex5/MidYearProject
|
a399347cd8cb4b24cf1aeac4e11269a0a2109ddf
|
[
"MIT"
] | null | null | null |
roomcreator.py
|
ajaxalex5/MidYearProject
|
a399347cd8cb4b24cf1aeac4e11269a0a2109ddf
|
[
"MIT"
] | null | null | null |
roomcreator.py
|
ajaxalex5/MidYearProject
|
a399347cd8cb4b24cf1aeac4e11269a0a2109ddf
|
[
"MIT"
] | null | null | null |
class Room (object):
def __init__(self, name, xl, yl, layout):
self.name = str(name)
self.xl = int(xl)
self.yl = int(yl)
self.layout = layout
def load_room_file(file):
roomfile = open(file, "r")
roomlist = []
linelist = []
for line in roomfile:
linelist.append(line.rstrip('\n'))  # strip newlines so the "STOP" sentinel compares equal
roomfile.close()
while linelist[0] != "STOP":
temproomformat = []
for line in range(0, int(linelist[1])):
temproomformat.append([])
for tile in range(0, int(linelist[2])):
temproomformat[-1].append(linelist[3+line][tile])
roomlist.append(Room(linelist[0], int(linelist[1]), int(linelist[2]), temproomformat))
for x in range(4+int(linelist[2])):
del(linelist[0])
return roomlist
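A hypothetical room-file layout inferred from the parser above (name, dimensions, layout rows, a separator line, then a STOP sentinel); the exact format is an assumption.

# rooms.txt (assumed contents):
#   start    <- room name
#   3        <- xl
#   3        <- yl
#   ###      <- layout rows (a square room is assumed, since the row loop
#   #.#         uses linelist[1] but the cleanup del uses linelist[2])
#   ###
#            <- separator consumed by the del loop
#   STOP
rooms = load_room_file("rooms.txt")
print(rooms[0].name, rooms[0].xl, rooms[0].yl)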
| 24.65625
| 94
| 0.564005
| 98
| 789
| 4.479592
| 0.367347
| 0.125285
| 0.082005
| 0.050114
| 0.08656
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023214
| 0.290241
| 789
| 32
| 95
| 24.65625
| 0.760714
| 0
| 0
| 0
| 0
| 0
| 0.006329
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23c2c0ad760da305cb104343e55a702bf05d28ce
| 630
|
py
|
Python
|
nomad/tests/core/test_shortest_path_solver.py
|
romilbhardwaj/nomad
|
c6a8289872bfd07d1aa0b913f0aee7a2fccd5bf1
|
[
"MIT"
] | 2
|
2019-02-06T19:47:48.000Z
|
2019-10-30T07:30:14.000Z
|
nomad/tests/core/test_shortest_path_solver.py
|
romilbhardwaj/nomad
|
c6a8289872bfd07d1aa0b913f0aee7a2fccd5bf1
|
[
"MIT"
] | 6
|
2019-03-21T18:29:04.000Z
|
2019-04-11T18:31:34.000Z
|
nomad/tests/core/test_shortest_path_solver.py
|
romilbhardwaj/nomad
|
c6a8289872bfd07d1aa0b913f0aee7a2fccd5bf1
|
[
"MIT"
] | null | null | null |
import unittest
import networkx as nx
from core.placement.spsolver import DPShortestPathSolver
class TestShorthestPathSolverMethods(unittest.TestCase):
def setUp(self):
self.g1 = nx.read_weighted_edgelist('tests/test-graph_1.txt', create_using=nx.MultiDiGraph, nodetype=int)
def test_shortest_path(self):
u = 0
v = 3
k = 2
weight_shortest_path = 9
(weight, path) = DPShortestPathSolver.shortest_path(self.g1, u, v, k)
self.assertEqual(weight, weight_shortest_path)
self.assertEqual(path, [0, 2, 3])
if __name__ == '__main__':
unittest.main()
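A sketch of the fixture setUp expects: nx.read_weighted_edgelist parses whitespace-separated "u v weight" lines, so tests/test-graph_1.txt presumably resembles the hypothetical contents below (weights invented to be consistent with the asserted weight-9 path [0, 2, 3]).

# tests/test-graph_1.txt (hypothetical contents -- the real file is not shown):
#   0 2 4
#   2 3 5
#   0 1 2
#   1 3 20
import networkx as nx
g = nx.read_weighted_edgelist('tests/test-graph_1.txt',
                              create_using=nx.MultiDiGraph, nodetype=int)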
| 30
| 113
| 0.680952
| 78
| 630
| 5.25641
| 0.564103
| 0.117073
| 0.117073
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020367
| 0.220635
| 630
| 21
| 114
| 30
| 0.814664
| 0
| 0
| 0
| 0
| 0
| 0.047544
| 0.034865
| 0
| 0
| 0
| 0
| 0.125
| 1
| 0.125
| false
| 0
| 0.1875
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23c38e57ef816e8a8c15f2598a7fb8639340906e
| 1,285
|
py
|
Python
|
Leetcode/medium/integer-break.py
|
jen-sjen/data-structures-basics-leetcode
|
addac32974b16e0a37aa60c210ab7820b349b279
|
[
"MIT"
] | 6
|
2021-07-29T03:26:20.000Z
|
2022-01-28T15:11:45.000Z
|
Leetcode/medium/integer-break.py
|
jen-sjen/data-structures-basics-leetcode
|
addac32974b16e0a37aa60c210ab7820b349b279
|
[
"MIT"
] | 2
|
2021-09-30T09:47:23.000Z
|
2022-01-31T03:08:24.000Z
|
Leetcode/medium/integer-break.py
|
jen-sjen/data-structures-basics-leetcode
|
addac32974b16e0a37aa60c210ab7820b349b279
|
[
"MIT"
] | 5
|
2021-08-10T06:41:11.000Z
|
2022-01-29T17:50:20.000Z
|
"""
# INTEGER BREAK
Given a positive integer n, break it into the sum of at least two positive integers and maximize the product of those integers. Return the maximum product you can get.
Example 1:
Input: 2
Output: 1
Explanation: 2 = 1 + 1, 1 × 1 = 1.
Example 2:
Input: 10
Output: 36
Explanation: 10 = 3 + 3 + 4, 3 × 3 × 4 = 36.
Note: You may assume that n is not less than 2 and not larger than 58.
"""
class Solution:
def integerBreak(self, n: int) -> int:
if n == 0:
return 0
if n == 1:
return 1
if n == 2:
return 1
if n == 3:
return 2
# 0 to 3 are special cases because they produce a result less than their own value, so we can't use that lesser value in the other calculations
known = {0: 0, 1: 1, 2: 2, 3: 3}
return self.breakDown(n, known)
def breakDown(self, n, known):
if n in known:
return known[n]
else:
maximum = 0
for x in range(1, n // 2 + 1):
p1 = self.breakDown(x, known)
p2 = self.breakDown(n - x, known)
maximum = max(maximum, p1 * p2)
known[n] = maximum
return known[n]
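A quick usage check, plus an equivalent bottom-up sketch of the same recurrence (valid for n >= 4; smaller n are the special cases handled above).

assert Solution().integerBreak(10) == 36

def integer_break_dp(n: int) -> int:
    # best[i] holds the most useful "value" of i when used as a factor,
    # seeded with 1..3 mapping to themselves, exactly like the known dict above
    best = [0, 1, 2, 3] + [0] * (n - 3)
    for i in range(4, n + 1):
        best[i] = max(best[x] * best[i - x] for x in range(1, i // 2 + 1))
    return best[n]

assert integer_break_dp(10) == 36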
| 26.770833
| 167
| 0.525292
| 193
| 1,285
| 3.512953
| 0.414508
| 0.022124
| 0.026549
| 0.029499
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067602
| 0.389883
| 1,285
| 48
| 168
| 26.770833
| 0.793367
| 0.42179
| 0
| 0.173913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0
| 0
| 0.434783
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23c622f1dbca6b4b9f0f05bf93b50ad3b73a9109
| 408
|
py
|
Python
|
qiang00_before_project/qiang02_the_template/q02_add_template_filter.py
|
13528770807/flask_project
|
2930db1d59763b155f758ad4061a70d413bfc34d
|
[
"MIT"
] | null | null | null |
qiang00_before_project/qiang02_the_template/q02_add_template_filter.py
|
13528770807/flask_project
|
2930db1d59763b155f758ad4061a70d413bfc34d
|
[
"MIT"
] | null | null | null |
qiang00_before_project/qiang02_the_template/q02_add_template_filter.py
|
13528770807/flask_project
|
2930db1d59763b155f758ad4061a70d413bfc34d
|
[
"MIT"
] | null | null | null |
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def index():
li = [1, 2, 3, 4, 5, 6, 7]
return render_template('filter.html', li=li)
@app.template_filter('li_rv2')  # register the filter -- method two
def li_reverse(li):
res = list(li)
res.reverse()
return res
# app.add_template_filter(li_reverse, 'li_rv')  # register the filter -- method one
if __name__ == "__main__":
app.run(debug=True)
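A self-contained sketch of how the filter is consumed; render_template_string stands in for filter.html, whose contents are not shown here.

from flask import render_template_string

with app.app_context():
    # filter.html presumably applies the filter along these lines:
    print(render_template_string("{{ li | li_rv2 }}", li=[1, 2, 3]))
    # -> [3, 2, 1]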
| 17
| 59
| 0.647059
| 63
| 408
| 3.857143
| 0.555556
| 0.17284
| 0.131687
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024465
| 0.198529
| 408
| 23
| 60
| 17.73913
| 0.718654
| 0.161765
| 0
| 0
| 0
| 0
| 0.076923
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.076923
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23c6a65b4e2832bc68e0d04d1fcc2bd1ed8f0280
| 801
|
py
|
Python
|
smps/rcmod.py
|
BenjiStomps/py-smps
|
c449bbfcd748203630bc0aecf2552c8d836f827c
|
[
"MIT"
] | 16
|
2017-02-22T02:26:41.000Z
|
2021-04-05T10:28:02.000Z
|
smps/rcmod.py
|
BenjiStomps/py-smps
|
c449bbfcd748203630bc0aecf2552c8d836f827c
|
[
"MIT"
] | 22
|
2017-02-27T21:50:45.000Z
|
2021-05-21T02:31:35.000Z
|
smps/rcmod.py
|
BenjiStomps/py-smps
|
c449bbfcd748203630bc0aecf2552c8d836f827c
|
[
"MIT"
] | 8
|
2017-09-30T09:50:44.000Z
|
2021-05-20T22:29:54.000Z
|
""""""
import matplotlib as mpl
__all__ = ["set"]
def set(tick_scale=1, rc=None):  # avoid a mutable default argument
"""
Control plot style and scaling using seaborn and the
matplotlib rcParams interface.
:param tick_scale: A scalar number controlling the spacing
of tick marks, defaults to 1.
:type tick_scale: float
:param rc: Additional settings to pass to rcParams.
:type rc: dict
"""
rc_log_defaults = {
'xtick.major.size': 10. * tick_scale,
'xtick.minor.size': 6. * tick_scale,
'ytick.major.size': 10. * tick_scale,
'ytick.minor.size': 6. * tick_scale,
'xtick.color': '0.0',
'ytick.color': '0.0',
'axes.linewidth': 1.75,
'mathtext.default': 'regular'
}
mpl.rcParams.update(dict(rc_log_defaults, **(rc or {})))
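A minimal usage sketch, assuming the module is importable as smps.rcmod as the path above suggests.

import matplotlib.pyplot as plt
from smps import rcmod

rcmod.set(tick_scale=1.5, rc={'axes.linewidth': 2.0})
plt.loglog([1, 10, 100], [1, 10, 100])
plt.show()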
| 26.7
| 62
| 0.601748
| 105
| 801
| 4.447619
| 0.514286
| 0.134904
| 0.038544
| 0.072805
| 0.167024
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025381
| 0.262172
| 801
| 29
| 63
| 27.62069
| 0.764805
| 0.33583
| 0
| 0
| 0
| 0
| 0.273859
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.071429
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23c9d0fc017e203c468d9f46add866be9898f0bd
| 2,961
|
py
|
Python
|
abqPython_SvM_3_SaveODB.py
|
jtipton2/abaqusSignedvM
|
83f0577b6a3eab6d3c86a46ae110a94a7075981c
|
[
"BSD-3-Clause"
] | 2
|
2022-03-16T13:50:21.000Z
|
2022-03-27T15:14:09.000Z
|
abqPython_SvM_3_SaveODB.py
|
jtipton2/abaqusSignedvM
|
83f0577b6a3eab6d3c86a46ae110a94a7075981c
|
[
"BSD-3-Clause"
] | null | null | null |
abqPython_SvM_3_SaveODB.py
|
jtipton2/abaqusSignedvM
|
83f0577b6a3eab6d3c86a46ae110a94a7075981c
|
[
"BSD-3-Clause"
] | 2
|
2021-07-18T03:10:12.000Z
|
2022-03-27T15:14:11.000Z
|
# -*- coding: utf-8 -*-
import numpy as np
from odbAccess import *
from abaqusConstants import *
filename = 'Job-4e-SS-Pulse'
"""
LOAD DATA
===============================================================================
"""
results = np.load(filename + '.npz')
vonMisesMax = results['vonMisesMax'].transpose()
vonMisesMin = results['vonMisesMin'].transpose()
vonMisesStatic = results['vonMisesStatic'].transpose()
nodeNum = results['nodeNum'].transpose()
nodeCoord = results['nodeCoord']
# Sort nodeCoord on nodal values
nodeCoord = nodeCoord[nodeCoord[:,0].argsort()]
# Calculate Mean and Amplitude
vonMisesAmp = (vonMisesMax - vonMisesMin)/2
vonMisesMean = (vonMisesMax + vonMisesMin)/2
"""
LOAD ODB
===============================================================================
"""
odb = openOdb(filename+'.odb',readOnly=False)
# Get Instance
allInstances = (odb.rootAssembly.instances.keys())
odbInstance = odb.rootAssembly.instances[allInstances[-1]]
"""
FORMAT AND SAVE DATA TO ODB
===============================================================================
"""
vMNodes = np.ascontiguousarray(nodeNum, dtype=np.int32)
vMMax = np.ascontiguousarray(np.reshape(vonMisesMax,(-1,1)), dtype=np.float32)
vMMin = np.ascontiguousarray(np.reshape(vonMisesMin,(-1,1)), dtype=np.float32)
vMStatic = np.ascontiguousarray(np.reshape(vonMisesStatic,(-1,1)), dtype=np.float32)
vMMean = np.ascontiguousarray(np.reshape(vonMisesMean,(-1,1)), dtype=np.float32)
vMAmp = np.ascontiguousarray(np.reshape(vonMisesAmp,(-1,1)), dtype=np.float32)
newFieldOutputMax = odb.steps['Step-6-Response'].frames[-1].FieldOutput(name = 'vMMax', description = 'Max Signed von Mises', type = SCALAR)
newFieldOutputMax.addData(position=NODAL, instance = odbInstance, labels = vMNodes, data = vMMax.tolist())
newFieldOutputMin = odb.steps['Step-6-Response'].frames[-1].FieldOutput(name = 'vMMin', description = 'Min Signed von Mises', type = SCALAR)
newFieldOutputMin.addData(position=NODAL, instance = odbInstance, labels = vMNodes, data = vMMin.tolist())
newFieldOutputMStatic = odb.steps['Step-6-Response'].frames[-1].FieldOutput(name = 'vMStatic', description = 'Static Signed von Mises', type = SCALAR)
newFieldOutputMStatic.addData(position=NODAL, instance = odbInstance, labels = vMNodes, data = vMStatic.tolist())
newFieldOutputMean = odb.steps['Step-6-Response'].frames[-1].FieldOutput(name = 'vMMean', description = 'Signed von Mises Mean', type = SCALAR)
newFieldOutputMean.addData(position=NODAL, instance = odbInstance, labels = vMNodes, data = vMMean.tolist())
newFieldOutputAmp = odb.steps['Step-6-Response'].frames[-1].FieldOutput(name = 'vMAmp', description = 'Signed von Mises Amplitude', type = SCALAR)
newFieldOutputAmp.addData(position=NODAL, instance = odbInstance, labels = vMNodes, data = vMAmp.tolist())
"""
SAVE AND CLOSE
===============================================================================
"""
odb.save()
odb.close()
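A sketch of how a compatible input .npz could be produced outside Abaqus; the array shapes are assumptions inferred only from the transposes and reshapes above.

import numpy as np

n_nodes = 100  # assumed node count
np.savez('Job-4e-SS-Pulse.npz',
         vonMisesMax=np.random.rand(1, n_nodes),
         vonMisesMin=np.random.rand(1, n_nodes),
         vonMisesStatic=np.random.rand(1, n_nodes),
         nodeNum=np.arange(1, n_nodes + 1, dtype=np.int32).reshape(1, -1),
         nodeCoord=np.random.rand(n_nodes, 4))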
| 37.961538
| 150
| 0.660588
| 307
| 2,961
| 6.371336
| 0.29316
| 0.058282
| 0.053681
| 0.071575
| 0.330777
| 0.253067
| 0.253067
| 0.253067
| 0.109918
| 0
| 0
| 0.014184
| 0.095238
| 2,961
| 77
| 151
| 38.454545
| 0.715939
| 0.031746
| 0
| 0
| 0
| 0
| 0.117863
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.088235
| 0
| 0.088235
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23cbfc7fdcdcf980a0e3a9a727e48fece2483a0e
| 7,014
|
py
|
Python
|
ssh.py
|
unazed/Py-s-SH
|
c20d883f75f094c71386e62cbfa8197120c641fc
|
[
"MIT"
] | null | null | null |
ssh.py
|
unazed/Py-s-SH
|
c20d883f75f094c71386e62cbfa8197120c641fc
|
[
"MIT"
] | null | null | null |
ssh.py
|
unazed/Py-s-SH
|
c20d883f75f094c71386e62cbfa8197120c641fc
|
[
"MIT"
] | null | null | null |
"""
SSH reimplementation in Python, made by Unazed Spectaculum under the MIT license
"""
import socket
import struct
class SSH(object):
"""
Abstracted interface for secure-shell protocol with underlying TCP structure
"""
def __init__(self, host_ip, hostname, host_port=22, version="SSH-2.0"):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind((host_ip, host_port))
self.version = version
self.hostname = hostname
self.qualified_name = "%s-%s\r\n" % (version, hostname)
def listen(self, backlog=1):
self.socket.listen(backlog)
def accept(self):
while 1:
client, info = self.socket.accept()
print("{*} %s connected." % info[0])
yield (client, info)
print("{*} %s disconnected." % info[0])
client.close()
def handle_connections(self):
for client, info in self.accept():
version_info = client.recv(128)
print("{*} Version Information: %s" % repr(version_info))
if not version_info.startswith(self.version):
print("{*} Client has incompatible versions.")
continue
client.send(self.qualified_name)
pkt_len, pdn_len, payload, _ = self.binary_packet_parse(client)
data = self.kexinit_packet_parse(payload, client)
@staticmethod
def kexinit_packet_parse(payload, sock):
SSH_MSG_KEXINIT = payload[0]
COOKIE = payload[1:17]
PAYLOAD = payload[17:]
KEX_ALGORITHMS_LENGTH = struct.unpack("!l", PAYLOAD[:4])[0]
KEX_ALGORITHMS = PAYLOAD[4:4+KEX_ALGORITHMS_LENGTH]
PAYLOAD = PAYLOAD[4+KEX_ALGORITHMS_LENGTH:]
SERVER_HOST_KEY_ALGORITHMS_LENGTH = struct.unpack("!l", PAYLOAD[:4])[0]
SERVER_HOST_KEY_ALGORITHMS = PAYLOAD[4:4+SERVER_HOST_KEY_ALGORITHMS_LENGTH].split(',')
PAYLOAD = PAYLOAD[4+SERVER_HOST_KEY_ALGORITHMS_LENGTH:]
ENCRYPTION_ALGORITHMS_CLIENT_TO_SERVER_LENGTH = struct.unpack("!l", PAYLOAD[:4])[0]
ENCRYPTION_ALGORITHMS_CLIENT_TO_SERVER = PAYLOAD[4:4+ENCRYPTION_ALGORITHMS_CLIENT_TO_SERVER_LENGTH].split(',')
PAYLOAD = PAYLOAD[4+ENCRYPTION_ALGORITHMS_CLIENT_TO_SERVER_LENGTH:]
ENCRYPTION_ALGORITHMS_SERVER_TO_CLIENT_LENGTH = struct.unpack("!l", PAYLOAD[:4])[0]
ENCRYPTION_ALGORITHMS_SERVER_TO_CLIENT = PAYLOAD[4:4+ENCRYPTION_ALGORITHMS_SERVER_TO_CLIENT_LENGTH].split(',')
PAYLOAD = PAYLOAD[4+ENCRYPTION_ALGORITHMS_SERVER_TO_CLIENT_LENGTH:]
MAC_ALGORITHMS_CLIENT_TO_SERVER_LENGTH = struct.unpack("!l", PAYLOAD[:4])[0]
MAC_ALGORITHMS_CLIENT_TO_SERVER = PAYLOAD[4:4+MAC_ALGORITHMS_CLIENT_TO_SERVER_LENGTH].split(',')
PAYLOAD = PAYLOAD[4+MAC_ALGORITHMS_CLIENT_TO_SERVER_LENGTH:]
MAC_ALGORITHMS_SERVER_TO_CLIENT_LENGTH = struct.unpack("!l", PAYLOAD[:4])[0]
MAC_ALGORITHMS_SERVER_TO_CLIENT = PAYLOAD[4:4+MAC_ALGORITHMS_SERVER_TO_CLIENT_LENGTH].split(',')
PAYLOAD = PAYLOAD[4+MAC_ALGORITHMS_SERVER_TO_CLIENT_LENGTH:]
COMPRESSION_ALGORITHMS_CLIENT_TO_SERVER_LENGTH = struct.unpack("!l", PAYLOAD[:4])[0]
COMPRESSION_ALGORITHMS_CLIENT_TO_SERVER = PAYLOAD[4:4+COMPRESSION_ALGORITHMS_CLIENT_TO_SERVER_LENGTH].split(',')
PAYLOAD = PAYLOAD[4+COMPRESSION_ALGORITHMS_CLIENT_TO_SERVER_LENGTH:]
COMPRESSION_ALGORITHMS_SERVER_TO_CLIENT_LENGTH = struct.unpack("!l", PAYLOAD[:4])[0]
COMPRESSION_ALGORITHMS_SERVER_TO_CLIENT = PAYLOAD[4:4+COMPRESSION_ALGORITHMS_SERVER_TO_CLIENT_LENGTH].split(',')
PAYLOAD = PAYLOAD[4+COMPRESSION_ALGORITHMS_SERVER_TO_CLIENT_LENGTH:]
LANGUAGES_CLIENT_TO_SERVER_LENGTH = struct.unpack("!l", PAYLOAD[:4])[0]
LANGUAGES_CLIENT_TO_SERVER = PAYLOAD[4:4+LANGUAGES_CLIENT_TO_SERVER_LENGTH].split(',')
PAYLOAD = PAYLOAD[4+LANGUAGES_CLIENT_TO_SERVER_LENGTH:]
LANGUAGES_SERVER_TO_CLIENT_LENGTH = struct.unpack("!l", PAYLOAD[:4])[0]
LANGUAGES_SERVER_TO_CLIENT = PAYLOAD[4:4+LANGUAGES_SERVER_TO_CLIENT_LENGTH].split(',')
PAYLOAD = PAYLOAD[4+LANGUAGES_SERVER_TO_CLIENT_LENGTH:]
FIRST_KEX_PACKET_FOLLOWS = bool(PAYLOAD[0])
PAYLOAD = PAYLOAD[1:]
RESERVED = struct.unpack("!l", PAYLOAD)
print("{*} SSH_MSG_KEXINIT = %r" % SSH_MSG_KEXINIT)
print("{*} Cookie = %r" % COOKIE)
print("{*} KEX_ALGORITHMS = %s" % KEX_ALGORITHMS)
print("{*} SERVER_HOST_KEY_ALGORITHMS = %s" % SERVER_HOST_KEY_ALGORITHMS)
print("{*} ENCRYPTION_ALGORITHMS_CLIENT_TO_SERVER = %s" % ENCRYPTION_ALGORITHMS_CLIENT_TO_SERVER)
print("{*} ENCRYPTION_ALGORITHMS_SERVER_TO_CLIENT = %s" % ENCRYPTION_ALGORITHMS_SERVER_TO_CLIENT)
print("{*} MAC_ALGORITHMS_CLIENT_TO_SERVER = %s" % MAC_ALGORITHMS_CLIENT_TO_SERVER)
print("{*} MAC_ALGORITHMS_SERVER_TO_CLIENT = %s" % MAC_ALGORITHMS_SERVER_TO_CLIENT)
print("{*} COMPRESSION_ALGORITHMS_CLIENT_TO_SERVER = %s" % COMPRESSION_ALGORITHMS_CLIENT_TO_SERVER)
print("{*} COMPRESSION_ALGORITHMS_SERVER_TO_CLIENT = %s" % COMPRESSION_ALGORITHMS_SERVER_TO_CLIENT)
print("{*} LANGUAGES_CLIENT_TO_SERVER = %s" % LANGUAGES_CLIENT_TO_SERVER)
print("{*} LANGUAGES_SERVER_TO_CLIENT = %s" % LANGUAGES_SERVER_TO_CLIENT)
print("{*} FIRST_KEX_PACKETS_FOLLOWS = %r" % FIRST_KEX_PACKET_FOLLOWS)
print("{*} RESERVED = %r" % RESERVED)
if FIRST_KEX_PACKET_FOLLOWS:
print("{*} Data = %r" % sock.recv(350000))
return (
SSH_MSG_KEXINIT,
COOKIE,
KEX_ALGORITHMS,
SERVER_HOST_KEY_ALGORITHMS,
ENCRYPTION_ALGORITHMS_CLIENT_TO_SERVER,
ENCRYPTION_ALGORITHMS_SERVER_TO_CLIENT,
MAC_ALGORITHMS_CLIENT_TO_SERVER,
MAC_ALGORITHMS_SERVER_TO_CLIENT,
COMPRESSION_ALGORITHMS_CLIENT_TO_SERVER,
COMPRESSION_ALGORITHMS_SERVER_TO_CLIENT,
LANGUAGES_CLIENT_TO_SERVER,
LANGUAGES_SERVER_TO_CLIENT,
FIRST_KEX_PACKET_FOLLOWS,
RESERVED # for error checking
)
@staticmethod
def namelist_create(lists):
pass
@staticmethod
def binary_packet_create(data):
PACKET_LENGTH = struct.pack("!l", len(data))
print("{*} PACKET_LENGTH = %r" % PACKET_LENGTH)
@staticmethod
def binary_packet_parse(sock):
PACKET_LENGTH = struct.unpack("!l", sock.recv(4))[0]
PADDING_LENGTH = struct.unpack("!b", sock.recv(1))[0]
PAYLOAD = sock.recv(PACKET_LENGTH-PADDING_LENGTH-1)
RANDOM_PADDING = sock.recv(PADDING_LENGTH+1)
print("{*} Packet length = %s" % PACKET_LENGTH)
print("{*} Pading length = %s" % PADDING_LENGTH)
print("{*} Padding = %r" % RANDOM_PADDING)
return (PACKET_LENGTH, PADDING_LENGTH, PAYLOAD, RANDOM_PADDING)
def close(self):
self.socket.close()
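A hypothetical server bootstrap for the class above; the str-based socket I/O implies this runs under Python 2.

server = SSH("0.0.0.0", "PySSH", host_port=2222)
server.listen(backlog=5)
try:
    server.handle_connections()
finally:
    server.close()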
| 44.675159
| 120
| 0.681637
| 841
| 7,014
| 5.282996
| 0.146254
| 0.054018
| 0.088229
| 0.113437
| 0.525771
| 0.365069
| 0.313302
| 0.220122
| 0.180959
| 0.089129
| 0
| 0.014772
| 0.208583
| 7,014
| 156
| 121
| 44.961538
| 0.785624
| 0.025235
| 0
| 0.033898
| 0
| 0
| 0.10816
| 0.046815
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076271
| false
| 0.008475
| 0.016949
| 0
| 0.118644
| 0.194915
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23ce177acd70b69372b2d3dd196d4ee81ee251d0
| 1,140
|
py
|
Python
|
seriously/probably_prime.py
|
Mego/Seriously
|
07b256e4f35f5efec3b01434300f9ccc551b1c3e
|
[
"MIT"
] | 104
|
2015-11-02T00:08:32.000Z
|
2022-02-17T23:17:14.000Z
|
seriously/probably_prime.py
|
Mego/Seriously
|
07b256e4f35f5efec3b01434300f9ccc551b1c3e
|
[
"MIT"
] | 68
|
2015-11-09T05:33:24.000Z
|
2020-04-10T06:46:54.000Z
|
seriously/probably_prime.py
|
Mego/Seriously
|
07b256e4f35f5efec3b01434300f9ccc551b1c3e
|
[
"MIT"
] | 25
|
2015-11-19T05:34:09.000Z
|
2021-07-20T13:54:03.000Z
|
import random
def find_spelling(n):
"""
Finds d, r s.t. n-1 = 2^r * d
"""
r = 0
d = n - 1
# divmod used for large numbers
quotient, remainder = divmod(d, 2)
# while we can still divide 2's into n-1...
while remainder != 1:
r += 1
d = quotient # previous quotient before we overwrite it
quotient, remainder = divmod(d, 2)
return r, d
def probably_prime(n, k=10):
"""
Miller-Rabin primality test
Input: n > 3
k: accuracy of test
Output: True if n is "probably prime", False if it is composite
From psuedocode at https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test
"""
if n == 2:
return True
if n % 2 == 0:
return False
r, d = find_spelling(n)
for check in range(k):
a = random.randint(2, n - 1)
x = pow(a, d, n) # a^d % n
if x == 1 or x == n - 1:
continue
for i in range(r):
x = pow(x, 2, n)
if x == n - 1:
break
else:
return False
return True
| 24.782609
| 89
| 0.497368
| 168
| 1,140
| 3.345238
| 0.434524
| 0.021352
| 0.046263
| 0.085409
| 0.088968
| 0
| 0
| 0
| 0
| 0
| 0
| 0.039301
| 0.397368
| 1,140
| 46
| 90
| 24.782609
| 0.778748
| 0.317544
| 0
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.035714
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23ce1db523427cb59d90dd66571f9536a6eda982
| 4,859
|
py
|
Python
|
home/moz4r/Marty/marty_customInmoov.py
|
rv8flyboy/pyrobotlab
|
4e04fb751614a5cb6044ea15dcfcf885db8be65a
|
[
"Apache-2.0"
] | 63
|
2015-02-03T18:49:43.000Z
|
2022-03-29T03:52:24.000Z
|
home/moz4r/Marty/marty_customInmoov.py
|
rv8flyboy/pyrobotlab
|
4e04fb751614a5cb6044ea15dcfcf885db8be65a
|
[
"Apache-2.0"
] | 16
|
2016-01-26T19:13:29.000Z
|
2018-11-25T21:20:51.000Z
|
home/moz4r/Marty/marty_customInmoov.py
|
rv8flyboy/pyrobotlab
|
4e04fb751614a5cb6044ea15dcfcf885db8be65a
|
[
"Apache-2.0"
] | 151
|
2015-01-03T18:55:54.000Z
|
2022-03-04T07:04:23.000Z
|
#MARTY I2C PI
#SCRIPT BASED ON MATS WORK
#SCRIPT PUSHED INSIDE inmoovCustom : https://github.com/MyRobotLab/inmoov/tree/master/InmoovScript
raspi = Runtime.createAndStart("RasPi","RasPi")
adaFruit16c = Runtime.createAndStart("AdaFruit16C","Adafruit16CServoDriver")
adaFruit16c.setController("RasPi","1","0x40")
#
# This part is common for both devices and creates two servo instances
# on port 3 and 8 on the Adafruit16CServoDriver
# Change the names of the servos and the pin numbers to your usage
cuisseDroite = Runtime.createAndStart("cuisseDroite", "Servo")
genouDroite = Runtime.createAndStart("genouDroite", "Servo")
chevilleDroite = Runtime.createAndStart("chevilleDroite", "Servo")
cuisseGauche = Runtime.createAndStart("cuisseGauche", "Servo")
genouGauche = Runtime.createAndStart("genouGauche", "Servo")
chevilleGauche = Runtime.createAndStart("chevilleGauche", "Servo")
eyes = Runtime.createAndStart("eyes", "Servo")
armLeft = Runtime.createAndStart("armLeft", "Servo")
armRight = Runtime.createAndStart("armRight", "Servo")
sleep(1)
ledBlue=14
ledRed=13
ledGreen=12
vitesse=80
cuisseDroiteRest=90
genouDroiteRest=90
chevilleDroiteRest=80
cuisseGaucheRest=97
genouGaucheRest=95
chevilleGaucheRest=90
armLeftRest=90
armRightRest=120
eyesRest=90
cuisseDroite.setRest(cuisseDroiteRest)
genouDroite.setRest(genouDroiteRest)
chevilleDroite.setRest(chevilleDroiteRest)
cuisseGauche.setRest(cuisseGaucheRest)
genouGauche.setRest(genouGaucheRest)
chevilleGauche.setRest(chevilleGaucheRest)
eyes.setRest(eyesRest)
eyes.map(0,180,66,100)
armLeft.setRest(armLeftRest)
armRight.setRest(armRightRest)
cuisseDroite.attach(adaFruit16c,0)
genouDroite.attach(adaFruit16c,1)
chevilleDroite.attach(adaFruit16c,2)
cuisseGauche.attach(adaFruit16c,4)
genouGauche.attach(adaFruit16c,5)
chevilleGauche.attach(adaFruit16c,15)
eyes.attach(adaFruit16c,8)
armLeft.attach(adaFruit16c,9)
armRight.attach(adaFruit16c,10)
eyes.setVelocity(-1)
armLeft.setVelocity(-1)
armRight.setVelocity(-1)
cuisseDroite.rest()
genouDroite.rest()
chevilleDroite.rest()
cuisseGauche.rest()
genouGauche.rest()
chevilleGauche.rest()
eyes.rest()
armLeft.rest()
armRight.rest()
sleep(2)
cuisseDroite.detach()
genouDroite.detach()
chevilleDroite.detach()
cuisseGauche.detach()
genouGauche.detach()
chevilleGauche.detach()
armLeft.detach()
armRight.detach()
def walk(step):
talkBlocking("D'accord, c'est parti !")
start(step)
talk("Je m'aichauffe")
cuisseDroite.attach()
genouDroite.attach()
chevilleDroite.attach()
cuisseGauche.attach()
genouGauche.attach()
chevilleGauche.attach()
genouGauche.attach()
armLeft.attach()
armRight.attach()
cuisseDroite.setVelocity(vitesse)
genouDroite.setVelocity(vitesse)
chevilleDroite.setVelocity(vitesse)
cuisseGauche.setVelocity(vitesse)
genouGauche.setVelocity(vitesse)
chevilleGauche.setVelocity(vitesse)
for i in range(1, step):
armLeft.moveTo(50)
armRight.moveTo(50)
chevilleDroite.moveTo(chevilleDroiteRest+20)
chevilleGauche.moveTo(chevilleGaucheRest+30)
sleep(0.8)
cuisseGauche.moveTo(cuisseDroiteRest+40)
cuisseDroite.moveTo(chevilleDroiteRest-40)
sleep(0.8)
chevilleDroite.moveTo(chevilleDroiteRest-30)
chevilleGauche.moveTo(chevilleGaucheRest-20)
sleep(0.8)
cuisseGauche.moveTo(cuisseGaucheRest)
cuisseDroite.moveTo(chevilleDroiteRest)
armLeft.moveTo(90)
armRight.moveTo(90)
sleep(0.8)
cuisseDroite.detach()
genouDroite.detach()
chevilleDroite.detach()
cuisseGauche.detach()
genouGauche.detach()
chevilleGauche.detach()
eyes.detach()
def start(step):
sleep(5)
armLeft.attach()
armRight.attach()
armLeft.attach()
eyes.attach()
eyes.moveTo(180)
armRight.moveTo(0)
sleep(2)
eyes.moveTo(0)
armRight.moveTo(120)
sleep(1)
eyes.moveTo(180)
sleep(0)
eyes.moveTo(180)
sleep(2)
eyes.moveTo(0)
armRight.moveTo(armRightRest)
adaFruit16c.setPinValue(7,0)
adaFruit16c.setPinValue(ledGreen,0)
adaFruit16c.setPinValue(ledRed,0)
adaFruit16c.setPinValue(ledBlue,0)
def red():
adaFruit16c.setPinValue(7,0)
adaFruit16c.setPinValue(ledGreen,1)
adaFruit16c.setPinValue(ledRed,0)
adaFruit16c.setPinValue(ledBlue,1)
def blue():
adaFruit16c.setPinValue(7,0)
adaFruit16c.setPinValue(ledGreen,1)
adaFruit16c.setPinValue(ledRed,1)
adaFruit16c.setPinValue(ledBlue,0)
def green():
adaFruit16c.setPinValue(7,0)
adaFruit16c.setPinValue(ledGreen,0)
adaFruit16c.setPinValue(ledRed,1)
adaFruit16c.setPinValue(ledBlue,1)
def noLed():
adaFruit16c.setPinValue(ledGreen,0)
adaFruit16c.setPinValue(ledRed,0)
adaFruit16c.setPinValue(ledBlue,0)
adaFruit16c.setPinValue(7,1)
red()
sleep(1)
green()
sleep(1)
blue()
sleep(1)
noLed()
led = Runtime.start("led","Clock")
led.setInterval(100)
global i
i=0
def ledFunc(timedata):
global i
if i==0:
red()
i=1
else:
noLed()
i=0
led.setInterval(random.randint(10,100))
led.addListener("pulse", python.name, "ledFunc")
| 22.919811
| 98
| 0.787611
| 562
| 4,859
| 6.809609
| 0.25089
| 0.114973
| 0.066109
| 0.025085
| 0.232558
| 0.215574
| 0.215574
| 0.175856
| 0.175856
| 0.175856
| 0
| 0.048616
| 0.085614
| 4,859
| 211
| 99
| 23.028436
| 0.812739
| 0.064622
| 0
| 0.366279
| 0
| 0
| 0.05465
| 0.004848
| 0
| 0
| 0.000881
| 0
| 0
| 1
| 0.040698
| false
| 0
| 0
| 0
| 0.040698
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23ce6753d608fd795d0aebbaec8257e2469df9e3
| 7,214
|
py
|
Python
|
tabular_experiments_supp_mat.py
|
juliendelaunay35000/APE-Adapted_Post-Hoc_Explanations
|
991c4cf6153fafef4200732a5ef8ac93f1175f27
|
[
"MIT"
] | null | null | null |
tabular_experiments_supp_mat.py
|
juliendelaunay35000/APE-Adapted_Post-Hoc_Explanations
|
991c4cf6153fafef4200732a5ef8ac93f1175f27
|
[
"MIT"
] | null | null | null |
tabular_experiments_supp_mat.py
|
juliendelaunay35000/APE-Adapted_Post-Hoc_Explanations
|
991c4cf6153fafef4200732a5ef8ac93f1175f27
|
[
"MIT"
] | null | null | null |
from sklearn import tree, svm
from sklearn.neural_network import MLPClassifier
from sklearn.multiclass import OneVsRestClassifier
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier, VotingClassifier
from sklearn.linear_model import LogisticRegression, RidgeClassifier
from sklearn.naive_bayes import GaussianNB
import matplotlib.pyplot as plt
import numpy as np
from generate_dataset import generate_dataset, preparing_dataset
from storeExperimentalInformations import store_experimental_informations, prepare_legends
import baseGraph
import ape_tabular
import warnings
import pickle
#from keras.models import Sequential
#from keras.layers import Dense
if __name__ == "__main__":
# Filter the warning from matplotlib
warnings.filterwarnings("ignore")
# Datasets used for the experiments
dataset_names = ["generate_circles", "generate_moons", "blood", "diabete", "generate_blobs"]# "compas", "adult", "titanic"
# array of the models used for the experiments
models = [GradientBoostingClassifier(n_estimators=20, learning_rate=1.0),
RandomForestClassifier(n_estimators=20),
#MLPClassifier(random_state=1, activation="logistic"),
VotingClassifier(estimators=[('lr', LogisticRegression()), ('gnb', GaussianNB()), ('rc', LogisticRegression())], voting="soft"),  # 'rc' reuses LogisticRegression: soft voting needs predict_proba, which RidgeClassifier lacks
MLPClassifier(random_state=1),
RidgeClassifier()]#,
#LogisticRegression(),
#tree.DecisionTreeClassifier(),
#Sequential(),
#models=[RidgeClassifier(), MLPClassifier(random_state=1)]
# Number of instances explained by each model on each dataset
max_instance_to_explain = 10
# Print explanation result
illustrative_example = False
""" All the variable necessaries for generating the graph results """
# Store results inside graph if set to True
graph = True
verbose = False
growing_sphere = False
if growing_sphere:
label_graph = "growing spheres "
growing_method = "GS"
else:
label_graph = ""
growing_method = "GF"
# Threshold for explanation method precision
threshold_interpretability = 0.99
linear_separability_index = 1
interpretability_name = ['ls', 'ls regression', 'ls raw data', 'ls extend']
#interpretability_name = ['ls log reg', 'ls raw data']
# Initialize all the variable needed to store the result in graph
for dataset_name in dataset_names:
if graph: experimental_informations = store_experimental_informations(len(models), len(interpretability_name), interpretability_name, len(models))
models_name = []
# Store dataset inside x and y (x data and y labels), with additional information
x, y, class_names, regression, multiclass, continuous_features, categorical_features, \
categorical_values, categorical_names, transformations = generate_dataset(dataset_name)
for nb_model, model in enumerate(models):
model_name = type(model).__name__
if "MLP" in model_name and nb_model <=2 :
model_name += "logistic"
if growing_sphere:
filename = "./results/"+dataset_name+"/"+model_name+"/growing_spheres/"+str(threshold_interpretability)+"/sup_mat_"
filename_all = "./results/"+dataset_name+"/growing_spheres/"+str(threshold_interpretability)+"/sup_mat_"
else:
filename="./results/"+dataset_name+"/"+model_name+"/"+str(threshold_interpretability)+"/sup_mat_"
filename_all="./results/"+dataset_name+"/"+str(threshold_interpretability)+"/sup_mat_"
if graph: experimental_informations.initialize_per_models(filename)
models_name.append(model_name)
# Split the dataset inside train and test set (50% each set)
dataset, black_box, x_train, x_test, y_train, y_test = preparing_dataset(x, y, dataset_name, model)
print("###", model_name, "training on", dataset_name, "dataset.")
if 'Sequential' in model_name:
# Train a neural network with two relu layers and a sigmoid output
# (this branch requires the keras imports at the top of the file to be uncommented)
black_box.add(Dense(12, input_dim=len(x_train[0]), activation='relu'))
black_box.add(Dense(8, activation='relu'))
black_box.add(Dense(1, activation='sigmoid'))
black_box.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
black_box.fit(x_train, y_train, epochs=50, batch_size=10)
def predict(x):
if x.shape[0] > 1:
return np.asarray([prediction[0] for prediction in black_box.predict_classes(x)])
return black_box.predict_classes(x)[0]
def score(x, y):
return sum(predict(x) == y)/len(y)
else:
black_box = black_box.fit(x_train, y_train)
predict = black_box.predict
score = black_box.score
print('### Accuracy:', score(x_test, y_test))
cnt = 0
explainer = ape_tabular.ApeTabularExplainer(x_train, class_names, predict, black_box.predict_proba,
continuous_features=continuous_features,
categorical_features=categorical_features, categorical_values=categorical_values,
feature_names=dataset.feature_names, categorical_names=categorical_names,
verbose=verbose, threshold_precision=threshold_interpretability,
linear_separability_index=linear_separability_index,
transformations=transformations)
for instance_to_explain in x_test:
if cnt == max_instance_to_explain:
break
print("### Instance number:", cnt + 1, "over", max_instance_to_explain)
print("### Models ", nb_model + 1, "over", len(models))
print("instance to explain:", instance_to_explain)
try:
precision, coverage, f2 = explainer.explain_instance(instance_to_explain,
growing_method=growing_method,
local_surrogate_experiment=True)
print("precision", precision)
print("coverage", coverage)
print("f2", f2)
if graph: experimental_informations.store_experiments_information_instance(precision, 'precision.csv', coverage, 'coverage.csv', f2, 'f2.csv')
cnt += 1
except Exception as inst:
print(inst)
if graph: experimental_informations.store_experiments_information(max_instance_to_explain, nb_model, 'precision.csv', 'coverage.csv', 'f2.csv', filename_all=filename_all)
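# Note: Sequential.predict_classes(), used in the branch above, was removed in
# recent Keras/TensorFlow releases. A hedged, self-contained sketch of an
# equivalent wrapper; "model" stands for any compiled sigmoid-output Keras
# model and is an assumption here, not a name from this script.
def keras_predict_labels(model, x):
    """Return hard 0/1 labels from a sigmoid-output Keras model (sketch)."""
    probabilities = model.predict(x)  # shape (n, 1), sigmoid activations
    labels = (probabilities > 0.5).astype(int).ravel()  # threshold at 0.5
    return labels if x.shape[0] > 1 else labels[0]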
| 59.131148
| 182
| 0.624896
| 740
| 7,214
| 5.847297
| 0.293243
| 0.024035
| 0.031431
| 0.018489
| 0.172175
| 0.144211
| 0.081812
| 0.044373
| 0.029119
| 0.029119
| 0
| 0.008179
| 0.28819
| 7,214
| 122
| 183
| 59.131148
| 0.834469
| 0.123094
| 0
| 0.05102
| 0
| 0
| 0.078884
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020408
| false
| 0
| 0.142857
| 0.010204
| 0.193878
| 0.091837
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23ceb4be40ab14b96763eb535badca57463b0253
| 8,099
|
py
|
Python
|
summarise_results.py
|
MDBAuth/EWR_tool
|
5b05cf276822d97a38a32a5fc031209224a04fb3
|
[
"CC0-1.0"
] | 5
|
2021-03-17T00:33:53.000Z
|
2022-03-07T18:16:25.000Z
|
summarise_results.py
|
MDBAuth/EWR_tool
|
5b05cf276822d97a38a32a5fc031209224a04fb3
|
[
"CC0-1.0"
] | null | null | null |
summarise_results.py
|
MDBAuth/EWR_tool
|
5b05cf276822d97a38a32a5fc031209224a04fb3
|
[
"CC0-1.0"
] | 2
|
2022-01-14T03:50:10.000Z
|
2022-02-14T00:45:56.000Z
|
import pandas as pd
import numpy as np
import data_inputs, evaluate_EWRs
#--------------------------------------------------------------------------------------------------
def sum_events(events):
'''returns a sum of events'''
return int(round(events.sum(), 0))
def get_frequency(events):
'''Returns the frequency of years they occur in'''
if events.count() == 0:
result = 0
else:
result = (int(events.sum())/int(events.count()))*100
return int(round(result, 0))
def get_average(input_events):
'''Returns overall average length of events'''
events = input_events.dropna()
if len(events) == 0:
result = 0
else:
result = round(sum(events)/len(events),1)
return result
def get_event_length(input_events, num_events):
'''Returns the average event length given the total event count'''
events = input_events.dropna()
if num_events == 0:
EL = 0
else:
EL = round(sum(events)/num_events,1)
return EL
def count_exceedence(input_events, EWR_info):
'''Counts the inter-event exceedences recorded per year; returns 'N/A' when no maximum inter-event period is defined'''
events = input_events.copy(deep=True)
if EWR_info['max_inter-event'] is None:
return 'N/A'
else:
masking = events.isna()
events[masking] = ''
total = 0
for year in events.index:
if events[year] != '':  # skip years masked out above
count = len(events[year])
total = total + count
return int(total)
def initialise_summary_df_columns(input_dict):
'''Ingests a dictionary of ewr yearly results and a list of statistical tests to perform,
initialises a dataframe with these as a multi-level heading and returns it'''
analysis = data_inputs.analysis()
column_list = []
list_of_arrays = []
for scenario, scenario_results in input_dict.items():
for sub_col in analysis:
column_tuple = (scenario, sub_col)
list_of_arrays.append(column_tuple)
array_of_arrays = tuple(list_of_arrays)
multi_col_df = pd.MultiIndex.from_tuples(array_of_arrays, names = ['scenario', 'type'])
return multi_col_df
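# A hedged, minimal illustration of the column structure the function above
# produces; the scenario names here are invented for the example.
def _demo_summary_columns():
    '''Build a tiny two-scenario MultiIndex like initialise_summary_df_columns does.'''
    pairs = [('scenarioA', 'Event years'), ('scenarioA', 'Frequency'),
             ('scenarioB', 'Event years'), ('scenarioB', 'Frequency')]
    multi_col = pd.MultiIndex.from_tuples(pairs, names=['scenario', 'type'])
    # A column is then addressed as df[('scenarioA', 'Frequency')]:
    return pd.DataFrame(index=[0], columns=multi_col)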
def initialise_summary_df_rows(input_dict):
'''Ingests a dictionary of ewr yearly results,
pulls the location information and the associated ewrs at each location,
saves these as respective indexes and returns the multi-level index'''
index_1 = list()
index_2 = list()
index_3 = list()
combined_index = list()
# Get unique col list:
for scenario, scenario_results in input_dict.items():
for site, site_results in scenario_results.items():
for PU in site_results:
site_list = []
for col in site_results[PU]:
if '_' in col:
all_parts = col.split('_')
remove_end = all_parts[:-1]
if len(remove_end) > 1:
EWR_code = '_'.join(remove_end)
else:
EWR_code = remove_end[0]
else:
EWR_code = col
if EWR_code in site_list:
continue
else:
site_list.append(EWR_code)
add_index = tuple((site, PU, EWR_code))
if add_index not in combined_index:
combined_index.append(add_index)
unique_index = tuple(combined_index)
multi_index = pd.MultiIndex.from_tuples(unique_index, names = ['gauge', 'planning unit', 'EWR'])
return multi_index
def allocate(df, add_this, idx, site, PU, EWR, scenario, category):
'''Save element to a location in the dataframe'''
df.loc[idx[[site], [PU], [EWR]], idx[scenario, category]] = add_this
return df
def summarise(input_dict):
'''Ingests a dictionary with ewr pass/fails
summarises these results and returns a single summary dataframe'''
PU_items = data_inputs.get_planning_unit_info()
EWR_table, see_notes_ewrs, undefined_ewrs, noThresh_df, no_duration, DSF_ewrs = data_inputs.get_EWR_table()
# Initialise dataframe with multi level column heading and multi-index:
multi_col_df = initialise_summary_df_columns(input_dict)
index = initialise_summary_df_rows(input_dict)
df = pd.DataFrame(index = index, columns=multi_col_df)
# Run the analysis and add the results to the dataframe created above:
for scenario, scenario_results in input_dict.items():
for site, site_results in scenario_results.items():
for PU in site_results:
for col in site_results[PU]:
all_parts = col.split('_')
remove_end = all_parts[:-1]
if len(remove_end) > 1:
EWR = '_'.join(remove_end)
else:
EWR = remove_end[0]
idx = pd.IndexSlice
if ('_eventYears' in col):
S = sum_events(site_results[PU][col])
df = allocate(df, S, idx, site, PU, EWR, scenario, 'Event years')
F = get_frequency(site_results[PU][col])
df = allocate(df, F, idx, site, PU, EWR, scenario, 'Frequency')
PU_num = PU_items['PlanningUnitID'].loc[PU_items[PU_items['PlanningUnitName'] == PU].index[0]]
EWR_info = evaluate_EWRs.get_EWRs(PU_num, site, EWR, EWR_table, None, ['TF'])
TF = EWR_info['frequency']
df = allocate(df, TF, idx, site, PU, EWR, scenario, 'Target frequency')
elif ('_numAchieved' in col):
S = sum_events(site_results[PU][col])
df = allocate(df, S, idx, site, PU, EWR, scenario, 'Achievement count')
ME = get_average(site_results[PU][col])
df = allocate(df, ME, idx, site, PU, EWR, scenario, 'Achievements per year')
elif ('_numEvents' in col):
S = sum_events(site_results[PU][col])
df = allocate(df, S, idx, site, PU, EWR, scenario, 'Event count')
ME = get_average(site_results[PU][col])
df = allocate(df, ME, idx, site, PU, EWR, scenario, 'Events per year')
elif ('_eventLength' in col):
EL = get_event_length(site_results[PU][col], S)  # S is the event count from the preceding '_numEvents' column, so column order matters
df = allocate(df, EL, idx, site, PU, EWR, scenario, 'Event length')
elif ('_totalEventDays' in col):
AD = get_average(site_results[PU][col])
df = allocate(df, AD, idx, site, PU, EWR, scenario, 'Threshold days')
elif ('daysBetweenEvents' in col):
PU_num = PU_items['PlanningUnitID'].loc[PU_items[PU_items['PlanningUnitName'] == PU].index[0]]
EWR_info = evaluate_EWRs.get_EWRs(PU_num, site, EWR, EWR_table, None, ['MIE'])
DB = count_exceedence(site_results[PU][col], EWR_info)
df = allocate(df, DB, idx, site, PU, EWR, scenario, 'Inter-event exceedence count')
# Also save the max inter-event period to the data summary for reference
# (EWR_info was already fetched above with the same arguments)
MIE = EWR_info['max_inter-event']
df = allocate(df, MIE, idx, site, PU, EWR, scenario, 'Max inter event period (years)')
elif ('_missingDays' in col):
MD = sum_events(site_results[PU][col])
df = allocate(df, MD, idx, site, PU, EWR, scenario, 'No data days')
elif ('_totalPossibleDays' in col):
TD = sum_events(site_results[PU][col])
df = allocate(df, TD, idx, site, PU, EWR, scenario, 'Total days')
return df
| 47.087209
| 118
| 0.548463
| 963
| 8,099
| 4.41433
| 0.185877
| 0.04399
| 0.033874
| 0.042343
| 0.410727
| 0.337803
| 0.262291
| 0.255705
| 0.255705
| 0.218772
| 0
| 0.004683
| 0.340783
| 8,099
| 172
| 119
| 47.087209
| 0.791534
| 0.115076
| 0
| 0.275362
| 0
| 0
| 0.06482
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065217
| false
| 0
| 0.021739
| 0
| 0.15942
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23cf95b3c49a497e9b4fcecf5c43de957206031c
| 1,564
|
py
|
Python
|
setup.py
|
nitehawck/DevEnvManager
|
425b0d621be577fe73f22b4641f7099eac65669e
|
[
"MIT"
] | 1
|
2016-05-16T23:13:47.000Z
|
2016-05-16T23:13:47.000Z
|
setup.py
|
nitehawck/DevEnvManager
|
425b0d621be577fe73f22b4641f7099eac65669e
|
[
"MIT"
] | 41
|
2016-01-22T00:56:14.000Z
|
2016-05-12T14:38:37.000Z
|
setup.py
|
nitehawck/DevEnvManager
|
425b0d621be577fe73f22b4641f7099eac65669e
|
[
"MIT"
] | null | null | null |
from setuptools import setup
with open('README.rst') as f:
readme = f.read()
setup(
name="dem",
version="0.0.8",
author="Ian Macaulay, Jeremy Opalach",
author_email="ismacaul@gmail.com",
url="http://www.github.com/nitehawck/dem",
description="An agnostic library/package manager for setting up a development project environment",
long_description=readme,
license="MIT License",
classifiers=[
'Development Status :: 3 - Alpha',
#'Development Status :: 4 - Beta',
#'Development Status :: 5 - Production / Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS :: MacOS X',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Build Tools',
],
packages=['dem', 'dem.dependency', 'dem.project'],
install_requires=[
'virtualenv',
'PyYaml',
'wget',
'gitpython'
],
tests_require=[
'pyfakefs',
'mock'
],
entry_points={
'console_scripts': [
'dem = dem.__main__:main'
]
},
)
| 31.28
| 103
| 0.575448
| 152
| 1,564
| 5.855263
| 0.644737
| 0.106742
| 0.140449
| 0.08764
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012456
| 0.28133
| 1,564
| 50
| 104
| 31.28
| 0.779359
| 0.05179
| 0
| 0.065217
| 0
| 0
| 0.550607
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.021739
| 0
| 0.021739
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23d1f2c4f4ea5639727ded8d5757f9d66fc0cc39
| 13,959
|
py
|
Python
|
TarSync.py
|
waynegramlich/Fab
|
d4a23067a0354ffda106f7032df0501c8db24499
|
[
"MIT"
] | 1
|
2022-03-20T12:25:34.000Z
|
2022-03-20T12:25:34.000Z
|
TarSync.py
|
waynegramlich/Fab
|
d4a23067a0354ffda106f7032df0501c8db24499
|
[
"MIT"
] | null | null | null |
TarSync.py
|
waynegramlich/Fab
|
d4a23067a0354ffda106f7032df0501c8db24499
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""TarSync.py: Synchronize .fcstd and .tar files.
Usage: TarSync.py [OPTIONS] [DIR] ...
Recursively scans directories searching for `.fcstd`/`.FCstd` files
and synchronizes them with associated `.tar` files. The current
directory is used if no explicit directory or files are listed.
Options:
* [-n] Visit all files without doing anything. Use with [-v] option.
* [-v] Verbose mode.
Rationale:
A FreeCAD `.fcstd` file is basically a bunch of text files compressed with gzip.
For fun, the `unzip -l XYZ.fcstd` command lists the files contained in `XYZ.fcstd`.
Due to the repetitive nature of the text files contained therein, the gzip algorithm
can achieve significant overall file compression.
A `git` repository basically consists of a bunch of files called blobs, where the
term "blob" stands for Binary Large OBject. Each blob represents some version
of a file stored in the repository. Being binary files, `.fcstd` files can be
stored inside of a git repository. However, the compressed (i.e. binary)
nature of `.fcstd` files can make the git repository storage requirements
grow at a pretty rapid rate as multiple versions of the `.fcstd` files get stored
into a git repository.
To combat the storage growth requirements, `git` uses a compression algorithm that
is applied to the repository as a whole. These compressed files are called Pack files.
Pack files are generated and updated whenever git decides to do so. Over time,
the overall git storage requirements associated with uncompressed files grow at a
slower rate than for gzip-compressed files. In addition, each time git repositories
are synchronized, the over-the-wire protocol uses Pack files.
This program converts a file from gzip-compressed format into the simpler
uncompressed format called a `.tar` file. (`tar` stands for Tape ARchive, from
back in the days of magnetic tapes.) Basically, what this program does is
manage two files in tandem, `XYZ.fcstd` and `XYZ.tar`. It does this by
comparing the modification times of the two files and translating the content
of the newer file on top of the older one. When done, both files have
the same modification time. This program works recursively over an entire
directory tree.
To use this program with a git repository, configure git to
ignore `.fcstd` files in your repository by adding `*.fcstd` to your
`.gitignore` file. Run this program before doing a `git commit`.
Whenever you update your git repository from a remote one, run this program
again to keep the `.fcstd` files in sync with any updated `.tar` files.
"""
# [Basic Git Concepts]
# (https://www.oreilly.com/library/view/version-control-with/9781449345037/ch04.html)
#
# FreeCAD forum topics:
# [https://forum.freecadweb.org/viewtopic.php?t=38353&start=30](1)
# [https://forum.freecadweb.org/viewtopic.php?f=8&t=36844a](2)
# [https://forum.freecadweb.org/viewtopic.php?t=40029&start=10](3)
# [https://forum.freecadweb.org/viewtopic.php?p=1727](4)
# [https://forum.freecadweb.org/viewtopic.php?t=8688](5)
# [https://forum.freecadweb.org/viewtopic.php?t=32521](6)
# [https://forum.freecadweb.org/viewtopic.php?t=57737](7)
# [https://blog.lambda.cx/posts/freecad-and-git/](8)
# [https://tante.cc/2010/06/23/managing-zip-based-file-formats-in-git/](9)
from argparse import ArgumentParser
from io import BytesIO
import os
from pathlib import Path
from tarfile import TarFile, TarInfo
from tempfile import TemporaryDirectory
from typing import List, IO, Optional, Tuple
import time
from zipfile import ZIP_DEFLATED, ZipFile
# main():
def main() -> None:
"""Execute the main program."""
# Create an *argument_parser*:
parser: ArgumentParser = ArgumentParser(
description="Synchronize .fcstd/.tar files."
)
parser.add_argument("directories", metavar="DIR", type=str, nargs="*",
help="Directory to recursively scan")
parser.add_argument("-n", "--dry-run", action="store_true",
help="verbose mode")
parser.add_argument("-v", "--verbose", action="store_true",
help="verbose mode")
parser.add_argument("--unit-test", action="store_true",
help="run unit tests")
# Parse arguments:
arguments = parser.parse_args()
directories: Tuple[str, ...] = tuple(arguments.directories)
if arguments.unit_test:
# Run the unit test:
unit_test()
directories = ()
synchronize_directories(directories, arguments.dry_run, arguments.verbose)
# synchronize_directories():
def synchronize_directories(directory_names: Tuple[str, ...],
dry_run: bool, verbose: bool) -> Tuple[str, ...]:
"""Synchronize some directories.
* Arguments:
* *directory_names* (Tuple[str, ...]):
A list of directories to recursively synchronize.
* dry_run (bool):
If True, the directories are scanned but not synchronized. If False, the directories
are both scanned and synchronized.
* verbose (bool):
If True, a summary message is printed for each (possible) synchronization.
The actual synchronization only occurs if *dry_run* is False.
* Returns
* (Tuple[str, ...]) containing the summaries
"""
# Recursively find all *fcstd_paths* in *directories*:
fcstd_paths: List[Path] = []
directory_name: str
for directory_name in directory_names:
suffix: str = "fcstd"
for suffix in ("fcstd", "fcSTD"):
fcstd_paths.extend(Path(directory_name).glob(f"**/*.{suffix}"))
# Perform all of the synchronizations:
summaries: List[str] = []
for fcstd_path in fcstd_paths:
summary: str = synchronize(fcstd_path, dry_run)
summaries.append(summary)
if verbose:
print(summary) # pragma: no unit cover
return tuple(summaries)
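# Hedged usage sketch for the function above: a dry run reports what *would*
# be synchronized without touching any file. The directory name is invented.
def _demo_dry_run() -> None:
    """Print the planned synchronizations for a hypothetical directory."""
    for line in synchronize_directories(("some/cad/directory",), True, False):
        print(line)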
# Synchronize():
def synchronize(fcstd_path: Path, dry_run: bool = False) -> str:
"""Synchronize an .fcstd file with associated .tar file.
* Arguments:
* fcstd_path (Path):
The `.fcstd` file to synchronize.
* dry_run (bool):
If True, no synchronization occurs and only the summary string is returned.
(Default: False)
* Returns:
* (str) a summary string.
Synchronizes an `.fcstd` file with an associated `.tar` file.
A summary is always returned even in *dry_run* mode.
"""
# Determine timestamps for *fcstd_path* and associated *tar_path*:
tar_path: Path = fcstd_path.with_suffix(".tar")
fcstd_timestamp: int = int(fcstd_path.stat().st_mtime) if fcstd_path.exists() else 0
tar_timestamp: int = int(tar_path.stat().st_mtime) if tar_path.exists() else 0
# Using the timestamps do the synchronization (or not):
zip_file: ZipFile
tar_file: TarFile
tar_info: TarInfo
fcstd_name: str = str(fcstd_path)
tar_name: str = str(tar_path)
summary: str
if fcstd_timestamp > tar_timestamp:
# Update *tar_path* from *fcstd_path*:
summary = f"{fcstd_name} => {tar_name}"
if not dry_run:
with ZipFile(fcstd_path, "r") as zip_file:
with TarFile(tar_path, "w") as tar_file:
from_names: Tuple[str, ...] = tuple(zip_file.namelist())
for from_name in from_names:
from_content: bytes = zip_file.read(from_name)
# print(f"Read {fcstd_path}:{from_name}:"
# f"{len(from_content)}:{is_ascii(from_content)}")
tar_info = TarInfo(from_name)
tar_info.size = len(from_content)
# print(f"tar_info={tar_info} size={tar_info.size}")
tar_file.addfile(tar_info, BytesIO(from_content))
os.utime(tar_path, (fcstd_timestamp, fcstd_timestamp)) # Force modification time.
elif tar_timestamp > fcstd_timestamp:
# Update *fcstd_path* from *tar_path*:
summary = f"{tar_name} => {fcstd_name}"
if not dry_run:
with TarFile(tar_path, "r") as tar_file:
tar_infos: Tuple[TarInfo, ...] = tuple(tar_file.getmembers())
with ZipFile(fcstd_path, "w", ZIP_DEFLATED) as zip_file:
for tar_info in tar_infos:
buffered_reader: Optional[IO[bytes]] = tar_file.extractfile(tar_info)
assert buffered_reader
buffer: bytes = buffered_reader.read()
# print(f"{tar_info.name}: {len(buffer)}")
zip_file.writestr(tar_info.name, buffer)
os.utime(fcstd_path, (tar_timestamp, tar_timestamp)) # Force modification time.
else:
summary = f"{fcstd_name} in sync with {tar_name}"
return summary
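# The decision at the heart of synchronize() is the mtime comparison above.
# A hedged, stdlib-only sketch of that rule in isolation (helper name invented):
def _demo_sync_direction(fcstd_path: Path, tar_path: Path) -> str:
    """Return which way a synchronization would flow for the two paths."""
    fcstd_mtime: int = int(fcstd_path.stat().st_mtime) if fcstd_path.exists() else 0
    tar_mtime: int = int(tar_path.stat().st_mtime) if tar_path.exists() else 0
    if fcstd_mtime > tar_mtime:
        return "fcstd -> tar"
    if tar_mtime > fcstd_mtime:
        return "tar -> fcstd"
    return "in sync"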
# unit_test():
def unit_test() -> None:
"""Run the unit test."""
directory_name: str
# Create a temporary *directory_path* to run the tests in:
with TemporaryDirectory() as directory_name:
a_content: str = "a contents"
b_content: str = "b contents"
buffered_reader: Optional[IO[bytes]]
c_content: str = "c contents"
directory_path: Path = Path(directory_name)
tar_name: str
tar_file: TarFile
tar_path: Path = directory_path / "test.tar"
tar_path_name: str = str(tar_path)
zip_file: ZipFile
zip_name: str
zip_path: Path = directory_path / "test.fcstd"
zip_path_name: str = str(zip_path)
# Create *zip_file* with a suffix of `.fcstd`:
with ZipFile(zip_path, "w", ZIP_DEFLATED) as zip_file:
zip_file.writestr("a", a_content)
zip_file.writestr("b", b_content)
assert zip_path.exists(), f"{zip_path_name=} not created"
zip_timestamp: int = int(zip_path.stat().st_mtime)
assert zip_timestamp > 0, f"{zip_path=} had bad timestamp."
# Perform synchronize with a slight delay to force a different modification time:
time.sleep(1.1)
summaries = synchronize_directories((directory_name, ), False, False)
assert len(summaries) == 1, "Only 1 summary expected"
summary: str = summaries[0]
desired_summary: str = f"{zip_path_name} => {tar_path_name}"
assert summary == desired_summary, f"{summary} != {desired_summary}"
assert tar_path.exists(), f"{tar_path_name=} not created"
tar_timestamp: int = int(tar_path.stat().st_mtime)
assert tar_timestamp == zip_timestamp, f"{zip_timestamp=} != {tar_timestamp=}"
# Now read *tar_file* and verify that it has the correct content:
with TarFile(tar_path, "r") as tar_file:
tar_infos: Tuple[TarInfo, ...] = tuple(tar_file.getmembers())
for tar_info in tar_infos:
buffered_reader = tar_file.extractfile(tar_info)
assert buffered_reader, f"Unable to read {tar_file=}"
content: str = buffered_reader.read().decode("latin-1")
found: bool = False
if tar_info.name == "a":
assert content == a_content, f"'{content}' != '{a_content}'"
found = True
elif tar_info.name == "b":
assert content == b_content, f"'{content}' != '{b_content}'"
found = True
assert found, f"Unexpected tar file name {tar_info.name}"
# Now run synchronize again and verify that nothing changed:
summaries = synchronize_directories((directory_name, ), False, False)
assert len(summaries) == 1, "Only one summary expected"
summary = summaries[0]
desired_summary = f"{str(zip_path)} in sync with {str(tar_path)}"
assert summary == desired_summary, f"'{summary}' != '{desired_summary}'"
zip_timestamp = int(zip_path.stat().st_mtime)
tar_timestamp = int(tar_path.stat().st_mtime)
assert tar_timestamp == zip_timestamp, f"timestamps {zip_timestamp=} != {tar_timestamp=}"
# Now update *tar_file* with new content (i.e. `git pull`).:
time.sleep(1.1) # Use delay to force a different timestamp.
with TarFile(tar_path, "w") as tar_file:
tar_info = TarInfo("c")
tar_info.size = len(c_content)
tar_file.addfile(tar_info, BytesIO(bytes(c_content, "latin-1")))
tar_info = TarInfo("a")
tar_info.size = len(a_content)
tar_file.addfile(tar_info, BytesIO(bytes(a_content, "latin-1")))
# Verify that the timestamp changed and force a synchronize().
new_tar_timestamp: int = int(tar_path.stat().st_mtime)
assert new_tar_timestamp > tar_timestamp, f"{new_tar_timestamp=} <= {tar_timestamp=}"
summary = synchronize(zip_path)
desired_summary = f"{tar_path_name} => {zip_path_name}"
assert summary == desired_summary, f"'{summary}' != '{desired_summary}'"
# Verify that the *zip_path* got updated and that the content changed:
new_zip_timestamp: int = int(zip_path.stat().st_mtime)
assert new_zip_timestamp == new_tar_timestamp, (
f"{new_zip_timestamp=} != {new_tar_timestamp=}")
with ZipFile(zip_path, "r") as zip_file:
zip_names: Tuple[str, ...] = tuple(zip_file.namelist())
for zip_name in zip_names:
zip_content: str = zip_file.read(zip_name).decode("latin-1")
found = False
if zip_name == "a":
assert zip_content == a_content, "Content mismatch"
found = True
elif zip_name == "c":
assert zip_content == c_content, "Content mismatch"
found = True
assert found, "Unexpected file '{zip_name}'"
if __name__ == "__main__":
main()
| 45.617647
| 98
| 0.646321
| 1,837
| 13,959
| 4.753402
| 0.209036
| 0.01924
| 0.009162
| 0.013743
| 0.243014
| 0.193541
| 0.163536
| 0.137197
| 0.08967
| 0.074782
| 0
| 0.008279
| 0.247224
| 13,959
| 305
| 99
| 45.767213
| 0.822707
| 0.389641
| 0
| 0.184049
| 0
| 0
| 0.13019
| 0
| 0
| 0
| 0
| 0
| 0.128834
| 1
| 0.02454
| false
| 0
| 0.055215
| 0
| 0.092025
| 0.006135
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23d7aa18934d135f4447648b4a864fe8e8b4a99c
| 1,790
|
py
|
Python
|
moods.py
|
henry232323/Discord-Pesterchum
|
70be67f3671b35aa6cbe6e4eb66a4a1c07707ce3
|
[
"MIT"
] | 27
|
2017-01-31T03:28:26.000Z
|
2021-09-05T21:02:36.000Z
|
moods.py
|
henry232323/Discord-Pesterchum
|
70be67f3671b35aa6cbe6e4eb66a4a1c07707ce3
|
[
"MIT"
] | 18
|
2018-02-03T16:44:18.000Z
|
2021-06-26T04:12:17.000Z
|
moods.py
|
henry232323/Discord-Pesterchum
|
70be67f3671b35aa6cbe6e4eb66a4a1c07707ce3
|
[
"MIT"
] | 5
|
2017-09-23T15:53:08.000Z
|
2020-07-26T06:19:13.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2016-2020, henry232323
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
class Moods(object):
moods = ["chummy", "rancorous", "offline", "pleasant", "distraught",
"pranky", "smooth", "ecstatic", "relaxed", "discontent",
"devious", "sleek", "detestful", "mirthful", "manipulative",
"vigorous", "perky", "acceptant", "protective", "mystified",
"amazed", "insolent", "bemused"]
def __init__(self):
self.usermoods = dict()
self.value = 0
@staticmethod
def getMood(name):
name = "offline" if name.lower() == "abscond" else name
return Moods.moods.index(name.lower())
@staticmethod
def getName(index):
return Moods.moods[index]
| 42.619048
| 76
| 0.701117
| 234
| 1,790
| 5.346154
| 0.606838
| 0.070344
| 0.020783
| 0.033573
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011212
| 0.202793
| 1,790
| 41
| 77
| 43.658537
| 0.865452
| 0.603352
| 0
| 0.125
| 0
| 0
| 0.281567
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1875
| false
| 0
| 0
| 0.0625
| 0.4375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23d7e7b0e05f376311c1a1430b049eda79a5c69d
| 4,465
|
py
|
Python
|
reclass/utils/tests/test_refvalue.py
|
bbinet/reclass
|
c08b844b328fa0fe182db49dd423cc203a016ce9
|
[
"Artistic-2.0"
] | 101
|
2015-01-09T14:59:57.000Z
|
2021-11-06T23:33:50.000Z
|
reclass/utils/tests/test_refvalue.py
|
bbinet/reclass
|
c08b844b328fa0fe182db49dd423cc203a016ce9
|
[
"Artistic-2.0"
] | 48
|
2015-01-30T05:53:47.000Z
|
2019-03-21T23:17:40.000Z
|
reclass/utils/tests/test_refvalue.py
|
bbinet/reclass
|
c08b844b328fa0fe182db49dd423cc203a016ce9
|
[
"Artistic-2.0"
] | 50
|
2015-01-30T08:56:07.000Z
|
2020-12-25T02:34:08.000Z
|
#
# -*- coding: utf-8 -*-
#
# This file is part of reclass (http://github.com/madduck/reclass)
#
# Copyright © 2007–14 martin f. krafft <madduck@madduck.net>
# Released under the terms of the Artistic Licence 2.0
#
from reclass.utils.refvalue import RefValue
from reclass.defaults import PARAMETER_INTERPOLATION_SENTINELS, \
PARAMETER_INTERPOLATION_DELIMITER
from reclass.errors import UndefinedVariableError, \
IncompleteInterpolationError
import unittest
def _var(s):
return '%s%s%s' % (PARAMETER_INTERPOLATION_SENTINELS[0], s,
PARAMETER_INTERPOLATION_SENTINELS[1])
CONTEXT = {'favcolour':'yellow',
'motd':{'greeting':'Servus!',
'colour':'${favcolour}'
},
'int':1,
'list':[1,2,3],
'dict':{1:2,3:4},
'bool':True
}
def _poor_mans_template(s, var, value):
return s.replace(_var(var), value)
class TestRefValue(unittest.TestCase):
def test_simple_string(self):
s = 'my cat likes to hide in boxes'
tv = RefValue(s)
self.assertFalse(tv.has_references())
self.assertEqual(tv.render(CONTEXT), s)
def _test_solo_ref(self, key):
s = _var(key)
tv = RefValue(s)
res = tv.render(CONTEXT)
self.assertTrue(tv.has_references())
self.assertEqual(res, CONTEXT[key])
def test_solo_ref_string(self):
self._test_solo_ref('favcolour')
def test_solo_ref_int(self):
self._test_solo_ref('int')
def test_solo_ref_list(self):
self._test_solo_ref('list')
def test_solo_ref_dict(self):
self._test_solo_ref('dict')
def test_solo_ref_bool(self):
self._test_solo_ref('bool')
def test_single_subst_bothends(self):
s = 'I like ' + _var('favcolour') + ' and I like it'
tv = RefValue(s)
self.assertTrue(tv.has_references())
self.assertEqual(tv.render(CONTEXT),
_poor_mans_template(s, 'favcolour',
CONTEXT['favcolour']))
def test_single_subst_start(self):
s = _var('favcolour') + ' is my favourite colour'
tv = RefValue(s)
self.assertTrue(tv.has_references())
self.assertEqual(tv.render(CONTEXT),
_poor_mans_template(s, 'favcolour',
CONTEXT['favcolour']))
def test_single_subst_end(self):
s = 'I like ' + _var('favcolour')
tv = RefValue(s)
self.assertTrue(tv.has_references())
self.assertEqual(tv.render(CONTEXT),
_poor_mans_template(s, 'favcolour',
CONTEXT['favcolour']))
def test_deep_subst_solo(self):
var = PARAMETER_INTERPOLATION_DELIMITER.join(('motd', 'greeting'))
s = _var(var)
tv = RefValue(s)
self.assertTrue(tv.has_references())
self.assertEqual(tv.render(CONTEXT),
_poor_mans_template(s, var,
CONTEXT['motd']['greeting']))
def test_multiple_subst(self):
greet = PARAMETER_INTERPOLATION_DELIMITER.join(('motd', 'greeting'))
s = _var(greet) + ' I like ' + _var('favcolour') + '!'
tv = RefValue(s)
self.assertTrue(tv.has_references())
want = _poor_mans_template(s, greet, CONTEXT['motd']['greeting'])
want = _poor_mans_template(want, 'favcolour', CONTEXT['favcolour'])
self.assertEqual(tv.render(CONTEXT), want)
def test_multiple_subst_flush(self):
greet = PARAMETER_INTERPOLATION_DELIMITER.join(('motd', 'greeting'))
s = _var(greet) + ' I like ' + _var('favcolour')
tv = RefValue(s)
self.assertTrue(tv.has_references())
want = _poor_mans_template(s, greet, CONTEXT['motd']['greeting'])
want = _poor_mans_template(want, 'favcolour', CONTEXT['favcolour'])
self.assertEqual(tv.render(CONTEXT), want)
def test_undefined_variable(self):
s = _var('no_such_variable')
tv = RefValue(s)
with self.assertRaises(UndefinedVariableError):
tv.render(CONTEXT)
def test_incomplete_variable(self):
s = PARAMETER_INTERPOLATION_SENTINELS[0] + 'incomplete'
with self.assertRaises(IncompleteInterpolationError):
tv = RefValue(s)
if __name__ == '__main__':
unittest.main()
| 34.882813
| 76
| 0.600224
| 501
| 4,465
| 5.095808
| 0.237525
| 0.041128
| 0.047395
| 0.046612
| 0.51038
| 0.438308
| 0.427732
| 0.410497
| 0.390521
| 0.390521
| 0
| 0.006194
| 0.27682
| 4,465
| 127
| 77
| 35.15748
| 0.783834
| 0.044345
| 0
| 0.357143
| 0
| 0
| 0.102161
| 0
| 0
| 0
| 0
| 0
| 0.183673
| 1
| 0.173469
| false
| 0
| 0.040816
| 0.020408
| 0.244898
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23d88124e0abeec9041b9f813d746d7445479956
| 1,506
|
py
|
Python
|
backend/neuroflow/routes/mood.py
|
isamu-isozaki/neuroflow-challenge
|
ca29b8e48be4853317ab706acd4731ea0a8bab10
|
[
"MIT"
] | null | null | null |
backend/neuroflow/routes/mood.py
|
isamu-isozaki/neuroflow-challenge
|
ca29b8e48be4853317ab706acd4731ea0a8bab10
|
[
"MIT"
] | null | null | null |
backend/neuroflow/routes/mood.py
|
isamu-isozaki/neuroflow-challenge
|
ca29b8e48be4853317ab706acd4731ea0a8bab10
|
[
"MIT"
] | null | null | null |
"""
Author: Isamu Isozaki (isamu.website@gmail.com)
Description: description
Created: 2021-12-01T16:32:53.089Z
Modified: !date!
Modified By: modifier
"""
from flask import Blueprint, redirect, jsonify, url_for, request
from neuroflow.repository import create_mood, get_authorized, load_moods_from_user
from functools import wraps
from flask_cors import cross_origin
blueprint = Blueprint('mood', __name__,
url_prefix='/mood')
def authorized():
def authorized_decorator(f):
@wraps(f)
def wrap(*args, **kwargs):
if not request.headers.get('Authorization', None):
return 'Unauthorized', 401
user = get_authorized(request)
if not user:
return 'Unauthorized', 401
return f(user, *args, **kwargs)
return wrap
return authorized_decorator
@blueprint.route('', methods=['POST', 'GET'])
@cross_origin()
@authorized()
def mood_processing(user):
if request.method == 'POST':
try:
request_json = request.get_json()
mood_val = float(request_json['mood'])
assert 0 <= mood_val <= 10
mood = create_mood(mood_val, user)
except Exception as e:
print(e)
return "Invalid request.", 400
del mood['_sa_instance_state']
del mood['user']
return jsonify({'mood': mood})
else:
moods = load_moods_from_user(user)
return jsonify({'moods': moods})
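# Hedged usage sketch: exercising the endpoint above with Flask's test client.
# "app" is assumed to be a Flask application with this blueprint registered;
# the token value is a placeholder the real get_authorized() would validate.
def _demo_post_mood(app):
    """POST a mood value and print the JSON response (test-client example)."""
    client = app.test_client()
    response = client.post('/mood', json={'mood': 7.5},
                           headers={'Authorization': 'some-token'})
    print(response.status_code, response.get_json())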
| 30.12
| 82
| 0.616866
| 173
| 1,506
| 5.190751
| 0.468208
| 0.033408
| 0.028953
| 0.037862
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026508
| 0.273572
| 1,506
| 49
| 83
| 30.734694
| 0.794333
| 0.096946
| 0
| 0.054054
| 0
| 0
| 0.079882
| 0
| 0
| 0
| 0
| 0
| 0.027027
| 1
| 0.108108
| false
| 0
| 0.108108
| 0
| 0.432432
| 0.108108
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23d8fd0ae625c1772c3f3bb0a2d8ee76180f8da6
| 2,684
|
py
|
Python
|
capstone/upload_to_s3.py
|
slangenbach/udacity-de-nanodegree
|
ba885eb4c6fbce063e443375a89b92dbc46fa809
|
[
"MIT"
] | 2
|
2020-03-07T23:32:41.000Z
|
2020-05-22T15:35:16.000Z
|
capstone/upload_to_s3.py
|
slangenbach/udacity-de-nanodegree
|
ba885eb4c6fbce063e443375a89b92dbc46fa809
|
[
"MIT"
] | 1
|
2020-05-25T11:17:15.000Z
|
2020-05-26T06:58:37.000Z
|
capstone/upload_to_s3.py
|
slangenbach/udacity-de-nanodegree
|
ba885eb4c6fbce063e443375a89b92dbc46fa809
|
[
"MIT"
] | 2
|
2020-03-31T13:00:01.000Z
|
2021-07-14T14:34:37.000Z
|
import logging
import time
from pathlib import Path
from configparser import ConfigParser
import boto3
from botocore.exceptions import ClientError
def create_bucket(bucket_name: str, region: str = 'us-west-2'):
"""
Create S3 bucket
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-example-creating-buckets.html
:param bucket_name: Name of S3 bucket
:param region: AWS region where bucket is created
:return: True if bucket is created or already exists, False if ClientError occurs
"""
try:
s3_client = boto3.client('s3', region_name=region)  # boto3 takes region_name, not region
# list buckets
response = s3_client.list_buckets()
# check if bucket exists (list_buckets returns a list of dicts keyed by 'Name')
if bucket_name not in [b['Name'] for b in response['Buckets']]:
# outside us-east-1, S3 requires an explicit LocationConstraint
s3_client.create_bucket(Bucket=bucket_name,
CreateBucketConfiguration={'LocationConstraint': region})
else:
logging.warning(f"{bucket_name} already exists in AWS region {region}")
except ClientError as e:
logging.exception(e)
return False
return True
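# A hedged alternative to listing every bucket: HeadBucket asks S3 about one
# bucket directly. Sketch only; the error handling is deliberately coarse.
def bucket_exists(bucket_name: str, region: str = 'us-west-2') -> bool:
    """Return True if the bucket exists and is accessible to the caller."""
    s3_client = boto3.client('s3', region_name=region)
    try:
        s3_client.head_bucket(Bucket=bucket_name)  # raises ClientError on 404/403
        return True
    except ClientError:
        return False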
def upload_file(file_name: str, bucket: str, object_name: str = None, region: str = 'us-west-2'):
"""
Upload file to S3 bucket
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-uploading-files.html
:param file_name: Path to file including filename
:param bucket: Bucket where file is uploaded to
:param object_name: Name of file inside S3 bucket
:param region: AWS region where bucket is located
:return: True if upload succeeds, False if ClientError occurs
"""
if object_name is None:
object_name = file_name
try:
s3_client = boto3.client('s3', region_name=region)
s3_client.upload_file(file_name, bucket, object_name)
except ClientError as e:
logging.exception(e)
return False
return True
if __name__ == '__main__':
# load config
config = ConfigParser()
config.read('app.cfg')
# start logging
logging.basicConfig(level=config.get("logging", "level"), format="%(asctime)s - %(levelname)s - %(message)s")
logging.info("Started")
# start timer
start_time = time.perf_counter()
# define
data_path = Path(__file__).parent.joinpath('data')
# check if bucket exists
create_bucket(bucket_name='fff-streams')
# upload files to S3
upload_file(str(data_path.joinpath('world_happiness_2017.csv')), bucket='fff-streams', object_name='world_happiness.csv')
upload_file(str(data_path.joinpath('temp_by_city_clean.csv')), bucket='fff-streams', object_name='temp_by_city.csv')
# stop timer
stop_time = time.perf_counter()
logging.info(f"Uploaded files in {(stop_time - start_time):.2f} seconds")
logging.info("Finished")
| 31.209302
| 120
| 0.688897
| 362
| 2,684
| 4.944751
| 0.312155
| 0.039106
| 0.030168
| 0.024581
| 0.298324
| 0.251397
| 0.218994
| 0.218994
| 0.178771
| 0.132961
| 0
| 0.013189
| 0.209016
| 2,684
| 85
| 121
| 31.576471
| 0.829958
| 0.293219
| 0
| 0.292683
| 0
| 0
| 0.184818
| 0.025303
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04878
| false
| 0
| 0.146341
| 0
| 0.292683
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23dbf2b9d9cefc92e0075e49e75f8a00b52cb7f9
| 4,174
|
py
|
Python
|
core/loader.py
|
CrackerCat/ZetaSploit
|
4589d467c9fb81c1a5075cd43358b2df9b896530
|
[
"MIT"
] | 3
|
2020-12-04T07:29:31.000Z
|
2022-01-30T10:14:41.000Z
|
core/loader.py
|
CrackerCat/ZetaSploit
|
4589d467c9fb81c1a5075cd43358b2df9b896530
|
[
"MIT"
] | null | null | null |
core/loader.py
|
CrackerCat/ZetaSploit
|
4589d467c9fb81c1a5075cd43358b2df9b896530
|
[
"MIT"
] | 1
|
2021-03-27T06:14:43.000Z
|
2021-03-27T06:14:43.000Z
|
#!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import sys
import time
import threading
import os
from core.badges import badges
from core.helper import helper
class loader:
def __init__(self):
self.badges = badges()
self.helper = helper()
def get_module(self, mu, name, folderpath):
folderpath_list = folderpath.split(".")
for i in dir(mu):
if i == name:
return getattr(mu, name)
else:
if i in folderpath_list:
i = getattr(mu, i)
return self.get_module(i, name, folderpath)
def import_plugins(self, plugin_owner, plugin_system, controller):
plugins = dict()
plugin_path = "plugins/" + plugin_owner + "/" + plugin_system
for plugin_type in os.listdir(plugin_path):
plugin_path = plugin_path + "/" + plugin_type
for plugin in os.listdir(plugin_path):
if plugin == '__init__.py' or plugin[-3:] != '.py':
continue
else:
try:
plugin_directory = plugin_path.replace("/", ".").replace("\\", ".") + "." + plugin[:-3]
plugin_file = __import__(plugin_directory)
plugin_object = self.get_module(plugin_file, plugin[:-3], plugin_directory)
plugin_object = plugin_object.ZetaSploitPlugin(controller)
plugins[plugin_object.details['Name']] = plugin_object
except Exception as e:
print(self.badges.E + "Failed to load plugin! Reason: "+str(e))
return plugins
def import_modules(self):
modules = dict()
module_path = "modules"
for module_system in os.listdir(module_path):
module_path = module_path + "/" + module_system
for module_type in os.listdir(module_path):
module_path = module_path + "/" + module_type
for module in os.listdir(module_path):
if module == '__init__.py' or module[-3:] != '.py':
continue
else:
try:
module_directory = module_path.replace("/", ".").replace("\\", ".") + "." + module[:-3]
module_file = __import__(module_directory)
module_object = self.get_module(module_file, module[:-3], module_directory)
module_object = module_object.ZetaSploitModule()
modules[module_object.details['Name']] = module_object
except Exception as e:
print(self.badges.E + "Failed to load plugin! Reason: " + str(e))
return modules
def load_plugins(self, owner, system, controller):
plugins = self.import_plugins(owner, system, controller)
return plugins
def load_modules(self):
modules = self.import_modules()
return modules
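# Why get_module() exists: __import__("a.b.c") returns the *top-level* package
# "a", so the code above walks attributes down to the leaf. A hedged sketch of
# the simpler stdlib route (the dotted path below is illustrative only):
def load_leaf_module(dotted_path):
    """Return the leaf module for a dotted path such as "pkg.sub.mod"."""
    import importlib
    return importlib.import_module(dotted_path)  # yields the leaf, not the root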
| 43.030928
| 115
| 0.598946
| 481
| 4,174
| 5.037422
| 0.324324
| 0.037144
| 0.03962
| 0.033017
| 0.151465
| 0.099876
| 0.099876
| 0.099876
| 0.099876
| 0.099876
| 0
| 0.003837
| 0.313129
| 4,174
| 97
| 116
| 43.030928
| 0.841298
| 0.258984
| 0
| 0.234375
| 0
| 0
| 0.042345
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09375
| false
| 0.015625
| 0.1875
| 0
| 0.390625
| 0.03125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23dc4f684d9d5300357e5bf6d8fabca6e13f5585
| 8,556
|
py
|
Python
|
parameter_setting/parameters_setting_cropping_impact.py
|
MorganeAudrain/Calcium_new
|
1af0ab4f70b91d1ca55c6053112c1744b1da1bd3
|
[
"MIT"
] | null | null | null |
parameter_setting/parameters_setting_cropping_impact.py
|
MorganeAudrain/Calcium_new
|
1af0ab4f70b91d1ca55c6053112c1744b1da1bd3
|
[
"MIT"
] | null | null | null |
parameter_setting/parameters_setting_cropping_impact.py
|
MorganeAudrain/Calcium_new
|
1af0ab4f70b91d1ca55c6053112c1744b1da1bd3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 5
@author: Melisa Maidana
This script runs different cropping parameters, motion corrects the cropped images using reasonable motion correction
parameters (previously selected with the parameters_setting_motion_correction scripts), then runs source extraction
with multiple parameters and creates figures of the cropped image and the cells extracted from it. The idea is to
compare the resulting source extraction neural footprints for different cropping selections. Ideally the extracted
sources should be similar; if so, the parameter setting for every step can be run on small pieces of the image, the
best values selected, and then applied to the complete image.
"""
import os
import sys
import psutil
import logging
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import pylab as pl
# This should be in another file. Let's leave it here for now
sys.path.append('/home/sebastian/Documents/Melisa/calcium_imaging_analysis/src/')
sys.path.remove('/home/sebastian/Documents/calcium_imaging_analysis')
import src.configuration
import caiman as cm
import src.data_base_manipulation as db
from src.steps.cropping import run_cropper as main_cropping
from src.steps.motion_correction import run_motion_correction as main_motion_correction
from src.steps.source_extraction import run_source_extraction as main_source_extraction
import src.analysis.metrics as metrics
from caiman.source_extraction.cnmf.cnmf import load_CNMF
#Paths
analysis_states_database_path = 'references/analysis/analysis_states_database.xlsx'
backup_path = 'references/analysis/backup/'
#parameters_path = 'references/analysis/parameters_database.xlsx'
## Open the database with all data
states_df = db.open_analysis_states_database()
mouse = 51565
session = 1
trial = 1
is_rest = 1
# CROPPING
# Select the rows for cropping
x1_crops = np.arange(200,0,-50)
x2_crops = np.arange(350,550,50)
y1_crops = np.arange(200,0,-50)
y2_crops = np.arange(350,550,50)
n_processes = psutil.cpu_count()
cm.cluster.stop_server()
# Start a new cluster
c, dview, n_processes = cm.cluster.setup_cluster(backend='local',
n_processes=n_processes, # number of process to use, if you go out of memory try to reduce this one
single_thread=False)
logging.info(f'Starting cluster. n_processes = {n_processes}.')
# parameters for motion correction
parameters_motion_correction = {'motion_correct': True, 'pw_rigid': True, 'save_movie_rig': False,
'gSig_filt': (5, 5), 'max_shifts': (25, 25), 'niter_rig': 1,
'strides': (48, 48),
'overlaps': (96, 96), 'upsample_factor_grid': 2, 'num_frames_split': 80,
'max_deviation_rigid': 15,
'shifts_opencv': True, 'use_cuda': False, 'nonneg_movie': True, 'border_nan': 'copy'}
#parameters for source extraction
gSig = 5
gSiz = 4 * gSig + 1
corr_limits = np.linspace(0.4, 0.6, 5)
pnr_limits = np.linspace(3, 7, 5)
cropping_v = np.zeros(5)
motion_correction_v = np.zeros(5)
selected_rows = db.select(states_df,'cropping', mouse = mouse, session = session, trial = trial , is_rest = is_rest)
mouse_row = selected_rows.iloc[0]
for kk in range(4):
cropping_interval = [x1_crops[kk], x2_crops[kk], y1_crops[kk], y2_crops[kk]]
parameters_cropping = {'crop_spatial': True, 'cropping_points_spatial': cropping_interval,
'crop_temporal': False, 'cropping_points_temporal': []}
mouse_row = main_cropping(mouse_row, parameters_cropping)
cropping_v[kk] = mouse_row.name[5]
states_df = db.append_to_or_merge_with_states_df(states_df, mouse_row)
db.save_analysis_states_database(states_df, path=analysis_states_database_path, backup_path = backup_path)
states_df = db.open_analysis_states_database()
for kk in range(4):
selected_rows = db.select(states_df, 'motion_correction', 56165, cropping_v = cropping_v[kk])  # note: 56165 here (and below) vs mouse = 51565 above; presumably the same animal is intended
mouse_row = selected_rows.iloc[0]
mouse_row_new = main_motion_correction(mouse_row, parameters_motion_correction, dview)
mouse_row_new = metrics.get_metrics_motion_correction(mouse_row_new, crispness=True)
states_df = db.append_to_or_merge_with_states_df(states_df, mouse_row_new)
db.save_analysis_states_database(states_df, path=analysis_states_database_path, backup_path = backup_path)
motion_correction_v[kk]=mouse_row_new.name[6]
states_df = db.open_analysis_states_database()
for ii in range(corr_limits.shape[0]):
for jj in range(pnr_limits.shape[0]):
parameters_source_extraction = {'session_wise': False, 'fr': 10, 'decay_time': 0.1,
'min_corr': corr_limits[ii],
'min_pnr': pnr_limits[jj], 'p': 1, 'K': None, 'gSig': (gSig, gSig),
'gSiz': (gSiz, gSiz),
'merge_thr': 0.7, 'rf': 60, 'stride': 30, 'tsub': 1, 'ssub': 2, 'p_tsub': 1,
'p_ssub': 2, 'low_rank_background': None, 'nb': 0, 'nb_patch': 0,
'ssub_B': 2,
'init_iter': 2, 'ring_size_factor': 1.4, 'method_init': 'corr_pnr',
'method_deconvolution': 'oasis', 'update_background_components': True,
'center_psf': True, 'border_pix': 0, 'normalize_init': False,
'del_duplicates': True, 'only_init': True}
for kk in range(4):
selected_rows = db.select(states_df, 'source_extraction', 56165, cropping_v = cropping_v[kk])
mouse_row = selected_rows.iloc[0]
mouse_row_new = main_source_extraction(mouse_row, parameters_source_extraction, dview)
states_df = db.append_to_or_merge_with_states_df(states_df, mouse_row_new)
db.save_analysis_states_database(states_df, path=analysis_states_database_path, backup_path=backup_path)
states_df = db.open_analysis_states_database()
for ii in range(corr_limits.shape[0]):
for jj in range(pnr_limits.shape[0]):
figure, axes = plt.subplots(4, 3, figsize=(50, 30))
version = ii * pnr_limits.shape[0] + jj +1
for kk in range(4):
selected_rows = db.select(states_df, 'component_evaluation', 56165, cropping_v=cropping_v[kk], motion_correction_v = 1, source_extraction_v= version)
mouse_row = selected_rows.iloc[0]
decoding_output = mouse_row['decoding_output']
decoded_file = eval(decoding_output)['main']
m = cm.load(decoded_file)
axes[kk,0].imshow(m[0, :, :], cmap='gray')
cropping_interval = [x1_crops[kk], x2_crops[kk], y1_crops[kk], y2_crops[kk]]
[x_, _x, y_, _y] = cropping_interval
rect = Rectangle((y_, x_), _y - y_, _x - x_, fill=False, color='r', linestyle='--', linewidth = 3)
axes[kk,0].add_patch(rect)
output_cropping = mouse_row['cropping_output']
cropped_file = eval(output_cropping)['main']
m = cm.load(cropped_file)
axes[kk,1].imshow(m[0, :, :], cmap='gray')
output_source_extraction = eval(mouse_row['source_extraction_output'])
cnm_file_path = output_source_extraction['main']
cnm = load_CNMF(db.get_file(cnm_file_path))
corr_path = output_source_extraction['meta']['corr']['main']
cn_filter = np.load(db.get_file(corr_path))
axes[kk, 2].imshow(cn_filter)
coordinates = cm.utils.visualization.get_contours(cnm.estimates.A, np.shape(cn_filter), 0.2, 'max')
for c in coordinates:
v = c['coordinates']
c['bbox'] = [np.floor(np.nanmin(v[:, 1])), np.ceil(np.nanmax(v[:, 1])),
np.floor(np.nanmin(v[:, 0])), np.ceil(np.nanmax(v[:, 0]))]
axes[kk, 2].plot(*v.T, c='w',linewidth=3)
fig_dir ='/home/sebastian/Documents/Melisa/calcium_imaging_analysis/data/interim/cropping/meta/figures/cropping_inicialization/'
fig_name = fig_dir + db.create_file_name(2,mouse_row.name) + '_corr_' + f'{round(corr_limits[ii],1)}' + '_pnr_' + f'{round(pnr_limits[jj])}' + '.png'
figure.savefig(fig_name)
| 50.329412
| 161
| 0.661524
| 1,178
| 8,556
| 4.535654
| 0.282683
| 0.031443
| 0.04941
| 0.019465
| 0.270073
| 0.246491
| 0.21224
| 0.186786
| 0.186786
| 0.186786
| 0
| 0.024364
| 0.227676
| 8,556
| 169
| 162
| 50.627219
| 0.784201
| 0.132539
| 0
| 0.190083
| 0
| 0.008264
| 0.151081
| 0.061216
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.132231
| 0
| 0.132231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23dd6ab36e5a83840094cc404aedad771f6f9076
| 1,676
|
py
|
Python
|
src/data/energidataservice_api.py
|
titanbender/electricity-price-forecasting
|
c288a9b6d7489ac03ee800318539195bd1cd2650
|
[
"MIT"
] | 1
|
2021-04-15T13:05:03.000Z
|
2021-04-15T13:05:03.000Z
|
src/data/energidataservice_api.py
|
titanbender/electricity-price-forecasting
|
c288a9b6d7489ac03ee800318539195bd1cd2650
|
[
"MIT"
] | 1
|
2018-12-11T13:41:45.000Z
|
2018-12-11T14:15:15.000Z
|
src/data/energidataservice_api.py
|
titanbender/electricity-price-forecasting
|
c288a9b6d7489ac03ee800318539195bd1cd2650
|
[
"MIT"
] | 1
|
2020-01-01T21:03:02.000Z
|
2020-01-01T21:03:02.000Z
|
import pandas as pd
import json
from urllib.request import urlopen  # urllib2 is Python 2 only; use urllib.request on Python 3
def download_nordpool(limit, output_file):
'''
The method downloads the available Nord Pool data from www.energidataservice.dk and saves it in a csv file
limit: Int, the number of maximum rows of data to download
output_file: Str, the name of the output file
'''
url = 'https://api.energidataservice.dk/datastore_search?resource_id=8bd7a37f-1098-4643-865a-01eb55c62d21&limit=' + str(limit)
print("downloading nordpool data ...")
fileobj = urlopen(url)
data = json.loads(fileobj.read())
nordpool_df = pd.DataFrame.from_dict(data['result']['records']) # the data is stored inside two dictionaries
nordpool_df.to_csv(output_file)
print("nordpool data has been downloaded and saved")
def download_dayforward(limit, output_file):
'''
The method downloads the available day-ahead spot prices for DK and neighboring countries
from www.energidataservice.dk and saves them in a csv file
limit: Int, the number of maximum rows of data to download
output_file: Str, the name of the output file
'''
url = 'https://api.energidataservice.dk/datastore_search?resource_id=c86859d2-942e-4029-aec1-32d56f1a2e5d&limit=' + str(limit)
print("downloading day forward data ...")
fileobj = urlopen(url)
data = json.loads(fileobj.read())
nordpool_df = pd.DataFrame.from_dict(data['result']['records']) # the data is stored inside two dictionaries
nordpool_df.to_csv(output_file)
print("day forward data has been downloaded and saved")
if __name__ == '__main__':
print("connecting with the API")
download_nordpool(10000000, 'nordpool_data.csv')
download_dayforward(10000000, 'dayforward_data.csv')
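# For a quick local test, a much smaller limit keeps the download manageable, e.g.:
#
#   download_nordpool(100, 'nordpool_sample.csv')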
| 37.244444
| 127
| 0.7679
| 245
| 1,676
| 5.122449
| 0.342857
| 0.063745
| 0.023904
| 0.028685
| 0.70757
| 0.661355
| 0.615139
| 0.557769
| 0.557769
| 0.557769
| 0
| 0.04321
| 0.130072
| 1,676
| 45
| 128
| 37.244444
| 0.817558
| 0.332339
| 0
| 0.347826
| 0
| 0.086957
| 0.414077
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.130435
| 0
| 0.217391
| 0.217391
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23df352466c71a2286ba6b66bb76f8b89e0ba1ff
| 1,873
|
py
|
Python
|
models/cnn.py
|
amayuelas/NNKGReasoning
|
0e3623b344fd4e3088ece897f898ddbb1f80888d
|
[
"MIT"
] | 1
|
2022-03-16T22:20:12.000Z
|
2022-03-16T22:20:12.000Z
|
models/cnn.py
|
amayuelas/NNKGReasoning
|
0e3623b344fd4e3088ece897f898ddbb1f80888d
|
[
"MIT"
] | 2
|
2022-03-22T23:34:38.000Z
|
2022-03-24T17:35:53.000Z
|
models/cnn.py
|
amayuelas/NNKGReasoning
|
0e3623b344fd4e3088ece897f898ddbb1f80888d
|
[
"MIT"
] | null | null | null |
from typing import Any
import torch
import torch.nn as nn
import torch.nn.functional as F
class CNN(nn.Module):
def __init__(self, entity_dim):
super(CNN, self).__init__()
self.dim = entity_dim
self.conv1 = nn.Conv1d(in_channels=1, out_channels=10,
kernel_size=6)
self.conv2 = nn.Conv1d(in_channels=10, out_channels=10,
kernel_size=6)
self.pool = nn.MaxPool1d(kernel_size=5)
self.fc1 = nn.Linear(int(self.dim / 25 - 2) * 10, self.dim)
self.fc2 = nn.Linear(self.dim, self.dim * 2)
self.fc3 = nn.Linear(self.dim * 2, self.dim)
def forward(self, x):
x = x.unsqueeze(1)
x = self.pool(self.conv1(x))
x = self.pool(self.conv2(x))
x = x.view(-1, x.shape[1] * x.shape[2])
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
class CNN2(nn.Module):
def __init__(self, entity_dim):
super(CNN2, self).__init__()
self.dim = entity_dim
self.conv1 = nn.Conv1d(in_channels=1, out_channels=10,
kernel_size=6)
self.conv2 = nn.Conv1d(in_channels=10, out_channels=10,
kernel_size=6)
self.pool = nn.MaxPool1d(kernel_size=5)
self.fc1 = nn.Linear(2 * int(self.dim / 25 - 2) * 10, 2 * self.dim)
self.fc3 = nn.Linear(self.dim * 2, self.dim)
def forward(self, x1, x2):
x1 = x1.unsqueeze(1)
x1 = self.pool(self.conv1(x1))
x1 = self.pool(self.conv2(x1))
x2 = x2.unsqueeze(1)
x2 = self.pool(self.conv1(x2))
x2 = self.pool(self.conv2(x2))
x = torch.cat((x1, x2), dim=-1)
x = x.view(-1, x.shape[1] * x.shape[2])
x = F.relu(self.fc1(x))
x = self.fc3(x)
return x
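# A minimal shape-check sketch (hypothetical entity_dim; the value must satisfy
# int(entity_dim / 25 - 2) * 10 == the flattened conv output — e.g. 400 works):
#
#   model = CNN(entity_dim=400)
#   out = model(torch.randn(8, 400))                          # -> shape (8, 400)
#
#   model2 = CNN2(entity_dim=400)
#   out2 = model2(torch.randn(8, 400), torch.randn(8, 400))   # -> shape (8, 400)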
| 31.745763
| 75
| 0.538708
| 284
| 1,873
| 3.433099
| 0.176056
| 0.086154
| 0.073846
| 0.073846
| 0.652308
| 0.652308
| 0.621538
| 0.588718
| 0.521026
| 0.521026
| 0
| 0.069586
| 0.317138
| 1,873
| 59
| 76
| 31.745763
| 0.692729
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.083333
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23e397535cfd73ea5daf63a3a67cc1be6978c490
| 29,136
|
py
|
Python
|
src/valr_python/ws_client.py
|
duncan-lumina/valr-python
|
9c94b76990416b4b709d507b538bd8265ed51312
|
[
"MIT"
] | 6
|
2019-12-31T17:25:14.000Z
|
2021-12-15T14:30:05.000Z
|
src/valr_python/ws_client.py
|
duncan-lumina/valr-python
|
9c94b76990416b4b709d507b538bd8265ed51312
|
[
"MIT"
] | 17
|
2020-01-03T00:03:30.000Z
|
2022-03-14T19:17:50.000Z
|
src/valr_python/ws_client.py
|
duncan-lumina/valr-python
|
9c94b76990416b4b709d507b538bd8265ed51312
|
[
"MIT"
] | 6
|
2020-06-24T03:23:37.000Z
|
2021-12-17T14:20:46.000Z
|
import asyncio
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Type
from typing import Union
try:
import simplejson as json
except ImportError:
import json
import websockets
from valr_python.enum import AccountEvent
from valr_python.enum import CurrencyPair
from valr_python.enum import MessageFeedType
from valr_python.enum import TradeEvent
from valr_python.enum import WebSocketType
from valr_python.exceptions import HookNotFoundError
from valr_python.exceptions import WebSocketAPIException
from valr_python.utils import JSONType
from valr_python.utils import _get_valr_headers
__all__ = ('WebSocketClient',)
def get_event_type(ws_type: WebSocketType) -> Type[Union[TradeEvent, AccountEvent]]:
return TradeEvent if ws_type == WebSocketType.TRADE else AccountEvent
class WebSocketClient:
"""The WebSocket API is an advanced technology that makes it possible to open a two-way interactive
communication session between a client and a server. With this API, you can send messages to a server and
receive event-driven responses without having to poll the server for a reply.
Example Usage
~~~~~~~~~~~~~
>>> import asyncio
>>> from typing import Dict
>>> from pprint import pprint
>>> from valr_python import WebSocketClient
>>> from valr_python.enum import TradeEvent
>>> from valr_python.enum import WebSocketType
>>>
>>> def pretty_hook(data: Dict):
... pprint(data)
>>>
>>> c = WebSocketClient(api_key='api_key', api_secret='api_secret', currency_pairs=['BTCZAR'],
... ws_type=WebSocketType.TRADE.name,
... trade_subscriptions=[TradeEvent.MARKET_SUMMARY_UPDATE.name],
... hooks={TradeEvent.MARKET_SUMMARY_UPDATE.name : pretty_hook})
>>> loop = asyncio.get_event_loop()
>>> loop.run_until_complete(c.run())
{'currencyPairSymbol': 'BTCZAR',
'data': {'askPrice': '151601',
'baseVolume': '314.7631144',
'bidPrice': '151600',
'changeFromPrevious': '2.14',
'created': '2020-02-06T22:47:03.129Z',
'currencyPairSymbol': 'BTCZAR',
'highPrice': '152440',
'lastTradedPrice': '151600',
'lowPrice': '146765',
'previousClosePrice': '148410',
'quoteVolume': '47167382.04552981'},
'type': 'MARKET_SUMMARY_UPDATE'}
Connection
~~~~~~~~~~
Our WebSocket API is accessible on the following address: wss://api.valr.com.
Account WebSocket connection: In order to receive streaming updates about your VALR account, you would
open up a WebSocket connection to wss://api.valr.com/ws/account
Trade WebSocket connection: In order to receive streaming updates about Trade data, you would open up a
WebSocket connection to wss://api.valr.com/ws/trade
Authentication
~~~~~~~~~~~~~~
Our WebSocket API needs authentication. To authenticate, pass in the following headers to the first
call that establishes the WebSocket connection.
X-VALR-API-KEY: Your API Key
X-VALR-SIGNATURE: Generated signature. The signature is generated using the following parameters:
Api Secret
Timestamp of request
HTTP verb 'GET'
Path (either /ws/account or /ws/trade)
Request Body should be empty
X-VALR-TIMESTAMP: Timestamp of the request
The headers that are passed to establish the connection are the same 3 headers you pass to
any authenticated call to the REST API.
Subscribing to events
~~~~~~~~~~~~~~~~~~~~~
Once you open a connection to Account, you are automatically subscribed to all messages for all events on
the Account WebSocket connection. You will start receiving message feeds pertaining to your VALR account.
For example, you will receive messages when your balance is updated or when a new trade is executed on your account.
On the other hand, when you open a connection to Trade, in order to receive message feeds about trading data, you
must subscribe to events you are interested in on the Trade WebSocket connection. For example, if you want to
receive messages when markets fluctuate, you must send a message on the connection with the following payload:
{
"type":"SUBSCRIBE",
"subscriptions":[
{
"event":"MARKET_SUMMARY_UPDATE",
"pairs":[
"BTCZAR"
]
}
]
}
Here, the event you are subscribing to is called MARKET_SUMMARY_UPDATE and the currency pair you are subscribing to
is an array. We currently only support BTCZAR and ETHZAR. XRPZAR will be added in due course.
Unsubscribing from events
~~~~~~~~~~~~~~~~~~~~~~~~~
When you are no longer interested in receiving messages for certain events on the Trade WebSocket connection,
you can send a synthetic "unsubscribe" message. For example, if you want to unsubscribe from MARKET_SUMMARY_UPDATE
event, you would send a message as follows:
{
"type":"SUBSCRIBE",
"subscriptions":[
{
"event":"MARKET_SUMMARY_UPDATE",
"pairs":[
]
}
]
}
Staying connected with Ping-Pong messages
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To ensure that you stay connected to either the Account or Trade WebSocket you can send a "PING" message on the
WebSocket you wish to monitor. VALR will respond with a PONG event. The message must be as follows:
{
"type": "PING"
}
Events (On Trade WebSocket)
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Here is a list of events you can subscribe to on the Trade WebSocket connection:
Event Description
AGGREGATED_ORDERBOOK_UPDATE When subscribed to this event for a given currency pair, the client receives the
top 20 bids and asks from the order book for that currency pair.
MARKET_SUMMARY_UPDATE When subscribed to this event for a given currency pair, the client receives a
message feed with the latest market summary for that currency pair.
NEW_TRADE_BUCKET When subscribed to this event for a given currency pair, the client receives the
Open, High, Low, Close data valid for the last 60 seconds.
NEW_TRADE When subscribed to this event for a given currency pair, the client receives
message feeds with the latest trades that are executed for that currency pair.
AGGREGATED_ORDERBOOK_UPDATE
In order to subscribe to AGGREGATED_ORDERBOOK_UPDATE for BTCZAR and ETHZAR, you must send the following message
on the Trade WebSocket connection once it is opened:
{
"type":"SUBSCRIBE",
"subscriptions":[
{
"event":"AGGREGATED_ORDERBOOK_UPDATE",
"pairs":[
"BTCZAR",
"ETHZAR"
]
}
]
}
To unsubscribe, send the following message:
{
"type":"SUBSCRIBE",
"subscriptions":[
{
"event":"AGGREGATED_ORDERBOOK_UPDATE",
"pairs":[
]
}
]
}
MARKET_SUMMARY_UPDATE
In order to subscribe to MARKET_SUMMARY_UPDATE for just BTCZAR, you must send the following message on the
Trade WebSocket connection once it is opened:
{
"type":"SUBSCRIBE",
"subscriptions":[
{
"event":"MARKET_SUMMARY_UPDATE",
"pairs":[
"BTCZAR"
]
}
]
}
To unsubscribe, send the following message:
{
"type":"SUBSCRIBE",
"subscriptions":[
{
"event":"MARKET_SUMMARY_UPDATE",
"pairs":[
]
}
]
}
NEW_TRADE_BUCKET
In order to subscribe to NEW_TRADE_BUCKET for BTCZAR as well as ETHZAR, you must send the following message on the
Trade WebSocket connection once it is opened:
{
"type":"SUBSCRIBE",
"subscriptions":[
{
"event":"NEW_TRADE_BUCKET",
"pairs":[
"BTCZAR",
"ETHZAR"
]
}
]
}
To unsubscribe, send the following message:
{
"type":"SUBSCRIBE",
"subscriptions":[
{
"event":"NEW_TRADE_BUCKET",
"pairs":[
]
}
]
}
NEW_TRADE
In order to subscribe to NEW_TRADE just for BTCZAR, you must send the following message on the Trade WebSocket
connection once it is opened:
{
"type":"SUBSCRIBE",
"subscriptions":[
{
"event":"NEW_TRADE",
"pairs":[
"BTCZAR"
]
}
]
}
To unsubscribe, send the following message:
{
"type":"SUBSCRIBE",
"subscriptions":[
{
"event":"NEW_TRADE",
"pairs":[
]
}
]
}
Message Feeds (On Trade WebSocket)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
As and when events occur, the message feeds come through to the Trade WebSocket connection for the events the client
has subscribed to. You will find an example message feed for each event specified above.
AGGREGATED_ORDERBOOK_UPDATE
Sample message feed:
{
"type":"AGGREGATED_ORDERBOOK_UPDATE",
"currencyPairSymbol":"BTCZAR",
"data":{
"Asks":[
{
"side":"sell",
"quantity":"0.005",
"price":"9500",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"sell",
"quantity":"0.01",
"price":"9750",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"sell",
"quantity":"0.643689",
"price":"10000",
"currencyPair":"BTCZAR",
"orderCount":3
},
{
"side":"sell",
"quantity":"0.2",
"price":"11606",
"currencyPair":"BTCZAR",
"orderCount":2
},
{
"side":"sell",
"quantity":"0.67713484",
"price":"14000",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"sell",
"quantity":"1",
"price":"15000",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"sell",
"quantity":"1",
"price":"16000",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"sell",
"quantity":"1",
"price":"17000",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"sell",
"quantity":"1",
"price":"18000",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"sell",
"quantity":"1",
"price":"19000",
"currencyPair":"BTCZAR",
"orderCount":1
}
],
"Bids":[
{
"side":"buy",
"quantity":"0.038",
"price":"9000",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"buy",
"quantity":"0.1",
"price":"8802",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"buy",
"quantity":"0.2",
"price":"8801",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"buy",
"quantity":"0.1",
"price":"8800",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"buy",
"quantity":"0.1",
"price":"8700",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"buy",
"quantity":"0.1",
"price":"8600",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"buy",
"quantity":"0.1",
"price":"8500",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"buy",
"quantity":"0.1",
"price":"8400",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"buy",
"quantity":"0.3",
"price":"8200",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"buy",
"quantity":"0.1",
"price":"8100",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"buy",
"quantity":"0.1",
"price":"8000",
"currencyPair":"BTCZAR",
"orderCount":1
},
{
"side":"buy",
"quantity":"1.08027437",
"price":"1",
"currencyPair":"BTCZAR",
"orderCount":3
}
]
}
}
MARKET_SUMMARY_UPDATE
Sample message feed:
{
"type":"MARKET_SUMMARY_UPDATE",
"currencyPairSymbol":"BTCZAR",
"data":{
"currencyPairSymbol":"BTCZAR",
"askPrice":"9500",
"bidPrice":"9000",
"lastTradedPrice":"9500",
"previousClosePrice":"9000",
"baseVolume":"0.0551",
"highPrice":"10000",
"lowPrice":"9000",
"created":"2016-04-25T19:41:16.237Z",
"changeFromPrevious":"5.55"
}
}
NEW_TRADE_BUCKET
Sample message feed:
{
"type":"NEW_TRADE_BUCKET",
"currencyPairSymbol":"BTCZAR",
"data":{
"currencyPairSymbol":"BTCZAR",
"bucketPeriodInSeconds":60,
"startTime":"2019-04-25T19:41:00Z",
"open":"9500",
"high":"9500",
"low":"9500",
"close":"9500",
"volume":"0"
}
}
NEW_TRADE
Sample message feed:
{
"type":"NEW_TRADE",
"currencyPairSymbol":"BTCZAR",
"data":{
"price":"9500",
"quantity":"0.001",
"currencyPair":"BTCZAR",
"tradedAt":"2019-04-25T19:51:55.393Z",
"takerSide":"buy"
}
}
Message Feeds (On Account WebSocket)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
As and when events occur, the message feeds come through to the Account WebSocket connection. As mentioned
previously, the client is automatically subscribed to all events on the Account WebSocket connection as soon as
the connection is established. That means, the client need not subscribe to events on the Account WebSocket
connection. That also means that the client cannot unsubscribe from these events.
Here is a list of events that occur on the Account WebSocket and the corresponding sample message feed:
NEW_ACCOUNT_HISTORY_RECORD : NEW SUCCESSFUL TRANSACTION
Sample message feed:
{
"type":"NEW_ACCOUNT_HISTORY_RECORD",
"data":{
"transactionType":{
"type":"SIMPLE_BUY",
"description":"Simple Buy"
},
"debitCurrency":{
"symbol":"R",
"decimalPlaces":2,
"isActive":true,
"shortName":"ZAR",
"longName":"Rand",
"supportedWithdrawDecimalPlaces":2
},
"debitValue":"10",
"creditCurrency":{
"symbol":"BTC",
"decimalPlaces":8,
"isActive":true,
"shortName":"BTC",
"longName":"Bitcoin",
"supportedWithdrawDecimalPlaces":8
},
"creditValue":"0.00104473",
"feeCurrency":{
"symbol":"BTC",
"decimalPlaces":8,
"isActive":true,
"shortName":"BTC",
"longName":"Bitcoin",
"supportedWithdrawDecimalPlaces":8
},
"feeValue":"0.00000789",
"eventAt":"2019-04-25T20:36:53.426Z",
"additionalInfo":{
"costPerCoin":9500,
"costPerCoinSymbol":"R",
"currencyPairSymbol":"BTCZAR"
}
}
}
BALANCE_UPDATE : BALANCE HAS BEEN UPDATED
Sample message feed:
{
"type":"BALANCE_UPDATE",
"data":{
"currency":{
"symbol":"BTC",
"decimalPlaces":8,
"isActive":true,
"shortName":"BTC",
"longName":"Bitcoin",
"supportedWithdrawDecimalPlaces":8
},
"available":"0.88738681",
"reserved":"0.97803484",
"total":"1.86542165"
}
}
NEW_ACCOUNT_TRADE : NEW TRADE EXECUTED ON YOUR ACCOUNT
Sample message feed:
{
"type":"NEW_ACCOUNT_TRADE",
"currencyPairSymbol":"BTCZAR",
"data":{
"price":"9500",
"quantity":"0.00105263",
"currencyPair":"BTCZAR",
"tradedAt":"2019-04-25T20:36:53.426Z",
"side":"buy"
}
}
INSTANT_ORDER_COMPLETED: NEW SIMPLE BUY/SELL EXECUTED
Sample message feed:
{
"type":"INSTANT_ORDER_COMPLETED",
"data":{
"orderId":"247dc157-bb5b-49af-b476-2f613b780697",
"success":true,
"paidAmount":"10",
"paidCurrency":"R",
"receivedAmount":"0.00104473",
"receivedCurrency":"BTC",
"feeAmount":"0.00000789",
"feeCurrency":"BTC",
"orderExecutedAt":"2019-04-25T20:36:53.445"
}
}
OPEN_ORDERS_UPDATE : NEW ORDER ADDED TO OPEN ORDERS
Sample message feed (all open orders are returned) :
{
"type":"OPEN_ORDERS_UPDATE",
"data":[
{
"orderId":"38511e49-a755-4f8f-a2b1-232bae6967dc",
"side":"sell",
"remainingQuantity":"0.1",
"originalPrice":"10000",
"currencyPair":{
"id":1,
"symbol":"BTCZAR",
"baseCurrency":{
"id":2,
"symbol":"BTC",
"decimalPlaces":8,
"isActive":true,
"shortName":"BTC",
"longName":"Bitcoin",
"currencyDecimalPlaces":8,
"supportedWithdrawDecimalPlaces":8
},
"quoteCurrency":{
"id":1,
"symbol":"R",
"decimalPlaces":2,
"isActive":true,
"shortName":"ZAR",
"longName":"Rand",
"currencyDecimalPlaces":2,
"supportedWithdrawDecimalPlaces":2
},
"shortName":"BTC/ZAR",
"exchange":"VALR",
"active":true,
"minBaseAmount":0.0001,
"maxBaseAmount":2,
"minQuoteAmount":10,
"maxQuoteAmount":100000
},
"createdAt":"2019-04-17T19:51:35.776Z",
"originalQuantity":"0.1",
"filledPercentage":"0.00",
"customerOrderId":""
},
{
"orderId":"d1d9f20a-778c-4f4a-98a1-d336da960158",
"side":"sell",
"remainingQuantity":"0.1",
"originalPrice":"10000",
"currencyPair":{
"id":1,
"symbol":"BTCZAR",
"baseCurrency":{
"id":2,
"symbol":"BTC",
"decimalPlaces":8,
"isActive":true,
"shortName":"BTC",
"longName":"Bitcoin",
"currencyDecimalPlaces":8,
"supportedWithdrawDecimalPlaces":8
},
"quoteCurrency":{
"id":1,
"symbol":"R",
"decimalPlaces":2,
"isActive":true,
"shortName":"ZAR",
"longName":"Rand",
"currencyDecimalPlaces":2,
"supportedWithdrawDecimalPlaces":2
},
"shortName":"BTC/ZAR",
"exchange":"VALR",
"active":true,
"minBaseAmount":0.0001,
"maxBaseAmount":2,
"minQuoteAmount":10,
"maxQuoteAmount":100000
},
"createdAt":"2019-04-20T13:48:44.922Z",
"originalQuantity":"0.1",
"filledPercentage":"0.00",
"customerOrderId":"4"
}
]
}
ORDER_PROCESSED : ORDER PROCESSED
Sample message feed:
{
"type":"ORDER_PROCESSED",
"data":{
"orderId":"247dc157-bb5b-49af-b476-2f613b780697",
"success":true,
"failureReason":""
}
}
ORDER_STATUS_UPDATE : ORDER STATUS HAS BEEN UPDATED
Sample message feed:
{
"type":"ORDER_STATUS_UPDATE",
"data":{
"orderId":"247dc157-bb5b-49af-b476-2f613b780697",
"orderStatusType":"Filled",
"currencyPair":{
"id":1,
"symbol":"BTCZAR",
"baseCurrency":{
"id":2,
"symbol":"BTC",
"decimalPlaces":8,
"isActive":true,
"shortName":"BTC",
"longName":"Bitcoin",
"currencyDecimalPlaces":8,
"supportedWithdrawDecimalPlaces":8
},
"quoteCurrency":{
"id":1,
"symbol":"R",
"decimalPlaces":2,
"isActive":true,
"shortName":"ZAR",
"longName":"Rand",
"currencyDecimalPlaces":2,
"supportedWithdrawDecimalPlaces":2
},
"shortName":"BTC/ZAR",
"exchange":"VALR",
"active":true,
"minBaseAmount":0.0001,
"maxBaseAmount":2,
"minQuoteAmount":10,
"maxQuoteAmount":100000
},
"originalPrice":"80000",
"remainingQuantity":"0.01",
"originalQuantity":"0.01",
"orderSide":"buy",
"orderType":"limit",
"failedReason":"",
"orderUpdatedAt":"2019-05-10T14:47:24.826Z",
"orderCreatedAt":"2019-05-10T14:42:37.333Z",
"customerOrderId":"4"
}
}
orderStatusType can be one of the following values: "Placed", "Failed", "Cancelled", "Filled", "Partially Filled",
"Instant Order Balance Reserve Failed", "Instant Order Balance Reserved","Instant Order Completed".
FAILED_CANCEL_ORDER : UNABLE TO CANCEL ORDER
Sample message feed:
{
"type":"FAILED_CANCEL_ORDER",
"data":{
"orderId":"247dc157-bb5b-49af-b476-2f613b780697",
"message":"An error occurred while cancelling your order."
}
}
NEW_PENDING_RECEIVE : NEW PENDING CRYPTO DEPOSIT
Sample message feed:
{
"type":"NEW_PENDING_RECEIVE",
"data":{
"currency":{
"id":3,
"symbol":"ETH",
"decimalPlaces":8,
"isActive":true,
"shortName":"ETH",
"longName":"Ethereum",
"currencyDecimalPlaces":18,
"supportedWithdrawDecimalPlaces":8
},
"receiveAddress":"0xA7Fae2Fd50886b962d46FF4280f595A3982aeAa5",
"transactionHash":"0x804bbfa946b57fc5ffcb0c37ec02e7503435d19c35bf8eb0b0c6deb289f7009a",
"amount":0.01,
"createdAt":"2019-04-25T21:16:28Z",
"confirmations":1,
"confirmed":false
}
}
This message feed is sent through every time there is an update to the number of confirmations to this
pending deposit.
SEND_STATUS_UPDATE : CRYPTO WITHDRAWAL STATUS UPDATE
Sample message feed:
{
"type":"SEND_STATUS_UPDATE",
"data":{
"uniqueId":"beb8a612-1a1a-4d68-9bd3-96d5ea341119",
"status":"SEND_BROADCASTED",
"confirmations":0
}
}
"""
_WEBSOCKET_API_URI = 'wss://api.valr.com'
_ACCOUNT_CONNECTION = f'{_WEBSOCKET_API_URI}{WebSocketType.ACCOUNT.value}'
_TRADE_CONNECTION = f'{_WEBSOCKET_API_URI}{WebSocketType.TRADE.value}'
def __init__(self, api_key: str, api_secret: str, hooks: Dict[str, Callable],
currency_pairs: Optional[List[str]] = None, ws_type: str = 'trade',
trade_subscriptions: Optional[List[str]] = None):
self._api_key = api_key
self._api_secret = api_secret
self._ws_type = WebSocketType[ws_type.upper()]
self._hooks = {get_event_type(self._ws_type)[e.upper()]: f for e, f in hooks.items()}
if currency_pairs:
self._currency_pairs = [CurrencyPair[p.upper()] for p in currency_pairs]
else:
self._currency_pairs = [p for p in CurrencyPair]
if self._ws_type == WebSocketType.ACCOUNT:
self._uri = self._ACCOUNT_CONNECTION
else:
self._uri = self._TRADE_CONNECTION
if self._ws_type == WebSocketType.TRADE:
if trade_subscriptions:
self._trade_subscriptions = [TradeEvent[e] for e in trade_subscriptions]
else:
self._trade_subscriptions = [e for e in TradeEvent]
elif trade_subscriptions:
            raise ValueError(f'trade_subscriptions requires ws_type of {WebSocketType.TRADE.name}')
else:
self._trade_subscriptions = None
async def run(self):
"""Open an async websocket connection, consume responses and executed mapped hooks. Async hooks are also
supported. The method relies on the underlying 'websockets' libraries ping-pong support. No API-level
ping-pong messages are sent to keep the connection alive (not necessary). Support for custom-handling of
websockets.exceptions.ConnectionClosed must be handled in the application.
"""
headers = _get_valr_headers(api_key=self._api_key, api_secret=self._api_secret, method='GET',
path=self._ws_type.value, data='')
async with websockets.connect(self._uri, ssl=True, extra_headers=headers) as ws:
if self._ws_type == WebSocketType.TRADE:
await ws.send(self.get_subscribe_data(self._currency_pairs, self._trade_subscriptions))
async for message in ws:
data = json.loads(message)
try:
# ignore auth and subscription response messages
if data['type'] not in (MessageFeedType.SUBSCRIBED.name, MessageFeedType.AUTHENTICATED.name):
func = self._hooks[get_event_type(self._ws_type)[data['type']]]
# apply hooks to mapped stream events
if asyncio.iscoroutinefunction(func):
await func(data)
else:
func(data)
except KeyError:
events = [e.name for e in get_event_type(self._ws_type)]
if data['type'] in events:
raise HookNotFoundError(f'no hook supplied for {data["type"]} event')
raise WebSocketAPIException(f'WebSocket API failed to handle {data["type"]} event: {data}')
@staticmethod
def get_subscribe_data(currency_pairs, events) -> JSONType:
"""Get subscription data for ws client request"""
subscriptions = [{"event": e.name, "pairs": [p.name for p in currency_pairs]} for e in events]
data = {
"type": MessageFeedType.SUBSCRIBE.name,
"subscriptions": subscriptions
}
return json.dumps(data, default=str)
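# Illustrative sketch (not part of the original module): the docstring's
# MARKET_SUMMARY_UPDATE subscription payload can be produced with the static
# helper, assuming the CurrencyPair/TradeEvent members shown above exist:
#
#   WebSocketClient.get_subscribe_data(
#       [CurrencyPair.BTCZAR], [TradeEvent.MARKET_SUMMARY_UPDATE])
#   # -> '{"type": "SUBSCRIBE", "subscriptions":
#   #      [{"event": "MARKET_SUMMARY_UPDATE", "pairs": ["BTCZAR"]}]}'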
| 30.864407
| 120
| 0.512699
| 2,622
| 29,136
| 5.607551
| 0.206712
| 0.029382
| 0.041896
| 0.037475
| 0.438822
| 0.369788
| 0.337006
| 0.30674
| 0.272938
| 0.23519
| 0
| 0.050607
| 0.37198
| 29,136
| 943
| 121
| 30.897137
| 0.752924
| 0.747426
| 0
| 0.1125
| 0
| 0
| 0.084629
| 0.030101
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0375
| false
| 0
| 0.25
| 0.0125
| 0.3625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23e64fd0f143ca1fd055ab9e432dcd782eb331eb
| 2,215
|
py
|
Python
|
emailer.py
|
dblossom/raffle-checker
|
807d33a305e836579a423986be2a7ff7c2d655e1
|
[
"MIT"
] | null | null | null |
emailer.py
|
dblossom/raffle-checker
|
807d33a305e836579a423986be2a7ff7c2d655e1
|
[
"MIT"
] | null | null | null |
emailer.py
|
dblossom/raffle-checker
|
807d33a305e836579a423986be2a7ff7c2d655e1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from database import Database
from rafflecollector import RaffleCollector
import os
import smtplib, ssl
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import schedule
import time
class Emailer:
db = Database()
email_id = os.environ['RAFFLE_EMAIL']
email_pass = os.environ['RAFFLE_EMAIL_PASSWORD']
port = 465 # For SSL
context = ssl.create_default_context()
message = MIMEMultipart("alternative")
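    # Caveat: `message` is a class-level MIMEMultipart shared across sends; since
    # email.message assignment appends headers, repeated build_message() calls add
    # duplicate To/Subject headers instead of replacing them, so one instance is
    # best used for a single notification.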
def __init__(self):
self.send_alive_email()
self.check_db_tickets()
def check_db_tickets(self):
ticket_list = self.db.get_all_tickets()
rc = RaffleCollector()
raffle_winners = rc.winning_numbers()
for key, value in raffle_winners.items():
for tup in ticket_list:
if tup[0] == int(value):
self.build_message(self.db.get_email_pid(tup[1]),
tup[0])
self.send_email()
def build_message(self,to_email,ticket):
self.message["From"] = self.email_id
self.message["To"] = to_email
self.message["Subject"] = "Congratulations, You're a winner!"
text = """\
Congratulations! Ticket# """ + str(ticket) + """ is a winner!
"""
winner_message = MIMEText(text,"plain")
self.message.attach(winner_message)
def send_email(self):
with smtplib.SMTP_SSL("smtp.gmail.com", self.port, context=self.context) as server:
server.login(self.email_id, self.email_pass)
server.sendmail(self.email_id, self.message["To"], self.message.as_string())
def send_alive_email(self):
self.message["From"] = self.email_id
self.message["To"] = self.db.get_email_pid(1)
self.message["Subject"] = "Daily heartbeat email!"
text = """\
This is your daily heartbeat email!
"""
heartbeat = MIMEText(text,"plain")
self.message.attach(heartbeat)
self.send_email()
if __name__ == "__main__":
e = Emailer()
    schedule.every().day.at("22:00").do(e.__init__)  # re-run the raffle check daily by re-invoking __init__
while True:
schedule.run_pending()
time.sleep(1)
| 31.642857
| 91
| 0.621219
| 272
| 2,215
| 4.841912
| 0.363971
| 0.083523
| 0.033409
| 0.045558
| 0.157935
| 0.135156
| 0.083523
| 0.059226
| 0.059226
| 0
| 0
| 0.007917
| 0.258691
| 2,215
| 69
| 92
| 32.101449
| 0.794153
| 0.013093
| 0
| 0.140351
| 0
| 0
| 0.125916
| 0.009615
| 0
| 0
| 0
| 0
| 0
| 1
| 0.087719
| false
| 0.035088
| 0.140351
| 0
| 0.350877
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23e79af618c8a287421e1a5d39cd45ed069fab6f
| 4,391
|
py
|
Python
|
website_handling/website_check.py
|
Dr3xler/CookieConsentChecker
|
816cdfb9d9dc741c57dbcd5e9c9ef59837196631
|
[
"MIT"
] | null | null | null |
website_handling/website_check.py
|
Dr3xler/CookieConsentChecker
|
816cdfb9d9dc741c57dbcd5e9c9ef59837196631
|
[
"MIT"
] | 3
|
2021-04-29T22:57:09.000Z
|
2021-05-03T15:32:39.000Z
|
website_handling/website_check.py
|
Dr3xler/CookieConsentChecker
|
816cdfb9d9dc741c57dbcd5e9c9ef59837196631
|
[
"MIT"
] | 1
|
2021-08-29T09:53:09.000Z
|
2021-08-29T09:53:09.000Z
|
import os
import json
import shutil
import time
from pathlib import Path
from sys import platform
# TODO: (stackoverflow.com/question/17136514/how-to-get-3rd-party-cookies)
# stackoverflow.com/questions/22200134/make-selenium-grab-all-cookies, add the selenium, phantomjs part to catch ALL cookies
# TODO: Maybe save cookies to global variable to compare them in another function without saving them?
'''
loading more than one addon for firefox to use with selenium:
extensions = [
'jid1-KKzOGWgsW3Ao4Q@jetpack.xpi',
'',
''
]
for extension in extensions:
driver.install_addon(extension_dir + extension, temporary=True)
'''
def load_with_addon(driver, websites):
"""This method will load all websites with 'i don't care about cookies' preinstalled.
Afterwards it will convert the cookies to dicts and save them locally for comparison
Be aware that this method will delete all saved cookies"""
print('creating dir for cookies with addon...')
# checks if cookie dir already exists, creates an empty dir.
if len(os.listdir('data/save/with_addon/')) != 0:
shutil.rmtree('data/save/with_addon/')
os.mkdir('data/save/with_addon/')
print('saving cookies in firefox with addons ...')
# the extension directory needs to be the one of your local machine
# linux
if platform == "linux":
extension_dir = os.getenv("HOME") + "/.mozilla/firefox/7ppp44j6.default-release/extensions/"
driver.install_addon(extension_dir + 'jid1-KKzOGWgsW3Ao4Q@jetpack.xpi', temporary=True)
# windows
if platform == "win32":
extension_dir = str(
Path.home()) + "/AppData/Roaming/Mozilla/Firefox/Profiles/shdzeteb.default-release/extensions/"
print(extension_dir)
driver.install_addon(extension_dir + 'jid1-KKzOGWgsW3Ao4Q@jetpack.xpi', temporary=True)
for website in websites:
name = website.split('www.')[1]
driver.get(website)
driver.execute_script("return document.readyState")
cookies_addons = driver.get_cookies()
cookies_dict = {}
cookiecount = 0
for cookie in cookies_addons:
cookies_dict = cookie
print('data/save/with_addon/%s/%s_%s.json' % (name, name, cookiecount))
print(cookies_dict)
# creates the website dir
if not os.path.exists('data/save/with_addon/%s/' % name):
os.mkdir('data/save/with_addon/%s/' % name)
# saves the cookies into the website dir
with open('data/save/with_addon/%s/%s_%s.json' % (name, name, cookiecount), 'w') as file:
json.dump(cookies_dict, file, sort_keys=True)
cookiecount += 1
def load_without_addon(driver, websites):
"""This method will load all websites on a vanilla firefox version.
Afterwards it will convert the cookies to dicts and save them locally for comparison
Be aware that this method will delete all saved cookies"""
print('creating dir for cookies in vanilla...')
# checks if cookie dir already exists, creates an empty dir.
if len(os.listdir('data/save/without_addon/')) != 0:
shutil.rmtree('data/save/without_addon/')
os.mkdir('data/save/without_addon')
print('saving cookies in firefox without addons ...')
for website in websites:
name = website.split('www.')[1]
driver.get(website)
driver.execute_script("return document.readyState")
time.sleep(5)
cookies_vanilla = driver.get_cookies()
cookies_dict = {}
cookiecount = 0
for cookie in cookies_vanilla:
cookies_dict = cookie
print('data/save/without_addon/%s/%s_%s.json' % (name, name, cookiecount))
print(cookies_dict)
# creates the website dir
if not os.path.exists('data/save/without_addon/%s/' % name):
os.mkdir('data/save/without_addon/%s/' % name)
# saves the cookies into the website dir
with open('data/save/without_addon/%s/%s_%s.json' % (name, name, cookiecount), 'w') as file:
json.dump(cookies_dict, file, sort_keys=True)
cookiecount += 1
def close_driver_session(driver):
"""This method will end the driver session and close all windows. Driver needs to be initialized again afterwards"""
driver.quit()
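# Hypothetical usage sketch (assumes a working Firefox/geckodriver setup; note
# that the name parsing above expects URLs containing 'www.'):
#
#   from selenium import webdriver
#   driver = webdriver.Firefox()
#   sites = ['https://www.example.com']
#   load_without_addon(driver, sites)
#   load_with_addon(driver, sites)
#   close_driver_session(driver)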
| 35.128
| 125
| 0.662491
| 574
| 4,391
| 4.979094
| 0.277003
| 0.039188
| 0.029391
| 0.041638
| 0.637159
| 0.63366
| 0.538139
| 0.520644
| 0.520644
| 0.487054
| 0
| 0.012148
| 0.231382
| 4,391
| 124
| 126
| 35.41129
| 0.834667
| 0.26486
| 0
| 0.360656
| 0
| 0
| 0.274423
| 0.19403
| 0
| 0
| 0
| 0.008065
| 0
| 1
| 0.04918
| false
| 0
| 0.098361
| 0
| 0.147541
| 0.147541
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23ed67548a141b4172f60911a628a2325339dc44
| 4,468
|
py
|
Python
|
podstreamer.py
|
Swall0w/pymusic
|
73e08e6a5ad4c6d418a0074fc3a83be0896cf97c
|
[
"MIT"
] | 1
|
2017-06-08T11:41:00.000Z
|
2017-06-08T11:41:00.000Z
|
podstreamer.py
|
Swall0w/pymusic
|
73e08e6a5ad4c6d418a0074fc3a83be0896cf97c
|
[
"MIT"
] | null | null | null |
podstreamer.py
|
Swall0w/pymusic
|
73e08e6a5ad4c6d418a0074fc3a83be0896cf97c
|
[
"MIT"
] | null | null | null |
import feedparser
import vlc
import argparse
import sys
import time
import curses
import wget
def arg():
parser = argparse.ArgumentParser(
description='Simple Podcast Streamer.')
parser.add_argument('--add', '-a', type=str, default=None,
help='Pass Podcast an URL argument that you want to add.')
parser.add_argument('--list', '-l', action='store_true',
help='Podcast lists that are contained.')
parser.add_argument('--delete', '-d', type=int, default=-1,
help='delete podcast channel.')
parser.add_argument('--detail', type=int, default=-1,
help='See podcast channel detail.')
parser.add_argument('--play', '-p', action='store_true',
help='Play Podcast. Please pass channel and\
track argument with play argument.')
parser.add_argument('--download', action='store_true',
help='Download Podcast. Please pass channel and track argument')
parser.add_argument('--channel', '-c', type=int,
help='Podcast Channel that you want to listen to.')
parser.add_argument('--track', '-t', type=int,
help='Podcast track that you want to listen to.')
return parser.parse_args()
def converttime(times):
minutes, seconds = divmod(times, 60)
hours, minutes = divmod(minutes, 60)
return int(hours), int(minutes), int(seconds)
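# Interactive key bindings handled inside stream() below:
#   k / j   volume up / down by 5
#   l / h   seek forward / back 10 seconds
#   space   toggle pause
#   q       quit and restore the terminal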
def stream(rss_url, track):
try:
rssdata = feedparser.parse(rss_url)
rssdata = rssdata.entries[track]
    except Exception:
        print('Unexpected Error: {0}'.format(sys.exc_info()))
sys.exit(1)
mp3_url = rssdata.media_content[0]['url']
player = vlc.MediaPlayer(mp3_url)
player.audio_set_volume(100)
player.play()
stdscr = curses.initscr()
curses.noecho()
curses.cbreak()
stdscr.nodelay(1)
while True:
try:
if player.is_playing():
status = 'playing...'
else:
status = 'pause...'
key_input = stdscr.getch()
if key_input == ord('k'):
player.audio_set_volume(int(player.audio_get_volume() + 5))
elif key_input == ord('j'):
player.audio_set_volume(int(player.audio_get_volume() - 5))
elif key_input == ord('l'):
player.set_time(player.get_time() + 10000)
elif key_input == ord('h'):
player.set_time(player.get_time() - 10000)
elif key_input == ord(' '):
player.pause()
elif key_input == ord('q'):
curses.nocbreak()
curses.echo()
curses.endwin()
sys.exit(0)
else:
pass
hours, minutes, seconds = converttime(player.get_time() / 1000)
m_hours, m_minutes, m_seconds = converttime(
player.get_length() / 1000)
comment = '\r{0} time: {1:0>2}:{2:0>2}:{3:0>2} /\
{4:0>2}:{5:0>2}:{6:0>2} volume:{7} '.format(
status, hours, minutes, seconds, m_hours, m_minutes,
m_seconds, player.audio_get_volume()
)
stdscr.addstr(0, 0, rssdata.title)
stdscr.addstr(1, 0, comment)
stdscr.refresh()
time.sleep(0.1)
except KeyboardInterrupt:
curses.nocbreak()
curses.echo()
curses.endwin()
def write_list(filename,items):
with open(filename, 'w') as f:
for item in items:
f.write(item + '\n')
def detail(channel_url):
rssdata = feedparser.parse(channel_url)
for index, entry in enumerate(rssdata.entries):
print(index, entry.title)
def main():
args = arg()
# Load Channels
with open('.channels', 'r') as f:
channels = [item.strip() for item in f.readlines()]
if args.list:
for index, channel in enumerate(channels):
print(index, channel)
if args.add:
channels.append(args.add)
write_list('.channels', channels)
if args.delete>=0:
del channels[args.delete]
write_list('.channels', channels)
if args.detail >= 0:
detail(channels[args.detail])
if args.play:
stream(channels[args.channel], args.track)
if args.download:
mp3_url = feedparser.parse(channels[args.channel]).entries[
args.track].media_content[0]['url']
wget.download(mp3_url)
if __name__ == '__main__':
main()
| 30.813793
| 75
| 0.57744
| 542
| 4,468
| 4.638376
| 0.282288
| 0.02864
| 0.054097
| 0.029833
| 0.217979
| 0.202864
| 0.115354
| 0.083532
| 0.083532
| 0.083532
| 0
| 0.021063
| 0.288048
| 4,468
| 144
| 76
| 31.027778
| 0.769255
| 0.00291
| 0
| 0.101695
| 0
| 0.016949
| 0.109364
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.050847
| false
| 0.033898
| 0.059322
| 0
| 0.127119
| 0.025424
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23ef7212ca626e96219a55f6302d2adc0e8dabbe
| 5,704
|
py
|
Python
|
Engine.py
|
MaciejKrol51/chess
|
457590768d338b900253ba345e64e56afbdf1ddd
|
[
"Apache-2.0"
] | null | null | null |
Engine.py
|
MaciejKrol51/chess
|
457590768d338b900253ba345e64e56afbdf1ddd
|
[
"Apache-2.0"
] | null | null | null |
Engine.py
|
MaciejKrol51/chess
|
457590768d338b900253ba345e64e56afbdf1ddd
|
[
"Apache-2.0"
] | null | null | null |
def is_area_in_board(area):
if 0 <= area[0] <= 7 and 0 <= area[1] <= 7:
return True
return False
def cancel_castling(checker):
if abs(checker.val) == 50 or abs(checker.val) == 900:
checker.castling = False
def is_king_beaten(board, color):
for row in board:
for area in row:
            if area.checker is not None and area.checker.color != color:  # 'is not None' per PyCharm's suggestion
if area.checker.king_attack(board):
return True
return False
class Engine:
def __init__(self):
self.b_check = False
self.b_move_check = 0
self.w_check = False
self.w_move_check = 0
self.move_count = 0
self.win = 1
def what_kind_of_move(self, prev_pos, new_pos, board):
checker = board[prev_pos[0]][prev_pos[1]].checker
if abs(checker.val) == 10 and new_pos in checker.set_passe(board, self.move_count):
return 'Passe'
elif abs(checker.val) == 900 and new_pos in checker.set_castling(board):
return 'Castling'
else:
return 'Normal'
def normal_move(self, prev_pos, new_pos, board):
checker = board[prev_pos[0]][prev_pos[1]].checker
cancel_castling(checker)
checker.pos = new_pos
board[prev_pos[0]][prev_pos[1]].checker = None
board[new_pos[0]][new_pos[1]].checker = checker
def move_checker(self, prev_pos, new_pos, board):
checker = board[prev_pos[0]][prev_pos[1]].checker
if abs(checker.val) == 10:
self.passe_move(prev_pos, new_pos, board)
elif abs(checker.val) == 900:
self.castling_move(prev_pos, new_pos, board)
else:
self.normal_move(prev_pos, new_pos, board)
self.move_count += 1
def castling_move(self, prev_pos, new_pos, board):
if new_pos in board[prev_pos[0]][prev_pos[1]].checker.set_castling(board):
row = 0
if self.which_tour() == 1:
row = 7
board[row][4].checker.castling = False
self.normal_move((row, 4), new_pos, board)
if new_pos[1] == 2:
self.normal_move((row, 0), (row, 3), board)
else:
self.normal_move((row, 7), (row, 5), board)
else:
self.normal_move(prev_pos, new_pos, board)
def passe_move(self, prev_pos, new_pos, board):
if new_pos in board[prev_pos[0]][prev_pos[1]].checker.set_double_move(board):
board[prev_pos[0]][prev_pos[1]].checker.move_passe = self.move_count
self.normal_move(prev_pos, new_pos, board)
elif new_pos in board[prev_pos[0]][prev_pos[1]].checker.set_passe(board, self.move_count):
self.normal_move(prev_pos, new_pos, board)
color = board[new_pos[0]][new_pos[1]].checker.color
board[new_pos[0] + 1 * color][new_pos[1]].checker = None
else:
self.normal_move(prev_pos, new_pos, board)
board[new_pos[0]][new_pos[1]].checker.is_promotion(board)
def is_check(self, w_king_beat, b_king_beat):
if w_king_beat and b_king_beat:
if self.w_check is False and self.b_check is False:
self.w_check = True
self.w_move_check = self.move_count
self.b_check = True
self.b_move_check = self.move_count
elif w_king_beat and self.w_check is False:
self.w_check = True
self.w_move_check = self.move_count
self.b_check = False
self.b_move_check = 0
elif b_king_beat and self.b_check is False:
self.b_check = True
self.b_move_check = self.move_count
self.w_check = False
self.w_move_check = 0
def is_checkmate(self):
if self.w_check is True and self.move_count != self.w_move_check and self.move_count - self.w_move_check <= 2:
self.win = -1
return True
elif self.b_check is True and self.move_count != self.b_move_check and self.move_count - self.b_move_check <= 2:
self.win = 1
return True
else:
return False
def is_end(self, board):
w_king_beat = is_king_beaten(board, 1)
b_king_beat = is_king_beaten(board, -1)
if w_king_beat or b_king_beat:
self.is_check(w_king_beat, b_king_beat)
return self.is_checkmate()
self.w_check = False
self.w_move_check = 0
self.b_check = False
self.b_move_check = 0
return False
def which_tour(self):
if self.move_count % 2 == 0:
return 1
else:
return -1
def copy(self):
copy = Engine()
copy.b_check = self.b_check
copy.b_move_check = self.b_move_check
copy.w_check = self.w_check
copy.w_move_check = self.w_move_check
copy.move_count = self.move_count
copy.win = self.win
return copy
def value_of_table(self, board, bot):
ans = 0
for row in range(8):
for area in range(8):
if board[row][area].checker is not None:
ans += board[row][area].checker.val #+ bot.get_position_val(board[row][area].checker)
return ans
def back_move(self, prev_pos, now_pos, move_checker, beat_checker, board):
board[prev_pos[0]][prev_pos[1]].checker = move_checker
board[now_pos[0]][now_pos[1]].checker = beat_checker
self.move_count -= 1
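# Turn-parity illustration (assuming 1 denotes white and -1 black, as the
# w_/b_ fields above suggest):
#
#   e = Engine()
#   e.which_tour()     # -> 1   (white moves on even move_count)
#   e.move_count = 3
#   e.which_tour()     # -> -1  (black moves on odd move_count)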
| 37.526316
| 121
| 0.577489
| 830
| 5,704
| 3.708434
| 0.090361
| 0.0705
| 0.067576
| 0.059129
| 0.570825
| 0.514295
| 0.480507
| 0.430799
| 0.337882
| 0.291748
| 0
| 0.021734
| 0.322405
| 5,704
| 151
| 122
| 37.774834
| 0.774644
| 0.012097
| 0
| 0.343511
| 0
| 0
| 0.003467
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.122137
| false
| 0.045802
| 0
| 0
| 0.251908
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23f06c21c858b67e6817ed29322c8b3b1f30395d
| 2,281
|
py
|
Python
|
jsportal_docsite/portal/markdown_extensions/__init__.py
|
jumpscale7/prototypes
|
a17f20aa203d4965708b6e0e3a34582f55baac30
|
[
"Apache-2.0"
] | null | null | null |
jsportal_docsite/portal/markdown_extensions/__init__.py
|
jumpscale7/prototypes
|
a17f20aa203d4965708b6e0e3a34582f55baac30
|
[
"Apache-2.0"
] | null | null | null |
jsportal_docsite/portal/markdown_extensions/__init__.py
|
jumpscale7/prototypes
|
a17f20aa203d4965708b6e0e3a34582f55baac30
|
[
"Apache-2.0"
] | null | null | null |
"""
Original code Copyright 2009 [Waylan Limberg](http://achinghead.com)
All changes Copyright 2008-2014 The Python Markdown Project
Changed by Mohammad Tayseer to add CSS classes to table
License: [BSD](http://www.opensource.org/licenses/bsd-license.php)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from markdown import Extension
from markdown.extensions.tables import TableProcessor
from markdown.util import etree
class BootstrapTableProcessor(TableProcessor):
    # This method was copied from TableProcessor.run; the only change is adding
    # the `table.set('class', ...)` call to apply the Bootstrap table classes
def run(self, parent, blocks):
""" Parse a table block and build table. """
block = blocks.pop(0).split('\n')
header = block[0].strip()
        separator = block[1].strip()
rows = block[2:]
# Get format type (bordered by pipes or not)
border = False
if header.startswith('|'):
border = True
# Get alignment of columns
align = []
        for c in self._split_row(separator, border):
if c.startswith(':') and c.endswith(':'):
align.append('center')
elif c.startswith(':'):
align.append('left')
elif c.endswith(':'):
align.append('right')
else:
align.append(None)
# Build table
table = etree.SubElement(parent, 'table')
table.set('class', 'table table-striped table-bordered table-hover')
thead = etree.SubElement(table, 'thead')
self._build_row(header, thead, align, border)
tbody = etree.SubElement(table, 'tbody')
for row in rows:
self._build_row(row.strip(), tbody, align, border)
class BootstrapTableExtension(Extension):
""" Add tables to Markdown. """
def extendMarkdown(self, md, md_globals):
""" Add an instance of TableProcessor to BlockParser. """
md.parser.blockprocessors.add('bootstraptable',
BootstrapTableProcessor(md.parser),
'<hashheader')
def makeExtension(*args, **kwargs):
return BootstrapTableExtension(*args, **kwargs)
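# Usage sketch via the standard Markdown extension mechanism:
#
#   import markdown
#   html = markdown.markdown(text, extensions=[BootstrapTableExtension()])
#
# Tables in `text` are then emitted with the Bootstrap
# 'table table-striped table-bordered table-hover' classes set above.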
| 35.092308
| 88
| 0.621657
| 253
| 2,281
| 5.537549
| 0.498024
| 0.031406
| 0.022841
| 0.025696
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009575
| 0.267427
| 2,281
| 64
| 89
| 35.640625
| 0.828845
| 0.259097
| 0
| 0
| 0
| 0
| 0.068154
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078947
| false
| 0
| 0.131579
| 0.026316
| 0.289474
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23f14aa8cb681028e47a2e9707262f0b7d8d18f4
| 6,320
|
py
|
Python
|
NAS/single-path-one-shot/src/MNIST/test.py
|
naviocean/SimpleCVReproduction
|
61b43e3583977f42e6f91ef176ec5e1701e98d33
|
[
"Apache-2.0"
] | 923
|
2020-01-11T06:36:53.000Z
|
2022-03-31T00:26:57.000Z
|
NAS/single-path-one-shot/src/MNIST/test.py
|
Twenty3hree/SimpleCVReproduction
|
9939f8340c54dbd69b0017cecad875dccf428f26
|
[
"Apache-2.0"
] | 25
|
2020-02-27T08:35:46.000Z
|
2022-01-25T08:54:19.000Z
|
NAS/single-path-one-shot/src/MNIST/test.py
|
Twenty3hree/SimpleCVReproduction
|
9939f8340c54dbd69b0017cecad875dccf428f26
|
[
"Apache-2.0"
] | 262
|
2020-01-02T02:19:40.000Z
|
2022-03-23T04:56:16.000Z
|
import argparse
import json
import logging
import os
import sys
import time
import cv2
import numpy as np
import PIL
import torch
import torch.nn as nn
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from PIL import Image
from angle import generate_angle
# from cifar100_dataset import get_dataset
from slimmable_resnet20 import mutableResNet20
from utils import (ArchLoader, AvgrageMeter, CrossEntropyLabelSmooth, accuracy,
get_lastest_model, get_parameters, save_checkpoint, bn_calibration_init)
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def get_args():
parser = argparse.ArgumentParser("ResNet20-Cifar100-oneshot")
parser.add_argument('--arch-batch', default=200,
type=int, help="arch batch size")
parser.add_argument(
'--path', default="Track1_final_archs.json", help="path for json arch files")
parser.add_argument('--eval', default=False, action='store_true')
parser.add_argument('--eval-resume', type=str,
default='./snet_detnas.pkl', help='path for eval model')
parser.add_argument('--batch-size', type=int,
default=10240, help='batch size')
parser.add_argument('--save', type=str, default='./weights',
help='path for saving trained weights')
parser.add_argument('--label-smooth', type=float,
default=0.1, help='label smoothing')
parser.add_argument('--auto-continue', type=bool,
default=True, help='report frequency')
parser.add_argument('--display-interval', type=int,
default=20, help='report frequency')
parser.add_argument('--val-interval', type=int,
default=10000, help='report frequency')
parser.add_argument('--save-interval', type=int,
default=10000, help='report frequency')
parser.add_argument('--train-dir', type=str,
default='data/train', help='path to training dataset')
parser.add_argument('--val-dir', type=str,
default='data/val', help='path to validation dataset')
args = parser.parse_args()
return args
def main():
args = get_args()
# archLoader
arch_loader = ArchLoader(args.path)
# Log
log_format = '[%(asctime)s] %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%d %I:%M:%S')
t = time.time()
local_time = time.localtime(t)
if not os.path.exists('./log'):
os.mkdir('./log')
fh = logging.FileHandler(os.path.join(
'log/train-{}{:02}{}'.format(local_time.tm_year % 2000, local_time.tm_mon, t)))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
use_gpu = False
if torch.cuda.is_available():
use_gpu = True
val_loader = torch.utils.data.DataLoader(
datasets.MNIST(root="./data", train=False, transform=transforms.Compose([
transforms.Resize(32),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=False, num_workers=4, pin_memory=True)
print('load data successfully')
model = mutableResNet20(10)
criterion_smooth = CrossEntropyLabelSmooth(1000, 0.1)
if use_gpu:
model = nn.DataParallel(model)
loss_function = criterion_smooth.cuda()
device = torch.device("cuda")
else:
loss_function = criterion_smooth
device = torch.device("cpu")
model = model.to(device)
print("load model successfully")
all_iters = 0
print('load from latest checkpoint')
lastest_model, iters = get_lastest_model()
if lastest_model is not None:
all_iters = iters
checkpoint = torch.load(
lastest_model, map_location=None if use_gpu else 'cpu')
model.load_state_dict(checkpoint['state_dict'], strict=True)
    # parameter setup
args.loss_function = loss_function
args.val_dataloader = val_loader
print("start to validate model")
validate(model, device, args, all_iters=all_iters, arch_loader=arch_loader)
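# Example invocation (flags defined in get_args above; values are illustrative):
#
#   python test.py --path Track1_final_archs.json --batch-size 64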
def validate(model, device, args, *, all_iters=None, arch_loader=None):
assert arch_loader is not None
objs = AvgrageMeter()
top1 = AvgrageMeter()
top5 = AvgrageMeter()
loss_function = args.loss_function
val_dataloader = args.val_dataloader
model.eval()
# model.apply(bn_calibration_init)
max_val_iters = 0
t1 = time.time()
result_dict = {}
arch_dict = arch_loader.get_arch_dict()
base_model = mutableResNet20(10).cuda()
with torch.no_grad():
        for key, value in arch_dict.items():  # for each candidate architecture
max_val_iters += 1
# print('\r ', key, ' iter:', max_val_iters, end='')
            for data, target in val_dataloader:  # run through the validation set
target = target.type(torch.LongTensor)
data, target = data.to(device), target.to(device)
output = model(data, value["arch"])
prec1, prec5 = accuracy(output, target, topk=(1, 5))
print("acc1: ", prec1.item())
n = data.size(0)
top1.update(prec1.item(), n)
top5.update(prec5.item(), n)
tmp_dict = {}
tmp_dict['arch'] = value['arch']
tmp_dict['acc'] = top1.avg
result_dict[key] = tmp_dict
with open("acc_result.json","w") as f:
json.dump(result_dict, f)
# angle_result_dict = {}
# with torch.no_grad():
# for key, value in arch_dict.items():
# angle = generate_angle(base_model, model.module, value["arch"])
# tmp_dict = {}
# tmp_dict['arch'] = value['arch']
# tmp_dict['acc'] = angle.item()
# print("angle: ", angle.item())
# angle_result_dict[key] = tmp_dict
# print('\n', "="*10, "RESULTS", "="*10)
# for key, value in result_dict.items():
# print(key, "\t", value)
# print("="*10, "E N D", "="*10)
# with open("angle_result.json", "w") as f:
# json.dump(angle_result_dict, f)
if __name__ == "__main__":
main()
| 31.287129
| 91
| 0.612025
| 766
| 6,320
| 4.879896
| 0.295039
| 0.0313
| 0.059123
| 0.026752
| 0.158909
| 0.12306
| 0.087212
| 0.075441
| 0.075441
| 0.075441
| 0
| 0.0205
| 0.259019
| 6,320
| 201
| 92
| 31.442786
| 0.777707
| 0.112816
| 0
| 0.015504
| 0
| 0
| 0.131291
| 0.008598
| 0
| 0
| 0
| 0
| 0.007752
| 1
| 0.023256
| false
| 0
| 0.131783
| 0
| 0.162791
| 0.03876
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23f63778d171661ca3379def8f64e54d84bf8d22
| 2,868
|
py
|
Python
|
analysis/files/files.py
|
mg98/arbitrary-data-on-blockchains
|
6450e638cf7c54f53ef247ff779770b22128a024
|
[
"MIT"
] | 1
|
2022-03-21T01:51:44.000Z
|
2022-03-21T01:51:44.000Z
|
analysis/files/files.py
|
mg98/arbitrary-data-on-blockchains
|
6450e638cf7c54f53ef247ff779770b22128a024
|
[
"MIT"
] | null | null | null |
analysis/files/files.py
|
mg98/arbitrary-data-on-blockchains
|
6450e638cf7c54f53ef247ff779770b22128a024
|
[
"MIT"
] | null | null | null |
import codecs
import sqlite3
import json
from fnmatch import fnmatch
from abc import ABC, abstractmethod
class FilesAnalysis(ABC):
"""Abstraction for analysis of transaction input data that contain popular file types."""
def __init__(self, chain: str, limit: int = 0, content_types: list[str] = ['*']):
"""
Initialize files analysis.
:param chain Blockchain.
:param limit Limit results processed by BigQuery.
        :param content_types List of content types to consider for this analysis. Asterisk wildcards are supported.
"""
self.chain = chain
self.limit = limit
self.file_signatures = FilesAnalysis.get_file_signatures(content_types)
def __enter__(self):
self.conn = sqlite3.connect("results.db")
return self
def __exit__(self, type, val, tb):
self.conn.close()
def insert(self, hash: str, content_type: str, method: str, block_timestamp: str, type: str, data: str, to_contract: bool = False):
self.conn.execute("""
INSERT INTO files_results (
chain, hash, content_type, method, block_timestamp, type, data, to_contract
) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
""", (self.chain, hash, content_type, method, block_timestamp, type, data, to_contract))
self.conn.commit()
@staticmethod
def get_file_signatures(content_types: list[str]) -> dict[str,list[str]]:
"""Returns dict of file signatures filtered by `content_types`."""
        with open('analysis/files/file-signatures.json') as f:
            file_signatures = json.load(f)
return {
content_type : file_signatures[content_type]
for content_type in list(
filter(lambda k: any(fnmatch(k, ct) for ct in content_types), file_signatures)
)
}
def get_content_type(self, input):
"""Returns content type detected in input (candidate with most signature digits)."""
top_candidate = (None, 0) # tuple of content type and signature length
for (content_type, sigs) in self.file_signatures.items():
for sig in sigs:
if sig in input:
if top_candidate[1] < len(sig):
top_candidate = (content_type, len(sig))
return top_candidate[0]
@staticmethod
def hex_to_base64(hex_value: str):
"""Converts hex to base64."""
return codecs.encode(codecs.decode(hex_value, 'hex'), 'base64').decode()
def run(self):
"""Runs the query on BigQuery and persists results to the database."""
# setup database
self.conn.execute("""
CREATE TABLE IF NOT EXISTS files_results (
chain TEXT,
hash TEXT,
content_type TEXT,
method TEXT,
to_contract BOOLEAN,
type TEXT,
data TEXT,
block_timestamp DATETIME,
deleted BOOLEAN DEFAULT 0
)
""")
self.conn.execute("DELETE FROM files_results WHERE chain = ?", (self.chain,))
self.conn.commit()
self.run_core()
@abstractmethod
def run_core(self):
"""Runs the query on BigQuery and persists results to the database."""
raise NotImplementedError("Must override run_core")
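# Example of the pure helper (a staticmethod, callable without instantiating the ABC):
#
#   FilesAnalysis.hex_to_base64('48656c6c6f')   # -> 'SGVsbG8=\n'  ('Hello')
#
# Similarly, get_file_signatures(['image/*']) keeps only the image/* entries of
# file-signatures.json, matching content-type keys with fnmatch.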
| 32.224719
| 132
| 0.709902
| 392
| 2,868
| 5.040816
| 0.334184
| 0.066802
| 0.024292
| 0.019231
| 0.145749
| 0.116397
| 0.116397
| 0.116397
| 0.116397
| 0.116397
| 0
| 0.005495
| 0.175035
| 2,868
| 88
| 133
| 32.590909
| 0.82967
| 0.222455
| 0
| 0.096774
| 0
| 0
| 0.237047
| 0.016048
| 0
| 0
| 0
| 0
| 0
| 1
| 0.145161
| false
| 0
| 0.080645
| 0
| 0.306452
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
23faddb427ccf2b4a51011515cdd3a2b5edefbe2
| 1,211
|
py
|
Python
|
examples/pymt-frostnumbermodel-multidim-parameter-study.py
|
csdms/dakotathon
|
6af575b0c21384b2a1ab51e26b6a08512313bd84
|
[
"MIT"
] | 8
|
2019-09-11T12:59:57.000Z
|
2021-08-11T16:31:58.000Z
|
examples/pymt-frostnumbermodel-multidim-parameter-study.py
|
csdms/dakota
|
6af575b0c21384b2a1ab51e26b6a08512313bd84
|
[
"MIT"
] | 66
|
2015-04-06T17:11:21.000Z
|
2019-02-03T18:09:52.000Z
|
examples/pymt-frostnumbermodel-multidim-parameter-study.py
|
csdms/dakota
|
6af575b0c21384b2a1ab51e26b6a08512313bd84
|
[
"MIT"
] | 5
|
2015-03-24T22:39:34.000Z
|
2018-04-21T12:14:05.000Z
|
"""An example of using Dakota as a component with PyMT.
This example requires a WMT executor with PyMT installed, as well as
the CSDMS Dakota interface and FrostNumberModel installed as
components.
"""
import os
from pymt.components import MultidimParameterStudy, FrostNumberModel
from dakotathon.utils import configure_parameters
c, d = FrostNumberModel(), MultidimParameterStudy()
parameters = {
"component": type(c).__name__,
"descriptors": ["T_air_min", "T_air_max"],
"partitions": [3, 3],
"lower_bounds": [-20.0, 5.0],
"upper_bounds": [-5.0, 20.0],
"response_descriptors": [
"frostnumber__air",
"frostnumber__surface",
"frostnumber__stefan",
],
"response_statistics": ["median", "median", "median"],
}
parameters, substitutes = configure_parameters(parameters)
parameters["run_directory"] = c.setup(os.getcwd(), **substitutes)
cfg_file = "frostnumber_model.cfg" # get from pymt eventually
parameters["initialize_args"] = cfg_file
dtmpl_file = cfg_file + ".dtmpl"
os.rename(cfg_file, dtmpl_file)
parameters["template_file"] = dtmpl_file
d.setup(parameters["run_directory"], **parameters)
d.initialize("dakota.yaml")
d.update()
d.finalize()
| 27.522727 | 68 | 0.721718 | 146 | 1,211 | 5.767123 | 0.486301 | 0.033254 | 0.042755 | 0.038005 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011605 | 0.14616 | 1,211 | 43 | 69 | 28.162791 | 0.802708 | 0.182494 | 0 | 0 | 0 | 0 | 0.281059 | 0.021385 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.107143 | 0 | 0.107143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
23fdbc64ade39f6aaca5e42eb2790bc7ac6b2823 | 4,427 | py | Python | tensorflow/train_pretrained.py | sevakon/mobilenetv2 | e6634da41c377ae1c76662d061e6b2b804a3b09c | ["MIT"] | 1 | 2020-01-17T07:54:02.000Z | 2020-01-17T07:54:02.000Z | tensorflow/train_pretrained.py | sevakon/mobilenetv2 | e6634da41c377ae1c76662d061e6b2b804a3b09c | ["MIT"] | null | null | null | tensorflow/train_pretrained.py | sevakon/mobilenetv2 | e6634da41c377ae1c76662d061e6b2b804a3b09c | ["MIT"] | null | null | null |
from callback import ValidationHistory
from dataloader import Dataloader
from normalizer import Normalizer
import tensorflow as tf
import numpy as np
import argparse
def get_model(input_shape, n_classes):
base_model = tf.keras.applications.MobileNetV2(input_shape=input_shape,
include_top=False,
weights='imagenet')
model = tf.keras.Sequential([
base_model,
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(n_classes, activation='softmax')
])
model.summary()
return model
def get_model_with_nn_head(input_shape, n_classes):
base_model = tf.keras.applications.MobileNetV2(input_shape=input_shape,
include_top=False,
weights='imagenet')
model = tf.keras.Sequential([
base_model,
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dropout(.1),
tf.keras.layers.Dense(n_classes, activation='softmax')
])
model.summary()
return model
def write_metrics_to_file(loss_acc, fold_idx):
# "x" mode raises FileExistsError if the metrics file already exists
with open("pretrained_model/metrics_fold{}.txt".format(fold_idx), "x") as f:
f.write('Best saved model validation accuracy: {}\n'.format(loss_acc[1]))
f.write('Best saved model validation loss: {}\n'.format(loss_acc[0]))
def train(config, fold_idx):
print(' ... TRAIN MODEL ON FOLD #{}'.format(fold_idx + 1))
loader = Dataloader(img_size=config.input_size,
n_folds=config.n_folds, seed=config.seed)
loader = loader.fit(config.folder)
classes = loader.classes
train, train_steps = loader.train(batch_size=config.batch_size,
fold_idx=fold_idx, normalize=False)
val, val_steps = loader.val(64, fold_idx)
model = get_model((config.input_size, config.input_size, 3), len(classes))
model.compile(optimizer=tf.keras.optimizers.Adam(), # Optimizer
# Loss function to minimize
loss=tf.keras.losses.CategoricalCrossentropy(),
# List of metrics to monitor
metrics=[tf.keras.metrics.CategoricalAccuracy()])
filepath="pretrained_model/mobilenet_fold{}".format(fold_idx)
checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath,
monitor='val_categorical_accuracy',
verbose=1,
save_best_only=True,
mode='max')
logdir = "logs/fold{}/".format(fold_idx)
tensorboard = tf.keras.callbacks.TensorBoard(log_dir=logdir)
val_history = ValidationHistory()
callbacks = [checkpoint, tensorboard, val_history]
model.fit(train.repeat(),
epochs=config.epochs,
steps_per_epoch = train_steps,
validation_data=val.repeat(),
validation_steps=val_steps,
callbacks=callbacks)
write_metrics_to_file(val_history.best_model_stats('acc'), fold_idx)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Required arguments
parser.add_argument(
"-f",
"--folder",
required=True,
help="Path to directory containing images")
# Optional arguments.
parser.add_argument(
"-s",
"--input_size",
type=int,
default=224,
help="Input image size.")
parser.add_argument(
"-b",
"--batch_size",
type=int,
default=2,
help="Number of images in a training batch.")
parser.add_argument(
"-e",
"--epochs",
type=int,
default=100,
help="Number of training epochs.")
parser.add_argument(
"-seed",
"--seed",
type=int,
default=42,
help="Seed for data reproducing.")
parser.add_argument(
"-n",
"--n_folds",
type=int,
default=5,
help="Number of folds for CV Training")
args = parser.parse_args()
for fold_idx in range(args.n_folds):
train(args, fold_idx)
| 33.793893 | 87 | 0.5733 | 469 | 4,427 | 5.215352 | 0.328358 | 0.042927 | 0.029436 | 0.026165 | 0.232216 | 0.232216 | 0.205233 | 0.205233 | 0.205233 | 0.205233 | 0 | 0.008325 | 0.321663 | 4,427 | 130 | 88 | 34.053846 | 0.806194 | 0.022815 | 0 | 0.295238 | 0 | 0 | 0.116435 | 0.021296 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038095 | false | 0 | 0.057143 | 0 | 0.114286 | 0.009524 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
23fe13301d5fe663179594a9c1c64fdce727026b | 1,354 | py | Python | source/test.py | valrus/alfred-org-mode-workflow | 30f81772ad16519317ccb170d36782e387988633 | ["MIT"] | 52 | 2016-08-04T02:15:52.000Z | 2021-12-20T20:33:07.000Z | source/test.py | valrus/alfred-org-mode-workflow | 30f81772ad16519317ccb170d36782e387988633 | ["MIT"] | 3 | 2019-11-15T15:13:51.000Z | 2020-11-25T10:42:34.000Z | source/test.py | valrus/alfred-org-mode-workflow | 30f81772ad16519317ccb170d36782e387988633 | ["MIT"] | 9 | 2019-03-06T04:21:29.000Z | 2021-08-16T02:28:33.000Z |
# coding=utf-8
from orgmode_entry import OrgmodeEntry
entry = u'#A Etwas machen:: DL: Morgen S: Heute Ausstellung am 23.09.2014 12:00 oder am Montag bzw. am 22.10 13:00 sollte man anschauen. '
org = OrgmodeEntry()
# Use an absolute path
org.inbox_file = '/Users/Alex/Documents/Planung/Planning/Inbox.org'
org.delimiter = ':: ' # tag to separate the head from the body of the entry
org.heading_suffix = "\n* " # depth of entry
org.use_priority_tags = True # use priority tags: #b => [#B]
org.priority_tag = '#' # tag that marks a priority value
org.add_creation_date = True # add a creation date
org.replace_absolute_dates = True # convert absolute dates like 01.10 15:00 into orgmode dates => <2016-10-01 Sun 15:00>
org.replace_relative_dates = True # convert relative dates like monday or tomorrow into orgmode dates
# Convert a schedule pattern into an org scheduled date
org.convert_scheduled = True # convert schedule patterns
org.scheduled_pattern = "S: "
# Convert a deadline pattern into an org deadline
org.convert_deadlines = True
org.deadline_pattern = "DL: "
org.smart_line_break = True # convert a pattern into a linebreak
org.line_break_pattern = r"\s\s" # regex: two whitespace characters
# Cleanup spaces (double, leading, and trailing)
org.cleanup_spaces = True
entry = 'TODO ' + entry
message = org.add_entry(entry).encode('utf-8')
print(message)
| 33.02439 | 140 | 0.739291 | 214 | 1,354 | 4.574766 | 0.46729 | 0.044944 | 0.030644 | 0.032686 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.037102 | 0.163959 | 1,354 | 40 | 141 | 33.85 | 0.827739 | 0.39808 | 0 | 0 | 0 | 0.047619 | 0.258469 | 0.060226 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.047619 | 0 | 0.047619 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
23fead2b5260640c347d0b505721cb2630c98560 | 407 | py | Python | 25/00/2.py | pylangstudy/201706 | f1cc6af6b18e5bd393cda27f5166067c4645d4d3 | ["CC0-1.0"] | null | null | null | 25/00/2.py | pylangstudy/201706 | f1cc6af6b18e5bd393cda27f5166067c4645d4d3 | ["CC0-1.0"] | 70 | 2017-06-01T11:02:51.000Z | 2017-06-30T00:35:32.000Z | 25/00/2.py | pylangstudy/201706 | f1cc6af6b18e5bd393cda27f5166067c4645d4d3 | ["CC0-1.0"] | null | null | null |
import gzip
import bz2
import lzma
s = b'witch which has which witches wrist watch'
with open('2.txt', 'wb') as f: f.write(s)
with gzip.open('2.txt.gz', 'wb') as f: f.write(s)
with bz2.open('2.txt.bz2', 'wb') as f: f.write(s)
with lzma.open('2.txt.xz', 'wb') as f: f.write(s)
print('txt', len(s))
print('gz ', len(gzip.compress(s)))
print('bz2', len(bz2.compress(s)))
print('xz ', len(lzma.compress(s)))
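# Note (added): for a tiny input like this, the gzip/bz2/xz container headers
# can outweigh the savings, so the compressed lengths printed above may exceed
# len(s); the comparison is still useful for seeing the relative overheads.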
| 25.4375 | 49 | 0.641278 | 83 | 407 | 3.144578 | 0.301205 | 0.076628 | 0.122605 | 0.091954 | 0.229885 | 0.229885 | 0.183908 | 0 | 0 | 0 | 0 | 0.025352 | 0.127764 | 407 | 15 | 50 | 27.133333 | 0.709859 | 0 | 0 | 0 | 0 | 0 | 0.224138 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0.333333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
23ff90db58dc31d3acc655b347ff8c32734fce8f | 751 | py | Python | timezones.py | rayjustinhuang/BitesofPy | 03b694c5259ff607621419d9677c5caff90a6057 | ["MIT"] | null | null | null | timezones.py | rayjustinhuang/BitesofPy | 03b694c5259ff607621419d9677c5caff90a6057 | ["MIT"] | null | null | null | timezones.py | rayjustinhuang/BitesofPy | 03b694c5259ff607621419d9677c5caff90a6057 | ["MIT"] | null | null | null |
import pytz
from datetime import datetime
MEETING_HOURS = range(6, 23) # meet from 6 - 22 max
TIMEZONES = set(pytz.all_timezones)
def within_schedule(utc, *timezones):
"""Receive a utc datetime and one or more timezones and check if
they are all within schedule (MEETING_HOURS)"""
times = []
timezone_list = list(timezones)
for zone in timezone_list:
if zone not in TIMEZONES:
raise ValueError
tz = pytz.timezone(zone)
times.append(pytz.utc.localize(utc).astimezone(tz))
return all(time.hour in MEETING_HOURS for time in times)
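# Example (added sketch): 12:00 UTC on 2019-06-01 is 14:00 in Europe/Berlin and
# 08:00 in America/New_York, both inside MEETING_HOURS, so this returns True:
#   within_schedule(datetime(2019, 6, 1, 12), 'Europe/Berlin', 'America/New_York')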
| 25.033333 | 68 | 0.624501 | 96 | 751 | 4.8125 | 0.520833 | 0.077922 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011342 | 0.295606 | 751 | 30 | 69 | 25.033333 | 0.862004 | 0.170439 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0.05 | 0.1 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9b000540f0f753d3e1bc63731ed866572a4a795c | 450 | py | Python | config.py | saurabhchardereal/kernel-tracker | 60d53e6ae377925f8540f148b742869929337088 | ["MIT"] | null | null | null | config.py | saurabhchardereal/kernel-tracker | 60d53e6ae377925f8540f148b742869929337088 | ["MIT"] | null | null | null | config.py | saurabhchardereal/kernel-tracker | 60d53e6ae377925f8540f148b742869929337088 | ["MIT"] | null | null | null |
from os import sys, environ
from tracker.__main__ import args
# Name of the file to save kernel versions json
DB_FILE_NAME = "data.json"
# By default looks up in env for api and chat id or just put your stuff in here
# directly if you prefer it that way
BOT_API = environ.get("BOT_API")
CHAT_ID = environ.get("CHAT_ID")
if args.notify:
if BOT_API is None or CHAT_ID is None:
print("Either BOT_API or CHAT_ID is empty!")
sys.exit(1)
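# Usage sketch (added; the module invocation is an assumption based on the
# `tracker.__main__` import above):
#   BOT_API=<token> CHAT_ID=<id> python -m tracker --notify
# With --notify, both variables must be set or the check above exits.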
| 28.125 | 79 | 0.717778 | 84 | 450 | 3.678571 | 0.630952 | 0.097087 | 0.064725 | 0.07767 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002801 | 0.206667 | 450 | 15 | 80 | 30 | 0.862745 | 0.351111 | 0 | 0 | 0 | 0 | 0.201389 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9b02d42862a5d0797afc71d43094512a70c96510 | 3,302 | py | Python | Packs/dnstwist/Integrations/dnstwist/dnstwist.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | ["MIT"] | 799 | 2016-08-02T06:43:14.000Z | 2022-03-31T11:10:11.000Z | Packs/dnstwist/Integrations/dnstwist/dnstwist.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | ["MIT"] | 9,317 | 2016-08-07T19:00:51.000Z | 2022-03-31T21:56:04.000Z | Packs/dnstwist/Integrations/dnstwist/dnstwist.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | ["MIT"] | 1,297 | 2016-08-04T13:59:00.000Z | 2022-03-31T23:43:06.000Z |
import json
import subprocess
from CommonServerPython import *
TWIST_EXE = '/dnstwist/dnstwist.py'
if demisto.command() == 'dnstwist-domain-variations':
KEYS_TO_MD = ["whois_updated", "whois_created", "dns_a", "dns_mx", "dns_ns"]
DOMAIN = demisto.args()['domain']
LIMIT = int(demisto.args()['limit'])
WHOIS = demisto.args().get('whois')
def get_dnstwist_result(domain, include_whois):
args = [TWIST_EXE, '-f', 'json']
if include_whois:
args.append('-w')
args.append(domain)
res = subprocess.check_output(args)
return json.loads(res)
def get_domain_to_info_map(dns_twist_result):
results = []
for x in dns_twist_result:
temp = {} # type: dict
for k, v in x.items():
if k in KEYS_TO_MD:
if x["domain"] not in temp:
temp["domain-name"] = x["domain"]
if k == "dns_a":
temp["IP Address"] = v
else:
temp[k] = v
if temp:
results.append(temp)
return results
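# Example (added; hypothetical input): an entry such as
#   {"domain": "examp1e.com", "dns_a": ["1.2.3.4"], "dns_mx": ["mx.examp1e.com"]}
# maps to
#   {"domain-name": "examp1e.com", "IP Address": ["1.2.3.4"], "dns_mx": ["mx.examp1e.com"]}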
dnstwist_result = get_dnstwist_result(DOMAIN, WHOIS == 'yes')
new_result = get_domain_to_info_map(dnstwist_result)
md = tableToMarkdown('dnstwist for domain - ' + DOMAIN, new_result,
headers=["domain-name", "IP Address", "dns_mx", "dns_ns", "whois_updated", "whois_created"])
domain_context = new_result[0] # The requested domain for variations
domains_context_list = new_result[1:LIMIT + 1] # The variations domains
domains = []
for item in domains_context_list:
temp = {"Name": item["domain-name"]}
if "IP Address" in item:
temp["IP"] = item["IP Address"]
if "dns_mx" in item:
temp["DNS-MX"] = item["dns_mx"]
if "dns_ns" in item:
temp["DNS-NS"] = item["dns_ns"]
if "whois_updated" in item:
temp["WhoisUpdated"] = item["whois_updated"]
if "whois_created" in item:
temp["WhoisCreated"] = item["whois_created"]
domains.append(temp)
ec = {"Domains": domains}
if "domain-name" in domain_context:
ec["Name"] = domain_context["domain-name"]
if "IP Address" in domain_context:
ec["IP"] = domain_context["IP Address"]
if "dns_mx" in domain_context:
ec["DNS-MX"] = domain_context["dns_mx"]
if "dns_ns" in domain_context:
ec["DNS-NS"] = domain_context["dns_ns"]
if "whois_updated" in domain_context:
ec["WhoisUpdated"] = domain_context["whois_updated"]
if "whois_created" in domain_context:
ec["WhoisCreated"] = domain_context["whois_created"]
entry_result = {
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': dnstwist_result,
'HumanReadable': md,
'ReadableContentsFormat': formats['markdown'],
'EntryContext': {'dnstwist.Domain(val.Name == obj.Name)': ec}
}
demisto.results(entry_result)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration test button.
subprocess.check_output([TWIST_EXE, '-h'], stderr=subprocess.STDOUT)
demisto.results('ok')
sys.exit(0)
| 35.891304 | 117 | 0.58934 | 398 | 3,302 | 4.690955 | 0.248744 | 0.09052 | 0.048206 | 0.054633 | 0.149973 | 0.111409 | 0 | 0 | 0 | 0 | 0 | 0.001676 | 0.277408 | 3,302 | 91 | 118 | 36.285714 | 0.780805 | 0.040581 | 0 | 0 | 0 | 0 | 0.219096 | 0.029402 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025974 | false | 0 | 0.038961 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9b049ff801a11852ac7c1f7e34a2e069aca68527 | 3,395 | py | Python | test/test_resourcerequirements.py | noralsydmp/icetea | b486cdc8e0d2211e118f1f8211aa4d284ca02422 | ["Apache-2.0"] | 6 | 2018-08-10T17:11:10.000Z | 2020-04-29T07:05:36.000Z | test/test_resourcerequirements.py | noralsydmp/icetea | b486cdc8e0d2211e118f1f8211aa4d284ca02422 | ["Apache-2.0"] | 58 | 2018-08-13T08:36:08.000Z | 2021-07-07T08:32:52.000Z | test/test_resourcerequirements.py | noralsydmp/icetea | b486cdc8e0d2211e118f1f8211aa4d284ca02422 | ["Apache-2.0"] | 7 | 2018-08-10T12:53:18.000Z | 2021-11-08T05:15:42.000Z |
# pylint: disable=missing-docstring,protected-access
"""
Copyright 2017 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from icetea_lib.ResourceProvider.ResourceRequirements import ResourceRequirements
class ResourceRequirementTestcase(unittest.TestCase):
def setUp(self):
self.simple_testreqs = {
"type": "process",
"allowed_platforms": [],
"expires": 2000,
"nick": None,
"tags": {"test": True}
}
self.simple_testreqs2 = {
"type": "process",
"allowed_platforms": ["DEV3"],
"nick": None,
}
self.recursion_testreqs = {
"type": "process",
"allowed_platforms": ["DEV3"],
"application": {"bin": "test_binary"},
"nick": None,
}
self.actual_descriptor1 = {"platform_name": "DEV2", "resource_type": "mbed"}
self.actual_descriptor2 = {"platform_name": "DEV1", "resource_type": "process"}
self.actual_descriptor3 = {"platform_name": "DEV3", "resource_type": "process"}
self.actual_descriptor4 = {"resource_type": "process", "bin": "test_binary"}
def test_get(self):
dutreq = ResourceRequirements(self.simple_testreqs)
self.assertEqual(dutreq.get("type"), "process")
dutreq = ResourceRequirements(self.recursion_testreqs)
self.assertEqual(dutreq.get("application.bin"), "test_binary")
self.assertIsNone(dutreq.get("application.bin.not_exist"))
def test_set(self):
dutreq = ResourceRequirements(self.simple_testreqs)
dutreq.set("test_key", "test_val")
self.assertEqual(dutreq._requirements["test_key"], "test_val")
# Test override
dutreq.set("test_key", "test_val2")
self.assertEqual(dutreq._requirements["test_key"], "test_val2")
# test tags merging. Also a test for set_tag(tags=stuff)
dutreq.set("tags", {"test": False, "test2": True})
self.assertEqual(dutreq._requirements["tags"], {"test": False, "test2": True})
dutreq.set("tags", {"test2": False})
self.assertEqual(dutreq._requirements["tags"], {"test": False, "test2": False})
def test_set_tags(self):
dutreq = ResourceRequirements(self.simple_testreqs)
dutreq._set_tag(tag="test", value=False)
dutreq._set_tag(tag="test2", value=True)
self.assertDictEqual(dutreq._requirements["tags"], {"test": False, "test2": True})
def test_empty_tags(self):
dutreq = ResourceRequirements(self.simple_testreqs)
dutreq._set_tag("test", value=None)
dutreq.remove_empty_tags()
self.assertEqual(dutreq._requirements["tags"], {})
self.assertEqual(dutreq.remove_empty_tags(tags={"test1": True, "test2": None}),
{"test1": True})
if __name__ == '__main__':
unittest.main()
| 38.146067 | 90 | 0.648895 | 378 | 3,395 | 5.653439 | 0.359788 | 0.056153 | 0.078615 | 0.077211 | 0.363126 | 0.218531 | 0.196069 | 0.1343 | 0.059897 | 0.059897 | 0 | 0.012448 | 0.219146 | 3,395 | 88 | 91 | 38.579545 | 0.793663 | 0.197644 | 0 | 0.214286 | 0 | 0 | 0.189757 | 0.009212 | 0 | 0 | 0 | 0 | 0.178571 | 1 | 0.089286 | false | 0 | 0.035714 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9b04ad53449f706663e52db825a5918226304aab | 321 | py | Python | hadoop_example/reduce.py | hatbot-team/hatbot | e7fea42b5431cc3e93d9e484c5bb5232d8f2e981 | ["MIT"] | 1 | 2016-05-26T08:18:36.000Z | 2016-05-26T08:18:36.000Z | hadoop_example/reduce.py | hatbot-team/hatbot | e7fea42b5431cc3e93d9e484c5bb5232d8f2e981 | ["MIT"] | null | null | null | hadoop_example/reduce.py | hatbot-team/hatbot | e7fea42b5431cc3e93d9e484c5bb5232d8f2e981 | ["MIT"] | null | null | null |
#!/bin/python3
import sys
prev = ''
cnt = 0
for x in sys.stdin.readlines():
q, w = x.split('\t')[0], int(x.split('\t')[1])
if (prev == q):
cnt += w  # sum the mapper counts for the current key
else:
if (cnt > 0):
print(prev + '\t' + str(cnt))
prev = q
cnt = w
if (cnt > 0):
print(prev + '\t' + str(cnt))
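# Example (added): with sorted mapper output "a\t1", "a\t1", "b\t1" on stdin,
# this prints "a\t2" then "b\t1". The input must arrive sorted by key, which
# Hadoop streaming guarantees for the reduce phase.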
| 17.833333 | 50 | 0.433022 | 50 | 321 | 2.78 | 0.44 | 0.086331 | 0.100719 | 0.158273 | 0.316547 | 0.316547 | 0.316547 | 0.316547 | 0 | 0 | 0 | 0.033493 | 0.34891 | 321 | 17 | 51 | 18.882353 | 0.631579 | 0.040498 | 0 | 0.285714 | 0 | 0 | 0.026059 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.071429 | 0 | 0.071429 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9b0792a063a2b49e22d50a2e57caac25388b1b3e | 511 | py | Python | tests/blockchain/test_hashing_and_proof.py | thecoons/blockchain | 426ede04d058b5eb0e595fcf6e9c71d16605f9a7 | ["MIT"] | null | null | null | tests/blockchain/test_hashing_and_proof.py | thecoons/blockchain | 426ede04d058b5eb0e595fcf6e9c71d16605f9a7 | ["MIT"] | null | null | null | tests/blockchain/test_hashing_and_proof.py | thecoons/blockchain | 426ede04d058b5eb0e595fcf6e9c71d16605f9a7 | ["MIT"] | null | null | null |
import json
import hashlib
from .test_case.blockchain import BlockchainTestCase
class TestHashingAndProofs(BlockchainTestCase):
def test_hash_is_correct(self):
self.create_block()
new_block = self.blockchain.last_block
new_block_json = json.dumps(
self.blockchain.last_block, sort_keys=True
).encode()
new_hash = hashlib.sha256(new_block_json).hexdigest()
assert len(new_hash) == 64
assert new_hash == self.blockchain.hash(new_block)
| 26.894737 | 61 | 0.702544 | 62 | 511 | 5.516129 | 0.467742 | 0.093567 | 0.076023 | 0.134503 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012469 | 0.215264 | 511 | 18 | 62 | 28.388889 | 0.840399 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.153846 | 1 | 0.076923 | false | 0 | 0.230769 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9b0816140cf40f94ed1ecf980a99d990c62d409b | 14,495 | py | Python | xgbse/_kaplan_neighbors.py | gdmarmerola/xgboost-survival-embeddings | cb672d5c2bf09c7d8cbf9edf7807a153bce4db40 | ["Apache-2.0"] | null | null | null | xgbse/_kaplan_neighbors.py | gdmarmerola/xgboost-survival-embeddings | cb672d5c2bf09c7d8cbf9edf7807a153bce4db40 | ["Apache-2.0"] | null | null | null | xgbse/_kaplan_neighbors.py | gdmarmerola/xgboost-survival-embeddings | cb672d5c2bf09c7d8cbf9edf7807a153bce4db40 | ["Apache-2.0"] | null | null | null |
import warnings
import numpy as np
import pandas as pd
import xgboost as xgb
import scipy.stats as st
from sklearn.neighbors import BallTree
from xgbse._base import XGBSEBaseEstimator
from xgbse.converters import convert_data_to_xgb_format, convert_y
from xgbse.non_parametric import (
calculate_kaplan_vectorized,
get_time_bins,
calculate_interval_failures,
)
# at which percentiles will the KM predict
KM_PERCENTILES = np.linspace(0, 1, 11)
DEFAULT_PARAMS = {
"objective": "survival:aft",
"eval_metric": "aft-nloglik",
"aft_loss_distribution": "normal",
"aft_loss_distribution_scale": 1,
"tree_method": "hist",
"learning_rate": 5e-2,
"max_depth": 8,
"booster": "dart",
"subsample": 0.5,
"min_child_weight": 50,
"colsample_bynode": 0.5,
}
DEFAULT_PARAMS_TREE = {
"objective": "survival:cox",
"eval_metric": "cox-nloglik",
"tree_method": "exact",
"max_depth": 100,
"booster": "dart",
"subsample": 1.0,
"min_child_weight": 30,
"colsample_bynode": 1.0,
}
# class to turn XGB into a kNN with a kaplan meier in the NNs
class XGBSEKaplanNeighbors(XGBSEBaseEstimator):
"""
## XGBSEKaplanNeighbor
Convert xgboost into a nearest neighbor model, where we use hamming distance to define
similar elements as the ones that co-ocurred the most at the ensemble terminal nodes.
Then, at each neighbor-set compute survival estimates with the Kaplan-Meier estimator.
"""
def __init__(self, xgb_params=DEFAULT_PARAMS, n_neighbors=30, radius=None):
"""
Args:
xgb_params (Dict): parameters for XGBoost model, see
https://xgboost.readthedocs.io/en/latest/parameter.html
n_neighbors (Int): number of neighbors for computing KM estimates
radius (Float): If set, uses a radius around the point for neighbors search
"""
self.xgb_params = xgb_params
self.n_neighbors = n_neighbors
self.radius = radius
self.persist_train = False
self.index_id = None
def fit(
self,
X,
y,
num_boost_round=1000,
validation_data=None,
early_stopping_rounds=None,
verbose_eval=0,
persist_train=True,
index_id=None,
time_bins=None,
):
"""
Transform feature space by fitting a XGBoost model and outputting its leaf indices.
Build search index in the new space to allow nearest neighbor queries at scoring time.
Args:
X ([pd.DataFrame, np.array]): design matrix to fit XGBoost model
y (structured array(numpy.bool_, numpy.number)): binary event indicator as first field,
and time of event or time of censoring as second field.
num_boost_round (Int): Number of boosting iterations.
validation_data (Tuple): Validation data in the format of a list of tuples [(X, y)]
if user desires to use early stopping
early_stopping_rounds (Int): Activates early stopping.
Validation metric needs to improve at least once
in every **early_stopping_rounds** round(s) to continue training.
See xgboost.train documentation.
verbose_eval ([Bool, Int]): level of verbosity. See xgboost.train documentation.
persist_train (Bool): whether or not to persist training data to use explainability
through prototypes
index_id (pd.Index): user defined index if intended to use explainability
through prototypes
time_bins (np.array): specified time windows to use when making survival predictions
Returns:
XGBSEKaplanNeighbors: fitted instance of XGBSEKaplanNeighbors
"""
self.E_train, self.T_train = convert_y(y)
if time_bins is None:
time_bins = get_time_bins(self.T_train, self.E_train)
self.time_bins = time_bins
# converting data to xgb format
dtrain = convert_data_to_xgb_format(X, y, self.xgb_params["objective"])
# converting validation data to xgb format
evals = ()
if validation_data:
X_val, y_val = validation_data
dvalid = convert_data_to_xgb_format(
X_val, y_val, self.xgb_params["objective"]
)
evals = [(dvalid, "validation")]
# training XGB
self.bst = xgb.train(
self.xgb_params,
dtrain,
num_boost_round=num_boost_round,
early_stopping_rounds=early_stopping_rounds,
evals=evals,
verbose_eval=verbose_eval,
)
# creating nearest neighbor index
leaves = self.bst.predict(dtrain, pred_leaf=True)
self.tree = BallTree(leaves, metric="hamming", leaf_size=40)
if persist_train:
self.persist_train = True
if index_id is None:
index_id = X.index.copy()
self.index_id = index_id
return self
def predict(
self,
X,
time_bins=None,
return_ci=False,
ci_width=0.683,
return_interval_probs=False,
):
"""
Make queries to nearest neighbor search index build on the transformed XGBoost space.
Compute a Kaplan-Meier estimator for each neighbor-set. Predict the KM estimators.
Args:
X (pd.DataFrame): data frame with samples to generate predictions
time_bins (np.array): specified time windows to use when making survival predictions
return_ci (Bool): whether to return confidence intervals via the Exponential Greenwood formula
ci_width (Float): width of confidence interval
return_interval_probs (Bool): Boolean indicating if interval probabilities are
supposed to be returned. If False the cumulative survival is returned.
Returns:
(pd.DataFrame): A dataframe of survival probabilities
for all times (columns), from a time_bins array, for all samples of X
(rows). If return_interval_probs is True, the interval probabilities are returned
instead of the cumulative survival probabilities.
upper_ci (np.array): upper confidence interval for the survival
probability values
lower_ci (np.array): lower confidence interval for the survival
probability values
"""
# converting to xgb format
d_matrix = xgb.DMatrix(X)
# getting leaves and extracting neighbors
leaves = self.bst.predict(d_matrix, pred_leaf=True)
if self.radius:
assert self.radius > 0, "Radius must be positive"
neighs, _ = self.tree.query_radius(
leaves, r=self.radius, return_distance=True
)
number_of_neighbors = np.array([len(neigh) for neigh in neighs])
if np.argwhere(number_of_neighbors == 1).shape[0] > 0:
# If there is at least one sample without neighbors apart from itself
# a warning is raised suggesting a radius increase
warnings.warn(
"Warning: Some samples don't have neighbors apart from itself. Increase the radius",
RuntimeWarning,
)
else:
_, neighs = self.tree.query(leaves, k=self.n_neighbors)
# gathering times and events/censors for neighbor sets
T_neighs = self.T_train[neighs]
E_neighs = self.E_train[neighs]
# vectorized (very fast!) implementation of Kaplan Meier curves
if time_bins is None:
time_bins = self.time_bins
# calculating z-score from width
z = st.norm.ppf(0.5 + ci_width / 2)
preds_df, upper_ci, lower_ci = calculate_kaplan_vectorized(
T_neighs, E_neighs, time_bins, z
)
if return_ci and return_interval_probs:
raise ValueError(
"Confidence intervals for interval probabilities is not supported. Choose between return_ci and return_interval_probs."
)
if return_interval_probs:
preds_df = calculate_interval_failures(preds_df)
return preds_df
if return_ci:
return preds_df, upper_ci, lower_ci
return preds_df
def _align_leaf_target(neighs, target):
# getting times and events for each leaf element
target_neighs = neighs.apply(lambda x: target[x])
# converting to vectorized kaplan format
# filling nas due to different leaf sizes with 0
target_neighs = (
pd.concat([pd.DataFrame(e) for e in target_neighs.values], axis=1)
.T.fillna(0)
.values
)
return target_neighs
# class to turn XGB into a kNN with a kaplan meier in the NNs
class XGBSEKaplanTree(XGBSEBaseEstimator):
"""
## XGBSEKaplanTree
Single tree implementation as a simplification to `XGBSEKaplanNeighbors`.
Instead of doing nearest neighbor searches, fit a single tree via `xgboost`
and calculate KM curves at each of its leaves.
"""
def __init__(
self,
xgb_params=DEFAULT_PARAMS_TREE,
):
"""
Args:
xgb_params (Dict): parameters for fitting the tree, see
https://xgboost.readthedocs.io/en/latest/parameter.html
"""
self.xgb_params = xgb_params
self.persist_train = False
self.index_id = None
def fit(
self,
X,
y,
persist_train=True,
index_id=None,
time_bins=None,
ci_width=0.683,
**xgb_kwargs,
):
"""
Fit a single decision tree using xgboost. For each leaf in the tree,
build a Kaplan-Meier estimator.
Args:
X ([pd.DataFrame, np.array]): design matrix to fit XGBoost model
y (structured array(numpy.bool_, numpy.number)): binary event indicator as first field,
and time of event or time of censoring as second field.
persist_train (Bool): whether or not to persist training data to use explainability
through prototypes
index_id (pd.Index): user defined index if intended to use explainability
through prototypes
time_bins (np.array): specified time windows to use when making survival predictions
ci_width (Float): width of confidence interval
Returns:
XGBSEKaplanTree: Trained instance of XGBSEKaplanTree
"""
E_train, T_train = convert_y(y)
if time_bins is None:
time_bins = get_time_bins(T_train, E_train)
self.time_bins = time_bins
# converting data to xgb format
dtrain = convert_data_to_xgb_format(X, y, self.xgb_params["objective"])
# training XGB
self.bst = xgb.train(self.xgb_params, dtrain, num_boost_round=1, **xgb_kwargs)
# getting leaves
leaves = self.bst.predict(dtrain, pred_leaf=True)
# organizing elements per leaf
leaf_neighs = (
pd.DataFrame({"leaf": leaves})
.groupby("leaf")
.apply(lambda x: list(x.index))
)
# getting T and E for each leaf
T_leaves = _align_leaf_target(leaf_neighs, T_train)
E_leaves = _align_leaf_target(leaf_neighs, E_train)
# calculating z-score from width
z = st.norm.ppf(0.5 + ci_width / 2)
# vectorized (very fast!) implementation of Kaplan Meier curves
(
self._train_survival,
self._train_upper_ci,
self._train_lower_ci,
) = calculate_kaplan_vectorized(T_leaves, E_leaves, time_bins, z)
# adding leaf indexes
self._train_survival = self._train_survival.set_index(leaf_neighs.index)
self._train_upper_ci = self._train_upper_ci.set_index(leaf_neighs.index)
self._train_lower_ci = self._train_lower_ci.set_index(leaf_neighs.index)
if persist_train:
self.persist_train = True
if index_id is None:
index_id = X.index.copy()
self.tree = BallTree(leaves.reshape(-1, 1), metric="hamming", leaf_size=40)
self.index_id = index_id
return self
def predict(self, X, return_ci=False, return_interval_probs=False):
"""
Run samples through tree until terminal nodes. Predict the Kaplan-Meier
estimator associated to the leaf node each sample ended into.
Args:
X (pd.DataFrame): data frame with samples to generate predictions
return_ci (Bool): whether to return confidence intervals via the Exponential Greenwood formula
return_interval_probs (Bool): Boolean indicating if interval probabilities are
supposed to be returned. If False the cumulative survival is returned.
Returns:
preds_df (pd.DataFrame): A dataframe of survival probabilities
for all times (columns), from a time_bins array, for all samples of X
(rows). If return_interval_probs is True, the interval probabilities are returned
instead of the cumulative survival probabilities.
upper_ci (np.array): upper confidence interval for the survival
probability values
lower_ci (np.array): lower confidence interval for the survival
probability values
"""
# converting to xgb format
d_matrix = xgb.DMatrix(X)
# getting leaves and extracting neighbors
leaves = self.bst.predict(d_matrix, pred_leaf=True)
# searching for kaplan meier curves in leaves
preds_df = self._train_survival.loc[leaves].reset_index(drop=True)
upper_ci = self._train_upper_ci.loc[leaves].reset_index(drop=True)
lower_ci = self._train_lower_ci.loc[leaves].reset_index(drop=True)
if return_ci and return_interval_probs:
raise ValueError(
"Confidence intervals for interval probabilities is not supported. Choose between return_ci and return_interval_probs."
)
if return_interval_probs:
preds_df = calculate_interval_failures(preds_df)
return preds_df
if return_ci:
return preds_df, upper_ci, lower_ci
return preds_df
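# Usage sketch (added; hypothetical data, following the docstrings above):
#   model = XGBSEKaplanNeighbors(n_neighbors=50)
#   model.fit(X_train, y_train, validation_data=(X_valid, y_valid),
#             early_stopping_rounds=10)
#   surv = model.predict(X_test)  # one survival curve per row of X_test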
| 34.186321 | 135 | 0.635598 | 1,807 | 14,495 | 4.915329 | 0.201439 | 0.021617 | 0.02567 | 0.011822 | 0.558883 | 0.543684 | 0.501351 | 0.461946 | 0.433799 | 0.413983 | 0 | 0.005691 | 0.29693 | 14,495 | 423 | 136 | 34.267139 | 0.865862 | 0.41504 | 0 | 0.388889 | 0 | 0 | 0.092977 | 0.012168 | 0 | 0 | 0 | 0 | 0.005051 | 1 | 0.035354 | false | 0 | 0.045455 | 0 | 0.136364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9b086dcb5153716593628ec1966115cfb5eef668 | 3,932 | py | Python | homework_2/1.py | jelic98/raf_mu | 8b965fa41d5f89eeea371ab7b8e15bd167325b5f | ["Apache-2.0"] | null | null | null | homework_2/1.py | jelic98/raf_mu | 8b965fa41d5f89eeea371ab7b8e15bd167325b5f | ["Apache-2.0"] | null | null | null | homework_2/1.py | jelic98/raf_mu | 8b965fa41d5f89eeea371ab7b8e15bd167325b5f | ["Apache-2.0"] | 1 | 2021-05-30T15:26:52.000Z | 2021-05-30T15:26:52.000Z |
import math
import numpy as np
import pandas as pd
import tensorflow as tf
import datetime as dt
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import warnings
warnings.filterwarnings('ignore')
# Hyperparameters
epoch_max = 10
alpha_max = 0.025
alpha_min = 0.001
batch_size = 32
window_size = 14
test_ratio = 0.1
max_time = 16
lstm_size = 64
# Load the data
csv = pd.read_csv('data/sp500.csv')
dates, data = csv['Date'].values, csv['Close'].values
# Convert the dates
dates = [dt.datetime.strptime(d, '%Y-%m-%d').date() for d in dates]
dates = [dates[i + max_time] for i in range(len(dates) - max_time)]
# Group the data with a sliding window
data = [data[i : i + window_size] for i in range(len(data) - window_size)]
# Normalize the data
norm = [data[0][0]] + [data[i-1][-1] for i, _ in enumerate(data[1:])]
data = [curr / norm[i] - 1.0 for i, curr in enumerate(data)]
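# Example of the normalization above (added): if the previous window closed at
# 100.0, a current window [101.0, 102.0] becomes [0.01, 0.02], i.e. relative
# price changes; the inverse, (curr + 1.0) * norm, is applied before plotting.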
nb_samples = len(data) - max_time
nb_train = int(nb_samples * (1.0 - test_ratio))
nb_test = nb_samples - nb_train
nb_batches = math.ceil(nb_train / batch_size)
# Group the data for backpropagation through time
x = [data[i : i + max_time] for i in range(nb_samples)]
y = [data[i + max_time][-1] for i in range(nb_samples)]
# Training set
train_x = [x[i : i + batch_size] for i in range(0, nb_train, batch_size)]
train_y = [y[i : i + batch_size] for i in range(0, nb_train, batch_size)]
# Test set
test_x, test_y = x[-nb_test:], y[-nb_test:]
# Data for denormalization
norm_y = [norm[i + max_time] for i in range(nb_samples)]
norm_test_y = norm_y[-nb_test:]
tf.reset_default_graph()
# Prices over the previous days
X = tf.placeholder(tf.float32, [None, max_time, window_size])
# Price on the current day
Y = tf.placeholder(tf.float32, [None])
# Learning rate
L = tf.placeholder(tf.float32)
# LSTM layer
rnn = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.LSTMCell(lstm_size)])
# LSTM layer output
val, _ = tf.nn.dynamic_rnn(rnn, X, dtype=tf.float32)
val = tf.transpose(val, [1, 0, 2])
# Last output of the LSTM layer
last = tf.gather(val, val.get_shape()[0] - 1)
# Trainable parameters
weight = tf.Variable(tf.random_normal([lstm_size, 1]))
bias = tf.Variable(tf.constant(0.0, shape=[1]))
# Predicted price
prediction = tf.add(tf.matmul(last, weight), bias)
# MSE of the prediction
loss = tf.reduce_mean(tf.square(tf.subtract(prediction, Y)))
# Gradient descent with Adam optimization
optimizer = tf.train.AdamOptimizer(L).minimize(loss)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Train the model
for epoch in range(epoch_max):
# Adapt the learning rate
epoch_loss, alpha = 0, max(alpha_min, alpha_max * (1 - epoch / epoch_max))
# Mini-batch gradient descent
for b in np.random.permutation(nb_batches):
loss_val, _ = sess.run([loss, optimizer], {X: train_x[b], Y: train_y[b], L: alpha})
epoch_loss += loss_val
print('Epoch: {}/{}\tLoss: {}'.format(epoch+1, epoch_max, epoch_loss))
# Test the model
test_pred = sess.run(prediction, {X: test_x, Y: test_y, L: alpha})
# Model accuracy for predicting the direction of price fluctuation
acc = sum(1 for i in range(nb_test) if test_pred[i] * test_y[i] > 0) / nb_test
print('Accuracy: {}'.format(acc))
# Denormalize the data
denorm_y = [(curr + 1.0) * norm_test_y[i] for i, curr in enumerate(test_y)]
denorm_pred = [(curr + 1.0) * norm_test_y[i] for i, curr in enumerate(test_pred)]
# Plot the predictions
plt.figure(figsize=(16,4))
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=7))
plt.plot(dates[-nb_test:], denorm_y, '-b', label='Actual')
plt.plot(dates[-nb_test:], denorm_pred, '--r', label='Predicted')
plt.gcf().autofmt_xdate()
plt.legend()
plt.show()
| 31.206349 | 95 | 0.694557 | 634 | 3,932 | 4.15142 | 0.309148 | 0.018237 | 0.020517 | 0.033435 | 0.168693 | 0.119301 | 0.087766 | 0.080547 | 0.080547 | 0.059271 | 0 | 0.019787 | 0.164547 | 3,932 | 125 | 96 | 31.456 | 0.781431 | 0.170905 | 0 | 0 | 0 | 0 | 0.030622 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.115942 | 0 | 0.115942 | 0.028986 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9b0a82ae7938b94fafa2d863a1f8c7ee8913dbbc | 2,674 | py | Python | playground/toy_grads_compositional.py | TUIlmenauAMS/nca_mss | f0deb4b0acd0e317fb50340a57979c2e0a43c293 | ["MIT"] | 2 | 2019-08-15T11:51:17.000Z | 2019-08-15T12:59:37.000Z | playground/toy_grads_compositional.py | TUIlmenauAMS/nca_mss | f0deb4b0acd0e317fb50340a57979c2e0a43c293 | ["MIT"] | 1 | 2020-08-11T14:25:45.000Z | 2020-08-11T14:25:45.000Z | playground/toy_grads_compositional.py | TUIlmenauAMS/nca_mss | f0deb4b0acd0e317fb50340a57979c2e0a43c293 | ["MIT"] | 1 | 2021-03-16T12:30:31.000Z | 2021-03-16T12:30:31.000Z |
# -*- coding: utf-8 -*-
__author__ = 'S.I. Mimilakis'
__copyright__ = 'MacSeNet'
import torch
from torch.autograd import Variable
import numpy as np
dtype = torch.DoubleTensor
np.random.seed(2183)
torch.manual_seed(2183)
# D is the "batch size"; N is input dimension;
# H is hidden dimension; N_out is output dimension.
D, N, H, N_out = 1, 20, 20, 20
# Create random Tensors to hold input and outputs, and wrap them in Variables.
# Setting requires_grad=False indicates that we do not need to compute gradients
# with respect to these Variables during the backward pass.
x = Variable(torch.randn(N, D).type(dtype), requires_grad=True)
y = Variable(torch.randn(N_out, D).type(dtype), requires_grad=False)
# Create random Tensors for weights, and wrap them in Variables.
# Setting requires_grad=True indicates that we want to compute gradients with
# respect to these Variables during the backward pass.
layers = []
biases = []
w_e = Variable(torch.randn(N, H).type(dtype), requires_grad=True)
b_e = Variable(torch.randn(H,).type(dtype), requires_grad=True)
w_d = Variable(torch.randn(H, N_out).type(dtype), requires_grad=True)
b_d = Variable(torch.randn(N_out,).type(dtype), requires_grad=True)
layers.append(w_e)
layers.append(w_d)
biases.append(b_e)
biases.append(b_d)
# Matrices we need the gradients wrt
parameters = torch.nn.ParameterList()
p_e = torch.nn.Parameter(torch.randn(N, H).type(dtype), requires_grad=True)
p_d = torch.nn.Parameter(torch.randn(H, N_out).type(dtype), requires_grad=True)
parameters.append(p_e)
parameters.append(p_d)
# Non-linearity
relu = torch.nn.ReLU()
comb_matrix = torch.autograd.Variable(torch.eye(N), requires_grad=True).double()
for index in range(2):
b_sc_m = relu(parameters[index].mm((layers[index] + biases[index]).t()))
b_scaled = layers[index] * b_sc_m
comb_matrix = torch.matmul(b_scaled, comb_matrix)
y_pred = torch.matmul(comb_matrix, x)
loss = (y - y_pred).norm(1)
loss.backward()
delta_term = (torch.sign(y_pred - y)).mm(x.t())
# With relu
w_tilde_d = relu(parameters[1].mm((layers[1] + biases[1]).t())) * w_d
w_tilde_e = w_e * relu(parameters[0].mm((layers[0] + biases[0]).t()))
relu_grad_dec = p_d.mm((w_d + b_d).t()).gt(0).double()
relu_grad_enc = p_e.mm((w_e + b_e).t()).gt(0).double()
p_d_grad_hat = (delta_term.mm(w_tilde_e.t()) * w_d * relu_grad_dec).mm((w_d + b_d))
p_e_grad_hat = (w_tilde_d.t().mm(delta_term) * w_e * relu_grad_enc).mm((w_e + b_e))
print('Error between autograd computation and calculated:'+str((parameters[1].grad - p_d_grad_hat).abs().max()))
print('Error between autograd computation and calculated:'+str((parameters[0].grad - p_e_grad_hat).abs().max()))
# EOF
| 33.012346 | 112 | 0.726253 | 469 | 2,674 | 3.936034 | 0.26226 | 0.071506 | 0.078007 | 0.091008 | 0.375406 | 0.2974 | 0.282232 | 0.266522 | 0.222102 | 0.114843 | 0 | 0.011905 | 0.120419 | 2,674 | 80 | 113 | 33.425 | 0.772959 | 0.218773 | 0 | 0 | 0 | 0 | 0.058795 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.068182 | 0 | 0.068182 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9b0ea10947bac276566d22b561a64d291c54aa39 | 3,195 | py | Python | blog/forms.py | oversabiproject/ghostrr | 0bf49537ddf0436d08d705b29bffbd49b66e7c65 | ["MIT"] | null | null | null | blog/forms.py | oversabiproject/ghostrr | 0bf49537ddf0436d08d705b29bffbd49b66e7c65 | ["MIT"] | null | null | null | blog/forms.py | oversabiproject/ghostrr | 0bf49537ddf0436d08d705b29bffbd49b66e7c65 | ["MIT"] | null | null | null |
import string
from django import forms
from django.conf import settings
from django.shortcuts import get_object_or_404
from accounts.models import User, Profile
from .models import Blogs
from .utils import get_limit_for_level, write_to_limit
class EditLimitForm(forms.Form):
free_limit = forms.IntegerField(help_text='Enter the limit for the free users')
pro_limit = forms.IntegerField(help_text='Enter the limit for the pro users')
enterprise_limit = forms.IntegerField(help_text='Enter the limit for the enterprise users')
def save(self):
free_limit = self.cleaned_data.get("free_limit")
pro_limit = self.cleaned_data.get("pro_limit")
enterprise_limit = self.cleaned_data.get("enterprise_limit")
write_to_limit(free_limit, pro_limit, enterprise_limit)
return 'Saved'
class CreateBlogForm(forms.Form):
pk = forms.IntegerField()
title = forms.CharField(max_length=255, help_text='Enter a meaningful title of 5-30 words for the blog.')
sentence = forms.CharField(widget=forms.TextInput(), help_text='Enter the first two or more meaningful sentences to set the blog context, approximately 50 - 100 words expected.')
copy_text = forms.CharField(widget=forms.TextInput(), required=False)
copy_length = forms.IntegerField(help_text='Select the length of copy you want')
def clean_copy_length(self):
copy_length = int(self.data.get('copy_length'))
if copy_length not in [1,2]:
raise forms.ValidationError('Invalid length selected')
return copy_length
def clean_title(self):
title = self.data.get('title')
if len(title.split(' ')) < 5:
raise forms.ValidationError('Very few words have been entered for the title. Please enter at least 5 words')
if len(title.split(' ')) > 30:
raise forms.ValidationError('A lot of words have been entered for the title. Please enter less than 30 words only')
return title
def clean_sentence(self):
sentence = self.data.get('sentence')
sentence_split = sentence.split('.')
sentence_len = len(sentence_split)
# # Validate length
# if sentence_len < 10:
# raise forms.ValidationError('Input sentences are too few')
# Validate words length
word_len = 0
for i in sentence_split:
word_len += len(i.split(' '))
if word_len < 50:
raise forms.ValidationError('Very few words have been entered for the Blog description. Please enter at least 50 words')
if word_len > 200:
raise forms.ValidationError('A lot of words have been entered. Please enter less than 200 words')
# # Validate length extra
# word_avg = word_len / sentence_len
# if word_avg < 15:
# raise forms.ValidationError('Sentences entered are too short, Consider making the sentences more longer or meaningful.')
# # Reducing punctuation marks
# for i in string.punctuation:
# sentence = sentence.replace(i+i,i)
return sentence
def save(self, commit=True):
title = self.cleaned_data.get('title')
sentence = self.cleaned_data.get('sentence')
copy_text = self.cleaned_data.get('copy_text')
copy_length = self.cleaned_data.get('copy_length')
# Creating new blog
blog = Blogs(title=title, sentence=sentence, copy_length=copy_length, copy_text=copy_text)
return blog
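# Usage sketch (added; standard Django form flow, hypothetical field values):
#   form = CreateBlogForm({'pk': 1, 'title': 'Five meaningful words right here',
#                          'sentence': '<at least 50 words...>', 'copy_length': 1})
#   if form.is_valid():
#       blog = form.save()  # returns an unsaved Blogs instance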
| 34.728261 | 179 | 0.747418 | 470 | 3,195 | 4.938298 | 0.265957 | 0.030159 | 0.045239 | 0.054287 | 0.251616 | 0.173632 | 0.173632 | 0.173632 | 0.173632 | 0.15726 | 0 | 0.013739 | 0.157121 | 3,195 | 92 | 180 | 34.728261 | 0.848125 | 0.133959 | 0 | 0 | 0 | 0.018519 | 0.270811 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.092593 | false | 0 | 0.12963 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9b0f367d08c895d53158d4654de98cbeabd4b541 | 1,032 | py | Python | Class Work/Recursion & Search /app.py | Pondorasti/CS-1.2 | c86efa40f8a09c1ca1ce0b937ca63a07108bfc6c | ["MIT"] | null | null | null | Class Work/Recursion & Search /app.py | Pondorasti/CS-1.2 | c86efa40f8a09c1ca1ce0b937ca63a07108bfc6c | ["MIT"] | null | null | null | Class Work/Recursion & Search /app.py | Pondorasti/CS-1.2 | c86efa40f8a09c1ca1ce0b937ca63a07108bfc6c | ["MIT"] | null | null | null |
a = [1, 2, 3, 5, 6]
def recursive_search(array, item_to_find, current_index=0):
if current_index == len(array):
return None
elif array[current_index] == item_to_find:
return current_index
else:
return recursive_search(array, item_to_find, current_index + 1)
# print(recursive_search(a, 3))
def binary_search(array, target):
start = 0
end = len(array) - 1
while (start <= end):
mid = (start + end) // 2
if array[mid] == target:
return mid
elif target < array[mid]:
end = mid - 1
else:
start = mid + 1
return None
a = [3,4,5,6,10,12,20]
print(binary_search(a, 5))
def recursive_fibonacci(index, current_index = 1, first = 0, second = 1):
if index == 0:
return 0
elif index == current_index:
return second
else:
return recursive_fibonacci(index, current_index = current_index + 1, first = second, second = first + second)
print(recursive_fibonacci(0))
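# Added note: recursive_fibonacci(index) returns the index-th Fibonacci number,
# e.g. recursive_fibonacci(5) == 5 and recursive_fibonacci(1) == 1, with the
# index == 0 case handled by the first branch.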
| 21.957447 | 117 | 0.593992 | 140 | 1,032 | 4.214286 | 0.257143 | 0.183051 | 0.115254 | 0.081356 | 0.301695 | 0.142373 | 0.142373 | 0.142373 | 0 | 0 | 0 | 0.0427 | 0.296512 | 1,032 | 47 | 118 | 21.957447 | 0.769972 | 0.028101 | 0 | 0.166667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0 | 0 | 0.366667 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9b119221fff46228bdcf97a9b0a6cdd84ac53dfa | 6,623 | py | Python | klusta/kwik/mock.py | hrnciar/klusta | 408e898e8d5dd1788841d1f682e51d0dc003a296 | ["BSD-3-Clause"] | 45 | 2016-03-19T14:39:40.000Z | 2021-12-15T06:34:57.000Z | klusta/kwik/mock.py | hrnciar/klusta | 408e898e8d5dd1788841d1f682e51d0dc003a296 | ["BSD-3-Clause"] | 73 | 2016-03-19T16:15:45.000Z | 2022-02-22T16:37:16.000Z | klusta/kwik/mock.py | hrnciar/klusta | 408e898e8d5dd1788841d1f682e51d0dc003a296 | ["BSD-3-Clause"] | 41 | 2016-04-08T14:04:00.000Z | 2021-09-09T20:49:41.000Z |
# -*- coding: utf-8 -*-
"""Mock Kwik files."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import os.path as op
import numpy as np
import numpy.random as nr
from .mea import staggered_positions
from .h5 import open_h5
from .model import _create_clustering
#------------------------------------------------------------------------------
# Mock functions
#------------------------------------------------------------------------------
def artificial_waveforms(n_spikes=None, n_samples=None, n_channels=None):
# TODO: more realistic waveforms.
return .25 * nr.normal(size=(n_spikes, n_samples, n_channels))
def artificial_features(*args):
return .25 * nr.normal(size=args)
def artificial_masks(n_spikes=None, n_channels=None):
masks = nr.uniform(size=(n_spikes, n_channels))
masks[masks < .25] = 0
return masks
def artificial_traces(n_samples, n_channels):
# TODO: more realistic traces.
return .25 * nr.normal(size=(n_samples, n_channels))
def artificial_spike_clusters(n_spikes, n_clusters, low=0):
return nr.randint(size=n_spikes, low=low, high=max(1, n_clusters))
def artificial_spike_samples(n_spikes, max_isi=50):
return np.cumsum(nr.randint(low=0, high=max_isi, size=n_spikes))
def artificial_correlograms(n_clusters, n_samples):
return nr.uniform(size=(n_clusters, n_clusters, n_samples))
def mock_prm(dat_path):
return dict(
prb_file='1x32_buzsaki',
traces=dict(
raw_data_files=[dat_path, dat_path],
voltage_gain=10.,
sample_rate=20000,
n_channels=32,
dtype='int16',
),
spikedetekt={
'n_features_per_channel': 4,
},
klustakwik2={},
)
#------------------------------------------------------------------------------
# Mock Kwik file
#------------------------------------------------------------------------------
def create_mock_kwik(dir_path, n_clusters=None, n_spikes=None,
n_channels=None, n_features_per_channel=None,
n_samples_traces=None,
with_kwx=True,
with_kwd=True,
add_original=True,
):
"""Create a test kwik file."""
filename = op.join(dir_path, '_test.kwik')
kwx_filename = op.join(dir_path, '_test.kwx')
kwd_filename = op.join(dir_path, '_test.raw.kwd')
# Create the kwik file.
with open_h5(filename, 'w') as f:
f.write_attr('/', 'kwik_version', 2)
def _write_metadata(key, value):
f.write_attr('/application_data/spikedetekt', key, value)
_write_metadata('sample_rate', 20000.)
# Filter parameters.
_write_metadata('filter_low', 500.)
_write_metadata('filter_high_factor', 0.95 * .5)
_write_metadata('filter_butter_order', 3)
_write_metadata('extract_s_before', 15)
_write_metadata('extract_s_after', 25)
_write_metadata('n_features_per_channel', n_features_per_channel)
# Create spike times.
spike_samples = artificial_spike_samples(n_spikes).astype(np.int64)
spike_recordings = np.zeros(n_spikes, dtype=np.uint16)
# Size of the first recording.
recording_size = 2 * n_spikes // 3
if recording_size > 0:
# Find the recording offset.
recording_offset = spike_samples[recording_size]
recording_offset += spike_samples[recording_size + 1]
recording_offset //= 2
spike_recordings[recording_size:] = 1
# Make sure the spike samples of the second recording start over.
spike_samples[recording_size:] -= spike_samples[recording_size]
spike_samples[recording_size:] += 10
else:
recording_offset = 1
if spike_samples.max() >= n_samples_traces:
raise ValueError("There are too many spikes: decrease 'n_spikes'.")
f.write('/channel_groups/1/spikes/time_samples', spike_samples)
f.write('/channel_groups/1/spikes/recording', spike_recordings)
f.write_attr('/channel_groups/1', 'channel_order',
np.arange(1, n_channels - 1)[::-1])
graph = np.array([[1, 2], [2, 3]])
f.write_attr('/channel_groups/1', 'adjacency_graph', graph)
# Create channels.
positions = staggered_positions(n_channels)
for channel in range(n_channels):
group = '/channel_groups/1/channels/{0:d}'.format(channel)
f.write_attr(group, 'name', str(channel))
f.write_attr(group, 'position', positions[channel])
# Create spike clusters.
clusterings = [('main', n_clusters)]
if add_original:
clusterings += [('original', n_clusters * 2)]
for clustering, n_clusters_rec in clusterings:
spike_clusters = artificial_spike_clusters(n_spikes,
n_clusters_rec)
groups = {0: 0, 1: 1, 2: 2}
_create_clustering(f, clustering, 1, spike_clusters, groups)
# Create recordings.
f.write_attr('/recordings/0', 'name', 'recording_0')
f.write_attr('/recordings/1', 'name', 'recording_1')
f.write_attr('/recordings/0/raw', 'hdf5_path', kwd_filename)
f.write_attr('/recordings/1/raw', 'hdf5_path', kwd_filename)
# Create the kwx file.
if with_kwx:
with open_h5(kwx_filename, 'w') as f:
f.write_attr('/', 'kwik_version', 2)
features = artificial_features(n_spikes,
(n_channels - 2) *
n_features_per_channel)
masks = artificial_masks(n_spikes,
(n_channels - 2) *
n_features_per_channel)
fm = np.dstack((features, masks)).astype(np.float32)
f.write('/channel_groups/1/features_masks', fm)
# Create the raw kwd file.
if with_kwd:
with open_h5(kwd_filename, 'w') as f:
f.write_attr('/', 'kwik_version', 2)
traces = artificial_traces(n_samples_traces, n_channels)
# TODO: int16 traces
f.write('/recordings/0/data',
traces[:recording_offset, ...].astype(np.float32))
f.write('/recordings/1/data',
traces[recording_offset:, ...].astype(np.float32))
return filename
| 36.191257 | 79 | 0.562434 | 757 | 6,623 | 4.638045 | 0.216645 | 0.029052 | 0.034178 | 0.032469 | 0.311023 | 0.22814 | 0.115921 | 0.07092 | 0.049559 | 0.029052 | 0 | 0.022709 | 0.261966 | 6,623 | 182 | 80 | 36.39011 | 0.695581 | 0.141326 | 0 | 0.06087 | 0 | 0 | 0.117637 | 0.036795 | 0 | 0 | 0 | 0.005495 | 0 | 1 | 0.086957 | false | 0 | 0.052174 | 0.06087 | 0.217391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
9b11b55cfbda19b56fe51d5da114dd0268d96bc2 | 1,824 | py | Python | telluride_decoding/preprocess_audio.py | RULCSoft/telluride_decoding | ff2a5b421a499370b379e7f4fc3f28033c045e17 | ["Apache-2.0"] | 8 | 2019-07-03T15:33:52.000Z | 2021-10-21T00:56:43.000Z | telluride_decoding/preprocess_audio.py | RULCSoft/telluride_decoding | ff2a5b421a499370b379e7f4fc3f28033c045e17 | ["Apache-2.0"] | 3 | 2020-09-02T19:04:36.000Z | 2022-03-12T19:46:50.000Z | telluride_decoding/preprocess_audio.py | RULCSoft/telluride_decoding | ff2a5b421a499370b379e7f4fc3f28033c045e17 | ["Apache-2.0"] | 7 | 2019-07-03T15:50:24.000Z | 2020-11-26T12:16:10.000Z |
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Code to compute the audio intensity for preprocessing.
Code that stores incoming arbitrary audio data, and then yields fixed window
sizes for processing (like computing the intensity.)
After initializing the object, add a block of data to the object, and then
pull fixed sized blocks of data with a given half_window_width, and separated
by window_step samples. Data is always X x num_features, where X can change from
add_data call to call, but num_features must not change. Do not reuse the object
because it has internal state from previous calls.
"""
import numpy as np
from telluride_decoding import result_store
class AudioIntensityStore(result_store.WindowedDataStore):
"""Process a window of data, calculating the mean-squared value.
"""
def next_window(self):
for win in super(AudioIntensityStore, self).next_window():
yield np.mean(np.square(win))
class AudioLoudnessMick(result_store.WindowedDataStore):
"""Process a window of data, using Mick's loudness approximation.
"""
def next_window(self):
for audio_data in super(AudioLoudnessMick, self).next_window():
yield np.mean(np.abs(audio_data) ** np.log10(2))
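# A minimal usage sketch based on the module docstring above. Assumptions
# (not verified against result_store): WindowedDataStore takes
# half_window_width and window_step in its constructor and exposes add_data().
if __name__ == '__main__':
    store = AudioIntensityStore(half_window_width=128, window_step=64)
    store.add_data(np.random.randn(1000, 1))  # data is X x num_features
    for intensity in store.next_window():
        print(intensity)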
| 36.48
| 80
| 0.733553
| 266
| 1,824
| 4.969925
| 0.545113
| 0.045386
| 0.019667
| 0.024206
| 0.143722
| 0.113464
| 0.113464
| 0.072617
| 0
| 0
| 0
| 0.007157
| 0.157346
| 1,824
| 49
| 81
| 37.22449
| 0.85296
| 0.719298
| 0
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9b122420662104df8bedddda57c416404fd43cea
| 3,355
|
py
|
Python
|
aioouimeaux/device/__init__.py
|
frawau/aioouimeaux
|
ea473ded95e41e350793b0e289944a359049c501
|
[
"BSD-3-Clause"
] | 2
|
2019-01-26T02:44:14.000Z
|
2019-08-06T00:40:56.000Z
|
aioouimeaux/device/__init__.py
|
frawau/aioouimeaux
|
ea473ded95e41e350793b0e289944a359049c501
|
[
"BSD-3-Clause"
] | 1
|
2019-05-23T22:35:27.000Z
|
2019-05-25T20:23:50.000Z
|
aioouimeaux/device/__init__.py
|
frawau/aioouimeaux
|
ea473ded95e41e350793b0e289944a359049c501
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
from urllib.parse import urlsplit
import asyncio as aio
from functools import partial
from .api.service import Service
from .api.xsd import device as deviceParser
from ..utils import requests_get
log = logging.getLogger(__name__)
class DeviceUnreachable(Exception): pass
class UnknownService(Exception): pass
class UnknownSignal(Exception): pass
class NotACallable(Exception): pass
class Device(object):
def __init__(self, url):
self._state = None
self.host = urlsplit(url).hostname
#self.port = urlsplit(url).port
self.services = {}
self.initialized = aio.Future()
self._callback = {"statechange":None}
xx = aio.ensure_future(self._get_xml(url))
    async def _get_xml(self, url):
base_url = url.rsplit('/', 1)[0]
xml = await requests_get(url)
self._config = deviceParser.parseString(xml.raw_body).device
sl = self._config.serviceList
for svc in sl.service:
svcname = svc.get_serviceType().split(':')[-2]
service = Service(svc, base_url)
await service.initialized
service.eventSubURL = base_url + svc.get_eventSubURL()
self.services[svcname] = service
setattr(self, svcname, service)
fut = self.basicevent.GetBinaryState()
await fut
self._state = fut.result()["BinaryState"]
self.initialized.set_result(True)
    def register_callback(self, signal, func):
if func is not None:
if signal not in self._callback:
raise UnknownSignal
if not callable(func):
raise NotACallable
            self._callback[signal] = func
def _update_state(self, value):
self._state = int(value)
if self._callback["statechange"]:
if aio.iscoroutinefunction(self._callback["statechange"]):
aio.ensure_future(self._callback["statechange"](self))
else:
self._callback["statechange"](self)
def get_state(self, force_update=False):
"""
Returns 0 if off and 1 if on.
"""
if force_update or self._state is None:
xx = self.basicevent.GetBinaryState()
return self._state
def get_service(self, name):
try:
return self.services[name]
except KeyError:
raise UnknownService(name)
def list_services(self):
return self.services.keys()
def ping(self):
try:
self.get_state()
except Exception:
raise DeviceUnreachable(self)
def explain(self,prefix=""):
for name, svc in self.services.items():
print("{}{}".format(prefix, name))
print(prefix+'-' * len(name))
for aname, action in svc.actions.items():
print("%s %s(%s)" % (prefix,aname, ', '.join(action.args)))
print()
@property
def model(self):
return self._config.modelDescription
@property
def name(self):
return self._config.friendlyName
@property
def serialnumber(self):
return self._config.serialNumber
def test():
device = Device("http://10.42.1.102:49152/setup.xml")
print(device.get_service('basicevent').SetBinaryState(BinaryState=1))
if __name__ == "__main__":
test()
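# A minimal async usage sketch: Device schedules its own discovery with
# ensure_future, so an event loop must be running and `initialized` should be
# awaited before services are used (URL reused from test() above):
# async def main():
#     device = Device("http://10.42.1.102:49152/setup.xml")
#     await device.initialized
#     device.explain()
#     print(device.get_state())
# aio.get_event_loop().run_until_complete(main())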
| 28.432203
| 76
| 0.614903
| 378
| 3,355
| 5.304233
| 0.338624
| 0.041895
| 0.057357
| 0.029925
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007825
| 0.276304
| 3,355
| 117
| 77
| 28.675214
| 0.817957
| 0.017884
| 0
| 0.057471
| 0
| 0
| 0.041896
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.137931
| false
| 0.045977
| 0.08046
| 0.045977
| 0.344828
| 0.057471
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9b1232a2760be1096b010b97407d362bad15d50f
| 2,012
|
py
|
Python
|
src/lib/localtime.py
|
RonaldHiemstra/BronartsmeiH
|
1ad3838b43abfe9a1f3416334439c8056aa50dde
|
[
"MIT"
] | null | null | null |
src/lib/localtime.py
|
RonaldHiemstra/BronartsmeiH
|
1ad3838b43abfe9a1f3416334439c8056aa50dde
|
[
"MIT"
] | 3
|
2021-03-17T16:05:01.000Z
|
2021-05-01T18:47:43.000Z
|
src/lib/localtime.py
|
RonaldHiemstra/BronartsmeiH
|
1ad3838b43abfe9a1f3416334439c8056aa50dde
|
[
"MIT"
] | null | null | null |
"""File providing localtime support."""
import time
import network
import ntptime
from machine import RTC, reset
from config import Config
system_config = Config('system_config.json')
class Localtime():
"""Synchronized realtime clock using NTP."""
def __init__(self, utc_offset=None):
self.utc_offset = utc_offset or system_config.get('utc_offset')
self.__synced = None
self._sync()
def _sync(self):
try:
ntptime.settime() # Synchronize the system time using NTP
except Exception as ex:
print('ERROR: ntp.settime() failed. err:', ex)
if network.WLAN().isconnected():
reset()
# year, month, day, day_of_week, hour, minute, second, millisecond
datetime_ymd_w_hms_m = list(RTC().datetime())
datetime_ymd_w_hms_m[4] += self.utc_offset
RTC().init(datetime_ymd_w_hms_m)
self.__synced = datetime_ymd_w_hms_m[2]
del datetime_ymd_w_hms_m
def now(self):
"""Retrieve a snapshot of the current time in milliseconds accurate."""
class Now():
"""Class representing a snapshot of the current time."""
def __init__(self):
(self.year, self.mon, self.day, self.dow,
self.hour, self.min, self.sec, self.msec) = RTC().datetime()
self._time = None
def get_time(self) -> float:
"""Convert this time snapshot to a time float value."""
if self._time is None:
self._time = time.mktime([self.year, self.mon, self.day,
self.hour, self.min, self.sec, 0, 0])
# self._time += self.msec / 1000 # float overflow when adding msec :(
return self._time
snapshot = Now()
if snapshot.day != self.__synced and snapshot.hour == 4: # sync every day @ 4am
self._sync()
snapshot = Now()
return snapshot
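# A minimal usage sketch (assumes a MicroPython board with the network already
# configured, since _sync() needs NTP; utc_offset=2 is an illustrative value):
# lt = Localtime(utc_offset=2)
# snap = lt.now()
# print(snap.year, snap.hour, snap.min, snap.get_time())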
| 39.45098
| 92
| 0.578032
| 248
| 2,012
| 4.475806
| 0.379032
| 0.040541
| 0.054054
| 0.067568
| 0.2
| 0.127928
| 0.046847
| 0
| 0
| 0
| 0
| 0.007278
| 0.317097
| 2,012
| 50
| 93
| 40.24
| 0.800582
| 0.215706
| 0
| 0.102564
| 0
| 0
| 0.039406
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.128205
| false
| 0
| 0.128205
| 0
| 0.358974
| 0.025641
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9b126b83c2c4f4a5775d0727f5ece4feb0b27a5c
| 448
|
py
|
Python
|
accounts/api/urls.py
|
tejaswari7/JagratiWebApp
|
e9030f8bd6319a7bb43e036bb7bc43cca01d64a1
|
[
"MIT"
] | 59
|
2019-12-05T13:23:14.000Z
|
2021-12-07T13:54:25.000Z
|
accounts/api/urls.py
|
tejaswari7/JagratiWebApp
|
e9030f8bd6319a7bb43e036bb7bc43cca01d64a1
|
[
"MIT"
] | 266
|
2020-09-22T16:22:56.000Z
|
2021-10-17T18:13:11.000Z
|
accounts/api/urls.py
|
tejaswari7/JagratiWebApp
|
e9030f8bd6319a7bb43e036bb7bc43cca01d64a1
|
[
"MIT"
] | 213
|
2020-05-20T18:17:21.000Z
|
2022-03-06T11:03:42.000Z
|
from django.urls import path
from . import views
urlpatterns = [
path('register/', views.registration_view, name='api_register'),
path('login/', views.LoginView.as_view(), name='api_login'),
path('complete_profile/', views.complete_profile_view, name='api_complete_profile'),
path('logout/', views.LogoutView.as_view(), name='api_logout'),
path('check_login_status/', views.check_login_status, name='api_check_login_status'),
]
| 44.8
| 89
| 0.736607
| 59
| 448
| 5.288136
| 0.355932
| 0.112179
| 0.141026
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104911
| 448
| 10
| 90
| 44.8
| 0.778055
| 0
| 0
| 0
| 0
| 0
| 0.291759
| 0.048998
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9b1bd86935affb209f3416a74dae1cedee23495f
| 1,733
|
py
|
Python
|
SimpleBeep.py
|
RalphBacon/219-Raspberry-Pi-PICO-Sound-Generation
|
1c7a5cbfb5373aa5eccde00638bbdff062c57a2d
|
[
"MIT"
] | 2
|
2021-07-15T14:11:29.000Z
|
2022-03-25T23:20:54.000Z
|
SimpleBeep.py
|
RalphBacon/219-Raspberry-Pi-PICO-Sound-Generation
|
1c7a5cbfb5373aa5eccde00638bbdff062c57a2d
|
[
"MIT"
] | null | null | null |
SimpleBeep.py
|
RalphBacon/219-Raspberry-Pi-PICO-Sound-Generation
|
1c7a5cbfb5373aa5eccde00638bbdff062c57a2d
|
[
"MIT"
] | 1
|
2021-07-15T14:11:48.000Z
|
2021-07-15T14:11:48.000Z
|
# Import the required 'libraries' for pin definitions and PWM
from machine import Pin, PWM
# Also import a subset for sleep and millisecond sleep. If you just import
# the utime you will have to prefix each call with "utime."
from utime import sleep, sleep_ms
# Define what the buzzer object is - a PWM output on pin 15
buzzer = PWM(Pin(15))
# A list of frequencies
tones = (200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750, 800, 850, 900, 950, 1000, 1100, 1200, 1400, 1500)
# Define the function to play a single tone then stop
def buzz(freq):
    # Set the frequency
    buzzer.freq(freq)
    # Set the duty cycle (affects volume)
    buzzer.duty_u16(15000)
    # Let the sound continue for X milliseconds
    sleep_ms(30)
    # Now switch the sound off
    buzzer.duty_u16(0)
    # And delay a small amount (gap between tones)
    sleep_ms(20)
# Define a similar function with no delay between tones
def buzz2(freq):
    buzzer.freq(freq)
    buzzer.duty_u16(15000)
# Now sound the tones, one after the other
for tone in range(len(tones)):
buzz(tones[tone])
# Small gap in SECONDS after the ascending tones
sleep(1)
# Don't do this, it puts the device into Deep Sleep but it reboots on wakeup just
# like the ESP8266
#machine.deepsleep(1)
# Now sound the tones IN REVERSE ORDER ie descending
for tone in range(len(tones) -1, -1, -1):
buzz(tones[tone])
# Another delay
sleep(1)
# Now sound ALL the frequencies from X to Y
for tone in range(500, 2500):
buzz2(tone)
sleep_ms(5)
buzzer.duty_u16(0)
# And repeat in reverse order
for tone in range(2500, 500, -1):
buzz2(tone)
sleep_ms(4)
buzzer.duty_u16(0)
| 28.883333
| 119
| 0.671091
| 284
| 1,733
| 4.059859
| 0.464789
| 0.030356
| 0.056375
| 0.048569
| 0.06765
| 0.038161
| 0
| 0
| 0
| 0
| 0
| 0.098174
| 0.241777
| 1,733
| 60
| 120
| 28.883333
| 0.7793
| 0.508367
| 0
| 0.481481
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.074074
| 0
| 0.148148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9b1c20b6056395f07046b2fb8132dfe7ff823554
| 1,789
|
py
|
Python
|
vendor/packages/sqlalchemy/test/orm/test_bind.py
|
jgmize/kitsune
|
8f23727a9c7fcdd05afc86886f0134fb08d9a2f0
|
[
"BSD-3-Clause"
] | 2
|
2016-05-09T09:17:35.000Z
|
2016-08-03T16:30:16.000Z
|
test/orm/test_bind.py
|
clones/sqlalchemy
|
c9f08aa78a48ba53dd221d3c5de54e5956ecf806
|
[
"MIT"
] | null | null | null |
test/orm/test_bind.py
|
clones/sqlalchemy
|
c9f08aa78a48ba53dd221d3c5de54e5956ecf806
|
[
"MIT"
] | null | null | null |
from sqlalchemy.test.testing import assert_raises, assert_raises_message
from sqlalchemy import MetaData, Integer
from sqlalchemy.test.schema import Table
from sqlalchemy.test.schema import Column
from sqlalchemy.orm import mapper, create_session
import sqlalchemy as sa
from sqlalchemy.test import testing
from test.orm import _base
class BindTest(_base.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('test_table', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('data', Integer))
@classmethod
def setup_classes(cls):
class Foo(_base.BasicEntity):
pass
@classmethod
@testing.resolve_artifact_names
def setup_mappers(cls):
meta = MetaData()
test_table.tometadata(meta)
assert meta.tables['test_table'].bind is None
mapper(Foo, meta.tables['test_table'])
@testing.resolve_artifact_names
def test_session_bind(self):
engine = self.metadata.bind
for bind in (engine, engine.connect()):
try:
sess = create_session(bind=bind)
assert sess.bind is bind
f = Foo()
sess.add(f)
sess.flush()
assert sess.query(Foo).get(f.id) is f
finally:
if hasattr(bind, 'close'):
bind.close()
@testing.resolve_artifact_names
def test_session_unbound(self):
sess = create_session()
sess.add(Foo())
assert_raises_message(
sa.exc.UnboundExecutionError,
('Could not locate a bind configured on Mapper|Foo|test_table '
'or this Session'),
sess.flush)
| 29.816667
| 75
| 0.618222
| 204
| 1,789
| 5.264706
| 0.372549
| 0.078212
| 0.067039
| 0.075419
| 0.160149
| 0.07635
| 0.07635
| 0
| 0
| 0
| 0
| 0
| 0.297373
| 1,789
| 59
| 76
| 30.322034
| 0.854415
| 0
| 0
| 0.122449
| 0
| 0
| 0.064913
| 0.011752
| 0
| 0
| 0
| 0
| 0.102041
| 1
| 0.102041
| false
| 0.020408
| 0.163265
| 0
| 0.306122
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9b1ea81c58845b4a3bb52fdf9a88f5aa5548c833
| 3,316
|
py
|
Python
|
Chapter08/ppo/ppo_kb.py
|
rwill128/TensorFlow-Reinforcement-Learning-Quick-Start-Guide
|
45ec2bd23a49ed72ce75f8c8d440ce7840c8ffce
|
[
"MIT"
] | 40
|
2019-05-19T01:29:12.000Z
|
2022-03-27T04:37:31.000Z
|
Chapter08/ppo/ppo_kb.py
|
rwill128/TensorFlow-Reinforcement-Learning-Quick-Start-Guide
|
45ec2bd23a49ed72ce75f8c8d440ce7840c8ffce
|
[
"MIT"
] | null | null | null |
Chapter08/ppo/ppo_kb.py
|
rwill128/TensorFlow-Reinforcement-Learning-Quick-Start-Guide
|
45ec2bd23a49ed72ce75f8c8d440ce7840c8ffce
|
[
"MIT"
] | 19
|
2019-05-02T19:55:57.000Z
|
2022-02-26T01:51:45.000Z
|
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import gym
from class_ppo import *
from gym_torcs import TorcsEnv
#----------------------------------------------------------------------------------------
EP_MAX = 2000
EP_LEN = 1000
GAMMA = 0.95
A_LR = 1e-4
C_LR = 1e-4
BATCH = 128
A_UPDATE_STEPS = 10
C_UPDATE_STEPS = 10
S_DIM, A_DIM = 29, 3
METHOD = dict(name='clip', epsilon=0.1)
# train_test = 0 for train; =1 for test
train_test = 0
# irestart = 0 for fresh restart; =1 for restart from ckpt file
irestart = 0
iter_num = 0
if (irestart == 0):
iter_num = 0
#----------------------------------------------------------------------------------------
sess = tf.Session()
ppo = PPO(sess, S_DIM, A_DIM, A_LR, C_LR, A_UPDATE_STEPS, C_UPDATE_STEPS, METHOD)
saver = tf.train.Saver()
env = TorcsEnv(vision=False, throttle=True, gear_change=False)
#----------------------------------------------------------------------------------------
if (train_test == 0 and irestart == 0):
sess.run(tf.global_variables_initializer())
else:
saver.restore(sess, "ckpt/model")
for ep in range(iter_num, EP_MAX):
print("-"*50)
print("episode: ", ep)
if np.mod(ep, 100) == 0:
ob = env.reset(relaunch=True) #relaunch TORCS every N episode because of the memory leak error
else:
ob = env.reset()
s = np.hstack((ob.angle, ob.track, ob.trackPos, ob.speedX, ob.speedY, ob.speedZ, ob.wheelSpinVel/100.0, ob.rpm))
buffer_s, buffer_a, buffer_r = [], [], []
ep_r = 0
for t in range(EP_LEN): # in one episode
a = ppo.choose_action(s)
a[0] = np.clip(a[0],-1.0,1.0)
a[1] = np.clip(a[1],0.0,1.0)
a[2] = np.clip(a[2],0.0,1.0)
#print("a: ", a)
ob, r, done, _ = env.step(a)
s_ = np.hstack((ob.angle, ob.track, ob.trackPos, ob.speedX, ob.speedY, ob.speedZ, ob.wheelSpinVel/100.0, ob.rpm))
if (train_test == 0):
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append(r)
s = s_
ep_r += r
if (train_test == 0):
# update ppo
            if (t+1) % BATCH == 0 or t == EP_LEN-1 or done:
#if t == EP_LEN-1 or done == True:
v_s_ = ppo.get_v(s_)
discounted_r = []
for r in buffer_r[::-1]:
v_s_ = r + GAMMA * v_s_
discounted_r.append(v_s_)
discounted_r.reverse()
bs = np.array(np.vstack(buffer_s))
ba = np.array(np.vstack(buffer_a))
br = np.array(discounted_r)[:, np.newaxis]
buffer_s, buffer_a, buffer_r = [], [], []
print("ppo update")
ppo.update(bs, ba, br)
#print("screen out: ")
#ppo.screen_out(bs, ba, br)
#print("-"*50)
        if done:
            break
print('Ep: %i' % ep,"|Ep_r: %i" % ep_r,("|Lam: %.4f" % METHOD['lam']) if METHOD['name'] == 'kl_pen' else '',)
if (train_test == 0):
with open("performance.txt", "a") as myfile:
myfile.write(str(ep) + " " + str(t) + " " + str(round(ep_r,4)) + "\n")
if (train_test == 0 and ep%25 == 0):
saver.save(sess, "ckpt/model")
| 25.507692
| 123
| 0.495778
| 474
| 3,316
| 3.312236
| 0.291139
| 0.040127
| 0.044586
| 0.038217
| 0.217834
| 0.150318
| 0.123567
| 0.101911
| 0.101911
| 0.101911
| 0
| 0.036632
| 0.283776
| 3,316
| 129
| 124
| 25.705426
| 0.624421
| 0.16918
| 0
| 0.121622
| 0
| 0
| 0.037213
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.081081
| 0
| 0.081081
| 0.054054
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9b22737cee51dac49b519ede06b216b061a09833
| 1,628
|
py
|
Python
|
py/garage/tests/threads/test_executors.py
|
clchiou/garage
|
446ff34f86cdbd114b09b643da44988cf5d027a3
|
[
"MIT"
] | 3
|
2016-01-04T06:28:52.000Z
|
2020-09-20T13:18:40.000Z
|
py/garage/tests/threads/test_executors.py
|
clchiou/garage
|
446ff34f86cdbd114b09b643da44988cf5d027a3
|
[
"MIT"
] | null | null | null |
py/garage/tests/threads/test_executors.py
|
clchiou/garage
|
446ff34f86cdbd114b09b643da44988cf5d027a3
|
[
"MIT"
] | null | null | null |
import unittest
import threading
from garage.threads import executors
class ExecutorTest(unittest.TestCase):
def test_executor(self):
pool = executors.WorkerPool()
self.assertEqual(0, len(pool))
# No jobs, no workers are hired.
with executors.Executor(pool, 1) as executor:
self.assertEqual(0, len(pool))
self.assertEqual(0, len(pool))
with executors.Executor(pool, 1) as executor:
f1 = executor.submit(sum, (1, 2, 3))
f2 = executor.submit(sum, (4, 5, 6))
self.assertEqual(0, len(pool))
self.assertEqual(6, f1.result())
self.assertEqual(15, f2.result())
self.assertEqual(1, len(pool))
for worker in pool:
self.assertFalse(worker._get_future().done())
def test_shutdown(self):
pool = executors.WorkerPool()
self.assertEqual(0, len(pool))
with executors.Executor(pool, 1) as executor:
f1 = executor.submit(sum, (1, 2, 3))
f2 = executor.submit(sum, (4, 5, 6))
self.assertEqual(0, len(pool))
self.assertEqual(6, f1.result())
self.assertEqual(15, f2.result())
executor.shutdown(wait=False)
# shutdown(wait=False) does not return workers to the pool.
self.assertEqual(0, len(pool))
event = threading.Event()
with executors.Executor(pool, 1) as executor:
executor.submit(event.wait)
executor.shutdown(wait=False)
self.assertFalse(executor._work_queue)
if __name__ == '__main__':
unittest.main()
| 29.071429
| 67
| 0.595823
| 194
| 1,628
| 4.927835
| 0.304124
| 0.188285
| 0.117155
| 0.139121
| 0.574268
| 0.574268
| 0.546025
| 0.446653
| 0.446653
| 0.366109
| 0
| 0.032562
| 0.28317
| 1,628
| 55
| 68
| 29.6
| 0.786632
| 0.054054
| 0
| 0.621622
| 0
| 0
| 0.005205
| 0
| 0
| 0
| 0
| 0
| 0.378378
| 1
| 0.054054
| false
| 0
| 0.081081
| 0
| 0.162162
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9b227c99cc76d04bed95afc7abf3ffae257b32fd
| 2,619
|
py
|
Python
|
exporter/BattleRoyal.py
|
dl-stuff/dl-datamine
|
aae37710d2525aaa2b83f809e908be67f074c2d2
|
[
"MIT"
] | 3
|
2020-04-29T12:35:33.000Z
|
2022-03-22T20:08:22.000Z
|
exporter/BattleRoyal.py
|
dl-stuff/dl-datamine
|
aae37710d2525aaa2b83f809e908be67f074c2d2
|
[
"MIT"
] | 1
|
2020-10-23T00:08:35.000Z
|
2020-10-29T04:10:35.000Z
|
exporter/BattleRoyal.py
|
dl-stuff/dl-datamine
|
aae37710d2525aaa2b83f809e908be67f074c2d2
|
[
"MIT"
] | 4
|
2020-04-05T15:09:08.000Z
|
2020-10-21T15:08:34.000Z
|
import os
import json
from tqdm import tqdm
from loader.Database import DBViewIndex, DBView, check_target_path
from exporter.Shared import snakey
from exporter.Adventurers import CharaData
from exporter.Dragons import DragonData
class BattleRoyalCharaSkin(DBView):
def __init__(self, index):
super().__init__(index, "BattleRoyalCharaSkin")
def process_result(self, res, **kwargs):
self.link(res, "_BaseCharaId", "CharaData", full_query=False)
self.index["CharaData"].set_animation_reference(res["_BaseCharaId"])
self.link(res, "_SpecialSkillId", "SkillData", **kwargs)
self.index["ActionParts"].animation_reference
filtered_res = {}
filtered_res["_Id"] = res["_Id"]
for name_key in ("_Name", "_NameJP", "_NameCN"):
filtered_res[name_key] = res["_BaseCharaId"][name_key]
filtered_res["_SpecialSkillId"] = res["_SpecialSkillId"]
return filtered_res
def export_all_to_folder(self, out_dir="./out", ext=".json"):
where = "_SpecialSkillId != 0"
out_dir = os.path.join(out_dir, "_br")
all_res = self.get_all(where=where)
check_target_path(out_dir)
sorted_res = {}
for res in tqdm(all_res, desc="_br"):
res = self.process_result(res)
sorted_res[res["_Id"]] = res
out_name = snakey(f"_chara_skin.json")
output = os.path.join(out_dir, out_name)
with open(output, "w", newline="", encoding="utf-8") as fp:
json.dump(sorted_res, fp, indent=2, ensure_ascii=False, default=str)
class BattleRoyalUnit(DBView):
def __init__(self, index):
super().__init__(index, "BattleRoyalUnit")
@staticmethod
def outfile_name(res, ext=".json"):
c_res = res["_BaseCharaDataId"]
name = "UNKNOWN" if "_Name" not in c_res else c_res["_Name"] if "_SecondName" not in c_res else c_res["_SecondName"]
return f'{res["_Id"]}_{name}{ext}'
def process_result(self, res, **kwargs):
self.link(res, "_BaseCharaDataId", "CharaData", condense=False)
# self.link(res, "_DragonDataId", "DragonData", **kwargs)
self.link(res, "_SkillId", "SkillData", **kwargs)
for ab in range(1, 11):
self.link(res, f"_ItemAbility{ab:02}", "AbilityData", **kwargs)
return res
def export_all_to_folder(self, out_dir="./out", ext=".json"):
out_dir = os.path.join(out_dir, "_br")
super().export_all_to_folder(out_dir, ext)
if __name__ == "__main__":
index = DBViewIndex()
view = BattleRoyalUnit(index)
view.export_all_to_folder()
| 37.956522
| 124
| 0.649866
| 329
| 2,619
| 4.844985
| 0.322188
| 0.033877
| 0.041405
| 0.04266
| 0.21079
| 0.200753
| 0.200753
| 0.179423
| 0.104141
| 0.053952
| 0
| 0.003876
| 0.211913
| 2,619
| 68
| 125
| 38.514706
| 0.768411
| 0.021
| 0
| 0.145455
| 0
| 0
| 0.162763
| 0.009368
| 0
| 0
| 0
| 0
| 0
| 1
| 0.127273
| false
| 0
| 0.127273
| 0
| 0.345455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1955c751f92a084391167fe5becfed42fd578e2
| 772
|
py
|
Python
|
test/test_slope_heuristic.py
|
StatisKit/Core
|
79d8ec07c203eb7973a6cf482852ddb2e8e1e93e
|
[
"Apache-2.0"
] | null | null | null |
test/test_slope_heuristic.py
|
StatisKit/Core
|
79d8ec07c203eb7973a6cf482852ddb2e8e1e93e
|
[
"Apache-2.0"
] | 7
|
2018-03-20T14:23:16.000Z
|
2019-04-09T11:57:57.000Z
|
test/test_slope_heuristic.py
|
StatisKit/Core
|
79d8ec07c203eb7973a6cf482852ddb2e8e1e93e
|
[
"Apache-2.0"
] | 7
|
2017-04-28T07:41:01.000Z
|
2021-03-15T18:17:20.000Z
|
import matplotlib
matplotlib.use('Agg')
from statiskit import core
from statiskit.data import core as data
import unittest
from nose.plugins.attrib import attr
@attr(linux=True,
osx=True,
win=True,
level=1)
class TestSlopeHeuristic(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Test multivariate data construction"""
cls._data = data.load('capushe')
@attr(win=False)
def test_slope_heuristic(self):
"""Test slope heuristic"""
sh = core.SlopeHeuristic([pen.value for pen in self._data.pen.events], [-contrast.value for contrast in self._data.contrast.events])
sh.plot()
@classmethod
def tearDownClass(cls):
"""Test multivariate data deletion"""
del cls._data
| 25.733333
| 140
| 0.676166
| 95
| 772
| 5.431579
| 0.505263
| 0.050388
| 0.073643
| 0.089147
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00165
| 0.215026
| 772
| 30
| 141
| 25.733333
| 0.849835
| 0.11399
| 0
| 0.095238
| 0
| 0
| 0.014948
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.238095
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1966b5ea95fad48b2c50f6ae0e84a62362e0d49
| 688
|
py
|
Python
|
holteandtalley/test/matToJson.py
|
garrettdreyfus/HolteAndTalleyMLDPy
|
baab854ef955664437f04fdc7de100dcc894bbda
|
[
"MIT"
] | 18
|
2019-03-07T06:25:58.000Z
|
2022-03-07T04:38:36.000Z
|
holteandtalley/test/matToJson.py
|
garrettdreyfus/HolteAndTalleyMLDPy
|
baab854ef955664437f04fdc7de100dcc894bbda
|
[
"MIT"
] | null | null | null |
holteandtalley/test/matToJson.py
|
garrettdreyfus/HolteAndTalleyMLDPy
|
baab854ef955664437f04fdc7de100dcc894bbda
|
[
"MIT"
] | 3
|
2020-06-21T23:22:19.000Z
|
2022-03-07T05:11:14.000Z
|
from scipy.io import loadmat
import pickle
mldinfo = loadmat('mldinfo.mat')["mldinfo"]
out = {}
print(mldinfo)
for i in mldinfo:
    line = {}
line["floatNumber"] = i[0]
line["cycleNumber"] = i[26]
line["tempMLTFIT"] = i[27]
line["tempMLTFITIndex"] = i[28]
line["densityMLTFIT"] = i[30]
line["salinityMLTFIT"] = i[31]
line["steepest"] = i[29]
line["tempAlgo"] = i[4]
line["salinityAlgo"] = i[8]
line["densityAlgo"] = i[9]
line["tempThreshold"] = i[13]
line["densityThreshold"] = i[17]
line["tempGradient"] = i[21]
line["densityGradient"] = i[22]
    out[i[0], i[26]] = line
with open("matOutput.pickle", "wb") as f:
    pickle.dump(out, f)
| 25.481481
| 42
| 0.604651
| 93
| 688
| 4.473118
| 0.526882
| 0.009615
| 0.033654
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048649
| 0.193314
| 688
| 26
| 43
| 26.461538
| 0.700901
| 0
| 0
| 0
| 0
| 0
| 0.298399
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.083333
| 0.041667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f19839bccee38959af0b437965974c79d3cf702f
| 1,578
|
py
|
Python
|
natlas-server/natlas-db.py
|
purplesecops/natlas
|
74edd7ba9b5c265ec06dfdb3f7ee0b38751e5ef8
|
[
"Apache-2.0"
] | 500
|
2018-09-27T17:28:11.000Z
|
2022-03-30T02:05:57.000Z
|
natlas-server/natlas-db.py
|
purplesecops/natlas
|
74edd7ba9b5c265ec06dfdb3f7ee0b38751e5ef8
|
[
"Apache-2.0"
] | 888
|
2018-09-20T05:04:46.000Z
|
2022-03-28T04:11:22.000Z
|
natlas-server/natlas-db.py
|
purplesecops/natlas
|
74edd7ba9b5c265ec06dfdb3f7ee0b38751e5ef8
|
[
"Apache-2.0"
] | 79
|
2019-02-13T19:49:21.000Z
|
2022-02-27T16:39:04.000Z
|
#!/usr/bin/env python
"""
This is a special app instance that allows us to perform database operations
without going through the app's migration_needed check. Running this script
is functionally equivalent to what `flask db` normally does. The reason we
can't continue to use that is that command is that it invokes the app instance from
FLASK_APP env variable (natlas-server.py) which performs the migration check and exits
during initialization.
"""
import argparse
from app import create_app
from config import Config
from migrations import migrator
parser_desc = """Perform database operations for Natlas.\
It is best practice to take a backup of your database before you upgrade or downgrade, just in case something goes wrong.\
"""
def main():
parser = argparse.ArgumentParser(description=parser_desc)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
"--upgrade",
action="store_true",
help="Perform a database upgrade, if necessary",
)
group.add_argument(
"--downgrade",
action="store_true",
help="Revert the most recent database upgrade. Danger: This will destroy data as necessary to revert to the previous version.",
)
args = parser.parse_args()
config = Config()
app = create_app(config, migrating=True)
if args.upgrade:
app.config.update({"DB_AUTO_UPGRADE": True})
migrator.handle_db_upgrade(app)
elif args.downgrade:
migrator.handle_db_downgrade(app)
if __name__ == "__main__":
main()
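# Example invocations (flags defined above; back up the database first, as
# parser_desc advises):
#   python natlas-db.py --upgrade
#   python natlas-db.py --downgrade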
| 33.574468
| 135
| 0.716096
| 214
| 1,578
| 5.149533
| 0.53271
| 0.019964
| 0.045372
| 0.034483
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.207224
| 1,578
| 46
| 136
| 34.304348
| 0.880895
| 0.280101
| 0
| 0.133333
| 0
| 0.066667
| 0.349638
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0.133333
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f19909329b0b6001c89ab80ab88194f8528fba3b
| 4,368
|
py
|
Python
|
ontask/action/forms/crud.py
|
pinheiroo27/ontask_b
|
23fee8caf4e1c5694a710a77f3004ca5d9effeac
|
[
"MIT"
] | 33
|
2017-12-02T04:09:24.000Z
|
2021-11-07T08:41:57.000Z
|
ontask/action/forms/crud.py
|
pinheiroo27/ontask_b
|
23fee8caf4e1c5694a710a77f3004ca5d9effeac
|
[
"MIT"
] | 189
|
2017-11-16T04:06:29.000Z
|
2022-03-11T23:35:59.000Z
|
ontask/action/forms/crud.py
|
pinheiroo27/ontask_b
|
23fee8caf4e1c5694a710a77f3004ca5d9effeac
|
[
"MIT"
] | 30
|
2017-11-30T03:35:44.000Z
|
2022-01-31T03:08:08.000Z
|
# -*- coding: utf-8 -*-
"""Forms to process action related fields.
ActionUpdateForm: Basic form to process the name/description of an action
ActionForm: Inherits from Basic to process name, description and type
ActionDescriptionForm: Inherits from basic but process only description (for
surveys)
FilterForm: Form to process filter elements
ConditionForm: Form to process condition elements
"""
from builtins import str
import json
from typing import Dict
from django import forms
from django.utils.translation import ugettext_lazy as _
from ontask import models
from ontask.core import RestrictedFileField
import ontask.settings
class ActionUpdateForm(forms.ModelForm):
"""Basic class to edit name and description."""
def __init__(self, *args, **kwargs):
"""Store user and wokflow."""
self.workflow = kwargs.pop('workflow')
super().__init__(*args, **kwargs)
def clean(self) -> Dict:
"""Verify that the name is not taken."""
form_data = super().clean()
# Check if the name already exists
name_exists = self.workflow.actions.filter(
name=self.data['name'],
).exclude(id=self.instance.id).exists()
if name_exists:
self.add_error(
'name',
_('There is already an action with this name.'),
)
return form_data
class Meta:
"""Select Action and the two fields."""
model = models.Action
fields = ('name', 'description_text')
class ActionForm(ActionUpdateForm):
"""Edit name, description and action type."""
    def __init__(self, *args: str, **kwargs: str):
        """Adjust widget choices depending on action type."""
        super().__init__(*args, **kwargs)
at_field = self.fields['action_type']
at_field.widget.choices = [
(key, value)
for key, value in models.Action.AVAILABLE_ACTION_TYPES.items()]
if len(models.Action.AVAILABLE_ACTION_TYPES) == 1:
# There is only one type of action. No need to generate the field.
# Set to value and hide
at_field.widget = forms.HiddenInput()
            at_field.initial = list(
                models.Action.AVAILABLE_ACTION_TYPES.items())[0][0]
class Meta(ActionUpdateForm.Meta):
"""Select action and the three fields."""
model = models.Action
fields = ('name', 'description_text', 'action_type')
class ActionDescriptionForm(forms.ModelForm):
"""Form to edit the description of an action."""
class Meta:
"""Select model and the description field."""
model = models.Action
fields = ('description_text',)
class ActionImportForm(forms.Form):
"""Form to edit information to import an action."""
upload_file = RestrictedFileField(
max_upload_size=int(ontask.settings.MAX_UPLOAD_SIZE),
content_types=json.loads(str(ontask.settings.CONTENT_TYPES)),
allow_empty_file=False,
label=_('File with previously exported OnTask actions'),
help_text=_('File containing a previously exported action'),
)
class RubricCellForm(forms.ModelForm):
"""Edit the content of a RubricCellForm."""
class Meta:
"""Select Action and the two fields."""
model = models.RubricCell
fields = ('description_text', 'feedback_text')
class RubricLOAForm(forms.Form):
"""Edit the levels of attainment of a rubric."""
levels_of_attainment = forms.CharField(
strip=True,
required=True,
label=_('Comma separated list of levels of attainment'))
def __init__(self, *args, **kwargs):
"""Store the criteria."""
self.criteria = kwargs.pop('criteria')
super().__init__(*args, **kwargs)
self.fields['levels_of_attainment'].initial = ', '.join(
self.criteria[0].categories)
def clean(self) -> Dict:
"""Check that the number of LOAs didn't change."""
form_data = super().clean()
current_n_loas = [
loa
for loa in form_data['levels_of_attainment'].split(',')
if loa]
if len(current_n_loas) != len(self.criteria[0].categories):
self.add_error(
'levels_of_attainment',
_('The number of levels cannot change.'))
return form_data
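# A minimal usage sketch for ActionUpdateForm (hypothetical view code;
# `workflow` and `action` are assumed to be existing model instances):
# form = ActionUpdateForm(
#     data={'name': 'New name', 'description_text': 'Updated'},
#     workflow=workflow,
#     instance=action)
# if form.is_valid():
#     form.save()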
| 29.315436
| 78
| 0.63576
| 513
| 4,368
| 5.259259
| 0.311891
| 0.026686
| 0.04003
| 0.016679
| 0.130838
| 0.110823
| 0.064122
| 0.064122
| 0.034841
| 0.034841
| 0
| 0.001844
| 0.255266
| 4,368
| 148
| 79
| 29.513514
| 0.827544
| 0.24794
| 0
| 0.233766
| 0
| 0
| 0.125938
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064935
| false
| 0
| 0.116883
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f199cbd96d64f014fd31d99a8774f29dfb8baff8
| 3,400
|
py
|
Python
|
apps/events/tests/admin_tests.py
|
Kpaubert/onlineweb4
|
9ac79f163bc3a816db57ffa8477ea88770d97807
|
[
"MIT"
] | 32
|
2017-02-22T13:38:38.000Z
|
2022-03-31T23:29:54.000Z
|
apps/events/tests/admin_tests.py
|
Kpaubert/onlineweb4
|
9ac79f163bc3a816db57ffa8477ea88770d97807
|
[
"MIT"
] | 694
|
2017-02-15T23:09:52.000Z
|
2022-03-31T23:16:07.000Z
|
apps/events/tests/admin_tests.py
|
Kpaubert/onlineweb4
|
9ac79f163bc3a816db57ffa8477ea88770d97807
|
[
"MIT"
] | 35
|
2017-09-02T21:13:09.000Z
|
2022-02-21T11:30:30.000Z
|
from django.contrib.auth.models import Group
from django.test import TestCase
from django.urls import reverse, reverse_lazy
from django_dynamic_fixture import G
from apps.authentication.models import OnlineUser
from ..constants import EventType
from ..models import Event
from .utils import (
add_event_permissions,
add_to_group,
create_committee_group,
generate_event,
)
EVENTS_ADMIN_LIST_URL = reverse_lazy("admin:events_event_changelist")
EVENTS_DASHBOARD_INDEX_URL = reverse_lazy("dashboard_events_index")
def event_admin(event: Event) -> str:
return reverse("admin:events_event_change", args=(event.id,))
def attendance_list(event: Event) -> str:
return reverse("event_attendees_pdf", args=(event.id,))
def event_dashboard(event: Event) -> str:
return reverse("dashboard_events_edit", args=(event.id,))
class EventAdminTestCase(TestCase):
def setUp(self):
self.admin_group = create_committee_group(G(Group, name="Arrkom"))
self.other_group: Group = G(Group, name="Buddy")
add_event_permissions(self.admin_group)
self.user: OnlineUser = G(OnlineUser)
self.client.force_login(self.user)
self.event = generate_event(EventType.SOSIALT, organizer=self.admin_group)
# General committee members should not be able to access event admin pages.
self.expected_resp_code_own_django = 302
self.expected_resp_code_own_dashboard = 403
def test_view_event_list_admin(self):
resp = self.client.get(EVENTS_ADMIN_LIST_URL)
self.assertEqual(self.expected_resp_code_own_django, resp.status_code)
def test_view_event_detail_admin(self):
resp = self.client.get(event_admin(self.event))
self.assertEqual(self.expected_resp_code_own_django, resp.status_code)
def test_view_event_attendance_list(self):
resp = self.client.get(attendance_list(self.event))
self.assertEqual(self.expected_resp_code_own_django, resp.status_code)
def test_view_event_list_dashboard(self):
resp = self.client.get(EVENTS_DASHBOARD_INDEX_URL)
self.assertEqual(self.expected_resp_code_own_dashboard, resp.status_code)
def test_view_event_detail_dashboard(self):
resp = self.client.get(event_dashboard(self.event))
self.assertEqual(self.expected_resp_code_own_dashboard, resp.status_code)
class EventAdminGroupTestCase(EventAdminTestCase):
def setUp(self):
super().setUp()
self.event = generate_event(EventType.SOSIALT, organizer=self.admin_group)
add_to_group(self.admin_group, self.user)
self.expected_resp_code_own_django = 200
self.expected_resp_code_own_dashboard = 200
def test_cannot_view_event_attendance_list_for_bedkom(self):
event = generate_event()
resp = self.client.get(attendance_list(event))
self.assertEqual(302, resp.status_code)
def test_cannot_view_event_detail_admin_for_bedkom(self):
event = generate_event(EventType.BEDPRES, organizer=self.other_group)
resp = self.client.get(event_admin(event))
self.assertEqual(302, resp.status_code)
def test_cannot_view_event_detail_dashboard_for_bedkom(self):
event = generate_event(EventType.BEDPRES, organizer=self.other_group)
resp = self.client.get(event_dashboard(event))
self.assertEqual(403, resp.status_code)
| 33.009709
| 83
| 0.746471
| 452
| 3,400
| 5.287611
| 0.188053
| 0.037657
| 0.060251
| 0.075314
| 0.58159
| 0.509623
| 0.351883
| 0.351883
| 0.337657
| 0.333891
| 0
| 0.0074
| 0.165294
| 3,400
| 102
| 84
| 33.333333
| 0.834743
| 0.021471
| 0
| 0.2
| 0
| 0
| 0.038195
| 0.029173
| 0
| 0
| 0
| 0
| 0.123077
| 1
| 0.2
| false
| 0
| 0.123077
| 0.046154
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f19aa91679864846081cef43f5707f10afbe079f
| 9,380
|
py
|
Python
|
instagram.py
|
Breizhux/picture-dl
|
3e2bfa590097db56d3326a4aa36d0dd37c1bacc3
|
[
"Unlicense"
] | null | null | null |
instagram.py
|
Breizhux/picture-dl
|
3e2bfa590097db56d3326a4aa36d0dd37c1bacc3
|
[
"Unlicense"
] | 2
|
2019-09-06T12:19:18.000Z
|
2019-09-06T15:21:36.000Z
|
instagram.py
|
Breizhux/picture-dl
|
3e2bfa590097db56d3326a4aa36d0dd37c1bacc3
|
[
"Unlicense"
] | null | null | null |
# coding: utf-8
import urllib2
import message_box
from ast import literal_eval
class InfoExtractor():
""" Extractor Information class for Instagram
Instagram InfoExtractor that, given url, extract information about the
image (or images) the URL refers to. This information includes the real
image URL, the image title, author and others. The information is stored
in a list of dictionary."""
def __init__(self, url, verbose) :
self.url = url
# self.print_ = message_box.print_(verbose)
self.result_list = []
self.raw_informations = None
self.info_dictionary = {
'username' : None,
'author' : None,
'profile_url' : None,
'is_several_images' : False,
'id' : None,
'title' : None,
'format' : ".jpg", #all images from instagram are jpg
'description' : None,
'comments' : None,
'date' : None,
'localization' : None,
'real_urls_and_dimensions' : [], # list of urls and dimensions(W-H),
'like_nb' : None,} # ex : [["url1", 1080, 1080],["url2", 640, 640]]
def get_informations(self) :
self.raw_informations = self.download_webpage_informations(self.url) #type dictionary
if self.get_type_link(self.raw_informations) == "post" :
self.get_information_single_image(self.raw_informations)
return self.result_list
elif self.get_type_link(self.raw_informations) == "account" :
self.get_information_many_images(self.raw_informations)
return self.result_list
elif self.get_type_link(self.raw_informations) == "tagpage" :
self.get_informations_tagpage_images(self.raw_informations)
return self.result_list
else : return "Invalid url"
def download_webpage_informations(self, url) :
""" Return the dictionary of image(s) and account informations. """
request = urllib2.Request(url)
fh = urllib2.urlopen(request)
source_code = fh.read()
source_code = source_code[
source_code.index('<script type="text/javascript">window._sharedData = ')+52:
source_code.index(';</script>\n<script type="text/javascript">window.__initialDataLoaded(window._sharedData);</script>')]
source_code = source_code.replace("false", "False")
source_code = source_code.replace("true", "True")
source_code = source_code.replace("null", "None")
dict_of_information = literal_eval(source_code)
return dict_of_information
def get_type_link(self, webpage_info) :
""" Return type url from Instagram : many images (acount) or single image
or undeterminate. The determination find if in the dictionary of source
code of page exist the ['entry_data']['PostPage'] keys (simple post) or
['entry_data']['ProfilePage'] keys exists (account url)"""
webpage_info = webpage_info['entry_data']
if webpage_info.has_key('PostPage') :
return "post"
elif webpage_info.has_key('ProfilePage') :
self.info_dictionary['is_several_images'] = True
return "account"
elif webpage_info.has_key('TagPage') :
self.info_dictionary['is_several_images'] = True
return "tagpage"
else :
return "undeterminate"
def get_information_single_image(self, raw_informations) :
""" Complete the dictionary with information of code source webpage.
The result is locate in a list of result (result list) in the form
of dictionary."""
webpage_info = raw_informations['entry_data']['PostPage'][0]['graphql']['shortcode_media']
self.info_dictionary['username'] = webpage_info['owner']['username']
self.info_dictionary['author'] = webpage_info['owner']['full_name']
self.info_dictionary['profile_url'] = webpage_info['owner']['profile_pic_url']
self.info_dictionary['id'] = webpage_info['shortcode']
title, description = self.get_title_and_description(webpage_info)
self.info_dictionary['title'] = title
self.info_dictionary['description'] = description
self.info_dictionary['comments'] = webpage_info['edge_media_to_comment']
self.info_dictionary['localization'] = webpage_info['localization']
for i in webpage_info['display_resources'] :
self.info_dictionary['real_urls_and_dimensions'].append([
i['src'],
i['config_width'],
i['config_height']])
#self.info_dictionary["sizes"]
self.info_dictionary['like_nb'] = webpage_info['edge_media_preview_like']['count']
self.complete_result_list()
def get_information_many_images(self, raw_informations) :
""" Complete dictionary and put this in result list at the rate of
one dictionary per image. The dictionary is reset at each loop of
research information for one image. """
webpage_info = raw_informations['entry_data']['ProfilePage'][0]['graphql']['user']
for i in webpage_info['edge_owner_to_timeline_media']['edges'] :
self.info_dictionary['username'] = webpage_info['username']
self.info_dictionary['author'] = webpage_info['full_name']
self.info_dictionary['profile_url'] = webpage_info['profile_pic_url_hd']
self.info_dictionary['id'] = i['node']['shortcode']
title, description = self.get_title_and_description(i['node'])
self.info_dictionary['title'] = title
self.info_dictionary['description'] = description
self.info_dictionary['comments'] = i['node']['edge_media_to_comment']
self.info_dictionary['localization'] = i['node']['localization']
for j in i['node']['thumbnail_resources'] :
self.info_dictionary['real_urls_and_dimensions'].append([
j['src'],
j['config_width'],
j['config_height']])
self.info_dictionary['real_urls_and_dimensions'].append([
i['node']['display_url'],
i['node']['dimensions']['width'],
i['node']['dimensions']['height']])
self.info_dictionary['like_nb'] = i['node']['edge_liked_by']['count']
self.complete_result_list()
def get_informations_tagpage_images(self, raw_informations) :
""" Complete dictionary and put this in result list at the rate of
one dictionary per image. The dictionary is reset at each loop of
research information for one image. """
webpage_info = raw_informations['entry_data']['TagPage'][0]['graphql']['hashtag']
for i in webpage_info['edge_hashtag_to_media']['edges'] :
self.info_dictionary['username'] = webpage_info['name']
self.info_dictionary['author'] = webpage_info['name']
self.info_dictionary['id'] = i['node']['shortcode']
title, description = self.get_title_and_description(i['node'])
self.info_dictionary['title'] = title
self.info_dictionary['description'] = description
self.info_dictionary['comments'] = i['node']['edge_media_to_comment']
for j in i['node']['thumbnail_resources'] :
self.info_dictionary['real_urls_and_dimensions'].append([
j['src'],
j['config_width'],
j['config_height']])
self.info_dictionary['real_urls_and_dimensions'].append([
i['node']['display_url'],
i['node']['dimensions']['width'],
i['node']['dimensions']['height']])
self.info_dictionary['like_nb'] = i['node']['edge_liked_by']['count']
self.complete_result_list()
def get_title_and_description(self, webpage_info) :
""" Return a title for image with description of image.
if description don't exists, it can't found title.
The title is crop to the first caracter found : [#,.,!,?,\n]"""
if len(webpage_info['edge_media_to_caption']['edges']) == 0 :
return "No title :(", "Because no description..."
description = webpage_info['edge_media_to_caption']['edges'][0]['node']['text']
end_title = ["#", ".", "!", "?", "\n"]
i = 1
        while i < len(description)-1 and description[i] not in end_title: i += 1  # check bounds before indexing
title = description[:i] if i < len(description) else "No title found ;("
return title.strip().replace("/","-"), description
def complete_result_list(self) :
""" Copy dictionary to result list, the list of dictionary.
There is one dictionary per image. After append dictionary
in list, clear it. """
self.result_list.append(self.info_dictionary)
self.info_dictionary = {
'username' : None,
'author' : None,
'profile_url' : None,
'is_several_images' : False,
'id' : None,
'title' : None,
'format' : ".jpg",
'description' : None,
'comments' : None,
'date' : None,
'localization' : None,
'real_urls_and_dimensions' : [],
'like_nb' : None,}
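# A minimal usage sketch (Python 2, matching urllib2 above; the post URL is
# illustrative):
# extractor = InfoExtractor("https://www.instagram.com/p/SHORTCODE/", verbose=True)
# for info in extractor.get_informations():
#     print info['title'], info['real_urls_and_dimensions'][-1][0]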
| 50.430108
| 133
| 0.615032
| 1,074
| 9,380
| 5.128492
| 0.17784
| 0.052288
| 0.117647
| 0.026688
| 0.598039
| 0.537582
| 0.509078
| 0.455338
| 0.377269
| 0.350218
| 0
| 0.004322
| 0.260021
| 9,380
| 185
| 134
| 50.702703
| 0.789223
| 0.173774
| 0
| 0.475862
| 0
| 0.006897
| 0.224769
| 0.061265
| 0
| 0
| 0
| 0
| 0
| 1
| 0.062069
| false
| 0
| 0.02069
| 0
| 0.158621
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1a479eb0ca5a8f8bbec21a491ef98b110500e1b
| 1,584
|
py
|
Python
|
python/qisrc/test/test_qisrc_foreach.py
|
vbarbaresi/qibuild
|
eab6b815fe0af49ea5c41ccddcd0dff2363410e1
|
[
"BSD-3-Clause"
] | null | null | null |
python/qisrc/test/test_qisrc_foreach.py
|
vbarbaresi/qibuild
|
eab6b815fe0af49ea5c41ccddcd0dff2363410e1
|
[
"BSD-3-Clause"
] | null | null | null |
python/qisrc/test/test_qisrc_foreach.py
|
vbarbaresi/qibuild
|
eab6b815fe0af49ea5c41ccddcd0dff2363410e1
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2012-2018 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the COPYING file.
def test_qisrc_foreach(qisrc_action, record_messages):
worktree = qisrc_action.worktree
worktree.create_project("not_in_git")
git_worktree = qisrc_action.git_worktree
git_worktree.create_git_project("git_project")
qisrc_action("foreach", "ls")
assert not record_messages.find("not_in_git")
assert record_messages.find("git_project")
record_messages.reset()
qisrc_action("foreach", "ls", "--all")
assert record_messages.find("not_in_git")
assert record_messages.find("git_project")
def test_non_cloned_groups(qisrc_action, git_server, record_messages):
git_server.create_group("foo", ["a.git", "b.git"])
git_server.create_group("bar", ["b.git", "c.git"])
qisrc_action("init", git_server.manifest_url, "--group", "foo")
record_messages.reset()
qisrc_action("foreach", "--group", "bar", "ls")
warning = record_messages.find(r"\[WARN \]")
assert warning
assert "Group bar is not currently in use" in warning
def test_do_not_warn_on_subgroups(qisrc_action, git_server, record_messages):
git_server.create_group("big", ["a.git", "b.git"])
git_server.create_group("small", ["b.git"])
qisrc_action("init", git_server.manifest_url, "--group", "big")
record_messages.reset()
qisrc_action("foreach", "--group", "small", "ls")
assert not record_messages.find(r"\[WARN \]")
assert record_messages.find(r"\* \(1/1\) b")
| 40.615385
| 77
| 0.709596
| 226
| 1,584
| 4.69469
| 0.292035
| 0.171536
| 0.118756
| 0.090481
| 0.538172
| 0.538172
| 0.427898
| 0.348728
| 0.295947
| 0.214892
| 0
| 0.007369
| 0.143308
| 1,584
| 38
| 78
| 41.684211
| 0.774503
| 0.101641
| 0
| 0.172414
| 0
| 0
| 0.187456
| 0
| 0
| 0
| 0
| 0
| 0.275862
| 1
| 0.103448
| false
| 0
| 0
| 0
| 0.103448
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1ad55c7e2b9846cac3302cc84dc78c54a2ce31b
| 3,562
|
py
|
Python
|
coursework/src/highscore.py
|
SpeedoDevo/G51FSE
|
bf5e203d936965e254eff1efa0b74edc368a6cda
|
[
"MIT"
] | null | null | null |
coursework/src/highscore.py
|
SpeedoDevo/G51FSE
|
bf5e203d936965e254eff1efa0b74edc368a6cda
|
[
"MIT"
] | null | null | null |
coursework/src/highscore.py
|
SpeedoDevo/G51FSE
|
bf5e203d936965e254eff1efa0b74edc368a6cda
|
[
"MIT"
] | null | null | null |
import pygame
import sys
import collections # for ordered dict
import pickle # for saving and loading highscores
from constants import (SCREEN_WIDTH, SCREEN_HEIGHT, RED, GREEN, GREY, BLACK, WHITE)
# class that shows, saves and loads highscores
class ScoreTable(pygame.sprite.Sprite):
# passing in bg so that it's never reinitialized
def __init__(self, screen, clock, bg):
pygame.sprite.Sprite.__init__(self)
self.titleFont = pygame.font.Font('image/langdon.otf', 50)
self.title = self.titleFont.render("highscores", True, GREY)
self.titleRect = self.title.get_rect()
# center on top of the screen
self.titleRect.center = (SCREEN_WIDTH/2,75)
self.scoreFont = pygame.font.Font('image/muzarela.ttf', 30)
self.clock = clock
self.screen = screen
self.bg = bg
        # last stores the player's last highscore
self.last = 0
self.load()
    def draw(self, screen):
#update then blit bg
self.bg.update()
screen.blit(self.bg.image,self.bg.rect)
screen.blit(self.title,self.titleRect)
for i in range(len(self.hs)):
#red color for the user's highscore
if list(self.hs.items())[i][0] == self.last:
color = RED
else:
color = WHITE
self.text = self.scoreFont.render(str(i+1) + ". " + str(list(self.hs.items())[i][1]) + ": " + str(list(self.hs.items())[i][0]), True, color)
self.textrect = self.text.get_rect()
# position text based on iteration number
self.textrect.center = (SCREEN_WIDTH/2,(150+i*35))
self.screen.blit(self.text,self.textrect)
pygame.display.update()
def update(self):
for event in pygame.event.get():
# let the game quit
if event.type == pygame.QUIT:
pygame.quit()
sys.exit(0)
# quit from hstable with click or enter
if event.type == pygame.MOUSEBUTTONDOWN or (event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN):
return True
return False
def run(self):
while 1:
            # because we are out of the game loop here we need our own ticking
self.clock.tick(70)
self.draw(self.screen)
if self.update(): return
def getLowest(self):
        # get the lowest score to decide whether it's high enough for adding to the table
return min(list(self.hs.keys()))
    def submitScore(self, name, score):
# delete the last
self.hs.popitem()
# add item
self.hs[score] = name
# save which was it
self.last = score
# reorder list
self.hs = collections.OrderedDict(sorted(self.hs.items(), reverse=True))
# save to file
self.save()
def noHS(self):
# remove highlighting if the score wasn't high enough
self.last = None
def save(self):
# pickle highscores into file
pickle.dump(self.hs, open("hs.dat", "wb"), 2)
def load(self):
# load highscores if it already exists
try:
self.hs = pickle.load(open("hs.dat", "rb"))
# create new file if it doesn't
except:
temp = {50000:"SpeedoDevo", 40000:"OliGee", 30000:"Jaume", 20000:"Kyle", 10000:"Steve", 9000:"Danielle", 8000:"Phil", 7000:"Mark", 6000:"Hugh", 5000:"Lisa"}
self.hs = collections.OrderedDict(sorted(temp.items(), reverse=True))
self.save()
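# A minimal sketch of the score-submission flow (assumes `table` is a
# ScoreTable and `name`/`score` come from the game-over handler):
# if score > table.getLowest():
#     table.submitScore(name, score)
# else:
#     table.noHS()
# table.run()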
| 37.893617
| 168
| 0.588433
| 470
| 3,562
| 4.42766
| 0.410638
| 0.034599
| 0.024027
| 0.021624
| 0.061028
| 0.028352
| 0.019702
| 0.019702
| 0
| 0
| 0
| 0.02707
| 0.294778
| 3,562
| 94
| 169
| 37.893617
| 0.801354
| 0.201572
| 0
| 0.03125
| 0
| 0
| 0.042169
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.140625
| false
| 0
| 0.078125
| 0.015625
| 0.28125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1b0c5d59ac79b7bc53e1a8befc59467c9a655ae
| 3,188
|
py
|
Python
|
judge/download.py
|
tokusumi/judge-cli
|
e6883ba55dc37e8ca2f328105a4df57b0b3145ba
|
[
"MIT"
] | null | null | null |
judge/download.py
|
tokusumi/judge-cli
|
e6883ba55dc37e8ca2f328105a4df57b0b3145ba
|
[
"MIT"
] | 6
|
2021-04-04T06:19:30.000Z
|
2021-09-18T16:48:41.000Z
|
judge/download.py
|
tokusumi/judge-cli
|
e6883ba55dc37e8ca2f328105a4df57b0b3145ba
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from typing import Optional, Tuple
import typer
from onlinejudge import utils
from pydantic.networks import HttpUrl
from pydantic.types import DirectoryPath
from judge.schema import JudgeConfig
from judge.tools.download import DownloadArgs, LoginForm, SaveArgs
from judge.tools.download import download as download_tool
from judge.tools.download import save as save_tool
class DownloadJudgeConfig(JudgeConfig):
URL: HttpUrl
testdir: DirectoryPath
class CLILoginForm(LoginForm):
def get_credentials(self) -> Tuple[str, str]:
username = typer.prompt("What's your username?")
password = typer.prompt("What's your password?", hide_input=True)
return username, password
def main(
workdir: Path = typer.Argument(".", help="a directory path for working directory"),
url: Optional[str] = typer.Option(None, help="a download URL"),
directory: Path = typer.Option(None, help="a directory path for test cases"),
no_store: bool = typer.Option(False, help="testcases is shown but not saved"),
format: str = typer.Option("sample-%i.%e", help="custom filename format"),
login: bool = typer.Option(False, help="login into target service"),
cookie: Path = typer.Option(utils.default_cookie_path, help="directory for cookie"),
) -> None:
"""
    Here is a shortcut for downloading with `online-judge-tools`.
    First, call `judge conf` for configuration.
    Pass the `problem` at the `contest` you want to test.
    Ex) the following downloads test cases for Problem `C` at `ABC 051`:
```download```
"""
typer.echo("Load configuration...")
if not workdir.exists():
typer.secho(f"Not exists: {str(workdir.resolve())}", fg=typer.colors.BRIGHT_RED)
raise typer.Abort()
try:
_config = JudgeConfig.from_toml(workdir)
except KeyError as e:
typer.secho(str(e), fg=typer.colors.BRIGHT_RED)
raise typer.Abort()
__config = _config.dict()
if url or directory:
# check arguments
if url:
__config["URL"] = url
if directory:
__config["testdir"] = directory.resolve()
try:
config = DownloadJudgeConfig(**__config)
except KeyError as e:
typer.secho(str(e), fg=typer.colors.BRIGHT_RED)
raise typer.Abort()
typer.echo(f"Download {config.URL}")
try:
login_form: Optional[LoginForm] = None
if login:
login_form = CLILoginForm()
testcases = download_tool(
DownloadArgs(
url=config.URL,
login_form=login_form,
cookie=cookie,
)
)
except Exception as e:
typer.secho(str(e), fg=typer.colors.BRIGHT_RED)
raise typer.Abort()
if not no_store:
try:
save_tool(
testcases,
SaveArgs(
format=format,
directory=Path(config.testdir),
),
)
except Exception as e:
typer.secho(str(e), fg=typer.colors.BRIGHT_RED)
raise typer.Abort()
if __name__ == "__main__":
typer.run(main)
| 30.653846
| 88
| 0.631117
| 383
| 3,188
| 5.154047
| 0.334204
| 0.033435
| 0.032928
| 0.048126
| 0.285714
| 0.159574
| 0.159574
| 0.159574
| 0.140831
| 0.140831
| 0
| 0.001279
| 0.264115
| 3,188
| 103
| 89
| 30.951456
| 0.840153
| 0.081556
| 0
| 0.223684
| 0
| 0
| 0.114986
| 0.008287
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026316
| false
| 0.026316
| 0.131579
| 0
| 0.223684
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1b7cdef9de310ce5a7fb0146da43f000e1ce55f
| 18,861
|
py
|
Python
|
gitflow/context.py
|
abacusresearch/gitflow
|
81ea7f5d468f9b128cd593f62972f13352bd3a63
|
[
"MIT"
] | null | null | null |
gitflow/context.py
|
abacusresearch/gitflow
|
81ea7f5d468f9b128cd593f62972f13352bd3a63
|
[
"MIT"
] | null | null | null |
gitflow/context.py
|
abacusresearch/gitflow
|
81ea7f5d468f9b128cd593f62972f13352bd3a63
|
[
"MIT"
] | null | null | null |
import atexit
import os
import re
import shutil
from enum import Enum
from typing import List, Optional

import collections

from gitflow import cli, const, repotools, _, utils
from gitflow.common import Result
from gitflow.const import VersioningScheme
from gitflow.properties import PropertyIO
from gitflow.repotools import RepoContext
from gitflow.version import VersionMatcher, VersionConfig


class BuildStepType(Enum):
    ASSEMBLE = 'assemble',
    TEST = 'test',
    INTEGRATION_TEST = 'integration_test',
    PACKAGE = 'package',
    DEPLOY = 'deploy'


class BuildLabels(Enum):
    OPENSHIFT_S2I_TEST = 'com.openshift:s2i'


class BuildStep(object):
    name: str = None
    commands: list = None
    """a list of command arrays"""
    labels: set = None
    """contains labels for mapping to the ci tasks, effectively extending the label set in the enclosing stage"""


class BuildStage(object):
    type: str
    steps: list = None
    labels: list = None
    """contains labels for mapping to ci tasks"""

    def __init__(self):
        self.steps = list()
        self.labels = list()


class Config(object):
    # project properties
    property_file: str = None
    sequence_number_property: str = None
    version_property: str = None

    # validation mode
    strict_mode = True

    # version
    version_config: VersionConfig = None

    # repo
    remote_name = None
    release_branch_base = None
    dev_branch_types = ['feature', 'integration',
                        'fix', 'chore', 'doc', 'issue']
    prod_branch_types = ['fix', 'chore', 'doc', 'issue']

    # build config
    version_change_actions: List[List[str]] = None
    build_stages: list = None

    # hard config
    # TODO checks on merge base
    allow_shared_release_branch_base = False
    # TODO distinction of commit-based and purely tag based increments
    allow_qualifier_increments_within_commit = True
    # TODO config var & CLI option
    # requires clean workspace and temporary detachment from branches to be pushed
    push_to_local = False
    pull_after_bump = True

    # properties
    @property
    def sequential_versioning(self) -> bool:
        return self.version_config.versioning_scheme == VersioningScheme.SEMVER_WITH_SEQ

    @property
    def tie_sequential_version_to_semantic_version(self) -> bool:
        return self.version_config.versioning_scheme == VersioningScheme.SEMVER_WITH_SEQ

    @property
    def commit_version_property(self) -> bool:
        return self.version_property is not None

    @property
    def commit_sequential_version_property(self) -> bool:
        return self.sequence_number_property is not None \
               and self.sequential_versioning

    @property
    def requires_property_commits(self) -> bool:
        return self.commit_version_property \
               or self.commit_sequential_version_property


class AbstractContext(object):
    result: Result = None

    def __init__(self):
        self.result = Result()

    def warn(self, message, reason):
        self.result.warn(message, reason)

    def error(self, exit_code, message, reason, throw: bool = False):
        self.result.error(exit_code, message, reason, throw)

    def fail(self, exit_code, message, reason):
        self.result.fail(exit_code, message, reason)

    def add_subresult(self, subresult):
        self.result.add_subresult(subresult)

    def has_errors(self):
        return self.result.has_errors()

    def abort_on_error(self):
        return self.result.abort_on_error()

    def abort(self):
        return self.result.abort()


class Context(AbstractContext):
    config: Config = None
    repo: RepoContext = None

    # args
    args = None

    root = None
    batch = False
    assume_yes = False
    dry_run = False
    verbose = const.ERROR_VERBOSITY
    pretty = False

    # matchers
    release_base_branch_matcher: VersionMatcher = None
    release_branch_matcher: VersionMatcher = None
    work_branch_matcher: VersionMatcher = None
    version_tag_matcher: VersionMatcher = None
    discontinuation_tag_matcher: VersionMatcher = None

    # resources
    temp_dirs: list = None
    clones: list = None

    # misc
    git_version: str = None

    def __init__(self):
        super().__init__()
        atexit.register(self.cleanup)

    @staticmethod
    def create(args: dict, result_out: Result) -> 'Context':
        context = Context()
        context.config: Config = Config()

        if args is not None:
            context.args = args
            context.batch = context.args['--batch']
            context.assume_yes = context.args.get('--assume-yes')
            context.dry_run = context.args.get('--dry-run')
            # TODO remove this workaround
            context.verbose = (context.args['--verbose'] + 1) // 2
            context.pretty = context.args['--pretty']
        else:
            context.args = dict()

        # configure CLI
        cli.set_allow_color(not context.batch)

        # initialize repo context and attempt to load the config file
        if '--root' in context.args and context.args['--root'] is not None:
            context.root = context.args['--root']

            context.repo = RepoContext()
            context.repo.dir = context.root
            context.repo.verbose = context.verbose

            context.git_version = repotools.git_version(context.repo)
            # context.repo.use_root_dir_arg = semver.compare(context.git_version, "2.9.0") >= 0
            context.repo.use_root_dir_arg = False

            repo_root = repotools.git_rev_parse(context.repo, '--show-toplevel')
            # None when invalid or bare
            if repo_root is not None:
                context.repo.dir = repo_root

                if context.verbose >= const.TRACE_VERBOSITY:
                    cli.print("--------------------------------------------------------------------------------")
                    cli.print("refs in {repo}:".format(repo=context.repo.dir))
                    cli.print("--------------------------------------------------------------------------------")
                    for ref in repotools.git_list_refs(context.repo):
                        cli.print(repr(ref))
                    cli.print("--------------------------------------------------------------------------------")

                config_dir = context.repo.dir
            else:
                context.repo = None
                config_dir = context.root

            gitflow_config_file: Optional[str] = None
            if context.args['--config'] is not None:
                gitflow_config_file = os.path.join(config_dir, context.args['--config'])
                if gitflow_config_file is None:
                    result_out.fail(os.EX_DATAERR,
                                    _("the specified config file does not exist or is not a regular file: {path}.")
                                    .format(path=repr(gitflow_config_file)),
                                    None
                                    )
            else:
                for config_filename in const.DEFAULT_CONFIGURATION_FILE_NAMES:
                    path = os.path.join(config_dir, config_filename)
                    if os.path.exists(path):
                        gitflow_config_file = path
                        break
                if gitflow_config_file is None:
                    result_out.fail(os.EX_DATAERR,
                                    _("config file not found.")
                                    .format(path=repr(gitflow_config_file)),
                                    _("Default config files are\n:{list}")
                                    .format(list=const.DEFAULT_CONFIGURATION_FILE_NAMES)
                                    )

            if context.verbose >= const.TRACE_VERBOSITY:
                cli.print("gitflow_config_file: " + gitflow_config_file)

            with open(gitflow_config_file) as json_file:
                config = PropertyIO.get_instance_by_filename(gitflow_config_file).from_stream(json_file)
        else:
            # note: the original assigned object() here, which has no .get();
            # an empty dict keeps the config lookups below working
            config = dict()

        build_config_json = config.get(const.CONFIG_BUILD)

        context.config.version_change_actions = config.get(const.CONFIG_ON_VERSION_CHANGE, [])

        context.config.build_stages = list()
        if build_config_json is not None:
            stages_json = build_config_json.get('stages')
            if stages_json is not None:
                for stage_key, stage_json in stages_json.items():
                    stage = BuildStage()
                    if isinstance(stage_json, dict):
                        stage.type = stage_json.get('type') or stage_key
                        if stage.type not in const.BUILD_STAGE_TYPES:
                            result_out.fail(
                                os.EX_DATAERR,
                                _("Configuration failed."),
                                _("Invalid build stage type {key}."
                                  .format(key=repr(stage.type)))
                            )
                        stage.name = stage_json.get('name') or stage_key

                        stage_labels = stage_json.get('labels')
                        if isinstance(stage_labels, list):
                            stage.labels.extend(stage_labels)
                        else:
                            stage.labels.append(stage_labels)

                        stage_steps_json = stage_json.get('steps')
                        if stage_steps_json is not None:
                            for step_key, step_json in stage_steps_json.items():
                                step = BuildStep()
                                if isinstance(step_json, dict):
                                    step.name = step_json.get('name') or step_key
                                    step.commands = step_json.get('commands')

                                    stage_labels = stage_json.get('labels')
                                    if isinstance(stage_labels, list):
                                        stage.labels.extend(stage_labels)
                                    else:
                                        stage.labels.append(stage_labels)
                                elif isinstance(step_json, list):
                                    step.name = step_key
                                    step.type = step_key
                                    step.commands = step_json
                                else:
                                    result_out.fail(
                                        os.EX_DATAERR,
                                        _("Configuration failed."),
                                        _("Invalid build step definition {type} {key}."
                                          .format(type=repr(type(step_json)), key=repr(step_key)))
                                    )
                                stage.steps.append(step)
                    elif isinstance(stage_json, list):
                        stage.type = stage_key
                        stage.name = stage_key
                        if len(stage_json):
                            step = BuildStep()
                            step.name = '#'
                            step.commands = stage_json
                            stage.steps.append(step)
                    else:
                        result_out.fail(
                            os.EX_DATAERR,
                            _("Configuration failed."),
                            _("Invalid build stage definition {key}."
                              .format(key=repr(stage_key)))
                        )
                    context.config.build_stages.append(stage)

        context.config.build_stages.sort(
            key=utils.cmp_to_key(lambda stage_a, stage_b:
                                 const.BUILD_STAGE_TYPES.index(stage_a.type)
                                 - const.BUILD_STAGE_TYPES.index(stage_b.type)
                                 ),
            reverse=False
        )

        # project properties config
        context.config.property_file = config.get(const.CONFIG_PROJECT_PROPERTY_FILE)
        if context.config.property_file is not None:
            context.config.property_file = os.path.join(context.root, context.config.property_file)
        context.config.version_property = config.get(const.CONFIG_VERSION_PROPERTY)
        context.config.sequence_number_property = config.get(
            const.CONFIG_SEQUENCE_NUMBER_PROPERTY)
        context.config.version_property = config.get(
            const.CONFIG_VERSION_PROPERTY)

        property_names = [property for property in
                          [context.config.sequence_number_property, context.config.version_property] if
                          property is not None]
        duplicate_property_names = [item for item, count in collections.Counter(property_names).items() if count > 1]
        if len(duplicate_property_names):
            result_out.fail(os.EX_DATAERR, _("Configuration failed."),
                            _("Duplicate property names: {duplicate_property_names}").format(
                                duplicate_property_names=', '.join(duplicate_property_names))
                            )

        # version config
        context.config.version_config = VersionConfig()

        versioning_scheme = config.get(const.CONFIG_VERSIONING_SCHEME, const.DEFAULT_VERSIONING_SCHEME)
        if versioning_scheme not in const.VERSIONING_SCHEMES:
            result_out.fail(os.EX_DATAERR, _("Configuration failed."),
                            _("The versioning scheme {versioning_scheme} is invalid.").format(
                                versioning_scheme=utils.quote(versioning_scheme, '\'')))
        context.config.version_config.versioning_scheme = const.VERSIONING_SCHEMES[versioning_scheme]

        if context.config.version_config.versioning_scheme == VersioningScheme.SEMVER:
            qualifiers = config.get(const.CONFIG_VERSION_TYPES, const.DEFAULT_PRE_RELEASE_QUALIFIERS)
            if isinstance(qualifiers, str):
                qualifiers = [qualifier.strip() for qualifier in qualifiers.split(",")]
            if qualifiers != sorted(qualifiers):
                result_out.fail(
                    os.EX_DATAERR,
                    _("Configuration failed."),
                    _("Pre-release qualifiers are not specified in ascending order.")
                )
            context.config.version_config.qualifiers = qualifiers
            context.config.version_config.initial_version = const.DEFAULT_INITIAL_VERSION
        elif context.config.version_config.versioning_scheme == VersioningScheme.SEMVER_WITH_SEQ:
            context.config.version_config.qualifiers = None
            context.config.version_config.initial_version = const.DEFAULT_INITIAL_SEQ_VERSION
        else:
            context.fail(os.EX_CONFIG, "configuration error", "invalid versioning scheme")

        # branch config
        context.config.remote_name = "origin"
        context.config.release_branch_base = config.get(const.CONFIG_RELEASE_BRANCH_BASE,
                                                        const.DEFAULT_RELEASE_BRANCH_BASE)

        remote_prefix = repotools.create_ref_name(const.REMOTES_PREFIX, context.config.remote_name)

        context.release_base_branch_matcher = VersionMatcher(
            [const.LOCAL_BRANCH_PREFIX, remote_prefix],
            None,
            re.escape(context.config.release_branch_base),
        )

        context.release_branch_matcher = VersionMatcher(
            [const.LOCAL_BRANCH_PREFIX, remote_prefix],
            config.get(
                const.CONFIG_RELEASE_BRANCH_PREFIX,
                const.DEFAULT_RELEASE_BRANCH_PREFIX),
            config.get(
                const.CONFIG_RELEASE_BRANCH_PATTERN,
                const.DEFAULT_RELEASE_BRANCH_PATTERN),
        )

        context.work_branch_matcher = VersionMatcher(
            [const.LOCAL_BRANCH_PREFIX, remote_prefix],
            [const.BRANCH_PREFIX_DEV, const.BRANCH_PREFIX_PROD],
            config.get(
                const.CONFIG_WORK_BRANCH_PATTERN,
                const.DEFAULT_WORK_BRANCH_PATTERN),
        )

        context.version_tag_matcher = VersionMatcher(
            [const.LOCAL_TAG_PREFIX],
            config.get(
                const.CONFIG_VERSION_TAG_PREFIX,
                const.DEFAULT_VERSION_TAG_PREFIX),
            config.get(
                const.CONFIG_VERSION_TAG_PATTERN,
                const.DEFAULT_SEMVER_VERSION_TAG_PATTERN
                if context.config.version_config.versioning_scheme == VersioningScheme.SEMVER
                else const.DEFAULT_SEMVER_WITH_SEQ_VERSION_TAG_PATTERN)
        )
        context.version_tag_matcher.group_unique_code = None \
            if context.config.version_config.versioning_scheme == VersioningScheme.SEMVER \
            else 'prerelease_type'

        context.discontinuation_tag_matcher = VersionMatcher(
            [const.LOCAL_TAG_PREFIX],
            config.get(
                const.CONFIG_DISCONTINUATION_TAG_PREFIX,
                const.DEFAULT_DISCONTINUATION_TAG_PREFIX),
            config.get(
                const.CONFIG_DISCONTINUATION_TAG_PATTERN,
                const.DEFAULT_DISCONTINUATION_TAG_PATTERN),
            None
        )

        return context

    def add_temp_dir(self, dir):
        if self.temp_dirs is None:
            self.temp_dirs = list()
        self.temp_dirs.append(dir)
        pass

    def get_release_branches(self, reverse: bool = True):
        release_branches = list(filter(
            lambda branch_ref: self.release_branch_matcher.format(
                branch_ref.name) is not None,
            repotools.git_list_refs(self.repo,
                                    repotools.create_ref_name(const.REMOTES_PREFIX, self.config.remote_name),
                                    const.LOCAL_BRANCH_PREFIX)
        ))
        release_branches.sort(
            reverse=reverse,
            key=self.release_branch_matcher.key_func
        )
        return release_branches

    def cleanup(self):
        atexit.unregister(self.cleanup)
        if self.temp_dirs is not None:
            for temp_dir in self.temp_dirs:
                if self.verbose >= const.DEBUG_VERBOSITY:
                    cli.print("deleting temp dir: " + temp_dir)
                shutil.rmtree(temp_dir)
            self.temp_dirs.clear()
        if self.clones is not None:
            for clone in self.clones:
                clone.cleanup()
            self.clones.clear()

    def __del__(self):
        self.cleanup()
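The stage ordering in `Context.create` relies on `utils.cmp_to_key`; assuming it behaves like the standard `functools.cmp_to_key`, here is a standalone sketch of the same sort (the stage list and type table below are made up for illustration):

```python
from functools import cmp_to_key

# hypothetical stand-ins for const.BUILD_STAGE_TYPES and the parsed stages
BUILD_STAGE_TYPES = ['assemble', 'test', 'integration_test', 'package', 'deploy']
stages = [{'type': 'deploy'}, {'type': 'assemble'}, {'type': 'test'}]

# order stages by their position in the canonical type list, as create() does
stages.sort(key=cmp_to_key(
    lambda a, b: BUILD_STAGE_TYPES.index(a['type']) - BUILD_STAGE_TYPES.index(b['type'])))
print([s['type'] for s in stages])  # ['assemble', 'test', 'deploy']
```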
| 38.64959
| 117
| 0.564233
| 1,893
| 18,861
| 5.367142
| 0.151083
| 0.035827
| 0.022047
| 0.031496
| 0.31624
| 0.246752
| 0.204921
| 0.189961
| 0.142323
| 0.106201
| 0
| 0.000731
| 0.34733
| 18,861
| 487
| 118
| 38.728953
| 0.824614
| 0.030433
| 0
| 0.194444
| 0
| 0
| 0.063499
| 0.014713
| 0
| 0
| 0
| 0.002053
| 0
| 1
| 0.055556
| false
| 0.002778
| 0.036111
| 0.022222
| 0.263889
| 0.019444
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1b87ee915b0b88ddd9829337e7a6f4316b55ca5
| 705
|
py
|
Python
|
LeetCode/1219_Path_with_Maximum_Gold/main.py
|
sungmen/Acmicpc_Solve
|
0298a6aec84993a4d8767bd2c00490b7201e06a4
|
[
"MIT"
] | 1
|
2020-07-08T23:16:19.000Z
|
2020-07-08T23:16:19.000Z
|
LeetCode/1219_Path_with_Maximum_Gold/main.py
|
sungmen/Acmicpc_Solve
|
0298a6aec84993a4d8767bd2c00490b7201e06a4
|
[
"MIT"
] | 1
|
2020-05-16T03:12:24.000Z
|
2020-05-16T03:14:42.000Z
|
LeetCode/1219_Path_with_Maximum_Gold/main.py
|
sungmen/Acmicpc_Solve
|
0298a6aec84993a4d8767bd2c00490b7201e06a4
|
[
"MIT"
] | 2
|
2020-05-16T03:25:16.000Z
|
2021-02-10T16:51:25.000Z
|
from typing import List  # needed for the annotation on getMaximumGold


class Solution:
    def __init__(self):
        self.m = 0
        self.n = 0

    def dfs(self, y, x, grid) -> int:
        if y < 0 or y >= self.m or x < 0 or x >= self.n or grid[y][x] == 0:
            return 0
        res = 0
        tmpGrid = grid[y][x]
        grid[y][x] = 0  # mark the cell as visited for this path
        for y_, x_ in ((y, x - 1), (y, x + 1), (y - 1, x), (y + 1, x)):
            res = max(self.dfs(y_, x_, grid), res)
        grid[y][x] = tmpGrid  # restore on backtrack
        return grid[y][x] + res

    def getMaximumGold(self, grid: List[List[int]]) -> int:
        self.m = len(grid)
        self.n = len(grid[0])
        res = max(self.dfs(i, j, grid) for i in range(self.m) for j in range(self.n))
        return res
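A quick usage check for the class above, using the sample grid from the LeetCode 1219 problem statement (best path collects 9 -> 8 -> 7):

```python
if __name__ == "__main__":
    grid = [[0, 6, 0],
            [5, 8, 7],
            [0, 9, 0]]
    print(Solution().getMaximumGold(grid))  # 24
```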
| 32.045455
| 85
| 0.453901
| 120
| 705
| 2.6
| 0.233333
| 0.064103
| 0.096154
| 0.044872
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029613
| 0.377305
| 705
| 22
| 86
| 32.045455
| 0.681093
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.157895
| false
| 0
| 0
| 0
| 0.368421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1b884785bf603bff438ce57a6af789de6bc8891
| 2,307
|
py
|
Python
|
test/test_modify_contact.py
|
peruana80/python_training
|
0070bdc07b22d80594c029984c9967e56ba51951
|
[
"Apache-2.0"
] | null | null | null |
test/test_modify_contact.py
|
peruana80/python_training
|
0070bdc07b22d80594c029984c9967e56ba51951
|
[
"Apache-2.0"
] | null | null | null |
test/test_modify_contact.py
|
peruana80/python_training
|
0070bdc07b22d80594c029984c9967e56ba51951
|
[
"Apache-2.0"
] | null | null | null |
from random import randrange

from model.contact import Contact


def test_modify_contact_name(app, db, check_ui):
    # guard: make sure at least one contact exists before modifying
    # (the original checked db.get_group_list() and passed firstname=,
    # which look like copy-paste slips from the group tests)
    if len(db.get_contact_list()) == 0:
        app.contact.create(Contact(first_name="test"))
    old_contacts = db.get_contact_list()
    index = randrange(len(old_contacts))
    contact = Contact(first_name="Zmodyfikuj imie", middle_name="Zmodyfikuj drugie imie",
                      last_name="Zmodyfikuj nazwisko", nickname="Zmodyfikuj ksywe", title="Zmodyfikuj tytul",
                      company="Zmodyfikuj firme", address="Zmodyfikuj adres", home_number="Zmodyfikuj_telefon_domowy",
                      mobile_number="Zmodyfikuj_telefon_komorkowy", work_number="Zmodyfikuj_telefon_sluzbowy",
                      fax="Zmodyfikuj fax", email="Zmodyfikuj email", email2="Zmodyfikuj email2",
                      email3="Zmodyfikuj email3", homepage="Zmodyfikuj strone domowa", byear="1990", ayear="2000",
                      address_2="Zmodyfikuj drugi adres", phone2="Zmodyfikuj_telefon domowy 2",
                      notes="Zmodyfikuj notatki")
    contact.id = old_contacts[index].id
    app.contact.modify_contact_by_id(contact)
    new_contacts = db.get_contact_list()
    assert len(old_contacts) == len(new_contacts)
    old_contacts[index] = contact
    assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
    if check_ui:
        new_contacts = app.contact.get_contact_list()
        assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)

#def test_modify_first_contact_first_name(app):
#    if app.contact.count() == 0:
#        app.contact.create(Contact(first_name="test"))
#    old_contacts = app.contact.get_contact_list()
#    app.contact.modify_first_contact(Contact(first_name="zmodyfikuj imie"))
#    new_contacts = app.contact.get_contact_list()
#    assert len(old_contacts) == len(new_contacts)

#def test_modify_first_contact_email(app):
#    if app.contact.count() == 0:
#        app.contact.create(Contact(first_name="test"))
#    old_contacts = app.contact.get_contact_list()
#    app.contact.modify_first_contact(Contact(last_name="Zmodyfikuj nazwisko"))
#    new_contacts = app.contact.get_contact_list()
#    assert len(old_contacts) == len(new_contacts)
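The order-insensitive assertions above hinge on a `Contact.id_or_max` sort key. A minimal sketch of that pattern with a hypothetical stand-in model (the real `Contact` lives in `model.contact` and may differ):

```python
from sys import maxsize


class FakeContact:
    # hypothetical stand-in for model.contact.Contact
    def __init__(self, id=None, first_name=""):
        self.id = id
        self.first_name = first_name

    def id_or_max(self):
        # unsaved contacts (no id yet) sort last
        return int(self.id) if self.id else maxsize


old = [FakeContact(id="2"), FakeContact(id="1")]
new = [FakeContact(id="1"), FakeContact(id="2")]
# same elements in different order compare equal once sorted by id
assert [c.id for c in sorted(old, key=FakeContact.id_or_max)] == \
       [c.id for c in sorted(new, key=FakeContact.id_or_max)]
```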
| 56.268293
| 186
| 0.702211
| 292
| 2,307
| 5.263699
| 0.25
| 0.08458
| 0.063761
| 0.068315
| 0.514639
| 0.444372
| 0.405335
| 0.405335
| 0.382563
| 0.382563
| 0
| 0.009534
| 0.181621
| 2,307
| 41
| 187
| 56.268293
| 0.804555
| 0.306025
| 0
| 0.095238
| 0
| 0
| 0.231108
| 0.050378
| 0
| 0
| 0
| 0
| 0.142857
| 1
| 0.047619
| false
| 0
| 0.095238
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1b8db0ca9074a5d55378aaf5be9d198fcaa6a0b
| 734
|
py
|
Python
|
base.py
|
oknalv/linky
|
78fba19946e2212b10f3d1a5b27c7d9329556290
|
[
"MIT"
] | null | null | null |
base.py
|
oknalv/linky
|
78fba19946e2212b10f3d1a5b27c7d9329556290
|
[
"MIT"
] | null | null | null |
base.py
|
oknalv/linky
|
78fba19946e2212b10f3d1a5b27c7d9329556290
|
[
"MIT"
] | null | null | null |
import webapp2
from webapp2_extras import sessions


class BaseHandler(webapp2.RequestHandler):
    def dispatch(self):
        self.session_store = sessions.get_store(request=self.request)
        try:
            webapp2.RequestHandler.dispatch(self)
        finally:
            self.session_store.save_sessions(self.response)

    @webapp2.cached_property
    def session(self):
        return self.session_store.get_session()

    def set_flash(self, type, message_tag):
        if not self.session.get("flash"):
            self.session["flash"] = []
        self.session["flash"].append([type, message_tag])

    def get_flash(self):
        ret = self.session.get("flash")
        self.session["flash"] = []
        return ret
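The two flash methods implement the usual read-and-clear ("flash message") pattern. A framework-free sketch of the same idea, using a plain dict in place of the webapp2 session:

```python
session = {}  # stand-in for webapp2's session object


def set_flash(type, message_tag):
    session.setdefault("flash", []).append([type, message_tag])


def get_flash():
    # read-and-clear: each message is delivered at most once
    ret = session.get("flash")
    session["flash"] = []
    return ret


set_flash("error", "login_failed")
print(get_flash())  # [['error', 'login_failed']]
print(get_flash())  # [] -- already consumed
```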
| 29.36
| 69
| 0.647139
| 85
| 734
| 5.447059
| 0.364706
| 0.190065
| 0.103672
| 0.136069
| 0.151188
| 0.151188
| 0.151188
| 0
| 0
| 0
| 0
| 0.008961
| 0.239782
| 734
| 25
| 70
| 29.36
| 0.820789
| 0
| 0
| 0.1
| 0
| 0
| 0.034014
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.1
| 0.05
| 0.45
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1b9ea9a68748f5299174c8b988d634a02fb6fda
| 6,999
|
py
|
Python
|
tests/test_helpers.py
|
albertoalcolea/dbhelpers
|
c65f77a750cf46874ae7b5b0e6d4930e9df729af
|
[
"Apache-2.0"
] | 2
|
2015-10-31T20:36:22.000Z
|
2021-10-05T12:08:10.000Z
|
tests/test_helpers.py
|
albertoalcolea/dbhelpers
|
c65f77a750cf46874ae7b5b0e6d4930e9df729af
|
[
"Apache-2.0"
] | null | null | null |
tests/test_helpers.py
|
albertoalcolea/dbhelpers
|
c65f77a750cf46874ae7b5b0e6d4930e9df729af
|
[
"Apache-2.0"
] | null | null | null |
import unittest
try:
    from unittest.mock import Mock, call
except ImportError:
    from mock import Mock, call

from dbhelpers import cm_cursor, fetchiter, fetchone_nt, fetchmany_nt, fetchall_nt, fetchiter_nt


class HelpersTestCase(unittest.TestCase):
    def test_cm_cursor(self):
        """
        Creates a context manager for a cursor and it is able to commit on exit.
        """
        conn = Mock(spec=['cursor', 'commit', 'rollback'])
        cursor_mock = Mock()
        conn.cursor = Mock(return_value=cursor_mock)
        conn.commit = Mock()
        conn.rollback = Mock()

        # Commit on exit
        with cm_cursor(conn) as cursor:
            self.assertEqual(cursor, cursor_mock)
        self.assertTrue(conn.commit.called)
        self.assertFalse(conn.rollback.called)
        conn.commit.reset_mock()

        # Disable auto commit
        with cm_cursor(conn, commit=False) as cursor:
            self.assertEqual(cursor, cursor_mock)
        self.assertFalse(conn.commit.called)
        self.assertFalse(conn.rollback.called)

        # If exception no commit
        def test_with_exc(conn, commit=True):
            with cm_cursor(conn, commit=commit) as cursor:
                raise Exception()

        # If exception and commit=True, call rollback
        self.assertRaises(Exception, test_with_exc, conn=conn, commit=True)
        self.assertFalse(conn.commit.called)
        self.assertTrue(conn.rollback.called)
        conn.rollback.reset_mock()

        # If exception and commit=False, no call commit nor rollback
        self.assertRaises(Exception, test_with_exc, conn=conn, commit=False)
        self.assertFalse(conn.commit.called)
        self.assertFalse(conn.rollback.called)

    def test_fetchiter(self):
        cursor = Mock()

        def test_iterator(cursor, use_server_cursor=False, **kwargs):
            cursor.fetchmany = Mock(return_value=[1,2,3])
            num_it = 0
            for row in fetchiter(cursor, **kwargs):
                if num_it == 3:
                    raise StopIteration
                self.assertIn(row, [1,2,3])
                num_it += 1
                if row == 3:
                    # Stop
                    if use_server_cursor:
                        cursor.fetchall = Mock(return_value=[])
                    else:
                        cursor.fetchmany = Mock(return_value=[])
            self.assertEqual(num_it, 3)

        # Standard
        test_iterator(cursor)

        # Size
        test_iterator(cursor, size=2)
        cursor.fetchmany.assert_called_with(2)

        # Batch
        cursor.fetchmany = Mock(return_value=[1,2])
        for row in fetchiter(cursor, batch=True):
            self.assertEqual(row, [1,2])
            # Stop
            cursor.fetchmany = Mock(return_value=[])

        # Server cursor
        cursor.execute = Mock()
        cursor.fetchall = Mock(return_value=[1,2,3])
        test_iterator(cursor, use_server_cursor=True, size=10, server_cursor='C')
        calls = [call("FETCH %s FROM C", (10,))] * 2
        cursor.execute.assert_has_calls(calls)

    def test_fetchone_nt(self):
        cursor = Mock()
        cursor.description = (('id', 3, 2, 11, 11, 0, 0), ('status', 253, 7, 80, 80, 0, 0))
        cursor.fetchone = Mock(return_value=(34, 'info'))
        r = fetchone_nt(cursor)
        self.assertEqual(r.__class__.__name__, 'Results')
        self.assertEqual(r.id, 34)
        self.assertEqual(r.status, 'info')

    def test_fetchmany_nt(self):
        cursor = Mock()
        cursor.description = (('id', 3, 2, 11, 11, 0, 0), ('status', 253, 7, 80, 80, 0, 0))
        cursor.fetchmany = Mock(return_value=((34, 'info'), (99, 'warning')))
        r = fetchmany_nt(cursor)
        self.assertEqual(r.__class__.__name__, 'list')
        self.assertEqual(r[0].__class__.__name__, 'Results')
        self.assertEqual(r[0].id, 34)
        self.assertEqual(r[0].status, 'info')
        self.assertEqual(r[1].__class__.__name__, 'Results')
        self.assertEqual(r[1].id, 99)
        self.assertEqual(r[1].status, 'warning')

    def test_fetchall_nt(self):
        cursor = Mock()
        cursor.description = (('id', 3, 2, 11, 11, 0, 0), ('status', 253, 7, 80, 80, 0, 0))
        cursor.fetchall = Mock(return_value=((34, 'info'), (99, 'warning')))
        r = fetchall_nt(cursor)
        self.assertEqual(r.__class__.__name__, 'list')
        self.assertEqual(r[0].__class__.__name__, 'Results')
        self.assertEqual(r[0].id, 34)
        self.assertEqual(r[0].status, 'info')
        self.assertEqual(r[1].__class__.__name__, 'Results')
        self.assertEqual(r[1].id, 99)
        self.assertEqual(r[1].status, 'warning')

    def test_fetchiter_nt(self):
        cursor = Mock()
        cursor.description = (('id', 3, 2, 11, 11, 0, 0), ('status', 253, 7, 80, 80, 0, 0))

        # Standard
        cursor.fetchmany = Mock(return_value=((34, 'info'), (99, 'warning')))
        num_it = 0
        for row in fetchiter_nt(cursor):
            self.assertEqual(row.__class__.__name__, 'Results')
            if num_it == 0:
                self.assertEqual(row.id, 34)
                self.assertEqual(row.status, 'info')
            if num_it == 1:
                self.assertEqual(row.id, 99)
                self.assertEqual(row.status, 'warning')
            if num_it == 2:
                raise StopIteration
            num_it += 1
            if num_it == 2:
                cursor.fetchmany = Mock(return_value=[])
        self.assertEqual(num_it, 2)

        # Batch
        cursor.fetchmany = Mock(return_value=((34, 'info'), (99, 'warning')))
        num_it = 0
        for row in fetchiter_nt(cursor, batch=True):
            self.assertEqual(row.__class__.__name__, 'list')
            self.assertEqual(row[0].__class__.__name__, 'Results')
            self.assertEqual(row[0].id, 34)
            self.assertEqual(row[0].status, 'info')
            self.assertEqual(row[1].__class__.__name__, 'Results')
            self.assertEqual(row[1].id, 99)
            self.assertEqual(row[1].status, 'warning')
            if num_it == 1:
                raise StopIteration
            num_it += 1
            if num_it == 1:
                cursor.fetchmany = Mock(return_value=[])
        self.assertEqual(num_it, 1)

        # Server cursor
        cursor.fetchall = Mock(return_value=((34, 'info'), (99, 'warning')))
        num_it = 0
        for row in fetchiter_nt(cursor, server_cursor='C'):
            self.assertEqual(row.__class__.__name__, 'Results')
            if num_it == 0:
                self.assertEqual(row.id, 34)
                self.assertEqual(row.status, 'info')
            if num_it == 1:
                self.assertEqual(row.id, 99)
                self.assertEqual(row.status, 'warning')
            if num_it == 2:
                raise StopIteration
            num_it += 1
            if num_it == 2:
                cursor.fetchall = Mock(return_value=[])
        self.assertEqual(num_it, 2)
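The tests above drive `fetchiter` purely through `fetchmany`. A minimal sketch of what such an iterator presumably looks like (the real implementation lives in `dbhelpers` and may differ, e.g. in server-cursor handling):

```python
class FakeCursor:
    # tiny stand-in for a DB-API cursor, for demonstration only
    def __init__(self, rows):
        self.rows = rows

    def fetchmany(self, size):
        chunk, self.rows = self.rows[:size], self.rows[size:]
        return chunk


def fetchiter_sketch(cursor, size=2, batch=False):
    # keep calling fetchmany(size) until the cursor runs dry
    while True:
        rows = cursor.fetchmany(size)
        if not rows:
            break
        if batch:
            yield rows            # one chunk per round trip
        else:
            for row in rows:      # flatten chunks into single rows
                yield row


print(list(fetchiter_sketch(FakeCursor([1, 2, 3]))))              # [1, 2, 3]
print(list(fetchiter_sketch(FakeCursor([1, 2, 3]), batch=True)))  # [[1, 2], [3]]
```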
| 38.456044
| 96
| 0.570796
| 841
| 6,999
| 4.526754
| 0.117717
| 0.161545
| 0.085106
| 0.059102
| 0.711321
| 0.635671
| 0.563961
| 0.515629
| 0.462832
| 0.401103
| 0
| 0.036431
| 0.3019
| 6,999
| 181
| 97
| 38.668508
| 0.742734
| 0.043863
| 0
| 0.531469
| 0
| 0
| 0.039369
| 0
| 0
| 0
| 0
| 0
| 0.377622
| 1
| 0.055944
| false
| 0
| 0.034965
| 0
| 0.097902
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1be0f593e7493f91a2f96246f4cf8a9df42b366
| 1,367
|
py
|
Python
|
electrumsv_sdk/builtin_components/electrumsv_server/local_tools.py
|
electrumsv/electrumsv-sdk
|
2d4b9474b2e2fc5518bba10684c5d5130ffb6328
|
[
"OML"
] | 4
|
2020-07-06T12:13:14.000Z
|
2021-07-29T12:45:27.000Z
|
electrumsv_sdk/builtin_components/electrumsv_server/local_tools.py
|
electrumsv/electrumsv-sdk
|
2d4b9474b2e2fc5518bba10684c5d5130ffb6328
|
[
"OML"
] | 62
|
2020-07-04T04:50:27.000Z
|
2021-08-19T21:06:10.000Z
|
electrumsv_sdk/builtin_components/electrumsv_server/local_tools.py
|
electrumsv/electrumsv-sdk
|
2d4b9474b2e2fc5518bba10684c5d5130ffb6328
|
[
"OML"
] | 3
|
2021-01-21T09:22:45.000Z
|
2021-06-12T10:16:03.000Z
|
import logging
import typing

from electrumsv_sdk.utils import get_directory_name

COMPONENT_NAME = get_directory_name(__file__)
logger = logging.getLogger(COMPONENT_NAME)

if typing.TYPE_CHECKING:
    from .electrumsv_server import Plugin


class LocalTools:
    """helper for operating on plugin-specific state (like source dir, port, datadir etc.)"""

    def __init__(self, plugin: 'Plugin'):
        self.plugin = plugin
        self.cli_inputs = plugin.cli_inputs
        self.logger = logging.getLogger(self.plugin.COMPONENT_NAME)

    def get_network_choice(self) -> str:
        network_options = [
            self.cli_inputs.cli_extension_args['regtest'],
            self.cli_inputs.cli_extension_args['testnet'],
            self.cli_inputs.cli_extension_args['scaling_testnet'],
            self.cli_inputs.cli_extension_args['main']
        ]
        assert len([is_selected for is_selected in network_options if is_selected]) in {0, 1}, \
            "can only select 1 network"
        network_choice = "regtest"
        if self.cli_inputs.cli_extension_args['testnet']:
            network_choice = "testnet"
        elif self.cli_inputs.cli_extension_args['scaling_testnet']:
            network_choice = "scaling-testnet"
        elif self.cli_inputs.cli_extension_args['main']:
            network_choice = "main"
        return network_choice
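The assert above enforces that at most one network flag is selected, defaulting to regtest. The same check can be sketched independently of the SDK (the flag dict below is hypothetical):

```python
# hypothetical parsed CLI flags, mirroring cli_extension_args above
cli_extension_args = {'regtest': False, 'testnet': True,
                      'scaling_testnet': False, 'main': False}

selected = [name for name, on in cli_extension_args.items() if on]
assert len(selected) in {0, 1}, "can only select 1 network"
# underscores become hyphens, matching the 'scaling-testnet' mapping above
network_choice = selected[0].replace('_', '-') if selected else "regtest"
print(network_choice)  # testnet
```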
| 34.175
| 96
| 0.686906
| 168
| 1,367
| 5.261905
| 0.357143
| 0.091629
| 0.117647
| 0.126697
| 0.311086
| 0.311086
| 0.278281
| 0.154977
| 0
| 0
| 0
| 0.002825
| 0.223116
| 1,367
| 39
| 97
| 35.051282
| 0.829567
| 0.060717
| 0
| 0
| 0
| 0
| 0.096244
| 0
| 0
| 0
| 0
| 0
| 0.034483
| 1
| 0.068966
| false
| 0
| 0.137931
| 0
| 0.275862
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f1be97cb28ba644933394a127fc92f299492f132
| 4,955
|
py
|
Python
|
cluster/core/include/python/http_parser.py
|
JarryShaw/broapt
|
5a6253af862cb618718d8fad69343a23ef2ac9e4
|
[
"BSD-3-Clause"
] | 3
|
2020-04-25T08:47:55.000Z
|
2020-11-04T11:18:21.000Z
|
cluster/core/include/python/http_parser.py
|
JarryShaw/broapt
|
5a6253af862cb618718d8fad69343a23ef2ac9e4
|
[
"BSD-3-Clause"
] | 11
|
2020-06-15T16:28:15.000Z
|
2021-11-29T17:11:07.000Z
|
source/include/python/http_parser.py
|
JarryShaw/broapt
|
5a6253af862cb618718d8fad69343a23ef2ac9e4
|
[
"BSD-3-Clause"
] | 3
|
2019-07-24T02:41:37.000Z
|
2021-12-06T09:38:58.000Z
|
# -*- coding: utf-8 -*-
# pylint: disable=all

import base64
import binascii
import contextlib
import math
import os
import textwrap
import time
import urllib.parse

from const import LOGS_PATH
from logparser import parse
from utils import is_nan, print_file
# from utils import IPAddressJSONEncoder, is_nan, print_file

# today
DATE = time.strftime('%Y-%m-%d')

# log path
LOGS = os.path.join(LOGS_PATH, 'http')
os.makedirs(LOGS, exist_ok=True)

# http log
HTTP_LOG = os.path.join(LOGS_PATH, 'http', f'{DATE}.log')

# macros
SEPARATOR = '\t'
SET_SEPARATOR = ','
EMPTY_FIELD = '(empty)'
UNSET_FIELD = 'NoDef'

FIELDS = ('scrip', 'ad', 'ts', 'url', 'ref', 'ua', 'dstip', 'cookie', 'src_port', 'json', 'method', 'body')
TYPES = ('addr', 'string', 'time', 'string', 'string', 'string', 'addr', 'string', 'port', 'vector[string]', 'string', 'string')


def hexlify(string):
    hex_string = binascii.hexlify(string.encode()).decode()
    return ''.join(map(lambda s: f'\\x{s}', textwrap.wrap(hex_string, 2)))


def init(HTTP_LOG):
    print_file(f'#separator {hexlify(SEPARATOR)}', file=HTTP_LOG)
    print_file(f'#set_separator{SEPARATOR}{SET_SEPARATOR}', file=HTTP_LOG)
    print_file(f'#empty_field{SEPARATOR}{EMPTY_FIELD}', file=HTTP_LOG)
    print_file(f'#unset_field{SEPARATOR}{UNSET_FIELD}', file=HTTP_LOG)
    print_file(f'#path{SEPARATOR}http', file=HTTP_LOG)
    print_file(f'#open{SEPARATOR}{time.strftime("%Y-%m-%d-%H-%M-%S")}', file=HTTP_LOG)
    print_file(f'#fields{SEPARATOR}{SEPARATOR.join(FIELDS)}', file=HTTP_LOG)
    print_file(f'#types{SEPARATOR}{SEPARATOR.join(TYPES)}', file=HTTP_LOG)


def make_url(line):
    host = line.get('host')
    if is_nan(host):
        host = str()
    uri = line.get('uri')
    if is_nan(uri):
        uri = str()
    url = urllib.parse.urljoin(host, uri)
    port = int(line['id.resp_p'])
    if port == 80:
        base = 'http://%s' % line['id.resp_h']
    else:
        base = 'http://%s:%s' % (line['id.resp_h'], line['id.resp_p'])
    return urllib.parse.urljoin(base, url)


def make_b64(data):
    if is_nan(data):
        return None
    return base64.b64encode(data.encode()).decode()


def make_json(line):
    client_headers = line.get('client_header_names')
    if is_nan(client_headers):
        client_headers = list()
    server_headers = line.get('server_header_names')
    if is_nan(server_headers):
        server_headers = list()
    headers = list()
    headers.extend(filter(lambda header: not is_nan(header), client_headers))
    headers.extend(filter(lambda header: not is_nan(header), server_headers))
    return ','.join(filter(lambda header: len(header), headers))


def beautify(obj):
    if obj is None:
        return UNSET_FIELD
    if isinstance(obj, str):
        return obj or EMPTY_FIELD
    if isinstance(obj, (set, list, tuple)):
        return SET_SEPARATOR.join(obj) or EMPTY_FIELD
    return str(obj) or EMPTY_FIELD


def generate(log_name):
    global DATE, HTTP_LOG
    date = time.strftime('%Y-%m-%d')
    if date != DATE:
        close()
        DATE = date
        HTTP_LOG = os.path.join(LOGS_PATH, 'http', f'{DATE}.log')
        init(HTTP_LOG)
    log_root = os.path.join(LOGS_PATH, log_name)
    http_log = os.path.join(log_root, 'http.log')
    if not os.path.isfile(http_log):
        return
    LOG_HTTP = parse(http_log)
    for (index, line) in LOG_HTTP.context.iterrows():
        # record = dict(
        #     srcip=line['id.orig_h'],
        #     ad=None,
        #     ts=math.floor((line['ts'] if LOG_HTTP.format == 'json' else line['ts'].timestamp()) * 1000),
        #     url=make_url(line),
        #     ref=make_b64(line.get('referrer')),
        #     ua=make_ua(line),
        #     dstip=line['id.resp_h'],
        #     cookie=make_cookie(line),
        #     src_port=int(line['id.orig_p']),
        #     # json=make_json(line),
        #     method=line['method'],
        #     body=line['post_body'],
        # )
        record = (
            # scrip
            line['id.orig_h'],
            # ad
            None,
            # ts
            math.floor((line['ts'] if LOG_HTTP.format == 'json' else line['ts'].timestamp()) * 1000),
            # url
            make_url(line),
            # ref
            make_b64(line.get('referrer')),
            # ua
            make_b64(line.get('user_agent')),
            # dstip
            line['id.resp_h'],
            # cookie
            make_b64(line.get('cookies')),
            # src_port
            int(line['id.orig_p']),
            # json
            make_json(line),
            # method
            line.get('method'),
            # body
            make_b64(line.get('post_body')),
        )
        # data = json.dumps(record, cls=IPAddressJSONEncoder)
        data = '\t'.join(map(lambda obj: beautify(obj), record))
        print_file(data, file=HTTP_LOG)


def close():
    print_file(f'#close{SEPARATOR}{time.strftime("%Y-%m-%d-%H-%M-%S")}', file=HTTP_LOG)
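`init()` writes a Zeek/Bro-style ASCII log preamble in which the separator character is declared in escaped-hex form. A standalone check of the `hexlify` helper (duplicated here so the snippet runs on its own):

```python
import binascii
import textwrap


def hexlify(string):
    # identical to the helper above: '\t' -> '\\x09'
    hex_string = binascii.hexlify(string.encode()).decode()
    return ''.join(map(lambda s: f'\\x{s}', textwrap.wrap(hex_string, 2)))


tab = '\t'
print('#separator ' + hexlify(tab))  # -> #separator \x09
```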
| 30.030303
| 128
| 0.594147
| 673
| 4,955
| 4.20951
| 0.210996
| 0.049418
| 0.038828
| 0.045182
| 0.340275
| 0.300741
| 0.261207
| 0.203318
| 0.203318
| 0.17155
| 0
| 0.007998
| 0.242987
| 4,955
| 164
| 129
| 30.213415
| 0.747267
| 0.134208
| 0
| 0.019608
| 0
| 0.019608
| 0.171368
| 0.070287
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078431
| false
| 0
| 0.107843
| 0
| 0.284314
| 0.107843
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|