| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
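The schema above pairs Stack-style repository metadata with per-file `qsc_*` quality signals; the records that follow are reproduced in the same flattened cell order. As a minimal sketch of how such a shard might be inspected (the Parquet file name and the filter thresholds below are assumptions, not part of this dump), one could load it with pandas and screen on a couple of the listed signals:

```python
import pandas as pd

# Hypothetical shard name; substitute the real file behind this dump.
df = pd.read_parquet("code_quality_signals.parquet")

# A quick look at a handful of the columns listed in the schema above.
cols = [
    "hexsha", "lang", "size", "avg_line_length",
    "qsc_code_frac_chars_comments_quality_signal",
    "qsc_codepython_frac_lines_print_quality_signal",
]
print(df[cols].head())

# Illustrative screen (thresholds are assumptions, not from the dataset):
# drop files dominated by repeated 5-grams or with few alphabetic chars.
mask = (
    (df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.5)
    & (df["qsc_code_frac_chars_alphabet_quality_signal"] > 0.25)
)
print(f"{mask.mean():.1%} of rows pass the filter")
```

Thresholding the duplicate-n-gram and alphabet-fraction signals is a common first pass for dropping boilerplate-heavy or binary-like files; the exact cutoffs would need tuning against the corpus.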
07e2d1b8a7c46e378298b64b296fe93ed48acbf5
| 1,828
|
py
|
Python
|
tests/integration/api/test_target_groups.py
|
lanz/Tenable.io-SDK-for-Python
|
e81a61c369ac103d1524b0898153a569536a131e
|
[
"MIT"
] | 90
|
2017-02-02T18:36:17.000Z
|
2022-02-05T17:58:50.000Z
|
tests/integration/api/test_target_groups.py
|
lanz/Tenable.io-SDK-for-Python
|
e81a61c369ac103d1524b0898153a569536a131e
|
[
"MIT"
] | 64
|
2017-02-03T00:54:00.000Z
|
2020-08-06T14:06:50.000Z
|
tests/integration/api/test_target_groups.py
|
lanz/Tenable.io-SDK-for-Python
|
e81a61c369ac103d1524b0898153a569536a131e
|
[
"MIT"
] | 49
|
2017-02-03T01:01:00.000Z
|
2022-02-25T13:25:28.000Z
|
import pytest
from tenable_io.api.target_groups import TargetListEditRequest
from tenable_io.api.models import TargetGroup, TargetGroupList
@pytest.mark.vcr()
def test_target_groups_create(new_target_group):
assert isinstance(new_target_group, TargetGroup), u'The `create` method did not return type `TargetGroup`.'
@pytest.mark.vcr()
def test_target_groups_details(client, new_target_group):
target_group = new_target_group
details = client.target_groups_api.details(target_group.id)
assert isinstance(details, TargetGroup), u'The `details` method did not return type `TargetGroup`.'
assert details.id == target_group.id, u'Expected the `details` response to match the requested target group.'
@pytest.mark.vcr()
def test_target_groups_list(client):
target_groups = client.target_groups_api.list()
assert isinstance(target_groups, TargetGroupList), u'The `list` method did not return type `TargetGroupList`.'
for group in target_groups.target_groups:
assert isinstance(group, TargetGroup), u'Expected a list of type `TargetGroup`.'
@pytest.mark.vcr()
def test_target_groups_delete(client, new_target_group):
assert client.target_groups_api.delete(new_target_group.id), u'The target group was not deleted.'
@pytest.mark.vcr()
def test_target_groups_edit(client, new_target_group):
target_group = new_target_group
edited_name = 'test_target_group_edit'
edited_group = client.target_groups_api.edit(TargetListEditRequest(name=edited_name), target_group.id)
assert isinstance(edited_group, TargetGroup), u'The `edit` method did not return type `TargetGroup`.'
assert edited_group.id == target_group.id, u'Expected the edited target group to match the requested target group.'
assert edited_group.name == edited_name, u'Expected the name to be updated.'
| 44.585366
| 119
| 0.784464
| 260
| 1,828
| 5.276923
| 0.188462
| 0.152332
| 0.081633
| 0.058309
| 0.439504
| 0.39723
| 0.340379
| 0.198251
| 0.198251
| 0
| 0
| 0
| 0.126368
| 1,828
| 40
| 120
| 45.7
| 0.859111
| 0
| 0
| 0.241379
| 0
| 0
| 0.261488
| 0.012035
| 0
| 0
| 0
| 0
| 0.310345
| 1
| 0.172414
| false
| 0
| 0.103448
| 0
| 0.275862
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
07e4a4e5e49ff1a01f2886f954c1382ba8822f86
| 9,352
|
py
|
Python
|
train.py
|
hui-won/KoBART_Project
|
105608997473abc669d777c588d56382efb524c6
|
[
"MIT"
] | 13
|
2020-12-30T15:09:08.000Z
|
2022-01-02T08:11:18.000Z
|
train.py
|
hui-won/KoBART_Project
|
105608997473abc669d777c588d56382efb524c6
|
[
"MIT"
] | 2
|
2021-11-21T11:49:31.000Z
|
2022-03-18T05:09:13.000Z
|
train.py
|
hui-won/KoBART_Project
|
105608997473abc669d777c588d56382efb524c6
|
[
"MIT"
] | 1
|
2021-06-15T01:24:18.000Z
|
2021-06-15T01:24:18.000Z
|
import argparse
import logging
import os
import numpy as np
import pandas as pd
import pytorch_lightning as pl
import torch
from pytorch_lightning import loggers as pl_loggers
from torch.utils.data import DataLoader, Dataset
from dataset import KoBARTSummaryDataset
from transformers import BartForConditionalGeneration, PreTrainedTokenizerFast
from transformers.optimization import AdamW, get_cosine_schedule_with_warmup
from kobart import get_pytorch_kobart_model, get_kobart_tokenizer
parser = argparse.ArgumentParser(description='KoBART translation')
parser.add_argument('--checkpoint_path',
type=str,
help='checkpoint path')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class ArgsBase():
@staticmethod
def add_model_specific_args(parent_parser):
parser = argparse.ArgumentParser(
parents=[parent_parser], add_help=False)
parser.add_argument('--train_file',
type=str,
default='data/train.tsv',
help='train file')
parser.add_argument('--test_file',
type=str,
default='data/test.tsv',
help='test file')
parser.add_argument('--batch_size',
type=int,
default=28,
help='')
parser.add_argument('--max_len',
type=int,
default=512,
help='max seq len')
return parser
class KobartSummaryModule(pl.LightningDataModule):
def __init__(self, train_file,
test_file, tok,
max_len=512,
batch_size=8,
num_workers=5):
super().__init__()
self.batch_size = batch_size
self.max_len = max_len
self.train_file_path = train_file
self.test_file_path = test_file
if tok is None:
self.tok = get_kobart_tokenizer()
else:
self.tok = tok
self.num_workers = num_workers
@staticmethod
def add_model_specific_args(parent_parser):
parser = argparse.ArgumentParser(
parents=[parent_parser], add_help=False)
parser.add_argument('--num_workers',
type=int,
default=5,
help='num of worker for dataloader')
return parser
# OPTIONAL, called for every GPU/machine (assigning state is OK)
def setup(self, stage):
# split dataset
self.train = KoBARTSummaryDataset(self.train_file_path,
self.tok,
self.max_len)
self.test = KoBARTSummaryDataset(self.test_file_path,
self.tok,
self.max_len)
def train_dataloader(self):
train = DataLoader(self.train,
batch_size=self.batch_size,
num_workers=self.num_workers, shuffle=True)
return train
def val_dataloader(self):
val = DataLoader(self.test,
batch_size=self.batch_size,
num_workers=self.num_workers, shuffle=False)
return val
def test_dataloader(self):
test = DataLoader(self.test,
batch_size=self.batch_size,
num_workers=self.num_workers, shuffle=False)
return test
class Base(pl.LightningModule):
def __init__(self, hparams, **kwargs) -> None:
super(Base, self).__init__()
self.hparams = hparams
@staticmethod
def add_model_specific_args(parent_parser):
# add model specific args
parser = argparse.ArgumentParser(
parents=[parent_parser], add_help=False)
parser.add_argument('--batch-size',
type=int,
default=14,
help='batch size for training (default: 14)')
parser.add_argument('--lr',
type=float,
default=3e-5,
help='The initial learning rate')
parser.add_argument('--warmup_ratio',
type=float,
default=0.1,
help='warmup ratio')
parser.add_argument('--model_path',
type=str,
default=None,
help='kobart model path')
return parser
def configure_optimizers(self):
# Prepare optimizer
param_optimizer = list(self.model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(
nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(
nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters,
lr=self.hparams.lr, correct_bias=False)
# warm up lr
num_workers = (self.hparams.gpus if self.hparams.gpus is not None else 1) * (self.hparams.num_nodes if self.hparams.num_nodes is not None else 1)
data_len = len(self.train_dataloader().dataset)
logging.info(f'number of workers {num_workers}, data length {data_len}')
num_train_steps = int(data_len / (self.hparams.batch_size * num_workers) * self.hparams.max_epochs)
logging.info(f'num_train_steps : {num_train_steps}')
num_warmup_steps = int(num_train_steps * self.hparams.warmup_ratio)
logging.info(f'num_warmup_steps : {num_warmup_steps}')
scheduler = get_cosine_schedule_with_warmup(
optimizer,
num_warmup_steps=num_warmup_steps, num_training_steps=num_train_steps)
lr_scheduler = {'scheduler': scheduler,
'monitor': 'loss', 'interval': 'step',
'frequency': 1}
return [optimizer], [lr_scheduler]
class KoBARTConditionalGeneration(Base):
def __init__(self, hparams, **kwargs):
super(KoBARTConditionalGeneration, self).__init__(hparams, **kwargs)
self.model = BartForConditionalGeneration.from_pretrained(get_pytorch_kobart_model())
self.model.train()
self.bos_token = '<s>'
self.eos_token = '</s>'
self.pad_token_id = 0
self.tokenizer = get_kobart_tokenizer()
def forward(self, inputs):
attention_mask = inputs['input_ids'].ne(self.pad_token_id).float()
decoder_attention_mask = inputs['decoder_input_ids'].ne(self.pad_token_id).float()
return self.model(input_ids=inputs['input_ids'],
attention_mask=attention_mask,
decoder_input_ids=inputs['decoder_input_ids'],
decoder_attention_mask=decoder_attention_mask,
labels=inputs['labels'], return_dict=True)
def training_step(self, batch, batch_idx):
outs = self(batch)
loss = outs.loss
self.log('train_loss', loss, prog_bar=True)
return loss
def validation_step(self, batch, batch_idx):
outs = self(batch)
loss = outs['loss']
return (loss)
def validation_epoch_end(self, outputs):
losses = []
for loss in outputs:
losses.append(loss)
self.log('val_loss', torch.stack(losses).mean(), prog_bar=True)
if __name__ == '__main__':
parser = Base.add_model_specific_args(parser)
parser = ArgsBase.add_model_specific_args(parser)
parser = KobartSummaryModule.add_model_specific_args(parser)
parser = pl.Trainer.add_argparse_args(parser)
args = parser.parse_args()
logging.info(args)
model = KoBARTConditionalGeneration(args)
dm = KobartSummaryModule(args.train_file,
args.test_file,
None,
max_len=args.max_len,
batch_size=args.batch_size,
num_workers=args.num_workers)
checkpoint_callback = pl.callbacks.ModelCheckpoint(monitor='val_loss',
dirpath=args.default_root_dir,
filename='model_chp/{epoch:02d}-{val_loss:.3f}',
verbose=True,
save_last=True,
mode='min',
save_top_k=-1,
prefix='kobart_translation')
tb_logger = pl_loggers.TensorBoardLogger(os.path.join(args.default_root_dir, 'tb_logs'))
lr_logger = pl.callbacks.LearningRateMonitor()
trainer = pl.Trainer.from_argparse_args(args, logger=tb_logger,
callbacks=[checkpoint_callback, lr_logger])
trainer.fit(model, dm)
| 39.627119
| 153
| 0.559773
| 971
| 9,352
| 5.124614
| 0.223481
| 0.02713
| 0.034164
| 0.028135
| 0.270498
| 0.226889
| 0.196342
| 0.186294
| 0.152532
| 0.140474
| 0
| 0.005295
| 0.353828
| 9,352
| 235
| 154
| 39.795745
| 0.818137
| 0.013794
| 0
| 0.183673
| 0
| 0
| 0.079201
| 0.003906
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076531
| false
| 0
| 0.066327
| 0
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
07e4f4a4fe370f9d6aeaae97b9bd2ee2d9364898
| 11,945
|
py
|
Python
|
homeassistant/components/shelly/sensor.py
|
RavensburgOP/core
|
0ea76e848b182ca0ebb0fdb54558f7f733898ad7
|
[
"Apache-2.0"
] | 1
|
2019-08-28T00:54:28.000Z
|
2019-08-28T00:54:28.000Z
|
homeassistant/components/shelly/sensor.py
|
RavensburgOP/core
|
0ea76e848b182ca0ebb0fdb54558f7f733898ad7
|
[
"Apache-2.0"
] | 71
|
2020-07-14T09:08:56.000Z
|
2022-03-31T06:01:47.000Z
|
homeassistant/components/shelly/sensor.py
|
Vaarlion/core
|
f3de8b9f28de01abf72c0f5bb0b457eb1841f201
|
[
"Apache-2.0"
] | null | null | null |
"""Sensor for Shelly."""
from __future__ import annotations
from datetime import timedelta
import logging
from typing import Final, cast
import aioshelly
from homeassistant.components import sensor
from homeassistant.components.sensor import SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONCENTRATION_PARTS_PER_MILLION,
DEGREE,
ELECTRIC_CURRENT_AMPERE,
ELECTRIC_POTENTIAL_VOLT,
ENERGY_KILO_WATT_HOUR,
LIGHT_LUX,
PERCENTAGE,
POWER_WATT,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from homeassistant.util import dt
from . import ShellyDeviceWrapper
from .const import LAST_RESET_NEVER, LAST_RESET_UPTIME, SHAIR_MAX_WORK_HOURS
from .entity import (
BlockAttributeDescription,
RestAttributeDescription,
ShellyBlockAttributeEntity,
ShellyRestAttributeEntity,
ShellySleepingBlockAttributeEntity,
async_setup_entry_attribute_entities,
async_setup_entry_rest,
)
from .utils import get_device_uptime, temperature_unit
_LOGGER: Final = logging.getLogger(__name__)
SENSORS: Final = {
("device", "battery"): BlockAttributeDescription(
name="Battery",
unit=PERCENTAGE,
device_class=sensor.DEVICE_CLASS_BATTERY,
state_class=sensor.STATE_CLASS_MEASUREMENT,
removal_condition=lambda settings, _: settings.get("external_power") == 1,
),
("device", "deviceTemp"): BlockAttributeDescription(
name="Device Temperature",
unit=temperature_unit,
value=lambda value: round(value, 1),
device_class=sensor.DEVICE_CLASS_TEMPERATURE,
state_class=sensor.STATE_CLASS_MEASUREMENT,
default_enabled=False,
),
("emeter", "current"): BlockAttributeDescription(
name="Current",
unit=ELECTRIC_CURRENT_AMPERE,
value=lambda value: value,
device_class=sensor.DEVICE_CLASS_CURRENT,
state_class=sensor.STATE_CLASS_MEASUREMENT,
),
("light", "power"): BlockAttributeDescription(
name="Power",
unit=POWER_WATT,
value=lambda value: round(value, 1),
device_class=sensor.DEVICE_CLASS_POWER,
state_class=sensor.STATE_CLASS_MEASUREMENT,
default_enabled=False,
),
("device", "power"): BlockAttributeDescription(
name="Power",
unit=POWER_WATT,
value=lambda value: round(value, 1),
device_class=sensor.DEVICE_CLASS_POWER,
state_class=sensor.STATE_CLASS_MEASUREMENT,
),
("emeter", "power"): BlockAttributeDescription(
name="Power",
unit=POWER_WATT,
value=lambda value: round(value, 1),
device_class=sensor.DEVICE_CLASS_POWER,
state_class=sensor.STATE_CLASS_MEASUREMENT,
),
("emeter", "voltage"): BlockAttributeDescription(
name="Voltage",
unit=ELECTRIC_POTENTIAL_VOLT,
value=lambda value: round(value, 1),
device_class=sensor.DEVICE_CLASS_VOLTAGE,
state_class=sensor.STATE_CLASS_MEASUREMENT,
),
("emeter", "powerFactor"): BlockAttributeDescription(
name="Power Factor",
unit=PERCENTAGE,
value=lambda value: round(value * 100, 1),
device_class=sensor.DEVICE_CLASS_POWER_FACTOR,
state_class=sensor.STATE_CLASS_MEASUREMENT,
),
("relay", "power"): BlockAttributeDescription(
name="Power",
unit=POWER_WATT,
value=lambda value: round(value, 1),
device_class=sensor.DEVICE_CLASS_POWER,
state_class=sensor.STATE_CLASS_MEASUREMENT,
),
("roller", "rollerPower"): BlockAttributeDescription(
name="Power",
unit=POWER_WATT,
value=lambda value: round(value, 1),
device_class=sensor.DEVICE_CLASS_POWER,
state_class=sensor.STATE_CLASS_MEASUREMENT,
),
("device", "energy"): BlockAttributeDescription(
name="Energy",
unit=ENERGY_KILO_WATT_HOUR,
value=lambda value: round(value / 60 / 1000, 2),
device_class=sensor.DEVICE_CLASS_ENERGY,
state_class=sensor.STATE_CLASS_MEASUREMENT,
last_reset=LAST_RESET_UPTIME,
),
("emeter", "energy"): BlockAttributeDescription(
name="Energy",
unit=ENERGY_KILO_WATT_HOUR,
value=lambda value: round(value / 1000, 2),
device_class=sensor.DEVICE_CLASS_ENERGY,
state_class=sensor.STATE_CLASS_MEASUREMENT,
last_reset=LAST_RESET_NEVER,
),
("emeter", "energyReturned"): BlockAttributeDescription(
name="Energy Returned",
unit=ENERGY_KILO_WATT_HOUR,
value=lambda value: round(value / 1000, 2),
device_class=sensor.DEVICE_CLASS_ENERGY,
state_class=sensor.STATE_CLASS_MEASUREMENT,
last_reset=LAST_RESET_NEVER,
),
("light", "energy"): BlockAttributeDescription(
name="Energy",
unit=ENERGY_KILO_WATT_HOUR,
value=lambda value: round(value / 60 / 1000, 2),
device_class=sensor.DEVICE_CLASS_ENERGY,
state_class=sensor.STATE_CLASS_MEASUREMENT,
default_enabled=False,
last_reset=LAST_RESET_UPTIME,
),
("relay", "energy"): BlockAttributeDescription(
name="Energy",
unit=ENERGY_KILO_WATT_HOUR,
value=lambda value: round(value / 60 / 1000, 2),
device_class=sensor.DEVICE_CLASS_ENERGY,
state_class=sensor.STATE_CLASS_MEASUREMENT,
last_reset=LAST_RESET_UPTIME,
),
("roller", "rollerEnergy"): BlockAttributeDescription(
name="Energy",
unit=ENERGY_KILO_WATT_HOUR,
value=lambda value: round(value / 60 / 1000, 2),
device_class=sensor.DEVICE_CLASS_ENERGY,
state_class=sensor.STATE_CLASS_MEASUREMENT,
last_reset=LAST_RESET_UPTIME,
),
("sensor", "concentration"): BlockAttributeDescription(
name="Gas Concentration",
unit=CONCENTRATION_PARTS_PER_MILLION,
icon="mdi:gauge",
state_class=sensor.STATE_CLASS_MEASUREMENT,
),
("sensor", "extTemp"): BlockAttributeDescription(
name="Temperature",
unit=temperature_unit,
value=lambda value: round(value, 1),
device_class=sensor.DEVICE_CLASS_TEMPERATURE,
state_class=sensor.STATE_CLASS_MEASUREMENT,
available=lambda block: cast(bool, block.extTemp != 999),
),
("sensor", "humidity"): BlockAttributeDescription(
name="Humidity",
unit=PERCENTAGE,
value=lambda value: round(value, 1),
device_class=sensor.DEVICE_CLASS_HUMIDITY,
state_class=sensor.STATE_CLASS_MEASUREMENT,
available=lambda block: cast(bool, block.extTemp != 999),
),
("sensor", "luminosity"): BlockAttributeDescription(
name="Luminosity",
unit=LIGHT_LUX,
device_class=sensor.DEVICE_CLASS_ILLUMINANCE,
state_class=sensor.STATE_CLASS_MEASUREMENT,
),
("sensor", "tilt"): BlockAttributeDescription(
name="Tilt",
unit=DEGREE,
icon="mdi:angle-acute",
state_class=sensor.STATE_CLASS_MEASUREMENT,
),
("relay", "totalWorkTime"): BlockAttributeDescription(
name="Lamp Life",
unit=PERCENTAGE,
icon="mdi:progress-wrench",
value=lambda value: round(100 - (value / 3600 / SHAIR_MAX_WORK_HOURS), 1),
extra_state_attributes=lambda block: {
"Operational hours": round(block.totalWorkTime / 3600, 1)
},
),
("adc", "adc"): BlockAttributeDescription(
name="ADC",
unit=ELECTRIC_POTENTIAL_VOLT,
value=lambda value: round(value, 1),
device_class=sensor.DEVICE_CLASS_VOLTAGE,
state_class=sensor.STATE_CLASS_MEASUREMENT,
),
("sensor", "sensorOp"): BlockAttributeDescription(
name="Operation",
icon="mdi:cog-transfer",
value=lambda value: value,
extra_state_attributes=lambda block: {"self_test": block.selfTest},
),
}
REST_SENSORS: Final = {
"rssi": RestAttributeDescription(
name="RSSI",
unit=SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
value=lambda status, _: status["wifi_sta"]["rssi"],
device_class=sensor.DEVICE_CLASS_SIGNAL_STRENGTH,
state_class=sensor.STATE_CLASS_MEASUREMENT,
default_enabled=False,
),
"uptime": RestAttributeDescription(
name="Uptime",
value=get_device_uptime,
device_class=sensor.DEVICE_CLASS_TIMESTAMP,
default_enabled=False,
),
}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up sensors for device."""
if config_entry.data["sleep_period"]:
await async_setup_entry_attribute_entities(
hass, config_entry, async_add_entities, SENSORS, ShellySleepingSensor
)
else:
await async_setup_entry_attribute_entities(
hass, config_entry, async_add_entities, SENSORS, ShellySensor
)
await async_setup_entry_rest(
hass, config_entry, async_add_entities, REST_SENSORS, ShellyRestSensor
)
class ShellySensor(ShellyBlockAttributeEntity, SensorEntity):
"""Represent a shelly sensor."""
def __init__(
self,
wrapper: ShellyDeviceWrapper,
block: aioshelly.Block,
attribute: str,
description: BlockAttributeDescription,
) -> None:
"""Initialize sensor."""
super().__init__(wrapper, block, attribute, description)
self._last_value: float | None = None
if description.last_reset == LAST_RESET_NEVER:
self._attr_last_reset = dt.utc_from_timestamp(0)
elif description.last_reset == LAST_RESET_UPTIME:
self._attr_last_reset = (
dt.utcnow() - timedelta(seconds=wrapper.device.status["uptime"])
).replace(second=0, microsecond=0)
@property
def state(self) -> StateType:
"""Return value of sensor."""
if (
self.description.last_reset == LAST_RESET_UPTIME
and self.attribute_value is not None
):
value = cast(float, self.attribute_value)
if self._last_value and self._last_value > value:
self._attr_last_reset = dt.utcnow().replace(second=0, microsecond=0)
_LOGGER.info("Energy reset detected for entity %s", self.name)
self._last_value = value
return self.attribute_value
@property
def state_class(self) -> str | None:
"""State class of sensor."""
return self.description.state_class
@property
def unit_of_measurement(self) -> str | None:
"""Return unit of sensor."""
return cast(str, self._unit)
class ShellyRestSensor(ShellyRestAttributeEntity, SensorEntity):
"""Represent a shelly REST sensor."""
@property
def state(self) -> StateType:
"""Return value of sensor."""
return self.attribute_value
@property
def state_class(self) -> str | None:
"""State class of sensor."""
return self.description.state_class
@property
def unit_of_measurement(self) -> str | None:
"""Return unit of sensor."""
return self.description.unit
class ShellySleepingSensor(ShellySleepingBlockAttributeEntity, SensorEntity):
"""Represent a shelly sleeping sensor."""
@property
def state(self) -> StateType:
"""Return value of sensor."""
if self.block is not None:
return self.attribute_value
return self.last_state
@property
def state_class(self) -> str | None:
"""State class of sensor."""
return self.description.state_class
@property
def unit_of_measurement(self) -> str | None:
"""Return unit of sensor."""
return cast(str, self._unit)
| 34.226361
| 84
| 0.669904
| 1,244
| 11,945
| 6.157556
| 0.152733
| 0.071802
| 0.048042
| 0.063055
| 0.566449
| 0.517102
| 0.487728
| 0.45248
| 0.45248
| 0.437728
| 0
| 0.008376
| 0.230389
| 11,945
| 348
| 85
| 34.324713
| 0.824867
| 0.028631
| 0
| 0.508251
| 0
| 0
| 0.060957
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033003
| false
| 0
| 0.056106
| 0
| 0.132013
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
07e700a1cf3d3463190722de4956e44165a923c7
| 1,969
|
py
|
Python
|
InvenTree/InvenTree/management/commands/rebuild_thumbnails.py
|
rocheparadox/InvenTree
|
76c1e936db78424e0d6953c4062eb32863e302c6
|
[
"MIT"
] | 656
|
2017-03-29T22:06:14.000Z
|
2022-03-30T11:23:52.000Z
|
InvenTree/InvenTree/management/commands/rebuild_thumbnails.py
|
rocheparadox/InvenTree
|
76c1e936db78424e0d6953c4062eb32863e302c6
|
[
"MIT"
] | 1,545
|
2017-04-10T23:26:04.000Z
|
2022-03-31T18:32:10.000Z
|
InvenTree/InvenTree/management/commands/rebuild_thumbnails.py
|
fablabbcn/InvenTree
|
1d7ea7716cc96c6ffd151c822b01cd1fb5dcfecd
|
[
"MIT"
] | 196
|
2017-03-28T03:06:21.000Z
|
2022-03-28T11:53:29.000Z
|
"""
Custom management command to rebuild thumbnail images
- May be required after importing a new dataset, for example
"""
import os
import logging
from PIL import UnidentifiedImageError
from django.core.management.base import BaseCommand
from django.conf import settings
from django.db.utils import OperationalError, ProgrammingError
from company.models import Company
from part.models import Part
logger = logging.getLogger("inventree-thumbnails")
class Command(BaseCommand):
"""
Rebuild all thumbnail images
"""
def rebuild_thumbnail(self, model):
"""
Rebuild the thumbnail specified by the "image" field of the provided model
"""
if not model.image:
return
img = model.image
url = img.thumbnail.name
loc = os.path.join(settings.MEDIA_ROOT, url)
if not os.path.exists(loc):
logger.info(f"Generating thumbnail image for '{img}'")
try:
model.image.render_variations(replace=False)
except FileNotFoundError:
logger.error(f"ERROR: Image file '{img}' is missing")
except UnidentifiedImageError:
logger.error(f"ERROR: Image file '{img}' is not a valid image")
def handle(self, *args, **kwargs):
logger.setLevel(logging.INFO)
logger.info("Rebuilding Part thumbnails")
for part in Part.objects.exclude(image=None):
try:
self.rebuild_thumbnail(part)
except (OperationalError, ProgrammingError):
logger.error("ERROR: Database read error.")
break
logger.info("Rebuilding Company thumbnails")
for company in Company.objects.exclude(image=None):
try:
self.rebuild_thumbnail(company)
except (OperationalError, ProgrammingError):
logger.error("ERROR: abase read error.")
break
| 27.732394
| 82
| 0.630269
| 214
| 1,969
| 5.775701
| 0.429907
| 0.05178
| 0.019417
| 0.027508
| 0.211974
| 0.211974
| 0.124595
| 0.124595
| 0
| 0
| 0
| 0
| 0.285932
| 1,969
| 70
| 83
| 28.128571
| 0.87909
| 0.111224
| 0
| 0.175
| 0
| 0
| 0.144366
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.2
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
07e994b02286199ddba77a78c4751e4388520310
| 267
|
py
|
Python
|
examples/show_artist.py
|
jimcortez/spotipy_twisted
|
49ff2a4a5a5a9b3184b22adbe068eb91a38f3102
|
[
"MIT"
] | null | null | null |
examples/show_artist.py
|
jimcortez/spotipy_twisted
|
49ff2a4a5a5a9b3184b22adbe068eb91a38f3102
|
[
"MIT"
] | null | null | null |
examples/show_artist.py
|
jimcortez/spotipy_twisted
|
49ff2a4a5a5a9b3184b22adbe068eb91a38f3102
|
[
"MIT"
] | null | null | null |
# shows artist info for a URN or URL
import spotipy_twisted
import sys
import pprint
if len(sys.argv) > 1:
urn = sys.argv[1]
else:
urn = 'spotify:artist:3jOstUTkEu2JkjvRdBA5Gu'
sp = spotipy_twisted.Spotify()
artist = sp.artist(urn)
pprint.pprint(artist)
| 15.705882
| 49
| 0.726592
| 40
| 267
| 4.8
| 0.525
| 0.145833
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022624
| 0.172285
| 267
| 16
| 50
| 16.6875
| 0.846154
| 0.127341
| 0
| 0
| 0
| 0
| 0.160173
| 0.160173
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.3
| 0
| 0.3
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
07ebcae81863c1e60bd65e743d7f7961451a23cf
| 2,895
|
py
|
Python
|
code_doc/views/author_views.py
|
coordt/code_doc
|
c2fac64ac3ad61952a2d9f036727166741f9aff9
|
[
"BSD-3-Clause"
] | null | null | null |
code_doc/views/author_views.py
|
coordt/code_doc
|
c2fac64ac3ad61952a2d9f036727166741f9aff9
|
[
"BSD-3-Clause"
] | null | null | null |
code_doc/views/author_views.py
|
coordt/code_doc
|
c2fac64ac3ad61952a2d9f036727166741f9aff9
|
[
"BSD-3-Clause"
] | null | null | null |
from django.shortcuts import render
from django.http import Http404
from django.views.generic.edit import UpdateView
from django.views.generic import ListView, View
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.utils.decorators import method_decorator
import logging
from ..models.projects import Project
from ..models.authors import Author
from ..forms import AuthorForm
from .permission_helpers import PermissionOnObjectViewMixin
# logger for this file
logger = logging.getLogger(__name__)
class AuthorListView(ListView):
"""A generic view of the authors in a list"""
paginate_by = 10
template_name = "code_doc/authors/author_list.html"
context_object_name = "authors"
model = Author
def detail_author(request, author_id):
try:
author = Author.objects.get(pk=author_id)
except Author.DoesNotExist:
raise Http404
project_list = Project.objects.filter(authors=author)
coauthor_list = (
Author.objects.filter(project__in=project_list).distinct().exclude(pk=author_id)
)
return render(
request,
"code_doc/authors/author_details.html",
{
"project_list": project_list,
"author": author,
"user": request.user,
"coauthor_list": coauthor_list,
},
)
class AuthorUpdateView(PermissionOnObjectViewMixin, UpdateView):
"""View for editing information about an Author
.. note:: in order to be able to edit an Author, the user should have the
'code_doc.author_edit' permission on the Author object.
"""
form_class = AuthorForm
model = Author
permissions_on_object = ("code_doc.author_edit",)
permissions_object_getter = "get_author_from_request"
template_name = "code_doc/authors/author_edit.html"
pk_url_kwarg = "author_id"
def get_author_from_request(self, request, *args, **kwargs):
# TODO check if needed
try:
return Author.objects.get(pk=kwargs["author_id"])
except Author.DoesNotExist:
logger.warning(
"[AuthorUpdateView] non existent Author with id %s", kwargs["author_id"]
)
return None
class MaintainerProfileView(View):
"""Manages the views associated to the maintainers"""
@method_decorator(login_required)
def get(self, request, maintainer_id):
try:
maintainer = User.objects.get(pk=maintainer_id)
except Project.DoesNotExist:
raise Http404
projects = Project.objects.filter(administrators=maintainer)
return render(
request,
"code_doc/maintainer_details.html",
{"projects": projects, "maintainer": maintainer},
)
@method_decorator(login_required)
def post(self, request):
pass
| 28.382353
| 88
| 0.680484
| 332
| 2,895
| 5.756024
| 0.337349
| 0.03663
| 0.021978
| 0.031397
| 0.126635
| 0.03349
| 0
| 0
| 0
| 0
| 0
| 0.004966
| 0.234888
| 2,895
| 101
| 89
| 28.663366
| 0.857788
| 0.109154
| 0
| 0.220588
| 0
| 0
| 0.122986
| 0.06169
| 0
| 0
| 0
| 0.009901
| 0
| 1
| 0.058824
| false
| 0.014706
| 0.176471
| 0
| 0.485294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
07eea84b8f7990a608b685c1a60f3250095ce8a2
| 1,271
|
py
|
Python
|
mingpt/lr_decay.py
|
asigalov61/minGPT
|
b4f8d57aaf1bb5c64d480f8005b73d39b075ae4b
|
[
"MIT"
] | 18
|
2020-09-10T02:29:38.000Z
|
2022-03-16T03:17:35.000Z
|
mingpt/lr_decay.py
|
asigalov61/minGPT
|
b4f8d57aaf1bb5c64d480f8005b73d39b075ae4b
|
[
"MIT"
] | null | null | null |
mingpt/lr_decay.py
|
asigalov61/minGPT
|
b4f8d57aaf1bb5c64d480f8005b73d39b075ae4b
|
[
"MIT"
] | 7
|
2020-08-20T16:35:38.000Z
|
2022-01-10T21:57:49.000Z
|
import math
import pytorch_lightning as pl
class LearningRateDecayCallback(pl.Callback):
def __init__(self, learning_rate, warmup_tokens=375e6, final_tokens=260e9, lr_decay=True):
super().__init__()
self.learning_rate = learning_rate
self.tokens = 0
self.final_tokens = final_tokens
self.lr_decay = lr_decay
self.warmup_tokens = warmup_tokens
def on_train_batch_end(self, trainer, pl_module, batch, batch_idx, dataloader_idx):
optimizer = trainer.optimizers[0]
_, y = batch
if self.lr_decay:
self.tokens += (y >= 0).sum() # number of tokens processed this step (i.e. label is not -100)
if self.tokens < self.warmup_tokens:
# linear warmup
lr_mult = float(self.tokens) / float(max(1, self.warmup_tokens))
else:
# cosine learning rate decay
progress = float(self.tokens - self.warmup_tokens) / float(
max(1, self.final_tokens - self.warmup_tokens))
lr_mult = max(0.1, 0.5 * (1.0 + math.cos(math.pi * progress)))
lr = self.learning_rate * lr_mult
for param_group in optimizer.param_groups:
param_group['lr'] = lr
| 41
| 106
| 0.609756
| 162
| 1,271
| 4.530864
| 0.395062
| 0.114441
| 0.108992
| 0.089918
| 0.114441
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024581
| 0.29583
| 1,271
| 31
| 107
| 41
| 0.795531
| 0.080252
| 0
| 0
| 0
| 0
| 0.001715
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.083333
| 0
| 0.208333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
07ef68929a367d76f0cb572e51ac36b254e815b0
| 40,518
|
py
|
Python
|
apprise/config/ConfigBase.py
|
calvinbui/apprise
|
a5510790baf5aa1d74afabab25ff57d6b2304d56
|
[
"MIT"
] | null | null | null |
apprise/config/ConfigBase.py
|
calvinbui/apprise
|
a5510790baf5aa1d74afabab25ff57d6b2304d56
|
[
"MIT"
] | null | null | null |
apprise/config/ConfigBase.py
|
calvinbui/apprise
|
a5510790baf5aa1d74afabab25ff57d6b2304d56
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 Chris Caron <lead2gold@gmail.com>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import re
import six
import yaml
import time
from .. import plugins
from ..AppriseAsset import AppriseAsset
from ..URLBase import URLBase
from ..common import ConfigFormat
from ..common import CONFIG_FORMATS
from ..common import ContentIncludeMode
from ..utils import GET_SCHEMA_RE
from ..utils import parse_list
from ..utils import parse_bool
from ..utils import parse_urls
from . import SCHEMA_MAP
# Test whether token is valid or not
VALID_TOKEN = re.compile(
r'(?P<token>[a-z0-9][a-z0-9_]+)', re.I)
class ConfigBase(URLBase):
"""
This is the base class for all supported configuration sources
"""
# The Default Encoding to use if not otherwise detected
encoding = 'utf-8'
# The default expected configuration format unless otherwise
# detected by the sub-modules
default_config_format = ConfigFormat.TEXT
# This is only set if the user overrides the config format on the URL
# this should always initialize itself as None
config_format = None
# Don't read any more of this amount of data into memory as there is no
# reason we should be reading in more. This is more of a safeguard than
# anything else. 128KB (131072B)
max_buffer_size = 131072
# By default all configuration is not includable using the 'include'
# line found in configuration files.
allow_cross_includes = ContentIncludeMode.NEVER
# the config path manages the handling of relative include
config_path = os.getcwd()
def __init__(self, cache=True, recursion=0, insecure_includes=False,
**kwargs):
"""
Initialize some general logging and common server arguments that will
keep things consistent when working with the configurations that
inherit this class.
By default we cache our responses so that subsequent calls do not
cause the content to be retrieved again. For local file references
this makes no difference at all. But for remote content, this does
mean more than one call can be made to retrieve the (same) data. This
method can be somewhat inefficient if disabled. Only disable caching
if you understand the consequences.
You can alternatively set the cache value to an int identifying the
number of seconds the previously retrieved content can exist for before it
should be considered expired.
recursion defines how deep we recursively handle entries that use the
`include` keyword. This keyword requires us to fetch more configuration
from another source and add it to our existing compilation. If the
file we remotely retrieve also has an `include` reference, we will only
advance through it if recursion is set to 2 deep. If set to zero
it is off. There is no limit to how high you set this value. It would
be recommended to keep it low if you do intend to use it.
insecure_includes is disabled by default. When set to True, all
Apprise Config files marked to be in STRICT mode are treated as being
in ALWAYS mode.
Take a file:// based configuration for example, only a file:// based
configuration can include another file:// based one. because it is set
to STRICT mode. If an http:// based configuration file attempted to
include a file:// one it would fail. However, this include would be
possible if insecure_includes is set to True.
There are cases where a self hosting apprise developer may wish to load
configuration from memory (in a string format) that contains 'include'
entries (even file:// based ones). In these circumstances if you want
these 'include' entries to be honored, this value must be set to True.
"""
super(ConfigBase, self).__init__(**kwargs)
# Tracks the time the content was last retrieved on. This plays a role
# for cases where we are not caching our response and are required to
# re-retrieve our settings.
self._cached_time = None
# Tracks previously loaded content for speed
self._cached_servers = None
# Initialize our recursion value
self.recursion = recursion
# Initialize our insecure_includes flag
self.insecure_includes = insecure_includes
if 'encoding' in kwargs:
# Store the encoding
self.encoding = kwargs.get('encoding')
if 'format' in kwargs \
and isinstance(kwargs['format'], six.string_types):
# Store the enforced config format
self.config_format = kwargs.get('format').lower()
if self.config_format not in CONFIG_FORMATS:
# Simple error checking
err = 'An invalid config format ({}) was specified.'.format(
self.config_format)
self.logger.warning(err)
raise TypeError(err)
# Set our cache flag; it can be True or a (positive) integer
try:
self.cache = cache if isinstance(cache, bool) else int(cache)
if self.cache < 0:
err = 'A negative cache value ({}) was specified.'.format(
cache)
self.logger.warning(err)
raise TypeError(err)
except (ValueError, TypeError):
err = 'An invalid cache value ({}) was specified.'.format(cache)
self.logger.warning(err)
raise TypeError(err)
return
def servers(self, asset=None, **kwargs):
"""
Reads the loaded configuration and returns all of the services
that could be parsed and loaded.
"""
if not self.expired():
# We already have cached results to return; use them
return self._cached_servers
# Our cached response object
self._cached_servers = list()
# read() causes the child class to do whatever it takes for the
# config plugin to load the data source and return unparsed content
# None is returned if there was an error or simply no data
content = self.read(**kwargs)
if not isinstance(content, six.string_types):
# Set the time our content was cached at
self._cached_time = time.time()
# Nothing more to do; return our empty cache list
return self._cached_servers
# Our configuration format uses a default if one wasn't detected
# or enforced.
config_format = \
self.default_config_format \
if self.config_format is None else self.config_format
# Dynamically load our parse_ function based on our config format
fn = getattr(ConfigBase, 'config_parse_{}'.format(config_format))
# Initialize our asset object
asset = asset if isinstance(asset, AppriseAsset) else self.asset
# Execute our config parse function which always returns a tuple
# of our servers and our configuration
servers, configs = fn(content=content, asset=asset)
self._cached_servers.extend(servers)
# Configuration files were detected; recursively populate them
# If we have been configured to do so
for url in configs:
if self.recursion > 0:
# Attempt to acquire the schema at the very least to allow
# our configuration based urls.
schema = GET_SCHEMA_RE.match(url)
if schema is None:
# Plan B is to assume we're dealing with a file
schema = 'file'
if not os.path.isabs(url):
# We're dealing with a relative path; prepend
# our current config path
url = os.path.join(self.config_path, url)
url = '{}://{}'.format(schema, URLBase.quote(url))
else:
# Ensure our schema is always in lower case
schema = schema.group('schema').lower()
# Some basic validation
if schema not in SCHEMA_MAP:
ConfigBase.logger.warning(
'Unsupported include schema {}.'.format(schema))
continue
# Parse our url details of the server object as dictionary
# containing all of the information parsed from our URL
results = SCHEMA_MAP[schema].parse_url(url)
if not results:
# Failed to parse the server URL
self.logger.warning(
'Unparseable include URL {}'.format(url))
continue
# Handle cross inclusion based on allow_cross_includes rules
if (SCHEMA_MAP[schema].allow_cross_includes ==
ContentIncludeMode.STRICT
and schema not in self.schemas()
and not self.insecure_includes) or \
SCHEMA_MAP[schema].allow_cross_includes == \
ContentIncludeMode.NEVER:
# Prevent the loading if insecure base protocols
ConfigBase.logger.warning(
'Including {}:// based configuration is prohibited. '
'Ignoring URL {}'.format(schema, url))
continue
# Prepare our Asset Object
results['asset'] = asset
# No cache is required because we're just lumping this in
# and associating it with the cache value we've already
# declared (prior to our recursion)
results['cache'] = False
# Recursion can never be parsed from the URL; we decrement
# it one level
results['recursion'] = self.recursion - 1
# Insecure Includes flag can never be parsed from the URL
results['insecure_includes'] = self.insecure_includes
try:
# Attempt to create an instance of our plugin using the
# parsed URL information
cfg_plugin = SCHEMA_MAP[results['schema']](**results)
except Exception as e:
# the arguments are invalid or can not be used.
self.logger.warning(
'Could not load include URL: {}'.format(url))
self.logger.debug('Loading Exception: {}'.format(str(e)))
continue
# if we reach here, we can now add this servers found
# in this configuration file to our list
self._cached_servers.extend(
cfg_plugin.servers(asset=asset))
# We no longer need our configuration object
del cfg_plugin
else:
self.logger.debug(
'Recursion limit reached; ignoring Include URL: %s' % url)
if self._cached_servers:
self.logger.info('Loaded {} entries from {}'.format(
len(self._cached_servers), self.url()))
else:
self.logger.warning(
'Failed to load Apprise configuration from {}'.format(
self.url()))
# Set the time our content was cached at
self._cached_time = time.time()
return self._cached_servers
def read(self):
"""
This object should be implemented by the child classes
"""
return None
def expired(self):
"""
Simply returns True if the configuration should be considered
expired, or False if the cached content is still valid.
"""
if isinstance(self._cached_servers, list) and self.cache:
# We have enough reason to look further into our cached content
# and verify it has not expired.
if self.cache is True:
# we have not expired, return False
return False
# Verify our cache time to determine whether we will get our
# content again.
age_in_sec = time.time() - self._cached_time
if age_in_sec <= self.cache:
# We have not expired; return False
return False
# If we reach here our configuration should be considered
# missing and/or expired.
return True
@staticmethod
def parse_url(url, verify_host=True):
"""Parses the URL and returns it broken apart into a dictionary.
This is very specific and customized for Apprise.
Args:
url (str): The URL you want to fully parse.
verify_host (:obj:`bool`, optional): a flag kept with the parsed
URL which some child classes will later use to verify SSL
keys (if SSL transactions take place). Unless under very
specific circumstances, it is strongly recommended that
you leave this default value set to True.
Returns:
A dictionary is returned containing the URL fully parsed if
successful, otherwise None is returned.
"""
results = URLBase.parse_url(url, verify_host=verify_host)
if not results:
# We're done; we failed to parse our url
return results
# Allow overriding the default config format
if 'format' in results['qsd']:
results['format'] = results['qsd'].get('format')
if results['format'] not in CONFIG_FORMATS:
URLBase.logger.warning(
'Unsupported format specified {}'.format(
results['format']))
del results['format']
# Defines the encoding of the payload
if 'encoding' in results['qsd']:
results['encoding'] = results['qsd'].get('encoding')
# Our cache value
if 'cache' in results['qsd']:
# First try to get its integer value
try:
results['cache'] = int(results['qsd']['cache'])
except (ValueError, TypeError):
# No problem, it just isn't an integer; now treat it as a bool
# instead:
results['cache'] = parse_bool(results['qsd']['cache'])
return results
@staticmethod
def detect_config_format(content, **kwargs):
"""
Takes the specified content and attempts to detect the format type
The function returns the actual format type if detected, otherwise
it returns None
"""
# Detect Format Logic:
# - A pound/hashtag (#) is always a comment character so we skip over
# lines matched here.
# - Detection begins on the first non-comment and non blank line
# matched.
# - If we find a string followed by a colon, we know we're dealing
# with a YAML file.
# - If we find a string that starts with a URL, or our tag
# definitions (accepting commas) followed by an equal sign we know
# we're dealing with a TEXT format.
# Define what a valid line should look like
valid_line_re = re.compile(
r'^\s*(?P<line>([;#]+(?P<comment>.*))|'
r'(?P<text>((?P<tag>[ \t,a-z0-9_-]+)=)?[a-z0-9]+://.*)|'
r'((?P<yaml>[a-z0-9]+):.*))?$', re.I)
try:
# split our content up to read line by line
content = re.split(r'\r*\n', content)
except TypeError:
# content was not expected string type
ConfigBase.logger.error(
'Invalid Apprise configuration specified.')
return None
# By default set our return value to None since we don't know
# what the format is yet
config_format = None
# iterate over each line of the file to attempt to detect it
# stop the moment the type has been determined
for line, entry in enumerate(content, start=1):
result = valid_line_re.match(entry)
if not result:
# Invalid syntax
ConfigBase.logger.error(
'Undetectable Apprise configuration found '
'based on line {}.'.format(line))
# Take an early exit
return None
# Attempt to detect configuration
if result.group('yaml'):
config_format = ConfigFormat.YAML
ConfigBase.logger.debug(
'Detected YAML configuration '
'based on line {}.'.format(line))
break
elif result.group('text'):
config_format = ConfigFormat.TEXT
ConfigBase.logger.debug(
'Detected TEXT configuration '
'based on line {}.'.format(line))
break
# If we reach here, we have a comment entry
# Adjust default format to TEXT
config_format = ConfigFormat.TEXT
return config_format
@staticmethod
def config_parse(content, asset=None, config_format=None, **kwargs):
"""
Takes the specified config content and loads it based on the specified
config_format. If a format isn't specified, then it is auto detected.
"""
if config_format is None:
# Detect the format
config_format = ConfigBase.detect_config_format(content)
if not config_format:
# We couldn't detect configuration
ConfigBase.logger.error('Could not detect configuration')
return (list(), list())
if config_format not in CONFIG_FORMATS:
# Invalid configuration type specified
ConfigBase.logger.error(
'An invalid configuration format ({}) was specified'.format(
config_format))
return (list(), list())
# Dynamically load our parse_ function based on our config format
fn = getattr(ConfigBase, 'config_parse_{}'.format(config_format))
# Execute our config parse function which always returns a list
return fn(content=content, asset=asset)
@staticmethod
def config_parse_text(content, asset=None):
"""
Parse the specified content as though it were a simple text file only
containing a list of URLs.
Return a tuple that looks like (servers, configs) where:
- servers contains a list of loaded notification plugins
- configs contains a list of additional configuration files
referenced.
You may also optionally associate an asset with the notification.
The file syntax is:
#
# pound/hashtag allow for line comments
#
# One or more tags can be identified using commas (,) to separate
# them.
<Tag(s)>=<URL>
# Or you can use this format (no tags associated)
<URL>
# you can also use the keyword 'include' and identify a
# configuration location (like this file) which will be included
# as additional configuration entries when loaded.
include <ConfigURL>
"""
# A list of loaded Notification Services
servers = list()
# A list of additional configuration files referenced using
# the include keyword
configs = list()
# Define what a valid line should look like
valid_line_re = re.compile(
r'^\s*(?P<line>([;#]+(?P<comment>.*))|'
r'(\s*(?P<tags>[^=]+)=|=)?\s*'
r'(?P<url>[a-z0-9]{2,9}://.*)|'
r'include\s+(?P<config>.+))?\s*$', re.I)
try:
# split our content up to read line by line
content = re.split(r'\r*\n', content)
except TypeError:
# content was not expected string type
ConfigBase.logger.error(
'Invalid Apprise TEXT based configuration specified.')
return (list(), list())
for line, entry in enumerate(content, start=1):
result = valid_line_re.match(entry)
if not result:
# Invalid syntax
ConfigBase.logger.error(
'Invalid Apprise TEXT configuration format found '
'{} on line {}.'.format(entry, line))
# Assume this is a file we shouldn't be parsing. Its owner
# can read the error printed to screen and take action
# otherwise.
return (list(), list())
url, config = result.group('url'), result.group('config')
if not (url or config):
# Comment/empty line; do nothing
continue
if config:
ConfigBase.logger.debug('Include URL: {}'.format(config))
# Store our include line
configs.append(config.strip())
continue
# Acquire our url tokens
results = plugins.url_to_dict(url)
if results is None:
# Failed to parse the server URL
ConfigBase.logger.warning(
'Unparseable URL {} on line {}.'.format(url, line))
continue
# Build a list of tags to associate with the newly added
# notifications if any were set
results['tag'] = set(parse_list(result.group('tags')))
# Prepare our Asset Object
results['asset'] = \
asset if isinstance(asset, AppriseAsset) else AppriseAsset()
try:
# Attempt to create an instance of our plugin using the
# parsed URL information
plugin = plugins.SCHEMA_MAP[results['schema']](**results)
# Create log entry of loaded URL
ConfigBase.logger.debug('Loaded URL: {}'.format(plugin.url()))
except Exception as e:
# the arguments are invalid or can not be used.
ConfigBase.logger.warning(
'Could not load URL {} on line {}.'.format(
url, line))
ConfigBase.logger.debug('Loading Exception: %s' % str(e))
continue
# if we reach here, we successfully loaded our data
servers.append(plugin)
# Return what was loaded
return (servers, configs)
@staticmethod
def config_parse_yaml(content, asset=None):
"""
Parse the specified content as though it were a yaml file
specifically formatted for Apprise.
Return a tuple that looks like (servers, configs) where:
- servers contains a list of loaded notification plugins
- configs contains a list of additional configuration files
referenced.
You may optionally associate an asset with the notification.
"""
# A list of loaded Notification Services
servers = list()
# A list of additional configuration files referenced using
# the include keyword
configs = list()
try:
# Load our data (safely)
result = yaml.load(content, Loader=yaml.SafeLoader)
except (AttributeError,
yaml.parser.ParserError,
yaml.error.MarkedYAMLError) as e:
# Invalid content
ConfigBase.logger.error(
'Invalid Apprise YAML data specified.')
ConfigBase.logger.debug(
'YAML Exception:{}{}'.format(os.linesep, e))
return (list(), list())
if not isinstance(result, dict):
# Invalid content
ConfigBase.logger.error(
'Invalid Apprise YAML based configuration specified.')
return (list(), list())
# YAML Version
version = result.get('version', 1)
if version != 1:
# Invalid syntax
ConfigBase.logger.error(
'Invalid Apprise YAML version specified {}.'.format(version))
return (list(), list())
#
# global asset object
#
asset = asset if isinstance(asset, AppriseAsset) else AppriseAsset()
tokens = result.get('asset', None)
if tokens and isinstance(tokens, dict):
for k, v in tokens.items():
if k.startswith('_') or k.endswith('_'):
# Entries are considered reserved if they start or end
# with an underscore
ConfigBase.logger.warning(
'Ignored asset key "{}".'.format(k))
continue
if not (hasattr(asset, k) and
isinstance(getattr(asset, k),
(bool, six.string_types))):
# We can't set a function or non-string set value
ConfigBase.logger.warning(
'Invalid asset key "{}".'.format(k))
continue
if v is None:
# Convert to an empty string
v = ''
if (isinstance(v, (bool, six.string_types))
and isinstance(getattr(asset, k), bool)):
# If the object in the Asset is a boolean, then
# we want to convert the specified string to
# match that.
setattr(asset, k, parse_bool(v))
elif isinstance(v, six.string_types):
# Set our asset object with the new value
setattr(asset, k, v.strip())
else:
                    # Strings can only be set with a string value
ConfigBase.logger.warning(
'Invalid asset value to "{}".'.format(k))
continue
#
# global tag root directive
#
global_tags = set()
tags = result.get('tag', None)
if tags and isinstance(tags, (list, tuple, six.string_types)):
# Store any preset tags
global_tags = set(parse_list(tags))
#
# include root directive
#
includes = result.get('include', None)
if isinstance(includes, six.string_types):
# Support a single inline string or multiple ones separated by a
# comma and/or space
includes = parse_urls(includes)
elif not isinstance(includes, (list, tuple)):
# Not a problem; we simply have no includes
includes = list()
# Iterate over each config URL
for no, url in enumerate(includes):
if isinstance(url, six.string_types):
# Support a single inline string or multiple ones separated by
# a comma and/or space
configs.extend(parse_urls(url))
elif isinstance(url, dict):
# Store the url and ignore arguments associated
configs.extend(u for u in url.keys())
#
# urls root directive
#
urls = result.get('urls', None)
if not isinstance(urls, (list, tuple)):
# Not a problem; we simply have no urls
urls = list()
# Iterate over each URL
for no, url in enumerate(urls):
# Our results object is what we use to instantiate our object if
# we can. Reset it to None on each iteration
results = list()
if isinstance(url, six.string_types):
# We're just a simple URL string...
schema = GET_SCHEMA_RE.match(url)
if schema is None:
                    # Log invalid entries so that the maintainer of the
                    # config file at least has something to take action
                    # with.
ConfigBase.logger.warning(
'Invalid URL {}, entry #{}'.format(url, no + 1))
continue
                # We found a valid schema worthy of tracking; store its
                # details:
_results = plugins.url_to_dict(url)
if _results is None:
ConfigBase.logger.warning(
'Unparseable URL {}, entry #{}'.format(
url, no + 1))
continue
# add our results to our global set
results.append(_results)
elif isinstance(url, dict):
# We are a url string with additional unescaped options. In
# this case we want to iterate over all of our options so we
# can at least tell the end user what entries were ignored
# due to errors
if six.PY2:
it = url.iteritems()
else: # six.PY3
it = iter(url.items())
# Track the URL to-load
_url = None
# Track last acquired schema
schema = None
for key, tokens in it:
# Test our schema
_schema = GET_SCHEMA_RE.match(key)
if _schema is None:
                        # Log invalid entries so that the maintainer of the
                        # config file at least has something to take action
                        # with.
ConfigBase.logger.warning(
'Ignored entry {} found under urls, entry #{}'
.format(key, no + 1))
continue
# Store our schema
schema = _schema.group('schema').lower()
# Store our URL and Schema Regex
_url = key
if _url is None:
# the loop above failed to match anything
ConfigBase.logger.warning(
'Unsupported URL, entry #{}'.format(no + 1))
continue
_results = plugins.url_to_dict(_url)
if _results is None:
# Setup dictionary
_results = {
# Minimum requirements
'schema': schema,
}
if isinstance(tokens, (list, tuple, set)):
# populate and/or override any results populated by
# parse_url()
for entries in tokens:
# Copy ourselves a template of our parsed URL as a base
# to work with
r = _results.copy()
# We are a url string with additional unescaped options
if isinstance(entries, dict):
if six.PY2:
_url, tokens = next(url.iteritems())
else: # six.PY3
_url, tokens = next(iter(url.items()))
                        # The schema is one entry you just can't override
if 'schema' in entries:
del entries['schema']
# support our special tokens (if they're present)
if schema in plugins.SCHEMA_MAP:
entries = ConfigBase.__extract_special_tokens(
schema, entries)
# Extend our dictionary with our new entries
r.update(entries)
# add our results to our global set
results.append(r)
elif isinstance(tokens, dict):
# support our special tokens (if they're present)
if schema in plugins.SCHEMA_MAP:
tokens = ConfigBase.__extract_special_tokens(
schema, tokens)
# Copy ourselves a template of our parsed URL as a base to
# work with
r = _results.copy()
# add our result set
r.update(tokens)
# add our results to our global set
results.append(r)
else:
# add our results to our global set
results.append(_results)
else:
# Unsupported
ConfigBase.logger.warning(
'Unsupported Apprise YAML entry #{}'.format(no + 1))
continue
# Track our entries
entry = 0
while len(results):
# Increment our entry count
entry += 1
# Grab our first item
_results = results.pop(0)
# tag is a special keyword that is managed by Apprise object.
# The below ensures our tags are set correctly
if 'tag' in _results:
# Tidy our list up
_results['tag'] = \
set(parse_list(_results['tag'])) | global_tags
else:
# Just use the global settings
_results['tag'] = global_tags
for key in list(_results.keys()):
# Strip out any tokens we know that we can't accept and
# warn the user
match = VALID_TOKEN.match(key)
if not match:
ConfigBase.logger.warning(
'Ignoring invalid token ({}) found in YAML '
'configuration entry #{}, item #{}'
.format(key, no + 1, entry))
del _results[key]
ConfigBase.logger.trace(
'URL #{}: {} unpacked as:{}{}'
.format(no + 1, url, os.linesep, os.linesep.join(
['{}="{}"'.format(k, a)
for k, a in _results.items()])))
# Prepare our Asset Object
_results['asset'] = asset
try:
# Attempt to create an instance of our plugin using the
# parsed URL information
plugin = plugins.SCHEMA_MAP[_results['schema']](**_results)
# Create log entry of loaded URL
ConfigBase.logger.debug(
'Loaded URL: {}'.format(plugin.url()))
except Exception as e:
                    # The arguments are invalid or cannot be used.
ConfigBase.logger.warning(
'Could not load Apprise YAML configuration '
'entry #{}, item #{}'
.format(no + 1, entry))
ConfigBase.logger.debug('Loading Exception: %s' % str(e))
continue
# if we reach here, we successfully loaded our data
servers.append(plugin)
return (servers, configs)
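    # Illustrative sketch (not part of the library): a minimal YAML document
    # the parser above accepts.  The URL, tag names and include target are
    # placeholders.
    #
    #   sample = '''
    #   version: 1
    #   tag: team
    #   include:
    #     - https://example.com/more-config.yml
    #   urls:
    #     - mailto://user:pass@example.com:
    #         - tag: alerts
    #   '''
    #   servers, configs = ConfigBase.config_parse_yaml(sample)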
def pop(self, index=-1):
"""
Removes an indexed Notification Service from the stack and returns it.
By default, the last element of the list is removed.
"""
if not isinstance(self._cached_servers, list):
# Generate ourselves a list of content we can pull from
self.servers()
# Pop the element off of the stack
return self._cached_servers.pop(index)
@staticmethod
def __extract_special_tokens(schema, tokens):
"""
        This function takes a dictionary of tokens and updates it to no
        longer include any special prefix tokens such as '+', '-', and ':'.
        - schema must be a valid schema of a supported plugin type
        - tokens must be a dictionary containing the parsed YAML entries.
        The idea here is that we can post-process a set of tokens provided
        in a YAML file where the user used some of the special prefix
        keywords. We effectively look up which entry each keyword maps to
        and fold it into the value it is expected to populate.
"""
# Create a copy of our dictionary
tokens = tokens.copy()
for kw, meta in plugins.SCHEMA_MAP[schema]\
.template_kwargs.items():
# Determine our prefix:
prefix = meta.get('prefix', '+')
# Detect any matches
matches = \
{k[1:]: str(v) for k, v in tokens.items()
if k.startswith(prefix)}
if not matches:
# we're done with this entry
continue
if not isinstance(tokens.get(kw, None), dict):
# Invalid; correct it
tokens[kw] = dict()
# strip out processed tokens
tokens = {k: v for k, v in tokens.items()
if not k.startswith(prefix)}
# Update our entries
tokens[kw].update(matches)
# Return our tokens
return tokens
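    # Illustrative sketch (not part of the library): for a hypothetical
    # plugin whose template_kwargs maps a 'headers' entry to the '+' prefix,
    # the method above folds prefixed keys into that dictionary:
    #
    #   tokens in : {'+X-Token': 'abc', 'to': 'user@example.com'}
    #   tokens out: {'to': 'user@example.com', 'headers': {'X-Token': 'abc'}}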
def __getitem__(self, index):
"""
Returns the indexed server entry associated with the loaded
notification servers
"""
if not isinstance(self._cached_servers, list):
# Generate ourselves a list of content we can pull from
self.servers()
return self._cached_servers[index]
def __iter__(self):
"""
Returns an iterator to our server list
"""
if not isinstance(self._cached_servers, list):
# Generate ourselves a list of content we can pull from
self.servers()
return iter(self._cached_servers)
def __len__(self):
"""
Returns the total number of servers loaded
"""
if not isinstance(self._cached_servers, list):
# Generate ourselves a list of content we can pull from
self.servers()
return len(self._cached_servers)
def __bool__(self):
"""
        Allows the ConfigBase object to be wrapped in a Python 3.x based 'if
        statement'. True is returned if our content was downloaded correctly.
"""
if not isinstance(self._cached_servers, list):
# Generate ourselves a list of content we can pull from
self.servers()
        return bool(self._cached_servers)
def __nonzero__(self):
"""
        Allows the ConfigBase object to be wrapped in a Python 2.x based 'if
        statement'. True is returned if our content was downloaded correctly.
"""
if not isinstance(self._cached_servers, list):
# Generate ourselves a list of content we can pull from
self.servers()
        return bool(self._cached_servers)
| 38.116651
| 79
| 0.550768
| 4,575
| 40,518
| 4.820109
| 0.157158
| 0.023218
| 0.01696
| 0.007618
| 0.33294
| 0.288455
| 0.267504
| 0.239162
| 0.225648
| 0.206421
| 0
| 0.002547
| 0.37993
| 40,518
| 1,062
| 80
| 38.152542
| 0.875179
| 0.376944
| 0
| 0.371739
| 0
| 0.002174
| 0.090078
| 0.010273
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034783
| false
| 0
| 0.034783
| 0
| 0.15
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
07f0b2b68417d129704d340d100e569555824ebc
| 977
|
py
|
Python
|
ffmpeg_util.py
|
manuel-fischer/ScrollRec
|
ec5662d3f61630f939613481290a166133d23a20
|
[
"MIT"
] | null | null | null |
ffmpeg_util.py
|
manuel-fischer/ScrollRec
|
ec5662d3f61630f939613481290a166133d23a20
|
[
"MIT"
] | null | null | null |
ffmpeg_util.py
|
manuel-fischer/ScrollRec
|
ec5662d3f61630f939613481290a166133d23a20
|
[
"MIT"
] | null | null | null |
import sys
import subprocess
from subprocess import Popen, PIPE
AV_LOG_QUIET = "quiet"
AV_LOG_PANIC = "panic"
AV_LOG_FATAL = "fatal"
AV_LOG_ERROR = "error"
AV_LOG_WARNING = "warning"
AV_LOG_INFO = "info"
AV_LOG_VERBOSE = "verbose"
AV_LOG_DEBUG = "debug"
ffmpeg_loglevel = AV_LOG_ERROR
IS_WIN32 = 'win32' in str(sys.platform).lower()
SUBPROCESS_ARGS = {}
if IS_WIN32:
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = subprocess.CREATE_NEW_CONSOLE | subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
SUBPROCESS_ARGS['startupinfo'] = startupinfo
def popen_ffmpeg(inner_args):
cmd = [
'ffmpeg',
*inner_args,
# logging
'-loglevel', ffmpeg_loglevel,
'-hide_banner',
]
process = Popen(cmd, stdout=PIPE, stderr=PIPE, **SUBPROCESS_ARGS)
stdout, stderr = process.communicate()
print(stderr.decode(), end='', file=sys.stderr)
return stdout, stderr
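# --- Illustrative usage sketch (not part of the original module) ---
# A hypothetical helper that re-encodes one file into another; the file names
# are placeholders and an ffmpeg binary is assumed to be available on PATH.
def example_transcode(src="input.mp4", dst="output.gif"):
    # popen_ffmpeg() appends the loglevel/banner flags and returns the raw
    # stdout/stderr bytes of the finished process.
    return popen_ffmpeg(['-y', '-i', src, dst])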
| 24.425
| 89
| 0.698055
| 118
| 977
| 5.5
| 0.423729
| 0.069337
| 0.030817
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007624
| 0.194473
| 977
| 40
| 90
| 24.425
| 0.817027
| 0.007165
| 0
| 0
| 0
| 0
| 0.088751
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0.1
| 0
| 0.166667
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
07f1195aa55500ccfbdb1eb16ce8a5e553bfeb5d
| 11,381
|
py
|
Python
|
analysis_tools/PYTHON_RICARDO/output_ingress_egress/scripts/uniform_grid.py
|
lefevre-fraser/openmeta-mms
|
08f3115e76498df1f8d70641d71f5c52cab4ce5f
|
[
"MIT"
] | null | null | null |
analysis_tools/PYTHON_RICARDO/output_ingress_egress/scripts/uniform_grid.py
|
lefevre-fraser/openmeta-mms
|
08f3115e76498df1f8d70641d71f5c52cab4ce5f
|
[
"MIT"
] | null | null | null |
analysis_tools/PYTHON_RICARDO/output_ingress_egress/scripts/uniform_grid.py
|
lefevre-fraser/openmeta-mms
|
08f3115e76498df1f8d70641d71f5c52cab4ce5f
|
[
"MIT"
] | null | null | null |
""" Represent a triangulated surface using a 3D boolean grid"""
import logging
import numpy as np
from rpl.tools.ray_tracing.bsp_tree_poly import BSP_Element
from rpl.tools.geometry import geom_utils
import data_io
class BSP_Grid(object):
def __init__(self, node_array, tris, allocate_step=100000):
"""
Store the triangles with an enumeration so that even when they are subdivided their
identity is not lost.
"""
tri_nums = np.arange(len(tris), dtype=np.int32).reshape((len(tris), 1))
minus_ones = -np.ones((len(tris), 6), dtype=np.int32)
self.tris = np.hstack((tris, minus_ones, tri_nums))
self.allocate_step = allocate_step
self.node_array = node_array # Reference to the full list of nodes
self._resize()
self.next_free = len(node_array)
self.split_cache = np.zeros(len(self.tris), dtype=np.int32)
def _resize(self):
"""
Increase node array size by the allocate_step amount.
"""
self.array_size = len(self.node_array) + self.allocate_step
self.node_array = np.concatenate((self.node_array, np.zeros((self.allocate_step, 3))))
def add_node(self, node):
"""
Adds a new node to the end of the node array (expanding if required). Returns the index of
the newly added node.
"""
if self.next_free == self.array_size:
self._resize()
self.node_array[self.next_free] = node
self.next_free += 1
return self.next_free - 1
def prepare_add(self, num_add_nodes):
"""
Make sure that ``num_add_nodes`` can be added later without needing a resize.
Useful if adding nodes from within cython where resizing is tricky.
"""
if self.next_free + num_add_nodes >= self.array_size:
self._resize()
return self.next_free
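# Illustrative sketch (not part of the original script): building a grid from
# a hypothetical single-triangle surface and appending one extra node.
def _example_bsp_grid():
    nodes = np.zeros((3, 3))
    tris = np.array([[0, 1, 2]], dtype=np.int32)
    grid = BSP_Grid(nodes, tris, allocate_step=8)
    # add_node() grows the backing array when needed and returns the index of
    # the node that was just appended (here: 3).
    return grid.add_node(np.array([1.0, 2.0, 3.0]))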
def make_grid(veh_surfs, settings):
"""
Make coordinates of voxelated grid based on overall list of vehicle surfaces
"""
## Find overall bounding box
x_min, x_max = 1e30, -1e30
y_min, y_max = 1e30, -1e30
z_min, z_max = 1e30, -1e30
for key, veh_surf in veh_surfs.items():
x_min, x_max = min(x_min, np.min(veh_surf["x"])), max(x_max, np.max(veh_surf["x"]))
y_min, y_max = min(y_min, np.min(veh_surf["y"])), max(y_max, np.max(veh_surf["y"]))
z_min, z_max = min(z_min, np.min(veh_surf["z"])), max(z_max, np.max(veh_surf["z"]))
x_min, x_max = x_min - settings["voxel_size"], x_max + settings["voxel_size"]
y_min, y_max = y_min - settings["voxel_size"], y_max + settings["voxel_size"]
z_min, z_max = z_min - settings["voxel_size"], z_max + settings["voxel_size"]
###########################################
# Create the uniformly spaced grid points
x_grid = np.arange(x_min, x_max + settings["voxel_size"], settings["voxel_size"])
y_grid = np.arange(y_min, y_max + settings["voxel_size"], settings["voxel_size"])
z_grid = np.arange(z_min, z_max + settings["voxel_size"], settings["voxel_size"])
return x_grid, y_grid, z_grid
def convert_geom(veh_surf, tr_mat):
"""
Rotate nodes using provided transformation matrix; convert xyz node dict to nodes array
"""
veh_surf["nodes"] = np.vstack((veh_surf["x"], veh_surf["y"], veh_surf["z"])).T
veh_surf['nodes'] = np.dot(veh_surf['nodes'], tr_mat[:3, :3])
veh_surf["x"] = veh_surf['nodes'][:, 0]
veh_surf["y"] = veh_surf['nodes'][:, 1]
veh_surf["z"] = veh_surf['nodes'][:, 2]
return veh_surf
def find_occupied_voxels(surf, surf_mask, voxel_data):
"""
    Voxels containing any triangle from ``surf`` are considered occupied and or'ed with ``surf_mask``.
    If ``voxel_data["value"]`` is None, a fresh voxel array is created, stored and returned.
"""
nodes = surf["nodes"]
tris = surf["tris"]
x_pts, y_pts, z_pts = [voxel_data[k] for k in ("x_grid", "y_grid", "z_grid")]
vox_size = voxel_data["vox_size"]
## Find the local extents of this part
min_x, max_x = np.min(surf["x"]) - vox_size, np.max(surf["x"]) + vox_size
min_y, max_y = np.min(surf["y"]) - vox_size, np.max(surf["y"]) + vox_size
min_z, max_z = np.min(surf["z"]) - vox_size, np.max(surf["z"]) + vox_size
b_tree = BSP_Grid(nodes, tris)
    # Create BSP tree elements - we're not building a full tree, but we reuse some of its functions
b_x_root = BSP_Element(b_tree.tris, b_tree)
size_i, size_j, size_k = len(x_pts), len(y_pts), len(z_pts)
## Create the occupied voxels if none were supplied
if voxel_data["value"] is None:
voxel_data["value"] = np.zeros((size_i - 1, size_j - 1, size_k - 1), dtype=np.uint32)
occupied_voxels = voxel_data["value"]
## The [1:] is because to make n voxels in a given direction we need n-1 splits
for i, x_pos in enumerate(x_pts[1:]):
if x_pos < min_x: continue
if x_pos > max_x: break
b_above_x, b_below_x = b_x_root.split_at(0, x_pos)
b_y_root = b_below_x
for j, y_pos in enumerate(y_pts[1:]):
if b_y_root is None:
break
if y_pos < min_y: continue
if y_pos > max_y: break
b_above_y, b_below_y = b_y_root.split_at(1, y_pos)
b_z_root = b_below_y
for k, z_pos in enumerate(z_pts[1:]):
if b_z_root is None:
break
if z_pos < min_z: continue
if z_pos > max_z: break
b_above_z, b_below_z = b_z_root.split_at(2, z_pos)
if not (b_below_z and (len(b_below_z.tris) == 0)):
## There is at least part of triangle here so mark as occupied
occupied_voxels[i, j, k] |= surf_mask
b_z_root = b_above_z
b_y_root = b_above_y
b_x_root = b_above_x
return voxel_data
#############
# Main code
def main(vehicle_comp_coords, tr_mat, voxel_masks, settings):
"""
Perform voxelization for all vehicle geometries in a list of parts. Combine on a uniform grid.
"""
for key, veh_surf in vehicle_comp_coords.items():
# Convert coordinates and find overall best bounding box
veh_surf = convert_geom(veh_surf, tr_mat)
x_grid, y_grid, z_grid = make_grid(vehicle_comp_coords, settings)
voxel_data = {"x_grid": x_grid,
"y_grid": y_grid,
"z_grid": z_grid,
"vox_size": settings["voxel_size"],
"csys_trans": tr_mat,
"value": None}
for key, veh_surf in vehicle_comp_coords.items():
# Build up the voxel_data
logging.debug("Sampling component: {}".format(key))
## Default mask is 1 for anything not in an identified set
surf_mask = 1
for mask, geo_set in voxel_masks.items():
if veh_surf['part_class'] in geo_set:
surf_mask |= mask
voxel_data = find_occupied_voxels(veh_surf, surf_mask, voxel_data)
return voxel_data
if __name__ == "__main__":
from rpl.tools.api import test_bench_api as tb_api
SETTINGS = tb_api.load_settings("settings.js")
DOORS = {'Hatch_Assembly_Rear_Ramp', 'Hatch_Assembly_Personnel_Door'}
HATCHES = {'Hatch_Assembly_Driver_Commander', 'Hatch_Assembly_Cargo'}
HULLS = {"Hull_Assembly_Parametric", 'Hull_Assembly_Example_With_Connector'}
MANIKINS = {"Manikin"}
# Special labels applied to specific types of voxels
VOXEL_LABELS = {2: HULLS,
4: DOORS,
8: HATCHES,
16: MANIKINS}
vehicle_surfs = tb_api.load_geometry(tb_api.get_all_geom_set() - MANIKINS, single_file=False)
# Modify node coords so object aligns with cartesian axes of occ voxel grid, +z=up
# Vector to rotate around is cross product of current z axis and sfc normal
veh_up = np.array([0., 1., 0.])
rot_around = np.cross(veh_up, np.array([0, 0, 1]))
rot_ang = -np.arccos(veh_up[2])
tr_mat = geom_utils.rotation_about_vector(rot_around, rot_ang)
# voxel_data = main(vehicle_surfs, tr_mat, VOXEL_LABELS, SETTINGS)
vox_veh_folder = r"voxelated_models/vehicles/{}/{}".format(SETTINGS["run_id"],
SETTINGS["voxel_size"])
vox_veh_file = "voxels_{}_vox{}_hacked".format(SETTINGS["run_id"],
SETTINGS["voxel_size"])
try:
voxel_data = data_io.load_array(vox_veh_folder, vox_veh_file, True)
    except Exception:
voxel_data = main(vehicle_surfs, tr_mat, VOXEL_LABELS, SETTINGS)
from mayavi import mlab
xo, yo, zo = np.where(voxel_data["value"] == 1)
plot_vehicle = mlab.points3d(voxel_data["x_grid"][xo],
voxel_data["y_grid"][yo],
voxel_data["z_grid"][zo],
color=(0.9, 0.9, 0.9),
scale_mode="none", scale_factor=voxel_data["vox_size"],
mode='cube', opacity=1)
xo, yo, zo = np.where(voxel_data["value"] & 2)
plot_vehicle = mlab.points3d(voxel_data["x_grid"][xo],
voxel_data["y_grid"][yo],
voxel_data["z_grid"][zo],
color=(1, 1, 1),
scale_mode="none", scale_factor=voxel_data["vox_size"],
mode='cube', opacity=0.05)
xo, yo, zo = np.where(voxel_data["value"] & 4)
plot_vehicle = mlab.points3d(voxel_data["x_grid"][xo],
voxel_data["y_grid"][yo],
voxel_data["z_grid"][zo],
color=(1.0, 0.5, 0.5),
scale_mode="none", scale_factor=voxel_data["vox_size"],
mode='cube', opacity=1)
xo, yo, zo = np.where(voxel_data["value"] & 8)
plot_vehicle = mlab.points3d(voxel_data["x_grid"][xo],
voxel_data["y_grid"][yo],
voxel_data["z_grid"][zo],
color=(0.6, 0.6, 1.0),
scale_mode="none", scale_factor=voxel_data["vox_size"],
mode='cube', opacity=1)
# No manikins included, no need to plot them
# xo, yo, zo = np.where(voxel_data["value"] & 16)
# plot_vehicle = mlab.points3d(voxel_data["x_grid"][xo],
# voxel_data["y_grid"][yo],
# voxel_data["z_grid"][zo],
# color=(0.5, 1.0, 0.8),
# scale_mode="none", scale_factor=voxel_data["vox_size"],
# mode='cube', opacity=1.0)
mlab.show()
# Save the voxelated model of the vehicle (sans door and other excluded parts)
data_io.save_multi_array(vox_veh_folder, vox_veh_file, voxel_data)
| 42.30855
| 99
| 0.567613
| 1,601
| 11,381
| 3.757027
| 0.202998
| 0.061347
| 0.042394
| 0.01995
| 0.299584
| 0.21596
| 0.200831
| 0.16143
| 0.147963
| 0.135661
| 0
| 0.014721
| 0.313593
| 11,381
| 269
| 100
| 42.30855
| 0.755248
| 0.199719
| 0
| 0.186335
| 0
| 0
| 0.085564
| 0.023027
| 0
| 0
| 0
| 0
| 0
| 1
| 0.049689
| false
| 0
| 0.043478
| 0
| 0.136646
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
07f1ea6de606abc50abb899228cdc43831fa522e
| 876
|
py
|
Python
|
pyfire/errors.py
|
RavidLevi98/pyfire
|
404ae2082fd5be3ef652b3e15a66ad0d79b7a1b5
|
[
"BSD-3-Clause"
] | null | null | null |
pyfire/errors.py
|
RavidLevi98/pyfire
|
404ae2082fd5be3ef652b3e15a66ad0d79b7a1b5
|
[
"BSD-3-Clause"
] | 1
|
2021-05-22T21:34:44.000Z
|
2021-05-22T21:34:44.000Z
|
pyfire/errors.py
|
RavidLevi98/pyfire
|
404ae2082fd5be3ef652b3e15a66ad0d79b7a1b5
|
[
"BSD-3-Clause"
] | 1
|
2021-05-22T21:21:11.000Z
|
2021-05-22T21:21:11.000Z
|
# -*- coding: utf-8 -*-
"""
pyfire.errors
~~~~~~~~~~~~~~~~~~~~~~
Holds the global used base errors
:copyright: 2011 by the pyfire Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import xml.etree.ElementTree as ET
class XMPPProtocolError(Exception):
"""Base class for all errors that can be
sent via XMPP Protocol to peer
"""
def __init__(self, error_element, error_namespace, error_name=None):
self.error_name = error_name
self.element = ET.Element(error_element)
self.element.set("xmlns", error_namespace)
        # By default all errors are recoverable
self.unrecoverable = False
def __unicode__(self):
if self.error_name is not None:
self.element.append(ET.Element(self.error_name))
return unicode(ET.tostring(self.element))
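# Illustrative sketch (not part of pyfire): a hypothetical concrete error
# built on the base class above; the element name and namespace are
# placeholders.
class ExampleStreamError(XMPPProtocolError):
    def __init__(self):
        super(ExampleStreamError, self).__init__(
            "stream:error", "urn:example:placeholder-ns", "not-well-formed")
        # Treat this example as fatal for the connection.
        self.unrecoverable = True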
| 28.258065
| 72
| 0.651826
| 112
| 876
| 4.946429
| 0.5625
| 0.081227
| 0.070397
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007452
| 0.234018
| 876
| 30
| 73
| 29.2
| 0.818182
| 0.359589
| 0
| 0
| 0
| 0
| 0.009709
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.090909
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
07f6a33d952a989c19f3efa056df22e95ace1f20
| 4,526
|
py
|
Python
|
tests/unit/commands/local/start_lambda/test_cli.py
|
ourobouros/aws-sam-cli
|
3fba861f5106d604fde6d023923a9b83377a35d9
|
[
"Apache-2.0"
] | 2
|
2018-11-09T04:43:41.000Z
|
2018-11-20T06:39:45.000Z
|
tests/unit/commands/local/start_lambda/test_cli.py
|
ourobouros/aws-sam-cli
|
3fba861f5106d604fde6d023923a9b83377a35d9
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/commands/local/start_lambda/test_cli.py
|
ourobouros/aws-sam-cli
|
3fba861f5106d604fde6d023923a9b83377a35d9
|
[
"Apache-2.0"
] | null | null | null |
from unittest import TestCase
from mock import patch, Mock
from samcli.commands.local.start_lambda.cli import do_cli as start_lambda_cli
from samcli.commands.local.cli_common.user_exceptions import UserException
from samcli.commands.validate.lib.exceptions import InvalidSamDocumentException
from samcli.commands.local.lib.exceptions import OverridesNotWellDefinedError
class TestCli(TestCase):
def setUp(self):
self.template = "template"
self.env_vars = "env-vars"
self.debug_port = 123
self.debug_args = "args"
self.debugger_path = "/test/path"
self.docker_volume_basedir = "basedir"
self.docker_network = "network"
self.log_file = "logfile"
self.skip_pull_image = True
self.profile = "profile"
self.region = "region"
self.parameter_overrides = {}
self.host = "host"
self.port = 123
@patch("samcli.commands.local.start_lambda.cli.InvokeContext")
@patch("samcli.commands.local.start_lambda.cli.LocalLambdaService")
def test_cli_must_setup_context_and_start_service(self, local_lambda_service_mock,
invoke_context_mock):
        # Mock the __enter__ method to return an object inside a context manager
context_mock = Mock()
invoke_context_mock.return_value.__enter__.return_value = context_mock
service_mock = Mock()
local_lambda_service_mock.return_value = service_mock
self.call_cli()
invoke_context_mock.assert_called_with(template_file=self.template,
function_identifier=None,
env_vars_file=self.env_vars,
docker_volume_basedir=self.docker_volume_basedir,
docker_network=self.docker_network,
log_file=self.log_file,
skip_pull_image=self.skip_pull_image,
aws_profile=self.profile,
debug_port=self.debug_port,
debug_args=self.debug_args,
debugger_path=self.debugger_path,
aws_region=self.region,
parameter_overrides=self.parameter_overrides)
local_lambda_service_mock.assert_called_with(lambda_invoke_context=context_mock,
port=self.port,
host=self.host)
service_mock.start.assert_called_with()
@patch("samcli.commands.local.start_lambda.cli.InvokeContext")
def test_must_raise_user_exception_on_invalid_sam_template(self, invoke_context_mock):
invoke_context_mock.side_effect = InvalidSamDocumentException("bad template")
with self.assertRaises(UserException) as context:
self.call_cli()
msg = str(context.exception)
expected = "bad template"
        self.assertEqual(msg, expected)
@patch("samcli.commands.local.start_lambda.cli.InvokeContext")
def test_must_raise_user_exception_on_invalid_env_vars(self, invoke_context_mock):
invoke_context_mock.side_effect = OverridesNotWellDefinedError("bad env vars")
with self.assertRaises(UserException) as context:
self.call_cli()
msg = str(context.exception)
expected = "bad env vars"
        self.assertEqual(msg, expected)
def call_cli(self):
start_lambda_cli(ctx=None,
host=self.host,
port=self.port,
template=self.template,
env_vars=self.env_vars,
debug_port=self.debug_port,
debug_args=self.debug_args,
debugger_path=self.debugger_path,
docker_volume_basedir=self.docker_volume_basedir,
docker_network=self.docker_network,
log_file=self.log_file,
skip_pull_image=self.skip_pull_image,
profile=self.profile,
region=self.region,
parameter_overrides=self.parameter_overrides)
| 45.26
| 96
| 0.579319
| 451
| 4,526
| 5.48337
| 0.203991
| 0.04448
| 0.053781
| 0.048524
| 0.430247
| 0.430247
| 0.416903
| 0.401537
| 0.335625
| 0.296806
| 0
| 0.002057
| 0.355502
| 4,526
| 99
| 97
| 45.717172
| 0.845732
| 0.015245
| 0
| 0.375
| 0
| 0
| 0.07385
| 0.047811
| 0
| 0
| 0
| 0
| 0.0875
| 1
| 0.0625
| false
| 0
| 0.075
| 0
| 0.15
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
07f928cd0ad75195469f95ed414958ac002210c7
| 3,376
|
py
|
Python
|
pytorch_toolkit/ote/ote/modules/trainers/mmdetection.py
|
abhatikar/training_extensions
|
1c96e0f5f39688f8b79735e8dfa90646afc3d5e6
|
[
"Apache-2.0"
] | 2
|
2021-01-07T05:09:17.000Z
|
2021-10-15T05:13:46.000Z
|
pytorch_toolkit/ote/ote/modules/trainers/mmdetection.py
|
abhatikar/training_extensions
|
1c96e0f5f39688f8b79735e8dfa90646afc3d5e6
|
[
"Apache-2.0"
] | 9
|
2021-09-08T03:12:59.000Z
|
2022-03-12T00:57:19.000Z
|
pytorch_toolkit/ote/ote/modules/trainers/mmdetection.py
|
abhatikar/training_extensions
|
1c96e0f5f39688f8b79735e8dfa90646afc3d5e6
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import logging
import subprocess
import tempfile
from ote import MMDETECTION_TOOLS
from .base import BaseTrainer
from ..registry import TRAINERS
@TRAINERS.register_module()
class MMDetectionTrainer(BaseTrainer):
def __init__(self):
super(MMDetectionTrainer, self).__init__()
def _get_tools_dir(self):
return MMDETECTION_TOOLS
def _add_extra_args(self, cfg, config_path, update_config):
if self.__is_clustering_needed(cfg):
update_config = self.__cluster(cfg, config_path, update_config)
return update_config
@staticmethod
def __is_clustering_needed(cfg):
if cfg.total_epochs > 0:
return False
if not hasattr(cfg.model, 'bbox_head') or not cfg.model.bbox_head.type == 'SSDHead':
return False
if not cfg.model.bbox_head.anchor_generator.type == 'SSDAnchorGeneratorClustered':
return False
return True
@staticmethod
def __cluster(cfg, config_path, update_config):
logging.info('Clustering started...')
widths = cfg.model.bbox_head.anchor_generator.widths
n_clust = 0
for w in widths:
n_clust += len(w) if isinstance(w, (list, tuple)) else 1
n_clust = ' --n_clust ' + str(n_clust)
group_as = ''
if isinstance(widths[0], (list, tuple)):
group_as = ' --group_as ' + ' '.join([str(len(w)) for w in widths])
config = ' --config ' + config_path
tmp_file = tempfile.NamedTemporaryFile(delete=False)
out = f' --out {tmp_file.name}'
if 'pipeline' in cfg.data.train:
img_shape = [t for t in cfg.data.train.pipeline if t['type'] == 'Resize'][0][
'img_scale']
else:
img_shape = [t for t in cfg.data.train.dataset.pipeline if t['type'] == 'Resize'][0][
'img_scale']
img_shape = f' --image_size_wh {img_shape[0]} {img_shape[1]}'
subprocess.run(f'python {MMDETECTION_TOOLS}/cluster_boxes.py'
f'{config}'
f'{n_clust}'
f'{group_as}'
f'{update_config}'
f'{img_shape}'
f'{out}'.split(' '), check=True)
with open(tmp_file.name) as src_file:
content = json.load(src_file)
widths, heights = content['widths'], content['heights']
if not update_config:
update_config = ' --update_config'
update_config += f' model.bbox_head.anchor_generator.widths={str(widths).replace(" ", "")}'
update_config += f' model.bbox_head.anchor_generator.heights={str(heights).replace(" ", "")}'
logging.info('... clustering completed.')
return update_config
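    # Illustrative sketch (not part of the trainer): for a hypothetical
    # clustering result of widths=[10, 20] and heights=[12, 24], the string
    # returned above would extend update_config roughly as:
    #
    #   ' --update_config'
    #   ' model.bbox_head.anchor_generator.widths=[10,20]'
    #   ' model.bbox_head.anchor_generator.heights=[12,24]'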
| 34.44898
| 101
| 0.627073
| 426
| 3,376
| 4.774648
| 0.373239
| 0.070796
| 0.038348
| 0.031465
| 0.205015
| 0.181908
| 0.096362
| 0.096362
| 0.026549
| 0
| 0
| 0.006457
| 0.265995
| 3,376
| 97
| 102
| 34.804124
| 0.814366
| 0.166469
| 0
| 0.142857
| 0
| 0
| 0.183119
| 0.067597
| 0.015873
| 0
| 0
| 0
| 0
| 1
| 0.079365
| false
| 0
| 0.111111
| 0.015873
| 0.31746
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
07fabd24f913f0cde7669692291156d001f2e833
| 1,979
|
py
|
Python
|
svn-go-stats/transform.py
|
BT-OpenSource/bt-betalab
|
af5a1b0d778c1746312149f62da0c4159f387293
|
[
"MIT"
] | 1
|
2021-03-02T10:44:07.000Z
|
2021-03-02T10:44:07.000Z
|
svn-go-stats/transform.py
|
BT-OpenSource/bt-betalab
|
af5a1b0d778c1746312149f62da0c4159f387293
|
[
"MIT"
] | null | null | null |
svn-go-stats/transform.py
|
BT-OpenSource/bt-betalab
|
af5a1b0d778c1746312149f62da0c4159f387293
|
[
"MIT"
] | null | null | null |
import sys
import json
import subprocess
import re
import statistics
def get_complexity():
# Load the cyclomatic complexity info
cyclostats = subprocess.check_output(['./gocyclo', 'repo']).decode("utf-8")
    results = re.findall(r'([0-9]+)\s([^\s]+)\s([^\s]+)\s([^:]+):([0-9]+):([0-9]+)', cyclostats)
    # Set up a dictionary in which to keep track of the complexities
    # for each file
files = {}
# Build an array of complexities for each file
for result in results:
if result[3] in files:
files[result[3]].append(int(result[0]))
else:
files[result[3]] = [int(result[0])]
# Pick out the median value (picking the highest of the two
# middle entries if needed) for each file
for name, values in files.items():
files[name] = statistics.median_high(values)
return files
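# Illustrative sketch (not part of the original script): a gocyclo output line
# such as
#     9 main (*Server).handle repo/server.go:42:1
# is matched by the pattern above; after reducing each file's list to its high
# median, get_complexity() returns something like {'repo/server.go': 9}.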
def get_duplicate_const_strings():
# Load the const string duplication info
cyclostats = subprocess.check_output(['./goconst', './repo/...']).decode("utf-8")
    results = re.findall(r'([^:]+).+ other occurrence\(s\) of "(.+)" found in: ([^:]+).+\n?', cyclostats)
files = {}
# Build an array containing the number of potentially duplicated
# constants by file
for result in results:
if result[0] in files:
files[result[0]] = files[result[0]]+1
else:
files[result[0]] = 1
return files
# Main service body
if __name__ == "__main__":
complexity = get_complexity()
duplicate_const_strings = get_duplicate_const_strings()
files = set()
files.update(complexity.keys())
files.update(duplicate_const_strings.keys())
result = []
for f in files:
result.append({
'filename': f,
'cyclomaticComplexity': complexity[f] if f in complexity else 0,
'duplicateConstStrings': duplicate_const_strings[f] if f in duplicate_const_strings else 0
})
print(json.dumps(result))
| 29.102941
| 106
| 0.623042
| 250
| 1,979
| 4.824
| 0.376
| 0.054726
| 0.104478
| 0.048093
| 0.157546
| 0.099502
| 0.099502
| 0
| 0
| 0
| 0
| 0.013953
| 0.239515
| 1,979
| 68
| 107
| 29.102941
| 0.787375
| 0.19808
| 0
| 0.195122
| 0
| 0.02439
| 0.139505
| 0.048193
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04878
| false
| 0
| 0.121951
| 0
| 0.219512
| 0.02439
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
07fb5058c7a297096cbf1ff7f21aedcf66b7d3ad
| 985
|
py
|
Python
|
shogitk/usikif.py
|
koji-hirono/pytk-shogi-replayer
|
a10819a797faecbee5c7b0654beb3694eb522840
|
[
"MIT"
] | null | null | null |
shogitk/usikif.py
|
koji-hirono/pytk-shogi-replayer
|
a10819a797faecbee5c7b0654beb3694eb522840
|
[
"MIT"
] | null | null | null |
shogitk/usikif.py
|
koji-hirono/pytk-shogi-replayer
|
a10819a797faecbee5c7b0654beb3694eb522840
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from shogitk.shogi import Coords, Move, BLACK, WHITE, DROP, PROMOTE
RANKNUM = {
'a': 1,
'b': 2,
'c': 3,
'd': 4,
'e': 5,
'f': 6,
'g': 7,
'h': 8,
'i': 9
}
def decoder(f):
color = [BLACK, WHITE]
step = 0
for line in f:
line = line.strip()
if line[0] == '[':
pass
elif line[0].isdigit():
src = Coords(int(line[0]), RANKNUM[line[1]])
dst = Coords(int(line[2]), RANKNUM[line[3]])
if line[-1] == '+':
modifier = PROMOTE
else:
modifier = None
yield Move(color[step & 1], dst, src, None, modifier=modifier)
step += 1
elif line[0].isupper():
dst = Coords(int(line[2]), RANKNUM[line[3]])
yield Move(color[step & 1], dst, None, line[0], modifier=DROP)
step += 1
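# Illustrative sketch (not part of the module): decoding two hypothetical
# USI-style lines.  '7g7f' moves a piece from 7g to 7f, while 'P*5e' drops a
# pawn on 5e:
#
#   moves = list(decoder(['7g7f', 'P*5e']))
#   # moves[0].src == Coords(7, 7), moves[0].dst == Coords(7, 6)
#   # moves[1].modifier == DROP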
| 25.921053
| 74
| 0.450761
| 121
| 985
| 3.628099
| 0.454545
| 0.056948
| 0.088838
| 0.072893
| 0.232346
| 0.232346
| 0.132118
| 0.132118
| 0
| 0
| 0
| 0.043046
| 0.386802
| 985
| 37
| 75
| 26.621622
| 0.683775
| 0.02132
| 0
| 0.121212
| 0
| 0
| 0.011435
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030303
| false
| 0.030303
| 0.060606
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
07fca3ec2f3896a49c6703b50dabc9ec79e258a9
| 2,160
|
py
|
Python
|
etherbank_cli/oracles.py
|
ideal-money/etherbank-cli
|
d957daa13aa951331cadc35c246c1ce8459ca8df
|
[
"BSD-2-Clause"
] | 1
|
2018-12-29T02:12:25.000Z
|
2018-12-29T02:12:25.000Z
|
etherbank_cli/oracles.py
|
ideal-money/etherbank-cli
|
d957daa13aa951331cadc35c246c1ce8459ca8df
|
[
"BSD-2-Clause"
] | 5
|
2018-12-20T12:45:39.000Z
|
2019-01-08T06:16:01.000Z
|
etherbank_cli/oracles.py
|
ideal-money/etherbank-cli
|
d957daa13aa951331cadc35c246c1ce8459ca8df
|
[
"BSD-2-Clause"
] | null | null | null |
import click
from . import utils
@click.group()
def main():
"Simple CLI for oracles to work with Ether dollar"
pass
@main.command()
@click.option('--ether-price', type=float, help="The ether price in ether dollar")
@click.option('--collateral-ratio', type=float, help="The collateral ratio")
@click.option(
'--liquidation-duration',
type=int,
help="The liquidation duration in minutes")
@click.option(
'--private-key',
callback=utils.check_account,
    help='The private key to sign the transaction')
def vote(ether_price, collateral_ratio, liquidation_duration, private_key):
"Vote on the variable for setting up Ether Bank"
assert [ether_price, collateral_ratio, liquidation_duration
].count(None) == 2, "You should set one variable per vote"
if ether_price:
var_code = 0
value = int(ether_price * 100)
elif collateral_ratio:
var_code = 1
value = int(collateral_ratio * 1000)
elif liquidation_duration:
var_code = 2
value = liquidation_duration * 60
func = utils.contracts['oracles'].functions.vote(var_code, value)
tx_hash = utils.send_transaction(func, 0, private_key)
return tx_hash
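# Illustrative usage sketch (not part of the module); the entry-point path,
# price and key below are placeholders:
#
#   $ python -m etherbank_cli.oracles vote --ether-price 123.45 \
#         --private-key 0xabc...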
@main.command()
@click.option('--oracle', required=True, help="The oracle's address")
@click.option('--score', type=int, required=True, help="The oracle's score")
@click.option(
'--private-key',
callback=utils.check_account,
    help='The private key to sign the transaction')
def set_score(oracle, score, private_key):
"Edit oracle's score"
oracle = utils.w3.toChecksumAddress(oracle)
func = utils.contracts['oracles'].functions.setScore(oracle, score)
tx_hash = utils.send_transaction(func, 0, private_key)
return tx_hash
@main.command()
@click.option(
'--private-key',
callback=utils.check_account,
    help='The private key to sign the transaction')
def finish_recruiting(private_key):
"Set recruiting as finished"
func = utils.contracts['oracles'].functions.finishRecruiting()
tx_hash = utils.send_transaction(func, 0, private_key)
return tx_hash
if __name__ == '__main__':
main()
| 30
| 82
| 0.697222
| 286
| 2,160
| 5.111888
| 0.304196
| 0.06156
| 0.032832
| 0.045144
| 0.471272
| 0.401505
| 0.305746
| 0.305746
| 0.305746
| 0.305746
| 0
| 0.009637
| 0.183333
| 2,160
| 71
| 83
| 30.422535
| 0.819161
| 0.065741
| 0
| 0.37931
| 0
| 0
| 0.254167
| 0.010185
| 0
| 0
| 0
| 0
| 0.017241
| 1
| 0.068966
| false
| 0.017241
| 0.034483
| 0
| 0.155172
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
580077f8f713a612aa61ab64e08f6fd83f19a081
| 1,454
|
py
|
Python
|
tests/effects/test_cheerlights.py
|
RatJuggler/led-shim-effects
|
3c63f5f2ce3f35f52e784489deb9212757c18cd2
|
[
"MIT"
] | 1
|
2021-04-17T16:18:14.000Z
|
2021-04-17T16:18:14.000Z
|
tests/effects/test_cheerlights.py
|
RatJuggler/led-shim-effects
|
3c63f5f2ce3f35f52e784489deb9212757c18cd2
|
[
"MIT"
] | 12
|
2019-07-26T18:01:56.000Z
|
2019-08-31T15:35:17.000Z
|
tests/effects/test_cheerlights.py
|
RatJuggler/led-shim-demo
|
3c63f5f2ce3f35f52e784489deb9212757c18cd2
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from unittest.mock import Mock, patch
import sys
sys.modules['smbus'] = Mock() # Mock the hardware layer to avoid errors.
from ledshimdemo.canvas import Canvas
from ledshimdemo.effects.cheerlights import CheerLightsEffect
class TestCheerLights(TestCase):
TEST_CANVAS_SIZE = 3 # type: int
def test_cheerlight_call(self):
canvas = Canvas(self.TEST_CANVAS_SIZE)
effect = CheerLightsEffect(canvas)
self.assertIsNone(effect.get_colour_from_channel("http://ejiferfneciudwedwojcmeiocnw.com"))
@patch('ledshimdemo.effects.cheerlights.CheerLightsEffect.get_colour_from_channel', return_value=None)
def test_effect_failed_cheerlights(self, patch_function):
canvas = Canvas(self.TEST_CANVAS_SIZE)
effect = CheerLightsEffect(canvas)
effect.compose()
patch_function.assert_called_once()
for i in range(canvas.get_size()):
self.assertEqual(canvas.get_pixel(i), canvas.BLANK_PIXEL)
def test_effect_working_cheerlights(self):
canvas = Canvas(self.TEST_CANVAS_SIZE)
effect = CheerLightsEffect(canvas)
# Must check before and after in case it changes during the test.
before = effect.get_colour_from_channel(effect.URL)
effect.compose()
after = effect.get_colour_from_channel(effect.URL)
self.assertRegex(repr(effect), "^CheerLights\\(Colour:({0}|{1})\\)$".format(before, after))
| 39.297297
| 106
| 0.72696
| 175
| 1,454
| 5.834286
| 0.405714
| 0.039177
| 0.054848
| 0.078355
| 0.27522
| 0.249755
| 0.249755
| 0.181195
| 0.181195
| 0.123408
| 0
| 0.00251
| 0.178129
| 1,454
| 36
| 107
| 40.388889
| 0.851883
| 0.078404
| 0
| 0.296296
| 0
| 0
| 0.113024
| 0.080838
| 0
| 0
| 0
| 0
| 0.148148
| 1
| 0.111111
| false
| 0
| 0.185185
| 0
| 0.37037
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5800997c4a49cdfc01a368bf3ebf423b84d98d2c
| 7,074
|
py
|
Python
|
figures/Figure_7/02_generate_images.py
|
Jhsmit/ColiCoords-Paper
|
7b92e67600930f64859d14867113b6de3edf1379
|
[
"MIT"
] | 2
|
2019-05-12T12:06:50.000Z
|
2020-11-11T16:44:49.000Z
|
figures/Figure_7/02_generate_images.py
|
Jhsmit/ColiCoords-Paper
|
7b92e67600930f64859d14867113b6de3edf1379
|
[
"MIT"
] | null | null | null |
figures/Figure_7/02_generate_images.py
|
Jhsmit/ColiCoords-Paper
|
7b92e67600930f64859d14867113b6de3edf1379
|
[
"MIT"
] | 2
|
2019-06-17T16:00:56.000Z
|
2020-02-07T22:17:47.000Z
|
from colicoords.synthetic_data import add_readout_noise, draw_poisson
from colicoords import load
import numpy as np
import mahotas as mh
from tqdm import tqdm
import os
import tifffile
def chunk_list(l, sizes):
prev = 0
for s in sizes:
result = l[prev:prev+s]
prev += s
yield result
def generate_images(cell_list, num_images, cell_per_img, cell_per_img_std, shape):
nums = np.round(np.random.normal(cell_per_img, cell_per_img_std, num_images)).astype(int)
nums = nums[nums > 0]
assert sum(nums) < len(cell_list), 'Not enough cells'
chunked = [chunk for chunk in tqdm(chunk_list(cell_list, nums))]
dicts = [generate_image(cells, shape) for cells in tqdm(chunked)]
out_dict = {}
for i, d in enumerate(dicts):
for k, v in d.items():
if 'storm' in k:
v['frame'] = i + 1
if k in out_dict:
out_dict[k] = np.append(out_dict[k], v)
else:
out_dict[k] = v
else:
if k in out_dict:
out_dict[k][i] = v
else:
out_dict[k] = np.zeros((num_images, *shape))
out_dict[k][i] = v
return out_dict
def generate_image(cells, shape, max_dist=5):
thetas = 360 * np.random.rand(len(cells))
data_list = [cell.data.rotate(theta) for cell, theta in zip(cells, thetas)]
assert all([data.names == data_list[0].names for data in data_list]), 'All cells must have the same data elements'
out_dict = {name: np.zeros(shape) for name, dclass in zip(data_list[0].names, data_list[0].dclasses) if dclass != 'storm'}
for i, data in enumerate(data_list):
valid_position = False
while not valid_position:
pos_x = int(np.round(shape[1] * np.random.rand()))
pos_y = int(np.round(shape[0] * np.random.rand()))
min1 = pos_y - int(np.floor(data.shape[0]))
max1 = min1 + data.shape[0]
min2 = pos_x - int(np.floor(data.shape[1]))
max2 = min2 + data.shape[1]
# Crop the data for when the cell is on the border of the image
d_min1 = np.max([0 - min1, 0])
d_max1 = np.min([data.shape[0] + (shape[0] - pos_y), data.shape[0]])
d_min2 = np.max([0 - min2, 0])
d_max2 = np.min([data.shape[1] + (shape[1] - pos_x), data.shape[1]])
data_cropped = data[d_min1:d_max1, d_min2:d_max2]
# Limit image position to the edges of the image
min1 = np.max([min1, 0])
max1 = np.min([max1, shape[0]])
min2 = np.max([min2, 0])
max2 = np.min([max2, shape[1]])
temp_binary = np.zeros(shape)
temp_binary[min1:max1, min2:max2] = data_cropped.binary_img
out_binary = (out_dict['binary'] > 0).astype(int)
distance_map = mh.distance(1 - out_binary, metric='euclidean')
if np.any(distance_map[temp_binary.astype(bool)] < max_dist):
continue
valid_position = True
for name in data.names:
data_elem = data_cropped.data_dict[name]
if data_elem.dclass == 'storm':
data_elem['x'] += min2
data_elem['y'] += min1
xmax, ymax = shape[1], shape[0]
bools = (data_elem['x'] < 0) + (data_elem['x'] > xmax) + (data_elem['y'] < 0) + (data_elem['y'] > ymax)
data_out = data_elem[~bools].copy()
if name in out_dict:
out_dict[name] = np.append(out_dict[name], data_out)
else:
out_dict[name] = data_out
continue
elif data_elem.dclass == 'binary':
out_dict[name][min1:max1, min2:max2] += ((i+1)*data_elem)
else:
out_dict[name][min1:max1, min2:max2] += data_elem
return out_dict
def gen_image_from_storm(storm_table, shape, sigma=1.54, sigma_std=0.3):
xmax = shape[1]
ymax = shape[0]
step = 1
xi = np.arange(step / 2, xmax, step)
yi = np.arange(step / 2, ymax, step)
x_coords = np.repeat(xi, len(yi)).reshape(len(xi), len(yi)).T
y_coords = np.repeat(yi, len(xi)).reshape(len(yi), len(xi))
x, y = storm_table['x'], storm_table['y']
img = np.zeros_like(x_coords)
intensities = storm_table['intensity']
sigma = sigma * np.ones_like(x) if not sigma_std else np.random.normal(sigma, sigma_std, size=len(x))
for _sigma, _int, _x, _y in zip(sigma, intensities, x, y):
img += _int * np.exp(-(((_x - x_coords) / _sigma) ** 2 + ((_y - y_coords) / _sigma) ** 2) / 2)
return img
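# Illustrative sketch (not part of the original script): rendering a single
# hypothetical localisation at (5, 5) onto a 10x10 pixel grid.
def _example_storm_image():
    storm = np.zeros(1, dtype=[('x', float), ('y', float), ('intensity', float)])
    storm['x'], storm['y'], storm['intensity'] = 5.0, 5.0, 100.0
    # The returned image peaks near pixel (5, 5).
    return gen_image_from_storm(storm, (10, 10))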
def gen_im(data_dir):
"""Generate microscopy images from a list of cell objects by placing them randomly oriented in the image."""
cell_list = load(os.path.join(data_dir, 'cell_obj', 'cells_final_selected.hdf5'))
out_dict = generate_images(cell_list, 1000, 10, 3, (512, 512))
if not os.path.exists(os.path.join(data_dir, 'images')):
os.mkdir(os.path.join(data_dir, 'images'))
np.save(os.path.join(data_dir, 'images', 'binary.npy'), out_dict['binary'])
np.save(os.path.join(data_dir, 'images', 'brightfield.npy'), out_dict['brightfield'])
np.save(os.path.join(data_dir, 'images', 'foci_inner.npy'), out_dict['foci_inner'])
np.save(os.path.join(data_dir, 'images', 'foci_outer.npy'), out_dict['foci_outer'])
np.save(os.path.join(data_dir, 'images', 'storm_inner.npy'), out_dict['storm_inner'])
np.save(os.path.join(data_dir, 'images', 'storm_outer.npy'), out_dict['storm_outer'])
tifffile.imsave(os.path.join(data_dir, 'images', 'binary.tif'), out_dict['binary'])
tifffile.imsave(os.path.join(data_dir, 'images', 'brightfield.tif'), out_dict['brightfield'])
tifffile.imsave(os.path.join(data_dir, 'images', 'foci_inner.tif'), out_dict['foci_inner'])
tifffile.imsave(os.path.join(data_dir, 'images', 'foci_outer.tif'), out_dict['foci_outer'])
np.savetxt(os.path.join(data_dir, 'images', 'storm_inner.txt'), out_dict['storm_inner'])
    np.savetxt(os.path.join(data_dir, 'images', 'storm_outer.txt'), out_dict['storm_outer'])
def noise_bf(data_dir):
"""add poissonian and readout noise to brightfield images"""
noise = 20
img_stack = np.load(os.path.join(data_dir, 'images', 'brightfield.npy'))
for photons in [10000, 1000, 500]:
ratio = 1.0453 # ratio between 'background' (no cells) and cell wall
img = (photons*(ratio-1))*img_stack + photons
img = draw_poisson(img)
img = add_readout_noise(img, noise)
tifffile.imsave(os.path.join(data_dir, 'images', 'bf_noise_{}_photons.tif'.format(photons)), img)
np.save(os.path.join(data_dir, 'images', 'bf_noise_{}_photons.npy'.format(photons)), img)
if __name__ == '__main__':
np.random.seed(42)
data_dir = r'.'
if not os.path.exists(os.path.join(data_dir, 'images')):
os.mkdir(os.path.join(data_dir, 'images'))
gen_im(data_dir)
noise_bf(data_dir)
| 40.890173
| 126
| 0.602771
| 1,062
| 7,074
| 3.822034
| 0.186441
| 0.055186
| 0.049273
| 0.068983
| 0.293915
| 0.232816
| 0.226657
| 0.196354
| 0.126632
| 0.034491
| 0
| 0.023374
| 0.250071
| 7,074
| 172
| 127
| 41.127907
| 0.741753
| 0.045095
| 0
| 0.128788
| 0
| 0
| 0.091516
| 0.010531
| 0
| 0
| 0
| 0
| 0.015152
| 1
| 0.045455
| false
| 0
| 0.05303
| 0
| 0.121212
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5804951a8f92330526763d3f11395d318d54d180
| 10,444
|
py
|
Python
|
flink-ai-flow/ai_flow/metric/utils.py
|
MarvinMiao/flink-ai-extended
|
e45eecf2deea6976ba3d7ba821ffb8d9ce0a17f4
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2020-12-12T15:21:05.000Z
|
2020-12-12T15:21:05.000Z
|
flink-ai-flow/ai_flow/metric/utils.py
|
MarvinMiao/flink-ai-extended
|
e45eecf2deea6976ba3d7ba821ffb8d9ce0a17f4
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2021-01-30T11:28:37.000Z
|
2021-01-30T11:28:37.000Z
|
flink-ai-flow/ai_flow/metric/utils.py
|
MarvinMiao/flink-ai-extended
|
e45eecf2deea6976ba3d7ba821ffb8d9ce0a17f4
|
[
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import ast
from typing import Text, Optional, Union, List
from ai_flow.rest_endpoint.protobuf.metric_service_pb2 import MetricMetaResponse, ListMetricMetaResponse, \
MetricSummaryResponse, ListMetricSummaryResponse
from ai_flow.rest_endpoint.service import int64Value, stringValue
from ai_flow.common.properties import Properties
from ai_flow.meta.metric_meta import MetricMeta, MetricType, MetricSummary
from ai_flow.rest_endpoint.protobuf.message_pb2 import MetricMetaProto, MetricSummaryProto, MetricTypeProto, ReturnCode, \
SUCCESS, RESOURCE_DOES_NOT_EXIST
from ai_flow.store.db.db_model import SqlMetricMeta, SqlMetricSummary
from ai_flow.store.db.db_model import MongoMetricSummary, MongoMetricMeta
def table_to_metric_meta(metric_meta_result) -> MetricMeta:
properties = metric_meta_result.properties
if properties is not None:
properties = ast.literal_eval(properties)
return MetricMeta(uuid=metric_meta_result.uuid,
name=metric_meta_result.name,
dataset_id=metric_meta_result.dataset_id,
model_name=metric_meta_result.model_name,
model_version=metric_meta_result.model_version,
job_id=metric_meta_result.job_id,
start_time=metric_meta_result.start_time,
end_time=metric_meta_result.end_time,
metric_type=MetricType.value_of(metric_meta_result.metric_type),
uri=metric_meta_result.uri,
tags=metric_meta_result.tags,
metric_description=metric_meta_result.metric_description,
properties=properties)
def table_to_metric_summary(metric_summary_result) -> MetricSummary:
return MetricSummary(uuid=metric_summary_result.uuid,
metric_id=metric_summary_result.metric_id,
metric_key=metric_summary_result.metric_key,
metric_value=metric_summary_result.metric_value)
def metric_meta_to_table(name: Text,
dataset_id: int,
model_name: Optional[Text],
model_version: Optional[Text],
job_id: int,
start_time: int,
end_time: int,
metric_type: MetricType,
uri: Text,
tags: Text,
metric_description: Text,
properties: Properties,
store_type: Text = 'SqlAlchemyStore'):
if properties is not None:
properties = str(properties)
if store_type == 'MongoStore':
_class = MongoMetricMeta
else:
_class = SqlMetricMeta
return _class(name=name,
dataset_id=dataset_id,
model_name=model_name,
model_version=model_version,
job_id=job_id,
start_time=start_time,
end_time=end_time,
metric_type=metric_type.value,
uri=uri,
tags=tags,
metric_description=metric_description,
properties=properties)
def metric_summary_to_table(metric_id: int,
metric_key: Text,
metric_value: Text,
store_type: Text = 'SqlAlchemyStore'):
if store_type == 'MongoStore':
_class = MongoMetricSummary
else:
_class = SqlMetricSummary
return _class(metric_id=metric_id,
metric_key=metric_key,
metric_value=metric_value)
def metric_meta_to_proto(metric_meta: MetricMeta) -> MetricMetaProto:
if metric_meta.metric_type == MetricType.DATASET:
metric_type = MetricTypeProto.DATASET
else:
metric_type = MetricTypeProto.MODEL
return MetricMetaProto(uuid=metric_meta.uuid,
name=stringValue(metric_meta.name),
dataset_id=int64Value(metric_meta.dataset_id),
model_name=stringValue(metric_meta.model_name),
model_version=stringValue(metric_meta.model_version),
job_id=int64Value(metric_meta.job_id),
start_time=int64Value(metric_meta.start_time),
end_time=int64Value(metric_meta.end_time),
metric_type=metric_type,
uri=stringValue(metric_meta.uri),
tags=stringValue(metric_meta.tags),
metric_description=stringValue(metric_meta.metric_description),
properties=metric_meta.properties)
def metric_summary_to_proto(metric_summary: MetricSummary) -> MetricSummaryProto:
return MetricSummaryProto(uuid=metric_summary.uuid,
metric_id=int64Value(metric_summary.metric_id),
metric_key=stringValue(metric_summary.metric_key),
metric_value=stringValue(metric_summary.metric_value))
def proto_to_metric_meta(metric_meta_proto: MetricMetaProto) -> MetricMeta:
if MetricTypeProto.DATASET == metric_meta_proto.metric_type:
metric_type = MetricType.DATASET
else:
metric_type = MetricType.MODEL
return MetricMeta(uuid=metric_meta_proto.uuid,
name=metric_meta_proto.name.value,
dataset_id=metric_meta_proto.dataset_id.value,
model_name=metric_meta_proto.model_name.value,
model_version=metric_meta_proto.model_version.value,
job_id=metric_meta_proto.job_id.value,
start_time=metric_meta_proto.start_time.value,
end_time=metric_meta_proto.end_time.value,
metric_type=metric_type,
uri=metric_meta_proto.uri.value if metric_meta_proto.HasField('uri') else None,
tags=metric_meta_proto.tags.value if metric_meta_proto.HasField('tags') else None,
metric_description=metric_meta_proto.metric_description.value
if metric_meta_proto.HasField('metric_description') else None,
properties=metric_meta_proto.properties
)
def proto_to_metric_summary(metric_summary_proto: MetricSummaryProto) -> MetricSummary:
return MetricSummary(uuid=metric_summary_proto.uuid,
metric_id=metric_summary_proto.metric_id.value,
metric_key=metric_summary_proto.metric_key.value
if metric_summary_proto.HasField('metric_key') else None,
metric_value=metric_summary_proto.metric_value.value
if metric_summary_proto.HasField('metric_value') else None
)
def _warp_metric_meta_response(metric_meta: Optional[MetricMeta]) -> MetricMetaResponse:
if metric_meta is not None:
return MetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(),
metric_meta=metric_meta_to_proto(metric_meta))
else:
return MetricMetaResponse(return_code=1,
return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(),
metric_meta=None)
def _warp_list_metric_meta_response(metric_meta: Union[None, MetricMeta, List[MetricMeta]]) -> MetricMetaResponse:
if metric_meta is not None:
if isinstance(metric_meta, MetricMeta):
return ListMetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(),
metric_meta=[metric_meta_to_proto(metric_meta)])
else:
res = []
for meta in metric_meta:
res.append(metric_meta_to_proto(meta))
return ListMetricMetaResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(),
metric_meta=res)
else:
return ListMetricMetaResponse(return_code=1,
return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(),
metric_meta=None)
def _warp_metric_summary_response(metric_summary: Optional[MetricSummary]) -> MetricSummaryResponse:
if metric_summary is not None:
return MetricSummaryResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(),
metric_summary=metric_summary_to_proto(metric_summary))
else:
return MetricSummaryResponse(return_code=1,
return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(),
metric_summary=None)
def _warp_list_metric_summary_response(metric_summary: Optional[List[MetricSummary]]) -> ListMetricSummaryResponse:
if metric_summary is not None:
res = []
for summary in metric_summary:
res.append(metric_summary_to_proto(summary))
return ListMetricSummaryResponse(return_code=0, return_msg=ReturnCode.Name(SUCCESS).lower(),
metric_summary=res)
else:
return ListMetricSummaryResponse(return_code=1,
return_msg=ReturnCode.Name(RESOURCE_DOES_NOT_EXIST).lower(),
metric_summary=None)
| 49.032864
| 122
| 0.627633
| 1,094
| 10,444
| 5.66819
| 0.142596
| 0.10966
| 0.041122
| 0.033382
| 0.362038
| 0.254959
| 0.153524
| 0.140945
| 0.115465
| 0.115465
| 0
| 0.003742
| 0.309077
| 10,444
| 212
| 123
| 49.264151
| 0.855599
| 0.072003
| 0
| 0.208333
| 0
| 0
| 0.010028
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.053571
| 0.017857
| 0.22619
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
58055aabc65a23b166d03e3a5c7b5b2ffaa06173
| 3,154
|
py
|
Python
|
src/moduels/gui/Tab_Help.py
|
HaujetZhao/Caps_Writer
|
f2b2038a2c0984a1d356f024cbac421fe594601a
|
[
"MIT"
] | 234
|
2020-07-10T11:23:09.000Z
|
2022-03-31T09:41:40.000Z
|
src/moduels/gui/Tab_Help.py
|
HaujetZhao/Caps_Writer
|
f2b2038a2c0984a1d356f024cbac421fe594601a
|
[
"MIT"
] | 9
|
2020-07-11T08:31:11.000Z
|
2022-03-01T04:30:08.000Z
|
src/moduels/gui/Tab_Help.py
|
HaujetZhao/Caps_Writer
|
f2b2038a2c0984a1d356f024cbac421fe594601a
|
[
"MIT"
] | 23
|
2020-07-14T08:58:44.000Z
|
2022-03-17T06:38:10.000Z
|
# -*- coding: UTF-8 -*-
from PySide2.QtWidgets import QWidget, QPushButton, QVBoxLayout
from PySide2.QtCore import Signal
from moduels.component.NormalValue import 常量
from moduels.component.SponsorDialog import SponsorDialog
import os, webbrowser
class Tab_Help(QWidget):
状态栏消息 = Signal(str, int)
def __init__(self):
super().__init__()
        self.initElement()  # initialize the widgets first
        self.initSlots()  # then connect the widgets to their signal handlers
        self.initLayout()  # then lay them out
        self.initValue()  # finally set each widget's values
def initElement(self):
self.打开帮助按钮 = QPushButton(self.tr('打开帮助文档'))
self.ffmpegMannualNoteButton = QPushButton(self.tr('查看作者的 FFmpeg 笔记'))
self.openVideoHelpButtone = QPushButton(self.tr('查看视频教程'))
self.openGiteePage = QPushButton(self.tr(f'当前版本是 v{常量.软件版本},到 Gitee 检查新版本'))
self.openGithubPage = QPushButton(self.tr(f'当前版本是 v{常量.软件版本},到 Github 检查新版本'))
self.linkToDiscussPage = QPushButton(self.tr('加入 QQ 群'))
self.tipButton = QPushButton(self.tr('打赏作者'))
self.masterLayout = QVBoxLayout()
def initSlots(self):
self.打开帮助按钮.clicked.connect(self.openHelpDocument)
self.ffmpegMannualNoteButton.clicked.connect(lambda: webbrowser.open(self.tr(r'https://hacpai.com/article/1595480295489')))
self.openVideoHelpButtone.clicked.connect(lambda: webbrowser.open(self.tr(r'https://www.bilibili.com/video/BV12A411p73r/')))
self.openGiteePage.clicked.connect(lambda: webbrowser.open(self.tr(r'https://gitee.com/haujet/CapsWriter/releases')))
self.openGithubPage.clicked.connect(lambda: webbrowser.open(self.tr(r'https://github.com/HaujetZhao/CapsWriter/releases')))
self.linkToDiscussPage.clicked.connect(lambda: webbrowser.open(
self.tr(r'https://qm.qq.com/cgi-bin/qm/qr?k=DgiFh5cclAElnELH4mOxqWUBxReyEVpm&jump_from=webapi')))
self.tipButton.clicked.connect(lambda: SponsorDialog(self))
def initLayout(self):
self.setLayout(self.masterLayout)
# self.masterLayout.addWidget(self.打开帮助按钮)
# self.masterLayout.addWidget(self.ffmpegMannualNoteButton)
self.masterLayout.addWidget(self.openVideoHelpButtone)
self.masterLayout.addWidget(self.openGiteePage)
self.masterLayout.addWidget(self.openGithubPage)
self.masterLayout.addWidget(self.linkToDiscussPage)
self.masterLayout.addWidget(self.tipButton)
def initValue(self):
self.打开帮助按钮.setMaximumHeight(100)
self.ffmpegMannualNoteButton.setMaximumHeight(100)
self.openVideoHelpButtone.setMaximumHeight(100)
self.openGiteePage.setMaximumHeight(100)
self.openGithubPage.setMaximumHeight(100)
self.linkToDiscussPage.setMaximumHeight(100)
self.tipButton.setMaximumHeight(100)
def openHelpDocument(self):
try:
if 常量.系统平台 == 'Darwin':
import shlex
os.system("open " + shlex.quote(self.tr("./misc/Docs/README_zh.html")))
elif 常量.系统平台 == 'Windows':
os.startfile(os.path.realpath(self.tr('./misc/Docs/README_zh.html')))
        except Exception:
            print('未能打开帮助文档')  # "failed to open the help document"
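# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# A minimal, assumed way to preview this tab on its own; in the real application the
# tab is presumably added to a QTabWidget in the main window instead.
if __name__ == '__main__':
    import sys
    from PySide2.QtWidgets import QApplication
    app = QApplication(sys.argv)
    tab = Tab_Help()
    tab.show()
    sys.exit(app.exec_())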
| 45.057143
| 132
| 0.69499
| 337
| 3,154
| 6.468843
| 0.338279
| 0.038532
| 0.054587
| 0.093119
| 0.157798
| 0.157798
| 0.157798
| 0.133945
| 0.133945
| 0
| 0
| 0.017836
| 0.182308
| 3,154
| 69
| 133
| 45.710145
| 0.827453
| 0.049778
| 0
| 0
| 0
| 0.018182
| 0.146203
| 0.017397
| 0
| 0
| 0
| 0
| 0
| 1
| 0.109091
| false
| 0
| 0.109091
| 0
| 0.254545
| 0.018182
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5805ce50d417618b337a1e60276ff06de0f997f8
| 1,425
|
py
|
Python
|
utils/visual.py
|
xizaoqu/Panoptic-PolarNet
|
8ce05f437f54e030eac7de150f43caab2810cfbb
|
[
"BSD-3-Clause"
] | 90
|
2021-03-30T08:02:15.000Z
|
2022-03-30T03:29:56.000Z
|
utils/visual.py
|
xizaoqu/Panoptic-PolarNet
|
8ce05f437f54e030eac7de150f43caab2810cfbb
|
[
"BSD-3-Clause"
] | 11
|
2021-04-01T02:29:08.000Z
|
2022-03-04T07:30:50.000Z
|
utils/visual.py
|
xizaoqu/Panoptic-PolarNet
|
8ce05f437f54e030eac7de150f43caab2810cfbb
|
[
"BSD-3-Clause"
] | 21
|
2021-04-01T09:29:38.000Z
|
2022-03-28T01:36:02.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import cv2
import numpy as np
def flow_to_img(flow, normalize=True):
"""Convert flow to viewable image, using color hue to encode flow vector orientation, and color saturation to
encode vector length. This is similar to the OpenCV tutorial on dense optical flow, except that they map vector
length to the value plane of the HSV color model, instead of the saturation plane, as we do here.
Args:
flow: optical flow
normalize: Normalize flow to 0..255
Returns:
img: viewable representation of the dense optical flow in RGB format
Ref:
https://github.com/philferriere/tfoptflow/blob/33e8a701e34c8ce061f17297d40619afbd459ade/tfoptflow/optflow.py
"""
hsv = np.zeros((flow.shape[0], flow.shape[1], 3), dtype=np.uint8)
flow_magnitude, flow_angle = cv2.cartToPolar(flow[..., 0].astype(np.float32), flow[..., 1].astype(np.float32))
# A couple times, we've gotten NaNs out of the above...
nans = np.isnan(flow_magnitude)
if np.any(nans):
nans = np.where(nans)
flow_magnitude[nans] = 0.
# Normalize
hsv[..., 0] = flow_angle * 180 / np.pi / 2
if normalize is True:
hsv[..., 1] = cv2.normalize(flow_magnitude, None, 0, 255, cv2.NORM_MINMAX)
else:
hsv[..., 1] = flow_magnitude
hsv[..., 2] = 255
img = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
return img
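# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Minimal check on a synthetic flow field (uniform motion to the right); only the
# function above plus the NumPy/OpenCV imports already present are assumed.
if __name__ == '__main__':
    synthetic_flow = np.zeros((64, 64, 2), dtype=np.float32)
    synthetic_flow[..., 0] = 5.0  # constant horizontal displacement
    vis = flow_to_img(synthetic_flow)
    print(vis.shape, vis.dtype)   # (64, 64, 3) uint8; a single hue because the direction is uniform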
| 39.583333
| 116
| 0.665263
| 207
| 1,425
| 4.52657
| 0.497585
| 0.06937
| 0.034152
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057502
| 0.218947
| 1,425
| 36
| 117
| 39.583333
| 0.784367
| 0.486316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.117647
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
5806fd8ba37feb4c4d823dfb9c4c105ed07bdd0c
| 624
|
py
|
Python
|
DistributedRL/Gateway/build/Code/sim/Parser/LAI/GreenIndex.py
|
zhkmxx9302013/SoftwarePilot
|
826098465b800085774946c20a7a283f369f1d21
|
[
"MIT"
] | 4
|
2019-03-20T17:46:01.000Z
|
2019-03-31T17:32:44.000Z
|
DistributedRL/Gateway/build/Code/sim/Parser/LAI/GreenIndex.py
|
zhkmxx9302013/SoftwarePilot
|
826098465b800085774946c20a7a283f369f1d21
|
[
"MIT"
] | null | null | null |
DistributedRL/Gateway/build/Code/sim/Parser/LAI/GreenIndex.py
|
zhkmxx9302013/SoftwarePilot
|
826098465b800085774946c20a7a283f369f1d21
|
[
"MIT"
] | null | null | null |
import argparse
from PIL import Image, ImageStat
import math
parser = argparse.ArgumentParser()
parser.add_argument('fname')
parser.add_argument('pref', default="", nargs="?")
args = parser.parse_args()
im = Image.open(args.fname)
RGB = im.convert('RGB')
imWidth, imHeight = im.size
ratg = 1.2
ratgb = 1.66
ming = 10
ratr = 2
speed = 8
leafcount = 0
total = 0
for i in range(0, int(imWidth/speed)):
for j in range(0, int(imHeight/speed)):
R,G,B = RGB.getpixel((i*speed,j*speed))
if R*ratg < G and B*ratgb < G and B*ratr < R:
leafcount = leafcount + 1
total = total+1
print("LAI="+str(float(leafcount)/total))
| 20.8
| 50
| 0.684295
| 104
| 624
| 4.076923
| 0.509615
| 0.042453
| 0.080189
| 0.051887
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028571
| 0.158654
| 624
| 29
| 51
| 21.517241
| 0.779048
| 0
| 0
| 0
| 0
| 0
| 0.027244
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0.041667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
58077bea9c4435d13d9ff119348291eadd3323f7
| 4,561
|
py
|
Python
|
reports/heliosV1/python/heliosStorageStats/heliosStorageStats.py
|
ped998/scripts
|
0dcaaf47f9676210e1c972a5d59d8d0de82a1d93
|
[
"Apache-2.0"
] | null | null | null |
reports/heliosV1/python/heliosStorageStats/heliosStorageStats.py
|
ped998/scripts
|
0dcaaf47f9676210e1c972a5d59d8d0de82a1d93
|
[
"Apache-2.0"
] | null | null | null |
reports/heliosV1/python/heliosStorageStats/heliosStorageStats.py
|
ped998/scripts
|
0dcaaf47f9676210e1c972a5d59d8d0de82a1d93
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""cluster storage stats for python"""
# import pyhesity wrapper module
from pyhesity import *
from datetime import datetime
import codecs
# command line arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--vip', type=str, default='helios.cohesity.com') # cluster to connect to
parser.add_argument('-u', '--username', type=str, required=True) # username
parser.add_argument('-d', '--domain', type=str, default='local') # (optional) domain - defaults to local
parser.add_argument('-pwd', '--password', type=str, default=None) # optional password
parser.add_argument('-n', '--unit', type=str, choices=['GiB', 'TiB', 'gib', 'tib'], default='TiB')
args = parser.parse_args()
vip = args.vip
username = args.username
domain = args.domain
password = args.password
unit = args.unit
if unit.lower() == 'tib':
multiplier = 1024 * 1024 * 1024 * 1024
unit = 'TiB'
else:
multiplier = 1024 * 1024 * 1024
unit = 'GiB'
def toUnits(value):
return round(float(value) / multiplier, 1)
# authenticate
apiauth(vip=vip, username=username, domain=domain, password=password, useApiKey=True, noretry=True)
# outfile
now = datetime.now()
# cluster = api('get', 'cluster')
dateString = now.strftime("%Y-%m-%d")
outfile = 'heliosStorageStats-%s.csv' % dateString
f = codecs.open(outfile, 'w')
# headings
f.write('Cluster Name,Capacity (%s),Consumed (%s),Free (%s),Used %%,Data In (%s),Data Written (%s),Storage Reduction,Data Reduction\n' % (unit, unit, unit, unit, unit))
stats = {}
def parseStats(clusterName, dataPoint, statName):
if clusterName not in stats.keys():
stats[clusterName] = {}
stats[clusterName][statName] = dataPoint['data']['int64Value']
endMsecs = dateToUsecs(now.strftime("%Y-%m-%d %H:%M:%S")) / 1000
startMsecs = (timeAgo(2, 'days')) / 1000
print('\nGathering cluster stats:\n')
for cluster in heliosClusters():
heliosCluster(cluster)
print(' %s' % cluster['name'])
capacityStats = api('get', 'statistics/timeSeriesStats?endTimeMsecs=%s&entityId=%s&metricName=kCapacityBytes&metricUnitType=0&range=day&rollupFunction=average&rollupIntervalSecs=86400&schemaName=kBridgeClusterStats&startTimeMsecs=%s' % (endMsecs, cluster['clusterId'], startMsecs))
consumedStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=kBridgeClusterTierPhysicalStats&metricName=kMorphedUsageBytes&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s:Local&endTimeMsecs=%s' % (startMsecs, cluster['clusterId'], endMsecs))
dataInStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=ApolloV2ClusterStats&metricName=BrickBytesLogical&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s (ID %s)&endTimeMsecs=%s' % (startMsecs, cluster['name'], cluster['clusterId'], endMsecs))
dataWrittenStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=ApolloV2ClusterStats&metricName=ChunkBytesMorphed&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s (ID %s)&endTimeMsecs=%s' % (startMsecs, cluster['name'], cluster['clusterId'], endMsecs))
logicalSizeStats = api('get', 'statistics/timeSeriesStats?startTimeMsecs=%s&schemaName=kBridgeClusterLogicalStats&metricName=kUnmorphedUsageBytes&rollupIntervalSecs=86400&rollupFunction=latest&entityIdList=%s&endTimeMsecs=%s' % (startMsecs, cluster['clusterId'], endMsecs))
parseStats(cluster['name'], capacityStats['dataPointVec'][0], 'capacity')
parseStats(cluster['name'], consumedStats['dataPointVec'][0], 'consumed')
parseStats(cluster['name'], dataInStats['dataPointVec'][0], 'dataIn')
parseStats(cluster['name'], dataWrittenStats['dataPointVec'][0], 'dataWritten')
parseStats(cluster['name'], logicalSizeStats['dataPointVec'][0], 'logicalSize')
for clusterName in sorted(stats.keys()):
capacity = stats[clusterName]['capacity']
consumed = stats[clusterName]['consumed']
dataIn = stats[clusterName]['dataIn']
dataWritten = stats[clusterName]['dataWritten']
logicalSize = stats[clusterName]['logicalSize']
free = capacity - consumed
pctUsed = round(100 * consumed / capacity, 0)
storageReduction = round(float(logicalSize) / consumed, 1)
dataReduction = round(float(dataIn) / dataWritten, 1)
f.write('"%s","%s","%s","%s","%s","%s","%s","%s","%s"\n' % (clusterName, toUnits(capacity), toUnits(consumed), toUnits(free), pctUsed, toUnits(dataIn), toUnits(dataWritten), storageReduction, dataReduction))
f.close()
print('\nOutput saved to %s\n' % outfile)
| 49.043011
| 293
| 0.726814
| 501
| 4,561
| 6.60479
| 0.315369
| 0.026594
| 0.006346
| 0.007253
| 0.229979
| 0.221517
| 0.159263
| 0.125416
| 0.122696
| 0.070716
| 0
| 0.019425
| 0.10831
| 4,561
| 92
| 294
| 49.576087
| 0.794197
| 0.056128
| 0
| 0
| 0
| 0.09375
| 0.375146
| 0.234554
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0.046875
| 0.0625
| 0.015625
| 0.109375
| 0.046875
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6af1bee4f8dfc29969377047eaf5953641fb77f7
| 4,823
|
py
|
Python
|
tests/test_find_forks/test_find_forks.py
|
ivan2kh/find_forks
|
409251282a85da48445afc03c5a1797df393ca95
|
[
"MIT"
] | 41
|
2015-05-15T14:37:42.000Z
|
2022-02-05T01:52:00.000Z
|
tests/test_find_forks/test_find_forks.py
|
ivan2kh/find_forks
|
409251282a85da48445afc03c5a1797df393ca95
|
[
"MIT"
] | 12
|
2015-05-15T22:10:36.000Z
|
2021-12-05T14:21:58.000Z
|
tests/test_find_forks/test_find_forks.py
|
ivan2kh/find_forks
|
409251282a85da48445afc03c5a1797df393ca95
|
[
"MIT"
] | 16
|
2015-05-15T14:44:33.000Z
|
2020-11-18T00:54:18.000Z
|
# coding: utf-8
"""test_find_fork."""
# pylint: disable=no-self-use
from __future__ import absolute_import, division, print_function, unicode_literals
from os import path
import unittest
from six import PY3
from find_forks.__init__ import CONFIG
from find_forks.find_forks import add_forks, determine_names, find_forks, main
from .__init__ import BASEPATH
if PY3:
from unittest.mock import patch, MagicMock, Mock # pylint: disable=no-name-in-module
else:
from mock import patch, MagicMock, Mock
class FindForksCommon(unittest.TestCase):
@staticmethod
def make_mock(json_response):
"""Used in test_interesting.py."""
response_mock = MagicMock()
response_mock.read = Mock(return_value=json_response)
if PY3:
response_mock.status = 200
response_mock.getheader = Mock(return_value='<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2>; rel="next", '
'<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=3>; rel="last"')
else:
response_mock.code = 200
response_mock.info = Mock(return_value=(('link', '<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2>; rel="next", '
'<https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=3>; rel="last"'), ))
return response_mock
def make_test(self, response_mock):
"""Used in test_interesting.py."""
url = 'https://github.com/frost-nzcr4/find_forks'
with patch('find_forks.find_forks.urllib.request.urlopen', return_value=response_mock) as urlopen_mock:
with patch('find_forks.git_wrapper.subprocess.call', return_value=None):
self.assertEqual(add_forks(url), 'https://api.github.com/repos/frost-nzcr4/find_forks/forks?page=2')
urlopen_mock.assert_called_once_with(url, timeout=6)
if PY3:
response_mock.status = 404
else:
response_mock.code = 404
self.assertIsNone(add_forks(url))
class FindForksTest(FindForksCommon):
def test_add_forks(self):
self.assertIsNone(add_forks('httttps://unavailable!url'))
with open(path.join(BASEPATH, 'fixture/response.json'), 'rb') as fixture:
json_response = fixture.read()
response_mock = self.make_mock(json_response)
self.make_test(response_mock)
def test_determine_names(self):
"""To run this test you'll need to prepare git first, run:
git remote add test-origin-1 https://github.com/frost-nzcr4/find_forks.git
git remote add test-origin-2 https://github.com/yagmort/symfony1.git
git remote add test-origin-3 git@github.com:tjerkw/Android-SlideExpandableListView.git
"""
user, repo = determine_names()
self.assertEqual(user, 'frost-nzcr4')
self.assertEqual(repo, 'find_forks')
user, repo = determine_names('test-origin-1')
self.assertEqual(user, 'frost-nzcr4')
self.assertEqual(repo, 'webmoney')
user, repo = determine_names('test-origin-2')
self.assertEqual(user, 'yagmort')
self.assertEqual(repo, 'symfony1')
user, repo = determine_names('test-origin-3')
self.assertEqual(user, 'tjerkw')
self.assertEqual(repo, 'Android-SlideExpandableListView')
with self.assertRaises(RuntimeError):
user, repo = determine_names('name-with-an-error')
def test_find_forks(self):
sent_args = {
'per_page': 99,
'start_page': 3
}
url = 'https://api.github.com/repos/frost-nzcr4/find_forks/forks?per_page=%s&page=%s' % (sent_args['per_page'], sent_args['start_page'])
with patch('find_forks.git_wrapper.subprocess.call', return_value=None) as call_mock:
with patch('find_forks.find_forks.add_forks', return_value=None) as add_forks_mock:
find_forks(**sent_args)
add_forks_mock.assert_called_once_with(url)
call_mock.assert_called_once()
def test_main(self):
with patch('find_forks.find_forks.find_forks', return_value=None) as find_forks_mock:
main()
sent_args = CONFIG.copy()
sent_args.update({'user': None, 'repo': None, 'no_fetch': False})
find_forks_mock.assert_called_once_with(**sent_args)
# Test __version__ exceptions.
find_forks_mock = MagicMock(side_effect=SystemError())
del find_forks_mock.__version__
modules = {
'find_forks.__init__': find_forks_mock
}
with patch.dict('sys.modules', modules):
self.assertRaises(ImportError, main)
| 41.577586
| 146
| 0.644412
| 602
| 4,823
| 4.918605
| 0.237542
| 0.091185
| 0.037825
| 0.051334
| 0.387707
| 0.312732
| 0.206349
| 0.184059
| 0.151638
| 0.151638
| 0
| 0.011999
| 0.239685
| 4,823
| 115
| 147
| 41.93913
| 0.795473
| 0.096828
| 0
| 0.097561
| 0
| 0.073171
| 0.224238
| 0.060479
| 0
| 0
| 0
| 0
| 0.207317
| 1
| 0.073171
| false
| 0
| 0.121951
| 0
| 0.231707
| 0.012195
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6af1e67adc2134fb57f91c04b0e1763048fc52e2
| 15,853
|
py
|
Python
|
neutron/agent/l3/dvr_router.py
|
insequent/neutron
|
2b1c4f121e3e8ba1c5eb2ba6661bf6326e1507c5
|
[
"Apache-2.0"
] | null | null | null |
neutron/agent/l3/dvr_router.py
|
insequent/neutron
|
2b1c4f121e3e8ba1c5eb2ba6661bf6326e1507c5
|
[
"Apache-2.0"
] | null | null | null |
neutron/agent/l3/dvr_router.py
|
insequent/neutron
|
2b1c4f121e3e8ba1c5eb2ba6661bf6326e1507c5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2015 Openstack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import binascii
import netaddr
from oslo_log import log as logging
from oslo_utils import excutils
from neutron.agent.l3 import dvr_fip_ns
from neutron.agent.l3 import dvr_snat_ns
from neutron.agent.l3 import router_info as router
from neutron.agent.linux import ip_lib
from neutron.common import constants as l3_constants
from neutron.common import utils as common_utils
from neutron.i18n import _LE
LOG = logging.getLogger(__name__)
# xor-folding mask used for IPv6 rule index
MASK_30 = 0x3fffffff
class DvrRouter(router.RouterInfo):
def __init__(self, agent, host, *args, **kwargs):
super(DvrRouter, self).__init__(*args, **kwargs)
self.agent = agent
self.host = host
self.floating_ips_dict = {}
self.snat_iptables_manager = None
# Linklocal subnet for router and floating IP namespace link
self.rtr_fip_subnet = None
self.dist_fip_count = None
self.snat_namespace = None
def get_floating_ips(self):
"""Filter Floating IPs to be hosted on this agent."""
floating_ips = super(DvrRouter, self).get_floating_ips()
return [i for i in floating_ips if i['host'] == self.host]
def get_snat_interfaces(self):
return self.router.get(l3_constants.SNAT_ROUTER_INTF_KEY, [])
def get_snat_int_device_name(self, port_id):
long_name = dvr_snat_ns.SNAT_INT_DEV_PREFIX + port_id
return long_name[:self.driver.DEV_NAME_LEN]
def _handle_fip_nat_rules(self, interface_name, action):
"""Configures NAT rules for Floating IPs for DVR.
Remove all the rules. This is safe because if
use_namespaces is set as False then the agent can
only configure one router, otherwise each router's
NAT rules will be in their own namespace.
"""
self.iptables_manager.ipv4['nat'].empty_chain('POSTROUTING')
self.iptables_manager.ipv4['nat'].empty_chain('snat')
# Add back the jump to float-snat
self.iptables_manager.ipv4['nat'].add_rule('snat', '-j $float-snat')
# And add them back if the action is add_rules
if action == 'add_rules' and interface_name:
rule = ('POSTROUTING', '! -i %(interface_name)s '
'! -o %(interface_name)s -m conntrack ! '
'--ctstate DNAT -j ACCEPT' %
{'interface_name': interface_name})
self.iptables_manager.ipv4['nat'].add_rule(*rule)
self.iptables_manager.apply()
def floating_ip_added_dist(self, fip, fip_cidr):
"""Add floating IP to FIP namespace."""
floating_ip = fip['floating_ip_address']
fixed_ip = fip['fixed_ip_address']
rule_pr = self.fip_ns.allocate_rule_priority()
self.floating_ips_dict[floating_ip] = rule_pr
fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id)
ip_rule = ip_lib.IPRule(namespace=self.ns_name)
ip_rule.rule.add(fixed_ip, dvr_fip_ns.FIP_RT_TBL, rule_pr)
#Add routing rule in fip namespace
fip_ns_name = self.fip_ns.get_name()
rtr_2_fip, _ = self.rtr_fip_subnet.get_pair()
device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name)
device.route.add_route(fip_cidr, str(rtr_2_fip.ip))
interface_name = (
self.fip_ns.get_ext_device_name(
self.fip_ns.agent_gateway_port['id']))
ip_lib.send_garp_for_proxyarp(fip_ns_name,
interface_name,
floating_ip,
self.agent_conf.send_arp_for_ha)
# update internal structures
self.dist_fip_count = self.dist_fip_count + 1
def floating_ip_removed_dist(self, fip_cidr):
"""Remove floating IP from FIP namespace."""
floating_ip = fip_cidr.split('/')[0]
rtr_2_fip_name = self.fip_ns.get_rtr_ext_device_name(self.router_id)
fip_2_rtr_name = self.fip_ns.get_int_device_name(self.router_id)
if self.rtr_fip_subnet is None:
self.rtr_fip_subnet = self.fip_ns.local_subnets.allocate(
self.router_id)
rtr_2_fip, fip_2_rtr = self.rtr_fip_subnet.get_pair()
fip_ns_name = self.fip_ns.get_name()
if floating_ip in self.floating_ips_dict:
rule_pr = self.floating_ips_dict[floating_ip]
ip_rule = ip_lib.IPRule(namespace=self.ns_name)
ip_rule.rule.delete(floating_ip, dvr_fip_ns.FIP_RT_TBL, rule_pr)
self.fip_ns.deallocate_rule_priority(rule_pr)
#TODO(rajeev): Handle else case - exception/log?
device = ip_lib.IPDevice(fip_2_rtr_name, namespace=fip_ns_name)
device.route.delete_route(fip_cidr, str(rtr_2_fip.ip))
# check if this is the last FIP for this router
self.dist_fip_count = self.dist_fip_count - 1
if self.dist_fip_count == 0:
#remove default route entry
device = ip_lib.IPDevice(rtr_2_fip_name, namespace=self.ns_name)
ns_ip = ip_lib.IPWrapper(namespace=fip_ns_name)
device.route.delete_gateway(str(fip_2_rtr.ip),
table=dvr_fip_ns.FIP_RT_TBL)
self.fip_ns.local_subnets.release(self.router_id)
self.rtr_fip_subnet = None
ns_ip.del_veth(fip_2_rtr_name)
is_last = self.fip_ns.unsubscribe(self.router_id)
if is_last:
# TODO(Carl) I can't help but think that another router could
# come in and want to start using this namespace while this is
# destroying it. The two could end up conflicting on
# creating/destroying interfaces and such. I think I'd like a
# semaphore to sync creation/deletion of this namespace.
self.fip_ns.delete()
self.fip_ns = None
def add_floating_ip(self, fip, interface_name, device):
if not self._add_fip_addr_to_device(fip, device):
return l3_constants.FLOATINGIP_STATUS_ERROR
# Special Handling for DVR - update FIP namespace
ip_cidr = common_utils.ip_to_cidr(fip['floating_ip_address'])
self.floating_ip_added_dist(fip, ip_cidr)
return l3_constants.FLOATINGIP_STATUS_ACTIVE
def remove_floating_ip(self, device, ip_cidr):
super(DvrRouter, self).remove_floating_ip(device, ip_cidr)
self.floating_ip_removed_dist(ip_cidr)
def create_snat_namespace(self):
# TODO(mlavalle): in the near future, this method should contain the
# code in the L3 agent that creates a gateway for a dvr. The first step
# is to move the creation of the snat namespace here
self.snat_namespace = dvr_snat_ns.SnatNamespace(self.router['id'],
self.agent_conf,
self.driver,
self.use_ipv6)
self.snat_namespace.create()
return self.snat_namespace
def delete_snat_namespace(self):
# TODO(mlavalle): in the near future, this method should contain the
# code in the L3 agent that removes an external gateway for a dvr. The
# first step is to move the deletion of the snat namespace here
self.snat_namespace.delete()
self.snat_namespace = None
def _get_internal_port(self, subnet_id):
"""Return internal router port based on subnet_id."""
router_ports = self.router.get(l3_constants.INTERFACE_KEY, [])
for port in router_ports:
fips = port['fixed_ips']
for f in fips:
if f['subnet_id'] == subnet_id:
return port
def _update_arp_entry(self, ip, mac, subnet_id, operation):
"""Add or delete arp entry into router namespace for the subnet."""
port = self._get_internal_port(subnet_id)
# update arp entry only if the subnet is attached to the router
if not port:
return
try:
# TODO(mrsmith): optimize the calls below for bulk calls
interface_name = self.get_internal_device_name(port['id'])
device = ip_lib.IPDevice(interface_name, namespace=self.ns_name)
if operation == 'add':
device.neigh.add(ip, mac)
elif operation == 'delete':
device.neigh.delete(ip, mac)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("DVR: Failed updating arp entry"))
def _set_subnet_arp_info(self, port):
"""Set ARP info retrieved from Plugin for existing ports."""
if 'id' not in port['subnet']:
return
subnet_id = port['subnet']['id']
# TODO(Carl) Can we eliminate the need to make this RPC while
# processing a router.
subnet_ports = self.agent.get_ports_by_subnet(subnet_id)
for p in subnet_ports:
if p['device_owner'] not in l3_constants.ROUTER_INTERFACE_OWNERS:
for fixed_ip in p['fixed_ips']:
self._update_arp_entry(fixed_ip['ip_address'],
p['mac_address'],
subnet_id,
'add')
def _map_internal_interfaces(self, int_port, snat_ports):
"""Return the SNAT port for the given internal interface port."""
fixed_ip = int_port['fixed_ips'][0]
subnet_id = fixed_ip['subnet_id']
match_port = [p for p in snat_ports if
p['fixed_ips'][0]['subnet_id'] == subnet_id]
if match_port:
return match_port[0]
else:
LOG.error(_LE('DVR: no map match_port found!'))
@staticmethod
def _get_snat_idx(ip_cidr):
"""Generate index for DVR snat rules and route tables.
The index value has to be 32 bits or less but more than the system
generated entries i.e. 32768. For IPv4 use the numeric value of the
cidr. For IPv6 generate a crc32 bit hash and xor-fold to 30 bits.
Use the freed range to extend smaller values so that they become
greater than system generated entries.
"""
net = netaddr.IPNetwork(ip_cidr)
if net.version == 6:
# the crc32 & 0xffffffff is for Python 2.6 and 3.0 compatibility
snat_idx = binascii.crc32(ip_cidr) & 0xffffffff
# xor-fold the hash to reserve upper range to extend smaller values
snat_idx = (snat_idx >> 30) ^ (snat_idx & MASK_30)
if snat_idx < 32768:
snat_idx = snat_idx + MASK_30
else:
snat_idx = net.value
return snat_idx
def _snat_redirect_add(self, gateway, sn_port, sn_int):
"""Adds rules and routes for SNAT redirection."""
try:
ip_cidr = sn_port['ip_cidr']
snat_idx = self._get_snat_idx(ip_cidr)
ns_ipr = ip_lib.IPRule(namespace=self.ns_name)
ns_ipd = ip_lib.IPDevice(sn_int, namespace=self.ns_name)
ns_ipwrapr = ip_lib.IPWrapper(namespace=self.ns_name)
ns_ipd.route.add_gateway(gateway, table=snat_idx)
ns_ipr.rule.add(ip_cidr, snat_idx, snat_idx)
ns_ipwrapr.netns.execute(['sysctl', '-w', 'net.ipv4.conf.%s.'
'send_redirects=0' % sn_int])
except Exception:
LOG.exception(_LE('DVR: error adding redirection logic'))
def _snat_redirect_remove(self, gateway, sn_port, sn_int):
"""Removes rules and routes for SNAT redirection."""
try:
ip_cidr = sn_port['ip_cidr']
snat_idx = self._get_snat_idx(ip_cidr)
ns_ipr = ip_lib.IPRule(namespace=self.ns_name)
ns_ipd = ip_lib.IPDevice(sn_int, namespace=self.ns_name)
ns_ipd.route.delete_gateway(gateway, table=snat_idx)
ns_ipr.rule.delete(ip_cidr, snat_idx, snat_idx)
except Exception:
LOG.exception(_LE('DVR: removed snat failed'))
def get_gw_port_host(self):
host = self.router.get('gw_port_host')
if not host:
LOG.debug("gw_port_host missing from router: %s",
self.router['id'])
return host
def internal_network_added(self, port):
super(DvrRouter, self).internal_network_added(port)
ex_gw_port = self.get_ex_gw_port()
if not ex_gw_port:
return
snat_ports = self.get_snat_interfaces()
sn_port = self._map_internal_interfaces(port, snat_ports)
if not sn_port:
return
interface_name = self.get_internal_device_name(port['id'])
self._snat_redirect_add(sn_port['fixed_ips'][0]['ip_address'],
port,
interface_name)
# TODO(Carl) This is a sign that dvr needs two router classes.
is_this_snat_host = (self.agent_conf.agent_mode == 'dvr_snat' and
self.get_gw_port_host() == self.host)
if not is_this_snat_host:
return
ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(self.router['id'])
self._set_subnet_info(sn_port)
interface_name = self.get_snat_int_device_name(sn_port['id'])
self._internal_network_added(
ns_name,
sn_port['network_id'],
sn_port['id'],
sn_port['ip_cidr'],
sn_port['mac_address'],
interface_name,
dvr_snat_ns.SNAT_INT_DEV_PREFIX)
self._set_subnet_arp_info(port)
def _dvr_internal_network_removed(self, port):
if not self.ex_gw_port:
return
sn_port = self._map_internal_interfaces(port, self.snat_ports)
if not sn_port:
return
# DVR handling code for SNAT
interface_name = self.get_internal_device_name(port['id'])
self._snat_redirect_remove(sn_port['fixed_ips'][0]['ip_address'],
port,
interface_name)
is_this_snat_host = (self.agent_conf.agent_mode == 'dvr_snat' and
self.ex_gw_port['binding:host_id'] == self.host)
if not is_this_snat_host:
return
snat_interface = (
self.get_snat_int_device_name(sn_port['id']))
ns_name = self.snat_namespace.name
prefix = dvr_snat_ns.SNAT_INT_DEV_PREFIX
if ip_lib.device_exists(snat_interface, namespace=ns_name):
self.driver.unplug(snat_interface, namespace=ns_name,
prefix=prefix)
def internal_network_removed(self, port):
self._dvr_internal_network_removed(port)
super(DvrRouter, self).internal_network_removed(port)
def get_floating_agent_gw_interface(self, ext_net_id):
"""Filter Floating Agent GW port for the external network."""
fip_ports = self.router.get(l3_constants.FLOATINGIP_AGENT_INTF_KEY, [])
return next(
(p for p in fip_ports if p['network_id'] == ext_net_id), None)
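# --- Hedged illustration (added; not part of the original module) ---
# _get_snat_idx above maps a CIDR to a routing-table index: IPv4 uses the numeric value
# of the network, IPv6 takes a crc32 hash, xor-folds it to 30 bits and lifts small
# results above the system-generated range (32768). The same arithmetic, sketched
# standalone (kept as comments so importing this module stays side-effect free):
#
#   import binascii
#   import netaddr
#   print(netaddr.IPNetwork('192.168.1.0/24').value)     # IPv4 index: 3232235776
#   h = binascii.crc32(b'2001:db8::/64') & 0xffffffff    # crc32 of the CIDR string
#   idx = (h >> 30) ^ (h & MASK_30)                      # xor-fold to 30 bits
#   print(idx if idx >= 32768 else idx + MASK_30)        # extend small values upward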
| 43.196185
| 79
| 0.6272
| 2,172
| 15,853
| 4.28453
| 0.177256
| 0.012895
| 0.01354
| 0.018375
| 0.356437
| 0.290673
| 0.229422
| 0.190415
| 0.157748
| 0.1268
| 0
| 0.008087
| 0.290229
| 15,853
| 366
| 80
| 43.314208
| 0.818966
| 0.200151
| 0
| 0.204819
| 0
| 0
| 0.055867
| 0
| 0
| 0
| 0.001603
| 0.005464
| 0
| 1
| 0.092369
| false
| 0
| 0.044177
| 0.004016
| 0.216867
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6af335ed4a4087ef091d5830d5a795b074596342
| 1,032
|
py
|
Python
|
sandbox/test/testChainop.py
|
turkeydonkey/nzmath3
|
a48ae9efcf0d9ad1485c2e9863c948a7f1b20311
|
[
"BSD-3-Clause"
] | 1
|
2021-05-26T19:22:17.000Z
|
2021-05-26T19:22:17.000Z
|
sandbox/test/testChainop.py
|
turkeydonkey/nzmath3
|
a48ae9efcf0d9ad1485c2e9863c948a7f1b20311
|
[
"BSD-3-Clause"
] | null | null | null |
sandbox/test/testChainop.py
|
turkeydonkey/nzmath3
|
a48ae9efcf0d9ad1485c2e9863c948a7f1b20311
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
import operator
import sandbox.chainop as chainop
class BasicChainTest (unittest.TestCase):
def testBasicChain(self):
double = lambda x: x * 2
self.assertEqual(62, chainop.basic_chain((operator.add, double), 2, 31))
square = lambda x: x ** 2
self.assertEqual(2**31, chainop.basic_chain((operator.mul, square), 2, 31))
class MultiChainTest (unittest.TestCase):
def testMultiChain(self):
double = lambda x: x * 2
self.assertEqual([62, 93], chainop.multi_chains((operator.add, double), (2, 3), 31))
square = lambda x: x ** 2
self.assertEqual([2**31, 3**31], chainop.multi_chains((operator.mul, square), [2, 3], 31))
def suite(suffix="Test"):
suite = unittest.TestSuite()
all_names = globals()
for name in all_names:
if name.endswith(suffix):
suite.addTest(unittest.makeSuite(all_names[name], "test"))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite())
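# --- Hedged sketch (added for illustration; not part of the original test) ---
# sandbox.chainop itself is not shown here; from the assertions above, basic_chain((op, sq), x, n)
# appears to combine x with itself n times by repeated doubling, so (add, double) yields n * x
# and (mul, square) yields x ** n. A minimal stand-in under that assumption:
def _basic_chain_sketch(ops, x, index):
    op, sq = ops
    result, power = None, x
    while index:
        if index & 1:  # this bit of the index contributes the current power
            result = power if result is None else op(result, power)
        power = sq(power)  # double (or square) for the next bit
        index >>= 1
    return result

assert _basic_chain_sketch((operator.add, lambda v: v * 2), 2, 31) == 62
assert _basic_chain_sketch((operator.mul, lambda v: v ** 2), 2, 31) == 2 ** 31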
| 31.272727
| 98
| 0.650194
| 132
| 1,032
| 4.969697
| 0.371212
| 0.042683
| 0.04878
| 0.054878
| 0.216463
| 0.216463
| 0.216463
| 0.216463
| 0.216463
| 0.106707
| 0
| 0.04059
| 0.212209
| 1,032
| 32
| 99
| 32.25
| 0.766298
| 0
| 0
| 0.16
| 0
| 0
| 0.015504
| 0
| 0
| 0
| 0
| 0
| 0.16
| 1
| 0.12
| false
| 0
| 0.12
| 0
| 0.36
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6af3beecef5460df43fd5570a5ba8ce1f6a0d13d
| 1,131
|
py
|
Python
|
labs_final/lab5/experiments/run_trpo_pendulum.py
|
mrmotallebi/berkeley-deeprl-bootcamp
|
9257c693724c38edfa4571e3510667ca168b7ca1
|
[
"MIT"
] | 3
|
2018-03-26T14:13:11.000Z
|
2020-07-23T22:26:28.000Z
|
labs_final/lab5/experiments/run_trpo_pendulum.py
|
mrmotallebi/berkeley-deeprl-bootcamp
|
9257c693724c38edfa4571e3510667ca168b7ca1
|
[
"MIT"
] | null | null | null |
labs_final/lab5/experiments/run_trpo_pendulum.py
|
mrmotallebi/berkeley-deeprl-bootcamp
|
9257c693724c38edfa4571e3510667ca168b7ca1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import chainer
from algs import trpo
from env_makers import EnvMaker
from models import GaussianMLPPolicy, MLPBaseline
from utils import SnapshotSaver
import numpy as np
import os
import logger
log_dir = "data/local/trpo-pendulum"
np.random.seed(42)
# Clean up existing logs
os.system("rm -rf {}".format(log_dir))
with logger.session(log_dir):
env_maker = EnvMaker('Pendulum-v0')
env = env_maker.make()
policy = GaussianMLPPolicy(
observation_space=env.observation_space,
action_space=env.action_space,
env_spec=env.spec,
hidden_sizes=(64, 64),
hidden_nonlinearity=chainer.functions.tanh,
)
baseline = MLPBaseline(
observation_space=env.observation_space,
action_space=env.action_space,
env_spec=env.spec,
hidden_sizes=(64, 64),
hidden_nonlinearity=chainer.functions.tanh,
)
trpo(
env=env,
env_maker=env_maker,
n_envs=16,
policy=policy,
baseline=baseline,
batch_size=10000,
n_iters=100,
snapshot_saver=SnapshotSaver(log_dir),
)
| 24.586957
| 51
| 0.678161
| 142
| 1,131
| 5.211268
| 0.450704
| 0.064865
| 0.075676
| 0.081081
| 0.343243
| 0.343243
| 0.343243
| 0.343243
| 0.343243
| 0.343243
| 0
| 0.02411
| 0.229885
| 1,131
| 45
| 52
| 25.133333
| 0.825488
| 0.038019
| 0
| 0.263158
| 0
| 0
| 0.040516
| 0.022099
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.210526
| 0
| 0.210526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6af3f2a17f291a65e5f9b17cbe9f19d00752f642
| 2,098
|
py
|
Python
|
jtyoui/regular/regexengine.py
|
yy1244/Jtyoui
|
d3c212ed9d6ffa6b37a8ca49098ab59c89216f09
|
[
"MIT"
] | 1
|
2019-12-05T09:46:51.000Z
|
2019-12-05T09:46:51.000Z
|
jtyoui/regular/regexengine.py
|
yy1244/Jtyoui
|
d3c212ed9d6ffa6b37a8ca49098ab59c89216f09
|
[
"MIT"
] | null | null | null |
jtyoui/regular/regexengine.py
|
yy1244/Jtyoui
|
d3c212ed9d6ffa6b37a8ca49098ab59c89216f09
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3.7
# -*- coding: utf-8 -*-
# @Time : 2019/12/2 10:17
# @Author: Jtyoui@qq.com
"""
Regular-expression parser driven by an XML rule table.
"""
try:
import xml.etree.cElementTree as et
except ModuleNotFoundError:
import xml.etree.ElementTree as et
import re
class RegexEngine:
def __init__(self, xml, str_):
"""加载正则表。正则表为xml
:param xml: 正则表的位置
:param str_: 要匹配的字符串
"""
self._string = str_
self._root = et.parse(xml).getroot()
self.re = ''
self.data = []
def select(self, tag):
"""根据xml的tag来实现不同的正则提取
:param tag: xml的tag标签
:return: 正则提取的数据
"""
root = self._root.find(tag)
attrib = root.attrib
if attrib.get('part', 'False').lower() == 'true':
self._part_tag(root)
return list(filter(lambda x: x[1], self.data))
else:
sf = self._no_part(root)
self.re = ''.join(self.data) + sf
return re.findall(self.re, self._string)
def _no_part(self, tags):
"""tag标签不分开抽取"""
for tag in tags:
if tag:
if tag.attrib.get('must', 'true').lower() == 'true':
self.data.append(self.re)
self.re = ''
self.re = '(?:' + self._no_part(tag) + ')'
else:
self.re = self._no_part(tag)
else:
attrib = tag.attrib
text = tag.text.strip()
if attrib.get('must', 'true').lower() == 'true':
self.re = '(?:' + text + ')'
else:
self.re += '(?:' + text + ')?'
return self.re
def _part_tag(self, tags):
"""tag标签分开提取"""
for tag in tags:
if tag:
self._part_tag(tag)
else:
self.data.append((tag.tag, re.findall(tag.text.strip(), self._string)))
@property
def string(self):
return self._string
@string.setter
def string(self, str_):
self._string = str_
self.re, self.data = '', []
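# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# An assumed rule-file layout, inferred from the parsing logic above: a tag whose
# part="true" children each hold one regular expression. Both the XML and the sample
# string below are made up for illustration.
if __name__ == '__main__':
    import io
    rules = io.StringIO(
        '<regex><contact part="true">'
        '<phone>1[0-9]{10}</phone>'
        '<mail>[\\w.]+@[\\w.]+</mail>'
        '</contact></regex>'
    )
    engine = RegexEngine(rules, 'tel 13812345678, mail demo@example.com')
    print(engine.select('contact'))  # e.g. [('phone', ['13812345678']), ('mail', ['demo@example.com'])]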
| 27.246753
| 87
| 0.479981
| 234
| 2,098
| 4.175214
| 0.34188
| 0.067554
| 0.071648
| 0.0348
| 0.155578
| 0.143296
| 0.108495
| 0
| 0
| 0
| 0
| 0.011407
| 0.373213
| 2,098
| 76
| 88
| 27.605263
| 0.731559
| 0.109152
| 0
| 0.254902
| 0
| 0
| 0.027964
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.058824
| 0.019608
| 0.27451
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6af4d5fe8b77a49f0cbdce7b5f8e3248894cc3b5
| 5,117
|
py
|
Python
|
proglearn/transformers.py
|
rflperry/ProgLearn
|
9f799b4a8cf2157ba40b04842dc88eaf646e6420
|
[
"MIT"
] | null | null | null |
proglearn/transformers.py
|
rflperry/ProgLearn
|
9f799b4a8cf2157ba40b04842dc88eaf646e6420
|
[
"MIT"
] | 1
|
2020-11-25T19:21:54.000Z
|
2020-11-25T19:21:54.000Z
|
proglearn/transformers.py
|
rflperry/ProgLearn
|
9f799b4a8cf2157ba40b04842dc88eaf646e6420
|
[
"MIT"
] | null | null | null |
"""
Main Author: Will LeVine
Corresponding Email: levinewill@icloud.com
"""
import keras
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils.validation import check_array, check_is_fitted, check_X_y
from .base import BaseTransformer
class NeuralClassificationTransformer(BaseTransformer):
"""
A class used to transform data from a category to a specialized representation.
Parameters
----------
network : object
A neural network used in the classification transformer.
euclidean_layer_idx : int
An integer to represent the final layer of the transformer.
optimizer : str or keras.optimizers instance
An optimizer used when compiling the neural network.
loss : str, default="categorical_crossentropy"
A loss function used when compiling the neural network.
pretrained : bool, default=False
A boolean used to identify if the network is pretrained.
compile_kwargs : dict, default={"metrics": ["acc"]}
A dictionary containing metrics for judging network performance.
fit_kwargs : dict, default={
"epochs": 100,
"callbacks": [keras.callbacks.EarlyStopping(patience=5, monitor="val_acc")],
"verbose": False,
"validation_split": 0.33,
},
A dictionary to hold epochs, callbacks, verbose, and validation split for the network.
Attributes
----------
encoder_ : object
A Keras model with inputs and outputs based on the network attribute.
Output layers are determined by the euclidean_layer_idx parameter.
"""
def __init__(
self,
network,
euclidean_layer_idx,
optimizer,
loss="categorical_crossentropy",
pretrained=False,
compile_kwargs={"metrics": ["acc"]},
fit_kwargs={
"epochs": 100,
"callbacks": [keras.callbacks.EarlyStopping(patience=5, monitor="val_acc")],
"verbose": False,
"validation_split": 0.33,
},
):
self.network = keras.models.clone_model(network)
self.encoder_ = keras.models.Model(
inputs=self.network.inputs,
outputs=self.network.layers[euclidean_layer_idx].output,
)
self.pretrained = pretrained
self.optimizer = optimizer
self.loss = loss
self.compile_kwargs = compile_kwargs
self.fit_kwargs = fit_kwargs
def fit(self, X, y):
"""
Fits the transformer to data X with labels y.
Parameters
----------
X : ndarray
Input data matrix.
y : ndarray
Output (i.e. response data matrix).
Returns
-------
self : NeuralClassificationTransformer
The object itself.
"""
check_X_y(X, y)
_, y = np.unique(y, return_inverse=True)
# more typechecking
self.network.compile(
loss=self.loss, optimizer=self.optimizer, **self.compile_kwargs
)
self.network.fit(X, keras.utils.to_categorical(y), **self.fit_kwargs)
return self
def transform(self, X):
"""
Performs inference using the transformer.
Parameters
----------
X : ndarray
Input data matrix.
Returns
-------
X_transformed : ndarray
The transformed input.
Raises
------
NotFittedError
When the model is not fitted.
"""
check_is_fitted(self)
check_array(X)
return self.encoder_.predict(X)
class TreeClassificationTransformer(BaseTransformer):
"""
A class used to transform data from a category to a specialized representation.
Parameters
----------
kwargs : dict, default={}
A dictionary to contain parameters of the tree.
Attributes
----------
transformer : sklearn.tree.DecisionTreeClassifier
an internal sklearn DecisionTreeClassifier
"""
def __init__(self, kwargs={}):
self.kwargs = kwargs
def fit(self, X, y):
"""
Fits the transformer to data X with labels y.
Parameters
----------
X : ndarray
Input data matrix.
y : ndarray
Output (i.e. response data matrix).
Returns
-------
self : TreeClassificationTransformer
The object itself.
"""
X, y = check_X_y(X, y)
self.transformer_ = DecisionTreeClassifier(**self.kwargs).fit(X, y)
return self
def transform(self, X):
"""
Performs inference using the transformer.
Parameters
----------
X : ndarray
Input data matrix.
Returns
-------
X_transformed : ndarray
The transformed input.
Raises
------
NotFittedError
When the model is not fitted.
"""
check_is_fitted(self)
X = check_array(X)
return self.transformer_.apply(X)
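# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# A minimal illustration of the fit/transform API documented above, on synthetic data;
# the labels, max_depth and sample sizes are arbitrary choices for the example.
if __name__ == "__main__":
    X_demo = np.random.rand(200, 4)
    y_demo = (X_demo[:, 0] > 0.5).astype(int)
    tree_transformer = TreeClassificationTransformer(kwargs={"max_depth": 3}).fit(X_demo, y_demo)
    leaf_ids = tree_transformer.transform(X_demo)  # one decision-tree leaf index per sample
    print(leaf_ids.shape)                          # (200,)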
| 26.931579
| 94
| 0.587063
| 526
| 5,117
| 5.60076
| 0.277567
| 0.00611
| 0.023082
| 0.031229
| 0.39647
| 0.376103
| 0.3537
| 0.3537
| 0.3537
| 0.3537
| 0
| 0.004018
| 0.319132
| 5,117
| 189
| 95
| 27.074074
| 0.841561
| 0.492671
| 0
| 0.148148
| 0
| 0
| 0.039939
| 0.012133
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.092593
| 0
| 0.314815
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6af5766ae43b84c8b76547fb51e5b56cfdb7f3af
| 9,900
|
py
|
Python
|
morphelia/external/saphire.py
|
marx-alex/Morphelia
|
809278b07f1a535789455d54df3cbddc850d609c
|
[
"MIT"
] | null | null | null |
morphelia/external/saphire.py
|
marx-alex/Morphelia
|
809278b07f1a535789455d54df3cbddc850d609c
|
[
"MIT"
] | null | null | null |
morphelia/external/saphire.py
|
marx-alex/Morphelia
|
809278b07f1a535789455d54df3cbddc850d609c
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.collections as mcoll
from matplotlib.ticker import MaxNLocator
plt.style.use('seaborn-darkgrid')
class BaseTraj:
def __init__(self, model, X):
self.model = model
assert len(X.shape) == 2, f"X should be 2-d, instead got shape {X.shape}"
self.X = X
self.means = self.model.means_.copy()
self.states = self.model.predict(X)
self.n_states = len(np.unique(self.states))
self.trans = self.model.transmat_.copy()
def rho_dt_bins(self, rho, theta, dt, bins=12):
"""
Bin rho values and dwell time on polar coordinates.
:param rho:
:param theta:
:param dt:
:param bins:
:return:
"""
bins = np.linspace(-np.pi, np.pi, bins+1)
bin_means = (bins[:-1] + bins[1:]) / 2
bin_ix = np.digitize(theta, bins)
bin_rd = [rho[(bin_ix == i) & (rho > 0)].mean()
if len(rho[(bin_ix == i) & (rho > 0)]) > 0 else
0 for i in range(1, len(bins))]
bin_dt = [dt[(bin_ix == i) & (dt > 0)].sum()
if len(dt[(bin_ix == i) & (dt > 0)]) > 0 else
0 for i in range(1, len(bins))]
return bin_means, bin_rd, bin_dt
def transition_vectors(self):
"""
Transition vectors between states on polar coordinates.
:return:
"""
mu_x, mu_y = self.means[:, 0], self.means[:, 1]
mu_x_dist = mu_x - mu_x[:, np.newaxis]
mu_y_dist = mu_y - mu_y[:, np.newaxis]
dist_vect = np.column_stack((mu_x_dist.flatten(), mu_y_dist.flatten()))
trans_rho, trans_theta = self.cart2pol(dist_vect)
trans_rho = (trans_rho.reshape((self.n_states, self.n_states)) * self.design_transition()).flatten()
return trans_rho, trans_theta
def design_transition(self, thresh=0.1):
design_trans = self.trans
diag_ix = np.diag_indices(len(design_trans))
design_trans[diag_ix] = 0
design_trans[design_trans < thresh] = 0
design_trans[design_trans >= thresh] = 1
return design_trans
def norm_trans_time(self):
"""
Normalized transition time.
:return:
"""
unique, counts = np.unique(self.states, return_counts=True)
sort_ix = unique.argsort()
counts = counts[sort_ix]
# normalize by transition probability
dt = (counts * self.design_transition()).flatten()
return dt / dt.sum()
def norm_state_time(self):
"""
Normalized state time.
:return:
"""
unique, counts = np.unique(self.states, return_counts=True)
sort_ix = unique.argsort()
counts = counts[sort_ix]
return counts / counts.sum()
@staticmethod
def cart2pol(arr):
"""
        Cartesian space to polar space.
Args:
arr (numpy.array): Array of shape [n_state x dims]
"""
x, y = arr[:, 0], arr[:, 1]
rho = np.sqrt(x ** 2 + y ** 2)
theta = np.arctan2(y, x)
return rho, theta
class PhenoSign(BaseTraj):
"""Phenotypic Signature class."""
def __init__(self, model, X):
super(PhenoSign, self).__init__(model, X)
self.bin_means, self.signature = self.get_signature()
def get_signature(self):
"""
Calculate phenotypic signature for a given model.
:return: bin_means, array of shape [4 x n_bins] with
1. state radial distances
2. state dwell times
3. transition distances
            4. transition dwell times
"""
# states
mu_rho, mu_theta = self.cart2pol(self.means)
state_dt = self.norm_state_time()
bin_means_1, state_rd_bins, state_dt_bins = self.rho_dt_bins(mu_rho, mu_theta, state_dt)
# transitions
trans_rho, trans_theta = self.transition_vectors()
trans_dt = self.norm_trans_time()
bin_means_2, trans_rd_bins, trans_dt_bins = self.rho_dt_bins(trans_rho, trans_theta, trans_dt)
assert (bin_means_1 == bin_means_2).all(), "state and transition vectors are binned differently and can" \
"not be concatenated."
return bin_means_1, np.vstack((state_rd_bins, state_dt_bins, trans_rd_bins, trans_dt_bins))
class Saphire(PhenoSign):
"""Implementation of the SAPHIRE algorithm for plotting Hidden Markov Models.
Gordonov S, Hwang MK, Wells A, Gertler FB, Lauffenburger DA,
Bathe M. Time series modeling of live-cell shape dynamics for
image-based phenotypic profiling. Integr Biol (Camb). 2016;8(1):73-90.
"""
def __init__(self, model, X):
super(Saphire, self).__init__(model, X)
def plot_traj(self, projection='cartesian', ymax=None):
"""
Plot cell trajectory.
Args:
projection (str): cartesian or polar.
ymax (int)
"""
avail_proj = ['cartesian', 'polar']
projection = projection.lower()
assert projection in avail_proj, f"projection unknown: {projection}"
if projection == 'cartesian':
projection = None
cmap = plt.get_cmap('binary')
cmap = truncate_colormap(cmap, minval=0.2)
if projection == 'polar':
y, x = self.cart2pol(self.X)
y_mu, x_mu = self.cart2pol(self.means)
else:
x, y = self.X[:, 0], self.X[:, 1]
x_mu, y_mu = self.means[:, 0], self.means[:, 1]
fig, ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection': projection})
ax.scatter(x, y,
c=self.states, cmap='Set1', zorder=2)
traj = ax.scatter(x_mu, y_mu,
c=np.unique(self.states), cmap='Set1',
s=200, zorder=2, edgecolor='black', alpha=0.6)
legend = ax.legend(*traj.legend_elements(),
loc="upper right", bbox_to_anchor=(1.2, 0.94),
title="States")
ax.add_artist(legend)
if ymax is not None:
ax.set_ylim(0, ymax)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
colorline(x, y, cmap=cmap, zorder=1)
norm = mpl.colors.Normalize(vmin=0, vmax=48)
cax = fig.add_axes([0.94, 0.15, 0.05, 0.3])
fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax,
orientation='vertical', label='Time')
plt.show()
return fig, ax
def plot_states(self, ymax=None):
"""
Plot cell states.
"""
bin_rd, bin_dt = self.signature[0, :], self.signature[1, :]
fig, ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection': 'polar'})
cmap = plt.get_cmap("Oranges")
N = 12
width = (2 * np.pi) / N
ax.bar(self.bin_means, bin_rd, width=width, color=cmap(bin_dt))
if ymax is not None:
ax.set_ylim(0, ymax)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
norm = mpl.colors.Normalize(vmin=0, vmax=1)
cax = fig.add_axes([0.94, 0.15, 0.05, 0.3])
fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax,
orientation='vertical', label='Increasing state dwell time',
ticks=[0, 0.5, 1])
return fig, ax
def plot_transition(self, ymax=None):
"""
Plot transition between cell states.
"""
bin_rd, bin_dt = self.signature[2, :], self.signature[3, :]
fig, ax = plt.subplots(figsize=(5, 5), subplot_kw={'projection': 'polar'})
cmap = plt.get_cmap("Blues")
N = 12
width = (2 * np.pi) / N
ax.bar(self.bin_means, bin_rd, width=width, color=cmap(bin_dt))
if ymax is not None:
ax.set_ylim(0, ymax)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
norm = mpl.colors.Normalize(vmin=0, vmax=1)
cax = fig.add_axes([0.94, 0.15, 0.05, 0.3])
fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax,
orientation='vertical', label='Increasing transition dwell time',
ticks=[0, 0.5, 1])
return fig, ax
def colorline(x, y, z=None, cmap=plt.get_cmap('copper'), norm=plt.Normalize(0.0, 1.0),
linewidth=3, alpha=1.0, zorder=1):
"""
Plot a colored line with coordinates x and y
Optionally specify colors in the array z
Optionally specify a colormap, a norm function and a line width
"""
# Default colors equally spaced on [0,1]:
if z is None:
z = np.linspace(0.0, 1.0, len(x))
# Special case if a single number:
if not hasattr(z, "__iter__"): # to check for numerical input -- this is a hack
z = np.array([z])
z = np.asarray(z)
segments = make_segments(x, y)
lc = mcoll.LineCollection(segments, array=z, cmap=cmap, norm=norm,
linewidth=linewidth, alpha=alpha, zorder=zorder)
ax = plt.gca()
ax.add_collection(lc)
return lc
def make_segments(x, y):
"""
Create list of line segments from x and y coordinates, in the correct format
for LineCollection: an array of the form numlines x (points per line) x 2 (x
and y) array
"""
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
return segments
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
'''
https://stackoverflow.com/a/18926541
'''
if isinstance(cmap, str):
cmap = plt.get_cmap(cmap)
new_cmap = mpl.colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
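# --- Hedged illustration (added; not part of the original module) ---
# cart2pol is the one piece above that can be exercised without a fitted HMM; the
# values below are approximate.
if __name__ == '__main__':
    demo_rho, demo_theta = BaseTraj.cart2pol(np.array([[1.0, 1.0], [0.0, 2.0]]))
    print(demo_rho)    # ~[1.414, 2.0]
    print(demo_theta)  # ~[0.785, 1.571]  (pi/4 and pi/2)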
| 34.256055
| 114
| 0.58101
| 1,353
| 9,900
| 4.10643
| 0.219512
| 0.015839
| 0.011699
| 0.012599
| 0.330094
| 0.287077
| 0.232901
| 0.227322
| 0.215443
| 0.215443
| 0
| 0.02578
| 0.290808
| 9,900
| 288
| 115
| 34.375
| 0.76556
| 0.153939
| 0
| 0.248485
| 0
| 0
| 0.053348
| 0.003279
| 0
| 0
| 0
| 0
| 0.018182
| 1
| 0.09697
| false
| 0
| 0.030303
| 0
| 0.224242
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6af5ea523e6e4b25159d80c12448780bfd106c8c
| 4,824
|
py
|
Python
|
account/views.py
|
Stfuncode/food-beverage-investigator
|
0fea4943a5c2634068dc04118f83742327937c25
|
[
"MIT"
] | null | null | null |
account/views.py
|
Stfuncode/food-beverage-investigator
|
0fea4943a5c2634068dc04118f83742327937c25
|
[
"MIT"
] | null | null | null |
account/views.py
|
Stfuncode/food-beverage-investigator
|
0fea4943a5c2634068dc04118f83742327937c25
|
[
"MIT"
] | null | null | null |
import imp
from venv import create
from django.shortcuts import render, redirect
from django.views import View
from django.views.generic import (
ListView,
)
from account.models import *
from account.forms import *
from data.models import *
from django.contrib.auth import login as auth_login
from django.contrib.auth.models import auth
from django.contrib import messages
from django.contrib.auth.mixins import PermissionRequiredMixin, LoginRequiredMixin
# Create your views here.
def login(request):
if request.method == "POST":
form = loginForm(data=request.POST)
if form.is_valid():
user = form.get_user()
auth_login(request, user)
print("succesful login")
remember_me = form.cleaned_data["remember_me"]
if remember_me:
request.session.set_expiry(1209600)
return redirect("home")
else:
messages.warning(request, 'There is an issue with your login processes')
return redirect("login")
else:
form = loginForm()
create_form = createUserForm()
context = {
"form": form,
"create_form": create_form
}
return render(request, "login.html", context)
def logout(request):
auth.logout(request)
return redirect("login")
def register(request):
if request.method == "POST":
create_form = createUserForm(data=request.POST)
if create_form.is_valid():
user = create_form.save(commit=False)
user.save()
messages.success(request, "User created successfully!")
return redirect("login")
else:
messages.error(request, "User creation failed")
else:
create_form = createUserForm()
return render(request, "login.html", {"create_form": create_form})
def homepage(request):
user = Account.objects.filter(is_superuser=False).count()
rest = Restaurant.objects.all().count()
rating = RestaurantReview.objects.exclude(rating__isnull=True).count()
review = RestaurantReview.objects.exclude(review__isnull=True).count()
context = {
"user_count" : user,
"rest_count" : rest,
"rating_count" : rating,
"review_count" : review,
}
return render(request, "home.html", context)
class ViewUserView(View, LoginRequiredMixin, PermissionRequiredMixin):
permission_required = 'accounts.view_account'
raise_exception = True
def post(self, request, pk, *args, **kwargs):
user = Account.objects.get(account_id=pk)
form = viewUserForm(request.POST, instance=user)
return redirect("userlist")
def get(self, request, pk, *args, **kwargs):
user = Account.objects.get(account_id=pk)
form = viewUserForm(instance=user)
context = {
"form": form,
"pk": pk
}
return render(request, "profile.html", context)
class EditUserView(View, LoginRequiredMixin, PermissionRequiredMixin):
permission_required = 'accounts.change_account'
raise_exception = True
def post(self, request, pk, *args, **kwargs):
user = Account.objects.get(account_id=pk)
form = editUserForm(request.POST, instance=user)
if form.is_valid():
user = form.save()
role = request.POST.get("role")
user.save()
messages.success(request, "Successfully updated profile!")
return redirect(f'/viewUser/{user.account_id}')
else:
form = editUserForm(instance=user)
extra_context = {
"form": form,
}
print('something wrong')
messages.error(request, "Invalid input! Please input a valid information.")
return render(request, "editUser.html", extra_context)
def get(self, request, pk, *args, **kwargs):
user = Account.objects.get(account_id=pk)
form = editUserForm(instance=user)
extra_context = {
"form": form,
}
return render(request, "editUser.html", extra_context)
class UserListView(LoginRequiredMixin, PermissionRequiredMixin, ListView):
permission_required = 'accounts.view_account'
template_name = "userList.html"
queryset = Account.objects.all()
class UpdateProfilePicView(View, LoginRequiredMixin, PermissionRequiredMixin):
permission_required = 'accounts.change_account'
raise_exception = True
def post(self, request, pk, *args, **kwargs):
user = Account.objects.get(account_id=pk)
user.profile_pic = request.FILES.get('profile-pic')
user.save()
return redirect('viewUser', pk)
def deleteUser(request, event_id):
event = Account.objects.get(pk=event_id)
event.delete()
return redirect('userlist')
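# Illustrative sketch (not part of the original file): the views above redirect to named routes
# such as "login", "home", "userlist" and "viewUser". A minimal urls.py that would satisfy those
# names might look like the following; the URL paths, the <int:pk> converter and the app layout
# are assumptions for illustration only.
from django.urls import path
from account import views

urlpatterns = [
    path("login/", views.login, name="login"),
    path("logout/", views.logout, name="logout"),
    path("register/", views.register, name="register"),
    path("", views.homepage, name="home"),
    path("users/", views.UserListView.as_view(), name="userlist"),
    path("viewUser/<int:pk>/", views.ViewUserView.as_view(), name="viewUser"),
    path("editUser/<int:pk>/", views.EditUserView.as_view(), name="editUser"),
    path("updatePic/<int:pk>/", views.UpdateProfilePicView.as_view(), name="updatePic"),
    path("deleteUser/<int:event_id>/", views.deleteUser, name="deleteUser"),
]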
| 33.5
| 87
| 0.645522
| 521
| 4,824
| 5.873321
| 0.241843
| 0.029412
| 0.037255
| 0.027778
| 0.355882
| 0.287582
| 0.250654
| 0.222549
| 0.196405
| 0.196405
| 0
| 0.001926
| 0.246476
| 4,824
| 144
| 88
| 33.5
| 0.83989
| 0.004768
| 0
| 0.385246
| 0
| 0
| 0.115
| 0.023958
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081967
| false
| 0
| 0.098361
| 0
| 0.401639
| 0.016393
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6af67173be2103fc04ef7a7c51b006d1f866e003
| 2,697
|
py
|
Python
|
fpds/client.py
|
mgradowski/aiproject
|
855332bd982bef2530ad935a209ae8be35963165
|
[
"MIT"
] | null | null | null |
fpds/client.py
|
mgradowski/aiproject
|
855332bd982bef2530ad935a209ae8be35963165
|
[
"MIT"
] | null | null | null |
fpds/client.py
|
mgradowski/aiproject
|
855332bd982bef2530ad935a209ae8be35963165
|
[
"MIT"
] | null | null | null |
import cv2
import aiohttp
import asyncio
import concurrent.futures
import argparse
import numpy as np
async def camera_source(ws: aiohttp.ClientWebSocketResponse, threadpool: concurrent.futures.ThreadPoolExecutor, src_id: int=0):
loop = asyncio.get_running_loop()
try:
src = await loop.run_in_executor(threadpool, lambda: cv2.VideoCapture(src_id))
while True:
_, im = await loop.run_in_executor(threadpool, src.read)
im = cv2.resize(im, (640, 384))
enc_param = [int(cv2.IMWRITE_JPEG_QUALITY), 40]
_, im = await loop.run_in_executor(threadpool, lambda: cv2.imencode('.jpg', im, enc_param))
await ws.send_bytes(im.tobytes())
except asyncio.CancelledError:
pass
finally:
src.release()
async def preview_window(queue: asyncio.Queue, threadpool: concurrent.futures.ThreadPoolExecutor):
loop = asyncio.get_running_loop()
try:
while True:
im = await queue.get()
im = np.frombuffer(im, dtype=np.uint8)
im = await loop.run_in_executor(threadpool, lambda: cv2.imdecode(im, cv2.IMREAD_ANYCOLOR))
cv2.imshow('fpds_remote_preview', im)
cv2.waitKey(1)
except asyncio.CancelledError:
pass
finally:
cv2.destroyAllWindows()
async def run_client(
ws: aiohttp.ClientWebSocketResponse,
threadpool: concurrent.futures.ThreadPoolExecutor
) -> None:
# fan out: camera frames are sent over the websocket; frames received back are queued for the preview window
dst_queue = asyncio.Queue(maxsize=1)
src_task = asyncio.create_task(camera_source(ws, threadpool))
dst_task = asyncio.create_task(preview_window(dst_queue, threadpool))
try:
while True:
im = await ws.receive_bytes()
await dst_queue.put(im)
except asyncio.CancelledError:
await ws.send_str('close')
src_task.cancel()
dst_task.cancel()
await asyncio.wait([src_task, dst_task])
async def amain(url: str):
with concurrent.futures.ThreadPoolExecutor(max_workers=4) as threadpool:
async with aiohttp.ClientSession() as session, session.ws_connect(url) as ws:
await run_client(ws, threadpool)
def main():
parser = argparse.ArgumentParser('fpds.client')
parser.add_argument('url', type=str, help='WebSocket endpoint of fpds.server e.g. http://localhost:8181/fpds')
args = parser.parse_args()
loop = asyncio.get_event_loop()
task = loop.create_task(amain(args.url))
try:
loop.run_until_complete(task)
except KeyboardInterrupt:
task.cancel()
loop.run_until_complete(asyncio.wait_for(task, timeout=None))
finally:
loop.close()
if __name__ == '__main__':
main()
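# Illustrative sketch (not part of the original project): fpds.server is not included here. For
# a quick loopback test of the client above, a minimal aiohttp echo endpoint such as the one
# below can stand in for it; the '/fpds' route and port 8181 simply mirror the client's help text.
import aiohttp
from aiohttp import web

async def fpds_echo(request: web.Request) -> web.WebSocketResponse:
    ws = web.WebSocketResponse()
    await ws.prepare(request)
    async for msg in ws:
        if msg.type == aiohttp.WSMsgType.BINARY:
            await ws.send_bytes(msg.data)  # echo each JPEG frame straight back to the client
        elif msg.type == aiohttp.WSMsgType.TEXT and msg.data == 'close':
            break
    return ws

def run_echo_server() -> None:
    app = web.Application()
    app.add_routes([web.get('/fpds', fpds_echo)])
    web.run_app(app, port=8181)  # then run: python fpds/client.py http://localhost:8181/fpds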
| 35.025974
| 127
| 0.668891
| 330
| 2,697
| 5.275758
| 0.354545
| 0.024124
| 0.080414
| 0.032165
| 0.276852
| 0.213096
| 0.180931
| 0.072947
| 0.049397
| 0
| 0
| 0.012943
| 0.226548
| 2,697
| 76
| 128
| 35.486842
| 0.821668
| 0.000742
| 0
| 0.25
| 0
| 0
| 0.042703
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014706
| false
| 0.029412
| 0.088235
| 0
| 0.102941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6af759aad1d331394cb7f013c9559f17569541f2
| 3,619
|
py
|
Python
|
Giveme5W1H/extractor/tools/key_value_cache.py
|
bkrrr/Giveme5W
|
657738781fe387d76e6e0da35ed009ccf81f4290
|
[
"Apache-2.0"
] | 410
|
2018-05-02T12:53:02.000Z
|
2022-03-28T16:11:34.000Z
|
Giveme5W1H/extractor/tools/key_value_cache.py
|
bkrrr/Giveme5W
|
657738781fe387d76e6e0da35ed009ccf81f4290
|
[
"Apache-2.0"
] | 51
|
2018-05-02T13:53:19.000Z
|
2022-03-22T00:16:39.000Z
|
Giveme5W1H/extractor/tools/key_value_cache.py
|
TU-Berlin/Giveme5W1H
|
b1586328393a50acde86015d22f78a4c15bf2f34
|
[
"Apache-2.0"
] | 81
|
2018-05-29T14:03:27.000Z
|
2022-02-08T08:59:38.000Z
|
import logging
import os
import pickle
import sys
import threading
import time
from typing import List
from Giveme5W1H.extractor.root import path
from Giveme5W1H.extractor.tools.util import bytes_2_human_readable
class KeyValueCache(object):
def __init__(self, cache_path):
"""
:param cache_path: path to cache, must be relative to the root.py file
"""
self.log = logging.getLogger('GiveMe5W')
# resolve path relative to the path file
self._cache_path = path(cache_path)
# add a meaningful extension
self._cache_path = self._cache_path + '.prickle'
self._cache = {}
if cache_path and os.path.isfile(self._cache_path) and os.path.getsize(self._cache_path) > 0:
# reload cache object from disk, if any
with open(self._cache_path, 'rb') as ff:
self._cache = pickle.load(ff)
self.log.debug('KeyValueCache: ' + self._cache_path + ' restored')
self.log_stats()
else:
self._cache = {}
self._lock = threading.Lock()
def log_stats(self):
# note: the reported size does not include the sizes of child objects
self.log.info(self._cache_path + ' entries: ' + str(len(self._cache)) + ' size: ' + bytes_2_human_readable(
sys.getsizeof(self._cache)))
def persist(self):
with open(self._cache_path, 'wb') as f:
pickle.dump(self._cache, f, pickle.HIGHEST_PROTOCOL)
def cache(self, key: str, value: object):
"""
None values are considered invalid results (ToughRequest produces None for exceptions).
Set -1 if you want to store "no distance".
:param key:
:param value:
:return:
"""
self._lock.acquire()
if value is not None:
self._cache[key] = self._pack(value)
self.log.debug(self._cache_path + ' CACHED: ' + str(key) + ': ' + str(value))
self.persist()
self._lock.release()
def get(self, key):
"""
Read cache entries
:param key:
:return:
"""
self._lock.acquire()
result = None
value = self._cache.get(key)
if value is not None:
self.log.debug(self._cache_path + ' LOADED: ' + str(key) + ': ' + str(value))
result = self._unpack(value)
self._lock.release()
return result
def get_complex(self, list_of_keys: List[str]):
"""
Read complex cache entries
"""
return self.get(self._get_id(list_of_keys))
def cache_complex(self, list_of_keys: List[str], value):
"""
helper to cache multi (string)key values.
They are sorted before concatenation, therefore an order is determined.
"""
self.cache(self._get_id(list_of_keys), value)
def _get_id(self, list_of_keys: List[str]):
"""
sorts list_of_keys, concatenates with # for readability
:param list_of_keys:
:return:
"""
list_of_keys = sorted(list_of_keys)  # sorted() returns a new list; calling it without assignment did nothing
return "#".join(list_of_keys)
def _pack(self, value):
"""
cache tracks the age of an entry, may be helpful in the future
:param value:
:return:
"""
return [value, str(time.time())]
def _unpack(self, value):
"""
removes the timestamp around the cached value, if any
:param value:
:return:
"""
# there are some old entries without timestamp
if isinstance(value, str) or isinstance(value, int):
return value
return value[0]
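# Illustrative round-trip usage (not from the original project; the cache name and the distance
# value below are made up, and the path is resolved relative to root.py by path()):
# cache = KeyValueCache('demo_cache')
# cache.cache_complex(['Berlin', 'Paris'], 878)
# cache.get_complex(['Paris', 'Berlin'])   # -> 878; keys are sorted, so order does not matter
# cache.cache('some_key', None)            # ignored: None marks an invalid result and is not stored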
| 30.931624
| 115
| 0.585797
| 449
| 3,619
| 4.538976
| 0.325167
| 0.092738
| 0.076546
| 0.020608
| 0.136899
| 0.098626
| 0.027478
| 0
| 0
| 0
| 0
| 0.004
| 0.309201
| 3,619
| 116
| 116
| 31.198276
| 0.8112
| 0.235424
| 0
| 0.137931
| 0
| 0
| 0.033953
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.172414
| false
| 0
| 0.155172
| 0
| 0.448276
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6af7f07129b756fc33dfdd705556d009ef89fe63
| 3,121
|
py
|
Python
|
nsst_translate_corpus.py
|
AlexanderJenke/nsst
|
75f6afa39568c72c9c513ac0313db33b80bb67d5
|
[
"Apache-2.0"
] | null | null | null |
nsst_translate_corpus.py
|
AlexanderJenke/nsst
|
75f6afa39568c72c9c513ac0313db33b80bb67d5
|
[
"Apache-2.0"
] | null | null | null |
nsst_translate_corpus.py
|
AlexanderJenke/nsst
|
75f6afa39568c72c9c513ac0313db33b80bb67d5
|
[
"Apache-2.0"
] | null | null | null |
from argparse import ArgumentParser
from tqdm import tqdm
import NSST
from nsst_translate import best_transition_sequence
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("--nsst_file", default="output/nsst_tss20_th4_nSt100_Q0.pkl", help="nsst file")
parser.add_argument("--src_lang", default="output/europarl-v7.de-en.de.clean")
parser.add_argument("--tgt_lang", default="output/europarl-v7.de-en.en.clean")
parser.add_argument("--enforce_n_reg", default=True)
parser.add_argument("--output", default=f"output/nsst_stat_nreg_100Q0.csv")
args = parser.parse_args()
args.enforce_n_final_reg = False
# load NSST
nsst = NSST.NSST()
nsst.load(args.nsst_file)
args.nsst = nsst
# open files
src_file = open(args.src_lang, 'r')
tgt_file = open(args.tgt_lang, 'r')
output_file = open(args.output, 'w')
# iterate over sentences, first 4096 -> test sentences
for src, tgt, _ in tqdm(list(zip(src_file, tgt_file, range(4096))), desc="Processing sentences"):
# remove line breaks
src = src[:-1]
tgt = tgt[:-1]
# try to translate
try:
# prepare tokenisations
token_src = [nsst.tokenization_src[word] if word in nsst.tokenization_src else 0
for word in src.split(" ") if len(word)]
token_tgt = [nsst.tokenization_tgt[word] if word in nsst.tokenization_tgt else 0
for word in tgt.split(" ") if len(word)]
# run nsst
args.input = src
args.token_src = token_src
result = best_transition_sequence(args)
# get best result
pred = sorted((k for k in result
if ('Qf' in args.nsst_file or not args.enforce_n_final_reg or len(k[1]) == 1)
and ('Q0' in args.nsst_file or k[0] == -1)
),
key=lambda x: x[2],
reverse=True)[0]
n_res = len(result)
q, reg, prob = pred
# write to csv
if not len(reg): # catch empty registers
continue
token_pred = [w for w in reg[0].split(' ') if len(w)]
pred_str = ""
for t in token_pred:
pred_str += f"{nsst.tokenization_tgt_lut[int(t)]} "
token_src_str = ""
for t in token_src:
token_src_str += f"{t} "
token_tgt_str = ""
for t in token_tgt:
token_tgt_str += f"{t} "
token_pred_str = ""
for t in token_pred:
token_pred_str += f"{t} "
print(f"{src};{token_src_str[:-1]};"
f"{tgt};{token_tgt_str[:-1]};"
f"{pred_str};{token_pred_str[:-1]};"
f"{prob};{len(reg)};{n_res}",
file=output_file)
output_file.flush()
except RuntimeError:
pass
# close files
src_file.close()
tgt_file.close()
output_file.close()
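# Example invocation (not part of the original script): the paths mirror the argparse defaults
# above; only the output file name is an illustrative override.
# python nsst_translate_corpus.py \
#     --nsst_file output/nsst_tss20_th4_nSt100_Q0.pkl \
#     --src_lang output/europarl-v7.de-en.de.clean \
#     --tgt_lang output/europarl-v7.de-en.en.clean \
#     --output output/nsst_stat_example.csv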
| 32.852632
| 104
| 0.544056
| 404
| 3,121
| 3.977723
| 0.279703
| 0.034848
| 0.052894
| 0.022402
| 0.18046
| 0.100809
| 0.065961
| 0
| 0
| 0
| 0
| 0.017476
| 0.339955
| 3,121
| 94
| 105
| 33.202128
| 0.762621
| 0.065043
| 0
| 0.03125
| 0
| 0
| 0.135237
| 0.096008
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.015625
| 0.0625
| 0
| 0.0625
| 0.015625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6afa2508436ad02c7fe767127789a72b5fa053d8
| 382
|
py
|
Python
|
10 Days of Statistics/Day 1/Standard Deviation.py
|
dhyanpatel110/HACKERRANK
|
949b1ff468ff3487663bf063a8fe6cdfb9dea26b
|
[
"Apache-2.0"
] | null | null | null |
10 Days of Statistics/Day 1/Standard Deviation.py
|
dhyanpatel110/HACKERRANK
|
949b1ff468ff3487663bf063a8fe6cdfb9dea26b
|
[
"Apache-2.0"
] | null | null | null |
10 Days of Statistics/Day 1/Standard Deviation.py
|
dhyanpatel110/HACKERRANK
|
949b1ff468ff3487663bf063a8fe6cdfb9dea26b
|
[
"Apache-2.0"
] | null | null | null |
# Import library
import math
# Define functions
def mean(data):
return sum(data) / len(data)
def stddev(data, size):
m = mean(data)  # compute the mean once instead of on every loop iteration
total = 0  # avoid shadowing the built-in sum()
for i in range(size):
total = total + (data[i] - m) ** 2
return math.sqrt(total / size)
# Set data
size = int(input())
numbers = list(map(int, input().split()))
# Get standard deviation
print(round(stddev(numbers, size), 1))
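# Worked example (not part of the original solution): for the input
#   5
#   10 40 30 50 20
# the mean is 30, the squared deviations sum to 1000, the variance is 1000 / 5 = 200,
# and the program prints round(sqrt(200), 1) = 14.1.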
| 19.1
| 47
| 0.63089
| 57
| 382
| 4.22807
| 0.578947
| 0.06639
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01
| 0.21466
| 382
| 19
| 48
| 20.105263
| 0.793333
| 0.167539
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.090909
| 0.090909
| 0.454545
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6afb588f82055ac18339fc17c00162ed0a0496d8
| 314
|
py
|
Python
|
Homework/Hw4/Solution/problem5a.py
|
jmsevillam/Herramientas-Computacionales-UniAndes
|
957338873bd6a17201dfd4629c7edd5760e2271d
|
[
"MIT"
] | null | null | null |
Homework/Hw4/Solution/problem5a.py
|
jmsevillam/Herramientas-Computacionales-UniAndes
|
957338873bd6a17201dfd4629c7edd5760e2271d
|
[
"MIT"
] | null | null | null |
Homework/Hw4/Solution/problem5a.py
|
jmsevillam/Herramientas-Computacionales-UniAndes
|
957338873bd6a17201dfd4629c7edd5760e2271d
|
[
"MIT"
] | 5
|
2019-05-27T13:35:51.000Z
|
2020-09-30T15:19:39.000Z
|
def decode(word1,word2,code):
if len(word1)==1:
code+=word1+word2
return code
else:
code+=word1[0]+word2[0]
return decode(word1[1:],word2[1:],code)
Alice='Ti rga eoe esg o h ore"ermetsCmuainls'
Bob='hspormdcdsamsaefrtecus Hraina optcoae"'
print(decode(Alice,Bob,''))
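# Note (not part of the original solution): decode() interleaves the two strings one character
# at a time, e.g. decode('ac', 'bd', '') returns 'abcd'. Alice and Bob each hold alternating
# characters of a hidden sentence, so printing the interleaving reveals it.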
| 24.153846
| 47
| 0.630573
| 44
| 314
| 4.5
| 0.568182
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057143
| 0.219745
| 314
| 12
| 48
| 26.166667
| 0.75102
| 0
| 0
| 0
| 0
| 0
| 0.242038
| 0.070064
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0
| 0
| 0.3
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6afbbfc0a8b4d96b676b80363b2e541af846b662
| 7,415
|
py
|
Python
|
pychron/lasers/power/composite_calibration_manager.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 31
|
2016-03-07T02:38:17.000Z
|
2022-02-14T18:23:43.000Z
|
pychron/lasers/power/composite_calibration_manager.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 1,626
|
2015-01-07T04:52:35.000Z
|
2022-03-25T19:15:59.000Z
|
pychron/lasers/power/composite_calibration_manager.py
|
UIllinoisHALPychron/pychron
|
f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc
|
[
"Apache-2.0"
] | 26
|
2015-05-23T00:10:06.000Z
|
2022-03-07T16:51:57.000Z
|
# ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import HasTraits, Instance, DelegatesTo, Button, List, Any, Float
from traitsui.api import View, Item, VGroup, HGroup, Group, spring, TabularEditor
# ============= standard library imports ========================
import pickle
import os
from numpy import polyval
# ============= local library imports ==========================
from pychron.managers.manager import Manager
from pychron.database.selectors.power_calibration_selector import (
PowerCalibrationSelector,
)
from pychron.database.adapters.power_calibration_adapter import PowerCalibrationAdapter
from pychron.paths import paths
from pychron.graph.graph import Graph
from pychron.hardware.meter_calibration import MeterCalibration
"""
use a dbselector to select data
"""
class BoundsSelector(HasTraits):
graph = Instance(Graph)
def traits_view(self):
v = View(
Item("graph", show_label=False, style="custom"),
buttons=["OK", "Cancel"],
kind="livemodal",
)
return v
class CompositeCalibrationManager(Manager):
db = Instance(PowerCalibrationAdapter)
selector = Instance(PowerCalibrationSelector)
append = Button
replace = Button
load_graph = Button
save = Button
selected_calibrations = List
selected = Any
results = DelegatesTo("selector")
graph = Instance(Graph)
dclicked = Any
parent_name = "FusionsDiode"
power = Float
input = Float
def _power_changed(self):
pc = self._load_calibration()
if pc is not None:
self.input, _ = pc.get_input(self.power)
def _load_calibration(self):
try:
p = self._get_calibration_path()
with open(p, "rb") as f:
pc = pickle.load(f)
except:
return
return pc
def _dclicked_changed(self):
s = self.selected
if s is not None:
s.bounds = None
s.load_graph()
s.graph.add_range_selector()
bc = BoundsSelector(graph=s.graph)
info = bc.edit_traits()
if info.result:
bounds = s.graph.plots[0].default_index.metadata["selections"]
s.bounds = bounds
s.calibration_bounds = (
polyval(s.coefficients, bounds[0]),
polyval(s.coefficients, bounds[1]),
)
def _append_fired(self):
s = self.selector.selected
if s is not None:
for si in s:
trs = list(si.traits().keys())
trs.remove("graph")  # list.remove() mutates in place and returns None, so chaining it discarded the list
self.selected_calibrations.append(si.clone_traits(traits=trs))
def _replace_fired(self):
s = self.selector.selected
trs = list(s.traits().keys())
trs.remove("graph")
self.selected_calibrations = s.clone_traits(traits=trs)
def _save_fired(self):
self._dump_calibration()
def _dump_calibration(self):
pc = MeterCalibration()
coeffs = []
bounds = []
for s in self.selected_calibrations:
coeffs.append(s.coefficients)
bounds.append(s.calibration_bounds)
pc.coefficients = coeffs
pc.bounds = bounds
p = self._get_calibration_path()
self.info("saving calibration to {}".format(p))
with open(p, "wb") as f:
pickle.dump(pc, f)
def _get_calibration_path(self):
p = os.path.join(
paths.hidden_dir, "{}_power_calibration".format(self.parent_name)
)
return p
def _load_graph_fired(self):
g = self.graph
g.clear()
# g.new_plot(zoom=True, pan=True,
# padding=[40, 10, 10, 40]
# )
has_bounds = False
for i, s in enumerate(self.selected_calibrations):
if s.bounds:
has_bounds = True
elif has_bounds:
g.clear()
self._plot_factory(g)
self.warning_dialog("{} does not have its bounds set".format(s.rid))
break
s.load_graph(graph=g, new_plot=i == 0)
g.redraw()
def traits_view(self):
selector_grp = Group(Item("selector", style="custom", show_label=False))
transfer_grp = VGroup(
spring,
VGroup(Item("append", show_label=False), Item("replace", show_label=False)),
spring,
)
editor = TabularEditor(
adapter=self.selector.tabular_adapter(),
editable=False,
dclicked="object.dclicked",
selected="object.selected",
)
selected_grp = Item("selected_calibrations", editor=editor, show_label=False)
data_tab = Group(
HGroup(selector_grp, transfer_grp, selected_grp),
show_border=True,
label="Data",
)
process_tab = Group(
HGroup(
Item("power"),
Item("input", format_str=" %0.3f ", style="readonly"),
spring,
Item("save", show_label=False),
Item("load_graph", show_label=False),
),
Item("graph", style="custom", show_label=False),
show_border=True,
label="Process",
)
v = View(
VGroup(data_tab, process_tab),
resizable=True,
title="Composite {} Power Calibration".format(self.parent_name),
)
return v
def _graph_default(self):
g = Graph(
container_dict={
# 'fill_padding':True,
# 'bgcolor':'red',
"padding": 5
}
)
self._plot_factory(g)
return g
def _plot_factory(self, graph):
graph.new_plot(
zoom=True,
pan=True,
padding=[50, 10, 10, 40],
xtitle="Setpoint (%)",
ytitle="Measured Power (W)",
)
def _db_default(self):
if self.parent_name == "FusionsDiode":
name = paths.diodelaser_db
else:
name = paths.co2laser_db
db = PowerCalibrationAdapter(name=name, kind="sqlite")
db.connect()
return db
def _selector_default(self):
return self.db._selector_factory()
if __name__ == "__main__":
ccm = CompositeCalibrationManager()
ccm.configure_traits()
# ============= EOF =============================================
| 30.142276
| 88
| 0.552124
| 774
| 7,415
| 5.127907
| 0.31137
| 0.018141
| 0.028219
| 0.013605
| 0.11741
| 0.081633
| 0.058453
| 0
| 0
| 0
| 0
| 0.006265
| 0.311126
| 7,415
| 245
| 89
| 30.265306
| 0.770752
| 0.157249
| 0
| 0.130682
| 0
| 0
| 0.062086
| 0.003395
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085227
| false
| 0
| 0.068182
| 0.005682
| 0.289773
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6afbd0d610e5a63b6a074ba49e684ae0359ba35a
| 3,957
|
py
|
Python
|
ttt_package/libs/best_move.py
|
Ipgnosis/tic_tac_toe
|
e1519b702531965cc647ff37c1c46d72f4b3b24e
|
[
"BSD-3-Clause"
] | null | null | null |
ttt_package/libs/best_move.py
|
Ipgnosis/tic_tac_toe
|
e1519b702531965cc647ff37c1c46d72f4b3b24e
|
[
"BSD-3-Clause"
] | 4
|
2021-03-25T19:52:40.000Z
|
2021-12-12T17:57:11.000Z
|
ttt_package/libs/best_move.py
|
Ipgnosis/tic_tac_toe
|
e1519b702531965cc647ff37c1c46d72f4b3b24e
|
[
"BSD-3-Clause"
] | null | null | null |
# refactored from make_play to simplify
# by Russell on 3/5/21
#from ttt_package.libs.move_utils import get_open_cells
from ttt_package.libs.compare import get_transposed_games, reorient_games
from ttt_package.libs.calc_game_bound import calc_game_bound
from ttt_package.libs.maxi_min import maximin
# find the best move for this agent, based on prior games in the game_history
def best_move(this_board, agent, ttt_base, probs_calc):
candidate_games = []
lower_bound = 0
upper_bound = 0
# note that len gives the number of the move about to be made
num_moves = len(this_board)
bounds_list = []
#print("best_move - this_board:", this_board)
# TRANSPOSE the current game state into 8 different games and store in a list
# the returned value is a list of dictionaries that contain the transposed game
# and the source function, to allow the game to be transposed back
tg_list = get_transposed_games(this_board)
#print("best_move: tg_list =", tg_list)
# for each of the 8 transposed versions of the current game in question
# build a list of lower and upper bound tuples for the tg_list using calc_game_bound
for tgame in tg_list:
lower_bound = calc_game_bound(tgame["transpose"], agent, 'L')
upper_bound = calc_game_bound(tgame["transpose"], agent, 'U')
bounds_tuple = (lower_bound, upper_bound)
bounds_list.append(bounds_tuple)
#print("best_move: bounds_tuple =", bounds_tuple)
# fetch the list of candidate games from the game history
# we need to look at losing and drawing games so that we can thoroughly explore the action space
# we must avoid overlooking a good move made early that resulted in a draw/loss because of a
# later bad move - these will be resolved later via backpropagation
candidate_games = ttt_base.get_games_list(bounds_list)
#print("best_move: candidate_games =", candidate_games)
# if there is at least one game that matches the current game state
if candidate_games != False:
# this is the list of games that match the transposed game list
# de-transpose the candidate games to get the right cell for the next move
# get a list of the matching detransposition games of the current game
reoriented_candidates = reorient_games(tg_list, candidate_games)
#print("best_move: number of reoriented_candidates games = ", len(reoriented_candidates))
#print("best_move: number of candidate games = ", len(candidate_games))
#print('best_move: reoriented_candidates =', reoriented_candidates)
#print('best_move: candidate_games =', candidate_games)
maximin_list = []
# iterate though the game candidates
for this_game in range(len(reoriented_candidates)):
these_probs = []
# get the probability element for the next move of this game candidate
these_probs = reoriented_candidates[this_game]["probs"][num_moves].copy(
)
# tack on the cell # of the move
these_probs.append(
reoriented_candidates[this_game]["game"][num_moves])
# append the game submission data to the list to be submitted to maximin
maximin_list.append(these_probs)
#print("maximin_list:", maximin_list)
# send the list of probabilities of the detransposed recorded games for the next move
recommended_move = maximin(maximin_list)
#print("best_move: move = ", recommended_move)
return recommended_move
else: # there are no matching games in the game history
#print("best_move: random choice...")
# return random_move(this_board)
# estimate the optimal next move
optimal_move = probs_calc.calc_next_move(this_board)
#print("This board =", this_board)
#print("Calculating optimal move =", optimal_move)
return optimal_move
| 42.548387
| 100
| 0.703058
| 561
| 3,957
| 4.761141
| 0.286988
| 0.035942
| 0.048671
| 0.026956
| 0.132535
| 0.058405
| 0.058405
| 0
| 0
| 0
| 0
| 0.002632
| 0.231994
| 3,957
| 92
| 101
| 43.01087
| 0.876275
| 0.570382
| 0
| 0
| 0
| 0
| 0.01748
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.096774
| 0
| 0.193548
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6afc188b33bb84dbd980d2429af99225dafac393
| 805
|
py
|
Python
|
yard/skills/66-python/cookbook/yvhai/demo/mt/raw_thread.py
|
paser4se/bbxyard
|
d09bc6efb75618b2cef047bad9c8b835043446cb
|
[
"Apache-2.0"
] | 1
|
2016-03-29T02:01:58.000Z
|
2016-03-29T02:01:58.000Z
|
yard/skills/66-python/cookbook/yvhai/demo/mt/raw_thread.py
|
paser4se/bbxyard
|
d09bc6efb75618b2cef047bad9c8b835043446cb
|
[
"Apache-2.0"
] | 18
|
2019-02-13T09:15:25.000Z
|
2021-12-09T21:32:13.000Z
|
yard/skills/66-python/cookbook/yvhai/demo/mt/raw_thread.py
|
paser4se/bbxyard
|
d09bc6efb75618b2cef047bad9c8b835043446cb
|
[
"Apache-2.0"
] | 2
|
2020-07-05T01:01:30.000Z
|
2020-07-08T22:33:06.000Z
|
#!/usr/bin/env python3
# Python thread test
import _thread
import time
from yvhai.demo.base import YHDemo
def print_time(thread_name, interval, times):
for cnt in range(times):
time.sleep(interval)
print(" -- %s: %s" % (thread_name, time.ctime(time.time())))
class RawThreadDemo(YHDemo):
def __init__(self):
super(RawThreadDemo, self).__init__('_thread')
@staticmethod
def main():
try:
_thread.start_new_thread(print_time, ("Thread-01", 1, 10))
_thread.start_new_thread(print_time, ("Thread-02", 2, 6))
except:
print("Error: 无法启动线程")
# 主线程无限等待
while 1:
pass
@staticmethod
def demo(args=[]):
RawThreadDemo.main()
if __name__ == '__main__':
RawThreadDemo.demo()
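# Illustrative alternative (not part of the original demo): the same two timed printers can be
# run with the higher-level threading module, which allows joining the workers instead of the
# busy "while 1: pass" loop used above.
import threading

def threading_variant():
    t1 = threading.Thread(target=print_time, args=("Thread-01", 1, 10))
    t2 = threading.Thread(target=print_time, args=("Thread-02", 2, 6))
    t1.start()
    t2.start()
    t1.join()  # wait for both workers to finish
    t2.join()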
| 20.125
| 70
| 0.601242
| 94
| 805
| 4.840426
| 0.531915
| 0.059341
| 0.098901
| 0.087912
| 0.153846
| 0.153846
| 0.153846
| 0
| 0
| 0
| 0
| 0.018676
| 0.268323
| 805
| 39
| 71
| 20.641026
| 0.75382
| 0.050932
| 0
| 0.083333
| 0
| 0
| 0.073587
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0.041667
| 0.125
| 0
| 0.333333
| 0.208333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6afc7c91ed45303d7c201609e1cc6104aa29ad90
| 3,108
|
py
|
Python
|
rasa/utils/tensorflow/constants.py
|
praneethgb/rasa
|
5bf227f165d0b041a367d2c0bbf712ebb6a54792
|
[
"Apache-2.0"
] | 8
|
2020-09-16T17:22:13.000Z
|
2022-02-01T00:11:30.000Z
|
rasa/utils/tensorflow/constants.py
|
praneethgb/rasa
|
5bf227f165d0b041a367d2c0bbf712ebb6a54792
|
[
"Apache-2.0"
] | 216
|
2020-09-20T13:05:58.000Z
|
2022-03-28T12:10:24.000Z
|
rasa/utils/tensorflow/constants.py
|
praneethgb/rasa
|
5bf227f165d0b041a367d2c0bbf712ebb6a54792
|
[
"Apache-2.0"
] | 1
|
2022-02-01T18:23:23.000Z
|
2022-02-01T18:23:23.000Z
|
# constants for configuration parameters of our tensorflow models
LABEL = "label"
IDS = "ids"
# LABEL_PAD_ID is used to pad multi-label training examples.
# It should be < 0 to avoid index out of bounds errors by tf.one_hot.
LABEL_PAD_ID = -1
HIDDEN_LAYERS_SIZES = "hidden_layers_sizes"
SHARE_HIDDEN_LAYERS = "share_hidden_layers"
TRANSFORMER_SIZE = "transformer_size"
NUM_TRANSFORMER_LAYERS = "number_of_transformer_layers"
NUM_HEADS = "number_of_attention_heads"
UNIDIRECTIONAL_ENCODER = "unidirectional_encoder"
KEY_RELATIVE_ATTENTION = "use_key_relative_attention"
VALUE_RELATIVE_ATTENTION = "use_value_relative_attention"
MAX_RELATIVE_POSITION = "max_relative_position"
BATCH_SIZES = "batch_size"
BATCH_STRATEGY = "batch_strategy"
EPOCHS = "epochs"
RANDOM_SEED = "random_seed"
LEARNING_RATE = "learning_rate"
DENSE_DIMENSION = "dense_dimension"
CONCAT_DIMENSION = "concat_dimension"
EMBEDDING_DIMENSION = "embedding_dimension"
ENCODING_DIMENSION = "encoding_dimension"
SIMILARITY_TYPE = "similarity_type"
LOSS_TYPE = "loss_type"
NUM_NEG = "number_of_negative_examples"
MAX_POS_SIM = "maximum_positive_similarity"
MAX_NEG_SIM = "maximum_negative_similarity"
USE_MAX_NEG_SIM = "use_maximum_negative_similarity"
SCALE_LOSS = "scale_loss"
REGULARIZATION_CONSTANT = "regularization_constant"
NEGATIVE_MARGIN_SCALE = "negative_margin_scale"
DROP_RATE = "drop_rate"
DROP_RATE_ATTENTION = "drop_rate_attention"
DROP_RATE_DIALOGUE = "drop_rate_dialogue"
DROP_RATE_LABEL = "drop_rate_label"
CONSTRAIN_SIMILARITIES = "constrain_similarities"
WEIGHT_SPARSITY = "weight_sparsity" # Deprecated and superseeded by CONNECTION_DENSITY
CONNECTION_DENSITY = "connection_density"
EVAL_NUM_EPOCHS = "evaluate_every_number_of_epochs"
EVAL_NUM_EXAMPLES = "evaluate_on_number_of_examples"
INTENT_CLASSIFICATION = "intent_classification"
ENTITY_RECOGNITION = "entity_recognition"
MASKED_LM = "use_masked_language_model"
SPARSE_INPUT_DROPOUT = "use_sparse_input_dropout"
DENSE_INPUT_DROPOUT = "use_dense_input_dropout"
RANKING_LENGTH = "ranking_length"
MODEL_CONFIDENCE = "model_confidence"
BILOU_FLAG = "BILOU_flag"
RETRIEVAL_INTENT = "retrieval_intent"
USE_TEXT_AS_LABEL = "use_text_as_label"
SOFTMAX = "softmax"
MARGIN = "margin"
AUTO = "auto"
INNER = "inner"
LINEAR_NORM = "linear_norm"
COSINE = "cosine"
CROSS_ENTROPY = "cross_entropy"
BALANCED = "balanced"
SEQUENCE = "sequence"
SEQUENCE_LENGTH = f"{SEQUENCE}_lengths"
SENTENCE = "sentence"
POOLING = "pooling"
MAX_POOLING = "max"
MEAN_POOLING = "mean"
TENSORBOARD_LOG_DIR = "tensorboard_log_directory"
TENSORBOARD_LOG_LEVEL = "tensorboard_log_level"
SEQUENCE_FEATURES = "sequence_features"
SENTENCE_FEATURES = "sentence_features"
FEATURIZERS = "featurizers"
CHECKPOINT_MODEL = "checkpoint_model"
MASK = "mask"
IGNORE_INTENTS_LIST = "ignore_intents_list"
TOLERANCE = "tolerance"
POSITIVE_SCORES_KEY = "positive_scores"
NEGATIVE_SCORES_KEY = "negative_scores"
RANKING_KEY = "label_ranking"
QUERY_INTENT_KEY = "query_intent"
SCORE_KEY = "score"
THRESHOLD_KEY = "threshold"
SEVERITY_KEY = "severity"
NAME = "name"
EPOCH_OVERRIDE = "epoch_override"
| 27.75
| 87
| 0.818855
| 392
| 3,108
| 6
| 0.369898
| 0.027211
| 0.008503
| 0.013605
| 0.031463
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000715
| 0.099743
| 3,108
| 111
| 88
| 28
| 0.839886
| 0.076898
| 0
| 0
| 0
| 0
| 0.428422
| 0.184358
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6afcd2c6547b58f11a5de71fbf337c71913e7438
| 32,025
|
py
|
Python
|
client/canyons-of-mars/maze.py
|
GamesCreatorsClub/GCC-Rover
|
25a69f62a1bb01fc421924ec39f180f50d6a640b
|
[
"MIT"
] | 3
|
2018-02-13T21:39:55.000Z
|
2018-04-26T18:17:39.000Z
|
client/canyons-of-mars/maze.py
|
GamesCreatorsClub/GCC-Rover
|
25a69f62a1bb01fc421924ec39f180f50d6a640b
|
[
"MIT"
] | null | null | null |
client/canyons-of-mars/maze.py
|
GamesCreatorsClub/GCC-Rover
|
25a69f62a1bb01fc421924ec39f180f50d6a640b
|
[
"MIT"
] | null | null | null |
#
# Copyright 2016-2019 Games Creators Club
#
# MIT License
#
import math
import pyroslib
import pyroslib.logging
import time
from pyroslib.logging import log, LOG_LEVEL_ALWAYS, LOG_LEVEL_INFO, LOG_LEVEL_DEBUG
from rover import WheelOdos, WHEEL_NAMES
from rover import normaiseAngle, angleDiference
from challenge_utils import Action, PID
SQRT2 = math.sqrt(2)
PIhalf = math.pi / 2
class MazeAttitude:
UNKNOWN = 0
LEFT_WALL = 1
RIGHT_WALL = 2
FRONT_WALL = 4
BACK_WALL = 8
NO_GAP = 0
FORWARD_GAP = 1
SIDE_GAP = 2
POINTS = [0, 45, 90, 135, 180, 225, 270, 315]
WALLS = [90, 270, 0, 180]
L0_45 = 0
L45_90 = 45
L90_135 = 90
L135_180 = 135
L180_225 = 180
L225_270 = 225
L270_315 = 270
L315_0 = 315
LINES = [L0_45, L45_90, L90_135, L135_180, L180_225, L225_270, L270_315, L315_0]
ANGLE_TOLLERANCE = 1.075
@staticmethod
def normAngle(a):
if a > PIhalf:
a = a - math.pi
elif a <= -PIhalf:
a = a + math.pi
return a
class Line:
def __init__(self, line_index, long_point_index, short_point_index, factor, adjust):
self.line_index = line_index
self.short_point_index = short_point_index
self.long_point_index = long_point_index
self.factor = factor
self.adjust = adjust
self.angle = None
def calcAngle(self, distances):
long_distance = distances[self.long_point_index]
short_distance = distances[self.short_point_index]
if long_distance is not None and short_distance is not None:
lsqrt2 = long_distance / SQRT2
self.angle = MazeAttitude.normAngle(math.atan2(lsqrt2, lsqrt2 - short_distance) * self.factor + self.adjust)
else:
self.angle = None
class Wall:
def __init__(self, distance_sensor_angle, distance_sensor_index, wall_point_kind, left_mid_point_index, left_point_index, mid_point_index, right_point_index):
self.ds_angle = distance_sensor_angle
self.ds_index = distance_sensor_index
self.wall_point_kind = wall_point_kind
self.left_mid_point_index = left_mid_point_index
self.left_point_index = left_point_index
self.mid_point_index = mid_point_index
self.right_point_index = right_point_index
self.is_front_or_back = self.ds_angle == 0 or self.ds_angle == 180
self.selected_line = None
self.angle = None
self.distance = None
def setAngle(self, angle, distances):
self.angle = angle
distance = distances[self.mid_point_index]
if distance < 1:
self.distance = 0
else:
if self.is_front_or_back:
self.distance = abs(int(math.sin(angle) * distance))
else:
self.distance = abs(int(math.cos(angle) * distance))
def setAngleAndDistance(self, angle, distance):
self.angle = angle
self.distance = distance
def tryFindingWall(self, distances, lines, points):
lmline = lines[self.left_mid_point_index]
lline = lines[self.left_point_index]
mline = lines[self.mid_point_index]
rline = lines[self.right_point_index]
dlong1 = distances[lline.long_point_index]
dmid = distances[mline.short_point_index]
dlong2 = distances[mline.long_point_index]
plong1 = points[self.left_point_index]
pmid = points[self.mid_point_index]
plong2 = points[self.right_point_index]
if dlong1 < dlong2 and plong1 != MazeAttitude.UNKNOWN and lmline.angle * MazeAttitude.ANGLE_TOLLERANCE >= lline.angle >= lmline.angle / MazeAttitude.ANGLE_TOLLERANCE:
points[self.mid_point_index] = points[lline.long_point_index]
angle = MazeAttitude.normAngle(mline.angle - PIhalf)
distance = distances[self.right_point_index] * abs(math.sin(mline.angle) / SQRT2)
self.setAngleAndDistance(angle, distance)
elif dlong1 >= dlong2 and plong2 != MazeAttitude.UNKNOWN and mline.angle * MazeAttitude.ANGLE_TOLLERANCE >= rline.angle >= mline.angle / MazeAttitude.ANGLE_TOLLERANCE:
points[self.mid_point_index] = points[rline.long_point_index]
angle = MazeAttitude.normAngle(mline.angle + PIhalf)
distance = distances[self.left_point_index] * abs(math.sin(mline.angle) / SQRT2)
self.setAngleAndDistance(angle, distance)
elif lline.angle is not None and mline.angle is not None:
if lline.angle * MazeAttitude.ANGLE_TOLLERANCE >= mline.angle >= lline.angle / MazeAttitude.ANGLE_TOLLERANCE:
if plong1 == MazeAttitude.UNKNOWN:
points[self.left_point_index] = self.wall_point_kind
if pmid == MazeAttitude.UNKNOWN:
points[self.mid_point_index] = self.wall_point_kind
if plong2 == MazeAttitude.UNKNOWN:
points[self.right_point_index] = self.wall_point_kind
self.setAngle(mline.angle, distances)
else:
if dlong1 < dlong2 and plong1 == MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN:
points[self.left_point_index] = self.wall_point_kind
points[self.mid_point_index] = self.wall_point_kind
self.setAngle(lline.angle, distances)
elif dlong1 >= dlong2 and plong2 == MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN:
points[self.mid_point_index] = self.wall_point_kind
points[self.right_point_index] = self.wall_point_kind
self.setAngle(mline.angle, distances)
elif plong1 == MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN and plong2 != MazeAttitude.UNKNOWN:
points[self.left_point_index] = self.wall_point_kind
points[self.mid_point_index] = self.wall_point_kind
self.setAngle(lline.angle, distances)
elif plong1 != MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN and plong2 == MazeAttitude.UNKNOWN:
points[self.mid_point_index] = self.wall_point_kind
points[self.right_point_index] = self.wall_point_kind
self.setAngle(mline.angle, distances)
elif lline.angle is not None and plong1 == MazeAttitude.UNKNOWN and pmid == MazeAttitude.UNKNOWN:
points[self.left_point_index] = self.wall_point_kind
points[self.mid_point_index] = self.wall_point_kind
self.setAngle(lline.angle, distances)
elif mline.angle is not None and pmid == MazeAttitude.UNKNOWN and plong2 == MazeAttitude.UNKNOWN:
points[self.mid_point_index] = self.wall_point_kind
points[self.right_point_index] = self.wall_point_kind
self.setAngle(mline.angle, distances)
def __init__(self):
self.lines = {self.L315_0: self.Line(self.L315_0, 315, 0, -1, math.pi), self.L0_45: self.Line(self.L0_45, 45, 0, 1, -math.pi),
self.L45_90: self.Line(self.L45_90, 45, 90, -1, PIhalf), self.L90_135: self.Line(self.L90_135, 135, 90, 1, -PIhalf),
self.L135_180: self.Line(self.L135_180, 135, 180, -1, math.pi), self.L180_225: self.Line(self.L180_225, 225, 180, 1, -math.pi),
self.L225_270: self.Line(self.L225_270, 225, 270, -1, PIhalf), self.L270_315: self.Line(self.L270_315, 315, 270, 1, -PIhalf)}
self.right_wall = self.Wall(90, 2, self.RIGHT_WALL, 0, 45, 90, 135)
self.left_wall = self.Wall(270, 6, self.LEFT_WALL, 180, 225, 270, 315)
self.front_wall = self.Wall(0, 0, self.FRONT_WALL, 270, 315, 0, 45)
self.back_wall = self.Wall(180, 4, self.BACK_WALL, 90, 135, 180, 225)
self.left_gap = self.NO_GAP
self.right_gap = self.NO_GAP
self.walls = {self.right_wall.ds_angle: self.right_wall, self.left_wall.ds_angle: self.left_wall, self.front_wall.ds_angle: self.front_wall, self.back_wall.ds_angle: self.back_wall}
self.points = {0: 0, 45: 0, 90: 0, 135: 0, 180: 0, 225: 0, 270: 0, 315: 0}
self.distances = {0: 0, 45: 0, 90: 0, 135: 0, 180: 0, 225: 0, 270: 0, 315: 0}
def calculate(self, state):
def getPointDistance(state, angle):
distance = state.radar.radar[angle]
status = state.radar.status[angle]
if status == 0:
return distance
last_distance = state.radar.last_radar[angle]
if abs(distance - last_distance) < 100:
return distance
return None
def updateUndefinedWall(wall, preferable_wall, wall_adjust, second_wall):
if wall.angle is None and self.distances[wall.ds_angle] is not None:
if preferable_wall.angle is not None:
wall.setAngleAndDistance(self.normAngle(preferable_wall.angle + wall_adjust), self.distances[wall.mid_point_index])
else:
wall.setAngleAndDistance(self.normAngle(second_wall.angle - wall_adjust), self.distances[wall.mid_point_index])
self.points[wall.ds_angle] = wall.wall_point_kind
self.distances = {p: getPointDistance(state, p) for p in self.POINTS}
for line in self.lines:
self.lines[line].calcAngle(self.distances)
wls = [self.walls[w_ds_angle] for w_ds_angle in self.WALLS if self.distances[w_ds_angle] is not None]
wall_processing_order = sorted(wls,
key=lambda wall: self.distances[wall.ds_angle])
for wall in wall_processing_order:
wall.tryFindingWall(self.distances, self.lines, self.points)
updateUndefinedWall(self.front_wall, self.right_wall, -PIhalf, self.left_wall)
updateUndefinedWall(self.back_wall, self.right_wall, PIhalf, self.left_wall)
updateUndefinedWall(self.right_wall, self.front_wall, PIhalf, self.back_wall)
updateUndefinedWall(self.left_wall, self.front_wall, -PIhalf, self.back_wall)
# TODO calc gaps
class MoveForwardOnOdo(Action):
def __init__(self, agent, stop_action=None):
super(MoveForwardOnOdo, self).__init__(agent)
self.stop_action = stop_action
self.required_odo = {'fl': 0, 'fr': 0, 'bl': 0, 'br': 0}
def setRequiredOdo(self, distance):
for wheel_name in WHEEL_NAMES:
self.required_odo[wheel_name] = distance
def start(self):
super(MoveForwardOnOdo, self).start()
state = self.rover.getRoverState()
for wheel in self.required_odo:
self.required_odo[wheel] = WheelOdos.normaliseOdo(state.wheel_odos[wheel] + self.required_odo[wheel])
log(LOG_LEVEL_DEBUG, "Reset odo to " + str(self.required_odo) + "; starting...")
self.rover.command(pyroslib.publish, 300, 120)
# pyroslib.publish("move/steer", "300 120")
def end(self):
super(MoveForwardOnOdo, self).end()
def next(self):
state = self.rover.getRoverState()
do_stop = False
log(LOG_LEVEL_DEBUG, "Driving to " + str(self.required_odo))
for wheel_name in WHEEL_NAMES:
if state.wheel_odos[wheel_name] >= self.required_odo[wheel_name]:
do_stop = True
if state.radar.radar[0] < 1.0 or state.radar.radar[315] < 1.0 or state.radar.radar[45] < 1.0:
do_stop = True
if do_stop:
return self.stop_action
else:
return self
def execute(self):
pass
def getActionName(self):
return "Forward ODO"
class MazeAction(Action):
LEFT = -1
RIGHT = 1
def __init__(self, agent):
super(MazeAction, self).__init__(agent)
def check_next_action_conditions(self):
return self
class ChicaneAction(MazeAction):
def __init__(self, agent, left_or_right, distance, speed, next_action=None):
super(ChicaneAction, self).__init__(agent)
self.left_or_right = left_or_right
self.distance = distance
self.speed = speed
self.next_action = next_action
if self.left_or_right == MazeAction.RIGHT:
self.a1 = 45
self.a2 = 90
self.a3 = 135
else:
self.a1 = 315
self.a2 = 270
self.a3 = 225
self.left_corner_action = MazeTurnAroundCornerAction(self, self.LEFT, self.distance, self.speed, self)
self.right_corner_action = MazeTurnAroundCornerAction(self, self.RIGHT, self.distance, self.speed, DriverForwardForTimeAction(self, 10, self.speed, None))
def start(self):
super(ChicaneAction, self).start()
def end(self):
super(ChicaneAction, self).end()
def next(self):
state = self.rover.getRoverState()  # fetch the current state; the original relied on a module-level 'state'
if self.left_or_right == self.LEFT:
diagonal_distance = state.radar.radar[45]
else:
diagonal_distance = state.radar.radar[315]
if self.left_or_right == self.LEFT and diagonal_distance > 800:
log(LOG_LEVEL_INFO, "Found second part of chicane, rfd={: 4d}".format(int(diagonal_distance)))
self.left_or_right = self.RIGHT
elif self.left_or_right == self.RIGHT and diagonal_distance > 800:
log(LOG_LEVEL_INFO, "Found end ofchicane - leaging, rfd={: 4d}".format(int(diagonal_distance)))
return self.next_action
return self
def execute(self):
state = self.rover.getRoverState()
front_distance = state.radar.radar[0]
gain = 60
offset = 150
# Values that worked speed=150, steer=5-7, dist=4
# self.speed = 150 # 150
speed = 50 # mm/second - TODO use odo to update to correct value!
speed_steer_fudge_factor = 5 # 5-7
speed_distance_fudge_factor = 4 # 4
min_angle = 1 * math.pi / 180
steer_speed = speed * speed_steer_fudge_factor
distance_speed = speed * speed_distance_fudge_factor
if self.left_or_right == self.RIGHT:
distance = -1000000000
distance_from_wall = state.radar.radar[90]
distance_error = distance_from_wall - self.distance
angle = 0
if abs(distance_error) < 10:
angle = 0
elif distance_error > 0 and distance_error > distance_speed:
angle = math.pi / 4
if front_distance < 450:
angle += math.pi * (450 - front_distance) / 1800 # divide with 10 and by 180 -> 450/10 - 45deg
elif distance_error < 0 and distance_error < -distance_speed:
angle = -math.pi / 4
if front_distance < 450:
angle -= math.pi * (450 - front_distance) / 1800 # divide with 10 and by 180 -> 450/10 - 45deg
else:
try:
angle = math.asin(distance_error / distance_speed)
except BaseException as ex:
log(LOG_LEVEL_ALWAYS, "Domain error wa={: 3d} dw={: 4d} de={: 4d} d={: 4d} s={: 3d}".format(int(0), int(distance_from_wall), int(distance_error), int(distance), int(speed)))
else:
distance = 1000000000
distance_from_wall = state.radar.radar[270]
distance_error = distance_from_wall - self.distance
angle = 0
if abs(distance_error) < 10:
angle = 0
elif distance_error > 0 and distance_error > distance_speed:
angle = -math.pi / 4
if front_distance < 450:
angle -= math.pi * (450 - front_distance) / 1800 # divide with 10 and by 180 -> 450/10 - 45deg
elif distance_error < 0 and distance_error < -distance_speed:
angle = math.pi / 4
if front_distance < 450:
angle += math.pi * (450 - front_distance) / 1800 # divide with 10 and by 180 -> 450/10 - 45deg
else:
try:
angle = -math.asin(distance_error / distance_speed)
except BaseException as ex:
log(LOG_LEVEL_ALWAYS, "Domain error wa={: 3d} dw={: 4d} de={: 4d} d={: 4d} s={: 3d}".format(int(0), int(distance_from_wall), int(distance_error), int(distance), int(speed)))
distance = int(distance)
angle = int(angle * 180 / math.pi)
self.rover.command(pyroslib.publish, self.speed, angle, distance)
# pyroslib.publish("move/steer", str(distance) + " " + str(self.speed) + " " + str(angle))
wheel_orientations = state.wheel_odos.odos
log(LOG_LEVEL_INFO, "{:16.3f}: dist_f={: 4d} wa={: 3d} dist_w={: 4d} dist_err={: 3d} la={: 3d} ld={: 3d} ra={: 3d} rd={: 3d} s_spd={: 3d} dist_spd={: 3d} dist={: 4d} angle={: 3d} heading={: 3d} odo={:7.2f}".format(
float(time.time()),
int(front_distance),
int(0 * 180 / math.pi), int(distance_from_wall), int(distance_error),
int(0 * 180 / math.pi), int(0), int(0 * 180 / math.pi), int(0),
int(steer_speed), int(distance_speed),
int(distance), int(angle), int(state.heading.heading),
float(state.wheel_orientations.orientations['fl'])
))
def getActionName(self):
return "Chicane " + ("L" if self.left_or_right == self.LEFT else "R")
class MazeCorridorAction(MazeAction):
def __init__(self, agent, left_or_right, distance, speed, next_action=None):
super(MazeCorridorAction, self).__init__(agent)
self.left_or_right = left_or_right
self.distance = distance
self.speed = speed
self.next_action = next_action
if self.left_or_right == MazeAction.RIGHT:
self.a1 = 45
self.a2 = 90
self.a3 = 135
else:
self.a1 = 315
self.a2 = 270
self.a3 = 225
self.left_corner_action = MazeTurnAroundCornerAction(self, self.LEFT, int(self.distance * 1), self.speed, self)
self.right_corner_action = MazeTurnAroundCornerAction(self, self.RIGHT, int(self.distance * 1), self.speed, self)
# self.right_corner_action = MazeTurnAroundCornerAction(self.odo, self.radar, self.heading, self.RIGHT, self.distance, self.speed, DriverForwardForTimeActoun(10, self.speed, None))
self.been_in_chicane = False
def start(self):
super(MazeCorridorAction, self).start()
self.been_in_chicane = False
def end(self):
super(MazeCorridorAction, self).end()
def next(self):
state = self.rover.getRoverState()  # fetch the current state; the original relied on a module-level 'state'
left_diagonal_distance = state.radar.radar[315]
front_distance = state.radar.radar[0]
if state.radar.status[0] != 0 and abs(state.radar.radar_deltas[0]) > 100:
log(LOG_LEVEL_INFO, "Front distance not correct: d={:4d} s={:2d} delta={:4d}".format(front_distance, state.radar.status[0], state.radar.radar_deltas[0]))
else:
if state.left_front_distance_of_wall > 100 and front_distance < 550:
expected_diagonal_distance = 0
if state.left_wall_angle < 0:
expected_diagonal_distance = front_distance * 2 * math.cos(math.pi / 4 + state.left_wall_angle)
else:
expected_diagonal_distance = front_distance * math.cos(state.left_wall_angle) * SQRT2
if False and not self.been_in_chicane and front_distance > 300 and left_diagonal_distance > expected_diagonal_distance * 1.2:
log(LOG_LEVEL_INFO, "Found chicane... lfd={: 4d} fd={: 4d} dd={: 4d} ed={: 4d}".format(int(state.left_front_distance_of_wall), int(front_distance), int(left_diagonal_distance), int(expected_diagonal_distance)))
self.been_in_chicane = True
return ChicaneAction(self, self.LEFT, self.distance, self.speed, next_action=self)
else:
log(LOG_LEVEL_INFO, "Found corner - turning, lfd={: 4d} fd={: 4d} dd={: 4d} ed={: 4d}".format(int(state.left_front_distance_of_wall), int(front_distance), int(left_diagonal_distance), int(expected_diagonal_distance)))
return self.left_corner_action
if front_distance < 550 and state.radar.radar_deltas[0] < 0:
left_distances = state.radar.radar[270] + state.radar.radar[315]
right_distances = state.radar.radar[90] + state.radar.radar[45]
if left_distances > right_distances:
log(LOG_LEVEL_INFO, "Found corner 2 - turning left, fd={: 4d} ld={: 4d} rd={: 4d}".format(int(front_distance), int(left_distances), int(right_distances)))
return self.left_corner_action
else:
log(LOG_LEVEL_INFO, "Found corner 2 - turning left, fd={: 4d} ld={: 4d} rd={: 4d}".format(int(front_distance), int(left_distances), int(right_distances)))
return self.right_corner_action
if state.right_front_distance_of_wall > 100 and state.left_front_distance_of_wall > 100 and front_distance < 700:
log(LOG_LEVEL_INFO, "Found final corner - turning to finish, rfd={: 4d} fd={: 4d} ".format(int(state.right_front_distance_of_wall), int(front_distance)))
return self.right_corner_action
return self
def execute(self):
state = self.rover.getRoverState()
left_diagonal_distance = state.radar.radar[315]
front_distance = state.radar.radar[0]
gain = 60
offset = 150
# Values that worked speed=150, steer=5-7, dist=4
# self.speed = 150 # 150
speed = 50 # mm/second - TODO use odo to update to correct value!
speed_steer_fudge_factor = 5 # 5-7
speed_distance_fudge_factor = 4 # 4
min_angle = 1 * math.pi / 180
steer_speed = speed * speed_steer_fudge_factor
distance_speed = speed * speed_distance_fudge_factor
if self.left_or_right == self.RIGHT:
wall_angle = state.right_wall_angle
if -min_angle < state.right_wall_angle < min_angle:
distance = 1000000000
else:
distance = steer_speed / state.right_wall_angle
if 0 <= distance < 150:
distance = 150
elif -150 < distance < 0:
distance = -150
distance = -distance
distance_from_wall = state.right_wall_distance
distance_error = distance_from_wall - self.distance
angle = 0
if abs(distance_error) < 10:
angle = 0
elif distance_error > 0 and distance_error > distance_speed:
angle = math.pi / 4
elif distance_error < 0 and distance_error < -distance_speed:
angle = -math.pi / 4
else:
try:
angle = math.asin(distance_error / distance_speed)
except BaseException as ex:
log(LOG_LEVEL_ALWAYS, "Domain error wa={: 3d} dw={: 4d} de={: 4d} d={: 4d} s={: 3d}".format(int(wall_angle), int(distance_from_wall), int(distance_error), int(distance), int(speed)))
else:
wall_angle = state.left_wall_angle
if -min_angle < state.left_wall_angle < min_angle:
distance = 1000000000
else:
distance = steer_speed / state.left_wall_angle
if 0 <= distance < 150:
distance = 150
elif -150 < distance < 0:
distance = -150
distance_from_wall = state.left_wall_distance
distance_error = distance_from_wall - self.distance
angle = 0
if abs(distance_error) < 10:
angle = 0
elif distance_error > 0 and distance_error > distance_speed:
angle = -math.pi / 4
elif distance_error < 0 and distance_error < -distance_speed:
angle = math.pi / 4
else:
try:
angle = -math.asin(distance_error / distance_speed)
except BaseException as ex:
log(LOG_LEVEL_ALWAYS, "Domain error wa={: 3d} dw={: 4d} de={: 4d} d={: 4d} s={: 3d}".format(int(wall_angle), int(distance_from_wall), int(distance_error), int(distance), int(speed)))
distance = int(distance)
angle = int(angle * 180 / math.pi)
self.rover.command(pyroslib.publish, self.speed, angle, distance)
# pyroslib.publish("move/steer", str(distance) + " " + str(self.speed) + " " + str(angle))
wheel_orientations = state.wheel_odos.odos
#
log(LOG_LEVEL_INFO, "{:16.3f}: dist_f={: 4d} wa={: 3d} dist_w={: 4d} dist_err={: 3d} la={: 3d} ld={: 3d} ra={: 3d} rd={: 3d} s_spd={: 3d} dist_spd={: 3d} dist={: 4d} angle={: 3d} heading={: 3d} odo={:7.2f}".format(
float(time.time()),
int(front_distance),
int(wall_angle * 180 / math.pi), int(distance_from_wall), int(distance_error),
int(state.left_wall_angle * 180 / math.pi), int(state.left_front_distance_of_wall), int(state.right_wall_angle * 180 / math.pi), int(state.right_front_distance_of_wall),
int(steer_speed), int(distance_speed),
int(distance), int(angle), int(state.heading.heading),
float(state.wheel_orientations.orientations['fl'])
))
def getActionName(self):
return "Corridor"
class MazeTurnAroundCornerAction(MazeAction):
def __init__(self, agent, left_or_right, distance, speed, next_action=None):
super(MazeTurnAroundCornerAction, self).__init__(agent)
self.left_or_right = left_or_right
self.distance = distance * (1 if left_or_right == self.RIGHT else -1)
self.speed = speed
self.start_heading = 0
self.last_heading = 0
self.requested_heading = 0
self.pid = None
self.next_action = next_action
self.error = 0
def start(self):
super(MazeTurnAroundCornerAction, self).start()
state = self.rover.getRoverState()
self.start_heading = state.heading.heading
self.requested_heading = normaiseAngle(self.start_heading + 80 * -(1 if self.left_or_right == self.RIGHT else -1))
self.pid = PID(1, 0.0, 0.05, 1, 0, diff_method=angleDiference)
self.pid.process(self.requested_heading, self.start_heading)
log(LOG_LEVEL_INFO, "Starting to turn around corner at distance {:04d} at speed {:04d}, start heading {:07.3f}, requested heading {:07.3f}".format(self.distance, self.speed, self.start_heading, self.requested_heading))
self.rover.command(pyroslib.publish, self.speed, 0, self.distance)
# pyroslib.publish("move/steer", str(self.distance) + " " + str(self.speed))
def end(self):
super(MazeTurnAroundCornerAction, self).end()
def next(self):
state = self.rover.getRoverState()  # fetch the current state; the original relied on a module-level 'state'
heading = state.heading.heading
self.error = self.pid.process(self.requested_heading, heading)
if self.left_or_right == self.LEFT and self.error > 0:
return self
elif self.left_or_right == self.RIGHT and self.error < 0:
return self
else:
if self.next_action is not None:
log(LOG_LEVEL_INFO, "Finished turning around the corner - invoking next action " + self.next_action.getActionName())
else:
log(LOG_LEVEL_INFO, "Finishing turning - no next action spectified.")
return self.next_action
def execute(self):
state = self.rover.getRoverState()
heading = state.heading.heading
last_heading = self.last_heading
self.last_heading = heading
log(LOG_LEVEL_INFO, "Turning speed={:04d} h={:07.3f} lh={:07.3f} dh={:07.3f} rh={:07.3f} e={:07.3f}"
.format(self.speed, heading, last_heading, angleDiference(heading, last_heading), self.requested_heading, self.error))
def getActionName(self):
return "Turn-Around-Corner"
class DriverForwardForTimeAction(Action):
def __init__(self, agent, time, speed, next_action):
super(DriverForwardForTimeAction, self).__init__(agent)
self.time = time
self.speed = speed
self.next_action = next_action
def start(self):
self.rover.command(pyroslib.publish, self.speed, 0)
# pyroslib.publish("move/drive", "0 " + str(self.speed))
log(LOG_LEVEL_INFO, "Going forward for " + str(self.time) + " ticks.")
def end(self):
pass
def next(self):
if self.time > 0:
self.time -= 1
log(LOG_LEVEL_INFO, "Going forward for " + str(self.time) + " ticks.")
return self
return self.next_action
if __name__ == "__main__":
from rover import Radar, RoverState
radar_values = {0: 10, 45: SQRT2 * 10, 90: 10, 135: SQRT2 * 10, 180: 10, 225: SQRT2 * 10, 270: 10, 315: SQRT2 * 10}
radar_last_values = {0: 10, 45: SQRT2 * 10, 90: 10, 135: SQRT2 * 10, 180: 10, 225: SQRT2 * 10, 270: 10, 315: SQRT2 * 10}
radar_status = {0: 0, 45: 0, 90: 0, 135: 0, 180: 0, 225: 0, 270: 0, 315: 0}
attitude = MazeAttitude()
radar = Radar(0, radar_values, radar_status, Radar(0, radar_last_values, radar_status))
state = RoverState(None, None, None, radar, None, None)
def printWallLines(a):
if attitude.lines[a].angle is None:
print("{:3d} -> point too far - not calculated".format(a))
else:
angle = int(attitude.lines[a].angle * 180 / math.pi)
point = attitude.points[a]
if point is None:
print("{:3d} -> line at {:3d} angle".format(a, angle))
else:
if point == MazeAttitude.LEFT_WALL:
wall = "left wall"
elif point == MazeAttitude.RIGHT_WALL:
wall = "right wall"
elif point == MazeAttitude.FRONT_WALL:
wall = "front wall"
elif point == MazeAttitude.BACK_WALL:
wall = "back wall"
else:
wall = "no wall"
print("{:3d} -> line at {:3d} angle belogs to {:s}".format(a, angle, wall))
def printWall(w):
if w.angle is None:
print("Wall {:3d} -> is too far - not calculated".format(w.ds_angle))
else:
if w.distance is None:
print("Wall {:3d} -> has angle {:3d} but is too far - distance not calculated".format(w.ds_angle, int(w.angle * 180 / math.pi)))
else:
print("Wall {:3d} -> has angle {:3d} and is at {:3d}".format(w.ds_angle, int(w.angle * 180 / math.pi), w.distance))
def printWalls():
for p in attitude.points:
printWallLines(p)
for w in attitude.walls:
printWall(w)
print("----------------------------------------------------------")
# attitude.calculate(state)
# printWalls()
#
# state.radar.radar[0] = 5
# state.radar.radar[45] = SQRT2 * 5 * 0.9
# state.radar.radar[315] = SQRT2 * 17
# state.radar.radar[270] = SQRT2 * 13
# state.radar.radar[225] = SQRT2 * 12
# attitude.calculate(state)
# printWalls()
state.radar.radar[180] = 50
state.radar.radar[315] = 30
attitude.calculate(state)
printWalls()
| 43.75
| 238
| 0.600031
| 4,001
| 32,025
| 4.6001
| 0.07873
| 0.02934
| 0.022005
| 0.015648
| 0.627058
| 0.537517
| 0.488943
| 0.467264
| 0.42733
| 0.422766
| 0
| 0.051095
| 0.294145
| 32,025
| 731
| 239
| 43.80985
| 0.763106
| 0.040031
| 0
| 0.464991
| 0
| 0.016158
| 0.060876
| 0.001889
| 0
| 0
| 0
| 0.001368
| 0
| 1
| 0.08079
| false
| 0.003591
| 0.016158
| 0.008977
| 0.195691
| 0.023339
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6afe91d71e827ccc78b53873ca9a15887ff25298
| 5,550
|
py
|
Python
|
All_Program.py
|
TheoSaify/Yolo-Detector
|
f1ac387370982de323a4fc09109c57736b8ce8d6
|
[
"Apache-2.0"
] | null | null | null |
All_Program.py
|
TheoSaify/Yolo-Detector
|
f1ac387370982de323a4fc09109c57736b8ce8d6
|
[
"Apache-2.0"
] | null | null | null |
All_Program.py
|
TheoSaify/Yolo-Detector
|
f1ac387370982de323a4fc09109c57736b8ce8d6
|
[
"Apache-2.0"
] | null | null | null |
import cv2
from cv2 import *
import numpy as np
from matplotlib import pyplot as plt
###############################SIFT MATCH Function#################################
def SIFTMATCH(img1,img2):
# Initiate SIFT detector
sift = cv2.xfeatures2d.SIFT_create()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1,None)
kp2, des2 = sift.detectAndCompute(img2,None)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks = 50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1,des2,k=2)
# store all the good matches as per Lowe's ratio test.
good = []
for m,n in matches:
if m.distance < 0.7*n.distance:
good.append(m)
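# The 0.7 factor above is Lowe's ratio test: a match is kept only when its best
# descriptor distance is clearly smaller than the second-best one, which filters
# out ambiguous correspondences before the homography is estimated.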
if len(good)>MIN_MATCH_COUNT:
src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)
matchesMask = mask.ravel().tolist()
h,w = img1.shape
pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
dst = cv2.perspectiveTransform(pts,M)
img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA)
else:
print("Not enough matches are found - %d/%d" % (len(good),MIN_MATCH_COUNT))
matchesMask = None
draw_params = dict(matchColor = (0,255,0), # draw matches in green color
singlePointColor = None,
matchesMask = matchesMask, # draw only inliers
flags = 2)
img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params)
cv2.moveWindow('output', 150, 150) # Move the output window to (150, 150)
cv2.imshow('output',img3)
cv2.waitKey(0) # waitKey(0) waits indefinitely for a key press
cv2.destroyAllWindows() #cv2.destroyAllWindows() simply destroys all the windows we created
###################################################################################################
#################################Function#########################
def CercleDetection(img1):
# Read Image
raw_image = cv2.imread(img1)
# Bilateral filtering is a good way to preserve edges: it is a non-linear filter that helps reduce noise.
# Parameters: the image, the diameter of the pixel neighbourhood, sigmaColor (filter sigma in the color space)
# and sigmaSpace (filter sigma in the coordinate space).
bilateral_filtered_image = cv2.bilateralFilter(raw_image, 5, 175, 175)
# Canny edge detector to detect edges in the image. It takes 3 parameters: the image, the lower threshold and the upper threshold.
edge_detected_image = cv2.Canny(bilateral_filtered_image, 75, 200)
# Find Contours
_, contours, hierarchy = cv2.findContours(edge_detected_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
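# Note: the three-value unpacking above assumes the OpenCV 3.x API. In OpenCV 4.x,
# cv2.findContours returns only (contours, hierarchy), so the call would become
# "contours, hierarchy = cv2.findContours(...)".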
contour_list = []
cX, cY = 0, 0 # default centroid in case no contour passes the filter below
for contour in contours:
approx = cv2.approxPolyDP(contour,0.01*cv2.arcLength(contour,True),True)
area = cv2.contourArea(contour)
if ((len(approx) > 8) & (len(approx) < 23) & (area > 50000) ):
contour_list.append(contour)
print("area %.3f"%(area))
M = cv2.moments(contour)
# calculate x,y coordinate of center
if M["m00"] != 0:
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
else:
cX, cY = 0, 0
cv2.circle(raw_image, (cX, cY), 5, (255, 255, 255), -1)
cv2.putText(raw_image, "centroid", (cX - 25, cY - 25),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
# Draw Contours of circles
cv2.drawContours(raw_image, contour_list, -1, (0, 255, 0), 3)
# Display Images
cv2.imshow("Objects Detected",raw_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
return cX,cY
############################################################
###########################MAIN#############################
MIN_MATCH_COUNT = 10
e1 = cv2.getTickCount()
# # initialize the camera
# cam = VideoCapture(0) # 0 -> index of camera
# s, img1 = cam.read()
# ret = cam.set(3,1920);
# ret = cam.set(4,1080);
# if s: # frame captured without any errors
# cv2.namedWindow("output", cv2.WINDOW_NORMAL)
# cv2.imshow("cam-test",img1)
# waitKey(0)
# destroyWindow("cam-test")
# imwrite("Scene.jpg",img1) #save image
# del(cam)
# Scene image in Grayscale
# imgray = cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY)
imgray = cv2.imread('Scene.jpg', 0) # queryImage
# Reference Piece Image
img1 = cv2.imread('img3.jpg',0) # queryImage
# SIFT algorithm for object detection
SIFTMATCH(img1, imgray)
# Reference image
cX, cY = CercleDetection('img3.jpg')
print('cX = %.3f , cY =%.3f' % (cX, cY))
# Webcam image
cX2, cY2 = CercleDetection('img3.jpg')
print('cX2 = %.3f , cY2 =%.3f' % (cX2, cY2))
deltaX = (cX2-cX)
deltaY = -(cY2 - cY)
# Write X and Y values to File
file = open("values.txt", "w")
file.write("%.3f \n" % deltaX)
file.write("%.3f \n" % deltaY)
file.close()
#Calculate time of execution
e2 = cv2.getTickCount()
time = (e2 - e1)/ cv2.getTickFrequency()
print('time needed to execute')
print(time)
| 29.83871
| 127
| 0.571171
| 701
| 5,550
| 4.455064
| 0.39087
| 0.01537
| 0.012488
| 0.009606
| 0.03202
| 0.014089
| 0.014089
| 0.014089
| 0.014089
| 0
| 0
| 0.057466
| 0.250631
| 5,550
| 185
| 128
| 30
| 0.693436
| 0.248468
| 0
| 0.073171
| 0
| 0
| 0.060522
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02439
| false
| 0
| 0.04878
| 0
| 0.085366
| 0.073171
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6aff4d7639431aa38a4d3a68b963afee4300b218
| 3,479
|
py
|
Python
|
pyxon/utils.py
|
k-j-m/Pyxon
|
a7f9b3ce524f2441e952c47acd199dd4024d2322
|
[
"MIT"
] | null | null | null |
pyxon/utils.py
|
k-j-m/Pyxon
|
a7f9b3ce524f2441e952c47acd199dd4024d2322
|
[
"MIT"
] | null | null | null |
pyxon/utils.py
|
k-j-m/Pyxon
|
a7f9b3ce524f2441e952c47acd199dd4024d2322
|
[
"MIT"
] | null | null | null |
import pyxon.decode as pd
def unobjectify(obj):
"""
Turns a python object (must be a class instance)
into the corresponding JSON data.
Example:
>>> @sprop.a # sprop annotations are needed to tell the
>>> @sprop.b # unobjectify function what parameters need
>>> @sprop.c # to be written out.
>>> class Baz(object):
>>>     def __init__(self, a, b, c):
>>>         self.a = a
>>>         self.b = b
>>>         self.c = c
>>>
>>> baz = Baz(a=1, b=2, c='three')
>>> unobjectify(baz)
{ 'a':1, 'b':2, 'c':'three' }
"""
cls = obj.__class__
# Create empty data
data = {}
sprops,cprops = _get_registered_props(cls)
# Add simple properties
for p in sprops:
data[p]=getattr(obj,p)
# Add calculated data
for p in cprops:
f2 = cprops[p][1]
data[p]=f2(getattr(obj,p))
data = pd.add_type_property(data, cls)
return data
def _get_registered_props(cls):
"""
Returns all of the registered properties for a given class.
Recursively calls up to parent classes that are inherited from.
"""
sprops = pd.class_sprops.get(cls,{}) # [name]
cprops = pd.class_cprops.get(cls,{}) # {name:(fn, inv_fn)}
if cls in pd.conc_to_abstract: # {ConcreteClass: (AbstractClass, _)}
parent_cls = pd.conc_to_abstract[cls][0]
parent_sprops, parent_cprops = _get_registered_props(parent_cls)
sprops = list(set(sprops).union(set(parent_sprops)))
cprops2 = parent_cprops.copy()
cprops2.update(cprops)
cprops = cprops2
return sprops,cprops
def obj(cls):
"""
Helper function returns a closure turning objectify into a
single argument function. This cuts down the amount of code
needed in class annotations by removing the need to write
lambda functions.
"""
return lambda d: objectify(d, cls)
def objectify(data, cls):
"""
Function takes JSON data and a target class as arguments
and returns an instance of the class created using the
JSON data.
I'm not sure whether it is a great idea to keep (un)objectify
separate from the decode module, since they need to access
some of the module-level parameters.
"""
# Create empty class
concrete_cls = pd.conc2(data, cls)
obj = concrete_cls()
sprops,cprops = _get_registered_props(cls)
# Add simple properties from data
for p in sprops:
setattr(obj, p, data[p])
# Add calculated properties from data
for p in cprops:
f1 = cprops[p][0]
setattr(obj, p, f1(data[p]))
return obj
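# Minimal usage sketch (hypothetical class, mirroring the unobjectify docstring above):
# with the sprop annotations applied, objectify rebuilds an instance from JSON data and
# unobjectify writes it back out.
#
#     @sprop.x
#     @sprop.y
#     class Point(object):
#         def __init__(self, x=0, y=0):
#             self.x = x
#             self.y = y
#
#     p = objectify({'x': 1, 'y': 2}, Point)   # p.x == 1, p.y == 2
#     unobjectify(p)                           # -> {'x': 1, 'y': 2} plus the added type property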
def transform_map(kfun=lambda x: x, vfun=lambda x: x):
"""
Function that takes two functions as arguments and returns
a function that applies those functions over all of the
keys and values in a map and returns the transformed version
of the map.
kfun: function applied to all keys (default identity)
vfun: function applied to all values (default identity)
(k -> k') -> (v -> v') -> ((k, v) -> (k', v'))
"""
return lambda dct: dict([(kfun(k),vfun(v)) for k,v in dct.items()])
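# Example (directly from the definition above): transform_map can build a decoder for a
# JSON map whose values need converting, e.g.
#     decode_scores = transform_map(vfun=int)
#     decode_scores({'alice': '3', 'bob': '5'})   # -> {'alice': 3, 'bob': 5}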
def transform_list(item_decoder=lambda x: x):
return lambda lst: map(item_decoder, lst)
def identity(x):
"""
Identity function is needed when performing transformations
on maps where some operation is needed on either the keys
or values, but not both.
"""
return x
| 28.284553
| 72
| 0.627479
| 493
| 3,479
| 4.342799
| 0.348884
| 0.011677
| 0.033629
| 0.033629
| 0.088744
| 0.078468
| 0.060719
| 0.048575
| 0.048575
| 0
| 0
| 0.005892
| 0.268181
| 3,479
| 122
| 73
| 28.516393
| 0.835035
| 0.50733
| 0
| 0.146341
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.170732
| false
| 0
| 0.02439
| 0.02439
| 0.365854
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6affc41b95b69a262ac3e3eb689401cbbc182548
| 19,112
|
py
|
Python
|
AxonDeepSeg/segment.py
|
sophie685/newfileplzworklord
|
fbbb03c44dc9e4b0409364b49265f453ac80d3c0
|
[
"MIT"
] | null | null | null |
AxonDeepSeg/segment.py
|
sophie685/newfileplzworklord
|
fbbb03c44dc9e4b0409364b49265f453ac80d3c0
|
[
"MIT"
] | 8
|
2020-09-26T00:42:19.000Z
|
2022-02-10T00:41:55.000Z
|
AxonDeepSeg/segment.py
|
sophie685/newfileplzworklord
|
fbbb03c44dc9e4b0409364b49265f453ac80d3c0
|
[
"MIT"
] | null | null | null |
# Segmentation script
# -------------------
# This script lets the user automatically segment one or many images based on the default segmentation models: SEM or
# TEM.
#
# Maxime Wabartha - 2017-08-30
# Imports
import sys
from pathlib import Path
import json
import argparse
from argparse import RawTextHelpFormatter
from tqdm import tqdm
import pkg_resources
import AxonDeepSeg
import AxonDeepSeg.ads_utils as ads
from AxonDeepSeg.apply_model import axon_segmentation
from AxonDeepSeg.ads_utils import convert_path
# Global variables
SEM_DEFAULT_MODEL_NAME = "default_SEM_model_v1"
TEM_DEFAULT_MODEL_NAME = "default_TEM_model_v1"
MODELS_PATH = pkg_resources.resource_filename('AxonDeepSeg', 'models')
MODELS_PATH = Path(MODELS_PATH)
default_SEM_path = MODELS_PATH / SEM_DEFAULT_MODEL_NAME
default_TEM_path = MODELS_PATH / TEM_DEFAULT_MODEL_NAME
default_overlap = 25
# Definition of the functions
def segment_image(path_testing_image, path_model,
overlap_value, config, resolution_model,
acquired_resolution = None, verbosity_level=0):
'''
Segment the image located at the path_testing_image location.
:param path_testing_image: the path of the image to segment.
:param path_model: where to access the model
:param overlap_value: the number of pixels to be used for overlap when doing prediction. A higher value means fewer
border effects but more time to perform the segmentation.
:param config: dict containing the configuration of the network
:param resolution_model: the resolution the model was trained on.
:param verbosity_level: Level of verbosity. The higher, the more information is given about the segmentation
process.
:return: Nothing.
'''
# If string, convert to Path objects
path_testing_image = convert_path(path_testing_image)
path_model = convert_path(path_model)
if path_testing_image.exists():
# Extracting the image name and its folder path from the total path.
path_parts = path_testing_image.parts
acquisition_name = Path(path_parts[-1])
path_acquisition = Path(*path_parts[:-1])
# Get type of model we are using
selected_model = path_model.name
# Read image
img = ads.imread(str(path_testing_image))
# Generate tmp file
fp = open(path_acquisition / '__tmp_segment__.png', 'wb+')
img_name_original = acquisition_name.stem
if selected_model == "default_TEM_model_v1":
ads.imwrite(fp,255-img, format='png')
else:
ads.imwrite(fp, img, format='png')
acquisition_name = Path(fp.name).name
segmented_image_name = img_name_original + '_seg-axonmyelin' + '.png'
# Performing the segmentation
axon_segmentation(path_acquisitions_folders=path_acquisition, acquisitions_filenames=[acquisition_name],
path_model_folder=path_model, config_dict=config, ckpt_name='model',
inference_batch_size=1, overlap_value=overlap_value,
segmentations_filenames=segmented_image_name,
resampled_resolutions=resolution_model, verbosity_level=verbosity_level,
acquired_resolution=acquired_resolution,
prediction_proba_activate=False, write_mode=True)
if verbosity_level >= 1:
print(("Image {0} segmented.".format(path_testing_image)))
# Remove temporary file used for the segmentation
fp.close()
(path_acquisition / '__tmp_segment__.png').unlink()
else:
print(("The path {0} does not exist.".format(path_testing_image)))
return None
def segment_folders(path_testing_images_folder, path_model,
overlap_value, config, resolution_model,
acquired_resolution = None,
verbosity_level=0):
'''
Segments the images contained in the image folders located in the path_testing_images_folder.
:param path_testing_images_folder: the folder where all image folders are located (the images to segment are located
in those image folders)
:param path_model: where to access the model.
:param overlap_value: the number of pixels to be used for overlap when doing prediction. A higher value means fewer
border effects but more time to perform the segmentation.
:param config: dict containing the configuration of the network
:param resolution_model: the resolution the model was trained on.
:param verbosity_level: Level of verbosity. The higher, the more information is given about the segmentation
process.
:return: Nothing.
'''
# If string, convert to Path objects
path_testing_images_folder = convert_path(path_testing_images_folder)
path_model = convert_path(path_model)
# Update list of images to segment by selecting only image files (not already segmented or not masks)
img_files = [file for file in path_testing_images_folder.iterdir() if (file.suffix.lower() in ('.png','.jpg','.jpeg','.tif','.tiff'))
and (not str(file).endswith(('_seg-axonmyelin.png','_seg-axon.png','_seg-myelin.png','mask.png')))]
# Pre-processing: convert to png if not already done and adapt to model contrast
for file_ in tqdm(img_files, desc="Segmentation..."):
print(path_testing_images_folder / file_)
try:
height, width, _ = ads.imread(str(path_testing_images_folder / file_)).shape
except:
try:
height, width = ads.imread(str(path_testing_images_folder / file_)).shape
except Exception as e:
raise e
image_size = [height, width]
minimum_resolution = config["trainingset_patchsize"] * resolution_model / min(image_size)
if acquired_resolution < minimum_resolution:
print("EXCEPTION: The size of one of the images ({0}x{1}) is too small for the provided pixel size ({2}).\n".format(height, width, acquired_resolution),
"The image size must be at least {0}x{0} after resampling to a resolution of {1} to create standard sized patches.\n".format(config["trainingset_patchsize"], resolution_model),
"One of the dimensions of the image has a size of {0} after resampling to that resolution.\n".format(round(acquired_resolution * min(image_size) / resolution_model)),
"Image file location: {0}".format(str(path_testing_images_folder / file_))
)
sys.exit(2)
selected_model = path_model.name
# Read image for conversion
img = ads.imread(str(path_testing_images_folder / file_))
# Generate tmpfile for segmentation pipeline
fp = open(path_testing_images_folder / '__tmp_segment__.png', 'wb+')
img_name_original = file_.stem
if selected_model == "default_TEM_model_v1":
ads.imwrite(fp,255-img, format='png')
else:
ads.imwrite(fp,img, format='png')
acquisition_name = Path(fp.name).name
segmented_image_name = img_name_original + '_seg-axonmyelin' + '.png'
axon_segmentation(path_acquisitions_folders=path_testing_images_folder, acquisitions_filenames=[acquisition_name],
path_model_folder=path_model, config_dict=config, ckpt_name='model',
inference_batch_size=1, overlap_value=overlap_value,
segmentations_filenames=[segmented_image_name],
acquired_resolution=acquired_resolution,
verbosity_level=verbosity_level,
resampled_resolutions=resolution_model, prediction_proba_activate=False,
write_mode=True)
if verbosity_level >= 1:
tqdm.write("Image {0} segmented.".format(str(path_testing_images_folder / file_)))
# Remove temporary file used for the segmentation
fp.close()
(path_testing_images_folder / '__tmp_segment__.png').unlink()
return None
def generate_default_parameters(type_acquisition, new_path):
'''
Generates the parameters used for segmentation for the default model corresponding to the acquisition type.
:param type_acquisition: String, the type of acquisition ("SEM" or "TEM") to get the parameters for.
:param new_path: Path to the model to use.
:return: the config dictionary.
'''
# If string, convert to Path objects
new_path = convert_path(new_path)
# Building the path of the requested model if it exists and was supplied, else we load the default model.
if type_acquisition == 'SEM':
if (new_path is not None) and new_path.exists():
path_model = new_path
else:
path_model = MODELS_PATH / SEM_DEFAULT_MODEL_NAME
elif type_acquisition == 'TEM':
if (new_path is not None) and new_path.exists():
path_model = new_path
else:
path_model = MODELS_PATH / TEM_DEFAULT_MODEL_NAME
path_config_file = path_model / 'config_network.json'
config = generate_config_dict(path_config_file)
return path_model, config
def generate_config_dict(path_to_config_file):
'''
Generates the dictionary version of the configuration file from the path where it is located.
:param path_to_config: relative path where the file config_network.json is located.
:return: dict containing the configuration of the network, or None if no configuration file was found at the
mentioned path.
'''
# If string, convert to Path objects
path_to_config_file = convert_path(path_to_config_file)
try:
with open(path_to_config_file, 'r') as fd:
config_network = json.loads(fd.read())
except:
raise ValueError("No configuration file available at this path.")
return config_network
def generate_resolution(type_acquisition, model_input_size):
'''
Generates the resolution to use related to the trained model.
:param type_acquisition: String, "SEM" or "TEM"
:param model_input_size: String or Int, the size of the input.
:return: Float, the resolution of the model.
'''
dict_size = {
"SEM":{
"512":0.1,
"256":0.2
},
"TEM":{
"512":0.01
}
}
return dict_size[str(type_acquisition)][str(model_input_size)]
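# For example, with the sizes registered above:
#     generate_resolution("SEM", 512)   # -> 0.1
#     generate_resolution("TEM", 512)   # -> 0.01
# The returned value is used elsewhere in this script as a pixel size in micrometers.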
# Main loop
def main(argv=None):
'''
Main loop.
:return: Exit code.
0: Success
2: Invalid argument value
3: Missing value or file
'''
print(('AxonDeepSeg v.{}'.format(AxonDeepSeg.__version__)))
ap = argparse.ArgumentParser(formatter_class=RawTextHelpFormatter)
requiredName = ap.add_argument_group('required arguments')
# Setting the arguments of the segmentation
requiredName.add_argument('-t', '--type', required=True, choices=['SEM','TEM'], help='Type of acquisition to segment. \n'+
'SEM: scanning electron microscopy samples. \n'+
'TEM: transmission electron microscopy samples. ')
requiredName.add_argument('-i', '--imgpath', required=True, nargs='+', help='Path to the image to segment or path to the folder \n'+
'where the image(s) to segment is/are located.')
ap.add_argument("-m", "--model", required=False, help='Folder where the model is located. \n'+
'The default SEM model path is: \n'+str(default_SEM_path)+'\n'+
'The default TEM model path is: \n'+str(default_TEM_path)+'\n')
ap.add_argument('-s', '--sizepixel', required=False, help='Pixel size of the image(s) to segment, in micrometers. \n'+
'If no pixel size is specified, a pixel_size_in_micrometer.txt \n'+
'file needs to be added to the image folder path. The pixel size \n'+
'in that file will be used for the segmentation.',
default=None)
ap.add_argument('-v', '--verbose', required=False, type=int, choices=list(range(0,4)), help='Verbosity level. \n'+
'0 (default) : Displays the progress bar for the segmentation. \n'+
'1: Also displays the path of the image(s) being segmented. \n'+
'2: Also displays the information about the prediction step \n'+
' for the segmentation of current sample. \n'+
'3: Also displays the patch number being processed in the current sample.',
default=0)
ap.add_argument('-o', '--overlap', required=False, type=int, help='Overlap value (in pixels) of the patches when doing the segmentation. \n'+
'Higher values of overlap can improve the segmentation at patch borders, \n'+
'but also increase the segmentation time. \n'+
'Default value: '+str(default_overlap)+'\n'+
'Recommended range of values: [10-100]. \n',
default=25)
ap._action_groups.reverse()
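# Illustrative invocations (paths are hypothetical; assumes this script is run directly
# rather than through a packaged console entry point):
#     python AxonDeepSeg/segment.py -t SEM -i path/to/image.png -s 0.1 -v 1
#     python AxonDeepSeg/segment.py -t TEM -i path/to/folder_with_pixel_size_file/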
# Processing the arguments
args = vars(ap.parse_args(argv))
type_ = str(args["type"])
verbosity_level = int(args["verbose"])
overlap_value = int(args["overlap"])
if args["sizepixel"] is not None:
psm = float(args["sizepixel"])
else:
psm = None
path_target_list = [Path(p) for p in args["imgpath"]]
new_path = Path(args["model"]) if args["model"] else None
# Preparing the arguments to axon_segmentation function
path_model, config = generate_default_parameters(type_, new_path)
resolution_model = generate_resolution(type_, config["trainingset_patchsize"])
# Tuple of valid file extensions
validExtensions = (
".jpeg",
".jpg",
".tif",
".tiff",
".png"
)
# Going through all paths passed into arguments
for current_path_target in path_target_list:
if not current_path_target.is_dir():
if current_path_target.suffix.lower() in validExtensions:
# Handle cases if no resolution is provided on the CLI
if psm is None:
# Check if a pixel size file exists, if so read it.
if (current_path_target.parent / 'pixel_size_in_micrometer.txt').exists():
resolution_file = open(current_path_target.parent / 'pixel_size_in_micrometer.txt', 'r')
psm = float(resolution_file.read())
else:
print("ERROR: No pixel size is provided, and there is no pixel_size_in_micrometer.txt file in image folder. ",
"Please provide a pixel size (using argument -s), or add a pixel_size_in_micrometer.txt file ",
"containing the pixel size value."
)
sys.exit(3)
# Check that image size is large enough for given resolution to reach minimum patch size after resizing.
try:
height, width, _ = ads.imread(str(current_path_target)).shape
except:
try:
height, width = ads.imread(str(current_path_target)).shape
except Exception as e:
raise e
image_size = [height, width]
minimum_resolution = config["trainingset_patchsize"] * resolution_model / min(image_size)
if psm < minimum_resolution:
print("EXCEPTION: The size of one of the images ({0}x{1}) is too small for the provided pixel size ({2}).\n".format(height, width, psm),
"The image size must be at least {0}x{0} after resampling to a resolution of {1} to create standard sized patches.\n".format(config["trainingset_patchsize"], resolution_model),
"One of the dimensions of the image has a size of {0} after resampling to that resolution.\n".format(round(psm * min(image_size) / resolution_model)),
"Image file location: {0}".format(current_path_target)
)
sys.exit(2)
# Performing the segmentation over the image
segment_image(current_path_target, path_model, overlap_value, config,
resolution_model,
acquired_resolution=psm,
verbosity_level=verbosity_level)
print("Segmentation finished.")
else:
print("The path(s) specified is/are not image(s). Please update the input path(s) and try again.")
break
else:
# Handle cases if no resolution is provided on the CLI
if psm is None:
# Check if a pixel size file exists, if so read it.
if (current_path_target / 'pixel_size_in_micrometer.txt').exists():
resolution_file = open(current_path_target / 'pixel_size_in_micrometer.txt', 'r')
psm = float(resolution_file.read())
else:
print("ERROR: No pixel size is provided, and there is no pixel_size_in_micrometer.txt file in image folder. ",
"Please provide a pixel size (using argument -s), or add a pixel_size_in_micrometer.txt file ",
"containing the pixel size value."
)
sys.exit(3)
# Performing the segmentation over all folders in the specified folder containing acquisitions to segment.
segment_folders(current_path_target, path_model, overlap_value, config,
resolution_model,
acquired_resolution=psm,
verbosity_level=verbosity_level)
print("Segmentation finished.")
sys.exit(0)
# Calling the script
if __name__ == '__main__':
main()
| 45.075472
| 202
| 0.604071
| 2,238
| 19,112
| 4.950402
| 0.162198
| 0.024822
| 0.023017
| 0.03114
| 0.489936
| 0.465927
| 0.428739
| 0.399404
| 0.395884
| 0.394801
| 0
| 0.006514
| 0.317235
| 19,112
| 423
| 203
| 45.182033
| 0.842517
| 0.203118
| 0
| 0.341667
| 0
| 0.029167
| 0.21826
| 0.023843
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025
| false
| 0
| 0.045833
| 0
| 0.091667
| 0.045833
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed0176bb36b001f6300ef33bd058b934c1c2ff34
| 2,022
|
py
|
Python
|
spacy_transformers/tests/regression/test_spacy_issue6401.py
|
KennethEnevoldsen/spacy-transformers
|
fa39a94ba276ae3681d14a4b376ea50fadd574b3
|
[
"MIT"
] | null | null | null |
spacy_transformers/tests/regression/test_spacy_issue6401.py
|
KennethEnevoldsen/spacy-transformers
|
fa39a94ba276ae3681d14a4b376ea50fadd574b3
|
[
"MIT"
] | null | null | null |
spacy_transformers/tests/regression/test_spacy_issue6401.py
|
KennethEnevoldsen/spacy-transformers
|
fa39a94ba276ae3681d14a4b376ea50fadd574b3
|
[
"MIT"
] | null | null | null |
import pytest
from spacy.training.example import Example
from spacy.util import make_tempdir
from spacy import util
from thinc.api import Config
TRAIN_DATA = [
("I'm so happy.", {"cats": {"POSITIVE": 1.0, "NEGATIVE": 0.0}}),
("I'm so angry", {"cats": {"POSITIVE": 0.0, "NEGATIVE": 1.0}}),
]
cfg_string = """
[nlp]
lang = "en"
pipeline = ["transformer","textcat"]
[components]
[components.textcat]
factory = "textcat"
[components.textcat.model]
@architectures = "spacy.TextCatEnsemble.v2"
[components.textcat.model.tok2vec]
@architectures = "spacy-transformers.TransformerListener.v1"
grad_factor = 1.0
[components.textcat.model.tok2vec.pooling]
@layers = "reduce_mean.v1"
[components.transformer]
factory = "transformer"
"""
# Xfail this until the new spaCy rc is up.
@pytest.mark.xfail
def test_transformer_pipeline_textcat():
"""Test that a pipeline with just a transformer+textcat runs and trains properly.
This used to throw an error because of shape inference issues -
cf https://github.com/explosion/spaCy/issues/6401"""
orig_config = Config().from_str(cfg_string)
nlp = util.load_model_from_config(orig_config, auto_fill=True, validate=True)
assert nlp.pipe_names == ["transformer", "textcat"]
train_examples = []
for text, annotations in TRAIN_DATA:
train_examples.append(Example.from_dict(nlp.make_doc(text), annotations))
optimizer = nlp.initialize(get_examples=lambda: train_examples)
for i in range(2):
losses = {}
nlp.update(train_examples, sgd=optimizer, losses=losses)
doc = nlp("We're interested at underwater basket weaving.")
cats1 = doc.cats
# ensure IO goes OK
with make_tempdir() as d:
file_path = d / "trained_nlp"
nlp.to_disk(file_path)
nlp2 = util.load_model_from_path(file_path)
doc2 = nlp2("We're interested at underwater basket weaving.")
cats2 = doc2.cats
assert cats1 == cats2
| 29.304348
| 85
| 0.678536
| 263
| 2,022
| 5.087452
| 0.498099
| 0.050822
| 0.049327
| 0.043348
| 0.058296
| 0.058296
| 0.058296
| 0
| 0
| 0
| 0
| 0.017359
| 0.202275
| 2,022
| 68
| 86
| 29.735294
| 0.812151
| 0.124629
| 0
| 0
| 0
| 0
| 0.381766
| 0.125356
| 0
| 0
| 0
| 0
| 0.042553
| 1
| 0.021277
| false
| 0
| 0.106383
| 0
| 0.12766
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed0376f91f0c41a8fa993fc5f6223d8bbb5eb7cb
| 712
|
py
|
Python
|
hydra/client/repl.py
|
rpacholek/hydra
|
60e3c2eec5ab1fd1dde8e510baa5175173c66a6a
|
[
"MIT"
] | null | null | null |
hydra/client/repl.py
|
rpacholek/hydra
|
60e3c2eec5ab1fd1dde8e510baa5175173c66a6a
|
[
"MIT"
] | null | null | null |
hydra/client/repl.py
|
rpacholek/hydra
|
60e3c2eec5ab1fd1dde8e510baa5175173c66a6a
|
[
"MIT"
] | null | null | null |
import asyncio
from ..core.common.io import input
from .action_creator import ActionCreator
class REPL:
def __init__(self, action_queue, config, *args, **kwargs):
self.action_queue = action_queue
self.config = config
async def run(self):
await asyncio.sleep(1)
print("Insert command: ")
action_creator = ActionCreator()
while True:
input_data = await input("~> ")
if not input_data:
for task in asyncio.all_tasks():
task.cancel()
break
action = action_creator.parse(*input_data.split())
if action:
self.action_queue.push_action(action)
| 28.48
| 62
| 0.58427
| 79
| 712
| 5.063291
| 0.531646
| 0.11
| 0.1125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002088
| 0.327247
| 712
| 24
| 63
| 29.666667
| 0.832985
| 0
| 0
| 0
| 0
| 0
| 0.026685
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.15
| 0
| 0.25
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed037d47b7c87bc348767b05b7307204b77059ed
| 35,606
|
py
|
Python
|
train_dv3.py
|
drat/Neural-Voice-Cloning-With-Few-Samples
|
4febde43ccc143fc88d74d5fa0c5a117636778b4
|
[
"MIT"
] | 361
|
2018-08-17T14:37:29.000Z
|
2022-03-15T13:04:16.000Z
|
train_dv3.py
|
drat/Neural-Voice-Cloning-With-Few-Samples
|
4febde43ccc143fc88d74d5fa0c5a117636778b4
|
[
"MIT"
] | 22
|
2018-11-25T13:42:26.000Z
|
2020-04-29T05:16:25.000Z
|
train_dv3.py
|
drat/Neural-Voice-Cloning-With-Few-Samples
|
4febde43ccc143fc88d74d5fa0c5a117636778b4
|
[
"MIT"
] | 121
|
2018-08-30T03:53:09.000Z
|
2022-03-25T09:03:17.000Z
|
"""Trainining script for seq2seq text-to-speech synthesis model.
usage: train.py [options]
options:
--data-root=<dir> Directory containing preprocessed features.
--checkpoint-dir=<dir> Directory where to save model checkpoints [default: checkpoints].
--hparams=<params> Hyper parameters [default: ].
--checkpoint=<path> Restore model from checkpoint path if given.
--checkpoint-seq2seq=<path> Restore seq2seq model from checkpoint path.
--checkpoint-postnet=<path> Restore postnet model from checkpoint path.
--train-seq2seq-only Train only seq2seq model.
--train-postnet-only Train only postnet model.
--restore-parts=<path> Restore part of the model.
--log-event-path=<name> Log event path.
--reset-optimizer Reset optimizer.
--load-embedding=<path> Load embedding from checkpoint.
--speaker-id=<N> Use a specific speaker of the data, in the case of multi-speaker datasets.
-h, --help Show this help message and exit
"""
from docopt import docopt
import sys
from os.path import dirname, join
from tqdm import tqdm, trange
from datetime import datetime
# The deepvoice3 model
from dv3.deepvoice3_pytorch import frontend, builder
import dv3.audio
import dv3.lrschedule
import torch
from torch.utils import data as data_utils
from torch.autograd import Variable
from torch import nn
from torch import optim
import torch.backends.cudnn as cudnn
from torch.utils import data as data_utils
from torch.utils.data.sampler import Sampler
import numpy as np
from numba import jit
from nnmnkwii.datasets import FileSourceDataset, FileDataSource
from os.path import join, expanduser
import random
import librosa.display
from matplotlib import pyplot as plt
import sys
import os
from tensorboardX import SummaryWriter
from matplotlib import cm
from warnings import warn
from dv3.hparams import hparams, hparams_debug_string
fs = hparams.sample_rate
global_step = 0
global_epoch = 0
use_cuda = torch.cuda.is_available()
if use_cuda:
cudnn.benchmark = False
_frontend = None # to be set later
def _pad(seq, max_len, constant_values=0):
return np.pad(seq, (0, max_len - len(seq)),
mode='constant', constant_values=constant_values)
def _pad_2d(x, max_len, b_pad=0):
x = np.pad(x, [(b_pad, max_len - len(x) - b_pad), (0, 0)],
mode="constant", constant_values=0)
return x
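# Illustrative behaviour of the padding helpers above:
#     _pad(np.array([1, 2, 3]), 5)             # -> array([1, 2, 3, 0, 0])
#     _pad_2d(np.ones((2, 4)), 4, b_pad=1)     # -> shape (4, 4): one zero frame before
#                                              #    and one after, feature axis unchanged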
def plot_alignment(alignment, path, info=None):
fig, ax = plt.subplots()
im = ax.imshow(
alignment,
aspect='auto',
origin='lower',
interpolation='none')
fig.colorbar(im, ax=ax)
xlabel = 'Decoder timestep'
if info is not None:
xlabel += '\n\n' + info
plt.xlabel(xlabel)
plt.ylabel('Encoder timestep')
plt.tight_layout()
plt.savefig(path, format='png')
plt.close()
class TextDataSource(FileDataSource):
def __init__(self, data_root, speaker_id=None):
self.data_root = data_root
self.speaker_ids = None
self.multi_speaker = False
# If not None, filter by speaker_id
self.speaker_id = speaker_id
def collect_files(self):
meta = join(self.data_root, "train.txt")
with open(meta, "rb") as f:
lines = f.readlines()
l = lines[0].decode("utf-8").split("|")
assert len(l) == 4 or len(l) == 5
self.multi_speaker = len(l) == 5
texts = list(map(lambda l: l.decode("utf-8").split("|")[3], lines))
if self.multi_speaker:
speaker_ids = list(map(lambda l: int(l.decode("utf-8").split("|")[-1]), lines))
# Filter by speaker_id
# using multi-speaker dataset as a single speaker dataset
if self.speaker_id is not None:
indices = np.array(speaker_ids) == self.speaker_id
texts = list(np.array(texts)[indices])
self.multi_speaker = False
return texts
return texts, speaker_ids
else:
return texts
def collect_features(self, *args):
if self.multi_speaker:
text, speaker_id = args
else:
text = args[0]
seq = _frontend.text_to_sequence(text, p=hparams.replace_pronunciation_prob)
if self.multi_speaker:
return np.asarray(seq, dtype=np.int32), int(speaker_id)
else:
return np.asarray(seq, dtype=np.int32)
class _NPYDataSource(FileDataSource):
def __init__(self, data_root, col, speaker_id=None):
self.data_root = data_root
self.col = col
self.frame_lengths = []
self.speaker_id = speaker_id
def collect_files(self):
meta = join(self.data_root, "train.txt")
with open(meta, "rb") as f:
lines = f.readlines()
l = lines[0].decode("utf-8").split("|")
assert len(l) == 4 or len(l) == 5
multi_speaker = len(l) == 5
self.frame_lengths = list(
map(lambda l: int(l.decode("utf-8").split("|")[2]), lines))
paths = list(map(lambda l: l.decode("utf-8").split("|")[self.col], lines))
paths = list(map(lambda f: join(self.data_root, f), paths))
if multi_speaker and self.speaker_id is not None:
speaker_ids = list(map(lambda l: int(l.decode("utf-8").split("|")[-1]), lines))
# Filter by speaker_id
# using multi-speaker dataset as a single speaker dataset
indices = np.array(speaker_ids) == self.speaker_id
paths = list(np.array(paths)[indices])
self.frame_lengths = list(np.array(self.frame_lengths)[indices])
# aha, need to cast numpy.int64 to int
self.frame_lengths = list(map(int, self.frame_lengths))
return paths
def collect_features(self, path):
return np.load(path)
class MelSpecDataSource(_NPYDataSource):
def __init__(self, data_root, speaker_id=None):
super(MelSpecDataSource, self).__init__(data_root, 1, speaker_id)
class LinearSpecDataSource(_NPYDataSource):
def __init__(self, data_root, speaker_id=None):
super(LinearSpecDataSource, self).__init__(data_root, 0, speaker_id)
class PartialyRandomizedSimilarTimeLengthSampler(Sampler):
"""Partially randmoized sampler
1. Sort by lengths
2. Pick a small patch and randomize it
3. Permute mini-batches
"""
def __init__(self, lengths, batch_size=16, batch_group_size=None,
permutate=True):
self.lengths, self.sorted_indices = torch.sort(torch.LongTensor(lengths))
self.batch_size = batch_size
if batch_group_size is None:
batch_group_size = min(batch_size * 32, len(self.lengths))
if batch_group_size % batch_size != 0:
batch_group_size -= batch_group_size % batch_size
self.batch_group_size = batch_group_size
assert batch_group_size % batch_size == 0
self.permutate = permutate
def __iter__(self):
indices = self.sorted_indices.clone()
batch_group_size = self.batch_group_size
s, e = 0, 0
for i in range(len(indices) // batch_group_size):
s = i * batch_group_size
e = s + batch_group_size
random.shuffle(indices[s:e])
# Permutate batches
if self.permutate:
perm = np.arange(len(indices[:e]) // self.batch_size)
random.shuffle(perm)
indices[:e] = indices[:e].view(-1, self.batch_size)[perm, :].view(-1)
# Handle last elements
s += batch_group_size
if s < len(indices):
random.shuffle(indices[s:])
return iter(indices)
def __len__(self):
return len(self.sorted_indices)
class PyTorchDataset(object):
def __init__(self, X, Mel, Y):
self.X = X
self.Mel = Mel
self.Y = Y
# alias
self.multi_speaker = X.file_data_source.multi_speaker
def __getitem__(self, idx):
if self.multi_speaker:
text, speaker_id = self.X[idx]
return text, self.Mel[idx], self.Y[idx], speaker_id
else:
return self.X[idx], self.Mel[idx], self.Y[idx]
def __len__(self):
return len(self.X)
def sequence_mask(sequence_length, max_len=None):
if max_len is None:
max_len = sequence_length.data.max()
batch_size = sequence_length.size(0)
seq_range = torch.arange(0, max_len).long()
seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)
seq_range_expand = Variable(seq_range_expand)
if sequence_length.is_cuda:
seq_range_expand = seq_range_expand.cuda()
seq_length_expand = sequence_length.unsqueeze(1) \
.expand_as(seq_range_expand)
return (seq_range_expand < seq_length_expand).float()
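# Illustrative behaviour: for lengths [2, 4] and max_len=4 the mask above is
#     [[1., 1., 0., 0.],
#      [1., 1., 1., 1.]]
# i.e. 1 for valid timesteps and 0 for padding; it is used to weight the losses below.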
class MaskedL1Loss(nn.Module):
def __init__(self):
super(MaskedL1Loss, self).__init__()
self.criterion = nn.L1Loss(size_average=False)
def forward(self, input, target, lengths=None, mask=None, max_len=None):
if lengths is None and mask is None:
raise RuntimeError("Should provide either lengths or mask")
# (B, T, 1)
if mask is None:
mask = sequence_mask(lengths, max_len).unsqueeze(-1)
# (B, T, D)
mask_ = mask.expand_as(input)
loss = self.criterion(input * mask_, target * mask_)
return loss / mask_.sum()
def collate_fn(batch):
"""Create batch"""
r = hparams.outputs_per_step
downsample_step = hparams.downsample_step
multi_speaker = len(batch[0]) == 4
# Lengths
input_lengths = [len(x[0]) for x in batch]
max_input_len = max(input_lengths)
target_lengths = [len(x[1]) for x in batch]
max_target_len = max(target_lengths)
if max_target_len % r != 0:
max_target_len += r - max_target_len % r
assert max_target_len % r == 0
if max_target_len % downsample_step != 0:
max_target_len += downsample_step - max_target_len % downsample_step
assert max_target_len % downsample_step == 0
# Set 0 for zero beginning padding
# imitates initial decoder states
b_pad = r
max_target_len += b_pad * downsample_step
a = np.array([_pad(x[0], max_input_len) for x in batch], dtype=np.int)
x_batch = torch.LongTensor(a)
input_lengths = torch.LongTensor(input_lengths)
target_lengths = torch.LongTensor(target_lengths)
b = np.array([_pad_2d(x[1], max_target_len, b_pad=b_pad) for x in batch],
dtype=np.float32)
mel_batch = torch.FloatTensor(b)
c = np.array([_pad_2d(x[2], max_target_len, b_pad=b_pad) for x in batch],
dtype=np.float32)
y_batch = torch.FloatTensor(c)
# text positions
text_positions = np.array([_pad(np.arange(1, len(x[0]) + 1), max_input_len)
for x in batch], dtype=np.int)
text_positions = torch.LongTensor(text_positions)
max_decoder_target_len = max_target_len // r // downsample_step
# frame positions
s, e = 1, max_decoder_target_len + 1
# if b_pad > 0:
# s, e = s - 1, e - 1
frame_positions = torch.arange(s, e).long().unsqueeze(0).expand(
len(batch), max_decoder_target_len)
# done flags
done = np.array([_pad(np.zeros(len(x[1]) // r // downsample_step - 1),
max_decoder_target_len, constant_values=1)
for x in batch])
done = torch.FloatTensor(done).unsqueeze(-1)
if multi_speaker:
speaker_ids = torch.LongTensor([x[3] for x in batch])
else:
speaker_ids = None
return x_batch, input_lengths, mel_batch, y_batch, \
(text_positions, frame_positions), done, target_lengths, speaker_ids
def time_string():
return datetime.now().strftime('%Y-%m-%d %H:%M')
def save_alignment(path, attn):
plot_alignment(attn.T, path, info="{}, {}, step={}".format(
hparams.builder, time_string(), global_step))
def prepare_spec_image(spectrogram):
# [0, 1]
spectrogram = (spectrogram - np.min(spectrogram)) / (np.max(spectrogram) - np.min(spectrogram))
spectrogram = np.flip(spectrogram, axis=1) # flip against freq axis
return np.uint8(cm.magma(spectrogram.T) * 255)
def eval_model(global_step, writer, model, checkpoint_dir, ismultispeaker):
# hard-coded
texts = [
"Scientists at the CERN laboratory say they have discovered a new particle.",
"There's a way to measure the acute emotional intelligence that has never gone out of style.",
"President Trump met with other leaders at the Group of 20 conference.",
"Generative adversarial network or variational auto-encoder.",
"Please call Stella.",
"Some have accepted this as a miracle without any physical explanation.",
]
from dv3 import synthesis
synthesis._frontend = _frontend
eval_output_dir = join(checkpoint_dir, "eval")
os.makedirs(eval_output_dir, exist_ok=True)
# hard coded
speaker_ids = [0, 1, 10] if ismultispeaker else [None]
for speaker_id in speaker_ids:
speaker_str = "multispeaker{}".format(speaker_id) if speaker_id is not None else "single"
for idx, text in enumerate(texts):
signal, alignment, _, mel = synthesis.tts(
model, text, p=0, speaker_id=speaker_id, fast=False)
signal /= np.max(np.abs(signal))
# Alignment
path = join(eval_output_dir, "step{:09d}_text{}_{}_alignment.png".format(
global_step, idx, speaker_str))
save_alignment(path, alignment)
tag = "eval_averaged_alignment_{}_{}".format(idx, speaker_str)
writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step)
# Mel
writer.add_image("(Eval) Predicted mel spectrogram text{}_{}".format(idx, speaker_str),
prepare_spec_image(mel), global_step)
# Audio
path = join(eval_output_dir, "step{:09d}_text{}_{}_predicted.wav".format(
global_step, idx, speaker_str))
dv3.audio.save_wav(signal, path)
try:
writer.add_audio("(Eval) Predicted audio signal {}_{}".format(idx, speaker_str),
signal, global_step, sample_rate=fs)
except Exception as e:
warn(str(e))
pass
def save_states(global_step, writer, mel_outputs, linear_outputs, attn, mel, y,
input_lengths, checkpoint_dir=None):
print("Save intermediate states at step {}".format(global_step))
# idx = np.random.randint(0, len(input_lengths))
idx = min(1, len(input_lengths) - 1)
input_length = input_lengths[idx]
# Alignment
# Multi-hop attention
if attn is not None and attn.dim() == 4:
for i, alignment in enumerate(attn):
alignment = alignment[idx].cpu().data.numpy()
tag = "alignment_layer{}".format(i + 1)
writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step)
# save files as well for now
alignment_dir = join(checkpoint_dir, "alignment_layer{}".format(i + 1))
os.makedirs(alignment_dir, exist_ok=True)
path = join(alignment_dir, "step{:09d}_layer_{}_alignment.png".format(
global_step, i + 1))
save_alignment(path, alignment)
# Save averaged alignment
alignment_dir = join(checkpoint_dir, "alignment_ave")
os.makedirs(alignment_dir, exist_ok=True)
path = join(alignment_dir, "step{:09d}_alignment.png".format(global_step))
alignment = attn.mean(0)[idx].cpu().data.numpy()
save_alignment(path, alignment)
tag = "averaged_alignment"
writer.add_image(tag, np.uint8(cm.viridis(np.flip(alignment, 1).T) * 255), global_step)
# Predicted mel spectrogram
if mel_outputs is not None:
mel_output = mel_outputs[idx].cpu().data.numpy()
mel_output = prepare_spec_image(dv3.audio._denormalize(mel_output))
writer.add_image("Predicted mel spectrogram", mel_output, global_step)
# Predicted spectrogram
if linear_outputs is not None:
linear_output = linear_outputs[idx].cpu().data.numpy()
spectrogram = prepare_spec_image(dv3.audio._denormalize(linear_output))
writer.add_image("Predicted linear spectrogram", spectrogram, global_step)
# Predicted audio signal
signal = dv3.audio.inv_spectrogram(linear_output.T)
signal /= np.max(np.abs(signal))
path = join(checkpoint_dir, "step{:09d}_predicted.wav".format(
global_step))
try:
writer.add_audio("Predicted audio signal", signal, global_step, sample_rate=fs)
except Exception as e:
warn(str(e))
pass
dv3.audio.save_wav(signal, path)
# Target mel spectrogram
if mel_outputs is not None:
mel_output = mel[idx].cpu().data.numpy()
mel_output = prepare_spec_image(dv3.audio._denormalize(mel_output))
writer.add_image("Target mel spectrogram", mel_output, global_step)
# Target spectrogram
if linear_outputs is not None:
linear_output = y[idx].cpu().data.numpy()
spectrogram = prepare_spec_image(dv3.audio._denormalize(linear_output))
writer.add_image("Target linear spectrogram", spectrogram, global_step)
def logit(x, eps=1e-8):
return torch.log(x + eps) - torch.log(1 - x + eps)
def masked_mean(y, mask):
# (B, T, D)
mask_ = mask.expand_as(y)
return (y * mask_).sum() / mask_.sum()
def spec_loss(y_hat, y, mask, priority_bin=None, priority_w=0):
masked_l1 = MaskedL1Loss()
l1 = nn.L1Loss()
w = hparams.masked_loss_weight
# L1 loss
if w > 0:
assert mask is not None
l1_loss = w * masked_l1(y_hat, y, mask=mask) + (1 - w) * l1(y_hat, y)
else:
assert mask is None
l1_loss = l1(y_hat, y)
# Priority L1 loss
if priority_bin is not None and priority_w > 0:
if w > 0:
priority_loss = w * masked_l1(
y_hat[:, :, :priority_bin], y[:, :, :priority_bin], mask=mask) \
+ (1 - w) * l1(y_hat[:, :, :priority_bin], y[:, :, :priority_bin])
else:
priority_loss = l1(y_hat[:, :, :priority_bin], y[:, :, :priority_bin])
l1_loss = (1 - priority_w) * l1_loss + priority_w * priority_loss
# Binary divergence loss
if hparams.binary_divergence_weight <= 0:
binary_div = Variable(y.data.new(1).zero_())
else:
y_hat_logits = logit(y_hat)
z = -y * y_hat_logits + torch.log(1 + torch.exp(y_hat_logits))
if w > 0:
binary_div = w * masked_mean(z, mask) + (1 - w) * z.mean()
else:
binary_div = z.mean()
return l1_loss, binary_div
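# Summary of spec_loss above: l1_loss blends a masked and an unmasked L1 term using
# hparams.masked_loss_weight (optionally emphasising the low-frequency priority_bin
# columns via priority_w), and binary_div is a logistic divergence computed in logit
# space; the caller mixes the two with hparams.binary_divergence_weight.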
@jit(nopython=True)
def guided_attention(N, max_N, T, max_T, g):
W = np.zeros((max_N, max_T), dtype=np.float32)
for n in range(N):
for t in range(T):
W[n, t] = 1 - np.exp(-(n / N - t / T)**2 / (2 * g * g))
return W
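# The matrix above follows the guided-attention idea (as in Tachibana et al., 2017):
#     W[n, t] = 1 - exp(-((n/N - t/T)**2) / (2 * g**2))
# It penalises attention mass far from the diagonal n/N = t/T, with g controlling how
# quickly the penalty grows away from it.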
def guided_attentions(input_lengths, target_lengths, max_target_len, g=0.2):
B = len(input_lengths)
max_input_len = input_lengths.max()
W = np.zeros((B, max_target_len, max_input_len), dtype=np.float32)
for b in range(B):
W[b] = guided_attention(input_lengths[b], max_input_len,
target_lengths[b], max_target_len, g).T
return W
def train(model, data_loader, optimizer, writer,
init_lr=0.002,
checkpoint_dir=None, checkpoint_interval=None, nepochs=None,
clip_thresh=1.0,
train_seq2seq=True, train_postnet=True):
if use_cuda:
model = model.cuda()
linear_dim = model.linear_dim
r = hparams.outputs_per_step
downsample_step = hparams.downsample_step
current_lr = init_lr
binary_criterion = nn.BCELoss()
assert train_seq2seq or train_postnet
global global_step, global_epoch
while global_epoch < nepochs:
running_loss = 0.
for step, (x, input_lengths, mel, y, positions, done, target_lengths,
speaker_ids) \
in tqdm(enumerate(data_loader)):
model.train()
ismultispeaker = speaker_ids is not None
# Learning rate schedule
if hparams.lr_schedule is not None:
lr_schedule_f = getattr(dv3.lrschedule, hparams.lr_schedule)
current_lr = lr_schedule_f(
init_lr, global_step, **hparams.lr_schedule_kwargs)
for param_group in optimizer.param_groups:
param_group['lr'] = current_lr
optimizer.zero_grad()
# Used for Position encoding
text_positions, frame_positions = positions
# Downsample mel spectrogram
if downsample_step > 1:
mel = mel[:, 0::downsample_step, :].contiguous()
# Lengths
input_lengths = input_lengths.long().numpy()
decoder_lengths = target_lengths.long().numpy() // r // downsample_step
# Feed data
x, mel, y = Variable(x), Variable(mel), Variable(y)
text_positions = Variable(text_positions)
frame_positions = Variable(frame_positions)
done = Variable(done)
target_lengths = Variable(target_lengths)
speaker_ids = Variable(speaker_ids) if ismultispeaker else None
if use_cuda:
if train_seq2seq:
x = x.cuda()
text_positions = text_positions.cuda()
frame_positions = frame_positions.cuda()
if train_postnet:
y = y.cuda()
mel = mel.cuda()
done, target_lengths = done.cuda(), target_lengths.cuda()
speaker_ids = speaker_ids.cuda() if ismultispeaker else None
# Create mask if we use masked loss
if hparams.masked_loss_weight > 0:
# decoder output domain mask
decoder_target_mask = sequence_mask(
target_lengths / (r * downsample_step),
max_len=mel.size(1)).unsqueeze(-1)
if downsample_step > 1:
# spectrogram-domain mask
target_mask = sequence_mask(
target_lengths, max_len=y.size(1)).unsqueeze(-1)
else:
target_mask = decoder_target_mask
# shift mask
decoder_target_mask = decoder_target_mask[:, r:, :]
target_mask = target_mask[:, r:, :]
else:
decoder_target_mask, target_mask = None, None
# Apply model
if train_seq2seq and train_postnet:
mel_outputs, linear_outputs, attn, done_hat = model(
x, mel, speaker_ids=speaker_ids,
text_positions=text_positions, frame_positions=frame_positions,
input_lengths=input_lengths)
elif train_seq2seq:
assert speaker_ids is None
mel_outputs, attn, done_hat, _ = model.seq2seq(
x, mel,
text_positions=text_positions, frame_positions=frame_positions,
input_lengths=input_lengths)
# reshape
mel_outputs = mel_outputs.view(len(mel), -1, mel.size(-1))
linear_outputs = None
elif train_postnet:
assert speaker_ids is None
linear_outputs = model.postnet(mel)
mel_outputs, attn, done_hat = None, None, None
# Losses
w = hparams.binary_divergence_weight
# mel:
if train_seq2seq:
mel_l1_loss, mel_binary_div = spec_loss(
mel_outputs[:, :-r, :], mel[:, r:, :], decoder_target_mask)
mel_loss = (1 - w) * mel_l1_loss + w * mel_binary_div
# done:
if train_seq2seq:
done_loss = binary_criterion(done_hat, done)
# linear:
if train_postnet:
n_priority_freq = int(hparams.priority_freq / (fs * 0.5) * linear_dim)
linear_l1_loss, linear_binary_div = spec_loss(
linear_outputs[:, :-r, :], y[:, r:, :], target_mask,
priority_bin=n_priority_freq,
priority_w=hparams.priority_freq_weight)
linear_loss = (1 - w) * linear_l1_loss + w * linear_binary_div
# Combine losses
if train_seq2seq and train_postnet:
loss = mel_loss + linear_loss + done_loss
elif train_seq2seq:
loss = mel_loss + done_loss
elif train_postnet:
loss = linear_loss
# attention
if train_seq2seq and hparams.use_guided_attention:
soft_mask = guided_attentions(input_lengths, decoder_lengths,
attn.size(-2),
g=hparams.guided_attention_sigma)
soft_mask = Variable(torch.from_numpy(soft_mask))
soft_mask = soft_mask.cuda() if use_cuda else soft_mask
attn_loss = (attn * soft_mask).mean()
loss += attn_loss
if global_step > 0 and global_step % checkpoint_interval == 0:
save_states(
global_step, writer, mel_outputs, linear_outputs, attn,
mel, y, input_lengths, checkpoint_dir)
save_checkpoint(
model, optimizer, global_step, checkpoint_dir, global_epoch,
train_seq2seq, train_postnet)
if global_step > 0 and global_step % hparams.eval_interval == 0:
eval_model(global_step, writer, model, checkpoint_dir, ismultispeaker)
# Update
loss.backward()
if clip_thresh > 0:
grad_norm = torch.nn.utils.clip_grad_norm(
model.get_trainable_parameters(), clip_thresh)
optimizer.step()
# Logs
writer.add_scalar("loss", float(loss.data[0]), global_step)
if train_seq2seq:
writer.add_scalar("done_loss", float(done_loss.data[0]), global_step)
writer.add_scalar("mel loss", float(mel_loss.data[0]), global_step)
writer.add_scalar("mel_l1_loss", float(mel_l1_loss.data[0]), global_step)
writer.add_scalar("mel_binary_div_loss", float(mel_binary_div.data[0]), global_step)
if train_postnet:
writer.add_scalar("linear_loss", float(linear_loss.data[0]), global_step)
writer.add_scalar("linear_l1_loss", float(linear_l1_loss.data[0]), global_step)
writer.add_scalar("linear_binary_div_loss", float(
linear_binary_div.data[0]), global_step)
if train_seq2seq and hparams.use_guided_attention:
writer.add_scalar("attn_loss", float(attn_loss.data[0]), global_step)
if clip_thresh > 0:
writer.add_scalar("gradient norm", grad_norm, global_step)
writer.add_scalar("learning rate", current_lr, global_step)
global_step += 1
running_loss += loss.data[0]
averaged_loss = running_loss / (len(data_loader))
writer.add_scalar("loss (per epoch)", averaged_loss, global_epoch)
print("Loss: {}".format(running_loss / (len(data_loader))))
global_epoch += 1
def save_checkpoint(model, optimizer, step, checkpoint_dir, epoch,
train_seq2seq, train_postnet):
if train_seq2seq and train_postnet:
suffix = ""
m = model
elif train_seq2seq:
suffix = "_seq2seq"
m = model.seq2seq
elif train_postnet:
suffix = "_postnet"
m = model.postnet
checkpoint_path = join(
checkpoint_dir, "checkpoint_step{:09d}{}.pth".format(global_step, suffix))
optimizer_state = optimizer.state_dict() if hparams.save_optimizer_state else None
torch.save({
"state_dict": m.state_dict(),
"optimizer": optimizer_state,
"global_step": step,
"global_epoch": epoch,
}, checkpoint_path)
print("Saved checkpoint:", checkpoint_path)
def build_model():
model = getattr(builder, hparams.builder)(
n_speakers=hparams.n_speakers,
speaker_embed_dim=hparams.speaker_embed_dim,
n_vocab=_frontend.n_vocab,
embed_dim=hparams.text_embed_dim,
mel_dim=hparams.num_mels,
linear_dim=hparams.fft_size // 2 + 1,
r=hparams.outputs_per_step,
downsample_step=hparams.downsample_step,
padding_idx=hparams.padding_idx,
dropout=hparams.dropout,
kernel_size=hparams.kernel_size,
encoder_channels=hparams.encoder_channels,
decoder_channels=hparams.decoder_channels,
converter_channels=hparams.converter_channels,
use_memory_mask=hparams.use_memory_mask,
trainable_positional_encodings=hparams.trainable_positional_encodings,
force_monotonic_attention=hparams.force_monotonic_attention,
use_decoder_state_for_postnet_input=hparams.use_decoder_state_for_postnet_input,
max_positions=hparams.max_positions,
speaker_embedding_weight_std=hparams.speaker_embedding_weight_std,
freeze_embedding=hparams.freeze_embedding,
window_ahead=hparams.window_ahead,
window_backward=hparams.window_backward,
key_projection=hparams.key_projection,
value_projection=hparams.value_projection,
)
return model
def load_checkpoint(path, model, optimizer, reset_optimizer):
global global_step
global global_epoch
print("Load checkpoint from: {}".format(path))
checkpoint = torch.load(path)
model.load_state_dict(checkpoint["state_dict"])
if not reset_optimizer:
optimizer_state = checkpoint["optimizer"]
if optimizer_state is not None:
print("Load optimizer state from {}".format(path))
optimizer.load_state_dict(checkpoint["optimizer"])
global_step = checkpoint["global_step"]
global_epoch = checkpoint["global_epoch"]
return model
def _load_embedding(path, model):
state = torch.load(path)["state_dict"]
key = "seq2seq.encoder.embed_tokens.weight"
model.seq2seq.encoder.embed_tokens.weight.data = state[key]
# https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/3
def restore_parts(path, model):
print("Restore part of the model from: {}".format(path))
state = torch.load(path)["state_dict"]
model_dict = model.state_dict()
valid_state_dict = {k: v for k, v in state.items() if k in model_dict}
model_dict.update(valid_state_dict)
model.load_state_dict(model_dict)
if __name__ == "__main__":
args = docopt(__doc__)
print("Command line args:\n", args)
checkpoint_dir = args["--checkpoint-dir"]
checkpoint_path = args["--checkpoint"]
checkpoint_seq2seq_path = args["--checkpoint-seq2seq"]
checkpoint_postnet_path = args["--checkpoint-postnet"]
load_embedding = args["--load-embedding"]
checkpoint_restore_parts = args["--restore-parts"]
speaker_id = args["--speaker-id"]
speaker_id = int(speaker_id) if speaker_id is not None else None
data_root = args["--data-root"]
if data_root is None:
data_root = join(dirname(__file__), "data", "ljspeech")
log_event_path = args["--log-event-path"]
reset_optimizer = args["--reset-optimizer"]
# Which model to be trained
train_seq2seq = args["--train-seq2seq-only"]
train_postnet = args["--train-postnet-only"]
# train both if not specified
if not train_seq2seq and not train_postnet:
print("Training whole model")
train_seq2seq, train_postnet = True, True
if train_seq2seq:
print("Training seq2seq model")
elif train_postnet:
print("Training postnet model")
else:
assert False, "wrong args: specify --train-seq2seq-only or --train-postnet-only"
# Override hyper parameters
hparams.parse(args["--hparams"])
print(hparams_debug_string())
assert hparams.name == "deepvoice3"
# Presets
if hparams.preset is not None and hparams.preset != "":
preset = hparams.presets[hparams.preset]
import json
hparams.parse_json(json.dumps(preset))
print("Override hyper parameters with preset \"{}\": {}".format(
hparams.preset, json.dumps(preset, indent=4)))
_frontend = getattr(frontend, hparams.frontend)
os.makedirs(checkpoint_dir, exist_ok=True)
# Input dataset definitions
X = FileSourceDataset(TextDataSource(data_root, speaker_id))
Mel = FileSourceDataset(MelSpecDataSource(data_root, speaker_id))
Y = FileSourceDataset(LinearSpecDataSource(data_root, speaker_id))
# Prepare sampler
frame_lengths = Mel.file_data_source.frame_lengths
sampler = PartialyRandomizedSimilarTimeLengthSampler(
frame_lengths, batch_size=hparams.batch_size)
# Dataset and Dataloader setup
dataset = PyTorchDataset(X, Mel, Y)
data_loader = data_utils.DataLoader(
dataset, batch_size=hparams.batch_size,
num_workers=hparams.num_workers, sampler=sampler,
collate_fn=collate_fn, pin_memory=hparams.pin_memory)
print("dataloader_prepared")
# Model
model = build_model()
if use_cuda:
model = model.cuda()
optimizer = optim.Adam(model.get_trainable_parameters(),
lr=hparams.initial_learning_rate, betas=(
hparams.adam_beta1, hparams.adam_beta2),
eps=hparams.adam_eps, weight_decay=hparams.weight_decay)
if checkpoint_restore_parts is not None:
restore_parts(checkpoint_restore_parts, model)
# Load checkpoints
if checkpoint_postnet_path is not None:
load_checkpoint(checkpoint_postnet_path, model.postnet, optimizer, reset_optimizer)
if checkpoint_seq2seq_path is not None:
load_checkpoint(checkpoint_seq2seq_path, model.seq2seq, optimizer, reset_optimizer)
if checkpoint_path is not None:
load_checkpoint(checkpoint_path, model, optimizer, reset_optimizer)
# Load embedding
if load_embedding is not None:
print("Loading embedding from {}".format(load_embedding))
_load_embedding(load_embedding, model)
# Setup summary writer for tensorboard
if log_event_path is None:
log_event_path = "log/run-test" + str(datetime.now()).replace(" ", "_")
print("Los event path: {}".format(log_event_path))
writer = SummaryWriter(log_dir=log_event_path)
# Train!
try:
train(model, data_loader, optimizer, writer,
init_lr=hparams.initial_learning_rate,
checkpoint_dir=checkpoint_dir,
checkpoint_interval=hparams.checkpoint_interval,
nepochs=hparams.nepochs,
clip_thresh=hparams.clip_thresh,
train_seq2seq=train_seq2seq, train_postnet=train_postnet)
except KeyboardInterrupt:
save_checkpoint(
model, optimizer, global_step, checkpoint_dir, global_epoch,
train_seq2seq, train_postnet)
print("Finished")
sys.exit(0)
| 37.559072
| 102
| 0.63068
| 4,493
| 35,606
| 4.747162
| 0.123303
| 0.022036
| 0.008861
| 0.006329
| 0.29992
| 0.235313
| 0.180459
| 0.162736
| 0.140935
| 0.112664
| 0
| 0.011365
| 0.26855
| 35,606
| 947
| 103
| 37.598733
| 0.807595
| 0.074173
| 0
| 0.201175
| 0
| 0
| 0.061081
| 0.007974
| 0
| 0
| 0
| 0
| 0.017621
| 1
| 0.054332
| false
| 0.002937
| 0.045521
| 0.008811
| 0.148311
| 0.023495
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed04e3a19994480a40ef35eabbb8a7e09343ee2c
| 8,898
|
py
|
Python
|
audio_som64_u_grupo1.py
|
andremsouza/swine_sound_analysis
|
5583bf91b18e8ad2dcaccb30a94c134e2eab34a5
|
[
"MIT"
] | null | null | null |
audio_som64_u_grupo1.py
|
andremsouza/swine_sound_analysis
|
5583bf91b18e8ad2dcaccb30a94c134e2eab34a5
|
[
"MIT"
] | 1
|
2021-01-20T01:56:42.000Z
|
2021-01-20T01:56:42.000Z
|
audio_som64_u_grupo1.py
|
andremsouza/swine_sound_analysis
|
5583bf91b18e8ad2dcaccb30a94c134e2eab34a5
|
[
"MIT"
] | null | null | null |
# %% [markdown]
# # Testing python-som with audio dataset
# %% [markdown]
# # Imports
# %%
import matplotlib.pyplot as plt
# import librosa as lr
# import librosa.display as lrdisp
import numpy as np
import pandas as pd
import pickle
import seaborn as sns
import sklearn.preprocessing
from python_som import SOM
FILE_PREFIX = 'som64_u_grupo1'
# %% [markdown]
# # Loading dataset
# %%
df = pd.read_csv('features_means.csv', index_col=0, verbose=True)
df.index = pd.to_datetime(df.index)
df['rac'] = False
df.loc['2020-09-22':, 'rac'] = True # type: ignore
df.sort_index(inplace=True)
# %% [markdown]
# ## Checking for and dropping duplicates
# %%
# Resetting index for duplicate analysis
df.reset_index(inplace=True)
print("Duplicates by filename:",
df.duplicated(subset=['file_name']).value_counts(),
sep='\n')
df.drop_duplicates(subset=['file_name'], inplace=True)
print("Duplicates by (datetime, ala, grupo):",
df.duplicated(subset=['datetime', 'ala', 'grupo']).value_counts(),
sep='\n')
df.drop_duplicates(subset=['datetime', 'ala', 'grupo'], inplace=True)
# Rebuilding dataframe index
df.set_index('datetime', inplace=True)
# %%
# Filtering dataset by 'group'
df = df[df['grupo'] == 1]
# %%
# Dropping tail of dataset for class balancing
# tail_size = abs(
# len(df[df['rac'].astype(int) == 1]) - len(df[df['rac'].astype(int) == 0]))
# df.drop(df.tail(tail_size).index, inplace=True)
# %% [markdown]
# ## Visualizing distribution of sample dates
# %%
df_tmp = pd.DataFrame(df['file_name'].resample('1D').count())
df_tmp['count'] = df_tmp['file_name']
del df_tmp['file_name']
df_tmp['rac'] = False
df_tmp.loc['2020-09-22':, 'rac'] = True # type: ignore
plt.figure(figsize=(10, 10))
sns.set(style="whitegrid",
palette=sns.color_palette("muted", n_colors=6, desat=1.0))
sns.barplot(y=df_tmp.index, x=df_tmp['count'], hue=df_tmp['rac'])
plt.draw()
df_tmp = pd.DataFrame(df['file_name'].resample('1H').count())
df_tmp['count'] = df_tmp['file_name']
del df_tmp['file_name']
df_tmp['rac'] = False
df_tmp.loc['2020-09-22':, 'rac'] = True # type: ignore
df_tmp = df_tmp.reset_index()
df_tmp['hour'] = df_tmp['datetime'].dt.hour
plt.figure(figsize=(10, 10))
sns.set(style="whitegrid",
palette=sns.color_palette("muted", n_colors=6, desat=1.0))
sns.barplot(y=df_tmp['hour'], x=df_tmp['count'], hue=df_tmp['rac'], orient='h')
plt.draw()
# %%
df_melt = pd.melt(df, value_vars=['rac'], value_name='ractopamine')
plt.figure(figsize=(10, 10))
sns.set(style="whitegrid",
palette=sns.color_palette("muted", n_colors=6, desat=1.0))
ax = sns.countplot(data=df_melt, x='ractopamine', hue='ractopamine')
for p in ax.patches:
ax.annotate(f'\n{p.get_height()}', (p.get_x() + 0.2, p.get_height()),
ha='center',
va='top',
color='white',
size=18)
plt.draw()
# %%
# using sklearn's MinMaxScaler
scaler = sklearn.preprocessing.MinMaxScaler(feature_range=(0, 1))
df_train = df.iloc[:, 3:-1].copy()
df_train = scaler.fit_transform(df_train)
# %%
# Defining first element of SOM shape
# Second element will be assigned based on the ratio between the
# first two principal components of the train dataset
som_x: int = 64
try:
with open(f'./{FILE_PREFIX}.obj', 'rb') as f:
som = pickle.load(f)
except FileNotFoundError:
som = SOM(x=som_x,
y=None,
input_len=df_train.shape[1],
learning_rate=0.5,
neighborhood_radius=1.0,
neighborhood_function='gaussian',
cyclic_x=True,
cyclic_y=True,
data=df_train)
# Training SOM
som.weight_initialization(mode='linear', data=df_train)
som.train(data=df_train, mode='random', verbose=True)
with open(f'./{FILE_PREFIX}.obj', 'wb') as f:
pickle.dump(som, f)
# %%
som_x, som_y = som.get_shape()
print('SOM shape:', (som_x, som_y))
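# %%
# The y-dimension above was inferred by the library from y=None, following the
# comment before the constructor (ratio between the first two principal components).
# A sketch of that idea (an illustration only, not necessarily python_som's exact rule):
import sklearn.decomposition

pca_tmp = sklearn.decomposition.PCA(n_components=2).fit(df_train)
pc_ratio = pca_tmp.explained_variance_[0] / pca_tmp.explained_variance_[1]
print('PC1/PC2 variance ratio:', pc_ratio,
      '-> suggested y close to', max(1, int(round(som_x / pc_ratio))))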
# %%
# Visualizing distance matrix and activation matrix
umatrix = som.distance_matrix()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 9))
sns.heatmap(umatrix.T, cmap='bone_r', ax=ax1, robust=True)
sns.heatmap(som.activation_matrix(data=df_train).T,
cmap='mako',
ax=ax2,
robust=True)
ax1.invert_yaxis()
ax2.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix_activation.png',
bbox_inches='tight',
transparent=True)
plt.draw()
# %%
# Visualizing distance matrix and activation matrix separately
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmap(umatrix.T, cmap='bone_r', robust=True)
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_umatrix.png',
bbox_inches='tight',
transparent=True)
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmap(som.activation_matrix(data=df_train).T,
cmap='mako',
robust=True)
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_activation_matrix.png',
bbox_inches='tight',
transparent=True)
# %% [markdown]
# ## Visualizing distribution of features
# %%
for column in df.iloc[:, 3:-1].columns:
hmap = som.get_weights()[:, :, df.iloc[:, 3:-1].columns.get_loc(column)].T
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmap(hmap, robust=True, cmap='BrBG')
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png',
bbox_inches='tight',
transparent=True)
plt.close(fig=fig)
# %% [markdown]
# ## Visualizing distribution of audios by metadata (day, hour, ...)
# Each node is colorized according to its most frequent label
# %%
df['days'] = df.index.date
df['days'] = (df['days'] - df['days'][0])
df['days'] = df['days'].apply(lambda x: x.days)
df['hour'] = df.index.hour
# %%
# Visualizing 'rac' distribution
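# label_map appears to return, per SOM node, a Counter of the training labels that
# map to it; most_common()[0][0] picks the majority label, and the "+ 1" below shifts
# False/True to 1/2 so that 0 is left for empty nodes (black in the 3-color palette).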
class_assignments = som.label_map(np.array(df_train), np.array(df['rac']))
hmap = np.zeros((som_x, som_y))
for i, j in sorted(class_assignments.keys()):
try:
hmap[i][j] = class_assignments[(i, j)].most_common()[0][0] + 1
except Exception:
continue
hmap = hmap.T
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmap(hmap,
cmap=sns.color_palette(palette=["#000000", "blue", "orange"],
n_colors=3),
cbar_kws={'ticks': [0, 1, 2]})
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_rac.png',
bbox_inches='tight',
transparent=True)
plt.show()
# %%
# Visualizing by 'grupo'
print(df.groupby('grupo')['rac'].count())
column = 'grupo'
class_assignments = som.label_map(np.array(df_train), np.array(df[column]))
hmap = np.zeros((som_x, som_y))
for i, j in sorted(class_assignments.keys()):
try:
hmap[i][j] = class_assignments[(i, j)].most_common()[0][0]
except Exception:
hmap[i][j] = 0
hmap = hmap.T
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmap(hmap,
cmap=sns.color_palette(palette=["#000000", "blue", "orange"],
n_colors=3),
cbar_kws={'ticks': [0, 1, 2]})
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png',
bbox_inches='tight',
transparent=True)
plt.show()
# %%
# Visualizing by 'days'
print(df.groupby('days')['rac'].count())
column = 'days'
class_assignments = som.label_map(np.array(df_train), np.array(df[column]))
hmap = np.zeros((som_x, som_y))
for i, j in sorted(class_assignments.keys()):
try:
hmap[i][j] = class_assignments[(i, j)].most_common()[0][0]
except Exception:
hmap[i][j] = -1
hmap = hmap.T
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmap(hmap, cmap='viridis')
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png',
bbox_inches='tight',
transparent=True)
plt.show()
# %%
# Visualizing by 'hour'
print(df.groupby('hour')['rac'].count())
column = 'hour'
class_assignments = som.label_map(np.array(df_train), np.array(df[column]))
hmap = np.zeros((som_x, som_y))
for i, j in sorted(class_assignments.keys()):
try:
hmap[i][j] = class_assignments[(i, j)].most_common()[0][0]
except Exception:
hmap[i][j] = -1
hmap = hmap.T
fig = plt.figure(figsize=(16, 9))
ax = sns.heatmap(hmap,
cmap=sns.diverging_palette(150,
250,
s=100,
l=20,
sep=1,
n=26,
center='light'),
center=12)
ax.invert_yaxis()
fig.savefig(f'./output_{FILE_PREFIX}/{FILE_PREFIX}_{column}.png',
bbox_inches='tight',
transparent=True)
plt.show()
# %%
| 30.682759
| 80
| 0.615532
| 1,250
| 8,898
| 4.2336
| 0.22
| 0.020786
| 0.030234
| 0.031746
| 0.552154
| 0.522676
| 0.494709
| 0.479592
| 0.444444
| 0.427438
| 0
| 0.02194
| 0.211171
| 8,898
| 289
| 81
| 30.788927
| 0.732013
| 0.13947
| 0
| 0.512438
| 0
| 0
| 0.142462
| 0.053374
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.034826
| 0
| 0.034826
| 0.029851
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed06ae9dc5fa12b66e8b0650821700fcf43fb094
| 2,286
|
py
|
Python
|
bindings/pydeck/docs/scripts/embed_examples.py
|
marsupialmarcos/deck.gl
|
c9867c1db87e492253865353f68c985019c7c613
|
[
"MIT"
] | 2
|
2021-08-11T08:05:51.000Z
|
2021-08-11T08:05:54.000Z
|
bindings/pydeck/docs/scripts/embed_examples.py
|
marsupialmarcos/deck.gl
|
c9867c1db87e492253865353f68c985019c7c613
|
[
"MIT"
] | null | null | null |
bindings/pydeck/docs/scripts/embed_examples.py
|
marsupialmarcos/deck.gl
|
c9867c1db87e492253865353f68c985019c7c613
|
[
"MIT"
] | null | null | null |
"""Script to embed pydeck examples into .rst pages with code
These populate the files you see once you click into a grid cell
on the pydeck gallery page
"""
from multiprocessing import Pool
import os
import subprocess
import sys
from const import DECKGL_URL_BASE, EXAMPLE_GLOB, GALLERY_DIR, HTML_DIR, HOSTED_STATIC_PATH
from utils import to_presentation_name, to_snake_case_string
from templates import DOC_TEMPLATE
if not os.environ.get("MAPBOX_API_KEY"):
# If running for rtfd.io, set this variable from the Admin panel
raise Exception("MAPBOX_API_KEY not set")
def create_rst(pydeck_example_file_name):
asset_name = to_snake_case_string(file_name=pydeck_example_file_name)
deckgl_docs_layer_name = asset_name.replace("_", "-")
deckgl_doc_url = None
if "layer" in deckgl_docs_layer_name:
# Don't add a deck.gl docs link if we're not referencing a layer
# Obviously very rough, should change this eventually to handle views etc
deckgl_doc_url = DECKGL_URL_BASE + deckgl_docs_layer_name
# Create new .html examples
html_fname = os.path.basename(pydeck_example_file_name).replace(".py", ".html")
# Run the pydeck example and move the .html output
subprocess.call(
"{python} {fname}; mv {html_src} {html_dest}".format(
python=sys.executable, fname=pydeck_example_file_name, html_src=html_fname, html_dest=HTML_DIR
),
shell=True,
)
python_code = open(pydeck_example_file_name, "r").read()
doc_source = DOC_TEMPLATE.render(
page_title=to_presentation_name(asset_name),
snake_name=asset_name,
python_code=python_code,
hosted_html_path=os.path.join(HOSTED_STATIC_PATH, html_fname),
deckgl_doc_url=deckgl_doc_url,
)
rst_path = os.path.join(GALLERY_DIR, asset_name + ".rst")
f = open(rst_path, "w+")
print("* Converted %s to %s" % (pydeck_example_file_name, rst_path))
f.write(doc_source)
f.close()
def main():
pool = Pool(processes=4)
candidate_files = [f for f in EXAMPLE_GLOB]
if not candidate_files:
raise Exception("No files found to convert")
subprocess.call("mkdir -p %s" % HTML_DIR, shell=True)
pool.map(create_rst, candidate_files)
if __name__ == "__main__":
main()
| 35.169231
| 106
| 0.71916
| 343
| 2,286
| 4.48688
| 0.399417
| 0.059129
| 0.066277
| 0.081871
| 0.02729
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000542
| 0.192476
| 2,286
| 64
| 107
| 35.71875
| 0.833153
| 0.185477
| 0
| 0
| 0
| 0
| 0.089141
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.159091
| 0
| 0.204545
| 0.022727
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed0721912431ef604f57495e6aa85dbb0102f18a
| 45,482
|
py
|
Python
|
symbolicR/python/forward_kin.py
|
mharding01/augmented-neuromuscular-RT-running
|
7e1ef00d3fdf9cfa9d59fc4f3a6a0e6dd792a834
|
[
"MIT"
] | null | null | null |
symbolicR/python/forward_kin.py
|
mharding01/augmented-neuromuscular-RT-running
|
7e1ef00d3fdf9cfa9d59fc4f3a6a0e6dd792a834
|
[
"MIT"
] | null | null | null |
symbolicR/python/forward_kin.py
|
mharding01/augmented-neuromuscular-RT-running
|
7e1ef00d3fdf9cfa9d59fc4f3a6a0e6dd792a834
|
[
"MIT"
] | null | null | null |
import numpy as np
import sympy as sp
import re
import os
######################
# #
# 17 16 21 #
# 18 15 22 #
# 19 14 23 #
# 20 01 24 #
# 02 08 #
# 03 09 #
# 04 10 #
# 05 11 #
# 06 12 #
# 07 13 #
# #
######################
#
# origin: in the waist, middle point between the two pitch hip rotations
# inertial frame: located at the origin (waist), but aligned with the ground (info from IMU)
#
# Di : position vector from the anchor point of the previous body to the current body i
# (previous body is not always body i-1), expressed in the relative
# frame of the previous body
# DGi : position vector from the anchor point of body i to its COM (center of mass) G_i,
# expressed in the relative frame of the current body i
# Omi : rotational vector from the previous body to the current body i
# (previous body is not always body i-1), expressed in the relative
# frame of the previous body
# Rdi : rotational matrix between body i and its predecessor
# si : sine of the relative angle before body i
# ci : cosine of the relative angle before body i
#
# xi : absolute position vector (from origin, expressed in the inertial frame)
# of the anchor point of body i
# xgi : absolute position vector of the COM G_i of body i
# xpi : derivative of xi
# xgpi : derivative of xgi
# omi : absolute rotational vector of body i
# Ri : absolute rotational matrix
# Rti : transpose matrix of Ri
# xji : jacobian of 'xi'
# xgji : jacobian of 'xgi'
# Rji : jacobian of 'Ri'
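#
# The chain emitted by symbolic_computation() below is, for each body i with parent p:
#   R_i   = Rd_i * R_p                          (absolute orientation)
#   om_i  = om_p + R_p^T * Om_i                 (absolute angular velocity)
#   x_i   = x_p  + R_p^T * D_i                  (anchor point position)
#   xp_i  = xp_p + tilde(om_p) * (R_p^T * D_i)  (anchor point velocity)
#   xg_i  = x_i  + R_i^T * DG_i                 (COM position)
#   xgp_i = xp_i + tilde(om_i) * (R_i^T * DG_i) (COM velocity)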
# return true if it is an int
def isInt(value):
try:
int(value)
return True
except:
return False
# return true if it has a shape 'R%a_%b%c' (indexes %a, %b, %c also returned)
def isRot(value):
try:
a = int(value.split('_')[0].split('R')[1])
b = int(value.split('_')[1][0])
c = int(value.split('_')[1][1])
return True, a, b, c
except:
return False, -1, -1, -1
# return true if it has a shape 'x%a_%b' (indexes %a, %b also returned)
def isVec(value):
try:
a = int(value.split('_')[0].split('x')[1])
b = int(value.split('_')[1])
return True, a, b
except:
return False, -1, -1
# count the number of 'elem' in the file
def count_elem(in_file, elem):
count = 0
with open(in_file, 'r') as f:
# loop on all the lines
for line in f:
cut_line = line.split(elem)
if len(cut_line) == 2:
count += 1
return count
# print the declaration of an element
def print_declaration_elem(in_file, out_write, elem, nb_max_line):
if count_elem(in_file, '{}'.format(elem)) >= 1:
count = 0
with open(in_file,'r') as f:
# loop on all the lines
for line in f:
cut_line_1 = line.split(elem)
cut_line_2 = line.split(' = ')
if len(cut_line_1) == 2 and len(cut_line_2) == 2:
if len(cut_line_2[0].split('[')) == 1:
if count == 0:
out_write.write(' double {}'.format(cut_line_2[0].strip()))
else:
out_write.write(', {}'.format(cut_line_2[0].strip()))
count += 1
if count >= nb_max_line:
out_write.write(';\n')
count = 0
if count != 0:
out_write.write(';\n')
# print all declarations
def print_all_declaration(in_file, out_write, nb_max_char):
count = 0
with open(in_file,'r') as f:
# loop on all the lines
for line in f:
cut_line = line.split(' = ')
if len(cut_line) == 2:
if len(cut_line[0].split('[')) == 1:
if count == 0:
out_write.write(' double {}'.format(cut_line[0].strip()))
else:
out_write.write(', {}'.format(cut_line[0].strip()))
count += len(cut_line[0].strip()) + 2
if count >= nb_max_char:
out_write.write(';\n')
count = 0
if count != 0:
out_write.write(';\n')
# get tilde matrix
def get_tilde(v):
return np.array([[0.0, -v[2], v[1]], [v[2], 0.0, -v[0]], [-v[1], v[0], 0.0]])
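# get_tilde(v) is the skew-symmetric cross-product operator: get_tilde(v).dot(w)
# equals np.cross(v, w). A minimal sanity check (a sketch, not called by the generator):
def _check_tilde(v=np.array([1.0, 2.0, 3.0]), w=np.array([0.5, -1.0, 2.0])):
    assert np.allclose(get_tilde(v).dot(w), np.cross(v, w))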
# get rotation matrix
def get_rotation_matrix(axis, direct, cosine, sine):
if direct:
if axis == 1:
return np.array([[1.0, 0.0, 0.0], [0.0, cosine, sine], [0.0, -sine, cosine]])
elif axis == 2:
return np.array([[cosine, 0.0, -sine], [0.0, 1.0, 0.0], [sine, 0.0, cosine]])
elif axis == 3:
return np.array([[cosine, sine, 0.0], [-sine, cosine, 0.0], [0.0, 0.0, 1.0]])
else:
return np.array([])
else:
if axis == 1:
return np.array([[1.0, 0.0, 0.0], [0.0, cosine, -sine], [0.0, sine, cosine]])
elif axis == 2:
return np.array([[cosine, 0.0, sine], [0.0, 1.0, 0.0], [-sine, 0.0, cosine]])
elif axis == 3:
return np.array([[cosine, -sine, 0.0], [sine, cosine, 0.0], [0.0, 0.0, 1.0]])
else:
return np.array([])
# get vector axis
def get_vector_axis(axis, direct, elem):
if direct:
if axis == 1:
return np.array([[elem], [0.0], [0.0]])
elif axis == 2:
return np.array([[0.0], [elem], [0.0]])
elif axis == 3:
return np.array([[0.0], [0.0], [elem]])
else:
return np.array([])
else:
if axis == 1:
return np.array([[-elem], [0.0], [0.0]])
elif axis == 2:
return np.array([[0.0], [-elem], [0.0]])
elif axis == 3:
return np.array([[0.0], [0.0], [-elem]])
else:
return np.array([])
# compute the derivative of an element (for jacobian)
def der_elem(elem_str, Rj, xj, xgj, der_var):
# element to derive (string)
elem_str = elem_str.replace('- ','-').strip()
# derivative axis
der_q = int(der_var.replace('q',''))
# detect positive/negative
elem_split = elem_str.split('-')
cur_len = len(elem_split)
if cur_len == 1: # positive
neg_flag = 0
pos_str = elem_split[0]
elif cur_len == 2: # negative
neg_flag = 1
pos_str = elem_split[1]
else:
print('Error: {} tokens instead of 1 or 2 in negative sign detection!'.format(cur_len))
exit()
# compute derivative
result = 0
# cosine
if pos_str == 'c{}'.format(der_q):
result += -sp.Symbol('s{}'.format(der_q))
# sine
elif pos_str == 's{}'.format(der_q):
result += sp.Symbol('c{}'.format(der_q))
# other
else:
[rot_flag, a, b, c] = isRot(pos_str)
[vec_flag, d, e] = isVec(pos_str)
# rotation matrix
if rot_flag:
result += Rj[a-1][der_q-1][(b-1)*3+(c-1)]
# vector
elif vec_flag:
result += xj[d-1][der_q-1][e-1]
# apply negative
if neg_flag:
result = -result
return result
# compute the derivative of an expression (for jacobian)
def symbolic_jacob_der(Rj, xj, xgj, symb_var, der_var):
# list of all terms
term_list = str(symb_var).replace('- ','-').replace('-','+-').split('+')
if term_list[0] == '':
term_list.pop(0)
result = 0
# loop on all terms
for cur_term in term_list:
# detect products
cur_term_split = cur_term.split('*')
cur_len = len(cur_term_split)
# no product
if cur_len == 1:
result += der_elem(cur_term_split[0], Rj, xj, xgj, der_var)
# one product
elif cur_len == 2:
result += der_elem(cur_term_split[0], Rj, xj, xgj, der_var)*sp.Symbol(cur_term_split[1].strip())
result += der_elem(cur_term_split[1], Rj, xj, xgj, der_var)*sp.Symbol(cur_term_split[0].strip())
# other
else:
print('Error: {} products found, only 0 or 1 are supported!'.format(cur_len - 1))
exit()
return result
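# Worked example: for the expression c3*x2_1 - s3 and der_var 'q3',
#   d/dq3 = -s3*x2_1 + c3*d(x2_1)/dq3 - c3,
# where d(x2_1)/dq3 is looked up as xj[1][2][0] (tables filled by write_symb_xj below).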
# write the beginning of the file
def write_file_beginning(out_file, joint_id_names):
out_file.write('/*! \n')
out_file.write(' * \\author Nicolas Van der Noot\n')
out_file.write(' * \\file forward_kinematics.cc\n')
out_file.write(' * \\brief forward kinematics computation for the COMAN model\n')
out_file.write(' */\n\n')
out_file.write('// joints enumeration\n')
out_file.write('enum {')
count = 0
for i in range(1, len(joint_id_names)):
count += 1
if i == 1:
out_file.write('{}'.format(get_string_enum(joint_id_names[i])))
elif count >= 6:
count = 0
out_file.write(',\n {}'.format(get_string_enum(joint_id_names[i])))
else:
out_file.write(', {}'.format(get_string_enum(joint_id_names[i])))
out_file.write('};\n\n')
out_file.write('/*! \\brief main kinematics computation\n')
out_file.write(' *\n')
out_file.write(' * \\param[in,out] in_out inputs and outputs class\n')
out_file.write(' *\n')
out_file.write(' * computation of:\n')
out_file.write(' * COM (center of mass) position and velocity\n')
out_file.write(' * feet position, velocity and orientation\n')
out_file.write(' * waist and torso orientation angles and derivatives\n')
out_file.write(' *\n')
out_file.write(' * ////////////////////////\n')
out_file.write(' * // //\n')
out_file.write(' * // 17 16 21 //\n')
out_file.write(' * // 18 15 22 //\n')
out_file.write(' * // 19 14 23 //\n')
out_file.write(' * // 20 01 24 //\n')
out_file.write(' * // 02 08 //\n')
out_file.write(' * // 03 09 //\n')
out_file.write(' * // 04 10 //\n')
out_file.write(' * // 05 11 //\n')
out_file.write(' * // 06 12 //\n')
out_file.write(' * // 07 13 //\n')
out_file.write(' * // //\n')
out_file.write(' * ////////////////////////\n')
out_file.write(' *\n')
out_file.write(' * origin: in the waist, middle point between the two pitch hip rotations\n')
out_file.write(' * inertial frame: located at the origin (waist), but aligned with the ground (info from IMU)\n')
out_file.write(' *\n')
out_file.write(' * Di : position vector from the anchor point of the previous body to the current body i \n')
out_file.write(' * (previous body is not always body i-1), expressed in the relative\n')
out_file.write(' * frame of the previous body\n')
out_file.write(' * DGi : position vector from the anchor point of body i to its COM (center of mass) G_i,\n')
out_file.write(' * expressed in the relative frame of the current body i\n')
out_file.write(' * Omi : rotational vector from the previous body to the current body i \n')
out_file.write(' * (previous body is not always body i-1), expressed in the relative\n')
out_file.write(' * frame of the previous body\n')
out_file.write(' * Rdi : rotational matrix between body i and its predecessor\n')
out_file.write(' * si : sine of the relative angle before body i\n')
out_file.write(' * ci : cosine of the relative angle before body i\n')
out_file.write(' *\n')
out_file.write(' * xi : absolute position vector (from origin, expressed in the inertial frame)\n')
out_file.write(' * of the anchor point of body i\n')
out_file.write(' * xgi : absolute position vector of the COM G_i of body i\n')
out_file.write(' * xpi : derivative of xi\n')
out_file.write(' * xgpi : derivative of xgi\n')
out_file.write(' * omi : absolute rotational vector of body i\n')
out_file.write(' * Ri : absolute rotational matrix\n')
out_file.write(' * Rti : transpose matrix of Ri\n')
out_file.write(' * xji : jacobian of \'xi\'\n')
out_file.write(' * xgji : jacobian of \'xgi\'\n')
out_file.write(' * Rji : jacobian of \'Ri\'\n')
out_file.write(' */\n')
out_file.write('void ForwardKinematics::main_kinematics(KinematicsInOut &in_out)\n{\n')
# compute the center of mass position and velocity
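# r_COM = sum_i(M_i * xg_i) / m_tot and rp_COM = sum_i(M_i * xgp_i) / m_tot,
# with m_tot = sum_i(M_i); the jacobian d(r_COM)/dq is the same weighted sum over xgj.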
def com_compute(out_file, nb_bodies, joint_id_names, M, xg, xgp, xgj):
out_file.write(' m_tot = ')
for i in range(0, nb_bodies):
out_file.write('{}'.format(M[i]))
if i == nb_bodies-1:
out_file.write(';\n\n')
else:
out_file.write(' + ')
out_file.write(' // global com absolute position\n')
for i in range(0, 3):
out_file.write(' in_out.r_COM[{}] = '.format(i))
flag_first = 0
for j in range(0, nb_bodies):
if flag_first:
out_file.write(' + {}*{}'.format(M[j], xg[j][i]))
else:
flag_first = 1
out_file.write('({}*xg{}_{}'.format(M[j], j+1, i+1))
if j == nb_bodies-1:
if flag_first:
out_file.write(')/m_tot;\n')
else:
out_file.write('0.0;\n')
out_file.write('\n')
out_file.write(' // global com absolute velocity\n')
for i in range(0, 3):
out_file.write(' in_out.rp_COM[{}] = '.format(i))
flag_first = 0
for j in range(0, nb_bodies):
if flag_first:
out_file.write(' + {}*xgp{}_{}'.format(M[j], j+1, i+1))
else:
flag_first = 1
out_file.write('({}*xgp{}_{}'.format(M[j], j+1, i+1))
if j == nb_bodies-1:
if flag_first:
out_file.write(')/m_tot;\n')
else:
out_file.write('0.0;\n')
out_file.write('\n')
out_file.write(' // global com jacobian\n')
out_file.write(' if (flag_jacob)\n {\n')
for i in range(1, nb_bodies):
for j in range(0, 3):
out_file.write(' in_out.r_COM_der[{}][{}] = '.format(get_string_enum(joint_id_names[i]), j))
flag_first = 0
for k in range(0, nb_bodies):
if xgj[k][i][j] != 0:
if flag_first:
out_file.write(' + {}*{}'.format(M[k], str(xgj[k][i][j])))
else:
flag_first = 1
out_file.write('({}*{}'.format(M[k], str(xgj[k][i][j])))
if k == nb_bodies-1:
if flag_first:
out_file.write(')/m_tot;\n')
else:
out_file.write('0.0;\n')
if i != nb_bodies-1:
out_file.write('\n')
else:
out_file.write(' }\n\n')
# from an orientation matrix, compute the roll, pitch, yaw angles (and derivative)
def yaw_pitch_roll_angles(out_file, angle_name, R_matrix, epsilon):
if epsilon > 0: # epsilon = 1 -> pitch angle in [-pi/2 ; pi/2]
out_file.write(' in_out.{}[0] = atan2({}, {});\n'.format(angle_name, R_matrix[5], R_matrix[8]))
out_file.write(' in_out.{}[1] = atan2(-{}, sqrt({}*{} + {}*{}));\n'.format(angle_name, R_matrix[2], R_matrix[0], R_matrix[0], R_matrix[1], R_matrix[1]))
out_file.write(' in_out.{}[2] = atan2({}, {});\n'.format(angle_name, R_matrix[1], R_matrix[0]))
else: # epsilon = -1 -> pitch angle in [pi/2 ; 3*pi/2]
out_file.write(' in_out.{}[0] = atan2(-{}, -{});\n'.format(angle_name, R_matrix[5], R_matrix[8]))
out_file.write(' in_out.{}[1] = atan2(-{}, -sqrt({}*{} + {}*{}));\n'.format(angle_name, R_matrix[2], R_matrix[0], R_matrix[0], R_matrix[1], R_matrix[1]))
out_file.write(' in_out.{}[2] = atan2(-{}, -{});\n'.format(angle_name, R_matrix[1], R_matrix[0]))
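# A minimal numeric sanity check of the epsilon = +1 extraction above (a sketch, not
# called by the generator): build a matrix with the same 'direct' chain convention as
# symbolic_computation() (IMU at identity) and verify the three angles are recovered.
def _check_yaw_pitch_roll(roll=0.1, pitch=-0.2, yaw=0.3):
    import math
    C = get_rotation_matrix(1, 1, math.cos(roll), math.sin(roll)).dot(
        get_rotation_matrix(2, 1, math.cos(pitch), math.sin(pitch))).dot(
        get_rotation_matrix(3, 1, math.cos(yaw), math.sin(yaw)))
    R = C.flatten()
    assert abs(math.atan2(R[5], R[8]) - roll) < 1e-9
    assert abs(math.atan2(-R[2], math.sqrt(R[0] * R[0] + R[1] * R[1])) - pitch) < 1e-9
    assert abs(math.atan2(R[1], R[0]) - yaw) < 1e-9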
# compute the time derivatives of 'yaw_pitch_roll_angles'
def theta_dot_compute(out_file, omega_in, omega_out, body_part):
out_file.write(' in_out.{}[0] = inv_c_y_{} * (c_z_{}*{} + s_z_{}*{});\n'.format(omega_out, body_part, body_part, omega_in[0], body_part, omega_in[1]))
out_file.write(' in_out.{}[1] = c_z_{}*{} - s_z_{}*{};\n'.format(omega_out, body_part, omega_in[1], body_part, omega_in[0]))
out_file.write(' in_out.{}[2] = inv_c_y_{} * s_y_{} * (s_z_{}*{} + c_z_{}*{}) + {};\n'.format(omega_out, body_part, body_part, body_part, omega_in[1], body_part, omega_in[0], omega_in[2]))
# angles (position and derivative) of the waist and the torso
def torso_waist_angles(out_file, R, om, waist_id, torso_id):
out_file.write(' // waist orientation matrix as angles [rad]\n')
yaw_pitch_roll_angles(out_file, 'theta_waist', R[waist_id], 1)
out_file.write('\n')
out_file.write(' // torso orientation matrix as angles [rad]\n')
yaw_pitch_roll_angles(out_file, 'theta_torso', R[torso_id], 1)
out_file.write('\n')
out_file.write(' c_y_waist = cos(in_out.theta_waist[1]);\n')
out_file.write(' c_y_torso = cos(in_out.theta_torso[1]);\n')
out_file.write(' c_z_waist = cos(in_out.theta_waist[2]);\n')
out_file.write(' c_z_torso = cos(in_out.theta_torso[2]);\n\n')
out_file.write(' s_y_waist = sin(in_out.theta_waist[1]);\n')
out_file.write(' s_y_torso = sin(in_out.theta_torso[1]);\n')
out_file.write(' s_z_waist = sin(in_out.theta_waist[2]);\n')
out_file.write(' s_z_torso = sin(in_out.theta_torso[2]);\n\n')
out_file.write(' if ((!c_y_waist) || (!c_y_torso))\n {\n')
out_file.write(' return;\n }\n\n')
out_file.write(' inv_c_y_waist = 1.0 / c_y_waist;\n')
out_file.write(' inv_c_y_torso = 1.0 / c_y_torso;\n\n')
out_file.write(' // waist orientation angle derivatives [rad/s]\n')
theta_dot_compute(out_file, om[waist_id], 'omega_waist', 'waist')
out_file.write('\n')
out_file.write(' // torso orientation angle derivatives [rad/s]\n')
theta_dot_compute(out_file, om[torso_id], 'omega_torso', 'torso')
# compute the feet position, velocity and orientation
def feet_compute(out_file, joint_id_names, R, x, xp, om, Rj, xj, xgj, r_foot_id, l_foot_id, x_min, x_max, y_min, y_max):
# symbolic variables declarations
nb_contacts = 4
x_r_foot = x[r_foot_id]
x_l_foot = x[l_foot_id]
xp_r_foot = xp[r_foot_id]
xp_l_foot = xp[l_foot_id]
om_r_foot = om[r_foot_id]
om_l_foot = om[l_foot_id]
R_r_foot = R[r_foot_id]
R_l_foot = R[l_foot_id]
Dpt_r_foot = sp.zeros(3, 1)
Dpt_l_foot = sp.zeros(3, 1)
Dpt_r_foot[2] = sp.Symbol('DPT_3_16')
Dpt_l_foot[2] = sp.Symbol('DPT_3_29')
Dpt_r_foot_cont = nb_contacts * [None]
Dpt_l_foot_cont = nb_contacts * [None]
for i in range(0, nb_contacts):
Dpt_r_foot_cont[i] = sp.zeros(3, 1)
Dpt_l_foot_cont[i] = sp.zeros(3, 1)
Dpt_r_foot_cont[0][0] = x_min
Dpt_r_foot_cont[1][0] = x_min
Dpt_r_foot_cont[2][0] = x_max
Dpt_r_foot_cont[3][0] = x_max
Dpt_r_foot_cont[0][1] = y_min
Dpt_r_foot_cont[1][1] = y_max
Dpt_r_foot_cont[2][1] = y_min
Dpt_r_foot_cont[3][1] = y_max
for i in range(0, nb_contacts):
Dpt_r_foot_cont[i][2] = sp.Symbol('DPT_3_16')
for i in range(0, nb_contacts):
for j in range(0, 3):
Dpt_l_foot_cont[i][j] = Dpt_r_foot_cont[i][j]
x_r_cont = nb_contacts * [None]
x_l_cont = nb_contacts * [None]
# computation
om_tilde_r_foot = get_tilde(om_r_foot)
om_tilde_l_foot = get_tilde(om_l_foot)
x_r = x_r_foot + R_r_foot.T * Dpt_r_foot
x_l = x_l_foot + R_l_foot.T * Dpt_l_foot
xp_r = xp_r_foot + om_tilde_r_foot * (R_r_foot.T * Dpt_r_foot)
xp_l = xp_l_foot + om_tilde_l_foot * (R_l_foot.T * Dpt_l_foot)
for i in range(0, nb_contacts):
x_r_cont[i] = x_r_foot + R_r_foot.T * Dpt_r_foot_cont[i]
x_l_cont[i] = x_l_foot + R_l_foot.T * Dpt_l_foot_cont[i]
# writing outputs
out_file.write(' // right foot absolute position\n')
for i in range(0,3):
out_file.write(' in_out.r_Rfoot[{}] = {};\n'.format(i, x_r[i]))
out_file.write('\n')
out_file.write(' // right foot absolute velocity\n')
for i in range(0,3):
out_file.write(' in_out.rp_Rfoot[{}] = {};\n'.format(i, xp_r[i]))
out_file.write('\n')
out_file.write(' // right foot jacobian\n')
out_file.write(' if (flag_jacob)\n {\n')
flag_first = 0
for i in range (1, nb_bodies):
flag_print = 0
for j in range(0, 3):
cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_r[j], 'q{}'.format(i+1))
if cur_jac != 0:
if not flag_first:
flag_first = 1
flag_print = 1
elif not flag_print:
flag_print = 1
out_file.write('\n')
out_file.write(' in_out.r_Rfoot_der[{}][{}] = {};\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac))
out_file.write(' }\n\n')
out_file.write(' // left foot absolute position\n')
for i in range(0,3):
out_file.write(' in_out.r_Lfoot[{}] = {};\n'.format(i, x_l[i]))
out_file.write('\n')
out_file.write(' // left foot absolute velocity\n')
for i in range(0,3):
out_file.write(' in_out.rp_Lfoot[{}] = {};\n'.format(i, xp_l[i]))
out_file.write('\n')
out_file.write(' // left foot jacobian\n')
out_file.write(' if (flag_jacob)\n {\n')
flag_first = 0
for i in range (1, nb_bodies):
flag_print = 0
for j in range(0, 3):
cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_l[j], 'q{}'.format(i+1))
if cur_jac != 0:
if not flag_first:
flag_first = 1
flag_print = 1
elif not flag_print:
flag_print = 1
out_file.write('\n')
out_file.write(' in_out.r_Lfoot_der[{}][{}] = {};\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac))
out_file.write(' }\n\n')
out_file.write(' // right foot contact points absolute position\n')
for i in range(0, nb_contacts):
for j in range(0, 3):
out_file.write(' in_out.r_Rfoot_cont[{}][{}] = {};\n'.format(i, j, x_r_cont[i][j]))
out_file.write('\n')
out_file.write(' // right foot contact points jacobian\n')
out_file.write(' if (flag_jacob)\n {\n')
flag_first = 0
for i in range(0, nb_contacts):
for j in range (1, nb_bodies):
flag_print = 0
for k in range(0, 3):
cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_r_cont[i][k], 'q{}'.format(j+1))
if cur_jac != 0:
if not flag_first:
flag_first = 1
flag_print = 1
elif not flag_print:
flag_print = 1
out_file.write('\n')
out_file.write(' in_out.r_Rfoot_cont_der[{}][{}][{}] = {};\n'.format(i, get_string_enum(joint_id_names[j]), k, cur_jac))
out_file.write(' }\n\n')
out_file.write(' // left foot contact points absolute position\n')
for i in range(0, nb_contacts):
for j in range(0, 3):
out_file.write(' in_out.r_Lfoot_cont[{}][{}] = {};\n'.format(i, j, x_l_cont[i][j]))
out_file.write('\n')
out_file.write(' // left foot contact points jacobian\n')
out_file.write(' if (flag_jacob)\n {\n')
flag_first = 0
for i in range(0, nb_contacts):
for j in range (1, nb_bodies):
flag_print = 0
for k in range(0, 3):
cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_l_cont[i][k], 'q{}'.format(j+1))
if cur_jac != 0:
if not flag_first:
flag_first = 1
flag_print = 1
elif not flag_print:
flag_print = 1
out_file.write('\n')
out_file.write(' in_out.r_Lfoot_cont_der[{}][{}][{}] = {};\n'.format(i, get_string_enum(joint_id_names[j]), k, cur_jac))
out_file.write(' }\n\n')
out_file.write(' // feet absolute orientation\n')
for i in range(0, 9):
out_file.write(' in_out.Rfoot_or[{}] = {};\n'.format(i, R_r_foot[i]))
out_file.write('\n')
for i in range(0, 9):
out_file.write(' in_out.Lfoot_or[{}] = {};\n'.format(i, R_l_foot[i]))
out_file.write('\n')
out_file.write(' // right foot absolute orientation jacobian\n')
out_file.write(' if (flag_jacob)\n {\n')
flag_first = 0
for i in range (1, nb_bodies):
flag_print = 0
for j in range(0,9):
cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_r_foot[j], 'q{}'.format(i+1))
if cur_jac != 0:
if not flag_first:
flag_first = 1
flag_print = 1
elif not flag_print:
flag_print = 1
out_file.write('\n')
out_file.write(' in_out.Rfoot_or_der[{}][{}] = {};\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac))
out_file.write(' }\n\n')
out_file.write(' // left foot absolute orientation jacobian\n')
out_file.write(' if (flag_jacob)\n {\n')
flag_first = 0
for i in range (1, nb_bodies):
flag_print = 0
for j in range(0,9):
cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_l_foot[j], 'q{}'.format(i+1))
if cur_jac != 0:
if not flag_first:
flag_first = 1
flag_print = 1
elif not flag_print:
flag_print = 1
out_file.write('\n')
out_file.write(' in_out.Lfoot_or_der[{}][{}] = {};\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac))
out_file.write(' }\n\n')
out_file.write(' // right foot orientation matrix as angles [rad]\n')
yaw_pitch_roll_angles(out_file, 'theta_Rfoot', R[r_foot_id], 1)
out_file.write('\n')
out_file.write(' // left foot orientation matrix as angles [rad]\n')
yaw_pitch_roll_angles(out_file, 'theta_Lfoot', R[l_foot_id], 1)
out_file.write('\n')
out_file.write(' c_y_Rfoot = cos(in_out.theta_Rfoot[1]);\n')
out_file.write(' c_y_Lfoot = cos(in_out.theta_Lfoot[1]);\n')
out_file.write(' c_z_Rfoot = cos(in_out.theta_Rfoot[2]);\n')
out_file.write(' c_z_Lfoot = cos(in_out.theta_Lfoot[2]);\n\n')
out_file.write(' s_y_Rfoot = sin(in_out.theta_Rfoot[1]);\n')
out_file.write(' s_y_Lfoot = sin(in_out.theta_Lfoot[1]);\n')
out_file.write(' s_z_Rfoot = sin(in_out.theta_Rfoot[2]);\n')
out_file.write(' s_z_Lfoot = sin(in_out.theta_Lfoot[2]);\n\n')
out_file.write(' if ((!c_y_Rfoot) || (!c_y_Lfoot))\n {\n')
out_file.write(' return;\n }\n\n')
out_file.write(' inv_c_y_Rfoot = 1.0 / c_y_Rfoot;\n')
out_file.write(' inv_c_y_Lfoot = 1.0 / c_y_Lfoot;\n\n')
out_file.write(' // right foot orientation angle derivatives [rad/s]\n')
theta_dot_compute(out_file, om[r_foot_id], 'omega_Rfoot', 'Rfoot')
out_file.write('\n')
out_file.write(' // left foot orientation angle derivatives [rad/s]\n')
theta_dot_compute(out_file, om[l_foot_id], 'omega_Lfoot', 'Lfoot')
out_file.write('\n')
# compute the wrists position, velocity and orientation
def wrists_compute(out_file, joint_id_names, R, x, xp, om, Rj, xj, xgj, r_elb_id, l_elb_id, r_wrist_x, r_wrist_y, r_wrist_z):
# symbolic variables declarations
x_r_elb = x[r_elb_id]
x_l_elb = x[l_elb_id]
xp_r_elb = xp[r_elb_id]
xp_l_elb = xp[l_elb_id]
om_r_elb = om[r_elb_id]
om_l_elb = om[l_elb_id]
R_r_elb = R[r_elb_id]
R_l_elb = R[l_elb_id]
Dpt_r_wrist = sp.zeros(3, 1)
Dpt_l_wrist = sp.zeros(3, 1)
Dpt_r_wrist[0] = r_wrist_x
Dpt_r_wrist[1] = r_wrist_y
Dpt_r_wrist[2] = r_wrist_z
Dpt_l_wrist[0] = r_wrist_x
Dpt_l_wrist[1] = -r_wrist_y
Dpt_l_wrist[2] = r_wrist_z
# computation
om_tilde_r_elb = get_tilde(om_r_elb)
om_tilde_l_elb = get_tilde(om_l_elb)
x_r = x_r_elb + R_r_elb.T * Dpt_r_wrist
x_l = x_l_elb + R_l_elb.T * Dpt_l_wrist
xp_r = xp_r_elb + om_tilde_r_elb * (R_r_elb.T * Dpt_r_wrist)
xp_l = xp_l_elb + om_tilde_l_elb * (R_l_elb.T * Dpt_l_wrist)
# writing outputs
out_file.write(' // right wrist absolute position\n')
for i in range(0,3):
out_file.write(' in_out.r_Rwrist[{}] = {};\n'.format(i, x_r[i]))
out_file.write('\n')
out_file.write(' // right wrist absolute velocity\n')
for i in range(0,3):
out_file.write(' in_out.rp_Rwrist[{}] = {};\n'.format(i, xp_r[i]))
out_file.write('\n')
out_file.write(' // right wrist jacobian\n')
out_file.write(' if (flag_jacob)\n {\n')
flag_first = 0
for i in range (1, nb_bodies):
flag_print = 0
for j in range(0, 3):
cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_r[j], 'q{}'.format(i+1))
if cur_jac != 0:
if not flag_first:
flag_first = 1
flag_print = 1
elif not flag_print:
flag_print = 1
out_file.write('\n')
out_file.write(' in_out.r_Rwrist_der[{}][{}] = {};\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac))
out_file.write(' }\n\n')
out_file.write(' // left wrist absolute position\n')
for i in range(0,3):
out_file.write(' in_out.r_Lwrist[{}] = {};\n'.format(i, x_l[i]))
out_file.write('\n')
out_file.write(' // left wrist absolute velocity\n')
for i in range(0,3):
out_file.write(' in_out.rp_Lwrist[{}] = {};\n'.format(i, xp_l[i]))
out_file.write('\n')
out_file.write(' // left wrist jacobian\n')
out_file.write(' if (flag_jacob)\n {\n')
flag_first = 0
for i in range (1, nb_bodies):
flag_print = 0
for j in range(0, 3):
cur_jac = symbolic_jacob_der(Rj, xj, xgj, x_l[j], 'q{}'.format(i+1))
if cur_jac != 0:
if not flag_first:
flag_first = 1
flag_print = 1
elif not flag_print:
flag_print = 1
out_file.write('\n')
out_file.write(' in_out.r_Lwrist_der[{}][{}] = {};\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac))
out_file.write(' }\n\n')
out_file.write(' // wrists absolute orientation\n')
for i in range(0, 9):
out_file.write(' in_out.Rwrist_or[{}] = {};\n'.format(i, R_r_elb[i]))
out_file.write('\n')
for i in range(0, 9):
out_file.write(' in_out.Lwrist_or[{}] = {};\n'.format(i, R_l_elb[i]))
out_file.write('\n')
out_file.write(' // right wrist absolute orientation jacobian\n')
out_file.write(' if (flag_jacob)\n {\n')
flag_first = 0
for i in range (1, nb_bodies):
flag_print = 0
for j in range(0,9):
cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_r_elb[j], 'q{}'.format(i+1))
if cur_jac != 0:
if not flag_first:
flag_first = 1
flag_print = 1
elif not flag_print:
flag_print = 1
out_file.write('\n')
out_file.write(' in_out.Rwrist_or_der[{}][{}] = {};\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac))
out_file.write(' }\n\n')
out_file.write(' // left wrist absolute orientation jacobian\n')
out_file.write(' if (flag_jacob)\n {\n')
flag_first = 0
for i in range (1, nb_bodies):
flag_print = 0
for j in range(0,9):
cur_jac = symbolic_jacob_der(Rj, xj, xgj, R_l_elb[j], 'q{}'.format(i+1))
if cur_jac != 0:
if not flag_first:
flag_first = 1
flag_print = 1
elif not flag_print:
flag_print = 1
out_file.write('\n')
out_file.write(' in_out.Lwrist_or_der[{}][{}] = {};\n'.format(get_string_enum(joint_id_names[i]), j, cur_jac))
out_file.write(' }\n\n')
# get a string for the enumeration of joints
def get_string_enum(cur_string):
cur_split = cur_string.split('_')
if len(cur_split) >= 2:
new_string = cur_split[0]
for i in range(1, len(cur_split)-1):
new_string = '{}{}'.format(new_string, cur_split[i])
else:
new_string = cur_string
cur_split = list(filter(None, re.split("([A-Z][^A-Z]*)", new_string)))  # list() keeps it subscriptable on Python 3
new_string = cur_split[0].upper()
for i in range(1, len(cur_split)):
new_string = '{}_{}'.format(new_string, cur_split[i].upper())
return new_string
# write the end of the file
def write_file_end(out_file):
out_file.write('}\n')
# print matrix components declaration
def write_matrix_declaration(out_file, prefix):
out_file.write(' double ')
for i in range(0,3):
for j in range(0,3):
out_file.write('{}{}{}'.format(prefix, i+1, j+1))
if i == 2 and j == 2:
out_file.write(';\n')
else:
out_file.write(', ')
# print variables declaration
def write_variables_declaration(out_file, prefix, min, max):
out_file.write(' double ')
for i in range(min, max+1):
out_file.write('{}{}'.format(prefix, i))
if i == max:
out_file.write(';\n')
else:
out_file.write(', ')
# variables initialization
def write_initialization(out_file, nb_bodies, joint_id_names):
out_file.write(' // -- variables initialization -- //\n')
out_file.write('\n // IMU - rotation matrices\n')
for i in range(0, 3):
for j in range(0, 3):
out_file.write(' IMU{}{} = in_out.IMU_Orientation[{}];\n'.format(i+1, j+1, 3*i+j))
out_file.write('\n // IMU - angles velocity\n')
for i in range(0, 3):
out_file.write(' omega_{} = in_out.IMU_Angular_Rate[{}];\n'.format(i+1, i))
out_file.write('\n // joint cosines\n')
for i in range(1, nb_bodies):
out_file.write(' c{} = cos(in_out.q_mot[{}]);\n'.format(i+1, joint_id_names[i]))
out_file.write('\n // joint sines\n')
for i in range(1, nb_bodies):
out_file.write(' s{} = sin(in_out.q_mot[{}]);\n'.format(i+1, joint_id_names[i]))
out_file.write('\n // joint relative velocities\n')
for i in range(1, nb_bodies):
out_file.write(' Om{} = in_out.qd_mot[{}];\n'.format(i+1, joint_id_names[i]))
# write symbolic vector and replace symbolic variable by its name
def write_symb_vector(out_file, vector, start_name, end_name):
new_vector = sp.zeros(3, 1)
flag_print = 0
for i in range(0,3):
if vector[i] == 0 or vector[i] == 1:
new_vector[i] = vector[i]
else:
flag_print = 1
elem_name = '{}{}{}'.format(start_name, i+1, end_name)
out_file.write(' {} = {};\n'.format(elem_name, vector[i]).replace('1.0*',''))
new_vector[i] = sp.Symbol(elem_name)
if flag_print:
out_file.write('\n')
return new_vector
# write symbolic matrix and replace symbolic variable by its name
def write_symb_matrix(out_file, matrix, start_name, end_name):
new_matrix = sp.zeros(3, 3)
flag_print = 0
for i in range(0,3):
for j in range(0,3):
if matrix[i,j] == 0 or matrix[i,j] == 1:
new_matrix[i,j] = matrix[i,j]
else:
flag_print = 1
elem_name = '{}{}{}{}'.format(start_name, i+1, j+1, end_name)
out_file.write(' {} = {};\n'.format(elem_name, matrix[i,j]).replace('1.0*',''))
new_matrix[i,j] = sp.Symbol(elem_name)
if flag_print:
out_file.write('\n')
return new_matrix
# save the symbolic vector for print
def print_save_symb_vector(vector, start_name, end_name):
new_vector = sp.zeros(3, 1)
save_vector = 3 * [None]
for i in range(0,3):
if vector[i] == 0 or vector[i] == 1:
new_vector[i] = vector[i]
save_vector[i] = None
else:
elem_name = '{}{}{}'.format(start_name, i+1, end_name)
save_vector[i] = ' {} = {};\n'.format(elem_name, vector[i]).replace('1.0*','')
new_vector[i] = sp.Symbol(elem_name)
return new_vector, save_vector
# save the symbolic matrix for print
def print_save_symb_matrix(matrix, start_name, end_name):
new_matrix = sp.zeros(3, 3)
save_matrix = 9 * [None]
for i in range(0,3):
for j in range(0,3):
if matrix[i,j] == 0 or matrix[i,j] == 1:
new_matrix[i,j] = matrix[i,j]
save_matrix[3*i+j] = None
else:
elem_name = '{}{}{}{}'.format(start_name, i+1, j+1, end_name)
save_matrix[3*i+j] = ' {} = {};\n'.format(elem_name, matrix[i,j]).replace('1.0*','')
new_matrix[i,j] = sp.Symbol(elem_name)
return new_matrix, save_matrix
# write symbolic jacobian of a rotation matrix
def write_symb_Rj(nb_bodies, Rj, xj, xgj, Rj_print, R_matrix, index):
# loop on all the joints
for i in range (1, nb_bodies):
new_matrix = sp.zeros(3, 3)
# loop on all the matrix elements
for j in range(0, 9):
new_matrix[j] = symbolic_jacob_der(Rj, xj, xgj, R_matrix[j], 'q{}'.format(i+1))
[Rj[index-1][i], Rj_print[index-1][i]] = print_save_symb_matrix(new_matrix, 'R{}_'.format(index), '_d{}'.format(i+1))
# write symbolic jacobian of an anchor point
def write_symb_xj(nb_bodies, Rj, xj, xgj, xj_print, x_vector, index):
# loop on all the joints
for i in range (1, nb_bodies):
new_vector = sp.zeros(3, 1)
# loop on all the vector elements
for j in range(0, 3):
new_vector[j] = symbolic_jacob_der(Rj, xj, xgj, x_vector[j], 'q{}'.format(i+1))
[xj[index-1][i], xj_print[index-1][i]] = print_save_symb_vector(new_vector, 'x{}_'.format(index), '_d{}'.format(i+1))
# write symbolic jacobian of a com point
def write_symb_xgj(nb_bodies, Rj, xj, xgj, xgj_print, x_vector, index):
# loop on all the joints
for i in range (1, nb_bodies):
new_vector = sp.zeros(3, 1)
# loop on all the vector elements
for j in range(0, 3):
new_vector[j] = symbolic_jacob_der(Rj, xj, xgj, x_vector[j], 'q{}'.format(i+1))
[xgj[index-1][i], xgj_print[index-1][i]] = print_save_symb_vector(new_vector, 'xg{}_'.format(index), '_d{}'.format(i+1))
# symbolic computation
def symbolic_computation(out_file, nb_bodies, joint_id_names, rot_axis, parent_body_index, Dpt, Dg, M):
out_file.write('\n\n // -- symbolic computation -- //\n')
# Rj, xj, xgj and xgj (jacobian)
Rj = nb_bodies*[None]
xj = nb_bodies*[None]
xgj = nb_bodies*[None]
Rj_print = nb_bodies*[None]
xj_print = nb_bodies*[None]
xgj_print = nb_bodies*[None]
for i in range(0, nb_bodies):
Rj[i] = nb_bodies*[None]
xj[i] = nb_bodies*[None]
xgj[i] = nb_bodies*[None]
Rj_print[i] = nb_bodies*[None]
xj_print[i] = nb_bodies*[None]
xgj_print[i] = nb_bodies*[None]
for j in range(0, nb_bodies-1):
Rj[i][j] = sp.zeros(3, 3)
xj[i][j] = sp.zeros(3, 1)
xgj[i][j] = sp.zeros(3, 1)
Rj_print[i][j] = 9 * [None]
xj_print[i][j] = 3 * [None]
xgj_print[i][j] = 3 * [None]
# rotation matrices
out_file.write('\n // rotation matrices\n')
R = nb_bodies*[None]
Rt = nb_bodies*[None]
Rd = nb_bodies*[None]
Rd[0] = sp.zeros(3, 3)
R[0] = sp.zeros(3, 3)
for i in range(0, 3):
for j in range(0, 3):
R[0][i,j] = sp.Symbol('IMU{}{}'.format(i+1, j+1))
write_symb_Rj(nb_bodies, Rj, xj, xgj, Rj_print, R[0], 1)
R[0] = write_symb_matrix(out_file, R[0], 'R1_', '')
Rt[0] = R[0].T
for i in range(1, nb_bodies):
Rd[i] = get_rotation_matrix(rot_axis[i], 1, sp.Symbol('c{}'.format(i+1)), sp.Symbol('s{}'.format(i+1)))
R[i] = Rd[i] * R[parent_body_index[i]]
write_symb_Rj(nb_bodies, Rj, xj, xgj, Rj_print, R[i], i+1)
R[i] = write_symb_matrix(out_file, R[i], 'R{}_'.format(i+1), '')
Rt[i] = R[i].T
# jacobian rotation matrices
out_file.write('\n // jacobian rotation matrices\n')
out_file.write(' if (flag_jacob)\n {\n')
flag_first = 0
for i in range(0, nb_bodies):
for j in range(1, nb_bodies):
flag_print = 0
for k in range(0, 9):
if Rj_print[i][j][k] is not None:
if not flag_first:
flag_first = 1
flag_print = 1
elif not flag_print:
flag_print = 1
out_file.write('\n')
out_file.write('{}'.format(Rj_print[i][j][k]))
out_file.write(' }\n')
# omega
out_file.write('\n // joint absolute velocities\n')
Om = nb_bodies*[None]
om = nb_bodies*[None]
om_tilde = nb_bodies*[None]
Om[0] = sp.zeros(3, 1)
om[0] = sp.zeros(3, 1)
for i in range(0,3):
om[0][i] = sp.Symbol('omega_{}'.format(i+1))
om[0] = write_symb_vector(out_file, om[0], 'om1_', '')
om_tilde[0] = get_tilde(om[0])
for i in range(1, nb_bodies):
parent_id = parent_body_index[i]
Om[i] = get_vector_axis(rot_axis[i], 1, sp.Symbol('Om{}'.format(i+1)))
om[i] = om[parent_id] + Rt[parent_id] * Om[i]
om[i] = write_symb_vector(out_file, om[i], 'om{}_'.format(i+1), '')
om_tilde[i] = get_tilde(om[i])
# x & xp
out_file.write('\n // anchor point absolute positions and velocities\n')
x = nb_bodies*[None]
xp = nb_bodies*[None]
x[0] = Rt[0] * Dpt[0]
xp[0] = om_tilde[0] * (Rt[0] * Dpt[0])
write_symb_xj(nb_bodies, Rj, xj, xgj, xj_print, x[0], 1)
x[0] = write_symb_vector(out_file, x[0], 'x1_', '')
xp[0] = write_symb_vector(out_file, xp[0], 'xp1_', '')
for i in range(1, nb_bodies):
parent_id = parent_body_index[i]
x[i] = x[parent_id] + Rt[parent_id] * Dpt[i]
xp[i] = xp[parent_id] + om_tilde[parent_id] * (Rt[parent_id] * Dpt[i])
write_symb_xj(nb_bodies, Rj, xj, xgj, xj_print, x[i], i+1)
x[i] = write_symb_vector(out_file, x[i], 'x{}_'.format(i+1), '')
xp[i] = write_symb_vector(out_file, xp[i], 'xp{}_'.format(i+1), '')
# jacobian x
out_file.write('\n // jacobian anchor point positions\n')
out_file.write(' if (flag_jacob)\n {\n')
flag_first = 0
for i in range(0, nb_bodies):
for j in range(1, nb_bodies):
flag_print = 0
for k in range(0, 3):
if xj_print[i][j][k] is not None:
if not flag_first:
flag_first = 1
flag_print = 1
elif not flag_print:
flag_print = 1
out_file.write('\n')
out_file.write('{}'.format(xj_print[i][j][k]))
out_file.write(' }\n')
# xg & xgp
out_file.write('\n // com absolute positions and velocities\n')
xg = nb_bodies*[None]
xgp = nb_bodies*[None]
for i in range(0, nb_bodies):
xg[i] = x[i] + Rt[i] * Dg[i]
xgp[i] = xp[i] + om_tilde[i] * (Rt[i] * Dg[i])
write_symb_xgj(nb_bodies, Rj, xj, xgj, xgj_print, xg[i], i+1)
xg[i] = write_symb_vector(out_file, xg[i], 'xg{}_'.format(i+1), '')
xgp[i] = write_symb_vector(out_file, xgp[i], 'xgp{}_'.format(i+1), '')
# jacobian xg
out_file.write('\n // jacobian com absolute positions\n')
out_file.write(' if (flag_jacob)\n {\n')
flag_first = 0
for i in range(0, nb_bodies):
for j in range(1, nb_bodies):
flag_print = 0
for k in range(0, 3):
if xgj_print[i][j][k] is not None:
if not flag_first:
flag_first = 1
flag_print = 1
elif not flag_print:
flag_print = 1
out_file.write('\n')
out_file.write('{}'.format(xgj_print[i][j][k]))
out_file.write(' }\n')
# results
out_file.write('\n // -- Collecting results -- //\n\n')
com_compute(out_file, nb_bodies, joint_id_names, M, xg, xgp, xgj)
feet_compute(out_file, joint_id_names, R, x, xp, om, Rj, xj, xgj, 6, 12, -0.06, 0.08, -0.045, 0.045)
wrists_compute(out_file, joint_id_names, R, x, xp, om, Rj, xj, xgj, 19, 23, -0.02, -0.005, -0.225)
torso_waist_angles(out_file, R, om, 0, 15)
# generate the symbolic output file
def gen_symbolic_out(out_file_name, nb_bodies, rot_axis, parent_body_index, joint_id_names, Dpt, Dg, M):
# temporary file
in_temp = './{}_temp.cc'.format(out_file_name)
file_temp = open(in_temp, 'w')
# beginning of the file
write_file_beginning(file_temp, joint_id_names)
# variables initialization
write_initialization(file_temp, nb_bodies, joint_id_names)
# symbolic computation
symbolic_computation(file_temp, nb_bodies, joint_id_names, rot_axis, parent_body_index, Dpt, Dg, M)
# end of the file
write_file_end(file_temp)
file_temp.close()
# output file
out_file = open('./{}.cc'.format(out_file_name), 'w')
with open(in_temp, 'r') as f:
# loop on all the lines
for line in f:
# declaration
if len(line.split('// -- variables initialization -- //')) != 1:
out_file.write(' // -- variables declaration -- //\n\n')
print_all_declaration(in_temp, out_file, 100)
out_file.write('\n\n')
# copy temporary file
out_file.write(line)
out_file.close()
# remove temporary file
os.remove(in_temp)
# main script
# rotation axis for each joint before body i (1:x, 2:y, 3:z)
rot_axis = np.array([0, # waist
2, 1, 3, 2, 1, 2, # right leg
2, 1, 3, 2, 1, 2, # left leg
1, 2, 3, # trunk
2, 1, 3, 2, # right arm
2, 1, 3, 2 # left arm
])
# parent index
parent_body_index = np.array([ -1, # waist
0, 1, 2, 3, 4, 5, # right leg
0, 7, 8, 9, 10, 11, # left leg
0, 13, 14, # trunk
15, 16, 17, 18, # right arm
15, 20, 21, 22 # left arm
])
nb_bodies = len(parent_body_index)
## anchor point positions
Dpt = nb_bodies*[None]
# waist
Dpt[0] = sp.Matrix([0.0, 0.0, 0.0])
# right leg
Dpt[1] = sp.Matrix([0.0, sp.Symbol('DPT_2_2'), 0.0])
Dpt[2] = sp.Matrix([0.0, sp.Symbol('DPT_2_6'), 0.0])
Dpt[3] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_8')])
Dpt[4] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_10')])
Dpt[5] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_12')])
Dpt[6] = sp.Matrix([0.0, 0.0, 0.0])
# left leg
Dpt[7] = sp.Matrix([0.0, sp.Symbol('DPT_2_3'), 0.0])
Dpt[8] = sp.Matrix([0.0, sp.Symbol('DPT_2_18'), 0.0])
Dpt[9] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_20')])
Dpt[10] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_22')])
Dpt[11] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_24')])
Dpt[12] = sp.Matrix([0.0, 0.0, 0.0])
# trunk
Dpt[13] = sp.Matrix([sp.Symbol('DPT_1_4'), 0.0, sp.Symbol('DPT_3_4')])
Dpt[14] = sp.Matrix([0.0, 0.0, 0.0])
Dpt[15] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_32')])
# right arm
Dpt[16] = sp.Matrix([sp.Symbol('DPT_1_36'), sp.Symbol('DPT_2_36'), sp.Symbol('DPT_3_36')])
Dpt[17] = sp.Matrix([0.0, sp.Symbol('DPT_2_39'), 0.0])
Dpt[18] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_41')])
Dpt[19] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_43')])
# left arm
Dpt[20] = sp.Matrix([sp.Symbol('DPT_1_37'), sp.Symbol('DPT_2_37'), sp.Symbol('DPT_3_37')])
Dpt[21] = sp.Matrix([0.0, sp.Symbol('DPT_2_46'), 0.0])
Dpt[22] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_48')])
Dpt[23] = sp.Matrix([0.0, 0.0, sp.Symbol('DPT_3_50')])
## COM positions
Dg = nb_bodies*[None]
# waist
Dg[0] = sp.Matrix([sp.Symbol('L_1_6'), sp.Symbol('L_2_6'), sp.Symbol('L_3_6')])
# right leg
Dg[1] = sp.Matrix([sp.Symbol('L_1_7') , sp.Symbol('L_2_7') , sp.Symbol('L_3_7')])
Dg[2] = sp.Matrix([sp.Symbol('L_1_8') , sp.Symbol('L_2_8') , sp.Symbol('L_3_8')])
Dg[3] = sp.Matrix([sp.Symbol('L_1_9') , sp.Symbol('L_2_9') , sp.Symbol('L_3_9')])
Dg[4] = sp.Matrix([sp.Symbol('L_1_10'), sp.Symbol('L_2_10'), sp.Symbol('L_3_10')])
Dg[5] = sp.Matrix([sp.Symbol('L_1_11'), sp.Symbol('L_2_11'), sp.Symbol('L_3_11')])
Dg[6] = sp.Matrix([sp.Symbol('L_1_12'), 0.0 , sp.Symbol('L_3_12')])
# left leg
Dg[7] = sp.Matrix([sp.Symbol('L_1_13'), sp.Symbol('L_2_13'), sp.Symbol('L_3_13')])
Dg[8] = sp.Matrix([sp.Symbol('L_1_14'), sp.Symbol('L_2_14'), sp.Symbol('L_3_14')])
Dg[9] = sp.Matrix([sp.Symbol('L_1_15'), sp.Symbol('L_2_15'), sp.Symbol('L_3_15')])
Dg[10] = sp.Matrix([sp.Symbol('L_1_16'), sp.Symbol('L_2_16'), sp.Symbol('L_3_16')])
Dg[11] = sp.Matrix([sp.Symbol('L_1_17'), sp.Symbol('L_2_17'), sp.Symbol('L_3_17')])
Dg[12] = sp.Matrix([sp.Symbol('L_1_18'), 0.0 , sp.Symbol('L_3_18')])
# trunk
Dg[13] = sp.Matrix([sp.Symbol('L_1_19'), sp.Symbol('L_2_19'), sp.Symbol('L_3_19')])
Dg[14] = sp.Matrix([sp.Symbol('L_1_20'), sp.Symbol('L_2_20'), sp.Symbol('L_3_20')])
Dg[15] = sp.Matrix([sp.Symbol('L_1_21'), sp.Symbol('L_2_21'), sp.Symbol('L_3_21')])
# right arm
Dg[16] = sp.Matrix([sp.Symbol('L_1_22'), sp.Symbol('L_2_22'), sp.Symbol('L_3_22')])
Dg[17] = sp.Matrix([sp.Symbol('L_1_23'), sp.Symbol('L_2_23'), sp.Symbol('L_3_23')])
Dg[18] = sp.Matrix([sp.Symbol('L_1_24'), sp.Symbol('L_2_24'), sp.Symbol('L_3_24')])
Dg[19] = sp.Matrix([sp.Symbol('L_1_25'), sp.Symbol('L_2_25'), sp.Symbol('L_3_25')])
# left arm
Dg[20] = sp.Matrix([sp.Symbol('L_1_26'), sp.Symbol('L_2_26'), sp.Symbol('L_3_26')])
Dg[21] = sp.Matrix([sp.Symbol('L_1_27'), sp.Symbol('L_2_27'), sp.Symbol('L_3_27')])
Dg[22] = sp.Matrix([sp.Symbol('L_1_28'), sp.Symbol('L_2_28'), sp.Symbol('L_3_28')])
Dg[23] = sp.Matrix([sp.Symbol('L_1_29'), sp.Symbol('L_2_29'), sp.Symbol('L_3_29')])
# masses
M = np.array([ 'M_6', # waist
'M_7' , 'M_8' , 'M_9' , 'M_10', 'M_11', 'M_12', # right leg
'M_13', 'M_14', 'M_15', 'M_16', 'M_17', 'M_18', # left leg
'M_19', 'M_20', 'M_21', # trunk
'M_22', 'M_23', 'M_24', 'M_25', # right arm
'M_26', 'M_27', 'M_28', 'M_29' # left arm
])
# joint names
joint_id_names = np.array(['0', # waist
'RightHipPitch_id', 'RightHipRoll_id', 'RightHipYaw_id', 'RightKneePitch_id', 'RightFootRoll_id', 'RightFootPitch_id', # right leg
'LeftHipPitch_id' , 'LeftHipRoll_id' , 'LeftHipYaw_id' , 'LeftKneePitch_id' , 'LeftFootRoll_id' , 'LeftFootPitch_id' , # left leg
'TorsoRoll_id' , 'TorsoPitch_id' , 'TorsoYaw_id' , # trunk
'RightShPitch_id' , 'RightShRoll_id' , 'RightShYaw_id' , 'RightElbPitch_id', # right arm
'LeftShPitch_id' , 'LeftShRoll_id' , 'LeftShYaw_id' , 'LeftElbPitch_id' # left arm
])
out_file_name = 'forward_kinematics'
gen_symbolic_out(out_file_name, nb_bodies, rot_axis, parent_body_index, joint_id_names, Dpt, Dg, M)
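# Illustrative sketch only: elementary rotation matrices for the rot_axis codes used
# above (1: x, 2: y, 3: z). The matrices generated inside symbolic_computation may use
# a different sign convention; this helper is given purely as a reference and is never called.
def _elementary_rotation(axis_code, q):
    c, s = sp.cos(q), sp.sin(q)
    if axis_code == 1:    # rotation about x
        return sp.Matrix([[1, 0, 0], [0, c, -s], [0, s, c]])
    if axis_code == 2:    # rotation about y
        return sp.Matrix([[c, 0, s], [0, 1, 0], [-s, 0, c]])
    if axis_code == 3:    # rotation about z
        return sp.Matrix([[c, -s, 0], [s, c, 0], [0, 0, 1]])
    raise ValueError("axis_code must be 1 (x), 2 (y) or 3 (z)")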
| 32.348506
| 189
| 0.633767
| 8,305
| 45,482
| 3.231788
| 0.047562
| 0.083458
| 0.123398
| 0.068294
| 0.722392
| 0.664791
| 0.587928
| 0.553279
| 0.52038
| 0.481371
| 0
| 0.03719
| 0.178224
| 45,482
| 1,405
| 190
| 32.37153
| 0.680918
| 0.093641
| 0
| 0.433637
| 0
| 0.002026
| 0.215947
| 0.026066
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032421
| false
| 0
| 0.004053
| 0.001013
| 0.067882
| 0.092199
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed07be5c12830ff2ea484a69a77d31923d6aa5cb
| 1,223
|
py
|
Python
|
examples/last.py
|
0xiso/PyMISP
|
20a340414422714dcf31389957343c663550ed1a
|
[
"BSD-2-Clause"
] | 5
|
2019-08-12T15:21:00.000Z
|
2021-10-01T01:50:52.000Z
|
examples/last.py
|
DragonDev1906/PyMISP
|
5c72dc9c33b4ae850d40ff06dfb05c27f3e80e5d
|
[
"BSD-2-Clause"
] | null | null | null |
examples/last.py
|
DragonDev1906/PyMISP
|
5c72dc9c33b4ae850d40ff06dfb05c27f3e80e5d
|
[
"BSD-2-Clause"
] | 3
|
2018-11-22T15:33:16.000Z
|
2019-09-02T14:23:35.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pymisp import PyMISP
from keys import misp_url, misp_key, misp_verifycert
import argparse
import os
import json
# Usage for pipe masters: ./last.py -l 5h | jq .
def init(url, key):
return PyMISP(url, key, misp_verifycert, 'json')
def download_last(m, last, out=None):
result = m.download_last(last)
if out is None:
if 'response' in result:
print(json.dumps(result['response']))
else:
print('No results for that time period')
exit(0)
else:
with open(out, 'w') as f:
f.write(json.dumps(result['response']))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Download latest events from a MISP instance.')
parser.add_argument("-l", "--last", required=True, help="can be defined in days, hours, minutes (for example 5d or 12h or 30m).")
parser.add_argument("-o", "--output", help="Output file")
args = parser.parse_args()
if args.output is not None and os.path.exists(args.output):
print('Output file already exists, aborting.')
exit(0)
misp = init(misp_url, misp_key)
download_last(misp, args.last, args.output)
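# Minimal sketch of driving the helpers above programmatically instead of through
# argparse; the output file name is a placeholder and this function is never called.
def _fetch_last_12h_example(outfile='last_12h.json'):
    misp = init(misp_url, misp_key)            # PyMISP client built from keys.py values
    download_last(misp, '12h', out=outfile)    # write events of the last 12 hours to a file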
| 27.795455
| 133
| 0.644317
| 175
| 1,223
| 4.388571
| 0.52
| 0.046875
| 0.028646
| 0.036458
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009484
| 0.224039
| 1,223
| 43
| 134
| 28.44186
| 0.799789
| 0.072772
| 0
| 0.142857
| 0
| 0
| 0.216622
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.178571
| 0.035714
| 0.285714
| 0.107143
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed0cbfaf8410cb124a4ef21f7ca9796ba91008fc
| 1,146
|
py
|
Python
|
experiments/CUB_fewshot_raw/FRN/ResNet-12/train.py
|
Jf-Chen/FRN-main
|
5b57b9e0d7368058a8e3ba41a53c460b54ab9b91
|
[
"MIT"
] | 43
|
2021-04-27T23:42:35.000Z
|
2022-03-30T02:41:19.000Z
|
experiments/CUB_fewshot_raw/FRN/ResNet-12/train.py
|
Jf-Chen/FRN-main
|
5b57b9e0d7368058a8e3ba41a53c460b54ab9b91
|
[
"MIT"
] | 7
|
2021-05-31T10:38:17.000Z
|
2022-01-06T05:20:08.000Z
|
experiments/CUB_fewshot_raw/FRN/ResNet-12/train.py
|
Jf-Chen/FRN-main
|
5b57b9e0d7368058a8e3ba41a53c460b54ab9b91
|
[
"MIT"
] | 7
|
2021-05-18T00:37:46.000Z
|
2022-01-23T07:09:51.000Z
|
import os
import sys
import torch
import yaml
from functools import partial
sys.path.append('../../../../')
from trainers import trainer, frn_train
from datasets import dataloaders
from models.FRN import FRN
args = trainer.train_parser()
with open('../../../../config.yml', 'r') as f:
temp = yaml.safe_load(f)
data_path = os.path.abspath(temp['data_path'])
fewshot_path = os.path.join(data_path,'CUB_fewshot_raw')
pm = trainer.Path_Manager(fewshot_path=fewshot_path,args=args)
train_way = args.train_way
shots = [args.train_shot, args.train_query_shot]
train_loader = dataloaders.meta_train_dataloader(data_path=pm.train,
way=train_way,
shots=shots,
transform_type=args.train_transform_type)
model = FRN(way=train_way,
shots=[args.train_shot, args.train_query_shot],
resnet=args.resnet)
train_func = partial(frn_train.default_train,train_loader=train_loader)
tm = trainer.Train_Manager(args,path_manager=pm,train_func=train_func)
tm.train(model)
tm.evaluate(model)
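# Minimal sketch of the functools.partial pattern used above: the data loader is bound
# now and Train_Manager supplies the remaining arguments when it calls train_func.
# The names below are toy stand-ins; the function is defined but never called.
def _partial_pattern_demo():
    def toy_train(model, train_loader=None):
        return model, train_loader
    bound = partial(toy_train, train_loader='loader')
    return bound('model')    # -> ('model', 'loader')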
| 30.157895
| 89
| 0.666667
| 153
| 1,146
| 4.745098
| 0.339869
| 0.086777
| 0.053719
| 0.046832
| 0.121212
| 0.121212
| 0.121212
| 0.121212
| 0.121212
| 0.121212
| 0
| 0
| 0.22164
| 1,146
| 38
| 90
| 30.157895
| 0.813901
| 0
| 0
| 0
| 0
| 0
| 0.051439
| 0.01918
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.285714
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed0d0fb11e355951942a4b93a958119ead61c53e
| 6,037
|
py
|
Python
|
exp/DFRdatasets/simulate.py
|
zzzace2000/dropout-feature-rankin
|
7769ce822f3c0a6d23167d11f1569f59e56b1266
|
[
"CC-BY-4.0"
] | 6
|
2019-02-24T07:31:38.000Z
|
2021-12-27T08:57:38.000Z
|
exp/DFRdatasets/simulate.py
|
zzzace2000/dropout-feature-rankin
|
7769ce822f3c0a6d23167d11f1569f59e56b1266
|
[
"CC-BY-4.0"
] | 2
|
2019-01-13T11:49:35.000Z
|
2020-05-18T01:59:15.000Z
|
exp/DFRdatasets/simulate.py
|
zzzace2000/dropout-feature-rankin
|
7769ce822f3c0a6d23167d11f1569f59e56b1266
|
[
"CC-BY-4.0"
] | 6
|
2018-11-06T14:17:07.000Z
|
2021-11-06T14:30:10.000Z
|
import argparse
import os
import numpy as np
import torch
from dataloaders.LoaderBase import LoaderBase
import exp.feature.feature_utils as feature_utils
def run_with_identifier(unit, corr_val, datasize, rank_names, loader, show_ols=True):
loader.clear_cache()
# Ex: 'nn_rank:0.01'. Extract the rank function name ('nn_rank') and the reg_coef (0.01) separately (see the sketch after this function).
result = {}
performance = {}
ranks = {}
for rank_name in rank_names:
the_rank_func_name = rank_name
if ':' in rank_name:
tmp = rank_name.split(':')
the_rank_func_name = tmp[0]
loader.bdnet_hyperparams['reg_coef'] = float(tmp[1])
# Run different datasizes / correlations.
# Ex: datasizes => [100_0, 200_0, 1000]
# Ex: correlations => [1000_0.1, 1000_0.2]
identifier = '%d_%f' % (datasize, corr_val)
# the rank function returns a dictionary (with a 'rank' entry and optionally 'metrics'), not a bare rank
rank_dict = getattr(loader, the_rank_func_name)(testfold=identifier)
if 'metrics' in rank_dict:
for attr_name in rank_dict['metrics']:
performance['%s_%s' % (the_rank_func_name, attr_name)] = rank_dict
metrics = loader.evaluate(rank_dict['rank'])
for attr_name in metrics:
result['%s_%s' % (the_rank_func_name, attr_name)] = metrics[attr_name]
ranks['%s_rank' % the_rank_func_name] = rank_dict['rank']
if show_ols:
performance['ols'] = loader.get_ols_error()
return result, performance, ranks
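# Minimal sketch of how a rank specifier such as 'nn_rank:0.01' is interpreted above;
# defined for illustration only and never called.
def _split_rank_name_example(rank_name='nn_rank:0.01'):
    func_name, reg_coef = rank_name, None
    if ':' in rank_name:
        func_name, coef_str = rank_name.split(':')
        reg_coef = float(coef_str)    # assigned to loader.bdnet_hyperparams['reg_coef']
    return func_name, reg_coef        # -> ('nn_rank', 0.01)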
def run(mode, args):
# Get the loader
loader = LoaderBase.create(
args.dataset, {'visdom_enabled': args.visdom_enabled,
'cuda_enabled': args.cuda,
'nn_cache': args.nn_cache
})
default_params = {
'datasize': 1000, 'corr_val': -1,
'rank_names': args.rank_func, 'show_ols': False,
'loader': loader,
}
if mode == 'correlation':
corr_vals = np.arange(0., 1.0, 0.1)
# corr_vals = [0., 0.1]
containers = feature_utils.run_std_err_params(
'corr_val', values=corr_vals, repeat=args.repeat, val_func=run_with_identifier,
default_params=default_params, num_output_table=2, kept_raw=True)
else:
datasizes = [100, 200, 1000, 3000]
containers = feature_utils.run_std_err_params(
'datasize', values=datasizes, repeat=args.repeat, val_func=run_with_identifier,
default_params=default_params, num_output_table=2, kept_raw=True)
raw = containers.pop()
# Save containers and rank
folder = 'results/{}'.format(args.dataset)
if not os.path.exists(folder):
os.mkdir(folder)
filename = args.identifier + '-%s' % mode
torch.save(containers, '{}/{}.pth'.format(folder, filename))
torch.save(raw, '{}/{}_raw.pth'.format(folder, filename))
def parse_args():
# Training settings
parser = argparse.ArgumentParser(description='train rnn to predict')
# parser.add_argument('--lr', type=float, default=0.001)
# parser.add_argument('--epochs', type=int, default=100)
# parser.add_argument('--reg_coef', type=float, default=0.001)
# parser.add_argument('--batch-size', type=int, default=32)
# parser.add_argument('--batch-print', type=int, default=30)
# parser.add_argument('--save-freq', type=int, default=1)
parser.add_argument('--no-cuda', action='store_true', default=False)
parser.add_argument('--resume', type=str, default=None)
parser.add_argument('--gpu-ids', nargs='+', type=int, default=[0],
help='number of gpus to produce')
parser.add_argument('--identifier', type=str, default='0201')
# parser.add_argument('--reg_coef', type=float, default=None, help='vbd regularization coef!')
parser.add_argument('--dataset', type=str, default='GaussSimulation',
help='["wineqaulity", "OnlineNewsPopularity", '
'"ClassificationONPLoader", "RegSupport2Loader"]')
parser.add_argument('--seed', type=int, default='1234')
parser.add_argument('--repeat', type=int, default=1)
# parser.add_argument('--lookahead', type=int, default=5)
# parser.add_argument('--weighted', action='store_true', default=False)
# parser.add_argument('--reuse-rnn', action='store_true', default=False)
parser.add_argument('--modes', nargs='+', type=str,
default=['correlation'], help='correlation / sizes')
parser.add_argument('--rank_func', nargs='+', type=str,
default=['vbd_linear_rank'], help='nn_rank')
# parser.add_argument('--test_func', nargs='+', type=str, default=['no_test'],
# help='["nn_test_zero", "nn_test_retrain"]')
parser.add_argument('--visdom_enabled', action='store_true', default=True)
parser.add_argument('--no_rank_cache', action='store_true', default=False)
parser.add_argument('--no_nn_cache', action='store_true', default=False)
# parser.add_argument('--start_val', type=int, default=2)
args = parser.parse_args()
args.nn_cache = (not args.no_nn_cache)
args.rank_cache = (not args.no_rank_cache)
args.cuda = (not args.no_cuda) and torch.cuda.is_available()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
print('gpu current device:', torch.cuda.current_device())
torch.cuda.manual_seed(args.seed)
if len(args.gpu_ids) > 0:
print('start using gpu device:', args.gpu_ids)
torch.cuda.set_device(args.gpu_ids[0])
print('args:', args)
print('==================== Start =====================')
print('')
return args
if __name__ == '__main__':
args = parse_args()
if 'other_ranks' in args.rank_func:
args.rank_func.remove('other_ranks')
args.rank_func += ['marginal_rank', 'rf_rank', 'zero_rank',
'shuffle_rank', 'random_rank', 'enet_rank', 'lasso_rank']
for mode in args.modes:
run(mode, args)
| 41.068027
| 98
| 0.629617
| 766
| 6,037
| 4.723238
| 0.244125
| 0.059701
| 0.112769
| 0.024876
| 0.247651
| 0.204533
| 0.204533
| 0.16639
| 0.077944
| 0.050857
| 0
| 0.019533
| 0.219811
| 6,037
| 146
| 99
| 41.349315
| 0.74862
| 0.186848
| 0
| 0.059406
| 0
| 0
| 0.155956
| 0.014327
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029703
| false
| 0
| 0.069307
| 0
| 0.118812
| 0.049505
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed0ede6f5172ebc43a6bba82ff98dc80379f3c8f
| 10,696
|
py
|
Python
|
ucsmsdk/mometa/adaptor/AdaptorMenloQStats.py
|
thinkitdata/ucsmsdk
|
da6599e1dbc1207a30eabe548a7e5791af5f476b
|
[
"Apache-2.0"
] | null | null | null |
ucsmsdk/mometa/adaptor/AdaptorMenloQStats.py
|
thinkitdata/ucsmsdk
|
da6599e1dbc1207a30eabe548a7e5791af5f476b
|
[
"Apache-2.0"
] | null | null | null |
ucsmsdk/mometa/adaptor/AdaptorMenloQStats.py
|
thinkitdata/ucsmsdk
|
da6599e1dbc1207a30eabe548a7e5791af5f476b
|
[
"Apache-2.0"
] | null | null | null |
"""This module contains the general information for AdaptorMenloQStats ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class AdaptorMenloQStatsConsts:
MENLO_QUEUE_COMPONENT_N = "N"
MENLO_QUEUE_COMPONENT_CPU = "cpu"
MENLO_QUEUE_COMPONENT_ETH = "eth"
MENLO_QUEUE_COMPONENT_FC = "fc"
MENLO_QUEUE_COMPONENT_UNKNOWN = "unknown"
MENLO_QUEUE_INDEX_0 = "0"
MENLO_QUEUE_INDEX_0_A = "0_A"
MENLO_QUEUE_INDEX_0_B = "0_B"
MENLO_QUEUE_INDEX_1 = "1"
MENLO_QUEUE_INDEX_1_A = "1_A"
MENLO_QUEUE_INDEX_1_B = "1_B"
MENLO_QUEUE_INDEX_UNKNOWN = "unknown"
SUSPECT_FALSE = "false"
SUSPECT_NO = "no"
SUSPECT_TRUE = "true"
SUSPECT_YES = "yes"
class AdaptorMenloQStats(ManagedObject):
"""This is AdaptorMenloQStats class."""
consts = AdaptorMenloQStatsConsts()
naming_props = set([u'menloQueueComponent', u'menloQueueIndex'])
mo_meta = MoMeta("AdaptorMenloQStats", "adaptorMenloQStats", "menlo-q-stats-comp-[menlo_queue_component]index-[menlo_queue_index]", VersionMeta.Version111j, "OutputOnly", 0xf, [], ["admin", "operations", "read-only"], [u'adaptorUnit'], [u'adaptorMenloQStatsHist'], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version111j, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
"drop_overrun_n0": MoPropertyMeta("drop_overrun_n0", "dropOverrunN0", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"drop_overrun_n0_delta": MoPropertyMeta("drop_overrun_n0_delta", "dropOverrunN0Delta", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"drop_overrun_n0_delta_avg": MoPropertyMeta("drop_overrun_n0_delta_avg", "dropOverrunN0DeltaAvg", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"drop_overrun_n0_delta_max": MoPropertyMeta("drop_overrun_n0_delta_max", "dropOverrunN0DeltaMax", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"drop_overrun_n0_delta_min": MoPropertyMeta("drop_overrun_n0_delta_min", "dropOverrunN0DeltaMin", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"drop_overrun_n1": MoPropertyMeta("drop_overrun_n1", "dropOverrunN1", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"drop_overrun_n1_delta": MoPropertyMeta("drop_overrun_n1_delta", "dropOverrunN1Delta", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"drop_overrun_n1_delta_avg": MoPropertyMeta("drop_overrun_n1_delta_avg", "dropOverrunN1DeltaAvg", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"drop_overrun_n1_delta_max": MoPropertyMeta("drop_overrun_n1_delta_max", "dropOverrunN1DeltaMax", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"drop_overrun_n1_delta_min": MoPropertyMeta("drop_overrun_n1_delta_min", "dropOverrunN1DeltaMin", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"intervals": MoPropertyMeta("intervals", "intervals", "uint", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"menlo_queue_component": MoPropertyMeta("menlo_queue_component", "menloQueueComponent", "string", VersionMeta.Version111j, MoPropertyMeta.NAMING, None, None, None, None, ["N", "cpu", "eth", "fc", "unknown"], []),
"menlo_queue_index": MoPropertyMeta("menlo_queue_index", "menloQueueIndex", "string", VersionMeta.Version111j, MoPropertyMeta.NAMING, None, None, None, None, ["0", "0_A", "0_B", "1", "1_A", "1_B", "unknown"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version111j, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"suspect": MoPropertyMeta("suspect", "suspect", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, ["false", "no", "true", "yes"], []),
"thresholded": MoPropertyMeta("thresholded", "thresholded", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"time_collected": MoPropertyMeta("time_collected", "timeCollected", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", [], []),
"truncate_overrun_n0": MoPropertyMeta("truncate_overrun_n0", "truncateOverrunN0", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"truncate_overrun_n0_delta": MoPropertyMeta("truncate_overrun_n0_delta", "truncateOverrunN0Delta", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"truncate_overrun_n0_delta_avg": MoPropertyMeta("truncate_overrun_n0_delta_avg", "truncateOverrunN0DeltaAvg", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"truncate_overrun_n0_delta_max": MoPropertyMeta("truncate_overrun_n0_delta_max", "truncateOverrunN0DeltaMax", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"truncate_overrun_n0_delta_min": MoPropertyMeta("truncate_overrun_n0_delta_min", "truncateOverrunN0DeltaMin", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"truncate_overrun_n1": MoPropertyMeta("truncate_overrun_n1", "truncateOverrunN1", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"truncate_overrun_n1_delta": MoPropertyMeta("truncate_overrun_n1_delta", "truncateOverrunN1Delta", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"truncate_overrun_n1_delta_avg": MoPropertyMeta("truncate_overrun_n1_delta_avg", "truncateOverrunN1DeltaAvg", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"truncate_overrun_n1_delta_max": MoPropertyMeta("truncate_overrun_n1_delta_max", "truncateOverrunN1DeltaMax", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"truncate_overrun_n1_delta_min": MoPropertyMeta("truncate_overrun_n1_delta_min", "truncateOverrunN1DeltaMin", "ulong", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"update": MoPropertyMeta("update", "update", "uint", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"dropOverrunN0": "drop_overrun_n0",
"dropOverrunN0Delta": "drop_overrun_n0_delta",
"dropOverrunN0DeltaAvg": "drop_overrun_n0_delta_avg",
"dropOverrunN0DeltaMax": "drop_overrun_n0_delta_max",
"dropOverrunN0DeltaMin": "drop_overrun_n0_delta_min",
"dropOverrunN1": "drop_overrun_n1",
"dropOverrunN1Delta": "drop_overrun_n1_delta",
"dropOverrunN1DeltaAvg": "drop_overrun_n1_delta_avg",
"dropOverrunN1DeltaMax": "drop_overrun_n1_delta_max",
"dropOverrunN1DeltaMin": "drop_overrun_n1_delta_min",
"intervals": "intervals",
"menloQueueComponent": "menlo_queue_component",
"menloQueueIndex": "menlo_queue_index",
"rn": "rn",
"sacl": "sacl",
"status": "status",
"suspect": "suspect",
"thresholded": "thresholded",
"timeCollected": "time_collected",
"truncateOverrunN0": "truncate_overrun_n0",
"truncateOverrunN0Delta": "truncate_overrun_n0_delta",
"truncateOverrunN0DeltaAvg": "truncate_overrun_n0_delta_avg",
"truncateOverrunN0DeltaMax": "truncate_overrun_n0_delta_max",
"truncateOverrunN0DeltaMin": "truncate_overrun_n0_delta_min",
"truncateOverrunN1": "truncate_overrun_n1",
"truncateOverrunN1Delta": "truncate_overrun_n1_delta",
"truncateOverrunN1DeltaAvg": "truncate_overrun_n1_delta_avg",
"truncateOverrunN1DeltaMax": "truncate_overrun_n1_delta_max",
"truncateOverrunN1DeltaMin": "truncate_overrun_n1_delta_min",
"update": "update",
}
def __init__(self, parent_mo_or_dn, menlo_queue_component, menlo_queue_index, **kwargs):
self._dirty_mask = 0
self.menlo_queue_component = menlo_queue_component
self.menlo_queue_index = menlo_queue_index
self.child_action = None
self.drop_overrun_n0 = None
self.drop_overrun_n0_delta = None
self.drop_overrun_n0_delta_avg = None
self.drop_overrun_n0_delta_max = None
self.drop_overrun_n0_delta_min = None
self.drop_overrun_n1 = None
self.drop_overrun_n1_delta = None
self.drop_overrun_n1_delta_avg = None
self.drop_overrun_n1_delta_max = None
self.drop_overrun_n1_delta_min = None
self.intervals = None
self.sacl = None
self.status = None
self.suspect = None
self.thresholded = None
self.time_collected = None
self.truncate_overrun_n0 = None
self.truncate_overrun_n0_delta = None
self.truncate_overrun_n0_delta_avg = None
self.truncate_overrun_n0_delta_max = None
self.truncate_overrun_n0_delta_min = None
self.truncate_overrun_n1 = None
self.truncate_overrun_n1_delta = None
self.truncate_overrun_n1_delta_avg = None
self.truncate_overrun_n1_delta_max = None
self.truncate_overrun_n1_delta_min = None
self.update = None
ManagedObject.__init__(self, "AdaptorMenloQStats", parent_mo_or_dn, **kwargs)
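# Minimal usage sketch, assuming a reachable UCS Manager and ucsmsdk's UcsHandle API;
# the host name and credentials are placeholders and this helper is never called.
def _query_menlo_q_stats_example():
    from ucsmsdk.ucshandle import UcsHandle
    handle = UcsHandle("ucsm-host", "admin", "password")
    handle.login()
    try:
        # retrieve all instances of this statistics managed object by class id
        return handle.query_classid("adaptorMenloQStats")
    finally:
        handle.logout()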
| 76.94964
| 277
| 0.708022
| 1,159
| 10,696
| 6.181191
| 0.120794
| 0.094919
| 0.092127
| 0.156337
| 0.551787
| 0.393774
| 0.299693
| 0.294668
| 0.293132
| 0.266611
| 0
| 0.031715
| 0.150991
| 10,696
| 138
| 278
| 77.507246
| 0.757185
| 0.010845
| 0
| 0
| 0
| 0.032
| 0.345066
| 0.223768
| 0
| 0
| 0.001135
| 0
| 0
| 1
| 0.008
| false
| 0
| 0.024
| 0
| 0.216
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed10c3db0c256d5bebae34542a471bf7c8fc94ae
| 6,829
|
py
|
Python
|
src/RS_model/train_mlp.py
|
CindyChen1995/MKR
|
f9ae37903dcf43b6d101dfc08644ce4a29ecbf9d
|
[
"MIT"
] | null | null | null |
src/RS_model/train_mlp.py
|
CindyChen1995/MKR
|
f9ae37903dcf43b6d101dfc08644ce4a29ecbf9d
|
[
"MIT"
] | null | null | null |
src/RS_model/train_mlp.py
|
CindyChen1995/MKR
|
f9ae37903dcf43b6d101dfc08644ce4a29ecbf9d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
Description :
Author : cmy
date: 2020/1/2
-------------------------------------------------
"""
import datetime
import heapq
import numpy as np
import tensorflow as tf
import time
from metrics import ndcg_at_k
from train import get_user_record
from DMF import DMF
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5  # allocate at most 50% of GPU memory
config.gpu_options.allow_growth = True  # grow GPU memory allocation dynamically
def train(args, data, show_loss, show_topk, log_dir):
n_user, n_item = data[0], data[1]
train_data, eval_data, test_data = data[2], data[3], data[4]
model = DMF(args, n_user, n_item)
user_num = 100
k_list = [1, 2, 5, 10, 20, 50, 100]
train_record = get_user_record(train_data, True)
test_record = get_user_record(test_data, False)
user_list = list(set(train_record.keys()) & set(test_record.keys()))
if len(user_list) > user_num:
user_list = np.random.choice(user_list, size=user_num, replace=False)
item_set = set(list(range(n_item)))
with tf.Session(config=config) as sess,\
open(log_dir + 'result_' + str(args.epochs) + '_' + str(args.lr) + '_' + str(int(time.time())) + '.txt', 'w') as f_result:
sess.run(tf.global_variables_initializer())
for step in range(args.epochs):
f_result.write('**************************epoch_i:' + str(step) + '********************' + '\n')
# RS training
np.random.shuffle(train_data)
start = 0
batch_i = 0
while start < train_data.shape[0]:
_, loss = model.train_dmf(sess, get_feed_dict_for_dmf(model, train_data, start, start + args.batch_size, 0.5))
start += args.batch_size
if show_loss:
if (step * (len(train_data) // args.batch_size) + batch_i) % 20 == 0:
time_str = datetime.datetime.now().isoformat()
print('{}: Epoch {:>3} Batch {:>4}/{} train_loss = {:.3f}'.format(
time_str,
step,
batch_i,
(len(train_data) // args.batch_size),
loss))
# print(loss)
batch_i += 1
# CTR evaluation
# train_auc, train_acc = model.eval(sess, get_feed_dict_for_dmf(model, train_data, 0, train_data.shape[0]))
eval_auc, eval_acc = model.eval(sess, get_feed_dict_for_dmf(model, eval_data, 0, eval_data.shape[0]))
test_auc, test_acc = model.eval(sess, get_feed_dict_for_dmf(model, test_data, 0, test_data.shape[0]))
# eval_str = 'epoch %d train auc: %.4f acc: %.4f eval auc: %.4f acc: %.4f test auc: %.4f acc: %.4f' \
# % (step, train_auc, train_acc, eval_auc, eval_acc, test_auc, test_acc)
eval_str = 'epoch %d eval auc: %.4f acc: %.4f test auc: %.4f acc: %.4f' \
% (step, eval_auc, eval_acc, test_auc, test_acc)
print(eval_str)
f_result.write(eval_str + '\n')
# top-K evaluation
if show_topk:
topk_str = ''
precision, recall, f1, hr, ndcg = topk_eval(
sess, model, user_list, train_record, test_record, item_set, k_list)
print('precision: ', end='')
topk_str += 'precision: '
for i in precision:
print('%.4f\t' % i, end='')
topk_str += '%.4f\t' % i
print()
print('recall: ', end='')
topk_str += '\n' + 'recall: '
for i in recall:
print('%.4f\t' % i, end='')
topk_str += '%.4f\t' % i
print()
print('f1: ', end='')
topk_str += '\n' + 'f1: '
for i in f1:
print('%.4f\t' % i, end='')
topk_str += '%.4f\t' % i
print()
print('hr: ', end='')
topk_str += '\n' + 'hr: '
for i in hr:
print('%.4f\t' % i, end='')
topk_str += '%.4f\t' % i
print()
print('ndcg: ', end='')
topk_str += '\n' + 'ndcg: '
for i in ndcg:
print('%.4f\t' % i, end='')
topk_str += '%.4f\t' % i
print()
f_result.write(topk_str + '\n')
def get_feed_dict_for_dmf(model, data, start, end, keep_drop=0.0):
feed_dict = {model.user_indices: data[start:end, 0],
model.item_indices: data[start:end, 1],
model.labels: data[start:end, 2],
model.keep_drop: keep_drop}
return feed_dict
def topk_eval(sess, model, user_list, train_record, test_record, item_set, k_list):
precision_list = {k: [] for k in k_list}
recall_list = {k: [] for k in k_list}
hr_list = {k: [] for k in k_list}
ndcg_list = {k: [] for k in k_list}
total_test = 0
for user in user_list:
test_item_list = list(item_set - train_record[user])
item_score_map = dict()
items, scores = model.get_scores(sess, {model.user_indices: [user] * len(test_item_list),
model.item_indices: test_item_list, model.keep_drop: 0.0})
for item, score in zip(items, scores):
item_score_map[item] = score
item_score_pair_sorted = sorted(item_score_map.items(), key=lambda x: x[1], reverse=True)
item_sorted = [i[0] for i in item_score_pair_sorted]
K_max_item_score = heapq.nlargest(k_list[-1], item_score_map, key=item_score_map.get)
r = []
for i in K_max_item_score:
if i in test_record[user]:
r.append(1)
else:
r.append(0)
for k in k_list:
hit_num = len(set(item_sorted[:k]) & test_record[user])
precision_list[k].append(hit_num / k)
recall_list[k].append(hit_num / len(test_record[user]))
hr_list[k].append(hit_num)
ndcg_list[k].append(ndcg_at_k(r, k))
total_test += len(test_record[user])
precision = [np.mean(precision_list[k]) for k in k_list]
recall = [np.mean(recall_list[k]) for k in k_list]
f1 = [2 / (1 / precision[i] + 1 / recall[i]) for i in range(len(k_list))]
hr = [np.sum(hr_list[k]) / total_test for k in k_list]
ndcg = [np.mean(ndcg_list[k]) for k in k_list]
return precision, recall, f1, hr, ndcg
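# Minimal sketch of the per-user, per-k arithmetic performed inside topk_eval above;
# toy values only, defined but never called.
def _topk_metrics_toy_example(k=2):
    item_sorted = [5, 2, 9, 7]    # items ranked by predicted score
    test_items = {2, 7}           # ground-truth items for this user
    hit_num = len(set(item_sorted[:k]) & test_items)    # 1 hit within the top-2
    precision = hit_num / k                             # 0.5
    recall = hit_num / len(test_items)                   # 0.5
    return precision, recall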
| 41.387879
| 130
| 0.516767
| 912
| 6,829
| 3.623904
| 0.174342
| 0.02118
| 0.030257
| 0.019062
| 0.273828
| 0.24115
| 0.214826
| 0.209985
| 0.16944
| 0.137368
| 0
| 0.020215
| 0.333577
| 6,829
| 165
| 131
| 41.387879
| 0.705999
| 0.086689
| 0
| 0.117188
| 0
| 0.007813
| 0.055779
| 0.005465
| 0.03125
| 0
| 0
| 0
| 0
| 1
| 0.023438
| false
| 0
| 0.070313
| 0
| 0.109375
| 0.132813
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed12384bdaa43735e81fa807c26ed334db11e7a1
| 84,211
|
py
|
Python
|
pylipid.py
|
glass-w/PyLipID
|
ee29f92ba6187cd22b9554a599177152ebed9c4c
|
[
"MIT"
] | null | null | null |
pylipid.py
|
glass-w/PyLipID
|
ee29f92ba6187cd22b9554a599177152ebed9c4c
|
[
"MIT"
] | null | null | null |
pylipid.py
|
glass-w/PyLipID
|
ee29f92ba6187cd22b9554a599177152ebed9c4c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 28 19:28:17 2019
@author: Wanling Song
"""
import mdtraj as md
import numpy as np
import pandas as pd
import argparse
import sys
from collections import defaultdict
import pickle
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import networkx as nx
import seaborn as sns
from matplotlib.ticker import MultipleLocator
from scipy.optimize import curve_fit
from scipy.sparse import coo_matrix
from scipy import sparse
from statsmodels.nonparametric.kernel_density import KDEMultivariate
import community
import warnings
from shutil import copyfile
import datetime
from itertools import product
import logomaker
import re
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.filterwarnings('ignore')
np.seterr(all='ignore')
###################################
###### Parameter settings #######
###################################
parser = argparse.ArgumentParser()
parser.add_argument("-f", nargs="+", metavar="./run/md.xtc", help="List of trajectories, seperated by space, \
Supports xtc, gro format. Used by mdtraj.load()")
parser.add_argument("-c", nargs="+", metavar="./run/system.gro", \
help="List of coordinates of trajectory, in the same order as -f, required when inputs of -f are xtc trajectories, \
Supported formats: gro, pdb, etc. Used by mdtraj.load()")
parser.add_argument("-stride", default=1, metavar=1, help="Striding through trajectories. Only every stride-th will be analized." )
parser.add_argument("-dt", default=None, metavar="None", help="The time interval between two adjacent frames in the trajectories. \
If not specified, the mdtraj will deduce from the trajectories. This works for trajectories in format of e.g. xtc which \
include timestep information. For trajectories in dcd format, users have to provide the time interval manually, \
in a time unit consistent with -tu")
parser.add_argument("-tu", default="us", choices=["ns", "us"], metavar="us", \
help="Time unit for interaction duration calculation. Available options: ns, us. This will affect the unit of koff as well.")
parser.add_argument("-save_dir", default=None, metavar="None", help="The directory where all the generated results will be put in. \
The directory will be created if not existing. Using the current working directory if not specified.")
parser.add_argument("-cutoffs", nargs=2, default=(0.55, 1.0), metavar=(0.55, 1.0), \
help="Double cutoff seperated by space. In unit of nm. Default is 0.55 1.0. The double cutoffs are used to define lipid \
interactions. A continuous lipid contact with a given residue starts when the lipid moves to the given residue \
closer than the smaller cutoff; and ends when the lipid moves farther than the larger cutoff. The standard single \
cutoff can be achieved by setting the same value for both cutoffs.")
parser.add_argument("-lipids", nargs="+", metavar="POPC", default="POPC CHOL POP2", \
help="Lipid species to check, seperated by space. Should be consistent with residue names in your trajectories.")
parser.add_argument("-lipid_atoms", nargs="+", metavar="PO4", default=None, \
help="Lipid atoms to check, seperated by space. Should be consistent with the atom names in your trajectories.")
parser.add_argument("-radii", nargs="+", default=None, metavar="BB:0.26 SC1:0.23", help="Change/Define the radius of atoms/beads \
that is used for the calculation of binding site surface area. Values need to be in the unit of nm. Supported syntax is \
BB:0.26, which defines the radius of bead BB as 0.26 nm, or CA:0.12 which defines the radius of atom CA as 0.12 nm. For \
atomistic simulations, the default radii are taken from \
mdtraj https://github.com/mdtraj/mdtraj/blob/master/mdtraj/geometry/sasa.py#L56. For coarse-grained \
simulations, this script defines the radius of the MARTINI 2 beads of BB as 0.26 nm and SC1/2/3 as 0.23 nm.")
parser.add_argument("-nprot", default=1, metavar="1", \
help="num. of proteins (or chains) in the simulation system. The calculated results will be averaged among these proteins \
(or chains). The proteins (or chains) need to be identical, otherwise the averaging will fail.")
parser.add_argument("-resi_offset", default=0, metavar="0", help="Shifting the residue index. It is useful if you need to change the residue \
index in your trajectories. For example, to change the residue indices from 5,6,7,..., to 10,11,12,..., use -resi_offset 4. \
All the outputs, including plotted figures and saved coordinates, will be changed by this.")
parser.add_argument("-resi_list", nargs="+", default=[], metavar="1-10 20-30", help="The indices of residues on which the calculations are done. \
This option is useful for those proteins with large regions that don't require calculation. Skipping those calculations could \
save time and memory. Accepted syntax include 1/ defining a range, like 1-10 (both ends included); 2/ single residue index, \
like 25 26 17. All the selections are separated by space. For example, -resi_list 1-10 20-30 40 45 46 means selecting \
residues 1-10, 20-30, 40, 45 and 46 for calculation. The residue indices are not affected by -resi_offset, i.e. they \
should be consistent with the indices in your trajectories.")
parser.add_argument("-chain_breaks", nargs="+", default=[], metavar="100 281 420", help="Start a new chain at the X-th residue (starting at 1) in \
the trajectory topology. This identifier is independent of the residue index but checks the residue order in the topology. \
Multiple chain breaks are supported. This option is useful when the simulation system contains \
multiple different chains, or users want to see the difference between chains even if these chains are identical. Using this flag \
will generate separate figures for each of the chains. But the binding site detection will still treat the proteins in the \
system collectively, i.e. those binding sites that cover at multiple chains will be identified.")
parser.add_argument("-nbootstrap", default=10, metavar=10, help="The number of samples for bootstrapping the calcultion of koff. \
The default is 10. The larger the number, the more time-consuming the calculation will be. The closer the bootstrapped \
residence time/koffs are to the original values, the more reliable those original values are. The bootstrapped results \
are plotted in each of the koff plots and shown alongside the original values in the figure showing residence time. ")
parser.add_argument("-save_dataset", nargs="?", default=True, const=True, metavar="True", help="Save dataset in Pickle. Default is True")
parser.add_argument("-gen_binding_poses", default=5, metavar=5, help="The num. of top-scored lipid binding poses to be generated for each binding \
site. The default is 5. A scoring function is generated for each binding site based on the sum of the probability density function of each atom/bead \
of the lipid molecule. Score = sum(PDF(atom_i) * Weight(atom_i)) for atom_i in the lipid molecule. The weight function Weight(atom_i) \
is specified by the flag -score_weights.")
parser.add_argument("-save_pose_format", default="gro", metavar="gro", help="The format the generated lipid binding poses are written into. This function \
is carried out by mdtraj.save(), hence supports the formats that are included by mdtraj. ")
parser.add_argument("-score_weights", nargs="+", default=None, metavar="PO4:1 C1:1", help="The weight of each of the lipid atom/bead contributes to the scoring function. \
Top-rated lipid binding poses can be generated based on users' specification. The bound poses of each binding site are scored based \
on the scoring function Score = sum(PDF(atom_i) * Weight(atom_i)) for atom_i in the lipid molecule.")
parser.add_argument("-letter_map", nargs="+", default=None, metavar="ARG:K GLY:G", help="Map the three-letter amino acids to one letter. This map is \
used in making logomaker figures (https://logomaker.readthedocs.io/en/latest/). The common 20 amino acids are defined \
by this script. Users need to use this flag to define maps for uncommon amino acids in their systems.")
parser.add_argument("-pdb", default=None, metavar="None", help="Provide a PDB structure onto which the binding site information will be mapped. \
Using this flag will generate a 'show_binding_site_info.py' file in the -save_dir directory, which allows users to check the \
mapped binding site information in PyMol. Users can run the generated script by 'python show_binding_site_info.py' \
to open such a PyMol session.")
parser.add_argument("-pymol_gui", nargs="?", default=True, const=True, metavar="True", help="Show the PyMol session of binding site information \
at the end of the calculation. Needs to be used in conjunction with -pdb.")
args = parser.parse_args(sys.argv[1:])
##########################################
########## assisting functions ###########
##########################################
def get_atom_index_for_lipid(lipid, traj, part=None):
whole_atom_index = traj.top.select("resname {}".format(lipid))
if part != None:
parts_atom_index = [traj.topology.atom(idx).index for idx in whole_atom_index if traj.topology.atom(idx).name in part]
return parts_atom_index
else:
return whole_atom_index
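# Minimal usage sketch for the helper above; the trajectory file names are placeholders
# and this function is never called.
def _lipid_atom_selection_example():
    traj = md.load("md.xtc", top="system.gro")    # placeholder trajectory/topology files
    all_popc = get_atom_index_for_lipid("POPC", traj)
    po4_only = get_atom_index_for_lipid("POPC", traj, part=["PO4"])
    return all_popc, po4_only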
class Durations():
def __init__(self, contact_residues_low, contact_residue_high, dt):
self.contact_low = contact_residues_low
self.contact_high = contact_residue_high
self.dt = dt
def cal_duration(self):
self.pointer = [np.zeros_like(self.contact_high[idx], dtype=int) for idx in range(len(self.contact_high))]
durations = []
for i in range(len(self.contact_low)):
for j in range(len(self.contact_low[i])):
pos = np.where(self.contact_high[i] == self.contact_low[i][j])[0][0]
if self.pointer[i][pos] == 0:
durations.append(self.get_duration(i, pos))
if len(durations) == 0:
return [0]
else:
return durations
def get_duration(self, i, j):
count = 1
self.pointer[i][j] = 1
lipid_to_search = self.contact_high[i][j]
for k in range(i+1, len(self.contact_high)):
locations = np.where(self.contact_high[k] == lipid_to_search)[0]
if len(locations) == 0:
return count * self.dt
else:
pos = locations[0]
self.pointer[k][pos] = 1
count +=1
return (count - 1) * self.dt
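# Minimal toy illustration of the dual-cutoff bookkeeping implemented by Durations:
# a contact starts when a lipid appears in the "low" (smaller-cutoff) list and is then
# followed through the "high" (larger-cutoff) list until it leaves. Defined but never called.
def _durations_toy_example():
    # lipid 0 enters the small cutoff at frame 0 and stays within the large cutoff for
    # frames 0-2; with dt = 1.0 the expected result is [2.0]
    contact_low = [np.array([0]), np.array([], dtype=int), np.array([0])]
    contact_high = [np.array([0]), np.array([0]), np.array([0])]
    return Durations(contact_low, contact_high, dt=1.0).cal_duration()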
def cal_interaction_intensity(contact_residues_high):
"""
The percentage of frames in which at least one lipid is found around the selected
residue (occupancy), plus the average number of lipids per contacting frame.
"""
contact_counts = [len(item) for item in contact_residues_high]
mask = np.array(contact_counts) > 0
contact_counts_nonzero = np.array(contact_counts)[mask]
return 100 * len(contact_counts_nonzero)/len(contact_residues_high), np.nan_to_num(contact_counts_nonzero.mean())
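# Minimal toy illustration of the occupancy / lipid-count arithmetic above; defined but
# never called. Lipids are within the larger cutoff in 2 of 4 frames, so the occupancy
# is 50.0 (%) and the average number of lipids per contacting frame is (1 + 2) / 2 = 1.5.
def _occupancy_toy_example():
    toy_contacts = [[1], [], [2, 3], []]
    return cal_interaction_intensity(toy_contacts)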
def cal_sigma(durations, num_of_lipids, T_total, delta_t_range):
sigma = {}
for delta_t in delta_t_range:
if delta_t == 0:
sigma[delta_t] = 1
sigma0 = float(sum([restime - delta_t for restime in durations if restime >= delta_t])) / ((T_total - delta_t) * num_of_lipids)
else:
try:
sigma[delta_t] = float(sum([restime - delta_t for restime in durations if restime >= delta_t])) / ((T_total - delta_t) * num_of_lipids * sigma0)
except ZeroDivisionError:
sigma[delta_t] = 0
return sigma
def cal_restime_koff(sigma, initial_guess):
"""
Fit the bi-exponential curve y = A*e^(-k1*x) + B*e^(-k2*x); the slower fitted rate is reported as koff (see the sketch after bi_expo below).
"""
delta_t_range = list(sigma.keys())
delta_t_range.sort() # x
hist_values = np.nan_to_num([sigma[delta_t] for delta_t in delta_t_range]) # y
try:
popt, pcov = curve_fit(bi_expo, np.array(delta_t_range, dtype=np.float128), np.array(hist_values, dtype=np.float128), p0=initial_guess, maxfev=100000)
n_fitted = bi_expo(np.array(delta_t_range, dtype=np.float128), *popt)
r_squared = 1 - np.sum((np.nan_to_num(n_fitted) - np.nan_to_num(hist_values))**2)/np.sum((hist_values - np.mean(hist_values))**2)
ks = [abs(k) for k in popt[:2]]
koff = np.min(ks)
restime = 1/koff
except RuntimeError:
koff = 0
restime = 0
r_squared = 0
popt = [0, 0, 0, 0]
return restime, koff, r_squared, popt
def bi_expo(x, k1, k2, A, B):
return A*np.exp(-k1*x) + B*np.exp(-k2*x)
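# Minimal sketch of recovering koff from a synthetic bi-exponential decay, mirroring
# cal_restime_koff above (the slower of the two fitted rates is taken as koff and
# 1/koff as the residence time). Defined but never called.
def _koff_fit_toy_example():
    delta_t = np.arange(0.0, 10.0, 0.1)
    y = bi_expo(delta_t, 2.0, 0.1, 0.3, 0.7)    # known rates: 2.0 and 0.1
    popt, _ = curve_fit(bi_expo, delta_t, y, p0=(1., 1., 1., 1.), maxfev=100000)
    koff = np.min([abs(k) for k in popt[:2]])   # should come out close to 0.1
    return koff, 1.0 / koff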
def check_dir(save_dir, suffix=None):
if save_dir == None:
save_dir = os.getcwd()
else:
save_dir = os.path.abspath(save_dir)
if suffix != None:
save_dir = os.path.join(save_dir, suffix)
if not os.path.isdir(save_dir):
print("Creating new director: {}".format(save_dir))
os.makedirs(save_dir)
return save_dir
def sparse_corrcoef(A, B=None):
if B is not None:
A = sparse.vstack((A, B), format='csr')
A = A.astype(np.float64)
n = A.shape[1]
# Compute the covariance matrix
rowsum = A.sum(1)
centering = rowsum.dot(rowsum.T.conjugate()) / n
C = (A.dot(A.T.conjugate()) - centering) / (n - 1)
# The correlation coefficients are given by
# C_{i,j} / sqrt(C_{i} * C_{j})
d = np.diag(C)
coeffs = C / np.sqrt(np.outer(d, d))
return coeffs
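# Minimal sanity-check sketch for sparse_corrcoef: on a small dense matrix converted to
# CSR, the result should agree with np.corrcoef. Defined but never called.
def _sparse_corrcoef_toy_example():
    dense = np.array([[1., 0., 2., 0.],
                      [0., 1., 2., 1.]])
    return sparse_corrcoef(sparse.csr_matrix(dense)), np.corrcoef(dense)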
#####################################
####### Main Class object ###########
#####################################
class LipidInteraction():
def __init__(self, trajfile_list, grofile_list=None, stride=1, dt=None, cutoff=[0.55, 1.0], \
lipid="POPC", lipid_atoms=None, nprot=1, resi_list=[], resi_offset=0, save_dir=None, timeunit="us"):
if grofile_list != None:
assert len(trajfile_list) == len(grofile_list), \
"List of coordinates should be in the same order and length of list of trajectories!"
self.save_dir = check_dir(save_dir)
self.trajfile_list = trajfile_list
self.grofile_list = grofile_list
self.dt = dt
self.nrepeats = len(self.trajfile_list)
self.cutoff = np.sort(np.array(cutoff, dtype=float))
self.lipid = lipid
self.lipid_atoms = lipid_atoms
self.nprot = int(nprot)
self.timeunit = timeunit
self.koff = {}
self.sigmas = {}
self.params = {}
self.r_squared = {}
self.res_time = {}
self.koff_b = {}
self.koff_b_cv = {}
self.res_time_b = {}
self.res_time_b_cv = {}
self.r_squared_b = {}
self.interaction_duration = defaultdict(list)
self.interaction_occupancy = defaultdict(list)
self.lipid_count = defaultdict(list)
self.contact_residues_high = defaultdict(list)
self.contact_residues_low = defaultdict(list)
self.stride = int(stride)
self.resi_offset = resi_offset
self.resi_list = resi_list
self.residue_set = []
self._protein_ref = None
self._lipid_ref = None
return
def _get_traj_stats(self, traj, lipid, lipid_atoms):
lipid_atom_indices = traj.top.select("resn {}".format(self.lipid))
lipid_resi_indices = set()
for atom in lipid_atom_indices:
lipid_resi_indices.add(traj.top.atom(atom).residue.index)
num_of_lipids = len(lipid_resi_indices)
lipid_resi_indices = list(lipid_resi_indices)
lipid_resi_indices.sort()
lipid_resi_indices_original = lipid_resi_indices
if self._lipid_ref == None:
one_lipid_indices = []
for lipid_id in np.sort(traj.top.select("resn {}".format(self.lipid))):
if len(one_lipid_indices) == 0:
one_lipid_indices.append(lipid_id)
elif traj.top.atom(lipid_id).residue.index != traj.top.atom(one_lipid_indices[-1]).residue.index:
break
else:
one_lipid_indices.append(lipid_id)
self._lipid_ref = traj[0].atom_slice(np.unique(one_lipid_indices))
if lipid_atoms != None:
lipid_haystack = get_atom_index_for_lipid(lipid, traj, part=lipid_atoms)
selected_atom_indices = np.hstack([traj.top.select("protein"), lipid_haystack])
new_xyz = [frame[selected_atom_indices] for frame in traj.xyz]
reduced_frame = traj[0].atom_slice(selected_atom_indices)
reduced_top = reduced_frame.top
new_traj = md.Trajectory(new_xyz, reduced_top, time=traj.time, unitcell_lengths=traj.unitcell_lengths, \
unitcell_angles=traj.unitcell_angles)
lipid_resi_indices = [new_traj.top.atom(new_traj.top.select("protein")[-1]).residue.index+1+idx \
for idx in np.arange(num_of_lipids)]
else:
new_traj = traj
all_protein_atom_indices = new_traj.top.select("protein")
natoms_per_protein = int(len(all_protein_atom_indices)/self.nprot)
prot_atom_indices = all_protein_atom_indices[:natoms_per_protein]
nresi_per_protein = new_traj.top.atom(prot_atom_indices[-1]).residue.index - \
new_traj.top.atom(prot_atom_indices[0]).residue.index + 1
selected_protein_resi_set = []
if len(self.resi_list) == 0:
residue_set = ["{}{}".format(new_traj.top.residue(resi).resSeq+self.resi_offset, new_traj.top.residue(resi).name) \
for resi in np.arange(new_traj.top.atom(prot_atom_indices[0]).residue.index, \
new_traj.top.atom(prot_atom_indices[-1]).residue.index + 1)]
residue_set = np.array(residue_set, dtype=str) # residue id in structure instead of builtin index in mdtraj
for protein_idx in range(self.nprot):
selected_protein_resi_set.append(np.unique([new_traj.top.atom(atom_idx).residue.index \
for atom_idx in \
all_protein_atom_indices[protein_idx*natoms_per_protein:(protein_idx+1)*natoms_per_protein]]))
elif len(self.resi_list) > 0:
resi_list = np.sort(np.array(np.hstack(self.resi_list), dtype=int))
for protein_idx in range(self.nprot):
selected_protein_resi_set.append(np.unique([new_traj.top.atom(atom_idx).residue.index \
for atom_idx in \
all_protein_atom_indices[protein_idx*natoms_per_protein:(protein_idx+1)*natoms_per_protein] \
if new_traj.top.atom(atom_idx).residue.resSeq in resi_list]))
residue_set = ["{}{}".format(new_traj.top.residue(resi).resSeq+self.resi_offset, new_traj.top.residue(resi).name) \
for resi in selected_protein_resi_set[0]]
residue_set = np.array(residue_set, dtype=str)
if self._protein_ref == None:
self._protein_ref = new_traj[0].atom_slice(prot_atom_indices)
self._selected_residue_indices = selected_protein_resi_set[0]
return new_traj, {"natoms_per_protein": natoms_per_protein, "nresi_per_protein": nresi_per_protein,
"selected_protein_resi_set": selected_protein_resi_set,
"residue_set": residue_set, "num_of_lipids": num_of_lipids,
"lipid_resi_indices": lipid_resi_indices, "lipid_resi_indices_original": lipid_resi_indices_original}
def cal_interactions(self, save_dir=None, save_dataset=True, nbootstrap=10):
if save_dir == None:
self.save_dir = check_dir(self.save_dir, "Interaction_{}".format(self.lipid))
else:
self.save_dir = check_dir(save_dir, "Interaction_{}".format(self.lipid))
with open("{}/calculation_log_{}.txt".format(self.save_dir, self.lipid), "w") as f:
f.write("###### Lipid: {}\n".format(self.lipid))
f.write("###### Lipid Atoms: {}\n".format(self.lipid_atoms))
f.write("###### Cutoffs: {}\n".format(self.cutoff))
f.write("###### nprot: {}\n".format(self.nprot))
f.write("###### Trajectories:\n")
for traj_fn in self.trajfile_list:
f.write(" {}\n".format(traj_fn))
f.write("###### Coordinates:\n")
for gro_fn in self.grofile_list:
f.write(" {}\n".format(gro_fn))
f.write("\n")
row = []
col = []
data = []
self.num_of_lipids = []
self.lipid_resi_set = []
self.T_total = []
self.timesteps = []
self.nresi_per_protein = []
ncol_start = 0
for traj_idx, trajfile in enumerate(self.trajfile_list):
print("\n########## Start calculation of {} interaction in \n########## {} \n".format(self.lipid, self.trajfile_list[traj_idx]))
f.write("\n###### Start calculation of {} interaction in \n###### {} \n".format(self.lipid, self.trajfile_list[traj_idx]))
traj = md.load(trajfile, top=self.grofile_list[traj_idx], stride=self.stride)
if self.dt == None:
timestep = traj.timestep/1000000.0 if self.timeunit == "us" else traj.timestep/1000.0
else:
timestep = float(self.dt * self.stride)
self.T_total.append((traj.n_frames - 1) * timestep)
self.timesteps.append(timestep)
new_traj, traj_stats = self._get_traj_stats(traj, self.lipid, self.lipid_atoms)
self.num_of_lipids.append(traj_stats["num_of_lipids"])
self.lipid_resi_set.append(traj_stats["lipid_resi_indices_original"])
self.nresi_per_protein.append(len(traj_stats["residue_set"]))
self.residue_set = traj_stats["residue_set"] if len(traj_stats["residue_set"]) > len(self.residue_set) else self.residue_set
ncol_per_protein = traj_stats["num_of_lipids"] * new_traj.n_frames
for idx_protein in np.arange(self.nprot):
for resid, (residue_index, residue) in enumerate(zip(traj_stats["selected_protein_resi_set"][idx_protein], traj_stats["residue_set"])):
pairs = list(product([residue_index], traj_stats["lipid_resi_indices"]))
dist_matrix_resi, _ = md.compute_contacts(new_traj, pairs, scheme="closest", periodic=True)
contact_residues_low = [[] for dummy in np.arange(new_traj.n_frames)]
contact_residues_high = [[] for dummy in np.arange(new_traj.n_frames)]
frame_id_set_low, lipid_id_set_low = np.where(dist_matrix_resi <= self.cutoff[0])
frame_id_set_high, lipid_id_set_high = np.where(dist_matrix_resi <= self.cutoff[1])
for frame_id, lipid_id in zip(frame_id_set_low, lipid_id_set_low):
contact_residues_low[frame_id].append(int(lipid_id))
for frame_id, lipid_id in zip(frame_id_set_high, lipid_id_set_high):
contact_residues_high[frame_id].append(int(lipid_id))
col.append([ncol_start + ncol_per_protein*idx_protein + lipid_id*new_traj.n_frames + \
frame_id for frame_id, lipid_id in zip(frame_id_set_low, lipid_id_set_low)])
contact_low = [np.array(contact, dtype=int) for contact in contact_residues_low]
contact_high = [np.array(contact, dtype=int) for contact in contact_residues_high]
row.append([resid for dummy in np.arange(len(frame_id_set_low))])
data.append(dist_matrix_resi[frame_id_set_low, lipid_id_set_low])
self.contact_residues_high[resid].append(contact_high)
self.contact_residues_low[resid].append(contact_low)
self.interaction_duration[residue].append(Durations(contact_low, contact_high, timestep).cal_duration())
occupancy, lipidcount = cal_interaction_intensity(contact_high)
self.interaction_occupancy[residue].append(occupancy)
self.lipid_count[residue].append(lipidcount)
ncol_start += ncol_per_protein * self.nprot
###############################################
###### get some statistics for this traj ######
###############################################
durations = np.array([np.concatenate(self.interaction_duration[residue][-self.nprot:]).mean() for residue in traj_stats["residue_set"]])
duration_arg_idx = np.argsort(durations)[::-1]
occupancies = np.array([np.mean(self.interaction_occupancy[residue][-self.nprot:]) for residue in traj_stats["residue_set"]])
occupancy_arg_idx = np.argsort(occupancies)[::-1]
lipidcounts = np.array([np.mean(self.lipid_count[residue][-self.nprot:]) for residue in traj_stats["residue_set"]])
lipidcount_arg_idx = np.argsort(lipidcounts)[::-1]
log_text = "10 residues that showed longest average interaction durations ({}):\n".format(self.timeunit)
for residue, duration in zip(traj_stats["residue_set"][duration_arg_idx][:10], durations[duration_arg_idx][:10]):
log_text += "{:^8s} -- {:^8.3f}\n".format(residue, duration)
log_text += "10 residues that showed highest lipid occupancy (100%):\n"
for residue, occupancy in zip(traj_stats["residue_set"][occupancy_arg_idx][:10], occupancies[occupancy_arg_idx][:10]):
log_text += "{:^8s} -- {:^8.2f}\n".format(residue, occupancy)
log_text += "10 residues that have the largest number of surrounding lipids (count):\n"
for residue, lipidcount in zip(traj_stats["residue_set"][lipidcount_arg_idx][:10], lipidcounts[lipidcount_arg_idx][:10]):
log_text += "{:^8s} -- {:^8.2f}\n".format(residue, lipidcount)
print(log_text)
f.write(log_text)
row = np.concatenate(row)
col = np.concatenate(col)
data = np.concatenate(data)
contact_info = coo_matrix((data, (row, col)), shape=(max(self.nresi_per_protein), ncol_start))
self.interaction_covariance = sparse_corrcoef(contact_info)
###################################################
############ calculate and plot koffs #############
###################################################
koff_dir = check_dir(self.save_dir, "Koffs_{}".format(self.lipid))
for residue in self.residue_set:
duration_raw = np.concatenate(self.interaction_duration[residue])
if np.sum(duration_raw) > 0:
bootstrap_results = self.bootstrap(duration_raw, residue, "{}/{}_{}.pdf".format(koff_dir, self.lipid, residue), \
nbootstrap=nbootstrap)
self.sigmas[residue] = bootstrap_results["sigma"]
self.koff[residue] = bootstrap_results["koff"]
self.res_time[residue] = bootstrap_results["restime"]
self.params[residue] = bootstrap_results["params"]
self.r_squared[residue] = bootstrap_results["r_squared"]
self.koff_b[residue] = bootstrap_results["koff_b_avg"]
self.koff_b_cv[residue] = bootstrap_results["koff_b_cv"]
self.res_time_b[residue] = bootstrap_results["res_time_b_avg"]
self.res_time_b_cv[residue] = bootstrap_results["res_time_b_cv"]
self.r_squared_b[residue] = bootstrap_results["r_squared_b_avg"]
else:
delta_t_range = np.arange(0, self.T_total[traj_idx], np.min(self.timesteps))
self.sigmas[residue] = {key:value for key, value in zip(delta_t_range, np.zeros(len(delta_t_range)))}
self.koff[residue] = 0
self.res_time[residue] = 0
self.params[residue] = [0, 0, 0, 0]
self.r_squared[residue] = 0.0
self.koff_b[residue] = 0
self.koff_b_cv[residue] = 0
self.res_time_b[residue] = 0
self.res_time_b_cv[residue] = 0
self.r_squared_b[residue] = 0.0
##############################################
########## wrapping up dataset ###############
##############################################
T_max = np.max(self.T_total)
Res_Time = np.array([self.res_time[residue] for residue in self.residue_set])
Capped = Res_Time > T_max
Res_Time[Capped] = T_max
Res_Time_B = np.array([self.res_time_b[residue] for residue in self.residue_set])
Capped = Res_Time_B > T_max
Res_Time_B[Capped] = T_max
dataset = pd.DataFrame({"Residue": [residue for residue in self.residue_set],
"Residue idx": self._selected_residue_indices,
"Occupancy": np.array([np.mean(self.interaction_occupancy[residue]) \
for residue in self.residue_set]),
"Occupancy_std": np.array([np.std(self.interaction_occupancy[residue]) \
for residue in self.residue_set]),
"Duration": np.array([np.mean(np.concatenate(self.interaction_duration[residue])) \
for residue in self.residue_set]),
"Duration_std": np.array([np.std(np.concatenate(self.interaction_duration[residue])) \
for residue in self.residue_set]),
"Residence Time": Res_Time,
"Capped": Capped,
"R squared": np.array([self.r_squared[residue] for residue in self.residue_set]),
"Koff": np.array([self.koff[residue] for residue in self.residue_set]),
"Residence Time_boot": Res_Time_B,
"Residence Time_boot_cv": np.array([self.res_time_b_cv[residue] for residue in self.residue_set]),
"Koff_boot": np.array([self.koff_b[residue] for residue in self.residue_set]),
"Koff_boot_cv": np.array([self.koff_b_cv[residue] for residue in self.residue_set]),
"R squared_boot": np.array([self.r_squared_b[residue] for residue in self.residue_set]),
"LipidCount": np.array([np.mean(self.lipid_count[residue]) \
for residue in self.residue_set]),
"LipidCount_std": np.array([np.std(self.lipid_count[residue]) \
for residue in self.residue_set])})
dataset.to_csv("{}/Interactions_{}.csv".format(self.save_dir, self.lipid), index=False)
self.dataset = dataset
reminder = """
NOTE:
Occupancy: percentage of frames in which the lipid is in contact
with the given residue (0-100%);
Duration: average length of a continuous interaction of the lipid
with the given residue (in units of {timeunit});
LipidCount: average number of lipids surrounding the given residue within the longer cutoff;
Koff: koff of the lipid with the given residue (in units of ({timeunit})^(-1));
""".format(**{"timeunit": self.timeunit})
print(reminder)
print()
if save_dataset:
dataset_dir = check_dir(self.save_dir, "Dataset")
with open("{}/interaction_durations_{}.pickle".format(dataset_dir, self.lipid), "wb") as f:
pickle.dump(self.interaction_duration, f, 2)
with open("{}/sigmas_{}.pickle".format(dataset_dir, self.lipid), "wb") as f:
pickle.dump(self.sigmas, f, 2)
with open("{}/curve_fitting_params_{}.pickle".format(dataset_dir, self.lipid), "wb") as f:
pickle.dump(self.params, f, 2)
with open("{}/interaction_covariance_matrix_{}.pickle".format(dataset_dir, self.lipid), "wb") as f:
pickle.dump(self.interaction_covariance, f, 2)
return
def bootstrap(self, durations, label, fig_fn, nbootstrap=10):
"""
bootstrap durations to calculate koffs, return bootstrapped values
"""
initial_guess = (1., 1., 1., 1.)
##### prep for plotting ######
plt.rcParams["font.size"] = 10
plt.rcParams["font.weight"] = "bold"
if self.timeunit == "ns":
xlabel = "Duration (ns)"
elif self.timeunit == "us":
xlabel = r"Duration ($\mu s$)"
fig = plt.figure(1, figsize=(8.2, 3.5))
left, width = 0.0975, 0.23
bottom, height = 0.17, 0.75
left_h = left + width + 0.0375
rect_scatter = [left, bottom, width, height]
rect_histy = [left_h, bottom, width, height]
axScatter = fig.add_axes(rect_scatter)
axHisty = fig.add_axes(rect_histy)
######## start bootstrapping ######
delta_t_range = np.arange(0, np.min(self.T_total), np.min(self.timesteps))
duration_sampled_set = [np.random.choice(durations, size=len(durations)) for dummy in range(nbootstrap)]
koff1_sampled_set = []
koff2_sampled_set = []
restime_sampled_set = []
r_squared_sampled_set = []
for duration_sampled in duration_sampled_set:
sigma_sampled = cal_sigma(duration_sampled, len(duration_sampled), np.max(self.T_total), delta_t_range)
hist_values_sampled = np.array([sigma_sampled[delta_t] for delta_t in delta_t_range])
axHisty.plot(delta_t_range, hist_values_sampled, color="gray", alpha=0.5)
restime_sampled, koff_sampled, r_squared_sampled, params_sampled = cal_restime_koff(sigma_sampled, initial_guess)
n_fitted = bi_expo(np.array(delta_t_range), *params_sampled)
r_squared_sampled = 1 - np.sum((np.nan_to_num(n_fitted) - np.nan_to_num(hist_values_sampled))**2)/np.sum((hist_values_sampled - np.mean(hist_values_sampled))**2)
ks_sampled = [abs(k) for k in params_sampled[:2]]
ks_sampled.sort()
koff1_sampled_set.append(ks_sampled[0])
koff2_sampled_set.append(ks_sampled[1])
restime_sampled_set.append(restime_sampled)
r_squared_sampled_set.append(r_squared_sampled)
######## plot original data #########
sigma = cal_sigma(durations, len(durations), np.max(self.T_total), delta_t_range)
x = np.sort(durations)
y = np.arange(len(x)) + 1
axScatter.scatter(x[::-1], y, label=label, s=10)
axScatter.set_xlim(0, x[-1] * 1.1)
axScatter.legend(loc="upper right", prop={"size": 10}, frameon=False)
axScatter.set_ylabel("Sorted Index", fontsize=10, weight="bold")
axScatter.set_xlabel(xlabel, fontsize=10, weight="bold")
hist_values = np.array([sigma[delta_t] for delta_t in delta_t_range])
axHisty.scatter(delta_t_range, hist_values, zorder=8, s=3, label="sigma func.")
axHisty.yaxis.set_label_position("right")
axHisty.yaxis.tick_right()
axHisty.set_xlabel(r"$\Delta t$", fontsize=10, weight="bold")
axHisty.set_ylabel("Probability", fontsize=10, weight="bold")
axHisty.set_yticks([0, 0.25, 0.5, 0.75, 1.0])
axHisty.set_ylim(-0.1, 1.1)
restime, koff, r_squared, params = cal_restime_koff(sigma, initial_guess)
n_fitted = bi_expo(np.array(delta_t_range), *params)
r_squared = 1 - np.sum((np.nan_to_num(n_fitted) - np.nan_to_num(hist_values))**2)/np.sum((hist_values - np.mean(hist_values))**2)
ks = [abs(k) for k in params[:2]]
ks.sort()
axHisty.plot(delta_t_range, n_fitted, 'r--', linewidth=3, zorder=10, label="Fitted biexpo.")
axHisty.legend(loc="upper right", prop={"size": 10}, frameon=False)
######### labels ############
if self.timeunit == "ns":
text = "{:18s} = {:.3f} ns$^{{-1}} $\n".format("$k_{{off1}}$", ks[0])
text += "{:18s} = {:.3f} ns$^{{-1}} $\n".format("$k_{{off2}}$", ks[1])
text += "{:14s} = {:.4f}\n".format("$R^2$", r_squared)
text += "{:18s} = {:.3f} ns$^{{-1}}$ ({:3.1f}%)\n".format("$k_{{off1, boot}}$", np.mean(koff1_sampled_set), 100*np.std(koff1_sampled_set)/np.mean(koff1_sampled_set))
text += "{:18s} = {:.3f} ns$^{{-1}}$ ({:3.1f}%)\n".format("$k_{{off2, boot}}$", np.mean(koff2_sampled_set), 100*np.std(koff2_sampled_set)/np.mean(koff2_sampled_set))
text += "{:18s} = {:.4f}\n".format("$R^2$$_{{boot, avg}}$", np.mean(r_squared_sampled_set))
elif self.timeunit == "us":
text = "{:18s} = {:.3f} $\mu s^{{-1}} $\n".format("$k_{{off1}}$", ks[0])
text += "{:18s} = {:.3f} $\mu s^{{-1}} $\n".format("$k_{{off2}}$", ks[1])
text += "{:14s} = {:.4f}\n".format("$R^2$", r_squared)
text += "{:18s} = {:.3f} $\mu s^{{-1}}$ ({:3.1f}%)\n".format("$k_{{off1, boot}}$", np.mean(koff1_sampled_set), 100*np.std(koff1_sampled_set)/np.mean(koff1_sampled_set))
text += "{:18s} = {:.3f} $\mu s^{{-1}}$ ({:3.1f}%)\n".format("$k_{{off2, boot}}$", np.mean(koff2_sampled_set), 100*np.std(koff2_sampled_set)/np.mean(koff2_sampled_set))
text += "{:18s} = {:.4f}\n".format("$R^2$$_{{boot, avg}}$", np.mean(r_squared_sampled_set))
axHisty.text(1.4, 1.0, text, verticalalignment='top', horizontalalignment='left', transform=axHisty.transAxes, \
fontdict={"size": 8, "weight": "bold"})
plt.savefig(fig_fn, dpi=300)
plt.close()
return {"koff": koff, "restime": restime, "sigma": sigma, "params": params, "r_squared": r_squared,
"koff_b_avg": np.mean(koff1_sampled_set), "koff_b_cv": np.std(koff1_sampled_set)/np.mean(koff1_sampled_set),
"res_time_b_avg": np.mean(restime_sampled_set), "res_time_b_cv": np.std(restime_sampled_set)/np.mean(restime_sampled_set),
"r_squared_b_avg": np.mean(r_squared_sampled_set)}
def cal_interaction_network(self, save_dir=None, pdb=None, pymol_gui=True, save_dataset=True, nbootstrap=10, \
radii=None, gen_binding_poses=5, score_weights=None, save_pose_format="pdb", kde_bw=0.15):
Residue_property_book = {"ARG": "Pos. Charge", "HIS": "Pos. Charge", "LYS": "Pos. Charge",
"ASP": "Neg. Charge", "GLU": "Neg. Charge",
"SER": "Polar", "THR": "Polar", "ASN": "Polar", "GLN": "Polar",
"CYS": "Special", "SEC": "Special", "GLY": "Special", "PRO": "Special",
"ALA": "Hydrophobic", "VAL": "Hydrophobic", "ILE": "Hydrophobic", "LEU": "Hydrophobic",
"MET": "Hydrophobic", "PHE": "Hydrophobic", "TYR": "Hydrophobic", "TRP": "Hydrophobic"}
MARTINI_CG_radii = {"BB": 0.26, "SC1": 0.23, "SC2": 0.23, "SC3": 0.23}
if radii is None:
radii_book = MARTINI_CG_radii
else:
radii_book = {**MARTINI_CG_radii, **radii}
if save_dir is None:
save_dir = check_dir(self.save_dir, "Binding_Sites_{}".format(self.lipid))
else:
save_dir = check_dir(save_dir, "Binding_Sites_{}".format(self.lipid))
interaction_covariance = np.nan_to_num(self.interaction_covariance)
f = open("{}/BindingSites_Info_{}.txt".format(save_dir, self.lipid), "w")
##### write out info ######
reminder = """
# Occupancy: percentage of frames in which the lipid is in contact with the given residue (0-100%);
# Duration/Residence Time: average length of a continuous interaction of the lipid with the given residue (in units of {timeunit});
# Koff: koff of the lipid with the given residue/binding site (in units of ({timeunit})^(-1));
# Pos. Charge: ARG, HIS, LYS;
# Neg. Charge: ASP, GLU;
# Polar: SER, THR, ASN, GLN;
# Hydrophobic: ALA, VAL, ILE, LEU, MET, PHE, TYR, TRP;
# Special: CYS, SEC, GLY, PRO.
""".format(**{"timeunit": self.timeunit})
f.write(reminder)
f.write("\n")
binding_site_id = 0
covariance_network = np.copy(interaction_covariance)
covariance_network[covariance_network < 0.0] = 0.0
residue_network_raw = nx.Graph(covariance_network)
part = community.best_partition(residue_network_raw, weight='weight')
values = [part.get(node) for node in residue_network_raw.nodes()]
binding_site_identifiers = np.ones(len(self.residue_set), dtype=int) * 999
self.interaction_duration_BS = defaultdict(list)
self.interaction_occupancy_BS = defaultdict(list)
self.lipid_count_BS = defaultdict(list)
self.sigmas_BS = {}
self.params_BS = {}
BS_restime = np.zeros(len(self.residue_set))
BS_koff = np.zeros(len(self.residue_set))
BS_rsquared = np.zeros(len(self.residue_set))
BS_duration = np.zeros(len(self.residue_set))
BS_lipidcount = np.zeros(len(self.residue_set))
BS_occupancy = np.zeros(len(self.residue_set))
BS_koff_b = np.zeros(len(self.residue_set))
BS_koff_b_cv = np.zeros(len(self.residue_set))
BS_restime_b = np.zeros(len(self.residue_set))
BS_restime_b_cv = np.zeros(len(self.residue_set))
BS_rsquared_b = np.zeros(len(self.residue_set))
BS_surface_area = np.zeros(len(self.residue_set))
t_total_max = np.max(self.T_total)
node_list_set = []
for value in range(max(values)):
node_list = [k for k,v in part.items() if v == value]
if len(node_list) >= 3:
binding_site_identifiers[node_list] = binding_site_id
node_list_set.append(node_list)
binding_site_id += 1
########### cal site koff and surface area ############
if len(node_list_set) > 0:
surface_area_all = defaultdict(list)
self._coordinate_pool = [[] for dummy in np.arange(len(node_list_set))]
for traj_idx, trajfile in enumerate(self.trajfile_list):
traj = md.load(trajfile, top=self.grofile_list[traj_idx], stride=self.stride)
if self.dt is None:
timestep = traj.timestep/1000000.0 if self.timeunit == "us" else traj.timestep/1000.0
else:
timestep = float(self.dt)
protein_indices_all = traj.top.select("protein")
natoms_per_protein = int(len(protein_indices_all)/self.nprot)
for idx_protein in np.arange(self.nprot):
protein_indices = protein_indices_all[idx_protein*natoms_per_protein:(idx_protein+1)*natoms_per_protein]
for binding_site_id, node_list in enumerate(node_list_set):
contact_BS_low = []
contact_BS_high = []
list_to_take = traj_idx*self.nprot+idx_protein
for frame_idx in range(len(self.contact_residues_high[node_list[0]][list_to_take])):
contact_high_frame = np.unique(np.concatenate([self.contact_residues_high[node][list_to_take][frame_idx] for node in node_list]))
contact_low_frame = np.unique(np.concatenate([self.contact_residues_low[node][list_to_take][frame_idx] for node in node_list]))
contact_BS_high.append(contact_high_frame)
contact_BS_low.append(contact_low_frame)
self.interaction_duration_BS[binding_site_id].append(Durations(contact_BS_low, contact_BS_high, timestep).cal_duration())
occupancy, lipidcount = cal_interaction_intensity(contact_BS_high)
self.interaction_occupancy_BS[binding_site_id].append(occupancy)
self.lipid_count_BS[binding_site_id].append(lipidcount)
########### store lipid binding poses ############
for frame_id in range(len(contact_BS_low)):
for lipid_id in contact_BS_low[frame_id]:
lipid_index = self.lipid_resi_set[traj_idx][lipid_id]
lipid_indices = np.sort([atom.index for atom in traj.top.residue(lipid_index).atoms])
self._coordinate_pool[binding_site_id].append([np.copy(traj.xyz[frame_id, np.hstack([protein_indices, lipid_indices])]), \
np.copy(traj.unitcell_angles[frame_id]), \
np.copy(traj.unitcell_lengths[frame_id])])
### calculate area ###
new_xyz = []
for frame in traj.xyz:
new_frame = frame[protein_indices]
new_xyz.append(new_frame)
reduced_frame = traj[0].atom_slice(protein_indices)
reduced_top = reduced_frame.top
if reduced_top.residue(0).index != 0:
starting_index = reduced_top.residue(0).index
for residue in reduced_top.residues:
residue.index -= starting_index
new_traj = md.Trajectory(new_xyz, reduced_top, time=traj.time, unitcell_lengths=traj.unitcell_lengths, unitcell_angles=traj.unitcell_angles)
areas = md.shrake_rupley(new_traj, mode='residue', change_radii=radii_book)
for binding_site_id, node_list in enumerate(node_list_set):
surface_area_all[binding_site_id].append(areas[:, node_list].sum(axis=1))
########### write and plot results ###########
for binding_site_id in np.arange(len(node_list_set)):
duration_raw = np.concatenate(self.interaction_duration_BS[binding_site_id])
mask = (binding_site_identifiers == binding_site_id)
bootstrap_results = self.bootstrap(duration_raw, "BS id: {}".format(binding_site_id), "{}/BS_koff_id{}.pdf".format(save_dir, binding_site_id), nbootstrap=nbootstrap)
self.sigmas_BS[binding_site_id] = bootstrap_results["sigma"]
self.params_BS[binding_site_id] = bootstrap_results["params"]
BS_restime[mask] = bootstrap_results["restime"]
BS_koff[mask] = bootstrap_results["koff"]
BS_rsquared[mask] = bootstrap_results["r_squared"]
BS_koff_b[mask] = bootstrap_results["koff_b_avg"]
BS_koff_b_cv[mask] = bootstrap_results["koff_b_cv"]
BS_restime_b[mask] = bootstrap_results["res_time_b_avg"]
BS_restime_b_cv[mask] = bootstrap_results["res_time_b_cv"]
BS_rsquared_b[mask] = bootstrap_results["r_squared_b_avg"]
bs_area = np.concatenate(surface_area_all[binding_site_id]).mean()
BS_surface_area[mask] = bs_area
############# write results ###############
f.write("# Binding site {}\n".format(binding_site_id))
BS_restime[mask] = bootstrap_results["restime"] if bootstrap_results["restime"] <= t_total_max else t_total_max
if bootstrap_results["restime"] <= t_total_max:
f.write("{:20s} {:10.3f} {:5s} R squared: {:7.4f}\n".format(" BS Residence Time:", bootstrap_results["restime"], self.timeunit, bootstrap_results["r_squared"]))
else:
f.write("{:20s} {:10.3f} {:5s}** R squared: {:7.4f}\n".format(" BS Residence Time:", t_total_max, self.timeunit, bootstrap_results["r_squared"]))
f.write("{:20s} {:10.3f}\n".format(" BS koff:", bootstrap_results["koff"]))
f.write("{:20s} {:10.3f} +- {:10.3f}\n".format(" BS koff Bootstrap:", bootstrap_results["koff_b_avg"], bootstrap_results["koff_b_cv"]))
duration = np.mean(np.concatenate(self.interaction_duration_BS[binding_site_id]))
BS_duration[mask] = duration
f.write("{:20s} {:10.3f} {:5s}\n".format(" BS Duration:", duration, self.timeunit))
occupancy = np.mean(self.interaction_occupancy_BS[binding_site_id])
BS_occupancy[mask] = occupancy
f.write("{:20s} {:10.3f} %\n".format(" BS Lipid Occupancy:", occupancy))
lipidcount = np.mean(self.lipid_count_BS[binding_site_id])
BS_lipidcount[mask] = lipidcount
f.write("{:20s} {:10.3f}\n".format(" BS Lipid Count:", lipidcount))
f.write("{:20s} {:10.3f} nm^2 +- {:10.3f}\n".format(" BS Surface Area:", bs_area, np.concatenate(surface_area_all[binding_site_id]).std()))
res_stats = {"Pos. Charge": 0, "Neg. Charge": 0, "Polar": 0, "Special": 0, "Hydrophobic": 0}
for residue in self.residue_set[mask]:
res_stats[Residue_property_book[re.findall("[a-zA-Z]+$", residue)[0]]] += 1
BS_num_resi = np.sum(mask)
f.write("{:20s} {:10s}\n".format(" Pos. Charge:", "/".join([str(res_stats["Pos. Charge"]), str(BS_num_resi)])))
f.write("{:20s} {:10s}\n".format(" Neg. Charge:", "/".join([str(res_stats["Neg. Charge"]), str(BS_num_resi)])))
f.write("{:20s} {:10s}\n".format(" Polar:", "/".join([str(res_stats["Polar"]), str(BS_num_resi)])))
f.write("{:20s} {:10s}\n".format(" Hydrophobic:", "/".join([str(res_stats["Hydrophobic"]), str(BS_num_resi)])))
f.write("{:20s} {:10s}\n".format(" Special:", "/".join([str(res_stats["Special"]), str(BS_num_resi)])))
f.write("{:^9s}{:^9s}{:^13s}{:^11s}{:^10s}{:^10s}{:^10s}{:^13s}{:^10s}{:^10s}\n".format("Residue", "Duration", "Duration std", \
"Res. Time", "R squared", "Occupancy", "Occu. std", "Lipid Count", "L. C. std", "Koff"))
for residue in self.residue_set[mask]:
f.write("{Residue:^9s}{Duration:^9.3f}{Duration_std:^13.3f}{Residence Time:^11.3f}{R squared:^10.4f}{Occupancy:^10.3f}{Occupancy_std:^10.3f}{LipidCount:^13.3f}{LipidCount_std:^10.3f}{Koff:^10.4f}\n".format(\
**self.dataset[self.dataset["Residue"]==residue].to_dict("records")[0] ))
f.write("\n")
f.write("\n")
f.close()
######################## plot area stats ##########################
bs_id_set = []
bs_area_set = []
for binding_site_id in surface_area_all.keys():
bs_area_set.append(np.concatenate(surface_area_all[binding_site_id]))
bs_id_set.append([binding_site_id for dummy in np.arange(len(np.concatenate(surface_area_all[binding_site_id])))])
d_area = pd.DataFrame({"BS id": np.concatenate(bs_id_set), "Area (nm^2)": np.concatenate(bs_area_set)})
plt.rcParams["font.size"] = 8
plt.rcParams["font.weight"] = "bold"
if len(surface_area_all.keys()) <= 8:
fig, ax = plt.subplots(figsize=(4.5, 2.8))
elif len(surface_area_all.keys()) > 8 and len(surface_area_all.keys()) <= 15:
fig, ax = plt.subplots(figsize=(6.5, 2.8))
else:
fig, ax = plt.subplots(figsize=(9.5, 3))
sns.violinplot(x="BS id", y="Area (nm^2)", data=d_area, palette="Set3", bw=.2, cut=1, linewidth=1, ax=ax)
ax.set_xlabel("BS id", fontsize=8, weight="bold")
ax.set_ylabel(r"Surface Area (nm$^2$)", fontsize=8, weight="bold")
ax.set_title("{} Binding Site Surface Area".format(self.lipid), fontsize=8, weight="bold")
plt.tight_layout()
plt.savefig("{}/BS_surface_area.pdf".format(save_dir), dpi=300)
plt.close()
################ update dataset ########################
self.dataset["Binding site"] = binding_site_identifiers
self.dataset["BS Residence Time"] = BS_restime
self.dataset["BS koff"] = BS_koff
self.dataset["BS Duration"] = BS_duration
self.dataset["BS Occupancy"] = BS_occupancy
self.dataset["BS LipidCount"] = BS_lipidcount
self.dataset["BS R squared"] = BS_rsquared
self.dataset["BS Residence Time_boot"] = BS_restime_b
self.dataset["BS Residence Time_boot_cv"] = BS_restime_b_cv
self.dataset["BS koff_boot"] = BS_koff_b
self.dataset["BS koff_boot_cv"] = BS_koff_b_cv
self.dataset["BS R squared_boot"] = BS_rsquared_b
self.dataset["BS Surface Area"] = BS_surface_area
self.dataset.to_csv("{}/Interactions_{}.csv".format(self.save_dir, self.lipid), index=False)
################ save dataset ###################
if save_dataset:
dataset_dir = check_dir(self.save_dir, "Dataset")
with open("{}/BS_interaction_duration_{}.pickle".format(dataset_dir, self.lipid), "wb") as f:
pickle.dump(self.interaction_duration_BS, f, 2)
with open("{}/BS_sigmas_{}.pickle".format(dataset_dir, self.lipid), "wb") as f:
pickle.dump(self.sigmas_BS, f, 2)
with open("{}/BS_curve_fitting_params_{}.pickle".format(dataset_dir, self.lipid), "wb") as f:
pickle.dump(self.params_BS, f, 2)
with open("{}/BS_surface_area_{}.pickle".format(dataset_dir, self.lipid), "wb") as f:
pickle.dump(surface_area_all, f, 2)
################## generate binding poses ################
if gen_binding_poses > 0 and len(node_list_set) > 0:
coords_save_dir = check_dir(save_dir, "Binding_Poses")
lipid_atom_map = {atom.index:atom.name for atom in self._lipid_ref.top.atoms}
weights = {name:1 for index, name in lipid_atom_map.items()}
if score_weights is not None:
weights.update(score_weights)
binding_site_id_set = np.arange(len(self._coordinate_pool))
if len(self.resi_list) == 0:
selected_protein_atoms = [[atom.index for atom in residue.atoms] for residue in self._protein_ref.top.residues]
else:
selected_protein_atoms = [[atom.index for atom in residue.atoms] for residue in self._protein_ref.top.residues \
if residue.resSeq in self.resi_list]
lipid_atoms = [self._protein_ref.n_atoms + atom_idx for atom_idx in np.arange(self._lipid_ref.n_atoms)]
joined_top = self._protein_ref.top.join(self._lipid_ref.top)
for binding_site_id in binding_site_id_set:
num_of_poses = gen_binding_poses if gen_binding_poses <= len(self._coordinate_pool[binding_site_id]) \
else len(self._coordinate_pool[binding_site_id])
node_list = node_list_set[binding_site_id]
new_traj = md.Trajectory([frame[0] for frame in self._coordinate_pool[binding_site_id]], joined_top, \
time=np.arange(len(self._coordinate_pool[binding_site_id])), \
unitcell_angles=[frame[1] for frame in self._coordinate_pool[binding_site_id]], \
unitcell_lengths=[frame[2] for frame in self._coordinate_pool[binding_site_id]])
dist_per_atom = [[md.compute_distances(new_traj, list(product([lipid_atoms[idx]], selected_protein_atoms[resi])), periodic=True).min(axis=1) \
for resi in node_list] for idx in np.arange(self._lipid_ref.n_atoms)]
kde_funcs = {}
var_type = ""
bw = []
for idx in range(len(dist_per_atom[0])):
var_type += "c"
bw.append(kde_bw)
try:
for atom_idx in np.arange(self._lipid_ref.n_atoms):
kde_funcs[atom_idx] = KDEMultivariate(data=np.array(dist_per_atom[atom_idx]).T, \
var_type=var_type, bw=bw)
### evaluate binding poses ###
scores = np.sum([weights[lipid_atom_map[idx]] * kde_funcs[idx].pdf() \
for idx in np.arange(self._lipid_ref.n_atoms)], axis=0)
selected_indices = np.argsort(scores)[::-1][:num_of_poses]
###############################
for pose_id in np.arange(num_of_poses, dtype=int):
new_traj[selected_indices[pose_id]].save("{}/BSid{}_No{}.{}".format(coords_save_dir, \
binding_site_id, pose_id, save_pose_format))
except ValueError:
with open("{}/Error.txt".format(coords_save_dir), "a+") as error_file:
error_file.write("BSid {}: Pose generation error -- possibly due to insufficient number of binding event.\n".format(binding_site_id))
######################################################################
###### show binding site residues with scaled spheres in pymol #######
######################################################################
if pdb is not None:
############ check if pdb has a path to it ##########
pdb_new_loc = os.path.join(self.save_dir, os.path.basename(pdb))
copyfile(pdb, pdb_new_loc)
struct_ref = md.load(pdb_new_loc)
########### write out a pymol pml file ###############
binding_site_id += 1
text = """
import pandas as pd
import numpy as np
import mdtraj as md
import pymol
from pymol import cmd
pymol.finish_launching()
dataset = pd.read_csv("{HOME_DIR}/Interactions_{LIPID}.csv")
residue_set = np.array(dataset["Residue"].tolist())
binding_site_id = {BINDING_SITE_ID}
binding_site_identifiers = np.array(dataset["Binding site"].tolist())
struct_ref = md.load("{PDB}")
######### calculate scale ###############
residue_idx_set = dataset["Residue idx"]
interactions = np.zeros(residue_idx_set.max()+1)
values_to_check = dataset["Residence Time"]
interactions[residue_idx_set] = values_to_check
MID = values_to_check.quantile(0.5)
SCALES = 1.5 / 5 + np.exp(-30 * (interactions - MID))
######################################
######## some pymol settings #########
cmd.set("retain_order", 1)
cmd.set("cartoon_oval_length", 1.0)
cmd.set("cartoon_oval_width", 0.3)
cmd.set("cartoon_color", "white")
cmd.set("stick_radius", 0.35)
##################################
cmd.load("{PDB}", "Prot_{LIPID}")
prefix = "Prot_{LIPID}"
cmd.hide("everything")
cmd.show("cartoon", prefix)
cmd.center(prefix)
cmd.orient(prefix)
colors = np.array([np.random.choice(np.arange(256, dtype=float), size=3) for dummy in range(binding_site_id)])
colors /= 255.0
""".format(**{"HOME_DIR": self.save_dir, "LIPID": self.lipid, "BINDING_SITE_ID": binding_site_id, "PDB": pdb_new_loc})
text += r"""
for bs_id in np.arange(binding_site_id):
cmd.set_color("tmp_{}".format(bs_id), list(colors[bs_id]))
for selected_residue in np.where(binding_site_identifiers == bs_id)[0]:
selected_residue_index = residue_idx_set[selected_residue]
selected_atom_indices = np.array([atom.index for atom in struct_ref.top.residue(selected_residue_index).atoms], dtype=str)
selected_resid = struct_ref.top.residue(selected_residue_index).resSeq
selected_resn = struct_ref.top.residue(selected_residue_index).name
cmd.select("BS{}_{}{}".format(bs_id, selected_resid, selected_resn), "rank {} and (not name C+O+N)".format("+".join(selected_atom_indices)))
cmd.show("spheres", "BS{}_{}{}".format(bs_id, selected_resid, selected_resn))
cmd.set("sphere_scale", SCALES[selected_residue_index], selection="BS{}_{}{}".format(bs_id, selected_resid, selected_resn))
cmd.color("tmp_{}".format(bs_id), "BS{}_{}{}".format(bs_id, selected_resid, selected_resn))
cmd.group("BS{}".format(bs_id), "BS{}_*".format(bs_id))
"""
with open("{}/show_binding_sites_info.py".format(self.save_dir), "w") as f:
f.write(text)
################## Launch a pymol session #######################
if pymol_gui:
import pymol
from pymol import cmd
pymol.finish_launching(['pymol', '-q'])
##### do some pymol settings #####
residue_idx_set = self.dataset["Residue idx"]
interactions = np.zeros(residue_idx_set.max()+1)
values_to_check = self.dataset["Residence Time"]
interactions[residue_idx_set] = values_to_check
MID = values_to_check.quantile(0.5)
SCALES = 1.5 / 5 + np.exp(-30 * (interactions - MID))
##### do some pymol settings #####
cmd.set("retain_order", 1)
cmd.set("cartoon_oval_length", 1.0)
cmd.set("cartoon_oval_width", 0.3)
cmd.set("cartoon_color", "white")
cmd.set("stick_radius", 0.35)
##################################
cmd.load(pdb_new_loc, "Prot_{}".format(self.lipid))
prefix = "Prot_{}".format(self.lipid)
cmd.hide("everything")
cmd.show("cartoon", prefix)
cmd.center(prefix)
cmd.orient(prefix)
colors = np.array([np.random.choice(np.arange(256, dtype=float), size=3) for dummy in range(binding_site_id)])
colors /= 255.0
for bs_id in np.arange(binding_site_id):
cmd.set_color("tmp_{}".format(bs_id), list(colors[bs_id]))
for selected_residue in np.where(binding_site_identifiers == bs_id)[0]:
selected_residue_index = residue_idx_set[selected_residue]
selected_atom_indices = np.array([atom.index for atom in struct_ref.top.residue(selected_residue_index).atoms], dtype=str)
selected_resid = struct_ref.top.residue(selected_residue_index).resSeq
selected_resn = struct_ref.top.residue(selected_residue_index).name
cmd.select("{}_BS{}_{}{}".format(self.lipid, bs_id, selected_resid, selected_resn), \
"rank {} and (not name C+O+N)".format("+".join(selected_atom_indices)))
cmd.show("spheres", "{}_BS{}_{}{}".format(self.lipid, bs_id, selected_resid, selected_resn))
cmd.set("sphere_scale", SCALES[selected_residue_index], selection="{}_BS{}_{}{}".format(self.lipid, bs_id, selected_resid, selected_resn))
cmd.color("tmp_{}".format(bs_id), "{}_BS{}_{}{}".format(self.lipid, bs_id, selected_resid, selected_resn))
cmd.group("{}_BS{}".format(self.lipid, bs_id), "{}_BS{}_*".format(self.lipid, bs_id))
return
def plot_interactions(self, item="Duration", save_dir=None, letter_map=None, chain_breaks=[]):
if save_dir is None:
save_dir = check_dir(self.save_dir, "Figures_{}".format(self.lipid))
else:
save_dir = check_dir(save_dir, "Figures_{}".format(self.lipid))
### single-letter dictionary ###
single_letter = {'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K',
'ILE': 'I', 'PRO': 'P', 'THR': 'T', 'PHE': 'F', 'ASN': 'N',
'GLY': 'G', 'HIS': 'H', 'LEU': 'L', 'ARG': 'R', 'TRP': 'W',
'ALA': 'A', 'VAL':'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M'}
if letter_map is not None:
single_letter.update(letter_map)
if len(chain_breaks) == 0:
chain_break_points = [0, len(self.dataset)]
no_break = True
else:
chain_break_points = [0]
for points in chain_breaks:
chain_break_points.append(points)
chain_break_points.append(len(self.dataset))
no_break = False
plt.rcParams["font.size"] = 8
plt.rcParams["font.weight"] = "bold"
for point_idx in np.arange(1, len(chain_break_points), dtype=int):
dataset = self.dataset[chain_break_points[point_idx-1]:chain_break_points[point_idx]]
data = dataset[item]
if len(data) == 0:
continue
resi = np.array([int(re.findall("^[0-9]+", residue)[0]) for residue in self.residue_set])[chain_break_points[point_idx-1]:chain_break_points[point_idx]]
SL_resn = [single_letter[re.findall("[a-zA-Z]+$", residue)[0]] for residue in self.residue_set][chain_break_points[point_idx-1]:chain_break_points[point_idx]]
width = 1
sns.set_style("ticks", {'xtick.major.size': 5.0, 'ytick.major.size': 5.0})
if item == "Residence Time":
if len(data) <= 500:
fig = plt.figure(figsize=(5.5, 5))
elif len(data) > 500 and len(data) <= 1500:
fig = plt.figure(figsize=(7.5, 5))
else:
fig = plt.figure(figsize=(9, 6))
ax_R2 = fig.add_axes([0.18, 0.79, 0.75, 0.10])
ax_capped = fig.add_axes([0.18, 0.71, 0.75, 0.05])
ax_data = fig.add_axes([0.18, 0.50, 0.75, 0.18])
ax_boot = fig.add_axes([0.18, 0.22, 0.75, 0.18])
ax_boot_cv = fig.add_axes([0.18, 0.08, 0.75, 0.10])
ax_boot.xaxis.tick_top()
ax_boot.invert_yaxis()
ax_boot_cv.invert_yaxis()
for ax in [ax_data, ax_capped, ax_R2, ax_boot, ax_boot_cv]:
ax.yaxis.set_ticks_position('left')
ax.spines['right'].set_visible(False)
for ax in [ax_capped, ax_R2, ax_boot_cv]:
ax.xaxis.set_ticks_position('none')
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.set_xticklabels([])
ax_data.spines['top'].set_visible(False)
ax_boot.spines['bottom'].set_visible(False)
if len(data) > 1000:
ax_data.xaxis.set_major_locator(MultipleLocator(200))
ax_data.xaxis.set_minor_locator(MultipleLocator(50))
ax_boot.xaxis.set_major_locator(MultipleLocator(200))
ax_boot.xaxis.set_minor_locator(MultipleLocator(50))
elif len(data) <= 1000 and len(data) > 100:
ax_data.xaxis.set_major_locator(MultipleLocator(100))
ax_data.xaxis.set_minor_locator(MultipleLocator(10))
ax_boot.xaxis.set_major_locator(MultipleLocator(100))
ax_boot.xaxis.set_minor_locator(MultipleLocator(10))
elif len(data) <= 100:
ax_data.xaxis.set_major_locator(MultipleLocator(10))
ax_data.xaxis.set_minor_locator(MultipleLocator(1))
ax_boot.xaxis.set_major_locator(MultipleLocator(10))
ax_boot.xaxis.set_minor_locator(MultipleLocator(1))
if self.timeunit == "ns":
timeunit = " (ns) "
elif self.timeunit == "us":
timeunit = r" ($\mu s$)"
ax_data.bar(resi, data, width, linewidth=0, color="#F75C03")
ax_data.set_ylabel("Res. Time {}".format(timeunit), fontsize=8, weight="bold", va="center")
ax_data.set_xlabel("Residue Index", fontsize=8, weight="bold")
ax_capped.plot(resi, dataset["Capped"]*1, linewidth=0, marker="+", markerfacecolor="#38040E", \
markeredgecolor="#38040E", markersize=2)
ax_capped.set_ylim(0.9, 1.1)
ax_capped.set_yticks([1.0])
ax_capped.set_yticklabels(["Capped"], fontsize=8, weight="bold")
ax_capped.set_xlim(ax_data.get_xlim())
mask = dataset["R squared"] > 0
ax_R2.plot(resi[mask], dataset["R squared"][mask], linewidth=0, marker="+", markerfacecolor="#0FA3B1", markeredgecolor="#0FA3B1", \
markersize=2)
ax_R2.set_xlim(ax_data.get_xlim())
ax_R2.set_ylabel(r"$R^2$", fontsize=8, weight="bold", va="center")
ax_R2.set_title("{} {}".format(self.lipid, item), fontsize=8, weight="bold")
ax_boot.bar(resi, dataset["Residence Time_boot"], width, linewidth=0, color="#F75C03")
ax_boot.set_xlim(ax_data.get_xlim())
ax_boot.set_ylabel("Res. Time \n Boot. {}".format(timeunit), fontsize=8, weight="bold", va="center")
ax_boot.set_xticklabels([])
mask = dataset["R squared_boot"] > 0
mask = dataset["Residence Time_boot_cv"] > 0
ax_boot_cv.plot(resi[mask], dataset["Residence Time_boot_cv"][mask], linewidth=0, marker="+", markerfacecolor="#0FA3B1", markeredgecolor="#F7B538",
markersize=2)
ax_boot_cv.set_ylabel("Coef. Var.", fontsize=8, weight="bold", va="center")
ax_boot_cv.set_xlim(ax_data.get_xlim())
for ax in [ax_data, ax_capped, ax_R2, ax_boot, ax_boot_cv]:
ax.yaxis.set_label_coords(-0.15, 0.5, transform=ax.transAxes)
if no_break:
plt.savefig("{}/{}_{}.pdf".format(save_dir, "_".join(item.split()), self.lipid), dpi=300)
else:
plt.savefig("{}/{}_{}_{}.pdf".format(save_dir, "_".join(item.split()), self.lipid, str(point_idx)), dpi=300)
plt.close()
###### logomaker #####
df = pd.DataFrame({"Resid": resi, "Resn": SL_resn, "Data": data})
matrix = df.pivot(index="Resid", columns='Resn', values="Data").fillna(0)
n_rows = 1 + resi[-1]//100 - resi[0]//100
start = (resi[0]//100)*100
length = start + 100 - resi[0]
fig, axes = plt.subplots(n_rows, 1, figsize=(4.5, 1.3*n_rows), sharey=True)
plt.subplots_adjust(hspace=0.5)
for idx, ax in enumerate(np.atleast_1d(axes)):
if idx == (n_rows - 1):
logomaker.Logo(matrix[(idx-1)*100 + length:], color_scheme="chemistry", ax=ax)
ax.set_xlabel("Residue Index", fontsize=8, weight="bold")
elif idx == 0:
logomaker.Logo(matrix[:length], color_scheme="chemistry", ax=ax)
else:
logomaker.Logo(matrix[(idx-1)*100+length:idx*100+length], color_scheme="chemistry", ax=ax)
ax.xaxis.set_major_locator(MultipleLocator(20))
ax.xaxis.set_minor_locator(MultipleLocator(1))
ax.set_xlim(idx*100+start, (idx+1)*100+start)
ax.set_ylim(0, data.max()*1.05)
ax.set_ylabel("Res. Time {}".format(timeunit), fontsize=8, weight="bold", va="center")
for label in ax.xaxis.get_ticklabels() + ax.yaxis.get_ticklabels():
plt.setp(label, fontsize=8, weight="bold")
plt.tight_layout()
if no_break:
plt.savefig("{}/{}_logo_{}.pdf".format(save_dir, "_".join(item.split()), self.lipid), dpi=300)
else:
plt.savefig("{}/{}_logo_{}_{}.pdf".format(save_dir, "_".join(item.split()), self.lipid, str(point_idx)), dpi=300)
plt.close()
else:
fig, ax = plt.subplots(1, 1, figsize=(4.5,2.8))
ax.bar(resi, data, width, linewidth=0, color=sns.xkcd_rgb["red"])
sns.despine(fig, top=True, right=True, trim=False)
if len(data) > 1000:
ax.xaxis.set_major_locator(MultipleLocator(200))
ax.xaxis.set_minor_locator(MultipleLocator(50))
elif len(data) <= 1000:
ax.xaxis.set_major_locator(MultipleLocator(100))
ax.xaxis.set_minor_locator(MultipleLocator(10))
ax.set_xlabel("Residue Index", fontsize=8, weight="bold")
if self.timeunit == "ns":
timeunit = " (ns) "
elif self.timeunit == "us":
timeunit = r" ($\mu s$)"
if item == "Duration":
ylabel = item + timeunit
elif item == "Occupancy":
ylabel = item + " 100% "
elif item == "LipidCount":
ylabel = "Num. of Lipids"
ax.set_ylabel(ylabel, fontsize=8, weight="bold")
for label in ax.xaxis.get_ticklabels() + ax.yaxis.get_ticklabels():
plt.setp(label, fontsize=8, weight="bold")
ax.set_title("{} {}".format(self.lipid, item), fontsize=8, weight="bold")
plt.tight_layout()
if no_break:
plt.savefig("{}/{}_{}.pdf".format(save_dir, "_".join(item.split()), self.lipid), dpi=300)
else:
plt.savefig("{}/{}_{}_{}.pdf".format(save_dir, "_".join(item.split()), self.lipid, str(point_idx)), dpi=300)
plt.close()
###### logomaker #####
df = pd.DataFrame({"Resid": resi, "Resn": SL_resn, "Data": data})
matrix = df.pivot(index="Resid", columns='Resn', values="Data").fillna(0)
n_rows = 1 + resi[-1]//100 - resi[0]//100
start = (resi[0]//100)*100
length = start + 100 - resi[0]
fig, axes = plt.subplots(n_rows, 1, figsize=(4.5, 1.3*n_rows), sharey=True)
plt.subplots_adjust(hspace=0.5)
for idx, ax in enumerate(np.atleast_1d(axes)):
if idx == (n_rows - 1):
logomaker.Logo(matrix[(idx-1)*100 + length:], color_scheme="chemistry", ax=ax)
ax.set_xlabel("Residue Index", fontsize=8, weight="bold")
elif idx == 0:
logomaker.Logo(matrix[:length], color_scheme="chemistry", ax=ax)
else:
logomaker.Logo(matrix[(idx-1)*100+length:idx*100+length], color_scheme="chemistry", ax=ax)
ax.xaxis.set_major_locator(MultipleLocator(20))
ax.xaxis.set_minor_locator(MultipleLocator(1))
ax.set_xlim(idx*100+start, (idx+1)*100+start)
ax.set_ylim(0, data.max()*1.05)
ax.set_ylabel(ylabel, fontsize=8, weight="bold", va="center")
for label in ax.xaxis.get_ticklabels() + ax.yaxis.get_ticklabels():
plt.setp(label, fontsize=8, weight="bold")
plt.tight_layout()
if no_break:
plt.savefig("{}/{}_logo_{}.pdf".format(save_dir, "_".join(item.split()), self.lipid), dpi=300)
else:
plt.savefig("{}/{}_logo_{}_{}.pdf".format(save_dir, "_".join(item.split()), self.lipid, str(point_idx)), dpi=300)
plt.close()
return
def write_to_pdb(self, item, save_dir=None):
if save_dir is None:
save_dir = check_dir(self.save_dir, "Coordinates_{}".format(self.lipid))
else:
save_dir = check_dir(save_dir, "Coordinates_{}".format(self.lipid))
##### load coords ######
data = self.dataset[item]
coords = self._protein_ref.xyz[0]
table, _ = self._protein_ref.top.to_dataframe()
atom_idx_set = table.serial
resid_set = table.resSeq + self.resi_offset
atom_name_set = table.name
resn_set = table.resName
chainID = [chr(65+int(idx)) for idx in table.chainID]
data_expanded = np.zeros(len(table))
residue_indices = np.array([atom.residue.index for atom in self._protein_ref.top.atoms])
for value, selected_residue_index in zip(data, self._selected_residue_indices):
locations = np.where(residue_indices == selected_residue_index)[0]
data_expanded[locations] = value
######## write out coords ###########
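# The B-factor column of each ATOM record carries the per-residue value of `item`;
# a written record looks roughly like this (hypothetical values, spacing approximate):
# ATOM      1  BB  MET A   1      12.340   5.670   8.900  1.00  2.35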
fn = "{}/Coords_{}.pdb".format(save_dir, "_".join(item.split()))
with open(fn, "w") as f:
for idx in np.arange(self._protein_ref.n_atoms):
coords_dictionary = {"HEADER": "ATOM",
"ATOM_ID": atom_idx_set[idx],
"ATOM_NAME": atom_name_set[idx],
"SPARE": "",
"RESN": resn_set[idx],
"CHAIN_ID": chainID[idx],
"RESI": resid_set[idx],
"COORDX": coords[idx, 0] * 10,
"COORDY": coords[idx, 1] * 10,
"COORDZ": coords[idx, 2] * 10,
"OCCUP": 1.0,
"BFACTOR": data_expanded[idx]}
row = "{HEADER:6s}{ATOM_ID:5d} ".format(**coords_dictionary) +\
"{ATOM_NAME:^4s}{SPARE:1s}{RESN:3s} ".format(**coords_dictionary) +\
"{CHAIN_ID:1s}{RESI:4d}{SPARE:1s} ".format(**coords_dictionary) +\
"{COORDX:8.3f}{COORDY:8.3f}{COORDZ:8.3f}{OCCUP:6.2f}{BFACTOR:6.2f}\n".format(**coords_dictionary)
f.write(row)
f.write("TER")
return
######################################################
########### Load params and do calculation ###########
######################################################
if __name__ == '__main__':
trajfile_list = args.f
grofile_list = args.c
lipid_set = args.lipids
cutoff = [float(data) for data in args.cutoffs]
save_dir = check_dir(args.save_dir)
#######################################################################
######## write a backup file of params for reproducibility ############
fn = os.path.join(save_dir, "pylipid_backup_{}.txt".format(datetime.datetime.now().strftime("%Y_%m_%d_%H%M")))
with open(fn, "w") as f:
f.write("##### Record params for reproducibility #####\n")
f.write("python {}\n".format(" ".join(sys.argv)))
######################################################################
######################### process resi_list ##########################
resi_list = []
if len(args.resi_list) > 0:
for item in args.resi_list:
if "-" in item:
item_list = item.split("-")
resi_list.append(np.arange(int(item_list[0]), int(item_list[-1])+1))
else:
resi_list.append(int(item))
resi_list = np.hstack(resi_list)
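# e.g. "--resi_list 10-20 25 40-45" selects residues 10..20, 25 and 40..45
# (ranges are inclusive at both ends, as parsed above).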
#######################################################################
############################ change of radii ##########################
##### mdtraj default radii:
##### https://github.com/mdtraj/mdtraj/blob/b28df2cd6e5c35fa006fe3c24728857880793abb/mdtraj/geometry/sasa.py#L56
if args.radii is None:
radii_book = None
else:
radii_book = {}
for item in args.radii:
radius = item.split(":")
radii_book[radius[0]] = float(radius[1])
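# e.g. "--radii BB:0.26 SC1:0.23" extends/overrides the default bead radii with
# {"BB": 0.26, "SC1": 0.23} (values in nm, as used by md.shrake_rupley).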
#######################################################################
################# score weight for kde calculation ####################
if args.score_weights is None:
score_weights = None
else:
score_weights = {}
for item in args.score_weights:
weight = item.split(":")
score_weights[weight[0]] = float(weight[1])
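# e.g. "--score_weights PO4:2 GL1:1.5" (hypothetical bead names) weights those lipid
# atoms more heavily when scoring binding poses in cal_interaction_network().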
#######################################################################
################# map three letter to single letter ###################
letter_map = None
if args.letter_map is not None:
letter_map = {}
for item in args.letter_map:
letter_map[item.split(":")[0]] = item.split(":")[1]
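# e.g. "--letter_map HSD:H" (hypothetical residue name) maps a non-standard
# three-letter residue name to the single letter used in the logo plots.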
#######################################################################
################# process chain breaks ################################
chain_breaks = [] if len(args.chain_breaks) == 0 else [int(num)-1 for num in args.chain_breaks]
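# e.g. "--chain_breaks 121 241" (hypothetical) starts a new chain at the 121st and 241st
# entries of the residue list; the values are converted to 0-based row indices above.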
#######################################################################
for lipid in lipid_set:
li = LipidInteraction(trajfile_list, grofile_list, stride=int(args.stride), dt=args.dt, cutoff=cutoff, lipid=lipid, \
lipid_atoms=args.lipid_atoms, nprot=args.nprot, timeunit=args.tu, resi_offset=int(args.resi_offset), \
resi_list=resi_list, save_dir=args.save_dir)
li.cal_interactions(save_dataset=args.save_dataset, nbootstrap=int(args.nbootstrap))
li.plot_interactions(item="Duration", letter_map=letter_map, chain_breaks=chain_breaks)
li.plot_interactions(item="Residence Time", letter_map=letter_map, chain_breaks=chain_breaks)
li.plot_interactions(item="Occupancy", letter_map=letter_map, chain_breaks=chain_breaks)
li.plot_interactions(item="LipidCount", letter_map=letter_map, chain_breaks=chain_breaks)
li.write_to_pdb(item="Duration")
li.write_to_pdb(item="Residence Time")
li.write_to_pdb(item="Occupancy")
li.write_to_pdb(item="LipidCount")
li.cal_interaction_network(pdb=args.pdb, save_dataset=args.save_dataset, \
pymol_gui=args.pymol_gui, radii=radii_book, gen_binding_poses=int(args.gen_binding_poses), \
score_weights=score_weights, save_pose_format=args.save_pose_format)
| 62.332346
| 227
| 0.574723
| 10,588
| 84,211
| 4.346713
| 0.090008
| 0.01408
| 0.013558
| 0.007648
| 0.494405
| 0.426287
| 0.361102
| 0.310171
| 0.283467
| 0.253851
| 0
| 0.020265
| 0.274537
| 84,211
| 1,350
| 228
| 62.378519
| 0.733075
| 0.018941
| 0
| 0.20678
| 0
| 0.033898
| 0.129863
| 0.026538
| 0
| 0
| 0
| 0
| 0.000847
| 1
| 0.014407
| false
| 0
| 0.026271
| 0.000847
| 0.058475
| 0.004237
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
ed123b848cc69e55c673d2f62ec3999397f7c2b8 | 547 | py | Python | main.py | yaakiyu/rt-bot | f68bca95c516e08c31ecc846524dcea4c8ba1503 | ["BSD-4-Clause"] | null | null | null |
main.py | yaakiyu/rt-bot | f68bca95c516e08c31ecc846524dcea4c8ba1503 | ["BSD-4-Clause"] | null | null | null |
main.py | yaakiyu/rt-bot | f68bca95c516e08c31ecc846524dcea4c8ba1503 | ["BSD-4-Clause"] | null | null | null |
# RT by Rext
from asyncio import run
from discord import Intents, Status, Game, AllowedMentions
from core.bot import RT
from data import SECRET
try: from uvloop import install
except ModuleNotFoundError: ...
else: install()
intents = Intents.default()
intents.message_content = True
intents.members = True
bot = RT(
allowed_mentions=AllowedMentions(everyone=False), intents=intents,
status=Status.dnd, activity=Game("起動")
)
bot.print("Now loading...")
try: run(bot.start(SECRET["token"]))
except KeyboardInterrupt: bot.print("Bye")
| 21.038462
| 70
| 0.753199
| 72
| 547
| 5.694444
| 0.569444
| 0.063415
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131627
| 547
| 26
| 71
| 21.038462
| 0.863158
| 0.018282
| 0
| 0
| 0
| 0
| 0.044776
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.294118
| 0
| 0.294118
| 0.117647
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
ed1329b2789d579e2ef82e7b330a86a58150d0b6 | 13,014 | py | Python | hiplot/fetchers_demo.py | dmitryvinn/hiplot | 52fe8b195a4e254240eb1a0847953fa3c1957a43 | ["MIT"] | 1 | 2022-03-21T15:46:17.000Z | 2022-03-21T15:46:17.000Z |
hiplot/fetchers_demo.py | ai2ys/hiplot | 148f7c4eba11c6393957a819169f3cf07c469bec | ["MIT"] | null | null | null |
hiplot/fetchers_demo.py | ai2ys/hiplot | 148f7c4eba11c6393957a819169f3cf07c469bec | ["MIT"] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import uuid
import random
import math
import time
import typing as t
from . import experiment as hip
# Demos from the README. If one of those is modified, please modify the readme as well
def demo_change_column_properties() -> hip.Experiment:
data = [{"param": 1, "loss": 10, "hidden_field": "value1", "c": "red"}, {"param": 2, "loss": 5, "hidden_field": "value2", "c": "black"}]
exp = hip.Experiment.from_iterable(data)
exp.parameters_definition["c"].colors = {"red": "rgb(255, 0, 0)", "black": "rgb(0, 0, 0)"}
exp.parameters_definition["loss"].type = hip.ValueType.NUMERIC_LOG
exp.display_data(hip.Displays.PARALLEL_PLOT).update({
'hide': ['hidden_field'], # This column won't appear in the parallel plot
'order': ['c'] # Column `c` will be displayed first the in parallel plot
})
return exp
def demo_basic_usage() -> hip.Experiment:
data = [{'dropout': 0.1, 'lr': 0.001, 'loss': 10.0, 'optimizer': 'SGD'},
{'dropout': 0.15, 'lr': 0.01, 'loss': 3.5, 'optimizer': 'Adam'},
{'dropout': 0.3, 'lr': 0.1, 'loss': 4.5, 'optimizer': 'Adam'}]
return hip.Experiment.from_iterable(data)
def demo_line_xy() -> hip.Experiment:
# DEMO_LINE_XY_BEGIN
exp = hip.Experiment()
exp.display_data(hip.Displays.XY).update({
'axis_x': 'generation',
'axis_y': 'loss',
})
for i in range(200):
dp = hip.Datapoint(
uid=str(i),
values={
'generation': i,
'param': 10 ** random.uniform(-1, 1),
'loss': random.uniform(-5, 5),
})
if i > 10:
from_parent = random.choice(exp.datapoints[-10:])
dp.from_uid = from_parent.uid # <-- Connect the parent to the child
dp.values['loss'] += from_parent.values['loss'] # type: ignore
dp.values['param'] *= from_parent.values['param'] # type: ignore
exp.datapoints.append(dp)
# DEMO_LINE_XY_END
return exp
def demo_bug_uid() -> hip.Experiment:
return hip.Experiment.from_iterable([{'a': 1, 'b': 2, 'uid': 50.0}, {'a': 2, 'b': 3, 'uid': 49.33}])
def demo(n: int = 100) -> hip.Experiment:
xp = hip.Experiment()
xp.display_data(hip.Displays.XY).update({
'axis_x': 'time',
'axis_y': 'exp_metric',
})
# Some fake PBT-ish data
def fake_params() -> t.Dict[str, hip.DisplayableType]:
r = random.random()
p: t.Dict[str, hip.DisplayableType] = {
"lr": 10 ** random.uniform(-5, 0),
"seed": random.uniform(0, 10),
"name": uuid.uuid4().hex[:6],
"optimizer": random.choice(["sgd", "adam", "adamw"]),
"r": r,
"c": random.choice(["red", "green", "black"]),
}
if r < 0.1:
del p['optimizer']
if r > 0.3:
p["optionA"] = random.uniform(1, 5)
else:
p["optionB"] = random.uniform(1, 5)
if r < 0.2:
p["pctile"] = -1.0
elif r < 0.5:
p["pctile"] = random.uniform(-1.0, 10.0)
elif r < 0.8:
p["pctile"] = 10 ** random.uniform(1, 2)
else:
p["pctile"] = random.uniform(100, 101)
if random.random() > 0.3:
p["special_values"] = random.uniform(1, 5)
else:
p["special_values"] = random.choice([math.inf, -math.inf, math.nan])
return p
def fake_metrics(tm: float) -> t.Dict[str, hip.DisplayableType]:
return {
"exp_metric": 10 ** random.uniform(-5, 0),
"pct_success": random.uniform(10, 90),
"chkpt": uuid.uuid4().hex[:6],
"time": tm + random.uniform(-0.2, 0.2),
"force_numericlog": random.uniform(1, 100),
'timestamp': int(time.time() + (task_idx * 2000)),
}
current_pop: t.List[t.Dict[str, t.Any]] = [dict(uid=f"init{i}", params=fake_params(), last_ckpt_uid=None) for i in range(10)]
continue_num = 0
for task_idx in range(n):
# All drop checkpoints
for p in current_pop:
ckpt_uid = f"{p['uid']}_{uuid.uuid4().hex[:6]}"
xp.datapoints.append(hip.Datapoint(uid=ckpt_uid, from_uid=p['last_ckpt_uid'], values={**p['params'], **fake_metrics(task_idx)}))
p['last_ckpt_uid'] = ckpt_uid
# Randomly drop some
current_pop = [p for p in current_pop if random.random() > 0.3]
# Respawn as needed
for _ in range(10 - len(current_pop)):
continue_num += 1
parent = random.choice(xp.datapoints[-10:])
current_pop.append(dict(uid=f"continue{continue_num}", params=fake_params(), last_ckpt_uid=parent.uid))
xp.parameters_definition["c"].colors = {"red": "rgb(255, 0, 0)", "green": "rgb(0, 255, 0)", "black": "rgb(0, 0, 0)"}
xp.parameters_definition["force_numericlog"].type = hip.ValueType.NUMERIC_LOG
xp.parameters_definition["pctile"].type = hip.ValueType.NUMERIC_PERCENTILE
xp.parameters_definition["timestamp"].type = hip.ValueType.TIMESTAMP
return xp
def demo_customize() -> hip.Experiment:
exp = demo()
# EXPERIMENT_SETTINGS_SNIPPET2_BEGIN
# Provide configuration for the parallel plot
exp.display_data(hip.Displays.PARALLEL_PLOT).update({
# Hide some columns in the parallel plot
'hide': ['optionB'],
# Specify the order for others
'order': ['time'], # Put column time first on the left
})
# Provide configuration for the table with all the rows
exp.display_data(hip.Displays.TABLE).update({
# Don't display `uid` and `from_uid` columns to the user
'hide': ['uid', 'from_uid'],
# In the table, order rows by default
'order_by': [['pct_success', 'desc']],
# Specify the order for columns
'order': ['time'], # Put column time first on the left
})
# Provide configuration for the XY graph
exp.display_data(hip.Displays.XY).update({
# Default X axis for the XY plot
'axis_x': 'time',
# Default Y axis
'axis_y': 'lr',
# Configure lines
'lines_thickness': 1.0,
'lines_opacity': 0.1,
# Configure dots
'dots_thickness': 2.0,
'dots_opacity': 0.3,
})
# EXPERIMENT_SETTINGS_SNIPPET2_END
return exp
def demo_force_scale() -> hip.Experiment:
xp = hip.Experiment()
for _ in range(100):
values = [abs(random.gauss(0.0, 1.0)) for _ in range(4)]
xp.datapoints.append(hip.Datapoint({
f"value{i}": v / sum(values)
for i, v in enumerate(values)
}))
for i in range(4):
xp.parameters_definition[f"value{i}"].force_range(0.0, 1.0)
return xp
def demo_distribution(**kwargs: t.Any) -> hip.Experiment:
xp = hip.Experiment.from_iterable([{
'cat': random.choice(["a", "b", "c", "d", "e", "f", "g", "h"]),
'numeric': random.uniform(0.0, 1.0),
} for i in range(1000)])
xp.display_data(hip.Displays.DISTRIBUTION).update(kwargs)
return xp
def demo_bool() -> hip.Experiment:
return hip.Experiment.from_iterable([
{"bool": True},
{"bool": False}
])
def demo_color_interpolate() -> hip.Experiment:
exp = demo()
exp.parameters_definition["exp_metric"].colormap = "interpolateSinebow"
return exp
def demo_color_scheme_ylrd() -> hip.Experiment:
exp = demo()
exp.parameters_definition["exp_metric"].colormap = "schemeYlOrRd"
return exp
def demo_color_scheme_accent() -> hip.Experiment:
exp = demo()
exp.parameters_definition["exp_metric"].colormap = "schemeAccent"
return exp
def demo_color_interpolate_inverse() -> hip.Experiment:
exp = demo_color_interpolate()
assert exp.parameters_definition["exp_metric"].colormap is not None
exp.parameters_definition["exp_metric"].colormap += "#inverse"
return exp
def demo_axis_style() -> hip.Experiment:
data: t.List[t.Dict[str, t.Any]] = []
for _ in range(100):
data.append({
**{
f'param{i}': random.uniform(0, 1)
for i in range(6)
},
'loss': random.uniform(0, 100),
'metric': 10 ** random.uniform(0, 10)
})
xp = hip.Experiment.from_iterable(data)
for i in range(6):
xp.parameters_definition[f"param{i}"].label_css = "badge badge-pill badge-secondary"
xp.parameters_definition["loss"].label_css = "badge badge-pill badge-primary"
xp.parameters_definition["metric"].label_css = "badge badge-pill badge-info"
return xp
def demo_categorical() -> hip.Experiment:
data: t.List[t.Dict[str, t.Any]] = []
for _ in range(100):
data.append({
'cat_num_05': random.randint(0, 5),
'cat_num_15': random.randint(0, 10),
'cat_num_25': random.randint(0, 25),
'cat_str_05': f's{random.randint(0, 5)}',
'cat_str_15': f's{random.randint(0, 15)}',
'cat_str_25': f's{random.randint(0, 25)}',
})
xp = hip.Experiment.from_iterable(data)
for param in ["cat_num_05", "cat_num_15", "cat_num_25"]:
xp.parameters_definition[param].type = hip.ValueType.CATEGORICAL
xp.colorby = 'cat_num_25'
return xp
def demo_long_names() -> hip.Experiment:
return hip.Experiment.from_iterable([
{
'some very very long name for a field': random.randint(0, 5),
'this one is also very long': random.randint(0, 10),
'another.long.one.but.with.dots': random.randint(0, 25),
}
for _ in range(100)
])
def demo_force_constant_pplot() -> hip.Experiment:
exp = hip.Experiment.from_iterable([
{'uid': 123, 'a': 1, 'b': 3},
{'uid': 345, 'a': 2, 'b': 3}
])
exp.parameters_definition["b"].force_range(0, 100)
return exp
def demo_first_value_nan() -> hip.Experiment:
return hip.Experiment.from_iterable([
{},
{'a': None},
{'a': 2},
{'a': 2.1},
{'a': 2.2},
{'a': 5.5},
{'a': math.nan},
])
def demo_weighted_rows() -> hip.Experiment:
experiment = hip.Experiment.from_iterable([
{'w': 1.0, 'a': 1, 'b': 1},
{'w': 2.0, 'a': 2, 'b': 1},
{'w': -2.0, 'a': 2, 'b': 1},
{'w': math.inf, 'a': 2, 'b': 2},
{'w': 'not_a_number', 'a': 2, 'b': 3},
{'w': None, 'a': 3, 'b': 3},
{'a': 4, 'b': 3},
])
experiment.weightcolumn = "w"
return experiment
def demo_3xcols() -> hip.Experiment:
xp = demo()
for i in range(2):
new_xp = demo()
for dp, new_dp in zip(xp.datapoints, new_xp.datapoints):
dp.values.update({
f"{k}{i}": v
for k, v in new_dp.values.items()
})
return xp
def demo_col_html() -> hip.Experiment:
COL1 = "<h1>col1</h1>"
COL2 = "col_2"
experiment = hip.Experiment.from_iterable([
{COL1: 1.0, COL2: 1},
{COL1: 2.0, COL2: 2},
{COL1: 3.0, COL2: 3},
])
experiment.parameters_definition[COL2].label_html = "col<sub>2</sub>"
return experiment
def demo_disable_table() -> hip.Experiment:
experiment = demo()
experiment.enabledDisplays.remove(hip.Displays.TABLE)
return experiment
def demo_big_floats() -> hip.Experiment:
return hip.Experiment.from_iterable(
{
'bigfloat': math.nan if i < 10 else 10 ** random.uniform(15, 32),
}
for i in range(100)
)
README_DEMOS: t.Dict[str, t.Callable[[], hip.Experiment]] = {
"demo": demo,
"demo_3xcols": demo_3xcols,
"demo_big": lambda: demo(1000),
"demo_change_column_properties": demo_change_column_properties,
"demo_basic_usage": demo_basic_usage,
"demo_line_xy": demo_line_xy,
"demo_bug_uid": demo_bug_uid,
"demo_force_scale": demo_force_scale,
"demo_distribution_cat": lambda: demo_distribution(axis="cat"),
"demo_distribution_num": lambda: demo_distribution(axis="numeric"),
"demo_distribution_num_100bins": lambda: demo_distribution(axis="numeric", nbins=100),
"demo_bool": demo_bool,
"demo_color_interpolate": demo_color_interpolate,
"demo_color_scheme_ylrd": demo_color_scheme_ylrd,
"demo_color_scheme_accent": demo_color_scheme_accent,
"demo_axis_style": demo_axis_style,
"demo_categorical": demo_categorical,
"demo_customize": demo_customize,
"demo_long_names": demo_long_names,
"demo_force_constant_pplot": demo_force_constant_pplot,
"demo_color_interpolate_inverse": demo_color_interpolate_inverse,
"demo_first_value_nan": demo_first_value_nan,
"demo_weighted_rows": demo_weighted_rows,
"demo_col_html": demo_col_html,
"demo_disable_table": demo_disable_table,
"demo_big_floats": demo_big_floats,
}
| 34.247368
| 140
| 0.593592
| 1,749
| 13,014
| 4.242424
| 0.171527
| 0.070081
| 0.029784
| 0.043801
| 0.336388
| 0.189084
| 0.14434
| 0.1031
| 0.081536
| 0.058625
| 0
| 0.03507
| 0.250653
| 13,014
| 379
| 141
| 34.337731
| 0.7258
| 0.08245
| 0
| 0.216949
| 0
| 0
| 0.159039
| 0.025863
| 0
| 0
| 0
| 0
| 0.00339
| 1
| 0.084746
| false
| 0
| 0.020339
| 0.020339
| 0.189831
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
ed14a6749afbe24501971f360abe8e3e8754902d | 423 | py | Python | barcode.py | kallangerard/grocery-barcode-scanner | 0a866c5b20c43355b642c0b78ba09d5cf4b0383c | ["MIT"] | null | null | null |
barcode.py | kallangerard/grocery-barcode-scanner | 0a866c5b20c43355b642c0b78ba09d5cf4b0383c | ["MIT"] | null | null | null |
barcode.py | kallangerard/grocery-barcode-scanner | 0a866c5b20c43355b642c0b78ba09d5cf4b0383c | ["MIT"] | null | null | null |
import logging
import groceries.api as groceries
import barcodescanner.scan as barcode
def main():
grocy = groceries.GrocyAPIClient()
while True:
scanner = barcode.Scan()
line = scanner.PollScanner()
if line is not None:
response = grocy.consume_barcode(line)
logging.debug(response)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
main()
| 22.263158
| 50
| 0.65721
| 45
| 423
| 5.977778
| 0.555556
| 0.089219
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.248227
| 423
| 18
| 51
| 23.5
| 0.845912
| 0
| 0
| 0
| 0
| 0
| 0.018913
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.214286
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
ed152979dba20d65fa46d571939edbfd7eb69a09 | 790 | py | Python | setup.py | mr-sk/easy-icm-runner | 01cf9d7d8e4ef13afc18dbdda2862035121f3624 | ["MIT"] | null | null | null |
setup.py | mr-sk/easy-icm-runner | 01cf9d7d8e4ef13afc18dbdda2862035121f3624 | ["MIT"] | null | null | null |
setup.py | mr-sk/easy-icm-runner | 01cf9d7d8e4ef13afc18dbdda2862035121f3624 | ["MIT"] | null | null | null |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="easy-icm-runner",
version="1.0.6",
author="Bachir El Koussa",
author_email="bgkoussa@gmail.com",
description="A wrapper for IBM ICMs Scheduler API Calls",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/equinoxfitness/easy-icm-runner/",
#packages=setuptools.find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
py_modules = ['icm_runner'],
install_requires=[
'requests',
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| 30.384615
| 93
| 0.641772
| 89
| 790
| 5.573034
| 0.752809
| 0.120968
| 0.052419
| 0.120968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006319
| 0.198734
| 790
| 25
| 94
| 31.6
| 0.777251
| 0.111392
| 0
| 0.090909
| 0
| 0
| 0.419401
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.045455
| 0
| 0.045455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed17f5890d6b8f37c6b6d897dfdeee0fd244dace
| 544
|
py
|
Python
|
config.py
|
lyth031/ptb_lm
|
71f687fdf41c6b981a306269c1341ea8a8347bb6
|
[
"MIT"
] | null | null | null |
config.py
|
lyth031/ptb_lm
|
71f687fdf41c6b981a306269c1341ea8a8347bb6
|
[
"MIT"
] | null | null | null |
config.py
|
lyth031/ptb_lm
|
71f687fdf41c6b981a306269c1341ea8a8347bb6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
class Config(object):
def __init__(self):
self.init_scale = 0.1
self.learning_rate = 1.0
self.max_grad_norm = 5
self.num_layers = 2
self.slice_size = 30
self.hidden_size = 200
self.max_epoch = 13
self.keep_prob = 0.8
self.lr_const_epoch = 4
self.lr_decay = 0.7
self.batch_size = 30
self.vocab_size = 10000
self.rnn_model = "gru"
self.data_path = "./data/"
self.save_path = "../out/cudnn/gru/"
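A brief usage sketch (not part of the original file): the training code presumably builds a Config and may override individual hyperparameters before use. The attribute names come from the class above; the override values are purely illustrative.
cfg = Config()
cfg.rnn_model = "lstm"   # switch the cell type away from the default "gru"
cfg.batch_size = 64      # illustrative override
print(cfg.hidden_size, cfg.vocab_size)  # -> 200 10000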
| 24.727273
| 44
| 0.549632
| 77
| 544
| 3.61039
| 0.597403
| 0.05036
| 0.071942
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071625
| 0.332721
| 544
| 21
| 45
| 25.904762
| 0.694215
| 0.038603
| 0
| 0
| 0
| 0
| 0.052023
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed1909657faffd879b4424a3c02025a1afc7ff54
| 1,272
|
py
|
Python
|
pymel/__init__.py
|
GlenWalker/pymel
|
8b69b72e1bb726a66792707af39626a987bf5c21
|
[
"BSD-3-Clause"
] | null | null | null |
pymel/__init__.py
|
GlenWalker/pymel
|
8b69b72e1bb726a66792707af39626a987bf5c21
|
[
"BSD-3-Clause"
] | null | null | null |
pymel/__init__.py
|
GlenWalker/pymel
|
8b69b72e1bb726a66792707af39626a987bf5c21
|
[
"BSD-3-Clause"
] | null | null | null |
# copyright Chad Dombrova chadd@luma-pictures.com
# created at luma pictures www.luma-pictures.com
"""
*******************************
PyMEL
*******************************
PyMEL makes python scripting in Maya work the way it should. Maya's command module is a direct
translation of MEL commands into python functions. The result is a very awkward and unpythonic syntax which
does not take advantage of python's strengths -- particularly, a flexible, object-oriented design. PyMEL
builds on the cmds module by organizing many of its commands into a class hierarchy, and by
customizing them to operate in a more succinct and intuitive way.
=======================================
Special Thanks
=======================================
Special thanks to those studios with the foresight to support an open-source project of this nature: Luma Pictures,
Attitude Studio, and ImageMovers Digital.
"""
__versiontuple__ = (1, 2, 0)
__version_suffix__ = 'a1'
__version__ = '.'.join(str(x) for x in __versiontuple__) + __version_suffix__
__authors__ = ['Chad Dombrova', 'Paul Molodowitch', 'Olivier Renouard', 'Ofer Koren']
import sys
assert sys.version_info > (2, 7), ("pymel version %s is compatible with Maya2016/python2.7 or later" % __version__)
| 37.411765
| 116
| 0.676887
| 166
| 1,272
| 5
| 0.668675
| 0.057831
| 0.036145
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011132
| 0.152516
| 1,272
| 33
| 117
| 38.545455
| 0.758813
| 0.71305
| 0
| 0
| 0
| 0
| 0.342776
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed1aeb45638b0f4e22adf71d2445a7c3b1908ff3
| 3,121
|
py
|
Python
|
Fusion/deltat.py
|
coylen/pySG
|
6af1b8387c256f8898e2198c635c8e4b72ec3942
|
[
"MIT"
] | 264
|
2015-10-07T19:31:15.000Z
|
2022-03-31T23:34:59.000Z
|
deltat.py
|
CharlesAO/micropython-fusion
|
fe72a6870357c8f9b1cae78b98564412382943d9
|
[
"MIT"
] | 14
|
2015-11-13T02:40:30.000Z
|
2022-01-22T10:44:48.000Z
|
deltat.py
|
CharlesAO/micropython-fusion
|
fe72a6870357c8f9b1cae78b98564412382943d9
|
[
"MIT"
] | 75
|
2015-12-12T00:26:28.000Z
|
2022-03-23T13:32:30.000Z
|
# deltat.py time difference calculation for sensor fusion
# Released under the MIT License (MIT)
# Copyright (c) 2018 Peter Hinch
# Provides TimeDiff function and DeltaT class.
# The following notes cover special cases. Where the device performing fusion
# is linked to the IMU and is running MicroPython, no special treatment is
# needed.
# The special cases are:
# 1. Device connected to the IMU is linked to a separate platform doing fusion.
# 2. Either or both are not running MicroPython.
# If the device providing the vectors is not running on MicroPython the user
# must supply timestamps and a function capable of differencing these. The
# function is passed to the Fusion constructor and the timestamp is provided
# along with the vector, being the time when the vector was acquired.
# If the device providing the vectors is running MicroPython but fusion is
# being performed on a device which is not, the user must provide their own
# implementation of ticks_diff which accounts for MicroPython rollover and
# must supply the returned ticks_us() values as a timestamp.
# Under MicroPython TimeDiff(start, end) uses time.ticks_diff.
# A DeltaT instance, called with function call syntax, returns a time
# difference from the previous call as a float value. Units seconds.
# If running under MicroPython and no time differencing function is supplied
# to the Fusion constructor it uses time.ticks_us as its time source and a
# default timediff function using time.ticks_diff() with a division by 1e6.
# If a time differencing function is supplied, a timestamp must be passed as an
# arg to instance calls of Fusion.update() or Fusion.update_nomag(). In the
# async version the user supplied read_coro() must return a timestamp with the
# vector.
# On 1st pass dt evidently can't be computed. A notional value of 100μs is
# returned. The Madgwick algorithm takes seconds to stabilise.
try:
import utime as time
except ImportError:
import time
is_micropython = hasattr(time, 'ticks_diff')
class DeltaT():
def __init__(self, timediff):
if timediff is None:
self.expect_ts = False
if is_micropython:
self.timediff = lambda start, end : time.ticks_diff(start, end)/1000000
else:
raise ValueError('You must define a timediff function')
else:
self.expect_ts = True
self.timediff = timediff
self.start_time = None
def __call__(self, ts):
if self.expect_ts:
if ts is None:
raise ValueError('Timestamp expected but not supplied.')
else:
if is_micropython:
ts = time.ticks_us()
else:
raise RuntimeError('Not MicroPython: provide timestamps and a timediff function')
# ts is now valid
if self.start_time is None: # 1st call: self.start_time is invalid
self.start_time = ts
return 0.0001 # 100μs notional delay. 1st reading is invalid in any case
dt = self.timediff(ts, self.start_time)
self.start_time = ts
return dt
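A short sketch of the non-MicroPython path described in the comments above: the caller supplies timestamps and a differencing function, here plain floats in seconds (the function name and units are assumptions, not part of the original module).
def seconds_timediff(start, end):
    # start is the newer timestamp, end the older one, both floats in seconds
    return start - end

deltat = DeltaT(seconds_timediff)
print(deltat(100.00))   # first call: returns the notional 0.0001 s
print(deltat(100.25))   # second call: 0.25 s since the previous timestamp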
| 41.065789
| 97
| 0.707786
| 451
| 3,121
| 4.831486
| 0.379157
| 0.024782
| 0.035796
| 0.018357
| 0.079853
| 0.029371
| 0.029371
| 0
| 0
| 0
| 0
| 0.012236
| 0.240628
| 3,121
| 75
| 98
| 41.613333
| 0.907173
| 0.621275
| 0
| 0.25
| 0
| 0
| 0.121951
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.09375
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed1b14ee7367d8cae14cb322f5cd81df68be3c15
| 11,336
|
py
|
Python
|
colour/models/rgb/datasets/sony.py
|
wenh06/colour
|
445fdad2711ae39c95b4375166905568d24a95f4
|
[
"BSD-3-Clause"
] | 1
|
2021-09-09T01:53:40.000Z
|
2021-09-09T01:53:40.000Z
|
colour/models/rgb/datasets/sony.py
|
wenh06/colour
|
445fdad2711ae39c95b4375166905568d24a95f4
|
[
"BSD-3-Clause"
] | null | null | null |
colour/models/rgb/datasets/sony.py
|
wenh06/colour
|
445fdad2711ae39c95b4375166905568d24a95f4
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Sony Colourspaces
=================
Defines the *Sony* colourspaces:
- :attr:`colour.models.RGB_COLOURSPACE_S_GAMUT`.
- :attr:`colour.models.RGB_COLOURSPACE_S_GAMUT3`.
- :attr:`colour.models.RGB_COLOURSPACE_S_GAMUT3_CINE`.
- :attr:`colour.models.RGB_COLOURSPACE_VENICE_S_GAMUT3`.
- :attr:`colour.models.RGB_COLOURSPACE_VENICE_S_GAMUT3_CINE`.
Notes
-----
- The *Venice S-Gamut3* and *Venice S-Gamut3.Cine* primaries and whitepoint
were derived with the following `Google Colab Notebook \
<https://colab.research.google.com/drive/1ZGTij7jT8eZRMPUkyWlv_x5ix5Q5twMB>`__.
References
----------
- :cite:`Gaggioni` : Gaggioni, H., Dhanendra, P., Yamashita, J., Kawada, N.,
Endo, K., & Clark, C. (n.d.). S-Log: A new LUT for digital production
mastering and interchange applications (Vol. 709, pp. 1-13).
http://pro.sony.com/bbsccms/assets/files/mkt/cinema/solutions/slog_manual.pdf
- :cite:`SonyCorporation` : Sony Corporation. (n.d.). S-Log Whitepaper (pp.
1-17). http://www.theodoropoulos.info/attachments/076_on%20S-Log.pdf
- :cite:`SonyCorporationd` : Sony Corporation. (n.d.). Technical Summary
for S-Gamut3.Cine/S-Log3 and S-Gamut3/S-Log3 (pp. 1-7).
http://community.sony.com/sony/attachments/sony/\
large-sensor-camera-F5-F55/12359/2/\
TechnicalSummary_for_S-Gamut3Cine_S-Gamut3_S-Log3_V1_00.pdf
- :cite:`SonyCorporatione` : Sony Corporation. (n.d.).
S-Gamut3_S-Gamut3Cine_Matrix.xlsx.
https://community.sony.com/sony/attachments/sony/\
large-sensor-camera-F5-F55/12359/3/S-Gamut3_S-Gamut3Cine_Matrix.xlsx
- :cite:`SonyElectronicsCorporation2020` : Sony Electronics Corporation.
(2020). IDT.Sony.Venice_SLog3_SGamut3.ctl. https://github.com/ampas/\
aces-dev/blob/710ecbe52c87ce9f4a1e02c8ddf7ea0d6b611cc8/transforms/ctl/idt/\
vendorSupplied/sony/IDT.Sony.Venice_SLog3_SGamut3.ctl
- :cite:`SonyElectronicsCorporation2020a` : Sony Electronics Corporation.
(2020). IDT.Sony.Venice_SLog3_SGamut3Cine.ctl. https://github.com/ampas/\
aces-dev/blob/710ecbe52c87ce9f4a1e02c8ddf7ea0d6b611cc8/transforms/ctl/idt/\
vendorSupplied/sony/IDT.Sony.Venice_SLog3_SGamut3Cine.ctl
"""
from __future__ import division, unicode_literals
import numpy as np
from colour.colorimetry import CCS_ILLUMINANTS
from colour.models.rgb import (RGB_Colourspace, log_encoding_SLog2,
log_decoding_SLog2, log_encoding_SLog3,
log_decoding_SLog3, normalised_primary_matrix)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = [
'PRIMARIES_S_GAMUT', 'WHITEPOINT_NAME_S_GAMUT', 'CCS_WHITEPOINT_S_GAMUT',
'MATRIX_S_GAMUT_TO_XYZ', 'MATRIX_XYZ_TO_S_GAMUT',
'RGB_COLOURSPACE_S_GAMUT', 'PRIMARIES_S_GAMUT3',
'WHITEPOINT_NAME_S_GAMUT3', 'CCS_WHITEPOINT_S_GAMUT3',
'MATRIX_S_GAMUT3_TO_XYZ', 'MATRIX_XYZ_TO_S_GAMUT3',
'RGB_COLOURSPACE_S_GAMUT3', 'PRIMARIES_S_GAMUT3_CINE',
'WHITEPOINT_NAME_S_GAMUT3_CINE', 'CCS_WHITEPOINT_S_GAMUT3_CINE',
'MATRIX_S_GAMUT3_CINE_TO_XYZ', 'MATRIX_XYZ_TO_S_GAMUT3_CINE',
'RGB_COLOURSPACE_S_GAMUT3_CINE', 'PRIMARIES_VENICE_S_GAMUT3',
'WHITEPOINT_NAME_VENICE_S_GAMUT3', 'CCS_WHITEPOINT_VENICE_S_GAMUT3',
'MATRIX_VENICE_S_GAMUT3_TO_XYZ', 'MATRIX_XYZ_TO_VENICE_S_GAMUT3',
'RGB_COLOURSPACE_VENICE_S_GAMUT3', 'PRIMARIES_VENICE_S_GAMUT3_CINE',
'WHITEPOINT_NAME_VENICE_S_GAMUT3_CINE',
'CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE',
'MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ', 'MATRIX_XYZ_TO_VENICE_S_GAMUT3_CINE',
'RGB_COLOURSPACE_VENICE_S_GAMUT3_CINE'
]
PRIMARIES_S_GAMUT = np.array([
[0.7300, 0.2800],
[0.1400, 0.8550],
[0.1000, -0.0500],
])
"""
*S-Gamut* colourspace primaries.
PRIMARIES_S_GAMUT : ndarray, (3, 2)
"""
WHITEPOINT_NAME_S_GAMUT = 'D65'
"""
*S-Gamut* colourspace whitepoint name.
WHITEPOINT_NAME_S_GAMUT : unicode
"""
CCS_WHITEPOINT_S_GAMUT = (CCS_ILLUMINANTS[
'CIE 1931 2 Degree Standard Observer'][WHITEPOINT_NAME_S_GAMUT])
"""
*S-Gamut* colourspace whitepoint chromaticity coordinates.
CCS_WHITEPOINT_S_GAMUT : ndarray
"""
MATRIX_S_GAMUT_TO_XYZ = np.array([
[0.7064827132, 0.1288010498, 0.1151721641],
[0.2709796708, 0.7866064112, -0.0575860820],
[-0.0096778454, 0.0046000375, 1.0941355587],
])
"""
*S-Gamut* colourspace to *CIE XYZ* tristimulus values matrix.
MATRIX_S_GAMUT_TO_XYZ : array_like, (3, 3)
"""
MATRIX_XYZ_TO_S_GAMUT = np.array([
[1.5073998991, -0.2458221374, -0.1716116808],
[-0.5181517271, 1.3553912409, 0.1258786682],
[0.0155116982, -0.0078727714, 0.9119163656],
])
"""
*CIE XYZ* tristimulus values to *S-Gamut* colourspace matrix.
MATRIX_XYZ_TO_S_GAMUT : array_like, (3, 3)
"""
RGB_COLOURSPACE_S_GAMUT = RGB_Colourspace(
'S-Gamut',
PRIMARIES_S_GAMUT,
CCS_WHITEPOINT_S_GAMUT,
WHITEPOINT_NAME_S_GAMUT,
MATRIX_S_GAMUT_TO_XYZ,
MATRIX_XYZ_TO_S_GAMUT,
log_encoding_SLog2,
log_decoding_SLog2,
)
RGB_COLOURSPACE_S_GAMUT.__doc__ = """
*S-Gamut* colourspace.
References
----------
:cite:`Gaggioni`, :cite:`SonyCorporation`
RGB_COLOURSPACE_S_GAMUT : RGB_Colourspace
"""
PRIMARIES_S_GAMUT3 = PRIMARIES_S_GAMUT
"""
*S-Gamut3* colourspace primaries.
PRIMARIES_S_GAMUT3 : ndarray, (3, 2)
"""
WHITEPOINT_NAME_S_GAMUT3 = WHITEPOINT_NAME_S_GAMUT
"""
*S-Gamut3* colourspace whitepoint name.
WHITEPOINT_NAME_S_GAMUT3 : unicode
"""
CCS_WHITEPOINT_S_GAMUT3 = CCS_WHITEPOINT_S_GAMUT
"""
*S-Gamut3* colourspace whitepoint chromaticity coordinates.
CCS_WHITEPOINT_S_GAMUT3 : ndarray
"""
MATRIX_S_GAMUT3_TO_XYZ = MATRIX_S_GAMUT_TO_XYZ
"""
*S-Gamut3* colourspace to *CIE XYZ* tristimulus values matrix.
MATRIX_S_GAMUT3_TO_XYZ : array_like, (3, 3)
"""
MATRIX_XYZ_TO_S_GAMUT3 = MATRIX_XYZ_TO_S_GAMUT
"""
*CIE XYZ* tristimulus values to *S-Gamut3* colourspace matrix.
MATRIX_XYZ_TO_S_GAMUT3 : array_like, (3, 3)
"""
RGB_COLOURSPACE_S_GAMUT3 = RGB_Colourspace(
'S-Gamut3',
PRIMARIES_S_GAMUT3,
CCS_WHITEPOINT_S_GAMUT3,
WHITEPOINT_NAME_S_GAMUT3,
MATRIX_S_GAMUT3_TO_XYZ,
MATRIX_XYZ_TO_S_GAMUT3,
log_encoding_SLog3,
log_decoding_SLog3,
)
RGB_COLOURSPACE_S_GAMUT3.__doc__ = """
*S-Gamut3* colourspace.
References
----------
:cite:`SonyCorporationd`
RGB_COLOURSPACE_S_GAMUT3 : RGB_Colourspace
"""
PRIMARIES_S_GAMUT3_CINE = np.array([
[0.76600, 0.27500],
[0.22500, 0.80000],
[0.08900, -0.08700],
])
"""
*S-Gamut3.Cine* colourspace primaries.
PRIMARIES_S_GAMUT3_CINE : ndarray, (3, 2)
"""
WHITEPOINT_NAME_S_GAMUT3_CINE = WHITEPOINT_NAME_S_GAMUT
"""
*S-Gamut3.Cine* colourspace whitepoint name.
WHITEPOINT_NAME_S_GAMUT3_CINE : unicode
"""
CCS_WHITEPOINT_S_GAMUT3_CINE = CCS_WHITEPOINT_S_GAMUT
"""
*S-Gamut3.Cine* colourspace whitepoint chromaticity coordinates.
CCS_WHITEPOINT_S_GAMUT3_CINE : ndarray
"""
MATRIX_S_GAMUT3_CINE_TO_XYZ = np.array([
[0.5990839208, 0.2489255161, 0.1024464902],
[0.2150758201, 0.8850685017, -0.1001443219],
[-0.0320658495, -0.0276583907, 1.1487819910],
])
"""
*S-Gamut3.Cine* colourspace to *CIE XYZ* tristimulus values matrix.
MATRIX_S_GAMUT3_CINE_TO_XYZ : array_like, (3, 3)
"""
MATRIX_XYZ_TO_S_GAMUT3_CINE = np.array([
[1.8467789693, -0.5259861230, -0.2105452114],
[-0.4441532629, 1.2594429028, 0.1493999729],
[0.0408554212, 0.0156408893, 0.8682072487],
])
"""
*CIE XYZ* tristimulus values to *S-Gamut3.Cine* colourspace matrix.
MATRIX_XYZ_TO_S_GAMUT3_CINE : array_like, (3, 3)
"""
RGB_COLOURSPACE_S_GAMUT3_CINE = RGB_Colourspace(
'S-Gamut3.Cine',
PRIMARIES_S_GAMUT3_CINE,
CCS_WHITEPOINT_S_GAMUT3_CINE,
WHITEPOINT_NAME_S_GAMUT3_CINE,
MATRIX_S_GAMUT3_CINE_TO_XYZ,
MATRIX_XYZ_TO_S_GAMUT3_CINE,
log_encoding_SLog3,
log_decoding_SLog3,
)
RGB_COLOURSPACE_S_GAMUT3_CINE.__doc__ = """
*S-Gamut3.Cine* colourspace.
References
----------
:cite:`SonyCorporatione`
RGB_COLOURSPACE_S_GAMUT3_CINE : RGB_Colourspace
"""
PRIMARIES_VENICE_S_GAMUT3 = np.array([
[0.740464264304292, 0.279364374750660],
[0.089241145423286, 0.893809528608105],
[0.110488236673827, -0.052579333080476],
])
"""
*Venice S-Gamut3* colourspace primaries.
PRIMARIES_VENICE_S_GAMUT3 : ndarray, (3, 2)
"""
WHITEPOINT_NAME_VENICE_S_GAMUT3 = WHITEPOINT_NAME_S_GAMUT
"""
*Venice S-Gamut3* colourspace whitepoint name.
WHITEPOINT_NAME_VENICE_S_GAMUT3 : unicode
"""
CCS_WHITEPOINT_VENICE_S_GAMUT3 = CCS_WHITEPOINT_S_GAMUT
"""
*Venice S-Gamut3* colourspace whitepoint chromaticity coordinates.
CCS_WHITEPOINT_VENICE_S_GAMUT3 : ndarray
"""
MATRIX_VENICE_S_GAMUT3_TO_XYZ = normalised_primary_matrix(
PRIMARIES_VENICE_S_GAMUT3, CCS_WHITEPOINT_VENICE_S_GAMUT3)
"""
*Venice S-Gamut3* colourspace to *CIE XYZ* tristimulus values matrix.
MATRIX_VENICE_S_GAMUT3_TO_XYZ : array_like, (3, 3)
"""
MATRIX_XYZ_TO_VENICE_S_GAMUT3 = np.linalg.inv(MATRIX_VENICE_S_GAMUT3_TO_XYZ)
"""
*CIE XYZ* tristimulus values to *Venice S-Gamut3* colourspace matrix.
MATRIX_XYZ_TO_VENICE_S_GAMUT3 : array_like, (3, 3)
"""
RGB_COLOURSPACE_VENICE_S_GAMUT3 = RGB_Colourspace(
'Venice S-Gamut3',
PRIMARIES_VENICE_S_GAMUT3,
CCS_WHITEPOINT_VENICE_S_GAMUT3,
WHITEPOINT_NAME_VENICE_S_GAMUT3,
MATRIX_VENICE_S_GAMUT3_TO_XYZ,
MATRIX_XYZ_TO_VENICE_S_GAMUT3,
log_encoding_SLog3,
log_decoding_SLog3,
)
RGB_COLOURSPACE_VENICE_S_GAMUT3.__doc__ = """
*Venice S-Gamut3* colourspace.
References
----------
:cite:`SonyElectronicsCorporation2020`
RGB_COLOURSPACE_VENICE_S_GAMUT3 : RGB_Colourspace
"""
PRIMARIES_VENICE_S_GAMUT3_CINE = np.array([
[0.775901871567345, 0.274502392854799],
[0.188682902773355, 0.828684937020288],
[0.101337382499301, -0.089187517306263],
])
"""
*Venice S-Gamut3.Cine* colourspace primaries.
PRIMARIES_VENICE_S_GAMUT3_CINE : ndarray, (3, 2)
"""
WHITEPOINT_NAME_VENICE_S_GAMUT3_CINE = WHITEPOINT_NAME_S_GAMUT
"""
*Venice S-Gamut3.Cine* colourspace whitepoint name.
WHITEPOINT_NAME_VENICE_S_GAMUT3_CINE : unicode
"""
CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE = CCS_WHITEPOINT_S_GAMUT
"""
*Venice S-Gamut3.Cine* colourspace whitepoint chromaticity coordinates.
CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE : ndarray
"""
MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ = normalised_primary_matrix(
PRIMARIES_VENICE_S_GAMUT3_CINE, CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE)
"""
*Venice S-Gamut3.Cine* colourspace to *CIE XYZ* tristimulus values matrix.
MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ : array_like, (3, 3)
"""
MATRIX_XYZ_TO_VENICE_S_GAMUT3_CINE = np.linalg.inv(
MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ)
"""
*CIE XYZ* tristimulus values to *Venice S-Gamut3.Cine* colourspace matrix.
MATRIX_XYZ_TO_VENICE_S_GAMUT3_CINE : array_like, (3, 3)
"""
RGB_COLOURSPACE_VENICE_S_GAMUT3_CINE = RGB_Colourspace(
'Venice S-Gamut3.Cine',
PRIMARIES_VENICE_S_GAMUT3_CINE,
CCS_WHITEPOINT_VENICE_S_GAMUT3_CINE,
WHITEPOINT_NAME_VENICE_S_GAMUT3_CINE,
MATRIX_VENICE_S_GAMUT3_CINE_TO_XYZ,
MATRIX_XYZ_TO_VENICE_S_GAMUT3_CINE,
log_encoding_SLog3,
log_decoding_SLog3,
)
RGB_COLOURSPACE_VENICE_S_GAMUT3_CINE.__doc__ = """
*Venice S-Gamut3.Cine* colourspace.
References
----------
:cite:`SonyElectronicsCorporation2020a`
RGB_COLOURSPACE_VENICE_S_GAMUT3_CINE : RGB_Colourspace
"""
| 28.918367
| 81
| 0.763585
| 1,548
| 11,336
| 5.144703
| 0.152455
| 0.123933
| 0.117529
| 0.076846
| 0.728152
| 0.681065
| 0.549724
| 0.464591
| 0.34003
| 0.257785
| 0
| 0.097827
| 0.119001
| 11,336
| 391
| 82
| 28.992327
| 0.699609
| 0.188691
| 0
| 0.176136
| 0
| 0
| 0.272826
| 0.169164
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.022727
| 0
| 0.022727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed1b3de2cf4ee0d6a94657c3843653ea66d9ad27
| 723
|
py
|
Python
|
network.py
|
QiaoZhongzheng/EWC-sample-PMNIST
|
cd5e10b401582ab7f0dcd7a1e38aed6552192484
|
[
"MIT"
] | null | null | null |
network.py
|
QiaoZhongzheng/EWC-sample-PMNIST
|
cd5e10b401582ab7f0dcd7a1e38aed6552192484
|
[
"MIT"
] | null | null | null |
network.py
|
QiaoZhongzheng/EWC-sample-PMNIST
|
cd5e10b401582ab7f0dcd7a1e38aed6552192484
|
[
"MIT"
] | null | null | null |
# -*- coding: UTF-8 -*-
'''=================================================
@Project -> File :EWC -> network
@IDE :PyCharm
@Author :Qiao Zhongzheng
@Date :2021/6/23 20:28
@Desc :
=================================================='''
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense, Conv2D,LeakyReLU,MaxPool2D,Flatten,Input
def fcnn():
input = Input(shape=784,dtype='float32',name='input')
# x = Dense(128,activation='relu')(input)
# x = Dense(64,activation='relu')(x)
# x = Dense(32,activation='relu')(x)
x = Dense(256,activation='relu')(input)
x = Dense(256,activation='relu')(x)
output = Dense(10,activation='softmax')(x)
return Model(input, output)
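A hypothetical driver for the fcnn() factory above: build the model, attach an optimizer and loss, and print the layer summary. The optimizer and loss choices are illustrative; data loading and training are out of scope here.
model = fcnn()
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.summary()  # Input(784) -> Dense(256) -> Dense(256) -> Dense(10, softmax)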
| 36.15
| 83
| 0.562932
| 85
| 723
| 4.788235
| 0.576471
| 0.07371
| 0.081081
| 0.09828
| 0.275184
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05475
| 0.141079
| 723
| 20
| 84
| 36.15
| 0.600644
| 0.477178
| 0
| 0
| 0
| 0
| 0.072973
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.25
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed1ce2b7ae05d6c1c0bdabe48547eeeb53fe25e1
| 1,949
|
py
|
Python
|
src/deep_dialog/usersims/usersim.py
|
CommissarSilver/TC-Bot
|
4579706a18028b5da9b8a7807fb2e2d4043dcaf8
|
[
"MIT"
] | 1
|
2021-05-29T01:32:49.000Z
|
2021-05-29T01:32:49.000Z
|
D3Q/src/deep_dialog/usersims/usersim.py
|
Yuqing2018/D3Q_Python3
|
2a9918494e3f0ed18f9b7560b1e6f13119fbee91
|
[
"MIT"
] | null | null | null |
D3Q/src/deep_dialog/usersims/usersim.py
|
Yuqing2018/D3Q_Python3
|
2a9918494e3f0ed18f9b7560b1e6f13119fbee91
|
[
"MIT"
] | null | null | null |
"""
Created on June 7, 2016
a rule-based user simulator
@author: xiul, t-zalipt
"""
import random
class UserSimulator:
""" Parent class for all user sims to inherit from """
def __init__(self, movie_dict=None, act_set=None, slot_set=None, start_set=None, params=None):
""" Constructor shared by all user simulators """
self.movie_dict = movie_dict
self.act_set = act_set
self.slot_set = slot_set
self.start_set = start_set
self.max_turn = params['max_turn']
self.slot_err_probability = params['slot_err_probability']
self.slot_err_mode = params['slot_err_mode']
self.intent_err_probability = params['intent_err_probability']
def initialize_episode(self):
""" Initialize a new episode (dialog)"""
print ("initialize episode called, generating goal")
self.goal = random.choice(self.start_set)
self.goal['request_slots']['ticket'] = 'UNK'
episode_over, user_action = self._sample_action()
assert (episode_over != 1),' but we just started'
return user_action
def next(self, system_action):
pass
def set_nlg_model(self, nlg_model):
self.nlg_model = nlg_model
def set_nlu_model(self, nlu_model):
self.nlu_model = nlu_model
def add_nl_to_action(self, user_action):
""" Add NL to User Dia_Act """
user_nlg_sentence = self.nlg_model.convert_diaact_to_nl(user_action, 'usr')
user_action['nl'] = user_nlg_sentence
if self.simulator_act_level == 1:
user_nlu_res = self.nlu_model.generate_dia_act(user_action['nl']) # NLU
            if user_nlu_res is not None:
#user_nlu_res['diaact'] = user_action['diaact'] # or not?
user_action.update(user_nlu_res)
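A toy subclass sketch, only to illustrate the contract the parent class appears to expect: concrete simulators supply _sample_action() for the opening user turn and next() for subsequent turns. The class name, dialog acts, and return shapes below are assumptions for illustration, not taken from the repository.
class ScriptedUserSimulator(UserSimulator):
    def _sample_action(self):
        # Opening user turn; 0 means the episode is not over yet.
        return 0, {'diaact': 'request', 'request_slots': {'ticket': 'UNK'}, 'inform_slots': {}}

    def next(self, system_action):
        # End the dialog after a single system turn (illustrative return shape).
        return {'diaact': 'thanks', 'request_slots': {}, 'inform_slots': {}}, True

params = {'max_turn': 20, 'slot_err_probability': 0.0,
          'slot_err_mode': 0, 'intent_err_probability': 0.0}
sim = ScriptedUserSimulator(start_set=[{'request_slots': {}, 'inform_slots': {}}], params=params)
first_action = sim.initialize_episode()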
| 30.453125
| 99
| 0.606978
| 248
| 1,949
| 4.443548
| 0.358871
| 0.072595
| 0.036298
| 0.027223
| 0.058076
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005109
| 0.297075
| 1,949
| 64
| 100
| 30.453125
| 0.79927
| 0.146742
| 0
| 0
| 0
| 0
| 0.09834
| 0.014049
| 0
| 0
| 0
| 0
| 0.032258
| 1
| 0.193548
| false
| 0.032258
| 0.032258
| 0
| 0.290323
| 0.032258
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed2176e9a9c0aa2fe888859654d1422302e6cce3
| 325
|
py
|
Python
|
hackerrank/Algorithms/Correctness and the Loop Invariant/solution.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | 4
|
2020-07-24T01:59:50.000Z
|
2021-07-24T15:14:08.000Z
|
hackerrank/Algorithms/Correctness and the Loop Invariant/solution.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | null | null | null |
hackerrank/Algorithms/Correctness and the Loop Invariant/solution.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | null | null | null |
def insertion_sort(l):
for i in range(1, len(l)):
j = i - 1
key = l[i]
while (j >= 0) and (l[j] > key):
l[j + 1] = l[j]
j -= 1
l[j + 1] = key
m = int(input().strip())
ar = [int(i) for i in input().strip().split()]
insertion_sort(ar)
print(" ".join(map(str, ar)))
| 21.666667
| 46
| 0.443077
| 56
| 325
| 2.535714
| 0.446429
| 0.070423
| 0.084507
| 0.056338
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028169
| 0.344615
| 325
| 14
| 47
| 23.214286
| 0.638498
| 0
| 0
| 0
| 0
| 0
| 0.003077
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0
| 0
| 0.083333
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed2393de8d1d80d0e5b9c6610dd11fbf42e62a83
| 18,305
|
py
|
Python
|
tests/scripts/thread-cert/thread_cert.py
|
lmaciejonczyk/openthread
|
9ca79ddd9af3d4e3f78cb6e611a3117a71b2198c
|
[
"BSD-3-Clause"
] | null | null | null |
tests/scripts/thread-cert/thread_cert.py
|
lmaciejonczyk/openthread
|
9ca79ddd9af3d4e3f78cb6e611a3117a71b2198c
|
[
"BSD-3-Clause"
] | null | null | null |
tests/scripts/thread-cert/thread_cert.py
|
lmaciejonczyk/openthread
|
9ca79ddd9af3d4e3f78cb6e611a3117a71b2198c
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
#
# Copyright (c) 2019, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import json
import logging
import os
import signal
import subprocess
import sys
import time
import traceback
import unittest
from typing import Optional, Callable
import config
import debug
from node import Node, OtbrNode, HostNode
from pktverify import utils as pvutils
PACKET_VERIFICATION = int(os.getenv('PACKET_VERIFICATION', 0))
if PACKET_VERIFICATION:
from pktverify.addrs import ExtAddr, EthAddr
from pktverify.packet_verifier import PacketVerifier
PORT_OFFSET = int(os.getenv('PORT_OFFSET', "0"))
ENV_THREAD_VERSION = os.getenv('THREAD_VERSION', '1.1')
DEFAULT_PARAMS = {
'is_mtd': False,
'is_bbr': False,
'is_otbr': False,
'is_host': False,
'mode': 'rdn',
'panid': 0xface,
'allowlist': None,
'version': ENV_THREAD_VERSION,
}
"""Default configurations when creating nodes."""
EXTENDED_ADDRESS_BASE = 0x166e0a0000000000
"""Extended address base to keep U/L bit 1. The value is borrowed from Thread Test Harness."""
class NcpSupportMixin():
""" The mixin to check whether a test case supports NCP.
"""
SUPPORT_NCP = True
def __init__(self, *args, **kwargs):
if os.getenv('NODE_TYPE', 'sim') == 'ncp-sim' and not self.SUPPORT_NCP:
# 77 means skip this test case in automake tests
sys.exit(77)
super().__init__(*args, **kwargs)
class TestCase(NcpSupportMixin, unittest.TestCase):
"""The base class for all thread certification test cases.
The `topology` member of sub-class is used to create test topology.
"""
USE_MESSAGE_FACTORY = True
TOPOLOGY = None
CASE_WIRESHARK_PREFS = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
self._start_time = None
self._do_packet_verification = PACKET_VERIFICATION and hasattr(self, 'verify')
def setUp(self):
try:
self._setUp()
except:
traceback.print_exc()
for node in list(self.nodes.values()):
try:
node.destroy()
except Exception:
traceback.print_exc()
raise
def _setUp(self):
"""Create simulator, nodes and apply configurations.
"""
self._clean_up_tmp()
self.simulator = config.create_default_simulator(use_message_factory=self.USE_MESSAGE_FACTORY)
self.nodes = {}
os.environ['LD_LIBRARY_PATH'] = '/tmp/thread-wireshark'
if self._has_backbone_traffic():
self._prepare_backbone_network()
self._start_backbone_sniffer()
self._initial_topology = initial_topology = {}
for i, params in self.TOPOLOGY.items():
params = self._parse_params(params)
initial_topology[i] = params
logging.info("Creating node %d: %r", i, params)
if params['is_otbr']:
nodeclass = OtbrNode
elif params['is_host']:
nodeclass = HostNode
else:
nodeclass = Node
node = nodeclass(
i,
is_mtd=params['is_mtd'],
simulator=self.simulator,
name=params.get('name'),
version=params['version'],
is_bbr=params['is_bbr'],
)
self.nodes[i] = node
if node.is_host:
continue
self.nodes[i].set_panid(params['panid'])
self.nodes[i].set_mode(params['mode'])
if 'partition_id' in params:
self.nodes[i].set_preferred_partition_id(params['partition_id'])
if 'channel' in params:
self.nodes[i].set_channel(params['channel'])
if 'masterkey' in params:
self.nodes[i].set_masterkey(params['masterkey'])
if 'network_name' in params:
self.nodes[i].set_network_name(params['network_name'])
if 'router_selection_jitter' in params:
self.nodes[i].set_router_selection_jitter(params['router_selection_jitter'])
if 'router_upgrade_threshold' in params:
self.nodes[i].set_router_upgrade_threshold(params['router_upgrade_threshold'])
if 'router_downgrade_threshold' in params:
self.nodes[i].set_router_downgrade_threshold(params['router_downgrade_threshold'])
if 'router_eligible' in params:
self.nodes[i].set_router_eligible(params['router_eligible'])
if 'prefer_router_id' in params:
self.nodes[i].prefer_router_id(params['prefer_router_id'])
if 'timeout' in params:
self.nodes[i].set_timeout(params['timeout'])
if 'active_dataset' in params:
self.nodes[i].set_active_dataset(params['active_dataset']['timestamp'],
panid=params['active_dataset'].get('panid'),
channel=params['active_dataset'].get('channel'),
channel_mask=params['active_dataset'].get('channel_mask'),
master_key=params['active_dataset'].get('master_key'),
security_policy=params['active_dataset'].get('security_policy'))
if 'pending_dataset' in params:
self.nodes[i].set_pending_dataset(params['pending_dataset']['pendingtimestamp'],
params['pending_dataset']['activetimestamp'],
panid=params['pending_dataset'].get('panid'),
channel=params['pending_dataset'].get('channel'),
delay=params['pending_dataset'].get('delay'))
if 'key_switch_guardtime' in params:
self.nodes[i].set_key_switch_guardtime(params['key_switch_guardtime'])
if 'key_sequence_counter' in params:
self.nodes[i].set_key_sequence_counter(params['key_sequence_counter'])
if 'network_id_timeout' in params:
self.nodes[i].set_network_id_timeout(params['network_id_timeout'])
if 'context_reuse_delay' in params:
self.nodes[i].set_context_reuse_delay(params['context_reuse_delay'])
if 'max_children' in params:
self.nodes[i].set_max_children(params['max_children'])
if 'bbr_registration_jitter' in params:
self.nodes[i].set_bbr_registration_jitter(params['bbr_registration_jitter'])
# we have to add allowlist after nodes are all created
for i, params in initial_topology.items():
allowlist = params['allowlist']
if not allowlist:
continue
for j in allowlist:
rssi = None
if isinstance(j, tuple):
j, rssi = j
self.nodes[i].add_allowlist(self.nodes[j].get_addr64(), rssi=rssi)
self.nodes[i].enable_allowlist()
self._inspector = debug.Inspector(self)
self._collect_test_info_after_setup()
def inspect(self):
self._inspector.inspect()
def tearDown(self):
"""Destroy nodes and simulator.
"""
if self._do_packet_verification and os.uname().sysname != "Linux":
raise NotImplementedError(
f'{self.test_name}: Packet Verification not available on {os.uname().sysname} (Linux only).')
if self._do_packet_verification:
time.sleep(3)
if self._has_backbone_traffic():
# Stop Backbone sniffer before stopping nodes so that we don't capture Codecov Uploading traffic
self._stop_backbone_sniffer()
for node in list(self.nodes.values()):
node.stop()
node.destroy()
self.simulator.stop()
if self._has_backbone_traffic():
self._remove_backbone_network()
pcap_filename = self._merge_thread_backbone_pcaps()
else:
pcap_filename = self._get_thread_pcap_filename()
if self._do_packet_verification:
self._test_info['pcap'] = pcap_filename
test_info_path = self._output_test_info()
self._verify_packets(test_info_path)
def flush_all(self):
"""Flush away all captured messages of all nodes.
"""
for i in list(self.nodes.keys()):
self.simulator.get_messages_sent_by(i)
def flush_nodes(self, nodes):
"""Flush away all captured messages of specified nodes.
Args:
nodes (list): nodes whose messages to flush.
"""
for i in nodes:
if i in list(self.nodes.keys()):
self.simulator.get_messages_sent_by(i)
def _clean_up_tmp(self):
"""
Clean up node files in tmp directory
"""
os.system(f"rm -f tmp/{PORT_OFFSET}_*.flash tmp/{PORT_OFFSET}_*.data tmp/{PORT_OFFSET}_*.swap")
def _verify_packets(self, test_info_path: str):
pv = PacketVerifier(test_info_path, self.CASE_WIRESHARK_PREFS)
pv.add_common_vars()
self.verify(pv)
print("Packet verification passed: %s" % test_info_path, file=sys.stderr)
@property
def test_name(self):
return os.getenv('TEST_NAME', 'current')
def collect_ipaddrs(self):
if not self._do_packet_verification:
return
test_info = self._test_info
for i, node in self.nodes.items():
ipaddrs = node.get_addrs()
test_info['ipaddrs'][i] = ipaddrs
if not node.is_host:
mleid = node.get_mleid()
test_info['mleids'][i] = mleid
def collect_rloc16s(self):
if not self._do_packet_verification:
return
test_info = self._test_info
test_info['rloc16s'] = {}
for i, node in self.nodes.items():
if not node.is_host:
test_info['rloc16s'][i] = '0x%04x' % node.get_addr16()
def collect_rlocs(self):
if not self._do_packet_verification:
return
test_info = self._test_info
test_info['rlocs'] = {}
for i, node in self.nodes.items():
if node.is_host:
continue
test_info['rlocs'][i] = node.get_rloc()
def collect_leader_aloc(self, node):
if not self._do_packet_verification:
return
test_info = self._test_info
test_info['leader_aloc'] = self.nodes[node].get_addr_leader_aloc()
def collect_extra_vars(self, **vars):
if not self._do_packet_verification:
return
for k in vars.keys():
assert isinstance(k, str), k
test_vars = self._test_info.setdefault("extra_vars", {})
test_vars.update(vars)
def _collect_test_info_after_setup(self):
"""
Collect test info after setUp
"""
if not self._do_packet_verification:
return
test_info = self._test_info = {
'script': os.path.abspath(sys.argv[0]),
'testcase': self.test_name,
'start_time': time.ctime(self._start_time),
'pcap': '',
'extaddrs': {},
'ethaddrs': {},
'ipaddrs': {},
'mleids': {},
'topology': self._initial_topology,
'backbone': {
'interface': config.BACKBONE_DOCKER_NETWORK_NAME,
'prefix': config.BACKBONE_PREFIX,
},
'domain_prefix': config.DOMAIN_PREFIX,
'env': {
'PORT_OFFSET': config.PORT_OFFSET,
},
}
for i, node in self.nodes.items():
if not node.is_host:
extaddr = node.get_addr64()
test_info['extaddrs'][i] = ExtAddr(extaddr).format_octets()
if node.is_host or node.is_otbr:
ethaddr = node.get_ether_mac()
test_info['ethaddrs'][i] = EthAddr(ethaddr).format_octets()
def _output_test_info(self):
"""
Output test info to json file after tearDown
"""
filename = f'{self.test_name}.json'
with open(filename, 'wt') as ofd:
ofd.write(json.dumps(self._test_info, indent=1, sort_keys=True))
return filename
def _get_thread_pcap_filename(self):
current_pcap = self.test_name + '.pcap'
return os.path.abspath(current_pcap)
def assure_run_ok(self, cmd, shell=False):
if not shell and isinstance(cmd, str):
cmd = cmd.split()
proc = subprocess.run(cmd, stdout=sys.stdout, stderr=sys.stderr, shell=shell)
print(">>> %s => %d" % (cmd, proc.returncode), file=sys.stderr)
proc.check_returncode()
def _parse_params(self, params: Optional[dict]) -> dict:
params = params or {}
if params.get('is_bbr') or params.get('is_otbr'):
# BBRs must use thread version 1.2
assert params.get('version', '1.2') == '1.2', params
params['version'] = '1.2'
elif params.get('is_host'):
# Hosts must not specify thread version
assert params.get('version', '') == '', params
params['version'] = ''
if params:
params = dict(DEFAULT_PARAMS, **params)
else:
params = DEFAULT_PARAMS.copy()
return params
def _has_backbone_traffic(self):
for param in self.TOPOLOGY.values():
if param and (param.get('is_otbr') or param.get('is_host')):
return True
return False
def _prepare_backbone_network(self):
network_name = config.BACKBONE_DOCKER_NETWORK_NAME
self.assure_run_ok(
f'docker network create --driver bridge --ipv6 --subnet {config.BACKBONE_PREFIX} -o "com.docker.network.bridge.name"="{network_name}" {network_name} || true',
shell=True)
def _remove_backbone_network(self):
network_name = config.BACKBONE_DOCKER_NETWORK_NAME
self.assure_run_ok(f'docker network rm {network_name}', shell=True)
def _start_backbone_sniffer(self):
# don't know why but I have to create the empty bbr.pcap first, otherwise tshark won't work
# self.assure_run_ok("truncate --size 0 bbr.pcap && chmod 664 bbr.pcap", shell=True)
pcap_file = self._get_backbone_pcap_filename()
try:
os.remove(pcap_file)
except FileNotFoundError:
pass
dumpcap = pvutils.which_dumpcap()
self._dumpcap_proc = subprocess.Popen([dumpcap, '-i', config.BACKBONE_DOCKER_NETWORK_NAME, '-w', pcap_file],
stdout=sys.stdout,
stderr=sys.stderr)
time.sleep(0.2)
assert self._dumpcap_proc.poll() is None, 'tshark terminated unexpectedly'
logging.info('Backbone sniffer launched successfully: pid=%s', self._dumpcap_proc.pid)
def _get_backbone_pcap_filename(self):
backbone_pcap = self.test_name + '_backbone.pcap'
return os.path.abspath(backbone_pcap)
def _get_merged_pcap_filename(self):
backbone_pcap = self.test_name + '_merged.pcap'
return os.path.abspath(backbone_pcap)
def _stop_backbone_sniffer(self):
self._dumpcap_proc.send_signal(signal.SIGTERM)
self._dumpcap_proc.__exit__(None, None, None)
logging.info('Backbone sniffer terminated successfully: pid=%s' % self._dumpcap_proc.pid)
def _merge_thread_backbone_pcaps(self):
thread_pcap = self._get_thread_pcap_filename()
backbone_pcap = self._get_backbone_pcap_filename()
merged_pcap = self._get_merged_pcap_filename()
mergecap = pvutils.which_mergecap()
self.assure_run_ok(f'{mergecap} -w {merged_pcap} {thread_pcap} {backbone_pcap}', shell=True)
return merged_pcap
def wait_until(self, cond: Callable[[], bool], timeout: int, go_interval: int = 1):
while True:
self.simulator.go(go_interval)
if cond():
break
timeout -= go_interval
if timeout <= 0:
raise RuntimeError(f'wait failed after {timeout} seconds')
def wait_node_state(self, nodeid: int, state: str, timeout: int):
self.wait_until(lambda: self.nodes[nodeid].get_state() == state, timeout)
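A hypothetical subclass sketch showing how the TOPOLOGY hook above is meant to be filled in; the parameter keys mirror those consumed by _setUp(), while the node ids, roles, and the tiny test body are invented for illustration and only run inside the repository's test harness.
LEADER, ROUTER = 1, 2

class TestTwoNodes(TestCase):
    TOPOLOGY = {
        LEADER: {'mode': 'rdn', 'allowlist': [ROUTER]},
        ROUTER: {'mode': 'rdn', 'allowlist': [LEADER]},
    }

    def test(self):
        self.nodes[LEADER].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')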
| 36.536926
| 170
| 0.607648
| 2,163
| 18,305
| 4.90846
| 0.218215
| 0.030517
| 0.021663
| 0.023265
| 0.233776
| 0.178864
| 0.144862
| 0.107469
| 0.076575
| 0.076575
| 0
| 0.005246
| 0.291887
| 18,305
| 500
| 171
| 36.61
| 0.81384
| 0.138978
| 0
| 0.164671
| 0
| 0.008982
| 0.13348
| 0.024599
| 0
| 0
| 0.001554
| 0
| 0.011976
| 1
| 0.092814
| false
| 0.005988
| 0.047904
| 0.002994
| 0.203593
| 0.011976
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed242bad81da2f05250a803d492c40b86c9b59d3
| 1,770
|
py
|
Python
|
FaceMaskDetection with webcam.py
|
Anurag-Varma/facemask-detection
|
9ac681261e246e6ab1837c576d933dc7324e3a92
|
[
"MIT"
] | 1
|
2021-07-13T09:16:12.000Z
|
2021-07-13T09:16:12.000Z
|
FaceMaskDetection with webcam.py
|
Anurag-Varma/facemask-detection
|
9ac681261e246e6ab1837c576d933dc7324e3a92
|
[
"MIT"
] | null | null | null |
FaceMaskDetection with webcam.py
|
Anurag-Varma/facemask-detection
|
9ac681261e246e6ab1837c576d933dc7324e3a92
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
from keras.models import model_from_json
from keras.preprocessing.image import img_to_array
#load model
model = model_from_json(open("fer.json", "r").read()) #change the path according to files
#load weights
model.load_weights('fer.h5') #change the path according to files
detection_model_path="C:/Users/panur/.spyder-py3/FaceMaskDetection/cascadeH5.xml" #change the path according to files
face_detection = cv2.CascadeClassifier(detection_model_path)
ret=1
flag=True
cap = cv2.VideoCapture(0) #default 0 for webcam
frameRate = cap.get(30)
while(cap.isOpened()):
ret, fm=cap.read()
fm = cv2.resize(fm, (224, 224))
file = cv2.cvtColor(fm, cv2.COLOR_BGR2RGB)
orig_frame = file
frame = file
faces = face_detection.detectMultiScale(frame,scaleFactor=1.1,minNeighbors=5,minSize=(30,30),flags=cv2.CASCADE_SCALE_IMAGE)
if len(faces) :
faces = sorted(faces, reverse=True,key=lambda x: (x[2] - x[0]) * (x[3] - x[1]))[0]
(fX, fY, fW, fH) = faces
roi = frame[fY:fY + fH, fX:fX + fW]
roi = cv2.resize(roi, (48, 48),3)
roi = frame.astype("float") / 255.0
roi = img_to_array(roi)
roi = np.expand_dims(roi, axis=0)
preds=model.predict_classes(roi)[0]
if preds==0:
print("Mask worn")
test='Mask worn'
elif preds==1:
print("Danger: No Mask")
test='Danger: No Mask'
cv2.putText(fm,test, (fX-15, fY - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
cv2.rectangle(fm, (fX, fY), (fX + fW, fY + fH),(0, 0, 255), 2)
cv2.imshow("Live Video", fm)
k=cv2.waitKey(25) #Press ESC to stop/exit
if k == 27:
ret=0
break
print("closed")
cap.release()
cv2.destroyAllWindows()
| 28.095238
| 124
| 0.641243
| 275
| 1,770
| 4.043636
| 0.454545
| 0.024281
| 0.035072
| 0.056655
| 0.091727
| 0.07554
| 0
| 0
| 0
| 0
| 0
| 0.055914
| 0.211864
| 1,770
| 63
| 125
| 28.095238
| 0.741219
| 0.092655
| 0
| 0
| 0
| 0
| 0.088806
| 0.036273
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.090909
| 0.068182
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed24c0adb8cc7ebd6871903b985fa571a276f939
| 5,391
|
py
|
Python
|
toggl.py
|
ulrikpedersen/toggl-gnome-applet
|
ae48358414d14d44ef5731c59f1813bac97e3257
|
[
"Unlicense"
] | null | null | null |
toggl.py
|
ulrikpedersen/toggl-gnome-applet
|
ae48358414d14d44ef5731c59f1813bac97e3257
|
[
"Unlicense"
] | 1
|
2017-11-21T09:36:06.000Z
|
2017-11-21T09:36:06.000Z
|
toggl.py
|
ulrikpedersen/toggl-gnome-applet
|
ae48358414d14d44ef5731c59f1813bac97e3257
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
import logging
from datetime import datetime
logging.basicConfig(level=logging.WARNING)
import os
import urllib2, base64, json
import dateutil.parser
def from_ISO8601( str_iso8601 ):
return dateutil.parser.parse(str_iso8601)
def to_ISO8601( timestamp ):
return timestamp.isoformat()
def convert_time_strings(toggl_dicts):
timestamp_fields = ['at',
'created_at',
'start',
'stop']
result = []
for tdict in toggl_dicts:
d = tdict
for tsf in timestamp_fields:
if tdict.has_key(tsf):
d[tsf] = from_ISO8601(tdict[tsf])
result.append(d)
return result
class Toggl:
def __init__(self, api_token=None):
self.log = logging.getLogger("Toggl")
self.log.setLevel(logging.DEBUG)
self.toggl_domain = "www.toggl.com"
self.toggl_api = "https://%s/api/v8/" % self.toggl_domain
self.report_api = "https://%s/reports/api/v2" % self.toggl_domain
self._api_token = api_token
        # Search for a Toggl API token in a list of files
# No validation of the collected token
# TODO: encryption of tokenfiles could be nice
tokenfiles = [os.path.expanduser(f) for f in ['.toggltoken', '~/.toggltoken', '~/.togglapplet/.toggltoken']]
for tf in tokenfiles:
if os.path.exists( tf ):
try:
f = open(tf)
self._api_token = f.read().strip()
f.close()
except:
self.log.exception("Could not read token from " + tf)
self._api_token = None
if self._api_token: break
def send_request( self, api_call_url ):
''' Send a request or command to Toggl, retrieve and parse the json response.
returns a list of dictionary objects.
Throws an exception if the http response is not OK (200) or if no JSON can be decoded from the response.
'''
request = urllib2.Request( api_call_url )
self.log.debug("http request url = \'%s\'", request.get_full_url())
# username:password
# Use base64.standard_b64encode instead of replace...
user_pass = base64.encodestring('%s:%s' % (self._api_token, 'api_token')).replace('\n', '')
request.add_header("Authorization", "Basic %s" % user_pass)
opener = urllib2.build_opener(
urllib2.HTTPHandler(),
urllib2.HTTPSHandler(),
urllib2.ProxyHandler({'https': 'http://wwwcache.rl.ac.uk:8080'}))
urllib2.install_opener(opener)
result = urllib2.urlopen(request, timeout = 3.0) # with no data, this is a http GET.
self.log.debug("http request result: code=%s url=\'%s\'", result.getcode(), result.geturl())
js = json.load(result)
#self.log.debug("JSON raw result: %s" % json.dumps(js,sort_keys=True, indent=4, separators=(',', ': ')))
return js
def get_workspaces(self):
self.log.debug("get_workspaces()")
js = self.send_request(self.toggl_api + "workspaces")
js = convert_time_strings(js)
return js
def get_default_workspace(self):
self.log.debug("get_default_workspace()")
wid = self.get_user()['default_wid']
js = self.send_request(self.toggl_api + "workspaces/%s"%str(wid))
js = convert_time_strings([js['data']])
return js[0]
def get_default_workspace_id(self):
self.log.debug("get_default_workspace_id()")
ws = self.get_default_workspace()
self.log.debug(ws)
return ws['id']
def get_projects(self, wid=None):
self.log.debug("get_projects(wid=%s)"%str(wid))
if wid:
js = self.send_request(self.toggl_api + "workspaces/%s/projects"%str(wid))
else:
js = []
for w in self.get_workspaces():
js += self.send_request(self.toggl_api + "workspaces/%s/projects"%str(w['id']))
js = convert_time_strings(js)
return js
def get_current_entry(self):
'''get the currently active time entry'''
self.log.debug("get_current_entry()")
js = self.send_request(self.toggl_api + "time_entries/current")
self.log.debug( js )
js = convert_time_strings(js['data'])
return js
def get_range_entries(self, start_end=None):
'''Get a list of entries in a range (max 1000 entries).
If no start-end range is defined, the default is to return all entries
from the last 9 days.
start_end: tuple with start and end date'''
self.log.debug("get_range_entries()")
query = "time_entries"
if start_end:
start, end = start_end
            if isinstance(start, datetime):
start = to_ISO8601(start)
            if isinstance(end, datetime):
end = to_ISO8601(end)
query += "?start_date=%s&end_date=%s"%(start, end)
js = self.send_request(self.toggl_api + query)
js = convert_time_strings(js)
return js
def get_user(self):
self.log.debug("get_user()")
js = self.send_request(self.toggl_api + "me")
return js['data']
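An illustrative driver for the class above (assuming a valid API token sits in one of the token files the constructor searches, and that the hard-coded proxy is reachable); the printed fields follow the Toggl v8 workspace payload.
if __name__ == '__main__':
    toggl = Toggl()
    for ws in toggl.get_workspaces():
        print("%s %s" % (ws['id'], ws['name']))
    print(toggl.get_current_entry())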
| 38.234043
| 116
| 0.586533
| 682
| 5,391
| 4.472141
| 0.280059
| 0.034426
| 0.047213
| 0.034426
| 0.218689
| 0.179016
| 0.179016
| 0.127541
| 0.105246
| 0.069836
| 0
| 0.01632
| 0.295307
| 5,391
| 141
| 117
| 38.234043
| 0.786523
| 0.148024
| 0
| 0.07619
| 0
| 0
| 0.12829
| 0.032073
| 0
| 0
| 0
| 0.007092
| 0
| 1
| 0.114286
| false
| 0.019048
| 0.047619
| 0.019048
| 0.27619
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed2565f6739ecf83e80e8445b8eb480489832a2d
| 432
|
py
|
Python
|
gs_divergence/symmetrized_geodesical_skew_divergence.py
|
ISMHinoLab/geodesical_skew_divergence
|
293648a30e86bdd14749af5b107f1d3687d67700
|
[
"MIT"
] | 7
|
2021-04-01T09:21:49.000Z
|
2022-03-24T05:28:22.000Z
|
gs_divergence/symmetrized_geodesical_skew_divergence.py
|
ISMHinoLab/geodesical_skew_divergence
|
293648a30e86bdd14749af5b107f1d3687d67700
|
[
"MIT"
] | 21
|
2021-04-01T02:56:54.000Z
|
2021-05-07T01:02:09.000Z
|
gs_divergence/symmetrized_geodesical_skew_divergence.py
|
ISMHinoLab/geodesical_skew_divergence
|
293648a30e86bdd14749af5b107f1d3687d67700
|
[
"MIT"
] | 2
|
2021-04-12T15:00:17.000Z
|
2021-04-26T03:10:26.000Z
|
from typing import Optional
import torch
from gs_divergence import gs_div
def symmetrized_gs_div(
input: torch.Tensor,
target: torch.Tensor,
alpha: float = -1,
lmd: float = 0.5,
reduction: Optional[str] = 'sum',
) -> torch.Tensor:
lhs = gs_div(input, target, alpha=alpha, lmd=lmd, reduction=reduction)
rhs = gs_div(target, input, alpha=alpha, lmd=lmd, reduction=reduction)
return (lhs + rhs) / 2
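A usage sketch, assuming the gs_divergence package above is installed and that gs_div accepts probability tensors of matching shape; the tensors here are made up.
import torch

p = torch.softmax(torch.randn(8), dim=0)
q = torch.softmax(torch.randn(8), dim=0)
print(float(symmetrized_gs_div(p, q, alpha=-1, lmd=0.5)))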
| 24
| 74
| 0.680556
| 61
| 432
| 4.721311
| 0.42623
| 0.069444
| 0.069444
| 0.111111
| 0.236111
| 0.236111
| 0
| 0
| 0
| 0
| 0
| 0.011594
| 0.201389
| 432
| 17
| 75
| 25.411765
| 0.823188
| 0
| 0
| 0
| 0
| 0
| 0.006944
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.230769
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed25675ecd1960dc8b520c3cb84ff43fb7cab0a0
| 1,946
|
py
|
Python
|
train.py
|
kushaliitm/deep-learning
|
ab8e23d1414d3b79bbe4a3acd57a475f6def7277
|
[
"MIT"
] | null | null | null |
train.py
|
kushaliitm/deep-learning
|
ab8e23d1414d3b79bbe4a3acd57a475f6def7277
|
[
"MIT"
] | null | null | null |
train.py
|
kushaliitm/deep-learning
|
ab8e23d1414d3b79bbe4a3acd57a475f6def7277
|
[
"MIT"
] | null | null | null |
import argparse
import helper as hp
import torch
import os
import json
parser = argparse.ArgumentParser(description = 'train.py')
parser.add_argument('--data-dir', nargs = '*', action = "store", default = "./flowers/", help = "folder path for data")
parser.add_argument('--save-dir', action = "store", required=True, help = "filepath for saving checkpoint")
parser.add_argument('--learning-rate', action = "store", default = 0.001, help = "learning rate for the optimizer")
parser.add_argument('--epoch-num', action = "store", type = int, default = 3, help = "epoch value")
parser.add_argument('--architecture', action = "store", default = "vgg16", type = str, help = "specify the neural network structure: vgg16 or densenet121")
parser.add_argument('--hidden-size', type = int, action = "store", default = 1000, help = "state the units for fc2")
parser.add_argument('--optimizer', action='store', default='adam', help='Optimizer to optimize')
pa = parser.parse_args()
pa = vars(pa)
print(pa)
data_path = pa['data_dir']
save_dir = pa["save_dir"]
learning_rate = pa['learning_rate']
architecture = pa['architecture']
hidden_size = pa['hidden_size']
epoch_number = pa['epoch_num']
if (not os.path.exists(f'experiments/{save_dir}')):
os.makedirs(f'experiments/{save_dir}')
file_path = f'experiments/{save_dir}/checkpoint.pt'
# saving parameters
with open(f'experiments/{save_dir}/parameters.json', 'w') as f:
json.dump(pa, f)
# load the data - data_load() from help.py
print('Loading data')
train_loader, validation_loader, test_loader = hp.load_data(data_path)
criterion = torch.nn.NLLLoss()
# build model
print(f'Loading weights from {architecture}')
model, optimizer = hp.get_model_and_optimizer(pa)
# train model
print('Training model')
hp.train_model(model, optimizer, learning_rate,train_loader,validation_loader,criterion,epoch_number, file_path)
# checkpoint the model
print("model has been successfully trained")
| 37.423077
| 155
| 0.731757
| 274
| 1,946
| 5.058394
| 0.364964
| 0.045455
| 0.085859
| 0.054834
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009913
| 0.118705
| 1,946
| 51
| 156
| 38.156863
| 0.798251
| 0.052929
| 0
| 0
| 0
| 0
| 0.335874
| 0.064235
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed261100e8ba320319dd45fa75984055274a92f9
| 11,020
|
py
|
Python
|
models/blip.py
|
lmathia2/BLIP
|
8ca42256e83654858856d40886509be8fbca51a7
|
[
"BSD-3-Clause"
] | null | null | null |
models/blip.py
|
lmathia2/BLIP
|
8ca42256e83654858856d40886509be8fbca51a7
|
[
"BSD-3-Clause"
] | null | null | null |
models/blip.py
|
lmathia2/BLIP
|
8ca42256e83654858856d40886509be8fbca51a7
|
[
"BSD-3-Clause"
] | null | null | null |
'''
* Copyright (c) 2022, salesforce.com, inc.
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
* For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
* By Junnan Li
'''
import warnings
warnings.filterwarnings("ignore")
from models.vit import VisionTransformer, interpolate_pos_embed
from models.med import BertConfig, BertModel, BertLMHeadModel
from transformers import BertTokenizer
import torch
from torch import nn
import torch.nn.functional as F
import os
from urllib.parse import urlparse
from timm.models.hub import download_cached_file
class BLIP_Base(nn.Module):
def __init__(self,
med_config = 'configs/med_config.json',
image_size = 224,
vit = 'base',
vit_grad_ckpt = False,
vit_ckpt_layer = 0,
):
"""
Args:
med_config (str): path for the mixture of encoder-decoder model's configuration file
image_size (int): input image size
vit (str): model size of vision transformer
"""
super().__init__()
self.visual_encoder, vision_width = create_vit(vit,image_size, vit_grad_ckpt, vit_ckpt_layer)
self.tokenizer = init_tokenizer()
med_config = BertConfig.from_json_file(med_config)
med_config.encoder_width = vision_width
self.text_encoder = BertModel(config=med_config, add_pooling_layer=False)
def forward(self, image, caption, mode):
assert mode in ['image', 'text', 'multimodal'], "mode parameter must be image, text, or multimodal"
text = self.tokenizer(caption, return_tensors="pt").to(image.device)
if mode=='image':
# return image features
image_embeds = self.visual_encoder(image)
return image_embeds
elif mode=='text':
# return text features
text_output = self.text_encoder(text.input_ids, attention_mask = text.attention_mask,
return_dict = True, mode = 'text')
return text_output.last_hidden_state
elif mode=='multimodal':
# return multimodel features
image_embeds = self.visual_encoder(image)
image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device)
text.input_ids[:,0] = self.tokenizer.enc_token_id
output = self.text_encoder(text.input_ids,
attention_mask = text.attention_mask,
encoder_hidden_states = image_embeds,
encoder_attention_mask = image_atts,
return_dict = True,
)
return output.last_hidden_state
class BLIP_Decoder(nn.Module):
def __init__(self,
med_config = 'configs/med_config.json',
image_size = 384,
vit = 'base',
vit_grad_ckpt = False,
vit_ckpt_layer = 0,
prompt = 'a picture of ',
):
"""
Args:
med_config (str): path for the mixture of encoder-decoder model's configuration file
image_size (int): input image size
vit (str): model size of vision transformer
"""
super().__init__()
self.visual_encoder, vision_width = create_vit(vit,image_size, vit_grad_ckpt, vit_ckpt_layer)
self.tokenizer = init_tokenizer()
med_config = BertConfig.from_json_file(med_config)
med_config.encoder_width = vision_width
self.text_decoder = BertLMHeadModel(config=med_config)
self.prompt = prompt
self.prompt_length = len(self.tokenizer(self.prompt).input_ids)-1
def forward(self, image, caption):
image_embeds = self.visual_encoder(image)
image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device)
text = self.tokenizer(caption, padding='longest', truncation=True, max_length=40, return_tensors="pt").to(image.device)
text.input_ids[:,0] = self.tokenizer.bos_token_id
decoder_targets = text.input_ids.masked_fill(text.input_ids == self.tokenizer.pad_token_id, -100)
decoder_targets[:,:self.prompt_length] = -100
decoder_output = self.text_decoder(text.input_ids,
attention_mask = text.attention_mask,
encoder_hidden_states = image_embeds,
encoder_attention_mask = image_atts,
labels = decoder_targets,
return_dict = True,
)
loss_lm = decoder_output.loss
return loss_lm
def generate(self, image, sample=False, num_beams=5, max_length=30, min_length=10, top_p=0.9, repetition_penalty=1.0):
image_embeds = self.visual_encoder(image)
if not sample:
image_embeds = image_embeds.repeat_interleave(num_beams,dim=0)
image_atts = torch.ones(image_embeds.size()[:-1],dtype=torch.long).to(image.device)
model_kwargs = {"encoder_hidden_states": image_embeds, "encoder_attention_mask":image_atts}
prompt = [self.prompt] * image.size(0)
input_ids = self.tokenizer(prompt, return_tensors="pt").input_ids.to(image.device)
input_ids[:,0] = self.tokenizer.bos_token_id
input_ids = input_ids[:, :-1]
if sample:
#nucleus sampling
outputs = self.text_decoder.generate(input_ids=input_ids,
max_length=max_length,
min_length=min_length,
do_sample=True,
top_p=top_p,
num_return_sequences=3,
eos_token_id=self.tokenizer.sep_token_id,
pad_token_id=self.tokenizer.pad_token_id,
repetition_penalty=1.1,
**model_kwargs)
else:
#beam search
outputs = self.text_decoder.generate(input_ids=input_ids,
max_length=max_length,
min_length=min_length,
num_beams=num_beams,
num_return_sequences=3,
eos_token_id=self.tokenizer.sep_token_id,
pad_token_id=self.tokenizer.pad_token_id,
repetition_penalty=repetition_penalty,
**model_kwargs)
captions = []
for output in outputs:
caption = self.tokenizer.decode(output, skip_special_tokens=True)
captions.append(caption[len(self.prompt):])
return captions
def blip_decoder(pretrained='',**kwargs):
model = BLIP_Decoder(**kwargs)
if pretrained:
model,msg = load_checkpoint(model,pretrained)
assert(len(msg.missing_keys)==0)
return model
def blip_feature_extractor(pretrained='',**kwargs):
model = BLIP_Base(**kwargs)
if pretrained:
model,msg = load_checkpoint(model,pretrained)
assert(len(msg.missing_keys)==0)
return model
def init_tokenizer():
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
tokenizer.add_special_tokens({'bos_token':'[DEC]'})
tokenizer.add_special_tokens({'additional_special_tokens':['[ENC]']})
tokenizer.enc_token_id = tokenizer.additional_special_tokens_ids[0]
return tokenizer
def create_vit(vit, image_size, use_grad_checkpointing=False, ckpt_layer=0, drop_path_rate=0):
assert vit in ['base', 'large'], "vit parameter must be base or large"
if vit=='base':
vision_width = 768
visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=12,
num_heads=12, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer,
drop_path_rate=0 or drop_path_rate
)
elif vit=='large':
vision_width = 1024
visual_encoder = VisionTransformer(img_size=image_size, patch_size=16, embed_dim=vision_width, depth=24,
num_heads=16, use_grad_checkpointing=use_grad_checkpointing, ckpt_layer=ckpt_layer,
drop_path_rate=0.1 or drop_path_rate
)
return visual_encoder, vision_width
def is_url(url_or_filename):
parsed = urlparse(url_or_filename)
return parsed.scheme in ("http", "https")
def load_checkpoint(model,url_or_filename):
if is_url(url_or_filename):
cached_file = download_cached_file(url_or_filename, check_hash=False, progress=True)
checkpoint = torch.load(cached_file, map_location='cpu')
elif os.path.isfile(url_or_filename):
checkpoint = torch.load(url_or_filename, map_location='cpu')
else:
raise RuntimeError('checkpoint url or path is invalid')
state_dict = checkpoint['model']
state_dict['visual_encoder.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder.pos_embed'],model.visual_encoder)
if 'visual_encoder_m.pos_embed' in model.state_dict().keys():
state_dict['visual_encoder_m.pos_embed'] = interpolate_pos_embed(state_dict['visual_encoder_m.pos_embed'],
model.visual_encoder_m)
for key in model.state_dict().keys():
if key in state_dict.keys():
if state_dict[key].shape!=model.state_dict()[key].shape:
del state_dict[key]
msg = model.load_state_dict(state_dict,strict=False)
print('load checkpoint from %s'%url_or_filename)
return model,msg
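# Hedged usage sketch (illustrative addition, not part of the original file): a minimal
# captioning round-trip with an untrained BLIP_Decoder built by the factory above.
# It assumes 'configs/med_config.json' exists and that 'bert-base-uncased' can be
# downloaded by the tokenizer; the random tensor stands in for a preprocessed
# batch of 384x384 images, and the captions will be gibberish without real weights.
if __name__ == '__main__':
    model = blip_decoder(pretrained='', image_size=384, vit='base')
    model.eval()
    with torch.no_grad():
        dummy_images = torch.randn(2, 3, 384, 384)  # stand-in for real preprocessed images
        captions = model.generate(dummy_images, sample=False, num_beams=3,
                                  max_length=20, min_length=5)
    print(captions)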
| 45.916667
| 128
| 0.551996
| 1,169
| 11,020
| 4.905902
| 0.205304
| 0.023714
| 0.018134
| 0.014647
| 0.506016
| 0.465911
| 0.447428
| 0.433304
| 0.425981
| 0.398256
| 0
| 0.010323
| 0.36706
| 11,020
| 239
| 129
| 46.108787
| 0.8119
| 0.062886
| 0
| 0.356725
| 0
| 0
| 0.052467
| 0.023493
| 0
| 0
| 0
| 0
| 0.023392
| 1
| 0.064327
| false
| 0
| 0.05848
| 0
| 0.19883
| 0.005848
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed26aa5f1dfa9b893282f08b252dba7679012685
| 28,524
|
py
|
Python
|
venv/Lib/site-packages/pandas/tests/reshape/merge/test_multi.py
|
OliviaNabbosa89/Disaster_Responses
|
1e66d77c303cec685dfc2ca94f4fca4cc9400570
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/pandas/tests/reshape/merge/test_multi.py
|
OliviaNabbosa89/Disaster_Responses
|
1e66d77c303cec685dfc2ca94f4fca4cc9400570
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/pandas/tests/reshape/merge/test_multi.py
|
OliviaNabbosa89/Disaster_Responses
|
1e66d77c303cec685dfc2ca94f4fca4cc9400570
|
[
"MIT"
] | null | null | null |
import numpy as np
from numpy.random import randn
import pytest
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series
import pandas._testing as tm
from pandas.core.reshape.concat import concat
from pandas.core.reshape.merge import merge
@pytest.fixture
def left():
"""left dataframe (not multi-indexed) for multi-index join tests"""
# a little relevant example with NAs
key1 = ["bar", "bar", "bar", "foo", "foo", "baz", "baz", "qux", "qux", "snap"]
key2 = ["two", "one", "three", "one", "two", "one", "two", "two", "three", "one"]
data = np.random.randn(len(key1))
return DataFrame({"key1": key1, "key2": key2, "data": data})
@pytest.fixture
def right():
"""right dataframe (multi-indexed) for multi-index join tests"""
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["key1", "key2"],
)
return DataFrame(
np.random.randn(10, 3), index=index, columns=["j_one", "j_two", "j_three"]
)
@pytest.fixture
def left_multi():
return DataFrame(
dict(
Origin=["A", "A", "B", "B", "C"],
Destination=["A", "B", "A", "C", "A"],
Period=["AM", "AM", "IP", "AM", "OP"],
TripPurp=["hbw", "nhb", "hbo", "nhb", "hbw"],
Trips=[1987, 3647, 2470, 4296, 4444],
),
columns=["Origin", "Destination", "Period", "TripPurp", "Trips"],
).set_index(["Origin", "Destination", "Period", "TripPurp"])
@pytest.fixture
def right_multi():
return DataFrame(
dict(
Origin=["A", "A", "B", "B", "C", "C", "E"],
Destination=["A", "B", "A", "B", "A", "B", "F"],
Period=["AM", "AM", "IP", "AM", "OP", "IP", "AM"],
LinkType=["a", "b", "c", "b", "a", "b", "a"],
Distance=[100, 80, 90, 80, 75, 35, 55],
),
columns=["Origin", "Destination", "Period", "LinkType", "Distance"],
).set_index(["Origin", "Destination", "Period", "LinkType"])
@pytest.fixture
def on_cols_multi():
return ["Origin", "Destination", "Period"]
@pytest.fixture
def idx_cols_multi():
return ["Origin", "Destination", "Period", "TripPurp", "LinkType"]
class TestMergeMulti:
def setup_method(self):
self.index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
self.to_join = DataFrame(
np.random.randn(10, 3),
index=self.index,
columns=["j_one", "j_two", "j_three"],
)
# a little relevant example with NAs
key1 = ["bar", "bar", "bar", "foo", "foo", "baz", "baz", "qux", "qux", "snap"]
key2 = [
"two",
"one",
"three",
"one",
"two",
"one",
"two",
"two",
"three",
"one",
]
data = np.random.randn(len(key1))
self.data = DataFrame({"key1": key1, "key2": key2, "data": data})
def test_merge_on_multikey(self, left, right, join_type):
on_cols = ["key1", "key2"]
result = left.join(right, on=on_cols, how=join_type).reset_index(drop=True)
expected = pd.merge(left, right.reset_index(), on=on_cols, how=join_type)
tm.assert_frame_equal(result, expected)
result = left.join(right, on=on_cols, how=join_type, sort=True).reset_index(
drop=True
)
expected = pd.merge(
left, right.reset_index(), on=on_cols, how=join_type, sort=True
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("sort", [False, True])
def test_left_join_multi_index(self, left, right, sort):
icols = ["1st", "2nd", "3rd"]
def bind_cols(df):
iord = lambda a: 0 if a != a else ord(a)
f = lambda ts: ts.map(iord) - ord("a")
return f(df["1st"]) + f(df["3rd"]) * 1e2 + df["2nd"].fillna(0) * 1e4
def run_asserts(left, right, sort):
res = left.join(right, on=icols, how="left", sort=sort)
assert len(left) < len(res) + 1
assert not res["4th"].isna().any()
assert not res["5th"].isna().any()
tm.assert_series_equal(res["4th"], -res["5th"], check_names=False)
result = bind_cols(res.iloc[:, :-2])
tm.assert_series_equal(res["4th"], result, check_names=False)
assert result.name is None
if sort:
tm.assert_frame_equal(res, res.sort_values(icols, kind="mergesort"))
out = merge(left, right.reset_index(), on=icols, sort=sort, how="left")
res.index = np.arange(len(res))
tm.assert_frame_equal(out, res)
lc = list(map(chr, np.arange(ord("a"), ord("z") + 1)))
left = DataFrame(np.random.choice(lc, (5000, 2)), columns=["1st", "3rd"])
left.insert(1, "2nd", np.random.randint(0, 1000, len(left)))
i = np.random.permutation(len(left))
right = left.iloc[i].copy()
left["4th"] = bind_cols(left)
right["5th"] = -bind_cols(right)
right.set_index(icols, inplace=True)
run_asserts(left, right, sort)
# inject some nulls
left.loc[1::23, "1st"] = np.nan
left.loc[2::37, "2nd"] = np.nan
left.loc[3::43, "3rd"] = np.nan
left["4th"] = bind_cols(left)
i = np.random.permutation(len(left))
right = left.iloc[i, :-1]
right["5th"] = -bind_cols(right)
right.set_index(icols, inplace=True)
run_asserts(left, right, sort)
@pytest.mark.parametrize("sort", [False, True])
def test_merge_right_vs_left(self, left, right, sort):
# compare left vs right merge with multikey
on_cols = ["key1", "key2"]
merged_left_right = left.merge(
right, left_on=on_cols, right_index=True, how="left", sort=sort
)
merge_right_left = right.merge(
left, right_on=on_cols, left_index=True, how="right", sort=sort
)
# Reorder columns
merge_right_left = merge_right_left[merged_left_right.columns]
tm.assert_frame_equal(merged_left_right, merge_right_left)
def test_merge_multiple_cols_with_mixed_cols_index(self):
# GH29522
s = pd.Series(
range(6),
pd.MultiIndex.from_product([["A", "B"], [1, 2, 3]], names=["lev1", "lev2"]),
name="Amount",
)
df = pd.DataFrame(
{"lev1": list("AAABBB"), "lev2": [1, 2, 3, 1, 2, 3], "col": 0}
)
result = pd.merge(df, s.reset_index(), on=["lev1", "lev2"])
expected = pd.DataFrame(
{
"lev1": list("AAABBB"),
"lev2": [1, 2, 3, 1, 2, 3],
"col": [0] * 6,
"Amount": range(6),
}
)
tm.assert_frame_equal(result, expected)
def test_compress_group_combinations(self):
# ~ 40000000 possible unique groups
key1 = tm.rands_array(10, 10000)
key1 = np.tile(key1, 2)
key2 = key1[::-1]
df = DataFrame({"key1": key1, "key2": key2, "value1": np.random.randn(20000)})
df2 = DataFrame(
{"key1": key1[::2], "key2": key2[::2], "value2": np.random.randn(10000)}
)
# just to hit the label compression code path
merge(df, df2, how="outer")
def test_left_join_index_preserve_order(self):
on_cols = ["k1", "k2"]
left = DataFrame(
{
"k1": [0, 1, 2] * 8,
"k2": ["foo", "bar"] * 12,
"v": np.array(np.arange(24), dtype=np.int64),
}
)
index = MultiIndex.from_tuples([(2, "bar"), (1, "foo")])
right = DataFrame({"v2": [5, 7]}, index=index)
result = left.join(right, on=on_cols)
expected = left.copy()
expected["v2"] = np.nan
expected.loc[(expected.k1 == 2) & (expected.k2 == "bar"), "v2"] = 5
expected.loc[(expected.k1 == 1) & (expected.k2 == "foo"), "v2"] = 7
tm.assert_frame_equal(result, expected)
result.sort_values(on_cols, kind="mergesort", inplace=True)
expected = left.join(right, on=on_cols, sort=True)
tm.assert_frame_equal(result, expected)
# test join with multi dtypes blocks
left = DataFrame(
{
"k1": [0, 1, 2] * 8,
"k2": ["foo", "bar"] * 12,
"k3": np.array([0, 1, 2] * 8, dtype=np.float32),
"v": np.array(np.arange(24), dtype=np.int32),
}
)
index = MultiIndex.from_tuples([(2, "bar"), (1, "foo")])
right = DataFrame({"v2": [5, 7]}, index=index)
result = left.join(right, on=on_cols)
expected = left.copy()
expected["v2"] = np.nan
expected.loc[(expected.k1 == 2) & (expected.k2 == "bar"), "v2"] = 5
expected.loc[(expected.k1 == 1) & (expected.k2 == "foo"), "v2"] = 7
tm.assert_frame_equal(result, expected)
result = result.sort_values(on_cols, kind="mergesort")
expected = left.join(right, on=on_cols, sort=True)
tm.assert_frame_equal(result, expected)
def test_left_join_index_multi_match_multiindex(self):
left = DataFrame(
[
["X", "Y", "C", "a"],
["W", "Y", "C", "e"],
["V", "Q", "A", "h"],
["V", "R", "D", "i"],
["X", "Y", "D", "b"],
["X", "Y", "A", "c"],
["W", "Q", "B", "f"],
["W", "R", "C", "g"],
["V", "Y", "C", "j"],
["X", "Y", "B", "d"],
],
columns=["cola", "colb", "colc", "tag"],
index=[3, 2, 0, 1, 7, 6, 4, 5, 9, 8],
)
right = DataFrame(
[
["W", "R", "C", 0],
["W", "Q", "B", 3],
["W", "Q", "B", 8],
["X", "Y", "A", 1],
["X", "Y", "A", 4],
["X", "Y", "B", 5],
["X", "Y", "C", 6],
["X", "Y", "C", 9],
["X", "Q", "C", -6],
["X", "R", "C", -9],
["V", "Y", "C", 7],
["V", "R", "D", 2],
["V", "R", "D", -1],
["V", "Q", "A", -3],
],
columns=["col1", "col2", "col3", "val"],
).set_index(["col1", "col2", "col3"])
result = left.join(right, on=["cola", "colb", "colc"], how="left")
expected = DataFrame(
[
["X", "Y", "C", "a", 6],
["X", "Y", "C", "a", 9],
["W", "Y", "C", "e", np.nan],
["V", "Q", "A", "h", -3],
["V", "R", "D", "i", 2],
["V", "R", "D", "i", -1],
["X", "Y", "D", "b", np.nan],
["X", "Y", "A", "c", 1],
["X", "Y", "A", "c", 4],
["W", "Q", "B", "f", 3],
["W", "Q", "B", "f", 8],
["W", "R", "C", "g", 0],
["V", "Y", "C", "j", 7],
["X", "Y", "B", "d", 5],
],
columns=["cola", "colb", "colc", "tag", "val"],
index=[3, 3, 2, 0, 1, 1, 7, 6, 6, 4, 4, 5, 9, 8],
)
tm.assert_frame_equal(result, expected)
result = left.join(right, on=["cola", "colb", "colc"], how="left", sort=True)
expected = expected.sort_values(["cola", "colb", "colc"], kind="mergesort")
tm.assert_frame_equal(result, expected)
def test_left_join_index_multi_match(self):
left = DataFrame(
[["c", 0], ["b", 1], ["a", 2], ["b", 3]],
columns=["tag", "val"],
index=[2, 0, 1, 3],
)
right = DataFrame(
[
["a", "v"],
["c", "w"],
["c", "x"],
["d", "y"],
["a", "z"],
["c", "r"],
["e", "q"],
["c", "s"],
],
columns=["tag", "char"],
).set_index("tag")
result = left.join(right, on="tag", how="left")
expected = DataFrame(
[
["c", 0, "w"],
["c", 0, "x"],
["c", 0, "r"],
["c", 0, "s"],
["b", 1, np.nan],
["a", 2, "v"],
["a", 2, "z"],
["b", 3, np.nan],
],
columns=["tag", "val", "char"],
index=[2, 2, 2, 2, 0, 1, 1, 3],
)
tm.assert_frame_equal(result, expected)
result = left.join(right, on="tag", how="left", sort=True)
expected2 = expected.sort_values("tag", kind="mergesort")
tm.assert_frame_equal(result, expected2)
# GH7331 - maintain left frame order in left merge
result = merge(left, right.reset_index(), how="left", on="tag")
expected.index = np.arange(len(expected))
tm.assert_frame_equal(result, expected)
def test_left_merge_na_buglet(self):
left = DataFrame(
{
"id": list("abcde"),
"v1": randn(5),
"v2": randn(5),
"dummy": list("abcde"),
"v3": randn(5),
},
columns=["id", "v1", "v2", "dummy", "v3"],
)
right = DataFrame(
{
"id": ["a", "b", np.nan, np.nan, np.nan],
"sv3": [1.234, 5.678, np.nan, np.nan, np.nan],
}
)
result = merge(left, right, on="id", how="left")
rdf = right.drop(["id"], axis=1)
expected = left.join(rdf)
tm.assert_frame_equal(result, expected)
def test_merge_na_keys(self):
data = [
[1950, "A", 1.5],
[1950, "B", 1.5],
[1955, "B", 1.5],
[1960, "B", np.nan],
[1970, "B", 4.0],
[1950, "C", 4.0],
[1960, "C", np.nan],
[1965, "C", 3.0],
[1970, "C", 4.0],
]
frame = DataFrame(data, columns=["year", "panel", "data"])
other_data = [
[1960, "A", np.nan],
[1970, "A", np.nan],
[1955, "A", np.nan],
[1965, "A", np.nan],
[1965, "B", np.nan],
[1955, "C", np.nan],
]
other = DataFrame(other_data, columns=["year", "panel", "data"])
result = frame.merge(other, how="outer")
expected = frame.fillna(-999).merge(other.fillna(-999), how="outer")
expected = expected.replace(-999, np.nan)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("klass", [None, np.asarray, Series, Index])
def test_merge_datetime_index(self, klass):
# see gh-19038
df = DataFrame(
[1, 2, 3], ["2016-01-01", "2017-01-01", "2018-01-01"], columns=["a"]
)
df.index = pd.to_datetime(df.index)
on_vector = df.index.year
if klass is not None:
on_vector = klass(on_vector)
expected = DataFrame({"a": [1, 2, 3], "key_1": [2016, 2017, 2018]})
result = df.merge(df, on=["a", on_vector], how="inner")
tm.assert_frame_equal(result, expected)
expected = DataFrame(
{"key_0": [2016, 2017, 2018], "a_x": [1, 2, 3], "a_y": [1, 2, 3]}
)
result = df.merge(df, on=[df.index.year], how="inner")
tm.assert_frame_equal(result, expected)
def test_join_multi_levels(self):
# GH 3662
# merge multi-levels
household = DataFrame(
dict(
household_id=[1, 2, 3],
male=[0, 1, 0],
wealth=[196087.3, 316478.7, 294750],
),
columns=["household_id", "male", "wealth"],
).set_index("household_id")
portfolio = DataFrame(
dict(
household_id=[1, 2, 2, 3, 3, 3, 4],
asset_id=[
"nl0000301109",
"nl0000289783",
"gb00b03mlx29",
"gb00b03mlx29",
"lu0197800237",
"nl0000289965",
np.nan,
],
name=[
"ABN Amro",
"Robeco",
"Royal Dutch Shell",
"Royal Dutch Shell",
"AAB Eastern Europe Equity Fund",
"Postbank BioTech Fonds",
np.nan,
],
share=[1.0, 0.4, 0.6, 0.15, 0.6, 0.25, 1.0],
),
columns=["household_id", "asset_id", "name", "share"],
).set_index(["household_id", "asset_id"])
result = household.join(portfolio, how="inner")
expected = (
DataFrame(
dict(
male=[0, 1, 1, 0, 0, 0],
wealth=[196087.3, 316478.7, 316478.7, 294750.0, 294750.0, 294750.0],
name=[
"ABN Amro",
"Robeco",
"Royal Dutch Shell",
"Royal Dutch Shell",
"AAB Eastern Europe Equity Fund",
"Postbank BioTech Fonds",
],
share=[1.00, 0.40, 0.60, 0.15, 0.60, 0.25],
household_id=[1, 2, 2, 3, 3, 3],
asset_id=[
"nl0000301109",
"nl0000289783",
"gb00b03mlx29",
"gb00b03mlx29",
"lu0197800237",
"nl0000289965",
],
)
)
.set_index(["household_id", "asset_id"])
.reindex(columns=["male", "wealth", "name", "share"])
)
tm.assert_frame_equal(result, expected)
# equivalency
result = merge(
household.reset_index(),
portfolio.reset_index(),
on=["household_id"],
how="inner",
).set_index(["household_id", "asset_id"])
tm.assert_frame_equal(result, expected)
result = household.join(portfolio, how="outer")
expected = concat(
[
expected,
(
DataFrame(
dict(share=[1.00]),
index=MultiIndex.from_tuples(
[(4, np.nan)], names=["household_id", "asset_id"]
),
)
),
],
axis=0,
sort=True,
).reindex(columns=expected.columns)
tm.assert_frame_equal(result, expected)
# invalid cases
household.index.name = "foo"
with pytest.raises(
ValueError, match="cannot join with no overlapping index names"
):
household.join(portfolio, how="inner")
portfolio2 = portfolio.copy()
portfolio2.index.set_names(["household_id", "foo"])
with pytest.raises(ValueError, match="columns overlap but no suffix specified"):
portfolio2.join(portfolio, how="inner")
def test_join_multi_levels2(self):
# some more advanced merges
# GH6360
household = DataFrame(
dict(
household_id=[1, 2, 2, 3, 3, 3, 4],
asset_id=[
"nl0000301109",
"nl0000301109",
"gb00b03mlx29",
"gb00b03mlx29",
"lu0197800237",
"nl0000289965",
np.nan,
],
share=[1.0, 0.4, 0.6, 0.15, 0.6, 0.25, 1.0],
),
columns=["household_id", "asset_id", "share"],
).set_index(["household_id", "asset_id"])
log_return = DataFrame(
dict(
asset_id=[
"gb00b03mlx29",
"gb00b03mlx29",
"gb00b03mlx29",
"lu0197800237",
"lu0197800237",
],
t=[233, 234, 235, 180, 181],
log_return=[0.09604978, -0.06524096, 0.03532373, 0.03025441, 0.036997],
)
).set_index(["asset_id", "t"])
expected = (
DataFrame(
dict(
household_id=[2, 2, 2, 3, 3, 3, 3, 3],
asset_id=[
"gb00b03mlx29",
"gb00b03mlx29",
"gb00b03mlx29",
"gb00b03mlx29",
"gb00b03mlx29",
"gb00b03mlx29",
"lu0197800237",
"lu0197800237",
],
t=[233, 234, 235, 233, 234, 235, 180, 181],
share=[0.6, 0.6, 0.6, 0.15, 0.15, 0.15, 0.6, 0.6],
log_return=[
0.09604978,
-0.06524096,
0.03532373,
0.09604978,
-0.06524096,
0.03532373,
0.03025441,
0.036997,
],
)
)
.set_index(["household_id", "asset_id", "t"])
.reindex(columns=["share", "log_return"])
)
# this is the equivalency
result = merge(
household.reset_index(),
log_return.reset_index(),
on=["asset_id"],
how="inner",
).set_index(["household_id", "asset_id", "t"])
tm.assert_frame_equal(result, expected)
expected = (
DataFrame(
dict(
household_id=[1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 4],
asset_id=[
"nl0000301109",
"nl0000301109",
"gb00b03mlx29",
"gb00b03mlx29",
"gb00b03mlx29",
"gb00b03mlx29",
"gb00b03mlx29",
"gb00b03mlx29",
"lu0197800237",
"lu0197800237",
"nl0000289965",
None,
],
t=[None, None, 233, 234, 235, 233, 234, 235, 180, 181, None, None],
share=[
1.0,
0.4,
0.6,
0.6,
0.6,
0.15,
0.15,
0.15,
0.6,
0.6,
0.25,
1.0,
],
log_return=[
None,
None,
0.09604978,
-0.06524096,
0.03532373,
0.09604978,
-0.06524096,
0.03532373,
0.03025441,
0.036997,
None,
None,
],
)
)
.set_index(["household_id", "asset_id", "t"])
.reindex(columns=["share", "log_return"])
)
result = merge(
household.reset_index(),
log_return.reset_index(),
on=["asset_id"],
how="outer",
).set_index(["household_id", "asset_id", "t"])
tm.assert_frame_equal(result, expected)
class TestJoinMultiMulti:
def test_join_multi_multi(
self, left_multi, right_multi, join_type, on_cols_multi, idx_cols_multi
):
# Multi-index join tests
expected = (
pd.merge(
left_multi.reset_index(),
right_multi.reset_index(),
how=join_type,
on=on_cols_multi,
)
.set_index(idx_cols_multi)
.sort_index()
)
result = left_multi.join(right_multi, how=join_type).sort_index()
tm.assert_frame_equal(result, expected)
def test_join_multi_empty_frames(
self, left_multi, right_multi, join_type, on_cols_multi, idx_cols_multi
):
left_multi = left_multi.drop(columns=left_multi.columns)
right_multi = right_multi.drop(columns=right_multi.columns)
expected = (
pd.merge(
left_multi.reset_index(),
right_multi.reset_index(),
how=join_type,
on=on_cols_multi,
)
.set_index(idx_cols_multi)
.sort_index()
)
result = left_multi.join(right_multi, how=join_type).sort_index()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("box", [None, np.asarray, Series, Index])
def test_merge_datetime_index(self, box):
# see gh-19038
df = DataFrame(
[1, 2, 3], ["2016-01-01", "2017-01-01", "2018-01-01"], columns=["a"]
)
df.index = pd.to_datetime(df.index)
on_vector = df.index.year
if box is not None:
on_vector = box(on_vector)
expected = DataFrame({"a": [1, 2, 3], "key_1": [2016, 2017, 2018]})
result = df.merge(df, on=["a", on_vector], how="inner")
tm.assert_frame_equal(result, expected)
expected = DataFrame(
{"key_0": [2016, 2017, 2018], "a_x": [1, 2, 3], "a_y": [1, 2, 3]}
)
result = df.merge(df, on=[df.index.year], how="inner")
tm.assert_frame_equal(result, expected)
def test_single_common_level(self):
index_left = pd.MultiIndex.from_tuples(
[("K0", "X0"), ("K0", "X1"), ("K1", "X2")], names=["key", "X"]
)
left = pd.DataFrame(
{"A": ["A0", "A1", "A2"], "B": ["B0", "B1", "B2"]}, index=index_left
)
index_right = pd.MultiIndex.from_tuples(
[("K0", "Y0"), ("K1", "Y1"), ("K2", "Y2"), ("K2", "Y3")], names=["key", "Y"]
)
right = pd.DataFrame(
{"C": ["C0", "C1", "C2", "C3"], "D": ["D0", "D1", "D2", "D3"]},
index=index_right,
)
result = left.join(right)
expected = pd.merge(
left.reset_index(), right.reset_index(), on=["key"], how="inner"
).set_index(["key", "X", "Y"])
tm.assert_frame_equal(result, expected)
def test_join_multi_wrong_order(self):
# GH 25760
# GH 28956
midx1 = pd.MultiIndex.from_product([[1, 2], [3, 4]], names=["a", "b"])
midx3 = pd.MultiIndex.from_tuples([(4, 1), (3, 2), (3, 1)], names=["b", "a"])
left = pd.DataFrame(index=midx1, data={"x": [10, 20, 30, 40]})
right = pd.DataFrame(index=midx3, data={"y": ["foo", "bar", "fing"]})
result = left.join(right)
expected = pd.DataFrame(
index=midx1,
data={"x": [10, 20, 30, 40], "y": ["fing", "foo", "bar", np.nan]},
)
tm.assert_frame_equal(result, expected)
| 33.876485
| 89
| 0.425186
| 3,037
| 28,524
| 3.872242
| 0.128416
| 0.021769
| 0.033163
| 0.045918
| 0.603401
| 0.551616
| 0.504337
| 0.464456
| 0.406378
| 0.375
| 0
| 0.088753
| 0.408673
| 28,524
| 841
| 90
| 33.916766
| 0.608466
| 0.021526
| 0
| 0.454412
| 0
| 0
| 0.099826
| 0
| 0
| 0
| 0
| 0
| 0.057353
| 1
| 0.039706
| false
| 0
| 0.011765
| 0.005882
| 0.064706
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed26d3056f6a5641259d87e73533b42ec4832318
| 11,894
|
py
|
Python
|
test/api/test_histories.py
|
mmiladi/galaxy
|
7857b152cd10d9490ac2433ff2905ca1a47ee32c
|
[
"CC-BY-3.0"
] | 1
|
2021-04-26T08:46:21.000Z
|
2021-04-26T08:46:21.000Z
|
test/api/test_histories.py
|
mmiladi/galaxy
|
7857b152cd10d9490ac2433ff2905ca1a47ee32c
|
[
"CC-BY-3.0"
] | null | null | null |
test/api/test_histories.py
|
mmiladi/galaxy
|
7857b152cd10d9490ac2433ff2905ca1a47ee32c
|
[
"CC-BY-3.0"
] | 1
|
2018-12-09T13:50:28.000Z
|
2018-12-09T13:50:28.000Z
|
# -*- coding: utf-8 -*-
from requests import (
get,
post,
put
)
from base import api # noqa: I100
from base.populators import ( # noqa: I100
DatasetCollectionPopulator,
DatasetPopulator,
wait_on
)
class HistoriesApiTestCase(api.ApiTestCase):
def setUp(self):
super(HistoriesApiTestCase, self).setUp()
self.dataset_populator = DatasetPopulator(self.galaxy_interactor)
self.dataset_collection_populator = DatasetCollectionPopulator(self.galaxy_interactor)
def test_create_history(self):
# Create a history.
create_response = self._create_history("TestHistory1")
created_id = create_response["id"]
# Make sure new history appears in index of user's histories.
index_response = self._get("histories").json()
indexed_history = [h for h in index_response if h["id"] == created_id][0]
self.assertEquals(indexed_history["name"], "TestHistory1")
def test_show_history(self):
history_id = self._create_history("TestHistoryForShow")["id"]
show_response = self._show(history_id)
self._assert_has_key(
show_response,
'id', 'name', 'annotation', 'size', 'contents_url',
'state', 'state_details', 'state_ids'
)
state_details = show_response["state_details"]
state_ids = show_response["state_ids"]
states = [
'discarded', 'empty', 'error', 'failed_metadata', 'new',
'ok', 'paused', 'queued', 'running', 'setting_metadata', 'upload'
]
assert isinstance(state_details, dict)
assert isinstance(state_ids, dict)
self._assert_has_keys(state_details, *states)
self._assert_has_keys(state_ids, *states)
def test_show_most_recently_used(self):
history_id = self._create_history("TestHistoryRecent")["id"]
show_response = self._get("histories/most_recently_used").json()
assert show_response["id"] == history_id
def test_index_order(self):
slightly_older_history_id = self._create_history("TestHistorySlightlyOlder")["id"]
newer_history_id = self._create_history("TestHistoryNewer")["id"]
index_response = self._get("histories").json()
assert index_response[0]["id"] == newer_history_id
assert index_response[1]["id"] == slightly_older_history_id
def test_delete(self):
# Setup a history and ensure it is in the index
history_id = self._create_history("TestHistoryForDelete")["id"]
index_response = self._get("histories").json()
assert index_response[0]["id"] == history_id
show_response = self._show(history_id)
assert not show_response["deleted"]
# Delete the history
self._delete("histories/%s" % history_id)
# Check can view it - but it is deleted
show_response = self._show(history_id)
assert show_response["deleted"]
# Verify it is dropped from history index
index_response = self._get("histories").json()
assert len(index_response) == 0 or index_response[0]["id"] != history_id
# Add deleted filter to index to view it
index_response = self._get("histories", {"deleted": "true"}).json()
assert index_response[0]["id"] == history_id
def test_purge(self):
history_id = self._create_history("TestHistoryForPurge")["id"]
data = {'purge': True}
self._delete("histories/%s" % history_id, data=data)
show_response = self._show(history_id)
assert show_response["deleted"]
assert show_response["purged"]
def test_undelete(self):
history_id = self._create_history("TestHistoryForDeleteAndUndelete")["id"]
self._delete("histories/%s" % history_id)
self._post("histories/deleted/%s/undelete" % history_id)
show_response = self._show(history_id)
assert not show_response["deleted"]
def test_update(self):
history_id = self._create_history("TestHistoryForUpdating")["id"]
self._update(history_id, {"name": "New Name"})
show_response = self._show(history_id)
assert show_response["name"] == "New Name"
unicode_name = u'桜ゲノム'
self._update(history_id, {"name": unicode_name})
show_response = self._show(history_id)
assert show_response["name"] == unicode_name, show_response
quoted_name = "'MooCow'"
self._update(history_id, {"name": quoted_name})
show_response = self._show(history_id)
assert show_response["name"] == quoted_name
self._update(history_id, {"deleted": True})
show_response = self._show(history_id)
assert show_response["deleted"], show_response
self._update(history_id, {"deleted": False})
show_response = self._show(history_id)
assert not show_response["deleted"]
self._update(history_id, {"published": True})
show_response = self._show(history_id)
assert show_response["published"]
self._update(history_id, {"genome_build": "hg18"})
show_response = self._show(history_id)
assert show_response["genome_build"] == "hg18"
self._update(history_id, {"annotation": "The annotation is cool"})
show_response = self._show(history_id)
assert show_response["annotation"] == "The annotation is cool"
self._update(history_id, {"annotation": unicode_name})
show_response = self._show(history_id)
assert show_response["annotation"] == unicode_name, show_response
self._update(history_id, {"annotation": quoted_name})
show_response = self._show(history_id)
assert show_response["annotation"] == quoted_name
def test_update_invalid_attribute(self):
history_id = self._create_history("TestHistoryForInvalidUpdating")["id"]
put_response = self._update(history_id, {"invalidkey": "moo"})
assert "invalidkey" not in put_response.json()
def test_update_invalid_types(self):
history_id = self._create_history("TestHistoryForUpdatingInvalidTypes")["id"]
for str_key in ["name", "annotation"]:
assert self._update(history_id, {str_key: False}).status_code == 400
for bool_key in ['deleted', 'importable', 'published']:
assert self._update(history_id, {bool_key: "a string"}).status_code == 400
assert self._update(history_id, {"tags": "a simple string"}).status_code == 400
assert self._update(history_id, {"tags": [True]}).status_code == 400
def test_invalid_keys(self):
invalid_history_id = "1234123412341234"
assert self._get("histories/%s" % invalid_history_id).status_code == 400
assert self._update(invalid_history_id, {"name": "new name"}).status_code == 400
assert self._delete("histories/%s" % invalid_history_id).status_code == 400
assert self._post("histories/deleted/%s/undelete" % invalid_history_id).status_code == 400
def test_create_anonymous_fails(self):
post_data = dict(name="CannotCreate")
# Using lower-level _api_url will cause key to not be injected.
histories_url = self._api_url("histories")
create_response = post(url=histories_url, data=post_data)
self._assert_status_code_is(create_response, 403)
def test_import_export(self):
history_name = "for_export"
history_id = self.dataset_populator.new_history(name=history_name)
self.dataset_populator.new_dataset(history_id, content="1 2 3")
imported_history_id = self._reimport_history(history_id, history_name)
contents_response = self._get("histories/%s/contents" % imported_history_id)
self._assert_status_code_is(contents_response, 200)
contents = contents_response.json()
assert len(contents) == 1
imported_content = self.dataset_populator.get_history_dataset_content(
history_id=imported_history_id,
dataset_id=contents[0]["id"]
)
assert imported_content == "1 2 3\n"
def test_import_export_collection(self):
from nose.plugins.skip import SkipTest
raise SkipTest("Collection import/export not yet implemented")
history_name = "for_export_with_collections"
history_id = self.dataset_populator.new_history(name=history_name)
self.dataset_collection_populator.create_list_in_history(history_id, contents=["Hello", "World"])
imported_history_id = self._reimport_history(history_id, history_name)
contents_response = self._get("histories/%s/contents" % imported_history_id)
self._assert_status_code_is(contents_response, 200)
contents = contents_response.json()
assert len(contents) == 3
def _reimport_history(self, history_id, history_name):
# Ensure the history is ready to go...
self.dataset_populator.wait_for_history(history_id, assert_ok=True)
# Export the history.
download_path = self._export(history_id)
# Create download for history
full_download_url = "%s%s?key=%s" % (self.url, download_path, self.galaxy_interactor.api_key)
download_response = get(full_download_url)
self._assert_status_code_is(download_response, 200)
def history_names():
history_index = self._get("histories")
return dict((h["name"], h) for h in history_index.json())
import_name = "imported from archive: %s" % history_name
assert import_name not in history_names()
import_data = dict(archive_source=full_download_url, archive_type="url")
import_response = self._post("histories", data=import_data)
self._assert_status_code_is(import_response, 200)
def has_history_with_name():
histories = history_names()
return histories.get(import_name, None)
imported_history = wait_on(has_history_with_name, desc="import history")
imported_history_id = imported_history["id"]
self.dataset_populator.wait_for_history(imported_history_id)
return imported_history_id
def test_create_tag(self):
post_data = dict(name="TestHistoryForTag")
history_id = self._post("histories", data=post_data).json()["id"]
tag_data = dict(value="awesometagvalue")
tag_url = "histories/%s/tags/awesometagname" % history_id
tag_create_response = self._post(tag_url, data=tag_data)
self._assert_status_code_is(tag_create_response, 200)
def _export(self, history_id):
export_url = self._api_url("histories/%s/exports" % history_id, use_key=True)
put_response = put(export_url)
self._assert_status_code_is(put_response, 202)
def export_ready_response():
put_response = put(export_url)
if put_response.status_code == 202:
return None
return put_response
put_response = wait_on(export_ready_response, desc="export ready")
self._assert_status_code_is(put_response, 200)
response = put_response.json()
self._assert_has_keys(response, "download_url")
download_path = response["download_url"]
return download_path
def _show(self, history_id):
return self._get("histories/%s" % history_id).json()
def _update(self, history_id, data):
update_url = self._api_url("histories/%s" % history_id, use_key=True)
put_response = put(update_url, json=data)
return put_response
def _create_history(self, name):
post_data = dict(name=name)
create_response = self._post("histories", data=post_data).json()
self._assert_has_keys(create_response, "name", "id")
self.assertEquals(create_response["name"], name)
return create_response
# TODO: (CE) test_create_from_copy
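# Hedged sketch (addition, not part of the original test case): outside the test
# framework, the same histories endpoint can be exercised with plain requests, using
# the `post` helper already imported above. The base URL, API key, and payload shape
# below are placeholders/assumptions, not values taken from this repository.
def _create_history_example(galaxy_url="http://localhost:8080", api_key="<api-key>"):
    response = post("%s/api/histories" % galaxy_url,
                    data={"name": "ExampleHistory"},
                    params={"key": api_key})
    response.raise_for_status()
    return response.json()["id"]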
| 41.442509
| 105
| 0.673701
| 1,443
| 11,894
| 5.202356
| 0.142065
| 0.098308
| 0.034634
| 0.039963
| 0.465299
| 0.362195
| 0.261622
| 0.234714
| 0.22126
| 0.214999
| 0
| 0.010303
| 0.21658
| 11,894
| 286
| 106
| 41.587413
| 0.795342
| 0.040609
| 0
| 0.197183
| 0
| 0
| 0.129092
| 0.028697
| 0
| 0
| 0
| 0.003497
| 0.244131
| 1
| 0.112676
| false
| 0
| 0.122066
| 0.004695
| 0.28169
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed275088d36ba770ab503299abfac84060ea0475
| 2,136
|
py
|
Python
|
AllProjects/Projects/Spider/DownloadImage.py
|
CoderXAndZ/PycharmProjects
|
94b3cc68d39614a4291bd63d4811dab61eb2e64a
|
[
"MIT"
] | 3
|
2018-08-30T03:53:25.000Z
|
2020-07-02T09:27:39.000Z
|
Spider/DownloadImage.py
|
CoderXAndZ/Python_demo
|
b1b1067f3ee2de82b4b2678ac9d09a5a0bf7d698
|
[
"MIT"
] | null | null | null |
Spider/DownloadImage.py
|
CoderXAndZ/Python_demo
|
b1b1067f3ee2de82b4b2678ac9d09a5a0bf7d698
|
[
"MIT"
] | 4
|
2020-07-01T02:29:33.000Z
|
2021-06-07T09:32:30.000Z
|
#! /usr/local/bin/python3
# -*- coding: UTF-8 -*-
# Crawl the image board and save the pictures locally
import urllib.request
import os
import random
def open_url(url):
request = urllib.request.Request(url)
request.add_header('User-Agent',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:60.0) Gecko/20100101 Firefox/60.0')
    # Add a proxy so requests go out from a different IP
iplist = ['182.90.94.113:53281', '119.28.152.208:80', '116.226.219.94:9797', ]
proxy_support = urllib.request.ProxyHandler({'http': random.choice(iplist)})
    opener = urllib.request.build_opener(proxy_support)  # build an opener with the proxy handler
    urllib.request.install_opener(opener)  # install it as the global opener
    # Fetch the page
response = urllib.request.urlopen(request)
html = response.read()
return html
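# Hedged sketch (addition, not in the original script): the same page fetch without a
# proxy, reusing the User-Agent string from open_url above. The timeout value is an
# arbitrary choice for illustration.
def open_url_simple(url, timeout=10):
    request = urllib.request.Request(url, headers={
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:60.0) Gecko/20100101 Firefox/60.0'})
    with urllib.request.urlopen(request, timeout=timeout) as response:
        return response.read()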
# Get the id of the current comment page
def get_page(url):
html = open_url(url).decode('utf-8')
a = html.find('current-comment-page') + 23
b = html.find(']',a)
print("图片id是:",html[a:b])
return html[a:b]
# Collect the image URLs found on the page and return them as a list
def find_imgs(url):
html = open_url(url).decode('utf-8')
print("html内容:", html)
imgs_addrs = []
a = html.find('img src=')
    while a != -1:  # keep going while the 'img src=' marker is found
        print("Found 'img src=' marker")
b = html.find('.gif',a,a+255)
if b != -1:
print("找到字符串b")
imgs_addrs.append(html[a+9:b+4])
else:
print("未找到字符串b")
b = a + 9
a = html.find('img src=',b)
return imgs_addrs
# Save the images to disk
def save_imgs(folder,imgs_addrs):
print("folder", folder, "imgs_addrs", imgs_addrs)
for each in imgs_addrs:
filename = each.split('/')[-1]
with open(filename,'wb') as f:
img = open_url(each)
f.write(img)
# Download the images
def download_img(folder='Image',pages=10):
if os.path.exists(folder) == False:
os.mkdir(folder)
os.chdir(folder)
url = 'http://jandan.net/ooxx/'
page_num = int(get_page(url))
for i in range(pages):
page_num -= i
page_url = url + 'page-' + str(page_num) + '#comments'
print("页面链接是:",page_url)
# 图片列表
imgs_addrs = find_imgs(page_url)
save_imgs(folder,imgs_addrs)
if __name__ == '__main__':
download_img()
| 24.837209
| 108
| 0.586142
| 303
| 2,136
| 4
| 0.452145
| 0.066832
| 0.024752
| 0.023102
| 0.107261
| 0.044554
| 0.044554
| 0.044554
| 0
| 0
| 0
| 0.050346
| 0.256086
| 2,136
| 86
| 109
| 24.837209
| 0.712398
| 0.059457
| 0
| 0.035088
| 0
| 0.017544
| 0.154732
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.087719
| false
| 0
| 0.052632
| 0
| 0.192982
| 0.122807
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed2762c6cacc123594407c4391a955c0922acc79
| 7,324
|
py
|
Python
|
utility/extractor_batch.py
|
BA-HanseML/NF_Prj_MIMII_Dataset
|
c9dd130a48c5ee28491a3f9369ace8f7217753d6
|
[
"MIT"
] | 10
|
2020-08-25T21:12:32.000Z
|
2021-11-04T22:14:37.000Z
|
utility/extractor_batch.py
|
BA-HanseML/NF_Prj_MIMII_Dataset
|
c9dd130a48c5ee28491a3f9369ace8f7217753d6
|
[
"MIT"
] | 44
|
2020-05-04T11:37:55.000Z
|
2021-09-26T04:12:23.000Z
|
utility/extractor_batch.py
|
ArneSch/NF_Prj_MIMII_Dataset
|
c9dd130a48c5ee28491a3f9369ace8f7217753d6
|
[
"MIT"
] | 4
|
2020-11-24T02:14:13.000Z
|
2021-07-01T08:52:59.000Z
|
print('load extractor_batch')
# Utility to run multiple feature extraction
# diagrams over many files with multiple threads
import pandas as pd
import numpy as np  # used below (np.ones, np.round) but missing from the original imports
import os
import sys
import glob
from tqdm.auto import tqdm
from queue import Queue
from threading import Thread
from datetime import datetime
import time
import logging
# thread class
class ExtractorDiagramThread(Thread):
def __init__(self, queue,extdia ,wn):
Thread.__init__(self)
self.queue = queue
self.wn = wn
self.extdia = extdia
self.stop = False
def run(self):
while not self.stop:
# Get the work from the queue and expand the tuple
file_path, target_class = self.queue.get()
            # execute the diagram
self.extdia.execute_diagram(file_path,target_class)
self.queue.task_done()
def IfStrReturnList(s):
if type(s) == str:
return [s]
else:
return s
def time_stemp_str():
now = datetime.now()
return (now.strftime("%Y-%m-%d %H:%M:%S"))
class LoggerWrap():
def __init__(self):
self.logger = logging.getLogger('feature_extraction_batch')
if (self.logger.hasHandlers()):
self.logger.handlers.clear()
self.logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
self.fh = logging.FileHandler('feature_extraction_batch.log')
self.fh.setLevel(logging.DEBUG)
self.logger.addHandler(self.fh)
def close(self):
print('close log file')
#print(self.fh)
self.fh.close()
logging.shutdown()
def log(self,s):
m = time_stemp_str() + ': ' + s
self.logger.info(m)
print(m)
def get_file_list(machine, snr, id, target_class_map,
FileCountLimit,
datset_folder_from_base,
base_folder):
flist = []
tlsit = []
tn = {}
fn = {}
for tc in target_class_map:
fn[tc] = sorted( \
glob.glob( \
os.path.abspath( "{base}/{SNR}/{machine}/id_{ID}/{n}/*.{ext}".format(
base=base_folder+datset_folder_from_base,
SNR=snr,
machine=machine,ID=id,
n=tc,
ext='wav' ))))
if FileCountLimit:
if FileCountLimit < len(fn[tc]):
fn[tc] = fn[tc][:FileCountLimit]
tn[tc] = np.ones(len(fn[tc]), dtype='int')*target_class_map[tc]
for tc in target_class_map:
flist+= fn[tc]
tlsit+=(list((tn[tc])))
return flist, tlsit
def multithreadpolltracker(queue, total):
last = total
done_l = 0
pbar = tqdm(total=total)
while not queue.empty():
time.sleep(0.05)
if last > queue.qsize():
done = total-int(queue.qsize())
#print(done, end ="--")
pbar.update(done-done_l)
done_l = done
last = queue.qsize()
queue.join()
done = total
pbar.update(done)
# Main Function
def extractor_batch(base_folder, target_folder, extdia,
FileFindDict = {'SNR': '6dB',
'machine': 'pump',
'ID': ['00']},
n_jobs = 1,
target_class_map = {'abnormal':1, 'normal': 0},
FileCountLimit = None,
datset_folder_from_base = 'dataset',
augment = False, # create one augmentation for a given target class i.e. 'normal'
                    DeviceType = 0, # 0 = continuous or 1 = sporadic
                    fHP = None, # simple FIR HP to cut off very low freq so the MEL bands are not overloaded
                    main_channel = 0): # assuming a DOA step was able to find the main direction (pseudo DOA ...)
lw = LoggerWrap()
base_folder_full = os.path.abspath(base_folder)
target_folder_full = os.path.abspath(base_folder+target_folder)
os.makedirs(target_folder_full, exist_ok=True)
lw.log('Target folder will be: ' + target_folder_full)
    lw.log('Extractor diagram is of type: ' + str(extdia))
for m in IfStrReturnList(FileFindDict['machine']):
for snr in IfStrReturnList(FileFindDict['SNR']):
for id in IfStrReturnList(FileFindDict['ID']):
lw.log('-'*44 )
                lw.log('Working on machine part: ' + m + ' SNR: ' + snr + ' ID: ' + id)
ts = time.time()
# create file list for ID batch
filelist, targetlist = get_file_list(m, snr, id,
target_class_map,
FileCountLimit,
datset_folder_from_base,
base_folder)
lw.log('Files to process: ' + str(len(filelist)) )
# start processing
if n_jobs == 1: # in the notebook
ed = extdia(base_folder,0,main_channel,augment,DeviceType,fHP)
pbar= tqdm(total = len(filelist))
for f,tc in (zip(filelist, targetlist)):
ed.execute_diagram(f,tc)
pbar.update()
outport_akkulist_tofile(base_folder,target_folder,ed,m,snr,id)
lw.log('list for the id pickled' )
else: # to threads
# create the threads and akku diagram
edl = []
wl = []
queue = Queue()
for w in range(n_jobs):
edl.append(extdia(base_folder,w,main_channel,augment,DeviceType,fHP))
worker = ExtractorDiagramThread(queue,edl[w],w)
worker.daemon = True
worker.start()
wl.append(worker)
# fill the Queue
lw.log('multithread mode filling the queue' )
for f,tc in (zip(filelist, targetlist)):
queue.put((f, tc))
multithreadpolltracker(queue, len(filelist))
for w in wl:
w.stop = True
lw.log('multithread mode all threads done' )
joinlist = outport_akkulist_join(exdia_list=edl)
outport_akkulist_tofile(base_folder, target_folder, joinlist, m, snr, id)
lw.log('multithread mode list joined and pickled for the id' )
                    del edl # trying to fight the memory leak
del joinlist
tneeded_sec = np.round(time.time()- ts,2)
tneeded_min = np.round(tneeded_sec/60,2)
lw.log('total time needed for the ID: ' + str(tneeded_sec) + 'sec' + ' = ' + str(tneeded_min) + 'min')
lw.close()
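# Hedged usage sketch (illustrative addition, not part of the original utility): a typical
# call passes an extractor-diagram class plus the file-search dictionary. The class name
# `ExtractorDiagramExample` and the folder paths below are placeholders; the real diagram
# classes and dataset layout live elsewhere in this project, so the call is left commented.
#
# extractor_batch(base_folder='../',
#                 target_folder='extracted_features/pump_6dB',
#                 extdia=ExtractorDiagramExample,
#                 FileFindDict={'SNR': '6dB', 'machine': 'pump', 'ID': ['00', '02']},
#                 n_jobs=4,
#                 FileCountLimit=50)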
| 38.34555
| 120
| 0.497679
| 780
| 7,324
| 4.542308
| 0.297436
| 0.031047
| 0.023709
| 0.031047
| 0.151284
| 0.127576
| 0.099915
| 0.059272
| 0.059272
| 0.035563
| 0
| 0.005068
| 0.407291
| 7,324
| 191
| 121
| 38.34555
| 0.811103
| 0.087111
| 0
| 0.066225
| 0
| 0
| 0.075973
| 0.014515
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066225
| false
| 0
| 0.066225
| 0
| 0.172185
| 0.019868
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed27ebcd5adfdfd408f83681c96698af5592ab3f
| 393
|
py
|
Python
|
multitenancy/context_processors.py
|
cmalek/django-site-multitenancy
|
1b943f63c0d6247529805e05dcced68ceffa2a69
|
[
"Apache-2.0"
] | null | null | null |
multitenancy/context_processors.py
|
cmalek/django-site-multitenancy
|
1b943f63c0d6247529805e05dcced68ceffa2a69
|
[
"Apache-2.0"
] | null | null | null |
multitenancy/context_processors.py
|
cmalek/django-site-multitenancy
|
1b943f63c0d6247529805e05dcced68ceffa2a69
|
[
"Apache-2.0"
] | null | null | null |
from .models import Tenant
def tenant(request):
"""
Return context variables required by apps that use django-site-multitenancy.
If there is no 'tenant' attribute in the request, extract one from the request.
"""
if hasattr(request, 'tenant'):
tenant = request.tenant
else:
tenant = Tenant.objects.get_current(request)
return {'tenant': tenant}
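# Hedged configuration sketch (addition, not part of the original module): to expose the
# 'tenant' variable in templates, the consuming project lists this context processor in
# its TEMPLATES setting. The settings snippet below is standard Django configuration and
# only the dotted path to this module comes from the repository itself.
#
# TEMPLATES = [{
#     'BACKEND': 'django.template.backends.django.DjangoTemplates',
#     'APP_DIRS': True,
#     'OPTIONS': {
#         'context_processors': [
#             'django.template.context_processors.request',
#             'multitenancy.context_processors.tenant',
#         ],
#     },
# }]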
| 24.5625
| 83
| 0.6743
| 49
| 393
| 5.387755
| 0.632653
| 0.136364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.234097
| 393
| 15
| 84
| 26.2
| 0.877076
| 0.399491
| 0
| 0
| 0
| 0
| 0.055556
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.142857
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed2903bbc29759d94352b5e8a5d0370e20ce8b16
| 361
|
py
|
Python
|
pset_classes/class_basics/solutions/p1.py
|
mottaquikarim/pydev-psets
|
9749e0d216ee0a5c586d0d3013ef481cc21dee27
|
[
"MIT"
] | 5
|
2019-04-08T20:05:37.000Z
|
2019-12-04T20:48:45.000Z
|
pset_classes/class_basics/solutions/p1.py
|
mottaquikarim/pydev-psets
|
9749e0d216ee0a5c586d0d3013ef481cc21dee27
|
[
"MIT"
] | 8
|
2019-04-15T15:16:05.000Z
|
2022-02-12T10:33:32.000Z
|
pset_classes/class_basics/solutions/p1.py
|
mottaquikarim/pydev-psets
|
9749e0d216ee0a5c586d0d3013ef481cc21dee27
|
[
"MIT"
] | 2
|
2019-04-10T00:14:42.000Z
|
2020-02-26T20:35:21.000Z
|
"""
Person class
"""
# Create a Person class with the following properties
# 1. name
# 2. age
# 3. social security number
class Person:
def __init__(self, name, age, social_number):
self.name = name
self.age = age
self.social = social_number
p1 = Person("John", 36, "111-11-1111")
print(p1.name)
print(p1.age)
print(p1.social)
| 15.695652
| 53
| 0.645429
| 53
| 361
| 4.283019
| 0.490566
| 0.092511
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064516
| 0.227147
| 361
| 22
| 54
| 16.409091
| 0.749104
| 0.293629
| 0
| 0
| 0
| 0
| 0.061475
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0
| 0
| 0.222222
| 0.333333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed29375d22bb9febc1771fb6ca4eaac661a4c75a
| 19,452
|
py
|
Python
|
python/paddle/fluid/contrib/slim/tests/test_imperative_out_scale.py
|
huangxu96/Paddle
|
372ac08a171d76c745deaab0feed2d587798f734
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/contrib/slim/tests/test_imperative_out_scale.py
|
huangxu96/Paddle
|
372ac08a171d76c745deaab0feed2d587798f734
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/contrib/slim/tests/test_imperative_out_scale.py
|
huangxu96/Paddle
|
372ac08a171d76c745deaab0feed2d587798f734
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import numpy as np
import random
import unittest
import logging
import warnings
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid import core
from paddle.fluid.optimizer import AdamOptimizer
from paddle.fluid.framework import IrGraph
from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware
from paddle.fluid.contrib.slim.quantization import OutScaleForTrainingPass, OutScaleForInferencePass, QuantizationTransformPass
from paddle.fluid.dygraph.container import Sequential
from paddle.fluid.dygraph.io import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX
from paddle.nn.layer import ReLU, LeakyReLU, Sigmoid, Softmax, PReLU
from paddle.nn import Linear, Conv2D, Softmax, BatchNorm2D, MaxPool2D
from paddle.fluid.dygraph.nn import Pool2D
from paddle.fluid.log_helper import get_logger
from paddle.fluid.dygraph import nn
paddle.enable_static()
os.environ["CPU_NUM"] = "1"
if core.is_compiled_with_cuda():
fluid.set_flags({"FLAGS_cudnn_deterministic": True})
_logger = get_logger(
__name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')
def get_vaild_warning_num(warning, w):
num = 0
for i in range(len(w)):
if warning in str(w[i].message):
num += 1
return num
def StaticLenet(data, num_classes=10, classifier_activation='softmax'):
conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1")
conv2d_w2_attr = fluid.ParamAttr(name="conv2d_w_2")
fc_w1_attr = fluid.ParamAttr(name="fc_w_1")
fc_w2_attr = fluid.ParamAttr(name="fc_w_2")
fc_w3_attr = fluid.ParamAttr(name="fc_w_3")
conv2d_b2_attr = fluid.ParamAttr(name="conv2d_b_2")
fc_b1_attr = fluid.ParamAttr(name="fc_b_1")
fc_b2_attr = fluid.ParamAttr(name="fc_b_2")
fc_b3_attr = fluid.ParamAttr(name="fc_b_3")
conv1 = fluid.layers.conv2d(
data,
num_filters=6,
filter_size=3,
stride=1,
padding=1,
param_attr=conv2d_w1_attr,
bias_attr=False)
batch_norm1 = layers.batch_norm(conv1)
relu1 = layers.relu(batch_norm1)
pool1 = fluid.layers.pool2d(
relu1, pool_size=2, pool_type='max', pool_stride=2)
conv2 = fluid.layers.conv2d(
pool1,
num_filters=16,
filter_size=5,
stride=1,
padding=0,
param_attr=conv2d_w2_attr,
bias_attr=conv2d_b2_attr)
batch_norm2 = layers.batch_norm(conv2)
prelu1 = layers.prelu(batch_norm2, mode='all')
pool2 = fluid.layers.pool2d(
prelu1, pool_size=2, pool_type='max', pool_stride=2)
fc1 = fluid.layers.fc(input=pool2,
size=120,
param_attr=fc_w1_attr,
bias_attr=fc_b1_attr)
leaky_relu1 = layers.leaky_relu(fc1, alpha=0.01)
fc2 = fluid.layers.fc(input=leaky_relu1,
size=84,
param_attr=fc_w2_attr,
bias_attr=fc_b2_attr)
sigmoid1 = layers.sigmoid(fc2)
fc3 = fluid.layers.fc(input=sigmoid1,
size=num_classes,
param_attr=fc_w3_attr,
bias_attr=fc_b3_attr)
softmax1 = layers.softmax(fc3, use_cudnn=True)
return softmax1
class ImperativeLenet(fluid.dygraph.Layer):
def __init__(self, num_classes=10):
super(ImperativeLenet, self).__init__()
conv2d_w1_attr = fluid.ParamAttr(name="conv2d_w_1")
conv2d_w2_attr = fluid.ParamAttr(name="conv2d_w_2")
fc_w1_attr = fluid.ParamAttr(name="fc_w_1")
fc_w2_attr = fluid.ParamAttr(name="fc_w_2")
fc_w3_attr = fluid.ParamAttr(name="fc_w_3")
conv2d_b2_attr = fluid.ParamAttr(name="conv2d_b_2")
fc_b1_attr = fluid.ParamAttr(name="fc_b_1")
fc_b2_attr = fluid.ParamAttr(name="fc_b_2")
fc_b3_attr = fluid.ParamAttr(name="fc_b_3")
self.features = Sequential(
Conv2D(
in_channels=1,
out_channels=6,
kernel_size=3,
stride=1,
padding=1,
weight_attr=conv2d_w1_attr,
bias_attr=False),
BatchNorm2D(6),
ReLU(),
Pool2D(
pool_size=2, pool_type='max', pool_stride=2),
Conv2D(
in_channels=6,
out_channels=16,
kernel_size=5,
stride=1,
padding=0,
weight_attr=conv2d_w2_attr,
bias_attr=conv2d_b2_attr),
BatchNorm2D(16),
PReLU(),
MaxPool2D(
kernel_size=2, stride=2))
self.fc = Sequential(
Linear(
in_features=400,
out_features=120,
weight_attr=fc_w1_attr,
bias_attr=fc_b1_attr),
LeakyReLU(),
Linear(
in_features=120,
out_features=84,
weight_attr=fc_w2_attr,
bias_attr=fc_b2_attr),
Sigmoid(),
Linear(
in_features=84,
out_features=num_classes,
weight_attr=fc_w3_attr,
bias_attr=fc_b3_attr),
Softmax())
def forward(self, inputs):
x = self.features(inputs)
x = fluid.layers.flatten(x, 1)
x = self.fc(x)
return x
class TestImperativeOutSclae(unittest.TestCase):
def test_out_scale_acc(self):
def _build_static_lenet(main, startup, is_test=False, seed=1000):
with fluid.unique_name.guard():
with fluid.program_guard(main, startup):
main.random_seed = seed
startup.random_seed = seed
img = fluid.layers.data(
name='image', shape=[1, 28, 28], dtype='float32')
label = fluid.layers.data(
name='label', shape=[1], dtype='int64')
prediction = StaticLenet(img)
if not is_test:
loss = fluid.layers.cross_entropy(
input=prediction, label=label)
avg_loss = fluid.layers.mean(loss)
else:
avg_loss = prediction
return img, label, avg_loss
reader = paddle.batch(
paddle.dataset.mnist.test(), batch_size=32, drop_last=True)
weight_quantize_type = 'abs_max'
activation_quantize_type = 'moving_average_abs_max'
param_init_map = {}
seed = 1000
lr = 0.001
dynamic_out_scale_list = []
static_out_scale_list = []
# imperative train
_logger.info(
"--------------------------dynamic graph qat--------------------------"
)
imperative_out_scale = ImperativeQuantAware(
weight_quantize_type=weight_quantize_type,
activation_quantize_type=activation_quantize_type)
with fluid.dygraph.guard():
np.random.seed(seed)
fluid.default_main_program().random_seed = seed
fluid.default_startup_program().random_seed = seed
lenet = ImperativeLenet()
fixed_state = {}
for name, param in lenet.named_parameters():
p_shape = param.numpy().shape
p_value = param.numpy()
if name.endswith("bias"):
value = np.zeros_like(p_value).astype('float32')
else:
value = np.random.normal(
loc=0.0, scale=0.01, size=np.product(p_shape)).reshape(
p_shape).astype('float32')
fixed_state[name] = value
param_init_map[param.name] = value
lenet.set_dict(fixed_state)
imperative_out_scale.quantize(lenet)
adam = AdamOptimizer(
learning_rate=lr, parameter_list=lenet.parameters())
dynamic_loss_rec = []
lenet.train()
for batch_id, data in enumerate(reader()):
x_data = np.array([x[0].reshape(1, 28, 28)
for x in data]).astype('float32')
y_data = np.array(
[x[1] for x in data]).astype('int64').reshape(-1, 1)
img = fluid.dygraph.to_variable(x_data)
label = fluid.dygraph.to_variable(y_data)
out = lenet(img)
loss = fluid.layers.cross_entropy(out, label)
avg_loss = fluid.layers.mean(loss)
avg_loss.backward()
adam.minimize(avg_loss)
lenet.clear_gradients()
dynamic_loss_rec.append(avg_loss.numpy()[0])
if batch_id % 100 == 0:
_logger.info('{}: {}'.format('loss', avg_loss.numpy()))
lenet.eval()
param_save_path = "test_save_quantized_model/lenet.pdparams"
save_dict = lenet.state_dict()
paddle.save(save_dict, param_save_path)
path = "./dynamic_outscale_infer_model/lenet"
dynamic_save_dir = "./dynamic_outscale_infer_model"
imperative_out_scale.save_quantized_model(
layer=lenet,
path=path,
input_spec=[
paddle.static.InputSpec(
shape=[None, 1, 28, 28], dtype='float32')
])
_logger.info(
"--------------------------static graph qat--------------------------"
)
static_loss_rec = []
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
else:
place = core.CPUPlace()
exe = fluid.Executor(place)
main = fluid.Program()
infer = fluid.Program()
startup = fluid.Program()
static_img, static_label, static_loss = _build_static_lenet(
main, startup, False, seed)
infer_img, _, infer_pre = _build_static_lenet(infer, startup, True,
seed)
with fluid.unique_name.guard():
with fluid.program_guard(main, startup):
opt = AdamOptimizer(learning_rate=lr)
opt.minimize(static_loss)
scope = core.Scope()
with fluid.scope_guard(scope):
exe.run(startup)
for param in main.all_parameters():
if "batch_norm" in param.name:
param_name = param.name.replace("norm", "norm2d")
elif 'prelu' in param.name:
param_name = param.name.replace("prelu", 'p_re_lu')
else:
param_name = param.name
param_tensor = scope.var(param.name).get_tensor()
param_tensor.set(param_init_map[param_name], place)
main_graph = IrGraph(core.Graph(main.desc), for_test=False)
infer_graph = IrGraph(core.Graph(infer.desc), for_test=True)
transform_pass = QuantizationTransformPass(
scope=scope,
place=place,
activation_quantize_type=activation_quantize_type,
weight_quantize_type=weight_quantize_type,
quantizable_op_type=['conv2d', 'depthwise_conv2d', 'mul'])
transform_pass.apply(main_graph)
transform_pass.apply(infer_graph)
outscale_pass = OutScaleForTrainingPass(scope=scope, place=place)
outscale_pass.apply(main_graph)
build_strategy = fluid.BuildStrategy()
build_strategy.fuse_all_reduce_ops = False
binary = fluid.CompiledProgram(main_graph.graph).with_data_parallel(
loss_name=static_loss.name, build_strategy=build_strategy)
feeder = fluid.DataFeeder(
feed_list=[static_img, static_label], place=place)
with fluid.scope_guard(scope):
for batch_id, data in enumerate(reader()):
loss_v, = exe.run(binary,
feed=feeder.feed(data),
fetch_list=[static_loss])
static_loss_rec.append(loss_v[0])
if batch_id % 100 == 0:
_logger.info('{}: {}'.format('loss', loss_v))
scale_inference_pass = OutScaleForInferencePass(scope=scope)
scale_inference_pass.apply(infer_graph)
save_program = infer_graph.to_program()
static_save_dir = "./static_outscale_infer_model"
with fluid.scope_guard(scope):
fluid.io.save_inference_model(
dirname=static_save_dir,
feeded_var_names=[infer_img.name],
target_vars=[infer_pre],
executor=exe,
main_program=save_program,
model_filename="lenet" + INFER_MODEL_SUFFIX,
params_filename="lenet" + INFER_PARAMS_SUFFIX)
rtol = 1e-05
atol = 1e-08
for i, (loss_d,
loss_s) in enumerate(zip(dynamic_loss_rec, static_loss_rec)):
diff = np.abs(loss_d - loss_s)
if diff > (atol + rtol * np.abs(loss_s)):
_logger.info(
"diff({}) at {}, dynamic loss = {}, static loss = {}".
format(diff, i, loss_d, loss_s))
break
self.assertTrue(
np.allclose(
np.array(dynamic_loss_rec),
np.array(static_loss_rec),
rtol=rtol,
atol=atol,
equal_nan=True),
msg='Failed to do the imperative qat.')
# load dynamic model
[dynamic_inference_program, feed_target_names, fetch_targets] = (
fluid.io.load_inference_model(
dirname=dynamic_save_dir,
executor=exe,
model_filename="lenet" + INFER_MODEL_SUFFIX,
params_filename="lenet" + INFER_PARAMS_SUFFIX))
# load static model
[static_inference_program, feed_target_names, fetch_targets] = (
fluid.io.load_inference_model(
dirname=static_save_dir,
executor=exe,
model_filename="lenet" + INFER_MODEL_SUFFIX,
params_filename="lenet" + INFER_PARAMS_SUFFIX))
dynamic_ops = dynamic_inference_program.global_block().ops
static_ops = static_inference_program.global_block().ops
for op in dynamic_ops[:]:
if op.type == "flatten2" or 'fake' in op.type:
dynamic_ops.remove(op)
for op in static_ops[:]:
if 'fake' in op.type:
static_ops.remove(op)
op_count = 0
for i in range(len(dynamic_ops)):
if dynamic_ops[i].has_attr("out_threshold"):
op_count += 1
self.assertTrue(dynamic_ops[i].type == static_ops[i].type)
self.assertTrue(dynamic_ops[i].attr("out_threshold") ==
static_ops[i].attr("out_threshold"))
self.assertTrue(op_count == 13)
class TestSaveQuantizedModelFromCheckPoint(unittest.TestCase):
def test_save_quantized_model(self):
weight_quantize_type = 'abs_max'
activation_quantize_type = 'moving_average_abs_max'
load_param_path = "test_save_quantized_model/lenet.pdparams"
path = "./dynamic_outscale_infer_model_from_checkpoint/lenet"
dynamic_model_save_dir = "./dynamic_outscale_infer_model_from_checkpoint"
static_model_save_dir = "./static_outscale_infer_model"
imperative_out_scale = ImperativeQuantAware(
weight_quantize_type=weight_quantize_type,
activation_quantize_type=activation_quantize_type)
with fluid.dygraph.guard():
lenet = ImperativeLenet()
load_dict = paddle.load(load_param_path)
imperative_out_scale.quantize(lenet)
lenet.set_dict(load_dict)
imperative_out_scale.save_quantized_model(
layer=lenet,
path=path,
input_spec=[
paddle.static.InputSpec(
shape=[None, 1, 28, 28], dtype='float32')
])
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
else:
place = core.CPUPlace()
exe = fluid.Executor(place)
# load dynamic model
[dynamic_inference_program, feed_target_names, fetch_targets] = (
fluid.io.load_inference_model(
dirname=dynamic_model_save_dir,
executor=exe,
model_filename="lenet" + INFER_MODEL_SUFFIX,
params_filename="lenet" + INFER_PARAMS_SUFFIX))
# load static model
[static_inference_program, feed_target_names, fetch_targets] = (
fluid.io.load_inference_model(
dirname=static_model_save_dir,
executor=exe,
model_filename="lenet" + INFER_MODEL_SUFFIX,
params_filename="lenet" + INFER_PARAMS_SUFFIX))
dynamic_ops = dynamic_inference_program.global_block().ops
static_ops = static_inference_program.global_block().ops
for op in dynamic_ops[:]:
if op.type == "flatten2" or 'fake' in op.type:
dynamic_ops.remove(op)
for op in static_ops[:]:
if 'fake' in op.type:
static_ops.remove(op)
op_count = 0
for i in range(len(dynamic_ops)):
if dynamic_ops[i].has_attr("out_threshold"):
op_count += 1
self.assertTrue(dynamic_ops[i].type == static_ops[i].type)
self.assertTrue(dynamic_ops[i].attr("out_threshold") ==
static_ops[i].attr("out_threshold"))
self.assertTrue(op_count == 13)
class TestSaveQuantizedModel_Warning(unittest.TestCase):
def test_warning(self):
path = "./dynamic_outscale_infer_model_with_warnings/lenet"
imperative_out_scale = ImperativeQuantAware()
with fluid.dygraph.guard():
lenet = ImperativeLenet()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
imperative_out_scale.save_quantized_model(
layer=lenet,
path=path,
input_spec=[
paddle.static.InputSpec(
shape=[None, 1, 28, 28], dtype='float32')
])
warning_message = "Warning: No Layer of the model while to be saved contains the out_threshold attribute, " \
"so the generated inference model would not contain the out_threshold."
num = get_vaild_warning_num(warning_message, w)
assert num == 1
if __name__ == '__main__':
unittest.main()
| 38.826347
| 127
| 0.585955
| 2,232
| 19,452
| 4.814964
| 0.177419
| 0.015074
| 0.030148
| 0.036847
| 0.476226
| 0.425793
| 0.381781
| 0.3494
| 0.3427
| 0.311715
| 0
| 0.021148
| 0.316934
| 19,452
| 500
| 128
| 38.904
| 0.787687
| 0.034649
| 0
| 0.369412
| 0
| 0
| 0.07128
| 0.030389
| 0
| 0
| 0
| 0
| 0.018824
| 1
| 0.018824
| false
| 0.018824
| 0.051765
| 0
| 0.089412
| 0.002353
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed297c05f15fda89099111b92e19812bf53e5838
| 4,289
|
py
|
Python
|
grr/server/hunts/results.py
|
nickamon/grr
|
ad1936c74728de00db90f6fafa47892b54cfc92d
|
[
"Apache-2.0"
] | null | null | null |
grr/server/hunts/results.py
|
nickamon/grr
|
ad1936c74728de00db90f6fafa47892b54cfc92d
|
[
"Apache-2.0"
] | 1
|
2018-05-08T21:15:51.000Z
|
2018-05-08T21:15:51.000Z
|
grr/server/hunts/results.py
|
nickamon/grr
|
ad1936c74728de00db90f6fafa47892b54cfc92d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Classes to store and manage hunt results.
"""
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib.rdfvalues import structs as rdf_structs
from grr_response_proto import jobs_pb2
from grr.server import access_control
from grr.server import aff4
from grr.server import data_store
from grr.server import sequential_collection
from grr.server.aff4_objects import aff4_queue
class HuntResultNotification(rdf_structs.RDFProtoStruct):
protobuf = jobs_pb2.HuntResultNotification
rdf_deps = [
rdfvalue.RDFDatetime,
rdfvalue.RDFURN,
]
def ResultRecord(self):
# TODO(amoser): The subpath could be part of the notification.
return data_store.Record(
queue_id=self.result_collection_urn,
timestamp=self.timestamp,
suffix=self.suffix,
subpath="Results",
value=None)
RESULT_NOTIFICATION_QUEUE = rdfvalue.RDFURN("aff4:/hunt_results_queue")
class HuntResultQueue(aff4_queue.Queue):
"""A global queue of hunt results which need to be processed."""
rdf_type = HuntResultNotification
@classmethod
def ClaimNotificationsForCollection(cls,
token=None,
start_time=None,
lease_time=200,
collection=None):
"""Return unclaimed hunt result notifications for collection.
Args:
token: The security token to perform database operations with.
start_time: If set, an RDFDateTime indicating at what point to start
claiming notifications. Only notifications with a timestamp after this
point will be claimed.
lease_time: How long to claim the notifications for.
collection: The urn of the collection to find notifications for. If unset,
the earliest (unclaimed) notification will determine the collection.
Returns:
A pair (collection, results) where collection is the collection
that notifications were retrieved for and results is a list of
Record objects which identify GrrMessage within the result
collection.
"""
class CollectionFilter(object):
def __init__(self, collection):
self.collection = collection
def FilterRecord(self, notification):
if self.collection is None:
self.collection = notification.result_collection_urn
return self.collection != notification.result_collection_urn
f = CollectionFilter(collection)
results = []
with aff4.FACTORY.OpenWithLock(
RESULT_NOTIFICATION_QUEUE,
aff4_type=HuntResultQueue,
lease_time=300,
blocking=True,
blocking_sleep_interval=15,
blocking_lock_timeout=600,
token=token) as queue:
for record in queue.ClaimRecords(
record_filter=f.FilterRecord,
start_time=start_time,
timeout=lease_time,
limit=100000):
results.append(record)
return (f.collection, results)
@classmethod
def DeleteNotifications(cls, records, token=None):
"""Delete hunt notifications."""
cls.DeleteRecords(records, token=token)
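# A minimal usage sketch of the two classmethods documented above, assuming the
# default security token and a 200-second lease are acceptable: claim a batch
# of notifications for one collection, process the identified records, then
# delete the notifications.
def _example_drain_result_queue(token=None):
  collection_urn, records = HuntResultQueue.ClaimNotificationsForCollection(
      token=token, lease_time=200)
  # ... fetch and process the GrrMessages identified by `records` here ...
  HuntResultQueue.DeleteNotifications(records, token=token)
  return collection_urn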
class HuntResultCollection(sequential_collection.GrrMessageCollection):
"""Sequential HuntResultCollection."""
@classmethod
def StaticAdd(cls,
collection_urn,
rdf_value,
mutation_pool=None,
timestamp=None,
suffix=None,
**kwargs):
ts = super(HuntResultCollection, cls).StaticAdd(
collection_urn,
rdf_value,
mutation_pool=mutation_pool,
timestamp=timestamp,
suffix=suffix,
**kwargs)
HuntResultQueue.StaticAdd(
RESULT_NOTIFICATION_QUEUE,
HuntResultNotification(
result_collection_urn=collection_urn, timestamp=ts[0],
suffix=ts[1]),
mutation_pool=mutation_pool)
return ts
class ResultQueueInitHook(registry.InitHook):
pre = [aff4.AFF4InitHook]
def Run(self):
try:
with aff4.FACTORY.Create(
RESULT_NOTIFICATION_QUEUE,
HuntResultQueue,
mode="w",
token=aff4.FACTORY.root_token):
pass
except access_control.UnauthorizedAccess:
pass
| 30.41844
| 80
| 0.669853
| 458
| 4,289
| 6.131004
| 0.362445
| 0.022436
| 0.023148
| 0.027066
| 0.055556
| 0.055556
| 0
| 0
| 0
| 0
| 0
| 0.010117
| 0.262532
| 4,289
| 140
| 81
| 30.635714
| 0.877648
| 0.22616
| 0
| 0.129032
| 0
| 0
| 0.009895
| 0.007421
| 0
| 0
| 0
| 0.007143
| 0
| 1
| 0.075269
| false
| 0.021505
| 0.096774
| 0.010753
| 0.311828
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed298e1020b52cd93721c03300f989d51ef1134b
| 4,748
|
py
|
Python
|
src/simple_sharepoint/site.py
|
NodeJSmith/py-simple-rest-sharepoint
|
77ee5f76364e7b6096228945ed7e3bd637214a66
|
[
"MIT"
] | null | null | null |
src/simple_sharepoint/site.py
|
NodeJSmith/py-simple-rest-sharepoint
|
77ee5f76364e7b6096228945ed7e3bd637214a66
|
[
"MIT"
] | null | null | null |
src/simple_sharepoint/site.py
|
NodeJSmith/py-simple-rest-sharepoint
|
77ee5f76364e7b6096228945ed7e3bd637214a66
|
[
"MIT"
] | null | null | null |
"""
Module for higher-level SharePoint REST API actions - utilizes methods in the api.py module
"""
class Site():
def __init__(self, sp):
self.sp = sp
@property
def info(self):
endpoint = "_api/site"
value = self.sp.get(endpoint).json()
return value
@property
def web(self):
endpoint = "_api/web"
value = self.sp.get(endpoint).json()
return value
@property
def contextinfo(self):
return self.sp.contextinfo
@property
def contenttypes(self):
endpoint = "_api/web/contenttypes"
value = self.sp.get(endpoint).json().get('value')
return value
@property
def eventreceivers(self):
endpoint = "_api/web/eventreceivers"
value = self.sp.get(endpoint).json().get('value')
return value
@property
def features(self):
endpoint = "_api/web/features"
value = self.sp.get(endpoint).json().get('value')
return value
@property
def fields(self):
endpoint = "_api/web/fields"
value = self.sp.get(endpoint).json().get('value')
return value
@property
def lists(self):
endpoint = "_api/web/lists"
value = self.sp.get(endpoint).json().get('value')
return value
@property
def siteusers(self):
endpoint = "_api/web/siteusers"
value = self.sp.get(endpoint).json().get('value')
return value
@property
def groups(self):
endpoint = "_api/web/sitegroups"
value = self.sp.get(endpoint).json().get('value')
return value
@property
def roleassignments(self):
endpoint = "_api/web/roleassignments"
value = self.sp.get(endpoint).json().get('value')
return value
# def set_title_field_to_optional(self, list_title):
# """Sets the Title field in the given list to optional
# :param list_title: str: title of SharePoint list
# """
# # TODO - this likely is not necessary anymore, since we are not creating new lists
# field_rec = [x for x in self.get_field(list_title)
# if x['InternalName'] == "Title"][0]
# if field_rec and field_rec.get('Required'):
# body = {'Required': False}
# self.update_list_field(field_rec, list_title, body)
# def check_field_exists(self, list_title, field_title):
# """Check that a field exists to avoid error from attempting to access non-existent field
# :param list_title: str: title of SharePoint list
# :param field_title: str: title of field in SharePoint list
# :returns: bool
# """
# field_rec = self._get_first_or_none(
# "InternalName", field_title, list_data=self.get_list_fields(list_title))
# return field_rec is not None
# def update_list_field(self, field_rec, list_title, body):
# """Given a field record, a list title, and the json body to update with, updates the SharePoint list field
# :param field_rec: dict: field record from SharePoint field query
# :param list_title: str: title of SharePoint list
# :param body: dict: dictionary structured for SharePoint REST api fields endpoint
# """
# field_id = field_rec.get('Id')
# update_field_url = "_api/web/lists/GetByTitle('{0}')/fields('{1}')".format(
# list_title, field_id)
# response = self.sp.post(url=update_field_url, json=body)
# response.raise_for_status()
# def get_email_from_sharepoint_id(self, sharepoint_id: int):
# """Returns email address from a SharePoint integer user id value
# :param sp_user_id: int: SharePoint user id
# :returns: str
# """
# return self._get_first_or_none("Id", sharepoint_id, list_data=self.siteusers).get("Email")
# def get_sharepoint_id_from_email(self, email):
# """Returns SharePoint integer user ID from an email address
# :param username: str: email address
# :returns: int
# """
# return self._get_first_or_none("Email", email, list_data=self.siteusers).get("Id")
def _get_first_or_none(self, compare_column, compare_value, list_data=None, url=None):
if not list_data and not url:
            raise ValueError("either list_data or url must be provided")
if not list_data:
list_data = self.sp.get(url).json().get('value')
try:
return [x for x in list_data if x[compare_column] == compare_value][0]
except IndexError as e:
return None
# TODO Add large file upload with chunking
# https://github.com/JonathanHolvey/sharepy/issues/23
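# Minimal usage sketch, assuming `sp` is a sharepy/requests-style client whose
# `get(endpoint)` returns a response object with a `.json()` method (the same
# contract the properties above rely on).
def _example_list_titles(sp):
    site = Site(sp)
    return [item.get("Title") for item in site.lists]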
| 31.03268
| 116
| 0.619208
| 607
| 4,748
| 4.680395
| 0.220758
| 0.031679
| 0.034847
| 0.049278
| 0.286167
| 0.248152
| 0.231257
| 0.231257
| 0.217881
| 0.18761
| 0
| 0.001732
| 0.270219
| 4,748
| 152
| 117
| 31.236842
| 0.818182
| 0.488627
| 0
| 0.476923
| 0
| 0
| 0.106481
| 0.02862
| 0
| 0
| 0
| 0.006579
| 0
| 1
| 0.2
| false
| 0
| 0
| 0.015385
| 0.430769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed2abc9ecde219ec6fc0610ffdc2709ba8c49de3
| 6,317
|
py
|
Python
|
pypeit/tests/test_metadata.py
|
rcooke-ast/PYPIT
|
0cb9c4cb422736b855065a35aefc2bdba6d51dd0
|
[
"BSD-3-Clause"
] | null | null | null |
pypeit/tests/test_metadata.py
|
rcooke-ast/PYPIT
|
0cb9c4cb422736b855065a35aefc2bdba6d51dd0
|
[
"BSD-3-Clause"
] | null | null | null |
pypeit/tests/test_metadata.py
|
rcooke-ast/PYPIT
|
0cb9c4cb422736b855065a35aefc2bdba6d51dd0
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import glob
import shutil
import yaml
from IPython import embed
import pytest
import numpy as np
from pypeit.par.util import parse_pypeit_file
from pypeit.pypeitsetup import PypeItSetup
from pypeit.tests.tstutils import dev_suite_required, data_path
from pypeit.metadata import PypeItMetaData
from pypeit.spectrographs.util import load_spectrograph
from pypeit.scripts.setup import Setup
def test_read_combid():
# ------------------------------------------------------------------
# In case of failed tests
setup_dir = data_path('setup_files')
if os.path.isdir(setup_dir):
shutil.rmtree(setup_dir)
config_dir = data_path('shane_kast_blue_A')
if os.path.isdir(config_dir):
shutil.rmtree(config_dir)
# ------------------------------------------------------------------
# Generate the pypeit file with the comb_id
droot = data_path('b')
pargs = Setup.parse_args(['-r', droot, '-s', 'shane_kast_blue', '-c=all', '-b',
'--extension=fits.gz', '--output_path={:s}'.format(data_path(''))])
Setup.main(pargs)
shutil.rmtree(setup_dir)
pypeit_file = os.path.join(config_dir, 'shane_kast_blue_A.pypeit')
cfg_lines, data_files, frametype, usrdata, setups, _ = parse_pypeit_file(pypeit_file)
# Get the spectrograph
spectrograph = None
for l in cfg_lines:
if 'spectrograph' in l:
spectrograph = load_spectrograph(l.split(' ')[-1])
break
assert spectrograph is not None, 'Did not appropriately read spectrograph'
# Set the metadata
pmd = PypeItMetaData(spectrograph, spectrograph.default_pypeit_par(), files=data_files,
usrdata=usrdata, strict=False)
indx = pmd['filename'] == 'b27.fits.gz'
assert pmd['comb_id'][indx] == [1], 'Incorrect combination group ID'
assert pmd['comb_id'][np.where(~indx)[0]][0] == -1, 'Incorrect combination group ID'
shutil.rmtree(config_dir)
@dev_suite_required
def test_lris_red_multi_400():
file_list = glob.glob(os.path.join(os.environ['PYPEIT_DEV'], 'RAW_DATA', 'keck_lris_red',
'multi_400_8500_d560', '*.fits.gz'))
cfg_lines = ['[rdx]',
'spectrograph = keck_lris_red']
ps = PypeItSetup(file_list, cfg_lines=cfg_lines)
ps.build_fitstbl()
ps.get_frame_types(flag_unknown=True)
cfgs = ps.fitstbl.unique_configurations()
ps.fitstbl.set_configurations(cfgs)
ps.fitstbl.set_calibration_groups() #global_frames=['bias', 'dark'])
# Test
assert np.all(ps.fitstbl['setup'] == 'A')
@dev_suite_required
def test_lris_red_multi():
file_list = glob.glob(os.path.join(os.environ['PYPEIT_DEV'], 'RAW_DATA', 'keck_lris_red',
'multi*', '*.fits*'))
cfg_lines = ['[rdx]',
'spectrograph = keck_lris_red']
ps = PypeItSetup(file_list, cfg_lines=cfg_lines)
ps.build_fitstbl()
ps.get_frame_types(flag_unknown=True)
cfgs = ps.fitstbl.unique_configurations()
ps.fitstbl.set_configurations(cfgs)
ps.fitstbl.set_calibration_groups() #global_frames=['bias', 'dark'])
@dev_suite_required
def test_lris_red_multi_calib():
file_list = glob.glob(os.path.join(os.environ['PYPEIT_DEV'], 'RAW_DATA', 'keck_lris_red',
'multi_400_8500_d560', '*.fits.gz'))
cfg_lines = ['[rdx]',
'spectrograph = keck_lris_red']
ps = PypeItSetup(file_list, cfg_lines=cfg_lines)
ps.build_fitstbl()
ps.get_frame_types(flag_unknown=True)
cfgs = ps.fitstbl.unique_configurations()
ps.fitstbl.set_configurations(cfgs)
ps.fitstbl.set_calibration_groups() #global_frames=['bias', 'dark'])
cfile = data_path('test.calib')
ps.fitstbl.write_calib(cfile)
with open(cfile, 'r') as f:
calib = yaml.load(f, Loader=yaml.FullLoader)
assert np.array_equal(list(calib['A'].keys()), ['--', 1]), \
'Calibrations dictionary read incorrectly.'
os.remove(cfile)
@dev_suite_required
def test_lris_red_multi_run():
# Perform the setup
file_list = glob.glob(os.path.join(os.environ['PYPEIT_DEV'], 'RAW_DATA', 'keck_lris_red',
'multi*', '*.fits*'))
cfg_lines = ['[rdx]',
'spectrograph = keck_lris_red']
ps = PypeItSetup(file_list, cfg_lines=cfg_lines)
ps.run(setup_only=True)
# Test
#assert len(ps.setup_dict) == 2, 'Should find two setups'
assert len(ps.fitstbl) >= 40, 'Should find 40+ files'
arcs = ps.fitstbl['filename'][ps.fitstbl.find_frames('arc')]
assert len(arcs) >= 2, 'Should find two or more arcs'
assert 'r170320_2017.fits.gz' in arcs, \
'Should have identified r170320_2017.fits.gz as an arc'
assert 'r170816_0057.fits' in ps.fitstbl['filename'][ps.fitstbl.find_frames('science')], \
'Should have identified r170816_0057.fits as a science frame'
# Clean-up
#os.remove('keck_lris_red.lst')
#os.remove('keck_lris_red.setups')
os.remove('keck_lris_red.sorted')
@dev_suite_required
def test_lris_blue_pypeit_overwrite():
f = os.path.join(os.environ['PYPEIT_DEV'],
'pypeit_files/keck_lris_blue_long_400_3400_d560.pypeit')
assert os.path.isfile(f), 'Could not find pypeit file.'
cfg_lines, data_files, frametype, usrdata, setups, _ = parse_pypeit_file(f, file_check=False)
# Change the dev path
for i in range(len(data_files)):
path_list = data_files[i].split('/')
for j,p in enumerate(path_list):
if p == 'RAW_DATA':
break
data_files[i] = os.path.join(os.environ['PYPEIT_DEV'], '/'.join(path_list[j:]))
# Read the fits table with and without the user data
spectrograph = load_spectrograph('keck_lris_blue')
par = spectrograph.default_pypeit_par()
fitstbl = PypeItMetaData(spectrograph, par, files=data_files)
fitstbl_usr = PypeItMetaData(spectrograph, par, files=data_files, usrdata=usrdata)
assert fitstbl['target'][0] == 'unknown', 'Grating name changed in file header'
assert fitstbl_usr['target'][0] == 'test', 'Grating name changed in pypeit file'
assert fitstbl['target'][0] != fitstbl_usr['target'][0], \
'Fits header value and input pypeit file value expected to be different.'
| 38.054217
| 97
| 0.653158
| 840
| 6,317
| 4.671429
| 0.233333
| 0.036697
| 0.030836
| 0.018349
| 0.436035
| 0.407238
| 0.366972
| 0.334353
| 0.298675
| 0.298675
| 0
| 0.017991
| 0.199303
| 6,317
| 165
| 98
| 38.284848
| 0.757809
| 0.088175
| 0
| 0.386555
| 0
| 0
| 0.20993
| 0.013415
| 0
| 0
| 0
| 0
| 0.109244
| 1
| 0.05042
| false
| 0
| 0.109244
| 0
| 0.159664
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed2b4e055e79bf5051229abe9c3b6aceb972d7aa
| 15,930
|
py
|
Python
|
tortoise/query_utils.py
|
DDevine/tortoise-orm
|
414737a78e98ffd247174590720f5c90aeac4dde
|
[
"Apache-2.0"
] | 1
|
2020-05-15T19:50:12.000Z
|
2020-05-15T19:50:12.000Z
|
tortoise/query_utils.py
|
Tomes111/tortoise-orm
|
8b55499a228e44f33fec9099f4d559c77c73beb7
|
[
"Apache-2.0"
] | null | null | null |
tortoise/query_utils.py
|
Tomes111/tortoise-orm
|
8b55499a228e44f33fec9099f4d559c77c73beb7
|
[
"Apache-2.0"
] | null | null | null |
from copy import copy
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Type, cast
from pypika import Table
from pypika.terms import Criterion
from tortoise.exceptions import FieldError, OperationalError
from tortoise.fields.relational import BackwardFKRelation, ManyToManyFieldInstance, RelationalField
if TYPE_CHECKING: # pragma: nocoverage
from tortoise.models import Model
from tortoise.queryset import QuerySet
def _process_filter_kwarg(
model: "Type[Model]", key: str, value: Any, table: Table
) -> Tuple[Criterion, Optional[Tuple[Table, Criterion]]]:
join = None
if value is None and f"{key}__isnull" in model._meta.filters:
param = model._meta.get_filter(f"{key}__isnull")
value = True
else:
param = model._meta.get_filter(key)
pk_db_field = model._meta.db_pk_column
if param.get("table"):
join = (
param["table"],
table[pk_db_field] == param["table"][param["backward_key"]],
)
if param.get("value_encoder"):
value = param["value_encoder"](value, model)
criterion = param["operator"](param["table"][param["field"]], value)
else:
field_object = model._meta.fields_map[param["field"]]
encoded_value = (
param["value_encoder"](value, model, field_object)
if param.get("value_encoder")
else model._meta.db.executor_class._field_to_db(field_object, value, model)
)
criterion = param["operator"](table[param["source_field"]], encoded_value)
return criterion, join
def _get_joins_for_related_field(
table: Table, related_field: RelationalField, related_field_name: str
) -> List[Tuple[Table, Criterion]]:
required_joins = []
related_table: Table = related_field.related_model._meta.basetable
if isinstance(related_field, ManyToManyFieldInstance):
through_table = Table(related_field.through)
required_joins.append(
(
through_table,
table[related_field.model._meta.db_pk_column]
== through_table[related_field.backward_key],
)
)
required_joins.append(
(
related_table,
through_table[related_field.forward_key]
== related_table[related_field.related_model._meta.db_pk_column],
)
)
elif isinstance(related_field, BackwardFKRelation):
to_field_source_field = (
related_field.to_field_instance.source_field
or related_field.to_field_instance.model_field_name
)
if table == related_table:
related_table = related_table.as_(f"{table.get_table_name()}__{related_field_name}")
required_joins.append(
(
related_table,
table[to_field_source_field] == related_table[related_field.relation_source_field],
)
)
else:
to_field_source_field = (
related_field.to_field_instance.source_field
or related_field.to_field_instance.model_field_name
)
from_field = related_field.model._meta.fields_map[related_field.source_field] # type: ignore
from_field_source_field = from_field.source_field or from_field.model_field_name
related_table = related_table.as_(f"{table.get_table_name()}__{related_field_name}")
required_joins.append(
(related_table, related_table[to_field_source_field] == table[from_field_source_field],)
)
return required_joins
class EmptyCriterion(Criterion): # type: ignore
def __or__(self, other: Criterion) -> Criterion:
return other
def __and__(self, other: Criterion) -> Criterion:
return other
def __bool__(self) -> bool:
return False
def _and(left: Criterion, right: Criterion) -> Criterion:
if left and not right:
return left
return left & right
def _or(left: Criterion, right: Criterion) -> Criterion:
if left and not right:
return left
return left | right
class QueryModifier:
"""
Internal structure used to generate SQL Queries.
"""
def __init__(
self,
where_criterion: Optional[Criterion] = None,
joins: Optional[List[Tuple[Table, Criterion]]] = None,
having_criterion: Optional[Criterion] = None,
) -> None:
self.where_criterion: Criterion = where_criterion or EmptyCriterion()
self.joins = joins if joins else []
self.having_criterion: Criterion = having_criterion or EmptyCriterion()
def __and__(self, other: "QueryModifier") -> "QueryModifier":
return QueryModifier(
where_criterion=_and(self.where_criterion, other.where_criterion),
joins=self.joins + other.joins,
having_criterion=_and(self.having_criterion, other.having_criterion),
)
def __or__(self, other: "QueryModifier") -> "QueryModifier":
if self.having_criterion or other.having_criterion:
# TODO: This could be optimized?
result_having_criterion = _or(
_and(self.where_criterion, self.having_criterion),
_and(other.where_criterion, other.having_criterion),
)
return QueryModifier(
joins=self.joins + other.joins, having_criterion=result_having_criterion
)
if self.where_criterion and other.where_criterion:
return QueryModifier(
where_criterion=self.where_criterion | other.where_criterion,
joins=self.joins + other.joins,
)
return QueryModifier(
where_criterion=self.where_criterion or other.where_criterion,
joins=self.joins + other.joins,
)
def __invert__(self) -> "QueryModifier":
if not self.where_criterion and not self.having_criterion:
return QueryModifier(joins=self.joins)
if self.having_criterion:
# TODO: This could be optimized?
return QueryModifier(
joins=self.joins,
having_criterion=_and(self.where_criterion, self.having_criterion).negate(),
)
return QueryModifier(where_criterion=self.where_criterion.negate(), joins=self.joins)
def get_query_modifiers(self) -> Tuple[Criterion, List[Tuple[Table, Criterion]], Criterion]:
"""
Returns a tuple of the query criterion.
"""
return self.where_criterion, self.joins, self.having_criterion
class Q:
"""
Q Expression container.
Q Expressions are a useful tool to compose a query from many small parts.
:param join_type: Is the join an AND or OR join type?
:param args: Inner ``Q`` expressions that you want to wrap.
:param kwargs: Filter statements that this Q object should encapsulate.
"""
__slots__ = (
"children",
"filters",
"join_type",
"_is_negated",
"_annotations",
"_custom_filters",
)
AND = "AND"
OR = "OR"
def __init__(self, *args: "Q", join_type: str = AND, **kwargs: Any) -> None:
if args and kwargs:
newarg = Q(join_type=join_type, **kwargs)
args = (newarg,) + args
kwargs = {}
if not all(isinstance(node, Q) for node in args):
raise OperationalError("All ordered arguments must be Q nodes")
#: Contains the sub-Q's that this Q is made up of
self.children: Tuple[Q, ...] = args
#: Contains the filters applied to this Q
self.filters: Dict[str, Any] = kwargs
if join_type not in {self.AND, self.OR}:
raise OperationalError("join_type must be AND or OR")
#: Specifies if this Q does an AND or OR on its children
self.join_type = join_type
self._is_negated = False
self._annotations: Dict[str, Any] = {}
self._custom_filters: Dict[str, Dict[str, Any]] = {}
def __and__(self, other: "Q") -> "Q":
"""
Returns a binary AND of Q objects, use ``AND`` operator.
:raises OperationalError: AND operation requires a Q node
"""
if not isinstance(other, Q):
raise OperationalError("AND operation requires a Q node")
return Q(self, other, join_type=self.AND)
def __or__(self, other: "Q") -> "Q":
"""
Returns a binary OR of Q objects, use ``OR`` operator.
:raises OperationalError: OR operation requires a Q node
"""
if not isinstance(other, Q):
raise OperationalError("OR operation requires a Q node")
return Q(self, other, join_type=self.OR)
def __invert__(self) -> "Q":
"""
Returns a negated instance of the Q object, use ``~`` operator.
"""
q = Q(*self.children, join_type=self.join_type, **self.filters)
q.negate()
return q
def negate(self) -> None:
"""
        Negates the current Q object. (mutation)
"""
self._is_negated = not self._is_negated
def _resolve_nested_filter(
self, model: "Type[Model]", key: str, value: Any, table: Table
) -> QueryModifier:
related_field_name = key.split("__")[0]
related_field = cast(RelationalField, model._meta.fields_map[related_field_name])
required_joins = _get_joins_for_related_field(table, related_field, related_field_name)
modifier = Q(**{"__".join(key.split("__")[1:]): value}).resolve(
model=related_field.related_model,
annotations=self._annotations,
custom_filters=self._custom_filters,
table=required_joins[-1][0],
)
return QueryModifier(joins=required_joins) & modifier
def _resolve_custom_kwarg(
self, model: "Type[Model]", key: str, value: Any, table: Table
) -> QueryModifier:
having_info = self._custom_filters[key]
annotation = self._annotations[having_info["field"]]
annotation_info = annotation.resolve(model, table)
operator = having_info["operator"]
overridden_operator = model._meta.db.executor_class.get_overridden_filter_func(
filter_func=operator
)
if overridden_operator:
operator = overridden_operator
if annotation_info["field"].is_aggregate:
modifier = QueryModifier(having_criterion=operator(annotation_info["field"], value))
else:
modifier = QueryModifier(where_criterion=operator(annotation_info["field"], value))
return modifier
def _resolve_regular_kwarg(
self, model: "Type[Model]", key: str, value: Any, table: Table
) -> QueryModifier:
if key not in model._meta.filters and key.split("__")[0] in model._meta.fetch_fields:
modifier = self._resolve_nested_filter(model, key, value, table)
else:
criterion, join = _process_filter_kwarg(model, key, value, table)
joins = [join] if join else []
modifier = QueryModifier(where_criterion=criterion, joins=joins)
return modifier
def _get_actual_filter_params(
self, model: "Type[Model]", key: str, value: Table
) -> Tuple[str, Any]:
filter_key = key
if key in model._meta.fk_fields or key in model._meta.o2o_fields:
field_object = model._meta.fields_map[key]
if hasattr(value, "pk"):
filter_value = value.pk
else:
filter_value = value
filter_key = cast(str, field_object.source_field)
elif key in model._meta.m2m_fields:
if hasattr(value, "pk"):
filter_value = value.pk
else:
filter_value = value
elif (
key.split("__")[0] in model._meta.fetch_fields
or key in self._custom_filters
or key in model._meta.filters
):
filter_value = value
else:
allowed = sorted(
model._meta.fields | model._meta.fetch_fields | set(self._custom_filters)
)
raise FieldError(f"Unknown filter param '{key}'. Allowed base values are {allowed}")
return filter_key, filter_value
def _resolve_kwargs(self, model: "Type[Model]", table: Table) -> QueryModifier:
modifier = QueryModifier()
for raw_key, raw_value in self.filters.items():
key, value = self._get_actual_filter_params(model, raw_key, raw_value)
if key in self._custom_filters:
filter_modifier = self._resolve_custom_kwarg(model, key, value, table)
else:
filter_modifier = self._resolve_regular_kwarg(model, key, value, table)
if self.join_type == self.AND:
modifier &= filter_modifier
else:
modifier |= filter_modifier
if self._is_negated:
modifier = ~modifier
return modifier
def _resolve_children(self, model: "Type[Model]", table: Table) -> QueryModifier:
modifier = QueryModifier()
for node in self.children:
node_modifier = node.resolve(model, self._annotations, self._custom_filters, table)
if self.join_type == self.AND:
modifier &= node_modifier
else:
modifier |= node_modifier
if self._is_negated:
modifier = ~modifier
return modifier
def resolve(
self,
model: "Type[Model]",
annotations: Dict[str, Any],
custom_filters: Dict[str, Dict[str, Any]],
table: Table,
) -> QueryModifier:
"""
Resolves the logical Q chain into the parts of a SQL statement.
:param model: The Model this Q Expression should be resolved on.
:param annotations: Extra annotations one wants to inject into the resultset.
        :param custom_filters: Pre-resolved filters to be passed through.
:param table: ``pypika.Table`` to keep track of the virtual SQL table
(to allow self referential joins)
"""
self._annotations = annotations
self._custom_filters = custom_filters
if self.filters:
return self._resolve_kwargs(model, table)
return self._resolve_children(model, table)
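# A minimal composition sketch for ``Q``; the field names (``age``, ``status``,
# ``name``) are assumed filter targets on some Model, and only the public
# operators documented above are used.
def _example_q_composition() -> Q:
    active_adults = Q(age__gte=18) & ~Q(status="banned")
    either_name = Q(Q(name="alice"), Q(name="bob"), join_type=Q.OR)
    return active_adults | either_name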
class Prefetch:
"""
Prefetcher container. One would directly use this when wanting to attach a custom QuerySet
for specialised prefetching.
:param relation: Related field name.
:param queryset: Custom QuerySet to use for prefetching.
"""
__slots__ = ("relation", "queryset")
def __init__(self, relation: str, queryset: "QuerySet") -> None:
self.relation = relation
self.queryset = queryset
self.queryset.query = copy(self.queryset.model._meta.basequery)
def resolve_for_queryset(self, queryset: "QuerySet") -> None:
"""
Called internally to generate prefetching query.
:param queryset: Custom QuerySet to use for prefetching.
:raises OperationalError: If field does not exist in model.
"""
relation_split = self.relation.split("__")
first_level_field = relation_split[0]
if first_level_field not in queryset.model._meta.fetch_fields:
raise OperationalError(
f"relation {first_level_field} for {queryset.model._meta.db_table} not found"
)
forwarded_prefetch = "__".join(relation_split[1:])
if forwarded_prefetch:
if first_level_field not in queryset._prefetch_map.keys():
queryset._prefetch_map[first_level_field] = set()
queryset._prefetch_map[first_level_field].add(
Prefetch(forwarded_prefetch, self.queryset)
)
else:
queryset._prefetch_queries[first_level_field] = self.queryset
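# A minimal usage sketch for ``Prefetch``; ``Tournament``, ``Event`` and the
# ``events`` relation are assumed model/relation names:
#
#   await Tournament.all().prefetch_related(
#       Prefetch("events", queryset=Event.filter(published=True)))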
| 37.748815
| 101
| 0.629881
| 1,825
| 15,930
| 5.235068
| 0.127671
| 0.035169
| 0.020724
| 0.013188
| 0.388738
| 0.312539
| 0.259996
| 0.181285
| 0.153548
| 0.149571
| 0
| 0.000868
| 0.277087
| 15,930
| 421
| 102
| 37.83848
| 0.82876
| 0.107972
| 0
| 0.225806
| 0
| 0
| 0.058002
| 0.008851
| 0
| 0
| 0
| 0.002375
| 0
| 1
| 0.083871
| false
| 0
| 0.025806
| 0.012903
| 0.225806
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed2c2633a7cab56346726bab1085f5ed14b0a1bf
| 1,372
|
py
|
Python
|
L3_numpy_pandas_2D/B_NumPy_Axis.py
|
angelmtenor/IDAFC
|
9d23746fd02e4eda2569d75b3c7a1383277e6e78
|
[
"MIT"
] | null | null | null |
L3_numpy_pandas_2D/B_NumPy_Axis.py
|
angelmtenor/IDAFC
|
9d23746fd02e4eda2569d75b3c7a1383277e6e78
|
[
"MIT"
] | null | null | null |
L3_numpy_pandas_2D/B_NumPy_Axis.py
|
angelmtenor/IDAFC
|
9d23746fd02e4eda2569d75b3c7a1383277e6e78
|
[
"MIT"
] | null | null | null |
import numpy as np
# Change True to False below to skip this block of code
# NumPy axis argument
if True:
a = np.array([
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
])
print(a.sum())
print(a.sum(axis=0))
print(a.sum(axis=1))
# Subway ridership for 5 stations on 10 different days
ridership = np.array([
[0, 0, 2, 5, 0],
[1478, 3877, 3674, 2328, 2539],
[1613, 4088, 3991, 6461, 2691],
[1560, 3392, 3826, 4787, 2613],
[1608, 4802, 3932, 4477, 2705],
[1576, 3933, 3909, 4979, 2685],
[95, 229, 255, 496, 201],
[2, 0, 1, 27, 0],
[1438, 3785, 3589, 4174, 2215],
[1342, 4043, 4009, 4665, 3033]
])
def min_and_max_riders_per_day(ridership):
"""
Fill in this function. First, for each subway station, calculate the
mean ridership per day. Then, out of all the subway stations, return the
maximum and minimum of these values. That is, find the maximum
mean-ridership-per-day and the minimum mean-ridership-per-day for any
subway station.
"""
mean_ridership_per_day = ridership.mean(axis=0)
    max_daily_ridership = mean_ridership_per_day.max()
    min_daily_ridership = mean_ridership_per_day.min()
return max_daily_ridership, min_daily_ridership
print(min_and_max_riders_per_day(ridership))
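# Quick sanity check of the axis semantics used above: with this 10x5 array,
# axis=0 collapses the day dimension (one value per station) and axis=1
# collapses the station dimension (one value per day).
assert ridership.mean(axis=0).shape == (5,)   # one mean per station
assert ridership.mean(axis=1).shape == (10,)  # one mean per day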
| 28.583333
| 85
| 0.652332
| 217
| 1,372
| 4
| 0.525346
| 0.0553
| 0.110599
| 0.131336
| 0.198157
| 0.145161
| 0.069124
| 0
| 0
| 0
| 0
| 0.170616
| 0.23105
| 1,372
| 47
| 86
| 29.191489
| 0.652133
| 0.35277
| 0
| 0.071429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.035714
| 0
| 0.107143
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed2e075475e31924b346949bce0058c5690b9443
| 7,930
|
py
|
Python
|
trainval.py
|
JanAlexanderPersonal/covid19_weak_supervision
|
5599e48c9945f1e08a2731740bc8f6e44a031703
|
[
"Apache-2.0"
] | null | null | null |
trainval.py
|
JanAlexanderPersonal/covid19_weak_supervision
|
5599e48c9945f1e08a2731740bc8f6e44a031703
|
[
"Apache-2.0"
] | null | null | null |
trainval.py
|
JanAlexanderPersonal/covid19_weak_supervision
|
5599e48c9945f1e08a2731740bc8f6e44a031703
|
[
"Apache-2.0"
] | null | null | null |
from haven import haven_chk as hc
from haven import haven_results as hr
from haven import haven_utils as hu
import torch
import torchvision
import tqdm
import pandas as pd
import pprint
import itertools
import os
import pylab as plt
import exp_configs
import time
import numpy as np
from src import models
from src import datasets
from src import utils as ut
from pprint import pformat
import argparse
from torch.utils.data import sampler
from torch.utils.data.sampler import RandomSampler
from torch.backends import cudnn
from torch.nn import functional as F
from torch.utils.data import DataLoader
cudnn.benchmark = True
import logging

# Module-level logger used by trainval() below.
logger = logging.getLogger(__name__)
def setupLogging():
"""Setup the logger for this module
"""
# Create the Logger
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
logger_formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
handler.setFormatter(logger_formatter)
root_logger.addHandler(handler)
def trainval(exp_dict, savedir_base, datadir, reset=False, num_workers=0):
    # bookkeeping stuff
# ==================
pprint.pprint(exp_dict)
exp_id = hu.hash_dict(exp_dict)
savedir = os.path.join(savedir_base, exp_id)
if reset:
hc.delete_and_backup_experiment(savedir)
os.makedirs(savedir, exist_ok=True)
hu.save_json(os.path.join(savedir, "exp_dict.json"), exp_dict)
print("Experiment saved in %s" % savedir)
logger.info(f'start trainval with experiment dict {pformat(exp_dict)}')
input('press enter')
# set seed
# ==================
seed = 42
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# Dataset
# ==================
# train set
train_set = datasets.get_dataset(dataset_dict=exp_dict["dataset"],
split="train",
datadir=datadir,
exp_dict=exp_dict,
dataset_size=exp_dict['dataset_size'])
# val set
val_set = datasets.get_dataset(dataset_dict=exp_dict["dataset"],
split="val",
datadir=datadir,
exp_dict=exp_dict,
dataset_size=exp_dict['dataset_size'])
# test set
test_set = datasets.get_dataset(dataset_dict=exp_dict["dataset"],
split="test",
datadir=datadir,
exp_dict=exp_dict,
dataset_size=exp_dict['dataset_size'])
# val_sampler = torch.utils.data.SequentialSampler(val_set)
val_loader = DataLoader(val_set,
# sampler=val_sampler,
batch_size=1,
collate_fn=ut.collate_fn,
num_workers=num_workers)
test_loader = DataLoader(test_set,
# sampler=val_sampler,
batch_size=1,
collate_fn=ut.collate_fn,
num_workers=num_workers)
# Model
# ==================
print('get model')
model = models.get_model(model_dict=exp_dict['model'],
exp_dict=exp_dict,
train_set=train_set).cuda()
# model.opt = optimizers.get_optim(exp_dict['opt'], model)
model_path = os.path.join(savedir, "model.pth")
score_list_path = os.path.join(savedir, "score_list.pkl")
print(model)
if os.path.exists(score_list_path):
# resume experiment
model.load_state_dict(hu.torch_load(model_path))
score_list = hu.load_pkl(score_list_path)
s_epoch = score_list[-1]['epoch'] + 1
else:
# restart experiment
score_list = []
s_epoch = 0
# Train & Val
# ==================
print("Starting experiment at epoch %d" % (s_epoch))
model.waiting = 0
model.val_score_best = -np.inf
train_sampler = torch.utils.data.RandomSampler(
train_set, replacement=True,
num_samples=2*len(test_set))
train_loader = DataLoader(train_set,
sampler=train_sampler,
collate_fn=ut.collate_fn,
batch_size=exp_dict["batch_size"],
drop_last=True,
num_workers=num_workers)
for e in range(s_epoch, exp_dict['max_epoch']):
# Validate only at the start of each cycle
score_dict = {}
test_dict = model.val_on_loader(test_loader,
savedir_images=os.path.join(savedir, "images"),
n_images=3)
# Train the model
train_dict = model.train_on_loader(train_loader)
# Validate the model
val_dict = model.val_on_loader(val_loader)
score_dict["val_score"] = val_dict["val_score"]
# Get new score_dict
score_dict.update(train_dict)
score_dict["epoch"] = e
score_dict["waiting"] = model.waiting
model.waiting += 1
# Add to score_list and save checkpoint
score_list += [score_dict]
# Save Best Checkpoint
score_df = pd.DataFrame(score_list)
if score_dict["val_score"] >= model.val_score_best:
test_dict = model.val_on_loader(test_loader,
savedir_images=os.path.join(savedir, "images"),
n_images=3)
score_dict.update(test_dict)
hu.save_pkl(os.path.join(savedir, "score_list_best.pkl"), score_list)
# score_df.to_csv(os.path.join(savedir, "score_best_df.csv"))
hu.torch_save(os.path.join(savedir, "model_best.pth"),
model.get_state_dict())
model.waiting = 0
model.val_score_best = score_dict["val_score"]
print("Saved Best: %s" % savedir)
# Report & Save
score_df = pd.DataFrame(score_list)
# score_df.to_csv(os.path.join(savedir, "score_df.csv"))
print("\n", score_df.tail(), "\n")
hu.torch_save(model_path, model.get_state_dict())
hu.save_pkl(score_list_path, score_list)
print("Checkpoint Saved: %s" % savedir)
if model.waiting > 100:
break
    print('Experiment completed at epoch %d' % e)
if __name__ == "__main__":
setupLogging()
parser = argparse.ArgumentParser()
logger = logging.getLogger(__name__)
parser.add_argument('-e', '--exp_group_list', nargs="+")
parser.add_argument('-sb', '--savedir_base', required=True)
parser.add_argument('-d', '--datadir', default=None)
parser.add_argument("-r", "--reset", default=0, type=int)
parser.add_argument("-ei", "--exp_id", default=None)
parser.add_argument("-j", "--run_jobs", default=0, type=int)
parser.add_argument("-nw", "--num_workers", type=int, default=0)
args = parser.parse_args()
# Collect experiments
# ===================
if args.exp_id is not None:
# select one experiment
savedir = os.path.join(args.savedir_base, args.exp_id)
exp_dict = hu.load_json(os.path.join(savedir, "exp_dict.json"))
exp_list = [exp_dict]
else:
# select exp group
exp_list = []
for exp_group_name in args.exp_group_list:
exp_list += exp_configs.EXP_GROUPS[exp_group_name]
for exp_dict in exp_list:
# do trainval
trainval(exp_dict=exp_dict,
savedir_base=args.savedir_base,
datadir=args.datadir,
reset=args.reset,
num_workers=args.num_workers)
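# Example invocation (a sketch; the experiment group name and paths are
# placeholders, the flags follow the argparse definitions above):
#
#   python trainval.py -e my_exp_group -sb ./results -d ./data -r 1 -nw 4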
| 33.459916
| 82
| 0.577427
| 945
| 7,930
| 4.586243
| 0.212698
| 0.048454
| 0.027688
| 0.043147
| 0.27665
| 0.231426
| 0.209045
| 0.180434
| 0.180434
| 0.164282
| 0
| 0.00367
| 0.312863
| 7,930
| 236
| 83
| 33.601695
| 0.791705
| 0.100883
| 0
| 0.192308
| 0
| 0
| 0.081653
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012821
| false
| 0
| 0.160256
| 0
| 0.173077
| 0.070513
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed2e9fbfce1e8173a04660813facfdf161e513ee
| 598
|
py
|
Python
|
pyxq/app/__init__.py
|
goodchinas/pyxq
|
c7f6ea63084c18178049451f30f32f04080a511c
|
[
"MIT"
] | 4
|
2019-12-17T11:05:53.000Z
|
2020-06-01T05:41:02.000Z
|
pyxq/app/__init__.py
|
goodchinas/pyxq
|
c7f6ea63084c18178049451f30f32f04080a511c
|
[
"MIT"
] | null | null | null |
pyxq/app/__init__.py
|
goodchinas/pyxq
|
c7f6ea63084c18178049451f30f32f04080a511c
|
[
"MIT"
] | 2
|
2019-11-13T01:11:53.000Z
|
2019-12-17T10:55:44.000Z
|
from .. import ba, cb, actor
from ..service import account
class A0(ba.App):
center: cb.CallBackManager
a: account.Account
def __init__(self, stg: actor.GateWay):
a = account.Account()
center = cb.CallBackManager()
stg.init(a=a, center=center, broker=cb.CallBackManager())
actor.Broker(
a=a, center=center,
gateway=stg.broker,
exchange=actor.Exchange(center=center, broker=cb.CallBackManager()).broker)
self.center = center
self.a = a
def route(self, x: ba.Msg):
self.center.route(x=x)
| 27.181818
| 87
| 0.610368
| 75
| 598
| 4.813333
| 0.306667
| 0.188366
| 0.127424
| 0.077562
| 0.193906
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002273
| 0.264214
| 598
| 21
| 88
| 28.47619
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.117647
| 0
| 0.411765
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed2f62da1eeae65673f61b0ca0f66640810c94b0
| 7,802
|
py
|
Python
|
gva/data/validator/__init__.py
|
gva-jhabte/gva-data
|
7a605ff01faa3fd38e91a324341d6166f17544a7
|
[
"Apache-2.0"
] | null | null | null |
gva/data/validator/__init__.py
|
gva-jhabte/gva-data
|
7a605ff01faa3fd38e91a324341d6166f17544a7
|
[
"Apache-2.0"
] | null | null | null |
gva/data/validator/__init__.py
|
gva-jhabte/gva-data
|
7a605ff01faa3fd38e91a324341d6166f17544a7
|
[
"Apache-2.0"
] | null | null | null |
"""
Schema Validation
Tests a dictionary against a schema for conformity.
Schema definition is similar to - but not the same as - avro schemas
Supported Types:
- string - a character sequence
- format
- numeric - a number
- min:
- max
- date - a datetime.date or an iso format date or time
- boolean - a boolean or a binary value (true/false, on/off, yes/no)
- symbols
- other - not one of the above, but a required field
- nullable - Python Falsy (None, 0, Empty String, etc)
- enum -
- symbols
Example Schema:
{
"name": "Table Name",
"fields": [
{"name": "id", "type": "string"},
{"name": "country", "type": ["string", "nullable"]},
{"name": "followers", "type": ["string", "nullable"]}
]
}
Notes:
- type(var).__name__ in (set) is faster than isinstance
"""
import datetime
from typing import List, Any, Union, Callable
import os
import re
from ...utils.json import serialize, parse
VALID_BOOLEAN_VALUES = ("true", "false", "on", "off", "yes", "no", "0", "1")
DEFAULT_MIN = -9223372036854775808
DEFAULT_MAX = 9223372036854775807
class is_string():
__slots__ = ['pattern', 'regex']
def __init__(self, **kwargs):
self.regex = None
self.pattern = kwargs.get('format')
if self.pattern:
self.regex = re.compile(self.pattern)
def __call__(self, value: Any) -> bool:
if self.pattern is None:
return type(value).__name__ == "str"
else:
return self.regex.match(str(value))
def __str__(self):
if self.pattern:
return f'string ({self.pattern})'
else:
return 'string'
class is_valid_enum():
__slots__ = ['symbols']
def __init__(self, **kwargs):
"""
-> "type": "enum", "symbols": ["up", "down"]
symbols: list of allowed values (case sensitive)
"""
self.symbols = kwargs.get('symbols', ())
def __call__(self, value: Any) -> bool:
return value and value in self.symbols
def __str__(self):
return f'enum {self.symbols}'
class is_boolean(is_valid_enum):
def __init__(self, **kwargs):
"""
is_boolean is a specific case of is_valid_enum
- it defaults to a set of true/false values
- the check is case insensitive
"""
super().__init__()
if len(self.symbols) == 0:
self.symbols = VALID_BOOLEAN_VALUES
def __call__(self, value: Any) -> bool:
return super().__call__(str(value).lower())
class is_numeric():
__slots__ = ['min', 'max']
def __init__(self, **kwargs):
"""
-> "type": "numeric", "min": 0, "max": 100
min: low end of valid range
max: high end of valid range
"""
self.min = kwargs.get('min', DEFAULT_MIN)
self.max = kwargs.get('max', DEFAULT_MAX)
def __call__(self, value: Any) -> bool:
try:
n = float(value)
except (ValueError, TypeError):
return False
return self.min <= n <= self.max
def __str__(self):
if self.min == DEFAULT_MIN and self.max == DEFAULT_MAX:
return 'numeric'
if not self.min == DEFAULT_MIN and not self.max == DEFAULT_MAX:
return f'numeric ({self.min} - {self.max})'
if not self.min == DEFAULT_MIN and self.max == DEFAULT_MAX:
return f'numeric ({self.min} - infinity)'
if self.min == DEFAULT_MIN and not self.max == DEFAULT_MAX:
return f'numeric (infinity - {self.max})'
def is_date(value: Any) -> bool:
try:
if type(value).__name__ in ("datetime", "date", "time"):
return True
datetime.datetime.fromisoformat(value)
return True
except (ValueError, TypeError):
return False
def is_null(value: Any) -> bool:
return not value
def other_validator(value: Any) -> bool:
return True
def is_list(value: Any) -> bool:
return type(value).__name__ == 'list'
"""
Create a dictionary of the validator functions
"""
SIMPLE_VALIDATORS = {
"date": is_date,
"nullable": is_null,
"other": other_validator,
"list": is_list,
"array": is_list,
}
COMPLEX_VALIDATORS = {
"enum": is_valid_enum,
"numeric": is_numeric,
"string": is_string,
"boolean": is_boolean
}
def get_validators(
type_descriptor: Union[List[str], str],
**kwargs):
"""
For a given type definition (the ["string", "nullable"] bit), return
the matching validator functions (the _is_x ones) as a list.
"""
if not type(type_descriptor).__name__ == 'list':
type_descriptor = [type_descriptor] # type:ignore
validators: List[Any] = []
for descriptor in type_descriptor:
if descriptor in COMPLEX_VALIDATORS:
validators.append(COMPLEX_VALIDATORS[descriptor](**kwargs))
else:
validators.append(SIMPLE_VALIDATORS[descriptor])
return validators
def field_validator(value, validators: set) -> bool:
"""
Execute a set of validator functions (the _is_x) against a value.
Return True if any of the validators are True.
"""
return any([True for validator in validators if validator(value)])
class Schema():
def __init__(self, definition: Union[dict, str]):
"""
Compile a validator for a given schema.
        parameters:
- definition: a dictionary, text representation of a dictionary (JSON)
or a JSON file containing a schema definition
"""
# if we have a schema as a string, load it into a dictionary
if type(definition).__name__ == 'str':
if os.path.exists(definition): # type:ignore
definition = parse(open(definition, mode='r').read()) # type:ignore
else:
definition = parse(definition) # type:ignore
try:
# read the schema and look up the validators
self._validators = {
item.get('name'): get_validators(
item['type'],
symbols=item.get('symbols'),
min=item.get('min', DEFAULT_MIN), # 64bit signed (not a limit, just a default)
max=item.get('max', DEFAULT_MAX), # 64bit signed (not a limit, just a default)
format=item.get('format'))
for item in definition.get('fields', []) #type:ignore
}
except KeyError:
raise ValueError("Invalid type specified in schema - valid types are: string, numeric, date, boolean, nullable, list, enum")
if len(self._validators) == 0:
raise ValueError("Invalid schema specification")
def validate(self, subject: dict = {}, raise_exception=False) -> bool:
result = True
self.last_error = ''
for key, value in self._validators.items():
if not field_validator(subject.get(key), self._validators.get(key, [other_validator])):
result = False
for v in value:
self.last_error += f"'{key}' ({subject.get(key)}) did not pass validator {str(v)}.\n"
if raise_exception and not result:
raise ValueError(F"Record does not conform to schema - {self.last_error}. ")
return result
def __call__(self, subject: dict = {}, raise_exception=False) -> bool:
# wrap the validate function
return self.validate(subject=subject, raise_exception=raise_exception)
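# A minimal usage sketch; the schema and record are invented for illustration
# and only use types documented in the module docstring.
def _example_schema_usage() -> bool:
    schema = Schema({
        "name": "Example Table",
        "fields": [
            {"name": "id", "type": "string"},
            {"name": "age", "type": "numeric", "min": 0, "max": 130},
            {"name": "country", "type": ["string", "nullable"]},
        ],
    })
    return schema({"id": "abc123", "age": 42, "country": None})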
| 32.781513
| 137
| 0.57575
| 914
| 7,802
| 4.729759
| 0.218818
| 0.018506
| 0.022207
| 0.020819
| 0.161925
| 0.118436
| 0.09808
| 0.06477
| 0.049965
| 0.044876
| 0
| 0.009444
| 0.30787
| 7,802
| 238
| 138
| 32.781513
| 0.791111
| 0.238529
| 0
| 0.2
| 0
| 0.007407
| 0.106748
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.148148
| false
| 0.007407
| 0.037037
| 0.051852
| 0.414815
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed3077484491f3ce68c431839bde2db924a8e7be
| 2,350
|
py
|
Python
|
fuzzers/ECP5/050-pio_routing/fuzzer.py
|
umarcor/prjtrellis
|
9b3db7ba9a02e7d2f49c52ce062d5b22e320004c
|
[
"MIT"
] | 256
|
2018-03-05T00:28:46.000Z
|
2022-03-04T22:33:29.000Z
|
fuzzers/ECP5/050-pio_routing/fuzzer.py
|
umarcor/prjtrellis
|
9b3db7ba9a02e7d2f49c52ce062d5b22e320004c
|
[
"MIT"
] | 70
|
2018-03-12T21:55:02.000Z
|
2020-06-22T12:06:08.000Z
|
fuzzers/ECP5/050-pio_routing/fuzzer.py
|
umarcor/prjtrellis
|
9b3db7ba9a02e7d2f49c52ce062d5b22e320004c
|
[
"MIT"
] | 68
|
2018-03-12T21:05:01.000Z
|
2021-03-14T21:08:33.000Z
|
from fuzzconfig import FuzzConfig
import interconnect
import nets
import pytrellis
import re
jobs = [
{
"pos": [(47, 0), (48, 0), (49, 0)],
"cfg": FuzzConfig(job="PIOROUTEL", family="ECP5", device="LFE5U-45F", ncl="pioroute.ncl",
tiles=["MIB_R47C0:PICL0", "MIB_R48C0:PICL1", "MIB_R49C0:PICL2"])
},
{
"pos": [(47, 90), (48, 90), (49, 90)],
"cfg": FuzzConfig(job="PIOROUTER", family="ECP5", device="LFE5U-45F", ncl="pioroute.ncl",
tiles=["MIB_R47C90:PICR0", "MIB_R48C90:PICR1", "MIB_R49C90:PICR2"])
},
{
"pos": [(0, 22), (1, 23), (0, 22), (1, 23)],
"cfg": FuzzConfig(job="PIOROUTET", family="ECP5", device="LFE5U-45F", ncl="pioroute.ncl",
tiles=["MIB_R0C22:PIOT0", "MIB_R0C23:PIOT1", "MIB_R1C22:PICT0", "MIB_R1C23:PICT1"])
},
{
"pos": [(71, 11), (71, 12), (70, 11), (70, 12)],
"cfg": FuzzConfig(job="PIOROUTET", family="ECP5", device="LFE5U-45F", ncl="pioroute.ncl",
tiles=["MIB_R71C11:PICB0", "MIB_R71C12:PICB1"])
},
{
"pos": [(71, 18), (70, 18)],
"cfg": FuzzConfig(job="PIOROUTESB", family="ECP5", device="LFE5U-45F", ncl="pioroute_spicb.ncl",
tiles=["MIB_R71C18:SPICB0"])
},
]
def main():
pytrellis.load_database("../../../database")
for job in jobs:
cfg = job["cfg"]
cfg.setup()
def nn_filter(net, netnames):
return not nets.is_cib(net)
orig_tiles = cfg.tiles
for pos in job["pos"]:
# Put fixed connections in the most appropriate tile
target_tile = None
for tile in orig_tiles:
if "R{}C{}".format(pos[0], pos[1]) in tile:
target_tile = tile
break
if target_tile is not None:
                cfg.tiles = [target_tile] + [_ for _ in orig_tiles if _ != target_tile]
else:
cfg.tiles = orig_tiles
interconnect.fuzz_interconnect(config=cfg, location=pos,
netname_predicate=nn_filter,
netname_filter_union=False,
func_cib=True)
if __name__ == "__main__":
main()
| 36.71875
| 109
| 0.50383
| 264
| 2,350
| 4.32197
| 0.397727
| 0.056968
| 0.070114
| 0.092025
| 0.235758
| 0.235758
| 0.235758
| 0.205083
| 0.205083
| 0.205083
| 0
| 0.087877
| 0.336596
| 2,350
| 63
| 110
| 37.301587
| 0.644003
| 0.021277
| 0
| 0.035714
| 0
| 0
| 0.194082
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.089286
| 0.017857
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed32adc4f5470b9099ab1976e987f8994269a9b8
| 6,841
|
py
|
Python
|
convnet3d/backend/tensorflow_backend.py
|
yecharlie/convnet3d
|
0b2771eec149b196ef59b58d09eef71c9b201d40
|
[
"MIT"
] | 6
|
2020-03-12T10:28:41.000Z
|
2021-11-18T16:17:20.000Z
|
convnet3d/backend/tensorflow_backend.py
|
yecharlie/convnet3d
|
0b2771eec149b196ef59b58d09eef71c9b201d40
|
[
"MIT"
] | null | null | null |
convnet3d/backend/tensorflow_backend.py
|
yecharlie/convnet3d
|
0b2771eec149b196ef59b58d09eef71c9b201d40
|
[
"MIT"
] | 1
|
2019-08-01T02:50:05.000Z
|
2019-08-01T02:50:05.000Z
|
import tensorflow as tf
def _is_tensor(x):
"""Returns `True` if `x` is a symbolic tensor-like object.
From http://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/image_ops_impl.py
Args:
x: A python object to check.
Returns:
`True` if `x` is a `tf.Tensor` or `tf.Variable`, otherwise `False`.
"""
return isinstance(x, (tf.Tensor, tf.Variable))
def _ImageDimensions(image, rank):
"""Returns the dimensions of an image tensor.
From http://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/image_ops_impl.py
Args:
image: A rank-D Tensor. For 3-D of shape: `[height, width, channels]`.
rank: The expected rank of the image
Returns:
A list corresponding to the dimensions of the
input image. Dimensions that are statically known are python integers,
otherwise they are integer scalar tensors.
"""
if image.get_shape().is_fully_defined():
return image.get_shape().as_list()
else:
static_shape = image.get_shape().with_rank(rank).as_list()
dynamic_shape = tf.unstack(tf.shape(image), rank)
return [
s if s is not None else d for s, d in zip(static_shape, dynamic_shape)
]
def _CheckAtLeast4DImage(image, require_static=True):
"""Assert that we are working with properly shaped image.
(modified) From http://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/image_ops_impl.py
Args:
image: >= 4-D Tensor of size [*, height, width, depth, channels]
require_static: If `True`, requires that all dimensions of `image` are
known and non-zero.
Raises:
ValueError: if image.shape is not a [>= 4] vector.
Returns:
An empty list, if `image` has fully defined dimensions. Otherwise, a list
containing an assert op is returned.
"""
try:
if image.get_shape().ndims is None:
image_shape = image.get_shape().with_rank(4)
else:
image_shape = image.get_shape().with_rank_at_least(4)
except ValueError:
raise ValueError("'image' must be at least four-dimensional.")
if require_static and not image_shape.is_fully_defined():
raise ValueError('\'image\' must be fully defined.')
if any(x == 0 for x in image_shape):
raise ValueError(
'all dims of \'image.shape\' must be > 0: %s' % image_shape)
if not image_shape.is_fully_defined():
return [
tf.assert_positive(
tf.shape(image),
['all dims of "image.shape " must be > 0.'])
]
else:
return []
def uniform(*args, **kwargs):
return tf.random.uniform(*args, **kwargs)
def pad(*args, **kwargs):
return tf.pad(*args, **kwargs)
def top_k(*args, **kwargs):
return tf.math.top_k(*args, **kwargs)
def non_max_suppression_overlaps(*args, **kwargs):
return tf.image.non_max_suppression_overlaps(*args, **kwargs)
def gather_nd(*args, **kwargs):
return tf.gather_nd(*args, **kwargs)
def clip_by_value(*args, **kwargs):
return tf.clip_by_value(*args, **kwargs)
def meshgrid(*args, **kwargs):
return tf.meshgrid(*args, **kwargs)
def map_fn(*args, **kwargs):
return tf.map_fn(*args, **kwargs)
def where(*args, **kwargs):
return tf.where(*args, **kwargs)
def crop_to_bounding_box_3d(image, box, target_size):
'''Crops a 3D image to a specified bounding box. When the size of the box is smaller than 'target_size', the surroundings of the image are evenly (approximately) padded with zeros. A 'box' with size = 0 is allowed.
Args:
image: 5-D Tensor of shape '[batch, height, width, depth, channels]' or
4-D Tensor of shape '[height, width, depth, channels]'
box: 1-D Tensor of shape '[6,]' representing the cropped area.
target_size: The ultimate bounding box size.
Returns:
if 'image' was 5-D, a 5-D float Tensor of shape '[batch_size] + target_size + [channels]'
if 'image' was 4-D, a 4-D float Tensor of shape 'target_size + [channels]'
'''
with tf.name_scope(None, 'crop_to_bounding_box_3d', [image]):
image = tf.convert_to_tensor(image, name='image')
is_batch = True
image_shape = image.get_shape()
if image_shape.ndims == 4:
is_batch = False
image = tf.expand_dims(image, 0)
elif image_shape.ndims is None:
is_batch = False
image = tf.expand_dims(image, 0)
image.set_shape([None] * 5)
elif image_shape.ndims != 5:
raise ValueError('\'image\' must have either 4 or 5 dimensions.')
assert_ops = _CheckAtLeast4DImage(image, require_static=False)
# Never mind the real meaning of height/width/depth: the names simply mimic the TensorFlow API's writing convention.
batch, height, width, depth, channels = _ImageDimensions(image, rank=5)
# print('crop_to_bounding_box_3d height:',height)
box_size = box[1::2] - box[::2]
assert_ops.append(tf.assert_greater_equal([height, width, depth], box[1::2], ['The remote corner of box must not exceed image boundaries.']))
assert_ops.append(tf.assert_non_negative(box[::2], ['The near corner of box must be non negative.']))
assert_ops.append(tf.assert_non_negative(box_size, ['The box size should be non negative.']))
assert_ops.append(tf.assert_greater_equal(target_size, box_size, ['The target size should be not less than box size. ']))
with tf.control_dependencies(assert_ops):
image = tf.identity(image)  # thread the assert ops into the graph so the checks actually run
# tf.with_dependencies(assert_ops, image)
cropped = tf.slice(
image, tf.stack([0, box[0], box[2], box[4], 0]),
tf.stack([-1, box_size[0], box_size[1], box_size[2] , -1])
)
def _max(x, y):
if _is_tensor(x) or _is_tensor(y):
return tf.maximum(x, y)
else:
return max(x, y)
padding_offsets = _max((target_size - box_size) // 2, 0)
after_padding_size = target_size - padding_offsets - box_size
paddings = tf.reshape(
tf.stack([
0, 0, padding_offsets[0], after_padding_size[0],
padding_offsets[1], after_padding_size[1], # noqa: E131
padding_offsets[2], after_padding_size[2], 0, 0 # noqa: E131
]), [5, 2])
padded = tf.pad(cropped, paddings)
result_shape = [
None if _is_tensor(i) else i
for i in [batch, target_size[0], target_size[1], target_size[2], channels]
]
padded.set_shape(result_shape)
if not is_batch:
padded = tf.squeeze(padded, axis=[0])
return padded
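# --- Illustrative usage sketch (added commentary, not part of the upstream file). ---
# Kept as comments so importing this module stays side-effect free; it assumes the
# TF 1.x graph/session API that the code above already relies on, and the shapes
# below are made up:
#
#   import numpy as np
#   volume = tf.placeholder(tf.float32, shape=[32, 32, 32, 1])   # 4-D: [h, w, d, c]
#   box = tf.constant([2, 10, 4, 12, 0, 8])                       # [h0, h1, w0, w1, d0, d1]
#   crop = crop_to_bounding_box_3d(volume, box, target_size=[16, 16, 16])
#   with tf.Session() as sess:
#       out = sess.run(crop, {volume: np.zeros((32, 32, 32, 1), np.float32)})
#   # out.shape == (16, 16, 16, 1): the 8x8x8 crop centred inside zero padding.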
| 35.630208
| 217
| 0.627686
| 958
| 6,841
| 4.325679
| 0.215031
| 0.043436
| 0.034749
| 0.039093
| 0.25555
| 0.215251
| 0.159266
| 0.132963
| 0.080116
| 0.063224
| 0
| 0.013954
| 0.256249
| 6,841
| 191
| 218
| 35.816754
| 0.800511
| 0.317205
| 0
| 0.1
| 0
| 0
| 0.085962
| 0.005109
| 0
| 0
| 0
| 0
| 0.07
| 1
| 0.14
| false
| 0
| 0.01
| 0.09
| 0.32
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed333974ac7fc15e4814ac031f495886accd5008
| 1,169
|
py
|
Python
|
extensions/everywhere.py
|
cobrab11/black1-bot
|
47c1a80029d6183fc990960b422bb3155360702d
|
[
"Apache-2.0"
] | 3
|
2015-10-15T15:40:17.000Z
|
2021-06-08T05:39:21.000Z
|
extensions/everywhere.py
|
cobrab11/black1-bot
|
47c1a80029d6183fc990960b422bb3155360702d
|
[
"Apache-2.0"
] | 1
|
2019-04-06T11:54:56.000Z
|
2019-04-07T00:57:49.000Z
|
extensions/everywhere.py
|
cobrab11/black1-bot
|
47c1a80029d6183fc990960b422bb3155360702d
|
[
"Apache-2.0"
] | 3
|
2015-10-26T14:49:57.000Z
|
2018-03-04T15:34:11.000Z
|
# BS mark.1-55
# /* coding: utf-8 */
# BlackSmith plugin
# everywhere_plugin.py
# Coded by: WitcherGeralt (WitcherGeralt@jabber.ru)
# http://witcher-team.ucoz.ru/
def handler_everywhere(type, source, body):
if body:
args = body.split()
if len(args) >= 2:
mtype = args[0].strip().lower()
if mtype == u'чат':
msgtype = 'public'
elif mtype == u'приват':
msgtype = 'private'
else:
msgtype = False
if msgtype:
command = args[1].strip().lower()
if len(args) >= 3:
Parameters = body[((body.lower()).find(command) + (len(command) + 1)):].strip()
else:
Parameters = ''
if len(Parameters) <= 96:
if command in COMMANDS:  # membership test instead of the Python2-only has_key()
for conf in GROUPCHATS.keys():
call_command_handlers(command, msgtype, [source[0], conf, source[2]], Parameters, command)
else:
reply(type, source, u'Нет такой команды.')  # "No such command."
else:
reply(type, source, u'Слишком длинные параметры.')  # "Parameters are too long."
else:
reply(type, source, u'Тип указан некорректно.')  # "Type specified incorrectly."
else:
reply(type, source, u'инвалид синтакс')  # "invalid syntax"
else:
reply(type, source, u'я не умею читать мысли')  # "I can't read minds"
command_handler(handler_everywhere, 100, "everywhere")
| 27.186047
| 97
| 0.637297
| 154
| 1,169
| 4.792208
| 0.493506
| 0.081301
| 0.088076
| 0.128726
| 0.135501
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01726
| 0.207015
| 1,169
| 42
| 98
| 27.833333
| 0.778857
| 0.130026
| 0
| 0.21875
| 0
| 0
| 0.134653
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0
| 0
| 0.03125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed38d1c43b9d70e3ac1c1d8a3bb7c7068b2ec679
| 6,946
|
py
|
Python
|
tests/test_azure.py
|
The-Academic-Observatory/mag-archiver
|
76988020047b4ab9eb2d125f5141dfa7297a6fb3
|
[
"Apache-2.0"
] | null | null | null |
tests/test_azure.py
|
The-Academic-Observatory/mag-archiver
|
76988020047b4ab9eb2d125f5141dfa7297a6fb3
|
[
"Apache-2.0"
] | 5
|
2020-07-22T03:51:17.000Z
|
2021-08-08T21:56:00.000Z
|
tests/test_azure.py
|
The-Academic-Observatory/mag-archiver
|
76988020047b4ab9eb2d125f5141dfa7297a6fb3
|
[
"Apache-2.0"
] | 1
|
2020-07-19T22:39:20.000Z
|
2020-07-19T22:39:20.000Z
|
# Copyright 2020 Curtin University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: James Diprose
import os
import unittest
import uuid
from azure.storage.blob import ContainerClient, BlobClient
from mag_archiver.azure import list_containers, copy_container, list_blobs, create_container, delete_container, \
create_blob, delete_table, create_table
def generate_names(num_names, prefix):
# Create containers
names = []
for i in range(num_names):
name = make_unique_name(prefix)
names.append(name)
return names
def make_unique_name(prefix: str):
return f"{prefix}-{str(uuid.uuid4())}"
class TestAzure(unittest.TestCase):
account_name: str
account_key: str
def __init__(self, *args, **kwargs):
super(TestAzure, self).__init__(*args, **kwargs)
self.account_name = os.getenv('TEST_AZURE_STORAGE_ACCOUNT_NAME')
self.account_key = os.getenv('TEST_AZURE_STORAGE_ACCOUNT_KEY')
def test_create_table(self):
table_name = 'TestCreateTable'
try:
# Create a table
result = create_table(self.account_name, self.account_key, table_name)
self.assertTrue(result)
finally:
# Cleanup
delete_table(self.account_name, self.account_key, table_name)
def test_delete_table(self):
table_name = 'TestDeleteTable'
try:
# Create a table
create_table(self.account_name, self.account_key, table_name)
# Delete table
result = delete_table(self.account_name, self.account_key, table_name)
self.assertTrue(result)
finally:
# Cleanup
delete_table(self.account_name, self.account_key, table_name)
def test_create_container(self):
container_name = make_unique_name('test-create-container')
try:
# Create a container
result: ContainerClient = create_container(self.account_name, self.account_key, container_name)
self.assertIsInstance(result, ContainerClient)
self.assertEqual(result.container_name, container_name)
finally:
# Cleanup
delete_container(self.account_name, self.account_key, container_name)
def test_create_blob(self):
container_name = make_unique_name('test-create-blob')
try:
# Create a container for the blob
create_container(self.account_name, self.account_key, container_name)
# Create a blob
blob_name = make_unique_name('test-create-blob') + '.txt'
blob_data = 'Hello world!'
result: BlobClient = create_blob(self.account_name, self.account_key, container_name, blob_name, blob_data)
self.assertIsInstance(result, BlobClient)
self.assertEqual(result.blob_name, blob_name)
finally:
# Cleanup
delete_container(self.account_name, self.account_key, container_name)
def test_list_containers(self):
num_containers = 3
names = generate_names(num_containers, 'test-list-containers')
try:
# Create containers
for name in names:
create_container(self.account_name, self.account_key, name)
# List containers
containers = list_containers(self.account_name, self.account_key)
self.assertEqual(len(containers), num_containers + 1)
finally:
# Cleanup
for name in names:
delete_container(self.account_name, self.account_key, name)
def test_delete_container(self):
container_name = make_unique_name('test-delete-container')
try:
create_container(self.account_name, self.account_key, container_name)
delete_container(self.account_name, self.account_key, container_name)
containers = list_containers(self.account_name, self.account_key)
container_names = [c.name for c in containers]
self.assertNotIn(container_name, container_names)
finally:
# Cleanup
delete_container(self.account_name, self.account_key, container_name)
def test_list_blobs(self):
container_name = make_unique_name('test-list-blobs')
try:
# Create container to store the blobs
create_container(self.account_name, self.account_key, container_name)
# Create the blobs
num_blobs = 3
blob_data = 'Hello world!'
names = generate_names(num_blobs, 'test-list-blobs')
for name in names:
file_name = f'{name}.txt'
create_blob(self.account_name, self.account_key, container_name, file_name, blob_data)
# Check that we can find the blobs
blobs = list_blobs(self.account_name, self.account_key, container_name)
self.assertEqual(len(blobs), num_blobs)
finally:
# Cleanup
delete_container(self.account_name, self.account_key, container_name)
def test_copy_container(self):
source_container = make_unique_name('test-copy-container-source')
target_container = make_unique_name('test-copy-container-target')
target_folder = 'target-folder'
try:
# Create containers and blobs
create_container(self.account_name, self.account_key, source_container)
create_container(self.account_name, self.account_key, target_container)
# Create blobs in source container
num_blobs = 3
blob_data = 'Hello world!'
names = generate_names(num_blobs, 'test-copy-container')
for name in names:
file_name = f'{name}.txt'
create_blob(self.account_name, self.account_key, source_container, file_name, blob_data)
# Copy blobs from one container to another
copy_container(self.account_name, self.account_key, source_container, target_container, target_folder)
# Check results
blobs = list_blobs(self.account_name, self.account_key, target_container)
self.assertEqual(len(blobs), num_blobs)
finally:
# Delete container
delete_container(self.account_name, self.account_key, source_container)
delete_container(self.account_name, self.account_key, target_container)
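# --- Illustrative note (added commentary, not part of the upstream file). ---
# These tests talk to a real Azure storage account, so a typical local run might
# look like the following (the env-var names come from __init__ above; the values
# are placeholders):
#
#   TEST_AZURE_STORAGE_ACCOUNT_NAME=mystorageaccount \
#   TEST_AZURE_STORAGE_ACCOUNT_KEY=<key> \
#   python -m unittest tests.test_azure -v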
| 39.022472
| 119
| 0.662971
| 829
| 6,946
| 5.294331
| 0.171291
| 0.145363
| 0.099111
| 0.145363
| 0.536797
| 0.532695
| 0.518569
| 0.467305
| 0.406471
| 0.289132
| 0
| 0.002527
| 0.259286
| 6,946
| 177
| 120
| 39.242938
| 0.850535
| 0.145263
| 0
| 0.394495
| 0
| 0
| 0.065615
| 0.031027
| 0
| 0
| 0
| 0
| 0.091743
| 1
| 0.100917
| false
| 0
| 0.045872
| 0.009174
| 0.192661
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed3a7989a6ca1353a9b8069ec10dfdff872950bd
| 16,283
|
py
|
Python
|
tensorflow/contrib/framework/python/framework/tensor_util_test.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/framework/python/framework/tensor_util_test.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/framework/python/framework/tensor_util_test.py
|
uve/tensorflow
|
e08079463bf43e5963acc41da1f57e95603f8080
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tensor_util tests."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import numpy as np
from tensorflow.contrib.framework.python.framework import tensor_util
from tensorflow.contrib.framework.python.ops import variables as variables_lib2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
class LocalVariabletest(test.TestCase):
def test_local_variable(self):
with self.cached_session() as sess:
self.assertEquals([], variables_lib.local_variables())
value0 = 42
variables_lib2.local_variable(value0)
value1 = 43
variables_lib2.local_variable(value1)
variables = variables_lib.local_variables()
self.assertEquals(2, len(variables))
self.assertRaises(errors_impl.OpError, sess.run, variables)
variables_lib.variables_initializer(variables).run()
self.assertAllEqual(set([value0, value1]), set(sess.run(variables)))
class ReduceSumNTest(test.TestCase):
def test_reduce_sum_n(self):
with self.cached_session():
a = constant_op.constant(1)
b = constant_op.constant([2])
c = constant_op.constant([[3, 4], [5, 6]])
self.assertEqual(21, tensor_util.reduce_sum_n([a, b, c]).eval())
class AssertScalarIntTest(test.TestCase):
def test_assert_scalar_int(self):
tensor_util.assert_scalar_int(constant_op.constant(3, dtype=dtypes.int32))
tensor_util.assert_scalar_int(constant_op.constant(3, dtype=dtypes.int64))
tensor_util.assert_scalar_int(3)
with self.assertRaisesRegexp(ValueError, "Expected integer"):
tensor_util.assert_scalar_int(
constant_op.constant(
3, dtype=dtypes.float32))
with self.assertRaisesRegexp(ValueError, "Expected scalar"):
tensor_util.assert_scalar_int(
constant_op.constant(
[3, 4], dtype=dtypes.int32))
class WithShapeTest(test.TestCase):
def _assert_with_shape(self, tensor, expected_value, expected_shape,
unexpected_shapes):
for unexpected_shape in unexpected_shapes:
self.assertRaises(ValueError, tensor_util.with_shape, unexpected_shape,
tensor)
pattern = (
r"\[Wrong shape for %s \[expected\] \[actual\].\] \[%s\] \[%s\]" %
(tensor.name, " ".join([str(dim) for dim in unexpected_shape]),
" ".join([str(dim) for dim in expected_shape])))
self.assertRaisesRegexp(errors_impl.OpError,
re.compile(pattern),
tensor_util.with_shape(
constant_op.constant(unexpected_shape),
tensor).eval)
expected_placeholder = array_ops.placeholder(dtypes.float32)
self.assertRaisesRegexp(errors_impl.OpError,
re.compile(pattern),
tensor_util.with_same_shape(expected_placeholder,
tensor).eval,
{expected_placeholder: np.ones(unexpected_shape)})
self.assertIs(tensor, tensor_util.with_shape(expected_shape, tensor))
self.assertIs(
tensor,
tensor_util.with_same_shape(
constant_op.constant(
1, shape=expected_shape), tensor))
tensor_with_shape = tensor_util.with_shape(
constant_op.constant(expected_shape), tensor)
np.testing.assert_array_equal(expected_value, tensor_with_shape.eval())
tensor_with_same_shape = tensor_util.with_same_shape(expected_placeholder,
tensor)
np.testing.assert_array_equal(expected_value,
tensor_with_same_shape.eval({
expected_placeholder:
np.ones(expected_shape)
}))
def test_with_shape_invalid_expected_shape(self):
with self.cached_session():
self.assertRaisesRegexp(ValueError, "Invalid rank",
tensor_util.with_shape, [[1], [2]],
constant_op.constant(1.0))
def test_with_shape_invalid_type(self):
with self.cached_session():
self.assertRaisesRegexp(ValueError, "Invalid dtype",
tensor_util.with_shape, [1.1],
constant_op.constant([1.0]))
self.assertRaisesRegexp(ValueError, "Invalid dtype",
tensor_util.with_shape,
np.array([1.1]), constant_op.constant(1.0))
self.assertRaisesRegexp(ValueError, "Invalid dtype",
tensor_util.with_shape,
constant_op.constant(np.array([1.1])),
constant_op.constant(1.0))
def test_with_shape_0(self):
with self.cached_session():
value = 42
shape = [0]
unexpected_shapes = [[1], [2], [1, 1]]
self._assert_with_shape(
constant_op.constant(
value, shape=shape),
value,
shape,
unexpected_shapes)
def test_with_shape_1(self):
with self.cached_session():
value = [42]
shape = [1]
unexpected_shapes = [[0], [2], [1, 1]]
self._assert_with_shape(
constant_op.constant(
value, shape=shape),
value,
shape,
unexpected_shapes)
def test_with_shape_2(self):
with self.cached_session():
value = [42, 43]
shape = [2]
unexpected_shapes = [[0], [1], [2, 1]]
self._assert_with_shape(
constant_op.constant(
value, shape=shape),
value,
shape,
unexpected_shapes)
def test_with_shape_2x2(self):
with self.cached_session():
value = [[42, 43], [44, 45]]
shape = [2, 2]
unexpected_shapes = [[0], [1], [2, 1]]
self._assert_with_shape(
constant_op.constant(
value, shape=shape),
value,
shape,
unexpected_shapes)
def test_with_shape_2x2_with_partial_expected_shape(self):
with self.cached_session():
value = [[42, 43], [44, 45]]
actual_shape = [2, 2]
tensor = constant_op.constant(value, shape=actual_shape)
partial_expected_shape = tensor_shape.TensorShape([None, 2])
# Won't raise any exception here:
tensor_with_shape = tensor_util.with_shape(partial_expected_shape, tensor)
np.testing.assert_array_equal(value, tensor_with_shape.eval())
def test_with_shape_none(self):
with self.cached_session():
tensor_no_shape = array_ops.placeholder(dtypes.float32)
compatible_shape = [2, 2]
with_present_2x2 = tensor_util.with_shape(compatible_shape,
tensor_no_shape)
self.assertEquals(compatible_shape, with_present_2x2.get_shape().dims)
with_future_2x2 = tensor_util.with_shape(
constant_op.constant(compatible_shape), tensor_no_shape)
array_2x2 = [[42.0, 43.0], [44.0, 45.0]]
for tensor_2x2 in [with_present_2x2, with_future_2x2]:
np.testing.assert_array_equal(array_2x2,
tensor_2x2.eval({
tensor_no_shape: array_2x2
}))
self.assertRaisesRegexp(errors_impl.OpError, "Wrong shape",
tensor_2x2.eval,
{tensor_no_shape: [42.0, 43.0]})
self.assertRaisesRegexp(errors_impl.OpError, "Wrong shape",
tensor_2x2.eval, {tensor_no_shape: [42.0]})
def test_with_shape_partial(self):
with self.cached_session():
tensor_partial_shape = array_ops.placeholder(dtypes.float32)
tensor_partial_shape.set_shape([None, 2])
for incompatible_shape in [[0], [1]]:
self.assertRaisesRegexp(
ValueError, "Shapes must be equal rank, but are 2 and 1",
tensor_util.with_shape, incompatible_shape, tensor_partial_shape)
for incompatible_shape in [[1, 2, 1]]:
self.assertRaisesRegexp(ValueError, "Dimensions must be equal",
tensor_util.with_shape, incompatible_shape,
tensor_partial_shape)
for incompatible_shape in [[2, 1]]:
self.assertRaisesRegexp(
ValueError,
r"Dimension 1 in both shapes must be equal, but are 2 and 1. "
r"Shapes are \[\?,2\] and \[2,1\].",
tensor_util.with_shape, incompatible_shape, tensor_partial_shape)
compatible_shape = [2, 2]
with_present_2x2 = tensor_util.with_shape(compatible_shape,
tensor_partial_shape)
self.assertEquals(compatible_shape, with_present_2x2.get_shape().dims)
with_future_2x2 = tensor_util.with_shape(
constant_op.constant(compatible_shape), tensor_partial_shape)
array_2x2 = [[42.0, 43.0], [44.0, 45.0]]
for tensor_2x2 in [with_present_2x2, with_future_2x2]:
np.testing.assert_array_equal(array_2x2,
tensor_2x2.eval({
tensor_partial_shape: array_2x2
}))
self.assertRaises(ValueError, tensor_2x2.eval,
{tensor_partial_shape: [42.0, 43.0]})
self.assertRaises(ValueError, tensor_2x2.eval,
{tensor_partial_shape: [42.0]})
class RemoveSqueezableDimensionsTest(test.TestCase):
def testRemoveSqueezableDimensions(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=False,
labels_have_static_shape=False,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=False,
labels_have_static_shape=False,
labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_staticLabel(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=False,
labels_have_static_shape=True,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_staticLabel_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=False,
labels_have_static_shape=True,
labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_extraPredictionDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=True,
labels_have_static_shape=False,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_extraPredictionDim_staticLabel(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=False,
predictions_have_extra_dim=True,
labels_have_static_shape=True,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_staticPrediction(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=False,
labels_have_static_shape=False,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_staticPrediction_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=False,
labels_have_static_shape=False,
labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_static(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=False,
labels_have_static_shape=True,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_static_extraLabelDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=False,
labels_have_static_shape=True,
labels_have_extra_dim=True)
def testRemoveSqueezableDimensions_staticPrediction_extraPredictionDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=True,
labels_have_static_shape=False,
labels_have_extra_dim=False)
def testRemoveSqueezableDimensions_static_extraPredictionDim(self):
self._testRemoveSqueezableDimensions(
predictions_have_static_shape=True,
predictions_have_extra_dim=True,
labels_have_static_shape=True,
labels_have_extra_dim=False)
# TODO(ptucker): Replace this with parameterized test.
def _testRemoveSqueezableDimensions(self, predictions_have_static_shape,
predictions_have_extra_dim,
labels_have_static_shape,
labels_have_extra_dim):
assert not (predictions_have_extra_dim and labels_have_extra_dim)
predictions_value = (0, 1, 1, 0, 0, 1, 0)
labels_value = (0, 0, 1, 1, 0, 0, 0)
input_predictions_value = ([[p] for p in predictions_value] if
predictions_have_extra_dim else
predictions_value)
input_labels_value = ([[l] for l in labels_value] if labels_have_extra_dim
else labels_value)
with ops.Graph().as_default() as g:
feed_dict = {}
if predictions_have_static_shape:
predictions = constant_op.constant(
input_predictions_value, dtype=dtypes.int32)
else:
predictions = array_ops.placeholder(
dtype=dtypes.int32, name="predictions")
feed_dict[predictions] = input_predictions_value
if labels_have_static_shape:
labels = constant_op.constant(input_labels_value, dtype=dtypes.int32)
else:
labels = array_ops.placeholder(dtype=dtypes.int32, name="labels")
feed_dict[labels] = input_labels_value
squeezed_predictions, squeezed_labels = (
tensor_util.remove_squeezable_dimensions(predictions, labels))
with self.session(g):
variables_lib.local_variables_initializer().run()
self.assertAllClose(
predictions_value, squeezed_predictions.eval(feed_dict=feed_dict))
self.assertAllClose(
labels_value, squeezed_labels.eval(feed_dict=feed_dict))
if __name__ == "__main__":
test.main()
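# Illustrative note (added commentary, not from the upstream file): because of the
# test.main() entry point above, the file can be run directly, e.g.
# `python tensor_util_test.py`, assuming a TensorFlow 1.x build that still ships
# tensorflow.contrib.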
| 41.966495
| 81
| 0.63772
| 1,752
| 16,283
| 5.591895
| 0.128425
| 0.031234
| 0.036746
| 0.03103
| 0.682556
| 0.568439
| 0.534653
| 0.511075
| 0.480147
| 0.451159
| 0
| 0.021612
| 0.278204
| 16,283
| 387
| 82
| 42.074935
| 0.811963
| 0.048824
| 0
| 0.468553
| 0
| 0
| 0.023145
| 0
| 0
| 0
| 0
| 0.002584
| 0.141509
| 1
| 0.081761
| false
| 0
| 0.050314
| 0
| 0.147799
| 0.003145
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed3b7de286ac4f4bebceec94867062df2dc32542
| 8,194
|
py
|
Python
|
libs/linux/wpa_cli.py
|
hpagseddy/ZPUI
|
b82819e523987639c2dfab417f9895d7cd7ce049
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
libs/linux/wpa_cli.py
|
hpagseddy/ZPUI
|
b82819e523987639c2dfab417f9895d7cd7ce049
|
[
"Apache-2.0",
"MIT"
] | 2
|
2020-01-17T00:44:53.000Z
|
2020-01-19T21:10:48.000Z
|
libs/linux/wpa_cli.py
|
hpagseddy/ZPUI
|
b82819e523987639c2dfab417f9895d7cd7ce049
|
[
"Apache-2.0",
"MIT"
] | 1
|
2020-01-14T22:44:27.000Z
|
2020-01-14T22:44:27.000Z
|
from subprocess import check_output, CalledProcessError
from ast import literal_eval
from time import sleep
from helpers import setup_logger
logger = setup_logger(__name__, "warning")
current_interface = None
#wpa_cli related functions and objects
def wpa_cli_command(*command):
run = ["wpa_cli"]
if current_interface:
run += ["-i"+current_interface]
try:
return check_output(run + list(command))
except CalledProcessError as e:
raise WPAException(command[0], e.returncode, output=e.output, args=command[1:])
class WPAException(Exception):
def __init__(self, command, exit_code, args=None, output=None):
self.command = command
self.code = exit_code
self.args = args
if not args:
message = "'wpa_cli {}' returned {}".format(self.command, self.code)
else:
message = "'wpa_cli {} {}' returned {}".format(self.command, ' '.join(args), self.code)
if output:
message += "\n Output: {}".format(output)
super(WPAException, self).__init__(message)
#wpa_cli command wrappers and their helpers
def connect_new_network(network_info):
#First, looking in the known networks
conf_networks = list_configured_networks()
network_found = False
for network in conf_networks:
if network_info['ssid'] == network['ssid']:
network_found = True
select_network(network['network id'])
return True
#Then, if it's an open network, just connecting
if is_open_network(network_info):
network_id = add_network()
logger.info(set_network(network_id, 'ssid', '"'+network_info['ssid']+'"'))
set_network(network_id, 'key_mgmt', 'NONE')
select_network(network_id)
return True
#Else, not enough is implemented for now
if not network_found:
logger.warning("Hell, I dunno.")
return False
def is_open_network(network_info):
#Might be an approach which doesn't take some things into account
return not is_wpa_enabled(network_info)
def is_wpa_enabled(network_info):
flags = parse_network_flags(network_info['flags'])
wpa_enabled = False
for flag in flags:
if flag.startswith('WPA'):
wpa_enabled = True
return wpa_enabled
def parse_network_flags(flag_string):
#Flags go each after another, enclosed in "[]" braces
flags = [flag.strip('[]') for flag in flag_string.split('][')] #If anybody knows a better way, do commit
return flags
#wpa_cli commands
def get_interfaces():
output = process_output(wpa_cli_command("interface"))
output = output[1:] #First line removed by process_output, second line says "Available interfaces"
return output
def set_active_interface(interface_name):
#TODO output check
global current_interface
# try to set the module's interface variable, then check status
# if status check fails, set the variable back to what it was
# and re-raise the exception
last_interface = current_interface
try:
current_interface = interface_name
output = process_output(wpa_cli_command("status"))
except:
current_interface = last_interface
raise
# else: all went well
#if output == "Connected to interface '{}'".format(interface_name):
def get_current_interface():
#TODO: check without wireless adapter plugged in
output = process_output(wpa_cli_command("ifname"))
return output[0]
def connection_status():
#TODO: check without wireless adapter plugged in
parameters = {}
output = process_output(wpa_cli_command("status"))
for line in output:
if '=' not in line:
continue
else:
param, value = line.split('=',1)
parameters[param] = value
return parameters
def list_configured_networks():
#Gives a nice table with first row as header and tab-separated elements, so I'll use process_table function
output = process_output(wpa_cli_command("list_networks"))
#As of wpa_supplicant 2.3-1, header elements are ['network id', 'ssid', 'bssid', 'flags']
networks = process_table(output[0], output[1:])
return networks
def dict_configured_networks_by_ssid():
networks = list_configured_networks()
return {n["ssid"]:n for n in networks}
def dict_configured_networks_by_id():
networks = list_configured_networks()
return {n["network id"]:n for n in networks}
def select_network(network_id):
return ok_fail_command("select_network", str(network_id))
def enable_network(network_id):
return ok_fail_command("enable_network", str(network_id))
def remove_network(network_id):
return ok_fail_command("remove_network", str(network_id))
def save_config():
return ok_fail_command("save_config")
def disable_network(network_id):
return ok_fail_command("disable_network", str(network_id))
def initiate_scan():
return ok_fail_command("scan")
def disconnect():
return ok_fail_command("disconnect")
def reconnect():
return ok_fail_command("reconnect")
def parse_string_from_cli(ssid):
return literal_eval("'{}'".format(ssid))
def get_scan_results():
#Currently I know of no way to know if the scan results got updated since last time scan was initiated
output = process_output(wpa_cli_command("scan_results"))
#As of wpa_supplicant 2.3-1, header elements are ['bssid', 'frequency', 'signal level', 'flags', 'ssid']
networks = process_table(output[0], output[1:])
# Filtering SSIDs to allow for using Unicode SSIDs
for network in networks:
network["ssid"] = parse_string_from_cli(network["ssid"])
return networks
def add_network():
return int_fail_command("add_network")
def set_network(network_id, param_name, value):
if param_name == "ssid":
value = 'P'+value
return ok_fail_command("set_network", str(network_id), param_name, value)
def get_network(network_id, param_name):
output = wpa_cli_command("get_network", str(network_id), param_name)
value = process_output(output)[0]
if value.startswith("'") or value.startswith('"'):
value = literal_eval(value)
return value
#Helper commands
def ok_fail_command(command_name, *args):
#Wrapper around commands which return either "OK" or "FAIL"
#Might fail if the wireless dongle gets unplugged or something
output = process_output(wpa_cli_command(command_name, *[str(arg) for arg in args]))
if output[0] == "OK":
return True
else:
raise WPAException(command_name, output[0], args)
def int_fail_command(command_name, *args):
output = process_output(wpa_cli_command(command_name, *[str(arg) for arg in args]))
try:
return int(output[0])
except:
raise WPAException(command_name, output[0], args)
def process_table(header, contents):
#Takes a tab-separated table and returns a list of dicts, each dict representing a row and having column_name:value mappings
table = []
#I'm going to split the header to column names and use those for dictionary keys so that there's no need to hard-code values
column_names = [name.strip(' ') for name in header.split(' / ')]
for line in contents:
row = {}
values = line.split('\t')
for i, value in enumerate(values):
column_name = column_names[i]
row[column_name] = value
table.append(row)
return table
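# Worked example for process_table (added commentary; the values are made up but the
# column layout matches the "list_networks" output described above):
#   header   = "network id / ssid / bssid / flags"
#   contents = ["0\tHomeWifi\tany\t[CURRENT]"]
#   process_table(header, contents)
#   # -> [{'network id': '0', 'ssid': 'HomeWifi', 'bssid': 'any', 'flags': '[CURRENT]'}]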
def process_output(output):
#First line of output of wpa_cli (almost?) always says "Selected interface: $INT"
# but only if the interface is not passed using "wpa_cli -iinterface".
lines = output.split('\n')
if not current_interface:
lines = lines[1:] #First line has the "Selected interface: $INT"
return [line.strip(' ') for line in lines if line] #Removing all whitespace and not counting empty lines
if __name__ == "__main__":
print(get_current_interface())
print(get_interfaces())
print(list_configured_networks())
print(connection_status())
print(initiate_scan())
for i in range(7):
sleep(1)
print(get_scan_results())
print(initiate_scan())
print(initiate_scan())
| 35.167382
| 128
| 0.691726
| 1,112
| 8,194
| 4.880396
| 0.234712
| 0.031509
| 0.02635
| 0.031509
| 0.264603
| 0.199558
| 0.144831
| 0.051594
| 0.036116
| 0.036116
| 0
| 0.003538
| 0.206615
| 8,194
| 232
| 129
| 35.318966
| 0.831257
| 0.228948
| 0
| 0.156627
| 0
| 0
| 0.059975
| 0
| 0
| 0
| 0
| 0.00431
| 0
| 1
| 0.180723
| false
| 0
| 0.024096
| 0.066265
| 0.391566
| 0.048193
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed3d3135e9d1178c7b64b4da6cccbf9abc64c76d
| 1,727
|
py
|
Python
|
system/lib/update_musl.py
|
RyanCargan/emscripten
|
6d3859f88e1d6394395760153c0a8cfa6a876ac7
|
[
"MIT"
] | 6,541
|
2019-01-17T22:13:18.000Z
|
2022-03-31T07:20:21.000Z
|
system/lib/update_musl.py
|
RyanCargan/emscripten
|
6d3859f88e1d6394395760153c0a8cfa6a876ac7
|
[
"MIT"
] | 7,584
|
2019-01-17T22:58:27.000Z
|
2022-03-31T23:10:22.000Z
|
system/lib/update_musl.py
|
RyanCargan/emscripten
|
6d3859f88e1d6394395760153c0a8cfa6a876ac7
|
[
"MIT"
] | 1,275
|
2019-01-19T16:18:04.000Z
|
2022-03-30T19:32:35.000Z
|
#!/usr/bin/env python3
# Copyright 2021 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""Simple script for updating musl from external git repo.
The upstream sources, along with our local changes, live at:
https://github.com/emscripten-core/musl
To update musl first make sure all changes from the emscripten repo
are present in the `emscripten` branch of the above repo. Then run
`git merge v<musl_version>` to pull in the latest musl changes from
a given musl version. Once any merge conflicts are resolved, those
changes can then be copied back into emscripten using this script.
"""
import os
import sys
import shutil
import subprocess
script_dir = os.path.abspath(os.path.dirname(__file__))
local_src = os.path.join(script_dir, 'libc', 'musl')
exclude_dirs = (
# Top level directories we don't include
'tools', 'obj', 'lib', 'crt', 'musl', 'compat',
# Parts of src we don't build
'malloc',
# Arch-specific code we don't use
'arm', 'x32', 'sh', 'i386', 'x86_64', 'aarch64', 'riscv64',
's390x', 'mips', 'mips64', 'mipsn32', 'powerpc', 'powerpc64',
'm68k', 'microblaze', 'or1k', 'generic')
musl_dir = os.path.abspath(sys.argv[1])
def should_ignore(name):
return name in exclude_dirs or name[0] == '.'
def ignore(dirname, contents):
return [c for c in contents if should_ignore(c)]
def main():
assert os.path.exists(musl_dir)
# Remove old version
shutil.rmtree(local_src)
# Copy new version into place
shutil.copytree(musl_dir, local_src, ignore=ignore)
if __name__ == '__main__':
main()
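# Illustrative invocation (added commentary; the musl checkout path is hypothetical):
#   ./system/lib/update_musl.py ~/src/emscripten-core/musl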
| 28.311475
| 78
| 0.72264
| 265
| 1,727
| 4.611321
| 0.596226
| 0.02455
| 0.01473
| 0.026187
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022207
| 0.165605
| 1,727
| 60
| 79
| 28.783333
| 0.825815
| 0.525188
| 0
| 0
| 0
| 0
| 0.176617
| 0
| 0
| 0
| 0
| 0
| 0.043478
| 1
| 0.130435
| false
| 0
| 0.173913
| 0.086957
| 0.391304
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed3dd47b74e1445e656b23816bec9c93f6315a3e
| 3,358
|
py
|
Python
|
src/scout_apm/instruments/pymongo.py
|
xiamx/scout_apm_python
|
d03dab45f65cf7d1030e11fabf6da4cf6e72ee59
|
[
"MIT"
] | null | null | null |
src/scout_apm/instruments/pymongo.py
|
xiamx/scout_apm_python
|
d03dab45f65cf7d1030e11fabf6da4cf6e72ee59
|
[
"MIT"
] | null | null | null |
src/scout_apm/instruments/pymongo.py
|
xiamx/scout_apm_python
|
d03dab45f65cf7d1030e11fabf6da4cf6e72ee59
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
# Used in the exec() call below.
from scout_apm.core.monkey import monkeypatch_method, unpatch_method # noqa: F401
from scout_apm.core.tracked_request import TrackedRequest # noqa: F401
logger = logging.getLogger(__name__)
class Instrument(object):
PYMONGO_METHODS = [
"aggregate",
"bulk_write",
"count",
"create_index",
"create_indexes",
"delete_many",
"delete_one",
"distinct",
"drop",
"drop_index",
"drop_indexes",
"ensure_index",
"find_and_modify",
"find_one",
"find_one_and_delete",
"find_one_and_replace",
"find_one_and_update",
"group",
"inline_map_reduce",
"insert",
"insert_many",
"insert_one",
"map_reduce",
"reindex",
"remove",
"rename",
"replace_one",
"save",
"update",
"update_many",
"update_one",
]
def __init__(self):
self.installed = False
def installable(self):
try:
from pymongo.collection import Collection # noqa: F401
except ImportError:
logger.info("Unable to import for PyMongo instruments")
return False
if self.installed:
logger.warn("PyMongo Instruments are already installed.")
return False
return True
def install(self):
if not self.installable():
logger.info("PyMongo instruments are not installable. Skipping.")
return False
self.installed = True
try:
from pymongo.collection import Collection # noqa: F401
# There is no way the import can fail if self.installable() succeeded.
except ImportError: # pragma: no cover
logger.info(
"Unable to import for PyMongo instruments. Instrument install failed."
)
return False
for method_str in self.__class__.PYMONGO_METHODS:
try:
code_str = """
@monkeypatch_method(Collection)
def {method_str}(original, self, *args, **kwargs):
tr = TrackedRequest.instance()
name = '/'.join(['MongoDB', self.name, '{camel_name}'])
span = tr.start_span(operation=name, ignore_children=True)
span.tag('name', self.name)
try:
return original(*args, **kwargs)
finally:
tr.stop_span()
""".format(
method_str=method_str,
camel_name="".join(c.title() for c in method_str.split("_")),
)
exec(code_str)
logger.info("Instrumented PyMongo Collection.%s", method_str)
except Exception as e:
logger.warn(
"Unable to instrument for PyMongo Collection.%s: %r", method_str, e
)
return False
return True
def uninstall(self):
if not self.installed:
logger.info("PyMongo instruments are not installed. Skipping.")
return False
self.installed = False
from pymongo.collection import Collection
for method_str in self.__class__.PYMONGO_METHODS:
unpatch_method(Collection, method_str)
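# --- Illustrative usage sketch (added commentary, not part of the upstream file). ---
# Kept as comments so importing this module stays side-effect free; it only uses the
# methods defined on Instrument above and assumes pymongo is importable:
#
#   instrument = Instrument()
#   if instrument.installable():
#       instrument.install()    # Collection method calls are now recorded as spans
#   ...                         # run application code that talks to MongoDB
#   instrument.uninstall()      # restore the original Collection methods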
| 28.700855
| 87
| 0.579512
| 349
| 3,358
| 5.352436
| 0.361032
| 0.043362
| 0.01606
| 0.043362
| 0.255353
| 0.175589
| 0.139186
| 0.139186
| 0
| 0
| 0
| 0.005307
| 0.326683
| 3,358
| 116
| 88
| 28.948276
| 0.820876
| 0.047647
| 0
| 0.21875
| 0
| 0
| 0.317241
| 0.040125
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.114583
| 0
| 0.270833
| 0.010417
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed3e5d031590d8e6dff347577131d013386c5855
| 3,235
|
py
|
Python
|
measures/tests/factories.py
|
uktrade/tamato
|
4ba2ffb25eea2887e4e081c81da7634cd7b4f9ca
|
[
"MIT"
] | 14
|
2020-03-25T11:11:29.000Z
|
2022-03-08T20:41:33.000Z
|
measures/tests/factories.py
|
uktrade/tamato
|
4ba2ffb25eea2887e4e081c81da7634cd7b4f9ca
|
[
"MIT"
] | 352
|
2020-03-25T10:42:09.000Z
|
2022-03-30T15:32:26.000Z
|
measures/tests/factories.py
|
uktrade/tamato
|
4ba2ffb25eea2887e4e081c81da7634cd7b4f9ca
|
[
"MIT"
] | 3
|
2020-08-06T12:22:41.000Z
|
2022-01-16T11:51:12.000Z
|
import random
from typing import Optional
import factory
from common.tests import factories
from measures.sheet_importers import MeasureSheetRow
class MeasureSheetRowFactory(factory.Factory):
"""
A factory that produces a row that might be read from a sheet of measures as
recognised by the :class:`measures.sheet_importers.MeasureSheetRow`
importer.
The factory references a MeasureFactory to do the production of an actual
Measure, and then references the data produced by the MeasureFactory to
build up a row of string values.
The values are then built into a tuple in the order specified in the
`MeasureSheetRow` importer.
"""
class Meta:
model = tuple
exclude = ["measure"]
measure = factory.SubFactory(factories.MeasureFactory)
item_id = factory.SelfAttribute("measure.goods_nomenclature.item_id")
measure_type_description = factory.SelfAttribute("measure.measure_type.description")
duty_sentence = factory.sequence(lambda n: f"{n}.00%")
origin_description = factory.LazyAttribute(
lambda m: m.measure.geographical_area.get_description().description,
)
excluded_origin_descriptions = factory.LazyAttribute(
lambda m: random.choice(MeasureSheetRow.separators).join(
e.excluded_geographical_area.get_description().description
for e in m.measure.exclusions.all()
),
)
quota_order_number = factory.LazyAttribute(
lambda m: m.measure.order_number.order_number
if m.measure.order_number
else m.measure.dead_order_number,
)
additional_code_id = factory.LazyAttribute(
lambda m: m.measure.additional_code.type.sid + m.measure.additional_code.code
if m.measure.additional_code
else m.measure.dead_additional_code,
)
validity_start_date = factory.SelfAttribute("measure.valid_between.lower")
validity_end_date = factory.SelfAttribute("measure.valid_between.upper")
regulation_id = factory.SelfAttribute("measure.generating_regulation.regulation_id")
footnote_ids = factory.LazyAttribute(
lambda m: random.choice(MeasureSheetRow.separators).join(
f.footnote_type.footnote_type_id + f.footnote_id
for f in m.measure.footnotes.all()
),
)
@factory.lazy_attribute
def conditions(self) -> Optional[str]:
"""Returns a string that can be parsed by the
:class:`measures.parsers.ConditionSentenceParser`."""
if not self.measure.conditions.exists():
return None
parts = []
for c in self.measure.conditions.all():
part = []
part.append(c.condition_code.code)
if c.required_certificate:
part.append("cert:")
part.append(
f"{c.required_certificate.certificate_type.sid}-{c.required_certificate.sid}",
)
part.append(f"({c.action.code}):")
parts.append(" ".join(part))
return f"Cond: {'; '.join(parts)}"
@classmethod
def _create(cls, model_class, *args, **kwargs):
data = [kwargs[k] for k in MeasureSheetRow.columns]
return super()._create(model_class, data)
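# --- Illustrative usage sketch (added commentary, not part of the upstream file). ---
# Kept as comments because building a row needs the project's Django test setup
# (MeasureFactory creates real model instances):
#
#   row = MeasureSheetRowFactory()
#   # `row` is a plain tuple whose values follow MeasureSheetRow.columns order,
#   # e.g. item id, measure type description, duty sentence, origin description, ...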
| 37.616279
| 98
| 0.681607
| 379
| 3,235
| 5.672823
| 0.364116
| 0.037209
| 0.062791
| 0.062791
| 0.190233
| 0.152093
| 0.063256
| 0.063256
| 0.063256
| 0
| 0
| 0.000799
| 0.226584
| 3,235
| 85
| 99
| 38.058824
| 0.858513
| 0.162597
| 0
| 0.065574
| 0
| 0
| 0.112745
| 0.089367
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032787
| false
| 0
| 0.081967
| 0
| 0.393443
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed3f22bc9cd6901491dd74a1964aa19636812550
| 581
|
py
|
Python
|
tools/test_net.py
|
by-liu/SegLossBia
|
9cc639c04084cda9d5fb20ea34699db7e0beaf5c
|
[
"MIT"
] | 18
|
2021-04-20T17:03:20.000Z
|
2022-03-12T05:56:24.000Z
|
tools/test_net.py
|
by-liu/SegLossBia
|
9cc639c04084cda9d5fb20ea34699db7e0beaf5c
|
[
"MIT"
] | null | null | null |
tools/test_net.py
|
by-liu/SegLossBia
|
9cc639c04084cda9d5fb20ea34699db7e0beaf5c
|
[
"MIT"
] | 1
|
2021-07-08T17:44:15.000Z
|
2021-07-08T17:44:15.000Z
|
import sys
import logging
from seglossbias.utils import mkdir, setup_logging
from seglossbias.engine import default_argument_parser, load_config, DefaultTester
logger = logging.getLogger(__name__)
def setup(args):
cfg = load_config(args)
mkdir(cfg.OUTPUT_DIR)
setup_logging(output_dir=cfg.OUTPUT_DIR)
return cfg
def main():
args = default_argument_parser().parse_args()
cfg = setup(args)
logger.info("Launch command : ")
logger.info(" ".join(sys.argv))
tester = DefaultTester(cfg)
tester.test()
if __name__ == "__main__":
main()
| 20.75
| 82
| 0.717728
| 74
| 581
| 5.310811
| 0.459459
| 0.068702
| 0.111959
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175559
| 581
| 27
| 83
| 21.518519
| 0.820459
| 0
| 0
| 0
| 0
| 0
| 0.04475
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.210526
| 0
| 0.368421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed402a46ff5fc00fad7e1376b1ab297b4013340b
| 7,808
|
py
|
Python
|
jp_doodle/quantity_forest.py
|
lingruiluo/jp_doodle
|
b3935208821898f22ab504c2b26dd4d37f08f0e4
|
[
"BSD-2-Clause"
] | 43
|
2018-10-10T08:38:07.000Z
|
2022-03-19T22:44:42.000Z
|
jp_doodle/quantity_forest.py
|
firmfrol19/jp_doodle
|
cb34938edaedbe15590ebe8024060dd97bb69fa9
|
[
"BSD-2-Clause"
] | 8
|
2018-09-17T19:49:45.000Z
|
2020-08-24T15:51:16.000Z
|
jp_doodle/quantity_forest.py
|
firmfrol19/jp_doodle
|
cb34938edaedbe15590ebe8024060dd97bb69fa9
|
[
"BSD-2-Clause"
] | 5
|
2019-06-13T15:53:55.000Z
|
2020-11-13T01:22:56.000Z
|
from jp_doodle import doodle_files
qf_js = doodle_files.vendor_path("js/quantity_forest.js")
from jp_doodle import dual_canvas
import jp_proxy_widget
import os
from subprocess import check_output
import pprint
if bytes != str:
unicode = str
def directory_usage(directory, epsilon=0.02):
if not os.path.isdir(directory):
return None
ls = os.listdir(directory)
result = {}
total = 0.0
for fn in ls:
path = os.path.join(directory, fn)
try:
usage = check_output(["du", "-s", path])
except Exception:
pass
else:
usage = unicode(usage, "utf8") # py 3
[snum, sname] = usage.strip().split("\t")
num = float(snum)
total += num
result[fn] = (path, num)
final = {}
other = 0
for fn in result:
(path, num) = result[fn]
portion = num/total
if portion < epsilon:
other += num
else:
final[fn] = {"name": fn, "file_size": num, "percent": portion*100, "id": path}
if other>epsilon:
final["*other"] = {"name": "*other", "file_size": other, "percent": other*100/total, "id": "*" + directory}
return final
RIGHT = {"x": 1, "y":0}
UP = {"x": 0, "y":1}
class FileSystemExplorer:
color_counter = 333
opacity = 0.5
def __init__(self, canvas_widget, path, width=600, enable_deletions=False,
horizontal=False, x_vector=None, y_vector=None,
dy=50, dh=20, epsilon=0.02, degrees=15, font="normal 10px Arial",
background="rgba(244,230,255,0.8)", opacity=0.7,
clearHeight=300,
):
self.opacity = opacity
if y_vector is None:
y_vector = UP
if horizontal:
y_vector = RIGHT
if x_vector is None:
x_vector = RIGHT
if horizontal:
x_vector = UP
self.epsilon = epsilon
self.enable_deletions = enable_deletions
path = os.path.expanduser(path)
path = os.path.abspath(path)
self.color_cache = {}
self.usage_cache = {}
self.id_to_data = {}
self.expanded = {}
self.widget = canvas_widget
self.path = path
members = self.directory_members(path)
self.widget = canvas_widget
canvas_widget.load_js_files([qf_js])
canvas_widget.js_init("""
var forest_config = {
top_label: top_label,
roots: members,
width: width,
dy: dy,
dh: dh,
id_click: id_click,
degrees: degrees,
background: background,
x_vector: x_vector,
y_vector: y_vector,
font: font,
clearHeight: clearHeight,
}
element.quantity_forest(forest_config);
element.detail = $("<div>Initialized</div>").appendTo(element);
element.show_detail = function(identity, info) {
var d = element.detail
d.html("<div/>");
for (key in info) {
$("<div>" + key + " : " + info[key] + "<div>").appendTo(d);
}
if (!identity.startsWith("*")) {
var deleter = $("<a>delete " + identity + "</a>").appendTo(d);
deleter.on("click", function() { delete_id(identity); });
}
};
""",
width=width,
members=members,
dy=dy, dh=dh,
id_click=self.id_click,
top_label=path,
delete_id=self.delete_id,
degrees=degrees,
x_vector=x_vector,
y_vector=y_vector,
font=font,
background=background,
clearHeight=clearHeight,
)
if enable_deletions:
self.widget.element.detail.html("<div>DELETIONS ARE ENABLED!</div>");
def directory_usage(self, directory):
cache = self.usage_cache
if directory in cache:
return cache[directory]
usage = directory_usage(directory, self.epsilon)
cache[directory] = usage
if not usage:
return usage
for u in usage.values():
u["parent"] = directory
self.id_to_data[u["id"]] = u
return usage
def get_color(self, identity):
cache = self.color_cache
if identity in cache:
return cache[identity]
result = cache[identity] = self.pick_color()
return result
def pick_color(self):
self.color_counter += 1
counter = self.color_counter
rgb = [0, 0, 0]
for i in range(8):
for j in range(3):
rgb[j] = (rgb[j] << 1) | (counter & 1)
counter = (counter >> 1)
# darken
for i in range(3):
rgb[i] = (rgb[i] * 200) // 255
return "rgba(%s,%s,%s,%s)" % (tuple(rgb) + (self.opacity,))
def delete_id(self, identity):
try:
self.widget.element.css("cursor", "wait")
self.widget.element.detail.html("<div>attempting delete...</div>")
self.delete_id1(identity)
finally:
self.widget.element.css("cursor", "default")
def delete_id1(self, identity):
if self.enable_deletions:
# for simplicity for now just clear the usage cache
self.usage_cache = {}
cmd = ["rm", "-rf", identity]
self.widget.element["print"](repr(cmd))
#w.element.css("cursor", "wait")
try:
#try:
checked = check_output(cmd)
#finally:
#w.element.css("cursor", "default")
except Exception as e:
self.widget.element.detail.html("<div>delete " + repr((identity, e)) + " failed</div>");
else:
roots = self.directory_members(self.path)
#pprint.pprint(roots)
self.widget.element.reset_roots(roots)
self.widget.element.detail.html("<div>" + repr(identity) + " deleted</div>");
else:
self.widget.element.detail.html("<div>delete " + repr(identity) + " disabled</div>");
def id_click(self, identity):
try:
self.widget.element.css("cursor", "wait")
self.widget.element.detail.html("<div>click...</div>")
self.expanded[identity] = not self.expanded.get(identity, False)
roots = self.directory_members(self.path)
#pprint.pprint(roots)
self.widget.element.reset_roots(roots)
#self.widget.element.detail.html("<div>expand " + repr(identity) + "</div>");
self.widget.element.show_detail(identity, self.id_to_data[identity])
finally:
self.widget.element.css("cursor", "default")
def directory_members(self, directory):
self.expanded[directory] = True
usage = self.directory_usage(directory)
if not usage:
return []
result = []
sorter = [(u["percent"], u["name"]) for u in usage.values()]
for (pct, filename) in reversed(sorted(sorter)):
u = usage[filename]
identity = u["id"]
expanded = self.expanded.get(identity, False)
children = None
if expanded:
children = self.directory_members(identity)
r = {
"id": identity,
"label": u["name"],
"size": u["file_size"],
"children": children,
"expanded": expanded,
"color": self.get_color(identity),
}
result.append(r)
return result
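# --- Illustrative usage sketch (added commentary, not part of the upstream file). ---
# Kept as comments; intended for a Jupyter notebook. The widget class name below is
# an assumption about jp_doodle's dual_canvas module rather than something this file
# defines:
#
#   canvas = dual_canvas.DualCanvasWidget(width=620, height=400)
#   explorer = FileSystemExplorer(canvas, "~/projects", epsilon=0.05)
#   canvas   # display the widget; clicking a bar expands that directory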
| 34.702222
| 115
| 0.523309
| 858
| 7,808
| 4.648019
| 0.22028
| 0.042628
| 0.063942
| 0.040371
| 0.199097
| 0.17653
| 0.161484
| 0.161484
| 0.161484
| 0.111836
| 0
| 0.014018
| 0.351306
| 7,808
| 224
| 116
| 34.857143
| 0.773347
| 0.032531
| 0
| 0.149254
| 0
| 0.004975
| 0.201511
| 0.0232
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044776
| false
| 0.004975
| 0.029851
| 0
| 0.139303
| 0.00995
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ed40391b29c9dd84fbde6dce8cec5a7ba2c96f0b
| 3,139
|
py
|
Python
|
LeetCode-All-Solution/Python3/LC-0035-Search-Insert-Position.py
|
YuweiYin/Algorithm_YuweiYin
|
28648fac59c5a4e3c907978cbd1b3e662ba18fd5
|
[
"MIT"
] | null | null | null |
LeetCode-All-Solution/Python3/LC-0035-Search-Insert-Position.py
|
YuweiYin/Algorithm_YuweiYin
|
28648fac59c5a4e3c907978cbd1b3e662ba18fd5
|
[
"MIT"
] | null | null | null |
LeetCode-All-Solution/Python3/LC-0035-Search-Insert-Position.py
|
YuweiYin/Algorithm_YuweiYin
|
28648fac59c5a4e3c907978cbd1b3e662ba18fd5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""=================================================================
@Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3
@File : LC-0035-Search-Insert-Position.py
@Author : [YuweiYin](https://github.com/YuweiYin)
@Date : 2022-01-01
=================================================================="""
import sys
import time
from typing import List
"""
LeetCode - 0035 - (Easy) - Search Insert Position
https://leetcode.com/problems/search-insert-position/
Description:
Given a sorted array of distinct integers and a target value, return the index if the target is found.
If not, return the index where it would be if it were inserted in order.
Requirement:
You must write an algorithm with O(log n) runtime complexity.
Example 1:
Input: nums = [1,3,5,6], target = 5
Output: 2
Example 2:
Input: nums = [1,3,5,6], target = 2
Output: 1
Example 3:
Input: nums = [1,3,5,6], target = 7
Output: 4
Constraints:
1 <= nums.length <= 10^4
-10^4 <= nums[i] <= 10^4
nums contains distinct values sorted in ascending order.
-10^4 <= target <= 10^4
"""
class Solution:
def searchInsert(self, nums: List[int], target: int) -> int:
# exception case
if not isinstance(nums, list) or len(nums) == 0:
return 0
# main method: (loop) binary search of sorted list
return self._searchInsert(nums, target)
def _searchInsert(self, nums: List[int], target: int) -> int:
start_index, end_index = 0, len(nums) - 1
insert_index = 0
while start_index <= end_index:
cur_index = (end_index + start_index) >> 1 # current cursor
cur_num = nums[cur_index] # cache variable
if start_index == end_index: # border case: must decide the insert position now
return start_index if (target <= cur_num) else (start_index + 1)
if cur_num == target: # 1. hit the target
return cur_index
elif cur_num < target: # 2. go right
start_index = cur_index + 1 # change interval
insert_index = start_index # adjust the possible insert index
else: # 3. go left
end_index = cur_index - 1 # change interval
insert_index = cur_index # adjust the possible insert index
return insert_index
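# Worked trace (added commentary) for Example 2 above: nums = [1, 3, 5, 6], target = 2
#   start=0, end=3 -> cur=1, nums[1]=3 > 2 -> go left: end=0, insert_index=1
#   start=0, end=0 -> border case, 2 > nums[0]=1       -> return start+1 == 1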
def main():
# Example 1: Output: 2
# nums = [1, 3, 5, 6]
# target = 5
# Example 2: Output: 1
# nums = [1, 3, 5, 6]
# target = 2
# Example 3: Output: 4
# nums = [1,3,5,6]
# target = 7
# Example 4: Output: 0
# nums = [1, 3, 5, 6]
# target = 0
# Example 5: Output: 0
nums = [1, 3]
target = 0
# init instance
solution = Solution()
# run & time
start = time.process_time()
ans = solution.searchInsert(nums, target)
end = time.process_time()
# show answer
print('\nAnswer:')
print(ans)
# show time consumption
print('Running Time: %.5f ms' % ((end - start) * 1000))
if __name__ == "__main__":
sys.exit(main())
| 28.798165
| 107
| 0.572794
| 416
| 3,139
| 4.225962
| 0.346154
| 0.025597
| 0.027304
| 0.027873
| 0.20876
| 0.197383
| 0.151877
| 0.09215
| 0.047782
| 0
| 0
| 0.046256
| 0.27684
| 3,139
| 108
| 108
| 29.064815
| 0.728194
| 0.292768
| 0
| 0
| 0
| 0
| 0.026444
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081081
| false
| 0
| 0.081081
| 0
| 0.324324
| 0.081081
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|