hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8dd4b41483a626f400f453b5b8e1334969260b41 | 649 | py | Python | scripts/partition.py | Ho011/learning_python | 7117c0009f13675c65e23f3650b44dc0bce6a657 | [
"MIT"
] | null | null | null | scripts/partition.py | Ho011/learning_python | 7117c0009f13675c65e23f3650b44dc0bce6a657 | [
"MIT"
] | null | null | null | scripts/partition.py | Ho011/learning_python | 7117c0009f13675c65e23f3650b44dc0bce6a657 | [
"MIT"
] | null | null | null | from typing import (
Callable,
Any,
Iterable,
Tuple,
List,
)
def partition(predicate: Callable, values: Iterable[Any]) -> Tuple[List[Any], List[Any]]:
    """
    Split the values into two lists based on the truthiness of
    ``predicate(value)``: falsy results go into the first list, truthy
    results into the second. e.g.:

    >>> partition(lambda x: x > 3, range(5))
    ([0, 1, 2, 3], [4])

    Fix: the predicate result is coerced with ``bool`` so predicates that
    return arbitrary truthy values (e.g. ``x & 2`` -> 2) index correctly
    instead of raising IndexError.
    """
    results: Tuple[List[Any], List[Any]] = ([], [])
    for item in values:
        # bool() maps any truthy result to index 1 and any falsy one to 0.
        results[bool(predicate(item))].append(item)
    return results
if __name__ == '__main__':
    # Demo: split 1..10 into evens (predicate falsy) and odds (predicate truthy).
    numbers = list(range(1, 11))
    evens, odds = partition(lambda value: value & 1, numbers)
    print(evens)
    print(odds)
| 23.178571 | 88 | 0.57319 |
3fae8bb9f5d71dd8ba765f37a034e4a0699f073c | 3,045 | py | Python | python_modules/dagster/dagster/core/snap/dagster_types.py | rpatil524/dagster | 6f918d94cbd543ab752ab484a65e3a40fd441716 | [
"Apache-2.0"
] | 1 | 2021-01-31T19:16:29.000Z | 2021-01-31T19:16:29.000Z | python_modules/dagster/dagster/core/snap/dagster_types.py | rpatil524/dagster | 6f918d94cbd543ab752ab484a65e3a40fd441716 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster/dagster/core/snap/dagster_types.py | rpatil524/dagster | 6f918d94cbd543ab752ab484a65e3a40fd441716 | [
"Apache-2.0"
] | 1 | 2021-12-08T18:13:19.000Z | 2021-12-08T18:13:19.000Z | from collections import namedtuple
from dagster import check
from dagster.core.definitions.pipeline_definition import PipelineDefinition
from dagster.core.types.dagster_type import DagsterType, DagsterTypeKind
from dagster.serdes import whitelist_for_serdes
def build_dagster_type_namespace_snapshot(pipeline_def):
    """Snapshot every dagster type used by *pipeline_def*, keyed by type key."""
    check.inst_param(pipeline_def, "pipeline_def", PipelineDefinition)
    snaps_by_key = {
        dagster_type.key: build_dagster_type_snap(dagster_type)
        for dagster_type in pipeline_def.all_dagster_types()
    }
    return DagsterTypeNamespaceSnapshot(snaps_by_key)
def build_dagster_type_snap(dagster_type):
    """Build a serializable DagsterTypeSnap from a live DagsterType definition."""
    check.inst_param(dagster_type, "dagster_type", DagsterType)
    return DagsterTypeSnap(
        kind=dagster_type.kind,
        key=dagster_type.key,
        # Only types with a unique name record one; anonymous types snap to None.
        name=dagster_type.unique_name if dagster_type.has_unique_name else None,
        display_name=dagster_type.display_name,
        description=dagster_type.description,
        is_builtin=dagster_type.is_builtin,
        type_param_keys=dagster_type.type_param_keys,
        loader_schema_key=dagster_type.loader_schema_key,
        materializer_schema_key=dagster_type.materializer_schema_key,
    )
@whitelist_for_serdes
class DagsterTypeNamespaceSnapshot(
    namedtuple("_DagsterTypeNamespaceSnapshot", "all_dagster_type_snaps_by_key")
):
    """Immutable, serdes-whitelisted mapping of dagster type key -> DagsterTypeSnap."""

    def __new__(cls, all_dagster_type_snaps_by_key):
        # Validate the mapping is str -> DagsterTypeSnap before freezing it
        # into the namedtuple.
        return super(DagsterTypeNamespaceSnapshot, cls).__new__(
            cls,
            all_dagster_type_snaps_by_key=check.dict_param(
                all_dagster_type_snaps_by_key,
                "all_dagster_type_snaps_by_key",
                key_type=str,
                value_type=DagsterTypeSnap,
            ),
        )

    def get_dagster_type_snap(self, key):
        """Return the snap for *key*; raises KeyError when the key is unknown."""
        check.str_param(key, "key")
        return self.all_dagster_type_snaps_by_key[key]
@whitelist_for_serdes
class DagsterTypeSnap(
    namedtuple(
        "_DagsterTypeSnap",
        "kind key name description display_name is_builtin type_param_keys "
        "loader_schema_key materializer_schema_key ",
    )
):
    """Serializable snapshot of a single DagsterType definition."""

    def __new__(
        cls,
        kind,
        key,
        name,
        description,
        display_name,
        is_builtin,
        type_param_keys,
        # Optional: validated as opt_str, so None is allowed when the type
        # has no loader/materializer schema.
        loader_schema_key=None,
        materializer_schema_key=None,
    ):
        return super(DagsterTypeSnap, cls).__new__(
            cls,
            kind=check.inst_param(kind, "kind", DagsterTypeKind),
            key=check.str_param(key, "key"),
            name=check.opt_str_param(name, "name"),
            display_name=check.str_param(display_name, "display_name"),
            description=check.opt_str_param(description, "description"),
            is_builtin=check.bool_param(is_builtin, "is_builtin"),
            type_param_keys=check.list_param(type_param_keys, "type_param_keys", of_type=str),
            loader_schema_key=check.opt_str_param(loader_schema_key, "loader_schema_key"),
            materializer_schema_key=check.opt_str_param(
                materializer_schema_key, "materializer_schema_key"
            ),
        )
| 35.823529 | 94 | 0.700821 |
56372afe02986dd0191d4824081238859364e046 | 31 | py | Python | ecs/components/teleportscroll.py | joehowells/7drl2020 | ec92c0870fb9ee975530d6a92c1b96634040ebc4 | [
"MIT"
] | null | null | null | ecs/components/teleportscroll.py | joehowells/7drl2020 | ec92c0870fb9ee975530d6a92c1b96634040ebc4 | [
"MIT"
] | 2 | 2020-03-25T10:30:31.000Z | 2020-03-25T20:13:43.000Z | ecs/components/teleportscroll.py | joehowells/two-button-berserker | ec92c0870fb9ee975530d6a92c1b96634040ebc4 | [
"MIT"
class TeleportScroll:
    """Marker ECS component: carries no data; its presence on an entity
    tags it (presumably as a teleport-scroll item — confirm against the
    systems that query this component)."""
    pass
| 10.333333 | 21 | 0.741935 |
3c83b01b284c6e208fc6f436a38e87deb9f2f283 | 588 | py | Python | homeassistant/components/sql/__init__.py | davyike/core | 13cc7583ed5c7de43c56b43db8fdc9879a853666 | [
"Apache-2.0"
] | 3 | 2019-10-02T04:40:26.000Z | 2020-02-16T13:19:08.000Z | homeassistant/components/sql/__init__.py | davyike/core | 13cc7583ed5c7de43c56b43db8fdc9879a853666 | [
"Apache-2.0"
] | 18 | 2021-11-03T06:21:27.000Z | 2022-03-31T06:20:57.000Z | homeassistant/components/sql/__init__.py | davyike/core | 13cc7583ed5c7de43c56b43db8fdc9879a853666 | [
"Apache-2.0"
] | null | null | null | """The sql component."""
from __future__ import annotations
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from .const import PLATFORMS
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up SQL from a config entry."""
    # Forward the entry to every platform listed in PLATFORMS so each
    # platform can create its entities.
    hass.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload SQL config entry."""
    # Returns True only if every platform unloaded cleanly.
    return await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
| 26.727273 | 78 | 0.77381 |
591da389da0abc51e6378a44dc427da1718ad387 | 1,036 | py | Python | api/predict.py | devSessions/crvi | 1ecc68d6c968294bcc5ceea747604ee237f6080c | [
"MIT"
] | 25 | 2017-12-31T06:51:54.000Z | 2021-11-17T11:29:30.000Z | api/predict.py | amittomar-1/crvi | 1ecc68d6c968294bcc5ceea747604ee237f6080c | [
"MIT"
] | 23 | 2020-01-28T21:34:12.000Z | 2022-03-11T23:11:54.000Z | api/predict.py | amittomar-1/crvi | 1ecc68d6c968294bcc5ceea747604ee237f6080c | [
"MIT"
] | 11 | 2018-01-04T12:30:33.000Z | 2020-12-01T18:08:59.000Z | import os
import numpy as np
import keras
from PIL import Image
# import requests
# from io import BytesIO
class predict(object):
    """Classify a currency-note image and return its denomination label.

    NOTE(review): despite the name, ``url`` is opened directly with
    ``PIL.Image.open``, so it must be a local file path or file object;
    the requests-based remote fetch was commented out upstream.
    """

    # Class-level cache: loading a Keras model from disk is expensive, so do
    # it once per process instead of on every predict_only() call (the
    # original reloaded the model for each prediction).
    _model = None

    def __init__(self, url):
        self.url = url

    @classmethod
    def _get_model(cls):
        """Lazily load and cache the fine-tuned Keras model."""
        if cls._model is None:
            cls._model = keras.models.load_model('finetune_18.model')
        return cls._model

    def predict_only(self):
        """Run the classifier on the image.

        Returns "Rs.10" for predicted class 0 and "Rs.20" for class 1;
        implicitly returns None for any other class index (behaviour
        unchanged from the original).
        """
        img = Image.open(self.url)
        # The network expects 224x224 inputs.
        input_shape = (224, 224)
        img_resized = img.resize(input_shape, Image.LANCZOS)
        # Add a leading batch dimension: (1, 224, 224, channels).
        img_array = np.expand_dims(np.array(img_resized), axis=0)
        model = self._get_model()
        # predict_classes is the legacy Sequential API; returns class indices.
        pred = model.predict_classes(img_array)
        if pred[0] == 0:
            return "Rs.10"
        elif pred[0] == 1:
            return "Rs.20"
| 25.268293 | 71 | 0.591699 |
3da28692d01340096fb2f69dd9d500a0ba43377b | 1,569 | py | Python | crawling/models.py | difara/wmss_new_repo | 7f6f81340161d2dcf028761cc47bfa13b28227c6 | [
"MIT"
] | 1 | 2021-01-04T17:06:02.000Z | 2021-01-04T17:06:02.000Z | crawling/models.py | difara/wmss_new_repo | 7f6f81340161d2dcf028761cc47bfa13b28227c6 | [
"MIT"
] | null | null | null | crawling/models.py | difara/wmss_new_repo | 7f6f81340161d2dcf028761cc47bfa13b28227c6 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
# Create your models here.
class Pencarian_Keyword(models.Model):
    """A keyword-search request ("pencarian" = search): a target URL plus the keyword."""

    url = models.CharField(max_length = 100)
    keyword = models.CharField(max_length = 100)

    def __str__(self):
        return self.url
class Hasil_Pencarian_Keyword(models.Model):
    """A single keyword-search result ("hasil" = result)."""

    word_result = models.CharField(max_length = 1000)

    def __str__(self):
        return self.word_result
class Tabel_Berita(models.Model):
    """A news article ("berita" = news): title, body text and owning user."""

    id_berita = models.AutoField(primary_key=True)  # explicit integer PK
    judul_berita = models.CharField(max_length = 500)  # article title
    konten_berita = models.TextField(null = True)  # article body; optional
    # NOTE(review): ForeignKey without on_delete is only valid on Django < 2.0;
    # confirm the project's Django version before upgrading.
    user_id = models.ForeignKey(User, null=True, unique=False)

    def __str__(self):
        return self.judul_berita
class Kalimat(models.Model):
    """A sentence ("kalimat") extracted from an article, with scoring features."""

    berita = models.ForeignKey(Tabel_Berita, on_delete=models.SET_NULL, null=True)
    kalimat = models.TextField()  # raw sentence text
    tipe = models.CharField(max_length=10, null=True)  # sentence type label
    clean = models.TextField()  # cleaned / preprocessed text
    # f2/f4/f5: decimal features, 4 decimal places (range implied by max_digits=5).
    f2 = models.DecimalField(max_digits=5, decimal_places=4)
    f4 = models.DecimalField(max_digits=5, decimal_places=4)
    f5 = models.DecimalField(max_digits=5, decimal_places=4)
    accepted = models.BooleanField(default=False)  # whether the sentence was accepted
    index_kalimat = models.IntegerField()  # presumably the sentence's position in the article
class Preproses(models.Model):
    """Preprocessing output ("preproses") for an article."""

    berita = models.ForeignKey(Tabel_Berita, on_delete=models.SET_NULL, null=True)
    hasil_proses = models.TextField()  # processed-text result
    # NOTE(review): ForeignKey without on_delete is only valid on Django < 2.0.
    user_id = models.ForeignKey(User, null=True, unique=False)
bbc917a979304d0ea7f1b7fa415b1ba44d84837d | 983 | py | Python | Scripts/simulation/server_commands/royalty_commands.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/server_commands/royalty_commands.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | Scripts/simulation/server_commands/royalty_commands.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | [
"Apache-2.0"
] | null | null | null | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\server_commands\royalty_commands.py
# Compiled at: 2014-06-24 23:41:50
# Size of source mod 2**32: 843 bytes
from server_commands.argument_helpers import OptionalTargetParam, get_optional_target
import sims4.commands
@sims4.commands.Command('royalty.give_royalties')
def give_royalties(opt_sim: OptionalTargetParam=None, _connection=None):
    """Console command: immediately pay out accrued royalties for the target Sim.

    Returns False (with an error message on the console) when the Sim or its
    royalty tracker cannot be resolved, True on success.
    """
    # Resolve the optional Sim argument from the command connection.
    sim = get_optional_target(opt_sim, _connection)
    if sim is None:
        sims4.commands.output('Target Sim could not be found.', _connection)
        return False
    royalty_tracker = sim.sim_info.royalty_tracker
    if royalty_tracker is None:
        sims4.commands.output('Royalty Tracker not found for Sim.', _connection)
        return False
    # Trigger the payout immediately instead of waiting for the normal cycle.
    royalty_tracker.update_royalties_and_get_paid()
    return True
a4c436bc3f5f812e6c75c8e9588587e124fab485 | 659 | py | Python | parser/team27/G-27/execution/symbol/error.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 35 | 2020-12-07T03:11:43.000Z | 2021-04-15T17:38:16.000Z | parser/team27/G-27/execution/symbol/error.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 47 | 2020-12-09T01:29:09.000Z | 2021-01-13T05:37:50.000Z | parser/team27/G-27/execution/symbol/error.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 556 | 2020-12-07T03:13:31.000Z | 2021-06-17T17:41:10.000Z | class T_error():
def __init__(self, tipo, token, descripcion, row, column):
self.tipo = tipo
self.token = token
self.description = descripcion
self.row = row
self.column = column
def toString(self):
valor = self.token
if not isinstance(self.token, str):
valor = str(valor)
return "Tipo: " + self.tipo + " Token: " + valor + " Descripcion: " + self.description + " Linea: " + self.row + " Columna: " + self.column
def getTipo(self):
return self.tipo
def getLinea(self):
return self.row
def getDescripcion(self):
return self.description | 29.954545 | 148 | 0.582701 |
fd27c5f6c427fbd8c6eb5c7d6d6edde9118b2c19 | 4,716 | py | Python | kite-exp/telemetry-analysis/completions-metrics/analysis/data.py | kiteco/kiteco-public | 74aaf5b9b0592153b92f7ed982d65e15eea885e3 | [
"BSD-3-Clause"
] | 17 | 2022-01-10T11:01:50.000Z | 2022-03-25T03:21:08.000Z | kite-exp/telemetry-analysis/completions-metrics/analysis/data.py | kiteco/kiteco-public | 74aaf5b9b0592153b92f7ed982d65e15eea885e3 | [
"BSD-3-Clause"
] | 1 | 2022-01-13T14:28:47.000Z | 2022-01-13T14:28:47.000Z | kite-exp/telemetry-analysis/completions-metrics/analysis/data.py | kiteco/kiteco-public | 74aaf5b9b0592153b92f7ed982d65e15eea885e3 | [
"BSD-3-Clause"
] | 7 | 2022-01-07T03:58:10.000Z | 2022-03-24T07:38:20.000Z | from typing import Callable, Dict
import datetime
from enum import Enum
import json
import numpy as np
import pandas as pd
import tqdm
def read_logs(filename: str) -> pd.DataFrame:
    """
    Read completions metrics from a JSON file (as produced by digest-comp-logs)
    :return: A DataFrame where each row represents the completion metrics for one kite_status event for a given user.
        The index is the timestamp of the event.
    """
    # One JSON record per line; read everything up front so tqdm can show
    # total progress.
    with open(filename, 'r') as f:
        lines = f.readlines()

    records = []
    for line in tqdm.tqdm(lines):
        records.append(_process_record(json.loads(line)))

    # NOTE(review): pd.io.json.json_normalize is deprecated in pandas >= 1.0
    # (use pd.json_normalize); kept as-is for the pandas version this targets.
    df = pd.io.json.json_normalize(records)
    df['timestamp'] = pd.to_datetime(df['timestamp'])
    df = df.set_index('timestamp')
    return df
def _process_record(rec: Dict) -> Dict:
out = {
'total_events': 1,
'user_id': rec['user_id'],
'timestamp': rec['timestamp'],
'client_version': rec['client_version'],
'shown': rec['completions_shown'],
'at_least_one_shown': rec['completions_at_least_one_shown'],
'at_least_one_new_shown': rec['completions_at_least_one_new_shown'],
'triggered': rec['completions_triggered'],
'requested': rec['completions_requested'],
'requested_expected': rec['completions_requested_expected'],
'requested_unexpected': rec['completions_requested_unexpected'],
'requested_error': rec['completions_requested_error'],
}
for breakdown in ["selected", "selected_2", "completed", "at_least_one_shown"]:
mtac_count = 0
total_count = 0
for typ in ["attribute_model", "traditional", "call_model", "keyword_model", "expr_model"]:
bd = rec[f"completions_{breakdown}_by_source"]
if not bd:
bd = {}
count = bd.get(typ, 0)
total_count += count
if typ in ("call_model", "expr_model"):
mtac_count += count
out[f"{breakdown}_{typ}"] = count
out[f"{breakdown}_mtac"] = mtac_count
out[f"{breakdown}_num"] = total_count
return out
class AggType(Enum):
    """How per-user daily metric values are aggregated across users."""

    MEAN = 'mean'
    MEDIAN = 'median'
def limit_to_weekdays(df: pd.DataFrame) -> pd.DataFrame:
    """Return only the rows whose DatetimeIndex falls on Monday-Friday."""
    # weekday: Monday == 0 ... Sunday == 6, so anything below 5 is a weekday.
    return df[df.index.weekday < 5]
def daily_user_agg(
        df: pd.DataFrame,
        metric_fn: Callable[[pd.DataFrame], pd.Series],
        agg_type: AggType,
        min_user_events: int = 10) -> pd.Series:
    """
    Produces a daily timeseries of the given metric, aggregated for each day by user. Only weekdays are included.
    :param df: the completions metrics DataFrame, as returned by read_logs()
    :param metric_fn: a function mapping the completions dataframe to a series representing the desired metric
        e.g. lambda df: df.at_least_one_shown
    :param agg_type: how to aggregate the users
    :param min_user_events: for each day, limit the aggregation to those users that had at least this many events
        in that day
    :return: a Series indexed by day (last, possibly-partial day dropped).
    """
    # Resample to get the list of weekday bucket timestamps to iterate over.
    daily = limit_to_weekdays(df.resample('D').sum())
    days = list(daily.index)
    r: Dict[pd.Timestamp, float] = {}
    for day in days:
        day_end = day + datetime.timedelta(days=1)
        # get all events for the given day
        for_day = df[(df.index >= day) & (df.index < day_end)]
        # limit the events to those coming from users that had at least user_n_events in that day
        counts_by_user = for_day.groupby(['user_id']).size()
        engaged_users = set(counts_by_user[counts_by_user >= min_user_events].index)
        for_day = for_day[for_day.user_id.isin(engaged_users)]
        # add up stats by day by user
        by_date_user = for_day.groupby([pd.Grouper(freq='D'), 'user_id']).sum()
        # select the series representing the metric we're interested in
        metric_series = metric_fn(by_date_user)
        # remove inf values (which may have resulted from division by zero)
        metric_series = metric_series.replace([np.inf, -np.inf], np.nan).dropna()
        # for each day, group the metric for all users by the desired aggregation function
        grouped = metric_series.groupby(level=['timestamp'])
        if agg_type == AggType.MEAN:
            user_agg = grouped.mean()
        elif agg_type == AggType.MEDIAN:
            user_agg = grouped.median()
        else:
            raise ValueError("unknown agg type: {}".format(agg_type))
        # at this point we should have a series that has either one value for the given day (or zero if there are
        # no users)
        assert len(user_agg) in {0, 1}
        # .sum() collapses the 0-or-1-element series to a scalar (0.0 when empty).
        r[day] = user_agg.sum()
    # throw away the last day since it may be incomplete
    return pd.Series(r)[:-1]
| 34.933333 | 117 | 0.647159 |
f6fc73ca0e585da7e83fb6a9655e5f2007dde37c | 3,660 | py | Python | src/transformers/models/xlnet/convert_xlnet_original_tf_checkpoint_to_pytorch.py | dctelus/transformers | 6786cbc4b14ebff0ac59c768cadd109391db9a08 | [
"Apache-2.0"
] | 8,028 | 2018-11-05T15:19:44.000Z | 2019-07-16T09:14:59.000Z | src/transformers/models/xlnet/convert_xlnet_original_tf_checkpoint_to_pytorch.py | arron1227/transformers | b18dfd95e1f60ae65a959a7b255fc06522170d1b | [
"Apache-2.0"
] | 731 | 2018-11-05T21:35:52.000Z | 2019-07-16T09:51:26.000Z | src/transformers/models/xlnet/convert_xlnet_original_tf_checkpoint_to_pytorch.py | arron1227/transformers | b18dfd95e1f60ae65a959a7b255fc06522170d1b | [
"Apache-2.0"
] | 2,106 | 2018-11-05T15:29:15.000Z | 2019-07-16T08:51:57.000Z | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert BERT checkpoint."""
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
# Number of output labels for each GLUE fine-tuning task; used to size the
# classification head when converting a GLUE-fine-tuned checkpoint.
GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}

# Emit INFO-level logs during conversion.
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    """Convert a TensorFlow XLNet checkpoint to PyTorch weights + config on disk.

    The model class is chosen from *finetuning_task*: a GLUE task name yields
    XLNetForSequenceClassification (head sized from GLUE_TASKS_NUM_LABELS), a
    task containing "squad" yields XLNetForQuestionAnswering, otherwise the
    plain XLNetLMHeadModel is used.
    """
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    # Command-line entry point: parse paths/task and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained XLNet model. \n"
        "This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    args = parser.parse_args()
    print(args)
    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
409a959aadfdb74670716e8b305762ccfcef599a | 14,622 | py | Python | src/blade/command_args.py | kerr-huang/blade | a98f35a8bbed13a14213c7c696485b742a13e7aa | [
"BSD-3-Clause"
] | null | null | null | src/blade/command_args.py | kerr-huang/blade | a98f35a8bbed13a14213c7c696485b742a13e7aa | [
"BSD-3-Clause"
] | null | null | null | src/blade/command_args.py | kerr-huang/blade | a98f35a8bbed13a14213c7c696485b742a13e7aa | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2011 Tencent Inc.
# All rights reserved.
#
# Author: Chong peng <michaelpeng@tencent.com>
# Date: October 20, 2011
"""
This is the CmdOptions module which parses the users'
input and provides hint for users.
"""
import os
import platform
import shlex
import console
from argparse import ArgumentParser
class CmdArguments(object):
    """CmdArguments

    Parses user's input and provides hint.
    blade {command} [options] targets
    """

    def __init__(self):
        """Init the class. """
        (self.options, others) = self._cmd_parse()

        # If '--' in arguments, use all other arguments after it as run
        # arguments
        if '--' in others:
            pos = others.index('--')
            self.targets = others[:pos]
            self.options.args = others[pos + 1:]
        else:
            self.targets = others
            self.options.args = []

        for t in self.targets:
            if t.startswith('-'):
                console.error_exit('unrecognized option %s, use blade [action] '
                                   '--help to get all the options' % t)

        command = self.options.command

        # Check the options with different sub command
        actions = {
            'build': self._check_build_command,
            'run': self._check_run_command,
            'test': self._check_test_command,
            'clean': self._check_clean_command,
            'query': self._check_query_command
        }
        actions[command]()

    def _check_run_targets(self):
        """check that run command should have only one target. """
        err = False
        targets = []
        if len(self.targets) == 0:
            err = True
        elif self.targets[0].find(':') == -1:
            err = True
        if err:
            console.error_exit('Please specify a single target to run: '
                               'blade run //target_path:target_name (or '
                               'a_path:target_name)')
        if self.options.command == 'run' and len(self.targets) > 1:
            console.warning('run command will only take one target to build and run')
        if self.targets[0].startswith('//'):
            targets.append(self.targets[0][2:])
        else:
            targets.append(self.targets[0])
        self.targets = targets
        if self.options.runargs:
            console.warning('--runargs has been deprecated, please put all run'
                            ' arguments after a "--"')
            self.options.args = shlex.split(self.options.runargs) + self.options.args

    def _check_test_options(self):
        """check the test command options. """
        if self.options.testargs:
            console.warning('--testargs has been deprecated, please put all test'
                            ' arguments after a "--" ')
            self.options.args = shlex.split(self.options.testargs) + self.options.args

    def _check_query_targets(self):
        """check query targets, should have at least one target. """
        err = False
        targets = []
        if len(self.targets) == 0:
            err = True
        for target in self.targets:
            if target.find(':') == -1:
                err = True
                break
            if target.startswith('//'):
                targets.append(target[2:])
            else:
                targets.append(target)
        if err:
            console.error_exit('Please specify targets in this way: blade query'
                               ' //target_path:target_name (or path:target_name)')
        self.targets = targets

    def _check_plat_and_profile_options(self):
        """check platform and profile options. """
        if (self.options.profile != 'debug' and
                self.options.profile != 'release'):
            console.error_exit('--profile must be "debug" or "release".')

        if self.options.m is None:
            self.options.m = self._arch_bits()
        else:
            if not (self.options.m == '32' or self.options.m == '64'):
                console.error_exit("--m must be '32' or '64'")

            # TODO(phongchen): cross compile checking
            if self.options.m == '64' and platform.machine() != 'x86_64':
                console.error_exit('Sorry, 64-bit environment is required for '
                                   'building 64-bit targets.')

    def _check_color_options(self):
        """check color options. """
        if self.options.color == 'yes':
            console.color_enabled = True
        elif self.options.color == 'no':
            console.color_enabled = False
        elif self.options.color == 'auto' or self.options.color is None:
            pass
        else:
            console.error_exit('--color can only be yes, no or auto.')

    def _check_clean_options(self):
        """check the clean options. """
        self._check_plat_and_profile_options()
        self._check_color_options()

    def _check_query_options(self):
        """check query action options. """
        if not self.options.deps and not self.options.depended:
            console.error_exit('please specify --deps, --depended or both to '
                               'query target')

    def _check_build_options(self):
        """check the building options. """
        self._check_plat_and_profile_options()
        self._check_color_options()

        if self.options.cache_dir is None:
            self.options.cache_dir = os.environ.get('BLADE_CACHE_DIR')
        if self.options.cache_dir:
            self.options.cache_dir = os.path.expanduser(self.options.cache_dir)

        if self.options.cache_size is None:
            self.options.cache_size = os.environ.get('BLADE_CACHE_SIZE')
        if self.options.cache_size == 'unlimited':
            self.options.cache_size = -1
        if self.options.cache_size is None:
            # Default cache size: 2 GiB.
            self.options.cache_size = 2 * 1024 * 1024 * 1024
        else:
            self.options.cache_size = int(self.options.cache_size) * 1024 * 1024 * 1024

    def _check_build_command(self):
        """check build options. """
        self._check_build_options()

    def _check_run_command(self):
        """check run options and the run targets. """
        self._check_build_options()
        self._check_run_targets()

    def _check_test_command(self):
        """check test options. """
        self._check_build_options()
        self._check_test_options()

    def _check_clean_command(self):
        """check clean options. """
        self._check_clean_options()

    def _check_query_command(self):
        """check query options. """
        self._check_plat_and_profile_options()
        self._check_color_options()
        self._check_query_options()
        self._check_query_targets()

    def __add_plat_profile_arguments(self, parser):
        """Add plat and profile arguments. """
        parser.add_argument('-m',
                            dest='m',
                            help=('Generate code for a 32-bit(-m32) or '
                                  '64-bit(-m64) environment, '
                                  'default is autodetect.'))

        parser.add_argument('-p',
                            '--profile',
                            dest='profile',
                            default='release',
                            help=('Build profile: debug or release, '
                                  'default is release.'))

    def __add_generate_arguments(self, parser):
        """Add generate related arguments. """
        parser.add_argument(
            '--generate-dynamic', dest='generate_dynamic',
            action='store_true', default=False,
            help='Generate dynamic libraries.')

        parser.add_argument(
            '--generate-java', dest='generate_java',
            action='store_true', default=False,
            help='Generate java files for proto_library, thrift_library and '
                 'swig_library.')

        parser.add_argument(
            '--generate-php', dest='generate_php',
            action='store_true', default=False,
            help='Generate php files for proto_library and swig_library.')

    def __add_build_actions_arguments(self, parser):
        """Add build related action arguments. """
        parser.add_argument(
            '--generate-scons-only', dest='scons_only',
            action='store_true', default=False,
            help='Generate scons script for debug purpose.')

        parser.add_argument(
            '-j', '--jobs', dest='jobs', type=int, default=0,
            help=('Specifies the number of jobs (commands) to '
                  'run simultaneously.'))

        parser.add_argument(
            '-k', '--keep-going', dest='keep_going',
            action='store_true', default=False,
            help='Continue as much as possible after an error.')

        parser.add_argument(
            '--verbose', dest='verbose', action='store_true',
            default=False, help='Show all details.')

        parser.add_argument(
            '--no-test', dest='no_test', action='store_true',
            default=False, help='Do not build the test targets.')

    def __add_color_arguments(self, parser):
        """Add color argument. """
        parser.add_argument(
            '--color', dest='color', default='auto',
            help='Enable color: yes, no or auto, default is auto.')

    def __add_cache_arguments(self, parser):
        """Add cache related arguments. """
        parser.add_argument(
            '--cache-dir', dest='cache_dir', type=str,
            help='Specifies location of shared cache directory.')

        parser.add_argument(
            '--cache-size', dest='cache_size', type=str,
            help='Specifies cache size of shared cache directory in Gigabytes.'
                 '"unlimited" for unlimited. ')

    def __add_coverage_arguments(self, parser):
        """Add coverage arguments. """
        parser.add_argument(
            '--gprof', dest='gprof',
            action='store_true', default=False,
            help='Add build options to support GNU gprof.')

        parser.add_argument(
            '--gcov', dest='gcov',
            action='store_true', default=False,
            help='Add build options to support GNU gcov to do coverage test.')

    def _add_query_arguments(self, parser):
        """Add query arguments for parser. """
        self.__add_plat_profile_arguments(parser)
        self.__add_color_arguments(parser)
        parser.add_argument(
            '--deps', dest='deps',
            action='store_true', default=False,
            help='Show all targets that the target being queried depends on.')
        parser.add_argument(
            '--depended', dest='depended',
            action='store_true', default=False,
            help='Show all targets that depend on the target being queried.')
        parser.add_argument(
            '--output-to-dot', dest='output_to_dot', type=str,
            help='The name of file to output query results as dot(graphviz) '
                 'format.')

    def _add_clean_arguments(self, parser):
        """Add clean arguments for parser. """
        self.__add_plat_profile_arguments(parser)
        self.__add_generate_arguments(parser)
        self.__add_color_arguments(parser)

    def _add_test_arguments(self, parser):
        """Add test command arguments. """
        parser.add_argument(
            '--testargs', dest='testargs', type=str,
            help='Command line arguments to be passed to tests.')

        parser.add_argument(
            '--full-test', action='store_true',
            dest='fulltest', default=False,
            help='Enable full test, default is incremental test.')

        parser.add_argument(
            '-t', '--test-jobs', dest='test_jobs', type=int, default=1,
            help=('Specifies the number of tests to run simultaneously.'))

        parser.add_argument(
            '--show-details', action='store_true',
            dest='show_details', default=False,
            help='Shows the test result in detail and provides a file.')

    def _add_run_arguments(self, parser):
        """Add run command arguments. """
        parser.add_argument(
            '--runargs', dest='runargs', type=str,
            help='Command line arguments to be passed to the single run target.')

    def _add_build_arguments(self, parser):
        """Add building arguments for parser. """
        self.__add_plat_profile_arguments(parser)
        self.__add_build_actions_arguments(parser)
        self.__add_color_arguments(parser)
        self.__add_cache_arguments(parser)
        self.__add_generate_arguments(parser)
        self.__add_coverage_arguments(parser)

    def _cmd_parse(self):
        """Add command options, add options within this method."""
        blade_cmd_help = 'blade <subcommand> [options...] [targets...]'
        arg_parser = ArgumentParser(prog='blade', description=blade_cmd_help)
        sub_parser = arg_parser.add_subparsers(
            dest='command',
            help='Available subcommands')

        build_parser = sub_parser.add_parser(
            'build',
            help='Build specified targets')

        run_parser = sub_parser.add_parser(
            'run',
            help='Build and runs a single target')

        test_parser = sub_parser.add_parser(
            'test',
            help='Build the specified targets and runs tests')

        clean_parser = sub_parser.add_parser(
            'clean',
            help='Remove all Blade-created output')

        query_parser = sub_parser.add_parser(
            'query',
            help='Execute a dependency graph query')

        self._add_build_arguments(build_parser)
        self._add_build_arguments(run_parser)
        self._add_build_arguments(test_parser)

        self._add_run_arguments(run_parser)
        self._add_test_arguments(test_parser)
        self._add_clean_arguments(clean_parser)
        self._add_query_arguments(query_parser)

        return arg_parser.parse_known_args()

    def _arch_bits(self):
        """Platform arch bits: '64' on x86_64, otherwise '32'."""
        if 'x86_64' == platform.machine():
            return '64'
        else:
            return '32'

    def get_command(self):
        """Return blade command. """
        return self.options.command

    def get_options(self):
        """Returns the command options, which should be used by blade manager."""
        return self.options

    def get_targets(self):
        """Returns the targets from command line."""
        return self.targets
| 36.831234 | 87 | 0.580495 |
22b973a35a13fc0c68b8d21bba577c70edee827c | 96 | py | Python | modelmapper/exceptions.py | wearefair/modelmapper | 7f420ceca0312373fdb0ab81fa3f6a3f8fb86086 | [
"MIT"
] | 5 | 2018-06-29T22:13:22.000Z | 2019-03-15T18:28:07.000Z | modelmapper/exceptions.py | wearefair/modelmapper | 7f420ceca0312373fdb0ab81fa3f6a3f8fb86086 | [
"MIT"
] | 13 | 2018-06-29T22:40:02.000Z | 2020-12-15T05:29:04.000Z | modelmapper/exceptions.py | wearefair/modelmapper | 7f420ceca0312373fdb0ab81fa3f6a3f8fb86086 | [
"MIT"
] | 1 | 2019-08-05T17:47:02.000Z | 2019-08-05T17:47:02.000Z | class NothingToProcess(ValueError):
pass
class FileAlreadyProcessed(ValueError):
    """Error signaling that the given file was already processed."""
    pass
| 13.714286 | 39 | 0.770833 |
2a838a64341573d0363534370bd80a8b2ea52126 | 2,150 | py | Python | tests/test_validation.py | 0x1EE7/tomodachi | 8147b16d8be19b80b3bd7c5d8ed21c9863eaaa83 | [
"MIT"
] | null | null | null | tests/test_validation.py | 0x1EE7/tomodachi | 8147b16d8be19b80b3bd7c5d8ed21c9863eaaa83 | [
"MIT"
] | null | null | null | tests/test_validation.py | 0x1EE7/tomodachi | 8147b16d8be19b80b3bd7c5d8ed21c9863eaaa83 | [
"MIT"
] | null | null | null | import pytest
from tomodachi.validation.validation import validate_field_regex, \
RegexMissmatchException
from tomodachi.validation.validation import validate_field_length, \
TooSmallException, TooLargeException
def test_regex_success() -> None:
    """A well-formed UUID string must satisfy the UUID pattern."""
    uuid_value = 'af047ca5-e8f4-48a9-ab01-1d948f635f95'
    uuid_pattern = r'^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$'
    validate_field_regex(uuid_value, uuid_pattern)
def test_regex_failure() -> None:
    """A SHA-1 style hex digest must be rejected by the UUID pattern."""
    digest = 'a94a8fe5ccb19ba61c4c0873d391e987982fbbd3'
    uuid_pattern = r'^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$'
    with pytest.raises(RegexMissmatchException):
        validate_field_regex(digest, uuid_pattern)
def test_string_length_success_1() -> None:
    """An 8-character string is accepted with bounds (6, 8)."""
    value = '19840801'
    validate_field_length(value, 6, 8)
def test_string_length_success_2() -> None:
    """A 6-character string is accepted with bounds (6, 8)."""
    value = '840801'
    validate_field_length(value, 6, 8)
def test_string_length_success_3() -> None:
    """With no explicit bounds, a long string is accepted."""
    digest = 'a94a8fe5ccb19ba61c4c0873d391e987982fbbd3'
    validate_field_length(digest)
def test_string_length_success_4() -> None:
    """With no explicit bounds, even an empty string is accepted."""
    empty = ''
    validate_field_length(empty)
def test_string_length_too_large() -> None:
    """A 10-character string must be rejected with bounds (6, 8)."""
    value = '1984-08-01'
    with pytest.raises(TooLargeException):
        validate_field_length(value, 6, 8)
def test_string_length_too_small() -> None:
    """A 4-character string must be rejected with bounds (6, 8)."""
    value = '8481'
    with pytest.raises(TooSmallException):
        validate_field_length(value, 6, 8)
def test_string_length_empty() -> None:
    """An empty string must be rejected when a minimum of 1 is given."""
    empty = ''
    with pytest.raises(TooSmallException):
        validate_field_length(empty, 1)
def test_list_length_success_1() -> None:
    """A 5-element list is accepted with bounds (2, 5)."""
    values = ['a', 'b', 'c', 'd', 'e']
    validate_field_length(values, 2, 5)
def test_list_length_success_2() -> None:
    """A 2-element list is accepted with bounds (2, 5)."""
    values = ['a', 'b']
    validate_field_length(values, 2, 5)
def test_list_length_success_3() -> None:
    """With no explicit bounds, an empty list is accepted."""
    values = []
    validate_field_length(values)
def test_list_length_success_4() -> None:
    """With no explicit bounds, a 6-element list is accepted."""
    values = ['a', 'b', 'c', 'd', 'e', 'f']
    validate_field_length(values)
def test_list_length_too_large() -> None:
    """A 6-element list must be rejected with bounds (2, 5)."""
    values = ['a', 'b', 'c', 'd', 'e', 'f']
    with pytest.raises(TooLargeException):
        validate_field_length(values, 2, 5)
def test_list_length_too_small() -> None:
    """A 1-element list must be rejected with bounds (2, 5)."""
    values = ['a']
    with pytest.raises(TooSmallException):
        validate_field_length(values, 2, 5)
def test_list_length_empty() -> None:
    """An empty list must be rejected when a minimum of 1 is given."""
    values = []
    with pytest.raises(TooSmallException):
        validate_field_length(values, 1)
| 27.564103 | 139 | 0.70093 |
6f80077bffef2133f5f65836b06b806c71147de7 | 4,453 | py | Python | src/path_finder.py | brandon-fremin/LightningNetworkAnalysis | c7b174e01327173ee71ef9caaa27f97ff89b969c | [
"MIT"
] | null | null | null | src/path_finder.py | brandon-fremin/LightningNetworkAnalysis | c7b174e01327173ee71ef9caaa27f97ff89b969c | [
"MIT"
] | null | null | null | src/path_finder.py | brandon-fremin/LightningNetworkAnalysis | c7b174e01327173ee71ef9caaa27f97ff89b969c | [
"MIT"
] | null | null | null | import random
from src.dijkstras import Graph, Node_Distance
import matplotlib.pyplot as plt
from src.distance import dist_d, cost_d
def path_finder(source, target, amount, all_nodes, channel_balances, verify_function, distance_function, alpha=1.0, show_plot=False, print_text=True):
    """Find the cheapest route from *source* to *target* for *amount*.

    Builds a directed graph from every channel that *verify_function*
    accepts, weighted by *distance_function*, then runs Dijkstra from
    *source*.  With probability (1 - alpha) each channel is randomly
    dropped before graph construction, which simulates edge failures.

    Returns a ``(distances, prevs, optimal_route)`` triple where
    ``optimal_route`` is a list of node ids, or None when *target* is
    unreachable.  Optionally prints the route and/or plots node
    coordinates on a longitude/latitude scatter (when geo data exists).
    """
    graph = Graph(len(all_nodes))
    for key, node in all_nodes.items():
        self_id = node["id"]
        in_graph = False
        for key, channel in node["channels"].items():
            # Remove some edges for no reason
            if random.uniform(0, 1) > alpha:
                continue
            if verify_function(node, channel, channel_balances, amount):
                peer_id = channel["peer_id"]
                distance = distance_function(channel, amount)
                graph.Add_Into_Adjlist(self_id, Node_Distance(peer_id, distance))
                in_graph = True
        # Add a self edge if a node has no edges to carry amount through it
        if not in_graph:
            graph.Add_Into_Adjlist(self_id, Node_Distance(self_id, 1))
    # prevs maps each node to its predecessor on the shortest path;
    # the search root's predecessor is the "_sentinel" marker.
    distances, prevs = graph.Dijkstras_Shortest_Path(source, False)
    optimal_route = None
    if (print_text):
        print("*************************************************************")
        print(f"Optimal Path:\nSource: {source}\nTarget: {target}\nAmount: {amount}\nDist : {distances[target]} units\n")
    if distances[target] != float("inf"):
        # Walk the predecessor chain backwards from target to source.
        curr = target
        route = []
        while curr != "_sentinel":
            route.insert(0, curr)
            curr = prevs[curr]
        if print_text:
            print("Explicit Route: ")
            [print(f"{r}") for r in route]
        optimal_route = route
        if show_plot:
            # Nodes without geo metadata are simply skipped in the plot.
            x, y = [], []
            for r in route:
                if not all_nodes[r]["geo"]:
                    continue
                x.append(all_nodes[r]["geo"]["longitude"])
                y.append(all_nodes[r]["geo"]["latitude"])
            plt.plot(x, y)
            plt.scatter(x, y, c="red", s=50)
            plt.xlim([-180, 180])
            plt.ylim([-90, 90])
            plt.show()
    elif print_text:
        print("No explicit route :(")
    if print_text:
        print("*************************************************************")
    return distances, prevs, optimal_route
def get_disconnected_keys(all_nodes, channel_balances):
    """Return the set of node ids unreachable from a well-known hub node.

    Runs Dijkstra (via path_finder) from a long-lived, highly connected
    node with every channel accepted, so a node is "disconnected" iff no
    path of any kind reaches it.  Prints the count as a side effect.
    """
    # Two popular nodes that should be connected for forseable future
    source = '028a8e53d70bc0eb7b5660943582f10b7fd6c727a78ad819ba8d45d6a638432c49'
    target = '020ca546d600037181b7cbcd094818100d780d32fd9f210e14390e0d10b7ec71fb'
    amount = 0

    def trivial_verifier(node, channel, channel_balances, amount):
        # Accept every channel: only raw connectivity matters here.
        return True

    distances, _prevs, _route = path_finder(source, target, amount,
                                            all_nodes, channel_balances,
                                            trivial_verifier,
                                            dist_d,
                                            alpha=1)
    print()
    # Comprehension replaces the old manual list-append + counter, which
    # merely duplicated len(disconnected_keys).
    disconnected_keys = [key for key in all_nodes
                         if distances[key] == float('inf')]
    print(f"There are {len(disconnected_keys)} disconnected keys")
    return set(disconnected_keys)
def path_cost(optimal_route, all_nodes, amount):
    """Sum the fee (cost_d) of every hop along *optimal_route*.

    Returns 0 for a None or empty route.  (The old guard only handled
    the empty list and raised TypeError on None, even though path_finder
    returns None for an unreachable target and the sibling failure_node
    already tolerates it.)

    Only the first channel towards each next hop contributes, matching
    the original break-on-first-match behavior.
    """
    if not optimal_route:
        return 0
    cost = 0
    for hop, next_hop in zip(optimal_route, optimal_route[1:]):
        for channel in all_nodes[hop]["channels"].values():
            if channel["peer_id"] == next_hop:
                cost = cost + cost_d(channel, amount)
                break
    return cost
def failure_node(optimal_route, all_nodes, channel_balances, amount):
    """Return (node_id, channel_id) for the first hop whose channel
    balance cannot carry *amount*, or (None, None) when every hop can
    (or when there is no route at all).

    Note: every channel towards the next hop is checked, not just the
    first match.
    """
    if not optimal_route:
        return None, None
    for hop, next_hop in zip(optimal_route, optimal_route[1:]):
        for channel in all_nodes[hop]["channels"].values():
            if channel["peer_id"] != next_hop:
                continue
            channel_id = channel["short_channel_id"]
            if channel_balances[channel_id][hop] < amount:
                return hop, channel_id
    return None, None
506123e9cb0e8f34dcd60092472d02a7171bcff4 | 874 | py | Python | plugins/rapid7_tcell/komand_rapid7_tcell/actions/list_packages/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 46 | 2019-06-05T20:47:58.000Z | 2022-03-29T10:18:01.000Z | plugins/rapid7_tcell/komand_rapid7_tcell/actions/list_packages/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 386 | 2019-06-07T20:20:39.000Z | 2022-03-30T17:35:01.000Z | plugins/rapid7_tcell/komand_rapid7_tcell/actions/list_packages/action.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 43 | 2019-07-09T14:13:58.000Z | 2022-03-28T12:04:46.000Z | import komand
from .schema import ListPackagesInput, ListPackagesOutput
# Custom imports below
class ListPackages(komand.Action):
    """Action that fetches details for all seen tCell packages."""

    def __init__(self):
        # Fix: super(self.__class__, self) recurses infinitely as soon as
        # this class is subclassed; name the class explicitly instead.
        super(ListPackages, self).__init__(
            name="list_packages",
            description="Fetch details for all seen packages (matching the provided criteria)",
            input=ListPackagesInput(),
            output=ListPackagesOutput(),
        )

    def run(self, params={}):
        """Query the tCell API for packages of an app within a time window.

        Falls back to an empty result set when the API returns None.
        """
        app_id = params.get("app_id")
        from_ = params.get("from")
        to = params.get("to")
        per_page = params.get("per_page", 10)
        page = params.get("page", 1)
        packages = self.connection.api.list_packages(app_id, from_, to, per_page, page)
        if packages is None:
            packages = {"total": 0, "packages": []}
        return packages

    def test(self):
        # Connectivity test hook required by the plugin framework.
        return {}
| 28.193548 | 95 | 0.607551 |
aa02beb54922c1f0e0ef4ae060a0db877558fbbb | 665 | py | Python | utils/db_migration/versions/from_1_2_to_1_3-add_task_owner.py | Yuanmessi/Bold-Falcon | 00fcaba0b3d9c462b9d20ecb256ff85db5d119e2 | [
"BSD-3-Clause"
] | 71 | 2016-11-13T03:26:45.000Z | 2022-02-22T08:13:04.000Z | utils/db_migration/versions/from_1_2_to_1_3-add_task_owner.py | Yuanmessi/Bold-Falcon | 00fcaba0b3d9c462b9d20ecb256ff85db5d119e2 | [
"BSD-3-Clause"
] | 3 | 2021-07-01T08:09:05.000Z | 2022-01-28T03:38:36.000Z | utils/db_migration/versions/from_1_2_to_1_3-add_task_owner.py | Yuanmessi/Bold-Falcon | 00fcaba0b3d9c462b9d20ecb256ff85db5d119e2 | [
"BSD-3-Clause"
] | 36 | 2016-12-13T11:37:56.000Z | 2021-11-11T12:20:10.000Z | # Copyright (C) 2010-2014 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
"""Database migration from Cuckoo 1.2 to Cuckoo 1.3.
Added task owner used by the Distributed API.
Revision ID: 3aa42d870199
Revises: 18eee46c6f81
Create Date: 2014-12-04 11:19:49.388410
"""
# Revision identifiers, used by Alembic.
revision = "3aa42d870199"
down_revision = "495d5a6edef3"
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Add the nullable "owner" column (used by the Distributed API) to tasks.
    op.add_column("tasks", sa.Column("owner", sa.String(length=64), nullable=True))
def downgrade():
    # Revert the 1.3 schema change by dropping the "owner" column again.
    op.drop_column("tasks", "owner")
| 24.62963 | 83 | 0.741353 |
8ad8460075ad777257aab0f660c49aa193c072d4 | 804 | py | Python | lib/transactions/tests/test_commands.py | muffinresearch/solitude | 6cb37f591956111b54e5c4098602be21c8f4b438 | [
"BSD-3-Clause"
] | null | null | null | lib/transactions/tests/test_commands.py | muffinresearch/solitude | 6cb37f591956111b54e5c4098602be21c8f4b438 | [
"BSD-3-Clause"
] | null | null | null | lib/transactions/tests/test_commands.py | muffinresearch/solitude | 6cb37f591956111b54e5c4098602be21c8f4b438 | [
"BSD-3-Clause"
] | null | null | null | import csv
from datetime import datetime
from tempfile import NamedTemporaryFile
from nose.tools import eq_
import test_utils
from lib.transactions.models import Transaction
from lib.transactions.management.commands.stats_log import generate_log
from lib.sellers.tests.utils import make_seller_paypal
class TestLog(test_utils.TestCase):
    """Tests for the stats_log management command."""

    def test_filter(self):
        """generate_log writes a CSV whose header row begins with
        'version' and whose first data row carries the transaction uuid."""
        seller, paypal, product = make_seller_paypal('some:other:uuid')
        self.first = Transaction.objects.create(provider=1,
            seller_product=product, uuid='uuid')
        # Borrow a unique temp path; the file itself is (re)created by
        # generate_log below.
        log_path = NamedTemporaryFile().name
        generate_log(datetime.today(), log_path)
        with open(log_path, 'rb') as source:
            rows = csv.reader(source)
            eq_(next(rows)[0], 'version')
            eq_(next(rows)[1], 'uuid')
| 32.16 | 71 | 0.710199 |
3fbd06d7ccdfa2baf16ea29fe18a8571bab807b5 | 6,479 | py | Python | src/container/mesos-manager/mesos_manager/common/logger.py | casek14/contrail-controller | 18e2572635370b3cb6da2731af049cbeb934f2bb | [
"Apache-2.0"
] | 1 | 2019-01-11T06:16:10.000Z | 2019-01-11T06:16:10.000Z | src/container/mesos-manager/mesos_manager/common/logger.py | chnyda/contrail-controller | 398f13bb5bad831550389be6ac3eb3e259642664 | [
"Apache-2.0"
] | null | null | null | src/container/mesos-manager/mesos_manager/common/logger.py | chnyda/contrail-controller | 398f13bb5bad831550389be6ac3eb3e259642664 | [
"Apache-2.0"
] | 1 | 2020-06-08T11:50:36.000Z | 2020-06-08T11:50:36.000Z | #
# Copyright (c) 2016 Juniper Networks, Inc. All rights reserved.
#
"""
Contrail Mesos Manager logger
"""
import logging
import socket
from cfgm_common.uve.nodeinfo.ttypes import NodeStatusUVE, NodeStatus
import discoveryclient.client as client
from mesos_manager.sandesh.mesos_manager import ttypes as sandesh
from pysandesh.connection_info import ConnectionState
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
from pysandesh.sandesh_base import Sandesh, SandeshSystem
from pysandesh.sandesh_logger import SandeshLogger
from sandesh_common.vns.constants import (ModuleNames, Module2NodeType,
NodeTypeNames, INSTANCE_ID_DEFAULT, HttpPortMesosManager)
from sandesh_common.vns.ttypes import Module
class MesosManagerLogger(object):
    """Sandesh-backed logger for the Contrail Mesos Manager.

    Wraps the Sandesh logging machinery behind severity-named helpers
    (emergency/alert/critical/error/warning/notice/info/debug).  The
    higher severities additionally mirror every message to syslog;
    info/debug only go through the Sandesh log function.
    """
    def __init__(self, args=None):
        # args: parsed configuration namespace; its attributes (worker_id,
        # disc_server_*, collectors, log_*, sandesh_*) are read throughout.
        self._args = args
        # Initialize module parameters.
        self.module = {}
        self.module["id"] = Module.MESOS_MANAGER
        self.module["name"] = ModuleNames[self.module["id"]]
        self.module["node_type"] = Module2NodeType[self.module["id"]]
        self.module["node_type_name"] = NodeTypeNames[self.module["node_type"]]
        self.module["hostname"] = socket.gethostname()
        self.module["table"] = "ObjectConfigNode"
        if self._args.worker_id:
            self.module["instance_id"] = self._args.worker_id
        else:
            self.module["instance_id"] = INSTANCE_ID_DEFAULT
        # Initialize discovery client
        # NOTE(review): "discovery" is only set when both server ip and port
        # are configured, but sandesh_init() reads it unconditionally --
        # a missing discovery config would raise KeyError there; confirm.
        if self._args.disc_server_ip and self._args.disc_server_port:
            self.module["discovery"] = client.DiscoveryClient(
                self._args.disc_server_ip, self._args.disc_server_port,
                self.module["name"])
        # Init Sandesh.
        self.sandesh_init()
    def syslog(self, log_msg, level):
        # Log to syslog (via the python logger Sandesh wraps), translating
        # the Sandesh level to the python logging level.
        self._sandesh.logger().log(
            SandeshLogger.get_py_logger_level(level), log_msg)
    def log(self, log_msg, level=SandeshLevel.SYS_DEBUG, fun=None):
        # If a sandesh function is provided, use the function.
        # If not, revert to syslog.
        if fun:
            log = fun(level=level, log_msg=log_msg, sandesh=self._sandesh)
            log.send(sandesh=self._sandesh)
        else:
            self.syslog(log_msg, level)
    # EMERGENCY.
    def emergency(self, log_msg, log_fun=None):
        log_level = SandeshLevel.SYS_EMERG
        logging_fun = log_fun if log_fun else sandesh.MesosManagerEmergencyLog
        # Log to syslog.
        self.syslog(log_msg, log_level)
        # Log using the desired logging function.
        self.log(log_msg, level=log_level, fun=logging_fun)
    # ALERT.
    def alert(self, log_msg, log_fun=None):
        log_level = SandeshLevel.SYS_ALERT
        logging_fun = log_fun if log_fun else sandesh.MesosManagerAlertLog
        # Log to syslog.
        self.syslog(log_msg, log_level)
        # Log using the desired logging function.
        self.log(log_msg, level=log_level, fun=logging_fun)
    # CRITICAL.
    def critical(self, log_msg, log_fun=None):
        log_level = SandeshLevel.SYS_CRIT
        logging_fun = log_fun if log_fun else sandesh.MesosManagerCriticalLog
        # Log to syslog.
        self.syslog(log_msg, log_level)
        # Log using the desired logging function.
        self.log(log_msg, level=log_level, fun=logging_fun)
    # ERROR.  (The original comment here said "CRITICAL", a copy/paste slip.)
    def error(self, log_msg, log_fun=None):
        log_level = SandeshLevel.SYS_ERR
        logging_fun = log_fun if log_fun else sandesh.MesosManagerErrorLog
        # Log to syslog.
        self.syslog(log_msg, log_level)
        # Log using the desired logging function.
        self.log(log_msg, level=log_level, fun=logging_fun)
    # WARNING.
    def warning(self, log_msg, log_fun=None):
        log_level = SandeshLevel.SYS_WARN
        logging_fun = log_fun if log_fun else sandesh.MesosManagerWarningLog
        # Log to syslog.
        self.syslog(log_msg, log_level)
        # Log using the desired logging function.
        self.log(log_msg, level=log_level, fun=logging_fun)
    # NOTICE.
    def notice(self, log_msg, log_fun=None):
        log_level = SandeshLevel.SYS_NOTICE
        logging_fun = log_fun if log_fun else sandesh.MesosManagerNoticeLog
        # Log to syslog.
        self.syslog(log_msg, log_level)
        # Log using the desired logging function.
        self.log(log_msg, level=log_level, fun=logging_fun)
    # INFO.  (No direct syslog mirror at this severity.)
    def info(self, log_msg, log_fun=None):
        log_level = SandeshLevel.SYS_INFO
        logging_fun = log_fun if log_fun else sandesh.MesosManagerInfoLog
        # Log using the desired logging function.
        self.log(log_msg, level=log_level, fun=logging_fun)
    # DEBUG.  (No direct syslog mirror at this severity.)
    def debug(self, log_msg, log_fun=None):
        log_level = SandeshLevel.SYS_DEBUG
        logging_fun = log_fun if log_fun else sandesh.MesosManagerDebugLog
        # Log using the desired logging function.
        self.log(log_msg, level=log_level, fun=logging_fun)
    def sandesh_init(self):
        """ Init Sandesh """
        self._sandesh = Sandesh()
        # Reset sandesh send rate limit value.
        if self._args.sandesh_send_rate_limit is not None:
            SandeshSystem.set_sandesh_send_rate_limit(
                self._args.sandesh_send_rate_limit)
        # Initialize Sandesh generator.
        self._sandesh.init_generator(
            self.module['name'], self.module['hostname'],
            self.module['node_type_name'], self.module['instance_id'],
            self._args.collectors, 'mesos_manager_context',
            int(self._args.http_server_port),
            ['cfgm_common', 'mesos_manager.sandesh'],
            self.module['discovery'], logger_class=self._args.logger_class,
            logger_config_file=self._args.logging_conf,
            config=self._args.sandesh_config)
        # Set Sandesh logging params.
        self._sandesh.set_logging_params(
            enable_local_log=self._args.log_local,
            category=self._args.log_category,
            level=self._args.log_level, file=self._args.log_file,
            enable_syslog=self._args.use_syslog,
            syslog_facility=self._args.syslog_facility)
        # Connect to collector.
        ConnectionState.init(self._sandesh, self.module['hostname'],
            self.module['name'], self.module['instance_id'],
            staticmethod(ConnectionState.get_process_state_cb),
            NodeStatusUVE, NodeStatus, self.module['table'])
| 35.79558 | 79 | 0.672943 |
20b8ffc062982e1686b34d805376623342b10fb2 | 9,513 | py | Python | pandas/tests/indexes/timedeltas/test_constructors.py | oricou/pandas | 9405e58d9268041f5416711c051cf5429a19bf49 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 4 | 2020-03-31T23:31:24.000Z | 2021-08-06T13:47:39.000Z | pandas/tests/indexes/timedeltas/test_constructors.py | oricou/pandas | 9405e58d9268041f5416711c051cf5429a19bf49 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 6 | 2021-05-31T01:10:55.000Z | 2021-07-19T00:37:03.000Z | pandas/tests/indexes/timedeltas/test_constructors.py | oricou/pandas | 9405e58d9268041f5416711c051cf5429a19bf49 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2 | 2021-06-16T07:19:12.000Z | 2021-12-16T10:24:44.000Z | from datetime import timedelta
import numpy as np
import pytest
import pandas as pd
from pandas import (
Timedelta,
TimedeltaIndex,
timedelta_range,
to_timedelta,
)
import pandas._testing as tm
from pandas.core.arrays.timedeltas import (
TimedeltaArray,
sequence_to_td64ns,
)
class TestTimedeltaIndex:
    """Constructor tests for TimedeltaIndex / TimedeltaArray.

    Covers input validation (datetime64 data, bad units, bad dtypes),
    frequency inference/validation, float-to-nanosecond conversion, and
    round-tripping through other index types.
    """
    def test_array_of_dt64_nat_raises(self):
        # GH#39462
        nat = np.datetime64("NaT", "ns")
        arr = np.array([nat], dtype=object)
        # TODO: should be TypeError?
        msg = "Invalid type for timedelta scalar"
        with pytest.raises(ValueError, match=msg):
            TimedeltaIndex(arr)
        with pytest.raises(ValueError, match=msg):
            TimedeltaArray._from_sequence(arr)
        with pytest.raises(ValueError, match=msg):
            sequence_to_td64ns(arr)
    @pytest.mark.parametrize("unit", ["Y", "y", "M"])
    def test_unit_m_y_raises(self, unit):
        msg = "Units 'M', 'Y', and 'y' are no longer supported"
        with pytest.raises(ValueError, match=msg):
            TimedeltaIndex([1, 3, 7], unit)
    def test_int64_nocopy(self):
        # GH#23539 check that a copy isn't made when we pass int64 data
        # and copy=False
        arr = np.arange(10, dtype=np.int64)
        tdi = TimedeltaIndex(arr, copy=False)
        # .base identity proves the index shares the original buffer.
        assert tdi._data._data.base is arr
    def test_infer_from_tdi(self):
        # GH#23539
        # fast-path for inferring a frequency if the passed data already
        # has one
        tdi = timedelta_range("1 second", periods=10 ** 7, freq="1s")
        result = TimedeltaIndex(tdi, freq="infer")
        assert result.freq == tdi.freq
        # check that inferred_freq was not called by checking that the
        # value has not been cached
        assert "inferred_freq" not in getattr(result, "_cache", {})
    def test_infer_from_tdi_mismatch(self):
        # GH#23539
        # fast-path for invalidating a frequency if the passed data already
        # has one and it does not match the `freq` input
        tdi = timedelta_range("1 second", periods=100, freq="1s")
        msg = (
            "Inferred frequency .* from passed values does "
            "not conform to passed frequency"
        )
        with pytest.raises(ValueError, match=msg):
            TimedeltaIndex(tdi, freq="D")
        with pytest.raises(ValueError, match=msg):
            # GH#23789
            TimedeltaArray(tdi, freq="D")
        with pytest.raises(ValueError, match=msg):
            TimedeltaIndex(tdi._data, freq="D")
        with pytest.raises(ValueError, match=msg):
            TimedeltaArray(tdi._data, freq="D")
    def test_dt64_data_invalid(self):
        # GH#23539
        # passing tz-aware DatetimeIndex raises, naive or ndarray[datetime64]
        # raise as of GH#29794
        dti = pd.date_range("2016-01-01", periods=3)
        msg = "cannot be converted to timedelta64"
        with pytest.raises(TypeError, match=msg):
            TimedeltaIndex(dti.tz_localize("Europe/Brussels"))
        with pytest.raises(TypeError, match=msg):
            TimedeltaIndex(dti)
        with pytest.raises(TypeError, match=msg):
            TimedeltaIndex(np.asarray(dti))
    def test_float64_ns_rounded(self):
        # GH#23539 without specifying a unit, floats are regarded as nanos,
        # and fractional portions are truncated
        tdi = TimedeltaIndex([2.3, 9.7])
        expected = TimedeltaIndex([2, 9])
        tm.assert_index_equal(tdi, expected)
        # integral floats are non-lossy
        tdi = TimedeltaIndex([2.0, 9.0])
        expected = TimedeltaIndex([2, 9])
        tm.assert_index_equal(tdi, expected)
        # NaNs get converted to NaT
        tdi = TimedeltaIndex([2.0, np.nan])
        expected = TimedeltaIndex([Timedelta(nanoseconds=2), pd.NaT])
        tm.assert_index_equal(tdi, expected)
    def test_float64_unit_conversion(self):
        # GH#23539 fractional day counts survive the unit conversion
        tdi = TimedeltaIndex([1.5, 2.25], unit="D")
        expected = TimedeltaIndex([Timedelta(days=1.5), Timedelta(days=2.25)])
        tm.assert_index_equal(tdi, expected)
    def test_construction_base_constructor(self):
        # pd.Index should dispatch timedelta-like data to TimedeltaIndex.
        arr = [Timedelta("1 days"), pd.NaT, Timedelta("3 days")]
        tm.assert_index_equal(pd.Index(arr), TimedeltaIndex(arr))
        tm.assert_index_equal(pd.Index(np.array(arr)), TimedeltaIndex(np.array(arr)))
        arr = [np.nan, pd.NaT, Timedelta("1 days")]
        tm.assert_index_equal(pd.Index(arr), TimedeltaIndex(arr))
        tm.assert_index_equal(pd.Index(np.array(arr)), TimedeltaIndex(np.array(arr)))
    def test_constructor(self):
        # Mixed scalar-like inputs all normalize to the same index.
        expected = TimedeltaIndex(
            [
                "1 days",
                "1 days 00:00:05",
                "2 days",
                "2 days 00:00:02",
                "0 days 00:00:03",
            ]
        )
        result = TimedeltaIndex(
            [
                "1 days",
                "1 days, 00:00:05",
                np.timedelta64(2, "D"),
                timedelta(days=2, seconds=2),
                pd.offsets.Second(3),
            ]
        )
        tm.assert_index_equal(result, expected)
        # unicode
        result = TimedeltaIndex(
            [
                "1 days",
                "1 days, 00:00:05",
                np.timedelta64(2, "D"),
                timedelta(days=2, seconds=2),
                pd.offsets.Second(3),
            ]
        )
        expected = TimedeltaIndex(
            ["0 days 00:00:00", "0 days 00:00:01", "0 days 00:00:02"]
        )
        tm.assert_index_equal(TimedeltaIndex(range(3), unit="s"), expected)
        expected = TimedeltaIndex(
            ["0 days 00:00:00", "0 days 00:00:05", "0 days 00:00:09"]
        )
        tm.assert_index_equal(TimedeltaIndex([0, 5, 9], unit="s"), expected)
        expected = TimedeltaIndex(
            ["0 days 00:00:00.400", "0 days 00:00:00.450", "0 days 00:00:01.200"]
        )
        tm.assert_index_equal(TimedeltaIndex([400, 450, 1200], unit="ms"), expected)
    def test_constructor_iso(self):
        # GH #21877 ISO-8601 duration strings are parsed by to_timedelta
        expected = timedelta_range("1s", periods=9, freq="s")
        durations = [f"P0DT0H0M{i}S" for i in range(1, 10)]
        result = to_timedelta(durations)
        tm.assert_index_equal(result, expected)
    def test_constructor_coverage(self):
        # fractional periods are truncated
        rng = timedelta_range("1 days", periods=10.5)
        exp = timedelta_range("1 days", periods=10)
        tm.assert_index_equal(rng, exp)
        msg = "periods must be a number, got foo"
        with pytest.raises(TypeError, match=msg):
            timedelta_range(start="1 days", periods="foo", freq="D")
        msg = (
            r"TimedeltaIndex\(\) must be called with a collection of some kind, "
            "'1 days' was passed"
        )
        with pytest.raises(TypeError, match=msg):
            TimedeltaIndex("1 days")
        # generator expression
        gen = (timedelta(i) for i in range(10))
        result = TimedeltaIndex(gen)
        expected = TimedeltaIndex([timedelta(i) for i in range(10)])
        tm.assert_index_equal(result, expected)
        # NumPy string array
        strings = np.array(["1 days", "2 days", "3 days"])
        result = TimedeltaIndex(strings)
        expected = to_timedelta([1, 2, 3], unit="d")
        tm.assert_index_equal(result, expected)
        from_ints = TimedeltaIndex(expected.asi8)
        tm.assert_index_equal(from_ints, expected)
        # non-conforming freq
        msg = (
            "Inferred frequency None from passed values does not conform to "
            "passed frequency D"
        )
        with pytest.raises(ValueError, match=msg):
            TimedeltaIndex(["1 days", "2 days", "4 days"], freq="D")
        msg = (
            "Of the four parameters: start, end, periods, and freq, exactly "
            "three must be specified"
        )
        with pytest.raises(ValueError, match=msg):
            timedelta_range(periods=10, freq="D")
    def test_constructor_name(self):
        idx = timedelta_range(start="1 days", periods=1, freq="D", name="TEST")
        assert idx.name == "TEST"
        # GH10025
        idx2 = TimedeltaIndex(idx, name="something else")
        assert idx2.name == "something else"
    def test_constructor_no_precision_raises(self):
        # GH-24753, GH-24739
        msg = "with no precision is not allowed"
        with pytest.raises(ValueError, match=msg):
            TimedeltaIndex(["2000"], dtype="timedelta64")
        with pytest.raises(ValueError, match=msg):
            pd.Index(["2000"], dtype="timedelta64")
    def test_constructor_wrong_precision_raises(self):
        # only nanosecond-precision timedelta64 is supported
        msg = r"dtype timedelta64\[us\] cannot be converted to timedelta64\[ns\]"
        with pytest.raises(ValueError, match=msg):
            TimedeltaIndex(["2000"], dtype="timedelta64[us]")
    def test_explicit_none_freq(self):
        # Explicitly passing freq=None is respected
        tdi = timedelta_range(1, periods=5)
        assert tdi.freq is not None
        result = TimedeltaIndex(tdi, freq=None)
        assert result.freq is None
        result = TimedeltaIndex(tdi._data, freq=None)
        assert result.freq is None
    def test_from_categorical(self):
        # round-trip through Categorical / CategoricalIndex
        tdi = timedelta_range(1, periods=5)
        cat = pd.Categorical(tdi)
        result = TimedeltaIndex(cat)
        tm.assert_index_equal(result, tdi)
        ci = pd.CategoricalIndex(tdi)
        result = TimedeltaIndex(ci)
        tm.assert_index_equal(result, tdi)
| 34.34296 | 85 | 0.597393 |
816dd98bd22fbcd8dc82835cc6ea5dbf536948d0 | 7,622 | py | Python | OpenSeesAPI/Model/Element/GeomTransf.py | alok230460/Open | ccd7c43c82c13bc87e6c208302f7448060b856ba | [
"MIT",
"Unlicense"
] | 41 | 2015-09-08T09:41:13.000Z | 2022-03-26T08:40:02.000Z | OpenSeesAPI/Model/Element/GeomTransf.py | alok230460/Open | ccd7c43c82c13bc87e6c208302f7448060b856ba | [
"MIT",
"Unlicense"
] | 4 | 2015-09-11T17:20:46.000Z | 2016-05-02T20:42:58.000Z | OpenSeesAPI/Model/Element/GeomTransf.py | alok230460/Open | ccd7c43c82c13bc87e6c208302f7448060b856ba | [
"MIT",
"Unlicense"
] | 31 | 2015-09-11T17:01:36.000Z | 2021-11-08T17:48:27.000Z | """
This class is used to create the following OpenSees TCL Commands:
The following contain information about transfType? and the args required for each of the available geometric transformation types:
Linear Transformation
PDelta Transformation
Corotational Transformation
"""
__author__ = 'Nasser'
from OpenSeesAPI.OpenSees import OpenSees
class Linear(OpenSees):
    """Linear geometric transformation (OpenSees ``geomTransf Linear``).

    2D form: geomTransf Linear $transfTag
    3D form: geomTransf Linear $transfTag $vecxzX $vecxzY $vecxzZ

    The vecxz components define the local x-z plane of the element's
    local coordinate system; the local y-axis is the cross product of
    vecxz and the element x-axis.  The optional ``-jntOffset`` values
    described by the OpenSees documentation are not emitted by this
    wrapper.
    """
    def __init__(self, id, VectorX=None, VectorY=None, VectorZ=None, **kwargs):
        """
        id -- integer transformation tag.
        VectorX, VectorY, VectorZ -- vecxz components for the 3D form;
            leave all as None for the 2D form.
        kwargs -- extra attributes stored on the instance.
        """
        self._id = id
        self._VectorX = VectorX
        self._VectorY = VectorY
        self._VectorZ = VectorZ
        self.__dict__.update(kwargs)

        if self._VectorX is None:
            # 2D problem: no orientation vector is required.
            self._CommandLine = 'geomTransf Linear %d' % (self.id)
        else:
            # %g rather than %d so fractional vector components (e.g. 0.5)
            # are not silently truncated to integers in the generated TCL;
            # integer inputs render identically under both conversions.
            self._CommandLine = 'geomTransf Linear %d %g %g %g' % (
                self.id, self._VectorX, self._VectorY, self._VectorZ)
class PDelta(OpenSees):
    """P-Delta geometric transformation (OpenSees ``geomTransf PDelta``).

    2D form: geomTransf PDelta $transfTag
    3D form: geomTransf PDelta $transfTag $vecxzX $vecxzY $vecxzZ
             <-jntOffset $dXi $dYi $dZi $dXj $dYj $dZj>

    The vecxz components define the local x-z plane of the element's
    local coordinate system (see the OpenSees geomTransf docs); the
    optional joint offsets are specified in global coordinates.
    """
    def __init__(self, id, VectorX=None, VectorY=None, VectorZ=None, jinOffset = None, **kwargs):
        """
        id -- integer transformation tag.
        VectorX, VectorY, VectorZ -- vecxz components for the 3D form;
            leave all as None for the 2D form.
        jinOffset -- optional iterable of joint offset values appended
            after ``-jntOffset``.  (Parameter name kept as-is, typo and
            all, for backward compatibility with existing callers.)
        kwargs -- extra attributes stored on the instance.
        """
        self._id = id
        self._VectorX = VectorX
        self._VectorY = VectorY
        self._VectorZ = VectorZ
        self._jinOffset = jinOffset
        self.__dict__.update(kwargs)

        if self._VectorX is None:
            # 2D problem: no orientation vector is required.
            self._CommandLine = 'geomTransf PDelta %d' % (self.id)
        else:
            # %g rather than %d so fractional vector components (e.g. 0.5)
            # are not silently truncated to integers in the generated TCL;
            # integer inputs render identically under both conversions.
            if self._jinOffset is None:
                self._CommandLine = 'geomTransf PDelta %d %g %g %g' % (
                    self.id, self._VectorX, self._VectorY, self._VectorZ)
            else:
                self._CommandLine = 'geomTransf PDelta %d %g %g %g -jntOffset %s' % (
                    self.id, self._VectorX, self._VectorY, self._VectorZ,
                    ''.join([' %f' % x for x in self._jinOffset]))
class Corotational(OpenSees):
    """
    Builds the 'geomTransf Corotational' command line.

    For a two-dimensional problem:
    geomTransf Corotational $transfTag <-jntOffset $dXi $dYi $dXj $dYj>
    For a three-dimensional problem:
    geomTransf Corotational $transfTag $vecxzX $vecxzY $vecxzZ

    $transfTag integer tag identifying transformation
    $vecxzX $vecxzY $vecxzZ X, Y, and Z components of vecxz, the vector used to define the local x-z plane of the local-coordinate system. The local y-axis is defined by taking the cross product of the vecxz vector and the x-axis.
    These components are specified in the global-coordinate system X,Y,Z and define a vector that is in a plane parallel to the x-z plane of the local-coordinate system.
    These items need to be specified for the three-dimensional problem.
    $dXi $dYi joint offset values -- absolute offsets specified with respect to the global coordinate system for element-end node i (optional)
    $dXj $dYj joint offset values -- absolute offsets specified with respect to the global coordinate system for element-end node j (optional)

    The element coordinate system is specified as follows:
    The x-axis is the axis connecting the two element nodes; the y- and z-axes are then defined using a vector that lies on a plane parallel to the local x-z plane -- vecxz. The local y-axis is defined by taking the cross product of the vecxz vector and the x-axis. The z-axis by taking cross product of x and new y. The section is attached to the element such that the y-z coordinate system used to specify the section corresponds to the y-z axes of the element.
    """
    def __init__(self, id, VectorX=None, VectorY=None, VectorZ=None, **kwargs):
        self._id = id
        self._VectorX = VectorX
        self._VectorY = VectorY
        self._VectorZ = VectorZ
        self.__dict__.update(kwargs)
        # NOTE(review): reads 'self.id' (not '_id'); assumes the OpenSees base
        # class exposes an 'id' property -- confirm.
        if self._VectorX is None:
            # 2-D form: no vecxz components
            self._CommandLine = 'geomTransf Corotational %d'%(self.id)
        else:
            # 3-D form; NOTE(review): '%d' truncates non-integer vector components
            self._CommandLine = 'geomTransf Corotational %d %d %d %d'%(self.id, self._VectorX, self._VectorY, self._VectorZ)
6364ef749dd9d8398d497a38ec6dc5537127d2b5 | 8,427 | py | Python | python/tvm/contrib/hexagon.py | mozga-intel/tvm | 544724439efb9a795c92bd7ec9f7929e41c843c6 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 3 | 2021-05-08T17:04:39.000Z | 2021-07-11T17:40:54.000Z | python/tvm/contrib/hexagon.py | mozga-intel/tvm | 544724439efb9a795c92bd7ec9f7929e41c843c6 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | python/tvm/contrib/hexagon.py | mozga-intel/tvm | 544724439efb9a795c92bd7ec9f7929e41c843c6 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | 2 | 2021-09-30T21:06:03.000Z | 2022-02-25T00:52:12.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Utility for Hexagon backend"""
import functools as ft
import os
import tvm
import tvm.ir
import tvm.contrib.cc as cc
from .._ffi.registry import register_func
# Linking Hexagon shared libraries.
#
# link_shared(name-of-shared-library, list-of-objects, kw-args)
#
# To use a custom linker, define a function that returns the path to the
# linker, and pass it to 'register_linker':
#
# def custom_linker_path():
# return '/path/to/hexagon/linker'
#
# register_linker(custom_linker_path)
#
# Subsequent calls to 'link_shared' will use the newly registered linker.
# Root of the Hexagon toolchain; empty string when HEXAGON_TOOLCHAIN is unset
# (link_shared() reports a descriptive error in that case).
hexagon_toolchain_root = os.environ.get("HEXAGON_TOOLCHAIN") or "" # pylint: disable=invalid-name
# Default linker path: ${HEXAGON_TOOLCHAIN}/bin/hexagon-link.
hexagon_link_main = os.path.join( # pylint: disable=invalid-name
    hexagon_toolchain_root, "bin", "hexagon-link"
)
def register_linker(f):
    """Make *f* the provider of the Hexagon linker path.

    Subsequent calls to link_shared() will use the path returned by *f*.
    """
    override = True  # replace any previously registered provider
    return register_func("tvm.contrib.hexagon.hexagon_link", f, override)
@register_func("tvm.contrib.hexagon.hexagon_link")
def hexagon_link():
"""Return path to the Hexagon linker."""
return hexagon_link_main
@register_func("tvm.contrib.hexagon.link_shared")
def link_shared(so_name, objs, **kwargs):
"""Link shared library on Hexagon using the registered Hexagon linker.
Parameters
----------
so_name : str
Name of the shared library file.
objs : list[str,StringImm]
kwargs : additional arguments:
'verbose' - print additional information
Returns
-------
ret_val : int
This function returns 0 at the moment.
"""
# The list of object files can be passed as built-in Python strings,
# or as tvm.tir.StringImm's.
def to_str(s):
if isinstance(s, tvm.tir.StringImm):
return s.value
assert isinstance(s, str), 'argument "' + str(s) + '" should be a string or StrImm'
return s
objs = [to_str(s) for s in objs]
linker = tvm.get_global_func("tvm.contrib.hexagon.hexagon_link")()
if kwargs.get("verbose"):
print("tvm.contrib.hexagon.link_shared:")
print(" Using linker:", linker)
print(" Library name:", so_name)
print(" Object files:", objs)
if not os.access(linker, os.X_OK):
message = 'The linker "' + linker + '" does not exist or is not executable.'
if not os.environ.get("HEXAGON_TOOLCHAIN"):
message += (
" The environment variable HEXAGON_TOOLCHAIN is unset. Please export "
+ "HEXAGON_TOOLCHAIN in your environment, so that ${HEXAGON_TOOLCHAIN}/bin/"
+ "hexagon-link exists."
)
else:
message += (
" Please verify the value of the HEXAGON_LINKER environment variable "
+ '(currently set to "'
+ hexagon_toolchain_root
+ '").'
)
raise Exception(message)
libpath = os.path.join(hexagon_toolchain_root, "target", "hexagon", "lib", "v66", "G0")
cc.create_shared(
so_name,
objs,
# pylint: disable=bad-whitespace
options=[
"-Bdynamic",
"-shared",
"-export-dynamic",
os.path.join(libpath, "pic", "libgcc.so"),
],
cc=linker,
)
return 0
### VTCM
# Fixed size of the VTCM (Vector Tightly Coupled Memory) pool: 4 MiB.
vtcm_size = 4 * 1024 * 1024 # pylint: disable=invalid-name
@register_func("tvm.info.mem.local.vtcm")
def mem_info_vtcm():
# pylint: disable=bad-whitespace
return tvm.ir.make_node(
"MemoryInfo",
unit_bits=8,
max_num_bits=vtcm_size * 8,
max_simd_bits=128 * 8,
head_address=tvm.runtime.const(100, "uint32"),
)
def lower_vtcm_(get_alloc, get_free, def_align, func, mod, ctx): # pylint: disable=unused-argument
    """Generic VTCM allocation

    Parameters
    ----------
    get_alloc : function: tir.Allocate, int -> tir.expr (dtype='handle')
      The VTCM allocation function. It takes an Allocate statement, and the required
      alignment, and returns a pointer to the allocated VTCM buffer.
    get_free : function: tir.expr (dtype='handle') -> None
      The VTCM deallocation function. It takes the address of the allocated buffer
      and frees it. It returns no value.
    def_align : int
      The default alignment that will be passed to the allocation function, if the
      program does not specify the alignment via a 'storage_alignment' attribute.
    func : tir.PrimFunc
    mod : tvm.IRModule
    ctx : transform.PassContext

    Returns
    -------
    stmt : tvm.stmt
      Transformed function body.
    """

    vtcm_buffers = []   # stack of live local.vtcm buffer vars (pre-order push)
    alignments = {}     # buffer var -> stack of 'storage_alignment' values

    def buf_align(var):
        """Determine the alignment of the buffer with variable 'var'."""
        if var in alignments and alignments[var]:
            return alignments[var][-1]
        return def_align

    def visit(stmt):
        """Collect information about VTCM buffers and their alignments."""
        if isinstance(stmt, tvm.tir.AttrStmt):
            if stmt.attr_key == "storage_alignment":
                # setdefault replaces the original 'not in'/initialize dance
                alignments.setdefault(stmt.node, []).append(stmt.value)
        elif isinstance(stmt, tvm.tir.Allocate):
            scope = stmt.buffer_var.type_annotation.storage_scope
            if scope == "local.vtcm":
                vtcm_buffers.append(stmt.buffer_var)

    def mutate(stmt):
        """Insert calls to VTCM allocation and deallocation routines."""
        if isinstance(stmt, tvm.tir.AttrStmt):
            if stmt.attr_key == "storage_alignment":
                alignments[stmt.node].pop()
            return stmt
        if isinstance(stmt, tvm.tir.Allocate):
            var = stmt.buffer_var
            scope = var.type_annotation.storage_scope
            if scope == "local.vtcm":
                vtcm_buffers.pop()
            if var in vtcm_buffers:
                # Wrap the body: allocate, check for NULL, free on the way out.
                is_null = tvm.tir.call_intrin("bool", tvm.ir.Op.get("tir.isnullptr"), var)
                throw_error = tvm.tir.call_intrin(
                    "int32", tvm.ir.Op.get("tir.tvm_throw_last_error")
                )
                body_w_free = tvm.tir.SeqStmt([stmt.body, tvm.tir.Evaluate(get_free(var))])
                body_w_check = tvm.tir.IfThenElse(
                    is_null, tvm.tir.Evaluate(throw_error), body_w_free
                )
                return tvm.tir.LetStmt(
                    stmt.buffer_var, get_alloc(stmt, buf_align(var)), body_w_check
                )
            return stmt
        # BUGFIX: was '"..." + type(stmt) + "..."' which raises TypeError
        # (str + type) instead of the intended ValueError; wrap in str().
        raise ValueError("Wrong argument type (" + str(type(stmt)) + ") to 'mutate'")

    f = func.with_body(
        tvm.tir.stmt_functor.ir_transform(
            func.body, visit, mutate, ["tir.Allocate", "tir.AttrStmt"]
        )
    )
    return f
def ir_lower_vtcm():
    """Create a VTCM lowering pass.

    VTCM memory has to be allocated using special functions.
    """

    def _vtcm_alloc(stmt, align):
        # Allocation size is the product of all extents of the Allocate node.
        assert isinstance(stmt, tvm.tir.Allocate)
        num_elems = ft.reduce(lambda a, b: a * b, stmt.extents, 1)
        return tvm.tir.call_extern(
            "handle",
            "HexagonBackendAllocateVTCM",
            num_elems,
            align,
        )

    def _vtcm_free(var):
        return tvm.tir.call_extern("handle", "HexagonBackendFreeVTCM", var)

    # pylint: disable=bad-whitespace
    @tvm.tir.transform.prim_func_pass(opt_level=0, name="Lower VTCM pass")
    def transform(func, mod, ctx):
        return lower_vtcm_(_vtcm_alloc, _vtcm_free, 2048, func, mod, ctx)

    return transform
def ir_lower_vtcm_pass():
    """Return the (phase, pass) pair list registering VTCM lowering at phase 3."""
    lowering = ir_lower_vtcm()
    return [(3, lowering)]
| 33.708 | 99 | 0.630711 |
0b9a2ae0eb1c9482c63c8d182cb9124e6614941a | 40,802 | py | Python | lcp-gen2/util.py | glevand/txt-trusted-boot | 9ebaff953944375cbe735dc27b10b646e17a3a76 | [
"BSD-3-Clause"
] | null | null | null | lcp-gen2/util.py | glevand/txt-trusted-boot | 9ebaff953944375cbe735dc27b10b646e17a3a76 | [
"BSD-3-Clause"
] | null | null | null | lcp-gen2/util.py | glevand/txt-trusted-boot | 9ebaff953944375cbe735dc27b10b646e17a3a76 | [
"BSD-3-Clause"
] | 1 | 2021-11-17T17:36:15.000Z | 2021-11-17T17:36:15.000Z | #!/usr/bin/python
# Copyright (c) 2013, Intel Corporation. All rights reserved.
# using print() built infunction, disable print statement
from __future__ import print_function
try:
import os, sys
except ImportError:
raise ImportError, "import OS failed"
from defines import DEFINES
from struct import pack, unpack
import array
import M2Crypto
from asn1spec import *
# utility routines
class UTILS( object ):
def checkFileTag(self, file, expectedTag):
"""checkFileTag - Return True if the specified file starts with the specified tag, else False"""
# expectedTag either:
# PCRD+TAG = "PCRD" for .pcr files
# HASH_TAG = "HASH" for .hash files
# read the 1st 4 bytes from the file (i.e. the actualTag) and compare that to the expectedTag
#
tagLength = 4
myFile = open(file, "rb")
actualTag = myFile.read(tagLength)
myFile.close()
#print("UTILS::checkFileTag: file=%s expectedTag=%s actualTag=%s" % (file, expectedTag, actualTag)) # DBGDBG
if(actualTag == expectedTag):
return True
else:
return False
def invalidHashFileMsg(self, file, actHashAlg, expHashAlg, hashFileLength):
"""invalidHashFileMsg - display status bar msg if hash file is invalid"""
print ("Invalid hash format for this element's HashAlg. file %s, Exp Alg=%d, Act Alg=%d, Length=%d" %
(file, expHashAlg, actHashAlg, hashFileLength))
    # Some functions, such as Wx's ListBox.ChangeValue(), take a string,
    # but the data is stored as a list of strings,
    # so form a single string containing everything in the list.
def formStringFromListOfStrings(self, list):
"""formStringFromListOfStrings - return a single string formed from a list of strings"""
string = ''
index = 0
for eachString in list:
if(index != 0): # if not the 1st entry, need a LF before the new entry
string += "\n"
string += eachString
index += 1
#print("thisString=%s, string=%s" % (eachString, string))
return string
def formFileName(self, base, ext):
"""formFileName - return name formed from base.ext"""
seperator = '.'
filenameList = [base, ext]
return(seperator.join(filenameList))
# return True if file verified and key was extracted, else False
def verifyKeyFile(self, file, type, currentList):
"""verifyKeyFile - verify the key file and extract the key"""
# Need currentList.KeySize and currentList.PubKeyQx
key = self.getKeyFromFile(file, type)
expectedKeySize = int(currentList.KeySize) / 8
if type == DEFINES.KEY_FILE_TYPE['PUBLIC_ECDSA']:
# ECC public key is two value of the key size
match = len(key) == expectedKeySize *2
if match:
# key is flipped for little endian so the latter half is the Qx
currentList.PubKeyQx = key[expectedKeySize:expectedKeySize*2]
currentList.PubKeyQy = key[0:expectedKeySize]
elif type == DEFINES.KEY_FILE_TYPE['PUBLIC_RSASSA']:
match = len(key) == expectedKeySize
if match:
currentList.PubKeyData = key
else:
keylength = len(key)
match = len(key) == expectedKeySize
if not match:
print("Key size mismatch actual=%d expected=%d" %(len(key), expectedKeySize))
#if(type == KEY_FILE_TYPE_PRIVATE):
# expectedHeader = "-----BEGIN RSA PRIVATE KEY-----"
# expectedFooter = "-----END RSA PRIVATE KEY-----"
# expectedType = "Private"
#elif(type == KEY_FILE_TYPE_PUBLIC):
# expectedHeader = "-----BEGIN PUBLIC KEY-----"
# expectedFooter = "-----END PUBLIC KEY-----"
# expectedType = "Public"
#else:
# print("invalid key file parameter = %d specified" % (type)) # DBGDBG
# return False
#
#expHdrLen = len(expectedHeader)
#try:
# f = open(file, "rb")
# actualHeader = f.read(expHdrLen)
#except:
# self.StatusBar.SetStatusText("Error opening key file %s" % (file))
# # open failed, can't close
# return False
#
#if(actualHeader == expectedHeader):
# self.StatusBar.SetStatusText("") # clear any previous error msgs
#else:
# self.StatusBar.SetStatusText("File %s is not a %s key file. Expected header %s" % (file, expectedType, expectedHeader))
# f.close()
# return False
#
## key file header is OK
## read the rest of the file, including line endings
## then strip out the line endings and the footer
## Expect Unix style line emdings: 0x0A" per openssl .pem format
## but also handle Windows style line endings: 0x0D, 0x0A
## max file length for keySize=3072 is <3072 since could have different
## number of characters on each line and different len endings (0A or 0D0A)
## just don't want to read in a [potentially very large] user specified file size
#maxLength = 3072
#f.seek (0, os.SEEK_END)
#actualFileSize = f.tell() - expHdrLen - 1 # size of the rest of the file
#if(actualFileSize < maxLength):
# maxLength = actualFileSize
#
## read the base64 encoded data from the key file into a binary string
#f.seek (expHdrLen+1, os.SEEK_SET) # back to just after the header
#try:
# rawData = f.read(maxLength)
#except:
# self.StatusBar.SetStatusText("Error reading key data from file %s" % (file))
# f.close()
# return False
#
##print("verifyKeyFile: read %d bytes:\n%s" % (len(rawData), rawData)) # DBGDBG
##print("verifyKeyFile: read %d" % (len(rawData))) # DBGDBG
## Verify the footer
#if expectedFooter in rawData:
# self.StatusBar.SetStatusText("") # clear any previous error msgs
#else:
# self.StatusBar.SetStatusText("File %s is not a %s key file. Expected footer %s" %
# (file, expectedType, expectedFooter))
# f.close()
# return False
#
## strip off the footer
#footerPos = rawData.find('-') # find 1st '-'
#rawData = rawData[0:footerPos-1]
##print("verifyKeyFile: w/o footer %d bytes:\n%s" % (len(rawData), rawData)) # DBGDBG
##print("verifyKeyFile: w/o footer %d bytes" % (len(rawData))) # DBGDBG
#
## Verify that the file matches the current KeySize
##
## Key File sizes: 1024 2048 3072 for key + footer incl. LR/CR
## public file range: 240-260 420-440 590-610
## private file range: 850-870 1640-1685 2390-2430
##
#keySize = self.keySizeEdit.GetValue()
##print("verifyKeyFile: keySize = %s, type = %s, .pem's length = %d" % (keySize, expectedType, maxLength)) # DBGDBG
#currentList = self.pdef.getCurrentListObject()
#misMatch = False
#if(expectedType == "Public"):
# if(keySize == "1024"):
# if not ((240 <= maxLength) and (maxLength <= 260)):
# print("verifyKeyFile: Public key file, size not 1024!") # DBGDBG
# misMatch = True
# elif(keySize == "2048"):
# if not ((420 <= maxLength) and (maxLength <= 440)):
# print("verifyKeyFile: Public key file, size not 2048!") # DBGDBG
# misMatch = True
# elif(keySize == "3072"):
# if not ((590 <= maxLength) and (maxLength <= 610)):
# print("verifyKeyFile: Public key file, size not 3072!") # DBGDBG
# misMatch = True
# if(misMatch == True):
# self.StatusBar.SetStatusText("Key size mismatch. File %s is not %s." % (file, keySize))
# self.pubKeyFileEdit.SetValue("")
# f.close()
# return False
# else:
# keyData = self.decodePublicKeyModulus(file, keySize, expectedHeader, expectedFooter)
# currentList.PubKeyData = keyData # save the public key modulus
# #print("verifyKeyFile: Public Key. Length: %d. Data:\n%s" % (len(keyData), currentList.PubKeyData)) # DBGDBG
#
#elif(expectedType == "Private"):
# if(keySize == "1024"):
# if not ((850 <= maxLength) and (maxLength <= 870)):
# print("verifyKeyFile: Private key file, size not 1024!") # DBGDBG
# misMatch = True
# elif(keySize == "2048"):
# if not ((1640 <= maxLength) and (maxLength <= 1685)):
# print("verifyKeyFile: Private key file, size not 20481") # DBGDBG
# misMatch = True
# elif(keySize == "3072"):
# if not ((2390 <= maxLength) and (maxLength <= 2430)):
# print("verifyKeyFile: Private key file, size not 3072!") # DBGDBG
# misMatch = True
# if(misMatch == True):
# self.StatusBar.SetStatusText("Key size mismatch. File %s is not %s." % (file, keySize))
# self.pvtKeyFileEdit.SetValue("")
# f.close()
# return False
# # Note: Don't need to save the private key data, pycrypto reads it from the file directly
#
#f.close()
return match
# Verify that the file is a valid HASH_FILE with a HASH_TAG, hashAlg and hashSize
# [with 1 or more SHA1 hashes] - legacy support from the TPM1.2 LCP1 Spec
# or a raw data file [i.e. no header] with ONLY 1 20 bit SHA1 or 32 bit SHA256 hash
#
# if OK, return a list containing: [True, HashFileModeXXX] defined in defines.py
# else return a list containing: [False, HashFileModeNull]
# if file format or length is invalid
# or if the length indicates the hash does not correspond to the elements.HashAlg
#
#TODO: Bill: verifyHashFile- LCP2 spec deletes the HASH_FILE struct def for files with multiple hashes??
#TODO: Bill: verifyHashFile- still supporting files with multiple SHA1 hashes [with the hdr] or raw SHA1 or 256 files
#
def verifyHashFile(self, file, hashAlg):
"""verifyHashFile - return a list indicating if the file is valid and its type"""
# HASH_FILE's are structured as:
# - SHA1 or SHA256 hash data only, for raw SHA1 or SHA256 files with 1 hash
# - SHA1 hash files with a header [defined below] containing 1 or more SHA1 hashes
#
# Where the header is:
# HASH_TAG = 0x48534148 = "HASH"
# typedef struct {
# UINT32 tag HASH_TAG; # 4 bytes "HASH"
# UINT8 hashAlg ; # 1 byte SHA1_ALG = 4
# UINT8 hashSize ; # 1 byte SHA1 = 20
# UINT8 reserve[10] ; # 12 bytes
# SHA1 - SHA256 hash ; # 20 bytes
# } HASH_FILE; # ________
# File Size # 36 bytes
# read the 1st 4 bytes from the file (i.e. the actualTag) and compare that to the expectedTag
#
function = "verifyHashFile"
mode = DEFINES.HashFileMode['RawSHA256'] # default
hashFileLengthHdrSha1 = 36 # header + SHA1 hash data
hashFileLengthRawSha1 = DEFINES.DIGEST_SIZE['SHA1'] # raw hash files have only the hash data, no header
hashFileLengthRawSha256 = DEFINES.DIGEST_SIZE['SHA256']
#hashFileLengthRawSha384 = 48
#hashFileLengthRawSha512 = 64
try:
f = open(file, "rb")
except:
print("Error opening hash file %s" % (file))
return [False, DEFINES.HashFileMode['HdrNull']]
try:
hashAlgStr = (key for key,val in DEFINES.TPM_ALG_HASH.items() if hashAlg == val).next()
if hashAlgStr == 'SHA1_LEGACY':
hashAlgStr = 'SHA1'
except StopIteration:
print("Unsupported hash algorithm (%d)" %(hashAlg))
return [False, DEFINES.HashFileMode['HdrNull']]
#
# check the file size to determine the type of file being read
#
f.seek (0, os.SEEK_END)
actualSize = f.tell()
if(actualSize == hashFileLengthHdrSha1):
mode = DEFINES.HashFileMode['HdrSHA1']
hashFileLength = hashFileLengthHdrSha1
# read the data
#
f.seek (0, os.SEEK_SET) # back to the begininng
data = array.array ("B") # Load file into data array
try:
data.fromfile (f, hashFileLength)
except:
print ("Error reading hash from file %s" % (file))
f.close()
return [False, DEFINES.HashFileMode['HdrNull']]
if(hashAlg != DEFINES.TPM_ALG_HASH['SHA1'] and hashAlg != DEFINES.TPM_ALG_HASH['SHA1_LEGACY']):
self.invalidHashFileMsg(file, hashAlg, DEFINES.TPM_ALG_HASH['SHA1'], hashFileLength)
return [False, DEFINES.HashFileMode['HdrNull']]
expectedHashTag = "HASH" # 0x48415348
expectedSha1HashAlg = 4 # '\x04'
expectedSha1HashSize = 20 # '\x14'
actualHashTag = data[0:4].tostring() # 0:3 inclusive = 0:4 exclusive of 4
actualHashAlg = data[4]
actualHashSize = data[5]
if(actualHashTag != expectedHashTag):
# check if a raw file matching the element's HashAlg length
print ("File: %s invalid tag = %s, expected %s" % (file, actualHashTag, expectedHashTag))
return [False, DEFINES.HashFileMode['HdrNull']]
if(actualHashAlg != expectedSha1HashAlg):
print ("File: %s invalid hashAlg = 0x%x, expected 0x%x" %
(file, actualHashAlg, expectedSha1HashAlg))
return [False, DEFINES.HashFileMode['HdrNull']]
if(actualHashSize != expectedSha1HashSize):
print ("File: %s invalid hashSize = 0x%x, expected 0x%x" %
(file, actualHashSize, expectedSha1HashSize))
return [False, DEFINES.HashFileMode['HdrNull']]
elif actualSize == DEFINES.DIGEST_SIZE[hashAlgStr]:
modeStr = 'Raw' + hashAlgStr
mode = DEFINES.HashFileMode[modeStr]
hashFileLength = DEFINES.DIGEST_SIZE[hashAlgStr]
else:
self.invalidHashFileMsg(file, hashAlg, DEFINES.TPM_ALG_HASH[hashAlgStr], actualSize)
print ("File: %s invalid size = %d, expected %d" % (file, actualSize, DEFINES.DIGEST_SIZE[hashAlgStr]))
return [False, DEFINES.HashFileMode['HdrNull']]
print("verifyHashFile - HashAlg=%d, Mode=%d, Len=%d" % (hashAlg, mode, hashFileLength)) # DBGDBG
f.close()
# handle SHA1 files with headers
#if(mode == DEFINES.HashFileMode['HdrSHA1']):
#
#else:
# print("verifyHashFile - Error: invalid mode = %d, aborting." % (mode))
# return [False, DEFINES.HashFileMode['HdrNull']]
return [True, mode]
# Determine the type of and validate the pcrFile, except for the numHashes field
# This includes checking that the file's type and hashAlg matches the element's expected HashAlg
# Return [True, PcrFileModePcrXShaYYY if ok, see defines.py
# [False, PcrFileModeNull] othewise
#
def verifyPcrFile(self, file, elementExpAlg):
"""verifyPcrFile - Validate the pcrFile"""
# 2 types of PCR dump files are supported: PCRD and PCR2
# PCR Dump File format
# typedef struct {
# UINT32 tag PCRD_TAG; # 4 bytes "PCRD" = 0x44524350
# UINT8 hashAlg ; # 1 byte SHA1_ALG = 4 SHA256_ALG = 0x0b
# UINT8 hashSize ; # 1 byte SHA1 = 20 SHA256 = 32
# UINT8 numHashes ; # 1 byte number of hashes in the file
# UINT8 reserve[9] ; # 9 bytes
# SHA1 pcrs[24] ; # 20 bytes * numHashes
# } HASH_FILE; # ________
# File Size # 16 + (NumHashes * HashSize) bytes
# Typically all 24 PCRs included so size for SHA1 = 16 + 24*20 = 496 = 0x1f0
# LCP tool only requires the 1st 8 PCRs, if they are selected via pcr0to7SelectionBitMask
# I.e. if the bit mask 0-7 = 1 then that PCR is required
#
# - PCR2 PCR dump File format - from App. C.1.2
# typedef struct {
# UINT32 tag PCR2_TAG; # 4 bytes "PCR2" = 0x32524350
# UINT16 hashAlg ; # 2 bytes TPM_ALG_SHAXXXXX
# UINT8 count ; # 1 byte number of valid digests
# UINT8 reserve ; # 1 byte
# UINT16 size ; # 2 bytes size of hash
# union {
# HASH20 pcrs[24] ; # 20 bytes * 24 of which count are used per the pcrSelection mash
# HASH32 pcrs[24] ; # 32 bytes * 24 of which count are used per the pcrSelection mash
# } HASH_FILE; # ________
# File Size # 8 + (count * HashSize) bytes, assuming no holes in the mask
function = "verifyPcrFile"
expectedPcrdTag = "PCRD" # 0x44524350
expectedPcr2Tag = "PCR2" # 0x32524350
try:
f = open(file, "rb")
except:
print ("Error opening PCR data file %s" % (file))
return [False, DEFINES.PcrFileMode['Null']]
# Determine if this is a PCRD or PCR2 file
# Based on the type, ensure the file's HaskAlg matches the element's ExpAlg
f.seek (0, os.SEEK_SET) # back to the begininng
data = array.array ("B") # Load file into data array
try:
data.fromfile (f, DEFINES.PCRFileMinHdrSize)
except:
print ("Error reading PCR data from file %s" % (file))
f.close()
return [False, DEFINES.PcrFileMode['Null']]
try:
elementExpAlgStr = (key for key,val in DEFINES.TPM_ALG_HASH.items() if val == elementExpAlg).next()
except StopIteration:
print ("Unsupported hash algorithm %d" %(elementExpAlg))
fileHashTag = data[0:4].tostring() # 0:3 inclusive = 0:4 exclusive of 4
numHashes = data[6] # PCRD 'numHashes' same as PCR2 'count'
fileType = DEFINES.PcrFileMode['Null']
if(fileHashTag == expectedPcrdTag): # PCRD file
fileType = DEFINES.PcrFileMode['PcrdSHA1']
fileActualHashAlg = data[4] # HashAlg is a UINT8
if(elementExpAlg != DEFINES.TPM_ALG_HASH['SHA1']):
print ("%s file %s SHA1 hash algorithm does not match the PCONF element's expected algorithm: 0x%x" %
(expectedPcrdTag, file, elementExpAlg))
return [False, DEFINES.PcrFileMode['Pcrd']]
else:
actAlg = DEFINES.TPM_ALG_HASH['SHA1'] # LCP2 PCRD format only supports SHA1 per C.1.1
minFileLength = DEFINES.PCRDFileHdrSize + (8*DEFINES.DIGEST_SIZE['SHA1']) # min PCRD SHA1 file has 8 hash's
elif(fileHashTag == expectedPcr2Tag): # PCR2 file
# HashAlg in [4:5] is little endian so for current algs high byte=data[5]=0 and low byte=hashAlg
fileActualHashAlg = unpack('<H', bytearray(data[4:6]))[0]
#print("verifyPcrFile - elementExpAlg=%d, fileActualHashAlg=%x%x" % (elementExpAlg, fileActualHashAlgHi,fileActualHashAlgLow)) # DBGDBG
if(elementExpAlg == fileActualHashAlg):
actAlg = fileActualHashAlg
fileType = DEFINES.PcrFileMode['Pcr2']
minFileLength = DEFINES.PCR2FileHdrSize + (8*DEFINES.DIGEST_SIZE[elementExpAlgStr]) # min PCR2 SHA1 file has 8 hash's
else:
print("%s file: %s hashAlg: 0x%x%x does not match the PCONF element's expected algorithm: 0x%02x" %
(expectedPcr2Tag, file, data[4], data[5], elementExpAlg))
return [False, DEFINES.PcrFileMode['Null']]
else:
print ("File: %s invalid tag = %s, expected %s or %s" % (file, fileHashTag, expectedPcrdTag, expectedPcr2Tag))
return [False, DEFINES.PcrFileMode['Null']]
print("verifyPcrFile: %s, tag=%s alg=%i, numHashes=%i" % (file, fileHashTag, actAlg, numHashes)) # DBGDBG
# check the min file size
f.seek (0, os.SEEK_END)
actualFileSize = f.tell ()
if(actualFileSize < minFileLength):
print ("File: %s invalid size = %d, expected >= %d" % (file, actualFileSize, minFileLength))
f.close()
return [False, DEFINES.PcrFileMode['Null']]
# file looks ok, except for the numHashes field which is checked by build.hashPcrInfoFromFile() when it reads the data
# can't check that yet since don't know which PCRs will be selected
f.close()
return [True, fileType]
# Verify that the pcr file contains the expected number of pcr hashes
# Return True if ok or if no PCR's are selected, else return False
#
# file - is the specified pcr file
# fileType - indicates the type of PCR data file per the PcrFileModeXXXXX defines
# pcr0to7SelectionBitMask - is a mask indicating the selected PCRs
#
def verifyPcrInfoNumHashes(self, file, fileType, hashAlg, pcr0to7SelectionBitMask):
"""verifyPcrInfoNumHashes - verify the pcr file contains enough hashes"""
function = "verifyPcrInfoNumHashes"
# check for case when no PCR's were selected
# nothing to hash in that case
if(pcr0to7SelectionBitMask == 0):
print("Warning: No PCR was selected. Nothing to hash: pcr0to7SelectionBitMask=0")
print ("Note: No PCR was selected. If desired, select PCR's and click 'Apply PCR Selection'")
return True
try:
hashAlgStr = (key for key,val in DEFINES.TPM_ALG_HASH.items() if val == hashAlg).next()
except StopIteration:
print("verifyPcrInfoNumHashes - invalid fileType=%x hash alg!!!" % (file, fileType, hashAlg)) # Should NEVER get here
if(fileType == DEFINES.PcrFileMode['Pcrd']):
minFileLength = DEFINES.PCRDFileHdrSize + (8*DEFINES.DIGEST_SIZE['SHA1']) # min PCRD SHA1 file has 8 hash's
elif(fileType == DEFINES.PcrFileMode['Pcr2']):
minFileLength = DEFINES.PCR2FileHdrSize + (8*DEFINES.DIGEST_SIZE[hashAlgStr]) # min PCR2 SHA1 file has 8 hash's
else:
print("verifyPcrInfoNumHashes - invalid fileType=%x!!!" % (file, fileType)) # Should NEVER get here
print("verifyPcrInfoNumHashes %s, type %d, pcr0to7SelectionBitMask=0x%x" % (file, fileType, pcr0to7SelectionBitMask)) # DBGDBG
try:
f = open(file, "rb")
except:
print ("Error opening PCR data file %s" % (file))
return False
f.seek (0, os.SEEK_END)
actualFileSize = f.tell ()
f.seek (0, os.SEEK_SET) # back to the begininng
data = array.array ("B") # Load file into data array
try:
data.fromfile (f, minFileLength)
except:
print ("Error reading PCR data from file %s" % (file))
f.close()
return False
f.close()
numHashes = data[6] # same for both PCRD and PCR2 files
# now that we have NumHashes, check the actual file size
#print("verifyPcrInfoNumHashes - numHashes=%x" % (numHashes)) # DBGDBG
if(numHashes == 0):
print ("File: %s invalid numHashes = 0" % (file))
return False
if(fileType == DEFINES.PcrFileMode['Pcrd']):
expectedFileSize = DEFINES.PCRDFileHdrSize + (numHashes * DEFINES.DIGEST_SIZE['SHA1'])
elif(fileType == DEFINES.PcrFileMode['Pcr2']):
expectedFileSize = DEFINES.PCR2FileHdrSize + (numHashes * DEFINES.DIGEST_SIZE[hashAlgStr])
else:
print("verifyPcrInfoNumHashes - invalid fileType=%d" % (fileType)) # DBGDBG Should NEVER get here
return False
if(actualFileSize < expectedFileSize):
print ("File: %s invalid File size = 0x%x=%i, expected 0x%x" %
(file, actualFileSize, actualFileSize, expectedFileSize))
return False
# verify that numHashes >= the largest selected hash
# must have at least that many hashes in the PCR dump file
#print("verifyPcrInfoNumHashes - verify numHashes are in the file") # DBGDBG
mask = 0x80
bit = 8
while(bit > 0):
if(mask & pcr0to7SelectionBitMask):
break # found the largest selected PCR
else:
bit -= 1
mask >>= 1
if(bit > numHashes): # not enough hashes in the PCR dump file, abort
print ("Too few hashes in PCR dump file: %s, numHashes = 0x%x, but max selected hash = 0x%x" %
(file, numHashes, bit))
return False
return True
# Get hash data from the specified file and return it using global _GlobalHashData
# Note: no file format checking is done here as files were validated when added to the list
# Return False if the hash couldn't be extracted, else return True
#
def getHashFromFile(self, file, hashAlg):
"""getHashFromFile - validate the hash file and extract the hash data"""
global _GlobalHashData
print("getHashFromFile - file %s, HashAlg=%d" % (file, hashAlg)) # DBGDBG
result = self.verifyHashFile(file, hashAlg)
# verifyHashFile() returns a list with [status, fileType]
fileType = result[1]
if(result[0] == False):
return [] # should have detected this when file was selected
fileType = result[1]
data = array.array ("B") # Load file into data array
try:
hashAlgStr = (key for key,val in DEFINES.TPM_ALG_HASH.items() if hashAlg == val).next()
except StopIteration:
print("Unsupported hash algorithm (%d)" %(hashAlg))
if(fileType == DEFINES.HashFileMode['HdrNull']):
print("getHashFromFile - Error: invalid mode = %d, aborting." % (result[1]))
return []
elif(fileType == DEFINES.HashFileMode['HdrSHA1']):
hashFileLength = 36
elif(fileType in DEFINES.HashFileMode.values()): # All other raw modes
hashFileLength = DEFINES.DIGEST_SIZE[hashAlgStr]
else:
print("getHashFromFile - Error: invalid mode = %d, aborting." % (result[1]))
return []
try:
f = open(file, "rb")
f.seek (0, os.SEEK_SET) # beginning of file
data.fromfile (f, hashFileLength)
except:
print("getHashFromFile - Error reading hash from file %s" % (file))
f.close()
return []
f.close()
#print("getHashFromFile - data = %s, len=%d" % (data, len(data))) # DBGDBG
# handle all the flavors of hash files
if(fileType == DEFINES.HashFileMode['HdrSHA1']):
_GlobalHashData = data[16:36].tolist() # 20 bytes 16:36 exclusive
print("getHashFromFile: %s, Hdr tag=%s alg=%i, size=%i, hash=%s, len=%d" %
(file, data[0:4].tostring(), data[4], data[5], _GlobalHashData, len(_GlobalHashData))) # DBGDBG
else:
_GlobalHashData = data.tolist()
assert len(data) == DEFINES.DIGEST_SIZE[hashAlgStr], "Error: File size (%d bytes) mismatch with %s size (%d bytes)" %(len(data), hashAlgStr, DEFINES.DIGEST_SIZE[hashAlgStr])
print("getHashFromFile: %s, raw %s hash=%s, len=%d" % (file, hashAlgStr, _GlobalHashData, len(_GlobalHashData))) # DBGDBG
return _GlobalHashData;
# Get the specified PCR data (indicated by the thisPcr mask)
# Return False if couldn't read the hashes from the PCR file
# othewise return True
#
# file - is the specified pcr file
# fileType - indicates the type of PCR data file per the PcrFileModeXXXXX defines
# thisPcr - the specified PCR data to find [0 < thisPcr < 8]
# hashSize - is the size of the pcr data to extract
#
def getPcrInfoFromFile(self, file, fileType, thisPcr, hashSize, statusBar):
"""getPcrInfoFromFile - get the pcr data from the specified file"""
global _GlobalPcrHash
if(fileType == DEFINES.PcrFileMode['Pcrd']):
hdrSize = DEFINES.PCRDFileHdrSize
elif(fileType == DEFINES.PcrFileMode['Pcr2']):
hdrSize = DEFINES.PCR2FileHdrSize
else:
print("verifyPcrInfoNumHashes - invalid fileType=%d" % (fileType)) # DBGDBG Should NEVER get here
return False
#print("getPcrInfoFromFile %s type %d, pcr %d, hashSize=%d" % (file, fileType, thisPcr, hashSize)) # DBGDBG
pcrData = array.array ("B")
try:
f = open(file, "rb")
except:
print("getPcrInfoFromFile: open of file %s failed" % (file)) # should never get here, file's been opened for verif. earlier
return False
# read TPM1 or TPM2 PCR tag to determine the file format to use?
tag = f.read(4)
# read the selected pcr's hash data
if tag == 'PCR2':
# PCR2 file format
pos = hdrSize + (thisPcr * (hashSize + 2)) # There's a 2 byte size field precedes each PCR value.
f.seek (pos, os.SEEK_SET)
# Read the size of the PCR value and compare to the expected hash size
size = f.read(2)
pcrSize = unpack("<H", size)[0]
if pcrSize == hashSize:
pos += 2
else:
print ("Error reading hash from file %s at position %d with size %d" % (file, pos, hashSize))
f.close()
return False
elif tag == 'PCRD':
# PCR1 file format
pos = hdrSize + (thisPcr * hashSize)
else:
print ("Error invalid PCR tag format for file %s" % (file))
print("getPcrInfoFromFile - Read hash %d @pos%d, size %d" % (thisPcr, pos, hashSize)) # DBGDBG
try:
# calculate the start of the selected pcr's data
f.seek (pos, os.SEEK_SET) # back to the next hash where pos depends on if PCRD or PCR2 file
#print("getPcrInfoFromFile - Seek done") # DBGDBG
pcrData.fromfile(f, hashSize)
except:
print ("Error reading hash from file %s at position %d with size %d" % (file, pos, hashSize))
f.close()
return False
f.close()
_GlobalPcrHash = pcrData[0:hashSize].tolist() # extract the data
#print("getPcrInfoFromFile: %s, _GlobalPcrHash=%s" % (file, _GlobalPcrHash)) # DBGDBG
return _GlobalPcrHash
# Open pcrFile
# Copy selected PCR measurements (pcrSelect) to temp buffer
# calculate composite hash of temp buffer
# return the composite hash in _GlobalPcrHash
# return False if couldn't read the hashes from the PCR file
# othewise return True [including if pcr0to7SelectionBitMask = 0 so nothing to hash]
#
def hashPcrInfoFromFile(self, file, pcr0to7SelectionBitMask, numSelectedPcrs):
"""hashPcrInfoFromFile - hash the pcr data from the specified file and return the hash"""
# TODO: This function is use by PCONF legacy only?
# PCR Dump File format
# typedef struct {
# UINT32 tag PCRD_TAG; # 4 bytes "PCRD" = 0x44524340
# UINT8 hashAlg ; # 1 byte SHA1_ALG = 4 SHA256_ALG = 0x0b
# UINT8 hashSize ; # 1 byte SHA1 = 20 SHA256 = 32
# UINT8 numHashes ; # 1 byte number of hashes in the file
# UINT8 reserve[9] ; # 9 bytes
# SHA1 pcrs[24] ; # 20 bytes * numHashes
# } HASH_FILE; # ________
# File Size # 16 + (NumHashes * HashSize) bytes
# Typically all 24 PCRs included so size for SHA1 = 16 + 24*20 = 496 = 0x1f0
# LCP tool only requires the 1st 8 PCRs, if they are selected via pcr0to7SelectionBitMask
# I.e. if the bit mask 0-7 = 1 then that PCR is required
#
global _GlobalPcrHash
#_GlobalPcrHash = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] # DBGDBG
_GlobalPcrHash = b'0000000000000000000000000000000000000000'
# check for case when no PCR's were selected
# nothing to hash in that case
if(pcr0to7SelectionBitMask == 0):
print("Warning: No PCR was selected. Nothing to hash: pcr0to7SelectionBitMask=0")
print ("Note: No PCR was selected. If desired, select PCR's and click Update")
return True
try:
f = open(file, "rb")
except:
print ("Error opening PCR data file %s" % (file))
return False
# Get the file size of PCR dump
f.seek (0, os.SEEK_END)
actualFileSize = f.tell ()
f.seek (0, os.SEEK_SET) # back to the begininng
header = array.array("B") # Read PCR header to determine type
data = array.array ("B") # Load file into data array
# Read the header portion. 16-bytes should cover current PCRD and PCR2 file format.
try:
header.fromfile (f, 16) # PCRD header size is 16 bytes. PCR2 header size is 8 bytes.
except:
print ("Error reading PCR data from file %s" % (file))
f.close()
return False
# Read the number of valid hashes
numHashes = header[6]
# If there are no hash in the dump file, the PCR dump is not valid
if(numHashes == 0):
print ("File: %s invalid numHashes = 0" % (file))
return False
# now that we have NumHashes, check the actual file size
# Check the tag in the PCR file
tag = header[0:4].tostring()
if tag == 'PCR2':
expectedPcrHashSize = unpack("<H", header[8:10])[0] # every measurement in the PCR dump file should be the same size
pcrStartPos = 8 # position of the first PCR measurement in the file
minFileLength = pcrStartPos + (8 * (2 + expectedPcrHashSize)) # PCR2 file structure has 8 bytes followed by size and value for each measurement. min file has 8 hash's
expectedFileSize = pcrStartPos + (numHashes * (2 + expectedPcrHashSize)) # include the 2-byte size field for each PCR value
elif tag == 'PCRD':
expectedPcrHashSize = 0x14 # for TPM1 only SHA1 is supported which has length of 20 bytes.
pcrStartPos = 16 # position of the first PCR measurement in the file
minFileLength = pcrStartPos + (8 * expectedPcrHashSize) # PCRD file structure has 16 bytes followed by value of each measurements. min file has 8 hash's
expectedFileSize = pcrStartPos + (numHashes * expectedPcrHashSize)
# Check PCR dump for valid file size
if(actualFileSize < expectedFileSize):
print ("File: %s invalid FileSize = 0x%x=%i, expected 0x%x" %
(file, actualFileSize, actualFileSize, expectedFileSize))
return False
# read min pcr values
try:
f.seek (0, os.SEEK_SET)
data.fromfile (f, minFileLength)
except:
print ("Error reading PCR data from file %s" % (file))
f.close()
return False
f.close()
# verify that numHashes >= the largest selected hash
# must have at least that many hashes in the PCR dump file
mask = 0x80
bit = 8
while(bit > 0):
if(mask & pcr0to7SelectionBitMask):
break # found the largest selected PCR
else:
bit -= 1
mask >>= 1
if(bit > numHashes): # not enough hashes in the PCR dump file, abort
print ("Too few hashes in PCR dump file: %s, numHashes = 0x%x, but max selected hash = 0x%x" %
(file, numHashes, bit))
return False
# file looks ok, get the data
temp = array.array ("B") # temp array
hashLength = expectedPcrHashSize
numSelectedPcrs = 0
mask = 0x01
bit = 0
pos = 0
while(bit < 8):
if(mask & pcr0to7SelectionBitMask):
if tag == 'PCR2':
pos = pcrStartPos + 2 + bit * (2 + hashLength)
elif tag == 'PCRD':
pos = pcrStartPos + bit * hashLength
temp += data[pos:pos+hashLength]
numSelectedPcrs += 1
print("hashPcrInfoFromFile - Read hash %d, mask 0x%x from file %s, select=0x%x, numSelectedPcrs=%d, len(temp)=%d" %
(bit, mask, file, pcr0to7SelectionBitMask, numSelectedPcrs, len(temp))) # DBGDBG
mask <<= 1
bit += 1
pos += hashLength
#print("hashPcrInfoFromFile TypeOf: tempList=%s, tempList[0]=%s, _GlobalPcrHash=%s, _GlobalPcrHash[0]=%s" %
# (type(tempList), type(tempList[0]), type(_GlobalPcrHash), type(_GlobalPcrHash[0]))) # DBGDBG
#pcrHash = hashlib.sha1()
hashAlg = header[4] # TODO: is hashAlg determined by GUI or from PCR file.
hashAlgStr = None
try:
hashAlgStr = (key for key,val in DEFINES.TPM_ALG_HASH.items() if hashAlg == val).next()
except StopIteration:
print ("Error unsupported hash algorithm (%d)" %(hashAlg))
# Set hash algorithm
pcrHash = None
if 'SM3' in hashAlgStr:
pcrHash = sm3()
else:
pcrHash = M2Crypto.EVP.MessageDigest(hashAlgStr.lower())
if tag == 'PCR2':
pcrHash.update(temp)
elif tag == 'PCRD':
#pcrHash = hashlib.sha1()
pcrHash = M2Crypto.EVP.MessageDigest('sha1')
# The PCR composite hash consists of: TPM_PCR_COMPOSITE structure
# UINT16 sizeOfSelect # BigEndian = 00 03
# UINT8 pcr_select[3]
# UINT32 valueSize # BigEndian = 20 * NumberOfSelectedHashes
# UINT8 pcrValue[] # all the selected PCR hashes
data = pack("<BBBBB", 0, 3, pcr0to7SelectionBitMask, 0, 0) # pack sezeOfSelect and pcr_select[3]
pcrHash.update(data)
valueSize = numSelectedPcrs * DEFINES.DIGEST_SIZE['SHA1']
data = pack(">L", valueSize) # Note '>' for BigEndian packing of valueSize
pcrHash.update(data)
# pack pcrValue[]
pcrHash.update(temp)
# hash.digest() Returns the digest of the strings passed to the update() method so far.
# This is a string of digest_size bytes [which may contain non-ASCII characters, including null bytes]
# Note: cannot pass this string thru struct.pack() which takes ints
_GlobalPcrHash = pcrHash.digest()
#print("hashPcrInfoFromFile: %s, Generated hash: Length=%d HexData=%s " %
# (file, hashLength, pcrHash.hexdigest())) # DBGDBG
return _GlobalPcrHash
def getKeyFromFile(self, file, type):
# Read the key file from the
with open(file, 'rb') as kf:
mb = M2Crypto.BIO.MemoryBuffer(kf.read())
kf.close()
pem = mb.getvalue()
pem_lines = pem.split('\n')
key = None
# Find line index of BEGIN and END header/footer
der = ''
foundbegin = False
for line in pem_lines:
if ('END' in line) and ('PUBLIC' in line or 'PRIVATE' in line):
break
if foundbegin:
der += line.strip()
if ('BEGIN' in line) and ('PUBLIC' in line or 'PRIVATE' in line):
foundbegin = True
try: # in case ASN1 can't decode the ASN1 notation..
if type == DEFINES.KEY_FILE_TYPE['PRIVATE_RSASSA']:
asn, substrate = der_decoder.decode(der.decode('base64'), asn1Spec=RSAPrivateKey())
rsapvt = asn.getComponentByName('privateExponent') # returns univ.Integer()
octet = univ.OctetString(hexValue=format(int(rsapvt), '0x')) # convert to Octet
key = octet.asOctets()
elif type == DEFINES.KEY_FILE_TYPE['PUBLIC_RSASSA']:
# This decodes DER encoded ASN1 public key
asn, substrate = der_decoder.decode(der.decode('base64'), asn1Spec=SubjectPublicKeyInfo())
bits = asn.getComponentByName('subjectPublicKey')
# second level decode for RSAPublicKey()
bits_string = ''.join(map(str, bits))
octet = univ.OctetString(binValue=bits_string)
rsaasn, substrate = der_decoder.decode(octet.asOctets(), asn1Spec=RSAPublicKey())
rsapub = rsaasn.getComponentByName('modulus') # returns univ.Integer()
octet = univ.OctetString(hexValue=format(int(rsapub), '0x')) # convert to Octet
key = octet.asOctets()
elif type == DEFINES.KEY_FILE_TYPE['PRIVATE_ECDSA']:
asn, substrate = der_decoder.decode(der.decode('base64'), asn1Spec=ECPrivateKey())
ecpvt = asn.getComponentByName('privateKey') # returns univ.OctetString()
key = ecpvt.asOctets()
elif type == DEFINES.KEY_FILE_TYPE['PUBLIC_ECDSA']:
# This decodes DER encoded ASN1 public key
asn, substrate = der_decoder.decode(der.decode('base64'), asn1Spec=SubjectPublicKeyInfo())
bits = asn.getComponentByName('subjectPublicKey') # returns univ.BitString()
# DSAPublicKey is Integer() so no decoding is needed, but need to remove the prefix 0x04.
bits_string = ''.join(map(str, bits[8:])) # the first byte specifies the compress alg?
octet = univ.OctetString(binValue=bits_string)
key = octet.asOctets()
except Exception as ex:
print ("Exception: unable to decode pem file")
print (ex)
if key != None:
keyLE = key[::-1] # little endian
else:
keyLE = ''
return keyLE
# the last function in the file doesn't show up in the scope list in Understand for some reason!
def stub(self):
pass
| 44.062635 | 185 | 0.613769 |
25c24c6e06c47ee2c1647b16790ae37d675b9d9b | 7,888 | py | Python | docs/conf.py | KirillVladimirov/Glauci | a0548ff3fade71e01dacef3a420c13ddde42a0fb | [
"Apache-1.1"
] | null | null | null | docs/conf.py | KirillVladimirov/Glauci | a0548ff3fade71e01dacef3a420c13ddde42a0fb | [
"Apache-1.1"
] | 8 | 2019-03-19T11:00:52.000Z | 2019-03-19T19:37:00.000Z | docs/conf.py | KirillVladimirov/Glauci | a0548ff3fade71e01dacef3a420c13ddde42a0fb | [
"Apache-1.1"
] | null | null | null | # glauci_site documentation build configuration file, created by
# Sphinx build configuration for the glauci_site documentation.
#
# This file is execfile()d by Sphinx with the current directory set to its
# containing dir.  Only values that differ from the Sphinx defaults are set;
# consult the Sphinx configuration reference for all other available options.

# -- General configuration -----------------------------------------------------

# No Sphinx extensions are required for this project.
extensions = []

# Paths that contain templates, relative to this directory.
templates_path = ["_templates"]

# The suffix of source filenames.
source_suffix = ".rst"

# The master toctree document.
master_doc = "index"

# General information about the project.
project = "glauci_site"
copyright = """2019, Vladimirov Kirill"""

# The short X.Y version and the full release string.
version = "0.1"
release = "0.1"

# Patterns, relative to the source directory, to skip when looking for sources.
exclude_patterns = ["_build"]

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.
html_theme = "default"

# Paths that contain custom static files (such as style sheets).
html_static_path = ["_static"]

# Output file base name for the HTML help builder.
htmlhelp_basename = "glauci_sitedoc"

# -- Options for LaTeX output --------------------------------------------------

# No LaTeX overrides (paper size, point size, preamble) are needed.
latex_elements = {
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    (
        "index",
        "glauci_site.tex",
        "glauci_site Documentation",
        """Vladimirov Kirill""",
        "manual",
    )
]

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ("index", "glauci_site", "glauci_site Documentation", ["""Vladimirov Kirill"""], 1)
]

# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category).
texinfo_documents = [
    (
        "index",
        "glauci_site",
        "glauci_site Documentation",
        """Vladimirov Kirill""",
        "glauci_site",
        """Collecting words for smart learning""",
        "Miscellaneous",
    )
]
| 32.065041 | 87 | 0.700685 |
6ed65429649d9471a6cd277bde3bd19003beb8cd | 3,426 | py | Python | models/distances.py | smsharma/gamma-gp | 573a9a7ecbf71a1e6c0d20e3d6ef189538c8b4e5 | [
"MIT"
] | 4 | 2020-12-01T01:23:44.000Z | 2021-04-12T11:07:35.000Z | models/distances.py | smsharma/gamma-gp | 573a9a7ecbf71a1e6c0d20e3d6ef189538c8b4e5 | [
"MIT"
] | null | null | null | models/distances.py | smsharma/gamma-gp | 573a9a7ecbf71a1e6c0d20e3d6ef189538c8b4e5 | [
"MIT"
] | null | null | null | import gpytorch
import torch
class Distances:
    # Helper collection of great-circle (spherical) distance computations used
    # as a drop-in replacement for GPyTorch's Euclidean covar_dist.

    @staticmethod
    def default_postprocess_script(x):
        # Identity post-processing; used as the default hook below.
        # NOTE(review): referencing a staticmethod object as a default argument
        # (see covar_dist_gcd) is only directly callable on Python >= 3.10 —
        # confirm the project's minimum Python version.
        return x

    @classmethod
    def covar_dist_gcd(self, x1, x2, diag=False, last_dim_is_batch=False, dist_postprocess_func=default_postprocess_script, postprocess=True, lengthscale=1):
        # NOTE(review): this is a @classmethod whose first parameter is named
        # `self` but actually receives the class; conventionally `cls`.
        r"""
        This is a helper method for computing the Euclidean distance between
        all pairs of points in x1 and x2.
        Args:
            :attr:`x1` (Tensor `n x d` or `b1 x ... x bk x n x d`):
                First set of data.
            :attr:`x2` (Tensor `m x d` or `b1 x ... x bk x m x d`):
                Second set of data.
            :attr:`diag` (bool):
                Should we return the whole distance matrix, or just the diagonal? If True, we must have `x1 == x2`.
            :attr:`last_dim_is_batch` (tuple, optional):
                Is the last dimension of the data a batch dimension or not?
            :attr:`square_dist` (bool):
                Should we square the distance matrix before returning?
        Returns:
            (:class:`Tensor`, :class:`Tensor) corresponding to the distance matrix between `x1` and `x2`.
            The shape depends on the kernel's mode
            * `diag=False`
            * `diag=False` and `last_dim_is_batch=True`: (`b x d x n x n`)
            * `diag=True`
            * `diag=True` and `last_dim_is_batch=True`: (`b x d x n`)
        """
        if last_dim_is_batch:
            # Move the trailing data dimension into a batch dimension.
            x1 = x1.transpose(-1, -2).unsqueeze(-1)
            x2 = x2.transpose(-1, -2).unsqueeze(-1)
        x1_eq_x2 = torch.equal(x1, x2)
        # torch scripts expect tensors
        postprocess = torch.tensor(postprocess)
        res = None
        if diag:
            # Special case the diagonal because we can return all zeros most of the time.
            if x1_eq_x2:
                # Distance of every point to itself is zero.
                res = torch.zeros(*x1.shape[:-2], x1.shape[-2], dtype=x1.dtype, device=x1.device) / lengthscale
                if postprocess:
                    res = dist_postprocess_func(res)
                return res
            else:
                # Element-wise (paired) distance between rows of x1 and x2.
                res = self.gcd(x1, x2) / lengthscale
                if postprocess:
                    res = dist_postprocess_func(res)
                return res
        else:
            # Full pairwise distance matrix between x1 and x2.
            res = self.gcd_p(x1, x2) / lengthscale
            if postprocess:
                res = dist_postprocess_func(res)
            return res

    @staticmethod
    def gcd(ang1, ang2):
        """
        Calculate the great circle distance (haversine formula) between paired
        points, on a unit sphere.

        NOTE(review): torch trig functions operate in radians, so despite the
        original "decimal degrees" wording the (lon, lat) inputs must be in
        radians — confirm against callers.
        """
        # Each input is (n, 2) with columns (lon, lat); transposing unpacks them.
        lon1, lat1 = ang1.T
        lon2, lat2 = ang2.T
        dlon = lon2 - lon1
        dlat = lat2 - lat1
        # Haversine formula: a = sin^2(dlat/2) + cos(lat1)cos(lat2)sin^2(dlon/2)
        a = torch.sin(dlat / 2) ** 2 + torch.cos(lat1) * torch.cos(lat2) * torch.sin(dlon / 2) ** 2
        return 2 * torch.asin(torch.sqrt(a))

    @staticmethod
    def gcd_p(ang1, ang2):
        """
        Calculate the pairwise great circle distance matrix between all points
        of ang1 and all points of ang2, on a unit sphere.

        NOTE(review): as with gcd(), inputs are (lon, lat) in radians.
        """
        x_1, x_2 = ang1, ang2
        # Broadcast to compute all pairs: result shape is (n1, n2).
        x_1_u = torch.unsqueeze(x_1, -2)
        x_2_u = torch.unsqueeze(x_2, -3)
        d = torch.sin((x_2_u - x_1_u) / 2) ** 2
        lat1 = torch.unsqueeze(x_1[:, 1], -1)
        lat2 = torch.unsqueeze(x_2[:, 1], -2)
        cos_prod = torch.cos(lat1) * torch.cos(lat2)
        # d[..., 0] is the lon term, d[..., 1] the lat term of the haversine.
        a = d[:, :, 0] * cos_prod + d[:, :, 1]
        return 2 * torch.asin(torch.sqrt(a))
| 34.959184 | 157 | 0.549329 |
dbc92ec7af41f3500c5664f1396cbec40e1379f2 | 6,581 | py | Python | v0.9.2/signrpc/signer_pb2_grpc.py | lncm/lnd-proto | 8caa6558efe043413560f807ef44b11699901d76 | [
"MIT"
] | 2 | 2020-02-10T09:46:06.000Z | 2020-04-09T19:30:30.000Z | v0.9.2/signrpc/signer_pb2_grpc.py | lncm/lnd-rpc | 8caa6558efe043413560f807ef44b11699901d76 | [
"MIT"
] | 1 | 2020-02-04T16:34:35.000Z | 2020-02-04T16:34:35.000Z | v0.9.2/signrpc/signer_pb2_grpc.py | lncm/lnd-proto | 8caa6558efe043413560f807ef44b11699901d76 | [
"MIT"
] | null | null | null | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from signrpc import signer_pb2 as signrpc_dot_signer__pb2
class SignerStub(object):
  """Client-side stub for the signrpc.Signer gRPC service.

  One callable attribute is registered per RPC method; each callable
  serializes the request protobuf and deserializes the server's response.
  (Generated code: the .proto file carried no service comment.)
  """
  # missing associated documentation comment in .proto file
  pass

  def __init__(self, channel):
    """Constructor.

    Args:
      channel: A grpc.Channel.
    """
    self.SignOutputRaw = channel.unary_unary(
        '/signrpc.Signer/SignOutputRaw',
        request_serializer=signrpc_dot_signer__pb2.SignReq.SerializeToString,
        response_deserializer=signrpc_dot_signer__pb2.SignResp.FromString,
        )
    self.ComputeInputScript = channel.unary_unary(
        '/signrpc.Signer/ComputeInputScript',
        request_serializer=signrpc_dot_signer__pb2.SignReq.SerializeToString,
        response_deserializer=signrpc_dot_signer__pb2.InputScriptResp.FromString,
        )
    self.SignMessage = channel.unary_unary(
        '/signrpc.Signer/SignMessage',
        request_serializer=signrpc_dot_signer__pb2.SignMessageReq.SerializeToString,
        response_deserializer=signrpc_dot_signer__pb2.SignMessageResp.FromString,
        )
    self.VerifyMessage = channel.unary_unary(
        '/signrpc.Signer/VerifyMessage',
        request_serializer=signrpc_dot_signer__pb2.VerifyMessageReq.SerializeToString,
        response_deserializer=signrpc_dot_signer__pb2.VerifyMessageResp.FromString,
        )
    self.DeriveSharedKey = channel.unary_unary(
        '/signrpc.Signer/DeriveSharedKey',
        request_serializer=signrpc_dot_signer__pb2.SharedKeyRequest.SerializeToString,
        response_deserializer=signrpc_dot_signer__pb2.SharedKeyResponse.FromString,
        )
class SignerServicer(object):
  """Server-side service interface for signrpc.Signer.

  Subclass and override the methods below, then register the instance with
  add_SignerServicer_to_server().  Each method here reports UNIMPLEMENTED
  until overridden.  (Generated code.)
  """
  # missing associated documentation comment in .proto file
  pass

  def SignOutputRaw(self, request, context):
    """*
    SignOutputRaw is a method that can be used to generated a signature for a
    set of inputs/outputs to a transaction. Each request specifies details
    concerning how the outputs should be signed, which keys they should be
    signed with, and also any optional tweaks. The return value is a fixed
    64-byte signature (the same format as we use on the wire in Lightning).
    If we are unable to sign using the specified keys, then an error will be
    returned.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ComputeInputScript(self, request, context):
    """*
    ComputeInputScript generates a complete InputIndex for the passed
    transaction with the signature as defined within the passed SignDescriptor.
    This method should be capable of generating the proper input script for
    both regular p2wkh output and p2wkh outputs nested within a regular p2sh
    output.
    Note that when using this method to sign inputs belonging to the wallet,
    the only items of the SignDescriptor that need to be populated are pkScript
    in the TxOut field, the value in that same field, and finally the input
    index.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def SignMessage(self, request, context):
    """*
    SignMessage signs a message with the key specified in the key locator. The
    returned signature is fixed-size LN wire format encoded.
    The main difference to SignMessage in the main RPC is that a specific key is
    used to sign the message instead of the node identity private key.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def VerifyMessage(self, request, context):
    """*
    VerifyMessage verifies a signature over a message using the public key
    provided. The signature must be fixed-size LN wire format encoded.
    The main difference to VerifyMessage in the main RPC is that the public key
    used to sign the message does not have to be a node known to the network.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def DeriveSharedKey(self, request, context):
    """
    DeriveSharedKey returns a shared secret key by performing Diffie-Hellman key
    derivation between the ephemeral public key in the request and the node's
    key specified in the key_loc parameter (or the node's identity private key
    if no key locator is specified):
    P_shared = privKeyNode * ephemeralPubkey
    The resulting shared public key is serialized in the compressed format and
    hashed with sha256, resulting in the final key length of 256bit.
    """
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def add_SignerServicer_to_server(servicer, server):
  """Register all signrpc.Signer RPC handlers of *servicer* on *server*.

  Args:
    servicer: A SignerServicer implementation (typically a subclass instance).
    server: A grpc.Server to attach the generated generic handler to.
  """
  # One unary-unary handler per RPC, keyed by method name.
  rpc_method_handlers = {
      'SignOutputRaw': grpc.unary_unary_rpc_method_handler(
          servicer.SignOutputRaw,
          request_deserializer=signrpc_dot_signer__pb2.SignReq.FromString,
          response_serializer=signrpc_dot_signer__pb2.SignResp.SerializeToString,
      ),
      'ComputeInputScript': grpc.unary_unary_rpc_method_handler(
          servicer.ComputeInputScript,
          request_deserializer=signrpc_dot_signer__pb2.SignReq.FromString,
          response_serializer=signrpc_dot_signer__pb2.InputScriptResp.SerializeToString,
      ),
      'SignMessage': grpc.unary_unary_rpc_method_handler(
          servicer.SignMessage,
          request_deserializer=signrpc_dot_signer__pb2.SignMessageReq.FromString,
          response_serializer=signrpc_dot_signer__pb2.SignMessageResp.SerializeToString,
      ),
      'VerifyMessage': grpc.unary_unary_rpc_method_handler(
          servicer.VerifyMessage,
          request_deserializer=signrpc_dot_signer__pb2.VerifyMessageReq.FromString,
          response_serializer=signrpc_dot_signer__pb2.VerifyMessageResp.SerializeToString,
      ),
      'DeriveSharedKey': grpc.unary_unary_rpc_method_handler(
          servicer.DeriveSharedKey,
          request_deserializer=signrpc_dot_signer__pb2.SharedKeyRequest.FromString,
          response_serializer=signrpc_dot_signer__pb2.SharedKeyResponse.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'signrpc.Signer', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
| 43.873333 | 90 | 0.754141 |
b2bc88b894dce5fbc7ed9d881da31e0553c983bf | 487 | py | Python | test_1C.py | edwardengland/Flood-Warning-System | ee765d619e8de3899cb0619e57020af275e8d71b | [
"MIT"
] | null | null | null | test_1C.py | edwardengland/Flood-Warning-System | ee765d619e8de3899cb0619e57020af275e8d71b | [
"MIT"
] | null | null | null | test_1C.py | edwardengland/Flood-Warning-System | ee765d619e8de3899cb0619e57020af275e8d71b | [
"MIT"
] | null | null | null | from floodsystem.stationdata import build_station_list as stations
from haversine import haversine
def test_1C():
    """Task 1C: stations_within_radius must return only stations within r km of centre."""
    def stations_within_radius(stations, centre, r):
        # Keep every station whose great-circle distance from centre is at most r.
        stations_within_r_of_x = []
        for station in stations:
            if haversine(station.coord, centre) <= r:
                stations_within_r_of_x.append(station)
        return stations_within_r_of_x

    # Bug fix: the original asserted len() of a float (the last computed
    # distance) — a TypeError whenever the list was non-empty and a NameError
    # when it was empty — and never actually invoked the helper. Exercise it
    # against the real station list instead.
    station_list = stations()
    centre = (52.2053, 0.1218)              # Cambridge city centre
    nearby = stations_within_radius(station_list, centre, 10)
    # The filtered list can never be larger than the input list...
    assert len(nearby) <= len(station_list)
    # ...and every returned station really is within the radius.
    for station in nearby:
        assert haversine(station.coord, centre) <= 10
| 34.785714 | 66 | 0.706366 |
bd5670c18d93a979167a550b7ce6126e76c39064 | 1,653 | py | Python | tests/conftest.py | sturmianseq/zenmake | 44f1131c1ab677d8c3c930150c63a7dde4ef7de0 | [
"BSD-3-Clause"
] | 2 | 2019-10-14T05:05:34.000Z | 2022-03-28T04:55:00.000Z | tests/conftest.py | sturmianseq/zenmake | 44f1131c1ab677d8c3c930150c63a7dde4ef7de0 | [
"BSD-3-Clause"
] | 42 | 2020-08-25T07:59:32.000Z | 2021-11-15T03:12:29.000Z | tests/conftest.py | sturmianseq/zenmake | 44f1131c1ab677d8c3c930150c63a7dde4ef7de0 | [
"BSD-3-Clause"
] | 1 | 2021-08-13T13:59:51.000Z | 2021-08-13T13:59:51.000Z | # coding=utf-8
#
# pylint: skip-file
"""
Copyright (c) 2019, Alexander Magola. All rights reserved.
license: BSD 3-Clause License, see LICENSE for more details.
"""
import os
import types
import platform as _platform
import pytest
from zm import pyutils
from zm.autodict import AutoDict
from zm.buildconf import loader as bconfloader
@pytest.fixture(scope = "session", autouse = True)
def beforeAllTests(request):
    """Session-wide sanity check for pyenv users.

    If PYENV_VERSION is set, it must either match the interpreter that is
    actually running or be the literal 'system'.
    """
    envVersion = os.environ.get('PYENV_VERSION')
    if envVersion is not None:
        assert envVersion in (_platform.python_version(), 'system')
@pytest.fixture
def unsetEnviron(monkeypatch):
    """Strip every ZenMake-monitored environment variable for the test."""
    from zm.waf.assist import getMonitoredEnvVarNames
    for varName in getMonitoredEnvVarNames():
        # raising=False: it is fine if the variable was never set.
        monkeypatch.delenv(varName, raising = False)
@pytest.fixture
def testingBuildConf():
    """Build a default in-memory buildconf module, exposed as an AutoDict."""
    buildconf = types.ModuleType('buildconf')
    buildconf.__file__ = os.path.abspath('buildconf.py')
    bconfloader.applyDefaults(buildconf, True, os.path.dirname(buildconf.__file__))

    # AutoDict is more useful in tests: wrap every plain mapping attribute.
    for attrName, attrValue in vars(buildconf).items():
        if isinstance(attrValue, pyutils.maptype):
            setattr(buildconf, attrName, AutoDict(attrValue))

    return AutoDict(vars(buildconf))
def pytest_report_header(config):
    """Print system information at the top of the pytest report."""
    # NOTE(review): 'wrappers' is unused by name — presumably imported for
    # its import-time side effects before sysinfo runs; confirm before
    # removing it.
    from zm.waf import wrappers
    from zm import sysinfo
    sysinfo.printSysInfo()
    return ""
@pytest.hookimpl(hookwrapper = True, tryfirst = True)
def pytest_runtest_makereport(item, call):
    # Hookwrapper: yield to let pytest build the report, then stash it on
    # the item as rep_setup / rep_call / rep_teardown (depending on phase)
    # so fixtures can later inspect the test outcome.
    outcome = yield
    rep = outcome.get_result()
    setattr(item, "rep_" + rep.when, rep)
| 27.098361 | 83 | 0.714459 |
636dabeafd58c1d29c1c28152350b289387fe706 | 2,431 | py | Python | tests/gof/test_cmodule.py | abdalazizrashid/Theano-PyMC | 90fa750461e91fb6281d494ae86404e2153fd7eb | [
"BSD-3-Clause"
] | null | null | null | tests/gof/test_cmodule.py | abdalazizrashid/Theano-PyMC | 90fa750461e91fb6281d494ae86404e2153fd7eb | [
"BSD-3-Clause"
] | null | null | null | tests/gof/test_cmodule.py | abdalazizrashid/Theano-PyMC | 90fa750461e91fb6281d494ae86404e2153fd7eb | [
"BSD-3-Clause"
] | null | null | null | """
We don't have real tests for the cache, but it would be great to make them!
But this one tests a current behavior that isn't good: the c_code isn't
deterministic based on the input type and the op.
"""
import numpy as np
import aesara
from aesara.gof.cmodule import GCC_compiler
class MyOp(aesara.compile.ops.DeepCopyOp):
    """DeepCopyOp whose generated C code is unversioned and intentionally
    non-deterministic, used to probe the compilation-cache behaviour."""

    # Counts how many times C code generation was invoked across the process.
    nb_called = 0

    def c_code_cache_version(self):
        # Empty tuple: no cache version is advertised for this op.
        return ()

    def c_code(self, node, name, inames, onames, sub):
        MyOp.nb_called += 1
        (iname,) = inames
        (oname,) = onames
        fail = sub["fail"]
        itype = node.inputs[0].type.__class__
        if itype not in self.c_code_and_version:
            # No C implementation registered for this input type.
            return super(aesara.compile.ops.DeepCopyOp, self).c_code(
                node, name, inames, onames, sub
            )
        code, version = self.c_code_and_version[itype]
        # Injecting a random literal makes every generated source different.
        rand = np.random.rand()
        return ('printf("%(rand)s\\n");' + code) % locals()
def test_inter_process_cache():
    # When an op has c_code but no version, and the graph holds two apply
    # nodes with different input variables (so they are not merged) of the
    # same type, is one module reused even though the generated c_code
    # would differ?  This test shows the c_code is generated only once —
    # which tells us whether c_code may embed node-specific information
    # such as the names of node.inputs[*].owner variables.
    def _build_and_run():
        x, y = aesara.tensor.dvectors("xy")
        f = aesara.function([x, y], [MyOp()(x), MyOp()(y)])
        f(np.arange(60), np.arange(60))

    no_cxx = aesara.config.mode == "FAST_COMPILE" or aesara.config.cxx == ""
    expected_calls = 0 if no_cxx else 1

    _build_and_run()
    assert MyOp.nb_called == expected_calls

    # What if we compile a new function with new variables?  Still cached.
    _build_and_run()
    assert MyOp.nb_called == expected_calls
def test_flag_detection():
    """Smoke-test the blas-flag detection code path.

    It used to fail on Python 3 because of improper string handling, but
    went undetected because this path is not usually taken, so call it
    here directly and check that no exception is raised.
    """
    GCC_compiler.try_flags(["-lblas"])
| 34.239437 | 76 | 0.646647 |
bcbc5ac2f781c3d9081d0a4a49b9ef958bc31ac5 | 265 | py | Python | geeksaga/__init__.py | geekflow/archive | a94bd50363d1cc0004d8d0984599432a31e70035 | [
"MIT"
] | null | null | null | geeksaga/__init__.py | geekflow/archive | a94bd50363d1cc0004d8d0984599432a31e70035 | [
"MIT"
] | null | null | null | geeksaga/__init__.py | geekflow/archive | a94bd50363d1cc0004d8d0984599432a31e70035 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
geeksaga.init
~~~~~~~~~~~~~~
geeksaga init
:copyright: (c) 2014 by geeksaga.
:license: MIT LICENSE 2.0, see license for more details.
"""
import sys
# Make the bundled 'lib' directory importable, inserting it only once and
# with highest precedence on the import path.
if 'lib' not in sys.path:
    sys.path.insert(0, 'lib')
pass | 16.5625 | 60 | 0.558491 |
167976b7f9d3e1a6336ee804d546ecc5b9269222 | 242 | py | Python | main/test_fake.py | IFRCGo/ifrcgo-api | c1c3e0cf1076ab48d03db6aaf7a00f8485ca9e1a | [
"MIT"
] | null | null | null | main/test_fake.py | IFRCGo/ifrcgo-api | c1c3e0cf1076ab48d03db6aaf7a00f8485ca9e1a | [
"MIT"
] | null | null | null | main/test_fake.py | IFRCGo/ifrcgo-api | c1c3e0cf1076ab48d03db6aaf7a00f8485ca9e1a | [
"MIT"
] | null | null | null | from django.test import TestCase
class FakeTest(TestCase):
    """
    This test exists for running migrations only:
    docker-compose run --rm serve ./manage.py test -v 2 --pattern="test_fake.py"
    """
    def test_fake(self):
        # Intentionally empty: the value of this test is the database
        # setup/teardown (running migrations) that TestCase performs.
        pass
| 22 | 81 | 0.652893 |
6037543f6e10db3a0b3d87f6fa6bf2d6945e54ba | 1,194 | py | Python | settings.py | peterjc/MDT-INTR-EXP-MET | 86d422b83ea5ec2820cd8b31bc86b56ecbe034dd | [
"MIT"
] | null | null | null | settings.py | peterjc/MDT-INTR-EXP-MET | 86d422b83ea5ec2820cd8b31bc86b56ecbe034dd | [
"MIT"
] | 18 | 2021-08-11T08:21:03.000Z | 2022-01-17T11:26:15.000Z | settings.py | peterjc/MDT-INTR-EXP-MET | 86d422b83ea5ec2820cd8b31bc86b56ecbe034dd | [
"MIT"
] | 1 | 2021-08-20T15:42:29.000Z | 2021-08-20T15:42:29.000Z | from os import environ
SESSION_CONFIG_DEFAULTS = dict(real_world_currency_per_point=0.1, participation_fee=5)
SESSION_CONFIGS = [dict(name='farmer_framing', framing=0, num_demo_participants=None, app_sequence=['risk_attitude', 'volunteering', 'questionnaire_and_payment']), dict(name='community_centre_framing', framing=1, num_demo_participants=None, app_sequence=['risk_attitude', 'volunteering', 'questionnaire_and_payment'])]
LANGUAGE_CODE = 'en'
REAL_WORLD_CURRENCY_CODE = 'GBP'
USE_POINTS = True
DEMO_PAGE_INTRO_HTML = ''
PARTICIPANT_FIELDS = ['risk_attitude_msg', 'volunteering_msg']
SESSION_FIELDS = []
ROOMS = []
ADMIN_USERNAME = 'admin'
# for security, best to set admin password in an environment variable
ADMIN_PASSWORD = environ.get('OTREE_ADMIN_PASSWORD')
SECRET_KEY = 'blahblah'
# if an app is included in SESSION_CONFIGS, you don't need to list it here
INSTALLED_APPS = ['otree']
DEMO_PAGE_TITLE = 'MDT project INTR-EXP-MET - oTree Demo'
DEMO_PAGE_INTRO_HTML = """
Risk attitude lottery game based on Holt and Laury (2002), followed by an interactive multi-player game about volunteering. <a href="https://github.com/peterjc/MDT-INTR-EXP-MET">Source code on GitHub</a>.
"""
| 45.923077 | 318 | 0.78392 |
4dda9668527acadbb3981e8afc96b62e4ac4a870 | 3,724 | py | Python | provisioning/miniprov/hare_mp/store.py | vaibhavparatwar/cortx-hare | b3c01a592e692a62ccdad03f587722b9a92f5adc | [
"Apache-2.0"
] | null | null | null | provisioning/miniprov/hare_mp/store.py | vaibhavparatwar/cortx-hare | b3c01a592e692a62ccdad03f587722b9a92f5adc | [
"Apache-2.0"
] | null | null | null | provisioning/miniprov/hare_mp/store.py | vaibhavparatwar/cortx-hare | b3c01a592e692a62ccdad03f587722b9a92f5adc | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email opensource@seagate.com or cortx-questions@seagate.com.
#
from typing import Any, List
from cortx.utils.conf_store import Conf
from hare_mp.types import MissingKeyError
class ValueProvider:
    """Abstract key/value source; concrete subclasses implement _raw_get()."""

    def get(self, key: str, allow_null: bool = False) -> Any:
        value = self._raw_get(key)
        # A missing key is an error unless the caller explicitly allows nulls.
        if value is None and not allow_null:
            raise MissingKeyError(key)
        return value

    def _raw_get(self, key: str) -> str:
        raise NotImplementedError()

    def get_machine_id(self) -> str:
        raise NotImplementedError()

    def get_cluster_id(self) -> str:
        raise NotImplementedError()

    def get_storage_set_index(self) -> int:
        raise NotImplementedError()

    def get_storage_set_nodes(self) -> List[str]:
        raise NotImplementedError()
class ConfStoreProvider(ValueProvider):
    """ValueProvider backed by the cortx ConfStore, loaded under index 'hare'."""

    def __init__(self, url: str):
        self.url = url
        # Conf is a 'singleton' (a class with static methods only, but with
        # static state).  We therefore do not instantiate it.  fail_reload
        # must be False, otherwise re-creating a ConfStoreProvider in the
        # same address space raises:
        #     ConfError: error(22): conf index hare already exists
        conf = Conf
        conf.load('hare', url, fail_reload=False)
        self.conf = conf

    def _raw_get(self, key: str) -> str:
        return self.conf.get('hare', key)

    def get_machine_id(self) -> str:
        # Machine identity comes straight from the OS, not from ConfStore.
        with open('/etc/machine-id', 'r') as f:
            return f.readline().strip('\n')

    def get_cluster_id(self) -> str:
        machine_id = self.get_machine_id()
        return self.get(f'server_node>{machine_id}>cluster_id')

    def get_storage_set_index(self) -> int:
        cluster_id = self.get_cluster_id()
        machine_id = self.get_machine_id()
        storage_set_id = self.get(f'server_node>{machine_id}>storage_set_id')
        storage_sets = self.get(f'cluster>{cluster_id}>storage_set')
        # Return the position of our storage set within the cluster list.
        for index, storage_set in enumerate(storage_sets):
            if storage_set['name'] == storage_set_id:
                return index
        raise RuntimeError('No storage set found. Is ConfStore data valid?')

    def get_hostname(self) -> str:
        machine_id = self.get_machine_id()
        return self._raw_get(f'server_node>{machine_id}>hostname')

    def get_storage_set_nodes(self) -> List[str]:
        cluster_id = self.get_cluster_id()
        storage_set_index = self.get_storage_set_index()
        server_nodes_key = (f'cluster>{cluster_id}>'
                            f'storage_set[{storage_set_index}]>server_nodes')
        return self.get(server_nodes_key)
| 34.165138 | 79 | 0.652524 |
9c144afba4f8f43ece8dc7d8f7b7b71059f36b6a | 433 | py | Python | plotly/validators/contour/_ids.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 2 | 2018-12-03T15:20:42.000Z | 2018-12-03T15:20:47.000Z | plotly/validators/contour/_ids.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 27 | 2020-04-28T21:23:12.000Z | 2021-06-25T15:36:38.000Z | plotly/validators/contour/_ids.py | gnestor/plotly.py | a8ae062795ddbf9867b8578fe6d9e244948c15ff | [
"MIT"
] | 2 | 2019-06-17T01:35:57.000Z | 2020-11-03T01:07:19.000Z | import _plotly_utils.basevalidators
class IdsValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the `ids` data array of `contour` traces."""

    def __init__(self, plotly_name='ids', parent_name='contour', **kwargs):
        # Callers may override edit_type/role; otherwise use the values
        # baked in for this trace property.
        edit_type = kwargs.pop('edit_type', 'calc')
        role = kwargs.pop('role', 'data')
        super(IdsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
| 30.928571 | 75 | 0.644342 |
be7aba63728a73e4a0c706791196efa28a097ed0 | 1,570 | py | Python | var/spack/repos/builtin/packages/qwt/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 9 | 2018-04-18T07:51:40.000Z | 2021-09-10T03:56:57.000Z | var/spack/repos/builtin/packages/qwt/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 907 | 2018-04-18T11:17:57.000Z | 2022-03-31T13:20:25.000Z | var/spack/repos/builtin/packages/qwt/package.py | xiki-tempula/spack | 9d66c05e93ab8a933fc59915040c0e0c86a4aac4 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 29 | 2018-11-05T16:14:23.000Z | 2022-02-03T16:07:09.000Z | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Qwt(QMakePackage):
    """The Qwt library contains GUI Components and utility classes which are
    primarily useful for programs with a technical background. Beside a
    framework for 2D plots it provides scales, sliders, dials, compasses,
    thermometers, wheels and knobs to control or display values, arrays, or
    ranges of type double.
    """

    homepage = "http://qwt.sourceforge.net/"
    url = "https://sourceforge.net/projects/qwt/files/qwt/6.1.3/qwt-6.1.3.tar.bz2"

    version('6.1.4', sha256='1529215329e51fc562e0009505a838f427919a18b362afff441f035b2d9b5bd9')
    version('6.1.3', sha256='f3ecd34e72a9a2b08422fb6c8e909ca76f4ce5fa77acad7a2883b701f4309733')
    version('5.2.2', sha256='36bf2ee51ca9c74fde1322510ffd39baac0db60d5d410bb157968a78d9c1464b')

    variant('designer', default=False,
            description="Build extensions to QT designer")

    # Without the designer variant, drop the designer plugin from the build.
    patch('no-designer.patch', when='~designer')

    depends_on('qt+opengl')
    depends_on('qt+tools', when='+designer')

    # Qwt 6.1.1 and older use a constant that was removed in Qt 5.4
    # https://bugs.launchpad.net/ubuntu/+source/qwt-qt5/+bug/1485213
    depends_on('qt@:5.3', when='@:6.1.1')

    def patch(self):
        # Subvert hardcoded prefix
        filter_file(r'/usr/local/qwt-\$\$(QWT_)?VERSION.*',
                    self.prefix, 'qwtconfig.pri')
| 41.315789 | 95 | 0.70828 |
c6447e83e831f0526bbf335fa0ce420df00e6e6c | 13,602 | py | Python | routine_qiime2_analyses/_routine_q2_phylo.py | FranckLejzerowicz/routine_qiime2_analyses | 855470e734b21de6cb420ee1d5145241a6b4782b | [
"BSD-3-Clause"
] | null | null | null | routine_qiime2_analyses/_routine_q2_phylo.py | FranckLejzerowicz/routine_qiime2_analyses | 855470e734b21de6cb420ee1d5145241a6b4782b | [
"BSD-3-Clause"
] | null | null | null | routine_qiime2_analyses/_routine_q2_phylo.py | FranckLejzerowicz/routine_qiime2_analyses | 855470e734b21de6cb420ee1d5145241a6b4782b | [
"BSD-3-Clause"
] | null | null | null | # ----------------------------------------------------------------------------
# Copyright (c) 2020, Franck Lejzerowicz.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import sys
from skbio.tree import TreeNode
from os.path import isfile, splitext
import pandas as pd
from routine_qiime2_analyses._routine_q2_xpbs import run_xpbs, print_message
from routine_qiime2_analyses._routine_q2_io_utils import (
get_job_folder,
get_analysis_folder,
get_wol_tree,
get_sepp_tree,
get_raref_tab_meta_pds
)
from routine_qiime2_analyses._routine_q2_cmds import (
write_fragment_insertion, write_seqs_fasta)
from routine_qiime2_analyses._routine_q2_cmds import run_import
def run_sepp(i_datasets_folder: str, datasets: dict, datasets_read: dict,
             datasets_phylo: dict, datasets_rarefs: dict, prjct_nm: str,
             i_sepp_tree: str, trees: dict, force: bool, qiime_env: str,
             chmod: str, noloc: bool, slurm: bool, run_params: dict,
             filt_raref: str, jobs: bool) -> None:
    """
    Run SEPP on the datasets composed or 16S deblur sequences (e.g. from redbiom/Qiita).

    Writes per-dataset shell scripts (and pbs/slurm wrappers) that export the
    feature sequences to fasta/qza and place them on the SEPP reference tree.

    :param i_datasets_folder: Path to the folder containing the data/metadata subfolders.
    :param datasets: dataset -> [tsv/biom path, meta path]
    :param datasets_read: dataset -> [tsv table, meta table]
    :param datasets_phylo: to be updated with ('tree_to_use', 'corrected_or_not') per dataset.
    :param datasets_rarefs: dataset -> rarefaction suffixes (presumably; TODO confirm).
    :param prjct_nm: Short nick name for your project.
    :param i_sepp_tree: database to use for sepp phylogeny reads placement.
    :param trees: to be update with tree to use for a dataset phylogenetic analyses.
    :param force: Force the re-writing of scripts for all commands.
    :param qiime_env: name of your qiime2 conda environment (e.g. qiime2-2019.10).
    :param chmod: whether to change permission of output files (default: 775).
    :param noloc: scheduler flag forwarded to run_xpbs.
    :param slurm: write .slm (slurm) wrappers instead of .pbs.
    :param run_params: scheduler resources (time, nodes, procs, memory).
    :param filt_raref: filename suffix for the filtering/rarefaction setup.
    :param jobs: whether jobs are submitted (forwarded to run_xpbs/print_message).
    """
    # check whether there's dataset(s) that may use the reference tree (i.e. features are DNA sequences)
    sepp_datasets = [dat for dat, (tree, correction) in datasets_phylo.items() if tree == 'amplicon']
    if len(sepp_datasets):
        ref_tree_qza = get_sepp_tree(i_sepp_tree)
        job_folder = get_job_folder(i_datasets_folder, 'phylo')
        job_folder2 = get_job_folder(i_datasets_folder, 'phylo/chunks')
        main_written = 0
        main_sh = '%s/1_run_sepp_%s%s.sh' % (job_folder, prjct_nm, filt_raref)
        with open(main_sh, 'w') as main_o:
            for dat, tsv_metas_fps_ in datasets.items():
                written = 0
                if dat not in sepp_datasets:
                    continue
                out_sh = '%s/run_sepp_%s_%s%s.sh' % (job_folder2, prjct_nm, dat, filt_raref)
                if slurm:
                    out_pbs = '%s.slm' % splitext(out_sh)[0]
                else:
                    out_pbs = '%s.pbs' % splitext(out_sh)[0]
                with open(out_sh, 'w') as cur_sh:
                    # One iteration per (possibly rarefied) version of the dataset.
                    for idx, tsv_metas_fps in enumerate(tsv_metas_fps_):
                        tsv, meta = tsv_metas_fps
                        # 'raref' placeholder: table not read yet — load it now.
                        if not isinstance(datasets_read[dat][idx][0], pd.DataFrame) and datasets_read[dat][idx][0] == 'raref':
                            qza_raw_in = '%s/data/tab_%s_inTree.qza' % (i_datasets_folder, dat)
                            if isfile(qza_raw_in) and not force:
                                odir_sepp = get_analysis_folder(i_datasets_folder, 'phylo/%s' % dat)
                                out_fp_sepp_tree = '%s/tree_%s.qza' % (odir_sepp, dat)
                                # if idx:
                                #     trees[dat].append((qza_raw_in, out_fp_sepp_tree))
                                # else:
                                #     trees[dat] = [(qza_raw_in, out_fp_sepp_tree)]
                                # Only the first (non-rarefied) version defines the tree.
                                if not idx:
                                    trees[dat] = (qza_raw_in, out_fp_sepp_tree)
                                print('Using the non rarefied tree (no need to recompute)...\nExiting')
                                continue
                            elif not isfile(tsv):
                                print('Must have run rarefaction to use it further...\nExiting')
                                sys.exit(0)
                            tsv_pd, meta_pd = get_raref_tab_meta_pds(meta, tsv)
                            datasets_read[dat][idx] = [tsv_pd, meta_pd]
                        else:
                            tsv_pd, meta_pd = datasets_read[dat][idx]
                        qza = '%s.qza' % splitext(tsv)[0]
                        if not isfile(qza):
                            print('Need to first import %s to .qza to do reads placement '
                                  '(see "# Import tables to qiime2")\nExiting...' % tsv)
                            sys.exit(0)
                        cur_raref = datasets_rarefs[dat][idx]
                        # Output names for placed / non-placed fractions of the table.
                        qza_in = '%s_inTree%s.qza' % (splitext(tsv)[0], cur_raref)
                        qza_in_tsv = '%s.tsv' % splitext(qza_in)[0]
                        qza_out = '%s_notInTree%s.qza' % (splitext(tsv)[0], cur_raref)
                        odir_seqs = get_analysis_folder(i_datasets_folder, 'seqs/%s' % dat)
                        odir_sepp = get_analysis_folder(i_datasets_folder, 'phylo/%s' % dat)
                        out_fp_seqs_rad = '%s/seq_%s%s' % (odir_seqs, dat, cur_raref)
                        out_fp_seqs_fasta = '%s.fasta' % out_fp_seqs_rad
                        out_fp_seqs_qza = '%s.qza' % out_fp_seqs_rad
                        out_fp_sepp_tree = '%s/tree_%s%s.qza' % (odir_sepp, dat, cur_raref)
                        # if idx:
                        #     trees[dat].append((qza_in, out_fp_sepp_tree))
                        # else:
                        #     trees[dat] = [(qza_in, out_fp_sepp_tree)]
                        if not idx:
                            trees[dat] = (qza_in, out_fp_sepp_tree)
                        written = 0
                        # Step 1: export the feature sequences to fasta + qza.
                        if force or not isfile(out_fp_seqs_qza):
                            cmd = write_seqs_fasta(out_fp_seqs_fasta,
                                                   out_fp_seqs_qza, tsv_pd)
                            cur_sh.write('echo "%s"\n' % cmd)
                            cur_sh.write('%s\n\n' % cmd)
                            written += 1
                            main_written += 1
                        # Step 2: place the sequences on the SEPP reference tree.
                        if force or not isfile(out_fp_sepp_tree) or not isfile(qza_in_tsv):
                            cmd = write_fragment_insertion(
                                out_fp_seqs_qza, ref_tree_qza,
                                out_fp_sepp_tree, qza,
                                qza_in, qza_in_tsv, qza_out)
                            cur_sh.write('echo "%s"\n' % cmd)
                            cur_sh.write('%s\n\n' % cmd)
                            written += 1
                            main_written += 1
                # Wrap the per-dataset script into a scheduler submission.
                run_xpbs(out_sh, out_pbs, '%s.spp.%s%s' % (prjct_nm, dat, filt_raref), qiime_env,
                         run_params["time"], run_params["n_nodes"], run_params["n_procs"],
                         run_params["mem_num"], run_params["mem_dim"],
                         chmod, written, 'single', main_o, noloc, slurm, jobs)
        if main_written:
            print_message("# Fragment insertion using SEPP (%s)" % ', '.join(sepp_datasets), 'sh', main_sh, jobs)
def shear_tree(
        i_datasets_folder: str, datasets: dict, datasets_read: dict,
        datasets_phylo: dict, datasets_features: dict, prjct_nm: str,
        i_wol_tree: str, trees: dict, datasets_rarefs: dict, force: bool,
        qiime_env: str, chmod: str, noloc: bool, slurm: bool, run_params: dict,
        filt_raref: str, jobs: bool) -> None:
    """
    Get the sub-tree from the Web of Life tree that corresponds to the gOTUs-labeled features.

    :param i_datasets_folder: Path to the folder containing the data/metadata subfolders.
    :param datasets: dataset -> [tsv/biom path, meta path]
    :param datasets_read: dataset -> [tsv table, meta table]
    :param datasets_phylo: to be updated with ('tree_to_use', 'corrected_or_not') per dataset.
    :param datasets_features: dataset -> mapping of genome ID -> feature name (as used below).
    :param prjct_nm: Short nick name for your project.
    :param i_wol_tree: default on barnacle /projects/wol/profiling/dbs/wol/phylogeny/wol_tree.nwk.
    :param trees: to be update with tree to use for a dataset phylogenetic analyses.
    :param datasets_rarefs: dataset -> rarefaction suffixes (presumably; TODO confirm).
    :param force: Force the re-writing of scripts for all commands.
    :param qiime_env: name of your qiime2 conda environment (e.g. qiime2-2019.10).
    :param chmod: whether to change permission of output files (default: 775).
    :param noloc: scheduler flag forwarded to run_xpbs.
    :param slurm: write .slm (slurm) wrappers instead of .pbs.
    :param run_params: scheduler resources (time, nodes, procs, memory).
    :param filt_raref: filename suffix for the filtering/rarefaction setup.
    :param jobs: whether jobs are submitted (forwarded to run_xpbs/print_message).
    """
    # check whether there's dataset(s) that may use
    # the Web of Life tree (i.e. features contain gID)
    wol_datasets = [dat for dat, (tree, correction)
                    in datasets_phylo.items() if tree == 'wol']
    if len(wol_datasets):
        job_folder = get_job_folder(i_datasets_folder, 'phylo')
        job_folder2 = get_job_folder(i_datasets_folder, 'phylo/chunks')
        i_wol_tree = get_wol_tree(i_wol_tree)
        wol = TreeNode.read(i_wol_tree)
        main_written = 0
        main_sh = '%s/0_run_import_trees_%s%s.sh' % (job_folder, prjct_nm, filt_raref)
        with open(main_sh, 'w') as main_o:
            for dat, tsv_metas_fps_ in datasets.items():
                written = 0
                if dat not in wol_datasets:
                    continue
                out_sh = '%s/run_import_tree_%s_%s%s.sh' % (job_folder2, prjct_nm, dat, filt_raref)
                if slurm:
                    out_pbs = out_sh.replace('.sh', '.slm')
                else:
                    out_pbs = out_sh.replace('.sh', '.pbs')
                with open(out_sh, 'w') as o:
                    # One iteration per (possibly rarefied) version of the dataset.
                    for idx, tsv_metas_fps in enumerate(tsv_metas_fps_):
                        tsv, meta = tsv_metas_fps
                        # 'raref' placeholder: table not read yet — load it now.
                        if not isinstance(datasets_read[dat][idx][0], pd.DataFrame) and datasets_read[dat][idx][0] == 'raref':
                            if not isfile(tsv):
                                print('Must have run rarefaction to use it further...\nExiting')
                                sys.exit(0)
                            tsv_pd, meta_pd = get_raref_tab_meta_pds(meta, tsv)
                            datasets_read[dat][idx] = [tsv_pd, meta_pd]
                        else:
                            tsv_pd, meta_pd = datasets_read[dat][idx]
                        cur_raref = datasets_rarefs[dat][idx]
                        # Keep only the genome IDs whose feature is present in this table.
                        cur_datasets_features = dict(
                            gid for gid in datasets_features[dat].items() if gid[1] in tsv_pd.index)
                        analysis_folder = get_analysis_folder(i_datasets_folder, 'phylo/%s' % dat)
                        wol_features_fpo = '%s/tree_%s%s.nwk' % (analysis_folder, dat, cur_raref)
                        wol_features_qza = wol_features_fpo.replace('.nwk', '.qza')
                        # if idx:
                        #     trees[dat].append(('', wol_features_qza))
                        # else:
                        #     trees[dat] = [('', wol_features_qza)]
                        # Only the first (non-rarefied) version defines the tree.
                        if not idx:
                            trees[dat] = ('', wol_features_qza)
                        if force or not isfile(wol_features_qza):
                            wol_features = wol.shear(list(cur_datasets_features.keys()))
                            # rename the tip per the features names associated with each gID
                            for tip in wol_features.tips():
                                tip.name = cur_datasets_features[tip.name]
                            wol_features.write(wol_features_fpo)
                            cmd = run_import(wol_features_fpo, wol_features_qza, "Phylogeny[Rooted]")
                            o.write("echo '%s'\n" % cmd)
                            o.write('%s\n\n' % cmd)
                            written += 1
                            main_written += 1
                # Wrap the per-dataset script into a scheduler submission.
                run_xpbs(out_sh, out_pbs, '%s.shr.%s%s' % (prjct_nm, dat, filt_raref), qiime_env,
                         run_params["time"], run_params["n_nodes"], run_params["n_procs"],
                         run_params["mem_num"], run_params["mem_dim"],
                         chmod, written, 'single', main_o, noloc, slurm, jobs)
        if main_written:
            print_message("# Shear Web of Life tree to features' genome IDs (%s)" % ', '.join(wol_datasets), 'sh', main_sh, jobs)
def get_precomputed_trees(
        i_datasets_folder: str, datasets: dict, datasets_filt_map: dict,
        datasets_phylo: dict, trees: dict) -> None:
    """
    Register already-computed trees for each dataset, if present on disk.

    :param i_datasets_folder: Path to the folder containing the data/metadata subfolders.
    :param datasets: dataset -> [tsv/biom path, meta path]
    :param datasets_filt_map: filtered-dataset -> source-dataset name mapping.
    :param datasets_phylo: to be updated with ('tree_to_use', 'corrected_or_not') per dataset.
    :param trees: to be update with tree to use for a dataset phylogenetic analyses.
    """
    for dat in datasets:
        # A filtered dataset reuses the tree of the dataset it was built from.
        dat_tree = datasets_filt_map.get(dat, dat)
        analysis_folder = get_analysis_folder(i_datasets_folder, 'phylo/%s' % dat_tree)
        tree_qza = '%s/tree_%s.qza' % (analysis_folder, dat_tree)
        if not isfile(tree_qza):
            continue
        trees[dat] = ('', tree_qza)
        datasets_phylo[dat] = ('precpu', 0)
| 55.292683 | 129 | 0.542788 |
87bd538f7e73520049a7f3faeed35fe07c385cc6 | 7,108 | py | Python | atlassian/jira8.py | simonrupf/atlassian-python-api | 390b70101c84e933018605edd5fef549b35de030 | [
"Apache-2.0"
] | 1 | 2019-08-10T15:24:52.000Z | 2019-08-10T15:24:52.000Z | atlassian/jira8.py | simonrupf/atlassian-python-api | 390b70101c84e933018605edd5fef549b35de030 | [
"Apache-2.0"
] | 1 | 2020-07-20T14:06:17.000Z | 2020-08-11T20:21:59.000Z | atlassian/jira8.py | simonrupf/atlassian-python-api | 390b70101c84e933018605edd5fef549b35de030 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
import logging
from .rest_client import AtlassianRestAPI
log = logging.getLogger(__name__)
class Jira8(AtlassianRestAPI):
# API/2 Get permissions
def get_permissions(self, project_id=None, project_key=None, issue_id=None, issue_key=None):
"""
Returns all permissions in the system and whether the currently logged in user has them.
You can optionally provide a specific context
to get permissions for (projectKey OR projectId OR issueKey OR issueId)
:param project_id: str
:param project_key: str
:param issue_id: str
:param issue_key: str
:return:
"""
url = 'rest/api/2/mypermissions'
params = {}
if project_id:
params['projectId'] = project_id
if project_key:
params['projectKey'] = project_key
if issue_id:
params['issueId'] = issue_id
if issue_key:
params['issueKey'] = issue_key
return self.get(url, params=params)
def get_all_permissions(self):
"""
Returns all permissions that are present in the JIRA instance - Global, Project
and the global ones added by plugins
:return: All permissions
"""
url = 'rest/api/2/permissions'
return self.get(url)
# Application properties
def get_property(self, key=None, permission_level=None, key_filter=None):
"""
Returns an application property
:param key: str
:param permission_level: str
:param key_filter: str
:return: list or item
"""
url = 'rest/api/2/application-properties'
params = {}
if key:
params['key'] = key
if permission_level:
params['permissionLevel'] = permission_level
if key_filter:
params['keyFilter'] = key_filter
return self.get(url, params=params)
def set_property(self, property_id, value):
url = 'rest/api/2/application-properties/{}'.format(property_id)
data = {'id': property_id, 'value': value}
return self.put(url, data=data)
def get_advanced_settings(self):
"""
Returns the properties that are displayed on the "General Configuration > Advanced Settings" page
:return:
"""
url = 'rest/api/2/application-properties/advanced-settings'
return self.get(url)
# Application roles
def get_all_application_roles(self):
"""
Returns all ApplicationRoles in the system
:return:
"""
url = 'rest/api/2/applicationrole'
return self.get(url)
def get_application_role(self, role_key):
"""
Returns the ApplicationRole with passed key if it exists
:param role_key: str
:return:
"""
url = 'rest/api/2/applicationrole/{}'.format(role_key)
return self.get(url)
# Attachments
def get_attachment(self, attachment_id):
"""
Returns the meta-data for an attachment, including the URI of the actual attached file
:param attachment_id: int
:return:
"""
url = 'rest/api/2/attachment/{}'.format(attachment_id)
return self.get(url)
def remove_attachment(self, attachment_id):
"""
Remove an attachment from an issue
:param attachment_id: int
:return: if success, return None
"""
url = 'rest/api/2/attachment/{}'.format(attachment_id)
return self.delete(url)
def get_attachment_meta(self):
"""
Returns the meta information for an attachments,
specifically if they are enabled and the maximum upload size allowed
:return:
"""
url = 'rest/api/2/attachment/meta'
return self.get(url)
# Issues
def create_issue(self, fields, update_history=False):
"""
Creates an issue or a sub-task from a JSON representation
:param fields: JSON data
:param update_history: bool (if true then the user's project history is updated)
:return:
"""
url = 'rest/api/2/issue'
data = {'fields': fields}
params = {}
if update_history is True:
params['updateHistory'] = 'true'
else:
params['updateHistory'] = 'false'
return self.post(url, params=params, data=data)
def create_issues(self, list_of_issues_data):
"""
Creates issues or sub-tasks from a JSON representation
Creates many issues in one bulk operation
:param list_of_issues_data: list of JSON data
:return:
"""
url = 'rest/api/2/issue/bulk'
data = {'issueUpdates': list_of_issues_data}
return self.post(url, data=data)
def delete_issue(self, issue_id_or_key, delete_subtasks=True):
"""
Delete an issue
If the issue has subtasks you must set the parameter delete_subtasks = True to delete the issue
You cannot delete an issue without its subtasks also being deleted
:param issue_id_or_key:
:param delete_subtasks:
:return:
"""
url = 'rest/api/2/issue/{}'.format(issue_id_or_key)
params = {}
if delete_subtasks is True:
params['deleteSubtasks'] = 'true'
else:
params['deleteSubtasks'] = 'false'
log.warning('Removing issue {}...'.format(issue_id_or_key))
return self.delete(url, params=params)
def edit_issue(self, issue_id_or_key, fields, notify_users=True):
"""
Edits an issue from a JSON representation
The issue can either be updated by setting explicit the field
value(s) or by using an operation to change the field value
:param issue_id_or_key: str
:param fields: JSON
:param notify_users: bool
:return:
"""
url = 'rest/api/2/issue/{}'.format(issue_id_or_key)
params = {}
data = {'update': fields}
if notify_users is True:
params['notifyUsers'] = 'true'
else:
params['notifyUsers'] = 'false'
return self.put(url, data=data, params=params)
def get_issue(self, issue_id_or_key, fields=None, properties=None, update_history=True):
"""
Returns a full representation of the issue for the given issue key
By default, all fields are returned in this get-issue resource
:param issue_id_or_key: str
:param fields: str
:param properties: str
:param update_history: bool
:return: issue
"""
url = 'rest/api/2/issue/{}'.format(issue_id_or_key)
params = {}
if fields is not None:
params['fields'] = fields
if properties is not None:
params['properties'] = properties
if update_history is True:
params['updateHistory'] = 'true'
if update_history is False:
params['updateHistory'] = 'false'
return self.get(url, params=params)
| 29.251029 | 105 | 0.602279 |
e39eb9c2419db9e8c60efc95b9ee5288a0045420 | 8,227 | py | Python | aql/entity/aql_entity.py | menify/aqualid | 58b6d552d677272aaa3bbfb0fe5601c46e126c7e | [
"MIT"
] | 1 | 2019-04-22T09:09:46.000Z | 2019-04-22T09:09:46.000Z | aql/entity/aql_entity.py | JamesLinus/aqualid | 58b6d552d677272aaa3bbfb0fe5601c46e126c7e | [
"MIT"
] | 1 | 2015-01-10T23:12:29.000Z | 2015-01-10T23:12:29.000Z | aql/entity/aql_entity.py | menify/aqualid | 58b6d552d677272aaa3bbfb0fe5601c46e126c7e | [
"MIT"
] | null | null | null | #
# Copyright (c) 2014-2015 The developers of Aqualid project
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from aql.util_types import to_sequence, cast_str
from aql.utils import simple_object_signature
from .aql_entity_pickler import pickleable
__all__ = (
'EntityBase', 'SignatureEntity', 'SimpleEntity', 'NullEntity',
)
# ==============================================================================
class ErrorEntityNameEmpty(Exception):
    """Raised when an entity is constructed with an empty name."""

    def __init__(self):
        super(ErrorEntityNameEmpty, self).__init__("Entity name is empty")
# ==============================================================================
class ErrorSignatureEntityInvalidDataType(Exception):
    """Raised when signature data is not a bytes-like object."""

    def __init__(self, data):
        message = "Signature data type must be bytes or bytearray, " \
                  "actual type: '{}'".format(type(data))
        super(ErrorSignatureEntityInvalidDataType, self).__init__(message)
class ErrorTextEntityInvalidDataType(Exception):
    """Raised when text data is not a string."""

    def __init__(self, text):
        super(ErrorTextEntityInvalidDataType, self).__init__(
            "Text data type must be string, actual type: '{}'".format(type(text)))
# ==============================================================================
class EntityBase (object):
    """Base class of build entities: a named value with a content signature.

    ``id`` identifies the entity (derived from its name and concrete class),
    while ``signature`` reflects its current content; two entities compare
    equal only when both match.  ``name``, ``signature`` and ``id`` are
    computed lazily on first access via ``__getattr__`` and cached in their
    ``__slots__``.
    """
    __slots__ = ('id', 'name', 'signature', 'tags')
    # -----------------------------------------------------------
    def __new__(cls, name, signature, tags=None):
        # Subclasses pass NotImplemented for name/signature to defer the
        # computation to __getattr__ (lazy evaluation).
        self = super(EntityBase, cls).__new__(cls)
        if name is not NotImplemented:
            if not name:
                raise ErrorEntityNameEmpty()
            self.name = name
        if signature is not NotImplemented:
            self.signature = signature
        self.tags = frozenset(to_sequence(tags))
        return self
    # -----------------------------------------------------------
    def __hash__(self):
        # Hash by identity only, so equal-id entities collide in sets/dicts
        # even when their signatures differ.
        return hash(self.id)
    # -----------------------------------------------------------
    def get(self):
        """
        Returns value of the entity
        """
        raise NotImplementedError(
            "Abstract method. It should be implemented in a child class.")
    # -----------------------------------------------------------
    def get_id(self):
        # Identity covers the name plus the concrete class, so entities of
        # different types sharing a name do not clash.
        cls = self.__class__
        return simple_object_signature((self.name,
                                        cls.__name__,
                                        cls.__module__))
    # -----------------------------------------------------------
    def get_name(self):
        raise NotImplementedError(
            "Abstract method. It should be implemented in a child class.")
    # -----------------------------------------------------------
    def get_signature(self):
        raise NotImplementedError(
            "Abstract method. It should be implemented in a child class.")
    # -----------------------------------------------------------
    def __getattr__(self, attr):
        # Lazily compute 'signature', 'name' and 'id' on first access and
        # cache them in their slots so later reads bypass __getattr__.
        if attr == 'signature':
            self.signature = signature = self.get_signature()
            return signature
        elif attr == 'name':
            self.name = name = self.get_name()
            return name
        elif attr == 'id':
            self.id = entity_id = self.get_id()
            return entity_id
        raise AttributeError("Unknown attribute: '%s'" % (attr,))
    # -----------------------------------------------------------
    def __getnewargs__(self):
        """Return the constructor arguments used to rebuild the entity on unpickle."""
        raise NotImplementedError(
            "Abstract method. It should be implemented in a child class.")
    # -----------------------------------------------------------
    def is_actual(self):
        """
        Checks whether the entity is actual or not
        """
        return bool(self.signature)
    # -----------------------------------------------------------
    def get_actual(self):
        """
        Returns an actual entity.
        If the current entity is actual then it will be simply returned.
        """
        return self
    # -----------------------------------------------------------
    # Pickling stores no instance state; __getnewargs__ in subclasses
    # supplies everything needed to reconstruct the entity via __new__.
    def __getstate__(self):
        return {}
    def __setstate__(self, state):
        pass
    # -----------------------------------------------------------
    def __eq__(self, other):
        # Entities match only when both identity and content signature agree.
        return (self.id == other.id) and \
               (self.signature == other.signature)
    def __ne__(self, other):
        return not self.__eq__(other)
    # -----------------------------------------------------------
    def __str__(self):
        return cast_str(self.get())
    # -----------------------------------------------------------
    def remove(self):
        # Default: nothing to delete; subclasses with real storage override.
        pass
# ==============================================================================
@pickleable
class SimpleEntity (EntityBase):
    """Entity wrapping an arbitrary value, signed by its content."""

    __slots__ = ('data', )

    def __new__(cls, data=None, name=None, signature=None, tags=None):
        # No data means no signature; otherwise derive one from the content
        # unless the caller supplied it explicitly.
        if data is None:
            signature = None
        elif signature is None:
            signature = simple_object_signature(data)
        name = name or signature
        entity = super(SimpleEntity, cls).__new__(cls, name, signature, tags)
        entity.data = data
        return entity

    def get(self):
        """Return the wrapped value."""
        return self.data

    def __getnewargs__(self):
        # Omit values which __new__ can re-derive, keeping pickles compact.
        return (self.data,
                self.name if self.name != self.signature else None,
                self.signature,
                self.tags or None)
# ==============================================================================
@pickleable
class NullEntity (EntityBase):
    """Placeholder entity: carries no value and is never considered actual."""

    def __new__(cls):
        return super(NullEntity, cls).__new__(cls, 'N', None)

    def get(self):
        """Null entities have no value."""
        return None

    def __getnewargs__(self):
        # No constructor arguments are needed to rebuild a null entity.
        return ()

    def is_actual(self):
        """A null entity is always out of date."""
        return False
# ==============================================================================
@pickleable
class SignatureEntity (EntityBase):
    """Entity whose value is a raw signature (bytes or bytearray)."""

    def __new__(cls, data=None, name=None, tags=None):
        if data is not None:
            if not isinstance(data, (bytes, bytearray)):
                raise ErrorSignatureEntityInvalidDataType(data)
            # The signature doubles as the name when none is given.
            name = name or data
        return super(SignatureEntity, cls).__new__(cls, name, data, tags)

    def get(self):
        """The signature itself is the entity's value."""
        return self.signature

    def __getnewargs__(self):
        # Drop values that __new__ re-derives from the signature.
        return (self.signature,
                self.name if self.name != self.signature else None,
                self.tags or None)
# ==============================================================================
| 28.467128 | 80 | 0.473076 |
a40d600f17d45dfd0190e0cb420c7afbc337a08a | 11,129 | py | Python | src/sdk/pynni/nni/nas/pytorch/mutables.py | LIIXII/nni | f1ce1648b24d2668c2eb8fa02b158a7b6da80ea4 | [
"MIT"
] | 2 | 2020-04-19T15:57:46.000Z | 2020-04-28T18:14:19.000Z | src/sdk/pynni/nni/nas/pytorch/mutables.py | bobo4u/nni | f1ce1648b24d2668c2eb8fa02b158a7b6da80ea4 | [
"MIT"
] | null | null | null | src/sdk/pynni/nni/nas/pytorch/mutables.py | bobo4u/nni | f1ce1648b24d2668c2eb8fa02b158a7b6da80ea4 | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
import torch.nn as nn
from nni.nas.pytorch.utils import global_mutable_counting
logger = logging.getLogger(__name__)
class Mutable(nn.Module):
    """
    Mutable is designed to function as a normal layer, with all necessary operators' weights.
    States and weights of architectures should be included in mutator, instead of the layer itself.

    Mutable has a key, which marks the identity of the mutable. This key can be used by users to share
    decisions among different mutables. In mutator's implementation, mutators should use the key to
    distinguish different mutables. Mutables that share the same key should be "similar" to each other.
    Currently the default scope for keys is global. By default, the keys use a global counter from 1 to
    produce unique ids.

    Parameters
    ----------
    key : str
        The key of mutable.

    Notes
    -----
    The counter is program level, but mutables are model level. In case multiple models are defined, and
    you want to have `counter` starting from 1 in the second model, it's recommended to assign keys manually
    instead of using automatic keys.
    """
    def __init__(self, key=None):
        super().__init__()
        if key is not None:
            if not isinstance(key, str):
                key = str(key)
                logger.warning("Warning: key \"%s\" is not string, converted to string.", key)
            self._key = key
        else:
            # Auto-generated key, e.g. "LayerChoice1", from a global counter.
            self._key = self.__class__.__name__ + str(global_mutable_counting())
        self.init_hook = self.forward_hook = None
    def __deepcopy__(self, memodict=None):
        raise NotImplementedError("Deep copy doesn't work for mutables.")
    def __call__(self, *args, **kwargs):
        # Refuse to run before a mutator has been applied.
        self._check_built()
        return super().__call__(*args, **kwargs)
    def set_mutator(self, mutator):
        # Stored via __dict__ so nn.Module does not register the mutator
        # as a submodule of this mutable.
        if "mutator" in self.__dict__:
            raise RuntimeError("`set_mutator` is called more than once. Did you parse the search space multiple times? "
                               "Or did you apply multiple fixed architectures?")
        self.__dict__["mutator"] = mutator
    def forward(self, *inputs):
        raise NotImplementedError
    @property
    def key(self):
        """
        Read-only property of key.
        """
        return self._key
    @property
    def name(self):
        """
        After the search space is parsed, it will be the module name of the mutable;
        before that, it falls back to the mutable's key.
        """
        # Bug fix: previously returned the literal string "_key" instead of
        # the actual key when _name was not yet assigned.
        return self._name if hasattr(self, "_name") else self._key
    @name.setter
    def name(self, name):
        self._name = name
    def _check_built(self):
        if not hasattr(self, "mutator"):
            raise ValueError(
                "Mutator not set for {}. You might have forgotten to initialize and apply your mutator. "
                "Or did you initialize a mutable on the fly in forward pass? Move to `__init__` "
                "so that trainer can locate all your mutables. See NNI docs for more details.".format(self))
    def __repr__(self):
        return "{} ({})".format(self.name, self.key)
class MutableScope(Mutable):
    """
    Mutable scope marks a subgraph/submodule to help mutators make better decisions.
    If not annotated with mutable scope, search space will be flattened as a list. However, some mutators might
    need to leverage the concept of a "cell". So if a module is defined as a mutable scope, everything in it will
    look like "sub-search-space" in the scope. Scopes can be nested.
    There are two ways mutators can use mutable scope. One is to traverse the search space as a tree during initialization
    and reset. The other is to implement `enter_mutable_scope` and `exit_mutable_scope`. They are called before and after
    the forward method of the class inheriting mutable scope.
    Mutable scopes are also mutables that are listed in the mutator.mutables (search space), but they are not supposed
    to appear in the dict of choices.

    Parameters
    ----------
    key : str
        Key of mutable scope.
    """
    def __init__(self, key):
        super().__init__(key=key)
    def __call__(self, *args, **kwargs):
        # Bracket the forward pass with enter/exit notifications so the
        # mutator can track scope nesting.
        # NOTE: _check_built is deliberately inside the try; if it raises
        # (no mutator set), the finally clause also touches self.mutator and
        # the resulting AttributeError masks the original ValueError.
        try:
            self._check_built()
            self.mutator.enter_mutable_scope(self)
            return super().__call__(*args, **kwargs)
        finally:
            self.mutator.exit_mutable_scope(self)
class LayerChoice(Mutable):
    """
    Mutable layer owning a list of candidate operators; the mutator decides
    which of them process the input. Zero or several candidates may be
    selected, and multiple results are combined according to ``reduction``.
    Layer choice does not allow itself to be nested.

    Parameters
    ----------
    op_candidates : list of nn.Module
        Candidate operators to choose among.
    reduction : str
        ``mean``, ``concat``, ``sum`` or ``none`` — how multiple selected
        outputs are merged (``none`` yields a list, ``concat`` concatenates
        along dimension 1).
    return_mask : bool
        When true, ``forward`` returns ``(output, mask)``; otherwise only
        the output tensor.
    key : str
        Identity key shared with mutables that should decide together.

    Attributes
    ----------
    length : int
        Number of candidate operators.
    """

    def __init__(self, op_candidates, reduction="sum", return_mask=False, key=None):
        super().__init__(key=key)
        self.length = len(op_candidates)
        self.choices = nn.ModuleList(op_candidates)
        self.reduction = reduction
        self.return_mask = return_mask

    def forward(self, *inputs):
        """
        Delegate the actual selection to the mutator.

        Returns
        -------
        tuple of tensors
            ``(output, mask)`` when ``return_mask`` is set, otherwise output only.
        """
        out, mask = self.mutator.on_forward_layer_choice(self, *inputs)
        return (out, mask) if self.return_mask else out
class InputChoice(Mutable):
    """
    Input choice selects ``n_chosen`` inputs from ``choose_from`` (contains ``n_candidates`` keys). For beginners,
    use ``n_candidates`` instead of ``choose_from`` is a safe option. To get the most power out of it, you might want to
    know about ``choose_from``.
    The keys in ``choose_from`` can be keys that appear in past mutables, or ``NO_KEY`` if there are no suitable ones.
    The keys are designed to be the keys of the sources. To help mutators make better decisions,
    mutators might be interested in how the tensors to choose from come into place. For example, the tensor is the
    output of some operator, some node, some cell, or some module. If this operator happens to be a mutable (e.g.,
    ``LayerChoice`` or ``InputChoice``), it has a key naturally that can be used as a source key. If it's a
    module/submodule, it needs to be annotated with a key: that's where a :class:`MutableScope` is needed.
    In the example below, ``input_choice`` is a 4-choose-any. The first 3 is semantically output of cell1, output of cell2,
    output of cell3 with respectively. Notice that an extra max pooling is followed by cell1, indicating x1 is not
    "actually" the direct output of cell1.
    .. code-block:: python
        class Cell(MutableScope):
            pass
        class Net(nn.Module):
            def __init__(self):
                self.cell1 = Cell("cell1")
                self.cell2 = Cell("cell2")
                self.op = LayerChoice([conv3x3(), conv5x5()], key="op")
                self.input_choice = InputChoice(choose_from=["cell1", "cell2", "op", InputChoice.NO_KEY])
            def forward(self, x):
                x1 = max_pooling(self.cell1(x))
                x2 = self.cell2(x)
                x3 = self.op(x)
                x4 = torch.zeros_like(x)
                return self.input_choice([x1, x2, x3, x4])

    Parameters
    ----------
    n_candidates : int
        Number of inputs to choose from.
    choose_from : list of str
        List of source keys to choose from. At least of one of ``choose_from`` and ``n_candidates`` must be fulfilled.
        If ``n_candidates`` has a value but ``choose_from`` is None, it will be automatically treated as ``n_candidates``
        number of empty string.
    n_chosen : int
        Recommended inputs to choose. If None, mutator is instructed to select any.
    reduction : str
        ``mean``, ``concat``, ``sum`` or ``none``. See :class:`LayerChoice`.
    return_mask : bool
        If ``return_mask``, return output tensor and a mask. Otherwise return tensor only.
    key : str
        Key of the input choice.
    """
    # Placeholder key for a candidate slot with no identifiable source.
    NO_KEY = ""
    def __init__(self, n_candidates=None, choose_from=None, n_chosen=None,
                 reduction="sum", return_mask=False, key=None):
        super().__init__(key=key)
        # precondition check: fill in whichever of the two is missing, then
        # validate their mutual consistency.
        assert n_candidates is not None or choose_from is not None, "At least one of `n_candidates` and `choose_from`" \
            "must be not None."
        if choose_from is not None and n_candidates is None:
            n_candidates = len(choose_from)
        elif choose_from is None and n_candidates is not None:
            choose_from = [self.NO_KEY] * n_candidates
        assert n_candidates == len(choose_from), "Number of candidates must be equal to the length of `choose_from`."
        assert n_candidates > 0, "Number of candidates must be greater than 0."
        assert n_chosen is None or 0 <= n_chosen <= n_candidates, "Expected selected number must be None or no more " \
            "than number of candidates."
        self.n_candidates = n_candidates
        # Copy to decouple from the caller's list.
        self.choose_from = choose_from.copy()
        self.n_chosen = n_chosen
        self.reduction = reduction
        self.return_mask = return_mask
    def forward(self, optional_inputs):
        """
        Forward method of InputChoice.

        Parameters
        ----------
        optional_inputs : list or dict
            Recommended to be a dict. As a dict, inputs will be converted to a list that follows the order of
            ``choose_from`` in initialization. As a list, inputs must follow the semantic order that is the same as
            ``choose_from``.

        Returns
        -------
        tuple of tensors
            Output and selection mask. If ``return_mask`` is ``False``, only output is returned.
        """
        optional_input_list = optional_inputs
        if isinstance(optional_inputs, dict):
            optional_input_list = [optional_inputs[tag] for tag in self.choose_from]
        assert isinstance(optional_input_list, list), \
            "Optional input list must be a list, not a {}.".format(type(optional_input_list))
        assert len(optional_inputs) == self.n_candidates, \
            "Length of the input list must be equal to number of candidates."
        out, mask = self.mutator.on_forward_input_choice(self, optional_input_list)
        if self.return_mask:
            return out, mask
        return out
| 40.469091 | 123 | 0.635547 |
145303c5523f5021488010f8facc5c7bd46a9cd0 | 469 | py | Python | 05for.py | Ulyssesss/Learn-Python | 03f0b0ff52158ea239e89e8604c2744fe9a6e341 | [
"Apache-2.0"
] | 1 | 2017-08-27T15:36:25.000Z | 2017-08-27T15:36:25.000Z | 05for.py | Ulyssesss/learn-python | 03f0b0ff52158ea239e89e8604c2744fe9a6e341 | [
"Apache-2.0"
] | null | null | null | 05for.py | Ulyssesss/learn-python | 03f0b0ff52158ea239e89e8604c2744fe9a6e341 | [
"Apache-2.0"
] | null | null | null | a = range(5)
b = range(3, 5)

# A range prints as its description; convert to list/tuple to see the items.
for value in (a, b, list(a), list(b), tuple(a), tuple(b)):
    print(value)

names = ['Jack', 'Bob', 'Tony']
for name in names:
    print(name)

for x in range(10):
    print(x)
print('-----')

# The same counting loop, expressed with while.
i = 0
while i < 3:
    print(i)
    i = i + 1
print('-----')

# break leaves the loop as soon as the counter passes 3.
j = 0
while j < 100:
    print(j)
    j = j + 1
    if j > 3:
        break
print('-----')

# continue skips even numbers, so only the odd ones get printed.
k = 0
while k < 10:
    k = k + 1
    if k % 2 == 0:
        continue
    print(k)
| 11.166667 | 31 | 0.471215 |
2466726e5c726acbc73ffed0478270f455ddb6a7 | 494 | py | Python | ex9.py | BjornChrisnach/Learn_python_3_The_hard_way | ab187c4755d4878724761bbe5f28678fce27cfc7 | [
"MIT"
] | null | null | null | ex9.py | BjornChrisnach/Learn_python_3_The_hard_way | ab187c4755d4878724761bbe5f28678fce27cfc7 | [
"MIT"
] | null | null | null | ex9.py | BjornChrisnach/Learn_python_3_The_hard_way | ab187c4755d4878724761bbe5f28678fce27cfc7 | [
"MIT"
] | null | null | null | # Here's some new strange stuff. remember type it exactly.
# Exercise: escape sequences and triple-quoted strings.

days = "Mon Tue Wed Thu Fri Sat Sun"
# The \n escapes make each month print on its own line.
months = "Jan\nFeb\nMar\nApr\nMay\nJun\njul\nAug"

print("Here are the days: ", days)
print("Here are the months: ", months)

# Triple quotes let one literal span several lines verbatim.
paragraph = """
There's something going on here.
With the three double-quotes.
We'll be able to type as much as we like.
Even 4 lines if we want, or 5, or 6.
"""
print(paragraph)
7476889b700b2bdd183843104eb74ddd3b5a7336 | 16,277 | py | Python | main.py | mohamed-abdelaziz721/Soud-Equalizer | 1cc78fd37eca098727582c3569c1bb68148852fe | [
"MIT"
] | null | null | null | main.py | mohamed-abdelaziz721/Soud-Equalizer | 1cc78fd37eca098727582c3569c1bb68148852fe | [
"MIT"
] | null | null | null | main.py | mohamed-abdelaziz721/Soud-Equalizer | 1cc78fd37eca098727582c3569c1bb68148852fe | [
"MIT"
] | null | null | null | import sys, os, shutil
from librosa.core.spectrum import _spectrogram
import numpy as np
import pyqtgraph as pg
from PyQt5 import QtWidgets as qtw
from PyQt5 import QtCore as qtc
from PyQt5 import QtGui as qtg
from datetime import datetime
import pyqtgraph
import pyqtgraph.exporters
import matplotlib.pyplot as plt
from main_layout import Ui_MainWindow
import scipy.io.wavfile
import librosa.display
from PyQt5.QtMultimedia import QMediaPlayer, QMediaContent
from PDF import PDF
def time_stamp(ms):
    """Format a duration in milliseconds as M:SS, or H:MM:SS from one hour up."""
    total_seconds = ms // 1_000
    minutes, seconds = divmod(total_seconds, 60)
    hours, minutes = divmod(minutes, 60)
    if hours:
        return "%d:%02d:%02d" % (hours, minutes, seconds)
    return "%d:%02d" % (minutes, seconds)
class MainWindow(qtw.QMainWindow):
    """Two-channel (original / equalized) audio equalizer window.

    Loads a WAV file, shows its waveform and mel spectrogram before and after
    applying ten per-band gains, plays both versions, and can export the
    session as a PDF plus the equalized WAV.
    """
    def __init__(self):
        super().__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.show()
        # Raw samples and their equalized counterpart; filled on file load.
        self.samples = None
        self.sampling_rate = None
        self.samples_after = None
        self.first_turn = True # This Prevents the Spectrogram range variables from being overwritten
        self.PLOT_DIR = 'Plots'
        self.PDF_DIR = 'PDFs'
        self.ui.save_session_data.clicked.connect(lambda: self.save_session())
        self.ui.actionSave.triggered.connect(lambda: self.save_session())
        self.audio_player_before = QMediaPlayer()
        self.audio_player_after = QMediaPlayer()
        # 1 ms notify interval keeps the playback cursor updates smooth.
        self.audio_player_before.setNotifyInterval(1)
        self.audio_player_after.setNotifyInterval(1)
        # Gain factors selectable by each band slider (slider value = index).
        self.bands_powers = [0.0, 0.25, 0.50, 0.75, 1.0, 2.0, 3.0, 4.0, 5.0]
        self.spectrogram_power_range = {'min': np.array([]), 'max': np.array([])}
        self.ui.min_pixel_intensity.sliderReleased.connect(lambda: self.spectrogram_pixels_intensity('min'))
        self.ui.max_pixel_intensity.sliderReleased.connect(lambda: self.spectrogram_pixels_intensity('max'))
        self.spectrogram_time_min, self.spectrogram_time_max = 0, 0 # Sync With Play
        # Band index -> slider widget / value label; disabled until a file loads.
        self.band_slider = {}
        self.band_label = {}
        for index in range(10):
            self.band_slider[index] = getattr(self.ui, 'band_{}'.format(index+1))
            self.band_label[index] = getattr(self.ui, 'band_{}_label'.format(index+1))
        for slider in self.band_slider.values():
            slider.setDisabled(True)
            slider.setStyleSheet('selection-background-color: grey')
        for index, slider in self.band_slider.items():
            # index=index binds the loop variable at lambda-definition time.
            slider.sliderReleased.connect(lambda index=index: self.slider_gain_updated(index))
        self.available_palettes = ['twilight', 'Blues', 'Greys', 'ocean', 'nipy_spectral']
        self.current_color_palette = self.available_palettes[0]
        self.modified_signal = np.array([])
        self.current_slider_gain = [1.0] * 10
        # Wire the zoom/seek buttons of both channels to shared handlers.
        self.controlers = {'before': [], 'after': []}
        for button, function in zip(['zoom_in', 'zoom_out', 'jump_forward', 'jump_back'], [self.zoomin, self.zoomout, self.forward, self.back]):
            self.controlers['before'].append((getattr(self.ui, '{}_btn_before'.format(button)), function))
            self.controlers['after'].append((getattr(self.ui, '{}_btn_after'.format(button)), function))
        for channel in self.controlers.values():
            for signal in channel:
                signal[0].clicked.connect(signal[1])
        # Per-channel widget lookups keyed by 'before'/'after'.
        self.plot_widget = {
            'before': self.ui.graph_before,
            'after': self.ui.graph_after
        }
        self.spectrogram_widget = {
            'before': self.ui.spectrogram_before,
            'after': self.ui.spectrogram_after
        }
        self.data_line = {
            'before': None,
            'after': None
        }
        self.playback_position = {
            'before': None,
            'after': None
        }
        self.time_seeker = {
            'before': self.ui.time_seeker_before,
            'after': self.ui.time_seeker_after
        }
        self.total_time = {
            'before': self.ui.total_time_before,
            'after': self.ui.total_time_after
        }
        self.ui.actionExit.triggered.connect(self.close)
        self.ui.actionNew.triggered.connect(self.new_instance)
        self.ui.actionOpen.triggered.connect(self.open_audio_file)
        self.ui.play_btn_before.clicked.connect(self.audio_player_before.play)
        self.ui.pause_btn_before.clicked.connect(self.audio_player_before.pause)
        self.ui.stop_btn_before.clicked.connect(self.audio_player_before.stop)
        self.ui.play_btn_after.clicked.connect(self.audio_player_after.play)
        self.ui.pause_btn_after.clicked.connect(self.audio_player_after.pause)
        self.ui.stop_btn_after.clicked.connect(self.audio_player_after.stop)
        self.ui.palettes_box.currentTextChanged.connect(self.change_palette)
        # The combo text minus its first character is parsed as the playback
        # rate (item format, e.g. 'x1.5', comes from the .ui file — TODO confirm).
        self.ui.playback_speed_before.currentIndexChanged.connect(lambda: self.audio_player_before.setPlaybackRate(float(self.ui.playback_speed_before.currentText()[1:])))
        self.audio_player_before.durationChanged.connect(lambda duration: self.update_duration(duration, 'before'))
        self.ui.playback_speed_after.currentIndexChanged.connect(lambda: self.audio_player_after.setPlaybackRate(float(self.ui.playback_speed_after.currentText()[1:])))
        self.audio_player_after.durationChanged.connect(lambda duration: self.update_duration(duration, 'after'))
    def new_instance(self):
        """Open an additional, independent equalizer window."""
        # NOTE(review): this rebinds self.new_instance from method to instance
        # attribute, so the action works only once per window — confirm intent.
        self.new_instance = MainWindow()
        self.new_instance.show()
    def open_audio_file(self):
        """Ask for a WAV file, load it, and populate the 'before' channel."""
        path = qtw.QFileDialog.getOpenFileName(None, 'Load Audio', './', "Audio File(*.wav)")[0]
        # A loaded file makes the equalizer and intensity sliders usable.
        for slider in self.band_slider.values():
            slider.setDisabled(False)
            slider.setStyleSheet('selection-background-color: blue')
        self.ui.max_pixel_intensity.setDisabled(False)
        self.ui.max_pixel_intensity.setStyleSheet('selection-background-color: blue')
        self.ui.min_pixel_intensity.setDisabled(False)
        self.ui.min_pixel_intensity.setStyleSheet('selection-background-color: blue')
        self.audio_player_before.setMedia(QMediaContent(qtc.QUrl.fromLocalFile(path)))
        self.audio_player_before.positionChanged.connect(lambda position: self.update_timestamp(position, self.ui.current_time_before, self.ui.time_seeker_before, 'before'))
        self.ui.time_seeker_before.valueChanged.connect(self.audio_player_before.setPosition)
        self.sampling_rate, self.samples = scipy.io.wavfile.read(path)
        self.plot_graph(self.samples, self.sampling_rate, 'before')
        self.plot_spectrogram(self.samples, self.sampling_rate, 'before')
        # Applies the current slider gains and fills the 'after' channel too.
        self.modify_signal()
    def plot_graph(self, samples, sampling_rate, widget):
        """Draw the normalized waveform of `samples` on channel `widget` ('before'/'after')."""
        peak_value = np.amax(samples)
        normalized_data = samples / peak_value
        length = samples.shape[0] / sampling_rate
        time = list(np.linspace(0, length, samples.shape[0]))
        drawing_pen = pg.mkPen(color=(255, 0, 0), width=0.5)
        # Replace any previously drawn curve instead of stacking new ones.
        self.plot_widget[widget].removeItem(self.data_line[widget])
        self.data_line[widget] = self.plot_widget[widget].plot(
            time, normalized_data, pen=drawing_pen)
        self.plot_widget[widget].plotItem.setLabel(axis='left', text='Normalized Amplitude')
        self.plot_widget[widget].plotItem.setLabel(axis='bottom', text='time [s]')
        self.plot_widget[widget].plotItem.getViewBox().setLimits(xMin=0, xMax=np.max(time), yMin=-1.1, yMax=1.1)
        self.spectrogram_time_min, self.spectrogram_time_max = self.plot_widget[widget].plotItem.getAxis('bottom').range
        # Vertical region that tracks playback (moved by update_timestamp).
        self.playback_position[widget] = pyqtgraph.LinearRegionItem(values=(0, 0))
        self.plot_widget[widget].plotItem.getViewBox().addItem(self.playback_position[widget])
    def plot_spectrogram(self, samples, sampling_rate, widget):
        """Render the mel spectrogram (in dB) of `samples` on channel `widget`."""
        self.spectrogram_widget[widget].getFigure().clear()
        spectrogram_axes = self.spectrogram_widget[widget].getFigure().add_subplot(111)
        data = samples.astype('float32')
        frequency_magnitude = np.abs(librosa.stft(data))**2
        mel_spectrogram = librosa.feature.melspectrogram(S=frequency_magnitude, y=data, sr=sampling_rate, n_mels=128)
        decibel_spectrogram = librosa.power_to_db(mel_spectrogram, ref=np.max)
        if self.first_turn:
            # First plot only: derive the selectable vmin/vmax steps from the
            # data's dB range; the extra appended element holds the current value.
            min_intensity = np.ceil(np.amin(decibel_spectrogram))
            max_intensity = np.ceil(np.amax(decibel_spectrogram))
            self.spectrogram_power_range['min'] = np.linspace(min_intensity, min_intensity/2, 10).astype('int')
            self.spectrogram_power_range['min'] = np.append(self.spectrogram_power_range['min'], np.array([self.spectrogram_power_range['min'][0]]))
            self.spectrogram_power_range['max'] = np.linspace((min_intensity+1)/2, max_intensity, 10).astype('int')
            self.spectrogram_power_range['max'] = np.append(self.spectrogram_power_range['max'], np.array([self.spectrogram_power_range['max'][-1]]))
            self.ui.min_pixel_intensity_lab.setText(str(self.spectrogram_power_range['min'][-1]))
            self.ui.max_pixel_intensity_lab.setText(str(self.spectrogram_power_range['max'][-1]))
            self.first_turn = False
        spectrogram_image = librosa.display.specshow(decibel_spectrogram, x_axis='time', y_axis='mel', sr=sampling_rate,
            ax=spectrogram_axes, cmap=self.current_color_palette, vmin=self.spectrogram_power_range['min'][-1], vmax=self.spectrogram_power_range['max'][-1])
        self.spectrogram_widget[widget].getFigure().colorbar(spectrogram_image, ax=spectrogram_axes, format='%+2.0f dB')
        # Keep the spectrogram's time window aligned with the waveform view.
        spectrogram_axes.set(xlim=[self.spectrogram_time_min, self.spectrogram_time_max])
        self.spectrogram_widget[widget].draw()
    def modify_signal(self):
        """Apply the ten per-band gains in the frequency domain and refresh the 'after' channel."""
        frequency_content = np.fft.rfftfreq(len(self.samples), d=1/self.sampling_rate)
        modified_signal = np.fft.rfft(self.samples)
        for index, slider_gain in enumerate(self.current_slider_gain):
            # Band i covers the (i, i+1] tenth of the Nyquist range.
            frequency_range_min = (index + 0) * self.sampling_rate / (2 * 10)
            frequency_range_max = (index + 1) * self.sampling_rate / (2 * 10)
            range_min_frequency = frequency_content > frequency_range_min
            range_max_frequency = frequency_content <= frequency_range_max
            slider_min_max = []
            for is_in_min_frequency, is_in_max_frequency in zip(range_min_frequency, range_max_frequency):
                slider_min_max.append(is_in_min_frequency and is_in_max_frequency)
            modified_signal[slider_min_max] *= slider_gain
        self.samples_after = np.fft.irfft(modified_signal)
        self.save_output_wav()
        self.plot_graph(self.samples_after, self.sampling_rate, 'after')
        self.plot_spectrogram(self.samples_after, self.sampling_rate, 'after')
    def spectrogram_pixels_intensity(self, widget):
        """Pick a new vmin/vmax bound (`widget` is 'min' or 'max') and redraw both spectrograms."""
        slider = getattr(self.ui, '{}_pixel_intensity'.format(widget))
        # The last array slot stores the currently selected bound.
        self.spectrogram_power_range[widget][-1] = self.spectrogram_power_range[widget][int(slider.value())]
        label = getattr(self.ui, '{}_pixel_intensity_lab'.format(widget))
        label.setText(str(self.spectrogram_power_range[widget][-1]))
        self.plot_spectrogram(self.samples, self.sampling_rate, 'before')
        self.plot_spectrogram(self.samples_after, self.sampling_rate, 'after')
    def change_palette(self):
        """Switch the spectrogram colormap to the one picked in the combo box."""
        self.current_color_palette = self.available_palettes[self.ui.palettes_box.currentIndex()]
        self.plot_spectrogram(self.samples, self.sampling_rate, 'before')
        self.plot_spectrogram(self.samples_after, self.sampling_rate, 'after')
    def slider_gain_updated(self, index):
        """Store the released band slider's gain and reapply the equalizer."""
        slider_gain = self.bands_powers[self.band_slider[index].value()]
        self.band_label[index].setText(f'{slider_gain}')
        self.current_slider_gain[index] = slider_gain
        self.modify_signal()
    def update_duration(self, duration, widget):
        """Resize the seek slider and total-time label when the media duration (ms) changes."""
        self.time_seeker[widget].setMaximum(duration)
        if duration >= 0:
            self.total_time[widget].setText(time_stamp(duration))
    def update_timestamp(self, position, currentTimeLabel, timeSlider, widget):
        """Follow playback: refresh label/slider and scroll the waveform view."""
        if position >= 0:
            currentTimeLabel.setText(time_stamp(position))
        # Block signals so moving the slider here doesn't re-trigger a seek.
        timeSlider.blockSignals(True)
        timeSlider.setValue(position)
        timeSlider.blockSignals(False)
        self.playback_position[widget].setRegion((position/1000, position/1000))
        minRange, maxRange = self.plot_widget[widget].plotItem.getAxis('bottom').range
        # Page the visible window forward/backward when the cursor leaves it.
        if(position >= maxRange * 1000):
            self.plot_widget[widget].plotItem.getViewBox().translateBy((maxRange-minRange), 0)
            self.synchronize()
        if(position <= minRange * 1000):
            self.plot_widget[widget].plotItem.getViewBox().translateBy(-minRange)
            self.synchronize()
    def zoomin(self) -> None:
        """Zoom the time axis in; both channels stay linked via synchronize()."""
        self.ui.graph_before.plotItem.getViewBox().scaleBy((0.75, 1.0))
        self.synchronize()
    def zoomout(self) -> None:
        """Zoom the time axis out."""
        self.ui.graph_before.plotItem.getViewBox().scaleBy((1.25, 1.0))
        self.synchronize()
    def back(self):
        """Pan the view to the left."""
        self.ui.graph_before.plotItem.getViewBox().translateBy((-0.5, 0.0))
        self.synchronize()
    def forward(self):
        """Pan the view to the right."""
        self.ui.graph_before.plotItem.getViewBox().translateBy((0.5, 0.0))
        self.synchronize()
    def synchronize(self):
        """Link both waveform x-axes and redraw the spectrograms over the same window."""
        self.ui.graph_before.plotItem.getViewBox().setXLink(self.ui.graph_after.plotItem)
        self.spectrogram_time_min, self.spectrogram_time_max = self.ui.graph_before.plotItem.getAxis('bottom').range
        self.plot_spectrogram(self.samples, self.sampling_rate, 'before')
        self.plot_spectrogram(self.samples_after, self.sampling_rate, 'after')
    def save_output_wav(self):
        """Write the equalized samples to a fresh wav/ file and load it in the 'after' player."""
        # Recreate the wav/ directory so only the newest render is kept.
        try:
            shutil.rmtree('wav')
            os.mkdir('wav')
        except:
            os.mkdir('wav')
        # Timestamped filename keeps each render unique.
        self.now = datetime.now()
        self.now = f'{self.now:%Y-%m-%d %H-%M-%S.%f %p}'
        scipy.io.wavfile.write(f"wav/SBME{self.now}.wav", self.sampling_rate, self.samples_after.astype(np.int16))
        path = os.listdir('wav')
        self.audio_player_after.setMedia(QMediaContent(qtc.QUrl.fromLocalFile(f'wav/{path[0]}')))
        self.audio_player_after.positionChanged.connect(lambda position: self.update_timestamp(position, self.ui.current_time_after, self.ui.time_seeker_after, 'after'))
        self.ui.time_seeker_after.valueChanged.connect(self.audio_player_after.setPosition)
    def save_session(self):
        """Export plots/spectrograms into a user-chosen PDF, then save the equalized WAV."""
        if not self.sampling_rate:
            qtw.QMessageBox.information(
                self, 'failed', 'You have to plot a signal first')
            return
        # Work in a clean Plots/ directory for the exported images.
        try:
            shutil.rmtree(self.PLOT_DIR)
            os.mkdir(self.PLOT_DIR)
        except FileNotFoundError:
            os.mkdir(self.PLOT_DIR)
        for index, channel in enumerate(['before', 'after']):
            exporter = pg.exporters.ImageExporter(
                self.plot_widget[channel].scene())
            exporter.export(f'{self.PLOT_DIR}/plot-{index}.png')
            self.spectrogram_widget[channel].fig.savefig(f'{self.PLOT_DIR}/spec-{index}.png')
        pdf = PDF()
        plots_per_page = pdf.construct(self.PLOT_DIR)
        for page_images in plots_per_page:
            pdf.print_page(page_images, self.PLOT_DIR)
        outFile = qtw.QFileDialog.getSaveFileName(
            None, 'Save Session', './', "PDF File(*.pdf)")
        pdf.output(outFile[0], 'F')
        # The image directory is only an intermediate; best-effort cleanup.
        try:
            shutil.rmtree(self.PLOT_DIR)
        except:
            pass
        qtw.QMessageBox.information(self, 'success', 'PDF has been created')
        sampling_rate, samples = scipy.io.wavfile.read(f"wav/SBME{self.now}.wav")
        outFile = qtw.QFileDialog.getSaveFileName(
            None, 'Save Session', './', "Wav File(*.wav)")
        scipy.io.wavfile.write(outFile[0], sampling_rate, samples.astype(np.int16))
        qtw.QMessageBox.information(self, 'success', 'Wav has been saved')
if __name__ == '__main__':
    # Bootstrap Qt, apply the Fusion theme, and hand control to the event loop.
    application = qtw.QApplication(sys.argv)
    application.setStyle("Fusion")
    window = MainWindow()
    sys.exit(application.exec_())
c7f4c5208ef7644eb0cba721c0758433a8d11914 | 85,211 | py | Python | nova/tests/unit/virt/libvirt/test_imagebackend.py | LinShuicheng/stx-nova | 0b03ed64e2c3aa32eb07bd7e315ca1248d9c451c | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/virt/libvirt/test_imagebackend.py | LinShuicheng/stx-nova | 0b03ed64e2c3aa32eb07bd7e315ca1248d9c451c | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/virt/libvirt/test_imagebackend.py | LinShuicheng/stx-nova | 0b03ed64e2c3aa32eb07bd7e315ca1248d9c451c | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 Grid Dynamics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import errno
import os
import shutil
import tempfile
from castellan import key_manager
import ddt
import fixtures
import mock
from oslo_concurrency import lockutils
from oslo_config import fixture as config_fixture
from oslo_utils import imageutils
from oslo_utils import units
from oslo_utils import uuidutils
from nova.compute import utils as compute_utils
import nova.conf
from nova import context
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit import fake_processutils
from nova import utils
from nova.virt.image import model as imgmodel
from nova.virt import images
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt.storage import rbd_utils
from nova.virt.libvirt import utils as libvirt_utils
CONF = nova.conf.CONF
class FakeSecret(object):
    """Minimal stand-in for a libvirt secret object.

    Only value() is implemented; it always returns the same fixed
    payload that the base64 literal decodes to.
    """

    def value(self):
        # "MTIzNDU2Cg==" is the base64 encoding of b"123456\n".
        encoded = "MTIzNDU2Cg=="
        return base64.b64decode(encoded)
class FakeConn(object):
    """Minimal stand-in for a libvirt connection.

    secretLookupByUUIDString() ignores the given UUID and always hands
    back a fresh FakeSecret instance.
    """

    def secretLookupByUUIDString(self, uuid):
        secret = FakeSecret()
        return secret
@ddt.ddt
class _ImageTestCase(object):
    """Mixin of tests shared by every image backend test case.

    Concrete subclasses set ``self.image_class`` (and a ``SIZE`` class
    attribute) in their own setUp() before calling super().setUp().
    """
    def mock_create_image(self, image):
        # Replace the backend's create_image with a stub that simply runs
        # the template function against the base path, bypassing the real
        # backend-specific creation logic.
        def create_image(fn, base, size, *args, **kwargs):
            fn(target=base, *args, **kwargs)
        image.create_image = create_image
    def setUp(self):
        super(_ImageTestCase, self).setUp()
        self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
        self.INSTANCES_PATH = tempfile.mkdtemp(suffix='instances')
        self.fixture.config(disable_process_locking=True,
                            group='oslo_concurrency')
        self.flags(instances_path=self.INSTANCES_PATH)
        self.INSTANCE = objects.Instance(id=1, uuid=uuidutils.generate_uuid())
        self.DISK_INFO_PATH = os.path.join(self.INSTANCES_PATH,
                                           self.INSTANCE['uuid'], 'disk.info')
        self.NAME = 'fake.vm'
        self.TEMPLATE = 'template'
        self.CONTEXT = context.get_admin_context()
        self.PATH = os.path.join(
            libvirt_utils.get_instance_path(self.INSTANCE), self.NAME)
        # TODO(mikal): rename template_dir to base_dir and template_path
        # to cached_image_path. This will be less confusing.
        self.TEMPLATE_DIR = os.path.join(CONF.instances_path, '_base')
        self.TEMPLATE_PATH = os.path.join(self.TEMPLATE_DIR, 'template')
        # Ensure can_fallocate is not initialised on the class
        if hasattr(self.image_class, 'can_fallocate'):
            del self.image_class.can_fallocate
        # This will be used to mock some decorations like utils.synchronize
        def _fake_deco(func):
            return func
        self._fake_deco = _fake_deco
    def tearDown(self):
        super(_ImageTestCase, self).tearDown()
        shutil.rmtree(self.INSTANCES_PATH)
    def test_prealloc_image(self):
        # With preallocate_images=space, cache() must fallocate the disk,
        # plus run a one-off probe checking that fallocate works at all.
        CONF.set_override('preallocate_images', 'space')
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self)
        image = self.image_class(self.INSTANCE, self.NAME)
        def fake_fetch(target, *args, **kwargs):
            return
        self.stub_out('os.path.exists', lambda _: True)
        self.stub_out('os.access', lambda p, w: True)
        with mock.patch.object(image, 'get_disk_size', return_value=self.SIZE):
            # Call twice to verify testing fallocate is only called once.
            image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
            image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
        self.assertEqual(fake_processutils.fake_execute_get_log(),
                         ['fallocate -l 1 %s.fallocate_test' % self.PATH,
                          'fallocate -n -l %s %s' % (self.SIZE, self.PATH),
                          'fallocate -n -l %s %s' % (self.SIZE, self.PATH)])
    def test_prealloc_image_without_write_access(self):
        # No fallocate command may be issued when the user lacks write
        # access to the image path.
        CONF.set_override('preallocate_images', 'space')
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self)
        image = self.image_class(self.INSTANCE, self.NAME)
        def fake_fetch(target, *args, **kwargs):
            return
        with test.nested(
            mock.patch.object(image, 'exists', lambda: True),
            mock.patch.object(image, '_can_fallocate', lambda: True),
            mock.patch.object(image, 'get_disk_size', lambda _: self.SIZE)
        ) as (mock_exists, mock_can, mock_get):
            self.stub_out('os.path.exists', lambda _: True)
            self.stub_out('os.access', lambda p, w: False)
            # Testing fallocate is only called when user has write access.
            image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
            self.assertEqual(fake_processutils.fake_execute_get_log(), [])
    def test_libvirt_fs_info(self):
        image = self.image_class(self.INSTANCE, self.NAME)
        fs = image.libvirt_fs_info("/mnt")
        # check that exception hasn't been raised and the method
        # returned correct object
        self.assertIsInstance(fs, vconfig.LibvirtConfigGuestFilesys)
        self.assertEqual(fs.target_dir, "/mnt")
        if image.is_block_dev:
            self.assertEqual(fs.source_type, "block")
            self.assertEqual(fs.source_dev, image.path)
        else:
            self.assertEqual(fs.source_type, "file")
            self.assertEqual(fs.source_file, image.path)
    def test_libvirt_info(self):
        # Disk QoS extra specs must be reflected on the generated
        # LibvirtConfigGuestDisk element.
        image = self.image_class(self.INSTANCE, self.NAME)
        extra_specs = {
            'quota:disk_read_bytes_sec': 10 * units.Mi,
            'quota:disk_read_iops_sec': 1 * units.Ki,
            'quota:disk_write_bytes_sec': 20 * units.Mi,
            'quota:disk_write_iops_sec': 2 * units.Ki,
            'quota:disk_total_bytes_sec': 30 * units.Mi,
            'quota:disk_total_iops_sec': 3 * units.Ki,
        }
        disk_info = {
            'bus': 'virtio',
            'dev': '/dev/vda',
            'type': 'cdrom',
        }
        disk = image.libvirt_info(disk_info,
                                  cache_mode="none",
                                  extra_specs=extra_specs,
                                  hypervisor_version=4004001,
                                  boot_order="1")
        self.assertIsInstance(disk, vconfig.LibvirtConfigGuestDisk)
        self.assertEqual("/dev/vda", disk.target_dev)
        self.assertEqual("virtio", disk.target_bus)
        self.assertEqual("none", disk.driver_cache)
        self.assertEqual("cdrom", disk.source_device)
        self.assertEqual("1", disk.boot_order)
        self.assertEqual(10 * units.Mi, disk.disk_read_bytes_sec)
        self.assertEqual(1 * units.Ki, disk.disk_read_iops_sec)
        self.assertEqual(20 * units.Mi, disk.disk_write_bytes_sec)
        self.assertEqual(2 * units.Ki, disk.disk_write_iops_sec)
        self.assertEqual(30 * units.Mi, disk.disk_total_bytes_sec)
        self.assertEqual(3 * units.Ki, disk.disk_total_iops_sec)
    @mock.patch('nova.virt.disk.api.get_disk_size')
    def test_get_disk_size(self, get_disk_size):
        get_disk_size.return_value = 2361393152
        image = self.image_class(self.INSTANCE, self.NAME)
        self.assertEqual(2361393152, image.get_disk_size(image.path))
        get_disk_size.assert_called_once_with(image.path)
    def _test_libvirt_info_scsi_with_unit(self, disk_unit):
        # The address should be set if bus is scsi and unit is set.
        # Otherwise, it should not be set at all.
        image = self.image_class(self.INSTANCE, self.NAME)
        disk_info = {
            'bus': 'scsi',
            'dev': '/dev/sda',
            'type': 'disk',
        }
        disk = image.libvirt_info(disk_info, cache_mode='none', extra_specs={},
                                  hypervisor_version=4004001,
                                  disk_unit=disk_unit)
        if disk_unit:
            self.assertEqual(0, disk.device_addr.controller)
            self.assertEqual(disk_unit, disk.device_addr.unit)
        else:
            self.assertIsNone(disk.device_addr)
    @ddt.data(5, None)
    def test_libvirt_info_scsi_with_unit(self, disk_unit):
        # Driven by ddt: once with a unit number, once without.
        self._test_libvirt_info_scsi_with_unit(disk_unit)
class FlatTestCase(_ImageTestCase, test.NoDBTestCase):
    """Tests for the Flat (plain raw file) image backend."""
    SIZE = 1024
    def setUp(self):
        self.image_class = imagebackend.Flat
        super(FlatTestCase, self).setUp()
    @mock.patch.object(imagebackend.fileutils, 'ensure_tree')
    @mock.patch.object(os.path, 'exists')
    def test_cache(self, mock_exists, mock_ensure):
        # Nothing exists yet: cache() must create the base dir and check
        # base dir, image path and template path in that order.
        self.stub_out('nova.virt.libvirt.imagebackend.Flat.correct_format',
                      lambda _: None)
        mock_exists.side_effect = [False, False, False]
        exist_calls = [mock.call(self.TEMPLATE_DIR),
                       mock.call(self.PATH), mock.call(self.TEMPLATE_PATH)]
        fn = mock.MagicMock()
        fn(target=self.TEMPLATE_PATH)
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        mock_ensure.assert_called_once_with(self.TEMPLATE_DIR)
        mock_exists.assert_has_calls(exist_calls)
    @mock.patch.object(os.path, 'exists')
    def test_cache_image_exists(self, mock_exists):
        # Everything already exists: cache() only performs the checks.
        self.stub_out('nova.virt.libvirt.imagebackend.Flat.correct_format',
                      lambda _: None)
        mock_exists.side_effect = [True, True, True]
        exist_calls = [mock.call(self.TEMPLATE_DIR),
                       mock.call(self.PATH), mock.call(self.TEMPLATE_PATH)]
        image = self.image_class(self.INSTANCE, self.NAME)
        image.cache(None, self.TEMPLATE)
        mock_exists.assert_has_calls(exist_calls)
    @mock.patch.object(os.path, 'exists')
    def test_cache_base_dir_exists(self, mock_exists):
        # Base dir present, image and template missing.
        self.stub_out('nova.virt.libvirt.imagebackend.Flat.correct_format',
                      lambda _: None)
        mock_exists.side_effect = [True, False, False]
        exist_calls = [mock.call(self.TEMPLATE_DIR),
                       mock.call(self.PATH), mock.call(self.TEMPLATE_PATH)]
        fn = mock.MagicMock()
        fn(target=self.TEMPLATE_PATH)
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        mock_exists.assert_has_calls(exist_calls)
    @mock.patch.object(os.path, 'exists')
    def test_cache_template_exists(self, mock_exists):
        # Template cached already; only the per-instance image is missing.
        self.stub_out('nova.virt.libvirt.imagebackend.Flat.correct_format',
                      lambda _: None)
        mock_exists.side_effect = [True, False, True]
        exist_calls = [mock.call(self.TEMPLATE_DIR),
                       mock.call(self.PATH), mock.call(self.TEMPLATE_PATH)]
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(None, self.TEMPLATE)
        mock_exists.assert_has_calls(exist_calls)
    @mock.patch('os.path.exists')
    def test_cache_generating_resize(self, mock_path_exists):
        # Test for bug 1608934
        # The Flat backend doesn't write to the image cache when creating a
        # non-image backend. Test that we don't try to get the disk size of
        # a non-existent backend.
        base_dir = os.path.join(CONF.instances_path,
                                CONF.image_cache_subdirectory_name)
        # Lets assume the base image cache directory already exists
        existing = set([base_dir])
        def fake_exists(path):
            # Return True only for files previously created during
            # execution. This allows us to test that we're not calling
            # get_disk_size() on something which hasn't been previously
            # created.
            return path in existing
        def fake_get_disk_size(path):
            # get_disk_size will explode if called on a path which doesn't
            # exist. Specific exception not important for this test.
            if path not in existing:
                raise AssertionError
            # Not important, won't actually be called by patched code.
            return 2 * units.Gi
        def fake_template(target=None, **kwargs):
            # The template function we pass to cache. Calling this will
            # cause target to be created.
            existing.add(target)
        mock_path_exists.side_effect = fake_exists
        image = self.image_class(self.INSTANCE, self.NAME)
        # We're not testing preallocation
        image.preallocate = False
        with test.nested(
            mock.patch.object(image, 'exists'),
            mock.patch.object(image, 'correct_format'),
            mock.patch.object(image, 'get_disk_size'),
            mock.patch.object(image, 'resize_image')
        ) as (
            mock_disk_exists, mock_correct_format, mock_get_disk_size,
            mock_resize_image
        ):
            # Assume the disk doesn't already exist
            mock_disk_exists.return_value = False
            # This won't actually be executed since change I46b5658e,
            # but this is how the unpatched code will fail. We include this
            # here as a belt-and-braces sentinel.
            mock_get_disk_size.side_effect = fake_get_disk_size
            # Try to create a 2G image
            image.cache(fake_template, 'fake_cache_name', 2 * units.Gi)
            # The real assertion is that the above call to cache() didn't
            # raise AssertionError which, if we get here, it clearly didn't.
            self.assertFalse(image.resize_image.called)
    @mock.patch.object(imagebackend.disk, 'extend')
    @mock.patch('nova.virt.libvirt.utils.copy_image')
    @mock.patch.object(imagebackend.utils, 'synchronized')
    @mock.patch('nova.privsep.path.utime')
    def test_create_image(self, mock_utime, mock_sync, mock_copy, mock_extend):
        # No size given: the template is copied verbatim, no extend.
        mock_sync.side_effect = lambda *a, **kw: self._fake_deco
        fn = mock.MagicMock()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, None, image_id=None)
        mock_copy.assert_called_once_with(self.TEMPLATE_PATH, self.PATH)
        fn.assert_called_once_with(target=self.TEMPLATE_PATH, image_id=None)
        self.assertTrue(mock_sync.called)
        self.assertFalse(mock_extend.called)
        mock_utime.assert_called()
    @mock.patch.object(imagebackend.disk, 'extend')
    @mock.patch('nova.virt.libvirt.utils.copy_image')
    @mock.patch.object(imagebackend.utils, 'synchronized')
    def test_create_image_generated(self, mock_sync, mock_copy, mock_extend):
        # Generated image (e.g. ephemeral): fn writes directly to the
        # instance path; no copy from a template happens.
        mock_sync.side_effect = lambda *a, **kw: self._fake_deco
        fn = mock.MagicMock()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, None)
        fn.assert_called_once_with(target=self.PATH)
        self.assertFalse(mock_copy.called)
        self.assertTrue(mock_sync.called)
        self.assertFalse(mock_extend.called)
    @mock.patch.object(imagebackend.disk, 'extend')
    @mock.patch('nova.virt.libvirt.utils.copy_image')
    @mock.patch.object(imagebackend.utils, 'synchronized')
    @mock.patch.object(images, 'qemu_img_info',
                       return_value=imageutils.QemuImgInfo())
    @mock.patch('nova.privsep.path.utime')
    def test_create_image_extend(self, mock_utime, mock_qemu, mock_sync,
                                 mock_copy, mock_extend):
        # Requested size larger than the template: copy then extend.
        mock_sync.side_effect = lambda *a, **kw: self._fake_deco
        fn = mock.MagicMock()
        mock_qemu.return_value.virtual_size = 1024
        fn(target=self.TEMPLATE_PATH, image_id=None)
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH,
                           self.SIZE, image_id=None)
        mock_copy.assert_called_once_with(self.TEMPLATE_PATH, self.PATH)
        self.assertTrue(mock_sync.called)
        mock_extend.assert_called_once_with(
            imgmodel.LocalFileImage(self.PATH, imgmodel.FORMAT_RAW),
            self.SIZE)
        mock_qemu.assert_called_once_with(self.TEMPLATE_PATH)
        mock_utime.assert_called()
    @mock.patch.object(os.path, 'exists')
    @mock.patch.object(imagebackend.images, 'qemu_img_info')
    def test_correct_format(self, mock_qemu, mock_exist):
        # driver_format is resolved from qemu-img info on the real file.
        mock_exist.side_effect = [True, False, True]
        info = mock.MagicMock()
        info.file_format = 'foo'
        mock_qemu.return_value = info
        image = self.image_class(self.INSTANCE, self.NAME, path=self.PATH)
        self.assertEqual(image.driver_format, 'foo')
        mock_qemu.assert_called_once_with(self.PATH)
        mock_exist.assert_has_calls([mock.call(self.PATH),
                                     mock.call(self.DISK_INFO_PATH),
                                     mock.call(CONF.instances_path)])
    @mock.patch.object(images, 'qemu_img_info',
                       side_effect=exception.InvalidDiskInfo(
                           reason='invalid path'))
    def test_resolve_driver_format(self, fake_qemu_img_info):
        # If qemu-img info fails, Flat falls back to 'raw'.
        image = self.image_class(self.INSTANCE, self.NAME)
        driver_format = image.resolve_driver_format()
        self.assertEqual(driver_format, 'raw')
    def test_get_model(self):
        image = self.image_class(self.INSTANCE, self.NAME)
        model = image.get_model(FakeConn())
        self.assertEqual(imgmodel.LocalFileImage(self.PATH,
                                                 imgmodel.FORMAT_RAW),
                         model)
class Qcow2TestCase(_ImageTestCase, test.NoDBTestCase):
    """Tests for the Qcow2 (copy-on-write file) image backend."""
    SIZE = units.Gi
    def setUp(self):
        self.image_class = imagebackend.Qcow2
        super(Qcow2TestCase, self).setUp()
        # Path of the size-specific resized backing file, e.g. template_1.
        self.QCOW2_BASE = (self.TEMPLATE_PATH +
                           '_%d' % (self.SIZE / units.Gi))
    @mock.patch.object(os.path, 'exists')
    def test_cache(self, mock_exists):
        # Image and template both missing: fetch the template.
        mock_exists.side_effect = [False, True, False, True, False, False]
        fn = mock.MagicMock()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        exist_calls = [mock.call(self.DISK_INFO_PATH),
                       mock.call(CONF.instances_path),
                       mock.call(self.TEMPLATE_DIR),
                       mock.call(self.INSTANCES_PATH),
                       mock.call(self.PATH),
                       mock.call(self.TEMPLATE_PATH)]
        image.cache(fn, self.TEMPLATE)
        fn.assert_called_once_with(target=self.TEMPLATE_PATH)
        mock_exists.assert_has_calls(exist_calls)
    @mock.patch.object(os.path, 'exists')
    def test_cache_image_exists(self, mock_exists):
        # Everything exists: cache() is a no-op beyond the checks.
        mock_exists.side_effect = [False, True, True, True, True]
        exist_calls = [mock.call(self.DISK_INFO_PATH),
                       mock.call(self.INSTANCES_PATH),
                       mock.call(self.TEMPLATE_DIR),
                       mock.call(self.PATH),
                       mock.call(self.TEMPLATE_PATH)]
        image = self.image_class(self.INSTANCE, self.NAME)
        image.cache(None, self.TEMPLATE)
        mock_exists.assert_has_calls(exist_calls)
    @mock.patch.object(os.path, 'exists')
    def test_cache_base_dir_exists(self, mock_exists):
        mock_exists.side_effect = [False, True, True, False, False]
        exist_calls = [mock.call(self.DISK_INFO_PATH),
                       mock.call(self.INSTANCES_PATH),
                       mock.call(self.TEMPLATE_DIR),
                       mock.call(self.PATH),
                       mock.call(self.TEMPLATE_PATH)]
        fn = mock.MagicMock()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        fn.assert_called_once_with(target=self.TEMPLATE_PATH)
        mock_exists.assert_has_calls(exist_calls)
    @mock.patch.object(os.path, 'exists')
    def test_cache_template_exists(self, mock_exists):
        mock_exists.side_effect = [False, True, True, False, True]
        exist_calls = [mock.call(self.DISK_INFO_PATH),
                       mock.call(self.INSTANCES_PATH),
                       mock.call(self.TEMPLATE_DIR),
                       mock.call(self.PATH),
                       mock.call(self.TEMPLATE_PATH)]
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(None, self.TEMPLATE)
        mock_exists.assert_has_calls(exist_calls)
    @mock.patch.object(imagebackend.utils, 'synchronized')
    @mock.patch('nova.virt.libvirt.utils.create_cow_image')
    @mock.patch.object(imagebackend.disk, 'extend')
    @mock.patch('nova.privsep.path.utime')
    def test_create_image(self, mock_utime, mock_extend, mock_create,
                          mock_sync):
        # No size given: a COW overlay on the template, no extend.
        mock_sync.side_effect = lambda *a, **kw: self._fake_deco
        fn = mock.MagicMock()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, None)
        mock_create.assert_called_once_with(self.TEMPLATE_PATH, self.PATH)
        fn.assert_called_once_with(target=self.TEMPLATE_PATH)
        self.assertTrue(mock_sync.called)
        self.assertFalse(mock_extend.called)
        mock_utime.assert_called()
    @mock.patch.object(imagebackend.utils, 'synchronized')
    @mock.patch('nova.virt.libvirt.utils.create_cow_image')
    @mock.patch.object(imagebackend.disk, 'extend')
    @mock.patch.object(os.path, 'exists', side_effect=[])
    @mock.patch.object(imagebackend.Image, 'verify_base_size')
    @mock.patch('nova.privsep.path.utime')
    def test_create_image_with_size(self, mock_utime, mock_verify, mock_exist,
                                    mock_extend, mock_create, mock_sync):
        # With a size: verify the base fits, create overlay, then extend.
        mock_sync.side_effect = lambda *a, **kw: self._fake_deco
        fn = mock.MagicMock()
        mock_exist.side_effect = [False, True, False, False, False]
        exist_calls = [mock.call(self.DISK_INFO_PATH),
                       mock.call(self.INSTANCES_PATH),
                       mock.call(self.TEMPLATE_PATH),
                       mock.call(self.PATH),
                       mock.call(self.PATH)]
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
        mock_verify.assert_called_once_with(self.TEMPLATE_PATH, self.SIZE)
        mock_create.assert_called_once_with(self.TEMPLATE_PATH, self.PATH)
        mock_extend.assert_called_once_with(
            imgmodel.LocalFileImage(self.PATH, imgmodel.FORMAT_QCOW2),
            self.SIZE)
        fn.assert_called_once_with(target=self.TEMPLATE_PATH)
        mock_exist.assert_has_calls(exist_calls)
        self.assertTrue(mock_sync.called)
        mock_utime.assert_called()
    @mock.patch.object(imagebackend.utils, 'synchronized')
    @mock.patch('nova.virt.libvirt.utils.create_cow_image')
    @mock.patch.object(imagebackend.disk, 'extend')
    @mock.patch.object(os.path, 'exists', side_effect=[])
    @mock.patch.object(imagebackend.Qcow2, 'get_disk_size')
    @mock.patch('nova.privsep.path.utime')
    def test_create_image_too_small(self, mock_utime, mock_get, mock_exist,
                                    mock_extend, mock_create, mock_sync):
        # Flavor disk smaller than the template image must be rejected.
        mock_sync.side_effect = lambda *a, **kw: self._fake_deco
        mock_get.return_value = self.SIZE
        fn = mock.MagicMock()
        mock_exist.side_effect = [False, True, True]
        exist_calls = [mock.call(self.DISK_INFO_PATH),
                       mock.call(self.INSTANCES_PATH),
                       mock.call(self.TEMPLATE_PATH)]
        image = self.image_class(self.INSTANCE, self.NAME)
        self.assertRaises(exception.FlavorDiskSmallerThanImage,
                          image.create_image, fn, self.TEMPLATE_PATH, 1)
        mock_get.assert_called_once_with(self.TEMPLATE_PATH)
        mock_exist.assert_has_calls(exist_calls)
        self.assertTrue(mock_sync.called)
        self.assertFalse(mock_create.called)
        self.assertFalse(mock_extend.called)
    @mock.patch.object(imagebackend.utils, 'synchronized')
    @mock.patch('nova.virt.libvirt.utils.create_cow_image')
    @mock.patch('nova.virt.libvirt.utils.get_disk_backing_file')
    @mock.patch.object(imagebackend.disk, 'extend')
    @mock.patch.object(os.path, 'exists', side_effect=[])
    @mock.patch.object(imagebackend.Image, 'verify_base_size')
    @mock.patch('nova.virt.libvirt.utils.copy_image')
    @mock.patch('nova.privsep.path.utime')
    def test_generate_resized_backing_files(self, mock_utime, mock_copy,
                                            mock_verify, mock_exist,
                                            mock_extend, mock_get,
                                            mock_create, mock_sync):
        # A disk whose backing file is the size-specific base: the base is
        # (re)created by copy + extend of the template.
        mock_sync.side_effect = lambda *a, **kw: self._fake_deco
        mock_get.return_value = self.QCOW2_BASE
        fn = mock.MagicMock()
        mock_exist.side_effect = [False, True, False, True, False, True]
        exist_calls = [mock.call(self.DISK_INFO_PATH),
                       mock.call(CONF.instances_path),
                       mock.call(self.TEMPLATE_PATH),
                       mock.call(self.PATH),
                       mock.call(self.QCOW2_BASE),
                       mock.call(self.PATH)]
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
        mock_get.assert_called_once_with(self.PATH)
        mock_verify.assert_called_once_with(self.TEMPLATE_PATH, self.SIZE)
        mock_copy.assert_called_once_with(self.TEMPLATE_PATH,
                                          self.QCOW2_BASE)
        mock_extend.assert_called_once_with(
            imgmodel.LocalFileImage(self.QCOW2_BASE,
                                    imgmodel.FORMAT_QCOW2), self.SIZE)
        mock_exist.assert_has_calls(exist_calls)
        fn.assert_called_once_with(target=self.TEMPLATE_PATH)
        self.assertTrue(mock_sync.called)
        self.assertFalse(mock_create.called)
        mock_utime.assert_called()
    @mock.patch.object(imagebackend.utils, 'synchronized')
    @mock.patch('nova.virt.libvirt.utils.create_cow_image')
    @mock.patch('nova.virt.libvirt.utils.get_disk_backing_file')
    @mock.patch.object(imagebackend.disk, 'extend')
    @mock.patch.object(os.path, 'exists', side_effect=[])
    @mock.patch.object(imagebackend.Image, 'verify_base_size')
    @mock.patch('nova.privsep.path.utime')
    def test_qcow2_exists_and_has_no_backing_file(self, mock_utime,
                                                  mock_verify, mock_exist,
                                                  mock_extend, mock_get,
                                                  mock_create, mock_sync):
        # Pre-existing flat qcow2 (no backing file): leave it untouched.
        mock_sync.side_effect = lambda *a, **kw: self._fake_deco
        mock_get.return_value = None
        fn = mock.MagicMock()
        mock_exist.side_effect = [False, True, False, True, True]
        exist_calls = [mock.call(self.DISK_INFO_PATH),
                       mock.call(self.INSTANCES_PATH),
                       mock.call(self.TEMPLATE_PATH),
                       mock.call(self.PATH),
                       mock.call(self.PATH)]
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
        mock_get.assert_called_once_with(self.PATH)
        fn.assert_called_once_with(target=self.TEMPLATE_PATH)
        mock_verify.assert_called_once_with(self.TEMPLATE_PATH, self.SIZE)
        mock_exist.assert_has_calls(exist_calls)
        self.assertTrue(mock_sync.called)
        self.assertFalse(mock_create.called)
        self.assertFalse(mock_extend.called)
    def test_resolve_driver_format(self):
        image = self.image_class(self.INSTANCE, self.NAME)
        driver_format = image.resolve_driver_format()
        self.assertEqual(driver_format, 'qcow2')
    def test_get_model(self):
        image = self.image_class(self.INSTANCE, self.NAME)
        model = image.get_model(FakeConn())
        self.assertEqual(imgmodel.LocalFileImage(self.PATH,
                                                 imgmodel.FORMAT_QCOW2),
                         model)
class LvmTestCase(_ImageTestCase, test.NoDBTestCase):
    """Tests for the Lvm (logical volume) image backend, unencrypted."""
    VG = 'FakeVG'
    TEMPLATE_SIZE = 512
    SIZE = 1024
    def setUp(self):
        self.image_class = imagebackend.Lvm
        super(LvmTestCase, self).setUp()
        self.flags(images_volume_group=self.VG, group='libvirt')
        self.flags(enabled=False, group='ephemeral_storage_encryption')
        self.INSTANCE['ephemeral_key_uuid'] = None
        # LV name is <instance uuid>_<disk name>; device path under /dev/VG.
        self.LV = '%s_%s' % (self.INSTANCE['uuid'], self.NAME)
        self.PATH = os.path.join('/dev', self.VG, self.LV)
    @mock.patch.object(compute_utils, 'disk_ops_semaphore')
    @mock.patch('nova.privsep.utils.supports_direct_io', return_value=True)
    @mock.patch.object(imagebackend.lvm, 'create_volume')
    @mock.patch.object(imagebackend.disk, 'get_disk_size',
                       return_value=TEMPLATE_SIZE)
    @mock.patch('nova.privsep.qemu.convert_image')
    def _create_image(self, sparse, mock_convert_image, mock_get, mock_create,
                      mock_ignored, mock_disk_op_sema):
        # Helper: create without a size; LV sized from the template.
        fn = mock.MagicMock()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, None)
        mock_create.assert_called_once_with(self.VG, self.LV,
                                            self.TEMPLATE_SIZE,
                                            sparse=sparse)
        fn.assert_called_once_with(target=self.TEMPLATE_PATH)
        mock_get.assert_called_once_with(self.TEMPLATE_PATH)
        path = '/dev/%s/%s_%s' % (self.VG, self.INSTANCE.uuid, self.NAME)
        mock_convert_image.assert_called_once_with(
            self.TEMPLATE_PATH, path, None, 'raw', CONF.instances_path, False)
        mock_disk_op_sema.__enter__.assert_called_once()
    @mock.patch.object(imagebackend.lvm, 'create_volume')
    def _create_image_generated(self, sparse, mock_create):
        # Helper: generated image; fn writes straight into the LV path.
        fn = mock.MagicMock()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH,
                           self.SIZE, ephemeral_size=None)
        mock_create.assert_called_once_with(self.VG, self.LV,
                                            self.SIZE, sparse=sparse)
        fn.assert_called_once_with(target=self.PATH, ephemeral_size=None)
    @mock.patch.object(compute_utils, 'disk_ops_semaphore')
    @mock.patch('nova.privsep.utils.supports_direct_io', return_value=True)
    @mock.patch.object(imagebackend.disk, 'resize2fs')
    @mock.patch.object(imagebackend.lvm, 'create_volume')
    @mock.patch.object(imagebackend.disk, 'get_disk_size',
                       return_value=TEMPLATE_SIZE)
    @mock.patch('nova.privsep.qemu.convert_image')
    def _create_image_resize(self, sparse, mock_convert_image, mock_get,
                             mock_create, mock_resize, mock_ignored,
                             mock_disk_op_sema):
        # Helper: requested size > template; convert then resize2fs.
        fn = mock.MagicMock()
        fn(target=self.TEMPLATE_PATH)
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
        mock_create.assert_called_once_with(self.VG, self.LV,
                                            self.SIZE, sparse=sparse)
        mock_get.assert_called_once_with(self.TEMPLATE_PATH)
        mock_convert_image.assert_called_once_with(
            self.TEMPLATE_PATH, self.PATH, None, 'raw',
            CONF.instances_path, False)
        mock_disk_op_sema.__enter__.assert_called_once()
        mock_resize.assert_called_once_with(self.PATH, run_as_root=True)
    @mock.patch.object(imagebackend.fileutils, 'ensure_tree')
    @mock.patch.object(os.path, 'exists')
    def test_cache(self, mock_exists, mock_ensure):
        mock_exists.side_effect = [False, False, False]
        exist_calls = [mock.call(self.TEMPLATE_DIR),
                       mock.call(self.PATH),
                       mock.call(self.TEMPLATE_PATH)]
        fn = mock.MagicMock()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        fn.assert_called_once_with(target=self.TEMPLATE_PATH)
        mock_ensure.assert_called_once_with(self.TEMPLATE_DIR)
        mock_exists.assert_has_calls(exist_calls)
    @mock.patch.object(os.path, 'exists')
    def test_cache_image_exists(self, mock_exists):
        mock_exists.side_effect = [True, True, True]
        exist_calls = [mock.call(self.TEMPLATE_DIR),
                       mock.call(self.PATH),
                       mock.call(self.TEMPLATE_PATH)]
        image = self.image_class(self.INSTANCE, self.NAME)
        image.cache(None, self.TEMPLATE)
        mock_exists.assert_has_calls(exist_calls)
    @mock.patch.object(imagebackend.fileutils, 'ensure_tree')
    @mock.patch.object(os.path, 'exists', side_effect=[True, False, False])
    def test_cache_base_dir_exists(self, mock_exists, mock_ensure):
        # Base dir already present: ensure_tree must not be called again.
        exist_calls = [mock.call(self.TEMPLATE_DIR),
                       mock.call(self.PATH),
                       mock.call(self.TEMPLATE_PATH)]
        fn = mock.MagicMock()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        mock_exists.assert_has_calls(exist_calls)
        mock_ensure.assert_not_called()
    @mock.patch('os.path.exists', autospec=True)
    @mock.patch('nova.utils.synchronized', autospec=True)
    @mock.patch.object(imagebackend, 'lvm', autospec=True)
    @mock.patch.object(imagebackend.fileutils, 'ensure_tree', autospec=True)
    def test_cache_ephemeral(self, mock_ensure, mock_lvm, mock_synchronized,
                             mock_exists):
        # Ignores its arguments and returns the wrapped function unmodified
        def fake_synchronized(*args, **kwargs):
            def outer(fn):
                def wrapper(*wargs, **wkwargs):
                    fn(*wargs, **wkwargs)
                return wrapper
            return outer
        mock_synchronized.side_effect = fake_synchronized
        # Fake exists returns true for paths which have been added to the
        # exists set
        exists = set()
        def fake_exists(path):
            return path in exists
        mock_exists.side_effect = fake_exists
        # Fake create_volume causes exists to return true for the volume
        def fake_create_volume(vg, lv, size, sparse=False):
            exists.add(os.path.join('/dev', vg, lv))
        mock_lvm.create_volume.side_effect = fake_create_volume
        # Assert that when we call cache() for an ephemeral disk with the
        # Lvm backend, we call fetch_func with a target of the Lvm disk
        size_gb = 1
        size = size_gb * units.Gi
        fetch_func = mock.MagicMock()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.cache(fetch_func, self.TEMPLATE,
                    ephemeral_size=size_gb, size=size)
        mock_ensure.assert_called_once_with(self.TEMPLATE_DIR)
        mock_lvm.create_volume.assert_called_once_with(self.VG, self.LV, size,
                                                       sparse=False)
        fetch_func.assert_called_once_with(target=self.PATH,
                                           ephemeral_size=size_gb)
        mock_synchronized.assert_called()
    def test_create_image(self):
        self._create_image(False)
    def test_create_image_sparsed(self):
        self.flags(sparse_logical_volumes=True, group='libvirt')
        self._create_image(True)
    def test_create_image_generated(self):
        self._create_image_generated(False)
    def test_create_image_generated_sparsed(self):
        self.flags(sparse_logical_volumes=True, group='libvirt')
        self._create_image_generated(True)
    def test_create_image_resize(self):
        self._create_image_resize(False)
    def test_create_image_resize_sparsed(self):
        self.flags(sparse_logical_volumes=True, group='libvirt')
        self._create_image_resize(True)
    @mock.patch.object(imagebackend.lvm, 'create_volume',
                       side_effect=RuntimeError)
    @mock.patch.object(imagebackend.disk, 'get_disk_size',
                       return_value=TEMPLATE_SIZE)
    @mock.patch.object(imagebackend.lvm, 'remove_volumes')
    def test_create_image_negative(self, mock_remove, mock_get, mock_create):
        # Failure during create_volume must clean up the LV.
        fn = mock.MagicMock()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.assertRaises(RuntimeError, image.create_image, fn,
                          self.TEMPLATE_PATH, self.SIZE)
        mock_create.assert_called_once_with(self.VG, self.LV,
                                            self.SIZE, sparse=False)
        fn.assert_called_once_with(target=self.TEMPLATE_PATH)
        mock_get.assert_called_once_with(self.TEMPLATE_PATH)
        mock_remove.assert_called_once_with([self.PATH])
    @mock.patch.object(imagebackend.lvm, 'create_volume')
    @mock.patch.object(imagebackend.lvm, 'remove_volumes')
    def test_create_image_generated_negative(self, mock_remove, mock_create):
        # Failure inside the generator function must clean up the LV too.
        fn = mock.MagicMock()
        fn.side_effect = RuntimeError
        image = self.image_class(self.INSTANCE, self.NAME)
        self.assertRaises(RuntimeError, image.create_image, fn,
                          self.TEMPLATE_PATH, self.SIZE,
                          ephemeral_size=None)
        mock_create.assert_called_once_with(self.VG, self.LV, self.SIZE,
                                            sparse=False)
        fn.assert_called_once_with(target=self.PATH, ephemeral_size=None)
        mock_remove.assert_called_once_with([self.PATH])
    def test_prealloc_image(self):
        # LVM volumes are preallocated by nature, so no fallocate commands
        # may be issued even with preallocate_images=space.
        CONF.set_override('preallocate_images', 'space')
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self)
        image = self.image_class(self.INSTANCE, self.NAME)
        def fake_fetch(target, *args, **kwargs):
            return
        self.stub_out('os.path.exists', lambda _: True)
        self.stub_out('nova.virt.libvirt.imagebackend.Lvm.exists',
                      lambda *a, **kw: True)
        self.stub_out('nova.virt.libvirt.imagebackend.Lvm.get_disk_size',
                      lambda *a, **kw: self.SIZE)
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
        self.assertEqual(fake_processutils.fake_execute_get_log(), [])
class EncryptedLvmTestCase(_ImageTestCase, test.NoDBTestCase):
    """Tests for the Lvm backend with ephemeral storage encryption on.

    Images are dm-crypt mappings layered on LVM logical volumes, so the
    tests assert both the ``lvm.*`` volume operations and the matching
    ``dmcrypt.*`` calls, including cleanup on failure.
    """

    VG = 'FakeVG'           # volume group name used throughout
    TEMPLATE_SIZE = 512     # size reported for the template image
    SIZE = 1024             # requested disk size

    def setUp(self):
        """Enable encryption flags and precompute LV / dm-crypt paths."""
        self.image_class = imagebackend.Lvm
        super(EncryptedLvmTestCase, self).setUp()
        self.flags(enabled=True, group='ephemeral_storage_encryption')
        self.flags(cipher='aes-xts-plain64',
                   group='ephemeral_storage_encryption')
        self.flags(key_size=512, group='ephemeral_storage_encryption')
        self.flags(fixed_key='00000000000000000000000000000000'
                             '00000000000000000000000000000000',
                   group='key_manager')
        self.flags(images_volume_group=self.VG, group='libvirt')
        self.LV = '%s_%s' % (self.INSTANCE['uuid'], self.NAME)
        self.LV_PATH = os.path.join('/dev', self.VG, self.LV)
        # PATH is the decrypted device-mapper node, not the raw LV device.
        self.PATH = os.path.join('/dev/mapper',
                                 imagebackend.dmcrypt.volume_name(self.LV))
        self.key_manager = key_manager.API()
        self.INSTANCE['ephemeral_key_uuid'] =\
            self.key_manager.create_key(self.CONTEXT, 'AES', 256)
        self.KEY = self.key_manager.get(self.CONTEXT,
            self.INSTANCE['ephemeral_key_uuid']).get_encoded()
        # Shorthand aliases for the modules the tests patch repeatedly.
        self.lvm = imagebackend.lvm
        self.disk = imagebackend.disk
        self.utils = imagebackend.utils
        self.libvirt_utils = imagebackend.libvirt_utils
        self.dmcrypt = imagebackend.dmcrypt

    def _create_image(self, sparse):
        """Create from template: assert LV creation, dm-crypt setup and
        the qemu image conversion onto the decrypted device."""
        with test.nested(
                mock.patch('nova.privsep.utils.supports_direct_io',
                           return_value=True),
                mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
                mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
                mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
                mock.patch.object(self.disk, 'get_disk_size',
                                  mock.Mock(return_value=self.TEMPLATE_SIZE)),
                mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
                mock.patch('nova.privsep.qemu.convert_image'),
                mock.patch.object(compute_utils, 'disk_ops_semaphore')):
            fn = mock.Mock()
            image = self.image_class(self.INSTANCE, self.NAME)
            image.create_image(fn, self.TEMPLATE_PATH, self.TEMPLATE_SIZE,
                               context=self.CONTEXT)
            # The conversion must run under the disk-ops semaphore.
            compute_utils.disk_ops_semaphore.__enter__.assert_called_once()
            fn.assert_called_with(context=self.CONTEXT,
                                  target=self.TEMPLATE_PATH)
            self.lvm.create_volume.assert_called_with(self.VG,
                                                      self.LV,
                                                      self.TEMPLATE_SIZE,
                                                      sparse=sparse)
            self.dmcrypt.create_volume.assert_called_with(
                self.PATH.rpartition('/')[2],
                self.LV_PATH,
                CONF.ephemeral_storage_encryption.cipher,
                CONF.ephemeral_storage_encryption.key_size,
                self.KEY)
            nova.privsep.qemu.convert_image.assert_called_with(
                self.TEMPLATE_PATH, self.PATH, None, 'raw',
                CONF.instances_path, False)

    def _create_image_generated(self, sparse):
        """Generated (no template) image: fn writes into the decrypted
        device after LV + dm-crypt creation."""
        with test.nested(
                mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
                mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
                mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
                mock.patch.object(self.disk, 'get_disk_size',
                                  mock.Mock(return_value=self.TEMPLATE_SIZE)),
                mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
                mock.patch('nova.privsep.qemu.convert_image')):
            fn = mock.Mock()
            image = self.image_class(self.INSTANCE, self.NAME)
            image.create_image(fn, self.TEMPLATE_PATH,
                               self.SIZE,
                               ephemeral_size=None,
                               context=self.CONTEXT)
            self.lvm.create_volume.assert_called_with(
                self.VG,
                self.LV,
                self.SIZE,
                sparse=sparse)
            self.dmcrypt.create_volume.assert_called_with(
                self.PATH.rpartition('/')[2],
                self.LV_PATH,
                CONF.ephemeral_storage_encryption.cipher,
                CONF.ephemeral_storage_encryption.key_size,
                self.KEY)
            fn.assert_called_with(target=self.PATH,
                                  ephemeral_size=None, context=self.CONTEXT)

    def _create_image_resize(self, sparse):
        """Template smaller than requested size: volume is created at the
        requested size and the filesystem resized after conversion."""
        with test.nested(
                mock.patch('nova.privsep.utils.supports_direct_io',
                           return_value=True),
                mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
                mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
                mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
                mock.patch.object(self.disk, 'get_disk_size',
                                  mock.Mock(return_value=self.TEMPLATE_SIZE)),
                mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
                mock.patch('nova.privsep.qemu.convert_image'),
                mock.patch.object(compute_utils, 'disk_ops_semaphore')):
            fn = mock.Mock()
            image = self.image_class(self.INSTANCE, self.NAME)
            image.create_image(fn, self.TEMPLATE_PATH, self.SIZE,
                               context=self.CONTEXT)
            compute_utils.disk_ops_semaphore.__enter__.assert_called_once()
            fn.assert_called_with(context=self.CONTEXT,
                                  target=self.TEMPLATE_PATH)
            self.disk.get_disk_size.assert_called_with(self.TEMPLATE_PATH)
            self.lvm.create_volume.assert_called_with(
                self.VG,
                self.LV,
                self.SIZE,
                sparse=sparse)
            self.dmcrypt.create_volume.assert_called_with(
                self.PATH.rpartition('/')[2],
                self.LV_PATH,
                CONF.ephemeral_storage_encryption.cipher,
                CONF.ephemeral_storage_encryption.key_size,
                self.KEY)
            nova.privsep.qemu.convert_image.assert_called_with(
                self.TEMPLATE_PATH, self.PATH, None, 'raw',
                CONF.instances_path, False)
            self.disk.resize2fs.assert_called_with(self.PATH, run_as_root=True)

    def test_create_image(self):
        """Non-sparse creation from template."""
        self._create_image(False)

    def test_create_image_sparsed(self):
        """Sparse creation from template."""
        self.flags(sparse_logical_volumes=True, group='libvirt')
        self._create_image(True)

    def test_create_image_generated(self):
        """Non-sparse generated image."""
        self._create_image_generated(False)

    def test_create_image_generated_sparsed(self):
        """Sparse generated image."""
        self.flags(sparse_logical_volumes=True, group='libvirt')
        self._create_image_generated(True)

    def test_create_image_resize(self):
        """Non-sparse creation with resize."""
        self._create_image_resize(False)

    def test_create_image_resize_sparsed(self):
        """Sparse creation with resize."""
        self.flags(sparse_logical_volumes=True, group='libvirt')
        self._create_image_resize(True)

    def test_create_image_negative(self):
        """LV creation failure must tear down the dm-crypt mapping and LV."""
        with test.nested(
                mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
                mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
                mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
                mock.patch.object(self.disk, 'get_disk_size',
                                  mock.Mock(return_value=self.TEMPLATE_SIZE)),
                mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
                mock.patch.object(self.utils, 'execute', mock.Mock())):
            fn = mock.Mock()
            self.lvm.create_volume.side_effect = RuntimeError()
            image = self.image_class(self.INSTANCE, self.NAME)
            self.assertRaises(
                RuntimeError,
                image.create_image,
                fn,
                self.TEMPLATE_PATH,
                self.SIZE,
                context=self.CONTEXT)
            fn.assert_called_with(
                context=self.CONTEXT,
                target=self.TEMPLATE_PATH)
            self.disk.get_disk_size.assert_called_with(
                self.TEMPLATE_PATH)
            self.lvm.create_volume.assert_called_with(
                self.VG,
                self.LV,
                self.SIZE,
                sparse=False)
            # Cleanup: dm-crypt mapping removed, then the LV itself.
            self.dmcrypt.delete_volume.assert_called_with(
                self.PATH.rpartition('/')[2])
            self.lvm.remove_volumes.assert_called_with([self.LV_PATH])

    def test_create_image_encrypt_negative(self):
        """dm-crypt setup failure must still clean up mapping and LV."""
        with test.nested(
                mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
                mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
                mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
                mock.patch.object(self.disk, 'get_disk_size',
                                  mock.Mock(return_value=self.TEMPLATE_SIZE)),
                mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
                mock.patch.object(self.utils, 'execute', mock.Mock())):
            fn = mock.Mock()
            self.dmcrypt.create_volume.side_effect = RuntimeError()
            image = self.image_class(self.INSTANCE, self.NAME)
            self.assertRaises(
                RuntimeError,
                image.create_image,
                fn,
                self.TEMPLATE_PATH,
                self.SIZE,
                context=self.CONTEXT)
            fn.assert_called_with(
                context=self.CONTEXT,
                target=self.TEMPLATE_PATH)
            self.disk.get_disk_size.assert_called_with(self.TEMPLATE_PATH)
            self.lvm.create_volume.assert_called_with(
                self.VG,
                self.LV,
                self.SIZE,
                sparse=False)
            self.dmcrypt.create_volume.assert_called_with(
                self.dmcrypt.volume_name(self.LV),
                self.LV_PATH,
                CONF.ephemeral_storage_encryption.cipher,
                CONF.ephemeral_storage_encryption.key_size,
                self.KEY)
            self.dmcrypt.delete_volume.assert_called_with(
                self.PATH.rpartition('/')[2])
            self.lvm.remove_volumes.assert_called_with([self.LV_PATH])

    def test_create_image_generated_negative(self):
        """Failure of the generator fn must clean up mapping and LV."""
        with test.nested(
                mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
                mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
                mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
                mock.patch.object(self.disk, 'get_disk_size',
                                  mock.Mock(return_value=self.TEMPLATE_SIZE)),
                mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
                mock.patch.object(self.utils, 'execute', mock.Mock())):
            fn = mock.Mock()
            fn.side_effect = RuntimeError()
            image = self.image_class(self.INSTANCE, self.NAME)
            self.assertRaises(RuntimeError,
                              image.create_image,
                              fn,
                              self.TEMPLATE_PATH,
                              self.SIZE,
                              ephemeral_size=None,
                              context=self.CONTEXT)
            self.lvm.create_volume.assert_called_with(
                self.VG,
                self.LV,
                self.SIZE,
                sparse=False)
            self.dmcrypt.create_volume.assert_called_with(
                self.PATH.rpartition('/')[2],
                self.LV_PATH,
                CONF.ephemeral_storage_encryption.cipher,
                CONF.ephemeral_storage_encryption.key_size,
                self.KEY)
            fn.assert_called_with(
                target=self.PATH,
                ephemeral_size=None,
                context=self.CONTEXT)
            self.dmcrypt.delete_volume.assert_called_with(
                self.PATH.rpartition('/')[2])
            self.lvm.remove_volumes.assert_called_with([self.LV_PATH])

    def test_create_image_generated_encrypt_negative(self):
        """Generated path: generator failure still triggers full cleanup."""
        with test.nested(
                mock.patch.object(self.lvm, 'create_volume', mock.Mock()),
                mock.patch.object(self.lvm, 'remove_volumes', mock.Mock()),
                mock.patch.object(self.disk, 'resize2fs', mock.Mock()),
                mock.patch.object(self.disk, 'get_disk_size',
                                  mock.Mock(return_value=self.TEMPLATE_SIZE)),
                mock.patch.object(self.dmcrypt, 'create_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'delete_volume', mock.Mock()),
                mock.patch.object(self.dmcrypt, 'list_volumes', mock.Mock()),
                mock.patch.object(self.utils, 'execute', mock.Mock())):
            fn = mock.Mock()
            fn.side_effect = RuntimeError()
            image = self.image_class(self.INSTANCE, self.NAME)
            self.assertRaises(
                RuntimeError,
                image.create_image,
                fn,
                self.TEMPLATE_PATH,
                self.SIZE,
                ephemeral_size=None,
                context=self.CONTEXT)
            self.lvm.create_volume.assert_called_with(
                self.VG,
                self.LV,
                self.SIZE,
                sparse=False)
            self.dmcrypt.create_volume.assert_called_with(
                self.PATH.rpartition('/')[2],
                self.LV_PATH,
                CONF.ephemeral_storage_encryption.cipher,
                CONF.ephemeral_storage_encryption.key_size,
                self.KEY)
            self.dmcrypt.delete_volume.assert_called_with(
                self.PATH.rpartition('/')[2])
            self.lvm.remove_volumes.assert_called_with([self.LV_PATH])

    def test_prealloc_image(self):
        """No external command runs when the encrypted volume exists."""
        self.flags(preallocate_images='space')
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self)
        image = self.image_class(self.INSTANCE, self.NAME)

        def fake_fetch(target, *args, **kwargs):
            # No-op fetch: the image is treated as already present.
            return

        self.stub_out('os.path.exists', lambda _: True)
        self.stub_out('nova.virt.libvirt.imagebackend.Lvm.exists',
                      lambda *a, **kw: True)
        self.stub_out('nova.virt.libvirt.imagebackend.Lvm.get_disk_size',
                      lambda *a, **kw: self.SIZE)
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
        self.assertEqual(fake_processutils.fake_execute_get_log(), [])

    def test_get_model(self):
        """The model is a local block device at the dm-crypt path."""
        image = self.image_class(self.INSTANCE, self.NAME)
        model = image.get_model(FakeConn())
        self.assertEqual(imgmodel.LocalBlockImage(self.PATH),
                         model)
@ddt.ddt
class RbdTestCase(_ImageTestCase, test.NoDBTestCase):
    """Tests for the Rbd (Ceph/RADOS block device) image backend."""

    FSID = "FakeFsID"
    POOL = "FakePool"
    USER = "FakeUser"
    CONF = "FakeConf"   # ceph.conf path for the backend, not the oslo CONF
    SIZE = 1024

    def setUp(self):
        """Configure the rbd flags and stub the ceph client modules."""
        self.image_class = imagebackend.Rbd
        super(RbdTestCase, self).setUp()
        self.flags(images_rbd_pool=self.POOL,
                   rbd_user=self.USER,
                   images_rbd_ceph_conf=self.CONF,
                   group='libvirt')
        self.libvirt_utils = imagebackend.libvirt_utils
        self.utils = imagebackend.utils
        # mock out the cephclients for avoiding ImportError exception
        rbd_utils.rbd = mock.Mock()
        rbd_utils.rados = mock.Mock()

    @mock.patch.object(os.path, 'exists', return_value=False)
    @mock.patch.object(imagebackend.Rbd, 'exists', return_value=False)
    @mock.patch.object(imagebackend.fileutils, 'ensure_tree')
    def test_cache(self, mock_ensure, mock_img_exist, mock_os_exist):
        """Nothing cached yet: the fetch fn runs and the dir is created."""
        image = self.image_class(self.INSTANCE, self.NAME)
        fn = mock.MagicMock()
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        mock_ensure.assert_called_once_with(self.TEMPLATE_DIR)
        fn.assert_called_once_with(target=self.TEMPLATE_PATH)
        mock_img_exist.assert_called_with()
        mock_os_exist.assert_has_calls([
            mock.call(self.TEMPLATE_DIR), mock.call(self.TEMPLATE_PATH)
        ])

    @mock.patch.object(os.path, 'exists')
    @mock.patch.object(imagebackend.Rbd, 'exists')
    @mock.patch.object(imagebackend.fileutils, 'ensure_tree')
    def test_cache_base_dir_exists(self, mock_ensure,
                                   mock_img_exist, mock_os_exist):
        """Base dir present, template missing: fetch runs, no ensure_tree
        assertion needed."""
        mock_os_exist.side_effect = [True, False]
        mock_img_exist.return_value = False
        image = self.image_class(self.INSTANCE, self.NAME)
        fn = mock.MagicMock()
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        mock_img_exist.assert_called_once_with()
        mock_os_exist.assert_has_calls([
            mock.call(self.TEMPLATE_DIR), mock.call(self.TEMPLATE_PATH)
        ])
        fn.assert_called_once_with(target=self.TEMPLATE_PATH)

    @mock.patch.object(os.path, 'exists', return_value=True)
    @mock.patch.object(imagebackend.Rbd, 'exists', return_value=True)
    def test_cache_image_exists(self, mock_img_exist, mock_os_exist):
        """Both image and template exist: fn is never needed (None)."""
        image = self.image_class(self.INSTANCE, self.NAME)
        image.cache(None, self.TEMPLATE)
        mock_img_exist.assert_called_once_with()
        mock_os_exist.assert_has_calls([
            mock.call(self.TEMPLATE_DIR), mock.call(self.TEMPLATE_PATH)
        ])

    @mock.patch.object(os.path, 'exists')
    @mock.patch.object(imagebackend.Rbd, 'exists')
    def test_cache_template_exists(self, mock_img_exist, mock_os_exist):
        """Template on disk but rbd image missing: create_image is used."""
        mock_os_exist.return_value = True
        mock_img_exist.return_value = False
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(None, self.TEMPLATE)
        mock_img_exist.assert_called_once_with()
        mock_os_exist.assert_has_calls([
            mock.call(self.TEMPLATE_DIR), mock.call(self.TEMPLATE_PATH)
        ])

    @mock.patch.object(imagebackend.Rbd, 'exists')
    def test_create_image(self, mock_exists):
        """A missing rbd image is imported via 'rbd import'."""
        fn = mock.MagicMock()
        rbd_utils.rbd.RBD_FEATURE_LAYERING = 1
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self)
        image = self.image_class(self.INSTANCE, self.NAME)
        mock_exists.return_value = False
        image.create_image(fn, self.TEMPLATE_PATH, None)
        rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
        # Expected import command built from the configured pool/user/conf.
        cmd = ('rbd', 'import', '--pool', self.POOL, self.TEMPLATE_PATH,
               rbd_name, '--image-format=2', '--id', self.USER,
               '--conf', self.CONF)
        self.assertEqual(fake_processutils.fake_execute_get_log(),
                         [' '.join(cmd)])
        mock_exists.assert_has_calls([mock.call(), mock.call()])
        fn.assert_called_once_with(target=self.TEMPLATE_PATH)

    @mock.patch.object(images, 'qemu_img_info')
    @mock.patch.object(os.path, 'exists', return_value=False)
    def test__remove_non_raw_cache_image_not_exists(
            self, mock_exists, mock_qemu):
        """No cache file on disk: qemu-img is never probed."""
        image = self.image_class(self.INSTANCE, self.NAME)
        image._remove_non_raw_cache_image(self.TEMPLATE_PATH)
        mock_qemu.assert_not_called()

    @mock.patch.object(os, 'remove')
    @mock.patch.object(images, 'qemu_img_info',
                       return_value=imageutils.QemuImgInfo())
    @mock.patch.object(os.path, 'exists', return_value=True)
    def test__remove_non_raw_cache_image_with_raw_cache(
            self, mock_exists, mock_qemu, mock_remove):
        """A raw cache image is kept."""
        mock_qemu.return_value.file_format = 'raw'
        image = self.image_class(self.INSTANCE, self.NAME)
        image._remove_non_raw_cache_image(self.TEMPLATE_PATH)
        mock_remove.assert_not_called()

    @mock.patch.object(os, 'remove')
    @mock.patch.object(images, 'qemu_img_info',
                       return_value=imageutils.QemuImgInfo())
    @mock.patch.object(os.path, 'exists', return_value=True)
    def test__remove_non_raw_cache_image_with_qcow2_cache(
            self, mock_exists, mock_qemu, mock_remove):
        """A qcow2 cache image is deleted."""
        mock_qemu.return_value.file_format = 'qcow2'
        image = self.image_class(self.INSTANCE, self.NAME)
        image._remove_non_raw_cache_image(self.TEMPLATE_PATH)
        mock_remove.assert_called_once_with(self.TEMPLATE_PATH)

    @mock.patch.object(images, 'qemu_img_info',
                       return_value=imageutils.QemuImgInfo())
    @mock.patch.object(rbd_utils.RBDDriver, 'resize')
    @mock.patch.object(imagebackend.Rbd, 'verify_base_size')
    @mock.patch.object(imagebackend.Rbd, 'get_disk_size')
    @mock.patch.object(imagebackend.Rbd, 'exists')
    def test_create_image_resize(self, mock_exists, mock_get,
                                 mock_verify, mock_resize, mock_qemu):
        """Requested size larger than the template: import then resize."""
        fn = mock.MagicMock()
        full_size = self.SIZE * 2
        rbd_utils.rbd.RBD_FEATURE_LAYERING = 1
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self)
        image = self.image_class(self.INSTANCE, self.NAME)
        mock_exists.return_value = False
        mock_qemu.return_value.file_format = 'raw'
        mock_get.return_value = self.SIZE
        rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
        cmd = ('rbd', 'import', '--pool', self.POOL, self.TEMPLATE_PATH,
               rbd_name, '--image-format=2', '--id', self.USER,
               '--conf', self.CONF)
        image.create_image(fn, self.TEMPLATE_PATH, full_size)
        self.assertEqual(fake_processutils.fake_execute_get_log(),
                         [' '.join(cmd)])
        mock_exists.assert_has_calls([mock.call(), mock.call()])
        mock_get.assert_called_once_with(rbd_name)
        mock_resize.assert_called_once_with(rbd_name, full_size)
        mock_verify.assert_called_once_with(self.TEMPLATE_PATH, full_size)
        fn.assert_called_once_with(target=self.TEMPLATE_PATH)

    @mock.patch.object(images, 'qemu_img_info',
                       return_value=imageutils.QemuImgInfo())
    @mock.patch.object(imagebackend.Rbd, 'get_disk_size')
    @mock.patch.object(imagebackend.Rbd, 'exists')
    def test_create_image_already_exists(self, mock_exists, mock_get,
                                         mock_qemu):
        """Existing rbd image: only size checks run, no import."""
        rbd_utils.rbd.RBD_FEATURE_LAYERING = 1
        image = self.image_class(self.INSTANCE, self.NAME)
        mock_exists.return_value = True
        mock_qemu.return_value.file_format = 'raw'
        mock_get.return_value = self.SIZE
        rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
        fn = mock.MagicMock()
        image.create_image(fn, self.TEMPLATE_PATH, self.SIZE)
        mock_exists.assert_has_calls([mock.call(), mock.call()])
        mock_get.assert_has_calls([mock.call(self.TEMPLATE_PATH),
                                   mock.call(rbd_name)])

    def test_prealloc_image(self):
        """No external command runs when the rbd image already exists."""
        # NOTE(review): other prealloc tests in this module use
        # self.flags(...), which auto-restores the override on cleanup;
        # this raw set_override may leak into later tests -- confirm.
        CONF.set_override('preallocate_images', 'space')
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self)
        image = self.image_class(self.INSTANCE, self.NAME)

        def fake_fetch(target, *args, **kwargs):
            # No-op fetch: the image is treated as already present.
            return

        self.stub_out('os.path.exists', lambda _: True)
        self.stub_out('nova.virt.libvirt.imagebackend.Rbd.exists',
                      lambda *a, **kw: True)
        self.stub_out('nova.virt.libvirt.imagebackend.Rbd.get_disk_size',
                      lambda *a, **kw: self.SIZE)
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
        self.assertEqual(fake_processutils.fake_execute_get_log(), [])

    def test_parent_compatible(self):
        """Rbd.libvirt_info must keep the base class signature."""
        self.assertEqual(utils.getargspec(imagebackend.Image.libvirt_info),
                         utils.getargspec(self.image_class.libvirt_info))

    def test_image_path(self):
        """path is the rbd: URI built from pool, name, user and conf."""
        conf = "FakeConf"
        pool = "FakePool"
        user = "FakeUser"
        self.flags(images_rbd_pool=pool, group='libvirt')
        self.flags(images_rbd_ceph_conf=conf, group='libvirt')
        self.flags(rbd_user=user, group='libvirt')
        image = self.image_class(self.INSTANCE, self.NAME)
        rbd_path = "rbd:%s/%s:id=%s:conf=%s" % (pool, image.rbd_name,
                                                user, conf)
        self.assertEqual(image.path, rbd_path)

    def test_get_disk_size(self):
        """get_disk_size delegates to the driver's size() by rbd name."""
        image = self.image_class(self.INSTANCE, self.NAME)
        with mock.patch.object(image.driver, 'size') as size_mock:
            size_mock.return_value = 2361393152
            self.assertEqual(2361393152, image.get_disk_size(image.path))
            size_mock.assert_called_once_with(image.rbd_name)

    @mock.patch.object(images, 'qemu_img_info',
                       return_value=imageutils.QemuImgInfo())
    def test_create_image_too_small(self, mock_qemu):
        """Flavor disk smaller than the existing image raises."""
        mock_qemu.return_value.file_format = 'raw'
        image = self.image_class(self.INSTANCE, self.NAME)
        with mock.patch.object(image, 'driver') as driver_mock:
            driver_mock.exists.return_value = True
            driver_mock.size.return_value = 2
            self.assertRaises(exception.FlavorDiskSmallerThanImage,
                              image.create_image, mock.MagicMock(),
                              self.TEMPLATE_PATH, 1)
            driver_mock.size.assert_called_once_with(image.rbd_name)

    @mock.patch.object(rbd_utils.RBDDriver, "get_mon_addrs")
    def test_libvirt_info(self, mock_mon_addrs):
        """Run the shared libvirt_info test with stubbed monitor addrs."""
        def get_mon_addrs():
            hosts = ["server1", "server2"]
            ports = ["1899", "1920"]
            return hosts, ports
        mock_mon_addrs.side_effect = get_mon_addrs

        super(RbdTestCase, self).test_libvirt_info()

    @ddt.data(5, None)
    @mock.patch.object(rbd_utils.RBDDriver, "get_mon_addrs")
    def test_libvirt_info_scsi_with_unit(self, disk_unit, mock_mon_addrs):
        """SCSI variant of libvirt_info, parametrized on disk unit."""
        def get_mon_addrs():
            hosts = ["server1", "server2"]
            ports = ["1899", "1920"]
            return hosts, ports
        mock_mon_addrs.side_effect = get_mon_addrs

        super(RbdTestCase, self)._test_libvirt_info_scsi_with_unit(disk_unit)

    @mock.patch.object(rbd_utils.RBDDriver, "get_mon_addrs")
    def test_get_model(self, mock_mon_addrs):
        """get_model returns an RBDImage with secret and mon addresses."""
        pool = "FakePool"
        user = "FakeUser"
        self.flags(images_rbd_pool=pool, group='libvirt')
        self.flags(rbd_user=user, group='libvirt')
        self.flags(rbd_secret_uuid="3306a5c4-8378-4b3c-aa1f-7b48d3a26172",
                   group='libvirt')

        def get_mon_addrs():
            hosts = ["server1", "server2"]
            ports = ["1899", "1920"]
            return hosts, ports
        mock_mon_addrs.side_effect = get_mon_addrs

        image = self.image_class(self.INSTANCE, self.NAME)
        model = image.get_model(FakeConn())
        self.assertEqual(imgmodel.RBDImage(
            self.INSTANCE["uuid"] + "_fake.vm",
            "FakePool",
            "FakeUser",
            b"MTIzNDU2Cg==",
            ["server1:1899", "server2:1920"]),
            model)

    @mock.patch.object(rbd_utils.RBDDriver, 'flatten')
    def test_flatten(self, mock_flatten):
        """flatten delegates to the driver with the configured pool."""
        image = self.image_class(self.INSTANCE, self.NAME)
        image.flatten()
        mock_flatten.assert_called_once_with(image.rbd_name, pool=self.POOL)

    def test_import_file(self):
        """Importing over an existing image removes it first."""
        image = self.image_class(self.INSTANCE, self.NAME)

        @mock.patch.object(image, 'exists')
        @mock.patch.object(image.driver, 'remove_image')
        @mock.patch.object(image.driver, 'import_image')
        def _test(mock_import, mock_remove, mock_exists):
            mock_exists.return_value = True
            image.import_file(self.INSTANCE, mock.sentinel.file,
                              mock.sentinel.remote_name)
            name = '%s_%s' % (self.INSTANCE.uuid,
                              mock.sentinel.remote_name)
            mock_exists.assert_called_once_with()
            mock_remove.assert_called_once_with(name)
            mock_import.assert_called_once_with(mock.sentinel.file, name)
        _test()

    @mock.patch.object(imagebackend.Rbd, 'exists')
    @mock.patch.object(rbd_utils.RBDDriver, 'remove_image')
    @mock.patch.object(rbd_utils.RBDDriver, 'import_image')
    def test_import_file_not_found(self, mock_import, mock_remove,
                                   mock_exists):
        """Importing when no image exists skips removal."""
        image = self.image_class(self.INSTANCE, self.NAME)
        mock_exists.return_value = False
        image.import_file(self.INSTANCE, mock.sentinel.file,
                          mock.sentinel.remote_name)
        name = '%s_%s' % (self.INSTANCE.uuid,
                          mock.sentinel.remote_name)
        mock_exists.assert_called_once_with()
        self.assertFalse(mock_remove.called)
        mock_import.assert_called_once_with(mock.sentinel.file, name)

    def test_get_parent_pool(self):
        """Pool resolved from the rbd parent info when available."""
        image = self.image_class(self.INSTANCE, self.NAME)
        with mock.patch.object(rbd_utils.RBDDriver, 'parent_info') as mock_pi:
            mock_pi.return_value = [self.POOL, 'fake-image', 'fake-snap']
            parent_pool = image._get_parent_pool(self.CONTEXT, 'fake-image',
                                                 self.FSID)
            self.assertEqual(self.POOL, parent_pool)

    def test_get_parent_pool_no_parent_info(self):
        """Fall back to the glance location URL for the pool."""
        image = self.image_class(self.INSTANCE, self.NAME)
        rbd_uri = 'rbd://%s/%s/fake-image/fake-snap' % (self.FSID, self.POOL)
        with test.nested(mock.patch.object(rbd_utils.RBDDriver, 'parent_info'),
                         mock.patch.object(imagebackend.IMAGE_API, 'get'),
                         ) as (mock_pi, mock_get):
            mock_pi.side_effect = exception.ImageUnacceptable(image_id='test',
                                                              reason='test')
            mock_get.return_value = {'locations': [{'url': rbd_uri}]}
            parent_pool = image._get_parent_pool(self.CONTEXT, 'fake-image',
                                                 self.FSID)
            self.assertEqual(self.POOL, parent_pool)

    def test_get_parent_pool_non_local_image(self):
        """A location on a different cluster (fsid mismatch) is rejected."""
        image = self.image_class(self.INSTANCE, self.NAME)
        rbd_uri = 'rbd://remote-cluster/remote-pool/fake-image/fake-snap'
        with test.nested(
                mock.patch.object(rbd_utils.RBDDriver, 'parent_info'),
                mock.patch.object(imagebackend.IMAGE_API, 'get')
        ) as (mock_pi, mock_get):
            mock_pi.side_effect = exception.ImageUnacceptable(image_id='test',
                                                              reason='test')
            mock_get.return_value = {'locations': [{'url': rbd_uri}]}
            self.assertRaises(exception.ImageUnacceptable,
                              image._get_parent_pool, self.CONTEXT,
                              'fake-image', self.FSID)

    def test_direct_snapshot(self):
        """Happy path: snap, clone, flatten, cleanup; returns the rbd URL."""
        image = self.image_class(self.INSTANCE, self.NAME)
        test_snap = 'rbd://%s/%s/fake-image-id/snap' % (self.FSID, self.POOL)
        with test.nested(
                mock.patch.object(rbd_utils.RBDDriver, 'get_fsid',
                                  return_value=self.FSID),
                mock.patch.object(image, '_get_parent_pool',
                                  return_value=self.POOL),
                mock.patch.object(rbd_utils.RBDDriver, 'create_snap'),
                mock.patch.object(rbd_utils.RBDDriver, 'clone'),
                mock.patch.object(rbd_utils.RBDDriver, 'flatten'),
                mock.patch.object(image, 'cleanup_direct_snapshot')
        ) as (mock_fsid, mock_parent, mock_create_snap, mock_clone,
              mock_flatten, mock_cleanup):
            location = image.direct_snapshot(self.CONTEXT, 'fake-snapshot',
                                             'fake-format', 'fake-image-id',
                                             'fake-base-image')
            mock_fsid.assert_called_once_with()
            mock_parent.assert_called_once_with(self.CONTEXT,
                                                'fake-base-image',
                                                self.FSID)
            # Two snapshots: one on the source image, one on the clone.
            mock_create_snap.assert_has_calls([mock.call(image.rbd_name,
                                                         'fake-snapshot',
                                                         protect=True),
                                               mock.call('fake-image-id',
                                                         'snap',
                                                         pool=self.POOL,
                                                         protect=True)])
            mock_clone.assert_called_once_with(mock.ANY, 'fake-image-id',
                                               dest_pool=self.POOL)
            mock_flatten.assert_called_once_with('fake-image-id',
                                                 pool=self.POOL)
            mock_cleanup.assert_called_once_with(mock.ANY)
            self.assertEqual(test_snap, location)

    def test_direct_snapshot_cleans_up_on_failures(self):
        """A clone failure must trigger cleanup and skip flatten."""
        image = self.image_class(self.INSTANCE, self.NAME)
        test_snap = 'rbd://%s/%s/%s/snap' % (self.FSID, image.driver.pool,
                                             image.rbd_name)
        with test.nested(
                mock.patch.object(rbd_utils.RBDDriver, 'get_fsid',
                                  return_value=self.FSID),
                mock.patch.object(image, '_get_parent_pool',
                                  return_value=self.POOL),
                mock.patch.object(rbd_utils.RBDDriver, 'create_snap'),
                mock.patch.object(rbd_utils.RBDDriver, 'clone',
                                  side_effect=exception.Forbidden('testing')),
                mock.patch.object(rbd_utils.RBDDriver, 'flatten'),
                mock.patch.object(image, 'cleanup_direct_snapshot')) as (
                mock_fsid, mock_parent, mock_create_snap, mock_clone,
                mock_flatten, mock_cleanup):
            self.assertRaises(exception.Forbidden, image.direct_snapshot,
                              self.CONTEXT, 'snap', 'fake-format',
                              'fake-image-id', 'fake-base-image')
            mock_create_snap.assert_called_once_with(image.rbd_name, 'snap',
                                                     protect=True)
            self.assertFalse(mock_flatten.called)
            mock_cleanup.assert_called_once_with(dict(url=test_snap))

    def test_cleanup_direct_snapshot(self):
        """Snapshot removed; the volume itself is left alone by default."""
        image = self.image_class(self.INSTANCE, self.NAME)
        test_snap = 'rbd://%s/%s/%s/snap' % (self.FSID, image.driver.pool,
                                             image.rbd_name)
        with test.nested(
                mock.patch.object(rbd_utils.RBDDriver, 'remove_snap'),
                mock.patch.object(rbd_utils.RBDDriver, 'destroy_volume')
        ) as (mock_rm, mock_destroy):
            # Ensure that the method does nothing when no location is provided
            image.cleanup_direct_snapshot(None)
            self.assertFalse(mock_rm.called)

            # Ensure that destroy_volume is not called
            image.cleanup_direct_snapshot(dict(url=test_snap))
            mock_rm.assert_called_once_with(image.rbd_name, 'snap', force=True,
                                            ignore_errors=False,
                                            pool=image.driver.pool)
            self.assertFalse(mock_destroy.called)

    def test_cleanup_direct_snapshot_destroy_volume(self):
        """With also_destroy_volume=True the backing volume is removed too."""
        image = self.image_class(self.INSTANCE, self.NAME)
        test_snap = 'rbd://%s/%s/%s/snap' % (self.FSID, image.driver.pool,
                                             image.rbd_name)
        with test.nested(
                mock.patch.object(rbd_utils.RBDDriver, 'remove_snap'),
                mock.patch.object(rbd_utils.RBDDriver, 'destroy_volume')
        ) as (mock_rm, mock_destroy):
            # Ensure that destroy_volume is called
            image.cleanup_direct_snapshot(dict(url=test_snap),
                                          also_destroy_volume=True)
            mock_rm.assert_called_once_with(image.rbd_name, 'snap',
                                            force=True,
                                            ignore_errors=False,
                                            pool=image.driver.pool)
            mock_destroy.assert_called_once_with(image.rbd_name,
                                                 pool=image.driver.pool)
class PloopTestCase(_ImageTestCase, test.NoDBTestCase):
    """Tests for the Ploop (Virtuozzo) image backend."""

    SIZE = 1024

    def setUp(self):
        self.image_class = imagebackend.Ploop
        super(PloopTestCase, self).setUp()
        self.utils = imagebackend.utils

    @mock.patch.object(imagebackend.fileutils, 'ensure_tree')
    @mock.patch.object(os.path, 'exists')
    def test_cache(self, mock_exists, mock_ensure):
        """Nothing cached: the fetch fn runs and the dir is created."""
        mock_exists.side_effect = [False, False, False]
        exist_calls = [mock.call(self.TEMPLATE_DIR),
                       mock.call(self.PATH), mock.call(self.TEMPLATE_PATH)]
        fn = mock.MagicMock()
        image = self.image_class(self.INSTANCE, self.NAME)
        self.mock_create_image(image)
        image.cache(fn, self.TEMPLATE)
        mock_ensure.assert_called_once_with(self.TEMPLATE_DIR)
        mock_exists.assert_has_calls(exist_calls)
        fn.assert_called_once_with(target=self.TEMPLATE_PATH)

    @mock.patch.object(imagebackend.Ploop, 'get_disk_size',
                       return_value=2048)
    @mock.patch.object(imagebackend.utils, 'synchronized')
    @mock.patch('nova.virt.libvirt.utils.copy_image')
    @mock.patch('nova.privsep.libvirt.ploop_restore_descriptor')
    @mock.patch.object(imagebackend.disk, 'extend')
    def test_create_image(self, mock_extend, mock_ploop_restore_descriptor,
                          mock_copy, mock_sync, mock_get):
        """Template copied to root.hds, descriptor restored, then extended."""
        mock_sync.side_effect = lambda *a, **kw: self._fake_deco
        fn = mock.MagicMock()
        img_path = os.path.join(self.PATH, "root.hds")
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, 2048, image_id=None)
        mock_copy.assert_called_once_with(self.TEMPLATE_PATH, img_path)
        mock_ploop_restore_descriptor.assert_called_once_with(self.PATH,
                                                              img_path,
                                                              "raw")
        self.assertTrue(mock_sync.called)
        fn.assert_called_once_with(target=self.TEMPLATE_PATH, image_id=None)
        mock_extend.assert_called_once_with(
            imgmodel.LocalFileImage(self.PATH, imgmodel.FORMAT_PLOOP),
            2048)

    def test_create_image_generated(self):
        """Generated path: fn writes straight to the ploop directory."""
        fn = mock.Mock()
        image = self.image_class(self.INSTANCE, self.NAME)
        image.create_image(fn, self.TEMPLATE_PATH, 2048, ephemeral_size=2)
        fn.assert_called_with(target=self.PATH,
                              ephemeral_size=2)

    def test_prealloc_image(self):
        """Cache on an already-existing ploop image performs no work."""
        self.flags(preallocate_images='space')
        fake_processutils.fake_execute_clear_log()
        fake_processutils.stub_out_processutils_execute(self)
        image = self.image_class(self.INSTANCE, self.NAME)

        def fake_fetch(target, *args, **kwargs):
            # No-op fetch: the image is treated as already present.
            return

        self.stub_out('os.path.exists', lambda _: True)
        self.stub_out('nova.virt.libvirt.imagebackend.Ploop.exists',
                      lambda *a, **kw: True)
        self.stub_out('nova.virt.libvirt.imagebackend.Ploop.get_disk_size',
                      lambda *a, **kw: self.SIZE)
        image.cache(fake_fetch, self.TEMPLATE_PATH, self.SIZE)
class BackendTestCase(test.NoDBTestCase):
INSTANCE = objects.Instance(id=1, uuid=uuidutils.generate_uuid())
NAME = 'fake-name.suffix'
def setUp(self):
super(BackendTestCase, self).setUp()
self.flags(enabled=False, group='ephemeral_storage_encryption')
self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
self.INSTANCE['ephemeral_key_uuid'] = None
def get_image(self, use_cow, image_type):
return imagebackend.Backend(use_cow).by_name(self.INSTANCE, self.NAME,
image_type)
def _test_image(self, image_type, image_not_cow, image_cow):
image1 = self.get_image(False, image_type)
image2 = self.get_image(True, image_type)
def assertIsInstance(instance, class_object):
failure = ('Expected %s,' +
' but got %s.') % (class_object.__name__,
instance.__class__.__name__)
self.assertIsInstance(instance, class_object, msg=failure)
assertIsInstance(image1, image_not_cow)
assertIsInstance(image2, image_cow)
def test_image_flat(self):
self._test_image('raw', imagebackend.Flat, imagebackend.Flat)
def test_image_flat_preallocate_images(self):
self.flags(preallocate_images='space')
raw = imagebackend.Flat(self.INSTANCE, 'fake_disk', '/tmp/xyz')
self.assertTrue(raw.preallocate)
def test_image_flat_native_io(self):
self.flags(preallocate_images="space")
raw = imagebackend.Flat(self.INSTANCE, 'fake_disk', '/tmp/xyz')
self.assertEqual(raw.driver_io, "native")
def test_image_qcow2(self):
self._test_image('qcow2', imagebackend.Qcow2, imagebackend.Qcow2)
def test_image_qcow2_preallocate_images(self):
self.flags(preallocate_images='space')
qcow = imagebackend.Qcow2(self.INSTANCE, 'fake_disk', '/tmp/xyz')
self.assertTrue(qcow.preallocate)
def test_image_qcow2_native_io(self):
self.flags(preallocate_images="space")
qcow = imagebackend.Qcow2(self.INSTANCE, 'fake_disk', '/tmp/xyz')
self.assertEqual(qcow.driver_io, "native")
def test_image_lvm_native_io(self):
def _test_native_io(is_sparse, driver_io):
self.flags(images_volume_group='FakeVG', group='libvirt')
self.flags(sparse_logical_volumes=is_sparse, group='libvirt')
lvm = imagebackend.Lvm(self.INSTANCE, 'fake_disk')
self.assertEqual(lvm.driver_io, driver_io)
_test_native_io(is_sparse=False, driver_io="native")
_test_native_io(is_sparse=True, driver_io=None)
def test_image_lvm(self):
self.flags(images_volume_group='FakeVG', group='libvirt')
self._test_image('lvm', imagebackend.Lvm, imagebackend.Lvm)
@mock.patch.object(rbd_utils, 'rbd')
@mock.patch.object(rbd_utils, 'rados')
def test_image_rbd(self, mock_rados, mock_rbd):
conf = "FakeConf"
pool = "FakePool"
self.flags(images_rbd_pool=pool, group='libvirt')
self.flags(images_rbd_ceph_conf=conf, group='libvirt')
self._test_image('rbd', imagebackend.Rbd, imagebackend.Rbd)
def test_image_default(self):
    """Default image type: Flat when not copy-on-write, Qcow2 when CoW."""
    self._test_image('default', imagebackend.Flat, imagebackend.Qcow2)
class UtimeWorkaroundTestCase(test.NoDBTestCase):
    """Tests for imagebackend._update_utime_ignore_eacces error handling."""

    # Substring expected in the log output when an EACCES error is swallowed.
    ERROR_STUB = "sentinel.path: [Errno 13] Permission Denied"

    def setUp(self):
        super(UtimeWorkaroundTestCase, self).setUp()
        # Patch out the privileged utime call; tests drive failures via
        # side_effect on this mock.
        self.mock_utime = self.useFixture(
            fixtures.MockPatch('nova.privsep.path.utime')).mock

    def test_update_utime_no_error(self):
        # If utime doesn't raise an error we shouldn't raise or log anything
        imagebackend._update_utime_ignore_eacces(mock.sentinel.path)
        self.mock_utime.assert_called_once_with(mock.sentinel.path)
        self.assertNotIn(self.ERROR_STUB, self.stdlog.logger.output)

    def test_update_utime_eacces(self):
        # If utime raises EACCES we should log the error, but ignore it
        e = OSError()
        e.errno = errno.EACCES
        e.strerror = "Permission Denied"
        self.mock_utime.side_effect = e
        imagebackend._update_utime_ignore_eacces(mock.sentinel.path)
        self.mock_utime.assert_called_once_with(mock.sentinel.path)
        self.assertIn(self.ERROR_STUB, self.stdlog.logger.output)

    def test_update_utime_eio(self):
        # If utime raises any other error we should raise it
        e = OSError()
        e.errno = errno.EIO
        e.strerror = "IO Error"
        self.mock_utime.side_effect = e
        ex = self.assertRaises(
            OSError, imagebackend._update_utime_ignore_eacces,
            mock.sentinel.path)
        # The very same exception object must propagate unchanged.
        self.assertIs(ex, e)
        self.mock_utime.assert_called_once_with(mock.sentinel.path)
        self.assertNotIn(self.ERROR_STUB, self.stdlog.logger.output)
| 43.968524 | 79 | 0.623957 |
b0458f0dfb00a7a8b3cd980c9c4ff982e3fba499 | 39,458 | py | Python | singer_sdk/streams/core.py | meltano/sdk | 83dde4fe922f9f91bd3c57277849a2a2daa8f09a | [
"Apache-2.0"
] | 13 | 2021-06-21T17:30:32.000Z | 2021-12-06T18:45:34.000Z | singer_sdk/streams/core.py | meltano/sdk | 83dde4fe922f9f91bd3c57277849a2a2daa8f09a | [
"Apache-2.0"
] | null | null | null | singer_sdk/streams/core.py | meltano/sdk | 83dde4fe922f9f91bd3c57277849a2a2daa8f09a | [
"Apache-2.0"
] | null | null | null | """Stream abstract class."""
import abc
import copy
import datetime
import json
import logging
from os import PathLike
from pathlib import Path
from types import MappingProxyType
from typing import (
Any,
Callable,
Dict,
Generator,
Iterable,
List,
Mapping,
Optional,
Tuple,
Type,
TypeVar,
Union,
cast,
)
import pendulum
import requests
import singer
from singer import RecordMessage, SchemaMessage, StateMessage
from singer.schema import Schema
from singer_sdk.exceptions import InvalidStreamSortException, MaxRecordsLimitException
from singer_sdk.helpers._catalog import pop_deselected_record_properties
from singer_sdk.helpers._compat import final
from singer_sdk.helpers._singer import (
Catalog,
CatalogEntry,
MetadataMapping,
SelectionMask,
)
from singer_sdk.helpers._state import (
finalize_state_progress_markers,
get_starting_replication_value,
get_state_partitions_list,
get_writeable_state_dict,
increment_state,
log_sort_error,
reset_state_progress_markers,
write_replication_key_signpost,
write_starting_replication_value,
)
from singer_sdk.helpers._typing import conform_record_data_types, is_datetime_type
from singer_sdk.helpers._util import utc_now
from singer_sdk.mapper import SameRecordTransform, StreamMap
from singer_sdk.plugin_base import PluginBase as TapBaseClass
# Replication methods
REPLICATION_FULL_TABLE = "FULL_TABLE"
REPLICATION_INCREMENTAL = "INCREMENTAL"
REPLICATION_LOG_BASED = "LOG_BASED"

# Type variable bound to Stream, for factory-style constructors.
FactoryType = TypeVar("FactoryType", bound="Stream")

# Config key selecting the log level for metric lines ("INFO"/"DEBUG"/"NONE").
METRICS_LOG_LEVEL_SETTING = "metrics_log_level"
class Stream(metaclass=abc.ABCMeta):
"""Abstract base class for tap streams."""
STATE_MSG_FREQUENCY = 10000 # Number of records between state messages
_MAX_RECORDS_LIMIT: Optional[int] = None
# Used for nested stream relationships
parent_stream_type: Optional[Type["Stream"]] = None
ignore_parent_replication_key: bool = False
def __init__(
    self,
    tap: TapBaseClass,
    schema: Optional[Union[str, PathLike, Dict[str, Any], Schema]] = None,
    name: Optional[str] = None,
) -> None:
    """Init tap stream.

    Args:
        tap: Singer Tap this stream belongs to.
        schema: JSON schema for records in this stream. May be a dict, a
            ``singer.Schema`` object, or a path to a schema file.
        name: Name of this stream.

    Raises:
        ValueError: If the stream name is missing, if ``schema`` is of an
            unsupported type, or if no usable schema could be resolved.
        FileNotFoundError: If ``schema`` is a path and no file exists there.
    """
    if name:
        self.name: str = name
    if not self.name:
        raise ValueError("Missing argument or class variable 'name'.")
    self.logger: logging.Logger = tap.logger
    self.tap_name: str = tap.name
    self._config: dict = dict(tap.config)
    self._tap = tap
    self._tap_state = tap.state
    self._tap_input_catalog: Optional[Catalog] = None
    self._stream_maps: Optional[List[StreamMap]] = None
    self.forced_replication_method: Optional[str] = None
    self._replication_key: Optional[str] = None
    self._primary_keys: Optional[List[str]] = None
    self._state_partitioning_keys: Optional[List[str]] = None
    self._schema_filepath: Optional[Path] = None
    self._metadata: Optional[MetadataMapping] = None
    self._mask: Optional[SelectionMask] = None
    self._schema: dict
    self.child_streams: List[Stream] = []
    if schema:
        if isinstance(schema, (PathLike, str)):
            if not Path(schema).is_file():
                # BUG FIX: the original interpolated `self.schema_filepath`,
                # which is still None at this point (it is assigned on the
                # next line), so the message read "...file 'None'.".
                # Report the offending path argument instead.
                raise FileNotFoundError(
                    f"Could not find schema file '{schema}'."
                )
            self._schema_filepath = Path(schema)
        elif isinstance(schema, dict):
            self._schema = schema
        elif isinstance(schema, Schema):
            self._schema = schema.to_dict()
        else:
            raise ValueError(
                f"Unexpected type {type(schema).__name__} for arg 'schema'."
            )
    if self.schema_filepath:
        self._schema = json.loads(Path(self.schema_filepath).read_text())
    if not self.schema:
        raise ValueError(
            f"Could not initialize schema for stream '{self.name}'. "
            "A valid schema object or filepath was not provided."
        )
@property
def stream_maps(self) -> List[StreamMap]:
"""Get stream transformation maps.
The 0th item is the primary stream map. List should not be empty.
Returns:
A list of one or more map transformations for this stream.
"""
if self._stream_maps:
return self._stream_maps
if self._tap.mapper:
self._stream_maps = self._tap.mapper.stream_maps[self.name]
self.logger.info(
f"Tap has custom mapper. Using {len(self.stream_maps)} provided map(s)."
)
else:
self.logger.info(
f"No custom mapper provided for '{self.name}'. "
"Using SameRecordTransform as default."
)
self._stream_maps = [
SameRecordTransform(
stream_alias=self.name,
raw_schema=self.schema,
key_properties=self.primary_keys,
)
]
return self._stream_maps
@property
def is_timestamp_replication_key(self) -> bool:
    """Check whether the replication key is a timestamp.

    Developers can override to `True` in order to force this value, although
    this should not be required in most use cases since the type can generally
    be accurately detected from the JSON Schema.

    Returns:
        True if the stream uses a timestamp-based replication key.
    """
    if not self.replication_key:
        return False
    properties = self.schema.get("properties", {})
    return is_datetime_type(properties.get(self.replication_key))
def get_starting_replication_key_value(
self, context: Optional[dict]
) -> Optional[Any]:
"""Get starting replication key.
Will return the value of the stream's replication key when `--state` is passed.
If no prior state exists, will return `None`.
Developers should use this method to seed incremental processing for
non-datetime replication keys. For datetime and date replication keys, use
:meth:`~singer_sdk.Stream.get_starting_timestamp()`
Args:
context: Stream partition or context dictionary.
Returns:
Starting replication value.
"""
state = self.get_context_state(context)
return get_starting_replication_value(state)
def get_starting_timestamp(
    self, context: Optional[dict]
) -> Optional[datetime.datetime]:
    """Get starting replication timestamp.

    Will return the value of the stream's replication key when `--state` is
    passed. If no state exists, will return `start_date` if set, or `None` if
    neither the stream state nor `start_date` is set.

    Developers should use this method to seed incremental processing for date
    and datetime replication keys. For non-datetime replication keys, use
    :meth:`~singer_sdk.Stream.get_starting_replication_key_value()`

    Args:
        context: Stream partition or context dictionary.

    Returns:
        `start_date` from config, or state value if using timestamp replication.

    Raises:
        ValueError: If the replication value is not a valid timestamp.
    """
    bookmark = self.get_starting_replication_key_value(context)
    if bookmark is None:
        return None
    if not self.is_timestamp_replication_key:
        raise ValueError(
            f"The replication key {self.replication_key} is not of timestamp type"
        )
    return cast(datetime.datetime, pendulum.parse(bookmark))
@final
@property
def selected(self) -> bool:
"""Check if stream is selected.
Returns:
True if the stream is selected.
"""
return self.mask.get((), True)
@final
@property
def has_selected_descendents(self) -> bool:
"""Check descendents.
Returns:
True if any child streams are selected, recursively.
"""
for child in self.child_streams or []:
if child.selected or child.has_selected_descendents:
return True
return False
@final
@property
def descendent_streams(self) -> List["Stream"]:
"""Get child streams.
Returns:
A list of all children, recursively.
"""
result: List[Stream] = list(self.child_streams) or []
for child in self.child_streams:
result += child.descendent_streams or []
return result
def _write_replication_key_signpost(
self,
context: Optional[dict],
value: Union[datetime.datetime, str, int, float],
) -> None:
"""Write the signpost value, if available.
Args:
context: Stream partition or context dictionary.
value: TODO
Returns:
TODO
"""
if not value:
return
state = self.get_context_state(context)
write_replication_key_signpost(state, value)
def _write_starting_replication_value(self, context: Optional[dict]) -> None:
"""Write the starting replication value, if available.
Args:
context: Stream partition or context dictionary.
"""
value = None
state = self.get_context_state(context)
if self.replication_key:
replication_key_value = state.get("replication_key_value")
if replication_key_value and self.replication_key == state.get(
"replication_key"
):
value = replication_key_value
elif "start_date" in self.config:
value = self.config["start_date"]
write_starting_replication_value(state, value)
def get_replication_key_signpost(
self, context: Optional[dict]
) -> Optional[Union[datetime.datetime, Any]]:
"""Get the replication signpost.
For timestamp-based replication keys, this defaults to `utc_now()`. For
non-timestamp replication keys, default to `None`. For consistency in subsequent
calls, the value will be frozen (cached) at its initially called state, per
partition argument if applicable.
Developers may optionally override this method in advanced use cases such
as unsorted incremental streams or complex hierarchical stream scenarios.
For more info: :doc:`/implementation/state`
Args:
context: Stream partition or context dictionary.
Returns:
Max allowable bookmark value for this stream's replication key.
"""
if self.is_timestamp_replication_key:
return utc_now()
return None
@property
def schema_filepath(self) -> Optional[Path]:
"""Get path to schema file.
Returns:
Path to a schema file for the stream or `None` if n/a.
"""
return self._schema_filepath
@property
def schema(self) -> dict:
"""Get schema.
Returns:
JSON Schema dictionary for this stream.
"""
return self._schema
@property
def primary_keys(self) -> Optional[List[str]]:
    """Get primary keys.

    Returns:
        A list of primary key(s) for the stream; an empty list when unset.
    """
    return self._primary_keys or []
@primary_keys.setter
def primary_keys(self, new_value: List[str]) -> None:
"""Set primary key(s) for the stream.
Args:
new_value: TODO
"""
self._primary_keys = new_value
@property
def state_partitioning_keys(self) -> Optional[List[str]]:
"""Get state partition keys.
If not set, a default partitioning will be inherited from the stream's context.
If an empty list is set (`[]`), state will be held in one bookmark per stream.
Returns:
Partition keys for the stream state bookmarks.
"""
return self._state_partitioning_keys
@state_partitioning_keys.setter
def state_partitioning_keys(self, new_value: Optional[List[str]]) -> None:
"""Set partition keys for the stream state bookmarks.
If not set, a default partitioning will be inherited from the stream's context.
If an empty list is set (`[]`), state will be held in one bookmark per stream.
Args:
new_value: the new list of keys
"""
self._state_partitioning_keys = new_value
@property
def replication_key(self) -> Optional[str]:
"""Get replication key.
Returns:
Replication key for the stream.
"""
if not self._replication_key:
return None
return self._replication_key
@replication_key.setter
def replication_key(self, new_value: str) -> None:
"""Set replication key for the stream.
Args:
new_value: TODO
"""
self._replication_key = new_value
@property
def is_sorted(self) -> bool:
"""Check if stream is sorted.
When `True`, incremental streams will attempt to resume if unexpectedly
interrupted.
This setting enables additional checks which may trigger
`InvalidStreamSortException` if records are found which are unsorted.
Returns:
`True` if stream is sorted. Defaults to `False`.
"""
return False
@property
def metadata(self) -> MetadataMapping:
"""Get stream metadata.
Metadata attributes (`inclusion`, `selected`, etc.) are part of the Singer spec.
Metadata from an input catalog will override standard metadata.
Returns:
A mapping from property breadcrumbs to metadata objects.
"""
if self._metadata is not None:
return self._metadata
if self._tap_input_catalog:
catalog_entry = self._tap_input_catalog.get_stream(self.tap_stream_id)
if catalog_entry:
self._metadata = catalog_entry.metadata
return self._metadata
self._metadata = MetadataMapping.get_standard_metadata(
schema=self.schema,
replication_method=self.forced_replication_method,
key_properties=self.primary_keys or [],
valid_replication_keys=(
[self.replication_key] if self.replication_key else None
),
schema_name=None,
)
# If there's no input catalog, select all streams
if self._tap_input_catalog is None:
self._metadata.root.selected = True
return self._metadata
@property
def _singer_catalog_entry(self) -> CatalogEntry:
"""Return catalog entry as specified by the Singer catalog spec.
Returns:
TODO
"""
return CatalogEntry(
tap_stream_id=self.tap_stream_id,
stream=self.name,
schema=Schema.from_dict(self.schema),
metadata=self.metadata,
key_properties=self.primary_keys or [],
replication_key=self.replication_key,
replication_method=self.replication_method,
is_view=None,
database=None,
table=None,
row_count=None,
stream_alias=None,
)
@property
def _singer_catalog(self) -> Catalog:
"""TODO.
Returns:
TODO
"""
return Catalog([(self.tap_stream_id, self._singer_catalog_entry)])
@property
def config(self) -> Mapping[str, Any]:
"""Get stream configuration.
Returns:
A frozen (read-only) config dictionary map.
"""
return MappingProxyType(self._config)
@property
def tap_stream_id(self) -> str:
"""Return a unique stream ID.
Default implementations will return `self.name` but this behavior may be
overridden if required by the developer.
Returns:
Unique stream ID.
"""
return self.name
@property
def replication_method(self) -> str:
    """Get replication method.

    A forced method takes precedence; otherwise the presence of a
    replication key selects incremental over full-table replication.

    Returns:
        Replication method to be used for this stream.
    """
    forced = self.forced_replication_method
    if forced:
        return str(forced)
    if self.replication_key:
        return REPLICATION_INCREMENTAL
    return REPLICATION_FULL_TABLE
# State properties:
@property
def tap_state(self) -> dict:
"""Return a writeable state dict for the entire tap.
Note: This dictionary is shared (and writable) across all streams.
This method is internal to the SDK and should not need to be overridden.
Developers may access this property but this is not recommended except in
advanced use cases. Instead, developers should access the latest stream
replication key values using :meth:`~singer_sdk.Stream.get_starting_timestamp()`
for timestamp keys, or
:meth:`~singer_sdk.Stream.get_starting_replication_key_value()` for
non-timestamp keys.
Returns:
A writeable state dict for the entire tap.
"""
return self._tap_state
def get_context_state(self, context: Optional[dict]) -> dict:
"""Return a writable state dict for the given context.
Gives a partitioned context state if applicable; else returns stream state.
A blank state will be created in none exists.
This method is internal to the SDK and should not need to be overridden.
Developers may access this property but this is not recommended except in
advanced use cases. Instead, developers should access the latest stream
replication key values using
:meth:`~singer_sdk.Stream.get_starting_timestamp()` for timestamp keys, or
:meth:`~singer_sdk.Stream.get_starting_replication_key_value()` for
non-timestamp keys.
Partition level may be overridden by
:attr:`~singer_sdk.Stream.state_partitioning_keys` if set.
Args:
context: Stream partition or context dictionary.
Returns:
A partitioned context state if applicable; else returns stream state.
A blank state will be created in none exists.
"""
state_partition_context = self._get_state_partition_context(context)
if state_partition_context:
return get_writeable_state_dict(
self.tap_state,
self.name,
state_partition_context=state_partition_context,
)
return self.stream_state
@property
def stream_state(self) -> dict:
"""Get writable state.
This method is internal to the SDK and should not need to be overridden.
Developers may access this property but this is not recommended except in
advanced use cases. Instead, developers should access the latest stream
replication key values using :meth:`~singer_sdk.Stream.get_starting_timestamp()`
for timestamp keys, or
:meth:`~singer_sdk.Stream.get_starting_replication_key_value()` for
non-timestamp keys.
A blank state entry will be created if one doesn't already exist.
Returns:
A writable state dict for this stream.
"""
return get_writeable_state_dict(self.tap_state, self.name)
# Partitions
@property
def partitions(self) -> Optional[List[dict]]:
"""Get stream partitions.
Developers may override this property to provide a default partitions list.
By default, this method returns a list of any partitions which are already
defined in state, otherwise None.
Returns:
A list of partition key dicts (if applicable), otherwise `None`.
"""
result: List[dict] = []
for partition_state in (
get_state_partitions_list(self.tap_state, self.name) or []
):
result.append(partition_state["context"])
return result or None
# Private bookmarking methods
def _increment_stream_state(
    self, latest_record: Dict[str, Any], *, context: Optional[dict] = None
) -> None:
    """Update state of stream or partition with data from the provided record.

    Args:
        latest_record: The most recently synced record, whose replication key
            value is folded into the stream/partition bookmark.
        context: Stream partition or context dictionary.

    Raises:
        ValueError: If the replication method requires a replication key but
            none is configured.
        InvalidStreamSortException: If ``is_sorted`` is True and unsorted data
            is detected (raised from ``increment_state``).
    """
    state_dict = self.get_context_state(context)
    if latest_record:
        if self.replication_method in [
            REPLICATION_INCREMENTAL,
            REPLICATION_LOG_BASED,
        ]:
            if not self.replication_key:
                # NOTE(review): the concatenated message renders as
                # "...stream(replication method=...)" — missing a space
                # between the f-string fragments.
                raise ValueError(
                    f"Could not detect replication key for '{self.name}' stream"
                    f"(replication method={self.replication_method})"
                )
            treat_as_sorted = self.is_sorted
            if not treat_as_sorted and self.state_partitioning_keys is not None:
                # Streams with custom state partitioning are not resumable.
                # NOTE(review): as written this branch is a no-op (it assigns
                # False when treat_as_sorted is already False). Presumably the
                # guard was meant to clear sortedness for custom-partitioned
                # streams — confirm intent upstream before changing behavior.
                treat_as_sorted = False
            increment_state(
                state_dict,
                replication_key=self.replication_key,
                latest_record=latest_record,
                is_sorted=treat_as_sorted,
            )
# Private message authoring methods:
def _write_state_message(self) -> None:
"""Write out a STATE message with the latest state."""
singer.write_message(StateMessage(value=self.tap_state))
def _generate_schema_messages(self) -> Generator[SchemaMessage, None, None]:
"""Generate schema messages from stream maps.
Yields:
Schema message objects.
"""
bookmark_keys = [self.replication_key] if self.replication_key else None
for stream_map in self.stream_maps:
schema_message = SchemaMessage(
stream_map.stream_alias,
stream_map.transformed_schema,
stream_map.transformed_key_properties,
bookmark_keys,
)
yield schema_message
def _write_schema_message(self) -> None:
"""Write out a SCHEMA message with the stream schema."""
for schema_message in self._generate_schema_messages():
singer.write_message(schema_message)
@property
def mask(self) -> SelectionMask:
"""Get a boolean mask for stream and property selection.
Returns:
A mapping of breadcrumbs to boolean values, representing stream and field
selection.
"""
if self._mask is None:
self._mask = self.metadata.resolve_selection()
return self._mask
def _generate_record_messages(
self,
record: dict,
) -> Generator[RecordMessage, None, None]:
"""Write out a RECORD message.
Args:
record: A single stream record.
Yields:
Record message objects.
"""
pop_deselected_record_properties(record, self.schema, self.mask, self.logger)
record = conform_record_data_types(
stream_name=self.name,
row=record,
schema=self.schema,
logger=self.logger,
)
for stream_map in self.stream_maps:
mapped_record = stream_map.transform(record)
# Emit record if not filtered
if mapped_record is not None:
record_message = RecordMessage(
stream=stream_map.stream_alias,
record=mapped_record,
version=None,
time_extracted=utc_now(),
)
yield record_message
def _write_record_message(self, record: dict) -> None:
"""Write out a RECORD message.
Args:
record: A single stream record.
"""
for record_message in self._generate_record_messages(record):
singer.write_message(record_message)
@property
def _metric_logging_function(self) -> Optional[Callable]:
    """Resolve the logger call used to emit metric lines.

    The ``metrics_log_level`` setting selects the level; it defaults to
    "INFO" when unset, and "NONE" disables metric logging entirely.

    Returns:
        ``logger.info`` or ``logger.debug``, or ``None`` when disabled.

    Raises:
        ValueError: If ``metrics_log_level`` holds an unrecognized value.
    """
    level = str(self.config.get(METRICS_LOG_LEVEL_SETTING, "INFO")).upper()
    dispatch: Dict[str, Optional[Callable]] = {
        "INFO": self.logger.info,
        "DEBUG": self.logger.debug,
        "NONE": None,
    }
    if level not in dispatch:
        # BUG FIX: the original used `assert False` for this validation,
        # which is silently stripped when Python runs with -O. A bad
        # configuration value should always raise.
        raise ValueError(f"Unexpected logging level for metrics: {level}")
    return dispatch[level]
def _write_metric_log(self, metric: dict, extra_tags: Optional[dict]) -> None:
    """Emit a single metric log line, merging in any extra tags.

    Args:
        metric: Metric dictionary ("type", "metric", "value", "tags").
        extra_tags: Additional tag values merged into ``metric["tags"]``.

    Returns:
        None
    """
    log_fn = self._metric_logging_function
    if log_fn is None:
        # Metrics logging is disabled via configuration.
        return None
    if extra_tags:
        metric["tags"].update(extra_tags)
    log_fn(f"INFO METRIC: {str(metric)}")
def _write_record_count_log(
self, record_count: int, context: Optional[dict]
) -> None:
"""Emit a metric log. Optionally with appended tag info.
Args:
record_count: TODO
context: Stream partition or context dictionary.
"""
extra_tags = {} if not context else {"context": context}
counter_metric: Dict[str, Any] = {
"type": "counter",
"metric": "record_count",
"value": record_count,
"tags": {"stream": self.name},
}
self._write_metric_log(counter_metric, extra_tags=extra_tags)
def _write_request_duration_log(
self,
endpoint: str,
response: requests.Response,
context: Optional[dict],
extra_tags: Optional[dict],
) -> None:
"""TODO.
Args:
endpoint: TODO
response: TODO
context: Stream partition or context dictionary.
extra_tags: TODO
"""
request_duration_metric: Dict[str, Any] = {
"type": "timer",
"metric": "http_request_duration",
"value": response.elapsed.total_seconds(),
"tags": {
"endpoint": endpoint,
"http_status_code": response.status_code,
"status": "succeeded" if response.status_code < 400 else "failed",
},
}
extra_tags = extra_tags or {}
if context:
extra_tags["context"] = context
self._write_metric_log(metric=request_duration_metric, extra_tags=extra_tags)
def _check_max_record_limit(self, record_count: int) -> None:
    """Abort the sync once the stream's record cap is reached.

    Args:
        record_count: Number of records synced so far.

    Raises:
        MaxRecordsLimitException: When ``_MAX_RECORDS_LIMIT`` is set and
            ``record_count`` has reached it.
    """
    limit = self._MAX_RECORDS_LIMIT
    if limit is None or record_count < limit:
        return
    raise MaxRecordsLimitException(
        "Stream prematurely aborted due to the stream's max record "
        f"limit ({limit}) being reached."
    )
# Handle interim stream state
def reset_state_progress_markers(self, state: Optional[dict] = None) -> None:
"""Reset progress markers. If all=True, all state contexts will be set.
This method is internal to the SDK and should not need to be overridden.
Args:
state: State object to promote progress markers with.
"""
if state is None or state == {}:
context: Optional[dict]
for context in self.partitions or [{}]:
context = context or None
state = self.get_context_state(context)
reset_state_progress_markers(state)
return
reset_state_progress_markers(state)
def finalize_state_progress_markers(self, state: Optional[dict] = None) -> None:
"""Reset progress markers. If all=True, all state contexts will be finalized.
This method is internal to the SDK and should not need to be overridden.
If all=True and the stream has children, child streams will also be finalized.
Args:
state: State object to promote progress markers with.
"""
if state is None or state == {}:
for child_stream in self.child_streams or []:
child_stream.finalize_state_progress_markers()
context: Optional[dict]
for context in self.partitions or [{}]:
context = context or None
state = self.get_context_state(context)
finalize_state_progress_markers(state)
return
finalize_state_progress_markers(state)
# Private sync methods:
def _sync_records( # noqa C901 # too complex
self, context: Optional[dict] = None
) -> None:
"""Sync records, emitting RECORD and STATE messages.
Args:
context: Stream partition or context dictionary.
Raises:
InvalidStreamSortException: TODO
"""
record_count = 0
current_context: Optional[dict]
context_list: Optional[List[dict]]
context_list = [context] if context is not None else self.partitions
selected = self.selected
for current_context in context_list or [{}]:
partition_record_count = 0
current_context = current_context or None
state = self.get_context_state(current_context)
state_partition_context = self._get_state_partition_context(current_context)
self._write_starting_replication_value(current_context)
child_context: Optional[dict] = (
None if current_context is None else copy.copy(current_context)
)
for record_result in self.get_records(current_context):
if isinstance(record_result, tuple):
# Tuple items should be the record and the child context
record, child_context = record_result
else:
record = record_result
child_context = copy.copy(
self.get_child_context(record=record, context=child_context)
)
for key, val in (state_partition_context or {}).items():
# Add state context to records if not already present
if key not in record:
record[key] = val
# Sync children, except when primary mapper filters out the record
if self.stream_maps[0].get_filter_result(record):
self._sync_children(child_context)
self._check_max_record_limit(record_count)
if selected:
if (record_count - 1) % self.STATE_MSG_FREQUENCY == 0:
self._write_state_message()
self._write_record_message(record)
try:
self._increment_stream_state(record, context=current_context)
except InvalidStreamSortException as ex:
log_sort_error(
log_fn=self.logger.error,
ex=ex,
record_count=record_count + 1,
partition_record_count=partition_record_count + 1,
current_context=current_context,
state_partition_context=state_partition_context,
stream_name=self.name,
)
raise ex
record_count += 1
partition_record_count += 1
if current_context == state_partition_context:
# Finalize per-partition state only if 1:1 with context
finalize_state_progress_markers(state)
if not context:
# Finalize total stream only if we have the full full context.
# Otherwise will be finalized by tap at end of sync.
finalize_state_progress_markers(self.stream_state)
self._write_record_count_log(record_count=record_count, context=context)
# Reset interim bookmarks before emitting final STATE message:
self._write_state_message()
# Public methods ("final", not recommended to be overridden)
@final
def sync(self, context: Optional[dict] = None) -> None:
"""Sync this stream.
This method is internal to the SDK and should not need to be overridden.
Args:
context: Stream partition or context dictionary.
"""
msg = f"Beginning {self.replication_method.lower()} sync of '{self.name}'"
if context:
msg += f" with context: {context}"
self.logger.info(f"{msg}...")
# Use a replication signpost, if available
signpost = self.get_replication_key_signpost(context)
if signpost:
self._write_replication_key_signpost(context, signpost)
# Send a SCHEMA message to the downstream target:
self._write_schema_message()
# Sync the records themselves:
self._sync_records(context)
def _sync_children(self, child_context: dict) -> None:
    """Sync all child streams that are selected, directly or transitively.

    Args:
        child_context: Context dictionary passed to each child stream's sync.
    """
    for child_stream in self.child_streams:
        if child_stream.selected or child_stream.has_selected_descendents:
            child_stream.sync(context=child_context)
# Overridable Methods
def apply_catalog(self, catalog: Catalog) -> None:
"""Apply a catalog dict, updating any settings overridden within the catalog.
Developers may override this method in order to introduce advanced catalog
parsing, or to explicitly fail on advanced catalog customizations which
are not supported by the tap.
Args:
catalog: Catalog object passed to the tap. Defines schema, primary and
replication keys, as well as selection metadata.
"""
self._tap_input_catalog = catalog
catalog_entry = catalog.get_stream(self.name)
if catalog_entry:
self.primary_keys = catalog_entry.key_properties
self.replication_key = catalog_entry.replication_key
if catalog_entry.replication_method:
self.forced_replication_method = catalog_entry.replication_method
def _get_state_partition_context(self, context: Optional[dict]) -> Optional[Dict]:
    """Reduce a context dict to the keys used for state partitioning.

    When ``state_partitioning_keys`` is unset, the full context is used as
    the state partition; when set, only those keys are retained.

    Args:
        context: Stream partition or context dictionary.

    Returns:
        The filtered context, the original context, or ``None`` when no
        context was given.
    """
    if context is None:
        return None
    partition_keys = self.state_partitioning_keys
    if partition_keys is None:
        return context
    return {key: val for key, val in context.items() if key in partition_keys}
def get_child_context(self, record: dict, context: Optional[dict]) -> dict:
    """Return a child context object from the record and optional provided context.

    By default, will return context if provided and otherwise the record dict.
    Developers may override this behavior to send specific information to child
    streams for context.

    Args:
        record: Individual record in the stream.
        context: Stream partition or context dictionary.

    Returns:
        A dictionary with context values for a child stream.

    Raises:
        NotImplementedError: If the stream has children but this method is not
            overridden.
    """
    if context is None:
        for child_stream in self.child_streams:
            if child_stream.state_partitioning_keys is None:
                parent_type = type(self).__name__
                child_type = type(child_stream).__name__
                # BUG FIX: added the trailing space after "...'. " below; the
                # original concatenation produced "...'.The parent stream...".
                raise NotImplementedError(
                    "No child context behavior was defined between parent stream "
                    f"'{self.name}' and child stream '{child_stream.name}'. "
                    "The parent stream must define "
                    f"`{parent_type}.get_child_context()` and/or the child stream "
                    f"must define `{child_type}.state_partitioning_keys`."
                )
    return context or record
# Abstract Methods
@abc.abstractmethod
def get_records(
self, context: Optional[dict]
) -> Iterable[Union[dict, Tuple[dict, dict]]]:
"""Abstract row generator function. Must be overridden by the child class.
Each row emitted should be a dictionary of property names to their values.
Returns either a record dict or a tuple: (record_dict, child_context)
A method which should retrieve data from the source and return records
incrementally using the python `yield` operator.
Only custom stream types need to define this method. REST and GraphQL streams
should instead use the class-specific methods for REST or GraphQL, respectively.
This method takes an optional `context` argument, which can be safely ignored
unless the stream is a child stream or requires partitioning.
More info: :doc:`/partitioning`.
Parent streams can optionally return a tuple, in which
case the second item in the tuple being a `child_context` dictionary for the
stream's `context`.
More info: :doc:`/parent_streams`
Args:
context: Stream partition or context dictionary.
"""
pass
def post_process(self, row: dict, context: Optional[dict] = None) -> Optional[dict]:
"""As needed, append or transform raw data to match expected structure.
Optional. This method gives developers an opportunity to "clean up" the results
prior to returning records to the downstream tap - for instance: cleaning,
renaming, or appending properties to the raw record result returned from the
API.
Developers may also return `None` from this method to filter out
invalid or not-applicable records from the stream.
Args:
row: Individual record in the stream.
context: Stream partition or context dictionary.
Returns:
The resulting record dict, or `None` if the record should be excluded.
"""
return row
| 34.612281 | 88 | 0.616225 |
c05bd9eeb96614475b1276eb4066b357a051df2d | 3,683 | py | Python | cbstar/comicfile.py | chivalry/cbstar | 2d4badfb0dacfbb4208d5b863a828e1e923a7353 | [
"MIT"
] | 1 | 2018-07-04T21:18:30.000Z | 2018-07-04T21:18:30.000Z | cbstar/comicfile.py | chivalry/cbstar | 2d4badfb0dacfbb4208d5b863a828e1e923a7353 | [
"MIT"
] | null | null | null | cbstar/comicfile.py | chivalry/cbstar | 2d4badfb0dacfbb4208d5b863a828e1e923a7353 | [
"MIT"
] | null | null | null | from zipfile import ZipFile
from zipfile import ZIP_DEFLATED
from enum import Enum
import os
from os import path
from tempfile import TemporaryDirectory
import shutil
class ComicFile:
    """An object representing a comic book archive file."""

    class FileType(Enum):
        """Archive container formats, recognized by file extension."""
        none = 0
        zip = 1
        rar = 2
        sevenz = 3
        ace = 4
        tar = 5

    class ComicFileError(Exception): pass
    class FileNotFoundError(OSError): pass
    class PageOutOfRangeError(IndexError): pass

    def __init__(self, file_path:str, save_path:str=None):
        """Store the archive path and the path used when saving changes.

        Args:
            file_path: Path to the comic archive to operate on.
            save_path: Where modified archives are written; defaults to
                overwriting ``file_path``.
        """
        self.file_path = file_path
        self.save_path = save_path or file_path

    def __str__(self) -> str:
        """Return the archive's base file name, or "empty" if no path is set."""
        # `is not None` is the idiomatic identity check (was `!= None`).
        return os.path.basename(self.file_path) if self.file_path is not None else "empty"

    @property
    def file_type(self) -> FileType:
        """Classify the archive by its file extension (case-insensitive)."""
        file_types = {'.zip': self.FileType.zip,
                      '.cbz': self.FileType.zip,
                      '.rar': self.FileType.rar,
                      '.cbr': self.FileType.rar,
                      '.7z' : self.FileType.sevenz,
                      '.cb7': self.FileType.sevenz,
                      '.ace': self.FileType.ace,
                      '.cba': self.FileType.ace,
                      '.tar': self.FileType.tar,
                      '.cbt': self.FileType.tar,
                      }
        # Bug fix: os.path.splitext returns a (root, ext) tuple; the old code
        # looked the whole tuple up in the dict, so every file mapped to
        # FileType.none.  Use just the extension, lower-cased so '.CBZ' works.
        ext = os.path.splitext(self.file_path)[1].lower()
        return file_types.get(ext, self.FileType.none)

    def page_names(self):
        """Return the archive's member names, excluding directory entries.

        Raises:
            ComicFile.FileNotFoundError: If ``file_path`` does not exist.
        """
        if not os.path.isfile(self.file_path):
            raise ComicFile.FileNotFoundError()
        with ZipFile(self.file_path) as archive:
            members = archive.namelist()
        # Directory members end with '/' and are not pages.
        return [item for item in members if not item.endswith('/')]

    def page_count(self):
        """Return the number of pages in the file."""
        return len(self.page_names())

    def delete_page(self, page:int=1):
        """Remove the 1-based ``page`` entry from the archive and save it.

        NOTE(review): despite the original docstring's mention of
        case-insensitive sorting, members are visited in raw archive order
        (and directory entries are counted too) -- confirm before relying
        on the index.
        """
        # Copy every member except the page to delete into a fresh zip in a
        # temporary directory, then copy the new archive over `save_path`.
        base_name = os.path.basename(self.file_path)
        with ZipFile(self.file_path, 'r') as zip_in:
            with TemporaryDirectory() as tmpdir_path:
                tmp_zip_path = os.path.join(tmpdir_path, base_name)
                with ZipFile(tmp_zip_path, 'w', ZIP_DEFLATED) as zip_out:
                    i = 1
                    for member in zip_in.infolist():
                        buffer = zip_in.read(member.filename)
                        if i != page:
                            zip_out.writestr(member, buffer)
                        i += 1
                shutil.copy(tmp_zip_path, self.save_path)

    def set_attribute(self, name:str, value:str):
        """Set the comic book archive attribute to the passed value."""
        pass

    def append_attribute(self, name:str, value:str):
        """Append the passed value to the named attribute."""
        pass
if __name__ == '__main__':
    # Smoke test: point a ComicFile at a bundled sample archive and print stats.
    script_path = path.realpath(__file__)
    parent_dir = path.abspath(path.join(script_path, os.pardir))
    targ_dir = path.join(parent_dir, 'files')
    targ_file = path.join(targ_dir, '100 Bullets - Brian Azzarello.cbz' )
    # Bug fix: ComicFile requires file_path at construction (the old
    # `ComicFile()` raised TypeError), and the attribute the class reads is
    # `file_path`, not `file` (the old `comic.file = ...` was a silent no-op).
    comic = ComicFile(targ_file)
    print(comic.page_count())
    print(comic.file_type)
    print(comic)
    print(repr(comic))
| 34.101852 | 86 | 0.582134 |
dc53631b9ad1fc9659b36cfc470792c8e28dc6d2 | 462 | py | Python | tests/assets/test_mock_service.py | Vikash-Kothary/aem-cmd-python | a01d4b9cbc34c326f74c32a0015e0c77efb6be91 | [
"MIT"
] | 2 | 2020-07-01T03:36:45.000Z | 2022-02-09T06:16:34.000Z | tests/assets/test_mock_service.py | Vikash-Kothary/aem-cmd-python | a01d4b9cbc34c326f74c32a0015e0c77efb6be91 | [
"MIT"
] | 4 | 2019-11-13T23:28:01.000Z | 2019-11-25T13:34:17.000Z | tests/assets/test_mock_service.py | Vikash-Kothary/aem-cmd | a01d4b9cbc34c326f74c32a0015e0c77efb6be91 | [
"MIT"
] | null | null | null | # coding: utf-8
from nose.tools import eq_
from test_utils.mocks.dam import MockAssetsService
def test_mock_assets_service():
    """Create a folder and an asset on the mock DAM service and verify the repo."""
    service = MockAssetsService()
    # A freshly created service contains only the root entry.
    eq_(1, len(service.repo))
    # Adding a folder under the root creates a second entry whose parent is root.
    service.add_folder('/', 'my_folder')
    eq_(2, len(service.repo))
    eq_(service.repo['/'], service.repo['/my_folder']['parent'])
    # Adding an asset registers it both as its own entry and in the folder's asset list.
    service.add_asset('/my_folder', 'bernard.jpg')
    eq_(1, len(service.repo['/my_folder']['assets']))
    eq_(service.repo['/my_folder/bernard.jpg'], service.repo['/my_folder']['assets'][0])
| 30.8 | 76 | 0.645022 |
4afae7a1e5ff273d3a8cb4eb679b44f3e428d1d9 | 1,524 | py | Python | 30-Days-of-Code/Day2:_Operators.py | sanskritilakhmani/Hackerrank | 500694c1e6888fc657becc2c503fb5860bf39ec7 | [
"Apache-2.0"
] | 1 | 2022-02-16T18:37:49.000Z | 2022-02-16T18:37:49.000Z | 30-Days-of-Code/Day2:_Operators.py | TAFFAHACHRAF/Hackerrank | 500694c1e6888fc657becc2c503fb5860bf39ec7 | [
"Apache-2.0"
] | null | null | null | 30-Days-of-Code/Day2:_Operators.py | TAFFAHACHRAF/Hackerrank | 500694c1e6888fc657becc2c503fb5860bf39ec7 | [
"Apache-2.0"
] | 4 | 2021-07-18T16:24:02.000Z | 2022-02-15T21:42:03.000Z | # Task
# Given the meal price (base cost of a meal), tip percent (the percentage of the meal price being added as tip), and tax percent (the
# percentage of the meal price being added as tax) for a meal, find and print the meal's total cost.
# Note: Be sure to use precise values for your calculations, or you may end up with an incorrectly rounded result!
# Input Format
# There are 3 lines of numeric input:
# The first line has a double, mealCost (the cost of the meal before tax and tip).
# The second line has an integer, tipPercent (the percentage of mealCost being added as tip).
# The third line has an integer, taxPercent (the percentage of mealCost being added as tax).
# Output Format
# Print The total meal cost is totalCost dollars., where totalCost is the rounded integer result of the entire bill (mealCost with
# added tax and tip).
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'solve' function below.
#
# The function accepts following parameters:
# 1. DOUBLE meal_cost
# 2. INTEGER tip_percent
# 3. INTEGER tax_percent
#
def solve(meal_cost, tip_percent, tax_percent):
    """Print the total meal cost (meal + tip + tax) rounded to the nearest integer.

    Args:
        meal_cost: Base cost of the meal before tip and tax.
        tip_percent: Tip, as a percentage of meal_cost.
        tax_percent: Tax, as a percentage of meal_cost.
    """
    tip = meal_cost * tip_percent / 100
    tax = meal_cost * tax_percent / 100
    total_cost = meal_cost + tip + tax
    # `return print(...)` always returned None; a bare print states the intent.
    print(round(total_cost))
if __name__ == '__main__':
    # HackerRank supplies the three inputs one per line:
    # meal cost (double), tip percent (int), tax percent (int).
    meal_cost = float(input().strip())
    tip_percent = int(input().strip())
    tax_percent = int(input().strip())
    solve(meal_cost, tip_percent, tax_percent)
| 29.307692 | 133 | 0.727034 |
50bdedf7e35a4cd407a32de49e1d6065fa6f412d | 338 | py | Python | codeChef/practice/easy/candle.py | gauravsingh58/algo | 397859a53429e7a585e5f6964ad24146c6261326 | [
"WTFPL"
] | 1 | 2020-09-30T19:53:08.000Z | 2020-09-30T19:53:08.000Z | codeChef/practice/easy/candle.py | gauravsingh58/algo | 397859a53429e7a585e5f6964ad24146c6261326 | [
"WTFPL"
] | null | null | null | codeChef/practice/easy/candle.py | gauravsingh58/algo | 397859a53429e7a585e5f6964ad24146c6261326 | [
"WTFPL"
] | 1 | 2020-10-15T09:10:57.000Z | 2020-10-15T09:10:57.000Z | for _ in range(int(raw_input())):
    # Python 2 code: `map` returns a list here, which .index()/.remove() rely on.
    candles = map(int, raw_input().split())
    # m1 = smallest candle count; i1 = first digit position holding that count.
    m1 = min(candles)
    i1 = candles.index(m1)
    if i1 == 0:
        # Digit 0 is the scarcest.  A number cannot start with 0, so also find
        # the next-scarcest digit (m2 at 1-based position i2, excluding digit 0).
        candles.remove(m1)
        m2 = min(candles)
        i2 = candles.index(m2) + 1
        if m1 == m2:
            # NOTE(review): prints digit i2 repeated m2+1 times -- presumably
            # the smallest number requiring more candles than are available.
            print str(i2) * (m2+1)
        else:
            print '1' + '0' * (m1+1)
    else:
        # Digit i1 runs out first; the answer repeats it m1+1 times (next line).
print str(i1) * (m1+1) | 21.125 | 41 | 0.523669 |
14dddc3390551a6be0cef97fb099bedaa21f85c4 | 6,697 | py | Python | gpMgmt/bin/gpload_test/gpload2/TEST_local_config.py | abhisheknishant138/gpdb | 1805743d505837026aa137cabb8a7072d745a129 | [
"PostgreSQL",
"Apache-2.0"
] | 4 | 2017-11-28T08:12:58.000Z | 2020-10-28T04:15:52.000Z | gpMgmt/bin/gpload_test/gpload2/TEST_local_config.py | abhisheknishant138/gpdb | 1805743d505837026aa137cabb8a7072d745a129 | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | gpMgmt/bin/gpload_test/gpload2/TEST_local_config.py | abhisheknishant138/gpdb | 1805743d505837026aa137cabb8a7072d745a129 | [
"PostgreSQL",
"Apache-2.0"
] | null | null | null | from TEST_local_base import *
# Each test pairs a gpload command-line flag with a generated YAML config;
# @prepare_before_test writes the query file that the test harness executes.
@prepare_before_test(num=44, cmd="-h "+str(hostNameAddrs))
def test_44_gpload_config_h():
    "44 gpload command config test -h hostname"
    copy_data('external_file_01.txt', 'data_file.txt')
    write_config_file( host="",format='text',file='data_file.txt',table='texttable',delimiter="'|'")

# -p with the real coordinator port.
@prepare_before_test(num=45, cmd="-p "+str(coordinatorPort))
def test_45_gpload_config_p():
    "45 gpload command config test -p port"
    copy_data('external_file_01.txt', 'data_file.txt')
    write_config_file(port="", format='text',file='data_file.txt',table='texttable')

# -p with a bogus port: exercises gpload's connection-failure path.
@prepare_before_test(num=46, cmd="-p 9999")
def test_46_gpload_config_wrong_p():
    "46 gpload command config test -p port"
    copy_data('external_file_01.txt', 'data_file.txt')
    write_config_file(port="", format='text',file='data_file.txt',table='texttable')

''' this case runs extreamly slowly, comment it here
@prepare_before_test(num=47, cmd="-h 1.2.3.4")
def test_47_gpload_config_wrong_h():
    "47 gpload command config test -h hostname"
    copy_data('external_file_01.txt', 'data_file.txt')
    write_config_file( host="",format='text',file='data_file.txt',table='texttable',delimiter="'|'")
'''
# -d with the pre-created test database, then with a nonexistent one.
@prepare_before_test(num=48, cmd="-d reuse_gptest")
def test_48_gpload_config_d():
    "48 gpload command config test -d database"
    copy_data('external_file_01.txt', 'data_file.txt')
    write_config_file(database="", format='text',file='data_file.txt',table='texttable')

@prepare_before_test(num=49, cmd="-d notexistdb")
def test_49_gpload_config_wrong_d():
    "49 gpload command config test -d with wrong database"
    copy_data('external_file_01.txt', 'data_file.txt')
    write_config_file(database="", format='text',file='data_file.txt',table='texttable')

# -U with an existing user, then with a nonexistent one.
@prepare_before_test(num=50, cmd="-U gpadmin")
def test_50_gpload_config_U():
    "50 gpload command config test -U username"
    copy_data('external_file_01.txt', 'data_file.txt')
    write_config_file(user="", format='text',file='data_file.txt',table='texttable')

@prepare_before_test(num=51, cmd="-U notexistusr")
def test_51_gpload_config_wrong_U():
    "51 gpload command config test -U wrong username"
    copy_data('external_file_01.txt', 'data_file.txt')
    write_config_file(user="", format='text',file='data_file.txt',table='texttable')

# --gpfdist_timeout with a small numeric value; drop_tables() resets state first.
@prepare_before_test(num=52, cmd='--gpfdist_timeout 2')
def test_52_gpload_config_gpfdist_timeout():
    "52 gpload command config test gpfdist_timeout"
    drop_tables()
    copy_data('external_file_01.txt','data_file.txt')
    write_config_file(format='text',file='data_file.txt',table='texttable')

''' maybe some bug in gpfdist
@prepare_before_test(num=53, cmd='--gpfdist_timeout aa')
def test_53_gpload_config_gpfdist_timeout_wrong():
    "53 gpload command config test gpfdist_timeout with a string"
    runfile(mkpath('setup.sql'))
    copy_data('external_file_01.txt','data_file.txt')
    write_config_file(format='text',file='data_file.txt',table='texttable')
'''
# -l: direct the gpload log to a named file; remove any stale log first.
@prepare_before_test(num=54, cmd='-l 54tmp.log')
def test_54_gpload_config_l():
    "54 gpload command config test -l logfile"
    run('rm 54tmp.log')
    copy_data('external_file_01.txt','data_file.txt')
    write_config_file(format='text',file='data_file.txt',table='texttable')

# YAML-level variations below: VERSION string and bad database/user/port values.
@prepare_before_test(num=55)
def test_55_gpload_yaml_version():
    "55 gpload yaml version"
    copy_data('external_file_01.txt','data_file.txt')
    write_config_file(version='1.0.0.2',format='text',file='data_file.txt',table='texttable')

@prepare_before_test(num=56)
def test_56_gpload_yaml_wrong_database():
    "56 gpload yaml writing a not exist database"
    copy_data('external_file_01.txt','data_file.txt')
    write_config_file(database='notexist',format='text',file='data_file.txt',table='texttable')

@prepare_before_test(num=57)
def test_57_gpload_yaml_wrong_user():
    "57 gpload yaml writing a not exist user"
    copy_data('external_file_01.txt','data_file.txt')
    write_config_file(user='notexist',format='text',file='data_file.txt',table='texttable')

''' wrong host runs slowly
@prepare_before_test(num=58)
def test_58_gpload_yaml_wrong_host():
    "58 gpload yaml writing a not exist host"
    copy_data('external_file_01.txt','data_file.txt')
    write_config_file(host='1.2.3.4',format='text',file='data_file.txt',table='texttable')
'''
@prepare_before_test(num=59)
def test_59_gpload_yaml_wrong_port():
    "59 gpload yaml writing a not exist port"
    copy_data('external_file_01.txt','data_file.txt')
    write_config_file(port='111111',format='text',file='data_file.txt',table='texttable')

# Disabled test 60 (multiple LOCAL_HOSTNAME variants), kept for reference:
'''
@prepare_before_test(num=60)
def test_60_gpload_local_hostname():
    "60 gpload yaml local host with 127.0.0.1 and none and a not exist host"
    runfile(mkpath('setup.sql'))
    copy_data('external_file_01.txt','data_file.txt')
    write_config_file(local_host=['127.0.0.1'],format='text',file='data_file.txt',table='texttable')
    write_config_file(config='config/config_file2',local_host=None,format='text',file='data_file.txt',table='texttable')
    write_config_file(config='config/config_file3',local_host=['123.123.1.1'],format='text',file='data_file.txt',table='texttable')
    f = open('query60.sql','w')
    f.write("\\! gpload -f "+mkpath('config/config_file')+"\n")
    f.write("\\! gpload -f "+mkpath('config/config_file2')+"\n")
    f.write("\\! gpload -f "+mkpath('config/config_file3')+"\n")
    f.close()
'''
# PORT/PORT_RANGE handling in the YAML: omitted, inverted range, valid range.
@prepare_before_test(num=61, times=1)
def test_61_gpload_local_no_port():
    "61 gpload yaml file port not specified"
    copy_data('external_file_01.txt','data_file.txt')
    write_config_file(input_port=None,format='text',file='data_file.txt',table='texttable')

@prepare_before_test(num=62, times=1)
def test_62_gpload_wrong_port_range():
    "62 gpload yaml file use wrong port_range"
    copy_data('external_file_01.txt','data_file.txt')
    write_config_file(input_port=None,port_range='[8081,8070]' ,format='text',file='data_file.txt',table='texttable')

@prepare_before_test(num=63, times=1)
def test_63_gpload_test_port_range():
    "63 gpload yaml file use port_range"
    copy_data('external_file_01.txt','data_file.txt')
    write_config_file(input_port=None,port_range='[8082,8090]' ,format='text',file='data_file.txt',table='texttable')

# test 100: XML input pushed through a gpfdist TRANSFORM into the prices table.
@prepare_before_test(num=100, times=1)
def test_100_gpload_transform():
    runfile(mkpath('setup.sql'))
    write_config_file(file='data/transform/prices.xml',
                      transform_config='data/transform/transform_config.yaml',
                      transform='prices_input',
                      format='text',
                      table='prices',
                      mode='insert')
| 44.946309 | 131 | 0.728386 |
f90120525416b36b2982e854f266273384b07f09 | 884 | py | Python | add_function.py | jlmonge/FlaskDatabaseQuery | bd3cb09f64ff120b3a02a1e8d508a9583712f557 | [
"OML"
] | null | null | null | add_function.py | jlmonge/FlaskDatabaseQuery | bd3cb09f64ff120b3a02a1e8d508a9583712f557 | [
"OML"
] | null | null | null | add_function.py | jlmonge/FlaskDatabaseQuery | bd3cb09f64ff120b3a02a1e8d508a9583712f557 | [
"OML"
] | null | null | null | import json
#from os import get_exec_path, set_inheritable
#pass in strings or the format of the JSON WILL NOT HAVE STRINGS IN IT !!
def add_to_json(data, ID, name, category, main_category, currency, deadline,
                goal, launched, pledged, state, backers, country,
                usd_pledged, usd_pledged_real):
    """Append one Kickstarter-style record to the *data* list.

    Field values are stored exactly as given; callers are expected to pass
    strings so the resulting JSON matches the dataset's format.  Note the
    legacy column name "usd pledged" (with a space) is kept intentionally.
    """
    data.append({
        "ID": ID,
        "name": name,
        "category": category,
        "main_category": main_category,
        "currency": currency,
        "deadline": deadline,
        "goal": goal,
        "launched": launched,
        "pledged": pledged,
        "state": state,
        "backers": backers,
        "country": country,
        "usd pledged": usd_pledged,
        "usd_pledged_real": usd_pledged_real,
    })
#test case
#add_to_json("999320001988282","TEST69420", "Performance Art", "Art", "USD", "2011-08-16","2000.00", "2011-07-19 09:07:47","524.00","failed", "17", "US", "524.00", "524.00")
| 29.466667 | 174 | 0.658371 |
c4413a9f4c7dab4f3769f867b279b95daa9d3327 | 4,238 | py | Python | lale/lib/autoai_ts_libs/window_standard_row_mean_center_uts.py | mfeffer/lale | 57b58843c7c14dc2e5658244280f2c1918bf030b | [
"Apache-2.0"
] | 265 | 2019-08-06T14:45:43.000Z | 2022-03-30T23:57:48.000Z | lale/lib/autoai_ts_libs/window_standard_row_mean_center_uts.py | mfeffer/lale | 57b58843c7c14dc2e5658244280f2c1918bf030b | [
"Apache-2.0"
] | 467 | 2019-08-08T02:01:21.000Z | 2022-03-25T16:12:00.000Z | lale/lib/autoai_ts_libs/window_standard_row_mean_center_uts.py | mfeffer/lale | 57b58843c7c14dc2e5658244280f2c1918bf030b | [
"Apache-2.0"
] | 81 | 2019-08-07T19:59:31.000Z | 2022-03-31T09:11:58.000Z | # Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from autoai_ts_libs.sklearn.small_data_standard_row_mean_center_transformers import ( # type: ignore # noqa
WindowStandardRowMeanCenterUTS as model_to_be_wrapped,
)
import lale.docstrings
import lale.operators
class _WindowStandardRowMeanCenterUTSImpl:
    """Lale wrapper implementation delegating to the autoai_ts_libs transformer."""

    def __init__(self, lookback_window=10, prediction_horizon=1):
        # Record the hyperparameters, then instantiate the wrapped estimator
        # with exactly those values.
        self._hyperparams = dict(
            lookback_window=lookback_window,
            prediction_horizon=prediction_horizon,
        )
        self._wrapped_model = model_to_be_wrapped(**self._hyperparams)

    def fit(self, X, y=None):
        """Fit the wrapped transformer and return self (sklearn convention)."""
        self._wrapped_model.fit(X, y)
        return self

    def transform(self, X, y=None):
        """Delegate transformation to the wrapped transformer."""
        return self._wrapped_model.transform(X, y)
# Hyperparameter schema: constructor arguments, their types and defaults, and
# which of them the optimizer is allowed to tune.
_hyperparams_schema = {
    "allOf": [
        {
            "description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters.",
            "type": "object",
            "additionalProperties": False,
            "required": ["prediction_horizon"],
            "relevantToOptimizer": ["lookback_window"],
            "properties": {
                "lookback_window": {
                    "description": "The number of time points to include in each of the generated feature windows.",
                    "type": "integer",
                    "default": 10,
                },
                "prediction_horizon": {
                    "description": "The number of time points to include in each of the generated target windows.",
                    "type": "integer",
                    "default": 1,
                },
            },
        }
    ]
}

# Schema for the arguments of fit().
_input_fit_schema = {
    "type": "object",
    "required": ["X"],
    "additionalProperties": False,
    "properties": {
        "X": { # Handles 1-D arrays as well
            "anyOf": [
                {"type": "array", "items": {"laleType": "Any"}},
                {
                    "type": "array",
                    "items": {"type": "array", "items": {"laleType": "Any"}},
                },
            ]
        },
        "y": {"laleType": "Any"},
    },
}

# Schema for the arguments of transform(); note y is required here.
_input_transform_schema = {
    "type": "object",
    "required": ["X", "y"],
    "additionalProperties": False,
    "properties": {
        "X": { # Handles 1-D arrays as well
            "anyOf": [
                {"type": "array", "items": {"laleType": "Any"}},
                {
                    "type": "array",
                    "items": {"type": "array", "items": {"laleType": "Any"}},
                },
            ]
        },
        "y": {"laleType": "Any"},
    },
}

_output_transform_schema = {
    "description": "Features; the outer array is over samples.",
    "laleType": "Any",
}

# Combined schema consumed by lale.operators.make_operator below.
_combined_schemas = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": """Operator from `autoai_ts_libs`_.
.. _`autoai_ts_libs`: https://pypi.org/project/autoai-ts-libs""",
    "documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.autoai_ts_libs.window_standard_row_mean_center_uts.html",
    "import_from": "autoai_ts_libs.sklearn.small_data_standard_row_mean_center_transformers",
    "type": "object",
    "tags": {"pre": [], "op": ["transformer"], "post": []},
    "properties": {
        "hyperparams": _hyperparams_schema,
        "input_fit": _input_fit_schema,
        "input_transform": _input_transform_schema,
        "output_transform": _output_transform_schema,
    },
}

# Public lale operator built from the impl class plus the combined schemas.
WindowStandardRowMeanCenterUTS = lale.operators.make_operator(
    _WindowStandardRowMeanCenterUTSImpl, _combined_schemas
)

lale.docstrings.set_docstrings(WindowStandardRowMeanCenterUTS)
| 33.904 | 152 | 0.597924 |
13007e7270736d4e08062eb9ac1ac1db6e43bb57 | 10,849 | py | Python | src/models/SIDPAMIwsa_model.py | BalinLin/SID | 8c2dfb0e671ee9333945545c82a12cfe227813cd | [
"MIT"
] | 44 | 2020-11-18T00:15:52.000Z | 2022-03-21T12:33:33.000Z | src/models/SIDPAMIwsa_model.py | WeiGai/SID | a1eab049a08359fd041bc67205177c73902e2a37 | [
"MIT"
] | 17 | 2020-11-20T02:45:54.000Z | 2022-03-22T18:58:26.000Z | src/models/SIDPAMIwsa_model.py | WeiGai/SID | a1eab049a08359fd041bc67205177c73902e2a37 | [
"MIT"
] | 11 | 2020-11-18T00:15:55.000Z | 2021-12-24T14:21:43.000Z | import torch
from collections import OrderedDict
import time
import numpy as np
import torch.nn.functional as F
from util.image_pool import ImagePool
from .base_model import BaseModel
from . import networks
import util.util as util
from .distangle_model import DistangleModel
from PIL import ImageOps,Image
from .loss_function import smooth_loss
class SIDPAMIWSAModel(DistangleModel):
    """Shadow Image Decomposition (PAMI'19) with a weighted boundary loss.

    Two sub-networks: netG regresses six shadow parameters (an add/multiply
    pair per RGB channel) used to relight the image, and netM predicts a soft
    shadow matte that blends the input with the relit image into the final
    shadow-free output.
    """
    def name(self):
        return 'Shadow Image Decomposition model PAMI19 weighted boundary loss standalone'
    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Register model-specific command-line options and option defaults."""
        parser.set_defaults(pool_size=0, no_lsgan=True, norm='batch')
        parser.add_argument('--wdataroot',default='None', help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
        parser.add_argument('--use_our_mask', action='store_true')
        parser.add_argument('--mask_train',type=str,default=None)
        parser.add_argument('--mask_test',type=str,default=None)
        # Loss weights: boundary (penumbra), reconstruction, parameter regression.
        parser.add_argument('--lambda_bd',type=float,default=100)
        parser.add_argument('--lambda_res',type=float,default=100)
        parser.add_argument('--lambda_param',type=float,default=100)
        # parser.add_argument('--lambda_smooth',type=float,default=100)
        return parser
    def initialize(self, opt):
        """Build the networks and, when training, the losses and optimizers."""
        BaseModel.initialize(self, opt)
        self.isTrain = opt.isTrain
        # NOTE(review): 'alpha' and 'smooth' are listed here but no matching
        # loss_alpha / loss_smooth attribute is ever set below; similarly
        # 'masked_fake' in visual_names is only computed in a commented-out
        # line of forward() -- confirm before relying on these for logging.
        self.loss_names = ['G_param','alpha','rescontruction','bd','smooth']
        # specify the images you want to save/display. The program will call base_model.get_current_visuals
        self.visual_names = ['input_img', 'alpha_pred','out','final','masked_fake']
        # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks
        self.model_names = ['G','M']
        # load/define networks
        opt.output_nc= 3
        # netG: 3-channel image -> 6 shadow parameters (add/mul per channel).
        if self.opt.netG =='vgg':
            self.netG = networks.define_vgg(3,6, gpu_ids = self.gpu_ids)
        if self.opt.netG =='RESNEXT':
            self.netG = networks.define_G(3, 6, opt.ngf, 'RESNEXT', opt.norm,
                                          not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
        # netM: input image concatenated with the relit image (6 ch) -> matte.
        self.netM = networks.define_G(6, 3, opt.ngf, 'unet_256', opt.norm,
                                      not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
        if self.isTrain:
            use_sigmoid = opt.no_lsgan
        self.netG.to(self.device)
        self.netM.to(self.device)
        print(self.netG)
        print(self.netM)
        if self.isTrain:
            self.fake_AB_pool = ImagePool(opt.pool_size)
            # define loss functions
            self.MSELoss = torch.nn.MSELoss()
            self.criterionL1 = torch.nn.L1Loss()
            self.bce = torch.nn.BCEWithLogitsLoss()
            self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan).to(self.device)
            # initialize optimizers
            self.optimizers = []
            #self.optimizer_G = torch.optim.SGD(self.netG.parameters(),
            #                                   lr=0.002, momentum=0.9)
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                                lr=opt.lr, betas=(opt.beta1, 0.999),weight_decay=1e-5)
            self.optimizer_M = torch.optim.Adam(self.netM.parameters(),
                                                lr=opt.lr, betas=(opt.beta1, 0.999),weight_decay=1e-5)
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_M)
    def set_input(self, input):
        """Unpack a data-loader batch onto the device.

        Expects keys 'A' (shadow image), 'B' (shadow mask), 'param'
        (ground-truth shadow parameters), 'penumbra', 'C' (shadow-free image)
        and optionally 'isreal'.
        """
        self.input_img = input['A'].to(self.device)
        self.shadow_mask = input['B'].to(self.device)
        self.shadow_param = input['param'].to(self.device).type(torch.float)
        # Binarize the mask and map it to {-1, +1}.
        self.shadow_mask = (self.shadow_mask>0.9).type(torch.float)*2-1
        self.penumbra = input['penumbra'].to(self.device).type(torch.float)
        self.penumbra = (self.penumbra>0).type(torch.float)
        self.penumbra = self.penumbra.expand(self.input_img.shape)
        self.shadowfree_img = input['C'].to(self.device)
        # {0,1} mask broadcast to the image's shape for per-channel blending.
        self.shadow_mask_3d= (self.shadow_mask>0).type(torch.float).expand(self.input_img.shape)
        if 'isreal' in input:
            self.isreal = input['isreal']
    def forward(self):
        """Predict shadow params, relight the image, and predict the matte."""
        inputG = self.input_img
        self.shadow_param_pred = torch.squeeze(self.netG(inputG))
        n = self.shadow_param_pred.shape[0]
        #m = self.shadow_param_pred.shape[1]
        w = inputG.shape[2]
        h = inputG.shape[3]
        #self.shadow_param_pred = torch.mean(self.shadow_param_pred.view([n,m,-1]),dim=2)
        # Even indices are additive terms, odd indices multiplicative; the
        # multiplier is rescaled with x*2+3 (inverse mapping in backward()).
        add = self.shadow_param_pred[:,[0,2,4]]
        mul = (self.shadow_param_pred[:,[1,3,5]]*2) +3
        #mul = (mul +2) * 5/3
        add = add.view(n,3,1,1).expand((n,3,w,h))
        mul = mul.view(n,3,1,1).expand((n,3,w,h))
        addgt = self.shadow_param[:,[0,2,4]]
        mulgt = self.shadow_param[:,[1,3,5]]
        addgt = addgt.view(n,3,1,1).expand((n,3,w,h))
        mulgt = mulgt.view(n,3,1,1).expand((n,3,w,h))
        # /2+0.5 maps the image from [-1, 1] into [0, 1] before relighting
        # (the *2-1 below maps back).
        self.litgt = self.input_img.clone()/2+0.5
        self.lit = self.input_img.clone()/2+0.5
        self.lit = self.lit*mul + add
        self.litgt = (self.litgt*mulgt+addgt)*2-1
        # Hard composite using the binary mask (intermediate result).
        self.out = (self.input_img/2+0.5)*(1-self.shadow_mask_3d) + self.lit*self.shadow_mask_3d
        self.out = self.out*2-1
        #self.outgt = (self.input_img/2+0.5)*(1-self.alpha_3d) + self.lit*(self.alpha_3d)
        #self.outgt = self.outgt*2-1
        #lit.detach if no final loss for paramnet
        # Soft composite using the predicted matte (final result).
        inputM = torch.cat([self.input_img,self.lit],1)
        self.alpha_pred = self.netM(inputM)
        self.alpha_pred = (self.alpha_pred +1) /2
        self.final = (self.input_img/2+0.5)*(1-self.alpha_pred) + self.lit*(self.alpha_pred)
        self.final = self.final*2-1
        #GAN input:
        #self.masked_fake = self.final*self.penumbra
        #self.masked_real = self.shadowfree_img*self.penumbra
    def backward(self):
        """Compute parameter, reconstruction and boundary losses and backprop."""
        criterion = self.criterionL1
        # Map the ground-truth multipliers into the network's output range
        # (inverse of the mul = x*2 + 3 rescaling in forward()).
        self.shadow_param[:,[1,3,5]] = (self.shadow_param[:,[1,3,5]])/2 - 1.5
        self.loss_G_param = criterion(self.shadow_param_pred, self.shadow_param) * self.opt.lambda_param
        self.loss_rescontruction = criterion(self.final,self.shadowfree_img) * self.opt.lambda_res
        # Extra weight on the penumbra (shadow boundary) pixels only.
        self.loss_bd = criterion(self.final[self.penumbra>0],self.shadowfree_img[self.penumbra>0]) * self.opt.lambda_bd
        #self.loss_smooth = smooth_loss(self.alpha_pred) * self.opt.lambda_smooth
        self.loss = self.loss_rescontruction + self.loss_G_param + self.loss_bd #+ self.loss_smooth
        self.loss.backward()
    def optimize_parameters(self):
        """One training step: forward pass, then update both networks."""
        self.forward()
        self.optimizer_G.zero_grad()
        self.optimizer_M.zero_grad()
        self.backward()
        self.optimizer_G.step()
        self.optimizer_M.step()
    def get_current_visuals(self):
        """Stack up to five batch samples' visuals into a single image grid."""
        t= time.time()
        nim = self.input_img.shape[0]
        visual_ret = OrderedDict()
        all =[]
        for i in range(0,min(nim-1,5)):
            row=[]
            for name in self.visual_names:
                if isinstance(name, str):
                    if hasattr(self,name):
                        im = util.tensor2im(getattr(self, name).data[i:i+1,:,:,:])
                        row.append(im)
            row=tuple(row)
            row = np.hstack(row)
            # Samples flagged isreal == 0 get a green border in the montage.
            if hasattr(self,'isreal'):
                if self.isreal[i] == 0:
                    row = ImageOps.crop(Image.fromarray(row),border =5)
                    row = ImageOps.expand(row,border=5,fill=(0,200,0))
                    row = np.asarray(row)
            all.append(row)
        all = tuple(all)
        allim = np.vstack(all)
        return OrderedDict([(self.opt.name,allim)])
    def get_prediction(self,input):
        """Run inference; return final/matte images plus a full-resolution result."""
        self.input_img = input['A'].to(self.device)
        self.shadow_mask = input['B'].to(self.device)
        inputG =self.input_img
        self.shadow_mask = (self.shadow_mask>0.9).type(torch.float)*2-1
        self.shadow_mask_3d= (self.shadow_mask>0).type(torch.float).expand(self.input_img.shape)
        # netG always runs at the training resolution (opt.fineSize).
        inputG = F.upsample(inputG,size=(self.opt.fineSize,self.opt.fineSize))
        self.shadow_param_pred = self.netG(inputG)
        w = self.input_img.shape[2]
        h = self.input_img.shape[3]
        n = self.input_img.shape[0]
        m = self.input_img.shape[1]
        # Average the spatial predictions down to 6 parameters per sample.
        self.shadow_param_pred = self.shadow_param_pred.view([n,6,-1])
        self.shadow_param_pred = torch.mean(self.shadow_param_pred,dim=2)
        self.shadow_param_pred[:,[1,3,5]] = (self.shadow_param_pred[:,[1,3,5]]*2)+3
        self.lit = self.input_img.clone()/2+0.5
        add = self.shadow_param_pred[:,[0,2,4]]
        mul = self.shadow_param_pred[:,[1,3,5]]
        #mul = (mul +2) * 5/3
        n = self.shadow_param_pred.shape[0]
        add = add.view(n,3,1,1).expand((n,3,w,h))
        mul = mul.view(n,3,1,1).expand((n,3,w,h))
        self.lit = self.lit*mul + add
        self.out = (self.input_img/2+0.5)*(1-self.shadow_mask_3d) + self.lit*self.shadow_mask_3d
        self.out = self.out*2-1
        inputM = torch.cat([self.input_img,self.lit],1)
        self.alpha_pred = self.netM(inputM)
        self.alpha_pred = (self.alpha_pred +1) /2
        #self.alpha_pred_3d= self.alpha_pred.repeat(1,3,1,1)
        self.final = (self.input_img/2+0.5)*(1-self.alpha_pred) + self.lit*self.alpha_pred
        self.final = self.final*2-1
        RES = dict()
        RES['final']= util.tensor2im(self.final,scale =0)
        #RES['phase1'] = util.tensor2im(self.out,scale =0)
        #RES['param']= self.shadow_param_pred.detach().cpu()
        RES['matte'] = util.tensor2im(self.alpha_pred.detach().cpu()/2,scale =0)
        ###EVAL on original size
        # Re-apply the predicted parameters, with an upsampled matte, at the
        # image's original resolution.
        input_img_ori = input['A_ori'].to(self.device)
        input_img_ori = input_img_ori/2+0.5
        lit_ori = input_img_ori
        w = input_img_ori.shape[2]
        h = input_img_ori.shape[3]
        add = self.shadow_param_pred[:,[0,2,4]]
        mul = self.shadow_param_pred[:,[1,3,5]]
        #mul = (mul +2) * 5/3
        n = self.shadow_param_pred.shape[0]
        add = add.view(n,3,1,1).expand((n,3,w,h))
        mul = mul.view(n,3,1,1).expand((n,3,w,h))
        lit_ori = lit_ori*mul + add
        alpha_pred = F.upsample(self.alpha_pred,(w,h),mode='bilinear',align_corners=True)
        final = input_img_ori * (1-alpha_pred) + lit_ori*(alpha_pred)
        final = final*2 -1
        RES['ori_Size'] = util.tensor2im(final.detach().cpu())
        return RES
| 43.051587 | 138 | 0.597751 |
e24e33338083e264318f388009a28e27bcbc867b | 192 | py | Python | read/humidity.py | mawdesley/sensors | 7d77be744aff405f28e7a1bf87cc8320e660d66e | [
"Unlicense"
] | null | null | null | read/humidity.py | mawdesley/sensors | 7d77be744aff405f28e7a1bf87cc8320e660d66e | [
"Unlicense"
] | null | null | null | read/humidity.py | mawdesley/sensors | 7d77be744aff405f28e7a1bf87cc8320e660d66e | [
"Unlicense"
] | null | null | null | #!/usr/bin/python3
import Adafruit_DHT
# DHT22 sensor read via the Adafruit_DHT library; `pin` is the GPIO pin the
# sensor's data line is wired to (numbering per the library's convention).
sensor = Adafruit_DHT.DHT22
pin = 24
# read_retry returns (humidity, temperature); it retries until a reading
# succeeds or gives up, in which case humidity is None and nothing is printed.
humidity, _ = Adafruit_DHT.read_retry(sensor, pin)
if humidity is not None:
    # Print relative humidity with one decimal place.
    print("{0:0.1f}".format(humidity))
| 16 | 50 | 0.734375 |
cb3237e72bb9cc550f61ec5a10dae9357621c44d | 3,138 | py | Python | envoy/tests/test_envoy.py | andersenleo/integrations-core | e521b88e32820a286a70c7797a663d4f9ba41110 | [
"BSD-3-Clause"
] | null | null | null | envoy/tests/test_envoy.py | andersenleo/integrations-core | e521b88e32820a286a70c7797a663d4f9ba41110 | [
"BSD-3-Clause"
] | null | null | null | envoy/tests/test_envoy.py | andersenleo/integrations-core | e521b88e32820a286a70c7797a663d4f9ba41110 | [
"BSD-3-Clause"
] | null | null | null | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import mock
from datadog_checks.envoy import Envoy
from datadog_checks.envoy.metrics import METRIC_PREFIX, METRICS
from .common import INSTANCES, response
class TestEnvoy:
    """Unit tests for the Envoy check, driven by canned fixture responses."""

    CHECK_NAME = 'envoy'

    def _run_check(self, instance, fixture=None):
        """Create an Envoy check for *instance* and run it once.

        When *fixture* is given, ``requests.get`` is patched to return the
        canned response of that name; otherwise the check queries whatever the
        instance points at.  Returns the check so callers can inspect counters.
        """
        c = Envoy(self.CHECK_NAME, None, {}, [instance])
        if fixture is None:
            c.check(instance)
        else:
            with mock.patch('requests.get', return_value=response(fixture)):
                c.check(instance)
        return c

    def test_success(self, aggregator):
        instance = INSTANCES['main']
        self._run_check(instance)
        metrics_collected = 0
        for metric in METRICS.keys():
            metrics_collected += len(aggregator.metrics(METRIC_PREFIX + metric))
        assert metrics_collected >= 250

    def test_success_fixture(self, aggregator):
        instance = INSTANCES['main']
        c = self._run_check(instance, 'multiple_services')
        metrics_collected = 0
        for metric in METRICS.keys():
            metrics_collected += len(aggregator.metrics(METRIC_PREFIX + metric))
        # Every metric line in the fixture should be collected, minus the ones
        # the check could not map to a known metric or tag.
        num_metrics = len(response('multiple_services').content.decode().splitlines())
        num_metrics -= sum(c.unknown_metrics.values()) + sum(c.unknown_tags.values())
        assert 4159 <= metrics_collected == num_metrics

    def test_success_fixture_whitelist(self, aggregator):
        instance = INSTANCES['whitelist']
        self._run_check(instance, 'multiple_services')
        for metric in aggregator.metric_names:
            assert metric.startswith('envoy.cluster.')

    def test_success_fixture_blacklist(self, aggregator):
        instance = INSTANCES['blacklist']
        self._run_check(instance, 'multiple_services')
        for metric in aggregator.metric_names:
            assert not metric.startswith('envoy.cluster.')

    def test_success_fixture_whitelist_blacklist(self, aggregator):
        instance = INSTANCES['whitelist_blacklist']
        self._run_check(instance, 'multiple_services')
        for metric in aggregator.metric_names:
            assert metric.startswith("envoy.cluster.") and not metric.startswith("envoy.cluster.out")

    def test_service_check(self, aggregator):
        instance = INSTANCES['main']
        self._run_check(instance, 'multiple_services')
        assert aggregator.service_checks(Envoy.SERVICE_CHECK_NAME)[0].status == Envoy.OK

    def test_unknown(self):
        instance = INSTANCES['main']
        c = self._run_check(instance, 'unknown_metrics')
        assert sum(c.unknown_metrics.values()) == 5
| 35.659091 | 101 | 0.664436 |
b01fc70c5109cd7bb8470439942eb8eda86e3254 | 1,426 | py | Python | K-th Smallest Prime Fraction.py | sugia/leetcode | 6facec2a54d1d9f133f420c9bce1d1043f57ebc6 | [
"Apache-2.0"
] | null | null | null | K-th Smallest Prime Fraction.py | sugia/leetcode | 6facec2a54d1d9f133f420c9bce1d1043f57ebc6 | [
"Apache-2.0"
] | null | null | null | K-th Smallest Prime Fraction.py | sugia/leetcode | 6facec2a54d1d9f133f420c9bce1d1043f57ebc6 | [
"Apache-2.0"
] | null | null | null | '''
A sorted list A contains 1, plus some number of primes. Then, for every p < q in the list, we consider the fraction p/q.
What is the K-th smallest fraction considered? Return your answer as an array of ints, where answer[0] = p and answer[1] = q.
Examples:
Input: A = [1, 2, 3, 5], K = 3
Output: [2, 5]
Explanation:
The fractions to be considered in sorted order are:
1/5, 1/3, 2/5, 1/2, 3/5, 2/3.
The third fraction is 2/5.
Input: A = [1, 7], K = 1
Output: [1, 7]
Note:
A will have length between 2 and 2000.
Each A[i] will be between 1 and 30000.
K will be between 1 and A.length * (A.length - 1) / 2.
'''
class Solution(object):
    """LeetCode 786: K-th Smallest Prime Fraction."""

    def kthSmallestPrimeFraction(self, A, K):
        """
        Return [p, q] such that p/q is the K-th smallest fraction A[i]/A[j]
        over all pairs i < j of the sorted input.

        Uses a min-heap of candidate index pairs: popping (i, j) exposes the
        two next-larger neighbours (i+1, j) and (i, j-1).

        :type A: List[int]
        :type K: int
        :rtype: List[int]
        """
        import heapq  # local import: the original file never imported heapq

        # sorted() instead of A.sort() avoids mutating the caller's list.
        A = sorted(A)
        n = len(A)
        i, j = 0, n - 1
        heap = [(float(A[i]) / A[j], i, j)]
        # Pairs ever pushed.  Do NOT remove popped pairs (the original did):
        # (i, j) is reachable from both (i-1, j) and (i, j+1), so un-marking
        # it lets it be pushed and counted twice, corrupting the K-th rank.
        visited = {(i, j)}
        for _ in range(K):
            _, i, j = heapq.heappop(heap)
            for ni, nj in ((i + 1, j), (i, j - 1)):
                # ni < nj keeps the invariant p < q (the original pushed
                # degenerate pairs without this bound check).
                if ni < nj and (ni, nj) not in visited:
                    visited.add((ni, nj))
                    heapq.heappush(heap, (float(A[ni]) / A[nj], ni, nj))
        return [A[i], A[j]]
29aa641f91425cdb58e8fc0f6f1e11e3eda4079a | 4,590 | py | Python | scripts/validation/DS/compare_DS_gen.py | FMS-Mu2e/helicalc | 557ab63696459807998a9ab44f92badd62e93a2a | [
"MIT"
] | null | null | null | scripts/validation/DS/compare_DS_gen.py | FMS-Mu2e/helicalc | 557ab63696459807998a9ab44f92badd62e93a2a | [
"MIT"
] | null | null | null | scripts/validation/DS/compare_DS_gen.py | FMS-Mu2e/helicalc | 557ab63696459807998a9ab44f92badd62e93a2a | [
"MIT"
] | 2 | 2020-05-22T15:54:38.000Z | 2022-02-04T23:51:27.000Z | import sys
import numpy as np
import pandas as pd
from helicalc import helicalc_dir, helicalc_data
from helicalc.coil import CoilIntegrator
from helicalc.busbar import ArcIntegrator3D
from helicalc.geometry import read_solenoid_geom_combined
from helicalc.solenoid_geom_funcs import load_all_geoms
from helicalc.constants import dxyz_dict, dxyz_arc_bar_dict
from tqdm import tqdm
# output info
output_dir = helicalc_data+'Bmaps/helicalc_validation/'
# coil only map
# full
#save_name = output_dir+'Mau14.DS1_region.standard-helicalc.coil_56_full.pkl'
# y=0 plane
save_name = output_dir+'Mau14.DS_region_plane.standard-helicalc.coil_56-66_full.pkl'
# load coil geometry
paramdir = helicalc_dir + 'dev/params/'
# paramname = 'Mu2e_V13'
paramname = 'Mu2e_V14' # correct
# iloc[55:] keeps coils 56 and up (presumably the DS coils -- confirm).
geom_df = read_solenoid_geom_combined(paramdir,paramname).iloc[55:].copy()
# load chunk data
chunk_file = helicalc_data+'Bmaps/aux/batch_N_helicalc_03-16-22.txt'
df_chunks = pd.read_csv(chunk_file)
# load interlayer geometry
df_dict = load_all_geoms(version=14, return_dict=True)
df_inter = df_dict['interlayers']
#N_chunk_inter = 10000
N_chunk_inter = 500
dxyz_interlayer = dxyz_arc_bar_dict[1]
# load OPERA dataframe for grid to calculate on
opera_file = helicalc_data+'Bmaps/single_coil_Mau13/DSMap_V14_nobus.pkl'
df_O = pd.read_pickle(opera_file)
# y=0 plane
df_O = df_O.query('(Y==0) & (-4.796 <= X <= -2.996)').copy()
# testing
#df_O = df_O[(np.isin(df_O.X, df_O.X.unique()[::16])) & (np.isin(df_O.Z, df_O.Z.unique()[::16]))]
# Downsample the grid: keep every 4th unique X and Z value.
df_O = df_O[(np.isin(df_O.X, df_O.X.unique()[::4])) & (np.isin(df_O.Z, df_O.Z.unique()[::4]))]
def DSi_calc(Coil_Num=56, df=df_O, df_coils=geom_df,
             df_interlayers=df_dict['interlayers'],
             override_chunk=True):
    """Integrate the field of coil `Coil_Num` onto the grid in `df`.

    Dispatches to DS1_calc (multi-layer coil plus interlayer connector) or
    DS8_calc (single-layer coil) based on the coil's N_layers.
    NOTE(review): the defaults (df_O, geom_df, df_dict) are module globals
    bound at import time.
    """
    # specific to coil
    df_ = df_coils.query(f'Coil_Num == {Coil_Num}').copy().iloc[0]
    # test
    # df_ = df_coils.query(f'Coil_Num == {Coil_Num}').copy()#.iloc[0]
    # print(df_)
    # df_ = df_.iloc[0]
    # Chunk size controls how many field points are integrated per GPU batch.
    if override_chunk:
        N_chunk_coil = 50
    else:
        N_chunk_coil = df_chunks.query(f'Nt_Ri == {df_.Nt_Ri}').iloc[0].N_field_points
    N_layers = df_.N_layers
    if N_layers > 1:
        df_int = df_interlayers.query(f'`cond N` == {Coil_Num}').copy().iloc[0]
        # kludge for better column naming
        df_int['cond N'] = f'{int(df_int["cond N"])}_il'
        df = DS1_calc(df=df, df_coil=df_, df_interlayer=df_int)
    else:
        df = DS8_calc(df=df, df_coil=df_, N_chunk=N_chunk_coil)
    return df
def DS1_calc(df=df_O, df_coil=geom_df.iloc[0],
             df_interlayer=df_inter.query(f'`cond N` == 56').copy().iloc[0],
             N_chunk=100):
    """Integrate a two-layer coil plus its interlayer connector onto `df`.

    Adds per-element field columns to a copy of `df` and returns it.
    NOTE(review): the df_interlayer default runs a query at import time.
    """
    df_ = df.copy()
    cn = df_coil.Coil_Num  # NOTE(review): unused
    # loop over two layers
    # Each layer is integrated on its own GPU device (dev=1, dev=2).
    for layer, dev in zip([1, 2], [1, 2]):
    # test on 1 GPU
    # for layer, dev in zip([1, 2], [1, 1]):
        # create coil
        myCoil = CoilIntegrator(df_coil, dxyz=dxyz_dict[df_coil.dxyz], layer=layer, dev=dev)
        # integrate on grid and add to dataframe
        df_ = myCoil.integrate_grid(df_, N_batch=N_chunk, tqdm=tqdm)
    # interlayer connect
    # create interlayer
    myArc = ArcIntegrator3D(df_interlayer, dxyz=dxyz_interlayer, dev=3)
    # integrate on grid and add to dataframe
    df_ = myArc.integrate_grid(df_, N_batch=N_chunk_inter, tqdm=tqdm)
    return df_
def DS8_calc(df=df_O, df_coil=geom_df.iloc[0], N_chunk=100):
    """Integrate a single-layer coil onto the grid in `df` (GPU device 1)."""
    df_ = df.copy()
    cn = df_coil.Coil_Num  # NOTE(review): unused
    # loop over layers (only 1 this time)
    for layer, dev in zip([1], [1]):
        # create coil
        myCoil = CoilIntegrator(df_coil, dxyz=dxyz_dict[df_coil.dxyz], layer=layer, dev=dev)
        # integrate on grid and add to dataframe
        df_ = myCoil.integrate_grid(df_, N_batch=N_chunk, tqdm=tqdm)
    return df_
def combine_columns(df):
    """Sum per-coil field columns into totals and compare against the reference.

    For each axis ``i`` in x, y, z:

    * ``B{i}_helicalc`` = sum of every column whose name contains ``B{i}``
      and is longer than 2 characters (i.e. the per-coil columns such as
      ``Bx_56``, excluding the bare reference column ``B{i}`` itself),
    * both ``B{i}`` and ``B{i}_helicalc`` are converted Tesla -> Gauss,
    * ``B{i}_delta`` = ``B{i}_helicalc`` - ``B{i}``.

    The frame is modified in place and also returned for convenience.
    Requires at least one per-coil column per axis.
    """
    for axis in ('x', 'y', 'z'):
        base = f'B{axis}'
        # `and` instead of the original bitwise `&` on plain bools.
        cols = [col for col in df.columns if (base in col) and (len(col) > 2)]
        df.eval(f'{base}_helicalc = ' + '+'.join(cols),
                inplace=True, engine='python')
        # Tesla to Gauss
        df.eval(f'{base} = {base} * 1e4', inplace=True)
        df.eval(f'{base}_helicalc = {base}_helicalc * 1e4', inplace=True)
        df.eval(f'{base}_delta = {base}_helicalc - {base}', inplace=True)
    return df
if __name__ == '__main__':
    # Coils 56-66 inclusive (range end is exclusive).
    coils = range(56, 67)
    for cn in coils:
        # Each call appends that coil's per-element field columns to df_O.
        df_O = DSi_calc(Coil_Num=cn, df=df_O, df_coils=geom_df,
                        df_interlayers=df_inter,
                        override_chunk=True)
    # Sum per-coil columns, convert to Gauss and compute deltas vs OPERA.
    df_O = combine_columns(df_O)
    df_O.to_pickle(save_name)
    print(df_O.columns)
    print(df_O)
| 36.141732 | 97 | 0.664924 |
0f63c48072a774e722b3e91329e7cf56270f50b8 | 241 | py | Python | config.example.py | C4ptainCrunch/gehol-tools | cb139bc6bc2a703bd4a792bcccbbbeb672ff683f | [
"Apache-2.0"
] | 1 | 2015-02-09T08:32:12.000Z | 2015-02-09T08:32:12.000Z | config.example.py | C4ptainCrunch/gehol-tools | cb139bc6bc2a703bd4a792bcccbbbeb672ff683f | [
"Apache-2.0"
] | null | null | null | config.example.py | C4ptainCrunch/gehol-tools | cb139bc6bc2a703bd4a792bcccbbbeb672ff683f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Example configuration: copy and adjust the values for your deployment.
# Maximum number of URLs per request (presumably -- confirm against callers).
MAX_URL = 10
# Public base URL of the service.
DOMAIN = "http://127.0.0.1:5000/" # with trailing "/"
# Secret signing key -- generate a fresh random value per deployment.
KEY = "à!èa(§aà'èçy§â'ygùhm;rth:s,<mdjkbgm<kfh)" # Should be random !
# Debug flag; disable in production.
DEBUG = True
# Absolute path of the project checkout, with trailing "/".
ROOT_DIR = "/home/nikita/code/gehol-tools/"
| 26.777778 | 69 | 0.630705 |
8ed2782a5a2be77a51f8d79755bcab1f07e29c57 | 3,201 | py | Python | esmvaltool/cmorizers/obs/nsidc_common.py | cffbots/ESMValTool | a9b6592a02f2085634a214ff5f36a736fa18ff47 | [
"Apache-2.0"
] | 148 | 2017-02-07T13:16:03.000Z | 2022-03-26T02:21:56.000Z | esmvaltool/cmorizers/obs/nsidc_common.py | cffbots/ESMValTool | a9b6592a02f2085634a214ff5f36a736fa18ff47 | [
"Apache-2.0"
] | 2,026 | 2017-02-03T12:57:13.000Z | 2022-03-31T15:11:51.000Z | esmvaltool/cmorizers/obs/nsidc_common.py | cffbots/ESMValTool | a9b6592a02f2085634a214ff5f36a736fa18ff47 | [
"Apache-2.0"
] | 113 | 2017-01-27T13:10:19.000Z | 2022-02-03T13:42:11.000Z | """Common tools to CMORize NSIDC-0116 northern and sothern data."""
import logging
import os
import glob
import numpy as np
import iris
from iris.coords import AuxCoord
from iris.cube import Cube
from .utilities import fix_var_metadata, save_variable, set_global_atts
logger = logging.getLogger(__name__)
def cmorize(cfg, region, in_dir, out_dir):
    """Cmorize NSIDC-0116 dataset.

    Reads every icemotion_daily_<region>_*.nc file in `in_dir`, attaches
    2-D lat/lon auxiliary coordinates, fixes metadata per the CMOR table in
    `cfg`, and writes each configured variable to `out_dir`.
    """
    glob_attrs = cfg['attributes']
    logger.info("Starting cmorization for Tier%s OBS files: %s",
                glob_attrs['tier'], glob_attrs['dataset_id'])
    logger.info("Input data from: %s", in_dir)
    logger.info("Output will be written to: %s", out_dir)
    file_expr = os.path.join(in_dir, 'icemotion_daily_{}_*.nc'.format(region))
    for filepath in glob.glob(file_expr):
        logger.info('Cmorizing file %s', filepath)
        cubes = iris.load(filepath)
        logger.debug(cubes)
        lat_coord = _create_coord(cubes, 'lat', 'latitude')
        lon_coord = _create_coord(cubes, 'lon', 'longitude')
        # Normalize longitudes from [-180, 180) to [0, 360).
        lon_coord.points[lon_coord.points < 0] += 360
        for var, vals in cfg['variables'].items():
            var_info = cfg['cmor_table'].get_variable(vals['mip'], var)
            logger.info('Cmorizing var %s', var)
            cube = cubes.extract_cube(iris.Constraint(vals['raw']))
            # Lat/lon are 2-D fields over dims (1, 2); dim 0 is time.
            cube.add_aux_coord(lat_coord, (1, 2))
            cube.add_aux_coord(lon_coord, (1, 2))
            cube.convert_units(var_info.units)
            logger.debug(cube)
            glob_attrs['mip'] = vals['mip']
            fix_var_metadata(cube, var_info)
            set_global_atts(cube, glob_attrs)
            zlib = vals.get('compress', False)
            if zlib:
                # Realize data to speed-up writing
                # pylint: disable=pointless-statement
                cube.data
            save_variable(cube, var, out_dir, glob_attrs, zlib=zlib)
            cubes.remove(cube)
        # NOTE(review): uses `cube` from the last loop iteration as the grid
        # template; raises NameError if cfg['variables'] is empty.
        _create_areacello(cfg, cube, glob_attrs, out_dir)
def _create_areacello(cfg, sample_cube, glob_attrs, out_dir):
    """Write a constant-cell-area `areacello` fx file on `sample_cube`'s grid.

    No-op unless cfg['custom']['create_areacello'] is truthy.  Every cell is
    assigned the single cfg['custom']['grid_cell_size'] value (m2).
    """
    if not cfg['custom'].get('create_areacello', False):
        return
    var_info = cfg['cmor_table'].get_variable('fx', 'areacello')
    glob_attrs['mip'] = 'fx'
    lat_coord = sample_cube.coord('latitude')
    # Constant field shaped like the 2-D latitude coordinate.
    cube = Cube(
        np.full(lat_coord.shape, cfg['custom']['grid_cell_size'], np.float32),
        standard_name=var_info.standard_name,
        long_name=var_info.long_name,
        var_name=var_info.short_name,
        units='m2',
    )
    cube.add_aux_coord(lat_coord, (0, 1))
    cube.add_aux_coord(sample_cube.coord('longitude'), (0, 1))
    cube.add_dim_coord(sample_cube.coord('projection_y_coordinate'), 0)
    cube.add_dim_coord(sample_cube.coord('projection_x_coordinate'), 1)
    fix_var_metadata(cube, var_info)
    set_global_atts(cube, glob_attrs)
    save_variable(
        cube, var_info.short_name, out_dir, glob_attrs, zlib=True
    )
def _create_coord(cubes, var_name, standard_name):
    """Build an AuxCoord from the cube named `standard_name` within `cubes`."""
    cube = cubes.extract_cube(standard_name)
    coord = AuxCoord(
        cube.data,
        standard_name=standard_name,
        long_name=cube.long_name,
        var_name=var_name,
        units=cube.units,
    )
    return coord
| 35.175824 | 78 | 0.651984 |
63dc850dd229942df720e81864ba33e73eb2f7ae | 410 | py | Python | practice_problems/MilitaryTimeConverter.py | smenon8/practice_scripts | 2a07cb12c75bd9b5ee5140cbe38e23d92835a108 | [
"MIT"
] | null | null | null | practice_problems/MilitaryTimeConverter.py | smenon8/practice_scripts | 2a07cb12c75bd9b5ee5140cbe38e23d92835a108 | [
"MIT"
] | 1 | 2017-02-13T20:48:06.000Z | 2017-02-13T20:48:06.000Z | practice_problems/MilitaryTimeConverter.py | smenon8/AlgDataStruct_practice | 2a07cb12c75bd9b5ee5140cbe38e23d92835a108 | [
"MIT"
] | null | null | null | #!/bin/python3
# Convert a 12-hour clock time "hh:mm:ssAM"/"hh:mm:ssPM" read from stdin to
# 24-hour (military) time.  The AM/PM marker rides on the seconds field.
import sys  # NOTE(review): imported but unused
time = input().strip()
l = time.split(':')
# PM and not 12 o'clock: add 12 to the hour (07PM -> 19).
if 'PM' in l[2] and l[0] != '12':
    l[0] = str(int(l[0]) + 12)
    l[2] = l[2].replace('PM','')
else:
    # Plain AM hour (not midnight): just strip the marker.
    if 'AM' in l[2] and l[0] != '12':
        l[2] = l[2].replace('AM','')
    # 12 AM is midnight -> 00; otherwise 12 PM stays 12, strip the marker.
    if l[0] == '12' and 'AM' in l[2]:
        l[0] = '00'
        l[2] = l[2].replace('AM','')
    else:
        l[2] = l[2].replace('PM','')
print(':'.join(l)) | 17.826087 | 38 | 0.42439 |
b5757999719930c01874bd1e6fbc0460b3a1a5a3 | 1,108 | py | Python | greedy_algorithms/2_maximum_value_of_the_loot/fractional_knapsack.py | Desaiakshata/Algorithms-problems | 90f4e40ba05e4bdfc783614bb70b9156b05eec0b | [
"MIT"
] | null | null | null | greedy_algorithms/2_maximum_value_of_the_loot/fractional_knapsack.py | Desaiakshata/Algorithms-problems | 90f4e40ba05e4bdfc783614bb70b9156b05eec0b | [
"MIT"
] | null | null | null | greedy_algorithms/2_maximum_value_of_the_loot/fractional_knapsack.py | Desaiakshata/Algorithms-problems | 90f4e40ba05e4bdfc783614bb70b9156b05eec0b | [
"MIT"
] | null | null | null | # Uses python3
import sys
def get_optimal_value(capacity, weights, values):
    """Fractional knapsack: maximum total value that fits into `capacity`.

    Items may be taken fractionally, so the greedy strategy of filling by
    decreasing value-per-unit-weight ratio is optimal.

    Replaces the original O(n^2) repeated-max scan (which also shadowed the
    builtin `max` and emitted debug prints) with a single sort.

    Args:
        capacity: non-negative capacity of the knapsack.
        weights: positive per-item weights.
        values: per-item values, parallel to `weights`.

    Returns:
        The maximum attainable value, as a float.
    """
    total = 0.0
    # Best ratio first; each item is consumed greedily, possibly fractionally.
    items = sorted(zip(weights, values),
                   key=lambda wv: wv[1] / wv[0], reverse=True)
    for weight, item_value in items:
        if capacity <= 0:
            break
        take = min(weight, capacity)
        # A `take`-unit slice carries a proportional share of the value.
        total += item_value * (take / weight)
        capacity -= take
    return total
if __name__ == "__main__":
data = list(map(int, input().split(' '))) #sys.stdin.read()
n, capacity = data[0:2]
values = data[2:(2 * n + 2):2]
weights = data[3:(2 * n + 2):2]
print(values,weights,n,capacity)
opt_value = get_optimal_value(capacity, weights, values)
print("{:.10f}".format(opt_value))
| 26.380952 | 63 | 0.528881 |
37709203953affbd67bfafe10a8ae4465a264e05 | 3,321 | py | Python | LR/lr/lib/resumption_token.py | LearningRegistry/LearningRegistry | d9f0a8117a4adb8fcf6bf101d3d58d799463a2e2 | [
"Apache-2.0"
] | 26 | 2015-04-14T03:11:58.000Z | 2022-01-06T14:31:07.000Z | LR/lr/lib/resumption_token.py | LearningRegistry/LearningRegistry | d9f0a8117a4adb8fcf6bf101d3d58d799463a2e2 | [
"Apache-2.0"
] | 11 | 2015-04-03T21:54:03.000Z | 2017-05-02T17:20:03.000Z | LR/lr/lib/resumption_token.py | LearningRegistry/LearningRegistry | d9f0a8117a4adb8fcf6bf101d3d58d799463a2e2 | [
"Apache-2.0"
] | 16 | 2015-02-11T09:30:18.000Z | 2020-11-20T02:06:24.000Z | # -*- coding: utf-8 -*-
'''
Created on Aug 17, 2011
@author: jklo
'''
# MIT licensed JWT implementation: https://github.com/progrium/pyjwt
import jwt
import datetime
__JWT_ALG = "HS256"
def parse_token(serviceid, token):
    """Decode a resumption token, verifying its signature with `serviceid`.

    On decode/signature failure, the token is decoded again WITHOUT
    verification and the failure message is attached under the "error" key,
    so callers can still inspect the (untrusted) payload.
    """
    decoded = {}
    try:
        decoded = jwt.decode(str(token), str(serviceid), __JWT_ALG)
    except jwt.DecodeError as e:
        # NOTE(review): e.message is Python-2 only, consistent with the
        # `print` statements later in this module.
        decoded = jwt.decode(str(token), verify=False)
        decoded["error"] = e.message
    return decoded
def get_payload(startkey=None, endkey={}, startkey_docid=None, from_date=None, until_date=None, key=None, keys=None, any_tags=None, identity=None):
    """Build the JWT payload dict for a resumption token.

    `startkey`/`endkey` are always present; every other field is included
    only when truthy.  Datetime bounds are serialized to ISO-8601 Z strings.

    Fix: the helpers module is now imported in BOTH date branches -- the
    original imported `h` only inside the from_date branch, so passing
    until_date without from_date raised NameError.
    """
    payload = {}
    payload["startkey"] = startkey
    payload["endkey"] = endkey
    if key:
        payload["key"] = key
    if keys:
        payload["keys"] = keys
    if startkey_docid:
        payload["startkey_docid"] = startkey_docid
    if any_tags:
        payload['any_tags'] = any_tags
    if identity:
        payload['identity'] = identity
    if from_date and isinstance(from_date, datetime.datetime):
        from lr.lib import helpers as h
        payload["from_date"] = h.convertToISO8601Zformat(from_date)
    if until_date and isinstance(until_date, datetime.datetime):
        from lr.lib import helpers as h
        payload["until_date"] = h.convertToISO8601Zformat(until_date)
    return payload
def get_offset_payload(offset=None, keys=None, maxResults=None):
    """Build the JWT payload for an offset-style resumption token.

    Only truthy arguments appear in the returned dict.
    """
    candidates = (("offset", offset), ("keys", keys), ("maxResults", maxResults))
    return dict((name, value) for name, value in candidates if value)
def get_token_slice(serviceid, startkey=None, endkey={}, startkey_docid=None, any_tags=None, identity=None, maxResults=None):
    # Slice-style token: no date bounds or key filters are encoded.
    # NOTE(review): maxResults is accepted but never encoded into the token.
    return jwt.encode(get_payload(startkey, endkey, startkey_docid, None, None, None, None, any_tags, identity), serviceid, __JWT_ALG)
def get_token(serviceid, startkey=None, endkey={}, startkey_docid=None, from_date=None, until_date=None, key=None, keys=None, maxResults=None):
    # HMAC-sign the payload with the service id as the shared secret.
    # NOTE(review): maxResults is accepted but never encoded into the token.
    return jwt.encode(get_payload(startkey, endkey, startkey_docid, from_date, until_date, key, keys), serviceid, __JWT_ALG)
def get_offset_token(serviceid, offset=None, keys=None, maxResults=None):
    # Offset-style token: signs the {offset, keys, maxResults} payload.
    return jwt.encode(get_offset_payload(offset, keys, maxResults), serviceid, __JWT_ALG)
if __name__ == "__main__":
from uuid import uuid4
import json
params = {
u'startkey': [u'LODE', None],
u'endkey': [u'LODE', u'2011-08-12T10:21:04'],
u'startkey_docid': u'7623e2d6c055481988041e06c156fca8'
}
serviceid = uuid4().hex
token = get_token(serviceid, **params)
print "Param: %s\n" % repr(params)
print "Token: %s\n" % token
parsed = parse_token(serviceid, token)
assert params == parsed, "Tokens don't match: %s" % repr(parsed)
params = {
u'startkey': [u'11, κλάση, εικόνα,', None],
u'endkey': [u'11, κλάση, εικόνα,', u'2011-08-12T10:21:04'],
u'startkey_docid': u'7623e2d6c055481988041e06c156fca8'
}
serviceid = uuid4().hex
token = get_token(serviceid, **params)
print "Unicode Param: %s\n" % repr(params)
print "Unicode Token: %s\n" % token
parsed = parse_token(serviceid, token)
assert params == parsed, "Unicode Tokens don't match: %s" % repr(parsed)
| 27 | 147 | 0.655224 |
83d5b87c87ce318db24cf5a7cc18cc04ae189e19 | 7,163 | py | Python | scripts/manage_translations.py | AlexHill/django | fe1389e911b0cdc487e5547c09c920c12f4e1ce0 | [
"BSD-3-Clause"
] | 1 | 2019-09-21T06:40:37.000Z | 2019-09-21T06:40:37.000Z | scripts/manage_translations.py | AlexHill/django | fe1389e911b0cdc487e5547c09c920c12f4e1ce0 | [
"BSD-3-Clause"
] | null | null | null | scripts/manage_translations.py | AlexHill/django | fe1389e911b0cdc487e5547c09c920c12f4e1ce0 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#
# This python file contains utility scripts to manage Django translations.
# It has to be run inside the django git root directory.
#
# The following commands are available:
#
# * update_catalogs: check for new strings in core and contrib catalogs, and
# output how much strings are new/changed.
#
# * lang_stats: output statistics for each catalog/language combination
#
# * fetch: fetch translations from transifex.com
#
# Each command support the --languages and --resources options to limit their
# operation to the specified language or resource. For example, to get stats
# for Spanish in contrib.admin, run:
#
# $ python scripts/manage_translations.py lang_stats --language=es --resources=admin
import os
from optparse import OptionParser
from subprocess import call, Popen, PIPE
from django.core.management import call_command
HAVE_JS = ['admin']
def _get_locale_dirs(resources, include_core=True):
    """
    Return a tuple (contrib name, absolute path) for all locale directories,
    optionally including the django core catalog.
    If resources list is not None, filter directories matching resources content.
    Must be run from the django git root (paths are built from os.getcwd()).
    """
    contrib_dir = os.path.join(os.getcwd(), 'django', 'contrib')
    dirs = []
    # Collect all locale directories
    for contrib_name in os.listdir(contrib_dir):
        path = os.path.join(contrib_dir, contrib_name, 'locale')
        if os.path.isdir(path):
            dirs.append((contrib_name, path))
            # Apps in HAVE_JS ship a separate javascript catalog.
            if contrib_name in HAVE_JS:
                dirs.append(("%s-js" % contrib_name, path))
    if include_core:
        dirs.insert(0, ('core', os.path.join(os.getcwd(), 'django', 'conf', 'locale')))
    # Filter by resources, if any
    if resources is not None:
        res_names = [d[0] for d in dirs]
        dirs = [ld for ld in dirs if ld[0] in resources]
        # Any requested resource that did not survive the filter is unknown.
        if len(resources) > len(dirs):
            print("You have specified some unknown resources. "
                  "Available resource names are: %s" % (', '.join(res_names),))
            exit(1)
    return dirs
def _tx_resource_for_name(name):
""" Return the Transifex resource name """
if name == 'core':
return "django-core.core"
else:
return "django-core.contrib-%s" % name
def _check_diff(cat_name, base_path):
    """
    Output the approximate number of changed/added strings in the en catalog.
    Shells out to `git diff` on the catalog's .po file and counts hunk lines,
    excluding comment and POT-Creation-Date noise.
    """
    po_path = '%(path)s/en/LC_MESSAGES/django%(ext)s.po' % {
        'path': base_path, 'ext': 'js' if cat_name.endswith('-js') else ''}
    p = Popen("git diff -U0 %s | egrep -v '^@@|^[-+]#|^..POT-Creation' | wc -l" % po_path,
              stdout=PIPE, stderr=PIPE, shell=True)
    output, errors = p.communicate()
    # Subtract 4 for the constant diff header lines.
    num_changes = int(output.strip()) - 4
    print("%d changed/added messages in '%s' catalog." % (num_changes, cat_name))
def update_catalogs(resources=None, languages=None):
    """
    Update the en/LC_MESSAGES/django.po (main and contrib) files with
    new/updated translatable strings.
    `languages` is accepted for CLI symmetry but unused here.
    """
    contrib_dirs = _get_locale_dirs(resources, include_core=False)
    os.chdir(os.path.join(os.getcwd(), 'django'))
    print("Updating main en catalog")
    call_command('makemessages', locale='en')
    _check_diff('core', os.path.join(os.getcwd(), 'conf', 'locale'))
    # Contrib catalogs
    for name, dir_ in contrib_dirs:
        # makemessages must run from the app directory (the locale parent).
        os.chdir(os.path.join(dir_, '..'))
        print("Updating en catalog in %s" % dir_)
        if name.endswith('-js'):
            call_command('makemessages', locale='en', domain='djangojs')
        else:
            call_command('makemessages', locale='en')
        _check_diff(name, dir_)
def lang_stats(resources=None, languages=None):
    """
    Output language statistics of committed translation files for each
    Django catalog.
    If resources is provided, it should be a list of translation resource to
    limit the output (e.g. ['core', 'gis']).
    Stats come from `msgfmt -vc`, which validates each catalog as it counts.
    """
    locale_dirs = _get_locale_dirs(resources)
    for name, dir_ in locale_dirs:
        print("\nShowing translations stats for '%s':" % name)
        langs = sorted([d for d in os.listdir(dir_) if not d.startswith('_')])
        for lang in langs:
            if languages and not lang in languages:
                continue
            # TODO: merge first with the latest en catalog
            p = Popen("msgfmt -vc -o /dev/null %(path)s/%(lang)s/LC_MESSAGES/django%(ext)s.po" % {
                'path': dir_, 'lang': lang, 'ext': 'js' if name.endswith('-js') else ''},
                stdout=PIPE, stderr=PIPE, shell=True)
            output, errors = p.communicate()
            if p.returncode == 0:
                # msgfmt output stats on stderr
                print("%s: %s" % (lang, errors.strip()))
            else:
                print("Errors happened when checking %s translation for %s:\n%s" % (
                    lang, name, errors))
def fetch(resources=None, languages=None):
    """
    Fetch translations from Transifex, wrap long lines, generate mo files.
    Exits with status 1 if any catalog fails msgfmt compilation.
    """
    locale_dirs = _get_locale_dirs(resources)
    errors = []
    for name, dir_ in locale_dirs:
        # Transifex pull
        if languages is None:
            # Pull all languages at >=5% completion, then enumerate what
            # landed on disk (everything except 'en' and private dirs).
            call('tx pull -r %(res)s -a -f --minimum-perc=5' % {'res': _tx_resource_for_name(name)}, shell=True)
            languages = sorted([d for d in os.listdir(dir_) if not d.startswith('_') and d != 'en'])
        else:
            for lang in languages:
                call('tx pull -r %(res)s -f -l %(lang)s' % {
                    'res': _tx_resource_for_name(name), 'lang': lang}, shell=True)
        # msgcat to wrap lines and msgfmt for compilation of .mo file
        for lang in languages:
            po_path = '%(path)s/%(lang)s/LC_MESSAGES/django%(ext)s.po' % {
                'path': dir_, 'lang': lang, 'ext': 'js' if name.endswith('-js') else ''}
            if not os.path.exists(po_path):
                print("No %(lang)s translation for resource %(name)s" % {
                    'lang': lang, 'name': name})
                continue
            call('msgcat -o %s %s' % (po_path, po_path), shell=True)
            # po_path[:-3] strips the ".po" suffix for the ".mo" target.
            res = call('msgfmt -c -o %s.mo %s' % (po_path[:-3], po_path), shell=True)
            if res != 0:
                errors.append((name, lang))
    if errors:
        print("\nWARNING: Errors have occurred in following cases:")
        for resource, lang in errors:
            print("\tResource %s for language %s" % (resource, lang))
        exit(1)
if __name__ == "__main__":
RUNABLE_SCRIPTS = ('update_catalogs', 'lang_stats', 'fetch')
parser = OptionParser(usage="usage: %prog [options] cmd")
parser.add_option("-r", "--resources", action='append',
help="limit operation to the specified resources")
parser.add_option("-l", "--languages", action='append',
help="limit operation to the specified languages")
options, args = parser.parse_args()
if not args:
parser.print_usage()
exit(1)
if args[0] in RUNABLE_SCRIPTS:
eval(args[0])(options.resources, options.languages)
else:
print("Available commands are: %s" % ", ".join(RUNABLE_SCRIPTS))
| 38.304813 | 113 | 0.614547 |
4f71204e46837b32cfd2b3a3a3c79870111f8905 | 1,998 | py | Python | tests/__init__.py | arvy/sperf | c047ae5f3b1daf70cc227784197e4ef37caaf556 | [
"Apache-2.0"
] | null | null | null | tests/__init__.py | arvy/sperf | c047ae5f3b1daf70cc227784197e4ef37caaf556 | [
"Apache-2.0"
] | null | null | null | tests/__init__.py | arvy/sperf | c047ae5f3b1daf70cc227784197e4ef37caaf556 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 DataStax, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""top level tests package with helper methods"""
import os
from io import StringIO
import contextlib
import logging
LOGGER = logging.getLogger(__name__)
def test_dir():
    """Return the absolute path of this package's ``testdata`` directory."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, "testdata")
def test_dse_tarball():
    """Return the path of the default DSE diagnostic tarball for all tests."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, "testdata", "diag", "DSE_CLUSTER")
def test_cassandra_tarball():
    """Return the path of the default Cassandra diagnostic tarball for tests."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, "testdata", "diag", "cassandra")
def current_dir(current_file):
    """Return the absolute directory that contains `current_file`."""
    absolute = os.path.abspath(current_file)
    return os.path.dirname(absolute)
def steal_output(func, *args, **kwargs):
    """Run ``func(*args, **kwargs)`` and return its stripped stdout text."""
    buffer = StringIO()
    with contextlib.redirect_stdout(buffer):
        func(*args, **kwargs)
    return buffer.getvalue().strip()
def assert_in_output(expected_text, text):
    """Assert that `expected_text` occurs in `text`.

    On failure, the full `text` and the exception are logged before the
    exception is re-raised, which makes mismatches easier to debug.
    """
    try:
        assert expected_text in text
    except Exception as problem:
        LOGGER.info(text)
        LOGGER.error(problem)
        raise
| 35.678571 | 102 | 0.722222 |
a360458a3c13c747395df458158320d83405468f | 6,529 | py | Python | accelbyte_py_sdk/api/lobby/operations/third_party/admin_delete_third_part_c9d441.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | null | null | null | accelbyte_py_sdk/api/lobby/operations/third_party/admin_delete_third_part_c9d441.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | 1 | 2021-10-13T03:46:58.000Z | 2021-10-13T03:46:58.000Z | accelbyte_py_sdk/api/lobby/operations/third_party/admin_delete_third_part_c9d441.py | AccelByte/accelbyte-python-sdk | dcd311fad111c59da828278975340fb92e0f26f7 | [
"MIT"
] | null | null | null | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
# justice-lobby-server (staging)
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HeaderStr
from .....core import HttpResponse
from ...models import RestapiErrorResponseV1
class AdminDeleteThirdPartyConfig(Operation):
    """Delete Third Party Steam Config (adminDeleteThirdPartyConfig)
    Required permission : `ADMIN:NAMESPACE:{namespace}:THIRDPARTY:CONFIG [DELETE]` with scope `social`
    delete third party config in a namespace.
    Required Permission(s):
        - ADMIN:NAMESPACE:{namespace}:THIRDPARTY:CONFIG [DELETE]
    Required Scope(s):
        - social
    Properties:
        url: /lobby/v1/admin/thirdparty/namespaces/{namespace}/config/steam
        method: DELETE
        tags: ["thirdParty"]
        consumes: []
        produces: ["application/json"]
        securities: [BEARER_AUTH]
        namespace: (namespace) REQUIRED str in path
    Responses:
        204: No Content - str (No Content)
        400: Bad Request - RestapiErrorResponseV1 (Bad Request)
        401: Unauthorized - RestapiErrorResponseV1 (Unauthorized)
        403: Forbidden - RestapiErrorResponseV1 (Forbidden)
        500: Internal Server Error - RestapiErrorResponseV1 (Internal Server Error)
    """
    # NOTE(review): generated file ("Code generated. DO NOT EDIT!") -- the
    # comments below are review annotations only; regenerate to change logic.
    # region fields
    _url: str = "/lobby/v1/admin/thirdparty/namespaces/{namespace}/config/steam"
    _method: str = "DELETE"
    _consumes: List[str] = []
    _produces: List[str] = ["application/json"]
    _securities: List[List[str]] = [["BEARER_AUTH"]]
    _location_query: str = None
    namespace: str # REQUIRED in [path]
    # endregion fields
    # region properties
    @property
    def url(self) -> str:
        return self._url
    @property
    def method(self) -> str:
        return self._method
    @property
    def consumes(self) -> List[str]:
        return self._consumes
    @property
    def produces(self) -> List[str]:
        return self._produces
    @property
    def securities(self) -> List[List[str]]:
        return self._securities
    @property
    def location_query(self) -> str:
        return self._location_query
    # endregion properties
    # region get methods
    # endregion get methods
    # region get_x_params methods
    def get_all_params(self) -> dict:
        return {
            "path": self.get_path_params(),
        }
    def get_path_params(self) -> dict:
        result = {}
        if hasattr(self, "namespace"):
            result["namespace"] = self.namespace
        return result
    # endregion get_x_params methods
    # region is/has methods
    # endregion is/has methods
    # region with_x methods
    def with_namespace(self, value: str) -> AdminDeleteThirdPartyConfig:
        self.namespace = value
        return self
    # endregion with_x methods
    # region to methods
    def to_dict(self, include_empty: bool = False) -> dict:
        result: dict = {}
        if hasattr(self, "namespace") and self.namespace:
            result["namespace"] = str(self.namespace)
        elif include_empty:
            result["namespace"] = ""
        return result
    # endregion to methods
    # region response methods
    # noinspection PyMethodMayBeStatic
    def parse_response(self, code: int, content_type: str, content: Any) -> Tuple[None, Union[None, HttpResponse, RestapiErrorResponseV1]]:
        """Parse the given response.
        204: No Content - str (No Content)
        400: Bad Request - RestapiErrorResponseV1 (Bad Request)
        401: Unauthorized - RestapiErrorResponseV1 (Unauthorized)
        403: Forbidden - RestapiErrorResponseV1 (Forbidden)
        500: Internal Server Error - RestapiErrorResponseV1 (Internal Server Error)
        ---: HttpResponse (Undocumented Response)
        ---: HttpResponse (Unexpected Content-Type Error)
        ---: HttpResponse (Unhandled Error)
        """
        # Shared pre-processing; a "no content" error object means success
        # with an empty body, so it is reported as no error at all.
        pre_processed_response, error = self.pre_process_response(code=code, content_type=content_type, content=content)
        if error is not None:
            return None, None if error.is_no_content() else error
        code, content_type, content = pre_processed_response
        if code == 204:
            return None, None
        if code == 400:
            return None, RestapiErrorResponseV1.create_from_dict(content)
        if code == 401:
            return None, RestapiErrorResponseV1.create_from_dict(content)
        if code == 403:
            return None, RestapiErrorResponseV1.create_from_dict(content)
        if code == 500:
            return None, RestapiErrorResponseV1.create_from_dict(content)
        # Any other status code falls through to the generic handler.
        return None, self.handle_undocumented_response(code=code, content_type=content_type, content=content)
    # endregion response methods
    # region static methods
    @classmethod
    def create(
        cls,
        namespace: str,
    ) -> AdminDeleteThirdPartyConfig:
        instance = cls()
        instance.namespace = namespace
        return instance
    @classmethod
    def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> AdminDeleteThirdPartyConfig:
        instance = cls()
        if "namespace" in dict_ and dict_["namespace"] is not None:
            instance.namespace = str(dict_["namespace"])
        elif include_empty:
            instance.namespace = ""
        return instance
    @staticmethod
    def get_field_info() -> Dict[str, str]:
        return {
            "namespace": "namespace",
        }
    @staticmethod
    def get_required_map() -> Dict[str, bool]:
        return {
            "namespace": True,
        }
    # endregion static methods
| 27.901709 | 139 | 0.65232 |
b6e29be2b387b6bbf3551bfe343c064f4e021494 | 5,207 | py | Python | scipy/constants.py | Fernal73/LearnPython3 | 5288017c0dbf95633b84f1e6324f00dec6982d36 | [
"MIT"
] | 1 | 2021-12-17T11:03:13.000Z | 2021-12-17T11:03:13.000Z | scipy/constants.py | Fernal73/LearnPython3 | 5288017c0dbf95633b84f1e6324f00dec6982d36 | [
"MIT"
] | 1 | 2020-02-05T00:14:43.000Z | 2020-02-06T09:22:49.000Z | scipy/constants.py | Fernal73/LearnPython3 | 5288017c0dbf95633b84f1e6324f00dec6982d36 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Demo: print the value of every unit-conversion constant in scipy.constants."""
from scipy import constants

print(constants.pi)
print(dir(constants))

# Names are listed in the same order as the original one-print-per-line
# version: SI prefixes, binary prefixes, mass, angle, time, length,
# pressure, area, volume, speed, temperature, energy, power and force.
_CONSTANT_NAMES = [
    "yotta", "zetta", "exa", "peta", "tera", "giga", "mega", "kilo",
    "hecto", "deka", "deci", "centi", "milli", "micro", "nano", "pico",
    "femto", "atto", "zepto",
    "kibi", "mebi", "gibi", "tebi", "pebi", "exbi", "zebi", "yobi",
    "gram", "metric_ton", "grain", "lb", "pound", "oz", "ounce", "stone",
    "long_ton", "short_ton", "troy_ounce", "troy_pound", "carat",
    "atomic_mass", "m_u", "u",
    "degree", "arcmin", "arcminute", "arcsec", "arcsecond",
    "minute", "hour", "day", "week", "year", "Julian_year",
    "inch", "foot", "yard", "mile", "mil", "pt", "point",
    "survey_foot", "survey_mile", "nautical_mile", "fermi", "angstrom",
    "micron", "au", "astronomical_unit", "light_year", "parsec",
    "atm", "atmosphere", "bar", "torr", "mmHg", "psi",
    "hectare", "acre",
    "liter", "litre", "gallon", "gallon_US", "gallon_imp",
    "fluid_ounce", "fluid_ounce_US", "fluid_ounce_imp", "barrel", "bbl",
    "kmh", "mph", "mach", "speed_of_sound", "knot",
    "zero_Celsius", "degree_Fahrenheit",
    "eV", "electron_volt", "calorie", "calorie_th", "calorie_IT", "erg",
    "Btu", "Btu_IT", "Btu_th", "ton_TNT", "hp", "horsepower",
    "dyn", "dyne", "lbf", "pound_force", "kgf", "kilogram_force",
]
for _name in _CONSTANT_NAMES:
    print(getattr(constants, _name))
| 38.286765 | 58 | 0.70482 |
b0d066e700452a62c33ca5bee90145e9e47116d6 | 289 | py | Python | Mini Projects/Extension Changer/Extension_Changer.py | Iamtripathisatyam/Python_Beginner_Level_Projects | 993ebe862a90e42db83cd2aa86943950d5ddd4a6 | [
"MIT"
] | 4 | 2021-05-03T10:20:52.000Z | 2021-05-04T05:42:32.000Z | Mini Projects/Extension Changer/Extension_Changer.py | Sujit199696/Python_Beginner_Level_Projects | dad2deb38c02e36378823c57ef09c2911f66dda8 | [
"MIT"
] | null | null | null | Mini Projects/Extension Changer/Extension_Changer.py | Sujit199696/Python_Beginner_Level_Projects | dad2deb38c02e36378823c57ef09c2911f66dda8 | [
"MIT"
] | 1 | 2021-05-04T01:58:07.000Z | 2021-05-04T01:58:07.000Z | import os
import os

# Directory containing the files whose extension should be changed.
# NOTE: the original literal used "\h", an invalid escape sequence that
# Python only tolerates with a warning; a raw string is unambiguous and
# evaluates to the same path.
TARGET_DIR = r"C:\Users\Dell\Desktop\ha"
# Extensions (compared lowercase) to convert, and the extension to apply.
OLD_EXTENSIONS = ('.pdf',)
NEW_EXTENSION = '.docx'

os.chdir(TARGET_DIR)
for filename in os.listdir():
    base, ext = os.path.splitext(filename)
    if ext.lower() in OLD_EXTENSIONS:
        # Rename in place, e.g. "report.pdf" -> "report.docx".
        os.rename(filename, base + NEW_EXTENSION)
| 28.9 | 71 | 0.650519 |
92483f1e7511eaa00ebbe32dda12452c3bb51e78 | 18,906 | py | Python | pandas/tests/sparse/test_combine_concat.py | blueenvelope31/pandas | 6e1f41b45a0d08f662b794ed98695b8595f39023 | [
"BSD-3-Clause"
] | 4 | 2021-03-02T19:57:18.000Z | 2021-06-20T19:23:57.000Z | pandas/tests/sparse/test_combine_concat.py | 16umm001/pandas | a2e599499667b256bc5b8b13a75f0601eccfd432 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2016-08-15T12:35:16.000Z | 2016-08-15T12:35:16.000Z | pandas/tests/sparse/test_combine_concat.py | 16umm001/pandas | a2e599499667b256bc5b8b13a75f0601eccfd432 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2 | 2017-05-27T03:25:12.000Z | 2021-09-21T21:51:12.000Z | # pylint: disable-msg=E1101,W0612
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
import itertools
class TestSparseArrayConcat(object):
    """Tests for ``SparseArray._concat_same_type`` value and kind handling."""
    @pytest.mark.parametrize('kind', ['integer', 'block'])
    def test_basic(self, kind):
        """Concatenating two arrays of the same kind keeps all sparse values."""
        a = pd.SparseArray([1, 0, 0, 2], kind=kind)
        b = pd.SparseArray([1, 0, 2, 2], kind=kind)
        result = pd.SparseArray._concat_same_type([a, b])
        # Can't make any assertions about the sparse index itself, since
        # sparse blocks are not merged across the arrays in to_concat.
        expected = np.array([1, 2, 1, 2, 2], dtype='int64')
        tm.assert_numpy_array_equal(result.sp_values, expected)
        assert result.kind == kind
    @pytest.mark.parametrize('kind', ['integer', 'block'])
    def test_uses_first_kind(self, kind):
        """When kinds differ, the result inherits the first array's kind."""
        other = 'integer' if kind == 'block' else 'block'
        a = pd.SparseArray([1, 0, 0, 2], kind=kind)
        b = pd.SparseArray([1, 0, 2, 2], kind=other)
        result = pd.SparseArray._concat_same_type([a, b])
        expected = np.array([1, 2, 1, 2, 2], dtype='int64')
        tm.assert_numpy_array_equal(result.sp_values, expected)
        assert result.kind == kind
class TestSparseSeriesConcat(object):
    """Tests for ``pd.concat`` behaviour on SparseSeries inputs."""
    @pytest.mark.parametrize('kind', [
        'integer',
        'block',
    ])
    def test_concat(self, kind):
        """Row-wise concat preserves values, kind and fill_value."""
        val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
        val2 = np.array([3, np.nan, 4, 0, 0])
        sparse1 = pd.SparseSeries(val1, name='x', kind=kind)
        sparse2 = pd.SparseSeries(val2, name='y', kind=kind)
        res = pd.concat([sparse1, sparse2])
        exp = pd.concat([pd.Series(val1), pd.Series(val2)])
        exp = pd.SparseSeries(exp, kind=kind)
        tm.assert_sp_series_equal(res, exp, consolidate_block_indices=True)
        sparse1 = pd.SparseSeries(val1, fill_value=0, name='x', kind=kind)
        sparse2 = pd.SparseSeries(val2, fill_value=0, name='y', kind=kind)
        res = pd.concat([sparse1, sparse2])
        exp = pd.concat([pd.Series(val1), pd.Series(val2)])
        exp = pd.SparseSeries(exp, fill_value=0, kind=kind)
        tm.assert_sp_series_equal(res, exp, consolidate_block_indices=True)
    def test_concat_axis1(self):
        """Column-wise concat of two SparseSeries yields a SparseDataFrame."""
        val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
        val2 = np.array([3, np.nan, 4, 0, 0])
        sparse1 = pd.SparseSeries(val1, name='x')
        sparse2 = pd.SparseSeries(val2, name='y')
        res = pd.concat([sparse1, sparse2], axis=1)
        exp = pd.concat([pd.Series(val1, name='x'),
                         pd.Series(val2, name='y')], axis=1)
        exp = pd.SparseDataFrame(exp)
        tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
    def test_concat_different_fill(self):
        """Mismatched fill_values warn and the first input's fill_value wins."""
        val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
        val2 = np.array([3, np.nan, 4, 0, 0])
        for kind in ['integer', 'block']:
            sparse1 = pd.SparseSeries(val1, name='x', kind=kind)
            sparse2 = pd.SparseSeries(val2, name='y', kind=kind, fill_value=0)
            with tm.assert_produces_warning(PerformanceWarning):
                res = pd.concat([sparse1, sparse2])
            exp = pd.concat([pd.Series(val1), pd.Series(val2)])
            exp = pd.SparseSeries(exp, kind=kind)
            tm.assert_sp_series_equal(res, exp)
            with tm.assert_produces_warning(PerformanceWarning):
                res = pd.concat([sparse2, sparse1])
            exp = pd.concat([pd.Series(val2), pd.Series(val1)])
            exp = pd.SparseSeries(exp, kind=kind, fill_value=0)
            tm.assert_sp_series_equal(res, exp)
    def test_concat_axis1_different_fill(self):
        """Column-wise concat keeps each column's own fill_value."""
        val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
        val2 = np.array([3, np.nan, 4, 0, 0])
        sparse1 = pd.SparseSeries(val1, name='x')
        sparse2 = pd.SparseSeries(val2, name='y', fill_value=0)
        res = pd.concat([sparse1, sparse2], axis=1)
        exp = pd.concat([pd.Series(val1, name='x'),
                         pd.Series(val2, name='y')], axis=1)
        assert isinstance(res, pd.SparseDataFrame)
        tm.assert_frame_equal(res.to_dense(), exp)
    def test_concat_different_kind(self):
        """The result's kind follows the first input's kind."""
        val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
        val2 = np.array([3, np.nan, 4, 0, 0])
        sparse1 = pd.SparseSeries(val1, name='x', kind='integer')
        sparse2 = pd.SparseSeries(val2, name='y', kind='block')
        res = pd.concat([sparse1, sparse2])
        exp = pd.concat([pd.Series(val1), pd.Series(val2)])
        exp = pd.SparseSeries(exp, kind=sparse1.kind)
        tm.assert_sp_series_equal(res, exp)
        res = pd.concat([sparse2, sparse1])
        exp = pd.concat([pd.Series(val2), pd.Series(val1)])
        exp = pd.SparseSeries(exp, kind=sparse2.kind)
        tm.assert_sp_series_equal(res, exp, consolidate_block_indices=True)
    @pytest.mark.parametrize('kind', [
        'integer',
        'block',
    ])
    def test_concat_sparse_dense(self, kind):
        """Mixing sparse and dense inputs: first input's fill_value is used."""
        # use first input's fill_value
        val1 = np.array([1, 2, np.nan, np.nan, 0, np.nan])
        val2 = np.array([3, np.nan, 4, 0, 0])
        sparse = pd.SparseSeries(val1, name='x', kind=kind)
        dense = pd.Series(val2, name='y')
        res = pd.concat([sparse, dense])
        exp = pd.SparseSeries(pd.concat([pd.Series(val1), dense]), kind=kind)
        tm.assert_sp_series_equal(res, exp)
        res = pd.concat([dense, sparse, dense])
        exp = pd.concat([dense, pd.Series(val1), dense])
        # XXX: changed from SparseSeries to Series[sparse]
        exp = pd.Series(
            pd.SparseArray(exp, kind=kind),
            index=exp.index,
            name=exp.name,
        )
        tm.assert_series_equal(res, exp)
        sparse = pd.SparseSeries(val1, name='x', kind=kind, fill_value=0)
        dense = pd.Series(val2, name='y')
        res = pd.concat([sparse, dense])
        # XXX: changed from SparseSeries to Series[sparse]
        exp = pd.concat([pd.Series(val1), dense])
        exp = pd.Series(
            pd.SparseArray(exp, kind=kind, fill_value=0),
            index=exp.index,
            name=exp.name,
        )
        tm.assert_series_equal(res, exp)
        res = pd.concat([dense, sparse, dense])
        exp = pd.concat([dense, pd.Series(val1), dense])
        # XXX: changed from SparseSeries to Series[sparse]
        exp = pd.Series(
            pd.SparseArray(exp, kind=kind, fill_value=0),
            index=exp.index,
            name=exp.name,
        )
        tm.assert_series_equal(res, exp)
class TestSparseDataFrameConcat(object):
    """Tests for ``pd.concat`` behaviour on SparseDataFrame inputs."""
    def setup_method(self, method):
        # Three fixture frames: dense1/dense2 share columns A-D with mixes
        # of zeros and NaNs; dense3 has disjoint columns E-H.
        self.dense1 = pd.DataFrame({'A': [0., 1., 2., np.nan],
                                    'B': [0., 0., 0., 0.],
                                    'C': [np.nan, np.nan, np.nan, np.nan],
                                    'D': [1., 2., 3., 4.]})
        self.dense2 = pd.DataFrame({'A': [5., 6., 7., 8.],
                                    'B': [np.nan, 0., 7., 8.],
                                    'C': [5., 6., np.nan, np.nan],
                                    'D': [np.nan, np.nan, np.nan, np.nan]})
        self.dense3 = pd.DataFrame({'E': [5., 6., 7., 8.],
                                    'F': [np.nan, 0., 7., 8.],
                                    'G': [5., 6., np.nan, np.nan],
                                    'H': [np.nan, np.nan, np.nan, np.nan]})
    def test_concat(self):
        """Row-wise concat matches dense concat, for NaN and 0 fill_values."""
        # fill_value = np.nan
        sparse = self.dense1.to_sparse()
        sparse2 = self.dense2.to_sparse()
        res = pd.concat([sparse, sparse])
        exp = pd.concat([self.dense1, self.dense1]).to_sparse()
        tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
        res = pd.concat([sparse2, sparse2])
        exp = pd.concat([self.dense2, self.dense2]).to_sparse()
        tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
        res = pd.concat([sparse, sparse2])
        exp = pd.concat([self.dense1, self.dense2]).to_sparse()
        tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
        res = pd.concat([sparse2, sparse])
        exp = pd.concat([self.dense2, self.dense1]).to_sparse()
        tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
        # fill_value = 0
        sparse = self.dense1.to_sparse(fill_value=0)
        sparse2 = self.dense2.to_sparse(fill_value=0)
        res = pd.concat([sparse, sparse])
        exp = pd.concat([self.dense1, self.dense1]).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
        res = pd.concat([sparse2, sparse2])
        exp = pd.concat([self.dense2, self.dense2]).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
        res = pd.concat([sparse, sparse2])
        exp = pd.concat([self.dense1, self.dense2]).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
        res = pd.concat([sparse2, sparse])
        exp = pd.concat([self.dense2, self.dense1]).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
    def test_concat_different_fill_value(self):
        """Mismatched fill_values warn; the first frame's fill_value is used."""
        # 1st fill_value will be used
        sparse = self.dense1.to_sparse()
        sparse2 = self.dense2.to_sparse(fill_value=0)
        with tm.assert_produces_warning(PerformanceWarning):
            res = pd.concat([sparse, sparse2])
        exp = pd.concat([self.dense1, self.dense2]).to_sparse()
        tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
        with tm.assert_produces_warning(PerformanceWarning):
            res = pd.concat([sparse2, sparse])
        exp = pd.concat([self.dense2, self.dense1]).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True)
    def test_concat_different_columns_sort_warns(self):
        """Concat with disjoint columns without ``sort=`` raises FutureWarning."""
        sparse = self.dense1.to_sparse()
        sparse3 = self.dense3.to_sparse()
        with tm.assert_produces_warning(FutureWarning):
            res = pd.concat([sparse, sparse3])
        with tm.assert_produces_warning(FutureWarning):
            exp = pd.concat([self.dense1, self.dense3])
        exp = exp.to_sparse()
        tm.assert_sp_frame_equal(res, exp, check_kind=False)
    def test_concat_different_columns(self):
        """Concat with disjoint columns and explicit sort matches dense concat."""
        # fill_value = np.nan
        sparse = self.dense1.to_sparse()
        sparse3 = self.dense3.to_sparse()
        res = pd.concat([sparse, sparse3], sort=True)
        exp = pd.concat([self.dense1, self.dense3], sort=True).to_sparse()
        tm.assert_sp_frame_equal(res, exp, check_kind=False)
        res = pd.concat([sparse3, sparse], sort=True)
        exp = pd.concat([self.dense3, self.dense1], sort=True).to_sparse()
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp, check_kind=False)
    def test_concat_bug(self):
        """Regression: concat with an empty sparse frame keeps the SparseDtype."""
        from pandas.core.sparse.api import SparseDtype
        x = pd.SparseDataFrame({"A": pd.SparseArray([np.nan, np.nan],
                                                    fill_value=0)})
        y = pd.SparseDataFrame({"B": []})
        res = pd.concat([x, y], sort=False)[['A']]
        exp = pd.DataFrame({"A": pd.SparseArray([np.nan, np.nan],
                                                dtype=SparseDtype(float, 0))})
        tm.assert_frame_equal(res, exp)
    def test_concat_different_columns_buggy(self):
        """Disjoint columns with fill_value=0, including mixed fill_values."""
        sparse = self.dense1.to_sparse(fill_value=0)
        sparse3 = self.dense3.to_sparse(fill_value=0)
        res = pd.concat([sparse, sparse3], sort=True)
        exp = (pd.concat([self.dense1, self.dense3], sort=True)
                 .to_sparse(fill_value=0))
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp, check_kind=False,
                                 consolidate_block_indices=True)
        res = pd.concat([sparse3, sparse], sort=True)
        exp = (pd.concat([self.dense3, self.dense1], sort=True)
                 .to_sparse(fill_value=0))
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp, check_kind=False,
                                 consolidate_block_indices=True)
        # different fill values
        sparse = self.dense1.to_sparse()
        sparse3 = self.dense3.to_sparse(fill_value=0)
        # each columns keeps its fill_value, thus compare in dense
        res = pd.concat([sparse, sparse3], sort=True)
        exp = pd.concat([self.dense1, self.dense3], sort=True)
        assert isinstance(res, pd.SparseDataFrame)
        tm.assert_frame_equal(res.to_dense(), exp)
        res = pd.concat([sparse3, sparse], sort=True)
        exp = pd.concat([self.dense3, self.dense1], sort=True)
        assert isinstance(res, pd.SparseDataFrame)
        tm.assert_frame_equal(res.to_dense(), exp)
    def test_concat_series(self):
        """Concatenating a SparseDataFrame with a SparseSeries column."""
        # fill_value = np.nan
        sparse = self.dense1.to_sparse()
        sparse2 = self.dense2.to_sparse()
        for col in ['A', 'D']:
            res = pd.concat([sparse, sparse2[col]])
            exp = pd.concat([self.dense1, self.dense2[col]]).to_sparse()
            tm.assert_sp_frame_equal(res, exp, check_kind=False)
            res = pd.concat([sparse2[col], sparse])
            exp = pd.concat([self.dense2[col], self.dense1]).to_sparse()
            tm.assert_sp_frame_equal(res, exp, check_kind=False)
        # fill_value = 0
        sparse = self.dense1.to_sparse(fill_value=0)
        sparse2 = self.dense2.to_sparse(fill_value=0)
        for col in ['C', 'D']:
            res = pd.concat([sparse, sparse2[col]])
            exp = pd.concat([self.dense1,
                             self.dense2[col]]).to_sparse(fill_value=0)
            exp._default_fill_value = np.nan
            tm.assert_sp_frame_equal(res, exp, check_kind=False,
                                     consolidate_block_indices=True)
            res = pd.concat([sparse2[col], sparse])
            exp = pd.concat([self.dense2[col],
                             self.dense1]).to_sparse(fill_value=0)
            exp['C'] = res['C']
            exp._default_fill_value = np.nan
            tm.assert_sp_frame_equal(res, exp, consolidate_block_indices=True,
                                     check_kind=False)
    def test_concat_axis1(self):
        """Column-wise concat of sparse frames, including mixed fill_values."""
        # fill_value = np.nan
        sparse = self.dense1.to_sparse()
        sparse3 = self.dense3.to_sparse()
        res = pd.concat([sparse, sparse3], axis=1)
        exp = pd.concat([self.dense1, self.dense3], axis=1).to_sparse()
        tm.assert_sp_frame_equal(res, exp)
        res = pd.concat([sparse3, sparse], axis=1)
        exp = pd.concat([self.dense3, self.dense1], axis=1).to_sparse()
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp)
        # fill_value = 0
        sparse = self.dense1.to_sparse(fill_value=0)
        sparse3 = self.dense3.to_sparse(fill_value=0)
        res = pd.concat([sparse, sparse3], axis=1)
        exp = pd.concat([self.dense1, self.dense3],
                        axis=1).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp)
        res = pd.concat([sparse3, sparse], axis=1)
        exp = pd.concat([self.dense3, self.dense1],
                        axis=1).to_sparse(fill_value=0)
        exp._default_fill_value = np.nan
        tm.assert_sp_frame_equal(res, exp)
        # different fill values
        sparse = self.dense1.to_sparse()
        sparse3 = self.dense3.to_sparse(fill_value=0)
        # each columns keeps its fill_value, thus compare in dense
        res = pd.concat([sparse, sparse3], axis=1)
        exp = pd.concat([self.dense1, self.dense3], axis=1)
        assert isinstance(res, pd.SparseDataFrame)
        tm.assert_frame_equal(res.to_dense(), exp)
        res = pd.concat([sparse3, sparse], axis=1)
        exp = pd.concat([self.dense3, self.dense1], axis=1)
        assert isinstance(res, pd.SparseDataFrame)
        tm.assert_frame_equal(res.to_dense(), exp)
    @pytest.mark.parametrize('fill_value,sparse_idx,dense_idx',
                             itertools.product([None, 0, 1, np.nan],
                                               [0, 1],
                                               [1, 0]))
    def test_concat_sparse_dense_rows(self, fill_value, sparse_idx, dense_idx):
        """Row-wise mixed sparse/dense concat, both argument orders."""
        frames = [self.dense1, self.dense2]
        sparse_frame = [frames[dense_idx],
                        frames[sparse_idx].to_sparse(fill_value=fill_value)]
        dense_frame = [frames[dense_idx], frames[sparse_idx]]
        # This will try both directions sparse + dense and dense + sparse
        for _ in range(2):
            res = pd.concat(sparse_frame)
            exp = pd.concat(dense_frame)
            assert isinstance(res, pd.SparseDataFrame)
            tm.assert_frame_equal(res.to_dense(), exp)
            sparse_frame = sparse_frame[::-1]
            dense_frame = dense_frame[::-1]
    @pytest.mark.parametrize('fill_value,sparse_idx,dense_idx',
                             itertools.product([None, 0, 1, np.nan],
                                               [0, 1],
                                               [1, 0]))
    @pytest.mark.xfail(reason="The iloc fails and I can't make expected",
                       strict=False)
    def test_concat_sparse_dense_cols(self, fill_value, sparse_idx, dense_idx):
        """Column-wise mixed sparse/dense concat; known-failing (see GH16874)."""
        # See GH16874, GH18914 and #18686 for why this should be a DataFrame
        from pandas.core.dtypes.common import is_sparse
        frames = [self.dense1, self.dense3]
        sparse_frame = [frames[dense_idx],
                        frames[sparse_idx].to_sparse(fill_value=fill_value)]
        dense_frame = [frames[dense_idx], frames[sparse_idx]]
        # This will try both directions sparse + dense and dense + sparse
        for _ in range(2):
            res = pd.concat(sparse_frame, axis=1)
            exp = pd.concat(dense_frame, axis=1)
            cols = [i for (i, x) in enumerate(res.dtypes) if is_sparse(x)]
            for col in cols:
                exp.iloc[:, col] = exp.iloc[:, col].astype("Sparse")
            for column in frames[dense_idx].columns:
                if dense_idx == sparse_idx:
                    tm.assert_frame_equal(res[column], exp[column])
                else:
                    tm.assert_series_equal(res[column], exp[column])
            tm.assert_frame_equal(res, exp)
            sparse_frame = sparse_frame[::-1]
            dense_frame = dense_frame[::-1]
| 41.010846 | 79 | 0.58648 |
91d1d22841c8acef0745d98d4fde518574921bd5 | 213 | py | Python | lucida/commandcenter/controllers/__init__.py | rlugojr/lucida | a2a59d131dbf0835572faf0a968359829f199aa3 | [
"BSD-3-Clause"
] | 1 | 2017-02-09T09:24:21.000Z | 2017-02-09T09:24:21.000Z | lucida/commandcenter/controllers/__init__.py | rlugojr/lucida | a2a59d131dbf0835572faf0a968359829f199aa3 | [
"BSD-3-Clause"
] | 1 | 2021-02-08T20:25:37.000Z | 2021-02-08T20:25:37.000Z | lucida/commandcenter/controllers/__init__.py | iru-ken/lucida | cd060b0aa0d8721828a8f37b271e1aa83e76ea6f | [
"BSD-3-Clause"
] | 1 | 2017-03-19T11:02:14.000Z | 2017-03-19T11:02:14.000Z | __all__ = ['Main', 'AccessManagement', 'WebSocket', 'Service', 'Graph',
'ThriftClient', 'Create', 'Learn', 'Infer',
'QueryClassifier', 'Config', 'User', 'Utilities', 'Database', 'Memcached']
| 53.25 | 85 | 0.596244 |
7323687f9994c0fbf3f1819585a700a63d3a1435 | 256 | py | Python | scheduler/__init__.py | JarnoRFB/MoAI-scheduler | 6891b39d6b4b21188458b8ab5701278fba435173 | [
"MIT"
] | 2 | 2019-02-19T09:34:37.000Z | 2019-05-22T01:39:52.000Z | scheduler/__init__.py | JarnoRFB/MoAI-scheduler | 6891b39d6b4b21188458b8ab5701278fba435173 | [
"MIT"
] | null | null | null | scheduler/__init__.py | JarnoRFB/MoAI-scheduler | 6891b39d6b4b21188458b8ab5701278fba435173 | [
"MIT"
] | 1 | 2018-11-07T22:10:17.000Z | 2018-11-07T22:10:17.000Z | from scheduler.instructor import Instructor
from scheduler.timeslot import Timeslot
from scheduler.room import Room
from scheduler.timetable import Timetable
from scheduler.assignment import Assignment
from scheduler.exceptions import ImpossibleAssignments | 42.666667 | 54 | 0.886719 |
830e5913a0a452096a36a6ef89bacea59fe10f97 | 4,087 | py | Python | run.py | benjefferies/branch-protection-bot | 76b8a9d745a68c6a6e9b4bf745d4b32ad805e9a1 | [
"MIT"
] | 57 | 2019-11-14T16:20:39.000Z | 2022-03-11T00:22:13.000Z | run.py | benjefferies/branch-protection-bot | 76b8a9d745a68c6a6e9b4bf745d4b32ad805e9a1 | [
"MIT"
] | 10 | 2019-11-15T21:13:21.000Z | 2021-09-18T17:54:59.000Z | run.py | benjefferies/branch-protection-bot | 76b8a9d745a68c6a6e9b4bf745d4b32ad805e9a1 | [
"MIT"
] | 19 | 2019-12-17T16:38:12.000Z | 2022-02-04T12:39:16.000Z | #!/usr/bin/env python
from distutils.util import strtobool
from time import sleep
import configargparse
from github3 import login
from github3.exceptions import NotFoundError, GitHubException
def toggle_enforce_admin(options):
    """Toggle (or explicitly set) the "include administrators" branch-protection flag.

    Resolves owner/repo from the parsed options (falling back to the
    GITHUB_REPOSITORY "owner/repo" form), reads the current enforce-admins
    status, then enables or disables it, retrying transient GitHub errors
    with exponential back-off.

    :param options: parsed configargparse namespace (access_token, owner,
        repo, branch, retries, github_repository, enforce_admins).
    :raises RuntimeError: when the repository coordinates are not set.
    """
    access_token = options.access_token
    owner, repo_name = options.owner, options.repo
    branch_name, retries = options.branch, int(options.retries)
    github_repository = options.github_repository
    if not owner and not repo_name and github_repository and "/" in github_repository:
        owner = github_repository.split("/")[0]
        repo_name = github_repository.split("/")[1]
    if owner == '' or repo_name == '':
        print('Owner and repo or GITHUB_REPOSITORY not set')
        raise RuntimeError('Owner and repo or GITHUB_REPOSITORY not set')
    # An explicit --enforce-admins value wins; empty/None means "toggle".
    enforce_admins = None
    if options.enforce_admins is not None and options.enforce_admins != '':
        enforce_admins = bool(strtobool(options.enforce_admins))
    # or using an access token
    print(f"Getting branch protection settings for {owner}/{repo_name}")
    protection = get_protection(access_token, branch_name, owner, repo_name)
    current = protection.enforce_admins.enabled
    print(f"Enforce admins branch protection enabled? {current}")
    # save the current status for use later on if desired
    print(f"::set-output name=initial_status::{current}")
    target = enforce_admins if enforce_admins is not None else not current
    print(f"Setting enforce admins branch protection to {target}")
    for attempt in range(retries):
        try:
            # NOTE(fix): the original duplicated this decision across four
            # branches and its retry/failure messages reported the flipped
            # *current* state instead of the requested target.
            if target:
                enable(protection)
            else:
                disable(protection)
            return
        except GitHubException:
            print(f"Failed to set enforce admins to {target}. Retrying...")
            sleep(attempt ** 2)  # Exponential back-off
    print(f"Failed to set enforce admins to {target}.")
    exit(1)
def get_protection(access_token, branch_name, owner, repo_name):
    """Return the branch-protection object for ``owner/repo_name`` @ ``branch_name``.

    :raises SystemExit: when login with the given token fails.
    :raises NotFoundError: when the repository does not exist (re-raised
        after logging a helpful URL).
    """
    gh = login(token=access_token)
    if gh is None:
        print("Could not login. Have you provided credentials?")
        # NOTE(fix): the original `raise exit(1)` was dead code -- exit()
        # itself raises SystemExit, so the surrounding `raise` never ran.
        raise SystemExit(1)
    try:
        repo = gh.repository(owner, repo_name)
    except NotFoundError:
        print(f"Could not find repo https://github.com/{owner}/{repo_name}")
        raise
    branch = repo.branch(branch_name)
    return branch.protection()
def enable(protection):
    """Turn on the enforce-admins flag on the given branch protection."""
    protection.enforce_admins.enable()
def disable(protection):
    """Turn off the enforce-admins flag on the given branch protection."""
    protection.enforce_admins.disable()
if __name__ == '__main__':
    # CLI entry point: every flag can also be supplied via the matching
    # environment variable (configargparse `env_var`), which is how the
    # GitHub Action invokes this script.
    p = configargparse.ArgParser()
    p.add_argument('-t', '--access-token', env_var='ACCESS_TOKEN', required=True, help='Github access token. https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line')
    p.add_argument('-o', '--owner', env_var='OWNER', required=False, default='', help='Owner. For example benjefferies for https://github.com/benjefferies/branch-protection-bot')
    p.add_argument('-r', '--repo', env_var='REPO', required=False, default='', help='Repo. For example branch-protection-bot for https://github.com/benjefferies/branch-protection-bot')
    p.add_argument('--github_repository', env_var='GITHUB_REPOSITORY', required=False, default='', help='Owner and repo. For example benjefferies/branch-protection-bot for https://github.com/benjefferies/branch-protection-bot')
    p.add_argument('-b', '--branch', env_var='BRANCH', default='master', help='Branch name')
    p.add_argument('--retries', env_var='RETRIES', default=5, help='Number of times to retry before exiting')
    p.add_argument('--enforce-admins', env_var='ENFORCE_ADMINS', default=None, help='Flag to explicitly enable or disable "Include administrators"')
    toggle_enforce_admin(p.parse_args())
| 48.654762 | 227 | 0.705652 |
6f3ccbf0cca722f19745f68a395bdff7fb1bf04e | 6,072 | py | Python | owlbot.py | poplindata/python-bigquery-sqlalchemy | f1a249e30050d1d92e4906b4676a6fa06faff1e2 | [
"MIT"
] | null | null | null | owlbot.py | poplindata/python-bigquery-sqlalchemy | f1a249e30050d1d92e4906b4676a6fa06faff1e2 | [
"MIT"
] | null | null | null | owlbot.py | poplindata/python-bigquery-sqlalchemy | f1a249e30050d1d92e4906b4676a6fa06faff1e2 | [
"MIT"
] | null | null | null | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import pathlib
import synthtool as s
from synthtool import gcp
from synthtool.languages import python
REPO_ROOT = pathlib.Path(__file__).parent.absolute()
common = gcp.CommonTemplates()
# ----------------------------------------------------------------------------
# Add templated files
# ----------------------------------------------------------------------------
extras = ["tests"]
extras_by_python = {
"3.8": ["tests", "alembic"],
"3.10": ["tests", "geography"],
}
templated_files = common.py_library(
unit_test_python_versions=["3.6", "3.7", "3.8", "3.9", "3.10"],
system_test_python_versions=["3.8", "3.10"],
cov_level=100,
unit_test_extras=extras,
unit_test_extras_by_python=extras_by_python,
system_test_extras=extras,
system_test_extras_by_python=extras_by_python,
)
s.move(templated_files, excludes=[
# sqlalchemy-bigquery was originally licensed MIT
"LICENSE",
"docs/multiprocessing.rst",
# exclude gh actions as credentials are needed for tests
".github/workflows",
])
# ----------------------------------------------------------------------------
# Fixup files
# ----------------------------------------------------------------------------
s.replace(
[".coveragerc"],
"google/cloud/__init__.py",
"sqlalchemy_bigquery/requirements.py",
)
s.replace(
["noxfile.py"],
r"[\"']google[\"']",
'"sqlalchemy_bigquery"',
)
s.replace(
["noxfile.py"], "--cov=google", "--cov=sqlalchemy_bigquery",
)
def place_before(path, text, *before_text, escape=None):
    """Insert *before_text* lines immediately before *text* in *path*.

    ``escape`` lists characters that must be backslash-escaped in the
    regex *pattern* only; the literal replacement keeps the raw text.
    """
    pattern = text
    if escape:
        for ch in escape:
            pattern = pattern.replace(ch, '\\' + ch)
    s.replace([path], pattern, "\n".join(before_text) + "\n" + text)
# Document why two system-test interpreters are pinned (sqlalchemy 1.3 vs 1.4).
place_before(
    "noxfile.py",
    "SYSTEM_TEST_PYTHON_VERSIONS=",
    "",
    "# We're using two Python versions to test with sqlalchemy 1.3 and 1.4.",
)
# Abort the whole nox run on the first failing session.
place_before(
    "noxfile.py",
    "nox.options.error_on_missing_interpreters = True",
    "nox.options.stop_on_first_error = True",
)
# Reorder default sessions: lint first; system/compliance (need credentials) last.
old_sessions = '''
    "unit",
    "system",
    "cover",
    "lint",
'''
new_sessions = '''
    "lint",
    "unit",
    "cover",
    "system",
    "compliance",
'''
s.replace( ["noxfile.py"], old_sessions, new_sessions)
# Maybe we can get rid of this when we don't need pytest-rerunfailures,
# which we won't need when BQ retries itself:
# https://github.com/googleapis/python-bigquery/pull/837
# Source text of the extra "compliance" nox session injected into noxfile.py.
compliance = '''
@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
def compliance(session):
    """Run the SQLAlchemy dialect-compliance system tests"""
    constraints_path = str(
        CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
    )
    system_test_folder_path = os.path.join("tests", "sqlalchemy_dialect_compliance")
    if os.environ.get("RUN_COMPLIANCE_TESTS", "true") == "false":
        session.skip("RUN_COMPLIANCE_TESTS is set to false, skipping")
    if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
        session.skip("Credentials must be set via environment variable")
    if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true":
        session.install("pyopenssl")
    if not os.path.exists(system_test_folder_path):
        session.skip("Compliance tests were not found")
    session.install("--pre", "grpcio")
    session.install(
        "mock",
        "pytest",
        "pytest-rerunfailures",
        "google-cloud-testutils",
        "-c",
        constraints_path,
    )
    if session.python == "3.8":
        extras = "[tests,alembic]"
    elif session.python == "3.10":
        extras = "[tests,geography]"
    else:
        extras = "[tests]"
    session.install("-e", f".{extras}", "-c", constraints_path)
    session.run(
        "py.test",
        "-vv",
        f"--junitxml=compliance_{session.python}_sponge_log.xml",
        "--reruns=3",
        "--reruns-delay=60",
        "--only-rerun=403 Exceeded rate limits",
        "--only-rerun=409 Already Exists",
        "--only-rerun=404 Not found",
        "--only-rerun=400 Cannot execute DML over a non-existent table",
        system_test_folder_path,
        *session.posargs,
    )
'''
# Insert the compliance session just before cover(); "()" must be escaped
# because the anchor text is used as a regex pattern.
place_before(
    "noxfile.py",
    "@nox.session(python=DEFAULT_PYTHON_VERSION)\n"
    "def cover(session):",
    compliance,
    escape="()",
)
# The docs session also needs geography deps to build API docs.
s.replace(["noxfile.py"], '"alabaster"', '"alabaster", "geoalchemy2", "shapely"')
# Add DB config for SQLAlchemy dialect test suite.
# https://github.com/googleapis/python-bigquery-sqlalchemy/issues/89
s.replace(
    ["setup.cfg"],
    "universal = 1\n",
    """universal = 1
[sqla_testing]
requirement_cls=sqlalchemy_bigquery.requirements:Requirements
profile_file=.sqlalchemy_dialect_compliance-profiles.txt
[tool:pytest]
addopts= --tb native -v -r fxX -p no:warnings
python_files=tests/*test_*.py
"""
)
# ----------------------------------------------------------------------------
# Samples templates
# ----------------------------------------------------------------------------
python.py_samples(skip_readmes=True)
# ----------------------------------------------------------------------------
# Final cleanup
# ----------------------------------------------------------------------------
# Re-format the repo and every generated samples noxfile with black.
s.shell.run(["nox", "-s", "blacken"], hide_output=False)
for noxfile in REPO_ROOT.glob("samples/**/noxfile.py"):
    s.shell.run(["nox", "-s", "blacken"], cwd=noxfile.parent, hide_output=False)
0baee2c49d1621a127b5dc5b86bf5bbfdb3b375e | 18,499 | py | Python | espnet/nets/pytorch_backend/tacotron2/decoder.py | kazu-hama/espnet | 4124efe3a474f54dab1c2f14463e56f1cdf818a4 | [
"Apache-2.0"
] | 16 | 2020-05-05T10:37:12.000Z | 2022-01-05T08:23:59.000Z | espnet/nets/pytorch_backend/tacotron2/decoder.py | kazu-hama/espnet | 4124efe3a474f54dab1c2f14463e56f1cdf818a4 | [
"Apache-2.0"
] | 7 | 2020-05-20T11:40:12.000Z | 2021-12-07T14:33:09.000Z | espnet/nets/pytorch_backend/tacotron2/decoder.py | kazu-hama/espnet | 4124efe3a474f54dab1c2f14463e56f1cdf818a4 | [
"Apache-2.0"
] | 2 | 2020-05-07T07:17:57.000Z | 2021-01-11T10:48:19.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Nagoya University (Tomoki Hayashi)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import six
import torch
import torch.nn.functional as F
from espnet.nets.pytorch_backend.rnn.attentions import AttForwardTA
def decoder_init(m):
if isinstance(m, torch.nn.Conv1d):
torch.nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain('tanh'))
class ZoneOutCell(torch.nn.Module):
"""ZoneOut Cell
This code is modified from https://github.com/eladhoffer/seq2seq.pytorch
:param torch.nn.Module cell: pytorch recurrent cell
:param float zoneout_rate: probability of zoneout
"""
def __init__(self, cell, zoneout_rate=0.1):
super(ZoneOutCell, self).__init__()
self.cell = cell
self.hidden_size = cell.hidden_size
self.zoneout_rate = zoneout_rate
if zoneout_rate > 1.0 or zoneout_rate < 0.0:
raise ValueError("zoneout probability must be in the range from 0.0 to 1.0.")
def forward(self, inputs, hidden):
next_hidden = self.cell(inputs, hidden)
next_hidden = self._zoneout(hidden, next_hidden, self.zoneout_rate)
return next_hidden
def _zoneout(self, h, next_h, prob):
# apply recursively
if isinstance(h, tuple):
num_h = len(h)
if not isinstance(prob, tuple):
prob = tuple([prob] * num_h)
return tuple([self._zoneout(h[i], next_h[i], prob[i]) for i in range(num_h)])
if self.training:
mask = h.new(*h.size()).bernoulli_(prob)
return mask * h + (1 - mask) * next_h
else:
return prob * h + (1 - prob) * next_h
class Prenet(torch.nn.Module):
"""Prenet for tacotron2 decoder
:param int idim: dimension of the inputs
:param int odim: dimension of the outputs
:param int n_layers: the number of prenet layers
:param int n_units: the number of prenet units
"""
def __init__(self, idim, n_layers=2, n_units=256, dropout_rate=0.5):
super(Prenet, self).__init__()
self.dropout_rate = dropout_rate
self.prenet = torch.nn.ModuleList()
for layer in six.moves.range(n_layers):
n_inputs = idim if layer == 0 else n_units
self.prenet += [torch.nn.Sequential(
torch.nn.Linear(n_inputs, n_units),
torch.nn.ReLU())]
def forward(self, x):
"""Prenet forward calculation
:param torch.Tensor x: batch of input tensor (B, idim)
:return: output tensor (B, odim)
:rtype: torch.Tensor
"""
for l in six.moves.range(len(self.prenet)):
x = F.dropout(self.prenet[l](x), self.dropout_rate)
return x
class Postnet(torch.nn.Module):
"""Postnet for tacotron2 decoder
:param int idim: dimension of the inputs
:param int odim: dimension of the outputs
:param int n_layers: the number of postnet layers
:param int n_filts: the number of postnet filter size
:param int n_chans: the number of postnet filter channels
:param bool use_batch_norm: whether to use batch normalization
:param float dropout_rate: dropout_rate rate
"""
def __init__(self, idim, odim, n_layers=5, n_chans=512, n_filts=5, dropout_rate=0.5, use_batch_norm=True):
super(Postnet, self).__init__()
self.postnet = torch.nn.ModuleList()
for layer in six.moves.range(n_layers - 1):
ichans = odim if layer == 0 else n_chans
ochans = odim if layer == n_layers - 1 else n_chans
if use_batch_norm:
self.postnet += [torch.nn.Sequential(
torch.nn.Conv1d(ichans, ochans, n_filts, stride=1,
padding=(n_filts - 1) // 2, bias=False),
torch.nn.BatchNorm1d(ochans),
torch.nn.Tanh(),
torch.nn.Dropout(dropout_rate))]
else:
self.postnet += [torch.nn.Sequential(
torch.nn.Conv1d(ichans, ochans, n_filts, stride=1,
padding=(n_filts - 1) // 2, bias=False),
torch.nn.Tanh(),
torch.nn.Dropout(dropout_rate))]
ichans = n_chans if n_layers != 1 else odim
if use_batch_norm:
self.postnet += [torch.nn.Sequential(
torch.nn.Conv1d(ichans, odim, n_filts, stride=1,
padding=(n_filts - 1) // 2, bias=False),
torch.nn.BatchNorm1d(odim),
torch.nn.Dropout(dropout_rate))]
else:
self.postnet += [torch.nn.Sequential(
torch.nn.Conv1d(ichans, odim, n_filts, stride=1,
padding=(n_filts - 1) // 2, bias=False),
torch.nn.Dropout(dropout_rate))]
def forward(self, xs):
"""Postnet forward calculation
:param torch.Tensor xs: batch of the sequences of padded input tensor (B, idim, Tmax)
:return: outputs without postnets (B, odim, Tmax)
:rtype: torch.Tensor
"""
for l in six.moves.range(len(self.postnet)):
xs = self.postnet[l](xs)
return xs
class Decoder(torch.nn.Module):
    """Decoder to predict the sequence of features.

    This is the decoder which generates the sequence of features from
    the sequence of the hidden states. The network structure is
    based on that of the tacotron2 in the field of speech synthesis.

    :param int idim: dimension of the inputs
    :param int odim: dimension of the outputs
    :param instance att: instance of attention class
    :param int dlayers: the number of decoder lstm layers
    :param int dunits: the number of decoder lstm units
    :param int prenet_layers: the number of prenet layers
    :param int prenet_units: the number of prenet units
    :param int postnet_layers: the number of postnet layers
    :param int postnet_chans: the number of postnet filter channels
    :param int postnet_filts: the number of postnet filter size
    :param function output_activation_fn: activation function for outputs
    :param bool cumulate_att_w: whether to cumulate previous attention weight
    :param bool use_batch_norm: whether to use batch normalization
    :param bool use_concate: whether to concatenate encoder embedding with decoder lstm outputs
    :param float dropout_rate: dropout rate
    :param float zoneout_rate: zoneout rate
    :param int reduction_factor: reduction factor (frames generated per decoder step)
    """

    def __init__(self, idim, odim, att,
                 dlayers=2,
                 dunits=1024,
                 prenet_layers=2,
                 prenet_units=256,
                 postnet_layers=5,
                 postnet_chans=512,
                 postnet_filts=5,
                 output_activation_fn=None,
                 cumulate_att_w=True,
                 use_batch_norm=True,
                 use_concate=True,
                 dropout_rate=0.5,
                 zoneout_rate=0.1,
                 reduction_factor=1):
        super(Decoder, self).__init__()
        # store the hyperparameters
        self.idim = idim
        self.odim = odim
        self.att = att
        self.output_activation_fn = output_activation_fn
        self.cumulate_att_w = cumulate_att_w
        self.use_concate = use_concate
        self.reduction_factor = reduction_factor
        # AttForwardTA additionally consumes the previous output frame
        if isinstance(self.att, AttForwardTA):
            self.use_att_extra_inputs = True
        else:
            self.use_att_extra_inputs = False
        # define lstm network
        prenet_units = prenet_units if prenet_layers != 0 else odim
        self.lstm = torch.nn.ModuleList()
        for layer in range(dlayers):
            iunits = idim + prenet_units if layer == 0 else dunits
            lstm = torch.nn.LSTMCell(iunits, dunits)
            if zoneout_rate > 0.0:
                lstm = ZoneOutCell(lstm, zoneout_rate)
            self.lstm += [lstm]
        # define prenet
        if prenet_layers > 0:
            self.prenet = Prenet(
                idim=odim,
                n_layers=prenet_layers,
                n_units=prenet_units,
                dropout_rate=dropout_rate)
        else:
            self.prenet = None
        # define postnet
        if postnet_layers > 0:
            self.postnet = Postnet(
                idim=idim,
                odim=odim,
                n_layers=postnet_layers,
                n_chans=postnet_chans,
                n_filts=postnet_filts,
                use_batch_norm=use_batch_norm,
                dropout_rate=dropout_rate)
        else:
            self.postnet = None
        # define projection layers (feat_out emits r frames per step)
        iunits = idim + dunits if use_concate else dunits
        self.feat_out = torch.nn.Linear(iunits, odim * reduction_factor, bias=False)
        self.prob_out = torch.nn.Linear(iunits, reduction_factor)
        # initialize
        self.apply(decoder_init)

    def zero_state(self, hs):
        """Return an all-zero hidden state shaped for the first lstm layer."""
        init_hs = hs.new_zeros(hs.size(0), self.lstm[0].hidden_size)
        return init_hs

    def forward(self, hs, hlens, ys):
        """Decoder forward computation.

        :param torch.Tensor hs: batch of the sequences of padded hidden states (B, Tmax, idim)
        :param list hlens: list of lengths of each input batch (B)
        :param torch.Tensor ys: batch of the sequences of padded target features (B, Lmax, odim)
        :return: outputs with postnets (B, Lmax, odim)
        :rtype: torch.Tensor
        :return: outputs without postnets (B, Lmax, odim)
        :rtype: torch.Tensor
        :return: stop logits (B, Lmax)
        :rtype: torch.Tensor
        :return: attention weights (B, Lmax, Tmax)
        :rtype: torch.Tensor
        """
        # thin out frames (B, Lmax, odim) -> (B, Lmax/r, odim)
        if self.reduction_factor > 1:
            ys = ys[:, self.reduction_factor - 1::self.reduction_factor]
        # length list should be list of int
        hlens = list(map(int, hlens))
        # initialize hidden states of decoder
        c_list = [self.zero_state(hs)]
        z_list = [self.zero_state(hs)]
        for _ in range(1, len(self.lstm)):
            c_list += [self.zero_state(hs)]
            z_list += [self.zero_state(hs)]
        prev_out = hs.new_zeros(hs.size(0), self.odim)
        # initialize attention
        prev_att_w = None
        self.att.reset()
        # loop for an output sequence
        outs, logits, att_ws = [], [], []
        for y in ys.transpose(0, 1):
            if self.use_att_extra_inputs:
                att_c, att_w = self.att(hs, hlens, z_list[0], prev_att_w, prev_out)
            else:
                att_c, att_w = self.att(hs, hlens, z_list[0], prev_att_w)
            prenet_out = self.prenet(prev_out) if self.prenet is not None else prev_out
            xs = torch.cat([att_c, prenet_out], dim=1)
            z_list[0], c_list[0] = self.lstm[0](xs, (z_list[0], c_list[0]))
            for i in range(1, len(self.lstm)):
                z_list[i], c_list[i] = self.lstm[i](
                    z_list[i - 1], (z_list[i], c_list[i]))
            zcs = torch.cat([z_list[-1], att_c], dim=1) if self.use_concate else z_list[-1]
            outs += [self.feat_out(zcs).view(hs.size(0), self.odim, -1)]
            logits += [self.prob_out(zcs)]
            att_ws += [att_w]
            prev_out = y  # teacher forcing
            if self.cumulate_att_w and prev_att_w is not None:
                prev_att_w = prev_att_w + att_w  # Note: error when use +=
            else:
                prev_att_w = att_w
        logits = torch.cat(logits, dim=1)  # (B, Lmax)
        before_outs = torch.cat(outs, dim=2)  # (B, odim, Lmax)
        att_ws = torch.stack(att_ws, dim=1)  # (B, Lmax, Tmax)
        if self.reduction_factor > 1:
            before_outs = before_outs.view(before_outs.size(0), self.odim, -1)  # (B, odim, Lmax)
        if self.postnet is not None:
            after_outs = before_outs + self.postnet(before_outs)  # (B, odim, Lmax)
        else:
            after_outs = before_outs
        before_outs = before_outs.transpose(2, 1)  # (B, Lmax, odim)
        after_outs = after_outs.transpose(2, 1)  # (B, Lmax, odim)
        # apply activation function for scaling
        if self.output_activation_fn is not None:
            before_outs = self.output_activation_fn(before_outs)
            after_outs = self.output_activation_fn(after_outs)
        return after_outs, before_outs, logits, att_ws

    def inference(self, h, threshold=0.5, minlenratio=0.0, maxlenratio=10.0):
        """Generate the sequence of features given the encoder hidden states.

        :param torch.Tensor h: the sequence of encoder states (T, C)
        :param float threshold: threshold in inference
        :param float minlenratio: minimum length ratio in inference
        :param float maxlenratio: maximum length ratio in inference
        :return: the sequence of features (L, D)
        :rtype: torch.Tensor
        :return: the sequence of stop probabilities (L)
        :rtype: torch.Tensor
        :return: the sequence of attention weight (L, T)
        :rtype: torch.Tensor
        """
        # setup
        assert len(h.size()) == 2
        hs = h.unsqueeze(0)
        ilens = [h.size(0)]
        maxlen = int(h.size(0) * maxlenratio)
        minlen = int(h.size(0) * minlenratio)
        # initialize hidden states of decoder
        c_list = [self.zero_state(hs)]
        z_list = [self.zero_state(hs)]
        for _ in range(1, len(self.lstm)):
            c_list += [self.zero_state(hs)]
            z_list += [self.zero_state(hs)]
        prev_out = hs.new_zeros(1, self.odim)
        # initialize attention
        prev_att_w = None
        self.att.reset()
        # loop for an output sequence
        idx = 0
        outs, att_ws, probs = [], [], []
        while True:
            # updated index
            idx += self.reduction_factor
            # decoder calculation
            if self.use_att_extra_inputs:
                att_c, att_w = self.att(hs, ilens, z_list[0], prev_att_w, prev_out)
            else:
                att_c, att_w = self.att(hs, ilens, z_list[0], prev_att_w)
            att_ws += [att_w]
            prenet_out = self.prenet(prev_out) if self.prenet is not None else prev_out
            xs = torch.cat([att_c, prenet_out], dim=1)
            z_list[0], c_list[0] = self.lstm[0](xs, (z_list[0], c_list[0]))
            for i in range(1, len(self.lstm)):
                z_list[i], c_list[i] = self.lstm[i](
                    z_list[i - 1], (z_list[i], c_list[i]))
            zcs = torch.cat([z_list[-1], att_c], dim=1) if self.use_concate else z_list[-1]
            outs += [self.feat_out(zcs).view(1, self.odim, -1)]  # [(1, odim, r), ...]
            probs += [torch.sigmoid(self.prob_out(zcs))[0]]  # [(r), ...]
            if self.output_activation_fn is not None:
                prev_out = self.output_activation_fn(outs[-1][:, :, -1])  # (1, odim)
            else:
                prev_out = outs[-1][:, :, -1]  # (1, odim)
            if self.cumulate_att_w and prev_att_w is not None:
                prev_att_w = prev_att_w + att_w  # Note: error when use +=
            else:
                prev_att_w = att_w
            # check whether to finish generation
            if int(sum(probs[-1] >= threshold)) > 0 or idx >= maxlen:
                # check minimum length
                if idx < minlen:
                    continue
                outs = torch.cat(outs, dim=2)  # (1, odim, L)
                if self.postnet is not None:
                    outs = outs + self.postnet(outs)  # (1, odim, L)
                outs = outs.transpose(2, 1).squeeze(0)  # (L, odim)
                probs = torch.cat(probs, dim=0)
                att_ws = torch.cat(att_ws, dim=0)
                break
        if self.output_activation_fn is not None:
            outs = self.output_activation_fn(outs)
        return outs, probs, att_ws

    def calculate_all_attentions(self, hs, hlens, ys):
        """Decoder attention calculation (teacher-forced, weights only).

        :param torch.Tensor hs: batch of the sequences of padded hidden states (B, Tmax, idim)
        :param list hlens: list of lengths of each input batch (B)
        :param torch.Tensor ys: batch of the sequences of padded target features (B, Lmax, odim)
        :return: attention weights (B, Lmax, Tmax)
        :rtype: torch.Tensor
        """
        # thin out frames (B, Lmax, odim) -> (B, Lmax/r, odim)
        if self.reduction_factor > 1:
            ys = ys[:, self.reduction_factor - 1::self.reduction_factor]
        # length list should be list of int
        hlens = list(map(int, hlens))
        # initialize hidden states of decoder
        c_list = [self.zero_state(hs)]
        z_list = [self.zero_state(hs)]
        for _ in range(1, len(self.lstm)):
            c_list += [self.zero_state(hs)]
            z_list += [self.zero_state(hs)]
        prev_out = hs.new_zeros(hs.size(0), self.odim)
        # initialize attention
        prev_att_w = None
        self.att.reset()
        # loop for an output sequence
        att_ws = []
        for y in ys.transpose(0, 1):
            if self.use_att_extra_inputs:
                att_c, att_w = self.att(hs, hlens, z_list[0], prev_att_w, prev_out)
            else:
                att_c, att_w = self.att(hs, hlens, z_list[0], prev_att_w)
            att_ws += [att_w]
            prenet_out = self.prenet(prev_out) if self.prenet is not None else prev_out
            xs = torch.cat([att_c, prenet_out], dim=1)
            z_list[0], c_list[0] = self.lstm[0](xs, (z_list[0], c_list[0]))
            for i in range(1, len(self.lstm)):
                z_list[i], c_list[i] = self.lstm[i](
                    z_list[i - 1], (z_list[i], c_list[i]))
            prev_out = y  # teacher forcing
            if self.cumulate_att_w and prev_att_w is not None:
                prev_att_w = prev_att_w + att_w  # Note: error when use +=
            else:
                prev_att_w = att_w
        att_ws = torch.stack(att_ws, dim=1)  # (B, Lmax, Tmax)
        return att_ws
| 40.041126 | 110 | 0.584248 |
584116d2c6671087d6f8ae629c58a69551f4868f | 17,441 | py | Python | deployment/TraceServer/entities/accounts/artifactsHelper.py | capitalch/trace | 530893ae212b1809b2785c4ba01c538e10ecc2fc | [
"Apache-2.0"
] | null | null | null | deployment/TraceServer/entities/accounts/artifactsHelper.py | capitalch/trace | 530893ae212b1809b2785c4ba01c538e10ecc2fc | [
"Apache-2.0"
] | 35 | 2020-05-22T03:09:09.000Z | 2021-04-22T18:10:46.000Z | dev/TraceServer/entities/accounts/artifactsHelper.py | capitalch/trace | 530893ae212b1809b2785c4ba01c538e10ecc2fc | [
"Apache-2.0"
] | null | null | null | import pandas as pd
import psycopg2
from psycopg2.extras import RealDictCursor
import numpy as np
import simplejson as json
from decimal import Decimal
from nested_lookup import nested_lookup
from dateutil.parser import parse
from .sql import allSqls
from postgres import execSql, execSqls, getPool
from postgresHelper import execSqlObject
from util import getErrorMessage, getschemaSearchPath
from app.link_client import connectToLinkServer, disconnectFromLinkServer, sendToPoint
# from app import socketio, store
def formatTree(rawData):
    """Convert a flat list of rows into the nested tree shape consumed by
    react.js: [{'key', 'data', 'children': [...]}, ...].

    Each row must have 'id' and 'parentId'; 'children' (a list of child ids)
    is optional. Roots are rows whose parentId is None.
    """
    def getNodeDict(tData):
        # Map id -> node wrapper; 'data' carries all row fields except 'children'.
        nodeDict = {}
        for item in tData:
            data = dict(item)
            # default avoids KeyError for rows without a 'children' key
            data.pop('children', None)
            nodeDict[item['id']] = {
                'key': item['id'],
                'data': data,
                'children': item['children'] if 'children' in item else None
            }
        return nodeDict

    # recursively replaces children id lists with the child node objects
    def processChildren(obj):
        if obj['children']:
            temp = []
            for childId in obj['children']:
                childNode = nodeDict[childId]
                processChildren(childNode)
                temp.append(childNode)
            obj['children'] = temp

    nodeDict = getNodeDict(rawData)
    ret = []
    for item in rawData:
        if item['parentId'] is None:
            node = nodeDict[item['id']]
            processChildren(node)
            ret.append(node)
    return ret
def accountsMasterGroupsLedgersHelper(dbName, buCode):
    """Fetch the accounts master (groups + ledgers) for a business unit.

    Returns the json result with 'accountsMaster' formatted as a react tree
    and 'allKeys' listing every account id (used for expand-all in the UI).
    """
    sqlString = allSqls['getJson_accountsMaster_groups_ledgers']
    allKeys = []
    jsonResult = execSql(dbName, isMultipleRows=False,
                         sqlString=sqlString, buCode=buCode)['jsonResult']
    # guard the loop: a null/empty result previously raised TypeError here
    if jsonResult['accountsMaster']:
        for item in jsonResult['accountsMaster']:
            allKeys.append(item['id'])
    jsonResult['accountsMaster'] = [] if jsonResult['accountsMaster'] is None else formatTree(
        jsonResult['accountsMaster'])
    jsonResult['allKeys'] = allKeys
    return jsonResult
def accountsOpBalHelper(dbName, buCode, finYearId, branchId):
    """Fetch opening balances as a react tree plus the flat list of ids."""
    sqlString = allSqls['get_opBal']
    result = execSql(dbName, isMultipleRows=True, args={
        'finYearId': finYearId, 'branchId': branchId}, sqlString=sqlString, buCode=buCode)
    allKeys = []
    # guard the loop: a None result previously raised TypeError here
    if result:
        for item in result:
            allKeys.append(item['id'])
    res = [] if result is None else formatTree(result)
    return {'opBal': res, 'allKeys': allKeys}
def accountsUpdateOpBalHelper(rows, dbName, buCode, finYearId, branchId):
    """Insert or update opening-balance rows in one batch.

    Rows with a null opId are inserted, the rest updated. A positive debit
    stores dc='D' with the debit amount; otherwise dc='C' with the credit.
    """
    sqlTupleListWithArgs = []
    for index, row in enumerate(rows):
        debit = Decimal(row['debit'])
        credit = Decimal(row['credit'])
        if debit > 0:
            dc, amount = 'D', debit
        else:
            dc, amount = 'C', credit
        if row['opId'] is None:
            entry = (index, allSqls['insert_opBal'], {
                'accId': row['accMId'],
                'finYearId': finYearId,
                'branchId': branchId,
                'amount': amount,
                'dc': dc,
            })
        else:
            entry = (index, allSqls['update_opBal'], {
                'id': row['opId'],
                'amount': amount,
                'dc': dc,
            })
        sqlTupleListWithArgs.append(entry)
    execSqls(dbName, sqlTupleListWithArgs, buCode)
def allCategoriesHelper(dbName, buCode):
    """Fetch product categories as a react tree plus all category ids."""
    sqlString = allSqls['getJson_categories']
    jsonResult = execSql(dbName, isMultipleRows=False,
                         sqlString=sqlString, buCode=buCode)['jsonResult']
    categories = jsonResult['categories']
    allKeys = [item['id'] for item in categories] if categories else []
    jsonResult['categories'] = [] if categories is None else formatTree(categories)
    jsonResult['allKeys'] = allKeys
    return jsonResult
def balanceSheetProfitLossHelper(dbName, buCode, finYearId, branchId):
    """Fetch balance-sheet / profit-and-loss rows as a react tree."""
    sqlString = allSqls['get_balanceSheetProfitLoss']
    jsonResult = execSql(dbName, sqlString, args={
        'finYearId': finYearId, 'branchId': branchId}, isMultipleRows=False, buCode=buCode)['jsonResult']
    allKeys = []
    rows = jsonResult['balanceSheetProfitLoss']
    if rows is not None:
        allKeys = [item['id'] for item in rows]
        jsonResult['balanceSheetProfitLoss'] = formatTree(rows)
    jsonResult['allKeys'] = allKeys
    return jsonResult
def doDebitsEqualCredits(sqlObject):
    """Return True when total debits equal total credits in the transaction
    rows embedded in ``sqlObject``.

    NOTE(review): amounts are accumulated as floats and compared with ``==``;
    rounding could make genuinely balanced vouchers fail — consider Decimal.
    """
    ret = False
    # nested_lookup collects every value stored under a 'data' key
    dat = nested_lookup('data', sqlObject)

    def isBalanced(tranRows):
        # rows carry dc = 'D' (debit) or 'C' (credit)
        allCredits = sum(float(r['amount']) for r in tranRows if r['dc'] == 'C')
        allDebits = sum(float(r['amount']) for r in tranRows if r['dc'] != 'C')
        return allDebits == allCredits

    if type(dat) is list and len(dat) > 0:
        # dat[1] is assumed to hold the transaction row list — presumes at
        # least two 'data' matches; TODO confirm against the payload shape
        ret = isBalanced(dat[1])
    return ret
def getSetAutoRefNo(sqlObject, cursor, no, buCode):
    """Compute the next auto reference number and optionally persist it.

    :param sqlObject: master/details payload; data[0] supplies branchId,
        tranTypeId and finYearId
    :param cursor: open psycopg2 cursor
    :param int no: 0 -> peek (returns last number + 1 without storing),
        otherwise the number to store via the getSet_lastVoucherNo function
    :param buCode: business unit code used to pick the schema search path
    :return: tuple (autoRefNo like 'branch\\tranType\\serial\\finYear', lastNo)
    """
    try:
        searchPath = getschemaSearchPath(buCode)
        lastNoSql = allSqls["getSet_lastVoucherNo"]
        # converted to string because the sql function args are smallint type. String type property converts to smallint
        tup = (str(sqlObject["data"][0]["branchId"]), str(
            sqlObject["data"][0]["tranTypeId"]), str(sqlObject["data"][0]["finYearId"]), no)
        cursor.execute(f'{searchPath}; {lastNoSql}', tup)
        ret = cursor.fetchone()
        branchCode, tranTypeCode, finYear, lastNo = ret[0].split(',')
        lastNo = int(lastNo)
        if no == 0:
            lastNo = lastNo + 1
        # '\\' yields a literal backslash; the old '\{' relied on an invalid
        # escape sequence that Python only tolerates with a warning
        autoRefNo = f'{branchCode}\\{tranTypeCode}\\{lastNo}\\{finYear}'
        return (autoRefNo, lastNo)
    except Exception as error:
        # NOTE(review): getErrorMessage() is called without args here but
        # with ('generic', error) elsewhere in this module — verify signature
        raise Exception(getErrorMessage()) from error
def genericUpdateMasterDetailsHelper(dbName, buCode, valueDict):
    """Persist a master/details payload inside one DB transaction.

    For inserts (no 'id' in data[0]) an autoRefNo of the form
    branch\\tranCode\\lastNo\\finYearId is generated and the per-branch last
    number is advanced. Returns the autoRefNo ('' for updates).
    """
    connection = None
    cursor = None  # so the finally block is safe if cursor() itself fails
    try:
        pool = getPool(dbName)
        connection = pool.getconn()
        cursor = connection.cursor()
        autoRefNo = ''
        # calculate autoRefNo only if id field is not there, insert operation
        if 'id' not in valueDict["data"][0]:
            branchId = valueDict["data"][0]["branchId"]
            tranTypeId = valueDict["data"][0]["tranTypeId"]
            finYearId = valueDict["data"][0]["finYearId"]
            sqlString = allSqls['getJson_branchCode_tranCode']
            res = execSql(dbName, sqlString, {'branchId': branchId, 'tranTypeId': tranTypeId},
                          isMultipleRows=False, buCode=buCode)
            tranCode = res['jsonResult']['tranCode']
            branchCode = res['jsonResult']['branchCode']
            sqlString = allSqls['get_lastNo']
            lastNo = execSql(dbName, sqlString, {'branchId': branchId, 'tranTypeId': tranTypeId,
                                                 'finYearId': finYearId}, isMultipleRows=False, buCode=buCode)['lastNo']
            if lastNo == 0:
                lastNo = 1
            # '\\' yields a literal backslash (the old '\{' was an invalid
            # escape sequence Python only tolerates with a warning)
            autoRefNo = f'{branchCode}\\{tranCode}\\{lastNo}\\{finYearId}'
            valueDict["data"][0]["autoRefNo"] = autoRefNo
        execSqlObject(valueDict, cursor, buCode=buCode)
        sqlString = allSqls['update_last_no']
        if 'id' not in valueDict["data"][0]:  # insert mode only
            execSql(dbName, sqlString, {'lastNo': lastNo + 1, 'branchId': branchId,
                                        'tranTypeId': tranTypeId, 'finYearId': finYearId}, isMultipleRows=False, buCode=buCode)
        connection.commit()
        return autoRefNo
    except (Exception, psycopg2.Error) as error:
        print("Error with PostgreSQL", error)
        if connection:
            connection.rollback()
        raise Exception(getErrorMessage('generic', error))
    finally:
        if cursor:
            cursor.close()
        if connection:
            # NOTE(review): the connection comes from a pool; pool.putconn()
            # may be intended instead of close() — verify against getPool
            connection.close()
def bulkGenericUpdateMasterDetailsHelper(dbName, buCode, valueDictList, pointId=None):
    """Bulk-import master/details payloads in one DB transaction.

    Generates an autoRefNo per row, skips rows whose userRefNo already
    exists, and streams per-row progress to the client through the link
    server (``sendToPoint``). Branch/tranType/finYear are taken from the
    first row and assumed identical for the whole batch.
    """
    try:
        connection = None
        pool = getPool(dbName)
        connection = pool.getconn()
        cursor = connection.cursor()
        autoRefNo = ''
        if(len(valueDictList) == 0):
            # nothing to do: tell the client we finished, then abort
            sendToPoint('COMPLETED', None, pointId)
            raise Exception('Empty list for export')
        branchId = valueDictList[0]["data"][0]["branchId"]
        tranTypeId = valueDictList[0]["data"][0]["tranTypeId"]
        finYearId = valueDictList[0]["data"][0]["finYearId"]
        searchPathSql = getschemaSearchPath(buCode)
        cursor.execute(searchPathSql)
        # ensure a lastNo counter row exists for this branch/type/year
        cursor.execute(allSqls['insert_last_no'], {
            'branchId': branchId,
            'tranTypeId': tranTypeId,
            'finYearId': finYearId})
        count = 0
        for valueDict in valueDictList:
            userRefNo = valueDict["data"][0]["userRefNo"]
            cursor.execute(allSqls['is_exist_user_ref_no'], {
                'branchId': branchId,
                'tranTypeId': tranTypeId,
                'finYearId': finYearId,
                'userRefNo': str(userRefNo)
            })
            result = cursor.fetchone()
            # userRefNo already is not there
            if((result is None) or (result[0] != 1)):
                cursor.execute(allSqls['get_auto_ref_no'], {
                    'branchId': branchId,
                    'tranTypeId': tranTypeId,
                    'finYearId': finYearId})
                result = cursor.fetchone()
                autoRefNo = result[0]
                lastNo = result[1]
                valueDict["data"][0]["autoRefNo"] = autoRefNo
                execSqlObject(valueDict, cursor, buCode=buCode)
                sqlString = allSqls['update_last_no']
                cursor.execute(sqlString, {'lastNo': lastNo + 1, 'branchId': branchId,
                                           'tranTypeId': tranTypeId, 'finYearId': finYearId})
                count = count+1
                # notify the client of per-row progress over the link server
                sendToPoint('SC-NOTIFY-ROWS-PROCESSED', count, pointId)
        sendToPoint('COMPLETED', count, pointId)
        connection.commit()
    except (Exception, psycopg2.Error) as error:
        print("Error with PostgreSQL", error)
        if connection:
            connection.rollback()
        raise Exception(getErrorMessage('generic', error))
    finally:
        if connection:
            cursor.close()
            connection.close()
        # disconnectFromLinkServer()
def searchProductHelper(dbName, buCode, valueDict):
    """Search products matching every term in ``valueDict`` (a list of
    user-typed search strings substituted into the get_search_product sql
    template's 'someArgs' slot).

    Terms are now bound as psycopg2 named parameters instead of being
    f-string-spliced into the sql text, closing a SQL-injection hole for
    user-provided search input.
    """
    template = allSqls['get_search_product']
    argDict = {}
    placeholders = []
    for index, item in enumerate(valueDict):
        key = f'arg{index}'
        argDict[key] = item
        # '%%' becomes a literal '%' once psycopg2 substitutes parameters
        placeholders.append(f" '%%' || %({key})s || '%%' ")
    sqlString = template.replace('someArgs', ','.join(placeholders))
    result = execSql(dbName, sqlString, args=argDict,
                     isMultipleRows=True, buCode=buCode)
    return result
def transferClosingBalancesHelper(dbName, buCode, finYearId, branchId):
    """Copy closing balances of ``finYearId`` into the opening balances of
    the following financial year for the given branch. Returns True."""
    sqlString = allSqls['transfer_closingBalances']
    execSql(dbName, sqlString,
            args={
                'finYearId': finYearId,
                'branchId': branchId,
                'nextFinYearId': int(finYearId) + 1,
            },
            isMultipleRows=False, buCode=buCode)
    return True
def trialBalanceHelper(dbName, buCode, finYearId, branchId):
    """Fetch the trial balance as a react tree; empty result when the
    financial year or branch is not selected."""
    if (finYearId is None) or (branchId is None):
        return {'trialBal': [], 'allKeys': []}
    rows = execSql(dbName, allSqls['get_trialBalance'], args={
        'finYearId': finYearId, 'branchId': branchId}, isMultipleRows=True, buCode=buCode)
    allKeys = [row['id'] for row in rows]
    return {'trialBal': formatTree(rows), 'allKeys': allKeys}
# def searchProductHelper1(dbName, buCode, valueDict):
# def createSql():
# template = allSqls['get_searchProduct']
# argDict = {}
# temp = ''
# for index, item in enumerate(valueDict): # valueDict is a list
# sqlX = template.replace('arg', f'arg{str(index)}')
# temp = f'{temp} union {sqlX}'
# argDict['arg'+str(index)] = item
# # remove first occurence of ' union '
# sqlString = temp.replace(' union ', '', 1)
# return sqlString, argDict
# sqlString, argDict = createSql()
# result = execSql(dbName, sqlString, args=argDict,
# isMultipleRows=True, buCode=buCode)
# return result
# 0 means getting a tuple with autoRefNo and corresponding lastNo in tranCounter table
# autoRefNoTup = getSetAutoRefNo(valueDict, cursor, 0, buCode)
# autoRefNo = autoRefNoTup[0]
# lastNo = autoRefNoTup[1]
# to set last no in TranCounter table
# autoRefNoTup = getSetAutoRefNo(valueDict, cursor, lastNo, buCode)
# autoRefNo = autoRefNoTup[0]
# Don't delete dataframe implementation
# def format_tranHeadersWithDetails_data(data):
# if data == []:
# return data
# df = pd.DataFrame(data)
# df = df.replace({None: ''}) # None values create problem while indexing
# pivot = pd.pivot_table(df, index=["tranHeaderId", "tranDetailsId", "autoRefNo", "tranDate", "userRefNo", "tags", "headerRemarks",
# "tranTypeId", "accName", "lineRefNo", "lineRemarks", "instrNo"], columns=["dc"],
# values="amount", aggfunc=np.sum, fill_value=0)
# pivot.rename(
# columns={
# 'D': 'debits',
# 'C': 'credits'
# },
# inplace=True
# )
# pivot.sort_values(by=['tranHeaderId', 'tranDetailsId'],
# axis=0, ascending=[False, True], inplace=True)
# j = pivot.to_json(orient='table')
# jsonObj = json.loads(j)
# dt = jsonObj["data"]
# return dt
# def tranHeadersWithDetails_helper(tranTypeId, noOfRecords):
# sql = allSqls['tranHeadersWithDetails']
# # args should be tuple
# data = execSql(DB_NAME, sql, (tranTypeId, noOfRecords,))
# for x in data:
# x['tranDate'] = str(x['tranDate']) # ISO formatted date
# dt = format_tranHeadersWithDetails_data(data)
# return dt
# def tranHeaderAndDetails_helper(id):
# sql1 = allSqls['tranHeader']
# sql2 = allSqls['tranDetails']
# dataset = execSqls(
# DB_NAME, [('sql1', sql1, (id,)), ('sql2', sql2, (id,))]
# )
# data1 = dataset['sql1']
# data2 = dataset['sql2']
# return {"tranHeader": data1, "tranDetails": data2}
# def format_trial_balance_data(data):
# if data == []:
# return []
# df = pd.DataFrame(data)
# pivot = pd.pivot_table(df, index=["id", "accCode", "accName", "accType", "accLeaf", "parentId"], columns=["dc"],
# values="amount", aggfunc=np.sum, fill_value=0)
# if 'O' not in pivot:
# # shape[0] is no of rows in dataframe
# pivot['O'] = [Decimal(0.00)] * pivot.shape[0]
# if 'D' not in pivot:
# pivot['D'] = [Decimal(0.00)] * pivot.shape[0]
# if 'C' not in pivot:
# pivot['C'] = [Decimal(0.00)] * pivot.shape[0]
# pivot.rename(
# columns={
# 'O': 'opening',
# 'D': 'debits',
# 'C': 'credits'
# },
# inplace=True
# )
# pivot['closing'] = pivot['opening'] + pivot['debits'] - pivot['credits']
# pivot.loc['total', 'closing'] = pivot['closing'].sum()
# pivot.loc['total', 'debits'] = pivot['debits'].sum()
# pivot.loc['total', 'credits'] = pivot['credits'].sum()
# pivot.loc['total', 'opening'] = pivot['opening'].sum()
# pivot['closing_dc'] = pivot['closing'].apply(
# lambda x: 'Dr' if x >= 0 else 'Cr')
# pivot['closing'] = pivot['closing'].apply(
# lambda x: x if x >= 0 else -x) # remove minus sign
# pivot['opening_dc'] = pivot['opening'].apply(
# lambda x: 'Dr' if x >= 0 else 'Cr')
# pivot['opening'] = pivot['opening'].apply(
# lambda x: x if x >= 0 else -x) # remove minus sign
# pivot = pivot.reindex(
# columns=['opening', 'opening_dc', 'debits', 'credits', 'closing', 'closing_dc'])
# # print(pivot)
# j = pivot.to_json(orient='table')
# jsonObj = json.loads(j)
# dt = jsonObj["data"]
# return dt
# def trial_balance_helper():
# sql = allSqls['trial_balance']
# data = execSql(DB_NAME, sql)
# dt = format_trial_balance_data(data) # trial_format
# return dt
# def subledgers_helper():
# sql = allSqls['subledgers']
# data = execSql(DB_NAME, sql)
# dt = format_trial_balance_data(data)
# return dt
# def trial_balance_subledgers_helper():
# sql1 = allSqls['trial_balance']
# sql2 = allSqls['subledgers']
# dataset = execSqls(
# DB_NAME, [('sql1', sql1, None), ('sql2', sql2, None)])
# data1 = dataset["sql1"]
# data2 = dataset["sql2"]
# dt1 = format_trial_balance_data(data1)
# dt2 = format_trial_balance_data(data2)
# return {'trial_balance': dt1, 'subledgers': dt2}
| 36.640756 | 135 | 0.593257 |
ba0d63c12011937bebc5e1995f7cd5f7df23c083 | 11,475 | py | Python | python/ray/rllib/env/async_vector_env.py | zhiyun/ray | 99d0d96aeff5a6c0756e63562b2d61606c75c536 | [
"Apache-2.0"
] | null | null | null | python/ray/rllib/env/async_vector_env.py | zhiyun/ray | 99d0d96aeff5a6c0756e63562b2d61606c75c536 | [
"Apache-2.0"
] | null | null | null | python/ray/rllib/env/async_vector_env.py | zhiyun/ray | 99d0d96aeff5a6c0756e63562b2d61606c75c536 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ray.rllib.env.serving_env import ServingEnv
from ray.rllib.env.vector_env import VectorEnv
from ray.rllib.env.multi_agent_env import MultiAgentEnv
class AsyncVectorEnv(object):
    """The lowest-level env interface used by RLlib for sampling.

    Models multiple agents executing asynchronously in multiple
    environments. poll() returns observations from agents that are ready
    to act, keyed first by environment id and then by agent id, and
    send_actions() sends actions back for exactly those agents.

    Every other env type RLlib supports (gym.Env, VectorEnv,
    MultiAgentEnv, ServingEnv) is adapted to this interface internally in
    PolicyEvaluator via wrap_async().
    """

    @staticmethod
    def wrap_async(env, make_env=None, num_envs=1):
        """Adapt any supported env type to the async interface.

        Arguments:
            env: the env to wrap; returned unchanged if it is already an
                AsyncVectorEnv.
            make_env (func|None): factory for extra env copies when
                vectorizing.
            num_envs (int): total number of (sub-)envs to run.
        """
        if isinstance(env, AsyncVectorEnv):
            return env
        if isinstance(env, MultiAgentEnv):
            adapted = _MultiAgentEnvToAsync(
                make_env=make_env, existing_envs=[env], num_envs=num_envs)
        elif isinstance(env, ServingEnv):
            if num_envs != 1:
                raise ValueError(
                    "ServingEnv does not currently support num_envs > 1.")
            adapted = _ServingEnvToAsync(env)
        elif isinstance(env, VectorEnv):
            adapted = _VectorEnvToAsync(env)
        else:
            # Plain (gym-style) env: vectorize it first, then adapt.
            adapted = _VectorEnvToAsync(VectorEnv.wrap(
                make_env=make_env, existing_envs=[env], num_envs=num_envs))
        assert isinstance(adapted, AsyncVectorEnv)
        return adapted

    def poll(self):
        """Return observations from ready agents.

        All returns are two-level dicts mapping env_id to a dict of
        agent_id to values. The number of agents and envs can vary over
        time.

        Returns:
            obs (dict): new observations per ready agent.
            rewards (dict): reward values per ready agent; None right
                after an episode starts.
            dones (dict): done flags per ready agent; the special key
                "__all__" indicates env termination.
            infos (dict): info values per ready agent.
            off_policy_actions (dict): actions agents already took
                off-policy; no send_actions() entry is needed for those
                agents.
        """
        raise NotImplementedError

    def send_actions(self, action_dict):
        """Send actions back to the running agents in this env.

        Actions should be sent for each ready agent that returned
        observations in the previous poll() call.

        Arguments:
            action_dict (dict): action values keyed by env_id and
                agent_id.
        """
        raise NotImplementedError

    def try_reset(self, env_id):
        """Attempt a synchronous reset of the env with the given id.

        Returns:
            obs (dict|None): the reset observation, or None if this env
                type does not support synchronous reset.
        """
        return None

    def get_unwrapped(self):
        """Return a reference to some underlying gym env, if any.

        Returns:
            env (gym.Env|None): underlying gym env or None.
        """
        return None
# Fixed agent identifier when there is only the single agent in the env
_DUMMY_AGENT_ID = "single_agent"


def _with_dummy_agent_id(env_id_to_values, dummy_id=_DUMMY_AGENT_ID):
    """Nest each per-env value under a single (dummy) agent id."""
    wrapped = {}
    for env_id, value in env_id_to_values.items():
        wrapped[env_id] = {dummy_id: value}
    return wrapped
class _ServingEnvToAsync(AsyncVectorEnv):
    """Internal adapter of ServingEnv to AsyncVectorEnv."""

    def __init__(self, serving_env):
        # Holds the serving env and starts its background thread.
        self.serving_env = serving_env
        serving_env.start()

    def poll(self):
        """Block until at least one episode has produced data, then return it."""
        with self.serving_env._results_avail_condition:
            results = self._poll()
            # Wait on the condition variable until some episode reports data.
            while len(results[0]) == 0:
                self.serving_env._results_avail_condition.wait()
                results = self._poll()
                # NOTE(review): assumes serving_env exposes a thread-style
                # isAlive(); a dead serving thread would otherwise hang here.
                if not self.serving_env.isAlive():
                    raise Exception("Serving thread has stopped.")
        # Guard against episode leaks: more concurrent results than the
        # configured maximum means episodes were never cleaned up.
        limit = self.serving_env._max_concurrent_episodes
        assert len(results[0]) < limit, \
            ("Too many concurrent episodes, were some leaked? This ServingEnv "
             "was created with max_concurrent={}".format(limit))
        return results

    def _poll(self):
        """Collect pending data from every live episode (non-blocking).

        Returns the same 5-tuple of two-level dicts as poll(), wrapping each
        per-episode value under the dummy single-agent id.
        """
        all_obs, all_rewards, all_dones, all_infos = {}, {}, {}, {}
        off_policy_actions = {}
        # Iterate over a copy so finished episodes can be deleted in-loop.
        for eid, episode in self.serving_env._episodes.copy().items():
            data = episode.get_data()
            if episode.cur_done:
                del self.serving_env._episodes[eid]
            if data:
                all_obs[eid] = data["obs"]
                all_rewards[eid] = data["reward"]
                all_dones[eid] = data["done"]
                all_infos[eid] = data["info"]
                if "off_policy_action" in data:
                    off_policy_actions[eid] = data["off_policy_action"]
        return _with_dummy_agent_id(all_obs), \
            _with_dummy_agent_id(all_rewards), \
            _with_dummy_agent_id(all_dones, "__all__"), \
            _with_dummy_agent_id(all_infos), \
            _with_dummy_agent_id(off_policy_actions)

    def send_actions(self, action_dict):
        # Forward each action to the matching episode's queue; the serving
        # thread consumes it from there.
        for eid, action in action_dict.items():
            self.serving_env._episodes[eid].action_queue.put(
                action[_DUMMY_AGENT_ID])
class _VectorEnvToAsync(AsyncVectorEnv):
    """Internal adapter of VectorEnv to AsyncVectorEnv.

    We assume the caller will always send the full vector of actions in each
    call to send_actions(), and that they call reset_at() on all completed
    environments before calling send_actions().
    """

    def __init__(self, vector_env):
        self.vector_env = vector_env
        self.num_envs = vector_env.num_envs
        # Buffers holding the most recent step results; rewards/infos start
        # as None (no step taken yet) and dones as False.
        self.new_obs = self.vector_env.vector_reset()
        self.cur_rewards = [None for _ in range(self.num_envs)]
        self.cur_dones = [False for _ in range(self.num_envs)]
        self.cur_infos = [None for _ in range(self.num_envs)]

    def poll(self):
        """Return the buffered step results keyed by sub-env index.

        The buffers are cleared afterwards, so a second poll() before the
        next send_actions() yields empty dicts.
        """
        new_obs = dict(enumerate(self.new_obs))
        rewards = dict(enumerate(self.cur_rewards))
        dones = dict(enumerate(self.cur_dones))
        infos = dict(enumerate(self.cur_infos))
        self.new_obs = []
        self.cur_rewards = []
        self.cur_dones = []
        self.cur_infos = []
        return _with_dummy_agent_id(new_obs), \
            _with_dummy_agent_id(rewards), \
            _with_dummy_agent_id(dones, "__all__"), \
            _with_dummy_agent_id(infos), {}

    def send_actions(self, action_dict):
        # Unwrap the dummy agent id and step all sub-envs at once, refilling
        # the buffers consumed by poll(). Expects an action for every index.
        action_vector = [None] * self.num_envs
        for i in range(self.num_envs):
            action_vector[i] = action_dict[i][_DUMMY_AGENT_ID]
        self.new_obs, self.cur_rewards, self.cur_dones, self.cur_infos = \
            self.vector_env.vector_step(action_vector)

    def try_reset(self, env_id):
        # VectorEnv supports synchronous per-index reset.
        return {_DUMMY_AGENT_ID: self.vector_env.reset_at(env_id)}

    def get_unwrapped(self):
        return self.vector_env.get_unwrapped()
class _MultiAgentEnvToAsync(AsyncVectorEnv):
    """Internal adapter of MultiAgentEnv to AsyncVectorEnv.

    This also supports vectorization if num_envs > 1.
    """

    def __init__(self, make_env, existing_envs, num_envs):
        """Wrap existing multi-agent envs.

        Arguments:
            make_env (func|None): Factory that produces a new multiagent env.
                Must be defined if the number of existing envs is less than
                num_envs.
            existing_envs (list): List of existing multiagent envs.
            num_envs (int): Desired num multiagent envs to keep total.
        """
        self.make_env = make_env
        self.envs = existing_envs
        self.num_envs = num_envs
        # Indices of sub-envs whose episode has terminated ("__all__" done).
        self.dones = set()
        # Top up with freshly-created envs until num_envs is reached.
        while len(self.envs) < self.num_envs:
            self.envs.append(self.make_env())
        for env in self.envs:
            assert isinstance(env, MultiAgentEnv)
        # Per-env buffer holding the most recent step results.
        self.env_states = [_MultiAgentEnvState(env) for env in self.envs]

    def poll(self):
        # Gather (and clear) each sub-env's buffered results, keyed by
        # sub-env index. No off-policy actions for this env type.
        obs, rewards, dones, infos = {}, {}, {}, {}
        for i, env_state in enumerate(self.env_states):
            obs[i], rewards[i], dones[i], infos[i] = env_state.poll()
        return obs, rewards, dones, infos, {}

    def send_actions(self, action_dict):
        # Step each addressed sub-env and buffer the results for the next
        # poll(). Stepping a finished env is a caller error.
        for env_id, agent_dict in action_dict.items():
            if env_id in self.dones:
                raise ValueError("Env {} is already done".format(env_id))
            env = self.envs[env_id]
            obs, rewards, dones, infos = env.step(agent_dict)
            if dones["__all__"]:
                self.dones.add(env_id)
            self.env_states[env_id].observe(obs, rewards, dones, infos)

    def try_reset(self, env_id):
        obs = self.env_states[env_id].reset()
        # NOTE(review): assumes env_id is in self.dones whenever reset()
        # returns an observation — set.remove() raises KeyError otherwise.
        if obs is not None:
            self.dones.remove(env_id)
        return obs
class _MultiAgentEnvState(object):
    """Buffers the most recent step results of one MultiAgentEnv."""

    def __init__(self, env):
        assert isinstance(env, MultiAgentEnv)
        self.env = env
        self.reset()

    def poll(self):
        """Return the buffered (obs, rewards, dones, infos) and clear them."""
        snapshot = (self.last_obs, self.last_rewards, self.last_dones,
                    self.last_infos)
        # After a poll the buffers stay empty until the next observe() or
        # reset(); dones keeps only the episode-level "__all__" flag.
        self.last_obs = {}
        self.last_rewards = {}
        self.last_dones = {"__all__": False}
        self.last_infos = {}
        return snapshot

    def observe(self, obs, rewards, dones, infos):
        """Store the results of the latest env.step() call."""
        self.last_obs = obs
        self.last_rewards = rewards
        self.last_dones = dones
        self.last_infos = infos

    def reset(self):
        """Reset the env and re-seed the buffers from its first observation."""
        self.last_obs = self.env.reset()
        agent_ids = list(self.last_obs.keys())
        self.last_rewards = dict.fromkeys(agent_ids)        # values are None
        self.last_dones = dict.fromkeys(agent_ids, False)
        self.last_infos = {agent_id: {} for agent_id in agent_ids}
        self.last_dones["__all__"] = False
        return self.last_obs
| 36.313291 | 79 | 0.600523 |
a62424529f75da3f66db04f77563701ccbd071e7 | 7,289 | py | Python | burton/parser/strings.py | Extensis/Burton | a948f045a021f468ef34d6e8e6b8a5caaa132e27 | [
"MIT"
] | 2 | 2018-01-09T23:32:35.000Z | 2018-08-10T23:48:33.000Z | burton/parser/strings.py | Extensis/Burton | a948f045a021f468ef34d6e8e6b8a5caaa132e27 | [
"MIT"
] | null | null | null | burton/parser/strings.py | Extensis/Burton | a948f045a021f468ef34d6e8e6b8a5caaa132e27 | [
"MIT"
] | 5 | 2017-03-23T16:49:46.000Z | 2022-02-18T12:06:59.000Z | import codecs
import os
import re
import subprocess
import types
import unicodedata
import burton
from base import Base
from util import detect_encoding
class Strings(Base):
    """Parser/writer for ``.strings``-style localization files.

    Handles ``"key" = "value";`` entries with C-style ``//`` and ``/* */``
    comments, and can write a mapping back out in the same format.
    (Python 2 code: relies on ``types.UnicodeType`` and list-returning
    ``dict.keys()``.)
    """

    def __init__(self):
        Base.__init__(self)
        # Matches keys of the shape "xxx-xx-xxx.text|title|placeholder|
        # normalTitle"; such keys carry no meaning on their own, so the
        # extraction callbacks below substitute the value for the key.
        self.baseLocalizationRegex = re.compile(
            '\w{3}-\w{2}-\w{3}\.(placeholder|text|title|normalTitle)'
        )

    def extract_strings_from_filename(self, filename):
        """Return the set of (unquoted) keys found in *filename*."""
        return_values = set([])

        def _add_key(key, value):
            # Strip surrounding quotes from a quoted key.
            if key and key[0] == '"':
                key = key[1:-1]
            if self.baseLocalizationRegex.match(key):
                # Base-localization key: use the (unquoted) value instead.
                key = value
                if key and key[0] == '"':
                    key = key[1:-1]
            return_values.add(key)

        self._parse(filename, _add_key)
        return return_values

    def extract_mapping_from_filename(self, filename, strip_keys = True):
        """Return a burton.StringMapping of key -> value for *filename*.

        If *strip_keys* is True, surrounding quotes are removed from keys.
        """
        string_mapping = burton.StringMapping(filename = filename)

        def _add_mapping(key, value):
            if strip_keys and key and key[0] == '"':
                key = key[1:-1]
            if value and value[0] == '"':
                value = value[1:-1]
            if self.baseLocalizationRegex.match(key):
                key = value
            string_mapping.add_mapping(key, value)

        self._parse(filename, _add_mapping)
        return string_mapping

    def _parse(self, filename, func):
        """Scan *filename* and invoke ``func(key, value)`` for each entry.

        Strips comments first, then joins continued/multi-line entries and
        runs a character-level scan that understands quoted strings,
        unquoted key tokens, and backslash escapes.
        """
        file, encoding = self._open_file(filename)
        contents = self._strip_comments(file.read())
        in_string_file_info = False  # NOTE(review): assigned but never read
        incomplete_line = None  # carries a partial entry over to the next line
        for line in re.split("\r|\n", contents):
            key = None
            value = None
            line = line.rstrip("\r\n")
            assert type(line) is types.UnicodeType  # Python 2 unicode expected
            if incomplete_line is not None:
                # Join with the pending partial line: a trailing backslash is
                # an explicit continuation; otherwise keep an escaped newline.
                if incomplete_line.strip().endswith("\\"):
                    line = incomplete_line.strip().rstrip("\\") + line
                else:
                    line = incomplete_line + "\\n" + line
                incomplete_line = None
            if line.strip().endswith("\\"):
                incomplete_line = line
            elif line and not line.strip().endswith(";"):
                # Entry not terminated by ';' yet; wait for more lines.
                incomplete_line = line
            else:
                # Character-level scan of one complete entry.
                in_string = False     # inside a quoted string
                in_variable = False   # inside an unquoted key token
                escaping = 0          # countdown after a backslash escape
                current_token = ""
                for c in line:
                    if in_string or in_variable:
                        current_token = current_token + c
                    if c == '"':
                        if escaping == 0:
                            in_string = not in_string
                            if in_variable:
                                current_token = ""
                                in_variable = False
                            if not in_string:
                                # Closing quote: the first completed string is
                                # the key, the second is the value.
                                if current_token[-1] != '"':
                                    current_token += '"'
                                if key is None:
                                    key = current_token
                                else:
                                    value = current_token
                                current_token = ""
                            else:
                                current_token = '"'
                    elif c == ";":
                        if not in_string:
                            # Entry with a key but no value: value = key.
                            if key is not None and value is None:
                                value = key
                    elif c == "\\" and escaping == 0:
                        # Set to 2 so the flag is still nonzero while the
                        # escaped character itself is processed.
                        escaping = 2
                    elif self._is_unicode_whitespace(c):
                        if in_variable:
                            # Whitespace terminates an unquoted key token.
                            if key is None:
                                key = current_token[:-1]
                            current_token = ""
                            in_variable = False
                    elif not in_variable and not in_string and key is None:
                        # First bare character starts an unquoted key token.
                        current_token = c
                        in_variable = True
                    if escaping > 0:
                        escaping -= 1
                if key is not None and value is not None:
                    key = key      # (no-op)
                    value = value  # (no-op)
                    func(key, value)
        file.close()

    def write_mapping(self, file, mapping):
        """Write *mapping* to *file* as ``key = "value";`` lines, key-sorted."""
        sorted_keys = mapping.keys()
        sorted_keys.sort()  # Python 2: keys() returns a sortable list
        for key in sorted_keys:
            if key is not None and mapping[key] is not None:
                value = self._encode(mapping[key])
                quote_key = False
                if key and key[0] == '"':
                    # Strip quotes before encoding, re-quote afterwards.
                    quote_key = True
                    key = key[1:-1]
                key = self._encode(key)
                if quote_key:
                    key = '"' + key + '"'
                file.write(key + ' = "' + value + '";\n')

    def _open_file(self, filename):
        """Open *filename* as unicode, returning (file_object, encoding)."""
        encoding = detect_encoding(open(filename, "r"))
        # Strings files should always be unicode of some sort.
        # Sometimes chardet guesses UTF-8 wrong.
        if encoding is None or not encoding.lower().startswith("utf"):
            encoding = "utf-8"
        return codecs.open(filename, "r", encoding), encoding

    def _strip_comments(self, contents):
        """Remove ``//`` and ``/* */`` comments occurring outside strings."""
        output = u""
        in_string = False
        in_comment = False
        in_slash = False      # saw '/' that may start a comment
        in_asterisk = False   # saw '*' that may end a block comment
        in_single_line_comment = False
        in_multiline_comment = False
        for c in contents:
            if c == '"' and not in_comment:
                in_string = not in_string
            elif c == '/' and not in_string:
                if in_slash:
                    # '//' starts a single-line comment.
                    in_single_line_comment = True
                    in_comment = True
                    in_slash = False
                elif in_asterisk:
                    # '*/' terminates a block comment.
                    in_multiline_comment = False
                    in_comment = False
                else:
                    in_slash = True
            elif c == '*' and not in_string:
                if in_comment:
                    in_asterisk = True
                elif in_slash:
                    # '/*' starts a block comment.
                    in_multiline_comment = True
                    in_comment = True
                    in_slash = False
            elif c in "\r\n" and in_single_line_comment:
                # Newline ends a '//' comment.
                in_single_line_comment = False
                in_comment = False
            if not in_comment:
                if in_slash and c != '/':
                    # Lone '/' that did not start a comment: emit it.
                    output += '/'
                    in_slash = False
                if in_asterisk:
                    in_asterisk = False
                elif not in_slash:
                    output += c
        return output

    def _encode(self, str):
        """Escape *str* for output: quotes plus \\UXXXX-style unicode escapes."""
        return str.encode("unicode-escape")\
            .replace("\"", "\\\"")\
            .replace("\\x", "\\U00")\
            .replace("\\u", "\\U")\
            .replace("\\\\", "\\") # Reverse earlier double-escaping

    def _is_unicode_whitespace(self, c):
        """True if *c* is a Unicode space separator or control character."""
        category = unicodedata.category(c)
        return category == "Zs" or category == "Cc"
| 31.691304 | 75 | 0.443545 |
2a1ccb574c107abdd89878fee560195bd107f900 | 806 | py | Python | assethub/manage.py | portnov/assethub | 0f2f9d5e190b2a633a794514b54d8b408c9ed2a6 | [
"BSD-3-Clause"
] | 3 | 2017-01-07T16:56:53.000Z | 2019-10-21T00:39:47.000Z | assethub/manage.py | portnov/assethub | 0f2f9d5e190b2a633a794514b54d8b408c9ed2a6 | [
"BSD-3-Clause"
] | 11 | 2017-01-08T06:10:34.000Z | 2017-01-26T13:09:32.000Z | assethub/manage.py | portnov/assethub | 0f2f9d5e190b2a633a794514b54d8b408c9ed2a6 | [
"BSD-3-Clause"
] | 1 | 2019-05-16T03:58:40.000Z | 2019-05-16T03:58:40.000Z | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings module before any Django
    # machinery is imported.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "assethub.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    # Dispatch to Django's command-line interface with the given argv.
    execute_from_command_line(sys.argv)
| 35.043478 | 77 | 0.64268 |
e32933371a973219a5efec54862a476b472d7c2c | 13,056 | py | Python | models/attendgru.py | KennardWang/funcom_reproduction | ae1e13a0bd3c7a50b888ed98191ec77b7a97ee21 | [
"MIT"
] | null | null | null | models/attendgru.py | KennardWang/funcom_reproduction | ae1e13a0bd3c7a50b888ed98191ec77b7a97ee21 | [
"MIT"
] | null | null | null | models/attendgru.py | KennardWang/funcom_reproduction | ae1e13a0bd3c7a50b888ed98191ec77b7a97ee21 | [
"MIT"
] | null | null | null | from keras.models import Model
from keras.layers import Input, Dense, Embedding, Reshape, GRU, merge, LSTM, Dropout, BatchNormalization, Activation, concatenate, multiply, MaxPooling1D, Conv1D, Flatten, Bidirectional, CuDNNGRU, RepeatVector, Permute, TimeDistributed, dot
from keras.optimizers import RMSprop, Adamax
import keras
import keras.utils
import tensorflow as tf
from keras import metrics
# This is a generic attentional seq2seq model. Guide by Collin to help students understand
# how to implement the basic idea and what attention is.
# I write this guide with much thanks to:
# https://wanasit.github.io/attention-based-sequence-to-sequence-in-keras.html
# https://arxiv.org/abs/1508.04025
class AttentionGRUModel:
    """Attentional GRU encoder-decoder for sequence-to-sequence prediction.

    A source-token sequence is encoded by a GRU and a comment prefix is
    decoded by a second GRU seeded with the encoder's final state. A
    dot-product (Luong-style, https://arxiv.org/abs/1508.04025) attention
    scores every decoder position against every encoder position, and the
    resulting per-position context vectors feed a softmax over the comment
    vocabulary that predicts the next word.
    """

    def __init__(self, config):
        # This model truncates the source-token input to 50 tokens.
        config['tdatlen'] = 50

        self.config = config
        self.tdatvocabsize = config['tdatvocabsize']
        self.comvocabsize = config['comvocabsize']
        self.datlen = config['tdatlen']
        self.comlen = config['comlen']

        self.embdims = 100
        self.recdims = 256

        self.config['num_input'] = 2
        self.config['num_output'] = 1

    def create_model(self):
        """Build and compile the Keras model.

        Returns:
            (config, model): the config dict and the compiled Model, which
            takes [source tokens, comment prefix] and outputs a softmax
            over the comment vocabulary.
        """
        # Fixed-size, padded inputs: source tokens and the comment so far.
        tdats_input = Input(shape=(self.datlen,))
        coms_input = Input(shape=(self.comlen,))

        # Encoder: embedding + unidirectional GRU. return_sequences yields
        # the per-timestep states needed for attention; return_state yields
        # the final hidden state used to seed the decoder.
        tdats_emb = Embedding(output_dim=self.embdims,
                              input_dim=self.tdatvocabsize,
                              mask_zero=False)(tdats_input)
        encoder = CuDNNGRU(self.recdims, return_state=True,
                           return_sequences=True)
        encoder_seq, encoder_state = encoder(tdats_emb)

        # Decoder: embedding + GRU initialized with the encoder state.
        coms_emb = Embedding(output_dim=self.embdims,
                             input_dim=self.comvocabsize,
                             mask_zero=False)(coms_input)
        decoder = CuDNNGRU(self.recdims, return_sequences=True)
        decoder_seq = decoder(coms_emb, initial_state=encoder_state)

        # Dot-product attention: similarity of every decoder step to every
        # encoder step, softmaxed into attention weights per decoder step.
        attention = dot([decoder_seq, encoder_seq], axes=[2, 2])
        attention = Activation('softmax')(attention)

        # Per-decoder-step context vectors: attention-weighted sums of the
        # encoder states, concatenated with the decoder states themselves.
        context = dot([attention, encoder_seq], axes=[2, 1])
        context = concatenate([context, decoder_seq])

        # One dense "predictor" per decoder position, then flatten and map
        # to a single softmax over the comment vocabulary (next word).
        hidden = TimeDistributed(
            Dense(self.recdims, activation="tanh"))(context)
        hidden = Flatten()(hidden)
        prediction = Dense(self.comvocabsize, activation="softmax")(hidden)

        model = Model(inputs=[tdats_input, coms_input], outputs=prediction)

        if self.config['multigpu']:
            model = keras.utils.multi_gpu_model(model, gpus=2)

        model.compile(loss='categorical_crossentropy', optimizer='adam',
                      metrics=['accuracy'])
        return self.config, model
| 58.026667 | 240 | 0.6322 |
468e2a7627d0dca2ef300393e19fa84920ce3fae | 1,349 | py | Python | google/ads/googleads/v10/enums/types/user_list_string_rule_item_operator.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v10/enums/types/user_list_string_rule_item_operator.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v10/enums/types/user_list_string_rule_item_operator.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v10.enums",
marshal="google.ads.googleads.v10",
manifest={"UserListStringRuleItemOperatorEnum",},
)
class UserListStringRuleItemOperatorEnum(proto.Message):
    r"""Supported rule operator for string type.
    """

    class UserListStringRuleItemOperator(proto.Enum):
        r"""Enum describing possible user list string rule item
        operators.
        """
        # Member values are fixed by the generated proto definition;
        # do not renumber.
        UNSPECIFIED = 0
        UNKNOWN = 1
        CONTAINS = 2
        EQUALS = 3
        STARTS_WITH = 4
        ENDS_WITH = 5
        NOT_EQUALS = 6
        NOT_CONTAINS = 7
        NOT_STARTS_WITH = 8
        NOT_ENDS_WITH = 9


# Export only the names declared in this module's proto manifest.
__all__ = tuple(sorted(__protobuf__.manifest))
| 28.702128 | 74 | 0.688658 |
0ef3cee0bd8eabb36967a2ddf176fda8f7ca0670 | 468 | py | Python | lib/connector/Connector.py | Justontheway/quality-monitor | 6ca296acfcdf6b5352ad3874d21c90e28b3d6018 | [
"Apache-2.0"
] | null | null | null | lib/connector/Connector.py | Justontheway/quality-monitor | 6ca296acfcdf6b5352ad3874d21c90e28b3d6018 | [
"Apache-2.0"
] | 7 | 2017-03-24T03:02:51.000Z | 2017-04-14T02:57:19.000Z | lib/connector/Connector.py | Justontheway/quality-monitor | 6ca296acfcdf6b5352ad3874d21c90e28b3d6018 | [
"Apache-2.0"
] | null | null | null | '''Connector is an abstract class for connecting any place to get data or info.'''
class Connector(object):
    """Abstract base class for connectors that fetch data from a datastore.

    Subclasses override :meth:`open` and :meth:`close`. Instances can be
    used as context managers; :meth:`open` runs at construction time and
    :meth:`close` runs on garbage collection.
    """

    def __init__(self, conf=None):
        """Store the configuration dict and open the connection.

        Args:
            conf: optional configuration mapping; defaults to an empty dict.
        """
        # BUG FIX: the previous ``conf={}`` mutable default was shared by
        # every instance constructed without an explicit conf, so mutating
        # one instance's _conf leaked into all the others.
        self._conf = {} if conf is None else conf
        self.open()

    def __del__(self):
        self.close()

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        # Returning None (falsy) propagates any exception from the block.
        return

    def open(self):
        """Acquire resources; subclasses override. Default is a no-op."""
        return

    def close(self):
        """Release resources; subclasses override. Default is a no-op."""
        return
| 24.631579 | 82 | 0.604701 |
43003b2a99ec1eb4e2621b9c72047bc46c7e52f0 | 4,814 | py | Python | loss/Vgg16PerceptualLoss.py | uzielroy/StyleGan_FewShot | 94e4c49dbf39d1c6299f33787afb3e471ece11e3 | [
"MIT"
] | 76 | 2020-03-04T16:25:10.000Z | 2022-03-25T08:58:18.000Z | loss/Vgg16PerceptualLoss.py | uzielroy/StyleGan_FewShot | 94e4c49dbf39d1c6299f33787afb3e471ece11e3 | [
"MIT"
] | 7 | 2020-05-24T07:02:44.000Z | 2022-02-10T01:57:40.000Z | loss/Vgg16PerceptualLoss.py | uzielroy/StyleGan_FewShot | 94e4c49dbf39d1c6299f33787afb3e471ece11e3 | [
"MIT"
] | 9 | 2020-07-04T16:35:14.000Z | 2022-03-12T06:20:40.000Z | import torch
from torchvision import models
import torch.nn.functional as F
class Vgg16PerceptualLoss(torch.nn.Module):
    """Perceptual loss computed from intermediate VGG-16 feature maps.

    Two images are passed through (a prefix of) a pretrained VGG-16
    feature extractor and an L1/L2 loss is taken between the activations
    at the requested layer indices.

    Layer index reference for ``torchvision.models.vgg16().features``:
    conv layers sit at 0, 2, 5, 7, 10, 12, 14, 17, 19, 21, 24, 26, 28
    (each followed by a ReLU at the next index) and max-pools at
    4, 9, 16, 23, 30.
    """

    def __init__(self, perceptual_indices=(1, 3, 6, 8, 11, 13, 15, 18, 20, 22),
                 loss_func="l1", requires_grad=False):
        """
        Args:
            perceptual_indices: indices of the VGG feature layers whose
                activations contribute to the loss. (Changed from a mutable
                list default to a tuple; behavior is identical.)
            loss_func: loss type, "l1" or "l2".
            requires_grad: if False (default), freeze the VGG weights.

        Raises:
            NotImplementedError: if *loss_func* is not "l1" or "l2".
        """
        super(Vgg16PerceptualLoss, self).__init__()
        vgg_pretrained_features = models.vgg16(pretrained=True).features.eval()
        max_layer_idx = max(perceptual_indices)
        self.perceptual_indices = set(perceptual_indices)  # set: O(1) lookup
        # BUG FIX: slice up to max_layer_idx + 1 so the highest requested
        # layer is included; slicing to max_layer_idx silently dropped it
        # (forward_img enumerates indices 0..len(vgg_partial)-1).
        self.vgg_partial = torch.nn.Sequential(
            *list(vgg_pretrained_features.children())[0:max_layer_idx + 1])
        if loss_func == "l1":
            self.loss_func = F.l1_loss
        elif loss_func == "l2":
            self.loss_func = F.mse_loss
        else:
            # BUG FIX: was misspelled "NotImpementedError", which raised a
            # NameError instead of the intended exception.
            raise NotImplementedError(loss_func)
        if not requires_grad:
            for param in self.parameters():
                param.requires_grad = False

    def normalize(self, batch):
        """Normalize *batch* with the ImageNet per-channel mean and std.

        Assumes the channel axis is third from the end (e.g. (N, 3, H, W)
        or (3, H, W)) so the (-1, 1, 1) views broadcast over it.
        """
        mean = batch.new_tensor([0.485, 0.456, 0.406]).view(-1, 1, 1)
        std = batch.new_tensor([0.229, 0.224, 0.225]).view(-1, 1, 1)
        return (batch - mean) / std

    def rescale(self, batch, lower, upper):
        """Linearly rescale *batch* from [lower, upper] to [0, 1]."""
        return (batch - lower) / (upper - lower)

    def forward_img(self, h):
        """Run *h* through the partial VGG, collecting requested activations.

        Returns:
            list of tensors, one per index in ``perceptual_indices`` (in
            network order).
        """
        activations = []
        for i, layer in enumerate(self.vgg_partial):
            h = layer(h)
            if i in self.perceptual_indices:
                activations.append(h)
        return activations

    def forward(self, img1, img2, img1_minmax=(0, 1), img2_minmax=(0, 1),
                apply_imagenet_norm=True):
        """Return the per-layer perceptual losses between *img1* and *img2*.

        Args:
            img1: first image batch.
            img2: second image batch.
            img1_minmax: (lower, upper) pixel bounds of img1; default (0, 1).
            img2_minmax: (lower, upper) pixel bounds of img2; default (0, 1).
            apply_imagenet_norm: normalize with ImageNet mean/std (default
                True).

        Returns:
            list of scalar loss tensors, one per requested VGG layer.
        """
        if img1_minmax != (0, 1):
            img1 = self.rescale(img1, img1_minmax[0], img1_minmax[1])
        if img2_minmax != (0, 1):
            img2 = self.rescale(img2, img2_minmax[0], img2_minmax[1])
        if apply_imagenet_norm:
            img1 = self.normalize(img1)
            img2 = self.normalize(img2)
        losses = []
        for img1_h, img2_h in zip(self.forward_img(img1),
                                  self.forward_img(img2)):
            losses.append(self.loss_func(img1_h, img2_h))
        return losses
b4b4062399637be948118f7b512cd0427011f07e | 182 | py | Python | relative_imports/project/package/demo.py | scotthuang1989/python_basics | 82bd8a934c8c77efa6eb1481a77e66e5f1d8b225 | [
"MIT"
] | null | null | null | relative_imports/project/package/demo.py | scotthuang1989/python_basics | 82bd8a934c8c77efa6eb1481a77e66e5f1d8b225 | [
"MIT"
] | null | null | null | relative_imports/project/package/demo.py | scotthuang1989/python_basics | 82bd8a934c8c77efa6eb1481a77e66e5f1d8b225 | [
"MIT"
] | null | null | null | print('__file__={} | __name__={} | __package__={}'.format(__file__,__name__,str(__package__)))
from .. import config
print("The value of config.count is {0}".\
format(config.count))
| 36.4 | 94 | 0.714286 |
90e3054882acd4b83708461c9b8abcf6eb6aea15 | 18,242 | py | Python | google/cloud/logging_v2/services/config_service_v2/transports/base.py | LaudateCorpus1/python-logging | d86be6cf83c3f3b91c4fc0b2e0666b0ca1d7e248 | [
"Apache-2.0"
] | null | null | null | google/cloud/logging_v2/services/config_service_v2/transports/base.py | LaudateCorpus1/python-logging | d86be6cf83c3f3b91c4fc0b2e0666b0ca1d7e248 | [
"Apache-2.0"
] | null | null | null | google/cloud/logging_v2/services/config_service_v2/transports/base.py | LaudateCorpus1/python-logging | d86be6cf83c3f3b91c4fc0b2e0666b0ca1d7e248 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.logging_v2.types import logging_config
from google.protobuf import empty_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-logging",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class ConfigServiceV2Transport(abc.ABC):
"""Abstract transport class for ConfigServiceV2."""
AUTH_SCOPES = (
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/cloud-platform.read-only",
"https://www.googleapis.com/auth/logging.admin",
"https://www.googleapis.com/auth/logging.read",
)
DEFAULT_HOST: str = "logging.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.list_buckets: gapic_v1.method.wrap_method(
self.list_buckets, default_timeout=None, client_info=client_info,
),
self.get_bucket: gapic_v1.method.wrap_method(
self.get_bucket, default_timeout=None, client_info=client_info,
),
self.create_bucket: gapic_v1.method.wrap_method(
self.create_bucket, default_timeout=None, client_info=client_info,
),
self.update_bucket: gapic_v1.method.wrap_method(
self.update_bucket, default_timeout=None, client_info=client_info,
),
self.delete_bucket: gapic_v1.method.wrap_method(
self.delete_bucket, default_timeout=None, client_info=client_info,
),
self.undelete_bucket: gapic_v1.method.wrap_method(
self.undelete_bucket, default_timeout=None, client_info=client_info,
),
self.list_views: gapic_v1.method.wrap_method(
self.list_views, default_timeout=None, client_info=client_info,
),
self.get_view: gapic_v1.method.wrap_method(
self.get_view, default_timeout=None, client_info=client_info,
),
self.create_view: gapic_v1.method.wrap_method(
self.create_view, default_timeout=None, client_info=client_info,
),
self.update_view: gapic_v1.method.wrap_method(
self.update_view, default_timeout=None, client_info=client_info,
),
self.delete_view: gapic_v1.method.wrap_method(
self.delete_view, default_timeout=None, client_info=client_info,
),
self.list_sinks: gapic_v1.method.wrap_method(
self.list_sinks,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.InternalServerError,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.get_sink: gapic_v1.method.wrap_method(
self.get_sink,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.InternalServerError,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.create_sink: gapic_v1.method.wrap_method(
self.create_sink, default_timeout=120.0, client_info=client_info,
),
self.update_sink: gapic_v1.method.wrap_method(
self.update_sink,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.InternalServerError,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.delete_sink: gapic_v1.method.wrap_method(
self.delete_sink,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.InternalServerError,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.list_exclusions: gapic_v1.method.wrap_method(
self.list_exclusions,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.InternalServerError,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.get_exclusion: gapic_v1.method.wrap_method(
self.get_exclusion,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.InternalServerError,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.create_exclusion: gapic_v1.method.wrap_method(
self.create_exclusion, default_timeout=120.0, client_info=client_info,
),
self.update_exclusion: gapic_v1.method.wrap_method(
self.update_exclusion, default_timeout=120.0, client_info=client_info,
),
self.delete_exclusion: gapic_v1.method.wrap_method(
self.delete_exclusion,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.InternalServerError,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.get_cmek_settings: gapic_v1.method.wrap_method(
self.get_cmek_settings, default_timeout=None, client_info=client_info,
),
self.update_cmek_settings: gapic_v1.method.wrap_method(
self.update_cmek_settings,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def list_buckets(
self,
) -> Callable[
[logging_config.ListBucketsRequest],
Union[
logging_config.ListBucketsResponse,
Awaitable[logging_config.ListBucketsResponse],
],
]:
raise NotImplementedError()
@property
def get_bucket(
self,
) -> Callable[
[logging_config.GetBucketRequest],
Union[logging_config.LogBucket, Awaitable[logging_config.LogBucket]],
]:
raise NotImplementedError()
@property
def create_bucket(
self,
) -> Callable[
[logging_config.CreateBucketRequest],
Union[logging_config.LogBucket, Awaitable[logging_config.LogBucket]],
]:
raise NotImplementedError()
@property
def update_bucket(
self,
) -> Callable[
[logging_config.UpdateBucketRequest],
Union[logging_config.LogBucket, Awaitable[logging_config.LogBucket]],
]:
raise NotImplementedError()
@property
def delete_bucket(
self,
) -> Callable[
[logging_config.DeleteBucketRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def undelete_bucket(
self,
) -> Callable[
[logging_config.UndeleteBucketRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def list_views(
self,
) -> Callable[
[logging_config.ListViewsRequest],
Union[
logging_config.ListViewsResponse,
Awaitable[logging_config.ListViewsResponse],
],
]:
raise NotImplementedError()
@property
def get_view(
self,
) -> Callable[
[logging_config.GetViewRequest],
Union[logging_config.LogView, Awaitable[logging_config.LogView]],
]:
raise NotImplementedError()
@property
def create_view(
self,
) -> Callable[
[logging_config.CreateViewRequest],
Union[logging_config.LogView, Awaitable[logging_config.LogView]],
]:
raise NotImplementedError()
@property
def update_view(
self,
) -> Callable[
[logging_config.UpdateViewRequest],
Union[logging_config.LogView, Awaitable[logging_config.LogView]],
]:
raise NotImplementedError()
@property
def delete_view(
self,
) -> Callable[
[logging_config.DeleteViewRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def list_sinks(
self,
) -> Callable[
[logging_config.ListSinksRequest],
Union[
logging_config.ListSinksResponse,
Awaitable[logging_config.ListSinksResponse],
],
]:
raise NotImplementedError()
@property
def get_sink(
self,
) -> Callable[
[logging_config.GetSinkRequest],
Union[logging_config.LogSink, Awaitable[logging_config.LogSink]],
]:
raise NotImplementedError()
@property
def create_sink(
self,
) -> Callable[
[logging_config.CreateSinkRequest],
Union[logging_config.LogSink, Awaitable[logging_config.LogSink]],
]:
raise NotImplementedError()
@property
def update_sink(
self,
) -> Callable[
[logging_config.UpdateSinkRequest],
Union[logging_config.LogSink, Awaitable[logging_config.LogSink]],
]:
raise NotImplementedError()
@property
def delete_sink(
self,
) -> Callable[
[logging_config.DeleteSinkRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def list_exclusions(
self,
) -> Callable[
[logging_config.ListExclusionsRequest],
Union[
logging_config.ListExclusionsResponse,
Awaitable[logging_config.ListExclusionsResponse],
],
]:
raise NotImplementedError()
@property
def get_exclusion(
self,
) -> Callable[
[logging_config.GetExclusionRequest],
Union[logging_config.LogExclusion, Awaitable[logging_config.LogExclusion]],
]:
raise NotImplementedError()
@property
def create_exclusion(
self,
) -> Callable[
[logging_config.CreateExclusionRequest],
Union[logging_config.LogExclusion, Awaitable[logging_config.LogExclusion]],
]:
raise NotImplementedError()
@property
def update_exclusion(
self,
) -> Callable[
[logging_config.UpdateExclusionRequest],
Union[logging_config.LogExclusion, Awaitable[logging_config.LogExclusion]],
]:
raise NotImplementedError()
@property
def delete_exclusion(
self,
) -> Callable[
[logging_config.DeleteExclusionRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def get_cmek_settings(
self,
) -> Callable[
[logging_config.GetCmekSettingsRequest],
Union[logging_config.CmekSettings, Awaitable[logging_config.CmekSettings]],
]:
raise NotImplementedError()
@property
def update_cmek_settings(
self,
) -> Callable[
[logging_config.UpdateCmekSettingsRequest],
Union[logging_config.CmekSettings, Awaitable[logging_config.CmekSettings]],
]:
raise NotImplementedError()
__all__ = ("ConfigServiceV2Transport",)
| 35.013436 | 101 | 0.594617 |
41d72bffde57de0c887481e4de6e5aa0d0d90d94 | 4,071 | py | Python | app/base/models.py | HarinarayananP/Flask-Backend-AirPolutionMonitoring | 0a0d60e08579ac8fbf1dd544ef5f2b938a1b5a5e | [
"Apache-2.0"
] | 1 | 2021-08-03T08:17:32.000Z | 2021-08-03T08:17:32.000Z | app/base/models.py | HarinarayananP/Flask-Backend-AirPolutionMonitoring | 0a0d60e08579ac8fbf1dd544ef5f2b938a1b5a5e | [
"Apache-2.0"
] | null | null | null | app/base/models.py | HarinarayananP/Flask-Backend-AirPolutionMonitoring | 0a0d60e08579ac8fbf1dd544ef5f2b938a1b5a5e | [
"Apache-2.0"
] | 1 | 2021-08-01T05:47:21.000Z | 2021-08-01T05:47:21.000Z | # -*- encoding: utf-8 -*-
"""
Copyright (c) 2019 - present AppSeed.us
"""
from flask_login import UserMixin
from sqlalchemy import Binary, Column, Integer, String, DateTime, Boolean, ForeignKey, Float
from app import db, login_manager
from app.base.util import hash_pass
class User(db.Model, UserMixin):
__tablename__ = 'User'
id = Column(Integer, primary_key=True)
username = Column(String(20), unique=True)
email = Column(String(20), unique=True)
password = Column(Binary)
def __init__(self, **kwargs):
for property, value in kwargs.items():
# depending on whether value is an iterable or not, we must
# unpack it's value (when **kwargs is request.form, some values
# will be a 1-element list)
if hasattr(value, '__iter__') and not isinstance(value, str):
# the ,= unpack of a singleton fails PEP8 (travis flake8 test)
value = value[0]
if property == 'password':
value = hash_pass(value) # we need bytes here (not plain str)
setattr(self, property, value)
def __repr__(self):
return str(self.username)
class SensorData(db.Model, UserMixin):
__tablename__ = 'SensorData'
id = Column(Integer, primary_key=True)
sensorname = Column(String(20), ForeignKey("SensorDetails.name"))
time = Column(DateTime)
value = Column(Integer)
sensordetails = db.relationship("SensorDetails", lazy=True)
def __init__(self, **kwargs):
for property, value in kwargs.items():
# depending on whether value is an iterable or not, we must
# unpack it's value (when **kwargs is request.form, some values
# will be a 1-element list)
if hasattr(value, '__iter__') and not isinstance(value, str):
# the ,= unpack of a singleton fails PEP8 (travis flake8 test)
value = value[0]
setattr(self, property, value)
def __repr__(self):
return str(self.id)
class Settings(db.Model, UserMixin):
__tablename__ = 'Settings'
id = Column(Integer, primary_key=True)
name = Column(String(20))
value = Column(String(400))
def __init__(self, **kwargs):
for property, value in kwargs.items():
# depending on whether value is an iterable or not, we must
# unpack it's value (when **kwargs is request.form, some values
# will be a 1-element list)
if hasattr(value, '__iter__') and not isinstance(value, str):
# the ,= unpack of a singleton fails PEP8 (travis flake8 test)
value = value[0]
setattr(self, property, value)
def __repr__(self):
return self.value
class SensorDetails(db.Model, UserMixin):
__tablename__ = 'SensorDetails'
id = Column(Integer, primary_key=True)
name = Column(String(20), unique=True)
location = Column(String(400))
unit = Column(String(20))
warning_level = Column(Integer)
severe_level = Column(Integer)
active = Column(Boolean)
min_value = Column(Integer)
max_value = Column(Integer)
color = Column(String(15))
icon = Column(String(20))
def __init__(self, **kwargs):
for property, value in kwargs.items():
# depending on whether value is an iterable or not, we must
# unpack it's value (when **kwargs is request.form, some values
# will be a 1-element list)
if hasattr(value, '__iter__') and not isinstance(value, str):
# the ,= unpack of a singleton fails PEP8 (travis flake8 test)
value = value[0]
setattr(self, property, value)
def __repr__(self):
return str(self.id)
@login_manager.user_loader
def user_loader(id):
return User.query.filter_by(id=id).first()
@login_manager.request_loader
def request_loader(request):
username = request.form.get('username')
user = User.query.filter_by(username=username).first()
return user if user else None
| 32.568 | 92 | 0.63154 |
f5fa81963eaeaad09d8ff77f16b1ea081eccb5f9 | 11,886 | py | Python | main.py | Mr-xn/proxy-scraper-checker | ffbcd57cf1c66a5d6648817602466086c0cc9b97 | [
"MIT"
] | 2 | 2022-02-27T09:41:14.000Z | 2022-02-28T15:55:19.000Z | main.py | Mr-xn/proxy-scraper-checker | ffbcd57cf1c66a5d6648817602466086c0cc9b97 | [
"MIT"
] | null | null | null | main.py | Mr-xn/proxy-scraper-checker | ffbcd57cf1c66a5d6648817602466086c0cc9b97 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import asyncio
import re
from pathlib import Path
from random import shuffle
from shutil import rmtree
from time import perf_counter
from typing import Callable, Dict, Iterable, List, Optional, Set, Tuple, Union
from aiohttp import ClientSession
from aiohttp_socks import ProxyConnector
from rich.console import Console
from rich.progress import (
BarColumn,
Progress,
TaskID,
TextColumn,
TimeRemainingColumn,
)
from rich.table import Table
import config
class Proxy:
def __init__(self, socket_address: str, ip: str) -> None:
"""
Args:
socket_address: ip:port
"""
self.socket_address = socket_address
self.ip = ip
self.is_anonymous: Optional[bool] = None
self.geolocation: str = "::None::None::None"
self.timeout = float("inf")
def update(self, info: Dict[str, str]) -> None:
"""Set geolocation and is_anonymous.
Args:
info: Response from http://ip-api.com/json.
"""
country = info.get("country") or None
region = info.get("regionName") or None
city = info.get("city") or None
self.geolocation = f"::{country}::{region}::{city}"
self.is_anonymous = self.ip != info.get("query")
def __eq__(self, other: object) -> bool:
if not isinstance(other, Proxy):
return NotImplemented
return self.socket_address == other.socket_address
def __hash__(self) -> int:
return hash(("socket_address", self.socket_address))
class Folder:
def __init__(self, folder_name: str, path: Path) -> None:
self.folder_name = folder_name
self.path = path / folder_name
self.for_anonymous = "anon" in folder_name
self.for_geolocation = "geo" in folder_name
def remove(self) -> None:
try:
rmtree(self.path)
except FileNotFoundError:
pass
def create(self) -> None:
self.path.mkdir(parents=True, exist_ok=True)
class ProxyScraperChecker:
"""HTTP, SOCKS4, SOCKS5 proxies scraper and checker."""
def __init__(
self,
*,
timeout: float,
max_connections: int,
sort_by_speed: bool,
save_path: str,
proxies: bool,
proxies_anonymous: bool,
proxies_geolocation: bool,
proxies_geolocation_anonymous: bool,
http_sources: Optional[Iterable[str]],
socks4_sources: Optional[Iterable[str]],
socks5_sources: Optional[Iterable[str]],
console: Optional[Console] = None,
) -> None:
"""HTTP, SOCKS4, SOCKS5 proxies scraper and checker.
Args:
timeout: How many seconds to wait for the connection.
max_connections: Maximum concurrent connections.
sort_by_speed: Set to False to sort proxies alphabetically.
save_path: Path to the folder where the proxy folders will be
saved.
"""
self.path = Path(save_path)
folders_mapping = {
"proxies": proxies,
"proxies_anonymous": proxies_anonymous,
"proxies_geolocation": proxies_geolocation,
"proxies_geolocation_anonymous": proxies_geolocation_anonymous,
}
self.all_folders = [
Folder(folder_name, self.path) for folder_name in folders_mapping
]
self.enabled_folders = [
folder
for folder in self.all_folders
if folders_mapping[folder.folder_name]
]
if not self.enabled_folders:
raise ValueError("all folders are disabled in the config")
regex = r"(?:^|\D)(({0}\.{1}\.{1}\.{1}):{2})(?:\D|$)".format(
r"(?:[1-9]|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5])", # 1-255
r"(?:\d|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5])", # 0-255
r"(?:\d|[1-9]\d{1,3}|[1-5]\d{4}|6[0-4]\d{3}"
+ r"|65[0-4]\d{2}|655[0-2]\d|6553[0-5])", # 0-65535
)
self.regex = re.compile(regex)
self.sort_by_speed = sort_by_speed
self.timeout = timeout
self.sources = {
proto: (sources,)
if isinstance(sources, str)
else frozenset(sources)
for proto, sources in (
("http", http_sources),
("socks4", socks4_sources),
("socks5", socks5_sources),
)
if sources
}
self.proxies: Dict[str, Set[Proxy]] = {
proto: set() for proto in self.sources
}
self.proxies_count = {proto: 0 for proto in self.sources}
self.c = console or Console()
self.sem = asyncio.Semaphore(max_connections)
async def fetch_source(
self,
session: ClientSession,
source: str,
proto: str,
progress: Progress,
task: TaskID,
) -> None:
"""Get proxies from source.
Args:
source: Proxy list URL.
proto: http/socks4/socks5.
"""
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4710.168 Safari/537.36'}
try:
async with session.get(source.strip(), headers=headers, proxy='http://127.0.0.1:7890', timeout=15) as r:
text = await r.text(encoding="utf-8")
except Exception as e:
self.c.print(f"{source}: {e}")
else:
for proxy in self.regex.finditer(text):
self.proxies[proto].add(Proxy(proxy.group(1), proxy.group(2)))
progress.update(task, advance=1)
async def check_proxy(
self, proxy: Proxy, proto: str, progress: Progress, task: TaskID
) -> None:
"""Check if proxy is alive."""
try:
async with self.sem:
start = perf_counter()
async with ClientSession(
connector=ProxyConnector.from_url(
f"{proto}://{proxy.socket_address}"
)
) as session:
async with session.get(
"http://ip-api.com/json/", timeout=self.timeout
) as r:
res = (
None
if r.status in {403, 404, 429}
else await r.json()
)
except Exception as e:
# Too many open files
if isinstance(e, OSError) and e.errno == 24:
self.c.print(
"[red]Please, set MAX_CONNECTIONS to lower value."
)
self.proxies[proto].remove(proxy)
else:
proxy.timeout = perf_counter() - start
if res:
proxy.update(res)
progress.update(task, advance=1)
async def fetch_all_sources(self) -> None:
with self._progress as progress:
tasks = {
proto: progress.add_task(
f"[yellow]Scraper [red]:: [green]{proto.upper()}",
total=len(sources),
)
for proto, sources in self.sources.items()
}
async with ClientSession() as session:
coroutines = (
self.fetch_source(
session, source, proto, progress, tasks[proto]
)
for proto, sources in self.sources.items()
for source in sources
)
await asyncio.gather(*coroutines)
# Remember total count so we could print it in the table
for proto, proxies in self.proxies.items():
self.proxies_count[proto] = len(proxies)
async def check_all_proxies(self) -> None:
with self._progress as progress:
tasks = {
proto: progress.add_task(
f"[yellow]Checker [red]:: [green]{proto.upper()}",
total=len(proxies),
)
for proto, proxies in self.proxies.items()
}
coroutines = [
self.check_proxy(proxy, proto, progress, tasks[proto])
for proto, proxies in self.proxies.items()
for proxy in proxies
]
shuffle(coroutines)
await asyncio.gather(*coroutines)
def save_proxies(self) -> None:
"""Delete old proxies and save new ones."""
sorted_proxies = self.sorted_proxies.items()
for folder in self.all_folders:
folder.remove()
for folder in self.enabled_folders:
folder.create()
for proto, proxies in sorted_proxies:
text = "\n".join(
"{}{}".format(
proxy.socket_address,
proxy.geolocation if folder.for_geolocation else "",
)
for proxy in proxies
if (proxy.is_anonymous if folder.for_anonymous else True)
)
(folder.path / f"{proto}.txt").write_text(
text, encoding="utf-8"
)
async def main(self) -> None:
await self.fetch_all_sources()
await self.check_all_proxies()
table = Table()
table.add_column("Protocol", style="cyan")
table.add_column("Working", style="magenta")
table.add_column("Total", style="green")
for proto, proxies in self.proxies.items():
working = len(proxies)
total = self.proxies_count[proto]
percentage = working / total * 100 if total else 0
table.add_row(
proto.upper(), f"{working} ({percentage:.1f}%)", str(total)
)
self.c.print(table)
self.save_proxies()
self.c.print(
"[green]Proxy folders have been created in the "
+ f"{self.path.absolute()} folder."
+ "\nThank you for using proxy-scraper-checker :)"
)
@property
def sorted_proxies(self) -> Dict[str, List[Proxy]]:
key = self._sorting_key
return {
proto: sorted(proxies, key=key)
for proto, proxies in self.proxies.items()
}
@property
def _sorting_key(
self,
) -> Union[Callable[[Proxy], float], Callable[[Proxy], Tuple[int, ...]]]:
if self.sort_by_speed:
return lambda proxy: proxy.timeout
return lambda proxy: tuple(
map(int, proxy.socket_address.replace(":", ".").split("."))
)
@property
def _progress(self) -> Progress:
return Progress(
TextColumn("[progress.description]{task.description}"),
BarColumn(),
TextColumn("[progress.percentage]{task.percentage:3.0f}%"),
TextColumn("[blue][{task.completed}/{task.total}]"),
TimeRemainingColumn(),
console=self.c,
)
async def main() -> None:
await ProxyScraperChecker(
timeout=config.TIMEOUT,
max_connections=config.MAX_CONNECTIONS,
sort_by_speed=config.SORT_BY_SPEED,
save_path=config.SAVE_PATH,
proxies=config.PROXIES,
proxies_anonymous=config.PROXIES_ANONYMOUS,
proxies_geolocation=config.PROXIES_GEOLOCATION,
proxies_geolocation_anonymous=config.PROXIES_GEOLOCATION_ANONYMOUS,
http_sources=config.HTTP_SOURCES
if config.HTTP and config.HTTP_SOURCES
else None,
socks4_sources=config.SOCKS4_SOURCES
if config.SOCKS4 and config.SOCKS4_SOURCES
else None,
socks5_sources=config.SOCKS5_SOURCES
if config.SOCKS5 and config.SOCKS5_SOURCES
else None,
).main()
if __name__ == "__main__":
asyncio.run(main())
| 34.253602 | 146 | 0.54947 |
1cdbbd48abf4872d464d6864e6bea3ee17e8e109 | 486 | py | Python | examples/test.py | weijentu/sgp30-python | 6027d71a6cdfd1cbd4783522d249200ba4fc26d3 | [
"MIT"
] | 27 | 2019-10-22T15:52:30.000Z | 2022-03-30T23:51:29.000Z | examples/test.py | danthedeckie/sgp30-python | a6375878895f22b279797838a2902c4a952bfe56 | [
"MIT"
] | 8 | 2020-02-10T10:44:25.000Z | 2021-03-18T17:36:42.000Z | examples/test.py | danthedeckie/sgp30-python | a6375878895f22b279797838a2902c4a952bfe56 | [
"MIT"
] | 15 | 2020-01-04T23:28:31.000Z | 2021-12-26T16:42:19.000Z | from sgp30 import SGP30
import time
import sys
sgp30 = SGP30()
# result = sgp30.command('set_baseline', (0xFECA, 0xBEBA))
# result = sgp30.command('get_baseline')
# print(["{:02x}".format(n) for n in result])
print("Sensor warming up, please wait...")
def crude_progress_bar():
sys.stdout.write('.')
sys.stdout.flush()
sgp30.start_measurement(crude_progress_bar)
sys.stdout.write('\n')
while True:
result = sgp30.get_air_quality()
print(result)
time.sleep(1.0)
| 21.130435 | 58 | 0.697531 |
9c6ffb583fb153c136c1c4e90e10d223dfac3c29 | 660 | py | Python | zeeguu/api/api/user_statistics.py | mircealungu/Zeeguu-API-2 | 1e8ea7f5dd0b883ed2d714b9324162b1a8edd170 | [
"MIT"
] | 8 | 2018-02-06T15:47:55.000Z | 2021-05-26T15:24:49.000Z | zeeguu/api/api/user_statistics.py | mircealungu/Zeeguu-API-2 | 1e8ea7f5dd0b883ed2d714b9324162b1a8edd170 | [
"MIT"
] | 57 | 2018-02-02T19:54:38.000Z | 2021-07-15T15:45:15.000Z | zeeguu/api/api/user_statistics.py | mircealungu/Zeeguu-API-2 | 1e8ea7f5dd0b883ed2d714b9324162b1a8edd170 | [
"MIT"
] | 13 | 2017-10-12T09:05:19.000Z | 2020-02-19T09:38:01.000Z | import flask
from zeeguu.api.api.utils.json_result import json_result
from zeeguu.core.user_statistics.activity import activity_duration_by_day
from . import api
from .utils.route_wrappers import cross_domain, with_session
@api.route("/bookmark_counts_by_date", methods=("GET",))
@cross_domain
@with_session
def bookmark_counts_by_date():
"""
Words that have been translated in texts
"""
return flask.g.user.bookmark_counts_by_date()
@api.route("/activity_by_day", methods=("GET",))
@cross_domain
@with_session
def activity_by_day():
"""
User sessions by day
"""
return json_result(activity_duration_by_day(flask.g.user))
| 23.571429 | 73 | 0.757576 |
b6fad4772416187c5d8e225a7abf4d7a841e71b3 | 16,825 | py | Python | examples/asynchronous_consumer_example.py | DavidWittman/pika | 6d9896c89ee187ce1a1a5c6e55a1ee0adcc5b538 | [
"BSD-3-Clause"
] | 2,479 | 2015-01-01T20:06:23.000Z | 2022-03-31T13:29:19.000Z | examples/asynchronous_consumer_example.py | DavidWittman/pika | 6d9896c89ee187ce1a1a5c6e55a1ee0adcc5b538 | [
"BSD-3-Clause"
] | 813 | 2015-01-07T07:13:49.000Z | 2022-03-28T05:05:06.000Z | examples/asynchronous_consumer_example.py | DavidWittman/pika | 6d9896c89ee187ce1a1a5c6e55a1ee0adcc5b538 | [
"BSD-3-Clause"
] | 763 | 2015-01-10T04:38:33.000Z | 2022-03-31T07:24:57.000Z | # -*- coding: utf-8 -*-
# pylint: disable=C0111,C0103,R0205
import functools
import logging
import time
import pika
from pika.exchange_type import ExchangeType
LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
'-35s %(lineno) -5d: %(message)s')
LOGGER = logging.getLogger(__name__)
class ExampleConsumer(object):
    """This is an example consumer that will handle unexpected interactions
    with RabbitMQ such as channel and connection closures.
    If RabbitMQ closes the connection, this class will stop and indicate
    that reconnection is necessary. You should look at the output, as
    there are limited reasons why the connection may be closed, which
    usually are tied to permission related issues or socket timeouts.
    If the channel is closed, it will indicate a problem with one of the
    commands that were issued and that should surface in the output as well.
    """
    # Exchange/queue topology declared by this consumer on startup.
    EXCHANGE = 'message'
    EXCHANGE_TYPE = ExchangeType.topic
    QUEUE = 'text'
    ROUTING_KEY = 'example.text'
    def __init__(self, amqp_url: str) -> None:
        """Create a new instance of the consumer class, passing in the AMQP
        URL used to connect to RabbitMQ.
        :param str amqp_url: The AMQP url to connect with
        """
        # Read by the supervising ReconnectingExampleConsumer to decide
        # whether (and how soon) to restart this consumer.
        self.should_reconnect = False
        self.was_consuming = False
        self._connection = None
        self._channel = None
        self._closing = False
        self._consumer_tag = None
        self._url = amqp_url
        self._consuming = False
        # In production, experiment with higher prefetch values
        # for higher consumer throughput
        self._prefetch_count = 1
    def connect(self) -> pika.SelectConnection:
        """This method connects to RabbitMQ, returning the connection handle.
        When the connection is established, the on_connection_open method
        will be invoked by pika.
        :rtype: pika.SelectConnection
        """
        LOGGER.info('Connecting to %s', self._url)
        return pika.SelectConnection(
            parameters=pika.URLParameters(self._url),
            on_open_callback=self.on_connection_open,
            on_open_error_callback=self.on_connection_open_error,
            on_close_callback=self.on_connection_closed)
    def close_connection(self) -> None:
        # Idempotent: safe to call while a close is already in progress.
        self._consuming = False
        if self._connection.is_closing or self._connection.is_closed:
            LOGGER.info('Connection is closing or already closed')
        else:
            LOGGER.info('Closing connection')
            self._connection.close()
    def on_connection_open(self, _unused_connection) -> None:
        """This method is called by pika once the connection to RabbitMQ has
        been established. It passes the handle to the connection object in
        case we need it, but in this case, we'll just mark it unused.
        :param pika.SelectConnection _unused_connection: The connection
        """
        LOGGER.info('Connection opened')
        self.open_channel()
    def on_connection_open_error(self, _unused_connection, err) -> None:
        """This method is called by pika if the connection to RabbitMQ
        can't be established.
        :param pika.SelectConnection _unused_connection: The connection
        :param Exception err: The error
        """
        LOGGER.error('Connection open failed: %s', err)
        self.reconnect()
    def on_connection_closed(self, _unused_connection, reason) -> None:
        """This method is invoked by pika when the connection to RabbitMQ is
        closed unexpectedly. Since it is unexpected, we will reconnect to
        RabbitMQ if it disconnects.
        :param pika.connection.Connection connection: The closed connection obj
        :param Exception reason: exception representing reason for loss of
            connection.
        """
        self._channel = None
        if self._closing:
            # Deliberate shutdown (see stop()): just let the ioloop exit.
            self._connection.ioloop.stop()
        else:
            LOGGER.warning('Connection closed, reconnect necessary: %s', reason)
            self.reconnect()
    def reconnect(self) -> None:
        """Will be invoked if the connection can't be opened or is
        closed. Indicates that a reconnect is necessary then stops the
        ioloop.
        """
        self.should_reconnect = True
        self.stop()
    def open_channel(self) -> None:
        """Open a new channel with RabbitMQ by issuing the Channel.Open RPC
        command. When RabbitMQ responds that the channel is open, the
        on_channel_open callback will be invoked by pika.
        """
        LOGGER.info('Creating a new channel')
        self._connection.channel(on_open_callback=self.on_channel_open)
    def on_channel_open(self, channel) -> None:
        """This method is invoked by pika when the channel has been opened.
        The channel object is passed in so we can make use of it.
        Since the channel is now open, we'll declare the exchange to use.
        :param pika.channel.Channel channel: The channel object
        """
        LOGGER.info('Channel opened')
        self._channel = channel
        self.add_on_channel_close_callback()
        self.setup_exchange(self.EXCHANGE)
    def add_on_channel_close_callback(self) -> None:
        """This method tells pika to call the on_channel_closed method if
        RabbitMQ unexpectedly closes the channel.
        """
        LOGGER.info('Adding channel close callback')
        self._channel.add_on_close_callback(self.on_channel_closed)
    def on_channel_closed(self, channel, reason) -> None:
        """Invoked by pika when RabbitMQ unexpectedly closes the channel.
        Channels are usually closed if you attempt to do something that
        violates the protocol, such as re-declare an exchange or queue with
        different parameters. In this case, we'll close the connection
        to shutdown the object.
        :param pika.channel.Channel: The closed channel
        :param Exception reason: why the channel was closed
        """
        # NOTE(review): '%i' formats the channel object itself here —
        # presumably pika's Channel supports integer conversion; verify.
        LOGGER.warning('Channel %i was closed: %s', channel, reason)
        self.close_connection()
    def setup_exchange(self, exchange_name) -> None:
        """Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC
        command. When it is complete, the on_exchange_declareok method will
        be invoked by pika.
        :param str|unicode exchange_name: The name of the exchange to declare
        """
        LOGGER.info('Declaring exchange: %s', exchange_name)
        # Note: using functools.partial is not required, it is demonstrating
        # how arbitrary data can be passed to the callback when it is called
        cb = functools.partial(
            self.on_exchange_declareok, userdata=exchange_name)
        self._channel.exchange_declare(
            exchange=exchange_name,
            exchange_type=self.EXCHANGE_TYPE,
            callback=cb)
    def on_exchange_declareok(self, _unused_frame, userdata) -> None:
        """Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC
        command.
        :param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame
        :param str|unicode userdata: Extra user data (exchange name)
        """
        LOGGER.info('Exchange declared: %s', userdata)
        self.setup_queue(self.QUEUE)
    def setup_queue(self, queue_name) -> None:
        """Setup the queue on RabbitMQ by invoking the Queue.Declare RPC
        command. When it is complete, the on_queue_declareok method will
        be invoked by pika.
        :param str|unicode queue_name: The name of the queue to declare.
        """
        LOGGER.info('Declaring queue %s', queue_name)
        cb = functools.partial(self.on_queue_declareok, userdata=queue_name)
        self._channel.queue_declare(queue=queue_name, callback=cb)
    def on_queue_declareok(self, _unused_frame, userdata) -> None:
        """Method invoked by pika when the Queue.Declare RPC call made in
        setup_queue has completed. In this method we will bind the queue
        and exchange together with the routing key by issuing the Queue.Bind
        RPC command. When this command is complete, the on_bindok method will
        be invoked by pika.
        :param pika.frame.Method _unused_frame: The Queue.DeclareOk frame
        :param str|unicode userdata: Extra user data (queue name)
        """
        queue_name = userdata
        LOGGER.info('Binding %s to %s with %s', self.EXCHANGE, queue_name,
                    self.ROUTING_KEY)
        cb = functools.partial(self.on_bindok, userdata=queue_name)
        self._channel.queue_bind(
            queue_name,
            self.EXCHANGE,
            routing_key=self.ROUTING_KEY,
            callback=cb)
    def on_bindok(self, _unused_frame, userdata) -> None:
        """Invoked by pika when the Queue.Bind method has completed. At this
        point we will set the prefetch count for the channel.
        :param pika.frame.Method _unused_frame: The Queue.BindOk response frame
        :param str|unicode userdata: Extra user data (queue name)
        """
        LOGGER.info('Queue bound: %s', userdata)
        self.set_qos()
    def set_qos(self) -> None:
        """This method sets up the consumer prefetch to only be delivered
        one message at a time. The consumer must acknowledge this message
        before RabbitMQ will deliver another one. You should experiment
        with different prefetch values to achieve desired performance.
        """
        self._channel.basic_qos(
            prefetch_count=self._prefetch_count, callback=self.on_basic_qos_ok)
    def on_basic_qos_ok(self, _unused_frame) -> None:
        """Invoked by pika when the Basic.QoS method has completed. At this
        point we will start consuming messages by calling start_consuming
        which will invoke the needed RPC commands to start the process.
        :param pika.frame.Method _unused_frame: The Basic.QosOk response frame
        """
        LOGGER.info('QOS set to: %d', self._prefetch_count)
        self.start_consuming()
    def start_consuming(self) -> None:
        """This method sets up the consumer by first calling
        add_on_cancel_callback so that the object is notified if RabbitMQ
        cancels the consumer. It then issues the Basic.Consume RPC command
        which returns the consumer tag that is used to uniquely identify the
        consumer with RabbitMQ. We keep the value to use it when we want to
        cancel consuming. The on_message method is passed in as a callback pika
        will invoke when a message is fully received.
        """
        LOGGER.info('Issuing consumer related RPC commands')
        self.add_on_cancel_callback()
        self._consumer_tag = self._channel.basic_consume(
            self.QUEUE, self.on_message)
        self.was_consuming = True
        self._consuming = True
    def add_on_cancel_callback(self) -> None:
        """Add a callback that will be invoked if RabbitMQ cancels the consumer
        for some reason. If RabbitMQ does cancel the consumer,
        on_consumer_cancelled will be invoked by pika.
        """
        LOGGER.info('Adding consumer cancellation callback')
        self._channel.add_on_cancel_callback(self.on_consumer_cancelled)
    def on_consumer_cancelled(self, method_frame) -> None:
        """Invoked by pika when RabbitMQ sends a Basic.Cancel for a consumer
        receiving messages.
        :param pika.frame.Method method_frame: The Basic.Cancel frame
        """
        LOGGER.info('Consumer was cancelled remotely, shutting down: %r',
                    method_frame)
        if self._channel:
            self._channel.close()
    def on_message(self, _unused_channel, basic_deliver, properties, body) -> None:
        """Invoked by pika when a message is delivered from RabbitMQ. The
        channel is passed for your convenience. The basic_deliver object that
        is passed in carries the exchange, routing key, delivery tag and
        a redelivered flag for the message. The properties passed in is an
        instance of BasicProperties with the message properties and the body
        is the message that was sent.
        :param pika.channel.Channel _unused_channel: The channel object
        :param pika.Spec.Basic.Deliver: basic_deliver method
        :param pika.Spec.BasicProperties: properties
        :param bytes body: The message body
        """
        LOGGER.info('Received message # %s from %s: %s',
                    basic_deliver.delivery_tag, properties.app_id, body)
        self.acknowledge_message(basic_deliver.delivery_tag)
    def acknowledge_message(self, delivery_tag) -> None:
        """Acknowledge the message delivery from RabbitMQ by sending a
        Basic.Ack RPC method for the delivery tag.
        :param int delivery_tag: The delivery tag from the Basic.Deliver frame
        """
        LOGGER.info('Acknowledging message %s', delivery_tag)
        self._channel.basic_ack(delivery_tag)
    def stop_consuming(self) -> None:
        """Tell RabbitMQ that you would like to stop consuming by sending the
        Basic.Cancel RPC command.
        """
        if self._channel:
            LOGGER.info('Sending a Basic.Cancel RPC command to RabbitMQ')
            cb = functools.partial(
                self.on_cancelok, userdata=self._consumer_tag)
            self._channel.basic_cancel(self._consumer_tag, cb)
    def on_cancelok(self, _unused_frame, userdata) -> None:
        """This method is invoked by pika when RabbitMQ acknowledges the
        cancellation of a consumer. At this point we will close the channel.
        This will invoke the on_channel_closed method once the channel has been
        closed, which will in-turn close the connection.
        :param pika.frame.Method _unused_frame: The Basic.CancelOk frame
        :param str|unicode userdata: Extra user data (consumer tag)
        """
        self._consuming = False
        LOGGER.info(
            'RabbitMQ acknowledged the cancellation of the consumer: %s',
            userdata)
        self.close_channel()
    def close_channel(self) -> None:
        """Call to close the channel with RabbitMQ cleanly by issuing the
        Channel.Close RPC command.
        """
        LOGGER.info('Closing the channel')
        self._channel.close()
    def run(self) -> None:
        """Run the example consumer by connecting to RabbitMQ and then
        starting the IOLoop to block and allow the SelectConnection to operate.
        """
        self._connection = self.connect()
        self._connection.ioloop.start()
    def stop(self) -> None:
        """Cleanly shutdown the connection to RabbitMQ by stopping the consumer
        with RabbitMQ. When RabbitMQ confirms the cancellation, on_cancelok
        will be invoked by pika, which will then closing the channel and
        connection. The IOLoop is started again because this method is invoked
        when CTRL-C is pressed raising a KeyboardInterrupt exception. This
        exception stops the IOLoop which needs to be running for pika to
        communicate with RabbitMQ. All of the commands issued prior to starting
        the IOLoop will be buffered but not processed.
        """
        if not self._closing:
            self._closing = True
            LOGGER.info('Stopping')
            if self._consuming:
                self.stop_consuming()
                self._connection.ioloop.start()
            else:
                self._connection.ioloop.stop()
            LOGGER.info('Stopped')
class ReconnectingExampleConsumer(object):
    """Supervisor that keeps an :class:`ExampleConsumer` running.

    Runs the nested consumer in a loop and, whenever the consumer signals
    that a reconnect is necessary, restarts it after a back-off delay that
    grows by one second per failed attempt (capped at 30 seconds) and
    resets to zero once consuming succeeds again.
    """
    def __init__(self, amqp_url):
        """Remember the broker URL and build the first consumer."""
        self._reconnect_delay = 0
        self._amqp_url = amqp_url
        self._consumer = ExampleConsumer(self._amqp_url)
    def run(self):
        """Run the consumer until interrupted, restarting it on failures."""
        while True:
            try:
                self._consumer.run()
            except KeyboardInterrupt:
                self._consumer.stop()
                return
            self._maybe_reconnect()
    def _maybe_reconnect(self):
        """Restart the consumer (after a back-off) if it asked for one."""
        if not self._consumer.should_reconnect:
            return
        self._consumer.stop()
        delay = self._get_reconnect_delay()
        LOGGER.info('Reconnecting after %d seconds', delay)
        time.sleep(delay)
        self._consumer = ExampleConsumer(self._amqp_url)
    def _get_reconnect_delay(self):
        """Advance and return the back-off delay in seconds (0..30)."""
        if self._consumer.was_consuming:
            self._reconnect_delay = 0
        else:
            self._reconnect_delay = min(self._reconnect_delay + 1, 30)
        return self._reconnect_delay
def main():
    """Configure verbose logging and run the reconnecting consumer."""
    logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
    consumer = ReconnectingExampleConsumer(
        'amqp://guest:guest@localhost:5672/%2F')
    consumer.run()
if __name__ == '__main__':
main()
| 38.151927 | 80 | 0.666924 |
807befa472c0c217bf2e836321cbf7d422d74591 | 1,685 | py | Python | google-cloud-eventarc/synth.py | teamapp/google-cloud-ruby | 4687c69c70782661a9be1f418693780fd95ddff1 | [
"Apache-2.0"
] | null | null | null | google-cloud-eventarc/synth.py | teamapp/google-cloud-ruby | 4687c69c70782661a9be1f418693780fd95ddff1 | [
"Apache-2.0"
] | null | null | null | google-cloud-eventarc/synth.py | teamapp/google-cloud-ruby | 4687c69c70782661a9be1f418693780fd95ddff1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.ruby as ruby
import logging
logging.basicConfig(level=logging.DEBUG)

# Generate the Ruby client library for the Eventarc v1 API using the
# microgenerator, passing the gem metadata through as generator options.
gapic = gcp.GAPICMicrogenerator()
library = gapic.ruby_library(
    "eventarc", "v1",
    proto_path="google/cloud/eventarc/v1",
    generator_args={
        "ruby-cloud-gem-name": "google-cloud-eventarc",
        "ruby-cloud-title": "Eventarc",
        "ruby-cloud-description": "Eventarc lets you asynchronously deliver events from Google services, SaaS, and your own apps using loosely coupled services that react to state changes. Eventarc requires no infrastructure management — you can optimize productivity and costs while building a modern, event-driven solution.",
        "ruby-cloud-env-prefix": "EVENTARC",
        "ruby-cloud-wrapper-of": "v1:0.0",
        "ruby-cloud-product-url": "https://cloud.google.com/eventarc/",
        "ruby-cloud-api-id": "eventarc.googleapis.com",
        "ruby-cloud-api-shortname": "eventarc",
    }
)

# Copy the generated sources into this repository, merging with the
# Ruby-specific global merge strategy (preserves handwritten sections).
s.copy(library, merge=ruby.global_merge)
| 41.097561 | 327 | 0.731751 |
dfed85f72ff9c5df6846a981b7594f956bb0db63 | 2,407 | py | Python | recipe_scrapers/meljoulwan.py | gloriousDan/recipe-scrapers | 4e11b04db92abe11b75d373a147cc566629f265b | [
"MIT"
] | null | null | null | recipe_scrapers/meljoulwan.py | gloriousDan/recipe-scrapers | 4e11b04db92abe11b75d373a147cc566629f265b | [
"MIT"
] | null | null | null | recipe_scrapers/meljoulwan.py | gloriousDan/recipe-scrapers | 4e11b04db92abe11b75d373a147cc566629f265b | [
"MIT"
] | null | null | null | import re
from ._abstract import AbstractScraper
from ._utils import get_minutes, get_yields
class Meljoulwan(AbstractScraper):
    """Scraper for recipes published on meljoulwan.com.

    All parsing is keyed off the site's CSS class names; if the site's
    markup changes, the selectors below must be revisited. Any method may
    raise ``AttributeError`` if the expected element or regex match is
    missing from the page.
    """

    @classmethod
    def host(cls):
        """Host name this scraper is registered for."""
        return "meljoulwan.com"

    def author(self):
        """Return the post author (first <span> inside div.post-author)."""
        return (
            self.soup.find("div", {"class": "post-author"})
            .findChild("span")
            .get_text()
            .strip()
        )

    def title(self):
        """Return the recipe title (first <h2> inside div.recipe-post)."""
        return (
            self.soup.find("div", {"class": "recipe-post"})
            .findChild("h2")
            .get_text()
            .strip()
        )

    def category(self):
        """Return comma-joined category names, excluding the generic "Blog"."""
        # Fix: the attrs filter was the SET {"class", "post-category"}
        # (comma instead of colon), which made BeautifulSoup match either
        # class name instead of requiring class="post-category".
        links = (
            self.soup.find("div", {"class": "post-info"})
            .findChild("div", {"class": "post-category"})
            .findChildren("a")
        )
        categories = [a.get_text() for a in links if a.get_text() != "Blog"]
        return ",".join(categories)

    def total_time(self):
        """Return cook/total time in minutes, parsed from the recipe intro.

        Matches e.g. "Cook 30 minutes" or "Total time: 40-45 min".
        """
        infostring = (
            self.soup.find("div", {"class": "recipe-copy"}).find("em").get_text()
        )
        matches = re.search(
            r"(Cook|Total time:)\s(\d+\-?\d+)\s\bmin(utes)?", infostring
        )
        return get_minutes(matches.group(2))

    def yields(self):
        """Return the number of servings, parsed from e.g. "Serves 4-6"."""
        infostring = (
            self.soup.find("div", {"class": "recipe-copy"})
            .find("em")
            .get_text()
            .strip()
        )
        # Accepts both ASCII hyphen and en-dash in ranges like "4-6"/"4–6".
        matches = re.search(r"^Serves\s(\d+\-?\–?\d+)", infostring)
        return get_yields(matches.group(1))

    def image(self):
        """Return the recipe image URL via the schema.org metadata."""
        return self.schema.image()

    def ingredients(self):
        """Return a flat list of ingredient strings from all tabbed lists."""
        lists = self.soup.find("div", {"class": "tabbed-list"}).findChildren("ul")
        return [
            item.get_text().strip()
            for ul in lists
            for item in ul.findChildren("li")
        ]

    def instructions(self):
        """Return the numbered instruction steps joined by newlines."""
        # Fix: the attrs filter was the SET {"class", "num-list-group"}
        # (comma instead of colon) — see category() above.
        groups = self.soup.find("div", {"class": "numbered-list"}).findChildren(
            "div", {"class": "num-list-group"}
        )
        steps = [
            "{}. {}".format(
                number,
                group.findChild("div", {"class": "num-list-copy"})
                .get_text()
                .strip(),
            )
            for number, group in enumerate(groups, start=1)
        ]
        return "\n".join(steps)
de2ba11bf8bbd61c7fd9256c51e91542ed514075 | 4,034 | bzl | Python | tools/project/build_defs.bzl | TokTok/toktok-stack | fe60e13d42d2b45e13ea4a9448a49925bc6a6d89 | [
"CNRI-Python",
"AML",
"Xnet",
"Linux-OpenIB",
"X11"
] | 12 | 2016-10-16T09:36:50.000Z | 2021-12-02T03:59:24.000Z | tools/project/build_defs.bzl | TokTok/toktok-stack | fe60e13d42d2b45e13ea4a9448a49925bc6a6d89 | [
"CNRI-Python",
"AML",
"Xnet",
"Linux-OpenIB",
"X11"
] | 218 | 2016-12-31T23:35:51.000Z | 2022-03-30T15:55:07.000Z | tools/project/build_defs.bzl | TokTok/toktok-stack | fe60e13d42d2b45e13ea4a9448a49925bc6a6d89 | [
"CNRI-Python",
"AML",
"Xnet",
"Linux-OpenIB",
"X11"
] | 6 | 2016-09-20T12:43:36.000Z | 2021-08-06T17:39:52.000Z | """Defines a project macro used in every TokTok sub-project.
It checks constraints such as the use of the correct license and the presence
and correctness of the license text.
"""
def _haskell_travis_impl(ctx):
    """Rule implementation: render the Travis CI template for one package.

    Expands the `.travis-expected.yml` output from the template declared on
    the rule, substituting the package name, and returns it as both the
    default output and a runfile.
    """
    ctx.actions.expand_template(
        template = ctx.file._template,
        output = ctx.outputs.source_file,
        substitutions = {
            "{PACKAGE}": ctx.attr.package,
        },
    )
    outs = [ctx.outputs.source_file]
    return DefaultInfo(files = depset(outs), runfiles = ctx.runfiles(files = outs))
# Rule that generates the expected .travis.yml for a Haskell package from
# the shared template; its output is diffed against the checked-in
# .travis.yml by the travis_test target in _haskell_project().
_haskell_travis = rule(
    attrs = {
        # Haskell package name substituted for {PACKAGE} in the template.
        "package": attr.string(mandatory = True),
        "_template": attr.label(
            default = Label("//tools/project:haskell_travis.yml.in"),
            allow_single_file = True,
        ),
    },
    outputs = {"source_file": ".travis-expected.yml"},
    implementation = _haskell_travis_impl,
)
def _haskell_project(standard_travis = True):
    """Adds Haskell-specific checks: cabal consistency and (optionally) Travis.

    Only called from project() for packages named "hs-*" that contain a
    .cabal file.

    Args:
      standard_travis: if True, also generate the expected .travis.yml and
        diff it against the checked-in one.
    """
    # Strip the "hs-" prefix (guaranteed by the caller's startswith check)
    # to obtain the Haskell package name.
    haskell_package = native.package_name()[3:]
    cabal_file = haskell_package + ".cabal"
    # Check that the .cabal file and BUILD.bazel agree.
    native.sh_test(
        name = "cabal_test",
        size = "small",
        srcs = ["//tools/project:cabal_test.py"],
        args = [
            "$(location BUILD.bazel)",
            "$(location %s)" % cabal_file,
        ],
        data = [
            "BUILD.bazel",
            cabal_file,
        ],
    )

    if standard_travis:
        _haskell_travis(
            name = "travis",
            package = haskell_package,
        )
        # Diff the generated Travis config against the checked-in one.
        native.sh_test(
            name = "travis_test",
            size = "small",
            srcs = ["//tools/project:diff_test.sh"],
            data = [
                ".travis.yml",
                ":travis",
            ],
            args = [
                "$(location .travis.yml)",
                "$(location :travis)",
            ],
        )
def project(license = "gpl3", standard_travis = False):
    """Adds some checks to make sure the project is uniform.

    Declares license_test, readme_test and settings_test for the calling
    package, plus Haskell-specific checks for "hs-*" packages containing a
    .cabal file.

    Args:
      license: key of the canonical license text under //tools (LICENSE.<key>).
      standard_travis: forwarded to _haskell_project() for Haskell packages.
    """
    # LICENSE must be byte-identical to the canonical text in //tools.
    native.sh_test(
        name = "license_test",
        size = "small",
        srcs = ["//tools/project:diff_test.sh"],
        args = [
            "$(location LICENSE)",
            "$(location //tools:LICENSE.%s)" % license,
        ],
        data = [
            "LICENSE",
            "//tools:LICENSE.%s" % license,
        ],
    )
    native.sh_test(
        name = "readme_test",
        size = "small",
        srcs = ["//tools/project:readme_test.sh"],
        args = ["$(location README.md)"],
        data = ["README.md"],
    )
    # The GitHub settings file must reference the repository name, which is
    # the package name with underscores replaced by dashes.
    native.sh_test(
        name = "settings_test",
        size = "small",
        srcs = ["//tools/project:settings_test.sh"],
        args = [
            "$(location .github/settings.yml)",
            # qTox is an exception. Maybe we should rename the submodule?
            "qTox" if native.package_name() == "qtox" else native.package_name().replace("_", "-"),
        ],
        data = [".github/settings.yml"],
    )
    if (native.package_name().startswith("hs-") and
        any([f for f in native.glob(["*"]) if f.endswith(".cabal")])):
        _haskell_project(
            standard_travis = standard_travis,
        )
def workspace(projects):
    """Adds workspace-wide checks over all sub-projects.

    Declares a git submodule consistency test plus test suites that bundle
    the per-project license/readme/settings tests created by project().

    Args:
      projects: list of package names (one per git submodule).
    """
    # Check that .gitmodules and git-remotes list exactly these projects.
    native.sh_test(
        name = "git_modules_test",
        size = "small",
        srcs = [":git_modules_test.pl"],
        args = [
            "$(location gitmodules)",
            "$(location git-remotes)",
        ] + projects,
        data = [
            "gitmodules",
            "git-remotes",
        ],
    )
    native.test_suite(
        name = "license_tests",
        tests = ["//%s:license_test" % p for p in projects],
    )
    native.test_suite(
        name = "readme_tests",
        tests = ["//%s:readme_test" % p for p in projects],
    )
    native.test_suite(
        name = "settings_tests",
        tests = ["//%s:settings_test" % p for p in projects],
    )
    # Umbrella suite running every per-project uniformity check.
    native.test_suite(
        name = "workspace_tests",
        tests = [
            ":license_tests",
            ":readme_tests",
            ":settings_tests",
        ],
    )
9b46e0c5391c4985bb1d3156bd6c487c8bfbd7bf | 1,687 | py | Python | cleartext/scripts/eval.py | cschmidat/cleartext | c99c24792c06aba5b3d3848a4a74768b260188e5 | [
"MIT"
] | 1 | 2020-07-09T19:51:18.000Z | 2020-07-09T19:51:18.000Z | cleartext/scripts/eval.py | cschmidat/cleartext | c99c24792c06aba5b3d3848a4a74768b260188e5 | [
"MIT"
] | null | null | null | cleartext/scripts/eval.py | cschmidat/cleartext | c99c24792c06aba5b3d3848a4a74768b260188e5 | [
"MIT"
] | 2 | 2020-06-26T13:55:17.000Z | 2020-07-09T19:16:23.000Z | #!/usr/bin/env python3
import click
import warnings
from .. import PROJ_ROOT, utils
from cleartext.data import WikiSmall, WikiLarge
from cleartext.pipeline import Pipeline
@click.command()
@click.argument('name', default='pl', required=False, type=str)
@click.argument('dataset', default='wikilarge', type=str)
@click.option('-b', '--beam_size', default=10, type=int, help='Beam size')
@click.option('-l', '--max_len', required=False, type=str, help='Max length')
@click.option('-a', '--alpha', default=0.5, type=float, help='Beam search regularization')
# Fix: batch_size previously also registered the short flag '-b', which
# collides with --beam_size's '-b' and made one of the two unreachable on
# the command line. batch_size now only has its long form.
@click.option('--batch_size', default=64, type=int, help='Batch size')
def main(name: str, dataset: str, beam_size: int, max_len: str, alpha: float, batch_size: int):
    """Evaluate a serialized simplification pipeline on a benchmark dataset.

    NAME is the directory under models/ holding the serialized pipeline;
    DATASET is either "wikismall" or "wikilarge".
    """
    # NOTE(review): max_len is declared type=str but reads like a length —
    # confirm whether Pipeline.evaluate expects a string here.
    warnings.filterwarnings('ignore', category=DeprecationWarning, lineno=6)

    # parse/validate arguments
    if dataset.lower() == 'wikismall':
        dataset = WikiSmall
    elif dataset.lower() == 'wikilarge':
        dataset = WikiLarge
    else:
        raise ValueError(f'Unknown dataset "{dataset}"')

    # deserialize pipeline
    MODELS_ROOT = PROJ_ROOT / 'models'
    path = MODELS_ROOT / name
    print(f'Loading {name}')
    pipeline = Pipeline.deserialize(path)
    print()

    # load data (only validation and test sets)
    print(f'Loading {dataset.__name__} data')
    _, _, test_len = pipeline.load_data(dataset, 1000)
    print(f'Loaded {test_len} test examples')
    pipeline.prepare_data(batch_size)
    print()

    # evaluate and save/print results
    print('\nEvaluating model')
    _, _, _, bleu = pipeline.evaluate(beam_size, max_len, alpha)
    print(f'\tBLEU score:\t{bleu:.3f}\t')
if __name__ == '__main__':
main()
| 33.74 | 95 | 0.684647 |
7cfa08c399bba40bee122adc5e849d8e0efaa03a | 4,922 | py | Python | nonbonded/backend/api/dev/endpoints/plotly.py | SimonBoothroyd/nonbonded | 3efbb7d943d936b47248975f9ad0d8a006ea8684 | [
"MIT"
] | 5 | 2020-05-11T18:25:00.000Z | 2022-01-27T10:55:09.000Z | nonbonded/backend/api/dev/endpoints/plotly.py | SimonBoothroyd/nonbonded | 3efbb7d943d936b47248975f9ad0d8a006ea8684 | [
"MIT"
] | 88 | 2020-06-02T14:40:05.000Z | 2022-03-02T09:20:39.000Z | nonbonded/backend/api/dev/endpoints/plotly.py | SimonBoothroyd/nonbonded | 3efbb7d943d936b47248975f9ad0d8a006ea8684 | [
"MIT"
] | null | null | null | import logging
from typing import Dict, List, Tuple
from fastapi import APIRouter, Depends
from pydantic import BaseModel
from sqlalchemy.orm import Session
from typing_extensions import Literal
from nonbonded.backend.api import depends
from nonbonded.backend.database.crud.datasets import DataSetCRUD
from nonbonded.backend.database.crud.projects import BenchmarkCRUD, OptimizationCRUD
from nonbonded.backend.database.crud.results import (
BenchmarkResultCRUD,
OptimizationResultCRUD,
)
from nonbonded.library.models.plotly import Figure
from nonbonded.library.models.projects import Benchmark, Optimization
from nonbonded.library.models.results import BenchmarkResult, OptimizationResult
from nonbonded.library.plotting.plotly.benchmark import (
plot_overall_statistics,
plot_scatter_results,
)
from nonbonded.library.plotting.plotly.optimization import (
plot_objective_per_iteration,
plot_target_rmse,
)
from nonbonded.library.statistics.statistics import StatisticType
logger = logging.getLogger(__name__)
router = APIRouter()
class SubStudyId(BaseModel):
    """Composite identifier for a sub-study (optimization or benchmark)
    within a particular study of a project."""

    project_id: str
    study_id: str
    sub_study_id: str
class PlotlyEndpoints:
    """REST endpoints that render optimization/benchmark results as plotly
    figures. All routes are registered on the module-level ``router``."""

    @staticmethod
    def _get_optimization_results(
        db: Session, project_id: str, study_id: str
    ) -> Tuple[List[Optimization], List[OptimizationResult]]:
        """Load every optimization in a study together with its result.

        The returned lists are parallel; a result entry may be ``None``
        when the corresponding optimization has no stored result.
        """
        optimizations = OptimizationCRUD.read_all(
            db, project_id=project_id, study_id=study_id
        )
        results = [
            OptimizationResultCRUD.read(
                db,
                project_id=project_id,
                study_id=study_id,
                sub_study_id=optimization.id,
            )
            for optimization in optimizations
        ]
        return optimizations, results

    @staticmethod
    def _get_benchmark_results(
        db: Session, project_id: str, study_id: str
    ) -> Tuple[List[Benchmark], List[BenchmarkResult]]:
        """Load every benchmark in a study together with its result.

        The returned lists are parallel; a result entry may be ``None``
        when the corresponding benchmark has no stored result.
        """
        benchmarks = BenchmarkCRUD.read_all(
            db, project_id=project_id, study_id=study_id
        )
        results = [
            BenchmarkResultCRUD.read(
                db,
                project_id=project_id,
                study_id=study_id,
                sub_study_id=benchmark.id,
            )
            for benchmark in benchmarks
        ]
        return benchmarks, results

    @staticmethod
    @router.get("/optimizations/objective")
    async def get_optimization_objective_function(
        projectid: str,
        studyid: str,
        db: Session = Depends(depends.get_db),
    ) -> Figure:
        """Plot the objective function per iteration for every optimization
        in the study."""
        return plot_objective_per_iteration(
            *PlotlyEndpoints._get_optimization_results(db, projectid, studyid)
        )

    @staticmethod
    @router.get("/optimizations/rmse")
    async def get_optimization_rmse(
        projectid: str,
        studyid: str,
        db: Session = Depends(depends.get_db),
    ) -> Dict[str, Dict[str, Dict[str, Figure]]]:
        """Plot initial-vs-final RMSE per target for each optimization.

        Returns a mapping of optimization id -> target id -> figure;
        optimizations with no stored target results are skipped.
        """
        optimizations, results = PlotlyEndpoints._get_optimization_results(
            db, projectid, studyid
        )
        figures = {}
        for optimization, result in zip(optimizations, results):
            if result is None or len(result.target_results) == 0:
                continue
            targets_by_id = {target.id: target for target in optimization.targets}
            # target_results is keyed by iteration; the largest key is the
            # final iteration and key 0 holds the initial results.
            final_iteration = sorted(result.target_results)[-1]
            figures[optimization.id] = {
                # The same target is passed twice: once labelled "Initial",
                # once "Final", paired with the respective iteration results.
                target_id: plot_target_rmse(
                    [targets_by_id[target_id], targets_by_id[target_id]],
                    [initial_result, result.target_results[final_iteration][target_id]],
                    ["Initial", "Final"],
                )
                for target_id, initial_result in result.target_results[0].items()
            }
        return figures

    @staticmethod
    @router.get("/benchmarks/statistics/{statistic_type}")
    async def get_overall_benchmark_statistics(
        projectid: str,
        studyid: str,
        statistic_type: Literal["rmse"],
        db: Session = Depends(depends.get_db),
    ) -> Figure:
        """Plot one overall statistic (currently only RMSE) across all
        benchmarks in the study."""
        return plot_overall_statistics(
            *PlotlyEndpoints._get_benchmark_results(db, projectid, studyid),
            # Map the lowercase path parameter onto the StatisticType enum.
            StatisticType[statistic_type.upper()]
        )

    @staticmethod
    @router.get("/benchmarks/scatter")
    async def get_benchmark_scatter_results(
        projectid: str,
        studyid: str,
        db: Session = Depends(depends.get_db),
    ) -> Dict[str, Figure]:
        """Plot estimated-vs-reference scatter figures for the study's
        benchmarks, loading each referenced test data set exactly once."""
        benchmarks, results = PlotlyEndpoints._get_benchmark_results(
            db, projectid, studyid
        )
        # Deduplicate the test-set ids shared between benchmarks before
        # hitting the database.
        data_set_ids = {
            test_set_id
            for benchmark in benchmarks
            for test_set_id in benchmark.test_set_ids
        }
        data_sets = [DataSetCRUD.read(db, data_set_id) for data_set_id in data_set_ids]
        return plot_scatter_results(benchmarks, results, data_sets)
c50880948fb851b92583605512e250ad0d461cf7 | 189 | py | Python | bert/dataset/__init__.py | 292916808/MolCloze | 1a900ee3cc641499df60e171777f8f2595e84e88 | [
"Apache-2.0"
] | null | null | null | bert/dataset/__init__.py | 292916808/MolCloze | 1a900ee3cc641499df60e171777f8f2595e84e88 | [
"Apache-2.0"
] | null | null | null | bert/dataset/__init__.py | 292916808/MolCloze | 1a900ee3cc641499df60e171777f8f2595e84e88 | [
"Apache-2.0"
] | null | null | null | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# author: chinshin
# datetime: 2020/4/20 15:28
from bert.dataset.dataset import MolBertDataset
from bert.dataset.vocab import WordVocab
| 27 | 48 | 0.714286 |
15bba84f159f507f9ba20af8a0d6cd7dc6cfc7a0 | 264 | py | Python | temp.py | pombredanne/xpdf_python | a247601e7f15d8775fbfec369a805954eb000808 | [
"BSD-3-Clause"
] | 18 | 2017-08-07T18:56:59.000Z | 2022-02-11T18:35:30.000Z | temp.py | pombredanne/xpdf_python | a247601e7f15d8775fbfec369a805954eb000808 | [
"BSD-3-Clause"
] | 5 | 2018-04-05T20:49:34.000Z | 2020-08-21T06:41:59.000Z | temp.py | pombredanne/xpdf_python | a247601e7f15d8775fbfec369a805954eb000808 | [
"BSD-3-Clause"
] | 15 | 2017-08-29T13:49:31.000Z | 2021-03-22T13:58:46.000Z | import os
# Download the xpdf 3.04 macOS binaries into /tmp and install the tools and
# the sample configuration under /usr/local.
command = "cd /tmp/ && wget ftp://ftp.foolabs.com/pub/xpdf/xpdfbin-mac-3.04.tar.gz && tar -xvzf xpdfbin-mac-3.04.tar.gz && cp xpdfbin-mac-3.04/bin64/* /usr/local/bin && cp xpdfbin-mac-3.04/doc/sample-xpdfrc /usr/local/etc/xpdfrc"
os.system(command)
5cf60d9c9c163889c640f4202fdd1effa674e5a4 | 120 | py | Python | exfi/__init__.py | jlanga/exfi | 6cd28423213aba0ab8ac191e002396ddc84c4be3 | [
"MIT"
] | 2 | 2017-11-02T11:31:41.000Z | 2020-11-28T07:42:27.000Z | exfi/__init__.py | jlanga/exfi | 6cd28423213aba0ab8ac191e002396ddc84c4be3 | [
"MIT"
] | 36 | 2017-04-26T09:36:54.000Z | 2021-04-16T12:35:52.000Z | exfi/__init__.py | jlanga/exon_finder | 6cd28423213aba0ab8ac191e002396ddc84c4be3 | [
"MIT"
] | 2 | 2017-07-23T23:03:36.000Z | 2017-09-29T15:30:55.000Z | """
Compute the splice graph from a transcriptome and raw genomic reads using bloom
filters.
"""
__version__ = '1.5.6'
| 17.142857 | 79 | 0.733333 |
751a78d14d2db343d13ca5b16999a84f667e0d93 | 11,017 | py | Python | src/realsense-ransac/Python_scripts/test_stream.py | Lucas-SEB/Sewer-Pipes-Measurement | b449a8b5110cedcd93b9e7f3a63d044bd299ba9f | [
"Apache-2.0"
] | 2 | 2022-02-01T12:47:25.000Z | 2022-03-09T13:47:36.000Z | src/realsense-ransac/Python_scripts/test_stream.py | Lucas-SEB/Sewer-Pipes-Measurement | b449a8b5110cedcd93b9e7f3a63d044bd299ba9f | [
"Apache-2.0"
] | null | null | null | src/realsense-ransac/Python_scripts/test_stream.py | Lucas-SEB/Sewer-Pipes-Measurement | b449a8b5110cedcd93b9e7f3a63d044bd299ba9f | [
"Apache-2.0"
] | 1 | 2021-12-16T22:27:50.000Z | 2021-12-16T22:27:50.000Z | # License: Apache 2.0. See LICENSE file in root directory.
# Copyright(c) 2015-2017 Intel Corporation. All Rights Reserved.
"""
OpenCV and Numpy Point cloud Software Renderer
This sample is mostly for demonstration and educational purposes.
It really doesn't offer the quality or performance that can be
achieved with hardware acceleration.
Usage:
------
Mouse:
Drag with left button to rotate around pivot (thick small axes),
with right button to translate and the wheel to zoom.
Keyboard:
[p] Pause
[r] Reset View
[d] Cycle through decimation values
[z] Toggle point scaling
[c] Toggle color source
[s] Save PNG (./out.png)
[e] Export points to ply (./out.ply)
[q\ESC] Quit
"""
import math
import time
import cv2
import numpy as np
import pyrealsense2.pyrealsense2 as rs
class AppState:
    """Mutable viewer state: camera orbit parameters and UI toggles.

    The mouse/keyboard handlers mutate a single shared instance of this
    class; the render loop reads it every frame.
    """

    def __init__(self, *args, **kwargs):
        self.WIN_NAME = 'RealSense'
        # Orbit angles in radians; start slightly tilted so the grid is visible.
        self.pitch = math.radians(-10)
        self.yaw = math.radians(-15)
        # Camera offset from the world origin (view space, metres-ish units).
        self.translation = np.array([0, 0, -1], dtype=np.float32)
        self.distance = 2
        self.prev_mouse = 0, 0
        # Pressed state of [left, right, middle] buttons.
        self.mouse_btns = [False, False, False]
        self.paused = False
        self.decimate = 1          # decimation filter exponent (0..2)
        self.scale = True          # scale points when decimated
        self.color = True          # texture points from the color stream

    def reset(self):
        """Return the virtual camera to its default pose."""
        self.pitch = 0
        self.yaw = 0
        self.distance = 2
        self.translation[:] = 0, 0, -1

    @property
    def rotation(self):
        """Combined yaw-then-pitch rotation matrix, float32, shape (3, 3)."""
        pitch_mat, _ = cv2.Rodrigues((self.pitch, 0, 0))
        yaw_mat, _ = cv2.Rodrigues((0, self.yaw, 0))
        combined = np.dot(yaw_mat, pitch_mat)
        return combined.astype(np.float32)

    @property
    def pivot(self):
        """Point the camera orbits around: translation pushed out by distance."""
        offset = np.array((0, 0, self.distance), dtype=np.float32)
        return self.translation + offset
state = AppState()

# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()

pipeline_wrapper = rs.pipeline_wrapper(pipeline)
pipeline_profile = config.resolve(pipeline_wrapper)
device = pipeline_profile.get_device()

# This demo textures the point cloud from the color stream, so require one.
found_rgb = False
for s in device.sensors:
    if s.get_info(rs.camera_info.name) == 'RGB Camera':
        found_rgb = True
        break
if not found_rgb:
    print("The demo requires Depth camera with Color sensor")
    exit(0)

config.enable_stream(rs.stream.depth, rs.format.z16, 30)
config.enable_stream(rs.stream.color, rs.format.bgr8, 30)

# Start streaming
pipeline.start(config)

# Get stream profile and camera intrinsics.
# Fix: query the DEPTH stream profile here (the original passed
# rs.stream.color), so that `depth_profile`/`depth_intrinsics` actually
# describe the depth stream and the initial window size matches it, as in
# the upstream librealsense opencv_pointcloud_viewer example.
profile = pipeline.get_active_profile()
depth_profile = rs.video_stream_profile(profile.get_stream(rs.stream.depth))
depth_intrinsics = depth_profile.get_intrinsics()
w, h = depth_intrinsics.width, depth_intrinsics.height

# Processing blocks
pc = rs.pointcloud()
decimate = rs.decimation_filter()
decimate.set_option(rs.option.filter_magnitude, 2 ** state.decimate)
colorizer = rs.colorizer()
def mouse_cb(event, x, y, flags, param):
    """OpenCV mouse callback: updates the shared view ``state``.

    Left drag rotates around the pivot, right drag pans, middle drag and
    the wheel zoom.  Mutates the module-level ``state`` and reads the
    module-level ``out`` image for its dimensions.
    """
    # Track button press/release so EVENT_MOUSEMOVE knows which drag is active.
    if event == cv2.EVENT_LBUTTONDOWN:
        state.mouse_btns[0] = True

    if event == cv2.EVENT_LBUTTONUP:
        state.mouse_btns[0] = False

    if event == cv2.EVENT_RBUTTONDOWN:
        state.mouse_btns[1] = True

    if event == cv2.EVENT_RBUTTONUP:
        state.mouse_btns[1] = False

    if event == cv2.EVENT_MBUTTONDOWN:
        state.mouse_btns[2] = True

    if event == cv2.EVENT_MBUTTONUP:
        state.mouse_btns[2] = False

    if event == cv2.EVENT_MOUSEMOVE:

        h, w = out.shape[:2]
        # Mouse delta since the previous event, in pixels.
        dx, dy = x - state.prev_mouse[0], y - state.prev_mouse[1]

        if state.mouse_btns[0]:
            # Left drag: orbit. Full window width ~= 2 radians of yaw.
            state.yaw += float(dx) / w * 2
            state.pitch -= float(dy) / h * 2

        elif state.mouse_btns[1]:
            # Right drag: pan in the camera's own axes.
            dp = np.array((dx / w, dy / h, 0), dtype=np.float32)
            state.translation -= np.dot(state.rotation, dp)

        elif state.mouse_btns[2]:
            # Middle drag: dolly; magnitude from drag length, sign from dy.
            dz = math.sqrt(dx**2 + dy**2) * math.copysign(0.01, -dy)
            state.translation[2] += dz
            state.distance -= dz

    if event == cv2.EVENT_MOUSEWHEEL:
        # Wheel: fixed-step dolly; `flags` carries the wheel direction sign.
        dz = math.copysign(0.1, flags)
        state.translation[2] += dz
        state.distance -= dz

    # Remembered unconditionally so the next move event gets a fresh delta.
    state.prev_mouse = (x, y)
# Create the display window at the stream resolution and hook up the mouse.
cv2.namedWindow(state.WIN_NAME, cv2.WINDOW_AUTOSIZE)
cv2.resizeWindow(state.WIN_NAME, w, h)
cv2.setMouseCallback(state.WIN_NAME, mouse_cb)
def project(v):
    """Project an (N, 3) array of view-space points to 2-D pixel coords.

    Points in front of the near plane map to pixel positions in the
    module-level ``out`` image; points behind it become NaN.
    """
    rows, cols = out.shape[:2]
    aspect = float(rows) / cols

    # Perspective divide; a zero depth yields inf/nan which we silence here
    # and discard via the near-plane test below.
    with np.errstate(divide='ignore', invalid='ignore'):
        normalized = v[:, :-1] / v[:, -1, np.newaxis]
        proj = normalized * (cols * aspect, rows) + (cols / 2.0, rows / 2.0)

    # Near clipping: invalidate anything closer than 3 cm.
    znear = 0.03
    proj[v[:, 2] < znear] = np.nan
    return proj
def view(v):
    """Apply the current camera transform: rotate about the pivot, then pan."""
    pivot = state.pivot
    rotated = np.dot(v - pivot, state.rotation)
    return rotated + pivot - state.translation
def line3d(out, pt1, pt2, color=(0x80, 0x80, 0x80), thickness=1):
    """Draw the 2-D projection of the 3-D segment pt1->pt2 onto ``out``."""
    start = project(pt1.reshape(-1, 3))[0]
    end = project(pt2.reshape(-1, 3))[0]
    # Skip segments with an endpoint behind the near plane (NaN projection).
    if np.isnan(start).any() or np.isnan(end).any():
        return
    start = tuple(start.astype(int))
    end = tuple(end.astype(int))
    bounds = (0, 0, out.shape[1], out.shape[0])
    # Clip against the image rectangle; cv2 returns the clipped endpoints.
    visible, start, end = cv2.clipLine(bounds, start, end)
    if visible:
        cv2.line(out, start, end, color, thickness, cv2.LINE_AA)
def grid(out, pos, rotation=np.eye(3), size=1, n=10, color=(0x80, 0x80, 0x80)):
    """Draw an n-by-n reference grid of side ``size`` on the x/z plane at pos."""
    pos = np.array(pos)
    step = size / float(n)
    half = 0.5 * size
    # First pass: lines running along z (drawn before the x-lines, so the
    # anti-aliased overdraw at intersections matches the original).
    for k in range(n + 1):
        x = -half + k * step
        line3d(out, view(pos + np.dot((x, 0, -half), rotation)),
               view(pos + np.dot((x, 0, half), rotation)), color)
    # Second pass: lines running along x.
    for k in range(n + 1):
        z = -half + k * step
        line3d(out, view(pos + np.dot((-half, 0, z), rotation)),
               view(pos + np.dot((half, 0, z), rotation)), color)
def axes(out, pos, rotation=np.eye(3), size=0.075, thickness=2):
    """Draw three coordinate-axis segments of length ``size`` at ``pos``.

    Each axis gets its own colour (channel order follows the output image).
    """
    z_tip = pos + np.dot((0, 0, size), rotation)
    y_tip = pos + np.dot((0, size, 0), rotation)
    x_tip = pos + np.dot((size, 0, 0), rotation)
    line3d(out, pos, z_tip, (0xff, 0, 0), thickness)
    line3d(out, pos, y_tip, (0, 0xff, 0), thickness)
    line3d(out, pos, x_tip, (0, 0, 0xff), thickness)
def frustum(out, intrinsics, color=(0x40, 0x40, 0x40)):
    """Draw the camera frustum: rays to, and rectangles at, depths 1, 3, 5."""
    origin = view([0, 0, 0])
    width, height = intrinsics.width, intrinsics.height

    for depth in range(1, 6, 2):
        def corner(px, py):
            # Deproject an image corner at this depth and draw its ray.
            p = rs.rs2_deproject_pixel_to_point(intrinsics, [px, py], depth)
            line3d(out, origin, view(p), color)
            return p

        # Corners in draw order: TL, TR, BR, BL (matches edge order below).
        corners = [corner(0, 0), corner(width, 0),
                   corner(width, height), corner(0, height)]

        # Connect consecutive corners, closing the rectangle BL -> TL.
        for a, b in zip(corners, corners[1:] + corners[:1]):
            line3d(out, view(a), view(b), color)
def pointcloud(out, verts, texcoords, color, painter=True):
    """Draw the textured point cloud into ``out``.

    :param out: (H, W, 3) uint8 destination image
    :param verts: (N, 3) float32 xyz vertices in camera space
    :param texcoords: (N, 2) float32 uv coords in [0, 1] into ``color``
    :param color: source image the points are textured from
    :param painter: if True, depth-sort points back-to-front so nearer
        points overwrite farther ones (painter's algorithm)
    """
    if painter:
        # Painter's algo, sort points from back to front
        # get reverse sorted indices by z (in view-space)
        # https://gist.github.com/stevenvo/e3dad127598842459b68
        v = view(verts)
        s = v[:, 2].argsort()[::-1]
        proj = project(v[s])
    else:
        proj = project(view(verts))

    if state.scale:
        # Compensate for the decimation filter's resolution reduction.
        proj *= 0.5**state.decimate

    h, w = out.shape[:2]

    # proj now contains 2d image coordinates
    # NOTE(review): the uint32 cast wraps negative/NaN coordinates to huge
    # values, which the bounds mask below then rejects — intentional, but
    # the (i >= 0) tests are vacuous for unsigned ints.
    j, i = proj.astype(np.uint32).T

    # create a mask to ignore out-of-bound indices
    im = (i >= 0) & (i < h)
    jm = (j >= 0) & (j < w)
    m = im & jm

    cw, ch = color.shape[:2][::-1]
    if painter:
        # sort texcoord with same indices as above
        # texcoords are [0..1] and relative to top-left pixel corner,
        # multiply by size and add 0.5 to center
        v, u = (texcoords[s] * (cw, ch) + 0.5).astype(np.uint32).T
    else:
        v, u = (texcoords * (cw, ch) + 0.5).astype(np.uint32).T
    # clip texcoords to image
    np.clip(u, 0, ch-1, out=u)
    np.clip(v, 0, cw-1, out=v)

    # perform uv-mapping: write the sampled colour of each in-bounds point
    out[i[m], j[m]] = color[u[m], v[m]]
# Render target reused across frames.
out = np.empty((h, w, 3), dtype=np.uint8)

while True:
    # Grab camera data
    if not state.paused:
        # Wait for a coherent pair of frames: depth and color
        # NOTE(review): frames are used without checking validity; a missed
        # color frame would make get_data() fail downstream — confirm whether
        # wait_for_frames() guarantees both streams here.
        frames = pipeline.wait_for_frames()

        depth_frame = frames.get_depth_frame()
        color_frame = frames.get_color_frame()

        depth_frame = decimate.process(depth_frame)

        # Grab new intrinsics (may be changed by decimation)
        depth_intrinsics = rs.video_stream_profile(
            depth_frame.profile).get_intrinsics()
        w, h = depth_intrinsics.width, depth_intrinsics.height

        depth_image = np.asanyarray(depth_frame.get_data())
        color_image = np.asanyarray(color_frame.get_data())

        # False-colour rendering of depth, used when state.color is off.
        depth_colormap = np.asanyarray(
            colorizer.colorize(depth_frame).get_data())

        if state.color:
            mapped_frame, color_source = color_frame, color_image
        else:
            mapped_frame, color_source = depth_frame, depth_colormap

        # Compute the point cloud and bind its texture coordinates to the
        # chosen source frame.
        points = pc.calculate(depth_frame)
        pc.map_to(mapped_frame)

        # Pointcloud data to arrays
        v, t = points.get_vertices(), points.get_texture_coordinates()
        verts = np.asanyarray(v).view(np.float32).reshape(-1, 3)  # xyz
        texcoords = np.asanyarray(t).view(np.float32).reshape(-1, 2)  # uv

    # Render
    now = time.time()

    out.fill(0)

    grid(out, (0, 0.5, 1), size=1, n=10)
    frustum(out, depth_intrinsics)
    axes(out, view([0, 0, 0]), state.rotation, size=0.1, thickness=1)

    if not state.scale or out.shape[:2] == (h, w):
        pointcloud(out, verts, texcoords, color_source)
    else:
        # Decimated stream: render at stream resolution, then upscale and
        # composite only the non-black pixels over the background.
        tmp = np.zeros((h, w, 3), dtype=np.uint8)
        pointcloud(tmp, verts, texcoords, color_source)
        tmp = cv2.resize(
            tmp, out.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)
        np.putmask(out, tmp > 0, tmp)

    # Show a thick pivot axes marker while any mouse button is held.
    if any(state.mouse_btns):
        axes(out, view(state.pivot), state.rotation, thickness=4)

    dt = time.time() - now

    # NOTE(review): 1.0/dt divides by zero if a frame renders faster than the
    # clock resolution — consider guarding with max(dt, 1e-6).
    cv2.setWindowTitle(
        state.WIN_NAME, "RealSense (%dx%d) %dFPS (%.2fms) %s" %
        (w, h, 1.0/dt, dt*1000, "PAUSED" if state.paused else ""))

    cv2.imshow(state.WIN_NAME, out)
    key = cv2.waitKey(1)

    # Keyboard controls (see module docstring for the full list).
    if key == ord("r"):
        state.reset()

    if key == ord("p"):
        state.paused ^= True

    if key == ord("d"):
        state.decimate = (state.decimate + 1) % 3
        decimate.set_option(rs.option.filter_magnitude, 2 ** state.decimate)

    if key == ord("z"):
        state.scale ^= True

    if key == ord("c"):
        state.color ^= True

    if key == ord("s"):
        cv2.imwrite('./out.png', out)

    if key == ord("e"):
        points.export_to_ply('./out.ply', mapped_frame)

    # Quit on ESC/q or when the window is closed by the user.
    if key in (27, ord("q")) or cv2.getWindowProperty(state.WIN_NAME, cv2.WND_PROP_AUTOSIZE) < 0:
        break

# Stop streaming
pipeline.stop()
| 30.019074 | 97 | 0.60806 |
343e16420e6431d306be87616a3c826eabdc0480 | 412 | py | Python | examples/complex/complex/commands/cmd_init.py | landonb/click-hotoffthehamster | 8cdf6be108ab455747e60d72ca25b2d989596899 | [
"BSD-3-Clause"
] | null | null | null | examples/complex/complex/commands/cmd_init.py | landonb/click-hotoffthehamster | 8cdf6be108ab455747e60d72ca25b2d989596899 | [
"BSD-3-Clause"
] | null | null | null | examples/complex/complex/commands/cmd_init.py | landonb/click-hotoffthehamster | 8cdf6be108ab455747e60d72ca25b2d989596899 | [
"BSD-3-Clause"
] | 1 | 2020-03-28T18:57:15.000Z | 2020-03-28T18:57:15.000Z | from complex.cli import pass_environment
import click_hotoffthehamster as click
@click.command("init", short_help="Initializes a repo.")
@click.argument("path", required=False, type=click.Path(resolve_path=True))
@pass_environment
def cli(ctx, path):
"""Initializes a repository."""
if path is None:
path = ctx.home
ctx.log("Initialized the repository in %s", click.format_filename(path))
| 29.428571 | 76 | 0.735437 |
c14ff128c5d8cb1481e4cc0e00897536e22cf8ea | 960 | py | Python | main.py | gabryatfendor/PyEscape | 3b375252466d380de3f516ebfa39695f1ee8287e | [
"MIT"
] | 1 | 2016-08-13T07:23:19.000Z | 2016-08-13T07:23:19.000Z | main.py | gabryatfendor/PyEscape | 3b375252466d380de3f516ebfa39695f1ee8287e | [
"MIT"
] | 4 | 2016-08-11T14:57:04.000Z | 2018-05-18T09:59:34.000Z | main.py | gabryatfendor/PyEscape | 3b375252466d380de3f516ebfa39695f1ee8287e | [
"MIT"
] | null | null | null | """File containing the main game loop"""
import pygame
from modules.game import Game, Menu
from modules.screen import Screen
from modules.graphics import Color
from modules.audio import Music
def main():
    """Main method containing initialization and game loop"""
    # One-time pygame/window setup.
    pygame.init()
    Screen.display_surface.fill(Color.WHITE)
    pygame.display.set_caption('PyRPG')

    #Music starts (looping, at 40% volume)
    pygame.mixer.init()
    Music.play_music(Music.background_music, True, 0.4)

    #Draw the main menu
    Menu.main_menu()

    replay = True

    #Main Game loop: run one full game per iteration, then ask to replay.
    # NOTE(review): events are polled only once per game; if neither Y nor N
    # is in the queue after game_setup() returns, a new game starts
    # immediately — confirm game_setup() blocks until the replay prompt.
    while replay:
        game_object = Game()
        game_object.game_setup()
        events = pygame.event.get()
        for event in events:
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_y:
                    # Y: keep replay True and start another game.
                    pass
                elif event.key == pygame.K_n:
                    replay = False
                    break

if __name__ == '__main__':
    main()
| 25.263158 | 61 | 0.613542 |
6c06adf279809d49dbed494e5ff0447925debe50 | 1,057 | py | Python | examples/pylab_examples/scatter_demo2.py | pierre-haessig/matplotlib | 0d945044ca3fbf98cad55912584ef80911f330c6 | [
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | 3 | 2015-11-16T07:22:28.000Z | 2016-11-11T17:55:14.000Z | examples/pylab_examples/scatter_demo2.py | pierre-haessig/matplotlib | 0d945044ca3fbf98cad55912584ef80911f330c6 | [
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | examples/pylab_examples/scatter_demo2.py | pierre-haessig/matplotlib | 0d945044ca3fbf98cad55912584ef80911f330c6 | [
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | 3 | 2017-05-31T01:42:22.000Z | 2020-06-23T13:57:49.000Z | """
make a scatter plot with varying color and size arguments
"""
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.cbook as cbook
# load a numpy record array from yahoo csv data with fields date,
# open, close, volume, adj_close from the mpl-data/example directory.
# The record array stores python datetime.date as an object array in
# the date column
datafile = cbook.get_sample_data('goog.npy')
r = np.load(datafile).view(np.recarray)
r = r[-250:] # get the most recent 250 trading days
delta1 = np.diff(r.adj_close)/r.adj_close[:-1]
# size in points ^2
volume = (15*r.volume[:-2]/r.volume[0])**2
close = 0.003*r.close[:-2]/0.003*r.open[:-2]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(delta1[:-1], delta1[1:], c=close, s=volume, alpha=0.75)
#ticks = arange(-0.06, 0.061, 0.02)
#xticks(ticks)
#yticks(ticks)
ax.set_xlabel(r'$\Delta_i$', fontsize=20)
ax.set_ylabel(r'$\Delta_{i+1}$', fontsize=20)
ax.set_title('Volume and percent change')
ax.grid(True)
plt.show()
| 25.780488 | 69 | 0.717124 |
5ee5b3f41be194ffbd0b33b30db03254bb8c7f1b | 390 | py | Python | pysperf/model_library.py | ZedongPeng/pysperf | 9d8536c56aee8508ffa142369b1ab7e3d88baaac | [
"BSD-2-Clause"
] | null | null | null | pysperf/model_library.py | ZedongPeng/pysperf | 9d8536c56aee8508ffa142369b1ab7e3d88baaac | [
"BSD-2-Clause"
] | null | null | null | pysperf/model_library.py | ZedongPeng/pysperf | 9d8536c56aee8508ffa142369b1ab7e3d88baaac | [
"BSD-2-Clause"
] | null | null | null | """
This file imports `__all__` from the models directory, thus populating the model registry.
It also runs 'compute_model_stats()' on the model library.
This can be an expensive operation, if the model info is not already cached.
"""
from .model_library_tools import compute_model_stats
from pysperf.models import *
from .config import models
# Populate statistics for every model imported into the registry above.
# This can be expensive when the model info cache is cold (see module docstring).
compute_model_stats()

# Public API: only the model registry is re-exported.
__all__ = ['models']
| 27.857143 | 90 | 0.784615 |
ab7ec611d3a0846476cab2e31152a14407d21fc5 | 4,652 | py | Python | MeasureVAE/encoder.py | RetroCirce/Music-SketchNet | 40b45a658414703b7583e25a2c41e753c4a61e81 | [
"CC0-1.0"
] | 57 | 2020-08-01T04:08:36.000Z | 2022-02-21T10:13:56.000Z | MeasureVAE/encoder.py | RetroCirce/Music-SketchNet | 40b45a658414703b7583e25a2c41e753c4a61e81 | [
"CC0-1.0"
] | 3 | 2020-10-09T11:37:28.000Z | 2021-09-08T02:24:50.000Z | MeasureVAE/encoder.py | RetroCirce/Music-SketchNet | 40b45a658414703b7583e25a2c41e753c4a61e81 | [
"CC0-1.0"
] | 8 | 2020-08-05T12:32:45.000Z | 2022-03-22T02:23:10.000Z | import torch
from torch.distributions import Normal
from torch import nn
from utils.helpers import *
class Encoder(nn.Module):
    """Recurrent encoder of MeasureVAE.

    Embeds a measure of note tokens, runs it through an RNN, and maps the
    final hidden states to the parameters of a diagonal Normal latent
    distribution N(z_mean, exp(z_log_std)).
    """

    def __init__(self,
                 note_embedding_dim,
                 rnn_hidden_size,
                 num_layers,
                 num_notes,
                 dropout,
                 bidirectional,
                 z_dim,
                 rnn_class):
        """
        :param note_embedding_dim: int, size of each note embedding vector
        :param rnn_hidden_size: int, hidden size of each recurrent layer
        :param num_layers: int, number of stacked recurrent layers
        :param num_notes: int, vocabulary size of the note tokens
        :param dropout: float, dropout probability between recurrent layers
        :param bidirectional: bool, whether the RNN is bidirectional
        :param z_dim: int, dimensionality of the latent space
        :param rnn_class: torch.nn recurrent module class used to build
            the network. NOTE(review): hidden_init()/forward() hand the RNN
            a single hidden tensor, which fits nn.GRU/nn.RNN; nn.LSTM would
            need an (h0, c0) tuple — confirm only GRU-style classes are used.
        """
        super(Encoder, self).__init__()
        self.bidirectional = bidirectional
        self.num_directions = 2 if bidirectional else 1
        self.note_embedding_dim = note_embedding_dim
        self.num_layers = num_layers
        self.rnn_hidden_size = rnn_hidden_size
        self.z_dim = z_dim
        self.dropout = dropout
        self.rnn_class = rnn_class
        # NOTE(review): debug print left in; consider removing or logging.
        print("embedding:",note_embedding_dim)
        # Despite the attribute name, this is whatever rnn_class was passed.
        self.lstm = self.rnn_class(
            input_size=int(note_embedding_dim),
            hidden_size=rnn_hidden_size,
            num_layers=num_layers,
            dropout=self.dropout,
            bidirectional=self.bidirectional,
            batch_first=True
        )

        self.num_notes = num_notes
        self.note_embedding_layer = nn.Embedding(self.num_notes,
                                                 self.note_embedding_dim)
        # Both heads consume the concatenation of every layer's (and
        # direction's) final hidden state.
        self.linear_mean = nn.Sequential(
            nn.Linear(self.rnn_hidden_size * self.num_directions * self.num_layers,
                      self.rnn_hidden_size * self.num_directions),
            nn.SELU(),
            nn.Linear(self.rnn_hidden_size * self.num_directions, z_dim)
        )

        self.linear_log_std = nn.Sequential(
            nn.Linear(self.rnn_hidden_size * self.num_directions * self.num_layers,
                      self.rnn_hidden_size * self.num_directions),
            nn.SELU(),
            nn.Linear(self.rnn_hidden_size * self.num_directions, z_dim)
        )

        self.xavier_initialization()

    def __repr__(self):
        """
        String Representation of class
        :return: string, class representation
        """
        return f'Encoder(' \
               f'{self.note_embedding_dim},' \
               f'{self.rnn_class},' \
               f'{self.num_layers},' \
               f'{self.rnn_hidden_size},' \
               f'{self.dropout},' \
               f'{self.bidirectional},' \
               f'{self.z_dim},' \
               f')'

    def xavier_initialization(self):
        """
        Initializes the network params
        :return:
        """
        # Xavier-normal init for every weight tensor; biases keep defaults.
        for name, param in self.named_parameters():
            if 'weight' in name:
                nn.init.xavier_normal_(param)

    def hidden_init(self, batch_size):
        """
        Initializes the hidden state of the encoder GRU
        :param batch_size: int
        :return: torch tensor,
               (self.num_encoder_layers x self.num_directions, batch_size, self.encoder_hidden_size)
        """
        hidden = torch.zeros(self.num_layers * self.num_directions,
                             batch_size,
                             self.rnn_hidden_size
                             )
        # to_cuda_variable comes from utils.helpers (star import above).
        return to_cuda_variable(hidden)

    def embed_forward(self, score_tensor):
        """
        Performs the forward pass of the embedding layer
        :param score_tensor: torch tensor,
                (batch_size, measure_seq_len)
        :return: torch tensor,
                (batch_size, measure_seq_len, embedding_size)
        """
        x = self.note_embedding_layer(score_tensor)
        return x

    def forward(self, score_tensor):
        """
        Performs the forward pass of the model, overrides torch method
        :param score_tensor: torch Variable
                (batch_size, measure_seq_len)
        :return: torch distribution
        """
        # Sanity check: abort if any weight has gone NaN during training.
        for name, param in self.named_parameters():
            if 'weight' in name:
                nan_check = torch.isnan(param.data)
                if nan_check.nonzero().size(0) > 0:
                    print('Encoder has become nan')
                    raise ValueError

        batch_size, measure_seq_len = score_tensor.size()

        # embed score
        embedded_seq = self.embed_forward(score_tensor=score_tensor)

        # pass through RNN (output sequence discarded; only final hidden used)
        hidden = self.hidden_init(batch_size)
        _, hidden = self.lstm(embedded_seq, hidden)
        # Flatten (num_layers * num_directions, batch, hidden) to
        # (batch, num_layers * num_directions * hidden) for the linear heads.
        hidden = hidden.transpose(0, 1).contiguous()
        hidden = hidden.view(batch_size, -1)

        # compute distribution parameters
        z_mean = self.linear_mean(hidden)
        z_log_std = self.linear_log_std(hidden)

        z_distribution = Normal(loc=z_mean, scale=torch.exp(z_log_std))
        return z_distribution
| 34.205882 | 100 | 0.571797 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.